| code (string, 3 to 1.05M chars) | repo_name (string, 5 to 104 chars) | path (string, 4 to 251 chars) | language (1 class) | license (15 classes) | size (int64, 3 to 1.05M) |
|---|---|---|---|---|---|
import collections
from supriya import CalculationRate
from supriya.ugens.MultiOutUGen import MultiOutUGen
class PanAz(MultiOutUGen):
"""
A multi-channel equal-power panner.
::
>>> source = supriya.ugens.In.ar(bus=0)
>>> pan_az = supriya.ugens.PanAz.ar(
... channel_count=8,
... amplitude=1,
... orientation=0.5,
... position=0,
... source=source,
... width=2,
... )
>>> pan_az
UGenArray({8})
"""
### CLASS VARIABLES ###
__documentation_section__ = "Spatialization UGens"
_default_channel_count = 1
_has_settable_channel_count = True
_ordered_input_names = collections.OrderedDict(
[
("source", None),
("position", 0),
("amplitude", 1),
("width", 2),
("orientation", 0.5),
]
)
_valid_calculation_rates = (CalculationRate.AUDIO, CalculationRate.CONTROL)
| Pulgama/supriya | supriya/ugens/PanAz.py | Python | mit | 1,001 |
#!/usr/bin/python
# coding: UTF-8
HIRAGANA = list(u'ぁあぃいぅうぇえぉおかがきぎくぐけげこごさざしじすずせぜそぞただちぢっつづてでとどなにぬねのはばぱひびぴふぶぷへべぺほぼぽまみむめもゃやゅゆょよらりるれろわをんゎゐゑゔゕゖ')
HALF_KANA = [u'ァ',u'ア',u'ィ',u'イ',u'ゥ',u'ウ',u'ェ',u'エ',u'ォ',u'オ',u'カ',u'ガ',u'キ',u'ギ',u'ク',u'グ',u'ケ',u'ゲ',u'コ',u'ゴ',u'サ',u'ザ',u'シ',u'ジ',u'ス',u'ズ',u'セ',u'ゼ',u'ソ',u'ゾ',u'タ',u'ダ',u'チ',u'ヂ',u'ッ',u'ツ',u'ヅ',u'テ',u'デ',u'ト',u'ド',u'ナ',u'ニ',u'ヌ',u'ネ',u'ノ',u'ハ',u'バ',u'パ',u'ヒ',u'ビ',u'ピ',u'フ',u'ブ',u'プ',u'ヘ',u'ベ',u'ペ',u'ホ',u'ボ',u'ポ',u'マ',u'ミ',u'ム',u'メ',u'モ',u'ャ',u'ヤ',u'ュ',u'ユ',u'ョ',u'ヨ',u'ラ',u'リ',u'ル',u'レ',u'ロ',u'ワ',u'ヲ',u'ン',u'ヮ',u'ヰ',u'ヱ',u'ヴ',u'ヵ',u'ヶ']
H2HK = dict(zip(HIRAGANA, HALF_KANA))
def _update_ignorechar(ignore, conv_hash):
for character in ignore:
conv_hash[character] = character
return conv_hash
def kana2hankana(text):
    h2hk_hash = _update_ignorechar('', H2HK)
    return ''.join([h2hk_hash.get(character, character) for character in text])
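# A minimal usage sketch (the sample string is illustrative only, not part of
# the original module): kana2hankana maps each hiragana character through the
# H2HK table and leaves any other character untouched.
#
# >>> print(kana2hankana(u'こんにちは'))
# コンニチハ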
| ktrueda/UrbanoContactsConverter | kana.py | Python | mit | 1,310 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import os
import shutil
from tests import TestCase, mkdtemp
from gi.repository import Gtk
from quodlibet import config
from quodlibet.formats import AudioFile
from quodlibet.player.nullbe import NullPlayer
from quodlibet.qltk.tracker import SongTracker, FSInterface
from quodlibet.library import SongLibrary
class TSongTracker(TestCase):
def setUp(self):
config.init()
self.p = NullPlayer()
self.w = SongLibrary()
self.s1 = AudioFile(
{"~#playcount": 0, "~#skipcount": 0, "~#lastplayed": 10,
"~filename": "foo", "~#length": 1.5})
self.s2 = AudioFile(
{"~#playcount": 0, "~#skipcount": 0, "~#lastplayed": 10,
"~filename": "foo", "~#length": 1.5})
self.cm = SongTracker(self.w, self.p, self)
self.current = None
def do(self):
while Gtk.events_pending():
Gtk.main_iteration()
def test_destroy(self):
self.cm.destroy()
def test_play(self):
import time
        # Allow at least 2 seconds to elapse to simulate playing
self.p.song = self.s1
self.p.paused = False
time.sleep(2)
self.do()
self.p.emit('song-ended', self.s1, False)
self.do()
t = time.time()
self.assertEquals(self.s1["~#playcount"], 1)
self.assertEquals(self.s1["~#skipcount"], 0)
self.failUnless(t - self.s1["~#lastplayed"] <= 1)
def test_skip(self):
self.p.emit('song-ended', self.s1, True)
self.do()
self.assertEquals(self.s1["~#playcount"], 0)
self.assertEquals(self.s1["~#skipcount"], 1)
        self.assertEqual(self.s1["~#lastplayed"], 10)
def test_error(self):
self.current = self.p.song = self.s1
self.p._error('Test error')
self.do()
self.assertEquals(self.s1["~#playcount"], 0)
self.assertEquals(self.s1["~#skipcount"], 0)
        self.assertEqual(self.s1["~#lastplayed"], 10)
def test_restart(self):
self.current = self.s1
self.p.emit('song-ended', self.s1, True)
self.do()
self.assertEquals(self.s1["~#playcount"], 0)
self.assertEquals(self.s1["~#skipcount"], 0)
def tearDown(self):
self.w.destroy()
config.quit()
class TFSInterface(TestCase):
def setUp(self):
self.p = NullPlayer()
self.dir = mkdtemp()
self.filename = os.path.join(self.dir, "foo")
self.fs = FSInterface(self.filename, self.p)
def tearDown(self):
self.p.destroy()
shutil.rmtree(self.dir)
def do(self):
while Gtk.events_pending():
Gtk.main_iteration()
def test_init(self):
self.do()
self.failIf(os.path.exists(self.filename))
def test_start(self):
self.p.emit('song_started', AudioFile({"woo": "bar", "~#length": 10}))
self.do()
with open(self.filename, "rb") as h:
self.failUnless(b"woo=bar\n" in h.read())
def test_song_ended(self):
self.p.emit('song-started', AudioFile({"woo": "bar", "~#length": 10}))
self.do()
self.p.emit('song-ended', {}, False)
self.do()
self.failIf(os.path.exists(self.filename))
| ptitjes/quodlibet | tests/test_qltk_tracker.py | Python | gpl-2.0 | 3,475 |
# Copyright 2019 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handles the uploading of result CSV to BigQuery."""
import re
import sys
import utils.logger as logger
from absl import app
from absl import flags
from google.cloud import bigquery
def upload_to_bigquery(csv_file_path, project_id, dataset_id, table_id,
location):
"""Uploads the csv file to BigQuery.
Takes the configuration from GOOGLE_APPLICATION_CREDENTIALS.
Args:
csv_file_path: the path to the csv to be uploaded.
project_id: the BigQuery project id.
dataset_id: the BigQuery dataset id.
table_id: the BigQuery table id.
location: the BigQuery table's location.
"""
logger.log('Uploading the data to bigquery.')
client = bigquery.Client(project=project_id)
dataset_ref = client.dataset(dataset_id)
table_ref = dataset_ref.table(table_id)
job_config = bigquery.LoadJobConfig()
job_config.source_format = bigquery.SourceFormat.CSV
job_config.skip_leading_rows = 1
job_config.autodetect = False
# load table to get schema
table = client.get_table(table_ref)
job_config.schema = table.schema
with open(str(csv_file_path), 'rb') as source_file:
job = client.load_table_from_file(
source_file, table_ref, location=location, job_config=job_config)
try:
job.result() # Waits for table load to complete.
except Exception:
print('Uploading failed with: %s' % str(job.errors))
sys.exit(-1)
logger.log('Uploaded {} rows into {}:{}.'.format(job.output_rows, dataset_id,
table_id))
FLAGS = flags.FLAGS
flags.DEFINE_string('upload_to_bigquery', None,
'The details of the BigQuery table to upload ' \
'results to: <project_id>:<dataset_id>:<table_id>:<location>')
def main(argv):
  if not re.match(r'^[\w-]+:[\w-]+:[\w-]+:[\w-]+$', FLAGS.upload_to_bigquery):
raise ValueError('--upload_to_bigquery should follow the pattern '
'<project_id>:<dataset_id>:<table_id>:<location>.')
# Discard the first argument.
csv_files_to_upload = argv[1:]
project_id, dataset_id, table_id, location = FLAGS.upload_to_bigquery.split(
':')
for filename in csv_files_to_upload:
upload_to_bigquery(filename, project_id, dataset_id, table_id, location)
if __name__ == '__main__':
app.run(main)
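# A hypothetical invocation sketch (project, dataset, table and CSV names are
# placeholders): --upload_to_bigquery packs project, dataset, table and
# location into one colon-separated value; the remaining arguments are the
# CSV files to upload.
#
#   python utils/bigquery_upload.py \
#       --upload_to_bigquery=my-project:benchmarks:results:US \
#       out/run1.csv out/run2.csv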
| bazelbuild/bazel-bench | utils/bigquery_upload.py | Python | apache-2.0 | 2,925 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import integrated_helpers
class TestMultiattachVolumes(integrated_helpers._IntegratedTestBase,
integrated_helpers.InstanceHelperMixin):
"""Functional tests for creating a server from a multiattach volume
and attaching a multiattach volume to a server.
Uses the CinderFixtureNewAttachFlow fixture with a specific volume ID
to represent a multiattach volume.
"""
# These are all used in _IntegratedTestBase.
USE_NEUTRON = True
api_major_version = 'v2.1'
microversion = '2.60'
_image_ref_parameter = 'imageRef'
_flavor_ref_parameter = 'flavorRef'
def setUp(self):
# Everything has been upgraded to the latest code to support
# multiattach.
self.useFixture(nova_fixtures.AllServicesCurrent())
super(TestMultiattachVolumes, self).setUp()
self.useFixture(nova_fixtures.CinderFixtureNewAttachFlow(self))
self.useFixture(nova_fixtures.NeutronFixture(self))
def test_boot_from_volume_and_attach_to_second_server(self):
"""This scenario creates a server from the multiattach volume, waits
for it to be ACTIVE, and then attaches the volume to another server.
"""
volume_id = nova_fixtures.CinderFixtureNewAttachFlow.MULTIATTACH_VOL
create_req = self._build_server(flavor_id='1', image='')
create_req['networks'] = 'none'
create_req['block_device_mapping_v2'] = [{
'uuid': volume_id,
'source_type': 'volume',
'destination_type': 'volume',
'delete_on_termination': False,
'boot_index': 0
}]
server = self.api.post_server({'server': create_req})
self._wait_for_state_change(self.api, server, 'ACTIVE')
# Make sure the volume is attached to the first server.
attachments = self.api.api_get(
'/servers/%s/os-volume_attachments' % server['id']).body[
'volumeAttachments']
self.assertEqual(1, len(attachments))
self.assertEqual(server['id'], attachments[0]['serverId'])
self.assertEqual(volume_id, attachments[0]['volumeId'])
# Now create a second server and attach the same volume to that.
create_req = self._build_server(
flavor_id='1', image='155d900f-4e14-4e4c-a73d-069cbf4541e6')
create_req['networks'] = 'none'
server2 = self.api.post_server({'server': create_req})
self._wait_for_state_change(self.api, server2, 'ACTIVE')
# Attach the volume to the second server.
self.api.api_post('/servers/%s/os-volume_attachments' % server2['id'],
{'volumeAttachment': {'volumeId': volume_id}})
# Make sure the volume is attached to the second server.
attachments = self.api.api_get(
'/servers/%s/os-volume_attachments' % server2['id']).body[
'volumeAttachments']
self.assertEqual(1, len(attachments))
self.assertEqual(server2['id'], attachments[0]['serverId'])
self.assertEqual(volume_id, attachments[0]['volumeId'])
| phenoxim/nova | nova/tests/functional/test_multiattach.py | Python | apache-2.0 | 3,741 |
# Copyright 2015 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the annotation of disk objects with metadata."""
import unittest
import mock
from perfkitbenchmarker import benchmark_spec
from perfkitbenchmarker import context
from perfkitbenchmarker import disk
from perfkitbenchmarker import virtual_machine
from perfkitbenchmarker.providers.aws import aws_disk
from perfkitbenchmarker.providers.aws import aws_virtual_machine
from perfkitbenchmarker.providers.azure import azure_disk
from perfkitbenchmarker.providers.azure import flags as azure_flags
from perfkitbenchmarker.providers.azure import azure_virtual_machine
from perfkitbenchmarker.providers.gcp import gce_disk
class GcpDiskMetadataTest(unittest.TestCase):
def testPDStandard(self):
disk_spec = disk.BaseDiskSpec(
disk_size=2,
disk_type=gce_disk.PD_STANDARD,
mount_point=None)
disk_obj = gce_disk.GceDisk(
disk_spec,
'name',
'zone',
'project')
self.assertEquals(disk_obj.metadata,
{disk.MEDIA: disk.HDD,
disk.REPLICATION: disk.ZONE,
disk.LEGACY_DISK_TYPE: disk.STANDARD})
class AwsDiskMetadataTest(unittest.TestCase):
def doAwsDiskTest(self, disk_type, machine_type,
goal_media, goal_replication, goal_legacy_disk_type):
disk_spec = aws_disk.AwsDiskSpec(
disk_size=2,
disk_type=disk_type,
mount_point=None)
context.SetThreadBenchmarkSpec(benchmark_spec.BenchmarkSpec(
{}, 'name', 'uid'))
vm_spec = virtual_machine.BaseVmSpec(
zone='us-east-1a',
machine_type=machine_type)
vm = aws_virtual_machine.DebianBasedAwsVirtualMachine(
vm_spec)
vm.CreateScratchDisk(disk_spec)
self.assertEqual(vm.scratch_disks[0].metadata,
{disk.MEDIA: goal_media,
disk.REPLICATION: goal_replication,
disk.LEGACY_DISK_TYPE: goal_legacy_disk_type})
def testLocalSSD(self):
self.doAwsDiskTest(
disk.LOCAL,
'c3.2xlarge',
disk.SSD,
disk.NONE,
disk.LOCAL)
def testLocalHDD(self):
self.doAwsDiskTest(
disk.LOCAL,
'd2.2xlarge',
disk.HDD,
disk.NONE,
disk.LOCAL)
class AzureDiskMetadataTest(unittest.TestCase):
def doAzureDiskTest(self, storage_type, disk_type, machine_type,
goal_media, goal_replication, goal_legacy_disk_type):
with mock.patch(azure_disk.__name__ + '.FLAGS') as disk_flags:
disk_flags.azure_storage_type = storage_type
disk_spec = disk.BaseDiskSpec(
disk_size=2,
disk_type=disk_type,
mount_point=None)
context.SetThreadBenchmarkSpec(benchmark_spec.BenchmarkSpec(
{}, 'name', 'uid'))
vm_spec = virtual_machine.BaseVmSpec(
zone='East US 2',
machine_type=machine_type)
vm = azure_virtual_machine.DebianBasedAzureVirtualMachine(
vm_spec)
azure_disk.AzureDisk.Create = mock.Mock()
azure_disk.AzureDisk.Attach = mock.Mock()
vm.CreateScratchDisk(disk_spec)
self.assertEqual(vm.scratch_disks[0].metadata,
{disk.MEDIA: goal_media,
disk.REPLICATION: goal_replication,
disk.LEGACY_DISK_TYPE: goal_legacy_disk_type})
def testPremiumStorage(self):
self.doAzureDiskTest(azure_flags.PLRS,
azure_disk.PREMIUM_STORAGE,
'Standard_D1',
disk.SSD,
disk.ZONE,
disk.REMOTE_SSD)
def testStandardDisk(self):
self.doAzureDiskTest(azure_flags.ZRS,
azure_disk.STANDARD_DISK,
'Standard_D1',
disk.HDD,
disk.REGION,
disk.STANDARD)
def testLocalHDD(self):
self.doAzureDiskTest(azure_flags.LRS,
disk.LOCAL,
'Standard_A1',
disk.HDD,
disk.NONE,
disk.LOCAL)
def testLocalSSD(self):
self.doAzureDiskTest(azure_flags.LRS,
disk.LOCAL,
'Standard_DS2',
disk.SSD,
disk.NONE,
disk.LOCAL)
| akshara775/PerfKitBenchmarker-master-2 | tests/disk_metadata_test.py | Python | apache-2.0 | 5,019 |
"""
Proxy User and Role models.
"""
import base64
import hashlib
from django.db.models import Q
from django.contrib.auth.backends import ModelBackend as DjangoModelBackend
# Permission is imported solely so other places can import it from here
from django.contrib.auth.models import User as BaseUser, Group, Permission
from django_browserid.auth import BrowserIDBackend as BaseBrowserIDBackend
from preferences import preferences
from registration.models import RegistrationProfile
from registration.signals import user_registered
# monkeypatch the User model to ensure unique email addresses
BaseUser._meta.get_field("email")._unique = True
class User(BaseUser):
"""Proxy for contrib.auth User that adds action methods and roles alias."""
class Meta:
proxy = True
def delete(self, user=None):
"""
Delete this user.
We ignore the passed-in user since User is not a MTModel and doesn't
track created_by / modified_by.
We have to delete registration profiles manually, to avoid
https://code.djangoproject.com/ticket/16128.
"""
        if (self.is_superuser and self.is_active and
                User.objects.filter(is_superuser=True).filter(is_active=True).count() == 1):
return
# @@@ Django ticket 16128, hopefully fixed in 1.4?
# RegistrationProfile's FK is to Django's user model, not ours
RegistrationProfile.objects.filter(user=self).delete()
super(User, self).delete()
def activate(self, user=None):
"""Activate this user."""
self.is_active = True
self.save(force_update=True)
def deactivate(self, user=None):
"""Deactivate this user."""
self.is_active = False
self.save(force_update=True)
def save(self, force_insert=False, force_update=False, using=None):
if (not force_insert and self.is_superuser and not self.is_active and
User.objects.filter(is_superuser=True).filter(is_active=True).count() == 1):
from django.shortcuts import get_object_or_404
user = get_object_or_404(User, pk=self.id)
# check whether the user is exactly the last `active` superuser or not
if user.is_active:
self.is_active = True
super(User, self).save(force_insert=force_insert,
force_update=force_update, using=using)
@property
def roles(self):
"""Maps our name (roles) to Django name (groups)."""
return self.groups
Role = Group
class ModelBackend(DjangoModelBackend):
"""Accepts username or email and returns our proxy User model."""
def authenticate(self, username=None, password=None):
"""Return User for given credentials, or None."""
candidates = User.objects.filter(Q(username=username) | Q(email=username))
for user in candidates:
if user.check_password(password):
return user
return None
def get_user(self, user_id):
"""Return User for given ID, or None."""
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
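# A hypothetical settings sketch (not part of this module): wiring the proxy
# ModelBackend above into Django so either a username or an email address is
# accepted at login. The dotted path assumes this file lives at
# moztrap/model/core/auth.py, as the repo layout suggests.
#
#   AUTHENTICATION_BACKENDS = [
#       "moztrap.model.core.auth.ModelBackend",
#   ]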
class BrowserIDBackend(BaseBrowserIDBackend):
"""BrowserID backend that returns our proxy user."""
def filter_users_by_email(self, email):
"""Return all users matching the specified email."""
return User.objects.filter(email=email)
def get_user(self, user_id):
"""Return User for given ID, or None."""
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
AUTO_USERNAME_PREFIX = ":auto:"
USERNAME_MAX_LENGTH = User._meta.get_field("username").max_length
DIGEST_LENGTH = USERNAME_MAX_LENGTH - len(AUTO_USERNAME_PREFIX)
def browserid_create_user(email):
"""Create and return a new User for a new BrowserID login."""
digest = base64.urlsafe_b64encode(hashlib.sha1(email).digest())
username = AUTO_USERNAME_PREFIX + digest[:DIGEST_LENGTH]
user = User.objects.create_user(username=username, email=email)
add_new_user_role(user)
return user
def add_new_user_role(user, **kwargs):
role = preferences.CorePreferences.default_new_user_role
if role is not None:
# Have to use groups, not roles, because registration doesn't send our
# proxy User with its signal.
user.groups.add(role)
user_registered.connect(add_new_user_role)
| peterbe/moztrap | moztrap/model/core/auth.py | Python | bsd-2-clause | 4,492 |
###############################################################################
# #
# Copyright 2019. Triad National Security, LLC. All rights reserved. #
# This program was produced under U.S. Government contract 89233218CNA000001 #
# for Los Alamos National Laboratory (LANL), which is operated by Triad #
# National Security, LLC for the U.S. Department of Energy/National Nuclear #
# Security Administration. #
# #
# All rights in the program are reserved by Triad National Security, LLC, and #
# the U.S. Department of Energy/National Nuclear Security Administration. The #
# Government is granted for itself and others acting on its behalf a #
# nonexclusive, paid-up, irrevocable worldwide license in this material to #
# reproduce, prepare derivative works, distribute copies to the public, #
# perform publicly and display publicly, and to permit others to do so. #
# #
###############################################################################
"""
@note: This test is not set up to use proxies.
@change: 2016/02/10 roy Added sys.path.append for being able to unit test this
file as well as with the test harness.
@change: 2016/02/10 roy Added functionality to testInstallPkg test
@change: 2016/08/30 eball Added conditional to SkipTest for Python < 2.7
@author: Roy Nielsen
"""
import os
import re
import sys
import ctypes
import shutil
import unittest
sys.path.append("../../../..")
from src.stonix_resources.localize import MACREPOROOT
from src.stonix_resources.macpkgr import MacPkgr
from src.stonix_resources.environment import Environment
from src.stonix_resources.CommandHelper import CommandHelper
from src.stonix_resources.Connectivity import Connectivity
from src.tests.lib.logdispatcher_lite import LogDispatcher, LogPriority
class NotApplicableToThisOS(Exception):
'''Custom Exception'''
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
@unittest.skipUnless(sys.platform.startswith("darwin"), "MacPkgr does not support this OS family")
class zzzTestFrameworkMacPkgr(unittest.TestCase):
'''Class for testing the macpkgr.'''
@classmethod
def setUpClass(self):
''' '''
self.environ = Environment()
self.logger = LogDispatcher(self.environ)
self.osfamily = self.environ.getosfamily()
self.logger.log(LogPriority.DEBUG, "##################################")
self.logger.log(LogPriority.DEBUG, "### OS Family: " + str(self.osfamily))
self.logger.log(LogPriority.DEBUG, "##################################")
self.libc = ctypes.CDLL("/usr/lib/libc.dylib")
self.logger = LogDispatcher(self.environ)
self.macPackageName = "testStonixMacPkgr-0.0.3.pkg"
self.reporoot = MACREPOROOT
#####
# Create a class variable that houses the whole URL
if self.reporoot.endswith("/"):
self.pkgUrl = self.reporoot + self.macPackageName
else:
self.pkgUrl = self.reporoot + "/" + self.macPackageName
message = "self.pkgUrl: " + str(self.pkgUrl)
self.pkgr = MacPkgr(self.environ, self.logger)
self.pkg_dirs = ["/tmp/testStonixMacPkgr-0.0.3/one/two/three/3.5", \
"/tmp/testStonixMacPkgr-0.0.3/one/two/three", \
"/tmp/testStonixMacPkgr-0.0.3/one/two", \
"/tmp/testStonixMacPkgr-0.0.3/one", \
"/tmp/testStonixMacPkgr-0.0.3/one/two/four/five", \
"/tmp/testStonixMacPkgr-0.0.3/one/two/four", \
"/tmp/testStonixMacPkgr-0.0.3/one/two", \
"/tmp/testStonixMacPkgr-0.0.3/one/six/seven"]
self.pkg_files = ["/tmp/testStonixMacPkgr-0.0.3/one/two/testfile1", \
"/tmp/testStonixMacPkgr-0.0.3/one/two/four/five/testfile2", \
"/tmp/testStonixMacPkgr-0.0.3/one/testfile3", \
"/tmp/testStonixMacPkgr-0.0.3/one/testfile4", \
"/tmp/testStonixMacPkgr-0.0.3/one/six/seven/testfile"]
self.post_files = ["/tmp/testStonixMacPkgr-0.0.3/one/postfile2", \
"/tmp/testStonixMacPkgr-0.0.3/one/two/three/3.5/postfile3"]
self.post_dirs = ["/tmp/testStonixMacPkgr-0.0.3/one/six/6.5"]
self.all_files = [self.pkg_files, self.post_files]
self.all_dirs = [self.pkg_dirs, self.post_dirs]
self.allowed_files_and_dirs = [self.pkg_dirs,
self.pkg_dirs,
self.post_dirs]
self.ch = CommandHelper(self.logger)
self.connection = Connectivity(self.logger)
self.testDomain = "gov.lanl.testStonixMacPkgr.0.0.3.testStonixMacPkgr"
############################################################################
"""
def setUp(self):
self.osfamily = self.environ.getosfamily()
if re.match("^macosx$", self.osfamily.strip()):
        myos = self.environ.getosfamily()
raise unittest.SkipTest("RamDisk does not support this OS" + \
" family: " + str(myos))
"""
############################################################################
@classmethod
def tearDownClass(self):
'''Make sure the appropriate files are removed..'''
pass
############################################################################
def test_inLinearFlow(self):
'''Run methods or functionality that requires order, ie a happens before b
Like ensure a package is installed before testing if uninstall works.
@author: Roy Nielsen
'''
if sys.version_info < (2, 7):
return
if not self.connection.isPageAvailable():
self.logger.log(LogPriority.INFO, "This test fails without a " + \
"properly configured Mac " + \
"repository, so we are not " + \
"running actual tests...")
else:
#####
# Remove the package in case it is installed, so we have a sane,
# consistent starting point for the test.
self.removeCompletePackage()
#####
# Install the package
self.assertTrue(self.pkgr.installPackage(self.macPackageName),
"Problem with pkgr.installpackage...")
#####
# Use the macpkgr method to check if the package is installed
self.assertTrue(self.pkgr.checkInstall(self.macPackageName),
"Problem with pkgr.checkInstall...")
#####
# Manual check to see if the package is installed
self.assertTrue(self.isInstalled(), "Problem with installation...")
#####
# Make sure it isn't a partial install...
self.assertTrue(self.isFullInstall(), "Partial install...")
#####
# Remove the package, assert that it worked.
self.assertTrue(self.pkgr.removePackage(self.macPackageName),
"Problem removing package...")
#####
# Check that checkInstall returns the correct value
self.assertFalse(self.pkgr.checkInstall(self.macPackageName),
"Problem with pkgr.checkinstall...")
#####
# Hand verify that self.pkgr.checkInstall worked.
self.assertTrue(self.isMissing(), "Problem with package removal...")
#####
# Remove any presence of the package installed.
self.removeCompletePackage()
############################################################################
def testCheckInstall(self):
'''Test the checkInstall method.
1 - make sure the test .pkg is NOT installed
2 - download the package and check the md5
3 - use custom installer command to install the package
4 - call the checkInstall method
@author: Roy Nielsen
'''
if sys.version_info < (2, 7):
return
if not self.connection.isPageAvailable():
self.logger.log(LogPriority.INFO, "This test fails without a " + \
"properly configured Mac " + \
"repository, so we are not " + \
"running actual tests...")
else:
#####
# make sure the test .pkg is NOT installed
self.pkgr.removePackage(self.macPackageName)
#####
# Test the checkInstall with the package removed
self.assertFalse(self.pkgr.checkInstall(self.macPackageName))
self.assertFalse(self.isInstalled())
#####
# Install the package
self.pkgr.installPackage(self.macPackageName)
#####
# run checkInstall again
self.assertTrue(self.pkgr.checkInstall(self.macPackageName))
self.assertTrue(self.isInstalled())
############################################################################
def testCheckAvailable(self):
'''Check if a package is available on the reporoot.
Must have both the file AND the md5 checksum file on the server of the
format:
.<filename>.<UPPER-md5sum>
Steps for this test:
1 - set up self.pkgr.pkgUrl
2 - run self.pkgr.downloadPackage
3 - Make sure the checksum matches, otherwise there is a bad md5
for the download, and the package should not be trusted, let alone
installed.
This covers two test cases -
checkAvailable
downloadPackage
@author: Roy Nielsen
'''
if sys.version_info < (2, 7):
return
if not self.connection.isPageAvailable():
self.logger.log(LogPriority.INFO, "This test fails without a " + \
"properly configured Mac " + \
"repository, so we are not " + \
"running actual tests...")
else:
self.assertTrue(self.reporoot + self.macPackageName)
self.pkgr.setPkgUrl(self.reporoot + self.macPackageName)
self.pkgr.package = self.macPackageName
self.assertTrue(self.pkgr.downloadPackage(), "Package: " + \
str(self.pkgr.getPkgUrl()) + " FAILED download...")
self.assertTrue(self.pkgr.checkMd5(), "MD5 checksum didn't match - " + \
"package: " + str(self.pkgr.hashUrl) + " is NOT " + \
"available...")
############################################################################
def testFindDomain(self):
'''Test the findDomain function. The domain is required to do a reverse
lookup in the local client package receipt database. It should find
all the files that have been installed by the PACKAGE, not the
postflight.
Will remove the test package if it exists, install the package then
use the test package to make sure the package file list is accurate.
@author: Roy Nielsen
'''
if sys.version_info < (2, 7):
return
if not self.connection.isPageAvailable():
self.logger.log(LogPriority.INFO, "This test fails without a " + \
"properly configured Mac " + \
"repository, so we are not " + \
"running actual tests...")
else:
#####
# Make sure the package is installed
self.pkgr.installPackage("testStonixMacPkgr-0.0.3.pkg")
#####
# Assert findDomain works properly when the package is installed
self.assertEqual(self.testDomain,
self.pkgr.findDomain("testStonixMacPkgr-0.0.3.pkg"))
############################################################################
def testUnArchive(self):
'''Download a tar package with the test pkg in it.
Will test doing a download and checksum of the following by downloading
the file and doing a checksum, then unzipping the file, and check
the internal filename:
testStonixMacPkgr.zip
@Note: *** Functionality needs approval ***
@author: Roy Nielsen
'''
pass
############################################################################
def testCopyInstall(self):
'''Tests the copyInstall method.
Will test by:
Downloading the test .tar file with a .app in it, doing a checksum of
the .tar file then performing a copyInstall.
Will test by checking the existence of the .app being in the right
place.
@author: Roy Nielsen
'''
pass
############################################################################
def testInstallPkg(self):
'''Tests the installPkg method.
Will:
Make sure the test pkg is not installed
Download and checksum the file.
install the .pkg with the installPkg method.
@author: Roy Nielsen
'''
if sys.version_info < (2, 7):
return
success = False
try:
#####
# make sure the test .pkg is NOT installed
self.pkgr.removePackage(self.macPackageName)
except:
pass
#####
# Check the URL for validity, or make sure we can get there..
if self.connection.isPageAvailable(self.pkgUrl):
#####
# Set the pkgurl in the package manager
self.pkgr.setPkgUrl(self.pkgUrl)
#####
# Download into a temporary directory
success = self.pkgr.downloadPackage()
if success:
#####
# Apple operating systems have a lazy attitude towards
# writing to disk - the package doesn't get fully
# written to disk until the following method is called.
# Otherwise when the downloaded package is further
# manipulated, (uncompressed or installed) the
# downloaded file is not there. There may be other
# ways to get python to do the filesystem sync...
try:
self.libc.sync()
except:
pass
#####
# Make sure the md5 of the file matches that of the
# server
if self.pkgr.checkMd5():
#####
# unarchive if necessary
compressed = [".tar", ".tar.gz", ".tgz",
".tar.bz", ".tbz", ".zip"]
for extension in compressed:
if self.pkgUrl.endswith(extension):
self.pkgr.unArchive()
try:
self.libc.sync()
except:
pass
#####
# install - if extension is a .pkg or .mpkg use the
# installer command
                    if self.pkgUrl.endswith(".pkg") or \
                            self.pkgUrl.endswith(".mpkg"):
success = self.pkgr.installPkg()
self.assertTrue(success)
else:
self.assertTrue(False)
else:
self.assertTrue(False)
else:
self.assertTrue(False)
else:
self.logger.log(LogPriority.INFO, "Not able to connect to server...")
self.assertTrue(True)
if success:
#####
# run checkInstall again
self.assertTrue(self.pkgr.checkInstall(self.macPackageName))
self.assertTrue(self.isInstalled())
try:
#####
# make sure the test .pkg is NOT installed
self.pkgr.removePackage(self.macPackageName)
except:
pass
############################################################################
def testIsMacPlatform(self):
'''Make sure we are on the Mac platform.
@author: Roy Nielsen
'''
if sys.version_info < (2, 7):
return
if not self.connection.isPageAvailable():
self.logger.log(LogPriority.INFO, "This test fails without a " + \
"properly configured Mac " + \
"repository, so we are not " + \
"running actual tests...")
else:
self.assertTrue(self.environ.osfamily == "darwin", "Wrong OS...")
############################################################################
def isFullInstall(self):
'''Make sure that all files and directories including those installed from
the package and the postinstall script exist.
@Note: In future, this should also do a receipt test as well. This
would include getting the files from the receipt and checking
for their existence and perhaps their permissions.
@author: Roy Nielsen
'''
files = self.doFilesExistTest(self.all_files)
dirs = self.doDirsExist(self.all_dirs)
if files and dirs:
return True
return False
############################################################################
def isInstalled(self):
'''Test to make sure just the files and directories installed by the
package are installed. Doesn't care about the files and directories
installed by the postinstall script.
@author: Roy Nielsen
'''
files = self.doFilesExistTest([self.pkg_files])
dirs = self.doDirsExist([self.pkg_dirs])
if files and dirs:
return True
return False
############################################################################
def isMissing(self):
'''Test to make sure all the files have been removed that were Installed
by the package. Ignore, but note directories installed by the package
that exist, as well as files and directories installed by the
postinstall script.
@Note: In future, this test should check for a package receipt, and
make sure the files in the package receipt do not exist. This
is only valid for this package, as in the case of some software,
like Adobe products, some of the files are shared libraries
between different products.
@author: Roy Nielsen
'''
removed = []
exists = []
#####
# Cycle through each subset of files in the
for myfile in self.pkg_files:
if os.path.isfile(myfile):
self.logger.log(LogPriority.WARNING, "File: " + \
str(myfile) + " exists...")
removed.append(False)
exists.append(myfile)
self.assertFalse(False in removed, "Some files exist: " + str(exists))
#####
# cycle through each set of directories in all_dirs
for myset in self.allowed_files_and_dirs:
#####
# Cycle through each subset of files in the
for myfile in myset:
if os.path.isdir(myfile):
self.logger.log(LogPriority.INFO, "Item: " + \
str(myfile) + " exists...")
if False in removed:
return False
return True
############################################################################
def removeCompletePackage(self):
'''Remove all files, used to set the stage for install tests.
@author: Roy Nielsen
'''
success = False
try:
testPath = "/tmp/testStonixMacPkgr-0.0.3"
if os.path.exists(testPath):
shutil.rmtree(testPath)
except Exception as err:
self.logger.log(LogPriority.INFO, "Test set already missing?")
raise err
else:
self.logger.log(LogPriority.INFO, "Removed test package " + \
"install set...")
success = True
#####
# If the rmtree directive above did not throw an exception, make the
# system "forget" the package
if success:
#####
# get the domain, so we can "forget" the package
domain = self.pkgr.findDomain(self.macPackageName)
#####
# Also need to remove the package receipt...
# use pkgutil --forget
cmd = ["/usr/sbin/pkgutil", "--forget", domain]
self.ch.executeCommand(cmd)
if not self.ch.getReturnCode() == 0:
success = False
return success
############################################################################
def doFilesExistTest(self, files=[False]):
'''Test the directories in the passed in list to see if they all exist.
@author: Roy Nielsen
:param files: (Default value = [False])
'''
not_installed = []
exists = []
#####
# cycle through each set of files in all_files
for myset in files:
#####
# Cycle through each subset of files in the
for myfile in myset:
if not os.path.isfile(myfile):
self.logger.log(LogPriority.WARNING, "File: " + \
str(myfile) + " does not exist...")
exists.append(False)
not_installed.append(str(myfile))
if False in exists:
message = "Not all files exist: " + str(not_installed)
self.logger.log(LogPriority.DEBUG, message)
return False
return True
############################################################################
def doDirsExist(self, dirs=[False]):
'''Check the directories in the passed in list to see if they all exist.
@author: Roy Nielsen
:param dirs: (Default value = [False])
'''
not_installed = []
exists = []
#####
# cycle through each set of directories in all_dirs
for myset in dirs:
#####
# Cycle through each subset of files in the
for mydir in myset:
if not os.path.isdir(mydir):
self.logger.log(LogPriority.WARNING, "Directory: " + \
str(mydir) + " does not exist...")
exists.append(False)
not_installed.append(str(mydir))
if False in exists:
message = "Not all files exist: " + str(not_installed)
self.logger.log(LogPriority.DEBUG, message)
return False
return True
| CSD-Public/stonix | src/tests/framework/network_tests/zzzTestFrameworkMacPkgr.py | Python | gpl-2.0 | 24,533 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-03-17 20:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='user',
name='last_seen_at',
field=models.DateTimeField(default=None, null=True),
),
]
| Compizfox/Inschrijflijst | app/migrations/0002_user_last_seen_at.py | Python | gpl-3.0 | 452 |
"""
You are a professional robber planning to rob houses along a street.
Each house has a certain amount of money stashed,
the only constraint stopping you from robbing each of them
is that adjacent houses have security system connected and
it will automatically contact the police if two adjacent houses
were broken into on the same night.
Given a list of non-negative integers representing the amount of money
of each house, determine the maximum amount of money you
can rob tonight without alerting the police.
"""
def house_robber(houses):
last, now = 0, 0
for house in houses:
last, now = now, max(last + house, now)
return now
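# A brief usage sketch (values are arbitrary, added for illustration): at each
# house the pair (last, now) holds the best haul excluding and including the
# previous house, so the final "now" is the answer.
#
# >>> house_robber([2, 7, 9, 3, 1])
# 12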
| keon/algorithms | algorithms/dp/house_robber.py | Python | mit | 656 |
# Shift type encodings according to section A8.4 of the ARM Architecture Reference Manual
LSL = 0x0
LSR = 0x1
ASR = 0x2
ROR = 0x3
RRX = 0x3 # with imm = 0
| oblique-labs/pyVM | rpython/jit/backend/arm/shift.py | Python | mit | 86 |
class TriggerCall(object):
PROCESS_TRIGGER = 'PROCESS_TRIGGER'
TRY_NEXT_ACTION = 'TRY_NEXT_ACTION'
COMPLETE_ACTION = 'COMPLETE_ACTION'
class SubscriptionCall(object):
GENERATE = 'GENERATE'
| RetailMeNotSandbox/dart | src/python/dart/message/call.py | Python | mit | 207 |
# This file can mimic jupyter running. Useful for testing jupyter crash handling
import sys
import argparse
import time
def main():
print("hello from dummy jupyter")
parser = argparse.ArgumentParser()
parser.add_argument("--version", type=bool, default=False, const=True, nargs="?")
parser.add_argument("notebook", type=bool, default=False, const=True, nargs="?")
parser.add_argument("--no-browser", type=bool, default=False, const=True, nargs="?")
parser.add_argument("--notebook-dir", default="")
parser.add_argument("--config", default="")
results = parser.parse_args()
if results.version:
print("1.1.dummy")
else:
print(
"http://localhost:8888/?token=012f08663a68e279fe0a5335e0b5dfe44759ddcccf0b3a56"
)
time.sleep(5)
raise Exception("Dummy is dead")
if __name__ == "__main__":
main()
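# A hypothetical invocation sketch: "--version" prints the fake version string
# and exits; otherwise the script prints a notebook URL, sleeps five seconds,
# then raises to simulate a Jupyter crash.
#
#   python dummyJupyter.py --version
#   python dummyJupyter.py notebook --no-browser --notebook-dir=/tmp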
| glenngillen/dotfiles | .vscode/extensions/ms-toolsai.jupyter-2021.6.832593372/pythonFiles/vscode_datascience_helpers/dummyJupyter.py | Python | mit | 889 |
# Copyright 2017 DT42
#
# This file is part of BerryNet.
#
# BerryNet is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BerryNet is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BerryNet. If not, see <http://www.gnu.org/licenses/>.
"""Engine service is a bridge between incoming data and inference engine.
"""
import argparse
import logging
from datetime import datetime
from berrynet import logger
from berrynet.comm import payload
from berrynet.dlmodelmgr import DLModelManager
from berrynet.engine.movidius_engine import MovidiusEngine
from berrynet.engine.movidius_engine import MovidiusMobileNetSSDEngine
from berrynet.service import EngineService
from berrynet.utils import draw_bb
from berrynet.utils import generate_class_color
class MovidiusClassificationService(EngineService):
def __init__(self, service_name, engine, comm_config):
super(MovidiusClassificationService, self).__init__(service_name,
engine,
comm_config)
def result_hook(self, generalized_result):
logger.debug('result_hook, annotations: {}'.format(generalized_result['annotations']))
self.comm.send('berrynet/engine/mvclassification/result',
payload.serialize_payload(generalized_result))
class MovidiusMobileNetSSDService(EngineService):
def __init__(self, service_name, engine, comm_config, draw=False):
super(MovidiusMobileNetSSDService, self).__init__(service_name,
engine,
comm_config)
self.draw = draw
def inference(self, pl):
        duration = lambda t: (datetime.now() - t).total_seconds() * 1000
t = datetime.now()
logger.debug('payload size: {}'.format(len(pl)))
logger.debug('payload type: {}'.format(type(pl)))
jpg_json = payload.deserialize_payload(pl.decode('utf-8'))
jpg_bytes = payload.destringify_jpg(jpg_json['bytes'])
logger.debug('destringify_jpg: {} ms'.format(duration(t)))
t = datetime.now()
bgr_array = payload.jpg2bgr(jpg_bytes)
logger.debug('jpg2bgr: {} ms'.format(duration(t)))
t = datetime.now()
image_data = self.engine.process_input(bgr_array)
output = self.engine.inference(image_data)
model_outputs = self.engine.process_output(output)
logger.debug('Result: {}'.format(model_outputs))
logger.debug('Detection takes {} ms'.format(duration(t)))
classes = self.engine.classes
labels = self.engine.labels
logger.debug('draw = {}'.format(self.draw))
if self.draw is False:
self.result_hook(self.generalize_result(jpg_json, model_outputs))
else:
self.result_hook(
draw_bb(bgr_array,
self.generalize_result(jpg_json, model_outputs),
generate_class_color(class_num=classes),
labels))
def result_hook(self, generalized_result):
logger.debug('result_hook, annotations: {}'.format(generalized_result['annotations']))
self.comm.send('berrynet/engine/mvmobilenetssd/result',
payload.serialize_payload(generalized_result))
def parse_args():
ap = argparse.ArgumentParser()
ap.add_argument('--model',
help='Model file path')
ap.add_argument('--label',
help='Label file path')
ap.add_argument('--model_package',
default='',
help='Model package name')
ap.add_argument('--service_name', required=True,
help='Valid value: Classification, MobileNetSSD')
ap.add_argument('--num_top_predictions', default=5,
help='Display this many predictions')
ap.add_argument('--draw',
action='store_true',
help='Draw bounding boxes on image in result')
ap.add_argument('--debug',
action='store_true',
help='Debug mode toggle')
return vars(ap.parse_args())
def main():
# Test Movidius engine
args = parse_args()
if args['debug']:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
if args['model_package'] != '':
dlmm = DLModelManager()
meta = dlmm.get_model_meta(args['model_package'])
args['model'] = meta['model']
args['label'] = meta['label']
logger.debug('model filepath: ' + args['model'])
logger.debug('label filepath: ' + args['label'])
comm_config = {
'subscribe': {},
'broker': {
'address': 'localhost',
'port': 1883
}
}
if args['service_name'] == 'Classification':
mvng = MovidiusEngine(args['model'], args['label'])
service_functor = MovidiusClassificationService
elif args['service_name'] == 'MobileNetSSD':
mvng = MovidiusMobileNetSSDEngine(args['model'], args['label'])
service_functor = MovidiusMobileNetSSDService
else:
        logger.critical('Legal service names are Classification, MobileNetSSD')
        return
engine_service = service_functor(args['service_name'],
mvng,
comm_config,
draw=args['draw'])
engine_service.run(args)
if __name__ == '__main__':
main()
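# A hypothetical invocation sketch (model and label file names are
# placeholders, not shipped with this file): starting the MobileNetSSD
# service with result drawing and debug logging enabled.
#
#   python berrynet/service/movidius_service.py \
#       --model graph --label coco_labels.txt \
#       --service_name MobileNetSSD --draw --debug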
| DT42/BerryNet | berrynet/service/movidius_service.py | Python | gpl-3.0 | 5,991 |
#!/usr/bin/env python2
import argparse
import pandas as pd
import matplotlib
matplotlib.use('Agg') # Force matplotlib to not use any Xwindows backend.
import matplotlib.pyplot as plt
matplotlib.style.use('ggplot')
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Plots a boxplot of the DCJ distances and estimates from random walks.")
parser.add_argument("-n", type=int, required=True, help="Genome size")
parser.add_argument("-m", type=int, required=True, help="Max size of walk")
parser.add_argument("-s", type=int, default=4, help="Step in walk lengths")
parser.add_argument("-r", type=int, default=20, help="Number of repeats")
param = parser.parse_args()
data = {"DCJ": [], "ML": [], "eDCJ": [], 'Real Distance': []}
step_range = range(param.s, param.m, param.s)
for step in step_range:
ml_rep_data = []
dcj_rep_data = []
est_rep_data = []
for rep in range(1, 11):
with open("rw.n%d.step%d.rep%d/genomes.txt.ml" % (param.n, step, rep)) as f:
l = f.readline()
dcj, ml, est = map(float, f.readline().strip().split())
# dcj_rep_data.append(dcj - step)
# ml_rep_data.append(ml - step)
# est_rep_data.append(est - step)
data["DCJ"].append(dcj - step)
data["ML"].append(ml - step)
data["eDCJ"].append(est - step)
data["Real Distance"].append(step)
# PANDA:
del data["DCJ"]
df = pd.DataFrame.from_dict(data)
df.boxplot(by='Real Distance', layout=(3,1), figsize=(14,10))
df.boxplot(by='Real Distance', figsize=(14, 10))
plt.savefig('rw_%s_results.pdf' % param.n, bbox_inches='tight')
| pedrofeijao/RINGO | src/ringo/plot_ml_estimate.py | Python | mit | 1,794 |
"""
Support for IP Webcam, an Android app that acts as a full-featured webcam.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/android_ip_webcam/
"""
import asyncio
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.const import (
CONF_NAME, CONF_HOST, CONF_PORT, CONF_USERNAME, CONF_PASSWORD,
CONF_SENSORS, CONF_SWITCHES, CONF_TIMEOUT, CONF_SCAN_INTERVAL,
CONF_PLATFORM)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import (
async_dispatcher_send, async_dispatcher_connect)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.util.dt import utcnow
from homeassistant.components.camera.mjpeg import (
CONF_MJPEG_URL, CONF_STILL_IMAGE_URL)
REQUIREMENTS = ['pydroid-ipcam==0.8']
_LOGGER = logging.getLogger(__name__)
ATTR_AUD_CONNS = 'Audio Connections'
ATTR_HOST = 'host'
ATTR_VID_CONNS = 'Video Connections'
CONF_MOTION_SENSOR = 'motion_sensor'
DATA_IP_WEBCAM = 'android_ip_webcam'
DEFAULT_NAME = 'IP Webcam'
DEFAULT_PORT = 8080
DEFAULT_TIMEOUT = 10
DOMAIN = 'android_ip_webcam'
SCAN_INTERVAL = timedelta(seconds=10)
SIGNAL_UPDATE_DATA = 'android_ip_webcam_update'
KEY_MAP = {
'audio_connections': 'Audio Connections',
'adet_limit': 'Audio Trigger Limit',
'antibanding': 'Anti-banding',
'audio_only': 'Audio Only',
'battery_level': 'Battery Level',
'battery_temp': 'Battery Temperature',
'battery_voltage': 'Battery Voltage',
'coloreffect': 'Color Effect',
'exposure': 'Exposure Level',
'exposure_lock': 'Exposure Lock',
'ffc': 'Front-facing Camera',
'flashmode': 'Flash Mode',
'focus': 'Focus',
'focus_homing': 'Focus Homing',
'focus_region': 'Focus Region',
'focusmode': 'Focus Mode',
'gps_active': 'GPS Active',
'idle': 'Idle',
'ip_address': 'IPv4 Address',
'ipv6_address': 'IPv6 Address',
'ivideon_streaming': 'Ivideon Streaming',
'light': 'Light Level',
'mirror_flip': 'Mirror Flip',
'motion': 'Motion',
'motion_active': 'Motion Active',
'motion_detect': 'Motion Detection',
'motion_event': 'Motion Event',
'motion_limit': 'Motion Limit',
'night_vision': 'Night Vision',
'night_vision_average': 'Night Vision Average',
'night_vision_gain': 'Night Vision Gain',
'orientation': 'Orientation',
'overlay': 'Overlay',
'photo_size': 'Photo Size',
'pressure': 'Pressure',
'proximity': 'Proximity',
'quality': 'Quality',
'scenemode': 'Scene Mode',
'sound': 'Sound',
'sound_event': 'Sound Event',
'sound_timeout': 'Sound Timeout',
'torch': 'Torch',
'video_connections': 'Video Connections',
'video_chunk_len': 'Video Chunk Length',
'video_recording': 'Video Recording',
'video_size': 'Video Size',
'whitebalance': 'White Balance',
'whitebalance_lock': 'White Balance Lock',
'zoom': 'Zoom'
}
ICON_MAP = {
'audio_connections': 'mdi:speaker',
'battery_level': 'mdi:battery',
'battery_temp': 'mdi:thermometer',
'battery_voltage': 'mdi:battery-charging-100',
'exposure_lock': 'mdi:camera',
'ffc': 'mdi:camera-front-variant',
'focus': 'mdi:image-filter-center-focus',
'gps_active': 'mdi:crosshairs-gps',
'light': 'mdi:flashlight',
'motion': 'mdi:run',
'night_vision': 'mdi:weather-night',
'overlay': 'mdi:monitor',
'pressure': 'mdi:gauge',
'proximity': 'mdi:map-marker-radius',
'quality': 'mdi:quality-high',
'sound': 'mdi:speaker',
'sound_event': 'mdi:speaker',
'sound_timeout': 'mdi:speaker',
'torch': 'mdi:white-balance-sunny',
'video_chunk_len': 'mdi:video',
'video_connections': 'mdi:eye',
'video_recording': 'mdi:record-rec',
'whitebalance_lock': 'mdi:white-balance-auto'
}
SWITCHES = ['exposure_lock', 'ffc', 'focus', 'gps_active', 'night_vision',
'overlay', 'torch', 'whitebalance_lock', 'video_recording']
SENSORS = ['audio_connections', 'battery_level', 'battery_temp',
'battery_voltage', 'light', 'motion', 'pressure', 'proximity',
'sound', 'video_connections']
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.All(cv.ensure_list, [vol.Schema({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
vol.Optional(CONF_SCAN_INTERVAL, default=SCAN_INTERVAL):
cv.time_period,
vol.Inclusive(CONF_USERNAME, 'authentication'): cv.string,
vol.Inclusive(CONF_PASSWORD, 'authentication'): cv.string,
vol.Optional(CONF_SWITCHES, default=None):
vol.All(cv.ensure_list, [vol.In(SWITCHES)]),
vol.Optional(CONF_SENSORS, default=None):
vol.All(cv.ensure_list, [vol.In(SENSORS)]),
vol.Optional(CONF_MOTION_SENSOR, default=None): cv.boolean,
})])
}, extra=vol.ALLOW_EXTRA)
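# A hypothetical configuration sketch (host and entity choices are
# placeholders, not defaults of this component), written as the dict that
# CONFIG_SCHEMA above would validate for the android_ip_webcam domain:
#
#   {DOMAIN: [{CONF_HOST: '192.168.1.50',
#              CONF_PORT: 8080,
#              CONF_SENSORS: ['battery_level', 'light'],
#              CONF_SWITCHES: ['torch'],
#              CONF_MOTION_SENSOR: True}]}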
@asyncio.coroutine
def async_setup(hass, config):
"""Set up the IP Webcam component."""
from pydroid_ipcam import PyDroidIPCam
webcams = hass.data[DATA_IP_WEBCAM] = {}
websession = async_get_clientsession(hass)
@asyncio.coroutine
def async_setup_ipcamera(cam_config):
"""Set up an IP camera."""
host = cam_config[CONF_HOST]
username = cam_config.get(CONF_USERNAME)
password = cam_config.get(CONF_PASSWORD)
name = cam_config[CONF_NAME]
interval = cam_config[CONF_SCAN_INTERVAL]
switches = cam_config[CONF_SWITCHES]
sensors = cam_config[CONF_SENSORS]
motion = cam_config[CONF_MOTION_SENSOR]
# Init ip webcam
cam = PyDroidIPCam(
hass.loop, websession, host, cam_config[CONF_PORT],
username=username, password=password,
timeout=cam_config[CONF_TIMEOUT]
)
if switches is None:
switches = [setting for setting in cam.enabled_settings
if setting in SWITCHES]
if sensors is None:
sensors = [sensor for sensor in cam.enabled_sensors
if sensor in SENSORS]
sensors.extend(['audio_connections', 'video_connections'])
if motion is None:
motion = 'motion_active' in cam.enabled_sensors
@asyncio.coroutine
def async_update_data(now):
"""Update data from IP camera in SCAN_INTERVAL."""
yield from cam.update()
async_dispatcher_send(hass, SIGNAL_UPDATE_DATA, host)
async_track_point_in_utc_time(
hass, async_update_data, utcnow() + interval)
yield from async_update_data(None)
# Load platforms
webcams[host] = cam
mjpeg_camera = {
CONF_PLATFORM: 'mjpeg',
CONF_MJPEG_URL: cam.mjpeg_url,
CONF_STILL_IMAGE_URL: cam.image_url,
CONF_NAME: name,
}
if username and password:
mjpeg_camera.update({
CONF_USERNAME: username,
CONF_PASSWORD: password
})
hass.async_add_job(discovery.async_load_platform(
hass, 'camera', 'mjpeg', mjpeg_camera, config))
if sensors:
hass.async_add_job(discovery.async_load_platform(
hass, 'sensor', DOMAIN, {
CONF_NAME: name,
CONF_HOST: host,
CONF_SENSORS: sensors,
}, config))
if switches:
hass.async_add_job(discovery.async_load_platform(
hass, 'switch', DOMAIN, {
CONF_NAME: name,
CONF_HOST: host,
CONF_SWITCHES: switches,
}, config))
if motion:
hass.async_add_job(discovery.async_load_platform(
hass, 'binary_sensor', DOMAIN, {
CONF_HOST: host,
CONF_NAME: name,
}, config))
tasks = [async_setup_ipcamera(conf) for conf in config[DOMAIN]]
if tasks:
yield from asyncio.wait(tasks, loop=hass.loop)
return True
class AndroidIPCamEntity(Entity):
"""The Android device running IP Webcam."""
def __init__(self, host, ipcam):
"""Initialize the data oject."""
self._host = host
self._ipcam = ipcam
@asyncio.coroutine
def async_added_to_hass(self):
"""Register update dispatcher."""
@callback
def async_ipcam_update(host):
"""Update callback."""
if self._host != host:
return
self.hass.async_add_job(self.async_update_ha_state(True))
async_dispatcher_connect(
self.hass, SIGNAL_UPDATE_DATA, async_ipcam_update)
@property
def should_poll(self):
"""Return True if entity has to be polled for state."""
return False
@property
def available(self):
"""Return True if entity is available."""
return self._ipcam.available
@property
def device_state_attributes(self):
"""Return the state attributes."""
state_attr = {ATTR_HOST: self._host}
if self._ipcam.status_data is None:
return state_attr
state_attr[ATTR_VID_CONNS] = \
self._ipcam.status_data.get('video_connections')
state_attr[ATTR_AUD_CONNS] = \
self._ipcam.status_data.get('audio_connections')
return state_attr
| MungoRae/home-assistant | homeassistant/components/android_ip_webcam.py | Python | apache-2.0 | 9,814 |
import Queue
import time
from threading import Thread
class ActiveThreadListItem():
def __init__(self, thread, name):
self.thread = thread
self.name = name
def getThread(self):
return self.thread
def getName(self):
return self.name
class EventObject():
def __init__(self, _instance, vector, event):
self._instance = _instance
self.vector = vector
self.event = event
def get_event(self):
return self.event
def get_name(self):
return self._instance.getShortName()
def get_instance(self):
return self._instance
def get_vector(self):
return self.vector
class EventQueue():
eventQueue = Queue.Queue()
@staticmethod
def pop():
return EventQueue.eventQueue.get()
@staticmethod
def push(evtobj):
# print("NEW EVENT: " + evtobj.get_event())
EventQueue.eventQueue.put(evtobj)
return
@staticmethod
def empty():
return EventQueue.eventQueue.empty()
@staticmethod
def size():
return EventQueue.eventQueue.qsize()
class EventHandler(object):
eventList = {}
nameList = list()
my_threads = list()
ActiveThreadCountThread = False
@staticmethod
def add(_instance, event):
if (event in EventHandler.eventList):
EventHandler.eventList[event].append(_instance)
else:
EventHandler.eventList[event] = [_instance]
@staticmethod
def remove(_instance, event):
if (event in EventHandler.eventList):
EventHandler.eventList[event].remove(_instance)
@staticmethod
def fire(event):
parts = event.split(":")
event = parts[0]
vector = ""
if (len(parts) == 2):
vector = parts[1]
# make sure this event/vector pair is not already in the queue
if not (event + ":" + vector) in EventHandler.nameList:
if (event in EventHandler.eventList):
for _instance in EventHandler.eventList[event]:
EventQueue.push(EventObject(_instance, vector, event))
EventHandler.nameList.append(event + ":" + vector)
@staticmethod
def numActiveThreads(name):
num = 0
for t in EventHandler.my_threads:
if t.getName() == name:
num = num + 1
return num
@staticmethod
def colapsethreads():
tmp_threads = list()
for t in EventHandler.my_threads:
if t.getThread().isAlive():
tmp_threads.append(t)
EventHandler.my_threads = tmp_threads
@staticmethod
def finished():
EventHandler.colapsethreads()
if (EventQueue.empty() and (len(EventHandler.my_threads) == 0)):
return True
return False
@staticmethod
def kill_thread_count_thread():
EventHandler.ActiveThreadCountThread = False
@staticmethod
def print_thread_count(display, delay=5):
EventHandler.ActiveThreadCountThread = True
while (EventHandler.ActiveThreadCountThread):
while (EventHandler.ActiveThreadCountThread and len(EventHandler.my_threads) == 0):
time.sleep(delay)
display.alert("Current # of Active Threads = [%i]" %
len(EventHandler.my_threads))
tmp_list = ""
for t in EventHandler.my_threads:
if not tmp_list == "":
tmp_list = tmp_list + ", "
tmp_list = tmp_list + t.getName()
display.alert(" ==> " + tmp_list)
display.debug("EventQueue Size = [%i]" % EventQueue.size())
time.sleep(delay)
@staticmethod
def processNext(display, max_threads):
# wait for a thread to free up
while (len(EventHandler.my_threads) >= max_threads):
EventHandler.colapsethreads()
# make sure there are events to process
if not EventQueue.empty():
evtobj = EventQueue.pop()
_instance = evtobj.get_instance()
vector = evtobj.get_vector()
event = evtobj.get_event()
EventHandler.nameList.remove(event + ":" + vector)
# check to see if the target module is at maxThreads and if so, add it back to the queue
if _instance and (
EventHandler.numActiveThreads(_instance.getShortName()) >= int(_instance.getMaxThreads())):
EventHandler.fire(event + ":" + vector)
else:
display.verbose("Launching [%s] Vector [%s]" % (_instance.getTitle(), vector))
if _instance:
thread = Thread(target=_instance.go, args=(vector,))
thread.setDaemon(True)
thread.start()
EventHandler.my_threads.append(ActiveThreadListItem(thread, _instance.getShortName()))
# _instance.go(vector)
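# Editor's note (not part of the original file): minimal usage sketch, assuming a module
# object exposing getShortName()/getTitle()/getMaxThreads()/go(vector) as used above:
#
#   EventHandler.add(my_module, "newService")      # subscribe the module to an event
#   EventHandler.fire("newService:192.168.1.10")   # text after the first ':' is the vector
#   while not EventHandler.finished():
#       EventHandler.processNext(display, max_threads=4)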
|
MooseDojo/apt2
|
core/events.py
|
Python
|
mit
| 4,988
|
"""
This script parses an ENA metadata file in XML format and prints a subset of information.
Usage: python parse_ENA_sampleInfo_XML.py ERP000909.xml > samples.txt
Input: an XML file exported for a list of ERS accession numbers from ENA using the REST URLs API. For example, one can download an XML file
for sample ERS086023 using http://www.ebi.ac.uk/ena/data/view/ERS086023&display=xml.
Output: a tab-delimited text file containing information retrieved from the XML file.
study_accession, sample_accession, secondary_sample_accession, experiment_accession, run_accession, Isolate_ID, Strain, Host, Place_of_isolation, Year_of_isolation
Author of this version: Yu Wan (wanyuac@gmail.com, https://github.com/wanyuac)
Edition history: 6-7, 11 August 2015
Licence: GNU GPL 2.1
"""
import sys
import xml.etree.ElementTree as xmlTree
def get_domains(sample):
study = BioSample = ERS = experiment = run = isolate = strain = host = place = year = "NA" # default value of all fields
for domain in sample:
if domain.tag == "IDENTIFIERS":
BioSample, ERS = sample[0][1].text, sample[0][0].text # <tag>text</tag>
if domain.tag == "SAMPLE_LINKS":
study = sample[4][0][0][1].text # visit nested elements with indices
experiment = sample[4][1][0][1].text
run = sample[4][2][0][1].text
if domain.tag == "SAMPLE_ATTRIBUTES": # This domain may be variable in terms of attributes
for attribute in domain:
if attribute[0].text == "collection_date":
year = attribute[1].text
elif attribute[0].text == "isolate":
isolate = attribute[1].text
elif attribute[0].text == "specific_host":
host = attribute[1].text
elif attribute[0].text == "country":
place = attribute[1].text
elif attribute[0].text == "strain":
strain = attribute[1].text
return [study, BioSample, ERS, experiment, run, isolate, strain, host, place, year]
def main():
file = sys.argv[1]
xml = xmlTree.parse(file).getroot() # parse an XML into a tree of elements
# print the header line
print "\t".join(["study_accession", "sample_accession", "secondary_sample_accession", "experiment_accession", "run_accession", "Isolate_ID", "Strain", "Host", "Place_of_isolation", "Year_of_isolation"])
for sample in xml:
print "\t".join(get_domains(sample))
return
if __name__ == '__main__':
main()
|
wanyuac/BINF_toolkit
|
parse_ENA_sampleInfo_XML.py
|
Python
|
gpl-3.0
| 2,321
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.ads.googleads.v9.resources.types import ad_group_audience_view
from google.ads.googleads.v9.services.types import (
ad_group_audience_view_service,
)
from .transports.base import (
AdGroupAudienceViewServiceTransport,
DEFAULT_CLIENT_INFO,
)
from .transports.grpc import AdGroupAudienceViewServiceGrpcTransport
class AdGroupAudienceViewServiceClientMeta(type):
"""Metaclass for the AdGroupAudienceViewService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[AdGroupAudienceViewServiceTransport]]
_transport_registry["grpc"] = AdGroupAudienceViewServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[AdGroupAudienceViewServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class AdGroupAudienceViewServiceClient(
metaclass=AdGroupAudienceViewServiceClientMeta
):
"""Service to manage ad group audience views."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AdGroupAudienceViewServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AdGroupAudienceViewServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> AdGroupAudienceViewServiceTransport:
"""Return the transport used by the client instance.
Returns:
AdGroupAudienceViewServiceTransport: The transport used by the client instance.
"""
return self._transport
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
@staticmethod
def ad_group_audience_view_path(
customer_id: str, ad_group_id: str, criterion_id: str,
) -> str:
"""Return a fully-qualified ad_group_audience_view string."""
return "customers/{customer_id}/adGroupAudienceViews/{ad_group_id}~{criterion_id}".format(
customer_id=customer_id,
ad_group_id=ad_group_id,
criterion_id=criterion_id,
)
@staticmethod
def parse_ad_group_audience_view_path(path: str) -> Dict[str, str]:
"""Parse a ad_group_audience_view path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/adGroupAudienceViews/(?P<ad_group_id>.+?)~(?P<criterion_id>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, AdGroupAudienceViewServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the ad group audience view service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.AdGroupAudienceViewServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in (
"true",
"false",
):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
use_client_cert = (
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true"
)
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, AdGroupAudienceViewServiceTransport):
# transport is a AdGroupAudienceViewServiceTransport instance.
if credentials:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = AdGroupAudienceViewServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
def get_ad_group_audience_view(
self,
request: Union[
ad_group_audience_view_service.GetAdGroupAudienceViewRequest, dict
] = None,
*,
resource_name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> ad_group_audience_view.AdGroupAudienceView:
r"""Returns the requested ad group audience view in full detail.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Args:
request (Union[google.ads.googleads.v9.services.types.GetAdGroupAudienceViewRequest, dict]):
The request object. Request message for
[AdGroupAudienceViewService.GetAdGroupAudienceView][google.ads.googleads.v9.services.AdGroupAudienceViewService.GetAdGroupAudienceView].
resource_name (:class:`str`):
Required. The resource name of the ad
group audience view to fetch.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v9.resources.types.AdGroupAudienceView:
An ad group audience view.
Includes performance data from interests
and remarketing lists for Display
Network and YouTube Network ads, and
remarketing lists for search ads (RLSA),
aggregated at the audience level.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([resource_name]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a ad_group_audience_view_service.GetAdGroupAudienceViewRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request,
ad_group_audience_view_service.GetAdGroupAudienceViewRequest,
):
request = ad_group_audience_view_service.GetAdGroupAudienceViewRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.get_ad_group_audience_view
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("resource_name", request.resource_name),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
__all__ = ("AdGroupAudienceViewServiceClient",)
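# Editor's note (not part of the original file): a minimal usage sketch, assuming
# application default credentials; the customer, ad group and criterion IDs below are
# hypothetical.
#
#   client = AdGroupAudienceViewServiceClient()
#   name = AdGroupAudienceViewServiceClient.ad_group_audience_view_path(
#       "1234567890", "111", "222")
#   view = client.get_ad_group_audience_view(resource_name=name)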
|
googleads/google-ads-python
|
google/ads/googleads/v9/services/services/ad_group_audience_view_service/client.py
|
Python
|
apache-2.0
| 19,485
|
#!/usr/bin/env python
import cgi
reshtml = '''Content-Type: text/html\n
<HTML><HEAD><TITLE>
Friends CGI Demo (dynamic screen)
</TITLE></HEAD>
<BODY><H3>Friends list for: <I>%s</I></H3>
Your name is: <B>%s</B><P>
You have <B>%s</B> friends.
</BODY></HTML>'''
form = cgi.FieldStorage()
who = form['person'].value
howmany = form['howmany'].value
print(reshtml % (who, who, howmany))
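# Editor's note (not part of the original file): the form data read above comes from a
# request such as (hypothetical URL):
#   http://localhost/cgi-bin/friendsA3.py?person=Alice&howmany=5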
|
YuxuanLing/trunk
|
trunk/code/study/python/core_python_appilication/ch10/friendsA3.py
|
Python
|
gpl-3.0
| 400
|
"""
This tool does some migrations on the database.
We intend to have an export format to be used for migration to a newer/older rhizi database,
but since this tool has already been written and is useful, it is kept with the db code for now, intended as temporary.
"""
import sys
import base64
import gzip
from py2neo import Graph, watch
graph_db = None
def base64_gzip(text):
return base64.encodestring(gzip.zlib.compress(text))
def update_blob_compression_to_gzip():
nodes = [row.x for row in graph_db.cypher.execute('match (x:__COMMIT) return x')]
changed = 0
for node in nodes:
if node['name'] == 'root-commit':
continue
blob = node['blob']
if blob[:1] != '{':
decoded = base64.decodestring(blob)
try:
blob = gzip.zlib.decompress(decoded)
except:
try:
blob = gzip.zlib.decompress(decoded, 16)
except:
import pdb; pdb.set_trace()
new_blob = base64_gzip(blob)
if new_blob != node['blob']:
changed += 1
node['blob'] = new_blob
if changed > 0:
print('updated %s commit node blobs' % changed)
graph_db.push(*nodes)
else:
print("all commit node blobs are up to date")
def rhizi_db_version():
return map(int, graph_db.cypher.execute('match (x:__RZDB_META) return x')[0].x['schema_version'].split('.'))
def main():
global graph_db
watch("httpstream")
port = 7474 if len(sys.argv) == 1 else int(sys.argv[1])
graph_db = Graph('http://localhost:{}/db/data'.format(port))
version = rhizi_db_version()
if len(version) == 2:
rhizi_major, rhizi_minor, rhizi_micro = version[0], version[1], 0
else:
assert(len(version) == 3)
rhizi_major, rhizi_minor, rhizi_micro = version
print("rhizi db schema version: %r.%r.%r" % (rhizi_major, rhizi_minor, rhizi_micro))
if rhizi_major <= 0 and rhizi_minor <= 2:
update_blob_compression_to_gzip()
if __name__ == '__main__':
main()
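# Editor's note (not part of the original file): the commit blob format used above is
# zlib-compressed then base64-encoded, e.g.
#   blob = base64_gzip('{"name": "example"}')
#   text = gzip.zlib.decompress(base64.decodestring(blob))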
|
shlomif/rhizi
|
src/temporary/migrate.py
|
Python
|
agpl-3.0
| 2,076
|
from django.conf.urls import patterns, include, url
from projects import views
urlpatterns = patterns('projects.urls',
url(r'^orte/$', views.orte),
url(r'^orte/cols$', views.orte_cols),
url(r'^orte/rows$', views.orte_rows),
url(r'^orte/(?P<pk>\d+)/$', views.ort),
url(r'^veroeffentlichungen/$', views.veroeffentlichungen),
url(r'^veroeffentlichungen/(?P<pk>\d+)/$', views.veroeffentlichung),
url(r'^verfahrensschritte/$', views.verfahrensschritte),
url(r'^verfahrensschritte/(?P<pk>\d+)/$', views.verfahrensschritt)
)
|
BuergerbautStadt/bbs-old
|
projects/urls.py
|
Python
|
agpl-3.0
| 552
|
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4
# Baruwa - Web 2.0 MailScanner front-end.
# Copyright (C) 2010-2012 Andrew Colin Kissa <andrew@topdog.za.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"Lists models"
from sqlalchemy import Column, ForeignKey
from sqlalchemy.types import Unicode, Integer, BigInteger
from sqlalchemy.schema import UniqueConstraint
from baruwa.model.meta import Base
class List(Base):
"List object"
__tablename__ = 'lists'
__table_args__ = (UniqueConstraint('from_address', 'to_address'), {})
id = Column(BigInteger, primary_key=True)
list_type = Column(Integer, default=1)
from_address = Column(Unicode(255), index=True)
to_address = Column(Unicode(255), default=u'any', index=True)
user_id = Column(Integer, ForeignKey('users.id'))
from_addr_type = Column(Integer)
__mapper_args__ = {'order_by':id}
def tojson(self):
"Return json"
return dict(
id=self.id,
#list_type=self.list_type,
from_address=self.from_address,
to_address=self.to_address,
)
|
TetraAsh/baruwa2
|
baruwa/model/lists.py
|
Python
|
gpl-3.0
| 1,763
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class InboundNatRulesOperations:
"""InboundNatRulesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
load_balancer_name: str,
**kwargs: Any
) -> AsyncIterable["_models.InboundNatRuleListResult"]:
"""Gets all the inbound nat rules in a load balancer.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either InboundNatRuleListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_11_01.models.InboundNatRuleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.InboundNatRuleListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('InboundNatRuleListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
load_balancer_name: str,
inbound_nat_rule_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
load_balancer_name: str,
inbound_nat_rule_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified load balancer inbound nat rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param inbound_nat_rule_name: The name of the inbound nat rule.
:type inbound_nat_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
load_balancer_name=load_balancer_name,
inbound_nat_rule_name=inbound_nat_rule_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'} # type: ignore
async def get(
self,
resource_group_name: str,
load_balancer_name: str,
inbound_nat_rule_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.InboundNatRule":
"""Gets the specified load balancer inbound nat rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param inbound_nat_rule_name: The name of the inbound nat rule.
:type inbound_nat_rule_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: InboundNatRule, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_11_01.models.InboundNatRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.InboundNatRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('InboundNatRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
load_balancer_name: str,
inbound_nat_rule_name: str,
inbound_nat_rule_parameters: "_models.InboundNatRule",
**kwargs: Any
) -> "_models.InboundNatRule":
cls = kwargs.pop('cls', None) # type: ClsType["_models.InboundNatRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(inbound_nat_rule_parameters, 'InboundNatRule')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('InboundNatRule', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('InboundNatRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
load_balancer_name: str,
inbound_nat_rule_name: str,
inbound_nat_rule_parameters: "_models.InboundNatRule",
**kwargs: Any
) -> AsyncLROPoller["_models.InboundNatRule"]:
"""Creates or updates a load balancer inbound nat rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param inbound_nat_rule_name: The name of the inbound nat rule.
:type inbound_nat_rule_name: str
:param inbound_nat_rule_parameters: Parameters supplied to the create or update inbound nat
rule operation.
:type inbound_nat_rule_parameters: ~azure.mgmt.network.v2018_11_01.models.InboundNatRule
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either InboundNatRule or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_11_01.models.InboundNatRule]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.InboundNatRule"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
load_balancer_name=load_balancer_name,
inbound_nat_rule_name=inbound_nat_rule_name,
inbound_nat_rule_parameters=inbound_nat_rule_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('InboundNatRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'} # type: ignore
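# Editor's note (not part of the original file): these operations are normally reached
# through the generated management client rather than instantiated directly; a sketch,
# assuming azure-identity and the aio NetworkManagementClient wrapper (resource names
# below are hypothetical):
#
#   from azure.identity.aio import DefaultAzureCredential
#   from azure.mgmt.network.aio import NetworkManagementClient
#
#   async with NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>") as client:
#       rule = await client.inbound_nat_rules.get("my-rg", "my-lb", "my-nat-rule")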
|
Azure/azure-sdk-for-python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_11_01/aio/operations/_inbound_nat_rules_operations.py
|
Python
|
mit
| 22,205
|
#!/usr/bin/env python
# encoding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from gstack import db
class AccessToken(db.Model):
__tablename__ = 'accesstoken'
access_token = db.Column(db.String(100), primary_key=True, unique=True)
client_id = db.Column(db.String(100), unique=True)
expires_in = db.Column(db.String(10))
id_token = db.Column(db.String(1000))
data = db.Column(db.String(500))
def __init__(self, access_token, client_id, expires_in, id_token, data):
self.access_token = access_token
self.client_id = client_id
self.expires_in = expires_in
self.id_token = id_token
self.data = data
|
apache/cloudstack-gcestack
|
gstack/models/accesstoken.py
|
Python
|
apache-2.0
| 1,414
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# License
# -------
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Legal Notice
# ------------
# OPENFOAM is a trademark owned by OpenCFD Ltd
# (producer and distributor of the OpenFOAM software via www.openfoam.com).
# The trademark information must remain visible and unadulterated in this
# file and via the "spack info" and comply with the term set by
# http://openfoam.com/legal/trademark-policy.php
#
# This file is not part of OpenFOAM, nor does it constitute a component of an
# OpenFOAM distribution.
#
##############################################################################
#
# Notes
# - mpi handling: WM_MPLIB=USER and provide wmake rules for special purpose
# 'USER' and 'USERMPI' mpi implementations.
# The choice of 'USER' vs 'USERMPI' may change in the future.
#
# Changes
# 2017-03-28 Mark Olesen <mark.olesen@esi-group.com>
# - avoid installing intermediate targets.
# - reworked to mirror the openfoam-com package.
# If changes are needed here, consider if they need applying there too.
#
# Known issues
# - Combining +parmgridgen with +float32 probably won't work.
#
##############################################################################
import glob
import re
import shutil
import os
from spack import *
from spack.environment import EnvironmentModifications
from spack.pkg.builtin.openfoam_com import OpenfoamArch
from spack.pkg.builtin.openfoam_com import add_extra_files
from spack.pkg.builtin.openfoam_com import write_environ
from spack.pkg.builtin.openfoam_com import rewrite_environ_files
import llnl.util.tty as tty
class FoamExtend(Package):
"""The Extend Project is a fork of the OpenFOAM opensource library
for Computational Fluid Dynamics (CFD).
This offering is not approved or endorsed by OpenCFD Ltd,
producer and distributor of the OpenFOAM software via www.openfoam.com,
and owner of the OPENFOAM trademark.
"""
homepage = "http://www.extend-project.de/"
version('4.0', git='http://git.code.sf.net/p/foam-extend/foam-extend-4.0.git')
version('3.2', git='http://git.code.sf.net/p/foam-extend/foam-extend-3.2.git')
version('3.1', git='http://git.code.sf.net/p/foam-extend/foam-extend-3.1.git')
version('3.0', git='http://git.code.sf.net/p/foam-extend/foam-extend-3.0.git')
# variant('int64', default=False,
# description='Compile with 64-bit label')
variant('float32', default=False,
description='Compile with 32-bit scalar (single-precision)')
variant('paraview', default=False,
description='Build paraview plugins (eg, paraFoam)')
variant('scotch', default=True,
description='With scotch for decomposition')
variant('ptscotch', default=True,
description='With ptscotch for decomposition')
variant('metis', default=True,
description='With metis for decomposition')
variant('parmetis', default=True,
description='With parmetis for decomposition')
variant('parmgridgen', default=True,
description='With parmgridgen support')
variant('source', default=True,
description='Install library/application sources and tutorials')
provides('openfoam')
depends_on('mpi')
depends_on('python')
depends_on('zlib')
depends_on('flex', type='build')
depends_on('cmake', type='build')
depends_on('scotch~metis', when='~ptscotch+scotch')
depends_on('scotch~metis+mpi', when='+ptscotch')
depends_on('metis@5:', when='+metis')
depends_on('parmetis', when='+parmetis')
# mgridgen is statically linked
depends_on('parmgridgen', when='+parmgridgen', type='build')
depends_on('paraview@:5.0.1', when='+paraview')
# General patches
common = ['spack-Allwmake', 'README-spack']
assets = []
# Some user config settings
config = {
'label-size': False, # <- No int32/int64 support
'mplib': 'USERMPI', # USER | USERMPI
}
# The openfoam architecture, compiler information etc
_foam_arch = None
# Content for etc/prefs.{csh,sh}
etc_prefs = {}
# Content for etc/config.{csh,sh}/ files
etc_config = {}
phases = ['configure', 'build', 'install']
build_script = './spack-Allwmake' # <- Added by patch() method.
#
# - End of definitions / setup -
#
def setup_environment(self, spack_env, run_env):
"""Add environment variables to the generated module file.
These environment variables come from running:
.. code-block:: console
$ . $WM_PROJECT_DIR/etc/bashrc
"""
# NOTE: Spack runs setup_environment twice.
# 1) pre-build to set up the build environment
# 2) post-install to determine runtime environment variables
# The etc/bashrc is only available (with correct content)
# post-installation.
bashrc = join_path(self.projectdir, 'etc', 'bashrc')
minimal = True
if os.path.isfile(bashrc):
# post-install: source the installed bashrc
try:
mods = EnvironmentModifications.from_sourcing_file(
bashrc,
clean=True, # Remove duplicate entries
blacklist=[ # Blacklist these
# Inadvertent changes
# -------------------
'PS1', # Leave unaffected
'MANPATH', # Leave unaffected
# Unneeded bits
# -------------
'FOAM_INST_DIR', # Possibly incorrect
'FOAM_(APP|ETC|SRC|SOLVERS|UTILITIES)',
'FOAM_TEST_.*_DIR',
'WM_NCOMPPROCS',
# 'FOAM_TUTORIALS', # can be useful
# Lots of third-party cruft
# -------------------------
'[A-Z].*_(BIN|LIB|INCLUDE)_DIR',
'[A-Z].*_SYSTEM',
'WM_THIRD_PARTY_.*',
'(BISON|FLEX|CMAKE|ZLIB)_DIR',
'(METIS|PARMETIS|PARMGRIDGEN|SCOTCH)_DIR',
# User-specific
# -------------
'FOAM_RUN',
'(FOAM|WM)_.*USER_.*',
],
whitelist=[ # Whitelist these
'MPI_ARCH_PATH', # Can be needed for compilation
'PYTHON_BIN_DIR',
])
run_env.extend(mods)
minimal = False
tty.info('foam-extend env: {0}'.format(bashrc))
except Exception:
minimal = True
if minimal:
# pre-build or minimal environment
tty.info('foam-extend minimal env {0}'.format(self.prefix))
run_env.set('FOAM_INST_DIR', os.path.dirname(self.projectdir)),
run_env.set('FOAM_PROJECT_DIR', self.projectdir)
run_env.set('WM_PROJECT_DIR', self.projectdir)
for d in ['wmake', self.archbin]: # bin added automatically
run_env.prepend_path('PATH', join_path(self.projectdir, d))
def setup_dependent_environment(self, spack_env, run_env, dependent_spec):
"""Location of the OpenFOAM project.
This is identical to the WM_PROJECT_DIR value, but we avoid that
variable since it would mask the normal OpenFOAM cleanup of
previous versions.
"""
spack_env.set('FOAM_PROJECT_DIR', self.projectdir)
@property
def projectdir(self):
"""Absolute location of project directory: WM_PROJECT_DIR/"""
return self.prefix # <- install directly under prefix
@property
def foam_arch(self):
if not self._foam_arch:
self._foam_arch = OpenfoamArch(self.spec, **self.config)
return self._foam_arch
@property
def archbin(self):
"""Relative location of architecture-specific executables"""
return join_path('applications', 'bin', self.foam_arch)
@property
def archlib(self):
"""Relative location of architecture-specific libraries"""
return join_path('lib', self.foam_arch)
def patch(self):
"""Adjust OpenFOAM build for spack.
Where needed, apply filter as an alternative to normal patching."""
add_extra_files(self, self.common, self.assets)
# Adjust ParMGridGen - this is still a mess
files = [
'src/dbns/Make/options',
'src/fvAgglomerationMethods/MGridGenGamgAgglomeration/Make/options' # noqa: E501
]
for f in files:
filter_file(r'-lMGridGen', r'-lmgrid', f, backup=False)
# Adjust for flex version check
files = [
'src/thermophysicalModels/reactionThermo/chemistryReaders/chemkinReader/chemkinLexer.L', # noqa: E501
'src/surfMesh/surfaceFormats/stl/STLsurfaceFormatASCII.L', # noqa: E501
'src/meshTools/triSurface/triSurface/interfaces/STL/readSTLASCII.L', # noqa: E501
'applications/utilities/preProcessing/fluentDataToFoam/fluentDataToFoam.L', # noqa: E501
'applications/utilities/mesh/conversion/gambitToFoam/gambitToFoam.L', # noqa: E501
'applications/utilities/mesh/conversion/fluent3DMeshToFoam/fluent3DMeshToFoam.L', # noqa: E501
'applications/utilities/mesh/conversion/ansysToFoam/ansysToFoam.L', # noqa: E501
'applications/utilities/mesh/conversion/fluentMeshToFoam/fluentMeshToFoam.L', # noqa: E501
'applications/utilities/mesh/conversion/fluent3DMeshToElmer/fluent3DMeshToElmer.L' # noqa: E501
]
for f in files:
filter_file(
r'#if YY_FLEX_SUBMINOR_VERSION < 34',
r'#if YY_FLEX_MAJOR_VERSION <= 2 && YY_FLEX_MINOR_VERSION <= 5 && YY_FLEX_SUBMINOR_VERSION < 34', # noqa: E501
f, backup=False)
def configure(self, spec, prefix):
"""Make adjustments to the OpenFOAM configuration files in their various
locations: etc/bashrc, etc/config.sh/FEATURE and customizations that
don't fit properly are placed in the etc/prefs.sh file (similarly for
csh).
"""
# Content for etc/prefs.{csh,sh}
self.etc_prefs = {
'000': { # Sort first
'compilerInstall': 'System',
},
'001': {},
'cmake': {
'CMAKE_DIR': spec['cmake'].prefix,
'CMAKE_BIN_DIR': spec['cmake'].prefix.bin,
},
'python': {
'PYTHON_DIR': spec['python'].home,
'PYTHON_BIN_DIR': spec['python'].home.bin,
},
'flex': {
'FLEX_SYSTEM': 1,
'FLEX_DIR': spec['flex'].prefix,
},
'bison': {
'BISON_SYSTEM': 1,
'BISON_DIR': spec['flex'].prefix,
},
'zlib': {
'ZLIB_SYSTEM': 1,
'ZLIB_DIR': spec['zlib'].prefix,
},
}
# Adjust configuration via prefs - sort second
self.etc_prefs['001'].update(self.foam_arch.foam_dict())
if '+scotch' in spec or '+ptscotch' in spec:
pkg = spec['scotch'].prefix
self.etc_prefs['scotch'] = {
'SCOTCH_SYSTEM': 1,
'SCOTCH_DIR': pkg,
'SCOTCH_BIN_DIR': pkg.bin,
'SCOTCH_LIB_DIR': pkg.lib,
'SCOTCH_INCLUDE_DIR': pkg.include,
}
if '+metis' in spec:
pkg = spec['metis'].prefix
self.etc_prefs['metis'] = {
'METIS_SYSTEM': 1,
'METIS_DIR': pkg,
'METIS_BIN_DIR': pkg.bin,
'METIS_LIB_DIR': pkg.lib,
'METIS_INCLUDE_DIR': pkg.include,
}
if '+parmetis' in spec:
pkg = spec['parmetis'].prefix
self.etc_prefs['parametis'] = {
'PARMETIS_SYSTEM': 1,
'PARMETIS_DIR': pkg,
'PARMETIS_BIN_DIR': pkg.bin,
'PARMETIS_LIB_DIR': pkg.lib,
'PARMETIS_INCLUDE_DIR': pkg.include,
}
if '+parmgridgen' in spec:
pkg = spec['parmgridgen'].prefix
self.etc_prefs['parmgridgen'] = {
'PARMGRIDGEN_SYSTEM': 1,
'PARMGRIDGEN_DIR': pkg,
'PARMGRIDGEN_BIN_DIR': pkg.bin,
'PARMGRIDGEN_LIB_DIR': pkg.lib,
'PARMGRIDGEN_INCLUDE_DIR': pkg.include,
}
if '+paraview' in self.spec:
self.etc_prefs['paraview'] = {
'PARAVIEW_SYSTEM': 1,
'PARAVIEW_DIR': spec['paraview'].prefix,
'PARAVIEW_BIN_DIR': spec['paraview'].prefix.bin,
}
self.etc_prefs['qt'] = {
'QT_SYSTEM': 1,
'QT_DIR': spec['qt'].prefix,
'QT_BIN_DIR': spec['qt'].prefix.bin,
}
# Write prefs files according to the configuration.
# Only need prefs.sh for building, but install both for end-users
write_environ(
self.etc_prefs,
posix=join_path('etc', 'prefs.sh'),
cshell=join_path('etc', 'prefs.csh'))
def build(self, spec, prefix):
"""Build using the OpenFOAM Allwmake script, with a wrapper to source
its environment first.
Only build if the compiler is known to be supported.
"""
self.foam_arch.has_rule(self.stage.source_path)
self.foam_arch.create_rules(self.stage.source_path, self)
args = []
if self.parallel: # Build in parallel? - pass via the environment
os.environ['WM_NCOMPPROCS'] = str(make_jobs)
builder = Executable(self.build_script)
builder(*args)
def install(self, spec, prefix):
"""Install under the projectdir"""
opts = str(self.foam_arch)
# Fairly ugly since intermediate targets are scattered inside sources
appdir = 'applications'
projdir = os.path.basename(self.projectdir)
mkdirp(self.projectdir, join_path(self.projectdir, appdir))
# Filtering: bashrc, cshrc
edits = {
'WM_PROJECT_INST_DIR': os.path.dirname(self.projectdir),
'WM_PROJECT_DIR': join_path('$WM_PROJECT_INST_DIR', projdir),
}
# All top-level files, except spack build info and possibly Allwmake
if '+source' in spec:
ignored = re.compile(r'^spack-.*')
else:
ignored = re.compile(r'^(Allclean|Allwmake|spack-).*')
files = [
f for f in glob.glob("*")
if os.path.isfile(f) and not ignored.search(f)
]
for f in files:
install(f, self.projectdir)
# Install directories. install applications/bin directly
# Install 'etc' before 'bin' (for symlinks)
for d in ['etc', 'bin', 'wmake', 'lib', join_path(appdir, 'bin')]:
install_tree(
d,
join_path(self.projectdir, d),
symlinks=True)
if '+source' in spec:
subitem = join_path(appdir, 'Allwmake')
install(subitem, join_path(self.projectdir, subitem))
ignored = [opts] # Ignore intermediate targets
for d in ['src', 'tutorials']:
install_tree(
d,
join_path(self.projectdir, d),
ignore=shutil.ignore_patterns(*ignored),
symlinks=True)
for d in ['solvers', 'utilities']:
install_tree(
join_path(appdir, d),
join_path(self.projectdir, appdir, d),
ignore=shutil.ignore_patterns(*ignored),
symlinks=True)
etc_dir = join_path(self.projectdir, 'etc')
rewrite_environ_files( # Adjust etc/bashrc and etc/cshrc
edits,
posix=join_path(etc_dir, 'bashrc'),
cshell=join_path(etc_dir, 'cshrc'))
self.install_links()
def install_links(self):
"""Add symlinks into bin/, lib/ (eg, for other applications)"""
# Make build log visible - it contains OpenFOAM-specific information
with working_dir(self.projectdir):
os.symlink(
join_path('.spack', 'build.out'),
join_path('log.' + str(self.foam_arch)))
# -----------------------------------------------------------------------------
|
mfherbst/spack
|
var/spack/repos/builtin/packages/foam-extend/package.py
|
Python
|
lgpl-2.1
| 17,892
|
#-*- coding: utf-8 -*-
import numpy as n
from scipy.io import wavfile as w
import imp
fun=imp.load_source("functions","../aux/functions.py")
v = fun.V
def A(fa=2.,V_dB=10.,d=2.,taba=fun.S):
return fun.T(d, fa, V_dB, taba=taba)
def adsr(s, A=20, D=20, S=-10, R=100):
return fun.AD(A=A, D=D, S=S, R=R, sonic_vector=s)
W = fun.W
Tr_i = fun.Tr
Q_i = fun.Q
D_i = fun.Sa
S_i = fun.S
H = n.hstack
V = n.vstack
f_a = 44100 # Hz, sampling frequency
############## 2.2.1 Lookup table (LUT)
Lambda_tilde=Lt=1024.
# Sine wave
foo=n.linspace(0,2*n.pi,Lt,endpoint=False)
S_i=n.sin(foo) # one period of the sine wave with Lt samples
# Square wave:
Q_i=n.hstack( ( n.ones(Lt/2)*-1 , n.ones(Lt/2) ) )
# Triangle wave:
foo=n.linspace(-1,1,Lt/2,endpoint=False)
Tr_i=n.hstack( ( foo , foo*-1 ) )
# Sawtooth wave:
D_i=n.linspace(-1,1,Lt)
def v(f=200,d=2.,tab=S_i,fv=2.,nu=2.,tabv=S_i):
Lambda=n.floor(f_a*d)
ii=n.arange(Lambda)
Lv=float(len(S_i))
    Gammav_i=n.floor(ii*fv*Lv/f_a) # indices into the LUT
Gammav_i=n.array(Gammav_i,n.int)
    Tv_i=tabv[Gammav_i%int(Lv)] # vibrato pattern for each sample
    F_i=f*( 2.**( Tv_i*nu/12. ) ) # frequency in Hz at each sample
    D_gamma_i=F_i*(Lt/float(f_a)) # table advance per sample
    Gamma_i=n.cumsum(D_gamma_i) # total table advance
    Gamma_i=n.floor( Gamma_i) # now the indices
    Gamma_i=n.array( Gamma_i, dtype=n.int) # now the indices, as integers
    return tab[Gamma_i%int(Lt)] # look the indices up in the table
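# Illustrative use of v() (not part of the original piece): a 1 s, 440 Hz note
# with a 3 Hz vibrato of 1 semitone depth, using the sine table for both the
# note and the vibrato:
#   nota = v(f=440., d=1., tab=S_i, fv=3., nu=1., tabv=S_i)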
def A(fa=2.,V_dB=10.,d=2.,taba=S_i):
Lambda=n.floor(f_a*d)
ii=n.arange(Lambda)
Lt=float(len(taba))
    Gammaa_i=n.floor(ii*fa*Lt/f_a) # indices into the LUT
Gammaa_i=n.array(Gammaa_i,n.int)
    ### 2.55 oscillation pattern of the tremolo
    A_i=taba[Gammaa_i%int(Lt)] # tremolo amplitude pattern for each sample
A_i=A_i*10.**(V_dB/20.)
return A_i
def adsr(som,A=10.,D=20.,S=-20.,R=100.,xi=1e-2):
a_S=10**(S/20.)
Lambda=len(som)
Lambda_A=int(A*f_a*0.001)
Lambda_D=int(D*f_a*0.001)
Lambda_R=int(R*f_a*0.001)
ii=n.arange(Lambda_A,dtype=n.float)
A=ii/(Lambda_A-1)
A_i=A
ii=n.arange(Lambda_A,Lambda_D+Lambda_A,dtype=n.float)
D=1-(1-a_S)*( ( ii-Lambda_A )/( Lambda_D-1) )
A_i=n.hstack( (A_i, D ) )
S=n.ones(Lambda-Lambda_R-(Lambda_A+Lambda_D),dtype=n.float)*a_S
A_i=n.hstack( ( A_i, S ) )
ii=n.arange(Lambda-Lambda_R,Lambda,dtype=n.float)
R=a_S-a_S*((ii-(Lambda-Lambda_R))/(Lambda_R-1))
A_i=n.hstack( (A_i,R) )
return som*A_i
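# Illustrative combination of the pieces above (values are just examples):
# an enveloped note with a 3 Hz tremolo of 3 dB depth would be
#   nota = adsr(v(f=440., d=1.)*A(d=1., fa=3., V_dB=3.), A=10., D=20., S=-5., R=100.)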
BPM=60. # beats per minute
DELTA=BPM/60 # beat duration
LAMBDA=DELTA*f_a # number of samples per beat
LAMBDA_=int(LAMBDA) # integer for index operations
#cabeca=[1]+[0]*(LAMBDA-1)
#contra=[0]*Lambda/2+[1]+[0]*(Lambda/2-1)
tempo=n.zeros(LAMBDA)
cabeca=n.copy(tempo); cabeca[0]=1.
contra=n.copy(tempo); contra[LAMBDA_/2]=1.
# length of the music
Delta=4*DELTA # seconds
Lambda=Delta*f_a
Lambda_=int(Lambda)
ii=n.arange(Lambda_)
linha_cabeca=cabeca[ii%LAMBDA_]
linha_contra=contra[ii%LAMBDA_]
som1=adsr(v(tabv=Tr_i ,d=.3,fv=3.,nu=7.0,f=300.),10,10,-10.)
som2=adsr(v(tabv=Tr_i ,d=.2,fv=2.,nu=1.),10,10,-10.)
som3=adsr(v(tabv=Tr_i ,d=.2,fv=10.,nu=7.),10,10,-10.)
som4=adsr(v(tabv=Tr_i ,d=.2,fv=3.,nu=7.,f=1800.),1.,100.,-60.,80.)
som5=adsr(v(tabv=Tr_i ,d=.2,fv=3.,nu=7.,f=1800.)*A(d=.2,fa=100.),1.,100.,-60.,80.)
som6=adsr(v(tabv=Tr_i ,d=.2,fv=30.,nu=7.,f=1800.)*A(d=.2),1.,100.,-60.,80.)
em3=n.copy(tempo);em3[[0,LAMBDA_/3,2*LAMBDA_/3]]=1.
linha_em3=em3[ii%LAMBDA_]
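# Illustrative note (not in the original): a rhythmic line is obtained by
# convolving a short sound with one of these impulse trains, e.g.
#   linha = n.convolve(som1, linha_cabeca)[:len(linha_cabeca)]
# which is exactly the pattern used for the lines built below.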
##############
# NOISES
Lambda = 100000 # Lambda always even
# frequency difference between neighboring coefficients:
df=f_a/float(Lambda)
# and random phase
coefs=n.exp(1j*n.random.uniform(0, 2*n.pi, Lambda))
# real part even, imaginary part odd
coefs[Lambda/2+1:]=n.real(coefs[1:Lambda/2])[::-1] - 1j*n.imag(coefs[1:Lambda/2])[::-1]
coefs[0]=0. # no DC bias
coefs[Lambda/2]=1. # the max frequency is simply real
# the frequencies corresponding to each coefficient
# not valid above Lambda/2
fi=n.arange(coefs.shape[0])*df
f0=15. # start the noise at 15 Hz
i0=n.floor(f0/df) # first valid coefficient
coefs[:i0]=n.zeros(i0)
f0=fi[i0]
# realizing the noise as time-domain samples
ruido=n.fft.ifft(coefs)
r=n.real(ruido)
rb=((r-r.min())/(r.max()-r.min()))*2-1 # white noise
# making black noise
fator=10.**(-7/20.)
alphai=fator**(n.log2(fi[i0:]/f0))
c=n.copy(coefs)
c[i0:]=c[i0:]*alphai
# real part even, imaginary part odd
c[Lambda/2+1:]=n.real(c[1:Lambda/2])[::-1] - 1j*n.imag(c[1:Lambda/2])[::-1]
ruido=n.fft.ifft(c)
r=n.real(ruido)
rp=((r-r.min())/(r.max()-r.min()))*2-1
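# Illustrative check (not in the original): both noises can be written out and
# auditioned with the same helper used at the end of this script, e.g.
#   W(rb, "branco.wav"); W(rp, "preto.wav")   # example file names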
LR=rb[n.arange(int(len(linha_em3)*2.5))%len(rb)]*A(d=int(len(linha_em3)*2.5)/f_a,fa=.2,V_dB=50.)*10**(-60/20.)
LR2=rb[n.arange(int(len(linha_em3)*4.5))%len(rb)]*A(d=int(len(linha_em3)*4.5)/f_a)*.05
LR3=6.*rp[n.arange(int(len(linha_em3)*6.5))%len(rp)]*A(d=int(len(linha_em3)*6.5)/f_a)*.05
obj1=rb[:int(.4*f_a)]*A(d=.4,fa=15.)
obj2=rp[:int(.4*f_a)]*A(d=.4,fa=10.)
obj3=adsr(rb[:int(.4*f_a)]*A(d=.4,fa=15.))
obj4=adsr(rp[:int(.4*f_a)]*A(d=.4,fa=10.),S=-5)
obj5=adsr(rp[:int(1.4*f_a)]*A(d=1.4,fa=10.),5.,500.,-20,200)
############
l1=n.convolve(obj1,linha_em3)[:len(linha_em3)]
l2=n.convolve(obj2,linha_em3)[:len(linha_em3)]
l3=n.convolve(obj3,linha_em3)[:len(linha_em3)]
l4=n.convolve(obj4,linha_em3)[:len(linha_em3)]
l6=n.convolve(obj5,linha_em3)[:len(linha_em3)]
l1_=n.convolve(obj1,linha_cabeca)[:len(linha_em3)]
l2_=n.convolve(obj2,linha_contra)[:len(linha_em3)]
l3_=n.convolve(obj3,linha_cabeca)[:len(linha_em3)]
l4_=n.convolve(obj4,linha_contra)[:len(linha_em3)]
l6_=n.convolve(obj5,linha_cabeca)[:len(linha_em3)]
print("AA")
linha1=n.convolve(som2,linha_cabeca)[:len(linha_cabeca)]
linha2=n.convolve(som4,linha_em3)[:len(linha_em3)]
linha4=n.convolve(som5,linha_em3)[:len(linha_em3)]
linha6=n.convolve(som6,linha_em3)[:len(linha_em3)]
linha3=n.convolve(som2,linha_contra)[:len(linha_contra)]
som=n.hstack((linha1+linha2+l1,linha2+linha3+l2,linha3+linha1+l3,linha1+linha2+linha3+l4,linha2+l6))
som=n.hstack((som,linha4+linha2+l1_,l2_+linha6+linha3,l3_+linha4+linha3+linha1,l4_+linha1+linha2+linha3,l6_+linha6+linha2))
som[:len(LR)]+=LR
som[len(l1)*3:len(l1)*3+len(LR2)]+=LR2
som[int(len(l1)*3.5):int(len(l1)*3.5)+len(LR3)]+=LR3
som[len(l1)*7:len(l1)*7+len(LR)]+=LR
print("BB")
W(som, "ruidosaFaixa2.wav")
|
ttm/mass
|
src/pieces3/ruidosaFaixa2.py
|
Python
|
gpl-3.0
| 6,375
|
##############################################################################
#
# Copyright (C) 2016 Comunitea Servicios Tecnológicos
# $Omar Castiñeira Saavedra <omar@comunitea.com>$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import claim_make_picking
from . import claim_make_picking_from_picking
from . import invoice_discount_wiz
|
Comunitea/CMNT_004_15
|
project-addons/crm_claim_rma_custom/wizard/__init__.py
|
Python
|
agpl-3.0
| 1,076
|
#!/usr/bin/env python
"""
Transforms an SQL extract of pre-6.0 Stroom users to 6.0 users, ready for
import to the stroom-auth database. If you're upgrading to Stroom 6.0 then
this is a necessary step.
This script expects the input TSV to have the following columns: NAME, STAT.
If a user already exists it won't try to insert that user.
You could use something like the following SQL to dump the users from Stroom
into a TSV:
echo 'SELECT NAME, STAT FROM USR' | mysql -h"192.168.1.9" -P"3307" \
-u"stroomuser" -p"stroompassword1" stroom > user_extract.tsv
If you have the right permissions you could also get the extract like this by
executing the following in mysql:
SELECT NAME, STAT INTO OUTFILE './user_extract.tsv' FROM USR;
"""
import csv
import datetime
import sys
usage = """
Usage:
transform_user_extract.py <input_tsv> <output_sql>
E.g.
./transform_user_extract.py user_extract.tsv transformed_users.sql
"""
def transform(input_tsv, output_sql):
status_mapping = {
0: "enabled",
1: "disabled",
2: "locked",
3: "inactive"
}
created_on = datetime.datetime.now().isoformat()
insert_template = """
INSERT IGNORE INTO users (
email,
password_hash,
state,
comments,
created_on,
created_by_user)
VALUES (
'{0}',
'No password set',
'{1}',
'This user was created during the upgrade to Stroom 6',
'{2}',
'transform_user_extract.py'
);"""
# Not doing 'with's on one line because we need Python 2.6 compatibility.
with open(input_tsv, 'rb') as tsvin:
with open(output_sql, 'w') as sqlout:
tsvin = csv.reader(tsvin, delimiter='\t')
# Remove the headers so we don't try to process them later
next(tsvin, None)
processed = 0
total = 0
for row in tsvin:
total += 1
print "Adding row:"
print "\tin:\t {0},{1}".format(row[0], row[1])
email = row[0]
status = status_mapping.get(int(row[1]))
if status is None:
print "\tout:\t ERROR! Couldn't map the status"
else:
print "\tout:\t {0},{1}".format(row[0], status)
processed += 1
sqlout.write(insert_template.format(
email, status, created_on))
print "Processed {0} users".format(processed)
if total != processed:
print "Unable to process {0} user(s).".format(
total - processed)
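# Illustrative example (hypothetical data, not from a real extract): a TSV row
# "jbloggs<TAB>0" maps to status "enabled" and is emitted roughly as
#   INSERT IGNORE INTO users (email, password_hash, state, comments, created_on,
#   created_by_user) VALUES ('jbloggs', 'No password set', 'enabled',
#   'This user was created during the upgrade to Stroom 6', '<now>',
#   'transform_user_extract.py');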
def main():
help_string = __doc__
# Something like docopt would be better for handling args but we're
# targeting environments where this might not be available.
    if len(sys.argv) == 1:
print help_string
print usage
    elif len(sys.argv) == 3:
input_file = sys.argv[1]
output_file = sys.argv[2]
transform(input_file, output_file)
else:
print 'Bad number of arguments'
print usage
if __name__ == '__main__':
main()
|
gchq/stroom
|
stroom-security/stroom-security-identity-db-jooq/src/scripts/transform_user_extract.py
|
Python
|
apache-2.0
| 3,176
|
# -*- coding: utf-8 -*-
#
##############################################################################
#
# Authors: Adrien Peiffer
# Copyright (c) 2014 Acsone SA/NV (http://www.acsone.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import os
import logging
import time
import openerp.tests.common as common
from openerp.tools import convert_xml_import
from openerp import workflow
from openerp import tools
_logger = logging.getLogger(__name__)
YEAR = time.strftime('%Y')
def get_file(module_name, fp):
pathname = os.path.join(module_name, fp)
return tools.file_open(pathname)
DB = common.DB
ADMIN_USER_ID = common.ADMIN_USER_ID
def load_data(cr, module_name, fp, idref=None, mode='init',
noupdate=False, report=None):
pathname = os.path.join(module_name, fp)
fp = get_file(module_name, fp)
_logger.info("Import datas from %s" % pathname)
convert_xml_import(cr, module_name, fp, idref, mode, noupdate, report)
class companyweb_test(common.TransactionCase):
def create_invoice(self, partner_id, date, amount):
in_id = self.registry('account.invoice').create(
self.cr, self.uid, {'reference_type': "none",
'date_invoice': date,
'partner_id': partner_id,
'account_id': self.ref('account.a_recv'),
'type': 'out_invoice',
})
self.registry('account.invoice.line').create(
self.cr, self.uid, {'name': "xxx",
'invoice_id': in_id,
'account_id': self.ref('account.a_sale'),
'price_unit': amount,
'quantity': 1,
})
workflow.trg_validate(
self.uid, 'account.invoice', in_id, 'invoice_open', self.cr)
return in_id
def create_refund(self, partner_id, date, amount):
in_id = self.registry('account.invoice').create(
self.cr, self.uid, {'reference_type': "none",
'date_invoice': date,
'partner_id': partner_id,
'account_id': self.ref('account.a_recv'),
'type': 'out_refund',
})
self.registry('account.invoice.line').create(
self.cr, self.uid, {'name': "xxx",
'invoice_id': in_id,
'account_id': self.ref('account.a_sale'),
'price_unit': amount,
'quantity': 1,
})
workflow.trg_validate(
self.uid, 'account.invoice', in_id, 'invoice_open', self.cr)
return in_id
def create_payment(self, date, amount, inv):
_type = inv.type in ('out_invoice', 'out_refund') and \
'receipt' or 'payment'
voucher_id = self.registry('account.voucher').create(
self.cr, self.uid, {'partner_id': inv.partner_id.id,
'type': _type,
'account_id': self.ref('account.a_recv'),
'date': date,
'amount': amount,
})
voucher_browse = self.registry('account.voucher').browse(
self.cr, self.uid, voucher_id)
line = self.registry('account.voucher').recompute_voucher_lines(
self.cr, self.uid, [voucher_id],
voucher_browse.partner_id.id,
voucher_browse.journal_id.id,
voucher_browse.amount,
voucher_browse.currency_id.id,
voucher_browse.type,
voucher_browse.date,
context=None)
line_cr = line['value']['line_cr_ids']
line_cr_ids = list()
for line in line_cr:
data = dict()
for key, value in line.items():
data[key] = value
data['voucher_id'] = voucher_id
line_cr_ids.append(
self.registry('account.voucher.line').create(
self.cr, self.uid, data))
self.registry('account.voucher').button_proforma_voucher(
self.cr, self.uid, [voucher_id], context=None)
voucher_browse = self.registry('account.voucher').browse(
self.cr, self.uid, voucher_id)
self.registry('account.move').post(
self.cr, self.uid, [voucher_browse.move_id.id])
def create_openSalesDoc(self, month, year):
wizard_id = self.registry('account.companyweb.report.wizard').create(
self.cr, self.uid,
{'chart_account_id': 1, 'month': month, 'year': year},
context=None)
self.registry('account.companyweb.report.wizard').create_openSalesDocs(
self.cr, self. uid, [wizard_id])
wizard = self.registry('account.companyweb.report.wizard').browse(
self.cr, self.uid, wizard_id)
import xlrd
import tempfile
file_path = tempfile.gettempdir() + '/file.xlsx'
data = wizard.data
f = open(file_path, 'wb')
f.write(data.decode('base64'))
f.close()
return xlrd.open_workbook(file_path)
def create_createdSalesDoc(self, month, year):
wizard_id = self.registry('account.companyweb.report.wizard').create(
self.cr, self.uid,
{'chart_account_id': 1, 'month': month, 'year': year},
context=None)
report_wizard = self.registry('account.companyweb.report.wizard')
report_wizard.create_createdSalesDocs(self.cr, self. uid, [wizard_id])
wizard = self.registry('account.companyweb.report.wizard').browse(
self.cr, self.uid, wizard_id)
import xlrd
import tempfile
file_path = tempfile.gettempdir() + '/file.xlsx'
data = wizard.data
f = open(file_path, 'wb')
f.write(data.decode('base64'))
f.close()
return xlrd.open_workbook(file_path)
def setUp(self):
super(companyweb_test, self).setUp()
company_id = self.ref('base.main_company')
company_model = self.registry('res.company')
company_model.write(
self.cr, self.uid, company_id, {'vat': 'BE0477472701'})
# set special=False on demo data periods
# TODO: remove when
# https://code.launchpad.net/~acsone-openerp/openobject-addons/7.0-bug-1281579-sbi/+merge/207311
# is merged
period_model = self.registry('account.period')
for n in range(1, 13):
period_id = self.ref('account.period_%d' % n)
period_model.write(self.cr, self.uid, period_id,
{'special': False})
def test_created_doc_companyweb(self):
date = YEAR + '-01-01'
amount = 1000
partner_id = self.registry('res.partner').create(
self.cr, self.uid, {'name': 'test', 'vat': 'BE0460392583', })
in_id = self.create_invoice(partner_id, date, amount)
invoice = self.registry('account.invoice').browse(
self.cr, self.uid, in_id)
wb = self.create_createdSalesDoc("01", YEAR)
sheet = wb.sheet_by_index(0)
trouve = False
i = 1
while (i < sheet.nrows) and (not trouve):
if (sheet.cell_value(i, 3) == invoice.number):
trouve = True
ligne = i
i += 1
        self.assertTrue(trouve, "Invoice not found in xls file")
if (trouve):
self.assertAlmostEqual(
sheet.cell_value(ligne, 8), amount, 2, 'amount')
self.assertEquals(sheet.cell_value(ligne, 4), "I", "docType")
self.assertEquals(sheet.cell_value(ligne, 5), date, "date")
def test_created_doc_diffrent_month_companyweb(self):
partner_id = self.registry('res.partner').create(
self.cr, self.uid, {'name': 'test', 'vat': 'BE0460392583', })
in_id = self.create_invoice(partner_id, YEAR + '-01-01', 1000)
invoice = self.registry('account.invoice').browse(
self.cr, self.uid, in_id)
wb = self.create_createdSalesDoc("02", YEAR)
sheet = wb.sheet_by_index(0)
trouve = False
i = 1
while (i < sheet.nrows) and (not trouve):
if (sheet.cell_value(i, 3) == invoice.number):
trouve = True
i += 1
self.assertFalse(trouve, "Invoice found in xls file")
def test_open_doc_companyweb(self):
date = YEAR + '-01-01'
amount = 1000
partner_id = self.registry('res.partner').create(
self.cr, self.uid, {'name': 'test', 'vat': 'BE0460392583', })
in_id = self.create_invoice(partner_id, date, amount)
invoice = self.registry('account.invoice').browse(
self.cr, self.uid, in_id)
wb = self.create_openSalesDoc("01", YEAR)
sheet = wb.sheet_by_index(0)
trouve = False
i = 1
while (i < sheet.nrows) and (not trouve):
if (sheet.cell_value(i, 3) == invoice.number):
trouve = True
ligne = i
i += 1
        self.assertTrue(trouve, "Invoice not found in xls file")
if (trouve):
self.assertAlmostEqual(
sheet.cell_value(ligne, 8), amount, 2, 'amount')
self.assertEquals(sheet.cell_value(ligne, 4), "I", "docType")
self.assertEquals(sheet.cell_value(ligne, 5), date, "date")
self.assertEquals(sheet.cell_value(ligne, 4), "I", "docType")
def test_open_doc_complete_reconcile_companyweb(self):
partner_id = self.registry('res.partner').create(
self.cr, self.uid, {'name': 'test', 'vat': 'BE0460392583', })
in_id = self.create_invoice(partner_id, YEAR + '-01-01', 1000)
inv = self.registry('account.invoice').browse(self.cr, self.uid, in_id)
self.create_payment(YEAR + '-01-20', 1000, inv)
wb = self.create_openSalesDoc("02", YEAR)
sheet = wb.sheet_by_index(0)
trouve = False
i = 1
while (i < sheet.nrows) and (not trouve):
if (sheet.cell_value(i, 3) == inv.number):
trouve = True
i += 1
self.assertFalse(trouve, "Invoice found in xls file")
def test_open_doc_partial_reconcile_1_companyweb(self):
partner_id = self.registry('res.partner').create(
self.cr, self.uid, {'name': 'test', 'vat': 'BE0460392583', })
in_id = self.create_invoice(partner_id, YEAR + '-01-01', 1000)
inv = self.registry('account.invoice').browse(self.cr, self.uid, in_id)
self.create_payment(YEAR + '-01-20', 500, inv)
wb = self.create_openSalesDoc("02", YEAR)
sheet = wb.sheet_by_index(0)
trouve = False
i = 1
while (i < sheet.nrows) and (not trouve):
if (sheet.cell_value(i, 3) == inv.number):
trouve = True
i += 1
        self.assertTrue(trouve, "Invoice not found in xls file")
def test_open_doc_partial_reconcile_2_companyweb(self):
partner_id = self.registry('res.partner').create(
self.cr, self.uid, {'name': 'test', 'vat': 'BE0460392583', })
in_id = self.create_invoice(partner_id, YEAR + '-01-01', 1000)
inv = self.registry('account.invoice').browse(self.cr, self.uid, in_id)
self.create_payment(YEAR + '-01-20', 500, inv)
self.create_payment(YEAR + '-02-20', 500, inv)
wb = self.create_openSalesDoc("01", YEAR)
sheet = wb.sheet_by_index(0)
trouve = False
i = 1
while (i < sheet.nrows) and (not trouve):
if (sheet.cell_value(i, 3) == inv.number):
trouve = True
i += 1
        self.assertTrue(trouve, "Invoice not found in xls file")
wb = self.create_openSalesDoc("02", YEAR)
sheet = wb.sheet_by_index(0)
trouve = False
i = 1
while (i < sheet.nrows) and (not trouve):
if (sheet.cell_value(i, 3) == inv.number):
trouve = True
i += 1
self.assertFalse(trouve, "Invoice found in xls file")
def test_open_doc_openAmount(self):
partner_id = self.registry('res.partner').create(
self.cr, self.uid, {'name': 'test', 'vat': 'BE0460392583', })
in_id = self.create_invoice(partner_id, YEAR + '-01-01', 1000)
inv = self.registry('account.invoice').browse(self.cr, self.uid, in_id)
self.create_payment(YEAR + '-01-20', 250, inv)
self.create_payment(YEAR + '-02-20', 750, inv)
wb = self.create_openSalesDoc("01", YEAR)
sheet = wb.sheet_by_index(0)
trouve = False
i = 1
while (i < sheet.nrows) and (not trouve):
if (sheet.cell_value(i, 3) == inv.number):
trouve = True
ligne = i
i += 1
        self.assertTrue(trouve, "Invoice not found in xls file")
self.assertAlmostEqual(sheet.cell_value(ligne, 9), 750, 2, 'amount')
def test_open_doc_custAcc(self):
partner_id = self.registry('res.partner').create(
self.cr, self.uid, {'name': 'test', 'vat': 'BE0460392583', })
in_id1 = self.create_invoice(partner_id, YEAR + '-01-01', 1000)
inv1 = self.registry('account.invoice').browse(
self.cr, self.uid, in_id1)
in_id2 = self.create_invoice(partner_id, YEAR + '-01-01', 500)
inv2 = self.registry('account.invoice').browse(
self.cr, self.uid, in_id2)
wb = self.create_openSalesDoc("01", YEAR)
sheet = wb.sheet_by_index(0)
i = 1
ligne = list()
while (i < sheet.nrows):
if (sheet.cell_value(i, 3) == inv1.number or
sheet.cell_value(i, 3) == inv2.number):
ligne.append(i)
i += 1
self.assertAlmostEqual(
sheet.cell_value(ligne[0], 10), 1500, 2, 'amount')
self.assertAlmostEqual(
sheet.cell_value(
ligne[0], 10), sheet.cell_value(ligne[1], 10), 2, 'amount')
def test_custAcc_refund(self):
partner_id = self.registry('res.partner').create(
self.cr, self.uid, {'name': 'test', 'vat': 'BE0460392583', })
in_id = self.create_invoice(partner_id, YEAR + '-01-01', 1000)
inv = self.registry('account.invoice').browse(
self.cr, self.uid, in_id)
self.create_refund(partner_id, YEAR + '-02-02', 1000)
wb = self.create_openSalesDoc("01", YEAR)
sheet = wb.sheet_by_index(0)
i = 1
while (i < sheet.nrows):
if (sheet.cell_value(i, 3) == inv.number):
ligne = i
i += 1
self.assertAlmostEqual(
sheet.cell_value(ligne, 10), 1000, 2, 'amount')
wb = self.create_openSalesDoc("02", YEAR)
sheet = wb.sheet_by_index(0)
i = 1
while (i < sheet.nrows):
if (sheet.cell_value(i, 3) == inv.number):
ligne = i
i += 1
self.assertAlmostEqual(
sheet.cell_value(ligne, 10), 0, 2, 'amount')
|
akretion/l10n-belgium
|
account_companyweb/tests/test_companyweb.py
|
Python
|
agpl-3.0
| 16,078
|
# -*- coding: utf-8 -*-
from module.plugins.internal.DeadHoster import DeadHoster, create_getInfo
class SharebeesCom(DeadHoster):
__name__ = "SharebeesCom"
__type__ = "hoster"
__pattern__ = r"http://(?:\w*\.)*?sharebees.com/\w{12}"
__version__ = "0.02"
__description__ = """ShareBees hoster plugin"""
__author_name__ = ("zoidberg")
__author_mail__ = ("zoidberg@mujmail.cz")
getInfo = create_getInfo(SharebeesCom)
|
wangjun/pyload
|
module/plugins/hoster/SharebeesCom.py
|
Python
|
gpl-3.0
| 446
|
"""Suite CodeWarrior suite: Terms for scripting the CodeWarrior IDE
Level 0, version 0
Generated from /Volumes/Sap/Applications (Mac OS 9)/Metrowerks CodeWarrior 7.0/Metrowerks CodeWarrior/CodeWarrior IDE 4.2.5
AETE/AEUT resource version 1/0, language 0, script 0
"""
import aetools
import MacOS
_code = 'CWIE'
class CodeWarrior_suite_Events:
_argmap_add = {
'new' : 'kocl',
'with_data' : 'data',
'to_targets' : 'TTGT',
'to_group' : 'TGRP',
}
def add(self, _object, _attributes={}, **_arguments):
"""add: add elements to a project or target
Required argument: an AE object reference
Keyword argument new: the class of the new element or elements to add
Keyword argument with_data: the initial data for the element or elements
Keyword argument to_targets: the targets to which the new element or elements will be added
Keyword argument to_group: the group to which the new element or elements will be added
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'CWIE'
_subcode = 'ADDF'
aetools.keysubst(_arguments, self._argmap_add)
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def build(self, _no_object=None, _attributes={}, **_arguments):
"""build: build a project or target (equivalent of the Make menu command)
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'CWIE'
_subcode = 'MAKE'
if _arguments: raise TypeError, 'No optional args expected'
if _no_object != None: raise TypeError, 'No direct arg expected'
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def check(self, _object=None, _attributes={}, **_arguments):
"""check: check the syntax of a file in a project or target
Required argument: the file or files to be checked
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'CWIE'
_subcode = 'CHEK'
if _arguments: raise TypeError, 'No optional args expected'
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def compile_file(self, _object=None, _attributes={}, **_arguments):
"""compile file: compile a file in a project or target
Required argument: the file or files to be compiled
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'CWIE'
_subcode = 'COMP'
if _arguments: raise TypeError, 'No optional args expected'
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def disassemble_file(self, _object=None, _attributes={}, **_arguments):
"""disassemble file: disassemble a file in a project or target
Required argument: the file or files to be disassembled
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'CWIE'
_subcode = 'DASM'
if _arguments: raise TypeError, 'No optional args expected'
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_export = {
'in_' : 'kfil',
}
def export(self, _no_object=None, _attributes={}, **_arguments):
"""export: Export the project file as an XML file
Keyword argument in_: the XML file in which to export the project
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'CWIE'
_subcode = 'EXPT'
aetools.keysubst(_arguments, self._argmap_export)
if _no_object != None: raise TypeError, 'No direct arg expected'
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def remove_object_code(self, _no_object=None, _attributes={}, **_arguments):
"""remove object code: remove object code from a project or target
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'CWIE'
_subcode = 'RMOB'
if _arguments: raise TypeError, 'No optional args expected'
if _no_object != None: raise TypeError, 'No direct arg expected'
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def remove_target_files(self, _object, _attributes={}, **_arguments):
"""remove target files: remove files from a target
Required argument: an AE object reference
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'CWIE'
_subcode = 'RMFL'
if _arguments: raise TypeError, 'No optional args expected'
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def run_target(self, _no_object=None, _attributes={}, **_arguments):
"""run target: run a project or target
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'CWIE'
_subcode = 'RUN '
if _arguments: raise TypeError, 'No optional args expected'
if _no_object != None: raise TypeError, 'No direct arg expected'
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def touch_file(self, _object=None, _attributes={}, **_arguments):
"""touch file: touch a file in a project or target for compilation
Required argument: the file or files to be touched
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'CWIE'
_subcode = 'TOCH'
if _arguments: raise TypeError, 'No optional args expected'
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def update(self, _no_object=None, _attributes={}, **_arguments):
"""update: bring a project or target up to date
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'CWIE'
_subcode = 'UP2D'
if _arguments: raise TypeError, 'No optional args expected'
if _no_object != None: raise TypeError, 'No direct arg expected'
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
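# Illustrative sketch (not part of the generated suite): these event methods are
# intended to be mixed into an application talker class. Assuming the enclosing
# CodeWarrior package exposes such a class (as aetools-generated packages
# usually do), driving the IDE could look roughly like:
#   import CodeWarrior
#   ide = CodeWarrior.CodeWarrior()   # hypothetical talker construction
#   ide.update()                      # bring the current project up to date
# The exact way the talker is constructed depends on the package's __init__.py.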
class single_class_browser(aetools.ComponentItem):
"""single class browser - a single class browser """
want = '1BRW'
class _Prop_inherits(aetools.NProperty):
"""inherits - all properties and elements of the given class are inherited by this class. """
which = 'c@#^'
want = 'TXTD'
single_class_browsers = single_class_browser
class single_class_hierarchy(aetools.ComponentItem):
"""single class hierarchy - a single class hierarchy document """
want = '1HIR'
single_class_hierarchies = single_class_hierarchy
class class_browser(aetools.ComponentItem):
"""class browser - a class browser """
want = 'BROW'
class_browsers = class_browser
class file_compare_document(aetools.ComponentItem):
"""file compare document - a file compare document """
want = 'COMP'
file_compare_documents = file_compare_document
class catalog_document(aetools.ComponentItem):
"""catalog document - a browser catalog document """
want = 'CTLG'
catalog_documents = catalog_document
class editor_document(aetools.ComponentItem):
"""editor document - an editor document """
want = 'EDIT'
editor_documents = editor_document
class class_hierarchy(aetools.ComponentItem):
"""class hierarchy - a class hierarchy document """
want = 'HIER'
class_hierarchies = class_hierarchy
class project_inspector(aetools.ComponentItem):
"""project inspector - the project inspector """
want = 'INSP'
project_inspectors = project_inspector
class message_document(aetools.ComponentItem):
"""message document - a message document """
want = 'MSSG'
message_documents = message_document
class build_progress_document(aetools.ComponentItem):
"""build progress document - a build progress document """
want = 'PRGS'
build_progress_documents = build_progress_document
class project_document(aetools.ComponentItem):
"""project document - a project document """
want = 'PRJD'
class _Prop_current_target(aetools.NProperty):
"""current target - the current target """
which = 'CURT'
want = 'TRGT'
# element 'TRGT' as ['indx', 'name', 'test', 'rang']
project_documents = project_document
class subtarget(aetools.ComponentItem):
"""subtarget - a target that is prerequisite for another target """
want = 'SBTG'
class _Prop_link_against_output(aetools.NProperty):
"""link against output - is the output of this subtarget linked into its dependent target? """
which = 'LNKO'
want = 'bool'
class _Prop_target(aetools.NProperty):
"""target - the target that is dependent on this subtarget """
which = 'TrgT'
want = 'TRGT'
subtargets = subtarget
class target_file(aetools.ComponentItem):
"""target file - a source or header file in a target """
want = 'SRCF'
class _Prop_code_size(aetools.NProperty):
"""code size - the size of the code (in bytes) produced by compiling this source file """
which = 'CSZE'
want = 'long'
class _Prop_compiled_date(aetools.NProperty):
"""compiled date - the date and this source file was last compiled """
which = 'CMPD'
want = 'ldt '
class _Prop_data_size(aetools.NProperty):
"""data size - the size of the date (in bytes) produced by compiling this source file """
which = 'DSZE'
want = 'long'
class _Prop_debug(aetools.NProperty):
"""debug - is debugging information generated for this source file? """
which = 'DBUG'
want = 'bool'
class _Prop_dependents(aetools.NProperty):
"""dependents - the source files that need this source file in order to build """
which = 'DPND'
want = 'list'
class _Prop_id(aetools.NProperty):
"""id - the unique ID number of the target file """
which = 'ID '
want = 'long'
class _Prop_init_before(aetools.NProperty):
"""init before - is the \xd4initialize before\xd5 flag set for this shared library? """
which = 'INIT'
want = 'bool'
class _Prop_link_index(aetools.NProperty):
"""link index - the index of the source file in its target\xd5s link order (-1 if source file is not in link order) """
which = 'LIDX'
want = 'long'
class _Prop_linked(aetools.NProperty):
"""linked - is the source file in the link order of its target? """
which = 'LINK'
want = 'bool'
class _Prop_location(aetools.NProperty):
"""location - the location of the target file on disk """
which = 'FILE'
want = 'fss '
class _Prop_merge_output(aetools.NProperty):
"""merge output - is this shared library merged into another code fragment? """
which = 'MRGE'
want = 'bool'
class _Prop_modified_date(aetools.NProperty):
"""modified date - the date and time this source file was last modified """
which = 'MODD'
want = 'ldt '
class _Prop_path(aetools.NProperty):
"""path - the path of the source file on disk """
which = 'Path'
want = 'itxt'
class _Prop_prerequisites(aetools.NProperty):
"""prerequisites - the source files needed to build this source file """
which = 'PRER'
want = 'list'
class _Prop_type(aetools.NProperty):
"""type - the type of source file """
which = 'FTYP'
want = 'FTYP'
class _Prop_weak_link(aetools.NProperty):
"""weak link - is this shared library linked weakly? """
which = 'WEAK'
want = 'bool'
target_files = target_file
class symbol_browser(aetools.ComponentItem):
"""symbol browser - a symbol browser """
want = 'SYMB'
symbol_browsers = symbol_browser
class ToolServer_worksheet(aetools.ComponentItem):
"""ToolServer worksheet - a ToolServer worksheet """
want = 'TOOL'
ToolServer_worksheets = ToolServer_worksheet
class target(aetools.ComponentItem):
"""target - a target in a project """
want = 'TRGT'
class _Prop_name(aetools.NProperty):
"""name - """
which = 'pnam'
want = 'itxt'
class _Prop_project_document(aetools.NProperty):
"""project document - the project document that contains this target """
which = 'PrjD'
want = 'PRJD'
# element 'SBTG' as ['indx', 'test', 'rang']
# element 'SRCF' as ['indx', 'test', 'rang']
targets = target
class text_document(aetools.ComponentItem):
"""text document - a document that contains text """
want = 'TXTD'
class _Prop_modified(aetools.NProperty):
"""modified - Has the document been modified since the last save? """
which = 'imod'
want = 'bool'
class _Prop_selection(aetools.NProperty):
"""selection - the selection visible to the user """
which = 'sele'
want = 'csel'
# element 'cha ' as ['indx', 'rele', 'rang', 'test']
# element 'cins' as ['rele']
# element 'clin' as ['indx', 'rang', 'rele']
# element 'ctxt' as ['rang']
text_documents = text_document
single_class_browser._superclassnames = ['text_document']
single_class_browser._privpropdict = {
'inherits' : _Prop_inherits,
}
single_class_browser._privelemdict = {
}
import Standard_Suite
single_class_hierarchy._superclassnames = ['document']
single_class_hierarchy._privpropdict = {
'inherits' : _Prop_inherits,
}
single_class_hierarchy._privelemdict = {
}
class_browser._superclassnames = ['text_document']
class_browser._privpropdict = {
'inherits' : _Prop_inherits,
}
class_browser._privelemdict = {
}
file_compare_document._superclassnames = ['text_document']
file_compare_document._privpropdict = {
'inherits' : _Prop_inherits,
}
file_compare_document._privelemdict = {
}
catalog_document._superclassnames = ['text_document']
catalog_document._privpropdict = {
'inherits' : _Prop_inherits,
}
catalog_document._privelemdict = {
}
editor_document._superclassnames = ['text_document']
editor_document._privpropdict = {
'inherits' : _Prop_inherits,
}
editor_document._privelemdict = {
}
class_hierarchy._superclassnames = ['document']
class_hierarchy._privpropdict = {
'inherits' : _Prop_inherits,
}
class_hierarchy._privelemdict = {
}
project_inspector._superclassnames = ['document']
project_inspector._privpropdict = {
'inherits' : _Prop_inherits,
}
project_inspector._privelemdict = {
}
message_document._superclassnames = ['text_document']
message_document._privpropdict = {
'inherits' : _Prop_inherits,
}
message_document._privelemdict = {
}
build_progress_document._superclassnames = ['document']
build_progress_document._privpropdict = {
'inherits' : _Prop_inherits,
}
build_progress_document._privelemdict = {
}
project_document._superclassnames = ['document']
project_document._privpropdict = {
'current_target' : _Prop_current_target,
'inherits' : _Prop_inherits,
}
project_document._privelemdict = {
'target' : target,
}
subtarget._superclassnames = ['target']
subtarget._privpropdict = {
'inherits' : _Prop_inherits,
'link_against_output' : _Prop_link_against_output,
'target' : _Prop_target,
}
subtarget._privelemdict = {
}
target_file._superclassnames = []
target_file._privpropdict = {
'code_size' : _Prop_code_size,
'compiled_date' : _Prop_compiled_date,
'data_size' : _Prop_data_size,
'debug' : _Prop_debug,
'dependents' : _Prop_dependents,
'id' : _Prop_id,
'init_before' : _Prop_init_before,
'link_index' : _Prop_link_index,
'linked' : _Prop_linked,
'location' : _Prop_location,
'merge_output' : _Prop_merge_output,
'modified_date' : _Prop_modified_date,
'path' : _Prop_path,
'prerequisites' : _Prop_prerequisites,
'type' : _Prop_type,
'weak_link' : _Prop_weak_link,
}
target_file._privelemdict = {
}
symbol_browser._superclassnames = ['text_document']
symbol_browser._privpropdict = {
'inherits' : _Prop_inherits,
}
symbol_browser._privelemdict = {
}
ToolServer_worksheet._superclassnames = ['text_document']
ToolServer_worksheet._privpropdict = {
'inherits' : _Prop_inherits,
}
ToolServer_worksheet._privelemdict = {
}
target._superclassnames = []
target._privpropdict = {
'name' : _Prop_name,
'project_document' : _Prop_project_document,
}
target._privelemdict = {
'subtarget' : subtarget,
'target_file' : target_file,
}
text_document._superclassnames = ['document']
text_document._privpropdict = {
'inherits' : _Prop_inherits,
'modified' : _Prop_modified,
'selection' : _Prop_selection,
}
text_document._privelemdict = {
'character' : Standard_Suite.character,
'insertion_point' : Standard_Suite.insertion_point,
'line' : Standard_Suite.line,
'text' : Standard_Suite.text,
}
_Enum_DKND = {
'project' : 'PRJD', # a project document
'editor_document' : 'EDIT', # an editor document
'message' : 'MSSG', # a message document
'file_compare' : 'COMP', # a file compare document
'catalog_document' : 'CTLG', # a browser catalog
'class_browser' : 'BROW', # a class browser document
'single_class_browser' : '1BRW', # a single class browser document
'symbol_browser' : 'SYMB', # a symbol browser document
'class_hierarchy' : 'HIER', # a class hierarchy document
'single_class_hierarchy' : '1HIR', # a single class hierarchy document
'project_inspector' : 'INSP', # a project inspector
'ToolServer_worksheet' : 'TOOL', # the ToolServer worksheet
'build_progress_document' : 'PRGS', # the build progress window
}
_Enum_FTYP = {
'library_file' : 'LIBF', # a library file
'project_file' : 'PRJF', # a project file
'resource_file' : 'RESF', # a resource file
'text_file' : 'TXTF', # a text file
'unknown_file' : 'UNKN', # unknown file type
}
_Enum_Inte = {
'never_interact' : 'eNvr', # never allow user interactions
'interact_with_self' : 'eInS', # allow user interaction only when an AppleEvent is sent from within CodeWarrior
'interact_with_local' : 'eInL', # allow user interaction when AppleEvents are sent from applications on the same machine (default)
'interact_with_all' : 'eInA', # allow user interaction from both local and remote AppleEvents
}
_Enum_PERM = {
'read_write' : 'RdWr', # the file is open with read/write permission
'read_only' : 'Read', # the file is open with read/only permission
'checked_out_read_write' : 'CkRW', # the file is checked out with read/write permission
'checked_out_read_only' : 'CkRO', # the file is checked out with read/only permission
'checked_out_read_modify' : 'CkRM', # the file is checked out with read/modify permission
'locked' : 'Lock', # the file is locked on disk
'none' : 'LNNO', # the file is new
}
#
# Indices of types declared in this module
#
_classdeclarations = {
'1BRW' : single_class_browser,
'1HIR' : single_class_hierarchy,
'BROW' : class_browser,
'COMP' : file_compare_document,
'CTLG' : catalog_document,
'EDIT' : editor_document,
'HIER' : class_hierarchy,
'INSP' : project_inspector,
'MSSG' : message_document,
'PRGS' : build_progress_document,
'PRJD' : project_document,
'SBTG' : subtarget,
'SRCF' : target_file,
'SYMB' : symbol_browser,
'TOOL' : ToolServer_worksheet,
'TRGT' : target,
'TXTD' : text_document,
}
_propdeclarations = {
'CMPD' : _Prop_compiled_date,
'CSZE' : _Prop_code_size,
'CURT' : _Prop_current_target,
'DBUG' : _Prop_debug,
'DPND' : _Prop_dependents,
'DSZE' : _Prop_data_size,
'FILE' : _Prop_location,
'FTYP' : _Prop_type,
'ID ' : _Prop_id,
'INIT' : _Prop_init_before,
'LIDX' : _Prop_link_index,
'LINK' : _Prop_linked,
'LNKO' : _Prop_link_against_output,
'MODD' : _Prop_modified_date,
'MRGE' : _Prop_merge_output,
'PRER' : _Prop_prerequisites,
'Path' : _Prop_path,
'PrjD' : _Prop_project_document,
'TrgT' : _Prop_target,
'WEAK' : _Prop_weak_link,
'c@#^' : _Prop_inherits,
'imod' : _Prop_modified,
'pnam' : _Prop_name,
'sele' : _Prop_selection,
}
_compdeclarations = {
}
_enumdeclarations = {
'DKND' : _Enum_DKND,
'FTYP' : _Enum_FTYP,
'Inte' : _Enum_Inte,
'PERM' : _Enum_PERM,
}
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-2.3/Lib/plat-mac/lib-scriptpackages/CodeWarrior/CodeWarrior_suite.py
|
Python
|
mit
| 23,097
|
from django.db import models
from django.core.exceptions import ObjectDoesNotExist
class QuestionManager(models.Manager):
def available(self, *args, **kwargs):
qs = self.get_queryset()
return qs.filter(available=True)
class SurveyResultManager(models.Manager):
def create_objects(self, user, instrument):
"""Creates SurveyResult objects for the given user's responses to the
specified Instrument.
        Returns a QuerySet of the created SurveyResult objects.
"""
created_ids = set() # IDs for created objects.
# ONLY applicable to Likert Questions.
qtypes = {t.lower() for t, q in instrument.questions}
        if 'likertquestion' not in qtypes or len(qtypes) != 1:
raise ValueError("A SurveyResult is only valid for LikertQuestions")
questions = [q for qt, q in instrument.questions]
if len(questions) % 2 != 0:
# Maybe survey is incomplete?
raise ValueError("Instruments must have even number of questions")
# TODO: There's probably a nicer way to do this :-/
# Split the questions into two subscales; they should be ordered
# correctly by default.
middle = int(len(questions) - len(questions) / 2)
a, b = questions[:middle], questions[middle:]
try:
for q1, q2 in zip(a, b):
# Q1 - Q2; discard anything less than zero (keep 0)
# We need the user's responses to these questions.
r1 = q1.likertresponse_set.filter(user=user).latest()
r2 = q2.likertresponse_set.filter(user=user).latest()
score = max(r1.selected_option - r2.selected_option, 0)
labels = list(set(q1.labels + q2.labels))
obj = self.create(
user=user,
instrument=instrument,
score=score,
labels=labels
)
created_ids.add(obj.id)
except ObjectDoesNotExist:
return self.get_queryset().none()
return self.get_queryset().filter(id__in=created_ids)
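# Worked example (illustrative numbers, not from real data): for an instrument
# with four LikertQuestions [q1, q2, q3, q4], middle == 2, so a == [q1, q2],
# b == [q3, q4] and the scored pairs are (q1, q3) and (q2, q4). If the user's
# latest selected options were 5/2 for the first pair and 3/4 for the second,
# the two SurveyResult rows get score max(5 - 2, 0) == 3 and max(3 - 4, 0) == 0.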
|
tndatacommons/tndata_backend
|
tndata_backend/survey/managers.py
|
Python
|
mit
| 2,163
|
from langkit.lexer import Lexer, LexerToken, Literal, WithText, \
WithSymbol, Pattern, Ignore
class Token(LexerToken):
# Keywords
Config = WithText()
Menuconfig = WithText()
Choice = WithText()
Endchoice = WithText()
Comment = WithText()
Menu = WithText()
Endmenu = WithText()
If = WithText()
Endif = WithText()
Source = WithText()
Mainmenu = WithText()
Depends = WithText()
On = WithText()
Help = WithText()
Prompt = WithText()
Default = WithText()
Select = WithText()
Imply = WithText()
Range = WithText()
Visible = WithText()
Option = WithText()
OptDefConfigList = WithText()
OptModules = WithText()
OptEnv = WithText()
OptAllNoConfY = WithText()
# The help text is terminated by an empty line
EmptyLine = WithText()
String = WithText()
# Types
Tristate = WithText()
Bool = WithText()
DefTristate = WithText()
DefBool = WithText()
Int = WithText()
Hex = WithText()
StringType = WithText()
Identifier = WithSymbol()
Number = WithSymbol()
HexNumber = WithSymbol()
Yes = WithSymbol()
No = WithSymbol()
Module = WithSymbol()
LPar = WithText()
RPar = WithText()
Equal = WithText()
Different = WithText()
Not = WithText()
Or = WithText()
And = WithText()
kconfig_lexer = Lexer(Token)
kconfig_lexer.add_rules(
(Pattern(r"[ \t\r\n]+"), Ignore()),
(Pattern(r"#.*"), Ignore()),
# Keywords
(Literal("config"), Token.Config),
(Literal("menuconfig"), Token.Menuconfig),
(Literal("choice"), Token.Choice),
(Literal("endchoice"), Token.Endchoice),
(Literal("comment"), Token.Comment),
(Literal("menu"), Token.Menu),
(Literal("endmenu"), Token.Endmenu),
(Literal("if"), Token.If),
(Literal("endif"), Token.Endif),
(Literal("source"), Token.Source),
(Literal("mainmenu"), Token.Mainmenu),
(Literal("depends"), Token.Depends),
(Literal("on"), Token.On),
(Literal("help"), Token.Help),
(Literal("--help--"), Token.Help),
(Literal("prompt"), Token.Prompt),
(Literal("default"), Token.Default),
(Literal("select"), Token.Select),
(Literal("imply"), Token.Imply),
(Literal("range"), Token.Range),
(Literal("visible"), Token.Visible),
(Literal("option"), Token.Option),
# Options
(Literal("defconfig_list"), Token.OptDefConfigList),
(Literal("modules"), Token.OptModules),
(Literal("env"), Token.OptEnv),
(Literal("allnoconfig_y"), Token.OptAllNoConfY),
# Types
(Literal("tristate"), Token.Tristate),
(Literal("bool"), Token.Bool),
(Literal("def_tristate"), Token.DefTristate),
(Literal("def_bool"), Token.DefBool),
(Literal("int"), Token.Int),
(Literal("hex"), Token.Hex),
(Literal("string"), Token.StringType),
(Literal("="), Token.Equal),
(Literal("!="), Token.Different),
(Literal("("), Token.LPar),
(Literal(")"), Token.RPar),
(Literal("!"), Token.Not),
(Literal("&&"), Token.And),
(Literal("||"), Token.Or),
(Literal("y"), Token.Yes),
(Literal("n"), Token.No),
(Literal("m"), Token.Module),
(Pattern(r"[a-zA-Z][a-zA-Z0-9_]*"), Token.Identifier),
(Pattern(r"[0-9]+"), Token.Number),
(Pattern(r"0x[0-9]+"), Token.HexNumber),
(Pattern(r'\"(\"\"|(\[\"([0-9A-F][0-9A-F]){2,4}\"\])|[^\n\"])*\"'), Token.String),
)
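# Illustrative example (an assumption about typical input, not a test case): a
# Kconfig fragment such as
#   config FOO
#       bool "Enable foo"
#       default y
# would be tokenized as Config, Identifier(FOO), Bool, String("Enable foo"),
# Default, Yes; whitespace and '#' comments are dropped by the Ignore() rules.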
|
Fabien-Chouteau/libkconfiglang
|
kconfig/language/lexer.py
|
Python
|
gpl-3.0
| 3,883
|
from dataclasses import dataclass
from mitmproxy import connection
from . import commands
@dataclass
class ClientConnectedHook(commands.StartHook):
"""
A client has connected to mitmproxy. Note that a connection can
correspond to multiple HTTP requests.
Setting client.error kills the connection.
"""
client: connection.Client
@dataclass
class ClientDisconnectedHook(commands.StartHook):
"""
A client connection has been closed (either by us or the client).
"""
blocking = False
client: connection.Client
@dataclass
class ServerConnectionHookData:
"""Event data for server connection event hooks."""
server: connection.Server
"""The server connection this hook is about."""
client: connection.Client
"""The client on the other end."""
@dataclass
class ServerConnectHook(commands.StartHook):
"""
Mitmproxy is about to connect to a server.
Note that a connection can correspond to multiple requests.
Setting data.server.error kills the connection.
"""
data: ServerConnectionHookData
@dataclass
class ServerConnectedHook(commands.StartHook):
"""
Mitmproxy has connected to a server.
"""
blocking = False
data: ServerConnectionHookData
@dataclass
class ServerDisconnectedHook(commands.StartHook):
"""
A server connection has been closed (either by us or the server).
"""
blocking = False
data: ServerConnectionHookData
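

# --- Illustrative sketch, not part of mitmproxy itself ----------------------
# The hooks above are delivered to addons whose method names match the hook
# names (ClientConnectedHook -> client_connected, ServerConnectHook ->
# server_connect, ...). A minimal addon reacting to them could look roughly
# like this; check the addon documentation before relying on exact signatures.
class _ExampleConnectionLogger:
    def client_connected(self, client: connection.Client) -> None:
        # Called once per client TCP connection (may carry many HTTP requests).
        print(f"client connected: {client.peername}")

    def server_connect(self, data: ServerConnectionHookData) -> None:
        # Called just before mitmproxy opens a connection to the upstream server.
        print(f"about to connect to: {data.server.address}")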
|
mhils/mitmproxy
|
mitmproxy/proxy/server_hooks.py
|
Python
|
mit
| 1,459
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import datetime
from dateutil.relativedelta import relativedelta
from odoo import api, fields, models, _
from odoo.exceptions import UserError
from odoo.tools import float_compare, float_round
from odoo.addons import decimal_precision as dp
class MrpWorkorder(models.Model):
_name = 'mrp.workorder'
_description = 'Work Order'
_inherit = ['mail.thread', 'mail.activity.mixin', 'mrp.abstract.workorder']
name = fields.Char(
'Work Order', required=True,
states={'done': [('readonly', True)], 'cancel': [('readonly', True)]})
workcenter_id = fields.Many2one(
'mrp.workcenter', 'Work Center', required=True,
states={'done': [('readonly', True)], 'cancel': [('readonly', True)]})
working_state = fields.Selection(
'Workcenter Status', related='workcenter_id.working_state', readonly=False,
help='Technical: used in views only')
production_availability = fields.Selection(
'Stock Availability', readonly=True,
related='production_id.reservation_state', store=True,
help='Technical: used in views and domains only.')
production_state = fields.Selection(
'Production State', readonly=True,
related='production_id.state',
help='Technical: used in views only.')
qty_production = fields.Float('Original Production Quantity', readonly=True, related='production_id.product_qty')
qty_remaining = fields.Float('Quantity To Be Produced', compute='_compute_qty_remaining', digits=dp.get_precision('Product Unit of Measure'))
qty_produced = fields.Float(
'Quantity', default=0.0,
readonly=True,
digits=dp.get_precision('Product Unit of Measure'),
help="The number of products already handled by this work order")
is_produced = fields.Boolean(string="Has Been Produced",
compute='_compute_is_produced')
state = fields.Selection([
('pending', 'Waiting for another WO'),
('ready', 'Ready'),
('progress', 'In Progress'),
('done', 'Finished'),
('cancel', 'Cancelled')], string='Status',
default='pending')
date_planned_start = fields.Datetime(
'Scheduled Date Start',
states={'done': [('readonly', True)], 'cancel': [('readonly', True)]})
date_planned_finished = fields.Datetime(
'Scheduled Date Finished',
states={'done': [('readonly', True)], 'cancel': [('readonly', True)]})
date_start = fields.Datetime(
'Effective Start Date',
states={'done': [('readonly', True)], 'cancel': [('readonly', True)]})
date_finished = fields.Datetime(
'Effective End Date',
states={'done': [('readonly', True)], 'cancel': [('readonly', True)]})
duration_expected = fields.Float(
'Expected Duration', digits=(16, 2),
states={'done': [('readonly', True)], 'cancel': [('readonly', True)]},
help="Expected duration (in minutes)")
duration = fields.Float(
'Real Duration', compute='_compute_duration',
readonly=True, store=True)
duration_unit = fields.Float(
'Duration Per Unit', compute='_compute_duration',
readonly=True, store=True)
duration_percent = fields.Integer(
'Duration Deviation (%)', compute='_compute_duration',
group_operator="avg", readonly=True, store=True)
operation_id = fields.Many2one(
'mrp.routing.workcenter', 'Operation') # Should be used differently as BoM can change in the meantime
worksheet = fields.Binary(
'Worksheet', related='operation_id.worksheet', readonly=True)
move_raw_ids = fields.One2many(
'stock.move', 'workorder_id', 'Moves')
move_line_ids = fields.One2many(
'stock.move.line', 'workorder_id', 'Moves to Track',
help="Inventory moves for which you must scan a lot number at this work order")
final_lot_id = fields.Many2one(
'stock.production.lot', 'Lot/Serial Number', domain="[('product_id', '=', product_id)]",
states={'done': [('readonly', True)], 'cancel': [('readonly', True)]})
time_ids = fields.One2many(
'mrp.workcenter.productivity', 'workorder_id')
is_user_working = fields.Boolean(
'Is the Current User Working', compute='_compute_working_users',
help="Technical field indicating whether the current user is working. ")
working_user_ids = fields.One2many('res.users', string='Working user on this work order.', compute='_compute_working_users')
last_working_user_id = fields.One2many('res.users', string='Last user that worked on this work order.', compute='_compute_working_users')
next_work_order_id = fields.Many2one('mrp.workorder', "Next Work Order")
scrap_ids = fields.One2many('stock.scrap', 'workorder_id')
scrap_count = fields.Integer(compute='_compute_scrap_move_count', string='Scrap Move')
production_date = fields.Datetime('Production Date', related='production_id.date_planned_start', store=True, readonly=False)
color = fields.Integer('Color', compute='_compute_color')
capacity = fields.Float(
'Capacity', default=1.0,
help="Number of pieces that can be produced in parallel.")
workorder_line_ids = fields.One2many('mrp.workorder.line', 'workorder_id', string='Workorder lines')
final_lot_domain = fields.One2many('stock.production.lot', compute="_compute_final_lot_domain")
@api.depends('production_id')
def _compute_final_lot_domain(self):
for wo in self:
# check if self is not the first workorder in the list
if self.env['mrp.workorder'].search([('next_work_order_id', '=', wo.id)]):
wo.final_lot_domain = self.env['stock.production.lot'].search([
('use_next_on_work_order_id', '=', wo.id),
]).ids
else:
wo.final_lot_domain = self.env['stock.production.lot'].search([
('product_id', '=', wo.product_id.id),
]).ids
@api.multi
def name_get(self):
return [(wo.id, "%s - %s - %s" % (wo.production_id.name, wo.product_id.name, wo.name)) for wo in self]
@api.one
@api.depends('production_id.product_qty', 'qty_produced')
def _compute_is_produced(self):
rounding = self.production_id.product_uom_id.rounding
self.is_produced = float_compare(self.qty_produced, self.production_id.product_qty, precision_rounding=rounding) >= 0
@api.one
@api.depends('time_ids.duration', 'qty_produced')
def _compute_duration(self):
self.duration = sum(self.time_ids.mapped('duration'))
self.duration_unit = round(self.duration / max(self.qty_produced, 1), 2) # rounding 2 because it is a time
if self.duration_expected:
self.duration_percent = 100 * (self.duration_expected - self.duration) / self.duration_expected
else:
self.duration_percent = 0
def _compute_working_users(self):
""" Checks whether the current user is working, all the users currently working and the last user that worked. """
for order in self:
order.working_user_ids = [(4, order.id) for order in order.time_ids.filtered(lambda time: not time.date_end).sorted('date_start').mapped('user_id')]
if order.working_user_ids:
order.last_working_user_id = order.working_user_ids[-1]
elif order.time_ids:
order.last_working_user_id = order.time_ids.sorted('date_end')[-1].user_id
if order.time_ids.filtered(lambda x: (x.user_id.id == self.env.user.id) and (not x.date_end) and (x.loss_type in ('productive', 'performance'))):
order.is_user_working = True
else:
order.is_user_working = False
@api.multi
def _compute_scrap_move_count(self):
data = self.env['stock.scrap'].read_group([('workorder_id', 'in', self.ids)], ['workorder_id'], ['workorder_id'])
count_data = dict((item['workorder_id'][0], item['workorder_id_count']) for item in data)
for workorder in self:
workorder.scrap_count = count_data.get(workorder.id, 0)
@api.multi
@api.depends('date_planned_finished', 'production_id.date_planned_finished')
def _compute_color(self):
late_orders = self.filtered(lambda x: x.production_id.date_planned_finished and x.date_planned_finished > x.production_id.date_planned_finished)
for order in late_orders:
order.color = 4
for order in (self - late_orders):
order.color = 2
@api.multi
def write(self, values):
if list(values.keys()) != ['time_ids'] and any(workorder.state == 'done' for workorder in self):
raise UserError(_('You can not change the finished work order.'))
if 'date_planned_start' in values or 'date_planned_finished' in values:
for workorder in self:
start_date = fields.Datetime.to_datetime(values.get('date_planned_start')) or workorder.date_planned_start
end_date = fields.Datetime.to_datetime(values.get('date_planned_finished')) or workorder.date_planned_finished
if start_date and end_date and start_date > end_date:
raise UserError(_('The planned end date of the work order cannot be prior to the planned start date, please correct this to save the work order.'))
return super(MrpWorkorder, self).write(values)
def generate_wo_lines(self):
""" Generate workorder line """
self.ensure_one()
raw_moves = self.move_raw_ids.filtered(
lambda move: move.state not in ('done', 'cancel')
)
for move in raw_moves:
qty_to_consume = move.product_uom._compute_quantity(
self.qty_producing * move.unit_factor,
move.product_id.uom_id,
round=False
)
line_values = self._generate_lines_values(move, qty_to_consume)
self.workorder_line_ids |= self.env['mrp.workorder.line'].create(line_values)
def _assign_default_final_lot_id(self):
self.final_lot_id = self.env['stock.production.lot'].search([('use_next_on_work_order_id', '=', self.id)],
order='create_date, id', limit=1)
def _get_byproduct_move_line(self, by_product_move, quantity):
return {
'move_id': by_product_move.id,
'product_id': by_product_move.product_id.id,
'product_uom_qty': quantity,
'product_uom_id': by_product_move.product_uom.id,
'qty_done': quantity,
'location_id': by_product_move.location_id.id,
'location_dest_id': by_product_move.location_dest_id.id,
}
@api.multi
def record_production(self):
if not self:
return True
self.ensure_one()
if self.qty_producing <= 0:
raise UserError(_('Please set the quantity you are currently producing. It should be different from zero.'))
        # Once a piece is produced, you can launch the next work order
if self.next_work_order_id.state == 'pending':
self.next_work_order_id.state = 'ready'
# If last work order, then post lots used
# TODO: should be same as checking if for every workorder something has been done?
if not self.next_work_order_id:
self._update_finished_move()
# Transfer quantities from temporary to final move line or make them final
self._update_raw_moves()
# Update workorder quantity produced
self.qty_produced += self.qty_producing
if self.final_lot_id:
self.final_lot_id.use_next_on_work_order_id = self.next_work_order_id
self.final_lot_id = False
# Set a qty producing
rounding = self.production_id.product_uom_id.rounding
if float_compare(self.qty_produced, self.production_id.product_qty, precision_rounding=rounding) >= 0:
self.qty_producing = 0
elif self.production_id.product_id.tracking == 'serial':
self._assign_default_final_lot_id()
self.qty_producing = 1.0
self.generate_wo_lines()
else:
self.qty_producing = float_round(self.production_id.product_qty - self.qty_produced, precision_rounding=rounding)
self.generate_wo_lines()
if self.next_work_order_id and self.production_id.product_id.tracking != 'none':
self.next_work_order_id._assign_default_final_lot_id()
if float_compare(self.qty_produced, self.production_id.product_qty, precision_rounding=rounding) >= 0:
self.button_finish()
return True
@api.multi
def button_start(self):
self.ensure_one()
# As button_start is automatically called in the new view
if self.state in ('done', 'cancel'):
return True
# Need a loss in case of the real time exceeding the expected
timeline = self.env['mrp.workcenter.productivity']
if self.duration < self.duration_expected:
loss_id = self.env['mrp.workcenter.productivity.loss'].search([('loss_type','=','productive')], limit=1)
if not len(loss_id):
raise UserError(_("You need to define at least one productivity loss in the category 'Productivity'. Create one from the Manufacturing app, menu: Configuration / Productivity Losses."))
else:
loss_id = self.env['mrp.workcenter.productivity.loss'].search([('loss_type','=','performance')], limit=1)
if not len(loss_id):
raise UserError(_("You need to define at least one productivity loss in the category 'Performance'. Create one from the Manufacturing app, menu: Configuration / Productivity Losses."))
if self.production_id.state != 'progress':
self.production_id.write({
'date_start': datetime.now(),
})
timeline.create({
'workorder_id': self.id,
'workcenter_id': self.workcenter_id.id,
'description': _('Time Tracking: ')+self.env.user.name,
'loss_id': loss_id[0].id,
'date_start': datetime.now(),
'user_id': self.env.user.id
})
return self.write({'state': 'progress',
'date_start': datetime.now(),
})
@api.multi
def button_finish(self):
self.ensure_one()
self.end_all()
return self.write({'state': 'done', 'date_finished': fields.Datetime.now()})
@api.multi
def end_previous(self, doall=False):
"""
        @param doall: when True, close all open time lines on the open work orders,
        otherwise close only the ones belonging to the current user
"""
# TDE CLEANME
timeline_obj = self.env['mrp.workcenter.productivity']
domain = [('workorder_id', 'in', self.ids), ('date_end', '=', False)]
if not doall:
domain.append(('user_id', '=', self.env.user.id))
not_productive_timelines = timeline_obj.browse()
for timeline in timeline_obj.search(domain, limit=None if doall else 1):
wo = timeline.workorder_id
if wo.duration_expected <= wo.duration:
if timeline.loss_type == 'productive':
not_productive_timelines += timeline
timeline.write({'date_end': fields.Datetime.now()})
else:
maxdate = fields.Datetime.from_string(timeline.date_start) + relativedelta(minutes=wo.duration_expected - wo.duration)
enddate = datetime.now()
if maxdate > enddate:
timeline.write({'date_end': enddate})
else:
timeline.write({'date_end': maxdate})
not_productive_timelines += timeline.copy({'date_start': maxdate, 'date_end': enddate})
if not_productive_timelines:
loss_id = self.env['mrp.workcenter.productivity.loss'].search([('loss_type', '=', 'performance')], limit=1)
if not len(loss_id):
raise UserError(_("You need to define at least one unactive productivity loss in the category 'Performance'. Create one from the Manufacturing app, menu: Configuration / Productivity Losses."))
not_productive_timelines.write({'loss_id': loss_id.id})
return True
@api.multi
def end_all(self):
return self.end_previous(doall=True)
@api.multi
def button_pending(self):
self.end_previous()
return True
@api.multi
def button_unblock(self):
for order in self:
order.workcenter_id.unblock()
return True
@api.multi
def action_cancel(self):
return self.write({'state': 'cancel'})
@api.multi
def button_done(self):
if any([x.state in ('done', 'cancel') for x in self]):
raise UserError(_('A Manufacturing Order is already done or cancelled.'))
self.end_all()
return self.write({'state': 'done',
'date_finished': datetime.now()})
@api.multi
def button_scrap(self):
self.ensure_one()
return {
'name': _('Scrap'),
'view_type': 'form',
'view_mode': 'form',
'res_model': 'stock.scrap',
'view_id': self.env.ref('stock.stock_scrap_form_view2').id,
'type': 'ir.actions.act_window',
'context': {'default_workorder_id': self.id, 'default_production_id': self.production_id.id, 'product_ids': (self.production_id.move_raw_ids.filtered(lambda x: x.state not in ('done', 'cancel')) | self.production_id.move_finished_ids.filtered(lambda x: x.state == 'done')).mapped('product_id').ids},
# 'context': {'product_ids': self.move_raw_ids.filtered(lambda x: x.state not in ('done', 'cancel')).mapped('product_id').ids + [self.production_id.product_id.id]},
'target': 'new',
}
@api.multi
def action_see_move_scrap(self):
self.ensure_one()
action = self.env.ref('stock.action_stock_scrap').read()[0]
action['domain'] = [('workorder_id', '=', self.id)]
return action
@api.multi
@api.depends('qty_production', 'qty_produced')
def _compute_qty_remaining(self):
for wo in self:
wo.qty_remaining = float_round(wo.qty_production - wo.qty_produced, precision_rounding=wo.production_id.product_uom_id.rounding)
class MrpWorkorderLine(models.Model):
_name = 'mrp.workorder.line'
_inherit = ["mrp.abstract.workorder.line"]
_description = "Workorder move line"
workorder_id = fields.Many2one('mrp.workorder', 'Workorder')
def _get_final_lot(self):
return self.workorder_id.final_lot_id
def _get_production(self):
return self.workorder_id.production_id
|
t3dev/odoo
|
addons/mrp/models/mrp_workorder.py
|
Python
|
gpl-3.0
| 19,034
|
import io
import os
import re
import sys
from glob import glob
from invoke import task
DOCS_PORT = os.environ.get("DOCS_PORT", 8000)
#: branch prefixes for which some checks are skipped
SPECIAL_BRANCHES = ("master", "develop", "release")
@task
def clean(c):
""" Remove artifacts and binary files. """
c.run("python setup.py clean --all")
patterns = ["build", "dist"]
patterns.extend(glob("*.egg*"))
patterns.append("docs/_build")
patterns.append("**/*.pyc")
for pattern in patterns:
c.run("rm -rf {}".format(pattern))
@task
def lint(c):
""" Run linting tox environments. """
c.run("tox -epep8,isort,black,pypi-description")
@task # NOQA
def format(c): # NOQA
""" Run code formatting tasks. """
c.run("tox -eblacken,isort_format")
@task
def towncrier_check(c): # NOQA
""" Check towncrier files. """
output = io.StringIO()
c.run("git branch --contains HEAD", out_stream=output)
skipped_branch_prefix = ["pull/", "develop", "master", "HEAD"]
# cleanup branch names by removing PR-only names in local, remote and disconnected branches to ensure the current
# (i.e. user defined) branch name is used
branches = list(
filter(
lambda x: x and all(not x.startswith(part) for part in skipped_branch_prefix),
(
branch.replace("origin/", "").replace("remotes/", "").strip("* (")
for branch in output.getvalue().split("\n")
),
)
)
print("Candidate branches", ", ".join(output.getvalue().split("\n")))
if not branches:
# if no branch name matches, we are in one of the excluded branches above, so we just exit
print("Skip check, branch excluded by configuration")
return
branch = branches[0]
towncrier_file = None
for branch in branches:
if any(branch.startswith(prefix) for prefix in SPECIAL_BRANCHES):
sys.exit(0)
try:
parts = re.search(r"(?P<type>\w+)/\D*(?P<number>\d+)\D*", branch).groups()
towncrier_file = os.path.join("changes", "{1}.{0}".format(*parts))
if not os.path.exists(towncrier_file) or os.path.getsize(towncrier_file) == 0:
print(
"=========================\n"
"Current tree does not contain the towncrier file {} or file is empty\n"
"please check CONTRIBUTING documentation.\n"
"========================="
"".format(towncrier_file)
)
sys.exit(2)
else:
break
except AttributeError:
pass
if not towncrier_file:
print(
"=========================\n"
"Branch {} does not respect the '<type>/(<optional-task-type>-)<number>-description' format\n"
"=========================\n"
"".format(branch)
)
sys.exit(1)
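# A minimal illustration of the branch naming convention the check above enforces
# (hypothetical branch name, not taken from this repository):
#
#   branch = "feature/123-add-rss-feed"
#   parts = re.search(r"(?P<type>\w+)/\D*(?P<number>\d+)\D*", branch).groups()
#   # parts == ("feature", "123"), expected news fragment: changes/123.feature
#   towncrier_file = os.path.join("changes", "{1}.{0}".format(*parts))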
@task
def test(c):
""" Run test in local environment. """
c.run("python setup.py test")
@task
def test_all(c):
""" Run all tox environments. """
c.run("tox")
@task
def coverage(c):
""" Run test with coverage in local environment. """
c.run("coverage erase")
c.run("run setup.py test")
c.run("report -m")
@task
def tag_release(c, level):
""" Tag release version. """
c.run("bumpversion --list %s --no-tag" % level)
@task
def tag_dev(c, level="patch"):
""" Tag development version. """
c.run("bumpversion --list %s --message='Bump develop version [ci skip]' --no-tag" % level)
@task(pre=[clean])
def docbuild(c):
""" Build documentation. """
os.chdir("docs")
build_dir = os.environ.get("BUILD_DIR", "_build/html")
c.run("python -msphinx -W -b html -d _build/doctrees . %s" % build_dir)
@task(docbuild)
def docserve(c):
""" Serve docs at http://localhost:$DOCS_PORT/ (default port is 8000). """
from livereload import Server
server = Server()
server.watch("docs/conf.py", lambda: docbuild(c))
server.watch("CONTRIBUTING.rst", lambda: docbuild(c))
server.watch("docs/*.rst", lambda: docbuild(c))
server.serve(port=DOCS_PORT, root="_build/html")
|
nephila/djangocms-blog
|
tasks.py
|
Python
|
bsd-3-clause
| 4,218
|
"""XML migration script by David Jonas
This script migrates XML files into Plone Objects
Supposed to be run as an external method through the boilerplate script migration.py
(Instructions: http://plone.org/documentation/kb/create-and-use-an-external-method )
"""
import libxml2
import urllib2
import AccessControl
import transaction
import time
import sys
from DateTime import DateTime
from plone.i18n.normalizer import idnormalizer
from Testing.makerequest import makerequest
from Products.CMFCore.utils import getToolByName
from Acquisition import aq_inner
try:
from collective.contentleadimage.config import IMAGE_FIELD_NAME
from collective.contentleadimage.config import IMAGE_CAPTION_FIELD_NAME
from collective.contentleadimage.interfaces import ILeadImageable
import collective.contentleadimage
LEADIMAGE_EXISTS = True
except ImportError:
LEADIMAGE_EXISTS = False
# Folder where the images are (Do not forget to add a trailing slash)
IMAGE_FOLDER = "/var/plone4_/zeocluster/src/porseleinImages/"
class ObjectItem:
"""Class to store an Object from the xml file"""
def __init__(self):
self.priref = ""
self.tags = []
self.body = ""
self.label_text = ""
self.images = []
self.object_number = ""
self.object_name = ""
self.title = ""
self.production_place = ""
self.production_date_start = ""
self.production_date_end = ""
self.production_period = ""
self.materials = []
self.dimention_types = []
self.dimention_values = []
self.dimention_units = []
self.creator = ""
self.checked = False
def Title(self):
if self.title != "":
return self.title
elif self.object_number != "" and self.object_name != "":
return "%s, %s"%(self.object_number, self.object_name)
elif self.object_name != "":
return self.object_name
elif self.object_number != "":
return self.object_number
else:
return self.priref
def Materials(self):
return ", ".join(self.materials)
class XMLMigrator:
""" Gets an XML file, parses it and creates the content in the chosen plone instance """
def __init__(self, portal, xmlFilePath, typeToCreate, folder):
"""Constructor that gets access to both the parsed file and the chosen portal"""
print("INITIALIZING CONTENT MIGRATOR")
#check if portal exists
self.portal = portal
#Parse the XML file
self.xmlDoc = libxml2.parseFile(xmlFilePath)
#Set the migration mode
self.typeToCreate = typeToCreate
#Save the path to the folder to migrate to
self.folderPath = folder.split("/")
#Initialize the counters for the log
self.errors = 0 #Number of errors - failed to create an item
        self.created = 0 #Number of successfully created items
        self.skipped = 0 #Number of items skipped because another item with the same id already exists in that folder.
#DEBUG
self.fields = []
def cleanUp(self):
self.xmlDoc.freeDoc()
return
def getContainer(self):
#if there is no folder info, fail.
if len(self.folderPath) == 0:
print("Folder check failed")
return None
#Set the container to the root object of the portal
container = self.portal
#Navigate the folders creating them if necessary
for folder in self.folderPath:
if hasattr(container, folder):
container = container[folder]
else:
print ("== Chosen folder " + folder + " does not exist. Creating new folder ==")
container.invokeFactory(type_name="Folder", id=folder, title="migration of type: " + self.typeToCreate)
container = container[folder]
return container
def getOrCreateFolder(self, container, folderId, publish):
#Get a folder if it exists or create it if it doesn't
if folderId != "":
try:
if hasattr(container, folderId):
container = container[folderId]
else:
print ("== Creating new folder ==")
container.invokeFactory(type_name="Folder", id=folderId, title=folderId)
container = container[folderId]
#publish the folder if needed
if publish:
container.portal_workflow.doActionFor(container, "publish", comment="content automatically published by migrationScript")
return container
except:
print("Folder %s could not be created: %s"%(folderId, sys.exc_info()[1]))
return None
else:
return None
def addImage(self, container, image):
try:
filename = image.split("\\")[2]
dirtyId = filename
result = False
transaction.begin()
id = idnormalizer.normalize(unicode(dirtyId, "utf-8"))
#if not hasattr(container, str(id)): #The processForm changes the id to the fileneame in lower case
if not hasattr(container, filename.lower()):
#import pdb; pdb.set_trace()
print "Adding a new image: %s"%filename
container.invokeFactory(type_name="Image", id=id, title=filename)
else:
print "Image %s already exists, skipping"%filename
return True
item = container[str(id)]
imageFile = open(IMAGE_FOLDER + filename.lower(), "r")
imageData = imageFile.read()
item.edit(file=imageData)
imageFile.close()
#import pdb; pdb.set_trace()
item.processForm()
transaction.commit()
result = True
return result
except:
transaction.abort()
print "Unexpected error on createImage: ", sys.exc_info()[1]
return False
def addLeadImage(self, item, image):
#set the lead image if necessary and if lead image product is installed
if LEADIMAGE_EXISTS and image != "":
#download and create the image
try:
imageFile = urllib2.urlopen(image)
imageData = imageFile.read()
urlSplit = image.split("/")
filename = urlSplit[len(urlSplit)-1]
#add the image as leadImage
if ILeadImageable.providedBy(item):
field = aq_inner(item).getField(IMAGE_FIELD_NAME)
field.set(item, imageData, filename=filename)
else:
print("Item type does not accept leadImage")
#release the image file
imageFile.close()
return
except:
print "LeadImage URL not available. LeadImage not created because: (" + image + ")", sys.exc_info()[1]
return
def addLeadImageCaption(self, item, caption):
#set the caption if necessary and if lead image product is installed
if LEADIMAGE_EXISTS and caption != "":
#add the caption
try:
if ILeadImageable.providedBy(item):
field = aq_inner(item).getField(IMAGE_CAPTION_FIELD_NAME)
field.set(item, caption)
else:
print("Item type does not accept leadImage therefore captions will be ignored")
except:
print "Error adding leadImage caption: ", sys.exc_info()[1]
return
def createObject(self, obj):
transaction.begin()
container = self.getContainer()
dirtyId = obj.priref
counter = 1
result = False
try:
id = idnormalizer.normalize(unicode(dirtyId, "utf-8"))
#while hasattr(container, id) and id != "":
# print ("Object " + id + " already exists.")
# counter = counter+1
# dirtyId = obj.title + str(counter)
# id = idnormalizer.normalize(unicode(dirtyId, "utf-8"))
# print ("creating " + id + " instead")
if hasattr(container, id):
self.created = self.created + 1
print "Item already exists, reviewing fields"
changedObj = False
existingObj = container[id]
if existingObj.title != obj.Title():
existingObj.title = obj.Title()
#print "Title change from %s to %s"%(existingObj.title, obj.Title())
changedObj = True
if existingObj.label_text != obj.label_text:
existingObj.label_text = obj.label_text
#print "Label change from %s to %s"%(existingObj.label_text, obj.label_text)
changedObj = True
if existingObj.object_number != obj.object_number:
existingObj.object_number = obj.object_number
#print "Object Number change from %s to %s"%(existingObj.object_number, obj.object_number)
changedObj = True
if existingObj.object_name != obj.object_name:
existingObj.object_name = obj.object_name
#print "Object Name change from %s to %s"%(existingObj.object_name, obj.object_name)
changedObj = True
if existingObj.production_place != obj.production_place:
existingObj.production_place = obj.production_place
#print "Production Place change from %s to %s"%(existingObj.production_place, obj.production_place)
changedObj = True
if existingObj.production_date_start != obj.production_date_start:
existingObj.production_date_start = obj.production_date_start
#print "Production Date Start change from %s to %s"%(existingObj.production_date_start, obj.production_date_start)
changedObj = True
if existingObj.production_date_end != obj.production_date_end:
existingObj.production_date_end = obj.production_date_end
#print "Production Date End change from %s to %s"%(existingObj.production_date_end, obj.production_date_end)
changedObj = True
if existingObj.period != obj.production_period:
print "Production period change from %s to %s"%(existingObj.period, obj.production_period)
existingObj.period = obj.production_period
changedObj = True
if existingObj.materials != obj.Materials():
existingObj.materials = obj.Materials()
#print "Materials change from %s to %s"%(existingObj.materials, obj.Materials())
changedObj = True
if existingObj.creator != obj.creator:
existingObj.creator = obj.creator
#print "Creator change from %s to %s"%(existingObj.creator, obj.creator)
changedObj = True
if changedObj:
# Commit transaction
print "Item has changed, Commiting transaction..."
transaction.commit()
                    # Perform ZEO client synchronization (if running in clustered mode). Not doing this because it now runs as an External Method instead
#app._p_jar.sync()
#print "Checking for new images:"
#Add Images to the object
#item = container[id]
#for image in obj.images:
#pass
#print "Adding image %s: "%image
#self.addImage(item, image)
return True
#Check if Object exists
if not hasattr(container, id):
print "NEW OBJECT FOUND. ADDING: %s"%id
container.invokeFactory(
type_name="Object",
id=id,
title=obj.Title(),
priref=obj.priref,
label_text = obj.label_text,
object_number=obj.object_number,
object_name = obj.object_name,
production_place = obj.production_place,
production_date_start = obj.production_date_start,
production_date_end = obj.production_date_end,
period = obj.production_period,
materials = obj.Materials(),
creator = obj.creator
)
#get the Object after creating
item = container[id]
#set the body
item.setText(obj.body)
#set the dimentions
dims = []
for i in range(0, len(obj.dimention_types)):
try:
dims.append("%s: %s %s"%(obj.dimention_types[i], obj.dimention_values[i], obj.dimention_units[i]))
except:
pass
item.dimentions = "; ".join(dims)
#Add tags to Keywords/Categories
item.setSubject(obj.tags)
#publish or revise
if obj.checked:
item.portal_workflow.doActionFor(item, "revise")
#else:
#item.portal_workflow.doActionFor(item, "publish", comment="Content automatically published by migrationScript")
# Commit transaction
transaction.commit()
            # Perform ZEO client synchronization (if running in clustered mode). Not doing this because it now runs as an External Method instead
#app._p_jar.sync()
#Add Images to the object
for image in obj.images:
#pass
print "Adding image %s: "%image
self.addImage(item, image)
result = True
self.created = self.created + 1
print("== Page created ==")
except:
self.errors = self.errors + 1
print "Unexpected error on createObject (" +dirtyId+ "):", sys.exc_info()[1]
transaction.abort()
raise
return result
if not result:
self.skipped = self.skipped + 1
print("Skipped item: " + dirtyId)
return result
def migrateTest(self):
root = self.xmlDoc.children
for field in root.children:
if field.name == "record":
#print("== Parsing record ==")
for testField in field.children:
if testField.name not in self.fields:
self.fields.append(testField.name)
return
def migrateToObject(self):
root = self.xmlDoc.children
for field in root.children:
if field.name == "record":
#print("== Parsing Object: ==")
currentObject = ObjectItem()
for objectField in field.children:
if objectField.name == 'priref':
currentObject.priref = objectField.content
#print(" priref: " + currentObject.priref)
elif objectField.name == 'object_name':
currentObject.tags.append(objectField.content)
currentObject.object_name = objectField.content
#print(" tag added: " + objectField.content)
elif objectField.name == 'label.text':
if currentObject.body == "":
currentObject.body = "<p>%s</p>"%objectField.content
else:
currentObject.label_text = currentObject.label_text + "<p>%s</p>"%objectField.content
#print(" body / label_text added: " + currentObject.body)
elif objectField.name == 'reproduction.identifier_URL':
currentObject.images.append(objectField.content)
#print(" image: " + objectField.content)
elif objectField.name == 'object_number':
currentObject.object_number = objectField.content
#print(" object_number: " + currentObject.object_number)
elif objectField.name == 'title':
if objectField.content !="-":
currentObject.title = objectField.content
#print(" title: " + currentObject.title)
elif objectField.name == 'production.place':
currentObject.production_place = objectField.content
#print(" production_place: " + currentObject.production_place)
elif objectField.name == 'production.date.start':
currentObject.production_date_start = objectField.content
#print(" production_date_start: " + currentObject.production_date_start)
elif objectField.name == 'production.date.end':
currentObject.production_date_end = objectField.content
#print(" production_date_end: " + currentObject.production_date_end)
elif objectField.name == 'production.period':
currentObject.production_period = objectField.content
#print(" production_period: " + currentObject.production_period)
elif objectField.name == 'material':
currentObject.materials.append(objectField.content)
currentObject.tags.append(objectField.content)
#print(" material added: " + objectField.content)
elif objectField.name == 'dimension.type':
currentObject.dimention_types.append(objectField.content)
#print(" dimention added: " + objectField.content)
elif objectField.name == 'dimension.value':
currentObject.dimention_values.append(objectField.content)
#print(" dimention val added: " + objectField.content)
elif objectField.name == 'dimension.unit':
currentObject.dimention_units.append(objectField.content)
#print(" dimention unit added: " + objectField.content)
elif objectField.name == 'creator':
currentObject.creator = objectField.content
#print(" creator: " + currentObject.creator)
#currentObject is now populated with the data from the XML now we create a Object in plone
self.createObject(currentObject)
return
def startMigration(self):
if self.portal is not None:
if self.typeToCreate == "Test":
self.migrateTest()
for f in self.fields:
print f
elif self.typeToCreate == "Object":
self.migrateToObject()
else:
print("TYPE NOT RECOGNIZED!! ==>> " + self.typeToCreate)
self.cleanUp()
else:
print ("Portal is NONE!!!")
self.cleanUp()
return
|
davidjonas/PloneXMLMigration
|
migrator.py
|
Python
|
unlicense
| 20,468
|
import pytest
import numpy as np
from keras import backend as K
from keras_contrib import backend as KC
from keras_contrib.layers import core
from keras.utils.test_utils import layer_test, keras_test
from numpy.testing import assert_allclose
@keras_test
def test_cosinedense():
from keras import regularizers
from keras import constraints
from keras.models import Sequential
layer_test(core.CosineDense,
kwargs={'units': 3},
input_shape=(3, 2))
layer_test(core.CosineDense,
kwargs={'units': 3},
input_shape=(3, 4, 2))
layer_test(core.CosineDense,
kwargs={'units': 3},
input_shape=(None, None, 2))
layer_test(core.CosineDense,
kwargs={'units': 3},
input_shape=(3, 4, 5, 2))
layer_test(core.CosineDense,
kwargs={'units': 3,
'kernel_regularizer': regularizers.l2(0.01),
'bias_regularizer': regularizers.l1(0.01),
'activity_regularizer': regularizers.l2(0.01),
'kernel_constraint': constraints.MaxNorm(1),
'bias_constraint': constraints.MaxNorm(1)},
input_shape=(3, 2))
X = np.random.randn(1, 20)
model = Sequential()
model.add(core.CosineDense(1, use_bias=True, input_shape=(20,)))
model.compile(loss='mse', optimizer='rmsprop')
W = model.get_weights()
W[0] = X.T
W[1] = np.asarray([1.])
model.set_weights(W)
out = model.predict(X)
assert_allclose(out, np.ones((1, 1), dtype=K.floatx()), atol=1e-5)
X = np.random.randn(1, 20)
model = Sequential()
model.add(core.CosineDense(1, use_bias=False, input_shape=(20,)))
model.compile(loss='mse', optimizer='rmsprop')
W = model.get_weights()
W[0] = -2 * X.T
model.set_weights(W)
out = model.predict(X)
assert_allclose(out, -np.ones((1, 1), dtype=K.floatx()), atol=1e-5)
if __name__ == '__main__':
pytest.main([__file__])
|
stygstra/keras-contrib
|
tests/keras_contrib/layers/test_core.py
|
Python
|
mit
| 2,049
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import defaultdict
import logging
import types
import warnings
from django.conf import settings
from django.core import urlresolvers
from django import shortcuts
from django.template.loader import render_to_string # noqa
from django.utils.datastructures import SortedDict
from django.utils.functional import Promise # noqa
from django.utils.http import urlencode # noqa
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
import six
from horizon import exceptions
from horizon import messages
from horizon.utils import functions
from horizon.utils import html
LOG = logging.getLogger(__name__)
# For Bootstrap integration; can be overridden in settings.
ACTION_CSS_CLASSES = ("btn", "btn-default", "btn-sm")
STRING_SEPARATOR = "__"
class BaseActionMetaClass(type):
"""Metaclass for adding all actions options from inheritance tree
to action.
This way actions can inherit from each other but still use
the class attributes DSL. Meaning, all attributes of Actions are
defined as class attributes, but in the background, it will be used as
parameters for the initializer of the object. The object is then
    initialized in a clean way. A similar principle is used in DataTableMetaclass.
"""
def __new__(mcs, name, bases, attrs):
# Options of action are set as class attributes, loading them.
options = {}
if attrs:
options = attrs
# Iterate in reverse to preserve final order
for base in bases[::-1]:
# It actually throws all super classes away except immediate
# superclass. But it's fine, immediate super-class base_options
            # includes everything because the superclasses were also created by
            # this metaclass. The same principle is used in DataTableMetaclass.
if hasattr(base, 'base_options') and base.base_options:
base_options = {}
# Updating options by superclasses.
base_options.update(base.base_options)
# Updating superclass options by actual class options.
base_options.update(options)
options = base_options
# Saving all options to class attribute, this will be used for
# instantiating of the specific Action.
attrs['base_options'] = options
return type.__new__(mcs, name, bases, attrs)
def __call__(cls, *args, **kwargs):
cls.base_options.update(kwargs)
# Adding cls.base_options to each init call.
klass = super(BaseActionMetaClass, cls).__call__(
*args, **cls.base_options)
return klass
@six.add_metaclass(BaseActionMetaClass)
class BaseAction(html.HTMLElement):
"""Common base class for all ``Action`` classes."""
def __init__(self, **kwargs):
super(BaseAction, self).__init__()
self.datum = kwargs.get('datum', None)
self.table = kwargs.get('table', None)
self.handles_multiple = kwargs.get('handles_multiple', False)
self.requires_input = kwargs.get('requires_input', False)
self.preempt = kwargs.get('preempt', False)
self.policy_rules = kwargs.get('policy_rules', None)
def data_type_matched(self, datum):
"""Method to see if the action is allowed for a certain type of data.
Only affects mixed data type tables.
"""
if datum:
action_data_types = getattr(self, "allowed_data_types", [])
# If the data types of this action is empty, we assume it accepts
# all kinds of data and this method will return True.
if action_data_types:
datum_type = getattr(datum, self.table._meta.data_type_name,
None)
if datum_type and (datum_type not in action_data_types):
return False
return True
def get_policy_target(self, request, datum):
"""Provide the target for a policy request.
This method is meant to be overridden to return target details when
one of the policy checks requires them. E.g., {"user_id": datum.id}
"""
return {}
def allowed(self, request, datum):
"""Determine whether this action is allowed for the current request.
This method is meant to be overridden with more specific checks.
"""
return True
def _allowed(self, request, datum):
policy_check = getattr(settings, "POLICY_CHECK_FUNCTION", None)
if policy_check and self.policy_rules:
target = self.get_policy_target(request, datum)
return (policy_check(self.policy_rules, request, target) and
self.allowed(request, datum))
return self.allowed(request, datum)
def update(self, request, datum):
"""Allows per-action customization based on current conditions.
This is particularly useful when you wish to create a "toggle"
action that will be rendered differently based on the value of an
attribute on the current row's data.
By default this method is a no-op.
"""
pass
def get_default_classes(self):
"""Returns a list of the default classes for the action. Defaults to
``["btn", "btn-default", "btn-sm"]``.
"""
return getattr(settings, "ACTION_CSS_CLASSES", ACTION_CSS_CLASSES)
def get_default_attrs(self):
"""Returns a list of the default HTML attributes for the action.
Defaults to returning an ``id`` attribute with the value
``{{ table.name }}__action_{{ action.name }}__{{ creation counter }}``.
"""
if self.datum is not None:
bits = (self.table.name,
"row_%s" % self.table.get_object_id(self.datum),
"action_%s" % self.name)
else:
bits = (self.table.name, "action_%s" % self.name)
return {"id": STRING_SEPARATOR.join(bits)}
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.name)
def associate_with_table(self, table):
self.table = table
class Action(BaseAction):
"""Represents an action which can be taken on this table's data.
.. attribute:: name
Required. The short name or "slug" representing this
action. This name should not be changed at runtime.
.. attribute:: verbose_name
A descriptive name used for display purposes. Defaults to the
value of ``name`` with the first letter of each word capitalized.
.. attribute:: verbose_name_plural
Used like ``verbose_name`` in cases where ``handles_multiple`` is
``True``. Defaults to ``verbose_name`` with the letter "s" appended.
.. attribute:: method
The HTTP method for this action. Defaults to ``POST``. Other methods
may or may not succeed currently.
.. attribute:: requires_input
Boolean value indicating whether or not this action can be taken
without any additional input (e.g. an object id). Defaults to ``True``.
.. attribute:: preempt
Boolean value indicating whether this action should be evaluated in
the period after the table is instantiated but before the data has
been loaded.
This can allow actions which don't need access to the full table data
to bypass any API calls and processing which would otherwise be
required to load the table.
.. attribute:: allowed_data_types
A list that contains the allowed data types of the action. If the
datum's type is in this list, the action will be shown on the row
for the datum.
        Defaults to an empty list (``[]``). When set to empty, the action
will accept any kind of data.
.. attribute:: policy_rules
list of scope and rule tuples to do policy checks on, the
composition of which is (scope, rule)
scope: service type managing the policy for action
rule: string representing the action to be checked
for a policy that requires a single rule check:
policy_rules should look like
"(("compute", "compute:create_instance"),)"
for a policy that requires multiple rule checks:
rules should look like
"(("identity", "identity:list_users"),
("identity", "identity:list_roles"))"
At least one of the following methods must be defined:
.. method:: single(self, data_table, request, object_id)
Handler for a single-object action.
.. method:: multiple(self, data_table, request, object_ids)
Handler for multi-object actions.
.. method:: handle(self, data_table, request, object_ids)
If a single function can work for both single-object and
multi-object cases then simply providing a ``handle`` function
will internally route both ``single`` and ``multiple`` requests
to ``handle`` with the calls from ``single`` being transformed
into a list containing only the single object id.
"""
def __init__(self, single_func=None, multiple_func=None, handle_func=None,
attrs=None, **kwargs):
super(Action, self).__init__(**kwargs)
self.method = kwargs.get('method', "POST")
self.requires_input = kwargs.get('requires_input', True)
self.verbose_name = kwargs.get('verbose_name', self.name.title())
self.verbose_name_plural = kwargs.get('verbose_name_plural',
"%ss" % self.verbose_name)
self.allowed_data_types = kwargs.get('allowed_data_types', [])
self.icon = kwargs.get('icon', None)
if attrs:
self.attrs.update(attrs)
# Don't set these if they're None
if single_func:
self.single = single_func
if multiple_func:
self.multiple = multiple_func
if handle_func:
self.handle = handle_func
# Ensure we have the appropriate methods
has_handler = hasattr(self, 'handle') and callable(self.handle)
has_single = hasattr(self, 'single') and callable(self.single)
has_multiple = hasattr(self, 'multiple') and callable(self.multiple)
if has_handler or has_multiple:
self.handles_multiple = True
if not has_handler and (not has_single or has_multiple):
cls_name = self.__class__.__name__
raise NotImplementedError('You must define either a "handle" '
'method or a "single" or "multiple" '
'method on %s.' % cls_name)
if not has_single:
def single(self, data_table, request, object_id):
return self.handle(data_table, request, [object_id])
self.single = types.MethodType(single, self)
if not has_multiple and self.handles_multiple:
def multiple(self, data_table, request, object_ids):
return self.handle(data_table, request, object_ids)
self.multiple = types.MethodType(multiple, self)
def get_param_name(self):
"""Returns the full POST parameter name for this action.
Defaults to
``{{ table.name }}__{{ action.name }}``.
"""
return "__".join([self.table.name, self.name])
class LinkAction(BaseAction):
"""A table action which is simply a link rather than a form POST.
.. attribute:: name
Required. The short name or "slug" representing this
action. This name should not be changed at runtime.
.. attribute:: verbose_name
A string which will be rendered as the link text. (Required)
.. attribute:: url
A string or a callable which resolves to a url to be used as the link
target. You must either define the ``url`` attribute or override
the ``get_link_url`` method on the class.
.. attribute:: allowed_data_types
A list that contains the allowed data types of the action. If the
datum's type is in this list, the action will be shown on the row
for the datum.
        Defaults to an empty list (``[]``). When set to empty, the action
will accept any kind of data.
"""
# class attribute name is used for ordering of Actions in table
name = "link"
ajax = False
def __init__(self, attrs=None, **kwargs):
super(LinkAction, self).__init__(**kwargs)
self.method = kwargs.get('method', "GET")
self.bound_url = kwargs.get('bound_url', None)
self.name = kwargs.get('name', self.name)
self.verbose_name = kwargs.get('verbose_name', self.name.title())
self.url = kwargs.get('url', None)
self.allowed_data_types = kwargs.get('allowed_data_types', [])
self.icon = kwargs.get('icon', None)
self.kwargs = kwargs
if not kwargs.get('verbose_name', None):
raise NotImplementedError('A LinkAction object must have a '
'verbose_name attribute.')
if attrs:
self.attrs.update(attrs)
if self.ajax:
self.classes = list(self.classes) + ['ajax-update']
def get_ajax_update_url(self):
table_url = self.table.get_absolute_url()
params = urlencode(
SortedDict([("action", self.name), ("table", self.table.name)])
)
return "%s?%s" % (table_url, params)
def render(self):
return render_to_string("horizon/common/_data_table_table_action.html",
{"action": self})
def associate_with_table(self, table):
super(LinkAction, self).associate_with_table(table)
if self.ajax:
self.attrs['data-update-url'] = self.get_ajax_update_url()
def get_link_url(self, datum=None):
"""Returns the final URL based on the value of ``url``.
If ``url`` is callable it will call the function.
If not, it will then try to call ``reverse`` on ``url``.
Failing that, it will simply return the value of ``url`` as-is.
When called for a row action, the current row data object will be
passed as the first parameter.
"""
if not self.url:
raise NotImplementedError('A LinkAction class must have a '
'url attribute or define its own '
'get_link_url method.')
if callable(self.url):
return self.url(datum, **self.kwargs)
try:
if datum:
obj_id = self.table.get_object_id(datum)
return urlresolvers.reverse(self.url, args=(obj_id,))
else:
return urlresolvers.reverse(self.url)
except urlresolvers.NoReverseMatch as ex:
LOG.info('No reverse found for "%s": %s' % (self.url, ex))
return self.url
class FilterAction(BaseAction):
"""A base class representing a filter action for a table.
.. attribute:: name
The short name or "slug" representing this action. Defaults to
``"filter"``.
.. attribute:: verbose_name
A descriptive name used for display purposes. Defaults to the
value of ``name`` with the first letter of each word capitalized.
.. attribute:: param_name
A string representing the name of the request parameter used for the
search term. Default: ``"q"``.
.. attribute:: filter_type
A string representing the type of this filter. If this is set to
``"server"`` then ``filter_choices`` must also be provided.
Default: ``"query"``.
.. attribute:: filter_choices
Required for server type filters. A tuple of tuples representing the
filter options. Tuple composition should evaluate to (string, string,
boolean), representing the filter parameter, display value, and whether
or not it should be applied to the API request as an API query
attribute. API type filters do not need to be accounted for in the
filter method since the API will do the filtering. However, server
type filters in general will need to be performed in the filter method.
By default this attribute is not provided.
.. attribute:: needs_preloading
If True, the filter function will be called for the initial
GET request with an empty ``filter_string``, regardless of the
value of ``method``.
"""
# TODO(gabriel): The method for a filter action should be a GET,
# but given the form structure of the table that's currently impossible.
# At some future date this needs to be reworked to get the filter action
# separated from the table's POST form.
# class attribute name is used for ordering of Actions in table
name = "filter"
def __init__(self, **kwargs):
super(FilterAction, self).__init__(**kwargs)
self.method = kwargs.get('method', "POST")
self.name = kwargs.get('name', self.name)
self.verbose_name = kwargs.get('verbose_name', _("Filter"))
self.filter_type = kwargs.get('filter_type', "query")
self.filter_choices = kwargs.get('filter_choices')
self.needs_preloading = kwargs.get('needs_preloading', False)
self.param_name = kwargs.get('param_name', 'q')
self.icon = "search"
if self.filter_type == 'server' and self.filter_choices is None:
raise NotImplementedError(
'A FilterAction object with the '
'filter_type attribute set to "server" must also have a '
'filter_choices attribute.')
def get_param_name(self):
"""Returns the full query parameter name for this action.
Defaults to
``{{ table.name }}__{{ action.name }}__{{ action.param_name }}``.
"""
return "__".join([self.table.name, self.name, self.param_name])
def assign_type_string(self, table, data, type_string):
for datum in data:
setattr(datum, table._meta.data_type_name, type_string)
def data_type_filter(self, table, data, filter_string):
filtered_data = []
for data_type in table._meta.data_types:
func_name = "filter_%s_data" % data_type
filter_func = getattr(self, func_name, None)
if not filter_func and not callable(filter_func):
# The check of filter function implementation should happen
# in the __init__. However, the current workflow of DataTable
# and actions won't allow it. Need to be fixed in the future.
cls_name = self.__class__.__name__
raise NotImplementedError("You must define a %s method "
"for %s data type in %s." %
(func_name, data_type, cls_name))
_data = filter_func(table, data, filter_string)
self.assign_type_string(table, _data, data_type)
filtered_data.extend(_data)
return filtered_data
def filter(self, table, data, filter_string):
"""Provides the actual filtering logic.
This method must be overridden by subclasses and return
the filtered data.
"""
return data
def is_api_filter(self, filter_field):
"""Determine if the given filter field should be used as an
API filter.
"""
if self.filter_type == 'server':
for choice in self.filter_choices:
if (choice[0] == filter_field and len(choice) > 2 and
choice[2] is True):
return True
return False
class FixedFilterAction(FilterAction):
"""A filter action with fixed buttons."""
def __init__(self, **kwargs):
super(FixedFilterAction, self).__init__(**kwargs)
self.filter_type = kwargs.get('filter_type', "fixed")
self.needs_preloading = kwargs.get('needs_preloading', True)
self.fixed_buttons = self.get_fixed_buttons()
self.filter_string = ''
def filter(self, table, images, filter_string):
self.filter_string = filter_string
categories = self.categorize(table, images)
self.categories = defaultdict(list, categories)
for button in self.fixed_buttons:
button['count'] = len(self.categories[button['value']])
if not filter_string:
return images
return self.categories[filter_string]
def get_fixed_buttons(self):
"""Returns a list of dictionaries describing the fixed buttons
to use for filtering.
Each list item should be a dict with the following keys:
* ``text``: Text to display on the button
* ``icon``: Icon class for icon element (inserted before text).
* ``value``: Value returned when the button is clicked. This value is
passed to ``filter()`` as ``filter_string``.
"""
return []
def categorize(self, table, images):
"""Override to separate images into categories.
Return a dict with a key for the value of each fixed button,
and a value that is a list of images in that category.
"""
return {}
class BatchAction(Action):
"""A table action which takes batch action on one or more
objects. This action should not require user input on a
per-object basis.
.. attribute:: name
An internal name for this action.
.. method:: action_present
Method accepting an integer/long parameter and returning the display
forms of the name properly pluralised (depending on the integer) and
translated in a string or tuple/list.
.. attribute:: action_present (PendingDeprecation)
String or tuple/list. The display forms of the name.
Should be a transitive verb, capitalized and translated. ("Delete",
"Rotate", etc.) If tuple or list - then setting
self.current_present_action = n will set the current active item
from the list(action_present[n])
You can pass a complete action name including 'data_type' by specifying
'%(data_type)s' substitution in action_present ("Delete %(data_type)s").
Otherwise a complete action name is a format of "<action> <data_type>".
<data_type> is determined based on the number of items.
By passing a complete action name you allow translators to control
the order of words as they want.
NOTE: action_present attribute is bad for translations and should be
avoided. Please use the action_present method instead.
This form is kept for legacy.
.. method:: action_past
Method accepting an integer/long parameter and returning the display
forms of the name properly pluralised (depending on the integer) and
translated in a string or tuple/list.
.. attribute:: action_past (PendingDeprecation)
String or tuple/list. The past tense of action_present. ("Deleted",
"Rotated", etc.) If tuple or list - then
setting self.current_past_action = n will set the current active item
from the list(action_past[n])
NOTE: action_past attribute is bad for translations and should be
avoided. Please use the action_past method instead.
This form is kept for legacy.
.. attribute:: data_type_singular
Optional display name (if the data_type method is not defined) for the
type of data that receives the action. ("Key Pair", "Floating IP", etc.)
.. attribute:: data_type_plural
Optional plural word (if the data_type method is not defined) for the
type of data being acted on. Defaults to appending 's'. Relying on the
        default is bad for translations and should not be done, so its absence
will raise a DeprecationWarning. It is currently kept as optional for
legacy code.
NOTE: data_type_singular and data_type_plural attributes are bad for
translations and should be avoided. Please use the action_present and
action_past methods. This form is kept for legacy.
.. attribute:: success_url
Optional location to redirect after completion of the delete
action. Defaults to the current page.
.. attribute:: help_text
Optional message for providing an appropriate help text for
the horizon user.
"""
help_text = _("This action cannot be undone.")
def __init__(self, **kwargs):
super(BatchAction, self).__init__(**kwargs)
action_present_method = False
if hasattr(self, 'action_present'):
if callable(self.action_present):
action_present_method = True
else:
warnings.warn(PendingDeprecationWarning(
'The %s BatchAction class must have an action_present '
'method instead of attribute.' % self.__class__.__name__
))
action_past_method = False
if hasattr(self, 'action_past'):
if callable(self.action_past):
action_past_method = True
else:
warnings.warn(PendingDeprecationWarning(
'The %s BatchAction class must have an action_past '
'method instead of attribute.' % self.__class__.__name__
))
action_methods = action_present_method and action_past_method
has_action_method = action_present_method or action_past_method
if has_action_method and not action_methods:
raise NotImplementedError(
                'The %s BatchAction class must have both action_past and '
'action_present methods.' % self.__class__.__name__
)
if not action_methods:
if not kwargs.get('data_type_singular'):
raise NotImplementedError(
'The %s BatchAction class must have a data_type_singular '
'attribute when action_past and action_present attributes '
'are used.' % self.__class__.__name__
)
self.data_type_singular = kwargs.get('data_type_singular')
self.data_type_plural = kwargs.get('data_type_plural',
self.data_type_singular + 's')
# TODO(ygbo): get rid of self.use_action_method once action_present and
# action_past are changed to methods handling plurals.
self.use_action_method = action_methods
self.success_url = kwargs.get('success_url', None)
# If setting a default name, don't initialize it too early
self.verbose_name = kwargs.get('verbose_name', self._get_action_name)
self.verbose_name_plural = kwargs.get(
'verbose_name_plural',
lambda: self._get_action_name('plural'))
self.current_present_action = 0
self.current_past_action = 0
# Keep record of successfully handled objects
self.success_ids = []
self.help_text = kwargs.get('help_text', self.help_text)
def _allowed(self, request, datum=None):
# Override the default internal action method to prevent batch
# actions from appearing on tables with no data.
if not self.table.data and not datum:
return False
return super(BatchAction, self)._allowed(request, datum)
def _get_action_name(self, items=None, past=False):
"""Builds combinations like 'Delete Object' and 'Deleted
Objects' based on the number of items and `past` flag.
:param items:
A list or tuple of items (or container with a __len__ method) to
count the number of concerned items for which this method is
called.
When this method is called for a single item (by the BatchAction
itself), this parameter can be omitted and the number of items
will be considered as "one".
If we want to evaluate to "zero" this parameter must not be omitted
(and should be an empty container).
:param past:
Boolean flag indicating if the action took place in the past.
By default a present action is considered.
"""
action_type = "past" if past else "present"
if items is None:
# Called without items parameter (by a single instance.)
count = 1
else:
count = len(items)
# TODO(ygbo): get rid of self.use_action_method once action_present and
# action_past are changed to methods handling plurals.
action_attr = getattr(self, "action_%s" % action_type)
if self.use_action_method:
action_attr = action_attr(count)
if isinstance(action_attr, (basestring, Promise)):
action = action_attr
else:
toggle_selection = getattr(self, "current_%s_action" % action_type)
action = action_attr[toggle_selection]
if self.use_action_method:
return action
        # TODO(ygbo): get rid of all this below once action_present and
# action_past are changed to methods handling plurals.
data_type = ungettext_lazy(
self.data_type_singular,
self.data_type_plural,
count
)
if '%(data_type)s' in action:
# If full action string is specified, use action as format string.
msgstr = action
else:
if action_type == "past":
msgstr = pgettext_lazy(u"past", "%(action)s %(data_type)s")
else:
msgstr = pgettext_lazy(u"present", "%(action)s %(data_type)s")
return msgstr % {'action': action, 'data_type': data_type}
def action(self, request, datum_id):
"""Required. Accepts a single object id and performs the specific
action.
Return values are discarded, errors raised are caught and logged.
"""
def update(self, request, datum):
"""Switches the action verbose name, if needed."""
if getattr(self, 'action_present', False):
self.verbose_name = self._get_action_name()
self.verbose_name_plural = self._get_action_name('plural')
def get_success_url(self, request=None):
"""Returns the URL to redirect to after a successful action."""
if self.success_url:
return self.success_url
return request.get_full_path()
def get_default_attrs(self):
"""Returns a list of the default HTML attributes for the action."""
attrs = super(BatchAction, self).get_default_attrs()
attrs.update({'data-batch-action': 'true'})
return attrs
def handle(self, table, request, obj_ids):
action_success = []
action_failure = []
action_not_allowed = []
for datum_id in obj_ids:
datum = table.get_object_by_id(datum_id)
datum_display = table.get_object_display(datum) or datum_id
if not table._filter_action(self, request, datum):
action_not_allowed.append(datum_display)
LOG.info('Permission denied to %s: "%s"' %
(self._get_action_name(past=True).lower(),
datum_display))
continue
try:
self.action(request, datum_id)
# Call update to invoke changes if needed
self.update(request, datum)
action_success.append(datum_display)
self.success_ids.append(datum_id)
LOG.info('%s: "%s"' %
(self._get_action_name(past=True), datum_display))
except Exception as ex:
# Handle the exception but silence it since we'll display
# an aggregate error message later. Otherwise we'd get
# multiple error messages displayed to the user.
if getattr(ex, "_safe_message", None):
ignore = False
else:
ignore = True
action_failure.append(datum_display)
exceptions.handle(request, ignore=ignore)
# Begin with success message class, downgrade to info if problems.
success_message_level = messages.success
if action_not_allowed:
msg = _('You are not allowed to %(action)s: %(objs)s')
params = {"action":
self._get_action_name(action_not_allowed).lower(),
"objs": functions.lazy_join(", ", action_not_allowed)}
messages.error(request, msg % params)
success_message_level = messages.info
if action_failure:
msg = _('Unable to %(action)s: %(objs)s')
params = {"action": self._get_action_name(action_failure).lower(),
"objs": functions.lazy_join(", ", action_failure)}
messages.error(request, msg % params)
success_message_level = messages.info
if action_success:
msg = _('%(action)s: %(objs)s')
params = {"action":
self._get_action_name(action_success, past=True),
"objs": functions.lazy_join(", ", action_success)}
success_message_level(request, msg % params)
return shortcuts.redirect(self.get_success_url(request))
class DeleteAction(BatchAction):
"""A table action used to perform delete operations on table data.
.. attribute:: name
A short name or "slug" representing this action.
Defaults to 'delete'
.. method:: action_present
Method accepting an integer/long parameter and returning the display
forms of the name properly pluralised (depending on the integer) and
translated in a string or tuple/list.
.. attribute:: action_present (PendingDeprecation)
A string containing the transitive verb describing the delete action.
Defaults to 'Delete'
NOTE: action_present attribute is bad for translations and should be
avoided. Please use the action_present method instead.
This form is kept for legacy.
.. method:: action_past
Method accepting an integer/long parameter and returning the display
forms of the name properly pluralised (depending on the integer) and
translated in a string or tuple/list.
.. attribute:: action_past (PendingDeprecation)
A string set to the past tense of action_present.
Defaults to 'Deleted'
NOTE: action_past attribute is bad for translations and should be
avoided. Please use the action_past method instead.
This form is kept for legacy.
.. attribute:: data_type_singular (PendingDeprecation)
A string used to name the data to be deleted.
.. attribute:: data_type_plural (PendingDeprecation)
Optional. Plural of ``data_type_singular``.
Defaults to ``data_type_singular`` appended with an 's'. Relying on
        the default is bad for translations and should not be done, so its
absence will raise a DeprecationWarning. It is currently kept as
optional for legacy code.
NOTE: data_type_singular and data_type_plural attributes are bad for
translations and should be avoided. Please use the action_present and
action_past methods. This form is kept for legacy.
"""
name = "delete"
def __init__(self, **kwargs):
super(DeleteAction, self).__init__(**kwargs)
self.name = kwargs.get('name', self.name)
if not hasattr(self, "action_present"):
self.action_present = kwargs.get('action_present', _("Delete"))
if not hasattr(self, "action_past"):
self.action_past = kwargs.get('action_past', _("Deleted"))
self.icon = "remove"
def action(self, request, obj_id):
"""Action entry point. Overrides base class' action method.
Accepts a single object id passing it over to the delete method
responsible for the object's destruction.
"""
return self.delete(request, obj_id)
def delete(self, request, obj_id):
"""Required. Deletes an object referenced by obj_id.
Override to provide delete functionality specific to your data.
"""
def get_default_classes(self):
"""Appends ``btn-danger`` to the action's default css classes.
This method ensures the corresponding button is highlighted
as a trigger for a potentially dangerous action.
"""
classes = super(DeleteAction, self).get_default_classes()
classes += ("btn-danger",)
return classes
class UpdateAction(object):
"""A table action for cell updates by inline editing."""
name = "update"
action_present = _("Update")
action_past = _("Updated")
data_type_singular = "update"
def action(self, request, datum, obj_id, cell_name, new_cell_value):
self.update_cell(request, datum, obj_id, cell_name, new_cell_value)
def update_cell(self, request, datum, obj_id, cell_name, new_cell_value):
"""Method for saving data of the cell.
        This method must implement the saving logic of the inline-edited
        table cell.
"""
def allowed(self, request, datum, cell):
"""Determine whether updating is allowed for the current request.
This method is meant to be overridden with more specific checks.
Data of the row and of the cell are passed to the method.
"""
return True
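

# --- Illustrative sketch (editor's addition, not part of horizon) ------------
# The docstrings above recommend defining ``action_present``/``action_past`` as
# methods returning properly pluralised, translatable labels instead of the
# deprecated string attributes. A minimal subclass could look like the class
# below; the class name and labels are hypothetical, and ``delete`` would call
# a real API in practice.
class ExampleDeleteSnapshot(DeleteAction):
    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Delete Snapshot",
            u"Delete Snapshots",
            count
        )

    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Deleted Snapshot",
            u"Deleted Snapshots",
            count
        )

    def delete(self, request, obj_id):
        # Hypothetical: replace with the actual delete call for your data.
        pass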
|
NCI-Cloud/horizon
|
horizon/tables/actions.py
|
Python
|
apache-2.0
| 38,502
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('utils', '0001_initial'),
('socialmedia', '0001_initial'),
('tenant', '0001_initial'),
('contacts', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='function',
name='email_addresses',
field=models.ManyToManyField(to='utils.EmailAddress', verbose_name='list of email addresses'),
preserve_default=True,
),
migrations.AddField(
model_name='function',
name='manager',
field=models.ForeignKey(
related_name='manager',
verbose_name='manager',
blank=True,
to='contacts.Contact',
null=True
),
preserve_default=True,
),
migrations.AddField(
model_name='function',
name='phone_numbers',
field=models.ManyToManyField(to='utils.PhoneNumber', verbose_name='list of phone numbers'),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='function',
unique_together=set([('account', 'contact')]),
),
migrations.AddField(
model_name='contact',
name='addresses',
field=models.ManyToManyField(to='utils.Address', verbose_name='list of addresses', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='contact',
name='email_addresses',
field=models.ManyToManyField(to='utils.EmailAddress', verbose_name='list of e-mail addresses', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='contact',
name='phone_numbers',
field=models.ManyToManyField(to='utils.PhoneNumber', verbose_name='list of phone numbers', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='contact',
name='social_media',
field=models.ManyToManyField(
to='socialmedia.SocialMedia',
verbose_name='list of social media',
blank=True
),
preserve_default=True,
),
migrations.AddField(
model_name='contact',
name='tenant',
field=models.ForeignKey(to='tenant.Tenant', blank=True),
preserve_default=True,
),
]
|
HelloLily/hellolily
|
lily/contacts/migrations/0002_auto_20150218_1722.py
|
Python
|
agpl-3.0
| 2,655
|
# -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Julien Veyssier
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.test import BackendTest
class BanqueEuropeenneCreditMutuelTest(BackendTest):
MODULE = 'becm'
def test_becm(self):
l = list(self.backend.iter_accounts())
if len(l) > 0:
a = l[0]
list(self.backend.iter_history(a))
|
vicnet/weboob
|
modules/becm/test.py
|
Python
|
lgpl-3.0
| 1,064
|
#-------------------------------------------------------------------------------
# Name: breakaway-parser
# Purpose: Parse Breakaway PDF into ICS file
#
# Author: cthompso
#
# Created: 12/06/2013
# Copyright: (c) cthompso 2013
# Licence: MIT
#-------------------------------------------------------------------------------
import pdb,tempfile,subprocess,re
from optparse import OptionParser
from icalendar import Calendar, Event
import pytz
from datetime import datetime, timedelta, date
import time
def main():
parser = OptionParser()
parser.add_option("-f", "--file", dest="filename",
help="write report to FILE", metavar="FILE")
(options, args) = parser.parse_args()
teams = get_teams(options.filename)
games = get_games(options.filename)
for key,team in teams.items():
create_ics(team, games, teams)
def create_ics(team, games, teams):
cal = Calendar()
cal.add('prodid', '-//Breakway Schedules//mxm.dk//')
cal.add('version', '2.0')
for game in games:
if team.id == game.away_team or team.id == game.home_team:
event = Event()
try:
event.add('summary', '%s vs. %s' % (teams[game.home_team], teams[game.away_team]))
except Exception as e:
pdb.set_trace()
event.add('dtstart', game.datetime)
event.add('dtend', game.datetime + timedelta(hours=1))
event.add('dtstamp', datetime.now())
event.add('location', "BreakAway Field %s" % (game.field))
event['uid'] = '20050115T101010/27346262376@mxm.dk'
event.add('priority', 5)
cal.add_component(event)
file = open('ics/%s - %s.ics' % (team.id, team.name), 'wb')
file.write(cal.to_ical())
file.close()
def get_games(filename):
    # Use the non-layout version to parse out the games.
with open(filename, 'r') as pdf_file:
text_file = ConvertPDFToText(pdf_file,0)
games = []
mode = "start"
week = ""
game_date = ""
today = datetime.now()
while True:
line = text_file.readline()
if not line: break #EOF
if line.strip() == "TEAM (COLOR)":
mode = "team"
if line.strip() == "GOOD LUCK & HAVE FUN!":
mode = "team-complete"
if line.strip() == "WEEK 1":
mode = "sched"
if line.strip() == "IMPORTANT EVERYONE READ!":
mode = "sched-complete"
home_team = 0
away_team = 0
game_time = ""
field = ""
try:
match = re.match("WEEK (\d+)",line)
if mode == "sched" and match:
week = match.group(1)
match = re.match("(\w+)\.(\w+)\.\s+(\d+)",line)
if mode == "sched" and match:
day_of_week = match.group(1)
month_of_year = match.group(2)
day_of_month = match.group(3)
month_dt = datetime.strptime(month_of_year, '%b') # convert string format to month number
match = re.match("(\d+)-(\d+) (\d+\:\d{2})(\d?)",line)
if mode == "sched" and match:
home_team = int(match.group(1))
away_team = int(match.group(2))
game_time = match.group(3)
field = match.group(4)
# catch the case where pdftotext didn't get the splitting right
match_time = re.match("(\d+\:\d{2})(\d?)$",line)
match_mtch = re.match("(\d+)-(\d+)$",line)
if mode == "sched" and (match_time or match_mtch):
line2 = text_file.readline()
if not line2: break #EOF
if match_time:
match_mtch = re.match("(\d+)-(\d+)$",line2)
elif match_mtch:
match_time = re.match("(\d+\:\d{2})(\d?)$",line2)
game_time = match_time.group(1)
field = match_time.group(2)
home_team = int(match_mtch.group(1))
away_team = int(match_mtch.group(2))
except Exception as e:
print "%s - %s" % (e,line)
if mode =="sched" and home_team and away_team:
try:
date_string = "%02d/%s/%s %s PM" % (int(day_of_month), month_dt.month, today.year, game_time)
game_datetime = datetime.strptime(date_string, '%d/%m/%Y %I:%M %p')
except Exception as e:
pdb.set_trace()
if not field: field = "1" # default to field 1
game = Game(home_team, away_team, game_datetime, field)
games.append(game)
for game in games:
print game
return games
def get_teams(filename):
# Use the layout version to parse out the teams.
with open(filename, 'r') as pdf_file:
text_file = ConvertPDFToText(pdf_file,1)
teams = {}
mode = "start"
for line in text_file:
if line.strip() == "TEAM (COLOR)":
mode = "team"
if line.strip() == "GOOD LUCK & HAVE FUN!":
mode = "team-complete"
if line.strip() == "WEEK 1":
mode = "sched"
if line.strip() == "IMPORTANT EVERYONE READ!":
mode = "sched-complete"
if mode == "team" and re.match("\d+.*",line.strip()):
team_handler(line,teams)
for team_id,team in teams.items():
print team
return teams
def team_handler(line, teams):
    match = re.match("\s*(\d+)\.?\s+(.*)\s+(\d+)\.\s+(.*)",line)
if match is None:
match = re.match("\s*(\d+)\.?\s+(.*)$",line)
if match is None:
print line
try:
team1_id = int(match.group(1))
team1_nm = match.group(2)
team1 = Team(team1_id, team1_nm)
teams[team1_id] = team1
except Exception as e:
print "Warning: %s - %s" % (e,line)
try:
team2_id = int(match.group(3))
team2_nm = match.group(4)
team2 = Team(team2_id, team2_nm)
teams[team2_id] = team2
except Exception as e:
print "Warning: %s - %s" % (e,line)
class Team:
    def __init__(self, id, name=None):
        self._schedule = []
        self._id = id
        # The team name is optional so a bare Team(id) placeholder still works.
        if name is not None:
            self.name = unicode(name.strip(), errors='ignore')
@property
def id(self):
return self._id
@property
def name(self):
return self._name
@name.setter
def name(self,value):
self._name = value
@property
def schedule(self):
return self._schedule
def add_game(self,game):
self._schedule.append(game)
def __unicode__(self):
return "%s - %s" % (self._id, self._name)
def __str__(self):
return self.__unicode__()
class Game:
def __init__(self, home_team, away_team, datetime, field):
self._home_team = home_team
self._away_team = away_team
self._datetime = datetime
self._field = field
@property
def home_team(self):
return self._home_team
@property
def away_team(self):
return self._away_team
@property
def datetime(self):
return self._datetime
@property
def field(self):
return self._field
def __unicode__(self):
return "%s [%02s vs. %02s] [Field %s]" % (self._datetime.isoformat(' '), self._home_team, self._away_team, self._field)
def __str__(self):
return self.__unicode__()
def ConvertPDFToText(currentPDF,layout):
pdfData = currentPDF.read()
tf = tempfile.NamedTemporaryFile()
tf.write(pdfData)
tf.seek(0)
outputTf = tempfile.NamedTemporaryFile()
if (len(pdfData) > 0) :
if layout:
out, err = subprocess.Popen(["pdftotext", "-layout", tf.name, outputTf.name ]).communicate()
else:
out, err = subprocess.Popen(["pdftotext", tf.name, outputTf.name ]).communicate()
return outputTf
else :
return None
if __name__ == '__main__':
main()
|
cooperthompson/breakaway-calendar
|
scripts/breakaway-parser.py
|
Python
|
mit
| 8,066
|
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from operator import attrgetter
import os
import shutil
from ..cli.error import ActionException
from ..cli.error import ArgumentException
from ..cli.error import ServerDataException
from ..cli.serializers import listdir_without_extensions
from .base import BaseObject
from .task import DeployTask
from .task import Task
class Environment(BaseObject):
class_api_path = "clusters/"
instance_api_path = "clusters/{0}/"
@classmethod
def create(cls, name, release_id, net, net_segment_type=None):
data = {
"nodes": [],
"tasks": [],
"name": name,
"release_id": release_id
}
if net.lower() == "nova":
data["net_provider"] = "nova_network"
else:
data["net_provider"] = "neutron"
if net_segment_type is None:
raise ArgumentException(
'"--net-segment-type" must be specified!')
data["net_segment_type"] = net_segment_type
data = cls.connection.post_request("clusters/", data)
return cls.init_with_data(data)
def set(self, data):
if data.get('mode'):
data["mode"] = "ha_compact" \
if data['mode'].lower() == "ha" else "multinode"
return self.connection.put_request(
"clusters/{0}/".format(self.id),
data
)
def update_env(self):
return Task.init_with_data(
self.connection.put_request(
"clusters/{0}/update/".format(self.id),
{}
)
)
def delete(self):
return self.connection.delete_request(
"clusters/{0}/".format(self.id)
)
def assign(self, nodes, roles):
return self.connection.post_request(
"clusters/{0}/assignment/".format(self.id),
[{'id': node.id, 'roles': roles} for node in nodes]
)
def unassign(self, nodes):
return self.connection.post_request(
"clusters/{0}/unassignment/".format(self.id),
[{"id": n} for n in nodes]
)
def get_all_nodes(self):
from .node import Node
return sorted(map(
Node.init_with_data,
self.connection.get_request(
"nodes/?cluster_id={0}".format(self.id)
)
        ), key=attrgetter('id'))
def unassign_all(self):
nodes = self.get_all_nodes()
if not nodes:
raise ActionException(
"Environment with id={0} doesn't have nodes to remove."
.format(self.id)
)
return self.connection.post_request(
"clusters/{0}/unassignment/".format(self.id),
[{"id": n.id} for n in nodes]
)
def deploy_changes(self):
deploy_data = self.connection.put_request(
"clusters/{0}/changes".format(self.id),
{}
)
return DeployTask.init_with_data(deploy_data)
def get_network_data_path(self, directory=os.curdir):
return os.path.join(
os.path.abspath(directory),
"network_{0}".format(self.id)
)
def get_settings_data_path(self, directory=os.curdir):
return os.path.join(
os.path.abspath(directory),
"settings_{0}".format(self.id)
)
def write_network_data(self, network_data, directory=os.curdir,
serializer=None):
return (serializer or self.serializer).write_to_file(
self.get_network_data_path(directory),
network_data
)
def write_settings_data(self, settings_data, directory=os.curdir,
serializer=None):
return (serializer or self.serializer).write_to_file(
self.get_settings_data_path(directory),
settings_data
)
def read_network_data(self, directory=os.curdir,
serializer=None):
network_file_path = self.get_network_data_path(directory)
return (serializer or self.serializer).read_from_file(
network_file_path)
def read_settings_data(self, directory=os.curdir, serializer=None):
settings_file_path = self.get_settings_data_path(directory)
return (serializer or self.serializer).read_from_file(
settings_file_path)
@property
def settings_url(self):
return "clusters/{0}/attributes".format(self.id)
@property
def default_settings_url(self):
return self.settings_url + "/defaults"
@property
def network_url(self):
return "clusters/{id}/network_configuration/{net_provider}".format(
**self.data
)
@property
def network_verification_url(self):
return self.network_url + "/verify"
def get_network_data(self):
return self.connection.get_request(self.network_url)
def get_settings_data(self):
return self.connection.get_request(self.settings_url)
def get_default_settings_data(self):
return self.connection.get_request(self.default_settings_url)
def set_network_data(self, data):
return self.connection.put_request(
self.network_url, data)
def set_settings_data(self, data):
return self.connection.put_request(
self.settings_url, data)
def verify_network(self):
return self.connection.put_request(
self.network_verification_url, self.get_network_data())
def _get_fact_dir_name(self, fact_type, directory=os.path.curdir):
return os.path.join(
os.path.abspath(directory),
"{0}_{1}".format(fact_type, self.id))
def _get_fact_default_url(self, fact_type, nodes=None):
default_url = "clusters/{0}/orchestrator/{1}/defaults".format(
self.id,
fact_type
)
if nodes is not None:
default_url += "/?nodes=" + ",".join(map(str, nodes))
return default_url
def _get_fact_url(self, fact_type, nodes=None):
fact_url = "clusters/{0}/orchestrator/{1}/".format(
self.id,
fact_type
)
if nodes is not None:
fact_url += "/?nodes=" + ",".join(map(str, nodes))
return fact_url
def get_default_facts(self, fact_type, nodes=None):
facts = self.connection.get_request(
self._get_fact_default_url(fact_type, nodes=nodes))
if not facts:
raise ServerDataException(
"There is no {0} info for this "
"environment!".format(fact_type)
)
return facts
def get_facts(self, fact_type, nodes=None):
facts = self.connection.get_request(
self._get_fact_url(fact_type, nodes=nodes))
if not facts:
raise ServerDataException(
"There is no {0} info for this "
"environment!".format(fact_type)
)
return facts
def upload_facts(self, fact_type, facts):
self.connection.put_request(self._get_fact_url(fact_type), facts)
def delete_facts(self, fact_type):
self.connection.delete_request(self._get_fact_url(fact_type))
def read_fact_info(self, fact_type, directory, serializer=None):
return getattr(
self, "read_{0}_info".format(fact_type)
)(fact_type, directory=directory, serializer=serializer)
def write_facts_to_dir(self, fact_type, facts,
directory=os.path.curdir, serializer=None):
dir_name = self._get_fact_dir_name(fact_type, directory=directory)
if os.path.exists(dir_name):
shutil.rmtree(dir_name)
os.makedirs(dir_name)
if isinstance(facts, dict):
engine_file_path = os.path.join(dir_name, "engine")
(serializer or self.serializer).write_to_file(
engine_file_path, facts["engine"])
facts = facts["nodes"]
name_template = u"{name}"
else:
name_template = "{role}_{uid}"
for _fact in facts:
fact_path = os.path.join(
dir_name,
name_template.format(**_fact)
)
(serializer or self.serializer).write_to_file(fact_path, _fact)
return dir_name
def read_deployment_info(self, fact_type,
directory=os.path.curdir, serializer=None):
dir_name = self._get_fact_dir_name(fact_type, directory=directory)
return map(
lambda f: (serializer or self.serializer).read_from_file(f),
[os.path.join(dir_name, json_file)
for json_file in listdir_without_extensions(dir_name)]
)
def read_provisioning_info(self, fact_type,
directory=os.path.curdir, serializer=None):
dir_name = self._get_fact_dir_name(fact_type, directory=directory)
node_facts = map(
lambda f: (serializer or self.serializer).read_from_file(f),
[os.path.join(dir_name, fact_file)
for fact_file in listdir_without_extensions(dir_name)
if "engine" != fact_file]
)
engine = (serializer or self.serializer).read_from_file(
os.path.join(dir_name, "engine"))
return {
"engine": engine,
"nodes": node_facts
}
def get_testsets(self):
return self.connection.get_request(
'testsets/{0}'.format(self.id),
ostf=True
)
@property
def is_customized(self):
data = self.get_fresh_data()
return data["is_customized"]
def is_in_running_test_sets(self, test_set):
return test_set["testset"] in self._test_sets_to_run,
def run_test_sets(self, test_sets_to_run):
self._test_sets_to_run = test_sets_to_run
tests_data = map(
lambda testset: {
"testset": testset,
"metadata": {
"config": {},
"cluster_id": self.id
}
},
test_sets_to_run
)
return self.connection.post_request(
"testruns",
tests_data,
ostf=True
)
def get_state_of_tests(self):
return self.connection.get_request(
"testruns/last/{0}".format(self.id),
ostf=True
)
def stop(self):
return Task.init_with_data(
self.connection.put_request(
"clusters/{0}/stop_deployment/".format(self.id),
{}
)
)
def reset(self):
return Task.init_with_data(
self.connection.put_request(
"clusters/{0}/reset/".format(self.id),
{}
)
)
def _get_method_url(self, method_type, nodes):
return "clusters/{0}/{1}/?nodes={2}".format(
self.id,
method_type,
','.join(map(lambda n: str(n.id), nodes)))
def install_selected_nodes(self, method_type, nodes):
return Task.init_with_data(
self.connection.put_request(
self._get_method_url(method_type, nodes),
{}
)
)
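

# Illustrative sketch (editor's addition, not part of fuelclient): the typical
# call sequence for standing up an environment with the methods defined above.
# The name, release id and roles are hypothetical, and a configured API
# connection is assumed to be available on the class.
def _example_deploy_flow():
    env = Environment.create("demo", release_id=1, net="neutron",
                             net_segment_type="vlan")
    nodes = env.get_all_nodes()
    env.assign(nodes, roles=["controller"])
    return env.deploy_changes()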
|
Mirantis/pumphouse
|
pumphouse/_vendor/fuelclient/objects/environment.py
|
Python
|
apache-2.0
| 11,875
|
#!/usr/bin/env python
import redhawk.common.node as N
import redhawk.common.types as T
import redhawk.utils.util as U
from . import writer
import itertools
import pygraphviz
def WriteToDot(tree):
s = DotWriter()
return s.WriteTree(tree)
def WriteToImage(tree, fmt='png', filename=None):
s = DotWriter()
s.AddTree(tree)
s.Draw(path=filename, fmt=fmt)
return
def EscapeWhitespace(s):
a = s.replace("\n", "\\\\n").replace("\t", "\\\\t")
return a
class DotWriter(writer.Writer):
def __init__(self):
self.node_name_counter = itertools.count(0)
self.graph = pygraphviz.AGraph(directed=True, rankdir='LR')
self.graph.layout(prog='dot')
return
def WriteTree(self, tree):
""" Implementation of the base class method for writing the tree to a
string."""
self.AddTree(tree)
return self.graph.to_string()
def Draw(self, path, fmt='png'):
self.graph.draw(path=path, format=fmt, prog='dot')
return
def AddTree(self, tree):
""" Adds the tree to the graph."""
self.AddASTNodeToGraph(None, tree)
return
def __CreateGraphNode(self, **attrs):
""" Create a graph node with the give attributes."""
node_index = self.node_name_counter.next()
self.graph.add_node(node_index, **attrs)
return node_index
def __CreateGraphNodeFromAST(self, ast_node):
""" Create a Graph Node (with the relevant attributes)
from the ast_node
Return the node index."""
name, attrs = ast_node.GetDotAttributes()
label = [name]
label += ["%s: %s"%(EscapeWhitespace(str(k)), EscapeWhitespace(str(v)))
for (k, v) in attrs.items() if type(v) is str]
if isinstance(ast_node, T.Type):
color = "gray"
fontcolor = "blue"
else:
color = "gray"
fontcolor = "black"
return self.__CreateGraphNode(label = ", ".join(label)
,shape = "box"
,color = color
,fontcolor = fontcolor
,fontname = "Sans"
,fontsize = "10")
def __CreateEmptyGraphNode(self):
""" Create an Empty Node (with style), and return its index."""
return self.__CreateGraphNode(shape='circle',
style='filled',
label="",
height='.1',
width='.1')
def AddASTNodeToGraph(self, parent_index, ast_node):
""" Creates a Graph Node from the given AST node,
marks its parent as the graph node with the given
`parent_index`, and recurses on the given AST
node's children."""
node_index = self.__CreateGraphNodeFromAST(ast_node)
if parent_index is not None:
self.graph.add_edge(parent_index, node_index)
children = ast_node.GetChildren()
for child in children:
if child is None:
continue
if type(child) is list:
empty_node = self.__CreateEmptyGraphNode()
self.graph.add_edge(node_index, empty_node)
map(lambda a: self.AddASTNodeToGraph(empty_node, a),
child)
elif isinstance(child, N.Node):
self.AddASTNodeToGraph(node_index, child)
elif child is None:
continue
else:
raise ValueError("%s's child (type: %s) was supposed to be a Node!\n %s"
%(ast_node.GetName(), type(child), ast_node))
return
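

# Illustrative sketch (editor's addition, not part of redhawk): emitting both
# the dot source and a rendered image for a parsed AST. ``tree`` is assumed to
# be a redhawk node obtained elsewhere; the output path is hypothetical.
def _example_dump(tree, path="ast.png"):
    dot_source = WriteToDot(tree)
    WriteToImage(tree, fmt='png', filename=path)
    return dot_source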
|
JordanMilne/Redhawk
|
redhawk/common/writers/dot_writer.py
|
Python
|
bsd-2-clause
| 3,490
|
# Third-party
import astropy.units as u
import numpy as np
# Project
from ..data import RVData
from ..samples import JokerSamples
from ..samples_analysis import (
MAP_sample, is_P_unimodal, is_P_Kmodal,
max_phase_gap, phase_coverage, periods_spanned)
def test_MAP_sample():
samples = JokerSamples()
samples['ln_likelihood'] = np.array([1., 2, 3.])
samples['ln_prior'] = np.array([1., 2, 3.])
sample = MAP_sample(samples)
assert sample['ln_likelihood'] == 3.
sample, i = MAP_sample(samples, return_index=True)
assert i == 2
def test_is_P_unimodal():
np.random.seed(42)
samples = JokerSamples()
samples['P'] = np.random.normal(8, 1e-3, size=32) * u.day
size = 8
t = 56000 + np.random.uniform(0, 100, size=size)
rv = np.random.normal(0, 10, size=size) * u.km/u.s
rv_err = np.random.uniform(0.1, 0.3, size=size) * u.km/u.s
data = RVData(t=t, rv=rv, rv_err=rv_err)
assert is_P_unimodal(samples, data)
def test_is_P_Kmodal():
np.random.seed(42)
samples = JokerSamples()
samples['P'] = np.concatenate((np.random.normal(8, 1e-3, size=32),
np.random.normal(21, 1e-3, size=32))) * u.day
samples['ln_likelihood'] = np.random.uniform(size=len(samples['P']))
samples['ln_prior'] = np.random.uniform(size=len(samples['P']))
size = 8
t = 56000 + np.random.uniform(0, 100, size=size)
rv = np.random.normal(0, 10, size=size) * u.km/u.s
rv_err = np.random.uniform(0.1, 0.3, size=size) * u.km/u.s
data = RVData(t=t, rv=rv, rv_err=rv_err)
Kmodal, means, npermode = is_P_Kmodal(samples, data, n_clusters=2)
assert Kmodal
assert np.min(np.abs(means - 8.*u.day)) < 0.1*u.day
assert np.min(np.abs(means - 21.*u.day)) < 0.1*u.day
assert np.all(npermode == 32)
def test_max_phase_gap():
samples = JokerSamples()
samples['P'] = 6.243 * np.ones(8) * u.day
phase = np.array([0, 0.1, 0.7, 0.8, 0.9])
t = 56000 + phase * samples['P'][0].value
rv = np.random.normal(0, 10, size=len(phase)) * u.km/u.s
rv_err = np.random.uniform(0.1, 0.3, size=len(phase)) * u.km/u.s
data = RVData(t=t, rv=rv, rv_err=rv_err)
assert np.isclose(max_phase_gap(samples[0], data), 0.6, atol=1e-5)
def test_phase_coverage():
samples = JokerSamples()
samples['P'] = 6.243 * np.ones(8) * u.day
phase = np.array([0, 0.1, 0.7, 0.8, 0.9])
t = 56000 + phase * samples['P'][0].value
rv = np.random.normal(0, 10, size=len(phase)) * u.km/u.s
rv_err = np.random.uniform(0.1, 0.3, size=len(phase)) * u.km/u.s
data = RVData(t=t, rv=rv, rv_err=rv_err)
phasecov = phase_coverage(samples[0], data)
assert phasecov >= 0.4
assert phasecov <= 0.6
def test_periods_spanned():
samples = JokerSamples()
samples['P'] = 6.243 * np.ones(8) * u.day
phase = np.linspace(0, 3.3, 16)
t = 56000 + phase * samples['P'][0].value
rv = np.random.normal(0, 10, size=len(phase)) * u.km/u.s
rv_err = np.random.uniform(0.1, 0.3, size=len(phase)) * u.km/u.s
data = RVData(t=t, rv=rv, rv_err=rv_err)
span = periods_spanned(samples[0], data)
assert np.isclose(span, phase[-1], atol=1e-5)
|
adrn/thejoker
|
thejoker/tests/test_samples_analysis.py
|
Python
|
mit
| 3,194
|
# -*- coding: utf-8 -*-
from django.db import models
from django.contrib.auth.models import User
from users.models import Hijo
class Post(models.Model):
    title = models.TextField(null=True, blank=True)  # Title
    content = models.TextField(null=True, blank=True)  # Content of the post
url = models.URLField(max_length=200, null=True, blank=True)
hijo = models.ForeignKey(Hijo, blank=True, null=True)
date_creation = models.DateTimeField(auto_now_add=True)
published_by = models.ForeignKey(User, blank=True, null=True)
|
pupils/pupils
|
pupils/tablon/models.py
|
Python
|
agpl-3.0
| 566
|
"""
Multi-part parsing for file uploads.
Exposes one class, ``MultiPartParser``, which feeds chunks of uploaded data to
file upload handlers for processing.
"""
import base64
import binascii
import cgi
from urllib.parse import unquote
from django.conf import settings
from django.core.exceptions import (
RequestDataTooBig, SuspiciousMultipartForm, TooManyFieldsSent,
)
from django.core.files.uploadhandler import (
SkipFile, StopFutureHandlers, StopUpload,
)
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_text
from django.utils.text import unescape_entities
__all__ = ('MultiPartParser', 'MultiPartParserError', 'InputStreamExhausted')
class MultiPartParserError(Exception):
pass
class InputStreamExhausted(Exception):
"""
No more reads are allowed from this device.
"""
pass
RAW = "raw"
FILE = "file"
FIELD = "field"
class MultiPartParser:
"""
    An RFC 2388 multipart/form-data parser.
    ``MultiPartParser.parse()`` reads the input stream in ``chunk_size`` chunks
    and returns a tuple of ``(MultiValueDict(POST), MultiValueDict(FILES))``.
"""
def __init__(self, META, input_data, upload_handlers, encoding=None):
"""
Initialize the MultiPartParser object.
:META:
The standard ``META`` dictionary in Django request objects.
:input_data:
The raw post data, as a file-like object.
:upload_handlers:
A list of UploadHandler instances that perform operations on the
uploaded data.
:encoding:
The encoding with which to treat the incoming data.
"""
# Content-Type should contain multipart and the boundary information.
content_type = META.get('CONTENT_TYPE', '')
if not content_type.startswith('multipart/'):
raise MultiPartParserError('Invalid Content-Type: %s' % content_type)
# Parse the header to get the boundary to split the parts.
ctypes, opts = parse_header(content_type.encode('ascii'))
boundary = opts.get('boundary')
if not boundary or not cgi.valid_boundary(boundary):
raise MultiPartParserError('Invalid boundary in multipart: %s' % boundary.decode())
# Content-Length should contain the length of the body we are about
# to receive.
try:
content_length = int(META.get('CONTENT_LENGTH', 0))
except (ValueError, TypeError):
content_length = 0
if content_length < 0:
# This means we shouldn't continue...raise an error.
raise MultiPartParserError("Invalid content length: %r" % content_length)
if isinstance(boundary, str):
boundary = boundary.encode('ascii')
self._boundary = boundary
self._input_data = input_data
# For compatibility with low-level network APIs (with 32-bit integers),
# the chunk size should be < 2^31, but still divisible by 4.
possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size]
self._chunk_size = min([2 ** 31 - 4] + possible_sizes)
self._meta = META
self._encoding = encoding or settings.DEFAULT_CHARSET
self._content_length = content_length
self._upload_handlers = upload_handlers
def parse(self):
"""
Parse the POST data and break it into a FILES MultiValueDict and a POST
MultiValueDict.
Return a tuple containing the POST and FILES dictionary, respectively.
"""
from django.http import QueryDict
encoding = self._encoding
handlers = self._upload_handlers
# HTTP spec says that Content-Length >= 0 is valid
# handling content-length == 0 before continuing
if self._content_length == 0:
return QueryDict(encoding=self._encoding), MultiValueDict()
# See if any of the handlers take care of the parsing.
# This allows overriding everything if need be.
for handler in handlers:
result = handler.handle_raw_input(
self._input_data,
self._meta,
self._content_length,
self._boundary,
encoding,
)
# Check to see if it was handled
if result is not None:
return result[0], result[1]
# Create the data structures to be used later.
self._post = QueryDict(mutable=True)
self._files = MultiValueDict()
# Instantiate the parser and stream:
stream = LazyStream(ChunkIter(self._input_data, self._chunk_size))
# Whether or not to signal a file-completion at the beginning of the loop.
old_field_name = None
counters = [0] * len(handlers)
# Number of bytes that have been read.
num_bytes_read = 0
# To count the number of keys in the request.
num_post_keys = 0
# To limit the amount of data read from the request.
read_size = None
try:
for item_type, meta_data, field_stream in Parser(stream, self._boundary):
if old_field_name:
# We run this at the beginning of the next loop
# since we cannot be sure a file is complete until
# we hit the next boundary/part of the multipart content.
self.handle_file_complete(old_field_name, counters)
old_field_name = None
try:
disposition = meta_data['content-disposition'][1]
field_name = disposition['name'].strip()
except (KeyError, IndexError, AttributeError):
continue
transfer_encoding = meta_data.get('content-transfer-encoding')
if transfer_encoding is not None:
transfer_encoding = transfer_encoding[0].strip()
field_name = force_text(field_name, encoding, errors='replace')
if item_type == FIELD:
# Avoid storing more than DATA_UPLOAD_MAX_NUMBER_FIELDS.
num_post_keys += 1
if (settings.DATA_UPLOAD_MAX_NUMBER_FIELDS is not None and
settings.DATA_UPLOAD_MAX_NUMBER_FIELDS < num_post_keys):
raise TooManyFieldsSent(
'The number of GET/POST parameters exceeded '
'settings.DATA_UPLOAD_MAX_NUMBER_FIELDS.'
)
# Avoid reading more than DATA_UPLOAD_MAX_MEMORY_SIZE.
if settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None:
read_size = settings.DATA_UPLOAD_MAX_MEMORY_SIZE - num_bytes_read
# This is a post field, we can just set it in the post
if transfer_encoding == 'base64':
raw_data = field_stream.read(size=read_size)
num_bytes_read += len(raw_data)
try:
data = base64.b64decode(raw_data)
except binascii.Error:
data = raw_data
else:
data = field_stream.read(size=read_size)
num_bytes_read += len(data)
# Add two here to make the check consistent with the
# x-www-form-urlencoded check that includes '&='.
num_bytes_read += len(field_name) + 2
if (settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None and
num_bytes_read > settings.DATA_UPLOAD_MAX_MEMORY_SIZE):
raise RequestDataTooBig('Request body exceeded settings.DATA_UPLOAD_MAX_MEMORY_SIZE.')
self._post.appendlist(field_name, force_text(data, encoding, errors='replace'))
elif item_type == FILE:
# This is a file, use the handler...
file_name = disposition.get('filename')
if file_name:
file_name = force_text(file_name, encoding, errors='replace')
file_name = self.IE_sanitize(unescape_entities(file_name))
if not file_name:
continue
content_type, content_type_extra = meta_data.get('content-type', ('', {}))
content_type = content_type.strip()
charset = content_type_extra.get('charset')
try:
content_length = int(meta_data.get('content-length')[0])
except (IndexError, TypeError, ValueError):
content_length = None
counters = [0] * len(handlers)
try:
for handler in handlers:
try:
handler.new_file(
field_name, file_name, content_type,
content_length, charset, content_type_extra,
)
except StopFutureHandlers:
break
for chunk in field_stream:
if transfer_encoding == 'base64':
# We only special-case base64 transfer encoding
# We should always decode base64 chunks by multiple of 4,
# ignoring whitespace.
stripped_chunk = b"".join(chunk.split())
remaining = len(stripped_chunk) % 4
while remaining != 0:
over_chunk = field_stream.read(4 - remaining)
stripped_chunk += b"".join(over_chunk.split())
remaining = len(stripped_chunk) % 4
try:
chunk = base64.b64decode(stripped_chunk)
except Exception as exc:
# Since this is only a chunk, any error is an unfixable error.
raise MultiPartParserError("Could not decode base64 data.") from exc
for i, handler in enumerate(handlers):
chunk_length = len(chunk)
chunk = handler.receive_data_chunk(chunk, counters[i])
counters[i] += chunk_length
if chunk is None:
# Don't continue if the chunk received by
# the handler is None.
break
except SkipFile:
self._close_files()
# Just use up the rest of this file...
exhaust(field_stream)
else:
# Handle file upload completions on next iteration.
old_field_name = field_name
else:
# If this is neither a FIELD or a FILE, just exhaust the stream.
exhaust(stream)
except StopUpload as e:
self._close_files()
if not e.connection_reset:
exhaust(self._input_data)
else:
# Make sure that the request data is all fed
exhaust(self._input_data)
# Signal that the upload has completed.
# any() shortcircuits if a handler's upload_complete() returns a value.
any(handler.upload_complete() for handler in handlers)
self._post._mutable = False
return self._post, self._files
def handle_file_complete(self, old_field_name, counters):
"""
Handle all the signaling that takes place when a file is complete.
"""
for i, handler in enumerate(self._upload_handlers):
file_obj = handler.file_complete(counters[i])
if file_obj:
# If it returns a file object, then set the files dict.
self._files.appendlist(force_text(old_field_name, self._encoding, errors='replace'), file_obj)
break
def IE_sanitize(self, filename):
"""Cleanup filename from Internet Explorer full paths."""
return filename and filename[filename.rfind("\\") + 1:].strip()
def _close_files(self):
# Free up all file handles.
# FIXME: this currently assumes that upload handlers store the file as 'file'
# We should document that... (Maybe add handler.free_file to complement new_file)
for handler in self._upload_handlers:
if hasattr(handler, 'file'):
handler.file.close()
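

# Illustrative sketch (editor's addition, not part of Django): MultiPartParser
# is normally driven by HttpRequest internally; manual use looks roughly like
# this. The META values, boundary and handler list are hypothetical.
def _example_manual_parse(raw_body_file, upload_handlers):
    meta = {
        'CONTENT_TYPE': 'multipart/form-data; boundary=BoUnDaRy',
        'CONTENT_LENGTH': '1024',
    }
    parser = MultiPartParser(meta, raw_body_file, upload_handlers)
    post, files = parser.parse()
    return post, files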
class LazyStream:
"""
The LazyStream wrapper allows one to get and "unget" bytes from a stream.
Given a producer object (an iterator that yields bytestrings), the
LazyStream object will support iteration, reading, and keeping a "look-back"
variable in case you need to "unget" some bytes.
"""
def __init__(self, producer, length=None):
"""
Every LazyStream must have a producer when instantiated.
A producer is an iterable that returns a string each time it
is called.
"""
self._producer = producer
self._empty = False
self._leftover = b''
self.length = length
self.position = 0
self._remaining = length
self._unget_history = []
def tell(self):
return self.position
def read(self, size=None):
def parts():
remaining = self._remaining if size is None else size
# do the whole thing in one shot if no limit was provided.
if remaining is None:
yield b''.join(self)
return
# otherwise do some bookkeeping to return exactly enough
# of the stream and stashing any extra content we get from
# the producer
while remaining != 0:
assert remaining > 0, 'remaining bytes to read should never go negative'
try:
chunk = next(self)
except StopIteration:
return
else:
emitting = chunk[:remaining]
self.unget(chunk[remaining:])
remaining -= len(emitting)
yield emitting
out = b''.join(parts())
return out
def __next__(self):
"""
Used when the exact number of bytes to read is unimportant.
Return whatever chunk is conveniently returned from the iterator.
Useful to avoid unnecessary bookkeeping if performance is an issue.
"""
if self._leftover:
output = self._leftover
self._leftover = b''
else:
output = next(self._producer)
self._unget_history = []
self.position += len(output)
return output
def close(self):
"""
Used to invalidate/disable this lazy stream.
Replace the producer with an empty list. Any leftover bytes that have
already been read will still be reported upon read() and/or next().
"""
self._producer = []
def __iter__(self):
return self
def unget(self, bytes):
"""
Place bytes back onto the front of the lazy stream.
Future calls to read() will return those bytes first. The
stream position and thus tell() will be rewound.
"""
if not bytes:
return
self._update_unget_history(len(bytes))
self.position -= len(bytes)
self._leftover = bytes + self._leftover
def _update_unget_history(self, num_bytes):
"""
Update the unget history as a sanity check to see if we've pushed
back the same number of bytes in one chunk. If we keep ungetting the
        same number of bytes many times (here, 50), we're most likely in an
infinite loop of some sort. This is usually caused by a
maliciously-malformed MIME request.
"""
self._unget_history = [num_bytes] + self._unget_history[:49]
number_equal = len([
current_number for current_number in self._unget_history
if current_number == num_bytes
])
if number_equal > 40:
raise SuspiciousMultipartForm(
"The multipart parser got stuck, which shouldn't happen with"
" normal uploaded files. Check for malicious upload activity;"
" if there is none, report this to the Django developers."
)
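

# Illustrative sketch (editor's addition, not part of Django): how LazyStream's
# read()/unget() interact. The byte strings below are arbitrary sample data.
def _example_lazystream():
    stream = LazyStream(iter([b'abcdef', b'ghi']))
    first = stream.read(4)   # b'abcd'; the unread b'ef' is pushed back internally
    stream.unget(b'cd')      # put two bytes back on the front of the stream
    rest = stream.read()     # b'cdefghi'
    return first, rest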
class ChunkIter:
"""
An iterable that will yield chunks of data. Given a file-like object as the
constructor, yield chunks of read operations from that object.
"""
def __init__(self, flo, chunk_size=64 * 1024):
self.flo = flo
self.chunk_size = chunk_size
def __next__(self):
try:
data = self.flo.read(self.chunk_size)
except InputStreamExhausted:
raise StopIteration()
if data:
return data
else:
raise StopIteration()
def __iter__(self):
return self
class InterBoundaryIter:
"""
A Producer that will iterate over boundaries.
"""
def __init__(self, stream, boundary):
self._stream = stream
self._boundary = boundary
def __iter__(self):
return self
def __next__(self):
try:
return LazyStream(BoundaryIter(self._stream, self._boundary))
except InputStreamExhausted:
raise StopIteration()
class BoundaryIter:
"""
A Producer that is sensitive to boundaries.
Will happily yield bytes until a boundary is found. Will yield the bytes
before the boundary, throw away the boundary bytes themselves, and push the
post-boundary bytes back on the stream.
The future calls to next() after locating the boundary will raise a
StopIteration exception.
"""
def __init__(self, stream, boundary):
self._stream = stream
self._boundary = boundary
self._done = False
# rollback an additional six bytes because the format is like
# this: CRLF<boundary>[--CRLF]
self._rollback = len(boundary) + 6
# Try to use mx fast string search if available. Otherwise
# use Python find. Wrap the latter for consistency.
unused_char = self._stream.read(1)
if not unused_char:
raise InputStreamExhausted()
self._stream.unget(unused_char)
def __iter__(self):
return self
def __next__(self):
if self._done:
raise StopIteration()
stream = self._stream
rollback = self._rollback
bytes_read = 0
chunks = []
for bytes in stream:
bytes_read += len(bytes)
chunks.append(bytes)
if bytes_read > rollback:
break
if not bytes:
break
else:
self._done = True
if not chunks:
raise StopIteration()
chunk = b''.join(chunks)
boundary = self._find_boundary(chunk)
if boundary:
end, next = boundary
stream.unget(chunk[next:])
self._done = True
return chunk[:end]
else:
# make sure we don't treat a partial boundary (and
# its separators) as data
if not chunk[:-rollback]: # and len(chunk) >= (len(self._boundary) + 6):
# There's nothing left, we should just return and mark as done.
self._done = True
return chunk
else:
stream.unget(chunk[-rollback:])
return chunk[:-rollback]
def _find_boundary(self, data):
"""
Find a multipart boundary in data.
Should no boundary exist in the data, return None. Otherwise, return
a tuple containing the indices of the following:
* the end of current encapsulation
* the start of the next encapsulation
"""
index = data.find(self._boundary)
if index < 0:
return None
else:
end = index
next = index + len(self._boundary)
# backup over CRLF
last = max(0, end - 1)
if data[last:last + 1] == b'\n':
end -= 1
last = max(0, end - 1)
if data[last:last + 1] == b'\r':
end -= 1
return end, next
def exhaust(stream_or_iterable):
"""Exhaust an iterator or stream."""
try:
iterator = iter(stream_or_iterable)
except TypeError:
iterator = ChunkIter(stream_or_iterable, 16384)
for __ in iterator:
pass
def parse_boundary_stream(stream, max_header_size):
"""
Parse one and exactly one stream that encapsulates a boundary.
"""
# Stream at beginning of header, look for end of header
# and parse it if found. The header must fit within one
# chunk.
chunk = stream.read(max_header_size)
# 'find' returns the top of these four bytes, so we'll
# need to munch them later to prevent them from polluting
# the payload.
header_end = chunk.find(b'\r\n\r\n')
def _parse_header(line):
main_value_pair, params = parse_header(line)
try:
name, value = main_value_pair.split(':', 1)
except ValueError:
raise ValueError("Invalid header: %r" % line)
return name, (value, params)
if header_end == -1:
# we find no header, so we just mark this fact and pass on
# the stream verbatim
stream.unget(chunk)
return (RAW, {}, stream)
header = chunk[:header_end]
# here we place any excess chunk back onto the stream, as
# well as throwing away the CRLFCRLF bytes from above.
stream.unget(chunk[header_end + 4:])
TYPE = RAW
outdict = {}
# Eliminate blank lines
for line in header.split(b'\r\n'):
# This terminology ("main value" and "dictionary of
# parameters") is from the Python docs.
try:
name, (value, params) = _parse_header(line)
except ValueError:
continue
if name == 'content-disposition':
TYPE = FIELD
if params.get('filename'):
TYPE = FILE
outdict[name] = value, params
if TYPE == RAW:
stream.unget(chunk)
return (TYPE, outdict, stream)
class Parser:
def __init__(self, stream, boundary):
self._stream = stream
self._separator = b'--' + boundary
def __iter__(self):
boundarystream = InterBoundaryIter(self._stream, self._separator)
for sub_stream in boundarystream:
# Iterate over each part
yield parse_boundary_stream(sub_stream, 1024)
def parse_header(line):
"""
Parse the header into a key-value.
Input (line): bytes, output: str for key/name, bytes for values which
will be decoded later.
"""
plist = _parse_header_params(b';' + line)
key = plist.pop(0).lower().decode('ascii')
pdict = {}
for p in plist:
i = p.find(b'=')
if i >= 0:
has_encoding = False
name = p[:i].strip().lower().decode('ascii')
if name.endswith('*'):
# Lang/encoding embedded in the value (like "filename*=UTF-8''file.ext")
# http://tools.ietf.org/html/rfc2231#section-4
name = name[:-1]
if p.count(b"'") == 2:
has_encoding = True
value = p[i + 1:].strip()
if has_encoding:
encoding, lang, value = value.split(b"'")
value = unquote(value.decode(), encoding=encoding.decode())
if len(value) >= 2 and value[:1] == value[-1:] == b'"':
value = value[1:-1]
value = value.replace(b'\\\\', b'\\').replace(b'\\"', b'"')
pdict[name] = value
return key, pdict
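

# Illustrative sketch (editor's addition, not part of Django): parse_header()
# takes a raw header value as bytes and returns a str key plus a dict of byte
# string parameters. The sample header below is hypothetical.
def _example_parse_header():
    key, params = parse_header(b'form-data; name="avatar"; filename="me.png"')
    # key == 'form-data'; params['name'] == b'avatar'; params['filename'] == b'me.png'
    return key, params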
def _parse_header_params(s):
plist = []
while s[:1] == b';':
s = s[1:]
end = s.find(b';')
while end > 0 and s.count(b'"', 0, end) % 2:
end = s.find(b';', end + 1)
if end < 0:
end = len(s)
f = s[:end]
plist.append(f.strip())
s = s[end:]
return plist
|
sametmax/Django--an-app-at-a-time
|
ignore_this_directory/django/http/multipartparser.py
|
Python
|
mit
| 24,849
|
###########################################################################
# (C) Vrije Universiteit, Amsterdam (the Netherlands) #
# #
# This file is part of AmCAT - The Amsterdam Content Analysis Toolkit #
# #
# AmCAT is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Affero General Public License as published by the #
# Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# AmCAT is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public #
# License for more details. #
# #
# You should have received a copy of the GNU Affero General Public #
# License along with AmCAT. If not, see <http://www.gnu.org/licenses/>. #
###########################################################################
from datetime import datetime
from rest_framework import serializers, permissions, exceptions, status
from rest_framework.viewsets import ModelViewSet, ViewSetMixin
import settings
from amcat.models import Project, Role
from amcat.tools.caching import cached
from api.rest.mixins import DatatablesMixin
from api.rest.serializer import AmCATProjectModelSerializer
from amcat.models.authorisation import (ROLE_PROJECT_READER, ROLE_PROJECT_WRITER,
ROLE_PROJECT_ADMIN, ROLE_PROJECT_METAREADER)
import logging
from api.rest.viewset import AmCATViewSetMixin
log = logging.getLogger(__name__)
__all__ = ("CannotEditLinkedResource", "NotFoundInProject", "ProjectPermission",
"ProjectViewSetMixin", "ProjectSerializer", "ProjectViewSet")
_DEFAULT_PERMISSION_MAP = {
'OPTIONS' : True,
'HEAD' : True,
'GET' : ROLE_PROJECT_METAREADER,
'POST' : ROLE_PROJECT_WRITER,
'PUT' : False,
'PATCH' : ROLE_PROJECT_ADMIN,
'DELETE' : ROLE_PROJECT_ADMIN,
}
class CannotEditLinkedResource(exceptions.PermissionDenied):
default_detail = 'Cannot modify a linked resource, please edit via the owning project'
class NotFoundInProject(exceptions.APIException):
status_code = status.HTTP_404_NOT_FOUND
default_detail = 'The requested resource does not exist in the given project'
def __init__(self, detail=None):
self.detail = detail or self.default_detail
class OptionalIsAuthenticated(permissions.IsAuthenticated):
def has_permission(self, request, view):
if not settings.amcat_config.get('auth', 'require_login'):
return True
return super().has_permission(request, view)
class ProjectPermission(permissions.BasePermission):
"""
Checks permissions based on the user's project role
    Uses view.permission_map, defaulting to _DEFAULT_PERMISSION_MAP
Assumes .project is defined on the view
"""
def has_permission(self, request, view):
# When viewing project lists, no project is in context
if view.project is None:
return True
user = request.user if request.user.is_authenticated() else None
if user and user.is_superuser:
return True
required_role_id = view.required_role_id(request)
if required_role_id in (True, False):
return required_role_id
actual_role_id = view.project.get_role_id(user=user)
if actual_role_id is None or actual_role_id < required_role_id:
log.warning("User {user} has role {actual_role_id} < {required_role_id}".format(**locals()))
if actual_role_id is None:
return False
return actual_role_id >= required_role_id
class ProjectSerializer(AmCATProjectModelSerializer):
"""
    This serializer includes another boolean field `favourite` which is True
when the serialized project is in request.user.user_profile.favourite_projects.
"""
favourite = serializers.SerializerMethodField("is_favourite")
last_visited_at = serializers.SerializerMethodField("project_visited_at", allow_null=True)
display_columns = serializers.SerializerMethodField()
@property
@cached
def favourite_projects(self):
"""List of id's of all favourited projects by the currently logged in user"""
user = self.context['request'].user
if user.is_anonymous():
return set()
else:
return set(self.context['request'].user.userprofile
.favourite_projects.values_list("id", flat=True))
def is_favourite(self, project):
if project is None: return
return project.id in self.favourite_projects
def get_display_columns(self, project):
if project is None:
return
return project.display_columns
@property
@cached
def project_visited_dates(self):
user = self.context['request'].user
if user.is_anonymous():
return dict()
return dict((rp.project, rp.format_date_visited_as_delta()) for rp in user.userprofile.get_recent_projects())
def project_visited_at(self, project):
return self.project_visited_dates.get(project, "Never")
def get_fields(self):
fields = super(ProjectSerializer, self).get_fields()
#push last_visited_at on top for better column ordering
fields['last_visited_at'] = fields.pop('last_visited_at')
return fields
class Meta:
model = Project
fields = '__all__'
class ProjectViewSetMixin(AmCATViewSetMixin):
permission_classes = (OptionalIsAuthenticated, ProjectPermission)
serializer_class = ProjectSerializer
model_key = "project"
queryset = Project.objects.all()
def required_role_id(self, request):
required_role_id = getattr(self, 'permission_map', {}).get(request.method.upper())
if required_role_id is None:
required_role_id = _DEFAULT_PERMISSION_MAP[request.method.upper()]
return required_role_id
class ProjectViewSet(ProjectViewSetMixin, DatatablesMixin, ModelViewSet):
model = Project
ignore_filters = ('display_columns',)
@property
def project(self):
if 'pk' in self.kwargs:
return Project.objects.get(pk=self.kwargs['pk'])
else:
return None # no permissions needed. Not a very elegant signal?
def filter_queryset(self, queryset):
qs = super(ProjectViewSet, self).filter_queryset(queryset)
role = Role.objects.get(label="metareader")
if self.request.user.is_anonymous():
return qs.filter(guest_role__id__gte=role.id)
else:
return qs.filter(id__in=self.request.user.userprofile.get_projects(role))
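

# Illustrative sketch (editor's addition, not part of AmCAT): a project-scoped
# viewset can tighten or loosen access per HTTP method by setting
# ``permission_map``; unlisted methods fall back to _DEFAULT_PERMISSION_MAP.
# The class name and role choices below are hypothetical.
class ExampleProjectScopedViewSet(ProjectViewSetMixin, DatatablesMixin, ModelViewSet):
    permission_map = {
        'GET': ROLE_PROJECT_READER,  # require full reader rather than metareader
        'DELETE': False,             # never allow DELETE through this endpoint
    }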
|
amcat/amcat
|
api/rest/viewsets/project.py
|
Python
|
agpl-3.0
| 7,130
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: identities.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='identities.proto',
package='msp',
syntax='proto3',
serialized_pb=_b('\n\x10identities.proto\x12\x03msp\"4\n\x12SerializedIdentity\x12\r\n\x05Mspid\x18\x01 \x01(\t\x12\x0f\n\x07IdBytes\x18\x02 \x01(\x0c\x42#Z!github.com/hyperledger/fabric/mspb\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_SERIALIZEDIDENTITY = _descriptor.Descriptor(
name='SerializedIdentity',
full_name='msp.SerializedIdentity',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='Mspid', full_name='msp.SerializedIdentity.Mspid', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='IdBytes', full_name='msp.SerializedIdentity.IdBytes', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=25,
serialized_end=77,
)
DESCRIPTOR.message_types_by_name['SerializedIdentity'] = _SERIALIZEDIDENTITY
SerializedIdentity = _reflection.GeneratedProtocolMessageType('SerializedIdentity', (_message.Message,), dict(
DESCRIPTOR = _SERIALIZEDIDENTITY,
__module__ = 'identities_pb2'
# @@protoc_insertion_point(class_scope:msp.SerializedIdentity)
))
_sym_db.RegisterMessage(SerializedIdentity)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('Z!github.com/hyperledger/fabric/msp'))
try:
# THESE ELEMENTS WILL BE DEPRECATED.
# Please use the generated *_pb2_grpc.py files instead.
import grpc
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
except ImportError:
pass
# @@protoc_insertion_point(module_scope)
|
rameshbabu79/fabric
|
bddtests/identities_pb2.py
|
Python
|
apache-2.0
| 2,901
|
# -*- coding: utf-8 -*-
"""Tests del modulo pydatajson."""
from __future__ import print_function, unicode_literals, with_statement
import os.path
import requests_mock
import vcr
from nose.tools import assert_true, assert_false, assert_equal, \
assert_not_in, assert_in
from pydatajson.indicators import _eventual_periodicity
try:
import mock
except ImportError:
from unittest import mock
from pydatajson.core import DataJson
from pydatajson import readers
from .support.decorators import RESULTS_DIR
my_vcr = vcr.VCR(
path_transformer=vcr.VCR.ensure_suffix('.yaml'),
cassette_library_dir=os.path.join(
"tests",
"cassetes",
"indicators"),
record_mode='once')
class TestIndicatorsTestCase(object):
SAMPLES_DIR = os.path.join("tests", "samples")
RESULTS_DIR = RESULTS_DIR
TEMP_DIR = os.path.join("tests", "temp")
@classmethod
def get_sample(cls, sample_filename):
return os.path.join(cls.SAMPLES_DIR, sample_filename)
@classmethod
def setUp(cls):
cls.dj = DataJson(cls.get_sample("full_data.json"))
cls.catalog = readers.read_catalog(
cls.get_sample("full_data.json"))
cls.maxDiff = None
cls.longMessage = True
cls.requests_mock = requests_mock.Mocker()
cls.requests_mock.start()
cls.requests_mock.get(requests_mock.ANY, real_http=True)
cls.requests_mock.head(requests_mock.ANY, status_code=200)
@classmethod
def tearDown(cls):
del (cls.dj)
cls.requests_mock.stop()
@my_vcr.use_cassette()
def test_generate_catalog_indicators(self):
catalog = os.path.join(self.SAMPLES_DIR, "several_datasets.json")
indicators = self.dj.generate_catalogs_indicators(catalog)[0][0]
# Resultados esperados haciendo cuentas manuales sobre el catálogo
expected = {
'identifier': '7d4d816f-3a40-476e-ab71-d48a3f0eb3c8',
'title': 'Cosechando Datos Argentina',
'datasets_cant': 3,
'distribuciones_cant': 6,
'datasets_meta_ok_cant': 2,
'datasets_meta_error_cant': 1,
'datasets_meta_ok_pct': 0.6667,
'datasets_con_datos_cant': 2,
'datasets_sin_datos_cant': 1,
'datasets_con_datos_pct': 0.6667,
}
for k, v in expected.items():
assert_equal(indicators[k], v)
@my_vcr.use_cassette()
def test_date_indicators(self):
from datetime import datetime
catalog = os.path.join(self.SAMPLES_DIR, "several_datasets.json")
indicators = self.dj.generate_catalogs_indicators(catalog)[0][0]
dias_diff = (datetime.now() - datetime(2016, 4, 14)).days
expected = {
'catalogo_ultima_actualizacion_dias': dias_diff,
'datasets_actualizados_cant': 1,
'datasets_desactualizados_cant': 2,
'datasets_actualizados_pct': 0.3333,
'datasets_frecuencia_cant': {
'R/P1W': 1,
'R/P1M': 1,
'EVENTUAL': 1
},
}
for k, v in expected.items():
assert_equal(indicators[k], v)
@my_vcr.use_cassette()
def test_format_indicators(self):
catalog = os.path.join(self.SAMPLES_DIR, "several_datasets.json")
indicators = self.dj.generate_catalogs_indicators(catalog)[0][0]
expected = {
'distribuciones_formatos_cant': {
'CSV': 1,
'XLSX': 1,
'PDF': 1,
'NONE': 3
}
}
for k, v in expected.items():
assert_equal(indicators[k], v)
@my_vcr.use_cassette()
def test_license_indicators(self):
catalog = os.path.join(
self.SAMPLES_DIR,
"several_datasets_with_licenses.json")
indicators = self.dj.generate_catalogs_indicators(catalog)[0][0]
expected = {
'datasets_licencias_cant': {
'Open Data Commons Open Database License 1.0': 1,
'Creative Commons Attribution': 1,
'None': 1
}
}
for k, v in expected.items():
assert_equal(indicators[k], v)
@my_vcr.use_cassette()
def test_no_licenses_indicators(self):
# No tienen licencias
catalog = os.path.join(
self.SAMPLES_DIR,
"several_datasets_for_harvest.json")
indicators = self.dj.generate_catalogs_indicators(catalog)[0][0]
assert_equal(indicators['datasets_licencias_cant'], {'None': 3})
@my_vcr.use_cassette()
def test_field_indicators_on_min_catalog(self):
catalog = os.path.join(self.SAMPLES_DIR, "minimum_data.json")
# Se espera un único catálogo como resultado, índice 0
indicators = self.dj.generate_catalogs_indicators(catalog)[0][0]
expected = {
'campos_recomendados_pct': 0.0,
'campos_optativos_pct': 0.0,
}
for k, v in expected.items():
assert_equal(indicators[k], v)
@my_vcr.use_cassette()
def test_field_indicators_on_full_catalog(self):
catalog = os.path.join(self.SAMPLES_DIR, "full_data.json")
# Se espera un único catálogo como resultado, índice 0
indicators = self.dj.generate_catalogs_indicators(catalog)[0][0]
expected = {
'campos_recomendados_pct': 0.9545,
'campos_optativos_pct': 1.0000
}
for k, v in expected.items():
assert_equal(indicators[k], v)
@my_vcr.use_cassette()
def test_federation_indicators_same_catalog(self):
catalog = os.path.join(self.SAMPLES_DIR, "several_datasets.json")
indicators = self.dj.generate_catalogs_indicators(catalog, catalog)[1]
# Esperado: todos los datasets están federados
expected = {
'datasets_federados_cant': 3,
'datasets_no_federados_cant': 0,
'datasets_no_federados': [],
'datasets_federados_pct': 1.0000,
'distribuciones_federadas_cant': 6
}
for k, v in expected.items():
assert_equal(indicators[k], v)
@my_vcr.use_cassette()
def test_federation_indicators_no_datasets(self):
catalog = os.path.join(self.SAMPLES_DIR, "several_datasets.json")
central = os.path.join(self.SAMPLES_DIR, "catalogo_justicia.json")
indicators = self.dj.generate_catalogs_indicators(catalog, central)[1]
# Esperado: ningún dataset está federado
expected = {
'datasets_federados_cant': 0,
'datasets_no_federados_cant': 3,
'datasets_no_federados': [
('Sistema de contrataciones electrónicas UNO', None),
('Sistema de contrataciones electrónicas DOS', None),
('Sistema de contrataciones electrónicas TRES', None)],
'datasets_federados_pct': 0.00,
'distribuciones_federadas_cant': 0
}
for k, v in expected.items():
assert_equal(indicators[k], v)
@my_vcr.use_cassette()
def test_federation_indicators_removed_datasets(self):
# CASO 1
# se buscan los datasets federados en el central que fueron eliminados
# en el específico pero no se encuentran porque el publisher.name no
# tiene publicado ningún otro dataset en el catálogo específico
catalog = os.path.join(
self.SAMPLES_DIR, "catalogo_justicia_removed.json"
)
central = os.path.join(self.SAMPLES_DIR, "catalogo_justicia.json")
indicators = self.dj.generate_catalogs_indicators(catalog, central)[1]
# Esperado: no se encuentra el dataset removido, porque el
# publisher.name no existe en ningún otro dataset
expected = {
"datasets_federados_eliminados_cant": 0,
"datasets_federados_eliminados": []
}
for k, v in expected.items():
assert_equal(indicators[k], v)
# CASO 2
# se buscan los datasets federados en el central que fueron eliminados
# en el específico y se encuentran porque el publisher.name tiene
# publicado otro dataset en el catálogo específico
catalog = os.path.join(
self.SAMPLES_DIR, "catalogo_justicia_removed_publisher.json"
)
indicators = self.dj.generate_catalogs_indicators(catalog, central)[1]
# Esperado: no se encuentra el dataset removido, porque el
# publisher.name no existe en ningún otro dataset
expected = {
"datasets_federados_eliminados_cant": 1,
"datasets_federados_eliminados": [
('Base de datos legislativos Infoleg',
"http://datos.jus.gob.ar/dataset/base-de-datos"
"-legislativos-infoleg")]}
for k, v in expected.items():
assert_equal(indicators[k], v)
@my_vcr.use_cassette()
def test_network_indicators(self):
one_catalog = os.path.join(self.SAMPLES_DIR, "several_datasets.json")
other_catalog = os.path.join(self.SAMPLES_DIR, "full_data.json")
indicators, network_indicators = self.dj.generate_catalogs_indicators([
one_catalog,
other_catalog,
], broken_links=True)
# Esperado: suma de los indicadores individuales
# No se testean los indicadores de actualización porque las fechas no
# se mantienen actualizadas
expected = {
'catalogos_cant': 2,
'datasets_cant': 5,
'distribuciones_cant': 8,
'datasets_meta_ok_cant': 4,
'datasets_meta_error_cant': 1,
'datasets_meta_ok_pct': 0.8000,
'datasets_con_datos_cant': 3,
'datasets_sin_datos_cant': 2,
'datasets_con_datos_pct': 0.6000,
'distribuciones_formatos_cant': {
'CSV': 2,
'XLSX': 1,
'PDF': 2,
'NONE': 3
},
'distribuciones_tipos_cant': {
'file': 1,
'documentation': 1,
'None': 6
},
'datasets_licencias_cant': {
'Open Data Commons Open Database License 1.0': 2,
'None': 3
},
'campos_optativos_pct': 0.3256,
'campos_recomendados_pct': 0.5072,
'distribuciones_download_url_ok_cant': 6,
'distribuciones_download_url_error_cant': 2,
'distribuciones_download_url_ok_pct': 0.75
}
for k, v in expected.items():
assert_equal(network_indicators[k], v)
@my_vcr.use_cassette()
def test_network_license_indicators(self):
one_catalog = os.path.join(
self.SAMPLES_DIR,
"several_datasets_with_licenses.json")
other_catalog = os.path.join(self.SAMPLES_DIR, "full_data.json")
indicators, network_indicators = self.dj.generate_catalogs_indicators([
one_catalog,
other_catalog
])
# Esperado: 2 ODbL en full, 1 en several
# 1 Creative Commons en several
# 1 Dataset en several sin licencias
expected = {
'catalogos_cant': 2,
'datasets_cant': 5,
'datasets_licencias_cant': {
'Open Data Commons Open Database License 1.0': 3,
'Creative Commons Attribution': 1,
'None': 1
},
}
for k, v in expected.items():
assert_equal(network_indicators[k], v)
@my_vcr.use_cassette()
def test_network_type_indicators(self):
one_catalog = os.path.join(
self.SAMPLES_DIR,
"several_datasets_with_types.json")
other_catalog = os.path.join(self.SAMPLES_DIR, "full_data.json")
indicators, network_indicators = self.dj.generate_catalogs_indicators([
one_catalog,
other_catalog
])
# Esperado: 1 file en full, 1 en several
# 1 file.upload en several
# 1 documentation en full, 1 en several
# 2 api en several
# 1 distribucion en several sin tipo
expected = {
'catalogos_cant': 2,
'distribuciones_cant': 8,
'distribuciones_tipos_cant': {
'file': 2,
'file.upload': 1,
'documentation': 2,
'api': 2,
'None': 1,
}
}
for k, v in expected.items():
assert_equal(network_indicators[k], v, k)
@my_vcr.use_cassette()
def test_types_indicators(self):
catalog = os.path.join(
self.SAMPLES_DIR,
"several_datasets_with_types.json")
indicators = self.dj.generate_catalogs_indicators(catalog)[0][0]
expected = {
'distribuciones_tipos_cant': {
'file': 1,
'file.upload': 1,
'documentation': 1,
'api': 2,
'None': 1
}
}
for k, v in expected.items():
assert_equal(indicators[k], v)
def test_network_federation_indicators(self):
one_catalog = os.path.join(self.SAMPLES_DIR, "several_datasets.json")
other_catalog = os.path.join(self.SAMPLES_DIR, "full_data.json")
central = one_catalog
indicators, network_indicators = self.dj.generate_catalogs_indicators([
one_catalog,
other_catalog
], central)
# Esperado: Los datasets de several estan federados y los de full, no
expected = {
'datasets_federados_cant': 3,
'datasets_no_federados_cant': 2,
'datasets_federados_pct': 0.6000,
'distribuciones_federadas_cant': 6
}
for k, v in expected.items():
assert_equal(network_indicators[k], v)
@my_vcr.use_cassette()
def test_indicators_invalid_periodicity(self):
catalog = os.path.join(self.SAMPLES_DIR,
"malformed_accrualperiodicity.json")
indicators = self.dj.generate_catalogs_indicators(catalog)[0][0]
# Periodicidad inválida se considera automáticamente como
# catálogo desactualizado
expected = {
'datasets_actualizados_cant': 0,
'datasets_desactualizados_cant': 1,
'datasets_actualizados_pct': 0
}
for k, v in expected.items():
assert_equal(indicators[k], v, k)
@my_vcr.use_cassette()
def test_indicators_missing_periodicity(self):
catalog = os.path.join(self.SAMPLES_DIR, "missing_periodicity.json")
# Dataset con periodicidad faltante no aporta valores para indicadores
# de tipo 'datasets_(des)actualizados'
indicators = self.dj.generate_catalogs_indicators(catalog)[0][0]
expected = {
'datasets_actualizados_cant': 0,
'datasets_desactualizados_cant': 0,
'datasets_actualizados_pct': 0
}
for k, v in expected.items():
assert_equal(indicators[k], v, k)
@my_vcr.use_cassette()
def test_indicators_missing_dataset(self):
catalog = os.path.join(self.SAMPLES_DIR, "missing_dataset.json")
indicators = self.dj.generate_catalogs_indicators(catalog)[0][0]
# Catálogo sin datasets no aporta indicadores significativos
expected = {
'datasets_cant': 0,
'datasets_meta_ok_cant': 0,
'datasets_meta_error_cant': 0,
'datasets_actualizados_cant': 0,
'datasets_desactualizados_cant': 0,
'datasets_actualizados_pct': 0,
'distribuciones_formatos_cant': {},
'datasets_licencias_cant': {},
'datasets_frecuencia_cant': {}
}
for k, v in expected.items():
assert_equal(indicators[k], v, k)
@my_vcr.use_cassette()
def test_last_updated_indicator_missing_issued_field(self):
from datetime import datetime
catalog = os.path.join(self.SAMPLES_DIR, "minimum_data.json")
indicators = self.dj.generate_catalogs_indicators(catalog)[0][0]
dias_diff = (datetime.now() - datetime(2016, 4, 14)).days
# Catálogo no tiene 'issued', pero su dataset sí -> uso el del dataset
expected = {
'catalogo_ultima_actualizacion_dias': dias_diff
}
for k, v in expected.items():
assert_equal(indicators[k], v, k)
def test_dataset_is_updated(self):
catalog = os.path.join(self.SAMPLES_DIR, "catalogo_justicia.json")
        # Dataset con periodicity mensual vencida
dataset = "Base de datos legislativos Infoleg"
assert_false(self.dj.dataset_is_updated(catalog, dataset))
# Dataset con periodicity eventual, siempre True
dataset = "Declaración Jurada Patrimonial Integral de carácter público"
assert_true(self.dj.dataset_is_updated(catalog, dataset))
@my_vcr.use_cassette()
def test_date_network_indicators_empty_catalog(self):
catalog = os.path.join(self.SAMPLES_DIR, "invalid_catalog_empty.json")
indics, network_indics = self.dj.generate_catalogs_indicators(
[catalog,
catalog]
)
for k, v in network_indics.items():
assert_true(v is not None)
def test_unreachable_catalogs(self):
catalog = os.path.join(self.SAMPLES_DIR, "invalid/path.json")
indics, network_indics = self.dj.generate_catalogs_indicators(
[catalog,
catalog]
)
assert_equal([], indics)
assert_equal({}, network_indics)
@my_vcr.use_cassette()
def test_valid_and_unreachable_catalogs(self):
valid = os.path.join(self.SAMPLES_DIR, "several_datasets.json")
unreachable = os.path.join(self.SAMPLES_DIR, "invalid/path.json")
indicators = self.dj.generate_catalogs_indicators(
[valid, unreachable])[0][0]
# El resultado ignora el catálogo inaccesible
expected = {
'datasets_cant': 3,
'distribuciones_cant': 6,
'datasets_meta_ok_cant': 2,
'datasets_meta_error_cant': 1,
'datasets_meta_ok_pct': 0.6667,
}
for k, v in expected.items():
            assert_equal(indicators[k], v)
def test_unreachable_central_catalog(self):
catalog = os.path.join(self.SAMPLES_DIR, "several_datasets.json")
unreachable = os.path.join(self.SAMPLES_DIR, "invalid/path.json")
indics = self.dj.generate_catalogs_indicators(
catalog, central_catalog=unreachable)[0][0]
expected = {
'datasets_cant': 3,
'distribuciones_cant': 6,
'datasets_meta_ok_cant': 2,
'datasets_meta_error_cant': 1,
'datasets_meta_ok_pct': 0.6667,
'datasets_federados_cant': None,
'datasets_no_federados_cant': None,
'datasets_federados_pct': None,
'datasets_federados': [],
'datasets_no_federados': [],
'datasets_federados_eliminados': [],
}
for k, v in expected.items():
assert_equal(indics[k], v)
@my_vcr.use_cassette()
@mock.patch('pydatajson.status_indicators_generator.'
'generate_datasets_summary', autospec=True)
def test_bad_summary(self, mock_summary):
mock_summary.side_effect = Exception("bad summary")
catalog = os.path.join(self.SAMPLES_DIR, "several_datasets.json")
indics = self.dj.generate_catalogs_indicators(catalog)[0][0]
expected = {
'datasets_cant': None,
'distribuciones_cant': None,
'datasets_meta_ok_cant': None,
'datasets_meta_error_cant': None,
'datasets_meta_ok_pct': None
}
for k, v in expected.items():
assert_equal(indics[k], v)
@my_vcr.use_cassette()
def test_bad_date_indicators(self):
catalog = self.dj
catalog['issued'] = catalog['modified'] = 'invalid_date'
indics = self.dj.generate_catalogs_indicators()[0][0]
expected = {
'datasets_desactualizados_cant': None,
'datasets_actualizados_cant': None,
'datasets_actualizados_pct': None,
'catalogo_ultima_actualizacion_dias': None,
'datasets_frecuencia_cant': {}
}
for k, v in expected.items():
assert_equal(indics[k], v)
@my_vcr.use_cassette()
def test_no_title_nor_identifier_catalog(self):
catalog = DataJson(
os.path.join(
self.SAMPLES_DIR,
"missing_catalog_title.json"))
del catalog['identifier']
indics = self.dj.generate_catalogs_indicators(catalog)[0][0]
assert_equal(indics['title'], 'no-title')
assert_equal(indics['identifier'], 'no-id')
def test_node_indicators_no_central_catalog(self):
catalog = os.path.join(self.SAMPLES_DIR, "several_datasets.json")
node_indicators, network_indicators = \
self.dj.generate_catalogs_indicators(catalog)
# Esperado: no se calculan indicadores de federación
federation_indicators = [
'datasets_federados_cant',
'datasets_no_federados_cant',
'datasets_no_federados',
'distribuciones_federadas_cant']
for fed_ind in federation_indicators:
assert_true(fed_ind not in node_indicators[0])
assert_true(fed_ind not in network_indicators)
def test_federation_indicators_by_id(self):
catalogs = [
os.path.join(self.SAMPLES_DIR, "federated_1.json"),
os.path.join(self.SAMPLES_DIR, "federated_2.json"),
]
central = os.path.join(self.SAMPLES_DIR, "central.json")
indicators = self.dj.generate_catalogs_indicators(
catalogs, central, identifier_search=True)[1]
expected = {
'datasets_federados_cant': 2,
'datasets_no_federados_cant': 2,
'datasets_no_federados': [
('Sistema de contrataciones electrónicas',
'http://datos.gob.ar/dataset/contrataciones-electronicas'),
('Sistema de contrataciones electrónicas (sin datos)',
'http://datos.gob.ar/dataset/argentina-compra'),
],
'datasets_federados_pct': 0.5000,
'distribuciones_federadas_cant': 2
}
for k, v in expected.items():
assert_equal(indicators[k], v)
def test_eventual_periodicity(self):
assert_true(_eventual_periodicity('eventual'))
assert_true(_eventual_periodicity('EVENTUAL'))
assert_false(_eventual_periodicity('not eventual'))
def test_validate_download_url_indicator(self):
catalog = os.path.join(self.SAMPLES_DIR, "several_datasets.json")
indicators = self.dj.generate_catalogs_indicators(
catalog, broken_links=True)[0][0]
assert_equal(indicators['distribuciones_download_url_ok_cant'], 4)
assert_equal(indicators['distribuciones_download_url_error_cant'], 2)
assert_equal(indicators['distribuciones_download_url_ok_pct'], 0.6667)
def test_indicators_does_not_include_urls_by_default(self):
one_catalog = os.path.join(self.SAMPLES_DIR, "several_datasets.json")
indicator = self.dj.generate_catalogs_indicators([
one_catalog,
])[0][0]
expected = {
'datasets_cant': 3,
'distribuciones_cant': 6,
'datasets_meta_ok_cant': 2,
'datasets_meta_error_cant': 1,
'datasets_meta_ok_pct': 0.6667,
'datasets_con_datos_cant': 2,
'datasets_sin_datos_cant': 1,
'datasets_con_datos_pct': 0.6667,
'datasets_desactualizados_cant': 2,
'datasets_actualizados_cant': 1,
'datasets_actualizados_pct': 0.3333,
'catalogo_ultima_actualizacion_dias': 1279,
'datasets_frecuencia_cant': {
'R/P1W': 1,
'EVENTUAL': 1,
'R/P1M': 1
},
'distribuciones_formatos_cant': {
'NONE': 3,
'XLSX': 1,
'PDF': 1,
'CSV': 1
},
'distribuciones_tipos_cant': {
'None': 6
},
'datasets_licencias_cant': {
'None': 3
},
'campos_recomendados_pct': 0.0972,
'campos_optativos_pct': 0.0,
'title': 'Cosechando Datos Argentina',
'identifier': '7d4d816f-3a40-476e-ab71-d48a3f0eb3c8'
}
not_expected = {
'distribuciones_download_url_ok_cant': 4,
'distribuciones_download_url_error_cant': 2,
'distribuciones_download_url_ok_pct': 0.6667,
}
for k, _ in expected.items():
assert_in(k, indicator)
for k, _ in not_expected.items():
assert_not_in(k, indicator)
|
datosgobar/pydatajson
|
tests/test_indicators.py
|
Python
|
mit
| 25,279
|
import _plotly_utils.basevalidators
class BordercolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self,
plotly_name="bordercolor",
parent_name="layout.xaxis.rangeselector",
**kwargs
):
super(BordercolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
**kwargs
)
|
plotly/plotly.py
|
packages/python/plotly/plotly/validators/layout/xaxis/rangeselector/_bordercolor.py
|
Python
|
mit
| 464
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
from django.conf import settings
import django_extensions.db.fields
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='ApiAccessRequest',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('status', models.CharField(default=b'pending', help_text='Status of this API access request', max_length=255, db_index=True, choices=[(b'pending', 'Pending'), (b'denied', 'Denied'), (b'approved', 'Approved')])),
('website', models.URLField(help_text='The URL of the website associated with this API user.')),
('reason', models.TextField(help_text='The reason this user wants to access the API.')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('-modified', '-created'),
'abstract': False,
'get_latest_by': 'modified',
},
),
migrations.CreateModel(
name='HistoricalApiAccessRequest',
fields=[
('id', models.IntegerField(verbose_name='ID', db_index=True, auto_created=True, blank=True)),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('status', models.CharField(default=b'pending', help_text='Status of this API access request', max_length=255, db_index=True, choices=[(b'pending', 'Pending'), (b'denied', 'Denied'), (b'approved', 'Approved')])),
('website', models.URLField(help_text='The URL of the website associated with this API user.')),
('reason', models.TextField(help_text='The reason this user wants to access the API.')),
('history_id', models.AutoField(serialize=False, primary_key=True)),
('history_date', models.DateTimeField()),
('history_type', models.CharField(max_length=1, choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')])),
('history_user', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, null=True)),
('user', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, db_constraint=False, blank=True, to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
'verbose_name': 'historical api access request',
},
),
]
|
ampax/edx-platform
|
openedx/core/djangoapps/api_admin/migrations/0001_initial.py
|
Python
|
agpl-3.0
| 3,299
|
import gzip
import logging
import os
import shutil
import uuid
from PIL import Image
from zipfile import ZipFile
import pandas as pd
import numpy as np
import pyresample as pr
from trollvalidation.validations import configuration as cfg
LOG = logging.getLogger(__name__)
# logging.basicConfig(level=logging.DEBUG,
# format='[%(levelname)s: %(asctime)s: %(name)s] %(message)s',
# datefmt='%Y-%m-%d %H:%M:%S')
class TmpFiles(object):
"""docstring for TmpFiles"""
def __init__(self, files=[]):
super(TmpFiles, self).__init__()
if isinstance(files, list):
self.tmpfiles = files
else:
self.tmpfiles = [files]
def append(self, files):
if isinstance(files, list):
self.tmpfiles += files
else:
self.tmpfiles.append(files)
def cleanup(self):
        map(os.remove, self.tmpfiles)
def cleanup(_, tmp_files):
# Delete files first and the remove directories
for tmp_file in tmp_files:
if os.path.isfile(tmp_file):
LOG.info("Cleaning up... {0}".format(tmp_file))
os.remove(tmp_file)
for tmp_folder in tmp_files:
if os.path.exists(tmp_folder):
LOG.info("Cleaning up... {0}".format(tmp_folder))
shutil.rmtree(tmp_folder)
def write_to_csv(results, description_str=''):
# prevent empty results "None" blocking the writing of CSV files
results = filter(lambda l: l, results)
if results:
if cfg.CSV_HEADER:
df = pd.DataFrame(results, index=zip(*results)[0],
columns=cfg.CSV_HEADER)
else:
df = pd.DataFrame(results, index=zip(*results)[0])
df.to_csv(os.path.join(cfg.OUTPUT_DIR, '{0}_results.csv'.format(
description_str)))
def get_area_def(file_handle):
"""
This function is a utility function to read the area definition
    corresponding to an ice concentration product.
:param file_handle: str
Path to an ice concentration product in NetCDF product.
:return: AreaDefinition
The parsed area definition corresponding to the projection
and area extent of the product.
"""
file_name = os.path.basename(file_handle)
if 'NH25kmEASE2' in file_name:
cfg_id = 'EASE2_NH'
elif 'SH25kmEASE2' in file_name:
cfg_id = 'EASE2_SH'
elif 'nh_ease-125' in file_name:
cfg_id = 'EASE_NH'
elif 'sh_ease-125' in file_name:
cfg_id = 'EASE_SH'
elif 'nh_ease2-250' in file_name:
cfg_id = 'EASE2_NH'
elif 'sh_ease2-250' in file_name:
cfg_id = 'EASE2_SH'
elif 'nic_weekly_' in file_name:
cfg_id = 'NIC_EASE_NH'
elif 'nh_polstere-100' in file_name:
cfg_id = 'OSISAF_NH'
elif 'sh_polstere-100' in file_name:
cfg_id = 'OSISAF_SH'
# TODO: Add this case as soon as I have access to the dataset!
# elif 'nic_weekly_' in file_name:
# cfg_id = 'NIC_EASE_SH'
else:
raise ValueError('No matching region for file {0}'.format(
file_handle))
return pr.utils.parse_area_file('etc/areas.cfg', cfg_id)[0]
def uncompress(compressed_file, target=cfg.TMP_DIR):
"""
This function is a utility function to uncompress NetCDF files in
case they are given that way.
The gzipped original is removed after decompression.
    :param compressed_file: str
Path to a zipped ice concentration product in NetCDF product.
:return: str
The path of an uncompressed NetCDF file.
"""
unpacked_filename, extension = os.path.splitext(compressed_file)
if extension == '.gz':
LOG.info('Unpacking {0}'.format(compressed_file))
if not os.path.isfile(unpacked_filename):
with gzip.open(compressed_file, 'rb') as packed_file:
with open(unpacked_filename, 'wb') as unpacked_file:
unpacked_file.write(packed_file.read())
# os.remove(compressed_file)
return unpacked_filename, []
elif extension == '.zip':
LOG.info('Unpacking {0}'.format(compressed_file))
tmp_id = str(uuid.uuid4())
temporary_files_folder = os.path.join(target, tmp_id)
with open(compressed_file, 'rb') as packed_file:
with ZipFile(packed_file) as z:
for name in z.namelist():
if name.endswith('.shp'):
unpacked_shapefile = os.path.join(
temporary_files_folder, name)
try:
z.extract(name, temporary_files_folder)
except Exception, e:
LOG.exception(e)
LOG.error('Could not uncompress {0}'.format(name))
return unpacked_shapefile, [temporary_files_folder]
else:
return compressed_file, []
def dump_data(ref_time, eval_data, orig_data, orig_file):
hemisphere = 'NH'
if '_sh_' in os.path.basename(orig_file) or \
'_SH_' in os.path.basename(orig_file):
hemisphere = 'SH'
out_path = os.path.join(cfg.OUTPUT_DIR, ref_time)
if not os.path.exists(out_path):
os.makedirs(out_path)
eval_data_img = Image.fromarray(eval_data.astype(np.uint8))
fname = os.path.join(out_path, '{0}_{1}_eval_data.bmp'.format(
cfg.VALIDATION_ID, hemisphere))
eval_data_img.save(fname)
eval_data.dump(fname.replace('.bmp', '.pkl'))
orig_data_img = Image.fromarray(orig_data.astype(np.uint8))
fname = os.path.join(out_path, '{0}_{1}_orig_data.bmp'.format(
cfg.VALIDATION_ID, hemisphere))
orig_data_img.save(fname)
orig_data.dump(fname.replace('.bmp', '.pkl'))
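# ---------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the original
# module): `uncompress` unpacks a gzipped/zipped product and reports any
# temporary directories it created, and `get_area_def` selects the matching
# pyresample area definition from the file name. The product path below is
# hypothetical and only serves to show the expected call sequence.
if __name__ == '__main__':
    product = 'ice_conc_nh_polstere-100_multi_201601011200.nc.gz'  # hypothetical
    unpacked, tmp_dirs = uncompress(product)
    area_def = get_area_def(unpacked)
    LOG.info('%s maps to area definition %s', unpacked, area_def.area_id)
    cleanup(None, tmp_dirs)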
|
HelgeDMI/trollvalidation
|
trollvalidation/validation_utils.py
|
Python
|
apache-2.0
| 5,738
|
import sys
import agent.DeepMindAgent as DM
import Message as M
import GameEnv as GE
import Saver as S
import QtDisplay as Qt
if len(sys.argv) != 2 :
print("Usage: {} <path to rom>".format(sys.argv[0]))
quit()
rom = sys.argv[1]
dbPath = "./data.db"
info = "The agent is running. Type 'stop' to quit"
m = M.Message()
s = S.Saver(dbPath)
e = GE.GameEnv(rom, [84, 84])
qt = Qt.DisplayHandler()
qt.start()
qt.Qt().createPlotter()
qt.Qt().createEnvDisplay(e, 30)
plt = qt.Qt().plotter()
ag = DM.DeepMindAgent.createNewAgent(m, s, plt, e, "Deepmind NIPS agent")
ag.start()
print(info)
m.write(M.Message.TRAIN, None)
while True:
cmd = input()
if cmd == "stop":
break
else:
print(info)
ag.stopProcessing()
m.write(M.Message.QUIT, None)
print("Waiting for the agent to stop ... ", end = "", flush = True)
ag.join()
print("done")
qt.exit()
|
Nimoab/DQN-Deepmind-NIPS-2013
|
Run.py
|
Python
|
gpl-3.0
| 950
|
import xbmc
import xbmcgui
import os
import thread
from threading import Timer
ACTION_SELECT_ITEM = 7
ACTION_PREVIOUS_MENU = 10
ACTION_NAV_BACK = 92
YES_NO_DIALOG = 0
CANCEL_RESTART_DIALOG = 1
class ConfirmDialogXML(xbmcgui.WindowXMLDialog):
def onInit(self):
self.IsInitDone = False
self.labelControl = self.getControl(4110);
self.yesNoDialog = self.getControl(4101);
self.countDownLabelControl = self.getControl(4111);
self.cancelRestartDialog = self.getControl(4104);
self.okDialog = self.getControl(4107);
self.clickid = -1
self.labelControl.setLabel(self.__text);
self.yesNoDialog.setVisible(False);
self.cancelRestartDialog.setVisible(False);
self.okDialog.setVisible(False);
self.countDownLabelControl.setVisible(False);
if self.__dialogType == ConfirmDialogType.yesNo:
self.yesNoDialog.setVisible(True);
self.setFocusId(4102);
elif self.__dialogType == ConfirmDialogType.cancelRestart:
self.cancelRestartDialog.setVisible(True);
self.setFocusId(4105);
elif self.__dialogType == ConfirmDialogType.ok:
self.okDialog.setVisible(True);
self.setFocusId(4108);
if self.__timerValue > 0:
self.countDown = self.__timerValue
self.countDownLabelControl.setLabel(str(self.countDown))
self.countDownLabelControl.setVisible(True);
self.lock = thread.allocate_lock()
self.countDownTimer = Timer(1.0, self.countDownTimerCallback)
self.countDownTimer.start()
self.IsInitDone = True
def countDownTimerCallback(self):
self.lock.acquire()
try:
if self.IsInitDone:
if self.countDown > 0:
self.countDown = self.countDown - 1;
self.countDownLabelControl.setLabel(str(self.countDown))
#print self.__statusCallback
if self.__statusCallback is not None:
autoClose = self.__statusCallback(self.labelControl)
if autoClose:
self.clickid = 1
self.close()
return
self.countDownTimer = Timer(1.0, self.countDownTimerCallback)
self.countDownTimer.start()
else:
self.clickid = 1
self.close()
except:
print("closing countDownTimer")
finally:
#print("finally countDownTimer")
self.lock.release()
def onAction(self, action):
#print "ACtion %s" % action.getId()
if action == ACTION_SELECT_ITEM:
self.buttonClickHandler(self.getFocusId())
def onClick(self, controlID):
#print controlID
self.buttonClickHandler(controlID)
def buttonClickHandler(self, controlId):
if controlId == 4102 or controlId == 4105:
self.clickid = 0
elif controlId == 4103 or controlId == 4106:
self.clickid = 1
elif controlId == 4108:
self.clickid = 1
if self.__timerValue > 0:
self.lock.acquire()
self.IsInitDone = False
if self.__timerValue > 0:
self.lock.release()
self.countDownTimer.cancel()
self.close()
def setText(self, text):
self.__text = text;
def setDialogType(self, id):
self.__dialogType = id
def setTimer(self, timerValue):
self.__timerValue = timerValue
def setStatusCallback(self, statusCallback):
self.__statusCallback = statusCallback
class ConfirmDialog(object):
def doModal(self, text, dialogType, timerValue):
mywin = ConfirmDialogXML("confirmdialog.xml",os.getcwd())
mywin.setText(text)
mywin.setDialogType(dialogType)
mywin.setTimer(timerValue)
mywin.setStatusCallback(None)
mywin.doModal()
clickid = mywin.clickid
del mywin
return clickid
def doModalWithCallback(self, text, dialogType, timerValue, statusCallback):
mywin = ConfirmDialogXML("confirmdialog.xml",os.getcwd())
mywin.setText(text)
mywin.setDialogType(dialogType)
mywin.setTimer(timerValue)
mywin.setStatusCallback(statusCallback)
mywin.doModal()
clickid = mywin.clickid
del mywin
return clickid
class ConfirmDialogType(object):
yesNo = 0
cancelRestart = 1
ok = 2
|
memphisx/alphauiplus
|
skin.alienware/scripts/confirmdialog.py
|
Python
|
gpl-2.0
| 4,637
|
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls.defaults import *
from django.conf import settings
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
# Example:
# (r'^simpyl_pastebin/', include('simpyl_pastebin.foo.urls')),
# Uncomment the admin/doc line below and add 'django.contrib.admindocs'
# to INSTALLED_APPS to enable admin documentation:
# (r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# (r'^admin/', include(admin.site.urls)),
(r'^static/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT}),
#(r'^static/(?P<path>.*)$', 'django.views.static.serve',
#{'document_root': '/static'}),
(r'^$', 'simpyl_pastebin.pastebin.views.main'),
(r'', 'simpyl_pastebin.pastebin.views.fetch_paste'),
)
|
excid3/simpyl_pastebin
|
urls.py
|
Python
|
gpl-3.0
| 1,542
|
import time
from threading import Timer
def loop(period, duration, callback):
t0 = time.time()
while (time.time()-t0) < duration:
timer = Timer(period, callback)
timer.start()
timer.join()
class Scheduler(object):
def __init__(self,prog, sampling_period, sampling_duration):
self.prog = prog
self.sampling_period = sampling_period
self.sampling_duration = sampling_duration
def update_loop(self, sampling_period = None, sampling_duration = None):
if sampling_period is None:
sampling_period = self.sampling_period
if sampling_duration is None:
sampling_duration = self.sampling_duration
update_callback = lambda: self.prog.send_event("UPDATE")
loop(period=sampling_period, duration=sampling_duration, callback=update_callback)
def run_gradient_schedule(self,gradients, sampling_period = None, sampling_duration = None):
self.prog.print_back("running gradient schedule: %r" % list(gradients))
self.prog.print_back("sampling every %0.1f seconds for a duration of %0.1f seconds" % (self.sampling_period,self.sampling_duration))
for grad in gradients:
self.prog.print_back("setting gradient: %0.2f" % grad)
self.prog.app.change_gradient_setpoint(grad)
self.update_loop(sampling_period=sampling_period,sampling_duration=sampling_duration)
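# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the original module).
# `loop` blocks on a threading.Timer each iteration, so the callback fires
# roughly every `period` seconds until `duration` seconds have elapsed;
# Scheduler.update_loop simply wires that loop to the host program's
# "UPDATE" event.
if __name__ == '__main__':
    ticks = []
    # Sample every 0.5 s for about 2 s; expect roughly four callback calls.
    loop(period=0.5, duration=2.0, callback=lambda: ticks.append(time.time()))
    print("callback fired %d times" % len(ticks))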
|
cversek/peltiator
|
python/src/peltiator/apps/peltier_test/lib/application/scheduler.py
|
Python
|
mit
| 1,440
|
from __future__ import print_function, unicode_literals
import unittest
from binascii import unhexlify # , hexlify
from hkdf import Hkdf
# def generate_KAT():
# print("KAT = [")
# for salt in (b"", b"salt"):
# for context in (b"", b"context"):
# skm = b"secret"
# out = HKDF(skm, 64, XTS=salt, CTXinfo=context)
# hexout = " '%s' +\n '%s'" % (hexlify(out[:32]),
# hexlify(out[32:]))
# print(" (%r, %r, %r,\n%s)," % (salt, context, skm, hexout))
# print("]")
KAT = [
('', '', 'secret',
'2f34e5ff91ec85d53ca9b543683174d0cf550b60d5f52b24c97b386cfcf6cbbf' +
'9cfd42fd37e1e5a214d15f03058d7fee63dc28f564b7b9fe3da514f80daad4bf'),
('', 'context', 'secret',
'c24c303a1adfb4c3e2b092e6254ed481c41d8955ba8ec3f6a1473493a60c957b' +
'31b723018ca75557214d3d5c61c0c7a5315b103b21ff00cb03ebe023dc347a47'),
('salt', '', 'secret',
'f1156507c39b0e326159e778696253122de430899a8df2484040a85a5f95ceb1' +
'dfca555d4cc603bdf7153ed1560de8cbc3234b27a6d2be8e8ca202d90649679a'),
('salt', 'context', 'secret',
'61a4f201a867bcc12381ddb180d27074408d03ee9d5750855e5a12d967fa060f' +
'10336ead9370927eaabb0d60b259346ee5f57eb7ceba8c72f1ed3f2932b1bf19'),
]
class TestKAT(unittest.TestCase):
# note: this uses SHA256
def test_kat(self):
for (salt, context, skm, expected_hexout) in KAT:
expected_out = unhexlify(expected_hexout)
for outlen in range(0, len(expected_out)):
out = Hkdf(salt.encode("ascii"), skm.encode("ascii")).expand(
context.encode("ascii"), outlen)
self.assertEqual(out, expected_out[:outlen])
# if __name__ == '__main__':
# generate_KAT()
|
warner/magic-wormhole
|
src/wormhole/test/test_hkdf.py
|
Python
|
mit
| 1,791
|
"""
This program demonstrates how arbitrary (otherwise unmatched) keyword
arguments are collected and passed to a function as a dictionary. The
receiving parameter is preceded by two asterisks.
"""
def gets(**kwargs):
print type(kwargs), len(kwargs), '\n', kwargs
for i in kwargs:
print i, kwargs[i], type(kwargs[i])
x = 12.34
y = 'string'
gets(a=1234, b=x+1, c=y+'s', d=12.34)
|
sharkySharks/PythonForDevs
|
DemoProgs/varykwargs.py
|
Python
|
mit
| 385
|
########################################################################
#
# File: fields.py
# Author: Alex Samuel
# Date: 2001-03-05
#
# Contents:
# General type system for user-defined data constructs.
#
# Copyright (c) 2001, 2002, 2003 by CodeSourcery, LLC. All rights reserved.
#
# For license terms see the file COPYING.
#
########################################################################
"""A 'Field' determines how data is displayed and stored.
A 'Field' is a component of a data structure. Every 'Field' has a type.
For example, an 'IntegerField' stores a signed integer while a
'TextField' stores a string.
The value of a 'Field' can be represented as HTML (for display in the
GUI), or as XML (when written to persistent storage). Every 'Field' can
create an HTML form that can be used by the user to update the value of
the 'Field'.
Every 'Extension' class has a set of arguments composed of 'Field'. An
instance of that 'Extension' class can be constructed by providing a
value for each 'Field' object. The GUI can display the 'Extension'
object by rendering each of the 'Field' values as HTML. The user can
change the value of a 'Field' in the GUI, and then write the 'Extension'
object to persistent storage.
Additional derived classes of 'Field' can be created for use in
domain-specific situations. For example, the QMTest 'Test' class
defines a derived class which allows the user to select from among a set
of test names."""
########################################################################
# imports
########################################################################
import attachment
import common
import formatter
import htmllib
import os
import re
import qm
import string
import StringIO
import structured_text
import sys
import time
import tokenize
import types
import urllib
import web
import xml.dom
import xmlutil
########################################################################
# classes
########################################################################
class Field(object):
"""A 'Field' is a named, typed component of a data structure."""
form_field_prefix = "_field_"
def __init__(self,
name = "",
default_value = None,
title = "",
description = "",
hidden = "false",
read_only = "false",
computed = "false"):
"""Create a new (generic) field.
'name' -- The name of the field.
'default_value' -- The default value for this field.
'title' -- The name given this field when it is displayed in
user interfaces.
'description' -- A string explaining the purpose of this field.
The 'description' must be provided as structured text. The
first line of the structured text must be a one-sentence
description of the field; that line is extracted by
'GetBriefDescription'.
        'hidden' -- If true, this field is for internal purposes only
and is not shown in user interfaces.
'read_only' -- If true, this field may not be modified by users.
'computed' -- If true, this field is computed automatically.
All computed fields are implicitly hidden and implicitly
read-only.
The boolean parameters (such as 'hidden') use the convention
that true is represented by the string '"true"'; any other value
is false. This convention is a historical artifact."""
self.__name = name
# Use the name as the title, if no other was specified.
if not title:
self.__title = name
else:
self.__title = title
self.__description = description
self.__hidden = hidden == "true"
self.__read_only = read_only == "true"
self.__computed = computed == "true"
# All computed fields are also read-only and hidden.
if (self.IsComputed()):
self.__read_only = 1
self.__hidden = 1
self.__default_value = default_value
def SetName(self, name):
"""Set the name of the field."""
# We assume that if title==name the title
# was not given and so defaulted to name.
# Keep it in sync with name in that case.
if (self.__name == self.__title):
self.__title = name
self.__name = name
def GetName(self):
"""Return the name of the field."""
return self.__name
def GetDefaultValue(self):
"""Return the default value for this field."""
return common.copy(self.__default_value)
def GetTitle(self):
"""Return the user-friendly title of the field."""
return self.__title
def GetDescription(self):
"""Return a description of this field.
This description is used when displaying detailed help
information about the field."""
return self.__description
def GetBriefDescription(self):
"""Return a brief description of this field.
This description is used when prompting for input, or when
displaying the current value of the field."""
# Get the complete description.
description = self.GetDescription()
# Return the first paragraph.
return structured_text.get_first(description)
def GetHelp(self):
"""Generate help text about this field in structured text format."""
raise NotImplementedError
def GetHtmlHelp(self, edit=0):
"""Generate help text about this field in HTML format.
'edit' -- If true, display information about editing controls
for this field."""
description = structured_text.to_html(self.GetDescription())
help = structured_text.to_html(self.GetHelp())
return '''
<h3>%s</h3>
<h4>About This Field</h4>
%s
<hr noshade size="2">
<h4>About This Field\'s Values</h4>
%s
<hr noshade size="2">
<p>Refer to this field as <tt>%s</tt> in Python expressions.</p>
''' % (self.GetTitle(), description, help, self.GetName(), )
def GetSubfields(self):
"""Returns the sequence of subfields contained in this field.
returns -- The sequence of subfields contained in this field.
If there are no subfields, an empty sequence is returned."""
return ()
def IsComputed(self):
"""Returns true if this field is computed automatically.
returns -- True if this field is computed automatically. A
        computed field is never displayed to users and should not be
        stored; the class containing the field is responsible for
        recomputing it as necessary."""
return self.__computed
def IsHidden(self):
"""Returns true if this 'Field' should be hidden from users.
returns -- True if this 'Field' should be hidden from users.
The value of a hidden field is not displayed in the GUI."""
return self.__hidden
def IsReadOnly(self):
"""Returns true if this 'Field' cannot be modified by users.
returns -- True if this 'Field' cannot be modified by users.
The GUI does not allow users to modify a read-only field."""
return self.__read_only
### Output methods.
def FormatValueAsText(self, value, columns=72):
"""Return a plain text rendering of a 'value' for this field.
'columns' -- The maximum width of each line of text.
returns -- A plain-text string representing 'value'."""
# Create a file to hold the result.
text_file = StringIO.StringIO()
# Format the field as HTML.
html_file = StringIO.StringIO(self.FormatValueAsHtml(None,
value,
"brief"))
# Turn the HTML into plain text.
parser = htmllib.HTMLParser(formatter.AbstractFormatter
(formatter.DumbWriter(text_file,
maxcol = columns)))
        parser.feed(html_file.getvalue())
        parser.close()
        text = text_file.getvalue()
# Close the files.
html_file.close()
text_file.close()
return text
def FormatValueAsHtml(self, server, value, style, name=None):
"""Return an HTML rendering of a 'value' for this field.
'server' -- The 'WebServer' in which the HTML will be
displayed.
'value' -- The value for this field. May be 'None', which
renders a default value (useful for blank forms).
'style' -- The rendering style. Can be "full" or "brief" (both
read-only), or "new" or "edit" or "hidden".
'name' -- The name to use for the primary HTML form element
containing the value of this field, if 'style' specifies the
generation of form elements. If 'name' is 'None', the value
returned by 'GetHtmlFormFieldName()' should be used.
returns -- A string containing the HTML representation of
'value'."""
raise NotImplementedError
def MakeDomNodeForValue(self, value, document):
"""Generate a DOM element node for a value of this field.
'value' -- The value to represent.
'document' -- The containing DOM document node."""
raise NotImplementedError
### Input methods.
def Validate(self, value):
"""Validate a field value.
For an acceptable type and value, return the representation of
'value' in the underlying field storage.
'value' -- A value to validate for this field.
returns -- If the 'value' is valid, returns 'value' or an
equivalent "canonical" version of 'value'. (For example, this
function may search a hash table and return an equivalent entry
from the hash table.)
This function must raise an exception if the value is not valid.
The string representation of the exception will be used as an
error message in some situations.
Implementations of this method must be idempotent."""
raise NotImplementedError
def ParseTextValue(self, value):
"""Parse a value represented as a string.
'value' -- A string representing the value.
returns -- The corresponding field value. The value returned
should be processed by 'Validate' to ensure that it is valid
before it is returned."""
        raise NotImplementedError
def ParseFormValue(self, request, name, attachment_stores):
"""Convert a value submitted from an HTML form.
'request' -- The 'WebRequest' containing a value corresponding
to this field.
'name' -- The name corresponding to this field in the 'request'.
'attachment_stores' -- A dictionary mapping 'AttachmentStore' ids
(in the sense of Python's 'id' built-in) to the
'AttachmentStore's themselves.
returns -- A pair '(value, redisplay)'. 'value' is the value
for this field, as indicated in 'request'. 'redisplay' is true
if and only if the form should be redisplayed, rather than
committed. If an error occurs, an exception is thrown."""
# Retrieve the value provided in the form.
value = request[name]
# Treat the result as we would if it were provided on the
# command-line.
return (self.ParseTextValue(value), 0)
def GetValueFromDomNode(self, node, attachment_store):
"""Return a value for this field represented by DOM 'node'.
This method does not validate the value for this particular
instance; it only makes sure the node is well-formed, and
returns a value of the correct Python type.
'node' -- The DOM node that is being evaluated.
'attachment_store' -- For attachments, the store that should be
used.
If the 'node' is incorrectly formed, this method should raise an
exception."""
raise NotImplementedError
# Other methods.
def GetHtmlFormFieldName(self):
"""Return the form field name corresponding this field.
returns -- A string giving the name that should be used for this
field when used in an HTML form."""
return self.form_field_prefix + self.GetName()
def __repr__(self):
# This output format is more useful when debugging than the
# default "<... instance at 0x...>" format provided by Python.
return "<%s %s>" % (self.__class__, self.GetName())
########################################################################
class IntegerField(Field):
"""An 'IntegerField' stores an 'int' or 'long' object."""
def __init__(self, name="", default_value=0, **properties):
"""Construct a new 'IntegerField'.
'name' -- As for 'Field.__init__'.
'default_value' -- As for 'Field.__init__'.
'properties' -- Other keyword arguments for 'Field.__init__'."""
# Perform base class initialization.
super(IntegerField, self).__init__(name, default_value, **properties)
def GetHelp(self):
return """This field stores an integer.
        The default value of this field is %d.""" % self.GetDefaultValue()
### Output methods.
def FormatValueAsText(self, value, columns=72):
return str(value)
def FormatValueAsHtml(self, server, value, style, name=None):
# Use default value if requested.
if value is None:
value = self.GetDefaultValue()
# Use the default field form field name if requested.
if name is None:
name = self.GetHtmlFormFieldName()
if style == "new" or style == "edit":
return '<input type="text" size="8" name="%s" value="%d" />' \
% (name, value)
elif style == "full" or style == "brief":
return '<tt>%d</tt>' % value
elif style == "hidden":
return '<input type="hidden" name="%s" value="%d" />' \
% (name, value)
else:
assert None
def MakeDomNodeForValue(self, value, document):
return xmlutil.create_dom_text_element(document, "integer",
str(value))
### Input methods.
def Validate(self, value):
if not isinstance(value, (int, long)):
raise ValueError, value
return value
def ParseTextValue(self, value):
try:
return self.Validate(int(value))
except:
raise qm.common.QMException, \
qm.error("invalid integer field value")
def GetValueFromDomNode(self, node, attachment_store):
# Make sure 'node' is an '<integer>' element.
if node.nodeType != xml.dom.Node.ELEMENT_NODE \
or node.tagName != "integer":
raise qm.QMException, \
qm.error("dom wrong tag for field",
name=self.GetName(),
right_tag="integer",
wrong_tag=node.tagName)
# Retrieve the contained text.
value = xmlutil.get_dom_text(node)
# Convert it to an integer.
return self.ParseTextValue(value)
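# ---------------------------------------------------------------------------
# Editor's addition, not part of the original QMTest source: a minimal sketch
# of the 'Field' protocol described in the module docstring, using
# 'IntegerField' as the concrete type. The helper is purely illustrative and
# is never called at import time; the field name and values are hypothetical.
def _integer_field_example():
    """Return (html, parsed) for a hypothetical 'retries' field."""
    field = IntegerField(name="retries", default_value=3,
                         description="How many times to retry.")
    # Read-only HTML rendering of a value ('<tt>5</tt>').
    html = field.FormatValueAsHtml(None, 5, "brief")
    # Text from a form or the command line, converted back to an int (7).
    parsed = field.ParseTextValue("7")
    return html, parsed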
########################################################################
class TextField(Field):
"""A field that contains text."""
def __init__(self,
name = "",
default_value = "",
multiline = "false",
structured = "false",
verbatim = "false",
not_empty_text = "false",
**properties):
"""Construct a new 'TextField'.
'multiline' -- If false, a value for this field is a single line
of text. If true, multi-line text is allowed.
'structured' -- If true, the field contains structured text.
'verbatim' -- If true, the contents of the field are treated as
preformatted text.
'not_empty_text' -- The value of this field is considered
        invalid if it is empty or composed only of whitespace.
'properties' -- A dictionary of other keyword arguments which
are provided to the base class constructor."""
# Initialize the base class.
super(TextField, self).__init__(name, default_value, **properties)
self.__multiline = multiline == "true"
self.__structured = structured == "true"
self.__verbatim = verbatim == "true"
self.__not_empty_text = not_empty_text == "true"
def GetHelp(self):
help = """
A text field. """
if self.__structured:
help = help + '''
The text is interpreted as structured text, and formatted
appropriately for the output device. See "Structured Text
Formatting
Rules":http://www.python.org/sigs/doc-sig/stext.html for
more information. '''
elif self.__verbatim:
help = help + """
The text is stored verbatim; whitespace and indentation are
preserved. """
if self.__not_empty_text:
help = help + """
This field may not be empty. """
help = help + """
The default value of this field is "%s".
""" % self.GetDefaultValue()
return help
### Output methods.
def FormatValueAsText(self, value, columns=72):
if self.__structured:
return structured_text.to_text(value, width=columns)
elif self.__verbatim:
return value
else:
return common.wrap_lines(value, columns)
def FormatValueAsHtml(self, server, value, style, name=None):
# Use default value if requested.
if value is None:
value = ""
else:
value = str(value)
# Use the default field form field name if requested.
if name is None:
name = self.GetHtmlFormFieldName()
if style == "new" or style == "edit":
if self.__multiline:
result = '<textarea cols="64" rows="8" name="%s">' \
'%s</textarea>' \
% (name, web.escape(value))
else:
result = \
'<input type="text" size="40" name="%s" value="%s" />' \
% (name, web.escape(value))
# If this is a structured text field, add a note to that
# effect, so users aren't surprised.
if self.__structured:
result = result \
+ '<br><font size="-1">This is a ' \
+ qm.web.make_help_link_html(
qm.structured_text.html_help_text,
"structured text") \
+ 'field.</font>'
return result
elif style == "hidden":
return '<input type="hidden" name="%s" value="%s" />' \
% (name, web.escape(value))
elif style == "brief":
if self.__structured:
# Use only the first line of text.
value = string.split(value, "\n", 1)
value = web.format_structured_text(value[0])
else:
# Replace all whitespace with ordinary space.
value = re.sub(r"\s", " ", value)
# Truncate to 80 characters, if it's longer.
if len(value) > 80:
value = value[:80] + "..."
if self.__verbatim:
# Put verbatim text in a <tt> element.
return '<tt>%s</tt>' % web.escape(value)
elif self.__structured:
# It's already formatted as HTML; don't escape it.
return value
else:
# Other text set normally.
return web.escape(value)
elif style == "full":
if self.__verbatim:
# Wrap lines before escaping special characters for
# HTML. Use a special tag to indicate line breaks. If
# we were to escape first, line lengths would be
# computed using escape codes rather than visual
# characters.
break_delimiter = "#@LINE$BREAK@#"
value = common.wrap_lines(value, columns=80,
break_delimiter=break_delimiter)
# Now escape special characters.
value = web.escape(value)
# Replace the line break tag with visual indication of
# the break.
value = string.replace(value,
break_delimiter, r"<blink>\</blink>")
# Place verbatim text in a <pre> element.
return '<pre>%s</pre>' % value
elif self.__structured:
return web.format_structured_text(value)
else:
if value == "":
# Browsers don't deal nicely with empty table cells,
# so put an extra space here.
return " "
else:
return web.escape(value)
else:
raise ValueError, style
def MakeDomNodeForValue(self, value, document):
return xmlutil.create_dom_text_element(document, "text", value)
### Input methods.
def Validate(self, value):
if not isinstance(value, types.StringTypes):
raise ValueError, value
# Clean up unless it's a verbatim string.
if not self.__verbatim:
# Remove leading whitespace.
value = string.lstrip(value)
# If this field has the not_empty_text property set, make sure the
# value complies.
if self.__not_empty_text and value == "":
raise ValueError, \
qm.error("empty text field value",
field_title=self.GetTitle())
# If this is not a multi-line text field, remove line breaks
# (and surrounding whitespace).
if not self.__multiline:
value = re.sub(" *\n+ *", " ", value)
return value
def ParseFormValue(self, request, name, attachment_stores):
# HTTP specifies text encodings are CR/LF delimited; convert to
# the One True Text Format (TM).
return (self.ParseTextValue(qm.convert_from_dos_text(request[name])),
0)
def ParseTextValue(self, value):
return self.Validate(value)
def GetValueFromDomNode(self, node, attachment_store):
# Make sure 'node' is a '<text>' element.
if node.nodeType != xml.dom.Node.ELEMENT_NODE \
or node.tagName != "text":
raise qm.QMException, \
qm.error("dom wrong tag for field",
name=self.GetName(),
right_tag="text",
wrong_tag=node.tagName)
return self.Validate(xmlutil.get_dom_text(node))
########################################################################
class TupleField(Field):
"""A 'TupleField' contains zero or more other 'Field' objects.
The contained 'Field' objects may have different types. The value
of a 'TupleField' is a Python list; the values in the list
correspond to the values of the contained 'Field' objects. For
example, '["abc", 3]' would be a valid value for a 'TupleField'
containing a 'TextField' and an 'IntegerField'."""
def __init__(self, name = "", fields = None, **properties):
"""Construct a new 'TupleField'.
'name' -- The name of the field.
'fields' -- A sequence of 'Field' instances.
The new 'TupleField' stores a list whose elements correspond to
the 'fields'."""
self.__fields = fields == None and [] or fields
default_value = map(lambda f: f.GetDefaultValue(), self.__fields)
Field.__init__(self, name, default_value, **properties)
def GetHelp(self):
help = ""
need_space = 0
for f in self.__fields:
if need_space:
help += "\n"
else:
need_space = 1
help += "** " + f.GetTitle() + " **\n\n"
help += f.GetHelp()
return help
def GetSubfields(self):
return self.__fields
### Output methods.
def FormatValueAsHtml(self, server, value, style, name = None):
# Use the default name if none is specified.
if name is None:
name = self.GetHtmlFormFieldName()
# Format the field as a multi-column table.
html = '<table border="0" cellpadding="0">\n <tr>\n'
for f, v in map(None, self.__fields, value):
element_name = name + "_" + f.GetName()
html += " <td><b>" + f.GetTitle() + "</b>:</td>\n"
html += (" <td>\n"
+ f.FormatValueAsHtml(server, v, style, element_name)
+ " </td>\n")
html += " </tr>\n</table>\n"
return html
def MakeDomNodeForValue(self, value, document):
element = document.createElement("tuple")
for f, v in map(None, self.__fields, value):
element.appendChild(f.MakeDomNodeForValue(v, document))
return element
### Input methods.
def Validate(self, value):
assert len(value) == len(self.__fields)
return map(lambda f, v: f.Validate(v),
self.__fields, value)
def ParseFormValue(self, request, name, attachment_stores):
value = []
redisplay = 0
for f in self.__fields:
v, r = f.ParseFormValue(request, name + "_" + f.GetName(),
attachment_stores)
value.append(v)
if r:
redisplay = 1
# Now that we've computed the value of the entire tuple, make
# sure it is valid.
value = self.Validate(value)
return (value, redisplay)
def GetValueFromDomNode(self, node, attachment_store):
values = []
for f, element in map(None, self.__fields, node.childNodes):
values.append(f.GetValueFromDomNode(element, attachment_store))
return self.Validate(values)
class DictionaryField(Field):
"""A 'DictionaryField' maps keys to values."""
def __init__(self, key_field, value_field, **properties):
"""Construct a new 'DictionaryField'.
'key_field' -- The key field.
'value_field' -- The value field.
"""
self.__key_field = key_field
self.__value_field = value_field
super(DictionaryField, self).__init__(**properties)
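    # Illustrative usage sketch (not part of the original module): a
    # 'DictionaryField' pairs a key field with a value field, and its
    # value is a Python dictionary.  The fields and values below are
    # hypothetical, assuming the '(name, ...)' constructor signatures
    # used throughout this module.
    #
    #   >>> f = DictionaryField(TextField("word"), IntegerField("count"))
    #   >>> f.Validate({"spam": 3})
    #   {'spam': 3}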
def GetHelp(self):
help = """
A dictionary field. A dictionary maps keys to values. The key type:
%s
The value type:
%s"""%(self.__key_field.GetHelp(), self.__value_field.GetHelp())
return help
def GetKeyField(self): return self.__key_field
def GetValueField(self): return self.__value_field
### Output methods.
def FormatValueAsHtml(self, server, content, style, name = None):
if content is None:
content = {}
# Use the default name if none is specified.
if name is None:
name = self.GetHtmlFormFieldName()
if style == 'brief' or style == 'full':
if len(content) == 0:
# An empty set.
return 'None'
body = ['<th>%s</th><td>%s</td>\n'
%(self.__key_field.FormatValueAsHtml(server, key, style),
self.__value_field.FormatValueAsHtml(server, value, style))
for (key, value) in content.iteritems()]
return '<table><tr>%s</tr>\n</table>\n'%'</tr>\n<tr>'.join(body)
elif style in ['new', 'edit', 'hidden']:
html = ''
if content:
# Create a table to represent the dictionary -- but only if it is
# non-empty. A table with no body is invalid HTML.
html += ('<table border="0" cellpadding="0" cellspacing="0">'
'\n <tbody>\n')
element_number = 0
for key, value in content.iteritems():
html += ' <tr>\n <td>'
element_name = name + '_%d' % element_number
checkbox_name = element_name + "_remove"
if style == 'edit':
html += ('<input type="checkbox" name="%s" /></td>\n'
' <td>\n'
% checkbox_name)
element_name = name + '_key_%d' % element_number
html += (' <th>%s</th>\n'
%self.__key_field.FormatValueAsHtml(server, key,
style,
element_name))
element_name = name + '_value_%d' % element_number
html += (' <td>%s</td>\n'
%self.__value_field.FormatValueAsHtml(server, value,
style,
element_name))
html += ' </tr>\n'
element_number += 1
html += ' </tbody>\n</table>\n'
# The action field is used to keep track of whether the
# "Add" or "Remove" button has been pushed. It would be
            # much nicer if we could use JavaScript to update the
# table, but Netscape 4, and even Mozilla 1.0, do not
# permit that. Therefore, we have to go back to the server.
html += '<input type="hidden" name="%s" value="" />' % name
html += ('<input type="hidden" name="%s_count" value="%d" />'
% (name, len(content)))
if style != 'hidden':
html += ('<table border="0" cellpadding="0" cellspacing="0">\n'
' <tbody>\n'
' <tr>\n'
' <td><input type="button" name="%s_add" '
'value="Add Another" '
'''onclick="%s.value = 'add'; submit();" />'''
'</td>\n'
' <td><input type="button" name="%s_remove"'
'value="Remove Selected" '
'''onclick="%s.value = 'remove'; submit();" />'''
'</td>\n'
' </tr>'
' </tbody>'
'</table>'
% (name, name, name, name))
return html
def MakeDomNodeForValue(self, value, document):
element = document.createElement('dictionary')
for k, v in value.iteritems():
item = element.appendChild(document.createElement('item'))
item.appendChild(self.__key_field.MakeDomNodeForValue(k, document))
item.appendChild(self.__value_field.MakeDomNodeForValue(v, document))
return element
### Input methods.
def Validate(self, value):
valid = {}
for k, v in value.items():
valid[self.__key_field.Validate(k)] = self.__value_field.Validate(v)
return valid
def ParseTextValue(self, value):
raise NotImplementedError
def ParseFormValue(self, request, name, attachment_stores):
content = {}
redisplay = 0
action = request[name]
for i in xrange(int(request[name + '_count'])):
if not (action == 'remove'
and request.get(name + '_%d_remove'%i) == 'on'):
key, rk = self.__key_field.ParseFormValue(request,
name + '_key_%d'%i,
attachment_stores)
value, rv = self.__value_field.ParseFormValue(request,
name + '_value_%d'%i,
attachment_stores)
content[key] = value
if rk or rv:
redisplay = 1
# Remove entries from the request that might cause confusion
# when the page is redisplayed.
names = []
for n, v in request.items():
if n[:len(name)] == name:
names.append(n)
for n in names:
del request[n]
content = self.Validate(content)
if action == 'add':
redisplay = 1
content[self.__key_field.GetDefaultValue()] =\
self.__value_field.GetDefaultValue()
elif action == 'remove':
redisplay = 1
return (content, redisplay)
def GetValueFromDomNode(self, node, attachment_store):
values = {}
for item in node.childNodes:
if item.nodeType == xml.dom.Node.ELEMENT_NODE:
# No mixed content !
# We are only interested into element child-nodes.
children = [c for c in item.childNodes
if c.nodeType == xml.dom.Node.ELEMENT_NODE]
values[self.__key_field.GetValueFromDomNode
(children[0], attachment_store)] =\
self.__value_field.GetValueFromDomNode(children[1],
attachment_store)
return self.Validate(values)
class SetField(Field):
"""A field containing zero or more instances of some other field.
All contents must be of the same field type. A set field may not
contain sets.
The default field value is set to an empty set."""
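    # Illustrative usage sketch (not part of the original module): a set
    # field wrapping a 'TextField'.  The set takes its name from the
    # contained field, and its value is a Python list.  The field name
    # below is hypothetical.
    #
    #   >>> tags = SetField(TextField("tag"))
    #   >>> tags.Validate(["alpha", "beta"])
    #   ['alpha', 'beta']
    #
    # 'FormatValueAsText' renders such a value as a bracketed,
    # comma-separated list, and 'ParseTextValue' below accepts the same
    # format back.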
def __init__(self, contained, not_empty_set = "false", default_value = None,
**properties):
"""Create a set field.
The name of the contained field is taken as the name of this
field.
'contained' -- An 'Field' instance describing the
elements of the set.
'not_empty_set' -- If true, this field may not be empty,
i.e. the value of this field must contain at least one element.
raises -- 'ValueError' if 'contained' is a set field.
raises -- 'TypeError' if 'contained' is not a 'Field'."""
if not properties.has_key('description'):
properties['description'] = contained.GetDescription()
super(SetField, self).__init__(
contained.GetName(),
default_value or [],
title = contained.GetTitle(),
**properties)
# A set field may not contain a set field.
if isinstance(contained, SetField):
raise ValueError, \
"A set field may not contain a set field."
if not isinstance(contained, Field):
raise TypeError, "A set must contain another field."
        # Remember the contained field type.
self.__contained = contained
self.__not_empty_set = not_empty_set == "true"
def GetHelp(self):
return """
A set field. A set contains zero or more elements, all of the
same type. The elements of the set are described below:
""" + self.__contained.GetHelp()
def GetSubfields(self):
return (self.__contained,)
def GetHtmlHelp(self, edit=0):
help = Field.GetHtmlHelp(self)
if edit:
# In addition to the standard generated help, include
# additional instructions about using the HTML controls.
help = help + """
<hr noshade size="2">
<h4>Modifying This Field</h4>
<p>Add a new element to the set by clicking the
<i>Add</i> button. The new element will have a default
value until you change it. To remove elements from the
set, select them by checking the boxes on the left side of
the form. Then, click the <i>Remove</i> button.</p>
"""
return help
### Output methods.
def FormatValueAsText(self, value, columns=72):
# If the set is empty, indicate this specially.
if len(value) == 0:
return "None"
# Format each element of the set, and join them into a
# comma-separated list.
contained_field = self.__contained
formatted_items = []
for item in value:
formatted_item = contained_field.FormatValueAsText(item, columns)
formatted_items.append(repr(formatted_item))
result = "[ " + string.join(formatted_items, ", ") + " ]"
return qm.common.wrap_lines(result, columns)
def FormatValueAsHtml(self, server, value, style, name=None):
# Use default value if requested.
if value is None:
value = []
        # Use the default form field name if none is specified.
if name is None:
name = self.GetHtmlFormFieldName()
contained_field = self.__contained
if style == "brief" or style == "full":
if len(value) == 0:
# An empty set.
return "None"
formatted \
= map(lambda v: contained_field.FormatValueAsHtml(server,
v, style),
value)
if style == "brief":
# In the brief style, list elements separated by commas.
separator = ", "
else:
# In the full style, list elements one per line.
separator = "<br>\n"
return string.join(formatted, separator)
elif style in ["new", "edit", "hidden"]:
html = ""
if value:
# Create a table to represent the set -- but only if the set is
# non-empty. A table with no body is invalid HTML.
html += ('<table border="0" cellpadding="0" cellspacing="0">'
"\n <tbody>\n")
element_number = 0
for element in value:
html += " <tr>\n <td>"
element_name = name + "_%d" % element_number
checkbox_name = element_name + "_remove"
if style == "edit":
html += \
('<input type="checkbox" name="%s" /></td>\n'
' <td>\n'
% checkbox_name)
html += contained_field.FormatValueAsHtml(server,
element,
style,
element_name)
html += " </td>\n </tr>\n"
element_number += 1
html += " </tbody>\n</table>\n"
# The action field is used to keep track of whether the
# "Add" or "Remove" button has been pushed. It would be
            # much nicer if we could use JavaScript to update the
# table, but Netscape 4, and even Mozilla 1.0, do not
# permit that. Therefore, we have to go back to the server.
html += '<input type="hidden" name="%s" value="" />' % name
html += ('<input type="hidden" name="%s_count" value="%d" />'
% (name, len(value)))
if style != "hidden":
html += ('<table border="0" cellpadding="0" cellspacing="0">\n'
' <tbody>\n'
' <tr>\n'
' <td><input type="button" name="%s_add" '
'value="Add Another" '
'''onclick="%s.value = 'add'; submit();" />'''
'</td>\n'
' <td><input type="button" name="%s_remove"'
'value="Remove Selected" '
'''onclick="%s.value = 'remove'; submit();" />'''
'</td>\n'
' </tr>'
' </tbody>'
'</table>'
% (name, name, name, name))
return html
def MakeDomNodeForValue(self, value, document):
# Create a set element.
element = document.createElement("set")
# Add a child node for each item in the set.
contained_field = self.__contained
for item in value:
# The contained field knows how to make a DOM node for each
# item in the set.
item_node = contained_field.MakeDomNodeForValue(item, document)
element.appendChild(item_node)
return element
### Input methods.
def Validate(self, value):
# If this field has the not_empty_set property set, make sure
# the value complies.
if self.__not_empty_set and len(value) == 0:
raise ValueError, \
qm.error("empty set field value",
field_title=self.GetTitle())
# Assume 'value' is a sequence. Copy it, simultaneously
# validating each element in the contained field.
return map(lambda v: self.__contained.Validate(v),
value)
def ParseTextValue(self, value):
def invalid(tok):
"""Raise an exception indicating a problem with 'value'.
'tok' -- A token indicating the position of the problem.
This function does not return; instead, it raises an
appropriate exception."""
raise qm.QMException, \
qm.error("invalid set value", start = value[tok[2][1]:])
# Use the Python parser to handle the elements of the set.
s = StringIO.StringIO(value)
g = tokenize.generate_tokens(s.readline)
# Read the opening square bracket.
tok = g.next()
if tok[0] != tokenize.OP or tok[1] != "[":
invalid(tok)
# There are no elements yet.
elements = []
# Keep going until we find the closing bracket.
while 1:
# If we've reached the closing bracket, the set is
# complete.
tok = g.next()
if tok[0] == tokenize.OP and tok[1] == "]":
break
# If this is not the first element of the set, there should
# be a comma before the next element.
if elements:
if tok[0] != tokenize.OP or tok[1] != ",":
invalid(tok)
tok = g.next()
# The next token should be a string constant.
if tok[0] != tokenize.STRING:
invalid(tok)
# Parse the string constant.
v = eval(tok[1])
elements.append(self.__contained.ParseTextValue(v))
# There should not be any tokens left over.
tok = g.next()
if not tokenize.ISEOF(tok[0]):
invalid(tok)
return self.Validate(elements)
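    # Illustrative sketch (not part of the original module): the text
    # form accepted by 'ParseTextValue' is a Python-style list of string
    # literals.  The field name and values below are hypothetical.
    #
    #   >>> tags = SetField(TextField("tag"))
    #   >>> tags.ParseTextValue('[ "alpha", "beta" ]')
    #   ['alpha', 'beta']
    #
    # Anything other than a bracketed, comma-separated sequence of
    # string constants is rejected via the 'invalid' helper above.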
def ParseFormValue(self, request, name, attachment_stores):
values = []
redisplay = 0
# See if the user wants to add or remove elements from the set.
action = request[name]
# Loop over the entries for each of the elements, adding them to
# the set.
contained_field = self.__contained
element = 0
for element in xrange(int(request[name + "_count"])):
element_name = name + "_%d" % element
if not (action == "remove"
and request.get(element_name + "_remove") == "on"):
v, r = contained_field.ParseFormValue(request,
element_name,
attachment_stores)
values.append(v)
if r:
redisplay = 1
element += 1
# Remove entries from the request that might cause confusion
# when the page is redisplayed.
names = []
for n, v in request.items():
if n[:len(name)] == name:
names.append(n)
for n in names:
del request[n]
# Validate the values.
values = self.Validate(values)
# If the user requested another element, add to the set.
if action == "add":
redisplay = 1
# There's no need to validate this new value and it may in
# fact be dangerous to do so. For example, the default
# value for a ChoiceField might be the "nothing selected"
# value, which is not a valid selection. If the user does
# not actually select something, the problem will be
# reported when the form is submitted.
values.append(contained_field.GetDefaultValue())
elif action == "remove":
redisplay = 1
return (values, redisplay)
def GetValueFromDomNode(self, node, attachment_store):
# Make sure 'node' is a '<set>' element.
if node.nodeType != xml.dom.Node.ELEMENT_NODE \
or node.tagName != "set":
raise qm.QMException, \
qm.error("dom wrong tag for field",
name=self.GetName(),
right_tag="set",
wrong_tag=node.tagName)
# Use the contained field to extract values for the children of
# this node, which are the set elements.
contained_field = self.__contained
fn = lambda n, f=contained_field, s=attachment_store: \
f.GetValueFromDomNode(n, s)
values = map(fn,
filter(lambda n: n.nodeType == xml.dom.Node.ELEMENT_NODE,
node.childNodes))
return self.Validate(values)
########################################################################
class UploadAttachmentPage(web.DtmlPage):
"""DTML context for generating upload-attachment.dtml."""
def __init__(self,
attachment_store,
field_name,
encoding_name,
summary_field_name,
in_set=0):
"""Create a new page object.
'attachment_store' -- The AttachmentStore in which the new
attachment will be placed.
'field_name' -- The user-visible name of the field for which an
attachment is being uploaded.
'encoding_name' -- The name of the HTML input that should
contain the encoded attachment.
'summary_field_name' -- The name of the HTML input that should
contain the user-visible summary of the attachment.
'in_set' -- If true, the attachment is being added to an
attachment set field."""
web.DtmlPage.__init__(self, "attachment.dtml")
# Use a brand-new location for the attachment data.
self.location = attachment.make_temporary_location()
# Set up properties.
self.attachment_store_id = id(attachment_store)
self.field_name = field_name
self.encoding_name = encoding_name
self.summary_field_name = summary_field_name
self.in_set = in_set
def MakeSubmitUrl(self):
"""Return the URL for submitting this form."""
return self.request.copy(AttachmentField.upload_url).AsUrl()
class AttachmentField(Field):
"""A field containing a file attachment.
Note that the 'FormatValueAsHtml' method uses a popup upload form
    for uploading a new attachment.  The web server must be configured to
handle the attachment submission requests. See
'attachment.register_attachment_upload_script'."""
upload_url = "/attachment-upload"
"""The URL used to upload data for an attachment.
The upload request will include these query arguments:
'location' -- The location at which to store the attachment data.
'file_data' -- The attachment data.
"""
download_url = "/attachment-download"
"""The URL used to download an attachment.
The download request will include this query argument:
'location' -- The location in the attachment store from which to
retrieve the attachment data.
"""
def __init__(self, name = "", **properties):
"""Create an attachment field.
Sets the default value of the field to 'None'."""
# Perform base class initialization.
apply(Field.__init__, (self, name, None), properties)
def GetHelp(self):
return """
An attachment field. An attachment consists of an uploaded
file, which may be of any file type, plus a short description.
The name of the file, as well as the file's MIME type, are also
stored. The description is a single line of plain text.
An attachment need not be provided. The field may be left
empty."""
def GetHtmlHelp(self, edit=0):
help = Field.GetHtmlHelp(self)
if edit:
# In addition to the standard generated help, include
# additional instructions about using the HTML controls.
help = help + """
<hr noshade size="2">
<h4>Modifying This Field</h4>
<p>The text control describes the current value of this
field, displaying the attachment's description, file name,
and MIME type. If the field is empty, the text control
displays "None". The text control cannot be edited.</p>
<p>To upload a new attachment (replacing the previous one,
if any), click on the <i>Change...</i> button. To clear the
current attachment and make the field empty, click on the
<i>Clear</i> button.</p>
"""
return help
### Output methods.
def FormatValueAsText(self, value, columns=72):
return self._FormatSummary(value)
def FormatValueAsHtml(self, server, value, style, name=None):
field_name = self.GetName()
if value is None:
# The attachment field value may be 'None', indicating no
# attachment.
pass
elif isinstance(value, attachment.Attachment):
location = value.GetLocation()
mime_type = value.GetMimeType()
description = value.GetDescription()
file_name = value.GetFileName()
else:
raise ValueError, "'value' must be 'None' or an 'Attachment'"
        # Use the default form field name if none is specified.
if name is None:
name = self.GetHtmlFormFieldName()
if style == "full" or style == "brief":
if value is None:
return "None"
# Link the attachment description to the data itself.
download_url = web.WebRequest(self.download_url,
location=location,
mime_type=mime_type).AsUrl()
# Here's a nice hack. If the user saves the attachment to a
# file, browsers (some at least) guess the default file name
# from the URL by taking everything following the final
# slash character. So, we add this bogus-looking argument
# to fool the browser into using our file name.
download_url = download_url + \
"&=/" + urllib.quote_plus(file_name)
result = '<a href="%s">%s</a>' \
% (download_url, description)
# For the full style, display the MIME type.
if style == "full":
result = result + ' (%s)' % (mime_type)
return result
elif style == "new" or style == "edit":
# Some trickiness here.
#
# For attachment fields, the user specifies the file to
# upload via a popup form, which is shown in a new browser
# window. When that form is submitted, the attachment data
# is immediately uploaded to the server.
#
# The information that's stored for an attachment is made of
# four parts: a description, a MIME type, the file name, and
# the location of the data itself. The user enters these
# values in the popup form, which sets a hidden field on
# this form to an encoding of that information.
#
# Also, when the popup form is submitted, the attachment
# data is uploaded. By the time this form is submitted, the
# attachment data should be uploaded already. The uploaded
# attachment data is stored in the temporary attachment
# area; it's copied into the IDB when the issue revision is
# submitted.
summary_field_name = "_attachment" + name
# Fill in the description if there's already an attachment.
summary_value = 'value="%s"' % self._FormatSummary(value)
if value is None:
field_value = ""
else:
# We'll encode all the relevant information.
parts = (description, mime_type, location, file_name,
str(id(value.GetStore())))
# Each part is URL-encoded.
parts = map(urllib.quote, parts)
# The parts are joined into a semicolon-delimited list.
field_value = string.join(parts, ";")
field_value = 'value="%s"' % field_value
# Generate the popup upload page.
upload_page = \
UploadAttachmentPage(server.GetTemporaryAttachmentStore(),
self.GetTitle(),
name,
summary_field_name)()
# Generate controls for this form.
# A text control for the user-visible summary of the
# attachment. The "readonly" property isn't supported in
# Netscape, so prevent the user from typing into the form by
# forcing focus away from the control.
text_control = '''
<input type="text"
readonly
size="40"
name="%s"
onfocus="this.blur();"
%s>''' % (summary_field_name, summary_value)
# A button to pop up the upload form. It causes the upload
# page to appear in a popup window.
upload_button \
= server.MakeButtonForCachedPopup("Upload",
upload_page,
window_width=640,
window_height=320)
# A button to clear the attachment.
clear_button = '''
<input type="button"
size="20"
value=" Clear "
name="_clear_%s"
onclick="document.form.%s.value = 'None';
document.form.%s.value = '';" />
''' % (field_name, summary_field_name, name)
# A hidden control for the encoded attachment value. The
# popup upload form fills in this control.
hidden_control = '''
<input type="hidden"
name="%s"
%s>''' % (name, field_value)
# Now assemble the controls with some layout bits.
result = '''
%s%s<br>
%s%s
''' % (text_control, hidden_control, upload_button, clear_button)
return result
else:
raise ValueError, style
def MakeDomNodeForValue(self, value, document):
return attachment.make_dom_node(value, document)
def _FormatSummary(self, attachment):
"""Generate a user-friendly summary for 'attachment'.
This value is used when generating the form. It can't be
        edited."""
if attachment is None:
return "None"
else:
return "%s (%s; %s)" \
% (attachment.GetDescription(),
attachment.GetFileName(),
attachment.GetMimeType())
### Input methods.
def Validate(self, value):
# The value should be an instance of 'Attachment', or 'None'.
if value != None and not isinstance(value, attachment.Attachment):
raise ValueError, \
"the value of an attachment field must be an 'Attachment'"
return value
def ParseFormValue(self, request, name, attachment_stores):
encoding = request[name]
        # An empty string represents a missing attachment, which is OK.
if string.strip(encoding) == "":
            return (None, 0)
# The encoding is a semicolon-separated sequence indicating the
# relevant information about the attachment.
parts = string.split(encoding, ";")
# Undo the URL encoding of each component.
parts = map(urllib.unquote, parts)
# Unpack the results.
description, mime_type, location, file_name, store_id = parts
# Figure out which AttachmentStore corresponds to the id
# provided.
store = attachment_stores[int(store_id)]
# Create the attachment.
value = attachment.Attachment(mime_type, description,
file_name, location,
store)
return (self.Validate(value), 0)
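    # Illustrative note (not part of the original module): the hidden
    # form control filled in by the upload popup carries a value of the
    # form
    #
    #   description;mime_type;location;file_name;store_id
    #
    # with each part URL-quoted, for example (hypothetical values):
    #
    #   build%20log;text/plain;tmp12345;build.log;140213871
    #
    # 'ParseFormValue' above splits on ';', unquotes each part, looks up
    # the attachment store by its id, and rebuilds the 'Attachment'.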
def GetValueFromDomNode(self, node, attachment_store):
# Make sure 'node' is an "attachment" element.
if node.nodeType != xml.dom.Node.ELEMENT_NODE \
or node.tagName != "attachment":
raise qm.QMException, \
qm.error("dom wrong tag for field",
name=self.GetName(),
right_tag="attachment",
wrong_tag=node.tagName)
return self.Validate(attachment.from_dom_node(node, attachment_store))
########################################################################
class ChoiceField(TextField):
"""A 'ChoiceField' allows choosing one of several values.
The set of acceptable values can be determined when the field is
created or dynamically. The empty string is used as the "no
choice" value, and cannot therefore be one of the permitted
values."""
def GetItems(self):
"""Return the options from which to choose.
returns -- A sequence of strings, each of which will be
presented as a choice for the user."""
raise NotImplementedError
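    # Illustrative sketch (not part of the original module): a concrete
    # choice field only needs to supply 'GetItems'.  The subclass and
    # option strings below are hypothetical.
    #
    #   >>> class ColorField(ChoiceField):
    #   ...     def GetItems(self):
    #   ...         return ("red", "green", "blue")
    #   >>> ColorField("color").Validate("red")
    #   'red'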
def FormatValueAsHtml(self, server, value, style, name = None):
if style not in ("new", "edit"):
return qm.fields.TextField.FormatValueAsHtml(self, server,
value,
style, name)
# For an editable field, give the user a choice of available
# resources.
items = self.GetItems()
if name is None:
name = self.GetHtmlFormFieldName()
result = '<select name="%s">\n' % name
# HTML does not permit a "select" tag with no contained "option"
# tags. Therefore, we ensure that there is always one option to
# choose from.
result += ' <option value="">--Select--</option>\n'
# Add the choices for the ordinary options.
for r in self.GetItems():
result += ' <option value="%s"' % r
if r == value:
result += ' selected="selected"'
result += '>%s</option>\n' % r
result += "</select>\n"
return result
def Validate(self, value):
value = super(ChoiceField, self).Validate(value)
if value == "":
raise ValueError, "No choice specified for %s." % self.GetTitle()
return value
class EnumerationField(ChoiceField):
"""A field that contains an enumeral value.
The enumeral value is selected from an enumerated set of values.
An enumeral field uses the following properties:
enumeration -- A mapping from enumeral names to enumeral values.
Names are converted to strings, and values are stored as integers.
ordered -- If non-zero, the enumerals are presented to the user
ordered by value."""
def __init__(self,
name = "",
default_value=None,
enumerals=[],
**properties):
"""Create an enumeration field.
'enumerals' -- A sequence of strings of available
enumerals.
'default_value' -- The default value for this enumeration. If
'None', the first enumeral is used."""
# If we're handed an encoded list of enumerals, decode it.
if isinstance(enumerals, types.StringType):
enumerals = string.split(enumerals, ",")
# Make sure the default value is legitimate.
if not default_value in enumerals and len(enumerals) > 0:
default_value = enumerals[0]
# Perform base class initialization.
super(EnumerationField, self).__init__(name, default_value,
**properties)
# Remember the enumerals.
self.__enumerals = enumerals
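    # Illustrative usage sketch (not part of the original module): the
    # enumerals may be given either as a sequence or as a comma-separated
    # string, which the constructor above decodes.  The field name and
    # enumerals below are hypothetical.
    #
    #   >>> f = EnumerationField("state", enumerals = "open,closed")
    #   >>> f.GetItems()
    #   ['open', 'closed']
    #   >>> f.GetDefaultValue()
    #   'open'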
def GetItems(self):
"""Return a sequence of enumerals.
returns -- A sequence consisting of string enumerals objects, in
the appropriate order."""
return self.__enumerals
def GetHelp(self):
enumerals = self.GetItems()
help = """
An enumeration field. The value of this field must be one of a
preselected set of enumerals. The enumerals for this field are,
"""
for enumeral in enumerals:
help = help + ' * "%s"\n\n' % enumeral
help = help + '''
The default value of this field is "%s".
''' % str(self.GetDefaultValue())
return help
### Output methods.
def MakeDomNodeForValue(self, value, document):
# Store the name of the enumeral.
return xmlutil.create_dom_text_element(document, "enumeral",
str(value))
### Input methods.
def GetValueFromDomNode(self, node, attachment_store):
# Make sure 'node' is an '<enumeral>' element.
if node.nodeType != xml.dom.Node.ELEMENT_NODE \
or node.tagName != "enumeral":
raise qm.QMException, \
qm.error("dom wrong tag for field",
name=self.GetName(),
right_tag="enumeral",
wrong_tag=node.tagName)
# Extract the value.
return self.Validate(xmlutil.get_dom_text(node))
class BooleanField(EnumerationField):
"""A field containing a boolean value.
The enumeration contains two values: true and false."""
def __init__(self, name = "", default_value = None, **properties):
# Construct the base class.
EnumerationField.__init__(self, name, default_value,
["true", "false"], **properties)
def Validate(self, value):
if qm.common.parse_boolean(value):
value = "true"
else:
value = "false"
return super(BooleanField, self).Validate(value)
########################################################################
class TimeField(IntegerField):
"""A field containing a date and time.
    The date and time is stored as seconds since the start of the UNIX
epoch, UTC (the semantics of the standard 'time' function), with
one-second precision. User representations of 'TimeField' fields
    show one-minute precision."""
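    # Illustrative usage sketch (not part of the original module): field
    # values are plain integers (seconds since the epoch), while the text
    # form uses the 'YYYY-MM-DD HH:MM ZZZ' layout described above.  The
    # exact strings depend on the local time zone, so no output is shown.
    # The field name is hypothetical.
    #
    #   >>> f = TimeField("submitted")
    #   >>> text = f.FormatValueAsText(f.GetDefaultValue())
    #   >>> seconds = f.ParseTextValue(text)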
def __init__(self, name = "", **properties):
"""Create a time field.
        The default value for this field is 'None', which
corresponds to the current time when the field value is first
created."""
        # Perform base class initialization.
super(TimeField, self).__init__(name, None, **properties)
def GetHelp(self):
if time.daylight:
time_zones = "%s or %s" % time.tzname
else:
time_zones = time.tzname[0]
help = """
This field contains a time and date. The format for the
time and date is 'YYYY-MM-DD HH:MM ZZZ'. The 'ZZZ' field is
the time zone, and may be the local time zone (%s) or
"UTC".
If the date component is omitted, today's date is used. If
the time component is omitted, midnight is used. If the
time zone component is omitted, the local time zone is
used.
""" % time_zones
default_value = self.GetDefaultValue()
if default_value is None:
help = help + """
The default value for this field is the current time.
"""
else:
help = help + """
The default value for this field is %s.
""" % self.FormatValueAsText(default_value)
return help
### Output methods.
def FormatValueAsText(self, value, columns=72):
if value is None:
return "now"
else:
return qm.common.format_time(value, local_time_zone=1)
def FormatValueAsHtml(self, server, value, style, name=None):
value = self.FormatValueAsText(value)
if style == "new" or style == "edit":
return '<input type="text" size="8" name="%s" value="%s" />' \
% (name, value)
elif style == "full" or style == "brief":
# The time is formatted in three parts: the date, the time,
# and the time zone. Replace the space between the time and
# the time zone with a non-breaking space, so that if the
# time is broken onto two lines, it is broken between the
# date and the time.
date, time, time_zone = string.split(value, " ")
return date + " " + time + " " + time_zone
elif style == "hidden":
return '<input type="hidden" name="%s" value="%s" />' \
% (name, value)
else:
raise ValueError, style
### Input methods.
def ParseTextValue(self, value):
return self.Validate(qm.common.parse_time(value,
default_local_time_zone=1))
def GetDefaultValue(self):
default_value = super(TimeField, self).GetDefaultValue()
if default_value is not None:
return default_value
return int(time.time())
class PythonField(Field):
"""A 'PythonField' stores a Python value.
All 'PythonField's are computed; they are never written out, nor can
they be specified directly by users. They are used in situations
    where the value of the field is specified programmatically by the
system."""
def __init__(self, name = "", default_value = None):
Field.__init__(self, name, default_value, computed = "true")
########################################################################
# Local Variables:
# mode: python
# indent-tabs-mode: nil
# fill-column: 72
# End:
|
MentorEmbedded/qmtest
|
qm/fields.py
|
Python
|
gpl-2.0
| 68,605
|
import wx
import time
class MyForm(wx.Frame):
def __init__(self):
wx.Frame.__init__(self, None, wx.ID_ANY, "Timer Tutorial 1",
size=(500,500))
        # Add a panel so it looks correct on all platforms
panel = wx.Panel(self, wx.ID_ANY)
self.timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.update, self.timer)
self.toggleBtn = wx.Button(panel, wx.ID_ANY, "Start")
self.toggleBtn.Bind(wx.EVT_BUTTON, self.onToggle)
def onToggle(self, event):
btnLabel = self.toggleBtn.GetLabel()
if btnLabel == "Start":
print("starting timer...")
self.timer.Start(1000)
self.toggleBtn.SetLabel("Stop")
else:
print("timer stopped!")
self.timer.Stop()
self.toggleBtn.SetLabel("Start")
def update(self, event):
print("\nupdated: ",)
print(time.ctime())
# Run the program
if __name__ == "__main__":
app = wx.App()
    frame = MyForm()
    frame.Show()
app.MainLoop()
|
m-takeuchi/ilislife_wxp
|
test/test_timer.py
|
Python
|
mit
| 1,065
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
WebJournal - Main Public interface of the WebJournals
"""
import datetime
import time
from invenio.bibformat_engine import \
BibFormatObject, \
format_with_format_template
from invenio.errorlib import register_exception
from invenio.config import \
CFG_SITE_URL, \
CFG_ACCESS_CONTROL_LEVEL_SITE
from invenio.urlutils import redirect_to_url
from invenio.webuser import collect_user_info
from invenio.webjournal_config import \
InvenioWebJournalTemplateNotFoundError
from invenio.webjournal_utils import \
get_article_page_from_cache, \
cache_article_page, \
get_current_issue, \
get_journal_template, \
get_release_datetime, \
get_journal_articles, \
get_unreleased_issue_hiding_mode, \
issue_is_later_than, \
datetime_to_issue
def perform_request_index(req, journal_name, issue_number, ln,
category, editor=False, verbose=0):
"""
Central logic function for index pages.
Brings together format templates and MARC rules from the config, with
the requested index page, given by the url parameters.
From config:
- page template for index pages -> formatting
- MARC rule list -> Category Navigation
- MARC tag used for issue numbers -> search (later in the format
elements)
Uses BibFormatObject and format_with_format_template to produce the
required HTML.
"""
current_issue = get_current_issue(ln, journal_name)
if not get_release_datetime(issue_number, journal_name):
# Unreleased issue. Display latest released issue?
unreleased_issues_mode = get_unreleased_issue_hiding_mode(journal_name)
if not editor and \
(unreleased_issues_mode == 'all' or \
(unreleased_issues_mode == 'future' and \
issue_is_later_than(issue_number, current_issue))):
redirect_to_url(req, "%s/journal/%s/%s/%s?ln=%s" % \
(CFG_SITE_URL,
journal_name,
current_issue.split('/')[1],
current_issue.split('/')[0],
ln))
try:
index_page_template = get_journal_template('index',
journal_name,
ln)
except InvenioWebJournalTemplateNotFoundError, e:
register_exception(req=req)
return e.user_box(req)
temp_marc = '''<record>
<controlfield tag="001">0</controlfield>
</record>'''
# create a record and get HTML back from bibformat
user_info = collect_user_info(req)
bfo = BibFormatObject(0, ln=ln, xml_record=temp_marc,
user_info=user_info)
bfo.req = req
verbosity = 0
if editor:
# Increase verbosity only for editors/admins
verbosity = verbose
html = format_with_format_template(index_page_template,
bfo,
verbose=verbosity)
return html
def perform_request_article(req, journal_name, issue_number, ln,
category, recid, editor=False, verbose=0):
"""
Central logic function for article pages.
Loads the format template for article display and displays the requested
article using BibFormat.
'Editor' mode generates edit links on the article view page and disables
caching.
"""
current_issue = get_current_issue(ln, journal_name)
if not get_release_datetime(issue_number, journal_name):
# Unreleased issue. Display latest released issue?
unreleased_issues_mode = get_unreleased_issue_hiding_mode(journal_name)
if not editor and \
(unreleased_issues_mode == 'all' or \
(unreleased_issues_mode == 'future' and \
issue_is_later_than(issue_number, current_issue))):
redirect_to_url(req, "%s/journal/%s/%s/%s?ln=%s" % \
(CFG_SITE_URL,
journal_name,
current_issue.split('/')[1],
current_issue.split('/')[0],
ln))
try:
index_page_template = get_journal_template('detailed',
journal_name,
ln)
except InvenioWebJournalTemplateNotFoundError, e:
register_exception(req=req)
return e.user_box(req)
user_info = collect_user_info(req)
bfo = BibFormatObject(recid, ln=ln, user_info=user_info)
bfo.req = req
# if it is cached, return it
cached_html = get_article_page_from_cache(journal_name, category,
recid, issue_number, ln,
bfo)
if cached_html and not editor:
return cached_html
# Check that this recid is indeed an article
is_article = False
articles = get_journal_articles(journal_name, issue_number, category)
for order, recids in articles.iteritems():
if recid in recids:
is_article = True
break
if not is_article:
redirect_to_url(req, "%s/journal/%s/%s/%s?ln=%s" % \
(CFG_SITE_URL,
journal_name,
issue_number.split('/')[1],
issue_number.split('/')[0],
ln))
# create a record and get HTML back from bibformat
verbosity = 0
if editor:
# Increase verbosity only for editors/admins
verbosity = verbose
html_out = format_with_format_template(index_page_template,
bfo,
verbose=verbosity)
# cache if not in editor mode, and if database is not down
if not editor and not CFG_ACCESS_CONTROL_LEVEL_SITE == 2:
cache_article_page(html_out, journal_name, category,
recid, issue_number, ln)
return html_out
def perform_request_contact(req, ln, journal_name, verbose=0):
"""
Display contact information
"""
try:
contact_page_template = get_journal_template('contact',
journal_name,
ln)
except InvenioWebJournalTemplateNotFoundError, e:
register_exception(req=req)
return e.user_box(req)
user_info = collect_user_info(req)
temp_marc = '''<record>
<controlfield tag="001">0</controlfield>
</record>'''
bfo = BibFormatObject(0,
ln=ln,
xml_record=temp_marc,
user_info=user_info)
bfo.req = req
html = format_with_format_template(contact_page_template,
bfo)
return html
def perform_request_popup(req, ln, journal_name, record):
"""
Display the popup window
"""
try:
popup_page_template = get_journal_template('popup',
journal_name,
ln)
except InvenioWebJournalTemplateNotFoundError, e:
register_exception(req=req)
return e.user_box(req)
user_info = collect_user_info(req)
bfo = BibFormatObject(record, ln=ln, user_info=user_info)
bfo.req = req
html = format_with_format_template(popup_page_template,
bfo)
return html
def perform_request_search(req, journal_name, ln,
archive_issue, archive_select,
archive_date, archive_search, verbose=0):
"""
Logic for the search / archive page.
"""
try:
search_page_template = get_journal_template('search',
journal_name,
ln)
except InvenioWebJournalTemplateNotFoundError, e:
register_exception(req=req)
return e.user_box(req)
if archive_select == "False" and archive_search == "False":
temp_marc = '''<record>
<controlfield tag="001">0</controlfield>
</record>'''
user_info = collect_user_info(req)
bfo = BibFormatObject(0,
ln=ln,
xml_record=temp_marc,
user_info=user_info)
bfo.req = req
html = format_with_format_template(search_page_template,
bfo,
verbose=verbose)
return html
elif archive_select == "Go":
redirect_to_url(req, "%s/journal/%s/%s/%s?ln=%s" % (CFG_SITE_URL,
journal_name,
archive_issue.split('/')[1],
archive_issue.split('/')[0],
ln))
elif archive_search == "Go":
try:
archive_issue_time = datetime.datetime(*time.strptime(archive_date, "%d/%m/%Y")[0:5])
archive_issue = datetime_to_issue(archive_issue_time, journal_name)
if not archive_issue:
archive_issue = get_current_issue(ln, journal_name)
except ValueError:
archive_issue = get_current_issue(ln, journal_name)
redirect_to_url(req, "%s/journal/%s/%s/%s?ln=%s" % (CFG_SITE_URL,
journal_name,
archive_issue.split('/')[1],
archive_issue.split('/')[0],
ln))
|
AlbertoPeon/invenio
|
modules/webjournal/lib/webjournal.py
|
Python
|
gpl-2.0
| 10,968
|
# Copyright 2013 Canonical Ltd.
# This software is licensed under the GNU Affero General Public License
# version 3 (see the file LICENSE).
import logging
import sst.actions
import u1testutils.sso.sst.pages
from base64 import b32decode
from urlparse import urlparse, parse_qs
from u1testutils.sst import log_action
class PageWithUserSubheader(u1testutils.sst.Page):
def __init__(self, open_page=False):
self.subheader = UserSubheader()
super(PageWithUserSubheader, self).__init__(open_page)
class UserSubheader(u1testutils.sso.sst.pages.UserSubheader):
"""The subheader of all the SSO pages.
Extends the class in u1testutils with all the actions that are only needed
by SSO tests.
"""
@log_action(logging.info)
def go_to_authentication_devices(self):
sst.actions.click_link('devices-link')
return YourAuthenticationDevices()
class PageWithUsernameInTitle(PageWithUserSubheader):
def assert_title(self):
user_name = self.subheader.get_user_name()
title = self.title.format(user_name)
sst.actions.assert_title(title)
class LogIn(u1testutils.sso.sst.pages.LogIn):
"""Log in page of the Ubuntu Single Sign On website.
Extends the class in u1testutils to return the Your Account page with all
the actions available.
"""
@log_action(logging.info)
def log_in_to_site_recognized(self, user=None):
"""Fill the log in form and continue to the site that requested it.
Keyword arguments:
user -- The user credentials. It must have the attributes email and
password. If None is passed as the user, it means that the user
has already started session on the identity provider and it's not
necessary to enter the credentials again.
"""
self._log_in(user)
return YourAccount(user.full_name)
class YourAccount(PageWithUserSubheader):
"""Your account page of the Ubuntu Single Sign On website.
Extends the class in u1testutils to use the subheader with all the actions
available.
"""
title = "{0}'s details"
url_path = '/'
qa_anchor = 'edit_account'
def __init__(self, user_name, open_page=False):
self.title = self.title.format(user_name)
super(YourAccount, self).__init__(open_page)
class YourEmailAddresses(u1testutils.sso.sst.pages.PageWithUserSubheader):
title = "{0}'s email addresses"
url_path = '/+emails'
qa_anchor = 'account_emails'
def assert_title(self):
user_name = self.subheader.get_user_name()
sst.actions.assert_title(self.title.format(user_name))
@log_action(logging.info)
def add_email(self, email_address):
sst.actions.write_textfield('id_newemail', email_address)
sst.actions.click_button(sst.actions.get_element(name='continue'))
@log_action(logging.info)
def delete_email(self):
remove_link = sst.actions.get_element_by_css(
'*[data-qa-id="remove_verified_email"]')
sst.actions.click_link(remove_link)
return DeleteEmail()
class EnterConfirmationCode(u1testutils.sso.sst.pages.PageWithUserSubheader):
title = 'Enter confirmation code'
url_path = '/+enter_token'
@log_action(logging.info)
def confirm(self, confirmation_code, email_address):
self._fill_confirmation_form(confirmation_code, email_address)
self._continue()
return u1testutils.sso.sst.pages.CompleteEmailValidation()
def _fill_confirmation_form(self, confirmation_code, email_address):
sst.actions.write_textfield('id_confirmation_code', confirmation_code)
sst.actions.write_textfield('id_email', email_address)
def _continue(self):
sst.actions.click_button(sst.actions.get_element(name='continue'))
@log_action(logging.info)
def confirm_with_error(self, confirmation_code, email_address):
self._fill_confirmation_form(confirmation_code, email_address)
self._continue()
return self
class DeleteEmail(u1testutils.sso.sst.pages.PageWithUserSubheader):
title = 'Delete unverified email'
url_path = '/+remove-email'
@log_action(logging.info)
def confirm(self):
confirm_button = sst.actions.get_element(name='delete')
sst.actions.click_button(confirm_button)
user_name = self.subheader.get_user_name()
return YourEmailAddresses(user_name)
class ResetPassword(u1testutils.sso.sst.pages.PageWithAnonymousSubheader):
title = 'Reset password'
url_path = '/+forgot_password'
headings1 = [
'Ubuntu Single Sign On',
'Reset your Ubuntu Single Sign On password'
]
qa_anchor = 'forgot_password_step_1'
@log_action(logging.info)
def request_password_reset(self, email_address):
sst.actions.write_textfield('id_email', email_address)
# Even though the recaptcha field is ignored for our tests, we do
# want to verify that it is on the page.
sst.actions.write_textfield('recaptcha_response_field', 'ignored')
sst.actions.click_button(sst.actions.get_element(name='continue'))
class LogInFromRedirect(u1testutils.sso.sst.pages.LogIn):
url_path = '/.*/\+decide'
is_url_path_regex = True
headings2 = ['Log in', 'Are you new?']
# Devices pages.
class YourAuthenticationDevices(PageWithUsernameInTitle):
title = "{0}'s devices"
url_path = '/device-list'
headings1 = ['Ubuntu Single Sign On', 'Your authentication devices']
qa_anchor = 'device_list'
def get_devices(self):
"""Get a list with the name of the devices added by the user."""
if sst.actions.exists_element(id='device-list'):
get_text = lambda x: x.text
return map(
get_text, sst.actions.get_elements_by_css(
'#device-list td.name'))
else:
return []
@log_action(logging.info)
def add_new_authentication_device(self):
add_link = sst.actions.get_element_by_css(
'*[data-qa-id="add_new_device"]'
)
sst.actions.click_link(add_link)
return AddNewAuthenticationDevice()
@log_action(logging.info)
def delete_authentication_device(self, index=0):
delete_link = sst.actions.get_elements_by_css(
'*[data-qa-id="delete_device"]')
sst.actions.click_link(delete_link[index])
return DeleteAuthenticationDevice()
def is_warning_displayed(self):
try:
warning_message = sst.actions.get_element_by_css(
'#missing_backup_device')
return warning_message.is_displayed()
except AssertionError:
return False
class DeleteAuthenticationDevice(PageWithUsernameInTitle):
title = "{0}'s devices"
url_path = '/device-removal/.*'
is_url_path_regex = True
headings1 = ['Ubuntu Single Sign On', 'Delete device?']
qa_anchor = 'device_removal'
@log_action(logging.info)
def cancel(self):
cancel_link = sst.actions.get_element_by_css(
'*[data-qa-id="cancel_deleting_this_device"]'
)
sst.actions.click_link(cancel_link)
return YourAuthenticationDevices()
@log_action(logging.info)
def confirm_delete_device(self):
delete_anchor = sst.actions.get_element_by_css(
'*[data-qa-id="delete_this_device"]')
sst.actions.click_element(delete_anchor)
return YourAuthenticationDevices()
class AddNewAuthenticationDevice(PageWithUsernameInTitle):
title = "{0}'s devices"
url_path = '/device-addition'
headings1 = ['Ubuntu Single Sign On', 'Add a new authentication device']
qa_anchor = 'device_addition'
@log_action(logging.info)
def cancel(self):
cancel_link = sst.actions.get_element_by_css('*[data-qa-id="cancel"]')
sst.actions.click_link(cancel_link)
return YourAuthenticationDevices()
@log_action(logging.info)
def get_selected_device(self):
identifiers = ['type_google', 'type_yubi', 'type_generic']
if 'paper_device' in sst.config.flags:
identifiers.append('type_paper')
for identifier in identifiers:
radio = sst.actions.get_element(id=identifier)
if radio.is_selected():
return identifier
def is_paper_device_displayed(self):
try:
paper_device_radio = sst.actions.get_element(id='type_paper')
return paper_device_radio.is_displayed()
except AssertionError:
return False
@log_action(logging.info)
def add_generic_device(self):
self._add_device('type_generic')
return AddGenericDevice()
@log_action(logging.info)
def add_google_device(self):
self._add_device('type_google')
return AddGoogleDevice()
@log_action(logging.info)
def add_yubikey_device(self):
self._add_device('type_yubi')
return AddYubikeyDevice()
@log_action(logging.info)
def add_paper_device(self):
self._add_device('type_paper')
return PaperDevice()
def _add_device(self, device_radio_identifier):
sst.actions.set_radio_value(device_radio_identifier)
add_button = sst.actions.get_element(
tag='button', text_regex='Add device')
sst.actions.click_button(add_button)
class AddDevice(PageWithUsernameInTitle):
title = "{0}'s devices"
url_path = '/device-addition'
headings1 = ['Ubuntu Single Sign On', 'Add device']
qa_anchor = 'device_addition'
@log_action(logging.info)
def add_device(self, name, one_time_password):
self._fill_device_form(name, one_time_password)
self._click_add_device()
return YourAuthenticationDevices()
def _fill_device_form(self, name, one_time_password):
if name is not None:
sst.actions.write_textfield(self._get_name_text_field(), name)
if one_time_password is not None:
sst.actions.write_textfield(
self._get_one_time_password_text_field(), one_time_password)
def _click_add_device(self):
add_button = sst.actions.get_element_by_css(
'*[data-qa-id="add_generic_device"]')
sst.actions.click_button(add_button)
@log_action(logging.info)
def add_device_with_errors(self, name, one_time_password):
self._fill_device_form(name, one_time_password)
self._click_add_device()
return self
def _get_name_text_field(self):
return sst.actions.get_element(tag='input', name='name')
def _get_one_time_password_text_field(self):
return sst.actions.get_element(tag='input', name='otp')
def get_form_values(self):
name = self._get_name_text_field().get_attribute('value')
one_time_password_text_field = self._get_one_time_password_text_field()
one_time_password = one_time_password_text_field.get_attribute('value')
return name, one_time_password
def get_form_errors(self):
try:
name_error = sst.actions.get_element(id='name-error').text
except AssertionError:
name_error = None
try:
one_time_password_error = sst.actions.get_element(
id='otp-error').text
except AssertionError:
one_time_password_error = None
return name_error, one_time_password_error
@log_action(logging.info)
def cancel(self):
cancel_link = sst.actions.get_element_by_css('*[data-qa-id="cancel"]')
sst.actions.click_link(cancel_link)
return YourAuthenticationDevices()
class AddGenericDevice(AddDevice):
qa_anchor = 'generic_device_addition'
def get_key(self):
return sst.actions.get_element(name='hex_key').get_attribute('value')
class AddGoogleDevice(AddDevice):
def get_key(self, email):
return self._get_key_from_qrcode(email)
def _get_key_from_qrcode(self, email):
img = sst.actions.get_element(tag='img', css_class='qrcode')
src = img.get_attribute('src')
# check the url is well formed
url = urlparse(src)
assert url.scheme == 'https', "incorrect google charts protocol"
msg = "incorrect google charts domain"
assert url.netloc == 'chart.googleapis.com', msg
qs = parse_qs(url.query)['chl'][0]
otpauth = urlparse(qs)
assert email in otpauth.path
# python2.7.3 on quantal has a backport from 2.7 trunk (presumably
# will be 2.7.4) and now urlparse correctly handles query string on
# *all* url types
if otpauth.query:
# urlparse has handled query string
query = otpauth.query
else:
# we need to handle query string parsing
query = otpauth.path.split('?')[1]
b32_key = parse_qs(query)['secret'][0]
aes_key = b32decode(b32_key).encode('hex')
return aes_key
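    # Illustrative note (not part of the original module): the QR code
    # 'src' parsed above is a Google Charts URL whose 'chl' query
    # parameter holds an otpauth URI, for example (hypothetical values):
    #
    #   otpauth://totp/user@example.com?secret=GEZDGNBVGY3TQOJQ
    #
    # The base32 'secret' is decoded and re-encoded as hex to obtain the
    # key returned by '_get_key_from_qrcode'.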
class AddYubikeyDevice(AddDevice):
def get_key(self):
return sst.actions.get_element(name='hex_key').get_attribute('value')
# TODO make a new test_open_add_yubikey_page on the yubikey page object,
# add two functions: is_warning_displayed and get_warning_message
# and assert both of them on the new test. This test is really small and
# gives no real value to a user, so it's a perfect candidate to be
# converted into a django unit test.
def assert_warning(self):
# Check that the YubiKey warning is showing
warning = ('Warning: The YubiKey is shipped with a credential in the '
'short-press slot')
sst.actions.assert_element(css_class='warning', text_regex=warning)
class PaperDevice(PageWithUsernameInTitle):
title = "{0}'s devices"
url_path = '/device-print/.*'
is_url_path_regex = True
headings1 = ['Ubuntu Single Sign On', 'Printable backup codes']
# TODO This test is really small and gives no real value to a user, so it's
# a perfect candidate to be converted into a django unit test.
def assert_codes_present(self):
# codelist has list of N codes
sst.actions.assert_element('*[data-qa-id="codelist"]')
# TODO This test is really small and gives no real value to a user, so it's
# a perfect candidate to be converted into a django unit test.
def assert_print_button_visible(self):
sst.actions.assert_element('*[data-qa-id="print_btn"]')
@log_action(logging.info)
def go_back_to_device_list(self):
device_list_link = sst.actions.get_element_by_css(
'*[data-qa-id="go_back"]')
sst.actions.click_link(device_list_link)
return YourAuthenticationDevices()
@log_action(logging.info)
def generate_new_codes(self):
generate_link = sst.actions.get_element_by_css(
'*[data-qa-id="generate_codes"]')
sst.actions.click_link(generate_link)
return GenerateNewPaperCodes()
def get_first_code(self):
return sst.actions.get_elements_by_css(
'*[data-qa-id="codelist"]')[0].text
# TODO migrate the paper device functions below
#def store_paper_device(self, name='Printable Backup Codes'):
# acceptance.devices.store_paper_device(name)
#def update_paper_device(self, name='Printable Backup Codes'):
# acceptance.devices.update_paper_device(name)
class GenerateNewPaperCodes(PageWithUsernameInTitle):
title = "{0}'s devices"
url_path = '/device-generate/.*'
is_url_path_regex = True
headings1 = ['Ubuntu Single Sign On', 'Generate new codes']
@log_action(logging.info)
def confirm_new_codes(self):
confirm_button = sst.actions.get_element_by_css(
'*[data-qa-id="confirm-codes"]')
sst.actions.click_button(confirm_button)
return PaperDevice()
@log_action(logging.info)
def cancel(self):
cancel_link = sst.actions.get_element_by_css('*[data-qa-id="cancel"]')
sst.actions.click_link(cancel_link)
return PaperDevice()
|
miing/mci_migo
|
acceptance/pages.py
|
Python
|
agpl-3.0
| 16,073
|
""" Content Selection Subsystem web interface
This module implements the CSS's web interface. The interface exposes endpoints
for voting on requests and displaying voting results.
"""
from __future__ import unicode_literals, print_function
from google.appengine.ext import ndb
from utils.routes import RedirectMixin, Route, HtmlRoute
from werkzeug.urls import url_unquote_plus
from rh.db import Request, Playlist
class WebUIVote(RedirectMixin, Route):
""" Handler that facilitates content suggestion voting """
name = 'css_webui_vote'
path = '/requests/<int:request_id>/suggestions/<url>'
def get_redirect_url(self):
return self.url_for('cds_webui_request',
request_id=self.kwargs['request_id'])
def PATCH(self, request_id, url):
url = url_unquote_plus(url)
self.req = ndb.Key('Request', request_id).get()
if self.req is None:
self.abort(404, 'No such request')
for c in self.req.content_suggestions:
if url == c.url:
c.votes += 1
self.req.put()
return self.redirect()
self.abort(404, 'No such content suggestion')
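    # Illustrative note (not part of the original module): the voting
    # endpoint above is addressed by request id and URL-quoted suggestion
    # URL, so a vote is cast with a request such as (hypothetical values):
    #
    #   PATCH /requests/42/suggestions/http%3A%2F%2Fexample.com%2Farticle
    #
    # A successful vote increments the suggestion's counter and redirects
    # back to the request's detail page.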
class WebUIPool(HtmlRoute):
""" Return page with content pool listing """
name = 'css_webui_pool'
path = '/pool'
template_name = 'css/pool.html'
def get_context(self):
return {'pool': Request.fetch_content_pool()}
class WebUIPlaylist(RedirectMixin, HtmlRoute):
""" Add a request's top suggestion to the playlist """
name = 'css_webui_playlist'
path = '/playlist'
template_name = 'css/playlist.html'
def get_context(self):
return {'playlist': Playlist.get_current()}
def get_redirect_url(self):
return self.request.path
def PUT(self):
self.req = ndb.Key('Request',
int(self.request.form['request_id'])).get()
if not self.req:
self.abort(400, 'No request matching the content URL')
if not self.req.top_suggestion:
self.abort(400, 'This request is not a candidate for playlist')
Playlist.add_to_playlist(self.req)
return self.redirect()
|
Outernet-Project/outernet-csds
|
css/webui.py
|
Python
|
gpl-3.0
| 2,196
|
#!/usr/bin/env python
from . import base_repository
from .. import helper
class WordMeaningSoundRepository(base_repository.BaseRepository):
def get_sounds(self, user_id, word_meaning_id):
custom_sounds = self._call_proc_query_all(
"WordMeaningSound_Sounds_S", [user_id, word_meaning_id])
return helper.list_comprehension_by_index(custom_sounds, 0)
def link(self, user_id, word_meaning_id, sound_id):
self._call_proc_non_query("WordMeaningSound_I", [user_id, word_meaning_id, sound_id])
def unlink(self, user_id, word_meaning_id, sound_id):
self._call_proc_non_query("WordMeaningSound_D", [user_id, word_meaning_id, sound_id])
|
hiearth/langstyle
|
langstyle/database/word_meaning_sound_repository.py
|
Python
|
gpl-2.0
| 687
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-22 22:58
from __future__ import unicode_literals
from django.db import migrations
def image_sets(apps, schema_editor):
Post = apps.get_model("blog", "Post")
ImageSet = apps.get_model("pinax_images", "ImageSet")
db_alias = schema_editor.connection.alias
for post in Post.objects.using(db_alias).all():
if post.image_set is None:
post.image_set = ImageSet.objects.using(db_alias).create(created_by=post.author)
post.save()
class Migration(migrations.Migration):
dependencies = [
('blog', '0011_post_image_set'),
]
operations = [
migrations.RunPython(image_sets)
]
|
pinax/pinax-blog
|
pinax/blog/migrations/0012_set_default_imagesets.py
|
Python
|
mit
| 713
|
# -*- coding: utf-8 -*-
from openprocurement.tender.belowthreshold.views.lot import TenderLotResource
from openprocurement.api.utils import (
get_now,
json_view,
context_unpack,
)
from openprocurement.tender.core.validation import (
validate_lot_data,
validate_patch_lot_data,
validate_tender_period_extension,
validate_lot_operation_not_in_allowed_status
)
from openprocurement.tender.core.utils import (
save_tender,
apply_patch,
optendersresource,
calculate_business_date
)
from openprocurement.tender.openua.constants import TENDERING_EXTRA_PERIOD
@optendersresource(name='aboveThresholdUA:Tender Lots',
collection_path='/tenders/{tender_id}/lots',
path='/tenders/{tender_id}/lots/{lot_id}',
procurementMethodType='aboveThresholdUA',
description="Tender Ua lots")
class TenderUaLotResource(TenderLotResource):
@json_view(content_type="application/json", validators=(validate_lot_data, validate_lot_operation_not_in_allowed_status, validate_tender_period_extension), permission='edit_tender')
def collection_post(self):
"""Add a lot
"""
lot = self.request.validated['lot']
lot.date = get_now()
tender = self.request.validated['tender']
tender.lots.append(lot)
if self.request.authenticated_role == 'tender_owner':
tender.invalidate_bids_data()
if save_tender(self.request):
self.LOGGER.info('Created tender lot {}'.format(lot.id),
extra=context_unpack(self.request, {'MESSAGE_ID': 'tender_lot_create'}, {'lot_id': lot.id}))
self.request.response.status = 201
self.request.response.headers['Location'] = self.request.route_url('{}:Tender Lots'.format(tender.procurementMethodType), tender_id=tender.id, lot_id=lot.id)
return {'data': lot.serialize("view")}
@json_view(content_type="application/json", validators=(validate_patch_lot_data, validate_lot_operation_not_in_allowed_status, validate_tender_period_extension), permission='edit_tender')
def patch(self):
"""Update of lot
"""
if self.request.authenticated_role == 'tender_owner':
self.request.validated['tender'].invalidate_bids_data()
if apply_patch(self.request, src=self.request.context.serialize()):
self.LOGGER.info('Updated tender lot {}'.format(self.request.context.id),
extra=context_unpack(self.request, {'MESSAGE_ID': 'tender_lot_patch'}))
return {'data': self.request.context.serialize("view")}
@json_view(permission='edit_tender', validators=(validate_lot_operation_not_in_allowed_status, validate_tender_period_extension))
def delete(self):
"""Lot deleting
"""
lot = self.request.context
res = lot.serialize("view")
tender = self.request.validated['tender']
tender.lots.remove(lot)
if self.request.authenticated_role == 'tender_owner':
tender.invalidate_bids_data()
if save_tender(self.request):
self.LOGGER.info('Deleted tender lot {}'.format(self.request.context.id),
extra=context_unpack(self.request, {'MESSAGE_ID': 'tender_lot_delete'}))
return {'data': res}
|
openprocurement/openprocurement.tender.openua
|
openprocurement/tender/openua/views/lot.py
|
Python
|
apache-2.0
| 3,349
|
import _plotly_utils.basevalidators
class ColorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="colorsrc", parent_name="waterfall.insidetextfont", **kwargs
):
super(ColorsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs
)
|
plotly/plotly.py
|
packages/python/plotly/plotly/validators/waterfall/insidetextfont/_colorsrc.py
|
Python
|
mit
| 427
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Copyright 2016,暗夜幽灵 <darknightghost.cn@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
class ConfilctDepecncency(Exception):
def __init__(self, t1_name, t2_name):
self.t1_name = t1_name
self.t2_name = t2_name
def __str__(self):
return "Conflict target : \"%s\" \"%s\"."%(self.t1_name, self.t2_name)
class MissingDepecncency(Exception):
def __init__(self, deps):
self.missing_names = ""
for t in deps:
if self.missing_names == "":
self.missing_names = self.missing_names + "\"%s\""%(t.name)
else:
self.missing_names = self.missing_names + ", \"%s\""%(t.name)
def __str__(self):
return "Missing depencencies : %s."%(self.missing_names)
class MissingSourceFile(Exception):
def __init__(self, path):
self.path = path
def __str__(self):
return "Missing source file : \"%s\"."%(self.path)
class SourceFileIsDir(Exception):
def __init__(self, path):
self.path = path
def __str__(self):
return "Source file : \"%s\" is a directory."%(self.path)
|
darknightghost/fake-serial
|
cfg/err.py
|
Python
|
gpl-3.0
| 1,845
|
import json
from django.contrib.postgres import forms, lookups
from django.contrib.postgres.fields.array import ArrayField
from django.core import exceptions
from django.db.models import Field, TextField, Transform
from django.utils import six
from django.utils.translation import ugettext_lazy as _
__all__ = ['HStoreField']
class HStoreField(Field):
empty_strings_allowed = False
description = _('Map of strings to strings')
default_error_messages = {
'not_a_string': _('The value of "%(key)s" is not a string.'),
}
def db_type(self, connection):
return 'hstore'
def get_transform(self, name):
transform = super(HStoreField, self).get_transform(name)
if transform:
return transform
return KeyTransformFactory(name)
def validate(self, value, model_instance):
super(HStoreField, self).validate(value, model_instance)
for key, val in value.items():
if not isinstance(val, six.string_types):
raise exceptions.ValidationError(
self.error_messages['not_a_string'],
code='not_a_string',
params={'key': key},
)
def to_python(self, value):
if isinstance(value, six.string_types):
value = json.loads(value)
return value
def value_to_string(self, obj):
value = self._get_val_from_obj(obj)
return json.dumps(value)
def formfield(self, **kwargs):
defaults = {
'form_class': forms.HStoreField,
}
defaults.update(kwargs)
return super(HStoreField, self).formfield(**defaults)
HStoreField.register_lookup(lookups.DataContains)
HStoreField.register_lookup(lookups.ContainedBy)
@HStoreField.register_lookup
class HasKeyLookup(lookups.PostgresSimpleLookup):
lookup_name = 'has_key'
operator = '?'
@HStoreField.register_lookup
class HasKeysLookup(lookups.PostgresSimpleLookup):
lookup_name = 'has_keys'
operator = '?&'
class KeyTransform(Transform):
output_field = TextField()
def __init__(self, key_name, *args, **kwargs):
super(KeyTransform, self).__init__(*args, **kwargs)
self.key_name = key_name
def as_sql(self, compiler, connection):
lhs, params = compiler.compile(self.lhs)
return "%s -> '%s'" % (lhs, self.key_name), params
class KeyTransformFactory(object):
def __init__(self, key_name):
self.key_name = key_name
def __call__(self, *args, **kwargs):
return KeyTransform(self.key_name, *args, **kwargs)
@HStoreField.register_lookup
class KeysTransform(lookups.FunctionTransform):
lookup_name = 'keys'
function = 'akeys'
output_field = ArrayField(TextField())
@HStoreField.register_lookup
class ValuesTransform(lookups.FunctionTransform):
lookup_name = 'values'
function = 'avals'
output_field = ArrayField(TextField())
|
BitWriters/Zenith_project
|
zango/lib/python3.5/site-packages/django/contrib/postgres/fields/hstore.py
|
Python
|
mit
| 2,945
|
# -*- coding: utf-8 -*-
"""Test i18n module."""
#
# (C) Pywikibot team, 2007-2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
import sys
import pywikibot
from pywikibot import i18n, bot, plural
from tests.aspects import unittest, TestCase, DefaultSiteTestCase, PwbTestCase
if sys.version_info[0] == 3:
basestring = (str, )
class TestTranslate(TestCase):
"""Test translate method."""
net = False
def setUp(self):
self.msg_localized = {'en': u'test-localized EN',
'nl': u'test-localized NL',
'fy': u'test-localized FY'}
self.msg_semi_localized = {'en': u'test-semi-localized EN',
'nl': u'test-semi-localized NL'}
self.msg_non_localized = {'en': u'test-non-localized EN'}
self.msg_no_english = {'ja': u'test-no-english JA'}
super(TestTranslate, self).setUp()
def testLocalized(self):
"""Test fully localized translations."""
self.assertEqual(i18n.translate('en', self.msg_localized,
fallback=True),
u'test-localized EN')
self.assertEqual(i18n.translate('nl', self.msg_localized,
fallback=True),
u'test-localized NL')
self.assertEqual(i18n.translate('fy', self.msg_localized,
fallback=True),
u'test-localized FY')
def testSemiLocalized(self):
"""Test translate by fallback to an alternative language."""
self.assertEqual(i18n.translate('en', self.msg_semi_localized,
fallback=True),
u'test-semi-localized EN')
self.assertEqual(i18n.translate('nl', self.msg_semi_localized,
fallback=True),
u'test-semi-localized NL')
self.assertEqual(i18n.translate('fy', self.msg_semi_localized,
fallback=True),
u'test-semi-localized NL')
def testNonLocalized(self):
"""Test translate with missing localisation."""
self.assertEqual(i18n.translate('en', self.msg_non_localized,
fallback=True),
u'test-non-localized EN')
self.assertEqual(i18n.translate('fy', self.msg_non_localized,
fallback=True),
u'test-non-localized EN')
self.assertEqual(i18n.translate('nl', self.msg_non_localized,
fallback=True),
u'test-non-localized EN')
self.assertEqual(i18n.translate('ru', self.msg_non_localized,
fallback=True),
u'test-non-localized EN')
def testNoEnglish(self):
"""Test translate with missing English text."""
self.assertEqual(i18n.translate('en', self.msg_no_english,
fallback=True),
u'test-no-english JA')
self.assertEqual(i18n.translate('fy', self.msg_no_english,
fallback=True),
u'test-no-english JA')
self.assertEqual(i18n.translate('nl', self.msg_no_english,
fallback=True),
u'test-no-english JA')
class UserInterfaceLangTestCase(TestCase):
"""Base class for tests using config.userinterface_lang."""
def setUp(self):
"""Change the userinterface language to the site's code."""
super(UserInterfaceLangTestCase, self).setUp()
self.orig_userinterface_lang = pywikibot.config.userinterface_lang
pywikibot.config.userinterface_lang = self.get_site().code
def tearDown(self):
"""Reset the userinterface language."""
pywikibot.config.userinterface_lang = self.orig_userinterface_lang
super(UserInterfaceLangTestCase, self).tearDown()
class TWNSetMessagePackageBase(TestCase):
"""Partial base class for TranslateWiki tests."""
message_package = None
def setUp(self):
"""Load the test translations."""
self.orig_messages_package_name = i18n._messages_package_name
i18n.set_messages_package(self.message_package)
super(TWNSetMessagePackageBase, self).setUp()
def tearDown(self):
"""Load the original translations back."""
super(TWNSetMessagePackageBase, self).tearDown()
i18n.set_messages_package(self.orig_messages_package_name)
class TWNTestCaseBase(TWNSetMessagePackageBase):
"""Base class for TranslateWiki tests."""
@classmethod
def setUpClass(cls):
"""Verify that the test translations are not empty."""
if not isinstance(cls.message_package, basestring):
raise TypeError('%s.message_package must be a package name'
% cls.__name__)
# The call to set_messages_package below exists only to confirm
# that the package exists and messages are available, so
        # that tests can be skipped if the i18n data doesn't exist.
cls.orig_messages_package_name = i18n._messages_package_name
i18n.set_messages_package(cls.message_package)
has_messages = i18n.messages_available()
i18n._messages_package_name = cls.orig_messages_package_name
if not has_messages:
raise unittest.SkipTest("i18n messages package '%s' not available."
% cls.message_package)
super(TWNTestCaseBase, cls).setUpClass()
class TestTWTranslate(TWNTestCaseBase):
"""Test twtranslate method."""
net = False
message_package = 'tests.i18n'
def testLocalized(self):
"""Test fully localized entry."""
self.assertEqual(i18n.twtranslate('en', 'test-localized'),
u'test-localized EN')
self.assertEqual(i18n.twtranslate('nl', 'test-localized'),
u'test-localized NL')
self.assertEqual(i18n.twtranslate('fy', 'test-localized'),
u'test-localized FY')
def testSemiLocalized(self):
"""Test translating with fallback to alternative language."""
self.assertEqual(i18n.twtranslate('en', 'test-semi-localized'),
u'test-semi-localized EN')
self.assertEqual(i18n.twtranslate('nl', 'test-semi-localized'),
u'test-semi-localized NL')
self.assertEqual(i18n.twtranslate('fy', 'test-semi-localized'),
u'test-semi-localized NL')
def testNonLocalized(self):
"""Test translating non localized entries."""
self.assertEqual(i18n.twtranslate('en', 'test-non-localized'),
u'test-non-localized EN')
self.assertEqual(i18n.twtranslate('fy', 'test-non-localized'),
u'test-non-localized EN')
self.assertEqual(i18n.twtranslate('nl', 'test-non-localized'),
u'test-non-localized EN')
self.assertEqual(i18n.twtranslate('ru', 'test-non-localized'),
u'test-non-localized EN')
def testNoEnglish(self):
"""Test translating into English with missing entry."""
self.assertRaises(i18n.TranslationError, i18n.twtranslate,
'en', 'test-no-english')
class TestTWNTranslate(TWNTestCaseBase):
"""Test {{PLURAL:}} support."""
net = False
message_package = 'tests.i18n'
def testNumber(self):
"""Use a number."""
self.assertEqual(
i18n.twntranslate('de', 'test-plural', 0) % {'num': 0},
u'Bot: Ändere 0 Seiten.')
self.assertEqual(
i18n.twntranslate('de', 'test-plural', 1) % {'num': 1},
u'Bot: Ändere 1 Seite.')
self.assertEqual(
i18n.twntranslate('de', 'test-plural', 2) % {'num': 2},
u'Bot: Ändere 2 Seiten.')
self.assertEqual(
i18n.twntranslate('de', 'test-plural', 3) % {'num': 3},
u'Bot: Ändere 3 Seiten.')
self.assertEqual(
i18n.twntranslate('en', 'test-plural', 0) % {'num': 'no'},
u'Bot: Changing no pages.')
self.assertEqual(
i18n.twntranslate('en', 'test-plural', 1) % {'num': 'one'},
u'Bot: Changing one page.')
self.assertEqual(
i18n.twntranslate('en', 'test-plural', 2) % {'num': 'two'},
u'Bot: Changing two pages.')
self.assertEqual(
i18n.twntranslate('en', 'test-plural', 3) % {'num': 'three'},
u'Bot: Changing three pages.')
def testString(self):
"""Use a string."""
self.assertEqual(
i18n.twntranslate('en', 'test-plural', '1') % {'num': 'one'},
u'Bot: Changing one page.')
def testDict(self):
"""Use a dictionary."""
self.assertEqual(
i18n.twntranslate('en', 'test-plural', {'num': 2}),
u'Bot: Changing 2 pages.')
def testExtended(self):
"""Use additional format strings."""
self.assertEqual(
i18n.twntranslate('fr', 'test-plural', {'num': 1, 'descr': 'seulement'}),
u'Robot: Changer seulement une page.')
def testExtendedOutside(self):
"""Use additional format strings also outside."""
self.assertEqual(
i18n.twntranslate('fr', 'test-plural', 1) % {'descr': 'seulement'},
u'Robot: Changer seulement une page.')
def testMultiple(self):
"""Test using multiple plural entries."""
self.assertEqual(
i18n.twntranslate('de', 'test-multiple-plurals', 1)
% {'action': u'Ändere', 'line': u'eine'},
u'Bot: Ändere eine Zeile von einer Seite.')
self.assertEqual(
i18n.twntranslate('de', 'test-multiple-plurals', 2)
% {'action': u'Ändere', 'line': u'zwei'},
u'Bot: Ändere zwei Zeilen von mehreren Seiten.')
self.assertEqual(
i18n.twntranslate('de', 'test-multiple-plurals', 3)
% {'action': u'Ändere', 'line': u'drei'},
u'Bot: Ändere drei Zeilen von mehreren Seiten.')
self.assertEqual(
i18n.twntranslate('de', 'test-multiple-plurals', (1, 2, 2))
% {'action': u'Ändere', 'line': u'eine'},
u'Bot: Ändere eine Zeile von mehreren Seiten.')
self.assertEqual(
i18n.twntranslate('de', 'test-multiple-plurals', [3, 1, 1])
% {'action': u'Ändere', 'line': u'drei'},
u'Bot: Ändere drei Zeilen von einer Seite.')
self.assertEqual(
i18n.twntranslate('de', 'test-multiple-plurals', ["3", 1, 1])
% {'action': u'Ändere', 'line': u'drei'},
u'Bot: Ändere drei Zeilen von einer Seite.')
self.assertEqual(
i18n.twntranslate('de', 'test-multiple-plurals', "321")
% {'action': u'Ändere', 'line': u'dreihunderteinundzwanzig'},
u'Bot: Ändere dreihunderteinundzwanzig Zeilen von mehreren Seiten.')
self.assertEqual(
i18n.twntranslate('de', 'test-multiple-plurals',
{'action': u'Ändere', 'line': 1, 'page': 1}),
u'Bot: Ändere 1 Zeile von einer Seite.')
self.assertEqual(
i18n.twntranslate('de', 'test-multiple-plurals',
{'action': u'Ändere', 'line': 1, 'page': 2}),
u'Bot: Ändere 1 Zeile von mehreren Seiten.')
self.assertEqual(
i18n.twntranslate('de', 'test-multiple-plurals',
{'action': u'Ändere', 'line': "11", 'page': 2}),
u'Bot: Ändere 11 Zeilen von mehreren Seiten.')
def testMultipleWrongParameterLength(self):
"""Test wrong parameter length."""
err_msg = 'Length of parameter does not match PLURAL occurrences'
with self.assertRaisesRegex(ValueError, err_msg):
i18n.twntranslate('de', 'test-multiple-plurals', (1, 2))
with self.assertRaisesRegex(ValueError, err_msg):
i18n.twntranslate('de', 'test-multiple-plurals', ["321"])
def testMultipleNonNumbers(self):
"""Test error handling for multiple non-numbers."""
with self.assertRaisesRegex(ValueError, "invalid literal for int\(\) with base 10: 'drei'"):
i18n.twntranslate('de', 'test-multiple-plurals', ["drei", "1", 1])
with self.assertRaisesRegex(ValueError, "invalid literal for int\(\) with base 10: 'elf'"):
i18n.twntranslate('de', 'test-multiple-plurals',
{'action': u'Ändere', 'line': "elf", 'page': 2})
def testAllParametersExist(self):
"""Test that all parameters are required when using a dict."""
with self.assertRaisesRegex(KeyError, repr(u'line')):
# all parameters must be inside twntranslate
i18n.twntranslate('de', 'test-multiple-plurals',
{'line': 1, 'page': 1}) % {'action': u'Ändere'}
def test_fallback_lang(self):
"""
Test that twntranslate uses the translation's language.
twntranslate calls _twtranslate which might return the translation for
a different language and then the plural rules from that language need
to be applied.
"""
# co has fr as altlang but has no plural rules defined (otherwise this
# test might not catch problems) so it's using the plural variant for 0
# although French uses the plural variant for numbers > 1 (so not 0)
assert 'co' not in plural.plural_rules
assert plural.plural_rules['fr']['plural'](0) is False
self.assertEqual(
i18n.twntranslate('co', 'test-plural', {'num': 0, 'descr': 'seulement'}),
u'Robot: Changer seulement une page.')
self.assertEqual(
i18n.twntranslate('co', 'test-plural', {'num': 1, 'descr': 'seulement'}),
u'Robot: Changer seulement une page.')
class ScriptMessagesTestCase(TWNTestCaseBase):
"""Real messages test."""
net = False
message_package = 'scripts.i18n'
def test_basic(self):
"""Verify that real messages are able to be loaded."""
self.assertEqual(i18n.twntranslate('en', 'pywikibot-enter-new-text'),
'Please enter the new text:')
def test_missing(self):
"""Test a missing message from a real message bundle."""
self.assertRaises(i18n.TranslationError,
i18n.twntranslate, 'en', 'pywikibot-missing-key')
class InputTestCase(TWNTestCaseBase, UserInterfaceLangTestCase, PwbTestCase):
"""Test i18n.input."""
family = 'wikipedia'
code = 'arz'
message_package = 'scripts.i18n'
@classmethod
def setUpClass(cls):
"""Verify that a translation does not yet exist."""
super(InputTestCase, cls).setUpClass()
if cls.code in i18n.twget_keys('pywikibot-enter-category-name'):
raise unittest.SkipTest(
'%s has a translation for %s'
% (cls.code, 'pywikibot-enter-category-name'))
def test_pagegen_i18n_input(self):
"""Test i18n.input via ."""
result = self._execute(args=['listpages', '-cat'],
data_in='non-existant-category\n',
timeout=5)
self.assertIn('Please enter the category name:', result['stderr'])
class MissingPackageTestCase(TWNSetMessagePackageBase,
UserInterfaceLangTestCase,
DefaultSiteTestCase):
"""Test misssing messages package."""
message_package = 'scripts.foobar.i18n'
def _capture_output(self, text, *args, **kwargs):
self.output_text = text
def setUp(self):
"""Patch the output and input methods."""
super(MissingPackageTestCase, self).setUp()
self.output_text = ''
self.orig_raw_input = bot.ui._raw_input
self.orig_output = bot.ui.output
bot.ui._raw_input = lambda *args, **kwargs: 'dummy input'
bot.ui.output = self._capture_output
def tearDown(self):
"""Restore the output and input methods."""
bot.ui._raw_input = self.orig_raw_input
bot.ui.output = self.orig_output
super(MissingPackageTestCase, self).tearDown()
def test_pagegen_i18n_input(self):
"""Test i18n.input falls back with missing message package."""
rv = i18n.input('pywikibot-enter-category-name',
fallback_prompt='dummy output')
self.assertEqual(rv, 'dummy input')
self.assertIn('dummy output: ', self.output_text)
if __name__ == '__main__':
try:
unittest.main()
except SystemExit:
pass
|
icyflame/batman
|
tests/i18n_tests.py
|
Python
|
mit
| 17,128
|
from __future__ import absolute_import, print_function
import cython
from .. import __version__
import os
import shutil
import hashlib
import subprocess
import collections
import re, sys, time
from glob import iglob
from io import open as io_open
from os.path import relpath as _relpath
from distutils.extension import Extension
from distutils.util import strtobool
try:
import gzip
gzip_open = gzip.open
gzip_ext = '.gz'
except ImportError:
gzip_open = open
gzip_ext = ''
try:
import pythran
import pythran.config
PythranAvailable = True
except:
PythranAvailable = False
from .. import Utils
from ..Utils import (cached_function, cached_method, path_exists,
safe_makedirs, copy_file_to_dir_if_newer, is_package_dir)
from ..Compiler.Main import Context, CompilationOptions, default_options
join_path = cached_function(os.path.join)
copy_once_if_newer = cached_function(copy_file_to_dir_if_newer)
safe_makedirs_once = cached_function(safe_makedirs)
if sys.version_info[0] < 3:
# stupid Py2 distutils enforces str type in list of sources
_fs_encoding = sys.getfilesystemencoding()
if _fs_encoding is None:
_fs_encoding = sys.getdefaultencoding()
def encode_filename_in_py2(filename):
if not isinstance(filename, bytes):
return filename.encode(_fs_encoding)
return filename
else:
def encode_filename_in_py2(filename):
return filename
basestring = str
def _make_relative(file_paths, base=None):
if not base:
base = os.getcwd()
if base[-1] != os.path.sep:
base += os.path.sep
return [_relpath(path, base) if path.startswith(base) else path
for path in file_paths]
def extended_iglob(pattern):
if '{' in pattern:
m = re.match('(.*){([^}]+)}(.*)', pattern)
if m:
before, switch, after = m.groups()
for case in switch.split(','):
for path in extended_iglob(before + case + after):
yield path
return
if '**/' in pattern:
seen = set()
first, rest = pattern.split('**/', 1)
if first:
first = iglob(first+'/')
else:
first = ['']
for root in first:
for path in extended_iglob(join_path(root, rest)):
if path not in seen:
seen.add(path)
yield path
for path in extended_iglob(join_path(root, '*', '**/' + rest)):
if path not in seen:
seen.add(path)
yield path
else:
for path in iglob(pattern):
yield path
def nonempty(it, error_msg="expected non-empty iterator"):
empty = True
for value in it:
empty = False
yield value
if empty:
raise ValueError(error_msg)
@cached_function
def file_hash(filename):
path = os.path.normpath(filename.encode("UTF-8"))
prefix = (str(len(path)) + ":").encode("UTF-8")
m = hashlib.md5(prefix)
m.update(path)
f = open(filename, 'rb')
try:
data = f.read(65000)
while data:
m.update(data)
data = f.read(65000)
finally:
f.close()
return m.hexdigest()
def parse_list(s):
"""
>>> parse_list("")
[]
>>> parse_list("a")
['a']
>>> parse_list("a b c")
['a', 'b', 'c']
>>> parse_list("[a, b, c]")
['a', 'b', 'c']
>>> parse_list('a " " b')
['a', ' ', 'b']
>>> parse_list('[a, ",a", "a,", ",", ]')
['a', ',a', 'a,', ',']
"""
if len(s) >= 2 and s[0] == '[' and s[-1] == ']':
s = s[1:-1]
delimiter = ','
else:
delimiter = ' '
s, literals = strip_string_literals(s)
def unquote(literal):
literal = literal.strip()
if literal[0] in "'\"":
return literals[literal[1:-1]]
else:
return literal
return [unquote(item) for item in s.split(delimiter) if item.strip()]
transitive_str = object()
transitive_list = object()
bool_or = object()
distutils_settings = {
'name': str,
'sources': list,
'define_macros': list,
'undef_macros': list,
'libraries': transitive_list,
'library_dirs': transitive_list,
'runtime_library_dirs': transitive_list,
'include_dirs': transitive_list,
'extra_objects': list,
'extra_compile_args': transitive_list,
'extra_link_args': transitive_list,
'export_symbols': list,
'depends': transitive_list,
'language': transitive_str,
'np_pythran': bool_or
}
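# Illustrative source-file header that DistutilsInfo (below) understands; the
# file name and the values are assumptions, not taken from this module:
#
#   # foo.pyx
#   # distutils: language = c++
#   # distutils: libraries = m z
#
# yields {'language': 'c++', 'libraries': ['m', 'z']} in DistutilsInfo.values.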
def update_pythran_extension(ext):
if not PythranAvailable:
raise RuntimeError("You first need to install Pythran to use the np_pythran directive.")
pythran_ext = pythran.config.make_extension()
ext.include_dirs.extend(pythran_ext['include_dirs'])
ext.extra_compile_args.extend(pythran_ext['extra_compile_args'])
ext.extra_link_args.extend(pythran_ext['extra_link_args'])
ext.define_macros.extend(pythran_ext['define_macros'])
ext.undef_macros.extend(pythran_ext['undef_macros'])
ext.library_dirs.extend(pythran_ext['library_dirs'])
ext.libraries.extend(pythran_ext['libraries'])
ext.language = 'c++'
# These options are not compatible with the way normal Cython extensions work
for bad_option in ["-fwhole-program", "-fvisibility=hidden"]:
try:
ext.extra_compile_args.remove(bad_option)
except ValueError:
pass
@cython.locals(start=cython.Py_ssize_t, end=cython.Py_ssize_t)
def line_iter(source):
if isinstance(source, basestring):
start = 0
while True:
end = source.find('\n', start)
if end == -1:
yield source[start:]
return
yield source[start:end]
start = end+1
else:
for line in source:
yield line
class DistutilsInfo(object):
def __init__(self, source=None, exn=None):
self.values = {}
if source is not None:
for line in line_iter(source):
line = line.lstrip()
if not line:
continue
if line[0] != '#':
break
line = line[1:].lstrip()
kind = next((k for k in ("distutils:","cython:") if line.startswith(k)), None)
if not kind is None:
key, _, value = [s.strip() for s in line[len(kind):].partition('=')]
type = distutils_settings.get(key, None)
if line.startswith("cython:") and type is None: continue
if type in (list, transitive_list):
value = parse_list(value)
if key == 'define_macros':
value = [tuple(macro.split('=', 1))
if '=' in macro else (macro, None)
for macro in value]
if type is bool_or:
value = strtobool(value)
self.values[key] = value
elif exn is not None:
for key in distutils_settings:
if key in ('name', 'sources','np_pythran'):
continue
value = getattr(exn, key, None)
if value:
self.values[key] = value
def merge(self, other):
if other is None:
return self
for key, value in other.values.items():
type = distutils_settings[key]
if type is transitive_str and key not in self.values:
self.values[key] = value
elif type is transitive_list:
if key in self.values:
# Change a *copy* of the list (Trac #845)
all = self.values[key][:]
for v in value:
if v not in all:
all.append(v)
value = all
self.values[key] = value
elif type is bool_or:
self.values[key] = self.values.get(key, False) | value
return self
def subs(self, aliases):
if aliases is None:
return self
resolved = DistutilsInfo()
for key, value in self.values.items():
type = distutils_settings[key]
if type in [list, transitive_list]:
new_value_list = []
for v in value:
if v in aliases:
v = aliases[v]
if isinstance(v, list):
new_value_list += v
else:
new_value_list.append(v)
value = new_value_list
else:
if value in aliases:
value = aliases[value]
resolved.values[key] = value
return resolved
def apply(self, extension):
for key, value in self.values.items():
type = distutils_settings[key]
if type in [list, transitive_list]:
value = getattr(extension, key) + list(value)
setattr(extension, key, value)
@cython.locals(start=cython.Py_ssize_t, q=cython.Py_ssize_t,
single_q=cython.Py_ssize_t, double_q=cython.Py_ssize_t,
hash_mark=cython.Py_ssize_t, end=cython.Py_ssize_t,
k=cython.Py_ssize_t, counter=cython.Py_ssize_t, quote_len=cython.Py_ssize_t)
def strip_string_literals(code, prefix='__Pyx_L'):
"""
Normalizes every string literal to be of the form '__Pyx_Lxxx',
returning the normalized code and a mapping of labels to
string literals.
"""
new_code = []
literals = {}
counter = 0
start = q = 0
in_quote = False
hash_mark = single_q = double_q = -1
code_len = len(code)
quote_type = quote_len = None
while True:
if hash_mark < q:
hash_mark = code.find('#', q)
if single_q < q:
single_q = code.find("'", q)
if double_q < q:
double_q = code.find('"', q)
q = min(single_q, double_q)
if q == -1:
q = max(single_q, double_q)
# We're done.
if q == -1 and hash_mark == -1:
new_code.append(code[start:])
break
# Try to close the quote.
elif in_quote:
if code[q-1] == u'\\':
k = 2
while q >= k and code[q-k] == u'\\':
k += 1
if k % 2 == 0:
q += 1
continue
if code[q] == quote_type and (
quote_len == 1 or (code_len > q + 2 and quote_type == code[q+1] == code[q+2])):
counter += 1
label = "%s%s_" % (prefix, counter)
literals[label] = code[start+quote_len:q]
full_quote = code[q:q+quote_len]
new_code.append(full_quote)
new_code.append(label)
new_code.append(full_quote)
q += quote_len
in_quote = False
start = q
else:
q += 1
# Process comment.
elif -1 != hash_mark and (hash_mark < q or q == -1):
new_code.append(code[start:hash_mark+1])
end = code.find('\n', hash_mark)
counter += 1
label = "%s%s_" % (prefix, counter)
if end == -1:
end_or_none = None
else:
end_or_none = end
literals[label] = code[hash_mark+1:end_or_none]
new_code.append(label)
if end == -1:
break
start = q = end
# Open the quote.
else:
if code_len >= q+3 and (code[q] == code[q+1] == code[q+2]):
quote_len = 3
else:
quote_len = 1
in_quote = True
quote_type = code[q]
new_code.append(code[start:q])
start = q
q += quote_len
return "".join(new_code), literals
# We need to allow spaces, to support conditional compilation like
# IF ...:
# cimport ...
dependency_regex = re.compile(r"(?:^\s*from +([0-9a-zA-Z_.]+) +cimport)|"
r"(?:^\s*cimport +([0-9a-zA-Z_.]+(?: *, *[0-9a-zA-Z_.]+)*))|"
r"(?:^\s*cdef +extern +from +['\"]([^'\"]+)['\"])|"
r"(?:^\s*include +['\"]([^'\"]+)['\"])", re.M)
def normalize_existing(base_path, rel_paths):
return normalize_existing0(os.path.dirname(base_path), tuple(set(rel_paths)))
@cached_function
def normalize_existing0(base_dir, rel_paths):
"""
Given some base directory ``base_dir`` and a list of path names
``rel_paths``, normalize each relative path name ``rel`` by
    replacing it with ``os.path.join(base_dir, rel)`` if that file exists.
    Return a couple ``(normalized, needed_base)`` where ``normalized``
    is the list of normalized file names and ``needed_base`` is
``base_dir`` if we actually needed ``base_dir``. If no paths were
changed (for example, if all paths were already absolute), then
``needed_base`` is ``None``.
"""
normalized = []
needed_base = None
for rel in rel_paths:
if os.path.isabs(rel):
normalized.append(rel)
continue
path = join_path(base_dir, rel)
if path_exists(path):
normalized.append(os.path.normpath(path))
needed_base = base_dir
else:
normalized.append(rel)
return (normalized, needed_base)
def resolve_depends(depends, include_dirs):
include_dirs = tuple(include_dirs)
resolved = []
for depend in depends:
path = resolve_depend(depend, include_dirs)
if path is not None:
resolved.append(path)
return resolved
@cached_function
def resolve_depend(depend, include_dirs):
if depend[0] == '<' and depend[-1] == '>':
return None
for dir in include_dirs:
path = join_path(dir, depend)
if path_exists(path):
return os.path.normpath(path)
return None
@cached_function
def package(filename):
dir = os.path.dirname(os.path.abspath(str(filename)))
if dir != filename and is_package_dir(dir):
return package(dir) + (os.path.basename(dir),)
else:
return ()
@cached_function
def fully_qualified_name(filename):
module = os.path.splitext(os.path.basename(filename))[0]
return '.'.join(package(filename) + (module,))
@cached_function
def parse_dependencies(source_filename):
# Actual parsing is way too slow, so we use regular expressions.
# The only catch is that we must strip comments and string
# literals ahead of time.
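    # Illustrative lines and what the regex extracts (assumed snippets, not
    # from the original source):
    #
    #   "from libc.math cimport sin"   -> cimports += ['libc.math']
    #   "cimport numpy, cython"        -> cimports += ['numpy', 'cython']
    #   "cdef extern from 'foo.h':"    -> externs  += ['foo.h']
    #   "include 'utils.pxi'"          -> includes += ['utils.pxi']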
fh = Utils.open_source_file(source_filename, error_handling='ignore')
try:
source = fh.read()
finally:
fh.close()
distutils_info = DistutilsInfo(source)
source, literals = strip_string_literals(source)
source = source.replace('\\\n', ' ').replace('\t', ' ')
# TODO: pure mode
cimports = []
includes = []
externs = []
for m in dependency_regex.finditer(source):
cimport_from, cimport_list, extern, include = m.groups()
if cimport_from:
cimports.append(cimport_from)
elif cimport_list:
cimports.extend(x.strip() for x in cimport_list.split(","))
elif extern:
externs.append(literals[extern])
else:
includes.append(literals[include])
return cimports, includes, externs, distutils_info
class DependencyTree(object):
def __init__(self, context, quiet=False):
self.context = context
self.quiet = quiet
self._transitive_cache = {}
def parse_dependencies(self, source_filename):
if path_exists(source_filename):
source_filename = os.path.normpath(source_filename)
return parse_dependencies(source_filename)
@cached_method
def included_files(self, filename):
# This is messy because included files are textually included, resolving
# cimports (but not includes) relative to the including file.
all = set()
for include in self.parse_dependencies(filename)[1]:
include_path = join_path(os.path.dirname(filename), include)
if not path_exists(include_path):
include_path = self.context.find_include_file(include, None)
if include_path:
if '.' + os.path.sep in include_path:
include_path = os.path.normpath(include_path)
all.add(include_path)
all.update(self.included_files(include_path))
elif not self.quiet:
print("Unable to locate '%s' referenced from '%s'" % (filename, include))
return all
@cached_method
def cimports_externs_incdirs(self, filename):
# This is really ugly. Nested cimports are resolved with respect to the
# includer, but includes are resolved with respect to the includee.
cimports, includes, externs = self.parse_dependencies(filename)[:3]
cimports = set(cimports)
externs = set(externs)
incdirs = set()
for include in self.included_files(filename):
included_cimports, included_externs, included_incdirs = self.cimports_externs_incdirs(include)
cimports.update(included_cimports)
externs.update(included_externs)
incdirs.update(included_incdirs)
externs, incdir = normalize_existing(filename, externs)
if incdir:
incdirs.add(incdir)
return tuple(cimports), externs, incdirs
def cimports(self, filename):
return self.cimports_externs_incdirs(filename)[0]
def package(self, filename):
return package(filename)
def fully_qualified_name(self, filename):
return fully_qualified_name(filename)
@cached_method
def find_pxd(self, module, filename=None):
is_relative = module[0] == '.'
if is_relative and not filename:
raise NotImplementedError("New relative imports.")
if filename is not None:
module_path = module.split('.')
if is_relative:
module_path.pop(0) # just explicitly relative
package_path = list(self.package(filename))
while module_path and not module_path[0]:
try:
package_path.pop()
except IndexError:
return None # FIXME: error?
module_path.pop(0)
relative = '.'.join(package_path + module_path)
pxd = self.context.find_pxd_file(relative, None)
if pxd:
return pxd
if is_relative:
return None # FIXME: error?
return self.context.find_pxd_file(module, None)
@cached_method
def cimported_files(self, filename):
if filename[-4:] == '.pyx' and path_exists(filename[:-4] + '.pxd'):
pxd_list = [filename[:-4] + '.pxd']
else:
pxd_list = []
for module in self.cimports(filename):
if module[:7] == 'cython.' or module == 'cython':
continue
pxd_file = self.find_pxd(module, filename)
if pxd_file is not None:
pxd_list.append(pxd_file)
elif not self.quiet:
print("%s: cannot find cimported module '%s'" % (filename, module))
return tuple(pxd_list)
@cached_method
def immediate_dependencies(self, filename):
all = set([filename])
all.update(self.cimported_files(filename))
all.update(self.included_files(filename))
return all
def all_dependencies(self, filename):
return self.transitive_merge(filename, self.immediate_dependencies, set.union)
@cached_method
def timestamp(self, filename):
return os.path.getmtime(filename)
def extract_timestamp(self, filename):
return self.timestamp(filename), filename
def newest_dependency(self, filename):
return max([self.extract_timestamp(f) for f in self.all_dependencies(filename)])
def transitive_fingerprint(self, filename, extra=None):
try:
m = hashlib.md5(__version__.encode('UTF-8'))
m.update(file_hash(filename).encode('UTF-8'))
for x in sorted(self.all_dependencies(filename)):
if os.path.splitext(x)[1] not in ('.c', '.cpp', '.h'):
m.update(file_hash(x).encode('UTF-8'))
if extra is not None:
m.update(str(extra).encode('UTF-8'))
return m.hexdigest()
except IOError:
return None
def distutils_info0(self, filename):
info = self.parse_dependencies(filename)[3]
kwds = info.values
cimports, externs, incdirs = self.cimports_externs_incdirs(filename)
basedir = os.getcwd()
# Add dependencies on "cdef extern from ..." files
if externs:
externs = _make_relative(externs, basedir)
if 'depends' in kwds:
kwds['depends'] = list(set(kwds['depends']).union(externs))
else:
kwds['depends'] = list(externs)
# Add include_dirs to ensure that the C compiler will find the
# "cdef extern from ..." files
if incdirs:
include_dirs = list(kwds.get('include_dirs', []))
for inc in _make_relative(incdirs, basedir):
if inc not in include_dirs:
include_dirs.append(inc)
kwds['include_dirs'] = include_dirs
return info
def distutils_info(self, filename, aliases=None, base=None):
return (self.transitive_merge(filename, self.distutils_info0, DistutilsInfo.merge)
.subs(aliases)
.merge(base))
def transitive_merge(self, node, extract, merge):
try:
seen = self._transitive_cache[extract, merge]
except KeyError:
seen = self._transitive_cache[extract, merge] = {}
return self.transitive_merge_helper(
node, extract, merge, seen, {}, self.cimported_files)[0]
def transitive_merge_helper(self, node, extract, merge, seen, stack, outgoing):
if node in seen:
return seen[node], None
deps = extract(node)
if node in stack:
return deps, node
try:
stack[node] = len(stack)
loop = None
for next in outgoing(node):
sub_deps, sub_loop = self.transitive_merge_helper(next, extract, merge, seen, stack, outgoing)
if sub_loop is not None:
if loop is not None and stack[loop] < stack[sub_loop]:
pass
else:
loop = sub_loop
deps = merge(deps, sub_deps)
if loop == node:
loop = None
if loop is None:
seen[node] = deps
return deps, loop
finally:
del stack[node]
_dep_tree = None
def create_dependency_tree(ctx=None, quiet=False):
global _dep_tree
if _dep_tree is None:
if ctx is None:
ctx = Context(["."], CompilationOptions(default_options))
_dep_tree = DependencyTree(ctx, quiet=quiet)
return _dep_tree
# If this changes, change also docs/src/reference/compilation.rst
# which mentions this function
def default_create_extension(template, kwds):
if 'depends' in kwds:
include_dirs = kwds.get('include_dirs', []) + ["."]
depends = resolve_depends(kwds['depends'], include_dirs)
kwds['depends'] = sorted(set(depends + template.depends))
t = template.__class__
ext = t(**kwds)
metadata = dict(distutils=kwds, module_name=kwds['name'])
return (ext, metadata)
# This may be useful for advanced users?
def create_extension_list(patterns, exclude=None, ctx=None, aliases=None, quiet=False, language=None,
exclude_failures=False):
if language is not None:
print('Please put "# distutils: language=%s" in your .pyx or .pxd file(s)' % language)
if exclude is None:
exclude = []
if patterns is None:
return [], {}
elif isinstance(patterns, basestring) or not isinstance(patterns, collections.Iterable):
patterns = [patterns]
explicit_modules = set([m.name for m in patterns if isinstance(m, Extension)])
seen = set()
deps = create_dependency_tree(ctx, quiet=quiet)
to_exclude = set()
if not isinstance(exclude, list):
exclude = [exclude]
for pattern in exclude:
to_exclude.update(map(os.path.abspath, extended_iglob(pattern)))
module_list = []
module_metadata = {}
# workaround for setuptools
if 'setuptools' in sys.modules:
Extension_distutils = sys.modules['setuptools.extension']._Extension
Extension_setuptools = sys.modules['setuptools'].Extension
else:
# dummy class, in case we do not have setuptools
Extension_distutils = Extension
class Extension_setuptools(Extension): pass
# if no create_extension() function is defined, use a simple
# default function.
create_extension = ctx.options.create_extension or default_create_extension
for pattern in patterns:
if isinstance(pattern, str):
filepattern = pattern
template = Extension(pattern, []) # Fake Extension without sources
name = '*'
base = None
ext_language = language
elif isinstance(pattern, (Extension_distutils, Extension_setuptools)):
cython_sources = [s for s in pattern.sources
if os.path.splitext(s)[1] in ('.py', '.pyx')]
if cython_sources:
filepattern = cython_sources[0]
if len(cython_sources) > 1:
print("Warning: Multiple cython sources found for extension '%s': %s\n"
"See http://cython.readthedocs.io/en/latest/src/userguide/sharing_declarations.html "
"for sharing declarations among Cython files." % (pattern.name, cython_sources))
else:
# ignore non-cython modules
module_list.append(pattern)
continue
template = pattern
name = template.name
base = DistutilsInfo(exn=template)
ext_language = None # do not override whatever the Extension says
else:
msg = str("pattern is not of type str nor subclass of Extension (%s)"
" but of type %s and class %s" % (repr(Extension),
type(pattern),
pattern.__class__))
raise TypeError(msg)
for file in nonempty(sorted(extended_iglob(filepattern)), "'%s' doesn't match any files" % filepattern):
if os.path.abspath(file) in to_exclude:
continue
module_name = deps.fully_qualified_name(file)
if '*' in name:
if module_name in explicit_modules:
continue
elif name:
module_name = name
if module_name == 'cython':
raise ValueError('cython is a special module, cannot be used as a module name')
if module_name not in seen:
try:
kwds = deps.distutils_info(file, aliases, base).values
except Exception:
if exclude_failures:
continue
raise
if base is not None:
for key, value in base.values.items():
if key not in kwds:
kwds[key] = value
kwds['name'] = module_name
sources = [file] + [m for m in template.sources if m != filepattern]
if 'sources' in kwds:
# allow users to add .c files etc.
for source in kwds['sources']:
source = encode_filename_in_py2(source)
if source not in sources:
sources.append(source)
kwds['sources'] = sources
if ext_language and 'language' not in kwds:
kwds['language'] = ext_language
np_pythran = kwds.pop('np_pythran', False)
# Create the new extension
m, metadata = create_extension(template, kwds)
m.np_pythran = np_pythran or getattr(m, 'np_pythran', False)
if m.np_pythran:
update_pythran_extension(m)
module_list.append(m)
# Store metadata (this will be written as JSON in the
# generated C file but otherwise has no purpose)
module_metadata[module_name] = metadata
if file not in m.sources:
# Old setuptools unconditionally replaces .pyx with .c/.cpp
target_file = os.path.splitext(file)[0] + ('.cpp' if m.language == 'c++' else '.c')
try:
m.sources.remove(target_file)
except ValueError:
# never seen this in the wild, but probably better to warn about this unexpected case
print("Warning: Cython source file not found in sources list, adding %s" % file)
m.sources.insert(0, file)
seen.add(name)
return module_list, module_metadata
# This is the user-exposed entry point.
def cythonize(module_list, exclude=None, nthreads=0, aliases=None, quiet=False, force=False, language=None,
exclude_failures=False, **options):
"""
Compile a set of source modules into C/C++ files and return a list of distutils
Extension objects for them.
As module list, pass either a glob pattern, a list of glob patterns or a list of
Extension objects. The latter allows you to configure the extensions separately
through the normal distutils options.
When using glob patterns, you can exclude certain module names explicitly
by passing them into the 'exclude' option.
To globally enable C++ mode, you can pass language='c++'. Otherwise, this
will be determined at a per-file level based on compiler directives. This
affects only modules found based on file names. Extension instances passed
into cythonize() will not be changed.
For parallel compilation, set the 'nthreads' option to the number of
concurrent builds.
For a broad 'try to compile' mode that ignores compilation failures and
simply excludes the failed extensions, pass 'exclude_failures=True'. Note
that this only really makes sense for compiling .py files which can also
be used without compilation.
Additional compilation options can be passed as keyword arguments.
"""
if exclude is None:
exclude = []
if 'include_path' not in options:
options['include_path'] = ['.']
if 'common_utility_include_dir' in options:
if options.get('cache'):
raise NotImplementedError("common_utility_include_dir does not yet work with caching")
safe_makedirs(options['common_utility_include_dir'])
pythran_options = None
if PythranAvailable:
pythran_options = CompilationOptions(**options)
pythran_options.cplus = True
pythran_options.np_pythran = True
c_options = CompilationOptions(**options)
cpp_options = CompilationOptions(**options); cpp_options.cplus = True
ctx = c_options.create_context()
options = c_options
module_list, module_metadata = create_extension_list(
module_list,
exclude=exclude,
ctx=ctx,
quiet=quiet,
exclude_failures=exclude_failures,
language=language,
aliases=aliases)
deps = create_dependency_tree(ctx, quiet=quiet)
build_dir = getattr(options, 'build_dir', None)
def copy_to_build_dir(filepath, root=os.getcwd()):
filepath_abs = os.path.abspath(filepath)
if os.path.isabs(filepath):
filepath = filepath_abs
if filepath_abs.startswith(root):
# distutil extension depends are relative to cwd
mod_dir = join_path(build_dir,
os.path.dirname(_relpath(filepath, root)))
copy_once_if_newer(filepath_abs, mod_dir)
modules_by_cfile = collections.defaultdict(list)
to_compile = []
for m in module_list:
if build_dir:
for dep in m.depends:
copy_to_build_dir(dep)
cy_sources = [
source for source in m.sources
if os.path.splitext(source)[1] in ('.pyx', '.py')]
if len(cy_sources) == 1:
# normal "special" case: believe the Extension module name to allow user overrides
full_module_name = m.name
else:
# infer FQMN from source files
full_module_name = None
new_sources = []
for source in m.sources:
base, ext = os.path.splitext(source)
if ext in ('.pyx', '.py'):
if m.np_pythran:
c_file = base + '.cpp'
options = pythran_options
elif m.language == 'c++':
c_file = base + '.cpp'
options = cpp_options
else:
c_file = base + '.c'
options = c_options
# setup for out of place build directory if enabled
if build_dir:
c_file = os.path.join(build_dir, c_file)
dir = os.path.dirname(c_file)
safe_makedirs_once(dir)
if os.path.exists(c_file):
c_timestamp = os.path.getmtime(c_file)
else:
c_timestamp = -1
# Priority goes first to modified files, second to direct
# dependents, and finally to indirect dependents.
if c_timestamp < deps.timestamp(source):
dep_timestamp, dep = deps.timestamp(source), source
priority = 0
else:
dep_timestamp, dep = deps.newest_dependency(source)
priority = 2 - (dep in deps.immediate_dependencies(source))
if force or c_timestamp < dep_timestamp:
if not quiet and not force:
if source == dep:
print("Compiling %s because it changed." % source)
else:
print("Compiling %s because it depends on %s." % (source, dep))
if not force and options.cache:
extra = m.language
fingerprint = deps.transitive_fingerprint(source, extra)
else:
fingerprint = None
to_compile.append((
priority, source, c_file, fingerprint, quiet,
options, not exclude_failures, module_metadata.get(m.name),
full_module_name))
new_sources.append(c_file)
modules_by_cfile[c_file].append(m)
else:
new_sources.append(source)
if build_dir:
copy_to_build_dir(source)
m.sources = new_sources
if options.cache:
if not os.path.exists(options.cache):
os.makedirs(options.cache)
to_compile.sort()
# Drop "priority" component of "to_compile" entries and add a
# simple progress indicator.
N = len(to_compile)
progress_fmt = "[{0:%d}/{1}] " % len(str(N))
for i in range(N):
progress = progress_fmt.format(i+1, N)
to_compile[i] = to_compile[i][1:] + (progress,)
if N <= 1:
nthreads = 0
if nthreads:
# Requires multiprocessing (or Python >= 2.6)
try:
import multiprocessing
pool = multiprocessing.Pool(
nthreads, initializer=_init_multiprocessing_helper)
except (ImportError, OSError):
print("multiprocessing required for parallel cythonization")
nthreads = 0
else:
# This is a bit more involved than it should be, because KeyboardInterrupts
# break the multiprocessing workers when using a normal pool.map().
# See, for example:
# http://noswap.com/blog/python-multiprocessing-keyboardinterrupt
try:
result = pool.map_async(cythonize_one_helper, to_compile, chunksize=1)
pool.close()
while not result.ready():
try:
result.get(99999) # seconds
except multiprocessing.TimeoutError:
pass
except KeyboardInterrupt:
pool.terminate()
raise
pool.join()
if not nthreads:
for args in to_compile:
cythonize_one(*args)
if exclude_failures:
failed_modules = set()
for c_file, modules in modules_by_cfile.items():
if not os.path.exists(c_file):
failed_modules.update(modules)
elif os.path.getsize(c_file) < 200:
f = io_open(c_file, 'r', encoding='iso8859-1')
try:
if f.read(len('#error ')) == '#error ':
# dead compilation result
failed_modules.update(modules)
finally:
f.close()
if failed_modules:
for module in failed_modules:
module_list.remove(module)
print("Failed compilations: %s" % ', '.join(sorted([
module.name for module in failed_modules])))
if options.cache:
cleanup_cache(options.cache, getattr(options, 'cache_size', 1024 * 1024 * 100))
# cythonize() is often followed by the (non-Python-buffered)
# compiler output, flush now to avoid interleaving output.
sys.stdout.flush()
return module_list
if os.environ.get('XML_RESULTS'):
compile_result_dir = os.environ['XML_RESULTS']
def record_results(func):
def with_record(*args):
t = time.time()
success = True
try:
try:
func(*args)
except:
success = False
finally:
t = time.time() - t
module = fully_qualified_name(args[0])
name = "cythonize." + module
failures = 1 - success
if success:
failure_item = ""
else:
failure_item = "failure"
output = open(os.path.join(compile_result_dir, name + ".xml"), "w")
output.write("""
<?xml version="1.0" ?>
<testsuite name="%(name)s" errors="0" failures="%(failures)s" tests="1" time="%(t)s">
<testcase classname="%(name)s" name="cythonize">
%(failure_item)s
</testcase>
</testsuite>
""".strip() % locals())
output.close()
return with_record
else:
def record_results(func):
return func
# TODO: Share context? Issue: pyx processing leaks into pxd module
@record_results
def cythonize_one(pyx_file, c_file, fingerprint, quiet, options=None,
raise_on_failure=True, embedded_metadata=None, full_module_name=None,
progress=""):
from ..Compiler.Main import compile_single, default_options
from ..Compiler.Errors import CompileError, PyrexError
if fingerprint:
if not os.path.exists(options.cache):
safe_makedirs(options.cache)
# Cython-generated c files are highly compressible.
# (E.g. a compression ratio of about 10 for Sage).
fingerprint_file = join_path(
options.cache, "%s-%s%s" % (os.path.basename(c_file), fingerprint, gzip_ext))
if os.path.exists(fingerprint_file):
if not quiet:
print("%sFound compiled %s in cache" % (progress, pyx_file))
os.utime(fingerprint_file, None)
g = gzip_open(fingerprint_file, 'rb')
try:
f = open(c_file, 'wb')
try:
shutil.copyfileobj(g, f)
finally:
f.close()
finally:
g.close()
return
if not quiet:
print("%sCythonizing %s" % (progress, pyx_file))
if options is None:
options = CompilationOptions(default_options)
options.output_file = c_file
options.embedded_metadata = embedded_metadata
any_failures = 0
try:
result = compile_single(pyx_file, options, full_module_name=full_module_name)
if result.num_errors > 0:
any_failures = 1
except (EnvironmentError, PyrexError) as e:
sys.stderr.write('%s\n' % e)
any_failures = 1
# XXX
import traceback
traceback.print_exc()
except Exception:
if raise_on_failure:
raise
import traceback
traceback.print_exc()
any_failures = 1
if any_failures:
if raise_on_failure:
raise CompileError(None, pyx_file)
elif os.path.exists(c_file):
os.remove(c_file)
elif fingerprint:
f = open(c_file, 'rb')
try:
g = gzip_open(fingerprint_file, 'wb')
try:
shutil.copyfileobj(f, g)
finally:
g.close()
finally:
f.close()
def cythonize_one_helper(m):
import traceback
try:
return cythonize_one(*m)
except Exception:
traceback.print_exc()
raise
def _init_multiprocessing_helper():
# KeyboardInterrupt kills workers, so don't let them get it
import signal
signal.signal(signal.SIGINT, signal.SIG_IGN)
def cleanup_cache(cache, target_size, ratio=.85):
try:
p = subprocess.Popen(['du', '-s', '-k', os.path.abspath(cache)], stdout=subprocess.PIPE)
res = p.wait()
if res == 0:
total_size = 1024 * int(p.stdout.read().strip().split()[0])
if total_size < target_size:
return
except (OSError, ValueError):
pass
total_size = 0
all = []
for file in os.listdir(cache):
path = join_path(cache, file)
s = os.stat(path)
total_size += s.st_size
all.append((s.st_atime, s.st_size, path))
if total_size > target_size:
for time, size, file in reversed(sorted(all)):
os.unlink(file)
total_size -= size
if total_size < target_size * ratio:
break
|
ryfeus/lambda-packs
|
HDF4_H5_NETCDF/source2.7/Cython/Build/Dependencies.py
|
Python
|
mit
| 43,610
|
import pyb
print("Executing main.py")
led = pyb.LED(1)
led.on()
pyb.delay(100)
led.off()
pyb.delay(100)
led.on()
pyb.delay(100)
led.off()
|
HenrikSolver/micropython
|
teensy/memzip_files/main.py
|
Python
|
mit
| 143
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='CommonInfo',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, verbose_name='ID', auto_created=True)),
('name', models.CharField(max_length=100)),
('date', models.DateField(auto_now=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Files',
fields=[
('commoninfo_ptr', models.OneToOneField(parent_link=True, auto_created=True, to='p311.CommonInfo', serialize=False, primary_key=True)),
('orgname', models.CharField(max_length=100)),
],
options={
},
bases=('p311.commoninfo',),
),
migrations.CreateModel(
name='Results',
fields=[
('commoninfo_ptr', models.OneToOneField(parent_link=True, auto_created=True, to='p311.CommonInfo', serialize=False, primary_key=True)),
('service', models.CharField(max_length=3)),
('result', models.BooleanField(default=False)),
('description', models.CharField(max_length=250)),
('src_file', models.ForeignKey(to='p311.Files')),
],
options={
},
bases=('p311.commoninfo',),
),
]
|
max1k/cbs
|
p311/migrations/0001_initial.py
|
Python
|
gpl-2.0
| 1,608
|
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Asynchronous event notifications from virtualization drivers.
This module defines a set of classes representing data for
various asynchronous events that can occur in a virtualization
driver.
"""
import time
from nova.i18n import _
EVENT_LIFECYCLE_STARTED = 0
EVENT_LIFECYCLE_STOPPED = 1
EVENT_LIFECYCLE_PAUSED = 2
EVENT_LIFECYCLE_RESUMED = 3
NAMES = {
EVENT_LIFECYCLE_STARTED: _('Started'),
EVENT_LIFECYCLE_STOPPED: _('Stopped'),
EVENT_LIFECYCLE_PAUSED: _('Paused'),
EVENT_LIFECYCLE_RESUMED: _('Resumed')
}
class Event(object):
"""Base class for all events emitted by a hypervisor.
All events emitted by a virtualization driver are
subclasses of this base object. The only generic
information recorded in the base class is a timestamp
indicating when the event first occurred. The timestamp
is recorded as fractional seconds since the UNIX epoch.
"""
def __init__(self, timestamp=None):
if timestamp is None:
self.timestamp = time.time()
else:
self.timestamp = timestamp
def get_timestamp(self):
return self.timestamp
def __repr__(self):
return "<%s: %s>" % (
self.__class__.__name__,
self.timestamp)
class InstanceEvent(Event):
"""Base class for all instance events.
All events emitted by a virtualization driver which
are associated with a virtual domain instance are
subclasses of this base object. This object records
the UUID associated with the instance.
"""
def __init__(self, uuid, timestamp=None):
super(InstanceEvent, self).__init__(timestamp)
self.uuid = uuid
def get_instance_uuid(self):
return self.uuid
def __repr__(self):
return "<%s: %s, %s>" % (
self.__class__.__name__,
self.timestamp,
self.uuid)
class LifecycleEvent(InstanceEvent):
"""Class for instance lifecycle state change events.
When a virtual domain instance lifecycle state changes,
events of this class are emitted. The EVENT_LIFECYCLE_XX
constants define which lifecycle change occurred. This
event allows detection of an instance starting/stopping
without need for polling.
"""
def __init__(self, uuid, transition, timestamp=None):
super(LifecycleEvent, self).__init__(uuid, timestamp)
self.transition = transition
def get_transition(self):
return self.transition
def get_name(self):
return NAMES.get(self.transition, _('Unknown'))
def __repr__(self):
return "<%s: %s, %s => %s>" % (
self.__class__.__name__,
self.timestamp,
self.uuid,
self.get_name())
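# A minimal usage sketch: constructing a lifecycle event for a hypothetical
# instance uuid and reading it back. The uuid below is made up and the snippet
# is illustrative only, not part of the driver API.
demo_event = LifecycleEvent('8b6c3d0e-1f60-4f5a-9ee5-6a3ae931b2a0', EVENT_LIFECYCLE_STARTED)
print(demo_event.get_instance_uuid())  # uuid of the instance the event refers to
print(demo_event.get_name())           # human-readable transition name, e.g. 'Started'
print(repr(demo_event))                # includes the fractional-second timestamp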
|
ChinaMassClouds/copenstack-server
|
openstack/src/nova-2014.2/nova/virt/event.py
|
Python
|
gpl-2.0
| 3,342
|
import claudio
import matplotlib.pyplot as plt
import numpy as np
import minst.signal as S
def draw_onset_data(audio_file, onset_data, title):
x, fs = claudio.read(audio_file, samplerate=22050, channels=1, bytedepth=2)
fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(12, 6))
nhop = 100
x_max = np.abs(x).max()
trange = np.arange(0, len(x), nhop) / float(fs)
axes[0].plot(trange, x.flatten()[::nhop])
if not onset_data.empty:
axes[0].vlines(onset_data.time, ymin=-1.05*x_max, ymax=1.05*x_max,
color='k', alpha=0.5, linewidth=3)
log_env_lpf = S.log_envelope(x, fs, 100)
axes[1].plot(trange, log_env_lpf[::nhop])
if not onset_data.empty:
axes[1].vlines(onset_data.time, ymin=log_env_lpf.min()*1.05,
ymax=0, color='k', alpha=0.5, linewidth=3)
for ax in axes:
ax.set_xlim(0, trange.max())
ax.set_xlabel("Time (sec)")
axes[0].set_title(title)
return fig
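# A minimal usage sketch; the audio path and onset times below are illustrative
# placeholders, not files or annotations shipped with the dataset.
import pandas as pd
example_onsets = pd.DataFrame({'time': [0.5, 1.2, 2.8]})  # onset times in seconds
example_fig = draw_onset_data('example_note.wav', example_onsets, title='example_note')
example_fig.savefig('example_note_onsets.png')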
|
ejhumphrey/minst-dataset
|
minst/visualize.py
|
Python
|
isc
| 990
|
#!/usr/bin/env python3.4
#
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool to interactively call methods of code snippets.
Mobly Code Snippet Lib (https://github.com/google/mobly-snippet-lib/) is a
library for triggering custom actions on Android devices by means of an RPC
service.
Usage:
$ snippet_shell com.my.package.snippets
>>> s.mySnippet('example')
u'You said: example'
"""
import argparse
import logging
import sys
from mobly.controllers import android_device
from mobly.controllers.android_device_lib import jsonrpc_shell_base
class SnippetShell(jsonrpc_shell_base.JsonRpcShellBase):
def __init__(self, package):
self._package = package
def _start_services(self, console_env):
"""Overrides superclass."""
self._ad.load_snippet(name='snippet', package=self._package)
console_env['snippet'] = self._ad.snippet
console_env['s'] = self._ad.snippet
def _get_banner(self, serial):
lines = [
'Connected to %s.' % serial, 'Call methods against:',
' ad (android_device.AndroidDevice)', ' snippet or s (Snippet)'
]
return '\n'.join(lines)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Interactive client for Mobly code snippets.')
parser.add_argument(
'-s',
'--serial',
help='Device serial to connect to (if more than one device is connected)')
parser.add_argument('package',
metavar='PACKAGE_NAME',
type=str,
nargs='?',
help='The package name of the snippet to use.')
parser.add_argument('--mbs',
help='Whether to connect to Mobly Bundled Snippets',
action='store_true')
args = parser.parse_args()
if args.package and args.mbs:
print('Cannot specify both --package and --mbs', file=sys.stderr)
sys.exit(1)
if args.mbs:
package = android_device.MBS_PACKAGE
else:
package = args.package
logging.basicConfig(level=logging.INFO)
SnippetShell(package).main(args.serial)
|
google/mobly
|
tools/snippet_shell.py
|
Python
|
apache-2.0
| 2,593
|
import dis
s = open('scope01.py').read()
co = compile(s, 'scope01.py', 'exec')
print dis.dis(co)
|
lizbew/code-practice
|
11-pyvm/print_bytecode.py
|
Python
|
apache-2.0
| 99
|
"""Timeseries plotting functions."""
from __future__ import division
import numpy as np
import pandas as pd
from scipy import stats, interpolate
import matplotlib as mpl
import matplotlib.pyplot as plt
from .external.six import string_types
from . import utils
from . import algorithms as algo
from .palettes import color_palette
def tsplot(data, time=None, unit=None, condition=None, value=None,
err_style="ci_band", ci=68, interpolate=True, color=None,
estimator=np.mean, n_boot=5000, err_palette=None, err_kws=None,
legend=True, ax=None, **kwargs):
"""Plot one or more timeseries with flexible representation of uncertainty.
This function is intended to be used with data where observations are
nested within sampling units that were measured at multiple timepoints.
It can take data specified either as a long-form (tidy) DataFrame or as an
ndarray with dimensions (unit, time). The interpretation of some of the
other parameters changes depending on the type of object passed as data.
Parameters
----------
data : DataFrame or ndarray
Data for the plot. Should either be a "long form" dataframe or an
array with dimensions (unit, time, condition). In both cases, the
condition field/dimension is optional. The type of this argument
determines the interpretation of the next few parameters. When
using a DataFrame, the index has to be sequential.
time : string or series-like
Either the name of the field corresponding to time in the data
DataFrame or x values for a plot when data is an array. If a Series,
the name will be used to label the x axis.
unit : string
Field in the data DataFrame identifying the sampling unit (e.g.
subject, neuron, etc.). The error representation will collapse over
units at each time/condition observation. This has no role when data
is an array.
value : string
Either the name of the field corresponding to the data values in
the data DataFrame (i.e. the y coordinate) or a string that forms
the y axis label when data is an array.
condition : string or Series-like
Either the name of the field identifying the condition an observation
falls under in the data DataFrame, or a sequence of names with a length
equal to the size of the third dimension of data. There will be a
separate trace plotted for each condition. If condition is a Series
with a name attribute, the name will form the title for the plot
legend (unless legend is set to False).
err_style : string or list of strings or None
Names of ways to plot uncertainty across units from set of
{ci_band, ci_bars, boot_traces, boot_kde, unit_traces, unit_points}.
Can use one or more than one method.
ci : float or list of floats in [0, 100]
Confidence interval size(s). If a list, it will stack the error
plots for each confidence interval. Only relevant for error styles
with "ci" in the name.
interpolate : boolean
Whether to do a linear interpolation between each timepoint when
plotting. The value of this parameter also determines the marker
used for the main plot traces, unless marker is specified as a keyword
argument.
color : seaborn palette or matplotlib color name or dictionary
Palette or color for the main plots and error representation (unless
plotting by unit, which can be separately controlled with err_palette).
If a dictionary, should map condition name to color spec.
estimator : callable
Function to determine central tendency and to pass to bootstrap;
must take an ``axis`` argument.
n_boot : int
Number of bootstrap iterations.
err_palette : seaborn palette
Palette name or list of colors used when plotting data for each unit.
err_kws : dict, optional
Keyword argument dictionary passed through to matplotlib function
generating the error plot.
legend : bool, optional
If ``True`` and there is a ``condition`` variable, add a legend to
the plot.
ax : axis object, optional
Plot in given axis; if None creates a new figure
kwargs :
Other keyword arguments are passed to main plot() call
Returns
-------
ax : matplotlib axis
axis with plot data
Examples
--------
Plot a trace with translucent confidence bands:
.. plot::
:context: close-figs
>>> import numpy as np; np.random.seed(22)
>>> import seaborn as sns; sns.set(color_codes=True)
>>> x = np.linspace(0, 15, 31)
>>> data = np.sin(x) + np.random.rand(10, 31) + np.random.randn(10, 1)
>>> ax = sns.tsplot(data=data)
Plot a long-form dataframe with several conditions:
.. plot::
:context: close-figs
>>> gammas = sns.load_dataset("gammas")
>>> ax = sns.tsplot(time="timepoint", value="BOLD signal",
... unit="subject", condition="ROI",
... data=gammas)
Use error bars at the positions of the observations:
.. plot::
:context: close-figs
>>> ax = sns.tsplot(data=data, err_style="ci_bars", color="g")
Don't interpolate between the observations:
.. plot::
:context: close-figs
>>> import matplotlib.pyplot as plt
>>> ax = sns.tsplot(data=data, err_style="ci_bars", interpolate=False)
Show multiple confidence bands:
.. plot::
:context: close-figs
>>> ax = sns.tsplot(data=data, ci=[68, 95], color="m")
Use a different estimator:
.. plot::
:context: close-figs
>>> ax = sns.tsplot(data=data, estimator=np.median)
Show each bootstrap resample:
.. plot::
:context: close-figs
>>> ax = sns.tsplot(data=data, err_style="boot_traces", n_boot=500)
Show the trace from each sampling unit:
.. plot::
:context: close-figs
>>> ax = sns.tsplot(data=data, err_style="unit_traces")
"""
# Sort out default values for the parameters
if ax is None:
ax = plt.gca()
if err_kws is None:
err_kws = {}
# Handle different types of input data
if isinstance(data, pd.DataFrame):
xlabel = time
ylabel = value
# Condition is optional
if condition is None:
condition = pd.Series(np.ones(len(data)))
legend = False
legend_name = None
n_cond = 1
else:
legend = True and legend
legend_name = condition
n_cond = len(data[condition].unique())
else:
data = np.asarray(data)
# Data can be a timecourse from a single unit or
# several observations in one condition
if data.ndim == 1:
data = data[np.newaxis, :, np.newaxis]
elif data.ndim == 2:
data = data[:, :, np.newaxis]
n_unit, n_time, n_cond = data.shape
# Units are experimental observations. Maybe subjects, or neurons
if unit is None:
units = np.arange(n_unit)
unit = "unit"
units = np.repeat(units, n_time * n_cond)
ylabel = None
# Time forms the xaxis of the plot
if time is None:
times = np.arange(n_time)
else:
times = np.asarray(time)
xlabel = None
if hasattr(time, "name"):
xlabel = time.name
time = "time"
times = np.tile(np.repeat(times, n_cond), n_unit)
# Conditions split the timeseries plots
if condition is None:
conds = range(n_cond)
legend = False
if isinstance(color, dict):
err = "Must have condition names if using color dict."
raise ValueError(err)
else:
conds = np.asarray(condition)
legend = True and legend
if hasattr(condition, "name"):
legend_name = condition.name
else:
legend_name = None
condition = "cond"
conds = np.tile(conds, n_unit * n_time)
# Value forms the y value in the plot
if value is None:
ylabel = None
else:
ylabel = value
value = "value"
# Convert to long-form DataFrame
data = pd.DataFrame(dict(value=data.ravel(),
time=times,
unit=units,
cond=conds))
# Set up the err_style and ci arguments for the loop below
if isinstance(err_style, string_types):
err_style = [err_style]
elif err_style is None:
err_style = []
if not hasattr(ci, "__iter__"):
ci = [ci]
# Set up the color palette
if color is None:
current_palette = mpl.rcParams["axes.color_cycle"]
if len(current_palette) < n_cond:
colors = color_palette("husl", n_cond)
else:
colors = color_palette(n_colors=n_cond)
elif isinstance(color, dict):
colors = [color[c] for c in data[condition].unique()]
else:
try:
colors = color_palette(color, n_cond)
except ValueError:
color = mpl.colors.colorConverter.to_rgb(color)
colors = [color] * n_cond
# Do a groupby with condition and plot each trace
for c, (cond, df_c) in enumerate(data.groupby(condition, sort=False)):
df_c = df_c.pivot(unit, time, value)
x = df_c.columns.values.astype(np.float)
# Bootstrap the data for confidence intervals
boot_data = algo.bootstrap(df_c.values, n_boot=n_boot,
axis=0, func=estimator)
cis = [utils.ci(boot_data, v, axis=0) for v in ci]
central_data = estimator(df_c.values, axis=0)
# Get the color for this condition
color = colors[c]
# Use subroutines to plot the uncertainty
for style in err_style:
# Allow for null style (only plot central tendency)
if style is None:
continue
# Grab the function from the global environment
try:
plot_func = globals()["_plot_%s" % style]
except KeyError:
raise ValueError("%s is not a valid err_style" % style)
# Possibly set up to plot each observation in a different color
if err_palette is not None and "unit" in style:
orig_color = color
color = color_palette(err_palette, len(df_c.values))
# Pass all parameters to the error plotter as keyword args
plot_kwargs = dict(ax=ax, x=x, data=df_c.values,
boot_data=boot_data,
central_data=central_data,
color=color, err_kws=err_kws)
# Plot the error representation, possibly for multiple cis
for ci_i in cis:
plot_kwargs["ci"] = ci_i
plot_func(**plot_kwargs)
if err_palette is not None and "unit" in style:
color = orig_color
# Plot the central trace
kwargs.setdefault("marker", "" if interpolate else "o")
ls = kwargs.pop("ls", "-" if interpolate else "")
kwargs.setdefault("linestyle", ls)
label = cond if legend else "_nolegend_"
ax.plot(x, central_data, color=color, label=label, **kwargs)
# Pad the sides of the plot only when not interpolating
ax.set_xlim(x.min(), x.max())
x_diff = x[1] - x[0]
if not interpolate:
ax.set_xlim(x.min() - x_diff, x.max() + x_diff)
# Add the plot labels
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
if legend:
ax.legend(loc=0, title=legend_name)
return ax
# Subroutines for tsplot errorbar plotting
# ----------------------------------------
def _plot_ci_band(ax, x, ci, color, err_kws, **kwargs):
"""Plot translucent error bands around the central tendancy."""
low, high = ci
if "alpha" not in err_kws:
err_kws["alpha"] = 0.2
ax.fill_between(x, low, high, color=color, **err_kws)
def _plot_ci_bars(ax, x, central_data, ci, color, err_kws, **kwargs):
"""Plot error bars at each data point."""
for x_i, y_i, (low, high) in zip(x, central_data, ci.T):
ax.plot([x_i, x_i], [low, high], color=color,
solid_capstyle="round", **err_kws)
def _plot_boot_traces(ax, x, boot_data, color, err_kws, **kwargs):
"""Plot 250 traces from bootstrap."""
err_kws.setdefault("alpha", 0.25)
err_kws.setdefault("linewidth", 0.25)
if "lw" in err_kws:
err_kws["linewidth"] = err_kws.pop("lw")
ax.plot(x, boot_data.T, color=color, label="_nolegend_", **err_kws)
def _plot_unit_traces(ax, x, data, ci, color, err_kws, **kwargs):
"""Plot a trace for each observation in the original data."""
if isinstance(color, list):
if "alpha" not in err_kws:
err_kws["alpha"] = .5
for i, obs in enumerate(data):
ax.plot(x, obs, color=color[i], label="_nolegend_", **err_kws)
else:
if "alpha" not in err_kws:
err_kws["alpha"] = .2
ax.plot(x, data.T, color=color, label="_nolegend_", **err_kws)
def _plot_unit_points(ax, x, data, color, err_kws, **kwargs):
"""Plot each original data point discretely."""
if isinstance(color, list):
for i, obs in enumerate(data):
ax.plot(x, obs, "o", color=color[i], alpha=0.8, markersize=4,
label="_nolegend_", **err_kws)
else:
ax.plot(x, data.T, "o", color=color, alpha=0.5, markersize=4,
label="_nolegend_", **err_kws)
def _plot_boot_kde(ax, x, boot_data, color, **kwargs):
"""Plot the kernal density estimate of the bootstrap distribution."""
kwargs.pop("data")
_ts_kde(ax, x, boot_data, color, **kwargs)
def _plot_unit_kde(ax, x, data, color, **kwargs):
"""Plot the kernal density estimate over the sample."""
_ts_kde(ax, x, data, color, **kwargs)
def _ts_kde(ax, x, data, color, **kwargs):
"""Upsample over time and plot a KDE of the bootstrap distribution."""
kde_data = []
y_min, y_max = data.min(), data.max()
y_vals = np.linspace(y_min, y_max, 100)
upsampler = interpolate.interp1d(x, data)
data_upsample = upsampler(np.linspace(x.min(), x.max(), 100))
for pt_data in data_upsample.T:
pt_kde = stats.kde.gaussian_kde(pt_data)
kde_data.append(pt_kde(y_vals))
kde_data = np.transpose(kde_data)
rgb = mpl.colors.ColorConverter().to_rgb(color)
img = np.zeros((kde_data.shape[0], kde_data.shape[1], 4))
img[:, :, :3] = rgb
kde_data /= kde_data.max(axis=0)
kde_data[kde_data > 1] = 1
img[:, :, 3] = kde_data
ax.imshow(img, interpolation="spline16", zorder=2,
extent=(x.min(), x.max(), y_min, y_max),
aspect="auto", origin="lower")
|
Lx37/seaborn
|
seaborn/timeseries.py
|
Python
|
bsd-3-clause
| 15,218
|
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import fnmatch
import imp
import os
from . import commands
from . import statusfile
from . import utils
from ..objects import testcase
from variants import ALL_VARIANTS, ALL_VARIANT_FLAGS, FAST_VARIANT_FLAGS
FAST_VARIANTS = set(["default", "turbofan"])
STANDARD_VARIANT = set(["default"])
class VariantGenerator(object):
def __init__(self, suite, variants):
self.suite = suite
self.all_variants = ALL_VARIANTS & variants
self.fast_variants = FAST_VARIANTS & variants
self.standard_variant = STANDARD_VARIANT & variants
def FilterVariantsByTest(self, testcase):
result = self.all_variants
if testcase.outcomes:
if statusfile.OnlyStandardVariant(testcase.outcomes):
return self.standard_variant
if statusfile.OnlyFastVariants(testcase.outcomes):
result = self.fast_variants
return result
def GetFlagSets(self, testcase, variant):
if testcase.outcomes and statusfile.OnlyFastVariants(testcase.outcomes):
return FAST_VARIANT_FLAGS[variant]
else:
return ALL_VARIANT_FLAGS[variant]
class TestSuite(object):
@staticmethod
def LoadTestSuite(root, global_init=True):
name = root.split(os.path.sep)[-1]
f = None
try:
(f, pathname, description) = imp.find_module("testcfg", [root])
module = imp.load_module(name + "_testcfg", f, pathname, description)
return module.GetSuite(name, root)
except ImportError:
# Use default if no testcfg is present.
return GoogleTestSuite(name, root)
finally:
if f:
f.close()
def __init__(self, name, root):
# Note: This might be called concurrently from different processes.
self.name = name # string
self.root = root # string containing path
self.tests = None # list of TestCase objects
self.rules = None # dictionary mapping test path to list of outcomes
self.wildcards = None # dictionary mapping test paths to list of outcomes
self.total_duration = None # float, assigned on demand
def shell(self):
return "d8"
def suffix(self):
return ".js"
def status_file(self):
return "%s/%s.status" % (self.root, self.name)
# Used in the status file and for stdout printing.
def CommonTestName(self, testcase):
if utils.IsWindows():
return testcase.path.replace("\\", "/")
else:
return testcase.path
def ListTests(self, context):
raise NotImplementedError
def _VariantGeneratorFactory(self):
"""The variant generator class to be used."""
return VariantGenerator
def CreateVariantGenerator(self, variants):
"""Return a generator for the testing variants of this suite.
Args:
variants: List of variant names to be run as specified by the test
runner.
Returns: An object of type VariantGenerator.
"""
return self._VariantGeneratorFactory()(self, set(variants))
def PrepareSources(self):
"""Called once before multiprocessing for doing file-system operations.
This should not access the network. For network access use the method
below.
"""
pass
def DownloadData(self):
pass
def ReadStatusFile(self, variables):
with open(self.status_file()) as f:
self.rules, self.wildcards = (
statusfile.ReadStatusFile(f.read(), variables))
def ReadTestCases(self, context):
self.tests = self.ListTests(context)
@staticmethod
def _FilterSlow(slow, mode):
return (mode == "run" and not slow) or (mode == "skip" and slow)
@staticmethod
def _FilterPassFail(pass_fail, mode):
return (mode == "run" and not pass_fail) or (mode == "skip" and pass_fail)
def FilterTestCasesByStatus(self, warn_unused_rules,
slow_tests="dontcare",
pass_fail_tests="dontcare",
variants=False):
# Use only variants-dependent rules and wildcards when filtering
# respective test cases and generic rules when filtering generic test
# cases.
if not variants:
rules = self.rules[""]
wildcards = self.wildcards[""]
else:
# We set rules and wildcards to a variant-specific version for each test
# below.
rules = {}
wildcards = {}
filtered = []
# Remember used rules as tuples of (rule, variant), where variant is "" for
# variant-independent rules.
used_rules = set()
for t in self.tests:
slow = False
pass_fail = False
testname = self.CommonTestName(t)
variant = t.variant or ""
if variants:
rules = self.rules[variant]
wildcards = self.wildcards[variant]
if testname in rules:
used_rules.add((testname, variant))
# Even for skipped tests, as the TestCase object stays around and
# PrintReport() uses it.
t.outcomes = t.outcomes | rules[testname]
if statusfile.DoSkip(t.outcomes):
continue # Don't add skipped tests to |filtered|.
for outcome in t.outcomes:
if outcome.startswith('Flags: '):
t.flags += outcome[7:].split()
slow = statusfile.IsSlow(t.outcomes)
pass_fail = statusfile.IsPassOrFail(t.outcomes)
skip = False
for rule in wildcards:
assert rule[-1] == '*'
if testname.startswith(rule[:-1]):
used_rules.add((rule, variant))
t.outcomes = t.outcomes | wildcards[rule]
if statusfile.DoSkip(t.outcomes):
skip = True
break # "for rule in wildcards"
slow = slow or statusfile.IsSlow(t.outcomes)
pass_fail = pass_fail or statusfile.IsPassOrFail(t.outcomes)
if (skip
or self._FilterSlow(slow, slow_tests)
or self._FilterPassFail(pass_fail, pass_fail_tests)):
continue # "for t in self.tests"
filtered.append(t)
self.tests = filtered
if not warn_unused_rules:
return
if not variants:
for rule in self.rules[""]:
if (rule, "") not in used_rules:
print("Unused rule: %s -> %s (variant independent)" % (
rule, self.rules[""][rule]))
for rule in self.wildcards[""]:
if (rule, "") not in used_rules:
print("Unused rule: %s -> %s (variant independent)" % (
rule, self.wildcards[""][rule]))
else:
for variant in ALL_VARIANTS:
for rule in self.rules[variant]:
if (rule, variant) not in used_rules:
print("Unused rule: %s -> %s (variant: %s)" % (
rule, self.rules[variant][rule], variant))
for rule in self.wildcards[variant]:
if (rule, variant) not in used_rules:
print("Unused rule: %s -> %s (variant: %s)" % (
rule, self.wildcards[variant][rule], variant))
def FilterTestCasesByArgs(self, args):
"""Filter test cases based on command-line arguments.
args can be a glob: asterisks in any position of the argument
represent zero or more characters. Without asterisks, only exact matches
will be used, with the exception of the test-suite name as argument.
"""
filtered = []
globs = []
for a in args:
argpath = a.split('/')
if argpath[0] != self.name:
continue
if len(argpath) == 1 or (len(argpath) == 2 and argpath[1] == '*'):
return # Don't filter, run all tests in this suite.
path = '/'.join(argpath[1:])
globs.append(path)
for t in self.tests:
for g in globs:
if fnmatch.fnmatch(t.path, g):
filtered.append(t)
break
self.tests = filtered
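# For example (hypothetical suite and test paths), with self.name == "mjsunit":
#   args == ["mjsunit"] or ["mjsunit/*"]   -> no filtering, every test is kept
#   args == ["mjsunit/regress/regress-1*"] -> keeps only tests whose path matches
#                                             the glob "regress/regress-1*"
#   args == ["cctest/foo"]                 -> leaves self.tests empty (different suite)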
def GetFlagsForTestCase(self, testcase, context):
raise NotImplementedError
def GetSourceForTest(self, testcase):
return "(no source available)"
def IsFailureOutput(self, testcase):
return testcase.output.exit_code != 0
def IsNegativeTest(self, testcase):
return False
def HasFailed(self, testcase):
execution_failed = self.IsFailureOutput(testcase)
if self.IsNegativeTest(testcase):
return not execution_failed
else:
return execution_failed
def GetOutcome(self, testcase):
if testcase.output.HasCrashed():
return statusfile.CRASH
elif testcase.output.HasTimedOut():
return statusfile.TIMEOUT
elif self.HasFailed(testcase):
return statusfile.FAIL
else:
return statusfile.PASS
def HasUnexpectedOutput(self, testcase):
outcome = self.GetOutcome(testcase)
return not outcome in (testcase.outcomes or [statusfile.PASS])
def StripOutputForTransmit(self, testcase):
if not self.HasUnexpectedOutput(testcase):
testcase.output.stdout = ""
testcase.output.stderr = ""
def CalculateTotalDuration(self):
self.total_duration = 0.0
for t in self.tests:
self.total_duration += t.duration
return self.total_duration
class StandardVariantGenerator(VariantGenerator):
def FilterVariantsByTest(self, testcase):
return self.standard_variant
class GoogleTestSuite(TestSuite):
def __init__(self, name, root):
super(GoogleTestSuite, self).__init__(name, root)
def ListTests(self, context):
shell = os.path.abspath(os.path.join(context.shell_dir, self.shell()))
if utils.IsWindows():
shell += ".exe"
output = None
for i in xrange(3): # Try 3 times in case of errors.
output = commands.Execute(context.command_prefix +
[shell, "--gtest_list_tests"] +
context.extra_flags)
if output.exit_code == 0:
break
print "Test executable failed to list the tests (try %d).\n\nStdout:" % i
print output.stdout
print "\nStderr:"
print output.stderr
print "\nExit code: %d" % output.exit_code
else:
raise Exception("Test executable failed to list the tests.")
tests = []
test_case = ''
for line in output.stdout.splitlines():
test_desc = line.strip().split()[0]
if test_desc.endswith('.'):
test_case = test_desc
elif test_case and test_desc:
test = testcase.TestCase(self, test_case + test_desc)
tests.append(test)
tests.sort(key=lambda t: t.path)
return tests
def GetFlagsForTestCase(self, testcase, context):
return (testcase.flags + ["--gtest_filter=" + testcase.path] +
["--gtest_random_seed=%s" % context.random_seed] +
["--gtest_print_time=0"] +
context.mode_flags)
def _VariantGeneratorFactory(self):
return StandardVariantGenerator
def shell(self):
return self.name
|
macchina-io/macchina.io
|
platform/JS/V8/v8/tools/testrunner/local/testsuite.py
|
Python
|
apache-2.0
| 12,117
|
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
from setuptools import setup, find_packages
tests_require = []
setup(
name='cloak-server',
version='0.3',
author='Encrypt.me',
author_email='hello@encrypt.me',
description="Tool for configuring private Encrypt.me endpoints.",
license='BSD',
url='https://github.com/encryptme/private-end-points',
install_requires=[
'asn1crypto>=0.21.0,<1.0.0',
'csrbuilder>=0.10.1',
'oscrypto>=0.18.0,<1.0.0',
'requests>=2.5.1',
'six>=1.10.0',
'typing',
],
packages=find_packages(),
namespace_packages=['cloak'],
scripts=[
'bin/cloak-server'
],
test_suite='cloak.serverapi.tests',
tests_require=tests_require,
)
|
encryptme/private-end-points
|
setup.py
|
Python
|
mit
| 837
|
# -*- coding: UTF-8 -*-
import re
import sys
import jieba
import numpy as np
import pandas as pd
from gensim import corpora, models, similarities
sys.path.append("utility")
from utility.async_decor import async_wrapper
from utility.logger_decor import create_logger, exception
class Docsim(object):
"""
Text similarity
"""
logger = create_logger()
def __init__(self, keep_val: float = 0.8):
"""
:param keep_val: similarity threshold; only matches scoring above it are kept
"""
self.keep_val = keep_val
def load_data(self, input_file):
"""读取数据,分词存储
Arguments:
inputFile {[type]} -- 输入的文件
"""
df_data = pd.read_excel(input_file, names=["SysNo", "Title", "Content"])
corpora_documents = []
idx_dic = {}
for idx, row in df_data.iterrows():
seg_title = self.segment(row.Title)
seg_content = self.segment(row.Content)
idx_dic[idx] = row.SysNo
corpora_documents.append((seg_title, seg_content))
titles = [t for t, _ in corpora_documents]
contents = [c for _, c in corpora_documents]
# Save the segmented word lists
np.save("./data/title_words.npy", titles)
np.save("./data/content_words.npy", contents)
# Save the index dictionary
with open('./data/idx_dic.dic', 'w') as f:
f.write(str(idx_dic))
return corpora_documents
def segment(self, doc: str):
"""分词
Arguments:
doc {str} -- 输入文本
Returns:
[type] -- [description]
"""
# Stop words
stop_words = pd.read_csv("./data/stopwords_TUH.txt", index_col=False, quoting=3,
names=['stopword'],
sep="\n",
encoding='utf-8')
stop_words = list(stop_words.stopword)
reg_html = re.compile(r'<[^>]+>', re.S)
doc = reg_html.sub('', doc)
word_list = list(jieba.cut(doc))
out_str = ''
for word in word_list:
if word not in stop_words:
out_str += word
out_str += ' '
segments = out_str.split(sep=" ")
return segments
def train(self, prefix: str, corporas: list):
""" 训练模型
保存字典,语料,模型到磁盘
Arguments:
prefix {str} -- 模型名称前缀
corpora_documents {list} -- 分词后的文本
"""
# Build the dictionary and the vectorized corpus
dictionary = corpora.Dictionary(corporas)
dictionary.save('./models/{}_dict.dic'.format(prefix))  # save the generated dictionary
corpus = [dictionary.doc2bow(text) for text in corporas]
corpora.MmCorpus.serialize('./models/{}_corpuse.mm'.format(prefix), corpus)  # save the generated corpus
tfidf_model = models.TfidfModel(corpus)
tfidf_model.save("./models/{}_tfidf_model.model".format(prefix))  # save the TF-IDF model
@exception(logger)
def calc_similarity(self, prefix: str, text: str):
"""计算相似度
返回索引和余弦值
Arguments:
prefix {str} -- 模型前缀
text {str} -- 文本数据
value {float} -- 设定的阈值,返回大于这个值的数据
"""
dictionary = corpora.Dictionary.load('./models/{}_dict.dic'.format(prefix))  # load the dictionary
corpus = corpora.MmCorpus('./models/{}_corpuse.mm'.format(prefix))  # load the corpus
tfidf_model = models.TfidfModel.load("./models/{}_tfidf_model.model".format(prefix))  # load the TF-IDF model
corpus_tfidf = tfidf_model[corpus]
lsi = models.LsiModel(corpus_tfidf)
corpus_lsi = lsi[corpus_tfidf]
similarity_lsi = similarities.Similarity('./models/similarity-lsi-index',
corpus_lsi,
num_features=400,
num_best=3)
cut_raw = self.segment(text)  # 1. segment the query
corpus = dictionary.doc2bow(cut_raw)  # 2. convert to a bag-of-words vector
corpus_tfidf = tfidf_model[corpus]  # 3. compute TF-IDF weights
corpus_lsi = lsi[corpus_tfidf]  # 4. project into LSI space
sims = similarity_lsi[corpus_lsi]
with open('./data/idx_dic.dic', 'r') as f:
dt = f.read()
idx_dic = eval(dt)
result = []
if sims is not None:
result = [idx_dic[idx] for idx, val in sims if val > self.keep_val]
return result
@async_wrapper
def update_model(self, prefix: str, sysno: int, doc: str):
"""
Update the dictionary, corpus and TF-IDF model with a new document
:param prefix: model name prefix
:param sysno: system id of the document
:param doc: the document text
:return:
"""
corporas = self.segment(doc)
# Update the dictionary
dictionary = corpora.Dictionary.load('./models/{}_dict.dic'.format(prefix))  # load
dictionary.add_documents([corporas])
dictionary.save('./models/{}_dict.dic'.format(prefix))  # save the updated dictionary
corporas_docs = np.load("./data/{}_words.npy".format(prefix))
corporas_docs = list(corporas_docs)
corporas_docs.append(corporas)
np.save("./data/{}_words.npy".format(prefix), corporas_docs)
# Update the corpus
corpus = [dictionary.doc2bow(text) for text in corporas_docs]
corpora.MmCorpus.serialize('./models/{}_corpuse.mm'.format(prefix), corpus)
# Update the TfidfModel
tfidf_model = models.TfidfModel(corpus)
tfidf_model.save("./models/{}_tfidf_model.model".format(prefix))
# Update the index dictionary
with open('./data/idx_dic.dic', 'r') as f:
dt = f.read()
idx_dic = eval(dt)
if sysno not in idx_dic.values():
idx_dic[len(idx_dic)] = sysno
with open('./data/idx_dic.dic', 'w') as f:
f.write(str(idx_dic))
if __name__ == '__main__':
doc = Docsim()
documents = doc.load_data("./data/news.xlsx")
title = [t for t, _ in documents]
doc.train("title", title)
content = [c for _, c in documents]
doc.train("content", content)
# ids = doc.get_simdoc("title", "分布式系统中的多台计算机之间在空间位置上可以随意分布")
# ids = doc.get_simdoc("title", "字典是另一种可变容器模型且可存储任意类型对象")
# print(ids)
# doc.update_model("title",301, "分布式系统中的多台计算机之间在空间位置上可以随意分布")
# doc.update_model("title", 302, "字典是另一种可变容器模型且可存储任意类型对象")
# doc.get_simdoc("title", "分布式系统中的多台计算机之间在空间位置上可以随意分布")
# doc.update_model("title", 302, "字典是另一种可变容器模型且可存储任意类型对象")
|
jarvisqi/nlp_learn
|
gensim/docsim.py
|
Python
|
mit
| 6,907
|
import re
import uuid as py_uuid
from common_fixtures import * # NOQA
TEST_IMAGE = 'ibuildthecloud/helloworld'
TEST_IMAGE_LATEST = TEST_IMAGE + ':latest'
TEST_IMAGE_UUID = 'docker:' + TEST_IMAGE
if_docker = pytest.mark.skipif("os.environ.get('DOCKER_TEST') == 'false'",
reason='DOCKER_TEST is not set')
@pytest.fixture(scope='session')
def docker_client(super_client):
for host in super_client.list_host(state='active', remove_null=True,
kind='docker'):
keys = super_client.list_api_key(accountId=host.accountId)
if len(keys) == 0:
key = super_client.create_api_key(accountId=host.accountId)
key = super_client.wait_success(key)
keys = [key]
return api_client(keys[0].publicValue, keys[0].secretValue)
raise Exception('Failed to find docker host, please register one')
@if_docker
def test_docker_create_only(docker_client, super_client):
uuid = TEST_IMAGE_UUID
container = docker_client.create_container(name='test',
imageUuid=uuid,
startOnCreate=False)
try:
container = docker_client.wait_success(container)
assert container is not None
assert 'container' == container.type
image = super_client.reload(container).image()
assert image.instanceKind == 'container'
image_mapping = filter(
lambda m: m.storagePool().external,
image.imageStoragePoolMaps()
)
assert len(image_mapping) == 0
assert not image.isPublic
assert image.name == '{}'.format(image.data.dockerImage.fullName,
image.data.dockerImage.id)
assert image.name == TEST_IMAGE_LATEST
assert image.data.dockerImage.repository == 'helloworld'
assert image.data.dockerImage.namespace == 'ibuildthecloud'
assert image.data.dockerImage.tag == 'latest'
finally:
if container is not None:
docker_client.wait_success(docker_client.delete(container))
@if_docker
def test_docker_create_only_from_sha(docker_client, super_client):
image_name = 'tianon/true@sha256:662fc60808e6d5628a090e39' \
'b4bcae694add28a626031cc889109c2cf2af5d73'
uuid = 'docker:' + image_name
container = docker_client.create_container(name='test-sha256',
imageUuid=uuid,
startOnCreate=False)
try:
container = docker_client.wait_success(container)
assert container is not None
assert 'container' == container.type
image = super_client.reload(container).image()
assert image.instanceKind == 'container'
image_mapping = filter(
lambda m: m.storagePool().external,
image.imageStoragePoolMaps()
)
assert len(image_mapping) == 0
assert not image.isPublic
assert image.name == '{}'.format(image.data.dockerImage.fullName,
image.data.dockerImage.id)
assert image.name == image_name
assert image.data.dockerImage.repository == 'true'
assert image.data.dockerImage.namespace == 'tianon'
assert image.data.dockerImage.tag == 'sha256:662fc60808e6d5628a090e' \
'39b4bcae694add28a626031cc8891' \
'09c2cf2af5d73'
finally:
if container is not None:
docker_client.wait_success(docker_client.delete(container))
@if_docker
def test_docker_create_with_start(docker_client, super_client):
uuid = TEST_IMAGE_UUID
container = docker_client.create_container(name='test', imageUuid=uuid)
try:
assert container.state == 'creating'
container = super_client.wait_success(container)
assert container.state == 'running'
assert container.data.dockerContainer.Image == TEST_IMAGE_LATEST
assert len(container.volumes()) == 1
image = container.volumes()[0].image()
image = super_client.reload(image)
image_mapping = filter(
lambda m: not m.storagePool().external,
image.imageStoragePoolMaps()
)
assert len(image_mapping) == 1
assert image_mapping[0].imageId == image.id
finally:
if container is not None:
docker_client.wait_success(docker_client.delete(container))
@if_docker
def test_docker_build(docker_client, super_client):
uuid = 'image-' + random_str()
url = 'https://github.com/rancherio/tiny-build/raw/master/build.tar'
container = docker_client.create_container(name='test',
imageUuid='docker:' + uuid,
build={
'context': url,
})
try:
assert container.state == 'creating'
container = super_client.wait_success(container)
# This builds tianon/true which just dies
assert container.state == 'running' or container.state == 'stopped'
assert container.transitioning == 'no'
assert container.data.dockerContainer.Image == uuid + ':latest'
finally:
if container is not None:
docker_client.wait_success(docker_client.delete(container))
@if_docker
def test_docker_create_with_start_using_docker_io(docker_client, super_client):
image = 'docker.io/' + TEST_IMAGE
uuid = 'docker:' + image
container = docker_client.create_container(name='test', imageUuid=uuid)
container = super_client.wait_success(container)
assert container.state == 'running'
assert container.data.dockerContainer.Image == image + ':latest'
if container is not None:
docker_client.wait_success(docker_client.delete(container))
@if_docker
def test_docker_command(docker_client, super_client):
uuid = TEST_IMAGE_UUID
container = docker_client.create_container(name='test',
imageUuid=uuid,
command=['sleep', '42'])
try:
container = super_client.wait_success(container)
assert container.data.dockerContainer.Command == 'sleep 42'
finally:
if container is not None:
docker_client.wait_success(docker_client.delete(container))
@if_docker
def test_docker_command_args(docker_client, super_client):
uuid = TEST_IMAGE_UUID
container = docker_client.create_container(name='test',
imageUuid=uuid,
command=['sleep', '1', '2',
'3'])
try:
container = super_client.wait_success(container)
assert container.data.dockerContainer.Command == 'sleep 1 2 3'
finally:
if container is not None:
docker_client.wait_success(docker_client.delete(container))
@if_docker
def test_short_lived_container(docker_client, super_client):
container = docker_client.create_container(imageUuid="docker:tianon/true")
container = wait_for_condition(
docker_client, container,
lambda x: x.state == 'stopped',
lambda x: 'State is: ' + x.state)
assert container.state == 'stopped'
assert container.transitioning == 'no'
@if_docker
def test_docker_stop(docker_client):
uuid = TEST_IMAGE_UUID
container = docker_client.create_container(name='test', imageUuid=uuid)
assert container.state == 'creating'
container = docker_client.wait_success(container)
assert container.state == 'running'
start = time.time()
container = container.stop(timeout=0)
assert container.state == 'stopping'
container = docker_client.wait_success(container)
delta = time.time() - start
assert container.state == 'stopped'
assert delta < 10
@if_docker
def test_docker_purge(docker_client):
uuid = TEST_IMAGE_UUID
container = docker_client.create_container(name='test', imageUuid=uuid)
assert container.state == 'creating'
container = docker_client.wait_success(container)
assert container.state == 'running'
container = container.stop(timeout=0)
assert container.state == 'stopping'
container = docker_client.wait_success(container)
assert container.state == 'stopped'
docker_client.delete(container)
container = docker_client.wait_success(container)
assert container.state == 'removed'
container = docker_client.wait_success(container.purge())
assert container.state == 'purged'
volume = container.volumes()[0]
assert volume.state == 'removed'
volume = docker_client.wait_success(volume.purge())
assert volume.state == 'purged'
@if_docker
def test_docker_image_format(docker_client, super_client):
uuid = TEST_IMAGE_UUID
container = docker_client.create_container(name='test', imageUuid=uuid)
try:
container = docker_client.wait_success(container)
container = super_client.reload(container)
assert container.image().format == 'docker'
assert container.volumes()[0].image().format == 'docker'
assert container.volumes()[0].format == 'docker'
finally:
if container is not None:
docker_client.wait_success(docker_client.delete(container))
@if_docker
def test_docker_ports_from_container_publish_all(docker_client):
uuid = TEST_IMAGE_UUID
c = docker_client.create_container(networkMode='bridge',
name='test',
publishAllPorts=True,
imageUuid=uuid)
c = docker_client.wait_success(c)
assert c.state == 'running'
ports = c.ports_link()
assert len(ports) == 1
port = ports[0]
assert port.publicPort is not None
assert port.privatePort == 8080
assert port.publicIpAddressId is not None
assert port.kind == 'imagePort'
docker_client.wait_success(docker_client.delete(c))
@if_docker
def test_docker_ports_from_container_no_publish(docker_client):
uuid = TEST_IMAGE_UUID
c = docker_client.create_container(name='test',
imageUuid=uuid)
c = docker_client.wait_success(c)
assert c.state == 'running'
ports = c.ports_link()
assert len(ports) == 1
port = ports[0]
assert port.publicPort is None
assert port.privatePort == 8080
assert port.publicIpAddressId is not None
assert port.kind == 'imagePort'
docker_client.wait_success(docker_client.delete(c))
@if_docker
def test_docker_ports_from_container(docker_client, super_client):
def reload(x):
return super_client.reload(x)
_ = reload
uuid = TEST_IMAGE_UUID
c = docker_client.create_container(networkMode='bridge',
name='test',
startOnCreate=False,
publishAllPorts=True,
imageUuid=uuid,
ports=[
'8081',
'8082/tcp',
'8083/udp'])
c = docker_client.wait_success(c)
assert c.state == 'stopped'
count = 0
for port in c.ports_link():
count += 1
assert port.kind == 'userPort'
assert port.publicPort is None
assert port.privateIpAddressId is None
assert port.publicIpAddressId is None
if port.privatePort == 8081:
assert port.protocol == 'tcp'
elif port.privatePort == 8082:
assert port.protocol == 'tcp'
elif port.privatePort == 8083:
assert port.protocol == 'udp'
else:
assert False
assert count == 3
c = docker_client.wait_success(c.start())
assert c.state == 'running'
network = super_client.reload(c).nics()[0].network()
count = 0
ip = None
privateIp = None
for port in c.ports_link():
count += 1
assert port.privateIpAddressId is not None
privateIp = port.privateIpAddress()
assert privateIp.kind == 'docker'
assert privateIp.networkId == network.id
assert privateIp.network() is not None
assert _(privateIp).subnetId is None
assert port.publicPort is not None
assert port.publicIpAddressId is not None
if ip is None:
ip = port.publicIpAddressId
assert port.publicIpAddressId == ip
if port.privatePort == 8081:
assert port.kind == 'userPort'
assert port.protocol == 'tcp'
elif port.privatePort == 8082:
assert port.kind == 'userPort'
assert port.protocol == 'tcp'
elif port.privatePort == 8083:
assert port.kind == 'userPort'
assert port.protocol == 'udp'
elif port.privatePort == 8080:
assert port.kind == 'imagePort'
else:
assert False
assert count == 4
assert c.primaryIpAddress == privateIp.address
c = docker_client.wait_success(c.stop(timeout=0))
assert c.state == 'stopped'
count = 0
for nic in _(c).nics():
for ip in nic.ipAddresses():
count += 1
assert ip.kind == 'docker'
assert ip.state == 'inactive'
assert ip.address is None
assert count == 1
c = docker_client.wait_success(c.start())
assert c.state == 'running'
count = 0
for nic in _(c).nics():
for ip in nic.ipAddresses():
count += 1
assert ip.kind == 'docker'
assert ip.state == 'active'
assert ip.address is not None
assert count == 1
docker_client.wait_success(docker_client.delete(c))
@if_docker
def test_no_port_override(docker_client, super_client):
c = docker_client.create_container(imageUuid=TEST_IMAGE_UUID,
ports=['8081:8080'])
try:
c = super_client.wait_success(c, timeout=240)
assert c.state == 'running'
ports = c.ports_link()
assert len(ports) == 1
assert ports[0].kind == 'userPort'
assert ports[0].publicPort == 8081
assert ports[0].privatePort == 8080
finally:
if c is not None:
super_client.wait_success(super_client.delete(c))
@if_docker
def test_docker_volumes(docker_client, super_client):
def reload(x):
return super_client.reload(x)
_ = reload
uuid = TEST_IMAGE_UUID
bind_mount_uuid = py_uuid.uuid4().hex
bar_host_path = '/tmp/bar%s' % bind_mount_uuid
bar_bind_mount = '%s:/bar' % bar_host_path
baz_host_path = '/tmp/baz%s' % bind_mount_uuid
baz_bind_mount = '%s:/baz:ro' % baz_host_path
c = docker_client.create_container(name="volumes_test",
imageUuid=uuid,
startOnCreate=False,
dataVolumes=['/foo',
bar_bind_mount,
baz_bind_mount])
c = docker_client.wait_success(c)
assert len(c.dataVolumes) == 3
assert set(c.dataVolumes) == set(['/foo',
bar_bind_mount,
baz_bind_mount])
c = super_client.wait_success(c.start())
volumes = c.volumes()
assert len(volumes) == 1
mounts = c.mounts()
assert len(mounts) == 3
foo_mount, bar_mount, baz_mount = None, None, None
foo_vol, bar_vol, baz_vol = None, None, None
for mount in mounts:
assert mount.instance().id == c.id
if mount.path == '/foo':
foo_mount = mount
foo_vol = mount.volume()
elif mount.path == '/bar':
bar_mount = mount
bar_vol = mount.volume()
elif mount.path == '/baz':
baz_mount = mount
baz_vol = mount.volume()
assert foo_mount is not None
assert foo_mount.permissions == 'rw'
assert foo_vol is not None
assert foo_vol.state == 'active'
assert _(foo_vol).attachedState == 'inactive'
assert bar_mount is not None
assert bar_mount.permissions == 'rw'
assert bar_vol is not None
assert bar_vol.state == 'active'
assert _(bar_vol).attachedState == 'inactive'
assert baz_mount is not None
assert baz_mount.permissions == 'ro'
assert baz_vol is not None
assert baz_vol.state == 'active'
assert _(baz_vol).attachedState == 'inactive'
assert not foo_vol.isHostPath
assert bar_vol.isHostPath
# We use 'in' instead of '==' because Docker uses the fully qualified
# non-linked path and it might look something like: /mnt/sda1/<path>
assert bar_host_path in bar_vol.uri
assert baz_vol.isHostPath
assert baz_host_path in baz_vol.uri
c2 = docker_client.create_container(name="volumes_from_test",
imageUuid=uuid,
startOnCreate=False,
dataVolumesFrom=[c.id])
c2 = docker_client.wait_success(c2)
assert len(c2.dataVolumesFrom) == 1
assert set(c2.dataVolumesFrom) == set([c.id])
c2 = super_client.wait_success(c2.start())
c2_mounts = c2.mounts()
assert len(c2_mounts) == 3
for mount in c2_mounts:
assert mount.instance().id == c2.id
if mount.path == '/foo':
assert mount.volumeId == foo_vol.id
elif mount.path == '/bar':
assert mount.volumeId == bar_vol.id
elif mount.path == '/baz':
assert mount.volumeId == baz_vol.id
c.stop(remove=True, timeout=0)
c2.stop(remove=True, timeout=0)
_check_path(foo_vol, True, docker_client, super_client)
foo_vol = super_client.wait_success(foo_vol.deactivate())
foo_vol = super_client.wait_success(foo_vol.remove())
foo_vol = super_client.wait_success(foo_vol.purge())
_check_path(foo_vol, False, docker_client, super_client)
_check_path(bar_vol, True, docker_client, super_client)
bar_vol = super_client.wait_success(bar_vol.deactivate())
bar_vol = super_client.wait_success(bar_vol.remove())
bar_vol = super_client.wait_success(bar_vol.purge())
# Host bind mount. Won't actually delete the dir on the host.
_check_path(bar_vol, True, docker_client, super_client)
_check_path(baz_vol, True, docker_client, super_client)
baz_vol = super_client.wait_success(baz_vol.deactivate())
baz_vol = super_client.wait_success(baz_vol.remove())
baz_vol = super_client.wait_success(baz_vol.purge())
# Host bind mount. Won't actually delete the dir on the host.
_check_path(baz_vol, True, docker_client, super_client)
@if_docker
def test_volumes_from_more_than_one_container(docker_client):
c = docker_client.create_container(imageUuid=TEST_IMAGE_UUID,
dataVolumes=['/foo'])
docker_client.wait_success(c)
c2 = docker_client.create_container(imageUuid=TEST_IMAGE_UUID,
dataVolumes=['/bar'])
docker_client.wait_success(c2)
c3 = docker_client.create_container(imageUuid=TEST_IMAGE_UUID,
dataVolumesFrom=[c.id, c2.id])
c3 = docker_client.wait_success(c3)
mounts = c3.mounts()
assert len(mounts) == 2
paths = ['/foo', '/bar']
for m in mounts:
assert m.path in paths
@if_docker
def test_container_fields(docker_client, super_client):
caps = ["SYS_MODULE", "SYS_RAWIO", "SYS_PACCT", "SYS_ADMIN",
"SYS_NICE", "SYS_RESOURCE", "SYS_TIME", "SYS_TTY_CONFIG",
"MKNOD", "AUDIT_WRITE", "AUDIT_CONTROL", "MAC_OVERRIDE",
"MAC_ADMIN", "NET_ADMIN", "SYSLOG", "CHOWN", "NET_RAW",
"DAC_OVERRIDE", "FOWNER", "DAC_READ_SEARCH", "FSETID",
"KILL", "SETGID", "SETUID", "LINUX_IMMUTABLE",
"NET_BIND_SERVICE", "NET_BROADCAST", "IPC_LOCK",
"IPC_OWNER", "SYS_CHROOT", "SYS_PTRACE", "SYS_BOOT",
"LEASE", "SETFCAP", "WAKE_ALARM", "BLOCK_SUSPEND", "ALL"]
test_name = 'container_test'
image_uuid = 'docker:ibuildthecloud/helloworld'
restart_policy = {"maximumRetryCount": 2, "name": "on-failure"}
c = docker_client.create_container(name=test_name,
imageUuid=image_uuid,
capAdd=caps,
capDrop=caps,
dnsSearch=['8.8.8.8', '1.2.3.4'],
dns=['8.8.8.8', '1.2.3.4'],
privileged=True,
domainName="rancher.io",
memory=8000000,
memorySwap=16000000,
cpuSet="0,1",
stdinOpen=True,
tty=True,
command=["true"],
entryPoint=["/bin/sh", "-c"],
cpuShares=400,
restartPolicy=restart_policy,
devices="/dev/null:/dev/xnull:rw")
c = super_client.wait_success(c)
assert set(c.data['dockerInspect']['HostConfig']['CapAdd']) == set(caps)
assert set(c.data['dockerInspect']['HostConfig']['CapDrop']) == set(caps)
actual_dns = c.data['dockerInspect']['HostConfig']['Dns']
assert set(actual_dns) == set(['8.8.8.8', '1.2.3.4'])
actual_dns = c.data['dockerInspect']['HostConfig']['DnsSearch']
assert set(actual_dns) == set(['8.8.8.8', '1.2.3.4'])
assert c.data['dockerInspect']['HostConfig']['Privileged']
assert c.data['dockerInspect']['Config']['Domainname'] == "rancher.io"
assert c.data['dockerInspect']['Config']['Memory'] == 8000000
assert c.data['dockerInspect']['Config']['MemorySwap'] == 16000000
assert c.data['dockerInspect']['Config']['Cpuset'] == "0,1"
assert c.data['dockerInspect']['Config']['Tty']
assert c.data['dockerInspect']['Config']['OpenStdin']
actual_entry_point = set(c.data['dockerInspect']['Config']['Entrypoint'])
assert actual_entry_point == set(["/bin/sh", "-c"])
assert c.data['dockerInspect']['Config']['CpuShares'] == 400
act_restart_pol = c.data['dockerInspect']['HostConfig']['RestartPolicy']
assert act_restart_pol['MaximumRetryCount'] == 2
assert act_restart_pol['Name'] == "on-failure"
actual_devices = c.data['dockerInspect']['HostConfig']['Devices']
assert len(actual_devices) == 1
assert actual_devices[0]['CgroupPermissions'] == "rw"
assert actual_devices[0]['PathOnHost'] == "/dev/null"
assert actual_devices[0]['PathInContainer'] == "/dev/xnull"
@if_docker
def test_docker_mount_life_cycle(docker_client):
uuid = TEST_IMAGE_UUID
bind_mount_uuid = py_uuid.uuid4().hex
bar_host_path = '/tmp/bar%s' % bind_mount_uuid
bar_bind_mount = '%s:/bar' % bar_host_path
c = docker_client.create_container(name="volumes_test",
imageUuid=uuid,
startOnCreate=False,
dataVolumes=['/foo',
bar_bind_mount])
c = docker_client.wait_success(c)
c = docker_client.wait_success(c.start())
def check_mounts(container, expected_state=None, length=0):
mounts = container.mounts()
assert len(mounts) == length
if expected_state:
for mount in mounts:
assert mount.state == expected_state
return mounts
check_mounts(c, 'active', 2)
c = docker_client.wait_success(c.stop(remove=True, timeout=0))
check_mounts(c, 'inactive', 2)
c = docker_client.wait_success(c.restore())
assert c.state == 'stopped'
check_mounts(c, 'inactive', 2)
c = docker_client.wait_success(c.start())
assert c.state == 'running'
check_mounts(c, 'active', 2)
c = docker_client.wait_success(c.stop(remove=True, timeout=0))
c = docker_client.wait_success(c.purge())
assert c.state == 'purged'
check_mounts(c, 'removed', 2)
@if_docker
def test_docker_labels(docker_client):
image_uuid = 'docker:ranchertest/labelled:v0.1.0'
c = docker_client.create_container(name="labels_test",
imageUuid=image_uuid,
labels={'io.rancher.testlabel.'
'fromapi': 'yes'})
c = docker_client.wait_success(c)
def labels_callback():
labels = c.instanceLabels()
if len(labels) >= 4:
return labels
return None
labels = wait_for(labels_callback)
actual_labels = {}
for l in labels:
actual_labels[l.key] = l.value
expected_labels = {
'io.rancher.testlabel': 'value1',
'io.rancher.testlabel.space': 'value 1',
'io.rancher.testlabel.fromapi': 'yes',
'io.rancher.container.uuid': c.uuid,
'io.rancher.container.ip': c.primaryIpAddress + '/16',
}
assert actual_labels == expected_labels
docker_client.wait_success(docker_client.delete(c))
@if_docker
def test_container_odd_fields(super_client, docker_client):
c = docker_client.create_container(pidMode=None,
imageUuid=TEST_IMAGE_UUID,
logConfig={
'driver': None,
'config': None,
})
c = docker_client.wait_success(c)
assert c.state == 'running'
assert c.pidMode is None
assert c.logConfig == {'driver': None, 'config': None}
c = super_client.reload(c)
assert c.data.dockerInspect.HostConfig.LogConfig == {'Type': 'json-file',
'Config': None}
@if_docker
def test_container_bad_build(super_client, docker_client):
c = docker_client.create_container(imageUuid=TEST_IMAGE_UUID,
build={
'context': None,
'remote': None
})
c = docker_client.wait_success(c)
assert c.state == 'running'
assert c.pidMode is None
assert c.build == {'context': None, 'remote': None}
c = super_client.reload(c)
assert c.data.dockerInspect.Config.Image == TEST_IMAGE_LATEST
@if_docker
def test_service_link_emu_docker_link(super_client, docker_client):
env_name = random_str()
env = docker_client.create_environment(name=env_name)
env = docker_client.wait_success(env)
assert env.state == "active"
server = docker_client.create_service(name='server', launchConfig={
'imageUuid': TEST_IMAGE_UUID
}, environmentId=env.id)
service = docker_client.create_service(name='client', launchConfig={
'imageUuid': TEST_IMAGE_UUID
}, environmentId=env.id)
service_link = {"serviceId": server.id, "name": "other"}
service.setservicelinks(serviceLinks=[service_link])
server = docker_client.wait_success(server)
service = docker_client.wait_success(service)
server = docker_client.wait_success(server.activate())
assert server.state == 'active'
service = docker_client.wait_success(service.activate())
assert service.state == 'active'
instance = find_one(service.instances)
instance = super_client.reload(instance)
link = find_one(instance.instanceLinks)
target_instance = find_one(server.instances)
assert len(link.ports) == 1
assert link.ports[0].privatePort == 8080
assert link.ports[0].publicPort == 8080
assert link.ports[0].protocol == 'tcp'
assert link.ports[0].ipAddress is not None
assert link.targetInstanceId == target_instance.id
assert link.instanceNames == ['{}_server_1'.format(env_name)]
docker_client.delete(env)
@if_docker
def test_service_links_with_no_ports(docker_client):
env = docker_client.create_environment(name=random_str())
env = docker_client.wait_success(env)
assert env.state == "active"
server = docker_client.create_service(name='server', launchConfig={
'imageUuid': 'docker:busybox',
'stdinOpen': True,
'tty': True,
}, environmentId=env.id)
server = docker_client.wait_success(server)
assert server.state == 'inactive'
service = docker_client.create_service(name='client', launchConfig={
'imageUuid': 'docker:busybox',
'stdinOpen': True,
'tty': True,
}, environmentId=env.id)
service = docker_client.wait_success(service)
assert service.state == 'inactive'
service_link = {"serviceId": server.id, "name": "bb"}
service.setservicelinks(serviceLinks=[service_link])
server = docker_client.wait_success(server.activate())
assert server.state == 'active'
service = docker_client.wait_success(service.activate())
assert service.state == 'active'
def _check_path(volume, should_exist, client, super_client):
path = _path_to_volume(volume)
c = client. \
create_container(name="volume_check",
imageUuid="docker:ranchertest/volume-test:v0.1.0",
networkMode=None,
environment={'TEST_PATH': path},
command='/opt/tools/check_path_exists.sh',
dataVolumes=[
'/var/lib/docker:/host/var/lib/docker',
'/tmp:/host/tmp'])
c = super_client.wait_success(c)
assert c.state == 'running'
c = super_client.wait_success(c.stop())
assert c.state == 'stopped'
code = c.data.dockerInspect.State.ExitCode
if should_exist:
# The exit code of the container should be a 10 if the path existed
assert code == 10
else:
# And 11 if the path did not exist
assert code == 11
c.remove()
def _path_to_volume(volume):
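    # Translate the volume's file:// URI into the corresponding path under the
    # /host/... bind mounts used by the volume-check container above.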
path = volume.uri.replace('file://', '')
mounted_path = re.sub('^.*?/var/lib/docker', '/host/var/lib/docker',
path)
if not mounted_path.startswith('/host/var/lib/docker'):
mounted_path = re.sub('^.*?/tmp', '/host/tmp',
path)
return mounted_path
|
willseward/cattle
|
tests/integration/cattletest/core/test_docker.py
|
Python
|
apache-2.0
| 31,065
|
# encoding: utf-8
'''
Created on 27 Apr. 2013
@author: guillaume
'''
import sys
print sys.path
from distarkcli.utils.NetInfo import NetInfo
from distarkcli.protos.generic_service_pb2 import PBOneRequest
from distarkcli.protos.generic_service_pb2 import PBOneResponse
from distarkcli.transport.transportpool import ConnectionPoolBorg
class Distarkcli(object):
serviceName = ''
pbrespHandler = None
pbresptype = None
# __requestType=None
objreq = None
__pboreq = None
__pboresp = None
__connection = None
    # OUT: PBOneResponse
def getResponse(self):
msg = self.__connection.recv()
if isinstance(msg, list):
if len(msg) == 1:
msg = msg[0]
else:
# multipart response whe can't handle
raise Exception('Distarkcli: multipart response received')
if msg:
self.__pboresp = PBOneResponse()
self.__pboresp.ParseFromString(msg)
            # either a response from the service, or an error
if (self.__pboresp.rtype == self.pbresptype):
return [self.__pboresp.rtype, self.pbrespHandler(self.__pboresp)]
else:
return [self.__pboresp.rtype, self.__pboresp]
        raise Exception('distarkcli: timeout')
def fillinGenericRequest(self):
self.__pboreq.greq.servicename = self.serviceName
self.__pboreq.greq.caller = 'Distarkcli'
self.__pboreq.greq.ipadress = NetInfo.getIPString()
def send(self):
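        # Build the PBOneRequest (generic header plus the service-specific
        # payload), serialize it, and send it over a pooled connection; the
        # reply is read later via getResponse().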
# prepare OneRequest
self.__pboreq = PBOneRequest()
self.fillinGenericRequest()
self.objreq.fillinPBOneRequest(self.__pboreq)
# serialize
msg = self.__pboreq.SerializeToString()
# pool=StubSimpleRequestConnectionPool()
# pool=NaiveConnectionPool()
pool = ConnectionPoolBorg()
self.__connection = pool.getConnection()
# TODO: Fix this "echo" for appropriate service discovery
self.__connection.send("echo", msg)
|
GustavePate/distarkcli
|
distarkcli/transport/distarkclient.py
|
Python
|
gpl-3.0
| 2,043
|
# -*- coding: utf-8 -*-
#
# This file is part of EUDAT B2Share.
# Copyright (C) 2017 University of Tübingen, CERN
#
# B2Share is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# B2Share is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with B2Share; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""B2Share cli commands for records."""
from __future__ import absolute_import, print_function
import click
from flask.cli import with_appcontext
import requests
from invenio_db import db
from invenio_pidstore.models import PIDStatus
from invenio_pidstore.providers.datacite import DataCiteProvider
from invenio_records_files.api import Record
from invenio_records.cli import records
from b2share.modules.deposit.api import create_file_pids
from b2share.modules.records.serializers import datacite_v31
from b2share.modules.records.providers import RecordUUIDProvider
from b2share.modules.records.minters import make_record_url, b2share_pid_minter
from b2share.modules.communities.api import Community
from b2share.modules.records.tasks import update_expired_embargoes \
as update_expired_embargoes_task
from .utils import list_db_published_records
from b2share.modules.handle.proxies import current_handle
@records.group()
def manage():
"""B2SHARE record management commands."""
@manage.command()
@with_appcontext
def update_expired_embargoes():
"""Updates all records with expired embargoes to open access."""
update_expired_embargoes_task.delay()
click.secho('Expiring embargoes...', fg='green')
@manage.command()
@with_appcontext
@click.option('-u', '--update', is_flag=True, default=False,
help='updates if necessary')
@click.option('-v', '--verbose', is_flag=True, default=False)
def check_and_update_handle_records(update, verbose):
"""Checks that PIDs of records and files have the mandatory EUDAT entries.
"""
update_msg = 'updated' if update else 'to update'
if verbose:
click.secho('checking PIDs for all records')
for record in list_db_published_records():
pid_list = [p.get('value') for p in record['_pid']
if p.get('type') == 'ePIC_PID']
if pid_list:
pid = pid_list[0]
res = current_handle.check_eudat_entries_in_handle_pid(
handle=pid, update=update
)
if verbose:
if res:
click.secho('{} record PID {} with {}'.format(
update_msg, pid, ", ".join(res.keys())))
else:
click.secho('record PID ok: {}'.format(pid))
for f in record.get('_files', []):
pid = f.get('ePIC_PID')
if pid:
res = current_handle.check_eudat_entries_in_handle_pid(
handle=pid,
fixed=True,
checksum=f.get('checksum'),
checksum_timestamp_iso=record.get('_oai', {}).get('updated'),
update=update)
if verbose:
if res:
click.secho(' {} file PID {} with {}'.format(
update_msg, pid, ", ".join(res.keys())))
elif verbose:
click.secho(' file PID ok: {}'.format(pid))
@manage.command()
@with_appcontext
@click.option('-u', '--update', is_flag=True, default=False)
@click.argument('record_pid', required=True)
def check_handles(update, record_pid):
"""Allocate handles for a record and its files, if necessary."""
rec_pid = RecordUUIDProvider.get(pid_value=record_pid).pid
record = Record.get_record(rec_pid.object_uuid)
record_updated = False
pid_list = [p.get('value') for p in record['_pid']
if p.get('type') == 'ePIC_PID']
if pid_list:
click.secho('record {} already has a handle'.format(record_pid), fg='green')
else:
click.secho('record {} has no handle'.format(record_pid), fg='red')
if update:
b2share_pid_minter(rec_pid, record)
record_updated = True
click.secho(' handle added to record', fg='green')
else:
click.secho('use -u argument to add a handle to the record')
files_ok = True
for f in record.get('_files', []):
if f.get('ePIC_PID'):
click.secho('file {} already has a handle'.format(f.get('key')), fg='green')
else:
click.secho('file {} has no handle'.format(f.get('key')), fg='red')
files_ok = False
if update and not files_ok:
create_file_pids(record)
record_updated = True
click.secho(' files updated with handles', fg='green')
elif not update and not files_ok:
click.secho('use -u argument to add handles to the files')
if record_updated:
record.commit()
db.session.commit()
@manage.command()
@with_appcontext
@click.option('-r', '--record', default=None)
@click.option('-a', '--allrecords', is_flag=True, default=False)
@click.option('-u', '--update', is_flag=True, default=False)
def check_dois(record, allrecords, update):
""" Checks that DOIs of records in the current instance are registered.
"""
if record:
record = Record.get_record(record)
check_record_doi(record, update)
elif allrecords:
click.secho('checking DOI for all records')
for record in list_db_published_records():
check_record_doi(record, update)
else:
raise click.ClickException('Either -r or -a option must be selected')
def check_record_doi(record, update=False):
""" Checks that the DOI of a record is registered."""
recid = record.get('_deposit', {}).get('id')
click.secho('checking DOI for record {}'.format(recid))
doi_list = [DataCiteProvider.get(d.get('value'))
for d in record['_pid']
if d.get('type') == 'DOI']
for doi in doi_list:
if _datacite_doi_reference(doi.pid.pid_value) is None:
if doi.pid.status == PIDStatus.REGISTERED:
# the doi is not truly registered with datacite
click.secho(' {}: not registered with datacite'.format(
doi.pid.pid_value))
doi.pid.status = PIDStatus.RESERVED
click.secho(' {}: {}'.format(doi.pid.pid_value, doi.pid.status))
if doi.pid.status != PIDStatus.RESERVED:
continue
# RESERVED but not REGISTERED
if update:
recid = record.get('_deposit', {}).get('id')
url = make_record_url(recid)
doc = datacite_v31.serialize(doi.pid, record)
_datacite_register_doi(doi, url, doc)
db.session.commit()
click.secho(' registered just now', fg='green', bold=True)
else:
click.secho(' not registered', fg='red', bold=True)
def _datacite_doi_reference(doi_value):
url = "http://doi.org/" + doi_value
res = requests.get(url, allow_redirects=False)
if res.status_code < 200 or res.status_code >= 400:
click.secho(' doi.org returned code {} for {}'.format(
res.status_code, doi_value))
return None
return res.headers['Location']
def _datacite_register_doi(doi, url, doc):
doi.register(url=url, doc=doc)
|
EUDAT-B2SHARE/b2share
|
b2share/modules/records/cli.py
|
Python
|
gpl-2.0
| 8,025
|
from datetime import datetime, timedelta
import random
from itertools import izip
from django.test import TestCase, override_settings
from django.contrib.auth.models import User
from molo.core.tests.base import MoloTestCaseMixin
from molo.profiles import task
import responses
import json
class UserInfoTest(TestCase, MoloTestCaseMixin):
def setUp(self):
        '''
        Populates the database with 20 users with the following properties:
          - 10 users that joined in the last 24 hours
          - 7 users that joined longer than 24 hours ago and visited the site
            in the last 24 hours
          - 3 users that joined longer than 24 hours ago and have not visited
            the site in the last 24 hours
        '''
self.mk_main()
now = datetime.now()
# create users that joined today
for x in range(10):
join_datetime = now - timedelta(hours=random.randint(1, 23))
user = User.objects.create_user(
username='tester' + str(x),
email='tester' + str(x) + '@example.com',
password='tester')
user.date_joined = join_datetime
user.last_login = join_datetime
user.save()
join_datetimes = []
login_datetimes = []
# create 10 datetimes not within the previous 24 hours
for x in range(10):
join_datetimes.append(now -
timedelta(days=random.randint(1, 1000)))
# create 7 datetimes that are within that past 24 hours
for x in range(7):
login_datetimes.append(now -
timedelta(hours=random.randint(1, 23)))
# create last login that's after the joined date
# but before 24 hours before today
for x in range(3):
temp_datetime = 0
while(True):
temp_datetime = (now - timedelta(days=random.randint(1, 1000)))
if((temp_datetime < (now - timedelta(hours=24)))and
(temp_datetime > join_datetimes[7 + x])):
break
login_datetimes.append(temp_datetime)
# create the users
count = 10
for join_datetime, login_datetime in izip(join_datetimes,
login_datetimes):
user = User.objects.create_user(
username='tester' + str(count),
email='tester' + str(count) + '@example.com',
password='tester')
user.date_joined = join_datetime
user.last_login = login_datetime
user.save()
count += 1
# setup response
responses.add(
responses.POST,
'http://testserver:8080/',
status=200, content_type='application/json',
body=json.dumps({}))
def test_new_user_count(self):
self.assertEqual(task.get_count_of_new_users(), 10)
def test_returning_user_count(self):
self.assertEqual(task.get_count_of_returning_users(), 7)
def test_total_user_count(self):
self.assertEqual(task.get_count_of_all_users(), 20)
def test_user_info_message(self):
self.assertEqual(task.get_message_text(),
("DAILY UPDATE ON USER DATA\n"
"New User - joined in the last 24 hours\n"
"Returning User - joined longer than 24 hours ago"
"and visited the site in the last 24 hours\n"
"```"
"Total Users: 20\n"
"New Users: 10\n"
"Returning Users: 7"
"```"))
@responses.activate
@override_settings(SLACK_INCOMING_WEBHOOK_URL="http://testserver:8080/")
def test_send_user_data_to_slack(self):
task.send_user_data_to_slack()
|
praekelt/molo.profiles
|
molo/profiles/tests/test_user_info_task.py
|
Python
|
bsd-2-clause
| 3,932
|
################################################################################
#
# Copyright 2014 William Barsse
#
################################################################################
#
# This file is part of ToySM Extensions.
#
# ToySM Extensions is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ToySM Extensions is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ToySM. If not, see <http://www.gnu.org/licenses/>.
#
################################################################################
from __future__ import print_function
import sys
import unittest
import logging
LOG_LEVEL = logging.INFO
#LOG_LEVEL = logging.DEBUG
logging.basicConfig(level=LOG_LEVEL)
SCAPY_COMPAT = sys.version_info.major < 3
if SCAPY_COMPAT:
from scapy.all import *
from toysm.ext.scapy import *
from toysm.ext.scapy import HAVE_PIPETOOL
import scapy.pipetool as pt
from toysm import *
@unittest.skipIf(not SCAPY_COMPAT,
'Scapy is Python2 only, for now at least')
class TestPacketTransition(unittest.TestCase):
def test_match_packet_class_tmpl(self):
self.assertTrue(
match_packet(IP, IP(str(IP()))))
def test_match_packet_field_match(self):
self.assertTrue(
match_packet(IP(), IP(str(IP()))))
self.assertFalse(
match_packet(IP(src='1.2.3.4'),
IP(str(IP(src='2.3.4.5')))))
self.assertFalse(
match_packet(IP(src='1.2.3.4'),
IP(str(IP(dst='1.2.3.4')))))
def test_match_packet_find_layer(self):
self.assertTrue(
match_packet(IP(src='1.2.3.4'),
Ether(str(Ether()/IP(src='1.2.3.4')/UDP()))))
def test_match_packet_skip_layer(self):
self.assertTrue(
match_packet(IP(src='1.2.3.4')/DNS(id=1),
Ether(str(Ether()/IP(src='1.2.3.4')/UDP()/DNS(id=1)))))
self.assertFalse(
match_packet(IP(src='4.5.6.7')/DNS(id=1),
Ether(str(Ether()/IP(src='1.2.3.4')/UDP()/DNS(id=1)))))
self.assertFalse(
match_packet(IP(src='1.2.3.4')/DNS(id=2),
Ether(str(Ether()/IP(src='1.2.3.4')/UDP()/DNS(id=1)))))
def test_match_forbid_pld(self):
self.assertTrue(
match_packet(IP(src='1.2.3.4')/ForbidPayload(),
Ether(str(Ether()/IP(src='1.2.3.4')))))
self.assertFalse(
match_packet(IP(src='1.2.3.4')/ForbidPayload(),
Ether(str(Ether()/IP(src='1.2.3.4')/UDP()))))
def test_match_missing_pld(self):
self.assertFalse(
match_packet(IP()/UDP()/DNS(),
Ether()/IP()))
def test_match_list(self):
self.assertTrue(
match_packet(IP(src='1.2.3.4/30'),
Ether(str(Ether()/IP(src='1.2.3.4')))))
self.assertTrue(
match_packet(IP(src='1.2.3.4/30'),
Ether(str(Ether()/IP(src='1.2.3.5')))))
def test_simple(self):
s1 = State('ready')
s2 = State('wait reply')
fs = FinalState()
s3 = State('timedout')
actions = []
s1 >> CompletionTransition(action=lambda sm, e: actions.append('echo request')) \
>> s2 >> PacketTransition(IP()/ICMP(type='echo-reply'),
action=lambda sm,e: actions.append('echo reply')) \
>> fs
s2 >> Timeout(2) >> s3 \
>> CompletionTransition(action=lambda sm, e: actions.append('Timed out')) >> fs
sm = StateMachine(s1, s2, s3, fs)
#sm.graph()
sm.start()
sm.post(Ether(str(Ether()/IP()/ICMP(type='echo-reply'))))
self.assertTrue(sm.join(1))
self.assertEqual(['echo request', 'echo reply'], actions)
@unittest.skipIf(not (SCAPY_COMPAT and HAVE_PIPETOOL),
'Need scapy version that includes Pipetool')
class TestSMBox(unittest.TestCase):
def test_simple(self):
s1 = State('s1')
smbox = SMBox(s1 >> EqualsTransition('a', action=lambda sm,e: sm.send('Got %s'%e))
>> FinalState('fs'))
feeder = pt.CLIFeeder()
queue = pt.QueueSink()
feeder > smbox > queue
engine = pt.PipeEngine(feeder)
engine.start()
feeder.send('a')
feeder.close()
engine.wait_and_stop()
self.assertEqual('Got a', queue.recv())
if __name__ == '__main__':
unittest.main()
# vim:expandtab:sw=4:sts=4
|
willakat/toysm
|
toysm/ext/tests/test_scapy.py
|
Python
|
lgpl-3.0
| 4,998
|
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.appengine.api import users
from google.appengine.ext import ndb
import webapp2
class Account(ndb.Model):
view_counter = ndb.IntegerProperty()
class MyRequestHandler(webapp2.RequestHandler):
def get(self):
acct = Account.get_by_id(users.get_current_user().user_id())
acct.view_counter += 1
future = acct.put_async()
# ...read something else from Datastore...
self.response.out.write('Content of the page')
future.get_result()
app = webapp2.WSGIApplication([('/', MyRequestHandler)])
|
clarko1/Cramd
|
appengine/standard/ndb/async/app_async.py
|
Python
|
apache-2.0
| 1,155
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
lassplit.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from sextante.lidar.lastools.LasToolsUtils import LasToolsUtils
from sextante.lidar.lastools.LasToolsAlgorithm import LasToolsAlgorithm
from sextante.parameters.ParameterFile import ParameterFile
from sextante.outputs.OutputFile import OutputFile
from sextante.parameters.ParameterNumber import ParameterNumber
class lassplit(LasToolsAlgorithm):
INPUT = "INPUT"
OUTPUT = "OUTPUT"
NUM_POINTS = "NUM_POINTS"
def defineCharacteristics(self):
self.name = "lassplit"
self.group = "Tools"
self.addParameter(ParameterFile(lassplit.INPUT, "Input las layer"))
        self.addParameter(ParameterNumber(lassplit.NUM_POINTS, "Points in each output file", 1, None, 1000000))
self.addOutput(OutputFile(lassplit.OUTPUT, "Output las file basename"))
self.addCommonParameters()
def processAlgorithm(self, progress):
commands = [os.path.join(LasToolsUtils.LasToolsPath(), "bin", "lassplit.exe")]
commands.append("-i")
commands.append(self.getParameterValue(lassplit.INPUT))
commands.append("-o")
commands.append(self.getOutputValue(lassplit.OUTPUT))
commands.append("-split")
commands.append(self.getParameterValue(lassplit.NUM_POINTS))
self.addCommonParameterValuesToCommand(commands)
LasToolsUtils.runLasTools(commands, progress)
|
slarosa/QGIS
|
python/plugins/sextante/lidar/lastools/lassplit.py
|
Python
|
gpl-2.0
| 2,455
|
#
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The mock module allows easy mocking of apitools clients.
This module allows you to mock out the constructor of a particular apitools
client, for a specific API and version. Then, when the client is created, it
will be run against an expected session that you define. This way code that is
not aware of the testing framework can construct new clients as normal, as long
as it's all done within the context of a mock.
"""
import difflib
import six
from apitools.base.protorpclite import messages
from apitools.base.py import base_api
from apitools.base.py import encoding
from apitools.base.py import exceptions
class Error(Exception):
"""Exceptions for this module."""
def _MessagesEqual(msg1, msg2):
"""Compare two protorpc messages for equality.
Using python's == operator does not work in all cases, specifically when
there is a list involved.
Args:
msg1: protorpc.messages.Message or [protorpc.messages.Message] or number
or string, One of the messages to compare.
msg2: protorpc.messages.Message or [protorpc.messages.Message] or number
or string, One of the messages to compare.
Returns:
If the messages are isomorphic.
"""
if isinstance(msg1, list) and isinstance(msg2, list):
if len(msg1) != len(msg2):
return False
return all(_MessagesEqual(x, y) for x, y in zip(msg1, msg2))
if (not isinstance(msg1, messages.Message) or
not isinstance(msg2, messages.Message)):
return msg1 == msg2
for field in msg1.all_fields():
field1 = getattr(msg1, field.name)
field2 = getattr(msg2, field.name)
if not _MessagesEqual(field1, field2):
return False
return True
class UnexpectedRequestException(Error):
def __init__(self, received_call, expected_call):
expected_key, expected_request = expected_call
received_key, received_request = received_call
expected_repr = encoding.MessageToRepr(
expected_request, multiline=True)
received_repr = encoding.MessageToRepr(
received_request, multiline=True)
expected_lines = expected_repr.splitlines()
received_lines = received_repr.splitlines()
diff_lines = difflib.unified_diff(expected_lines, received_lines)
diff = '\n'.join(diff_lines)
if expected_key != received_key:
msg = '\n'.join((
'expected: {expected_key}({expected_request})',
'received: {received_key}({received_request})',
'',
)).format(
expected_key=expected_key,
expected_request=expected_repr,
received_key=received_key,
received_request=received_repr)
super(UnexpectedRequestException, self).__init__(msg)
else:
msg = '\n'.join((
'for request to {key},',
'expected: {expected_request}',
'received: {received_request}',
'diff: {diff}',
'',
)).format(
key=expected_key,
expected_request=expected_repr,
received_request=received_repr,
diff=diff)
super(UnexpectedRequestException, self).__init__(msg)
class ExpectedRequestsException(Error):
def __init__(self, expected_calls):
msg = 'expected:\n'
for (key, request) in expected_calls:
msg += '{key}({request})\n'.format(
key=key,
request=encoding.MessageToRepr(request, multiline=True))
super(ExpectedRequestsException, self).__init__(msg)
class _ExpectedRequestResponse(object):
"""Encapsulation of an expected request and corresponding response."""
def __init__(self, key, request, response=None, exception=None):
self.__key = key
self.__request = request
if response and exception:
raise exceptions.ConfigurationValueError(
'Should specify at most one of response and exception')
if response and isinstance(response, exceptions.Error):
raise exceptions.ConfigurationValueError(
'Responses should not be an instance of Error')
if exception and not isinstance(exception, exceptions.Error):
raise exceptions.ConfigurationValueError(
'Exceptions must be instances of Error')
self.__response = response
self.__exception = exception
@property
def key(self):
return self.__key
@property
def request(self):
return self.__request
def ValidateAndRespond(self, key, request):
"""Validate that key and request match expectations, and respond if so.
Args:
key: str, Actual key to compare against expectations.
request: protorpc.messages.Message or [protorpc.messages.Message]
              or number or string, Actual request to compare against expectations
Raises:
          UnexpectedRequestException: If key or request don't match
expectations.
apitools_base.Error: If a non-None exception is specified to
be thrown.
Returns:
The response that was specified to be returned.
"""
if key != self.__key or not _MessagesEqual(request, self.__request):
raise UnexpectedRequestException((key, request),
(self.__key, self.__request))
if self.__exception:
# Can only throw apitools_base.Error.
raise self.__exception # pylint: disable=raising-bad-type
return self.__response
class _MockedService(base_api.BaseApiService):
def __init__(self, key, mocked_client, methods, real_service):
super(_MockedService, self).__init__(mocked_client)
self.__dict__.update(real_service.__dict__)
for method in methods:
real_method = None
if real_service:
real_method = getattr(real_service, method)
setattr(self, method,
_MockedMethod(key + '.' + method,
mocked_client,
real_method))
class _MockedMethod(object):
"""A mocked API service method."""
def __init__(self, key, mocked_client, real_method):
self.__key = key
self.__mocked_client = mocked_client
self.__real_method = real_method
def Expect(self, request, response=None, exception=None, **unused_kwargs):
"""Add an expectation on the mocked method.
Exactly one of response and exception should be specified.
Args:
request: The request that should be expected
response: The response that should be returned or None if
exception is provided.
exception: An exception that should be thrown, or None.
"""
# TODO(jasmuth): the unused_kwargs provides a placeholder for
# future things that can be passed to Expect(), like special
# params to the method call.
# pylint: disable=protected-access
# Class in same module.
self.__mocked_client._request_responses.append(
_ExpectedRequestResponse(self.__key,
request,
response=response,
exception=exception))
# pylint: enable=protected-access
def __call__(self, request, **unused_kwargs):
# TODO(jasmuth): allow the testing code to expect certain
# values in these currently unused_kwargs, especially the
# upload parameter used by media-heavy services like bigquery
# or bigstore.
# pylint: disable=protected-access
# Class in same module.
if self.__mocked_client._request_responses:
request_response = self.__mocked_client._request_responses.pop(0)
else:
raise UnexpectedRequestException(
(self.__key, request), (None, None))
# pylint: enable=protected-access
response = request_response.ValidateAndRespond(self.__key, request)
if response is None and self.__real_method:
response = self.__real_method(request)
print(encoding.MessageToRepr(
response, multiline=True, shortstrings=True))
return response
return response
def _MakeMockedServiceConstructor(mocked_service):
def Constructor(unused_self, unused_client):
return mocked_service
return Constructor
class Client(object):
"""Mock an apitools client."""
def __init__(self, client_class, real_client=None):
"""Mock an apitools API, given its class.
Args:
client_class: The class for the API. eg, if you
from apis.sqladmin import v1beta3
then you can pass v1beta3.SqladminV1beta3 to this class
and anything within its context will use your mocked
version.
real_client: apitools Client, The client to make requests
against when the expected response is None.
"""
if not real_client:
real_client = client_class(get_credentials=False)
self.__client_class = client_class
self.__real_service_classes = {}
self.__real_client = real_client
self._request_responses = []
self.__real_include_fields = None
def __enter__(self):
return self.Mock()
def Mock(self):
"""Stub out the client class with mocked services."""
client = self.__real_client or self.__client_class(
get_credentials=False)
for name in dir(self.__client_class):
service_class = getattr(self.__client_class, name)
if not isinstance(service_class, type):
continue
if not issubclass(service_class, base_api.BaseApiService):
continue
self.__real_service_classes[name] = service_class
service = service_class(client)
# pylint: disable=protected-access
# Some liberty is allowed with mocking.
collection_name = service_class._NAME
# pylint: enable=protected-access
api_name = '%s_%s' % (self.__client_class._PACKAGE,
self.__client_class._URL_VERSION)
mocked_service = _MockedService(
api_name + '.' + collection_name, self,
service._method_configs.keys(),
service if self.__real_client else None)
mocked_constructor = _MakeMockedServiceConstructor(mocked_service)
setattr(self.__client_class, name, mocked_constructor)
setattr(self, collection_name, mocked_service)
self.__real_include_fields = self.__client_class.IncludeFields
self.__client_class.IncludeFields = self.IncludeFields
return self
def __exit__(self, exc_type, value, traceback):
self.Unmock()
if value:
six.reraise(exc_type, value, traceback)
return True
def Unmock(self):
for name, service_class in self.__real_service_classes.items():
setattr(self.__client_class, name, service_class)
delattr(self, service_class._NAME)
self.__real_service_classes = {}
if self._request_responses:
raise ExpectedRequestsException(
[(rq_rs.key, rq_rs.request) for rq_rs
in self._request_responses])
self._request_responses = []
self.__client_class.IncludeFields = self.__real_include_fields
self.__real_include_fields = None
def IncludeFields(self, include_fields):
if self.__real_client:
return self.__real_include_fields(self.__real_client,
include_fields)
|
catapult-project/catapult-csm
|
third_party/google-endpoints/apitools/base/py/testing/mock.py
|
Python
|
bsd-3-clause
| 12,590
|
#!/usr/bin/python
#===============================================================================
# Copyright (c) 2016, James Ottinger. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# MoneyWatch - https://github.com/jamesottinger/moneywatch
#===============================================================================
db_creds = { 'host':'xxx.xxx.xxx.xxx', 'user':'youruser', 'passwd':'yourpassword', 'db':'thedbname' }
dirlogs = '/log/'
direrrors = '/log/errors/'
weather = { 'latitude':'39.7344443', 'longitude':'-75.0072787', 'title':'Chipotle - Sicklerville, NJ' }
alphavantage_apikey = "----"
uilinks = [
[
('Google Drive', 'https://drive.google.com/#my-drive')
], [
('Capital One 360', 'https://home.capitalone360.com'),
('Discover Bank', 'https://www.discover.com/online-banking/')
], [
('Fidelity', 'https://www.fidelity.com'),
('Vanguard', 'https://www.vanguard.com/')
]
]
|
jamesottinger/MoneyWatch
|
moneywatch/moneywatchconfig.py
|
Python
|
bsd-3-clause
| 1,109
|
# python3
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel_wrapper."""
import unittest
from absl.testing import absltest
from dm_env import specs
import numpy as np
SKIP_OPEN_SPIEL_TESTS = False
SKIP_OPEN_SPIEL_MESSAGE = 'open_spiel not installed.'
try:
# pylint: disable=g-import-not-at-top
# pytype: disable=import-error
from acme.wrappers import open_spiel_wrapper
from open_spiel.python import rl_environment
# pytype: enable=import-error
except ModuleNotFoundError:
SKIP_OPEN_SPIEL_TESTS = True
@unittest.skipIf(SKIP_OPEN_SPIEL_TESTS, SKIP_OPEN_SPIEL_MESSAGE)
class OpenSpielWrapperTest(absltest.TestCase):
def test_tic_tac_toe(self):
raw_env = rl_environment.Environment('tic_tac_toe')
env = open_spiel_wrapper.OpenSpielWrapper(raw_env)
# Test converted observation spec.
observation_spec = env.observation_spec()
self.assertEqual(type(observation_spec), open_spiel_wrapper.OLT)
self.assertEqual(type(observation_spec.observation), specs.Array)
self.assertEqual(type(observation_spec.legal_actions), specs.Array)
self.assertEqual(type(observation_spec.terminal), specs.Array)
# Test converted action spec.
action_spec: specs.DiscreteArray = env.action_spec()
self.assertEqual(type(action_spec), specs.DiscreteArray)
self.assertEqual(action_spec.shape, ())
self.assertEqual(action_spec.minimum, 0)
self.assertEqual(action_spec.maximum, 8)
self.assertEqual(action_spec.num_values, 9)
self.assertEqual(action_spec.dtype, np.dtype('int32'))
# Test step.
timestep = env.reset()
self.assertTrue(timestep.first())
_ = env.step([0])
env.close()
if __name__ == '__main__':
absltest.main()
|
deepmind/acme
|
acme/wrappers/open_spiel_wrapper_test.py
|
Python
|
apache-2.0
| 2,288
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/tom/x_os/src/satellite_dish_driver/include".split(';') if "/home/tom/x_os/src/satellite_dish_driver/include" != "" else []
PROJECT_CATKIN_DEPENDS = "gait_generation;ihmc_msgs;roscpp;std_msgs;tf2_ros;trajectory_generation".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lsatellite_dish_driver".split(';') if "-lsatellite_dish_driver" != "" else []
PROJECT_NAME = "satellite_dish_driver"
PROJECT_SPACE_DIR = "/home/tom/x_os/devel"
PROJECT_VERSION = "0.0.0"
|
Txion001/src_x_os
|
x_os/build/satellite_dish_driver/catkin_generated/pkg.develspace.context.pc.py
|
Python
|
apache-2.0
| 588
|
"""Fixer that addes parentheses where they are required
This converts ``[x for x in 1, 2]`` to ``[x for x in (1, 2)]``."""
# By Taek Joo Kim and Benjamin Peterson
# Local imports
from .. import fixer_base
from ..fixer_util import LParen, RParen
# XXX This doesn't support nested for loops like [x for x in 1, 2 for x in 1, 2]
class FixParen(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
atom< ('[' | '(')
(listmaker< any
comp_for<
'for' NAME 'in'
target=testlist_safe< any (',' any)+ [',']
>
[any]
>
>
|
testlist_gexp< any
comp_for<
'for' NAME 'in'
target=testlist_safe< any (',' any)+ [',']
>
[any]
>
>)
(']' | ')') >
"""
def transform(self, node, results):
target = results["target"]
lparen = LParen()
lparen.prefix = target.prefix
target.prefix = u"" # Make it hug the parentheses
target.insert_child(0, lparen)
target.append_child(RParen())
|
nmercier/linux-cross-gcc
|
win32/bin/Lib/lib2to3/fixes/fix_paren.py
|
Python
|
bsd-3-clause
| 1,272
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License
#
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import os
import errno
import re
import time
import unittest
from subprocess import PIPE
import subprocess
from system_test import main_module, SkipIfNeeded, TestCase
from system_test import Qdrouterd, TIMEOUT, AsyncTestSender, AsyncTestReceiver
try:
import queue as Queue # 3.x
except ImportError:
import Queue as Queue # 2.7
from threading import Thread
from threading import Event
import uuid
from proton import Message
from proton.handlers import MessagingHandler
from proton.reactor import Container
class ConsolePreReq(object):
@staticmethod
def is_cmd(name):
        ''' determine if a command is present and executable on the system '''
try:
devnull = open(os.devnull, "w")
subprocess.Popen([name], stdout=devnull, stderr=devnull).communicate()
except OSError as e:
            if e.errno == errno.ENOENT:
return False
return True
@staticmethod
def should_skip():
try:
found_npm = ConsolePreReq.is_cmd('npm')
return not found_npm
except OSError:
return True
class ConsoleTest(TestCase):
"""Run npm console tests"""
@classmethod
def setUpClass(cls):
super(ConsoleTest, cls).setUpClass()
def router(name, mode, extra):
config = [
('router', {'mode': mode, 'id': name}),
('listener', {'role': 'normal', 'port': cls.tester.get_port()})
]
if extra:
config.extend(extra)
config = Qdrouterd.Config(config)
cls.routers.append(cls.tester.qdrouterd(name, config, wait=True))
return cls.routers[-1]
cls.routers = []
interrouter_port = cls.tester.get_port()
cls.http_port = cls.tester.get_port()
cls.sender_port = cls.tester.get_port()
cls.receiver_port = cls.tester.get_port()
router('A', 'interior',
[('listener', {'role': 'inter-router', 'port': interrouter_port}),
('listener', {'role': 'normal', 'port': cls.sender_port}),
('listener', {'role': 'normal', 'port': cls.http_port, 'http': True})])
cls.INT_A = cls.routers[0]
cls.INT_A.listener = cls.INT_A.addresses[0]
router('B', 'interior',
[('connector', {'name': 'connectorToA', 'role': 'inter-router',
'port': interrouter_port}),
('listener', {'role': 'normal', 'port': cls.receiver_port})])
cls.INT_B = cls.routers[1]
cls.INT_B.listener = cls.INT_B.addresses[0]
cls.INT_A.wait_router_connected('B')
cls.INT_B.wait_router_connected('A')
def run_console_test(self):
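        # Start a sender and a deliberately slow receiver so the console sees
        # live traffic (including delayed deliveries), then run the npm test
        # suite against the router's HTTP listener.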
address = "toB"
# create a slow receiver so that we get delayedDeliveries
receiver = AsyncSlowReceiver(self.INT_B.listener, address)
sender = AsyncStopableSender(self.INT_A.listener, address)
pret = 0
out = ''
prg = ['npm', 'test', '--', '--watchAll=false']
p = self.popen(prg,
cwd=os.path.join(os.environ.get('BUILD_DIR'), 'console'),
env=dict(os.environ, TEST_PORT="%d" % self.http_port),
stdout=PIPE,
expect=None)
out = p.communicate()[0]
pret = p.returncode
# write the output
with open('run_console_test.out', 'w') as popenfile:
popenfile.write('returncode was %s\n' % p.returncode)
popenfile.write('out was:\n')
popenfile.writelines(str(out))
sender.stop()
receiver.stop()
time.sleep(1)
assert pret == 0, \
"console test exit status %d, output:\n%s" % (pret, out)
return out
# If we are unable to run the npm command. Skip the test
@SkipIfNeeded(ConsolePreReq.should_skip(), 'Test skipped: npm command not found')
def test_console(self):
self.run_console_test()
class AsyncStopableSender(AsyncTestSender):
def __init__(self, hostport, address):
super(AsyncStopableSender, self).__init__(hostport, address, 999999999)
self._stop_thread = False
self.sent = 0
def _main(self):
self._container.start()
while self._container.process():
if self._stop_thread:
if self._conn:
self._conn.close()
self._conn = None
def on_sendable(self, event):
self._sender.send(Message(body="message %d" % self.sent))
self.sent += 1
def stop(self, timeout=TIMEOUT):
self._stop_thread = True
self._container.wakeup()
self._thread.join(timeout=TIMEOUT)
if self._thread.is_alive():
raise Exception("AsyncStopableSender did not exit")
# Based on gsim's slow_recv.py
class TimedFlow(MessagingHandler):
def __init__(self, receiver, credit):
super(TimedFlow, self).__init__()
self.receiver = receiver
self.credit = credit
def on_timer_task(self, event):
self.receiver.flow(self.credit)
class AsyncSlowReceiver(AsyncTestReceiver):
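    # Receiver with prefetch disabled that only grants credit in batches of 10
    # (via TimedFlow), each batch scheduled one second after the previous one
    # drains, so deliveries back up on the router and show up as delayed.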
def __init__(self, hostport, target):
super(AsyncSlowReceiver, self).__init__(hostport, target, msg_args={"prefetch": 0})
def on_link_opened(self, event):
super(AsyncSlowReceiver, self).on_link_opened(event)
self.request_batch(event)
def request_batch(self, event):
event.container.schedule(1, TimedFlow(event.receiver, 10))
def check_empty(self, receiver):
return not receiver.credit and not receiver.queued
def on_link_flow(self, event):
if self.check_empty(event.receiver):
self.request_batch(event)
def on_message(self, event):
print (event.message.body)
if self.check_empty(event.receiver):
self.request_batch(event)
if __name__ == '__main__':
unittest.main(main_module())
|
bhardesty/qpid-dispatch
|
tests/system_tests_console.py
|
Python
|
apache-2.0
| 6,863
|
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 14 16:27:28 2017
@author: sebastian
"""
## retrieve auxiliary ECMWF forecast data, to augment temperature forecasts
## based on example from
## https://software.ecmwf.int/wiki/display/WEBAPI/TIGGE+retrieval+efficiency
## surface variables: (see https://software.ecmwf.int/wiki/display/TIGGE/Parameters)
# cloud cover, surface pressure, CAPE
# ECMWF forecasts from TIGGE data set:
# variables: 59/134/228164
# all available full years, 2007-2016
# init time 00 UTC
# 36/48 h ahead forecasts (= valid at 12 UTC and 00 UTC)
# 0.5° resolution
# area: -10E, 30E; 30N, 70N (large part of Europe centered around Germany)
#!/usr/bin/env python
from ecmwfapi import ECMWFDataServer
server = ECMWFDataServer()
def retrieve_tigge_data():
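    # Build one "YYYY-01-01/to/YYYY-12-31" date range per year 2007-2016 and
    # retrieve a separate GRIB file for each year.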
date1 = [str(i) + "-01-01" for i in xrange(2007,2017)]
date2 = [str(i) + "-12-31" for i in xrange(2007,2017)]
dates = date1
for j in range(0,10):
dates[j] = date1[j] + "/to/" + date2[j]
data_dir = "/media/sebastian/Elements/Postproc_NN/data/forecasts/auxiliary/"
for date in dates:
target = data_dir + "ecmwf_aux_surface_" + date[:4] + ".grib"
tigge_request(date, target)
def tigge_request(date, target):
'''
A TIGGE request for ECMWF perturbed forecasts of auxiliary surface variables.
'''
server.retrieve({
'origin' : "ecmf",
'levtype' : "sfc",
'number' : mem_numbers,
'expver' : "prod",
'dataset' : "tigge",
'step' : "36/48",
'grid' : "0.5/0.5",
'param' : "59/134/228164",
'area' : "70/-10/30/30",
'time' : "00",
'date' : date,
'type' : "pf",
'class' : "ti",
'target' : target,
})
if __name__ == '__main__':
mem_numbers = ''.join([''.join([str(i) + "/" for i in xrange(1,50)]),'50'])
retrieve_tigge_data()
|
slerch/ppnn
|
data_retrieval/forecasts/retrieve_ecmwf_auxiliary_surface_data.py
|
Python
|
mit
| 1,974
|
#!/usr/bin/python
# Import the modules to send commands to the system and access GPIO pins
import RPi.GPIO as gpio
import syslog
import os
import sys
import signal
from time import sleep
def shutdown():
# Shutdown && log
syslog.syslog('pishutdown: Soft shutdown')
gpio.cleanup()
os.system('shutdown now -h')
sys.exit(0)
def reboot():
# Reboot && log
syslog.syslog('pishutdown: Soft reboot')
gpio.cleanup()
os.system('reboot')
sys.exit(0)
def cleanup(signum, frame):
syslog.syslog('pishutdown: Caught a signal, shutting down')
gpio.cleanup()
sys.exit(1)
def wait_for(value, timeout=-1):
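    # Block until board pin 40 reaches `value` (rising edge for True, falling
    # for False), then require the level to hold for ~10 ms to debounce the
    # switch. Returns the channel number on success, or None on timeout.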
while True:
channel = gpio.wait_for_edge(40, gpio.RISING if value else gpio.FALLING, timeout=timeout)
if channel == None: return None
# Debounce
for i in range(10):
sleep(0.001)
curr_value = gpio.input(40) == gpio.HIGH
if curr_value != value: break
if i == 9: return channel
signal.signal(signal.SIGINT, cleanup)
#Set pin numbering to board numbering
gpio.setmode(gpio.BOARD)
gpio.setup(38, gpio.OUT)
gpio.output(38, gpio.HIGH)
# Enable pull-down for safety
gpio.setup(40, gpio.IN, pull_up_down=gpio.PUD_DOWN)
try:
wait_for(True)
if wait_for(False, 500) != None:
if wait_for(True, 500) != None:
reboot()
shutdown()
except KeyboardInterrupt:
print 'Keyboard interrupt'
gpio.cleanup()
sys.exit(1)
except Exception as e:
syslog.syslog('pishutdown: Exception: ' + str(e))
|
anyWareSculpture/physical
|
RaspberryPi/pishutdown.py
|
Python
|
mit
| 1,505
|
#
# ParseFeed
# Author: Reuben Joseph <reubenej@gmail.com>
#
# -*- coding:utf-8 -*-
import feedparser
import json
import os.path
# Basic configuration details
ROOT_DIR = os.path.dirname(os.path.abspath('conf/'))
CONFIG_PATH = os.path.join(ROOT_DIR, 'conf/feeds.conf')
def main():
    config_file = open(CONFIG_PATH, 'r')
    feed_config = json.loads(config_file.read())
print(feed_config)
if __name__ == "__main__":
main()
|
rjosph/munger
|
munger/ParseFeed.py
|
Python
|
mit
| 427
|
"""Alignment with SNAP: http://snap.cs.berkeley.edu/
"""
import os
from bcbio import bam, utils
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import config_utils
from bcbio.ngsalign import novoalign, postalign
from bcbio.provenance import do
def align(fastq_file, pair_file, index_dir, names, align_dir, data):
"""Perform piped alignment of fastq input files, generating sorted, deduplicated BAM.
TODO: Use streaming with new development version of SNAP to feed into
structural variation preparation de-duplication.
"""
pair_file = pair_file if pair_file else ""
out_file = os.path.join(align_dir, "{0}-sort.bam".format(names["lane"]))
assert not data.get("align_split"), "Split alignments not supported with SNAP"
snap = config_utils.get_program("snap", data["config"])
num_cores = data["config"]["algorithm"].get("num_cores", 1)
resources = config_utils.get_resources("snap", data["config"])
rg_info = novoalign.get_rg_info(names)
is_paired = bam.is_paired(fastq_file) if fastq_file.endswith(".bam") else pair_file
if not utils.file_exists(out_file):
with postalign.tobam_cl(data, out_file, is_paired) as (tobam_cl, tx_out_file):
cmd_name = "paired" if is_paired else "single"
cmd = ("{snap} {cmd_name} {index_dir} {fastq_file} {pair_file} "
"-R '{rg_info}' -t {num_cores} -M -o -sam - | ")
do.run(cmd.format(**locals()) + tobam_cl, "SNAP alignment: %s" % names["sample"])
data["work_bam"] = out_file
return data
def align_bam(bam_file, index_dir, names, align_dir, data):
return align(bam_file, None, index_dir, names, align_dir, data)
# Optional galaxy location file. Falls back on remap_index_fn if not found
galaxy_location_file = "snap_indices.loc"
def remap_index_fn(ref_file):
"""Map sequence references to snap reference directory, using standard layout.
"""
snap_dir = os.path.join(os.path.dirname(ref_file), os.pardir, "snap")
assert os.path.exists(snap_dir) and os.path.isdir(snap_dir), snap_dir
return snap_dir
|
gifford-lab/bcbio-nextgen
|
bcbio/ngsalign/snap.py
|
Python
|
mit
| 2,111
|