| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (1 class) | license (15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
| akshitjain/dcos-cassandra-service | integration/tests/defaults.py | Python | apache-2.0 | 678 | 0 |
import os
import shakedown
DEFAULT_NODE_COUNT = 3
PACKAGE_NAME = 'mds-cassandra'
TASK_RUNNING_STATE = 'TASK_RUNNING'
DCOS_URL = shakedown.run_dcos_command('config show core.dcos_url')[0].strip()
# expected SECURITY values: 'permissive', 'strict', 'disabled'
if os.environ.get('SECURITY', '') == 'strict':
print('Using strict mode test configuration')
PRINCIPAL = 'service-acct'
DEFAULT_OPTIONS_DICT = {
"service": {
"user": "nobody",
"principal": PRINCIPAL,
"secret_name": "secret"
}
}
else:
print('Using default test configuration')
PRINCIPAL = 'cassandra-principal'
DEFAULT_OPTIONS_DICT = {}

| manelore/django-haystack | tests/elasticsearch_tests/tests/__init__.py | Python | bsd-3-clause | 227 | 0.013216 |
import warnings
warnings.simplefilter('ignore', Warning)
from elasticsearch_tests.tests.inputs import *
from elasticsearch_tests.tests.elasticsearch_query import *
from elasticsearch_tests.tests.elasticsearch_backend import *

| chrisspen/django-feeds | djangofeeds/migrations/0001_initial.py | Python | bsd-2-clause | 10,467 | 0.006879 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Category'
db.create_table(u'djangofeeds_category', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=128)),
('domain', self.gf('django.db.models.fields.CharField')(max_length=128, null=True, blank=True)),
))
db.send_create_signal(u'djangofeeds', ['Category'])
# Adding unique constraint on 'Category', fields ['name', 'domain']
db.create_unique(u'djangofeeds_category', ['name', 'domain'])
# Adding model 'Feed'
db.create_table(u'djangofeeds_feed', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=200)),
('feed_url', self.gf('django.db.models.fields.URLField')(unique=True, max_length=200)),
('description', self.gf('django.db.models.fields.TextField')()),
('link', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)),
('http_etag', self.gf('django.db.models.fields.CharField')(max_length=200, null=True, blank=True)),
('http_last_modified', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('date_last_refresh', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('last_error', self.gf('django.db.models.fields.CharField')(default='', max_length=32, blank=True)),
('ratio', self.gf('django.db.models.fields.FloatField')(default=0.0)),
('sort', self.gf('django.db.models.fields.SmallIntegerField')(default=0)),
('date_created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('date_changed', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('date_last_requested', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('is_active', self.gf('django.db.models.fields.BooleanField')(default=True)),
('freq', self.gf('django.db.models.fields.IntegerField')(default=10800)),
))
db.send_create_signal(u'djangofeeds', ['Feed'])
# Adding M2M table for field categories on 'Feed'
m2m_table_name = db.shorten_name(u'djangofeeds_feed_categories')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('feed', models.ForeignKey(orm[u'djangofeeds.feed'], null=False)),
('category', models.ForeignKey(orm[u'djangofeeds.category'], null=False))
))
db.create_unique(m2m_table_name, ['feed_id', 'category_id'])
# Adding model 'Enclosure'
db.create_table(u'djangofeeds_enclosure', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('url', self.gf('django.db.models.fields.URLField')(max_length=200)),
('type', self.gf('django.db.models.fields.CharField')(max_length=200)),
('length', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
))
db.send_create_signal(u'djangofeeds', ['Enclosure'])
# Adding model 'Post'
db.create_table(u'djangofeeds_post', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('feed', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['djangofeeds.Feed'])),
('title', self.gf('django.db.models.fields.CharField')(max_length=200)),
('link', self.gf('django.db.models.fields.URLField')(max_length=2048)),
('content', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('guid', self.gf('django.db.models.fields.CharField')(max_length=200, blank=True)),
('author', self.gf('django.db.models.fields.CharField')(max_length=50, blank=True)),
('date_published', self.gf('django.db.models.fields.DateField')()),
('date_updated', self.gf('django.db.models.fields.DateTimeField')()),
))
db.send_create_signal(u'djangofeeds', ['Post'])
# Adding M2M table for field enclosures on 'Post'
m2m_table_name = db.shorten_name(u'djangofeeds_post_enclosures')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('post', models.ForeignKey(orm[u'djangofeeds.post'], null=False)),
('enclosure', models.ForeignKey(orm[u'djangofeeds.enclosure'], null=False))
))
db.create_unique(m2m_table_name, ['post_id', 'enclosure_id'])
# Adding M2M table for field categories on 'Post'
m2m_table_name = db.shorten_name(u'djangofeeds_post_categories')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('post', models.ForeignKey(orm[u'djangofeeds.post'], null=False)),
('category', models.ForeignKey(orm[u'djangofeeds.category'], null=False))
))
db.create_unique(m2m_table_name, ['post_id', 'category_id'])
def backwards(self, orm):
# Removing unique constraint on 'Category', fields ['name', 'domain']
db.delete_unique(u'djangofeeds_category', ['name', 'domain'])
# Deleting model 'Category'
db.delete_table(u'djangofeeds_category')
# Deleting model 'Feed'
db.delete_table(u'djangofeeds_feed')
# Removing M2M table for field categories on 'Feed'
db.delete_table(db.shorten_name(u'djangofeeds_feed_categories'))
# Deleting model 'Enclosure'
db.delete_table(u'djangofeeds_enclosure')
# Deleting model 'Post'
db.delete_table(u'djangofeeds_post')
# Removing M2M table for field enclosures on 'Post'
db.delete_table(db.shorten_name(u'djangofeeds_post_enclosures'))
# Removing M2M table for field categories on 'Post'
db.delete_table(db.shorten_name(u'djangofeeds_post_categories'))
models = {
u'djangofeeds.category': {
'Meta': {'unique_together': "(('name', 'domain'),)", 'object_name': 'Category'},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'djangofeeds.enclosure': {
'Meta': {'object_name': 'Enclosure'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'length': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
u'djangofeeds.feed': {
'Meta': {'ordering': "('id',)", 'object_name': 'Feed'},
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['djangofeeds.Category']", 'symmetrical': 'False'}),
'date_changed': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_last_refresh': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_last_requested': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'feed_url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length

| louistin/thinkstation | a_byte_of_python/unit_9_module/mymodule_demo3.py | Python | mit | 114 | 0.009434 |
#!/usr/bin/python3
from mymodule import *
sayhi()
# __version__ will not be imported
# print('Version: ', __version__)

| tjanez/bup | cmd/daemon-cmd.py | Python | lgpl-2.1 | 2,128 | 0.003289 |
#!/bin/sh
"""": # -*-python-*-
bup_python="$(dirname "$0")/bup-python" || exit $?
exec "$bup_python" "$0" ${1+"$@"}
"""
# end of bup preamble
import sys, getopt, socket, subprocess, fcntl
from bup import options, path
from bup.helpers import *
optspec = """
bup daemon [options...] -- [bup-server options...]
--
l,listen ip address to listen on, defaults to *
p,port port to listen on, defaults to 1982
"""
o = options.Options(optspec, optfunc=getopt.getopt)
(opt, flags, extra) = o.parse(sys.argv[1:])
host = opt.listen
port = opt.port and int(opt.port) or 1982
import socket
import sys
socks = []
e = None
for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC,
socket.SOCK_STREAM, 0, socket.AI_PASSIVE):
af, socktype, proto, canonname, sa = res
try:
s = socket.socket(af, socktype, proto)
except socket.error as e:
continue
try:
if af == socket.AF_INET6:
log("bup daemon: listening on [%s]:%s\n" % sa[:2])
else:
log("bup daemon: listening on %s:%s\n" % sa[:2])
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(sa)
s.listen(1)
fcntl.fcntl(s.fileno(), fcntl.F_SETFD, fcntl.FD_CLOEXEC)
except socket.error as e:
s.close()
continue
socks.append(s)
if not socks:
log('bup daemon: listen socket: %s\n' % e.args[1])
sys.exit(1)
try:
while True:
[rl,wl,xl] = select.select(socks, [], [], 60)
for l in rl:
s, src = l.accept()
try:
log("Socket accepted connection from %s\n" % (src,))
fd1 = os.dup(s.fileno())
fd2 = os.dup(s.fileno())
s.close()
sp = subprocess.Popen([path.exe(), 'mux', '--',
path.exe(), 'server']
+ extra, stdin=fd1, stdout=fd2)
finally:
os.close(fd1)
os.close(fd2)
finally:
for l in socks:
l.shutdown(socket.SHUT_RDWR)
l.close()
debug1("bup daemon: done")

| eharney/cinder | cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py | Python | apache-2.0 | 179,526 | 0.000045 |
# Copyright 2015 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import math
import paramiko
import random
import re
import time
import unicodedata
from eventlet import greenthread
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils as json
from oslo_service import loopingcall
from oslo_utils import excutils
from oslo_utils import strutils
from oslo_utils import units
import six
from cinder import context
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.objects import fields
from cinder import ssh_utils
from cinder import utils as cinder_utils
from cinder.volume import configuration
from cinder.volume import driver
from cinder.volume.drivers.ibm.storwize_svc import (
replication as storwize_rep)
from cinder.volume.drivers.ibm.storwize_svc import storwize_const
from cinder.volume.drivers.san import san
from cinder.volume import qos_specs
from cinder.volume import utils
from cinder.volume import volume_types
INTERVAL_1_SEC = 1
DEFAULT_TIMEOUT = 15
LOG = logging.getLogger(__name__)
storwize_svc_opts = [
cfg.ListOpt('storwize_svc_volpool_name',
default=['volpool'],
help='Comma separated list of storage system storage '
'pools for volumes.'),
cfg.IntOpt('storwize_svc_vol_rsize',
default=2,
min=-1, max=100,
help='Storage system space-efficiency parameter for volumes '
'(percentage)'),
cfg.IntOpt('storwize_svc_vol_warning',
default=0,
min=-1, max=100,
help='Storage system threshold for volume capacity warnings '
'(percentage)'),
cfg.BoolOpt('storwize_svc_vol_autoexpand',
default=True,
help='Storage system autoexpand parameter for volumes '
'(True/False)'),
cfg.IntOpt('storwize_svc_vol_grainsize',
default=256,
help='Storage system grain size parameter for volumes '
'(32/64/128/256)'),
cfg.BoolOpt('storwize_svc_vol_compression',
default=False,
help='Storage system compression option for volumes'),
cfg.BoolOpt('storwize_svc_vol_easytier',
default=True,
help='Enable Easy Tier for volumes'),
cfg.StrOpt('storwize_svc_vol_iogrp',
default='0',
help='The I/O group in which to allocate volumes. It can be a '
'comma-separated list in which case the driver will select an '
'io_group based on least number of volumes associated with the '
'io_group.'),
cfg.IntOpt('storwize_svc_flashcopy_timeout',
default=120,
min=1, max=600,
help='Maximum number of seconds to wait for FlashCopy to be '
'prepared.'),
cfg.BoolOpt('storwize_svc_multihostmap_enabled',
default=True,
help='This option no longer has any effect. It is deprecated '
'and will be removed in the next release.',
deprecated_for_removal=True),
cfg.BoolOpt('storwize_svc_allow_tenant_qos',
default=False,
help='Allow tenants to specify QOS on create'),
cfg.StrOpt('storwize_svc_stretched_cluster_partner',
default=None,
help='If operating in stretched cluster mode, specify the '
'name of the pool in which mirrored copies are stored.'
'Example: "pool2"'),
cfg.StrOpt('storwize_san_secondary_ip',
default=None,
help='Specifies secondary management IP or hostname to be '
'used if san_ip is invalid or becomes inaccessible.'),
cfg.BoolOpt('storwize_svc_vol_nofmtdisk',
default=False,
help='Specifies that the volume not be formatted during '
'creation.'),
cfg.IntOpt('storwize_svc_flashcopy_rate',
default=50,
min=1, max=100,
help='Specifies the Storwize FlashCopy copy rate to be used '
'when creating a full volume copy. The default rate '
'is 50, and the valid rates are 1-100.'),
cfg.StrOpt('storwize_svc_mirror_pool',
default=None,
help='Specifies the name of the pool in which mirrored copy '
'is stored. Example: "pool2"'),
cfg.IntOpt('cycle_period_seconds',
default=300,
min=60, max=86400,
help='This defines an optional cycle period that applies to '
'Global Mirror relationships with a cycling mode of multi. '
'A Global Mirror relationship using the multi cycling_mode '
'performs a complete cycle at most once each period. '
'The default is 300 seconds, and the valid seconds '
'are 60-86400.'),
]
CONF = cfg.CONF
CONF.register_opts(storwize_svc_opts, group=configuration.SHARED_CONF_GROUP)
class StorwizeSSH(object):
"""SSH interface to IBM Storwize family and SVC storage systems."""
def __init__(self, run_ssh):
self._ssh = run_ssh
def _run_ssh(self, ssh_cmd):
try:
return self._ssh(ssh_cmd)
except processutils.ProcessExecutionError as e:
msg = (_('CLI Exception output:\n command: %(cmd)s\n '
'stdout: %(out)s\n stderr: %(err)s.') %
{'cmd': ssh_cmd,
'out': e.stdout,
'err': e.stderr})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
def run_ssh_info(self, ssh_cmd, delim='!', with_header=False):
"""Run an SSH command and return parsed output."""
raw = self._run_ssh(ssh_cmd)
return CLIResponse(raw, ssh_cmd=ssh_cmd, delim=delim,
with_header=with_header)
def run_ssh_assert_no_output(self, ssh_cmd):
"""Run an SSH command and assert no output returned."""
out, err = self._run_ssh(ssh_cmd)
if len(out.strip()) != 0:
msg = (_('Expected no output from CLI command %(cmd)s, '
'got %(out)s.') % {'cmd': ' '.join(ssh_cmd), 'out': out})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
def run_ssh_check_created(self, ssh_cmd):
"""Run an SSH command and return the ID of the created object."""
out, err = self._run_ssh(ssh_cmd)
try:
match_obj = re.search(r'\[([0-9]+)\],? successfully created', out)
return match_obj.group(1)
except (AttributeError, IndexError):
msg = (_('Failed to parse CLI output:\n command: %(cmd)s\n '
'stdout: %(out)s\n stderr: %(err)s.') %
{'cmd': ssh_cmd,
'out': out,
'err': err})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
def lsnode(self, node_id=None):
with_header = True
ssh_cmd = ['svcinfo', 'lsnode', '-delim', '!']
if node_id:
with_header = False
ssh_cmd.append(node_id)
return self.run_ssh_info(ssh_cmd, with_header=with_header)
def lslicense(self):
ssh_cmd = ['svcinfo', 'lslicense', '-delim', '!']
return self.run_ssh_info(ssh_cmd)[0]

| ganeti/ganeti | test/py/ganeti.utils.log_unittest.py | Python | bsd-2-clause | 9,296 | 0.005271 |
#!/usr/bin/python3
#
# Copyright (C) 2011 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Script for testing ganeti.utils.log"""
import os
import unittest
import logging
import tempfile
import shutil
import threading
from io import FileIO, StringIO
from ganeti import constants
from ganeti import errors
from ganeti import compat
from ganeti import utils
import testutils
class TestLogHandler(unittest.TestCase):
def testNormal(self):
tmpfile = tempfile.NamedTemporaryFile()
handler = utils.log._ReopenableLogHandler(tmpfile.name)
handler.setFormatter(logging.Formatter("%(asctime)s: %(message)s"))
logger = logging.Logger("TestLogger")
logger.addHandler(handler)
self.assertEqual(len(logger.handlers), 1)
logger.error("Test message ERROR")
logger.info("Test message INFO")
logger.removeHandler(handler)
self.assertFalse(logger.handlers)
handler.close()
self.assertEqual(len(utils.ReadFile(tmpfile.name).splitlines()), 2)
def testReopen(self):
tmpfile = tempfile.NamedTemporaryFile()
tmpfile2 = tempfile.NamedTemporaryFile()
handler = utils.log._ReopenableLogHandler(tmpfile.name)
self.assertFalse(utils.ReadFile(tmpfile.name))
self.assertFalse(utils.ReadFile(tmpfile2.name))
logger = logging.Logger("TestLoggerReopen")
logger.addHandler(handler)
for _ in range(3):
logger.error("Test message ERROR")
handler.flush()
self.assertEqual(len(utils.ReadFile(tmpfile.name).splitlines()), 3)
before_id = utils.GetFileID(tmpfile.name)
handler.RequestReopen()
self.assertTrue(handler._reopen)
self.assertTrue(utils.VerifyFileID(utils.GetFileID(tmpfile.name),
before_id))
# Rename only after requesting reopen
os.rename(tmpfile.name, tmpfile2.name)
assert not os.path.exists(tmpfile.name)
# Write another message, should reopen
for _ in range(4):
logger.info("Test message INFO")
# Flag must be reset
self.assertFalse(handler._reopen)
self.assertFalse(utils.VerifyFileID(utils.GetFileID(tmpfile.name),
before_id))
logger.removeHandler(handler)
self.assertFalse(logger.handlers)
handler.close()
self.assertEqual(len(utils.ReadFile(tmpfile.name).splitlines()), 4)
self.assertEqual(len(utils.ReadFile(tmpfile2.name).splitlines()), 3)
def testConsole(self):
temp_file = tempfile.NamedTemporaryFile(mode="w", encoding="utf-8")
failing_file = self._FailingFile(os.devnull, "w")
for (console, check) in [(None, False),
(temp_file, True),
(failing_file, False)]:
# Create a handler which will fail when handling errors
cls = utils.log._LogErrorsToConsole(self._FailingHandler)
# Instantiate handler with file which will fail when writing,
# provoking a write to the console
failing_output = self._FailingFile(os.devnull)
handler = cls(console, failing_output)
logger = logging.Logger("TestLogger")
logger.addHandler(handler)
self.assertEqual(len(logger.handlers), 1)
# Provoke write
logger.error("Test message ERROR")
# Take everything apart
logger.removeHandler(handler)
self.assertFalse(logger.handlers)
handler.close()
failing_output.close()
if console and check:
console.flush()
# Check console output
consout = utils.ReadFile(console.name)
self.assertTrue("Cannot log message" in consout)
self.assertTrue("Test message ERROR" in consout)
temp_file.close()
failing_file.close()
class _FailingFile(FileIO):
def write(self, _):
raise Exception
class _FailingHandler(logging.StreamHandler):
def handleError(self, _):
raise Exception
class TestSetupLogging(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmpdir)
def testSimple(self):
logfile = utils.PathJoin(self.tmpdir, "basic.log")
logger = logging.Logger("TestLogger")
self.assertTrue(callable(utils.SetupLogging(logfile, "test",
console_logging=False,
syslog=constants.SYSLOG_NO,
stderr_logging=False,
multithreaded=False,
root_logger=logger)))
self.assertEqual(utils.ReadFile(logfile), "")
logger.error("This is a test")
# Ensure SetupLogging used custom logger
logging.error("This message should not show up in the test log file")
self.assertTrue(utils.ReadFile(logfile).endswith("This is a test\n"))
def testReopen(self):
logfile = utils.PathJoin(self.tmpdir, "reopen.log")
logfile2 = utils.PathJoin(self.tmpdir, "reopen.log.OLD")
logger = logging.Logger("TestLogger")
reopen_fn = utils.SetupLogging(logfile, "test",
console_logging=False,
syslog=constants.SYSLOG_NO,
stderr_logging=False,
multithreaded=False,
root_logger=logger)
self.assertTrue(callable(reopen_fn))
self.assertEqual(utils.ReadFile(logfile), "")
logger.error("This is a test")
self.assertTrue(utils.ReadFile(logfile).endswith("This is a test\n"))
os.rename(logfile, logfile2)
assert not os.path.exists(logfile)
# Notify logger to reopen on the next message
reopen_fn()
assert not os.path.exists(logfile)
# Provoke actual reopen
logger.error("First message")
self.assertTrue(utils.ReadFile(logfile).endswith("First message\n"))
self.assertTrue(utils.ReadFile(logfile2).endswith("This is a test\n"))
class TestSetupToolLogging(unittest.TestCase):
def test(self):
error_name = logging.getLevelName(logging.ERROR)
warn_name = logging.getLevelName(logging.WARNING)
info_name = logging.getLevelName(logging.INFO)
debug_name = logging.getLevelName(logging.DEBUG)
for debug in [False, True]:
for verbose in [False, True]:
logger = logging.Logger("TestLogger")
buf = StringIO()
utils.SetupToolLogging(debug, verbose, _root_logger=logger, _stream=buf)
logger.error("level=error")
logger.warning("level=warning")
logger.info("level=info")
logger.debug("level=debug")
lines = buf.getvalue().splitlines()
self.assertTrue(compat.all(line.count(":") == 3 for line in lines))
messages = [line.split(":", 3)[-1].strip() for line in lines]
if debug:
self.assertEqual(messages, [
"%s level=error" % error_name,
"%s le

| GoogleCloudPlatform/python-docs-samples | compute/encryption/generate_wrapped_rsa_key_test.py | Python | apache-2.0 | 1,721 | 0 |
# Copyright 2016, Google, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import uuid
import googleapiclient.discovery
import generate_wrapped_rsa_key
PROJECT = os.environ['GOOGLE_CLOUD_PROJECT']
def test_main():
generate_wrapped_rsa_key.main(None)
def test_create_disk():
compute = googleapiclient.discovery.build('compute', 'beta')
# Generate the key.
key_bytes = os.urandom(32)
google_public_key = generate_wrapped_rsa_key.get_google_public_cert_key()
wrapped_rsa_key = generate_wrapped_rsa_key.wrap_rsa_key(
google_public_key, key_bytes)
disk_name = 'new-encrypted-disk-{}'.format(uuid.uuid4().hex)
try:
# Create the disk, if the encryption key is invalid, this will raise.
compute.disks().insert(
project=PROJECT,
zone='us-central1-f',
body={
'name': disk_name,
'diskEncryptionKey': {
'rsaEncryptedKey': wrapped_rsa_key.decode('utf-8')
}
}).execute()
finally:
# Delete the disk.
compute.disks().delete(
project=PROJECT,
zone='us-central1-f',
disk=disk_name).execute()

| jaryn/virt-deploy | virtdeploy/test_driverbase.py | Python | gpl-2.0 | 2,986 | 0 |
#
# Copyright 2015 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from __future__ import absolute_import
import sys
import inspect
import unittest
from mock import patch
from mock import MagicMock
from . import get_driver
from . import get_driver_class
from . import get_driver_names
from .driverbase import VirtDeployDriverBase
if sys.version_info[0] == 3: # pragma: no cover
builtin_import = 'builtins.__import__'
else: # pragma: no cover
builtin_import = '__builtin__.__import__'
def try_import(spec):
def fake_import(name, globals={}, locals={}, fromlist=[], level=0):
try:
return spec(name, globals, locals, fromlist, level)
except ImportError:
return MagicMock()
return fake_import
class TestVirtDeployDriverBase(unittest.TestCase):
def _get_driver_methods(self):
return inspect.getmembers(VirtDeployDriverBase, inspect.ismethod)
def _get_driver_class(self, name):
with patch(builtin_import, spec=True, new_callable=try_import):
return get_driver_class(name)
def _get_driver(self, name):
with patch(builtin_import, spec=True, new_callable=try_import):
return get_driver(name)
def test_base_not_implemented(self):
driver = VirtDeployDriverBase()
for name, method in self._get_driver_methods():
spec = inspect.getargspec(method)
with self.assertRaises(NotImplementedError) as cm:
getattr(driver, name)(*(None,) * (len(spec.args) - 1))
self.assertEqual(cm.exception.args[0], name)
def test_drivers_interface(self):
for driver_name in get_driver_names():
driver = self._get_driver_class(driver_name)
for name, method in self._get_driver_methods():
driver_method = getattr(driver, name)
self.assertNotEqual(driver_method, method)
self.assertEqual(inspect.getargspec(method),
inspect.getargspec(driver_method))
def test_get_drivers(self):
for driver_name in get_driver_names():
driver = self._get_driver(driver_name)
self.assertTrue(isinstance(driver, VirtDeployDriverBase))

| takumak/tuna | src/sourceswidget.py | Python | mit | 4,761 | 0.010292 |
import os
import logging
from PyQt5.QtCore import Qt, pyqtSignal
from PyQt5.QtGui import QCursor
from PyQt5.QtWidgets import \
QWidget, QSplitter, QTreeWidget, QTreeWidgetItem, QMenu, \
QTableWidgetItem
from sheetwidget import SheetWidget
from commonwidgets import *
class SourcesWidget(QSplitter):
updateRequested = pyqtSignal(name='updateRequested')
def __init__(self):
super().__init__(Qt.Horizontal)
self.tree = QTreeWidget()
self.blank = QWidget()
self.addWidget(self.tree)
self.addWidget(self.blank)
self.tree.header().hide()
self.tree.itemSelectionChanged.connect(self.treeItemSelectionChanged)
self.tree.itemChanged.connect(lambda item, col: self.updateRequested.emit())
self.tree.setContextMenuPolicy(Qt.CustomContextMenu)
self.tree.customContextMenuRequested.connect(self.treeContextMenuRequested)
self.fileMenu = QMenu()
self.fileMenu.addAction('&Remove file').triggered.connect(
lambda: self.removeFile(self.fileMenu.target))
self.sheets = []
def treeItemSelectionChanged(self):
items = self.tree.selectedItems()
if len(items) == 0:
self.replaceWidget(1, self.blank)
return
item = items[0]
sw = item.data(0, Qt.UserRole)[0]
if not isinstance(sw, SheetWidget):
self.replaceWidget(1, self.blank)
return
self.replaceWidget(1, sw)
def topLevelItemForFilename(self, filename):
for i in range(self.tree.topLevelItemCount()):
item = self.tree.topLevelItem(i)
if item.data(0, Qt.UserRole)[0] == filename:
return item
return None
def addFile(self, filename, checked, expanded, sheets):
fitem = self.topLevelItemForFilename(filename)
if fitem is not None:
self.tree.takeTopLevelItem(fitem)
fitem = QTreeWidgetItem([os.path.basename(filename)])
fitem.setData(0, Qt.UserRole, (filename,))
fitem.setFlags((fitem.flags() | Qt.ItemIsUserCheckable) & ~Qt.ItemIsSelectable)
fitem.setCheckState(0, Qt.Checked if checked else Qt.Unchecked)
self.tree.addTopLevelItem(fitem)
fitem.setExpanded(expanded)
for sheet, checked in sheets:
self.addSheet(fitem, sheet, checked)
def addSheet(self, fitem, sheet, checked):
def copyInput(fitem, target, toall):
key = {
'x': 'xFormula',
'y': 'yFormula',
'xrange': 'xRange'
}[target]
val = getattr(sw.sheet, key).strValue()
if toall:
fitems = []
for i in range(self.tree.topLevelItemCount()):
fitems.append(self.tree.topLevelItem(i))
else:
fitems = [fitem]
for fitem in fitems:
for j in range(fitem.childCount()):
sitem = fitem.child(j)
sw_ = sitem.data(0, Qt.UserRole)[0]
if sw_ != sw:
getattr(sw_.sheet, key).setStrValue(val)
sw = SheetWidget(sheet)
sw.copyInputRequested.connect(lambda *a: copyInput(fitem, *a))
item = QTreeWidgetItem([sheet.name])
item.setFlags(item.flags() | Qt.ItemIsUserCheckable)
item.setData(0, Qt.UserRole, (sw,))
item.setCheckState(0, Qt.Checked if checked else Qt.Unchecked)
fitem.addChild(item)
def removeAllFiles(self):
while self.tree.topLevelItemCount() > 0:
self.tree.takeTopLevelItem(0)
def files(self):
files = []
for i in range(self.tree.topLevelItemCount()):
fitem = self.tree.topLevelItem(i)
filename = fitem.data(0, Qt.UserRole)[0]
sheets = []
for j in range(fitem.childCount()):
sitem = fitem.child(j)
sw = sitem.data(0, Qt.UserRole)[0]
sheets.append((sw, sitem.checkState(0) == Qt.Checked))
if len(sheets) > 0:
files.append({
'filename': filename,
'enabled': fitem.checkState(0) == Qt.Checked,
'expanded': fitem.isExpanded(),
'sheets': sheets
})
return files
def enabledSheetWidgets(self):
return sum([[sw for sw, c in f['sheets'] if c] for f in self.files() if f['enabled']], [])
def siblingSheetWidgets(self, sheetwidget):
for i in range(self.tree.topLevelItemCount()):
fitem = self.tree.topLevelItem(i)
widgets = []
hit = False
for j in range(fitem.childCount()):
sitem = fitem.child(j)
sw = sitem.data(0, Qt.UserRole)[0]
if sitem.checkState(0) == Qt.Checked: widgets.append(sw)
if sw == sheetwidget: hit = True
return widgets
return []
def removeFile(self, item):
idx = self.tree.indexOfTopLevelItem(item)
if idx >= 0:
self.tree.takeTopLevelItem(idx)
def treeContextMenuRequested(self, pos):
item = self.tree.itemAt(pos)
if self.tree.indexOfTopLevelItem(item) >= 0:
self.fileMenu.target = item
self.fileMenu.exec_(QCursor.pos())

| jpszerzp/sample_AI | projectParams.py | Python | apache-2.0 | 818 | 0.002445 |
# projectParams.py
# ----------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
STUDENT_CODE_DEFAULT = 'multiAgents.py'
PROJECT_TEST_CLASSES = 'multiagentTestClasses.py'
PROJECT_NAME = 'Project 2: Multiagent search'
BONUS_PIC = False

| manhhomienbienthuy/pythondotorg | sponsors/models/benefits.py | Python | apache-2.0 | 20,095 | 0.002787 |
"""
This module holds models related to benefits features and configurations
"""
from django import forms
from django.db import models
from django.db.models import UniqueConstraint
from django.urls import reverse
from polymorphic.models import PolymorphicModel
from sponsors.models.assets import ImgAsset, TextAsset, FileAsset, ResponseAsset, Response
from sponsors.models.enums import (
PublisherChoices,
LogoPlacementChoices,
AssetsRelatedTo,
)
########################################
# Benefit features abstract classes
from sponsors.models.managers import BenefitFeatureQuerySet
########################################
# Benefit features abstract classes
class BaseLogoPlacement(models.Model):
publisher = models.CharField(
max_length=30,
choices=[(c.value, c.name.replace("_", " ").title()) for c in PublisherChoices],
verbose_name="Publisher",
help_text="On which site should the logo be displayed?"
)
logo_place = models.CharField(
max_length=30,
choices=[(c.value, c.name.replace("_", " ").title()) for c in LogoPlacementChoices],
verbose_name="Logo Placement",
help_text="Where the logo should be placed?"
)
link_to_sponsors_page = models.BooleanField(
default=False,
help_text="Override URL in placement to the PSF Sponsors Page, rather than the sponsor landing page url.",
)
describe_as_sponsor = models.BooleanField(
default=False,
help_text='Override description with "SPONSOR_NAME is a SPONSOR_LEVEL sponsor of the Python Software Foundation".',
)
class Meta:
abstract = True
class BaseTieredQuantity(models.Model):
package = models.ForeignKey("sponsors.SponsorshipPackage", on_delete=models.CASCADE)
quantity = models.PositiveIntegerField()
class Meta:
abstract = True
class BaseEmailTargetable(models.Model):
class Meta:
abstract = True
class BaseAsset(models.Model):
ASSET_CLASS = None
related_to = models.CharField(
max_length=30,
choices=[(c.value, c.name.replace("_", " ").title()) for c in AssetsRelatedTo],
verbose_name="Related To",
help_text="To which instance (Sponsor or Sponsorship) should this asset relate to."
)
internal_name = models.CharField(
max_length=128,
verbose_name="Internal Name",
help_text="Unique name used internally to control if the sponsor/sponsorship already has the asset",
unique=False,
db_index=True,
)
label = models.CharField(
max_length=256,
help_text="What's the title used to display the input to the sponsor?"
)
help_text = models.CharField(
max_length=256,
help_text="Any helper comment on how the input should be populated",
default="",
blank=True
)
class Meta:
abstract = True
class BaseRequiredAsset(BaseAsset):
due_date = models.DateField(default=None, null=True, blank=True)
class Meta:
abstract = True
class BaseProvidedAsset(BaseAsset):
shared = models.BooleanField(
default = False,
)
def shared_value(self):
return None
class Meta:
abstract = True
class AssetConfigurationMixin:
"""
This class should be used to implement assets configuration.
It's a mixin to update the benefit feature creation to also
create the related assets models
"""
def create_benefit_feature(self, sponsor_benefit, **kwargs):
if not self.ASSET_CLASS:
raise NotImplementedError(
"Subclasses of AssetConfigurationMixin must define an ASSET_CLASS attribute.")
# Super: BenefitFeatureConfiguration.create_benefit_feature
benefit_feature = super().create_benefit_feature(sponsor_benefit, **kwargs)
content_object = sponsor_benefit.sponsorship
if self.related_to == AssetsRelatedTo.SPONSOR.value:
content_object = sponsor_benefit.sponsorship.sponsor
asset_qs = content_object.assets.filter(internal_name=self.internal_name)
if not asset_qs.exists():
asset = self.ASSET_CLASS(
content_object=content_object, internal_name=self.internal_name,
)
asset.save()
return benefit_feature
class Meta:
abstract = True
class BaseRequiredImgAsset(BaseRequiredAsset):
ASSET_CLASS = ImgAsset
min_width = models.PositiveIntegerField()
max_width = models.PositiveIntegerField()
min_height = models.PositiveIntegerField()
max_height = models.PositiveIntegerField()
class Meta(BaseRequiredAsset.Meta):
abstract = True
class BaseRequiredTextAsset(BaseRequiredAsset):
ASSET_CLASS = TextAsset
label = models.CharField(
max_length=256,
help_text="What's the title used to display the text input to the sponsor?"
)
help_text = models.CharField(
max_length=256,
help_text="Any helper comment on how the input should be populated",
default="",
blank=True
)
max_length = models.IntegerField(
default=None,
help_text="Limit to length of the input, empty means unlimited",
null=True,
blank=True,
)
class Meta(BaseRequiredAsset.Meta):
abstract = True
class BaseRequiredResponseAsset(BaseRequiredAsset):
ASSET_CLASS = ResponseAsset
class Meta(BaseRequiredAsset.Meta):
abstract = True
class BaseProvidedTextAsset(BaseProvidedAsset):
ASSET_CLASS = TextAsset
label = models.CharField(
max_length=256,
help_text="What's the title used to display the text input to the sponsor?"
)
help_text = models.CharField(
max_length=256,
help_text="Any helper comment on how the input should be populated",
default="",
blank=True
)
class Meta(BaseProvidedAsset.Meta):
abstract = True
class BaseProvidedFileAsset(BaseProvidedAsset):
ASSET_CLASS = FileAsset
label = models.CharField(
max_length=256,
help_text="What's the title used to display the file to the sponsor?"
)
help_text = models.CharField(
max_length=256,
help_text="Any helper comment on how the file should be used",
default="",
blank=True
)
shared_file = models.FileField(blank=True, null=True)
def shared_value(self):
return self.shared_file
class Meta(BaseProvidedAsset.Meta):
abstract = True
class AssetMixin:
def __related_asset(self):
"""
This method exists to avoid FK relationships between the GenericAsset
and required asset objects. This is to decouple the assets set up from the
real assets value in a way that, if the first gets deleted, the second can
still be reused.
"""
object = self.sponsor_benefit.sponsorship
if self.related_to == AssetsRelatedTo.SPONSOR.value:
object = self.sponsor_benefit.sponsorship.sponsor
return object.assets.get(internal_name=self.internal_name)
@property
def value(self):
asset = self.__related_asset()
return asset.value
@value.setter
def value(self, value):
asset = self.__related_asset()
asset.value = value
asset.save()
@property
def user_edit_url(self):
url = reverse("users:update_sponsorship_assets", args=[self.sponsor_benefit.sponsorship.pk])
return url + f"?required_asset={self.pk}"
@property
def user_view_url(self):
url = reverse("users:view_provided_sponsorship_assets", args=[self.sponsor_benefit.sponsorship.pk])
return url + f"?provided_asset={self.pk}"
class RequiredAssetMixin(AssetMixin):
"""
This class should be used to implement required assets.
It's a mixin to get the information submitted by the user
and which is stored in the related asset class.
"""
pass
class ProvidedAssetMixin(AssetMixin):
"""
This class should be used to implement provided assets.
It's a mixin to get the information submitted by the staff
and

| VitensTC/epynet | epynet/node.py | Python | apache-2.0 | 4,030 | 0.00397 |
""" EPYNET Classes """
from . import epanet2
from .objectcollection import ObjectCollection
from .baseobject import BaseObject, lazy_property
from .pattern import Pattern
class Node(BaseObject):
""" Base EPANET Node class """
static_properties = {'elevation': epanet2.EN_ELEVATION}
properties = {'head': epanet2.EN_HEAD, 'pressure': epanet2.EN_PRESSURE}
def __init__(self, uid, network):
super(Node, self).__init__(uid, network)
self.links = ObjectCollection()
def get_index(self, uid):
if not self._index:
self._index = self.network().ep.ENgetnodeindex(uid)
return self._index
def set_object_value(self, code, value):
return self.network().ep.ENsetnodevalue(self.index, code, value)
def get_object_value(self, code):
return self.network().ep.ENgetnodevalue(self.index, code)
@property
def index(self):
return self.get_index(self.uid)
@lazy_property
def coordinates(self):
return self.network().ep.ENgetcoord(self.index)
# extra functionality
@lazy_property
def upstream_links(self):
""" return a list of upstream links """
if self.results != {}:
raise ValueError("This method is only supported for steady state simulations")
links = ObjectCollection()
for link in self.links:
if (link.to_node == self and link.flow >= 1e-3) or (link.from_node == self and link.flow < -1e-3):
links[link.uid] = link
return links
@lazy_property
def downstream_links(self):
""" return a list of downstream nodes """
if self.results != {}:
raise ValueError("This method is only supported for steady state simulations")
links = ObjectCollection()
for link in self.links:
if (link.from_node == self and link.flow >= 1e-3) or (link.to_node == self and link.flow < 1e-3):
links[link.uid] = link
return links
@lazy_property
def inflow(self):
outflow = 0
for link in self.upstream_links:
outflow += abs(link.flow)
return outflow
@lazy_property
def outflow(self):
outflow = 0
for link in self.downstream_links:
outflow += abs(link.flow)
return outflow
""" calculates all the water flowing out of the node """
class Reservoir(Node):
""" EPANET Reservoir Class """
node_type = "Reservoir"
class Junction(Node):
""" EPANET Junction Class """
static_properties = {'elevation': epanet2.EN_ELEVATION, 'basedemand': epanet2.EN_BASEDEMAND, 'emitter': epanet2.EN_EMITTER}
properties = {'head': epanet2.EN_HEAD, 'pressure': epanet2.EN_PRESSURE, 'demand': epanet2.EN_DEMAND}
node_type = "Junction"
@property
def pattern(self):
pattern_index = int(self.get_property(epanet2.EN_PATTERN))
uid = self.network().ep.ENgetpatternid(pattern_index)
return Pattern(uid, self.network())
@pattern.setter
def pattern(self, value):
if isinstance(value, int):
pattern_index = value
elif isinstance(value, str):
pattern_index = self.network().ep.ENgetpatternindex(value)
else:
pattern_index = value.index
self.network().solved = False
self.set_object_value(epanet2.EN_PATTERN, pattern_index)
class Tank(Node):
""" EPANET Tank Class """
node_type = "Tank"
static_properties = {'elevation': epanet2.EN_ELEVATION, 'basedemand': epanet2.EN_BASEDEMAND,
'initvolume': epanet2.EN_INITVOLUME, 'diameter': epanet2.EN_TANKDIAM,
'minvolume': epanet2.EN_MINVOLUME, 'minlevel': epanet2.EN_MINLEVEL,
'maxlevel': epanet2.EN_MAXLEVEL, 'maxvolume': 25, 'tanklevel': epanet2.EN_TANKLEVEL}
properties = {'head': epanet2.EN_HEAD, 'pressure': epanet2.EN_PRESSURE,
'demand': epanet2.EN_DEMAND, 'volume': 24, 'level': epanet2.EN_TANKLEVEL}

| alxgu/ansible | test/units/modules/storage/netapp/test_netapp_e_auditlog.py | Python | gpl-3.0 | 10,758 | 0.003346 |
# (c) 2018, NetApp Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from ansible.modules.storage.netapp.netapp_e_auditlog import AuditLog
from units.modules.utils import AnsibleFailJson, ModuleTestCase, set_module_args
__metaclass__ = type
from units.compat import mock
class AuditLogTests(ModuleTestCase):
REQUIRED_PARAMS = {'api_username': 'rw',
'api_password': 'password',
'api_url': 'http://localhost',
'ssid': '1'}
REQ_FUNC = 'ansible.modules.storage.netapp.netapp_e_auditlog.request'
MAX_RECORDS_MAXIMUM = 50000
MAX_RECORDS_MINIMUM = 100
def _set_args(self, **kwargs):
module_args = self.REQUIRED_PARAMS.copy()
if kwargs is not None:
module_args.update(kwargs)
set_module_args(module_args)
def test_max_records_argument_pass(self):
"""Verify AuditLog arument's max_records and threshold upper and lower boundaries."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
max_records_set = (self.MAX_RECORDS_MINIMUM, 25000, self.MAX_RECORDS_MAXIMUM)
for max_records in max_records_set:
initial["max_records"] = max_records
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": False})):
audit_log = AuditLog()
self.assertTrue(audit_log.max_records == max_records)
def test_max_records_argument_fail(self):
"""Verify AuditLog arument's max_records and threshold upper and lower boundaries."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
max_records_set = (self.MAX_RECORDS_MINIMUM - 1, self.MAX_RECORDS_MAXIMUM + 1)
for max_records in max_records_set:
with self.assertRaisesRegexp(AnsibleFailJson, r"Audit-log max_records count must be between 100 and 50000"):
initial["max_records"] = max_records
self._set_args(**initial)
AuditLog()
def test_threshold_argument_pass(self):
"""Verify AuditLog arument's max_records and threshold upper and lower boundaries."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
threshold_set = (60, 75, 90)
for threshold in threshold_set:
initial["threshold"] = threshold
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": False})):
audit_log = AuditLog()
self.assertTrue(audit_log.threshold == threshold)
def test_threshold_argument_fail(self):
"""Verify AuditLog arument's max_records and threshold upper and lower boundaries."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
threshold_set = (59, 91)
for threshold in threshold_set:
with self.assertRaisesRegexp(AnsibleFailJson, r"Audit-log percent threshold must be between 60 and 90"):
initial["threshold"] = threshold
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": False})):
AuditLog()
def test_is_proxy_pass(self):
"""Verify that True is returned when proxy is used to communicate with storage."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90,
"api_url": "https://10.1.1.10/devmgr/v2"}
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
audit_log = AuditLog()
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
self.assertTrue(audit_log.is_proxy())
def test_is_proxy_fail(self):
"""Verify that AnsibleJsonFail exception is thrown when exception occurs."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
audit_log = AuditLog()
with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to retrieve the webservices about information"):
with mock.patch(self.REQ_FUNC, return_value=Exception()):
audit_log.is_proxy()
def test_get_configuration_pass(self):
"""Validate get configuration does not throw exception when normal request is returned."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
expected = {"auditLogMaxRecords": 1000,
"auditLogLevel": "writeOnly",
"auditLogFullPolicy": "overWrite",
"auditLogWarningThresholdPct": 90}
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
audit_log = AuditLog()
with mock.patch(self.REQ_FUNC, return_value=(200, expected)):
body = audit_log.get_configuration()
self.assertTrue(body == expected)
def test_get_configuration_fail(self):
"""Verify AnsibleJsonFail exception is thrown."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
audit_log = AuditLog()
with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to retrieve the audit-log configuration!"):
with mock.patch(self.REQ_FUNC, return_value=Exception()):
audit_log.get_configuration()
def test_build_configuration_pass(self):
"""Validate configuration changes will force an update."""
response = {"auditLogMaxRecords": 1000,
"auditLogLevel": "writeOnly",
"auditLogFullPolicy": "overWrite",
"auditLogWarningThresholdPct": 90}
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
changes = [{"max_records": 50000},
{"log_level": "all"},
{"full_policy": "preventSystemAccess"},
{"threshold": 75}]
for change in changes:
initial_with_changes = initial.copy()
initial_with_changes.update(change)
self._set_args(**initial_with_changes)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
audit_log = AuditLog()
with mock.patch(self.REQ_FUNC, return_value=(200, response)):
update = audit_log.build_configuration()
self.assertTrue(update)
def test_delete_log_messages_fail(self):
"""Verify AnsibleJsonFail exception is thrown."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
audit_log = AuditLog()
with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to delete audit-log messages!"):
with mock.patch(self.RE

| bendykst/deluge | deluge/ui/gtkui/preferences.py | Python | gpl-3.0 | 58,154 | 0.002167 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007, 2008 Andrew Resch <andrewresch@gmail.com>
# Copyright (C) 2011 Pedro Algarvio <pedro@algarvio.me>
#
# This file is part of Deluge and is licensed under GNU General Public License 3.0, or later, with
# the additional special exception to link portions of this program with the OpenSSL library.
# See LICENSE for more details.
#
import logging
import os
from hashlib import sha1 as sha
import gtk
import pygtk
import deluge.common
import deluge.component as component
from deluge.configmanager import ConfigManager, get_config_dir
from deluge.error import AuthManagerError, NotAuthorizedError
from deluge.ui.client import client
from deluge.ui.gtkui.common import associate_magnet_links, get_deluge_icon
from deluge.ui.gtkui.dialogs import AccountDialog, ErrorDialog, InformationDialog, YesNoDialog
from deluge.ui.gtkui.path_chooser import PathChooser
pygtk.require('2.0')
log = logging.getLogger(__name__)
ACCOUNTS_USERNAME, ACCOUNTS_LEVEL, ACCOUNTS_PASSWORD = range(3)
COLOR_MISSING, COLOR_WAITING, COLOR_DOWNLOADING, COLOR_COMPLETED = range(4)
COLOR_STATES = {
"missing": COLOR_MISSING,
"waiting": COLOR_WAITING,
"downloading": COLOR_DOWNLOADING,
"completed": COLOR_COMPLETED
}
class Preferences(component.Component):
def __init__(self):
component.Component.__init__(self, "Preferences")
self.builder = gtk.Builder()
self.builder.add_from_file(deluge.common.resource_filename(
"deluge.ui.gtkui", os.path.join("glade", "preferences_dialog.ui")
))
self.pref_dialog = self.builder.get_object("pref_dialog")
self.pref_dialog.set_transient_for(component.get("MainWindow").window)
self.pref_dialog.set_icon(get_deluge_icon())
self.treeview = self.builder.get_object("treeview")
self.notebook = self.builder.get_object("notebook")
self.gtkui_config = ConfigManager("gtkui.conf")
self.window_open = False
self.load_pref_dialog_state()
self.builder.get_object("image_magnet").set_from_file(
deluge.common.get_pixmap("magnet.png"))
# Hide the unused associate magnet button on OSX see: #2420
if deluge.common.osx_check():
self.builder.get_object("button_associate_magnet").hide()
# Setup the liststore for the categories (tab pages)
self.liststore = gtk.ListStore(int, str)
self.treeview.set_model(self.liststore)
render = gtk.CellRendererText()
column = gtk.TreeViewColumn(_("Categories"), render, text=1)
self.treeview.append_column(column)
# Add the default categories
i = 0
for category in (_("Interface"), _("Downloads"), _("Bandwidth"), _("Queue"), _("Network"),
_("Proxy"), _("Cache"), _("Other"), _("Daemon"), _("Plugins"), "_separator_"):
self.liststore.append([i, category])
i += 1
def set_separator(model, iter, data=None):
if "_separator_" == model.get_value(iter, 1):
return True
self.treeview.set_row_separator_func(set_separator)
# Setup accounts tab lisview
self.accounts_levels_mapping = None
self.accounts_authlevel = self.builder.get_object("accounts_authlevel")
self.accounts_liststore = gtk.ListStore(str, str, str, int)
self.accounts_liststore.set_sort_column_id(ACCOUNTS_USERNAME, gtk.SORT_ASCENDING)
self.accounts_listview = self.builder.get_object("accounts_listview")
self.accounts_listview.append_column(
gtk.TreeViewColumn(
_("Username"), gtk.CellRendererText(), text=ACCOUNTS_USERNAME
)
)
self.accounts_listview.append_column(
gtk.TreeViewColumn(
_("Level"), gtk.CellRendererText(), text=ACCOUNTS_LEVEL
)
)
password_column = gtk.TreeViewColumn(
'password', gtk.CellRendererText(), text=ACCOUNTS_PASSWORD
)
self.accounts_listview.append_column(password_column)
password_column.set_visible(False)
self.accounts_listview.set_model(self.accounts_liststore)
self.accounts_listview.get_selection().connect(
"changed", self._on_accounts_selection_changed
)
self.accounts_frame = self.builder.get_object("AccountsFrame")
# Setup plugin tab listview
# The third entry is for holding translated plugin names
self.plugin_liststore = gtk.ListStore(str, bool, str)
self.plugin_liststore.set_sort_column_id(0, gtk.SORT_ASCENDING)
self.plugin_listview = self.builder.get_object("plugin_listview")
self.plugin_listview.set_model(self.plugin_liststore)
render = gtk.CellRendererToggle()
render.connect("toggled", self.on_plugin_toggled)
render.set_property("activatable", True)
self.plugin_listview.append_column(
gtk.TreeViewColumn(_("Enabled"), render, active=1))
self.plugin_listview.append_column(
gtk.TreeViewColumn(_("Plugin"), gtk.CellRendererText(), text=2))
# Connect to the 'changed' event of TreeViewSelection to get selection
# changes.
self.treeview.get_selection().connect(
"changed", self.on_selection_changed
)
self.plugin_listview.get_selection().connect(
"changed", self.on_plugin_selection_changed
)
self.builder.connect_signals({
"on_pref_dialog_delete_event": self.on_pref_dialog_delete_event,
"on_button_ok_clicked": self.on_button_ok_clicked,
"on_button_apply_clicked": self.on_button_apply_clicked,
"on_button_cancel_clicked": self.on_button_cancel_clicked,
"on_toggle": self.on_toggle,
"on_test_port_clicked": self.on_test_port_clicked,
"on_button_plugin_install_clicked": self._on_button_plugin_install_clicked,
"on_button_rescan_plugins_clicked": self._on_button_rescan_plugins_clicked,
"on_button_find_plugins_clicked": self._on_button_find_plugins_clicked,
"on_button_cache_refresh_clicked": self._on_button_cache_refresh_clicked,
"on_combo_encryption_changed": self._on_combo_encryption_changed,
"on_combo_proxy_type_changed": self._on_combo_proxy_type_changed,
"on_button_associate_magnet_clicked": self._on_button_associate_magnet_clicked,
"on_accounts_add_clicked": self._on_accounts_add_clicked,
"on_accounts_delete_clicked": self._on_accounts_delete_clicked,
"on_accounts_edit_clicked": self._on_accounts_edit_clicked,
"on_piecesbar_toggle_toggled": self._on_piecesbar_toggle_toggled,
"on_completed_color_set": self._on_completed_color_set,
"on_revert_color_completed_clicked": self._on_revert_color_completed_clicked,
"on_downloading_color_set": self._on_downloading_color_set,
"on_revert_color_downloading_clicked": self._on_revert_color_downloading_clicked,
"on_waiting_color_set": self._on_waiting_color_set,
"on_revert_color_waiting_clicked": self._on_revert_color_waiting_clicked,
"on_missing_color_set": self._on_missing_color_set,
"on_revert_color_missing_clicked": self._on_revert_color_missing_clicked,
"on_pref_dialog_configure_event": self.on_pref_dialog_configure_event,
"on_checkbutton_language_toggled": self._on_checkbutton_language_toggled,
})
if not deluge.common.osx_check() and not deluge.common.windows_check():
try:
import appindicator
assert appindicator # silence pyflakes
except ImportError:
pass
else:
self.builder.get_object("alignment_tray_type").set_visible(True)
from deluge.ui.gtkui.gtkui import DEFAULT_PREFS
self.COLOR_DEFAULTS = {}
for key in ("missing", "waiting", "downloading", "completed"):
self.COLOR_DEFAULTS[key] = DEFAULT_PREFS["pieces_color_%s" % key][:]
del DEFAULT_PREFS
awalin/rwis | views.py | Python | lgpl-3.0 | 4,395 | 0.020023
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.core.context_processors import csrf
from django.views.generic.base import View, TemplateView
from django.views import generic
from django.shortcuts import render_to_response
from django.template import RequestContext, loader, Context
from django.utils import simplejson
from settings import CLARUS_DBCONN_STRING
import psycopg2, sys, pprint, json
from datetime import datetime
from django.views.decorators.csrf import ensure_csrf_cookie
import logging
from array import *
# logger = logging.getLogger('print')
class LoadCanvas(View):
template_name= "index.html"
def get(self, request, *args, **kwargs):
c = {}
c.update(csrf(request))
return render_to_response(self.template_name, c)
class FetchObservations(View):
# template_name = "timeline/timelines.html"
obsType = array('i'); # 575
startTime = '' ;
stationID ='';
#@route ('/observe', method='POST')
def post(self, request, *args, **kwargs):
try:
json_data = simplejson.loads(request.body)
print 'Raw Data: "%s"' % request.body
# print json_data
self.startTime = str(json_data['startTime'])
# self.startTime = '2013-07-09 00:00:00';
# print self.startTime
self.stationID = json_data['stationID']
stationList = ",".join([str(x) for x in self.stationID])
# print stationList
self.obsType = json_data['obsType']
# print self.obsType
conn_string = CLARUS_DBCONN_STRING # get a connection, if a connect cannot be made an exception will be raised here
conn = psycopg2.connect(conn_string) # conn.cursor will return a cursor object, you can use this cursor to perform queries
cursor = conn.cursor()
r = [];
# print self.obsType[0]
# print self.obsType[1]
for obs in self.obsType:
# execute our Query
data = {}
obsStr = str(obs)
cursor.execute("SELECT "+
"TO_CHAR( (date_trunc('hour', tstamp) + INTERVAL '15 min' * ROUND(date_part('minute', tstamp) / 15.0)), 'YYYY-MM-DD HH24:MI' ) AS tstamp, "+
"AVG( metric_value ) AS metric_value " +
"FROM clarus.observation, clarus.sensor "+
"WHERE clarus.observation.sensor_id=clarus.sensor.sensor_id "+
"AND station_id IN (" + stationList + ") AND observation_type = " + obsStr + " "+
"AND tstamp >= (timestamp '"+self.startTime+"' - INTERVAL '1 week') AND tstamp < timestamp '"+self.startTime+"' " +
"GROUP BY date_trunc('hour', tstamp) + INTERVAL '15 min' * ROUND(date_part('minute', tstamp) / 15.0) "+
"ORDER BY tstamp asc" );
data['rows'] = [dict((cursor.description[i][0], value)
for i, value in enumerate(row)) for row in cursor.fetchall()]
# this query is no longer needed as the metadata is all loaded separately
#cursor.execute("SELECT name, description, metric_abbreviation "+
# "FROM clarus.observation_type_lkp "+
# "WHERE clarus.observation_type_lkp.observation_type= "+ obs +"");
#data['title'] = ([dict((cursor.description[i][0], value)
# for i, value in enumerate(row)) for row in cursor.fetchall()])
data['title'] = obs
r.append(data);
cursor.connection.close();
# now process it
json_output = simplejson.dumps(r)
return HttpResponse(json_output, content_type="application/json")
except:
return HttpResponse("<h1>Error in running query</h1>")
# logger.error('Getting observation data failed')
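# Illustrative sketch, not part of the original view: the GROUP BY expression above rounds
# each timestamp to the nearest quarter hour before averaging metric values. The same
# rounding expressed in Python (the helper name is made up):
def _round_to_quarter_hour(ts):
    """Round a datetime to the nearest 15 minutes, mirroring the SQL expression."""
    from datetime import timedelta
    base = ts.replace(minute=0, second=0, microsecond=0)
    return base + timedelta(minutes=15 * int(round(ts.minute / 15.0)))
# e.g. 2013-07-08 13:38 -> 13:45, and 2013-07-08 13:07 -> 13:00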
airbnb/airflow | tests/dags/test_heartbeat_failed_fast.py | Python | apache-2.0 | 1,163 | 0.00086
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
from airflow.models import DAG
from airflow.operators.bash import BashOperator
DEFAULT_DATE = datetime(2016, 1, 1)
args = {
'owner': 'airflow',
'start_date': DEFAULT_DATE,
}
dag = DAG(dag_id='test_heartbeat_failed_fast', default_args=args)
task = BashOperator(task_id='test_heartbeat_failed_fast_op', bash_command='sleep 7', dag=dag)
gandreello/openthread | tests/scripts/thread-cert/Cert_7_1_04_BorderRouterAsRouter.py | Python | bsd-3-clause | 4,738 | 0.000633
#!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import time
import unittest
import config
import node
LEADER = 1
ROUTER = 2
ED2 = 3
SED2 = 4
MTDS = [SED2, ED2]
class Cert_7_1_4_BorderRouterAsRouter(unittest.TestCase):
def setUp(self):
self.simulator = config.create_default_simulator()
self.nodes = {}
for i in range(1,5):
self.nodes[i] = node.Node(i, (i in MTDS), simulator=self.simulator)
self.nodes[LEADER].set_panid(0xface)
        self.nodes[LEADER].set_mode('rsdn')
self.nodes[LEADER].add_whitelist(self.nodes[ROUTER].get_addr64())
self.nodes[LEADER].enable_whitelist()
self.nodes[ROUTER].set_panid(0xface)
self.nodes[ROUTER].set_mode('rsdn')
self.nodes[ROUTER].add_whitelist(self.nodes[LEADER].get_addr64())
self.nodes[ROUTER].add_whitelist(self.nodes[ED2].get_addr64())
self.nodes[ROUTER].add_whitelist(self.nodes[SED2].get_addr64())
self.nodes[ROUTER].enable_whitelist()
self.nodes[ROUTER].set_router_selection_jitter(1)
self.nodes[ED2].set_panid(0xface)
self.nodes[ED2].set_mode('rsn')
self.nodes[ED2].add_whitelist(self.nodes[ROUTER].get_addr64())
self.nodes[ED2].enable_whitelist()
self.nodes[SED2].set_panid(0xface)
self.nodes[SED2].set_mode('s')
self.nodes[SED2].add_whitelist(self.nodes[ROUTER].get_addr64())
self.nodes[SED2].enable_whitelist()
self.nodes[SED2].set_timeout(config.DEFAULT_CHILD_TIMEOUT)
def tearDown(self):
for node in list(self.nodes.values()):
node.stop()
node.destroy()
self.simulator.stop()
def test(self):
self.nodes[LEADER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
self.nodes[ROUTER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[ROUTER].get_state(), 'router')
self.nodes[ED2].start()
self.simulator.go(5)
self.assertEqual(self.nodes[ED2].get_state(), 'child')
self.nodes[SED2].start()
self.simulator.go(5)
self.assertEqual(self.nodes[SED2].get_state(), 'child')
self.nodes[ROUTER].add_prefix('2001:2:0:1::/64', 'paros')
self.nodes[ROUTER].add_prefix('2001:2:0:2::/64', 'paro')
self.nodes[ROUTER].register_netdata()
self.simulator.go(5)
addrs = self.nodes[ED2].get_addrs()
self.assertTrue(any('2001:2:0:1' in addr[0:10] for addr in addrs))
self.assertTrue(any('2001:2:0:2' in addr[0:10] for addr in addrs))
for addr in addrs:
if addr[0:10] == '2001:2:0:1' or addr[0:10] == '2001:2:0:2':
self.assertTrue(self.nodes[LEADER].ping(addr))
addrs = self.nodes[SED2].get_addrs()
self.assertTrue(any('2001:2:0:1' in addr[0:10] for addr in addrs))
self.assertFalse(any('2001:2:0:2' in addr[0:10] for addr in addrs))
for addr in addrs:
if addr[0:10] == '2001:2:0:1' or addr[0:10] == '2001:2:0:2':
self.assertTrue(self.nodes[LEADER].ping(addr))
if __name__ == '__main__':
unittest.main()
Podcastor/podcastor-backend | src/app/api/urls.py | Python | gpl-2.0 | 1,923 | 0
# -*- coding: utf-8 -*-
from rest_framework.routers import (Route,
DynamicDetailRoute,
SimpleRouter,
DynamicListRoute)
from app.api.account.views import AccountViewSet
from app.api.podcast.views import PodcastViewSet, EpisodeViewSet
class CustomRouter(SimpleRouter):
"""
    A custom router exposing the standard list/create and detail routes, plus
    dynamically generated @list_route/@detail_route endpoints.
"""
routes = [
Route(
url=r'^{prefix}{trailing_slash}$',
mapping={
'get': 'list',
'post': 'create'
},
name='{basename}-list',
initkwargs={'suffix': 'List'}
),
# Dynamically generated list routes.
# Generated using @list_route decorator
# on methods of the viewset.
DynamicListRoute(
url=r'^{prefix}/{methodnamehyphen}{trailing_slash}$',
name='{basename}-{methodnamehyphen}',
initkwargs={}
),
# Detail route.
Route(
url=r'^{prefix}/{lookup}{trailing_slash}$',
mapping={
'get': 'retrieve',
'put': 'update',
'patch': 'partial_update',
'delete': 'destroy'
},
name='{basename}-detail',
initkwargs={'suffix': 'Instance'}
),
# Dynamically generated detail routes.
# Generated using @detail_route decorator
        # on methods of the viewset.
        DynamicDetailRoute(
            url=r'^{prefix}/{lookup}/{methodnamehyphen}{trailing_slash}$',
name='{basename}-{methodnamehyphen}',
initkwargs={}
),
]
router = CustomRouter()
router.register(r'accounts', AccountViewSet)
router.register(r'podcasts', PodcastViewSet)
router.register(r'episodes', EpisodeViewSet)
urlpatterns = router.urls
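# Illustrative sketch, not part of the original file: with the registrations above, the
# router emits the usual list/detail patterns plus one URL per @list_route/@detail_route
# method on a viewset (the method and route names below are hypothetical), e.g.
#   GET/POST /podcasts/                       -> PodcastViewSet list / create
#   GET      /podcasts/{pk}/                  -> PodcastViewSet retrieve
#   GET      /podcasts/{pk}/publish-episode/  -> a @detail_route method publish_episode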
pknight007/electrum-vtc | plugins/keepkey/vtc.py | Python | mit | 353 | 0
from ..trezor.qt_generic import QtPlugin
from keepkey import KeepKeyPlugin
class Plugin(KeepKeyPlugin, QtPlugin):
icon_paired = ":icons/keepkey.png"
icon_unpaired = ":icons/keepkey_unpaired.png"
@classmethod
    def pin_matrix_widget_class(self):
from keepkeylib.qt.pinmatrix import PinMatrixWidget
return PinMatrixWidget
djf604/viewer | util/link_out_scraper.py | Python | apache-2.0 | 1,060 | 0.003774
__author__ = 'Jason Grundstad'
from django.conf import settings
from pyvirtualdisplay import Display
from selenium import webdriver
from bs4 import BeautifulSoup
import json
MD_ANDERSON_URL = 'https://pct.mdanderson.org/#/home'
MD_ANDERSON_OUTFILE = settings.LINKS_OUT + 'mdanderson.json'
def scrape_mdanderson():
"""
Scrape the rendered mdanderson page for gene names, create a .json
of links
:rtype : dict
"""
gene_list = dict()
d = Display(visible=0, size=(800,600)) # requires xvfb for headless mode
d.start()
driver = webdriver.Firefox()
driver.get(MD_ANDERSON_URL)
soup = BeautifulSoup(driver.page_source, 'html.parser')
for a_tag in soup.find_all("a", {'class':'ng-binding'}):
gene_list[a_tag.text] = "{}/{}?section=Overview".format(
MD_ANDERSON_URL,
a_tag.text)
    gene_list_json = json.dumps(gene_list)
    with open(MD_ANDERSON_OUTFILE, 'w') as f:
        f.write(gene_list_json)  # write the already-encoded JSON; json.dump(gene_list_json, f) would double-encode it
def main():
scrape_mdanderson()
if __name__ == '__main__':
main()
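# Illustrative note, not part of the original module: the output file maps gene symbols to
# overview URLs, roughly {"TP53": "https://pct.mdanderson.org/#/home/TP53?section=Overview", ...};
# the gene symbol shown is a made-up example of the scraped anchor text.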
avinassh/learning-tornado | tornado-book/databases/definitions_readwrite.py | Python | mit | 1,628 | 0.023956
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import pymongo
from tornado.options import define, options
define("port", default=8000, help="run on the given port", type=int)
# put your mongodb username and password
# "mongodb://username:password@staff.mongohq.com:someport/mongodb_name"
# following is obtained from https://app.mongohq.com/username/mongo/mongodbname/admin
MONGOHQ_URL = "mongodb://avi:test@paulo.mongohq.com:10065/testme"
class Application(tornado.web.Application):
def __init__(self):
handlers = [(r"/(\w+)", WordHandler)]
# conn = pymongo.Connection("localhost", 27017)
# self.db = conn["definitions"]
conn = pymongo.Connection(MONGOHQ_URL)
self.db = conn.testme
tornado.web.Application.__init__(self, handlers, debug=True)
class WordHandler(tornado.web.RequestHandler):
def get(self, word):
coll = self.application.db.words
word_doc = coll.find_one({"word": word})
if word_doc:
del word_doc["_id"]
self.write(word_doc)
else:
self.set_status(404)
def post(self, word):
definition = self.get_argument("definition")
coll = self.application.db.words
        word_doc = coll.find_one({"word": word})
if word_doc:
word_doc['definition'] = definition
coll.save(word_doc)
else:
word_doc = {'word': word, 'definition': definition}
            coll.insert(word_doc)
del word_doc["_id"]
self.write(word_doc)
def main():
tornado.options.parse_command_line()
http_server = tornado.httpserver.HTTPServer(Application())
http_server.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
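# Illustrative usage sketch, not part of the original file; the word and definition are
# made-up examples of exercising the handler with curl:
#   $ curl -d definition="a small anchored float" http://localhost:8000/buoy
#   {"word": "buoy", "definition": "a small anchored float"}
#   $ curl http://localhost:8000/buoy
#   {"word": "buoy", "definition": "a small anchored float"}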
HazyResearch/snorkel | snorkel/learning/tensorflow/rnn/text_rnn.py | Python | apache-2.0 | 1,044 | 0.001916
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import *
import numpy as np
from .rnn_base import RNNBase
from .utils import SymbolTable
class TextRNN(RNNBase):
"""TextRNN for strings of text."""
def _preprocess_data(self, candidates, extend=False):
"""Convert candidate sentences to lookup sequences
:param candidates: candidates to process
:param extend: extend symbol table for tokens (train), or lookup (test)?
"""
if not hasattr(self, 'word_dict'):
self.word_dict = SymbolTable()
data, ends = [], []
for candidate in candidates:
            toks = candidate.get_contexts()[0].text.split()
# Either extend word table or retrieve from it
f = self.word_dict.get if extend else self.word_dict.lookup
            data.append(np.array(list(map(f, toks))))
ends.append(len(toks))
return data, ends
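# Illustrative sketch, not part of the original class: during training the symbol table is
# grown, while at test time unseen tokens fall back to the table's unknown id (variable
# names below are made up).
#   train_data, train_ends = text_rnn._preprocess_data(train_cands, extend=True)
#   test_data, test_ends = text_rnn._preprocess_data(test_cands, extend=False)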
eco32i/ngs | ngs/wsgi.py | Python | mit | 1,410 | 0.000709
"""
WSGI config for ngs project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "ngs.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ngs.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
aino/pymail365 | setup.py | Python | bsd-3-clause | 688 | 0.001453
from setuptools import setup
setup(
name='pymail365',
version='0.1',
description='A python client for sending mail using Microsoft Office 365 rest service.',
    long_description=open('README.rst').read(),
author='Mikko Hellsing',
author_email='mikko@aino.se',
license='BSD',
url='https://github.com/aino/pymail365',
packages=['pymail365'],
zip_safe=False,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
)
drwahl/i3pystatus | i3pystatus/window_title.py | Python | mit | 3,544 | 0.000847
# -*- coding: utf-8 -*-
from i3pystatus import Module
from threading import Thread
import i3ipc
class WindowTitle(Module):
"""
Display the current window title with async update.
Uses asynchronous update via i3 IPC events.
    Provides an instant title update only when it is required.
    Fork of window_tile_async from py3status by Anon1234 https://github.com/Anon1234
Requires the PyPI package `i3ipc`.
    .. rubric:: Available formatters
* `{title}` — title of current focused window
* `{class_name}` - name of application class
@author jok
@license BSD
"""
settings = (
("format", "format string."),
("always_show", "do not hide the title when it can be already visible"),
("empty_title", "string that will be shown instead of the title when the title is hidden"),
("max_width", "maximum width of title"),
("color", "text color"),
)
format = "{title}"
always_show = False
empty_title = ""
max_width = 79
color = "#FFFFFF"
def init(self):
self.title = self.empty_title
self.output = {
"full_text": self.title,
"color": self.color,
}
# we are listening to i3 events in a separate thread
t = Thread(target=self._loop)
t.daemon = True
t.start()
def get_title(self, conn):
        tree = conn.get_tree()
w = tree.find_focused()
p = w.parent
# don't show window title when the window already has means
# to display it
if (not self.always_show
and (w.border == "normal"
or w.type == "workspace"
or (p.layout in ("stacked", "tabbed") and len(p.nodes) > 1))):
return self.empty_title
else:
            title = w.name
class_name = w.window_class
if len(title) > self.max_width:
title = title[:self.max_width - 1] + "…"
return self.format.format(title=title, class_name=class_name)
def update_title(self, conn, e):
# catch only focused window title updates
title_changed = hasattr(e, "container") and e.container.focused
# check if we need to update title due to changes
# in the workspace layout
layout_changed = (
hasattr(e, "binding")
and (e.binding.command.startswith("layout")
or e.binding.command.startswith("move container")
or e.binding.command.startswith("border"))
)
if title_changed or layout_changed:
self.title = self.get_title(conn)
self.update_display()
def clear_title(self, *args):
self.title = self.empty_title
self.update_display()
def update_display(self):
self.output = {
"full_text": self.title,
"color": self.color,
}
def _loop(self):
conn = i3ipc.Connection()
self.title = self.get_title(conn) # set title on startup
self.update_display()
# The order of following callbacks is important!
# clears the title on empty ws
conn.on('workspace::focus', self.clear_title)
# clears the title when the last window on ws was closed
conn.on("window::close", self.clear_title)
# listens for events which can trigger the title update
conn.on("window::title", self.update_title)
conn.on("window::focus", self.update_title)
conn.main() # run the event loop
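# Illustrative sketch, not part of this module: registering the module from an i3pystatus
# configuration script; the option values are made-up examples.
#   from i3pystatus import Status
#   status = Status()
#   status.register("window_title", format="{class_name}: {title}", max_width=60)
#   status.run()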
TaskEvolution/Task-Coach-Evolution | taskcoach/taskcoachlib/thirdparty/src/reportlab/pdfgen/pdfimages.py | Python | gpl-3.0 | 8,517 | 0.009041
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/pdfgen/pdfimages.py
__version__=''' $Id$ '''
__doc__="""
Image functionality sliced out of canvas.py for generalization
"""
import os
import string
from types import StringType
import reportlab
from reportlab import rl_config
from reportlab.pdfbase import pdfutils
from reportlab.pdfbase import pdfdoc
from reportlab.lib.utils import fp_str, getStringIO
from reportlab.lib.utils import import_zlib, haveImages
from reportlab.lib.boxstuff import aspectRatioFix
class PDFImage:
"""Wrapper around different "image sources". You can make images
from a PIL Image object, a filename (in which case it uses PIL),
an image we previously cached (optimisation, hardly used these
days) or a JPEG (which PDF supports natively)."""
def __init__(self, image, x,y, width=None, height=None, caching=0):
self.image = image
self.x = x
self.y = y
self.width = width
self.height = height
self.filename = None
self.imageCaching = caching
# the following facts need to be determined,
# whatever the source. Declare what they are
# here for clarity.
self.colorSpace = 'DeviceRGB'
self.bitsPerComponent = 8
self.filters = []
self.source = None # JPEG or PIL, set later
self.getImageData()
def jpg_imagedata(self):
#directly process JPEG files
#open file, needs some error handling!!
fp = open(self.image, 'rb')
try:
result = self._jpg_imagedata(fp)
finally:
fp.close()
return result
def _jpg_imagedata(self,imageFile):
info = pdfutils.readJPEGInfo(imageFile)
self.source = 'JPEG'
imgwidth, imgheight = info[0], info[1]
if info[2] == 1:
colorSpace = 'DeviceGray'
elif info[2] == 3:
colorSpace = 'DeviceRGB'
else: #maybe should generate an error, is this right for CMYK?
colorSpace = 'DeviceCMYK'
imageFile.seek(0) #reset file pointer
imagedata = []
#imagedata.append('BI /Width %d /Height /BitsPerComponent 8 /ColorSpace /%s /Filter [/Filter [ /ASCII85Decode /DCTDecode] ID' % (info[0], info[1], colorSpace))
imagedata.append('BI /W %d /H %d /BPC 8 /CS /%s /F [%s/DCT] ID' % (imgwidth, imgheight, colorSpace, rl_config.useA85 and '/A85 ' or ''))
#write in blocks of (??) 60 characters per line to a list
data = imageFile.read()
if rl_config.useA85:
data = pdfutils._AsciiBase85Encode(data)
pdfutils._chunker(data,imagedata)
imagedata.append('EI')
return (imagedata, imgwidth, imgheight)
def cache_imagedata(self):
image = self.image
if not pdfutils.cachedImageExists(image):
zlib = import_zlib()
if not zlib: return
if not haveImages: return
pdfutils.cacheImageFile(image)
#now we have one cached, slurp it in
cachedname = os.path.splitext(image)[0] + (rl_config.useA85 and '.a85' or '.bin')
imagedata = open(cachedname,'rb').readlines()
#trim off newlines...
imagedata = map(string.strip, imagedata)
return imagedata
def PIL_imagedata(self):
image = self.image
if image.format=='JPEG':
            fp = image.fp
fp.seek(0)
return self._jpg_imagedata(fp)
self.source = 'PIL'
zlib = import_zlib()
if not zlib: return
bpc = 8
# Use the colorSpace in the image
if image.mode == 'CMYK':
myimage = image
colorSpace = 'DeviceCMYK'
bpp = 4
elif image.mode == '1':
myimage = image
colorSpace = 'DeviceGray'
bpp = 1
bpc = 1
        elif image.mode == 'L':
myimage = image
colorSpace = 'DeviceGray'
bpp = 1
else:
myimage = image.convert('RGB')
colorSpace = 'RGB'
bpp = 3
imgwidth, imgheight = myimage.size
# this describes what is in the image itself
# *NB* according to the spec you can only use the short form in inline images
imagedata=['BI /W %d /H %d /BPC %d /CS /%s /F [%s/Fl] ID' % (imgwidth, imgheight, bpc, colorSpace, rl_config.useA85 and '/A85 ' or '')]
#use a flate filter and, optionally, Ascii Base 85 to compress
raw = myimage.tostring()
rowstride = (imgwidth*bpc*bpp+7)/8
assert len(raw) == rowstride*imgheight, "Wrong amount of data for image"
data = zlib.compress(raw) #this bit is very fast...
if rl_config.useA85:
data = pdfutils._AsciiBase85Encode(data) #...sadly this may not be
#append in blocks of 60 characters
pdfutils._chunker(data,imagedata)
imagedata.append('EI')
return (imagedata, imgwidth, imgheight)
def non_jpg_imagedata(self,image):
if not self.imageCaching:
imagedata = pdfutils.cacheImageFile(image,returnInMemory=1)
else:
imagedata = self.cache_imagedata()
words = string.split(imagedata[1])
imgwidth = string.atoi(words[1])
imgheight = string.atoi(words[3])
return imagedata, imgwidth, imgheight
def getImageData(self,preserveAspectRatio=False):
"Gets data, height, width - whatever type of image"
image = self.image
if type(image) == StringType:
self.filename = image
if os.path.splitext(image)[1] in ['.jpg', '.JPG', '.jpeg', '.JPEG']:
try:
imagedata, imgwidth, imgheight = self.jpg_imagedata()
except:
imagedata, imgwidth, imgheight = self.non_jpg_imagedata(image) #try for normal kind of image
else:
imagedata, imgwidth, imgheight = self.non_jpg_imagedata(image)
else:
import sys
if sys.platform[0:4] == 'java':
#jython, PIL not available
imagedata, imgwidth, imgheight = self.JAVA_imagedata()
else:
imagedata, imgwidth, imgheight = self.PIL_imagedata()
self.imageData = imagedata
self.imgwidth = imgwidth
self.imgheight = imgheight
self.width = self.width or imgwidth
self.height = self.height or imgheight
def drawInlineImage(self, canvas, preserveAspectRatio=False,anchor='sw'):
"""Draw an Image into the specified rectangle. If width and
height are omitted, they are calculated from the image size.
Also allow file names as well as images. This allows a
caching mechanism"""
width = self.width
height = self.height
if width<1e-6 or height<1e-6: return False
x,y,self.width,self.height, scaled = aspectRatioFix(preserveAspectRatio,anchor,self.x,self.y,width,height,self.imgwidth,self.imgheight)
# this says where and how big to draw it
if not canvas.bottomup: y = y+height
canvas._code.append('q %s 0 0 %s cm' % (fp_str(self.width), fp_str(self.height, x, y)))
# self._code.extend(imagedata) if >=python-1.5.2
for line in self.imageData:
canvas._code.append(line)
canvas._code.append('Q')
return True
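    # Illustrative sketch, not part of the original module: typical direct use with a
    # canvas object; the file name, coordinates and canvas variable are made-up examples.
    #   img = PDFImage('logo.jpg', x=72, y=600, width=144, height=72)
    #   img.drawInlineImage(canv)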
def format(self, document):
"""Allow it to be used within pdfdoc framework. This only
defines how it is stored, not how it is drawn later."""
dict = pdfdoc.PDFDictionary()
dict['Type'] = '/XObject'
dict['Subtype'] = '/Image'
dict['Width'] = self.width
dict['Height'] = self.height
dict['BitsPerComponent'] = 8
dict['ColorSpace'] = pdfdoc.PDFName(self.colorSpace)
content = string.join(self.imageData[3:-1], '\n') + '\n'
strm = pdfdoc.PDFStream(dictionary=dict, content=content)
return strm.format(document)
if __name__=='__main__':
srcfile = os.path.join(
os
google-research/falken | service/api/stop_session_handler.py | Python | apache-2.0 | 9,830 | 0.005086
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Handles stopping of an active session."""
from absl import logging
from api import model_selector
from api import unique_id
import common.generate_protos # pylint: disable=unused-import
from data_store import resource_store
import data_store_pb2
import falken_service_pb2
from google.rpc import code_pb2
import session_pb2
def stop_session(request, context, data_store):
"""Stops an active session.
Args:
request: falken_service_pb2.StopSessionRequest containing information
about the session requested to be stopped.
context: grpc.ServicerContext containing context about the RPC.
data_store: data_store.DataStore object to update the session.
Returns:
falken_service_pb2.StopSessionResponse containing the snapshot_id for the
session.
Raises:
Exception: The gRPC context is aborted when the session is not found in
data_store, or other issues occur in the handling.
"""
logging.debug('StopSession called for project_id %s with session %s.',
request.project_id, request.session.name)
session = None
session_resource_id = data_store.resource_id_from_proto_ids(
project_id=request.session.project_id,
brain_id=request.session.brain_id,
session_id=request.session.name)
try:
session = data_store.read(session_resource_id)
except FileNotFoundError as e:
context.abort(
code_pb2.NOT_FOUND,
f'Failed to find session {session_resource_id.session} in data_store. '
f'{e}')
selector = model_selector.ModelSelector(data_store, session_resource_id)
try:
model_resource_id = selector.select_final_model()
except FileNotFoundError as e:
model_resource_id = None
except Exception as e: # pylint: disable=broad-except
context.abort(code_pb2.NOT_FOUND, f'Error while selecting final model. {e}')
snapshot_id = _get_snapshot_id(
session_resource_id, session, model_resource_id, data_store, context)
# Update session to be ended with snapshot ID.
session.snapshot = snapshot_id
data_store.write_stopped_session(session)
return falken_service_pb2.StopSessionResponse(snapshot_id=snapshot_id)
def _get_snapshot_id(
session_resource_id, session, model_resource_id, data_store, context):
"""Get snapshot ID for the session differentiated by session type.
Creates a new snapshot if appropriate.
Args:
session_resource_id: resource_id.FalkenResourceID for the session being
stopped.
session: data_store_pb2.Session instance.
model_resource_id: resource_id.FalkenResourceID for the model that was
selected by model_selector for this session.
data_store: data_store.DataStore instance to read and write the new
snapshot into.
context: grpc.ServicerContext containing context to abort when issues occur.
Returns:
Snapshot ID string.
Raises:
The gRPC context is aborted when issues occur with reading/writing the
snapshot.
"""
# If training happened (training or evaluation), create new snapshot,
# otherwise return initial snapshot.
if session.session_type == session_pb2.INFERENCE:
# End of an inference session, return the initial snapshot.
try:
return _single_starting_snapshot(
session_resource_id, session.starting_snapshots)
except ValueError as e:
context.abort(code_pb2.INVALID_ARGUMENT, e)
elif session.session_type == session_pb2.INTERACTIVE_TRAINING:
try:
return _create_or_use_existing_snapshot(
session_resource_id, session.starting_snapshots, model_resource_id,
data_store, expect_starting_snapshot=False)
except (FileNotFoundError, resource_store.InternalError,
ValueError) as e:
context.abort(
code_pb2.NOT_FOUND, 'Failed to create snapshot for training session '
f'{session_resource_id.session}. {e}')
elif session.session_type == session_pb2.EVALUATION:
try:
return _create_or_use_existing_snapshot(
session_resource_id, session.starting_snapshots, model_resource_id,
data_store, expect_starting_snapshot=True)
except (FileNotFoundError, resource_store.InternalError,
ValueError) as e:
context.abort(
code_pb2.NOT_FOUND, 'Failed to create snapshot for evaluation session'
f' {session_resource_id.session}. {e}')
else:
context.abort(
code_pb2.INVALID_ARGUMENT,
f'Unsupported session_type: {session.session_type} found in '
f'{session.session_id}.')
def _single_starting_snapshot(session_resource_id, starting_snapshots):
"""Returns the single starting snapshot from a list of snapshot IDs.
Args:
session_resource_id: resource_id.FalkenResourceId for the session with the
starting snapshots.
starting_snapshots: List of snapshot IDs specified as starting snapshots
for the session.
Returns:
Starting snapshot ID string.
Raises:
ValueError if the length of starting snapshots is not 1.
"""
if len(starting_snapshots) != 1:
raise ValueError(
'Unexpected number of starting_snapshots, wanted exactly 1, got '
f'{len(starting_snapshots)} for session {session_resource_id.session}.')
return starting_snapshots[0]
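# Illustrative sketch, not part of the original module: the helper insists on exactly one
# starting snapshot (the resource id and snapshot ids below are made up).
#   _single_starting_snapshot(rid, ['snap-1'])      # -> 'snap-1'
#   _single_starting_snapshot(rid, [])              # raises ValueError
#   _single_starting_snapshot(rid, ['a', 'b'])      # raises ValueError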
def _create_or_use_existing_snapshot(
session_resource_id, starting_snapshots, model_resource_id,
data_store, expect_starting_snapshot):
"""Return snapshot ID for a new snapshot or existing snapshot.
If a final model was selected by model_selector and passed onto the
model_resource_id, creates a snapshot for this model and return the snapshot
ID. Otherwise, returns the starting snapshot ID.
Args:
session_resource_id: resource_id.FalkenResourceID for the session being
stopped.
starting_snapshots: Starting snapshot IDs of the session being stopped.
model_resource_id: resource_id.FalkenResourceID for the model that was
selected by model_selector for this session.
data_store: data_store.DataStore instance to read and write the new
snapshot into.
expect_starting_snapshot: bool, whether we are expecting a starting
snapshot. If False and no model ID is found, we return an empty string.
Returns:
Snapshot ID string for the created snapshot or the starting snapshot.
Raises:
FileNotFoundError, InternalError for issues while writing the snapshot.
ValueError for issues while getting starting snapshot IDs.
"""
if model_resource_id:
# Use model ID for new snapshot.
model = data_store.read(model_resource_id)
return _create_snapshot(session_resource_id, starting_snapshots,
model_resource_id, model.model_path, data_store)
else:
# Return existing snapshot. Callers with expect_starting_snapshot True will
# raise ValueError if the len(starting_shots) != 1.
if len(starting_snapshots) == 1 or expect_starting_snapshot:
      return _single_starting_snapshot(
session_resource_id, starting_snapshots)
return ''
def _create_snapshot(session_resource_id, starting_snapshots,
model_resource_id, model_path, data_store):
"""Creates a new snapshot in data_store.
Args:
session_resource_id: resource_id.FalkenResourceID for the session to create
the new snapshot for.
starting_snapshots: Starting snapshot IDs of the session to create the
      new snapshot for.
model_resource_id: resource_id.FalkenResourceID for the model that was
selected by model_selector to create the snapshot for.
model_path: Path for the model to write the snapshot fo
msegado/edx-platform | common/lib/xmodule/xmodule/partitions/partitions.py | Python | agpl-3.0 | 10,492 | 0.002669
"""Defines ``Group`` and ``UserPartition`` models for partitioning"""
from collections import namedtuple
from stevedore.extension import ExtensionManager
# We use ``id`` in this file as the IDs of our Groups and UserPartitions,
# which Pylint disapproves of.
# pylint: disable=redefined-builtin
# UserPartition IDs must be unique. The Cohort and Random UserPartitions (when they are
# created via Studio) choose an unused ID in the range of 100 (historical) to MAX_INT. Therefore the
# dynamic UserPartitionIDs must be under 100, and they have to be hard-coded to ensure
# they are always the same whenever the dynamic partition is added (since the UserPartition
# ID is stored in the xblock group_access dict).
ENROLLMENT_TRACK_PARTITION_ID = 50
MINIMUM_STATIC_PARTITION_ID = 100
class UserPartitionError(Exception):
"""
Base Exception for when an error was found regarding user partitions.
"""
pass
class NoSuchUserPartitionError(UserPartitionError):
"""
Exception to be raised when looking up a UserPartition by its ID fails.
"""
pass
class NoSuchUserPartitionGroupError(UserPartitionError):
"""
Exception to be raised when looking up a UserPartition Group by its ID fails.
"""
pass
class ReadOnlyUserPartitionError(UserPartitionError):
"""
Exception to be raised when attempting to modify a read only partition.
"""
pass
class Group(namedtuple("Group", "id name")):
"""
An id and name for a group of students. The id should be unique
within the UserPartition this group appears in.
"""
# in case we want to add to this class, a version will be handy
# for deserializing old versions. (This will be serialized in courses)
VERSION = 1
def __new__(cls, id, name):
return super(Group, cls).__new__(cls, int(id), name)
def to_json(self):
"""
'Serialize' to a json-serializable representation.
Returns:
a dictionary with keys for the properties of the group.
"""
return {
"id": self.id,
"name": self.name,
"version": Group.VERSION
}
@staticmethod
def from_json(value):
"""
Deserialize a Group from a json-like representation.
Args:
value: a dictionary with keys for the properties of the group.
Raises TypeError if the value doesn't have the right keys.
"""
if isinstance(value, Group):
return value
for key in ("id", "name", "version"):
if key not in value:
raise TypeError("Group dict {0} missing value key '{1}'".format(
value, key))
if value["version"] != Group.VERSION:
raise TypeError("Group dict {0} has unexpected version".format(
value))
return Group(value["id"], value["name"])
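# Illustrative sketch, not part of the original module: Group serialization round-trips.
#   g = Group(1, "beta")
#   Group.from_json(g.to_json()) == g          # True (namedtuple equality)
#   Group.from_json({"id": 2, "name": "x"})    # raises TypeError: no "version" key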
# The Stevedore extension point namespace for user partition scheme plugins.
USER_PARTITION_SCHEME_NAMESPACE = 'openedx.user_partition_scheme'
class UserPartition(namedtuple("UserPartition", "id name description groups scheme parameters active")):
"""A named way to partition users into groups, primarily intended for
running experiments. It is expected that each user will be in at most one
group in a partition.
A Partition has an id, name, scheme, description, parameters, and a list
of groups. The id is intended to be unique within the context where these
are used. (e.g., for partitions of users within a course, the ids should
be unique per-course). The scheme is used to assign users into groups.
The parameters field is used to save extra parameters e.g., location of
the block in case of VerificationPartitionScheme.
Partitions can be marked as inactive by setting the "active" flag to False.
Any group access rule referencing inactive partitions will be ignored
when performing access checks.
"""
VERSION = 3
# The collection of user partition scheme extensions.
scheme_extensions = None
# The default scheme to be used when upgrading version 1 partitions.
VERSION_1_SCHEME = "random"
def __new__(cls, id, name, description, groups, scheme=None, parameters=None, active=True,
scheme_id=VERSION_1_SCHEME):
if not scheme:
scheme = UserPartition.get_scheme(scheme_id)
if parameters is None:
parameters = {}
return super(UserPartition, cls).__new__(cls, int(id), name, description, groups, scheme, parameters, active)
@staticmethod
def get_scheme(name):
"""
Returns the user partition scheme with the given name.
"""
# Note: we're creating the extension manager lazily to ensure that the Python path
# has been correctly set up. Trying to create this statically will fail, unfortunately.
if not UserPartition.scheme_extensions:
UserPartition.scheme_extensions = ExtensionManager(namespace=USER_PARTITION_SCHEME_NAMESPACE)
try:
scheme = UserPartition.scheme_extensions[name].plugin
except KeyError:
raise UserPartitionError("Unrecognized scheme '{0}'".format(name))
scheme.name = name
return scheme
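    # Illustrative sketch, not part of the original module:
    #   UserPartition.get_scheme("random")     # stevedore entry-point lookup, returns the scheme plugin
    #   UserPartition.get_scheme("no_such")    # raises UserPartitionError("Unrecognized scheme ...")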
def to_json(self):
"""
'Serialize' to a json-serializable representation.
Returns:
a dictionary with keys for the properties of the partition.
"""
return {
"id": self.id,
"name": self.name,
"scheme": self.scheme.name,
"description": self.description,
"parameters": self.parameters,
"groups": [g.to_json() for g in self.groups],
"active": bool(self.active),
"version": UserPartition.VERSION
}
@staticmethod
def from_json(value):
"""
Deserialize a Group from a json-like representation.
Args:
value: a dictionary with keys for the properties of the group.
Raises TypeError if the value doesn't have the right keys.
"""
if isinstance(value, UserPartition):
return value
for key in ("id", "name", "description", "version", "groups"):
if key not in value:
raise TypeError("UserPartition dict {0} missing value key '{1}'".format(value, key))
if value["version"] == 1:
# If no scheme was provided, set it to the default ('random')
scheme_id = UserPartition.VERSION_1_SCHEME
# Version changes should be backwards compatible in case the code
# gets rolled back. If we see a version number greater than the current
# version, we should try to read it rather than raising an exception.
elif value["version"] >= 2:
if "scheme" not in value:
raise TypeError("UserPartition dict {0} missing value key 'scheme'".format(value))
scheme_id = value["scheme"]
else:
raise TypeError("UserPartition dict {0} has unexpected version".format(value))
parameters = value.get("parameters", {})
active = value.get("active", True)
groups = [Group.from_json(g) for g in value["groups"]]
scheme = UserPartition.get_scheme(scheme_id)
if not scheme:
raise TypeError("UserPartition dict {0} has unrecognized scheme {1}".format(value, scheme_id))
        if getattr(scheme, 'read_only', False):
raise ReadOnlyUserPartitionError("UserPartition dict {0} uses scheme {1} which is read only".format(value, scheme_id))
if hasattr(scheme, "create_user_partition"):
return scheme.create_user_partition(
value["id"],
value["name"],
value["description"],
groups,
parameters,
active,
)
else:
return UserPartition(
value["id"],
value["name"],
value["description"],
groups,
scheme,
parameters,
active,
ycasg/PyNLO | src/pynlo/light/__init__.py | Python | gpl-3.0 | 245 | 0.004082
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 03 09:38:19 2014
"""
from .PulseBase import Pulse
from .beam import OneDBeam
from . import beam
from . import DerivedPulses
from . import PulseBase
from .high_V_waveguide import OneDBeam_highV_WG
LaurentClaessens/LaTeXparser | Occurrence.py | Python | gpl-3.0 | 7,331 | 0.015278
# -*- coding: utf8 -*-
###########################################################################
# This is the package latexparser
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
###########################################################################
# copyright (c) Laurent Claessens, 2010,2012-2016
# email: laurent@claessens-donadello.eu
import codecs
from latexparser.InputPaths import InputPaths
class Occurrence(object):
"""
self.as_written : the code as it appears in the file, including \MyMacro, including the backslash.
self.position : the position at which this occurrence appears.
Example, if we look at the LatexCode
Hello word, \MyMacro{first}
and then \MyMacro{second}
the first occurrence of \MyMacro has position=12
"""
def __init__(self,name,arguments,as_written="",position=0):
self.arguments = arguments
self.number_of_arguments = len(arguments)
self.name = name
self.as_written = as_written
self.arguments_list = arguments
self.position = position
def configuration(self):
r"""
Return the way the arguments are separated in as_written.
Example, if we have
\MyMacro<space>{A}<tab>{B}
{C},
we return the list
["<space>","tab","\n"]
The following has to be true:
self.as_written == self.name+self.configuration()[0]+self.arguments_list[0]+etc.
"""
l=[]
a = self.as_written.split(self.name)[1]
for arg in self.arguments_list:
split = a.split("{"+arg+"}")
separator=split[0]
try:
a=split[1]
except IndexError:
print(self.as_written)
raise
l.append(separator)
return l
def change_argument(self,num,func):
r"""
Apply the function <func> to the <n>th argument of self. Then return a new object.
"""
n=num-1 # Internally, the arguments are numbered from 0.
arguments=self.arguments_list
configuration=self.configuration()
arguments[n]=func(arguments[n])
new_text=self.name
if len(arguments) != len(configuration):
print("Error : length of the configuration list has to be the same as the number of arguments")
raise ValueError
for i in range(len(arguments)):
new_text=new_text+configuration[i]+"{"+arguments[i]+"}"
return Occurrence(self.name,arguments,new_text,self.position)
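    # Illustrative sketch, not part of the original module: change_argument on a made-up
    # occurrence; argument numbering starts at 1.
    #   occ = Occurrence("\\MyMacro", ["first", "second"], "\\MyMacro{first}{second}", 12)
    #   occ.change_argument(2, str.upper).as_written == "\\MyMacro{first}{SECOND}"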
def analyse(self):
return globals()["Occurrence_"+self.name[1:]](self) # We have to remove the initial "\" in the name of the macro.
def __getitem__(self,a):
return self.arguments[a]
def __str__(self):
return self.as_written
class Occurrence_newlabel(object):
r"""
takes an occurrence of \newlabel and creates an object which contains the information.
In the self.section_name we remove "\relax" from the string.
"""
def __init__(self,occurrence):
self.occurrence = occurrence
self.arguments = self.occurrence.arguments
if len(self.arguments) == 0 :
            self.name = "Non interesting; probably the definition"
self.listoche = [None,None,None,None,None]
self.value,self.page,self.section_name,self.fourth,self.fifth=(None,None,None,None,None)
else :
self.name = self.arguments[0][0]
            self.listoche = [a[0] for a in SearchArguments(self.arguments[1][0],5)[0]]
self.value = self.listoche[0]
self.page = self.listoche[1]
self.section_name = self.listoche[2].replace(r"\relax","")
self.fourth = self.listoche[3] # I don't know the role of the fourth argument of \newlabel
self.fifth = self.listoche[4] # I don't know the role of the fifth argument of \newlabel
class Occurrence_addInputPath(object):
def __init__(self,Occurrence):
self.directory=Occurrence[0]
class Occurrence_cite(object):
def __init__(self,occurrence):
self.label = occurrence[0]
def entry(self,codeBibtex):
return codeBibtex[self.label]
class Occurrence_newcommand(object):
def __init__(self,occurrence):
self.occurrence = occurrence
self.number_of_arguments = 0
if self.occurrence[1][1] == "[]":
self.number_of_arguments = self.occurrence[1][0]
self.name = self.occurrence[0][0]#[0]
self.definition = self.occurrence[-1][0]
class Occurrence_label(object):
def __init__(self,occurrence):
self.occurrence=occurrence
self.label=self.occurrence.arguments[0]
class Occurrence_ref(object):
def __init__(self,occurrence):
self.occurrence=occurrence
self.label=self.occurrence.arguments[0]
class Occurrence_eqref(object):
def __init__(self,occurrence):
self.occurrence=occurrence
self.label=self.occurrence.arguments[0]
class Occurrence_input(Occurrence):
def __init__(self,occurrence):
Occurrence.__init__(self,occurrence.name,occurrence.arguments,as_written=occurrence.as_written,position=occurrence.position)
self.occurrence = occurrence
self.filename = self.occurrence[0]
self.input_paths=InputPaths()
self._file_content=None # Make file_content "lazy"
def file_content(self,input_paths=None):
r"""
return the content of the file corresponding to this occurrence of
\input.
This is not recursive.
- 'input_path' is the list of paths in which we can search for files.
See the macro `\addInputPath` in the file
https://github.com/LaurentClaessens/mazhe/blob/master/configuration.tex
"""
import os.path
# Memoize
if self._file_content is not None :
return self._file_content
# At least, we are searching in the current directory :
if input_paths is None :
raise # Just to know who should do something like that
# Creating the filename
filename=self.filename
strict_filename = filename
if "." not in filename:
strict_filename=filename+".tex"
# Searching for the correct file in the subdirectories
fn=input_paths.get_file(strict_filename)
try:
# Without [:-1] I got an artificial empty line at the end.
text = "".join( codecs.open(fn,"r",encoding="utf8") )[:-1]
except IOError :
print("Warning : file %s not found."%strict_filename)
raise
self._file_content=text
return self._file_content
pichuang/OpenNet | mininet-patch/examples/cluster/nctu_ec_wired_topo.py | Python | gpl-2.0 | 3,067 | 0.011412
#!/usr/bin/python
'''
nctu_ec_wired_topo.py
'''
from mininet.cluster.net import MininetCluster
from mininet.cluster.placer import DFSPlacer
from mininet.log import setLogLevel
from mininet.cluster.cli import ClusterCLI as CLI
from mininet.node import Controller, RemoteController
from mininet.topo import Topo
from itertools import combinations
CONTROLLER_IP = "192.168.59.100"
CONTROLLER_PORT = 6633
SERVER_LIST = [ 'mininet1', 'mininet2' ]
class NCTU_EC_Topology( Topo ):
def __init__(self, core=1, agg=6, access=6, host=5, *args, **kwargs):
Topo.__init__(self, *args, **kwargs)
self.core_num = core
self.agg_num = agg
self.access_num = access
self.host_num = host
self.sw_id = 1
self.host_id = 1
# Init switch and host list
self.core_sw_list = []
self.agg_sw_list = []
        self.access_sw_list = []
self.host_list = []
self.create_top_switch( "core", self.core_num, self.core_sw_list )
self.handle_top_down( "agg", self.agg_num, self.core_sw_list, self.agg_sw_list )
        self.handle_top_down( "access", self.access_num, self.agg_sw_list, self.access_sw_list )
self.handle_host( "h", self.host_num, self.host_list )
self.handle_mesh( self.agg_sw_list )
def create_top_switch( self, prefix_name, sw_num, sw_list):
for i in xrange(1, sw_num+1):
sw_list.append(self.addSwitch("{0}{1}".format(prefix_name, i), dpid='{0:x}'.format(self.sw_id)))
self.sw_id += 1
def handle_top_down( self, prefix_name, num, top_list, down_list):
temp = 0
for i in xrange(0, len(top_list)):
for j in xrange(1, num+1):
switch = self.addSwitch("{0}{1}".format(prefix_name, j + temp), dpid='{0:x}'.format(self.sw_id))
self.addLink(top_list[i], switch)
down_list.append(switch)
self.sw_id += 1
temp = j
def handle_host( self, prefix_name, host_num, host_list ):
for i in xrange(0, len(self.access_sw_list)):
for j in xrange(0, host_num):
host = self.addHost('{0}{1}'.format(prefix_name, self.host_id))
# Link to access sw
self.addLink(self.access_sw_list[i], host)
# Append host to list
host_list.append(host)
self.host_id += 1
def handle_mesh( self, sw_list ):
for link in combinations(sw_list, 2):
self.addLink(link[0], link[1])
def RunTestBed():
# NCTU_EC_Topology( Core Switch, Aggregate Switch, Access Switch, Host)
topo = NCTU_EC_Topology(core=1, agg=6, access=6, host=20)
net = MininetCluster( controller=RemoteController, topo=topo, servers=SERVER_LIST, placement=DFSPlacer, root_node="core1", tunneling="vxlan" )
net.addController( 'controller', controller=RemoteController, ip=CONTROLLER_IP, port=CONTROLLER_PORT )
net.start()
CLI( net )
net.stop()
if __name__ == '__main__':
setLogLevel('info')
RunTestBed()
VictorAlessander/Smith | setup.py | Python | gpl-3.0 | 1,730 | 0.000578
import io
from setuptools import (
setup,
find_packages,
) # pylint: disable=no-name-in-module,import-error
def dependencies(file):
with open(file) as f:
return f.read().splitlines()
with io.open("README.md", encoding="utf-8") as infile:
long_description = infile.read()
setup(
name="smith_the_crawler",
packages=find_packages(exclude=("tests", "examples")),
version="0.0.12-alpha",
# license="MIT",
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3 :: Only",
],
python_requires=">=3.1",
    description="A webscraper with a sophisticated toolkit to scrape the world",
long_description=long_description,
long_description_content_type="text/markdown",
download_url="https://github.com/VictorAlessander/Smith/archive/refs/tags/v0.0.12-alpha.tar.gz",
author="Victor Alessander",
author_email="victor.alessander.gr@gmail.com",
url="https://github.com/VictorAlessander/Smith",
keywords=[
"crawler",
"webscraping",
"webscraper",
"investments",
"investment",
"invest",
],
install_requires=[
"beautifulsoup4",
"plotly",
"requests",
"pandas",
"fake-useragent",
"openpyxl",
],
# install_requires=dependencies('requirements.txt'),
# tests_require=dependencies("requirements-dev.txt"),
include_package_data=True,
# extras_require={"ipython": ["IPython==5.7.0", "ipywidgets==7.1.0",]},
)
googleapis/python-datalabeling | samples/generated_samples/datalabeling_v1beta1_generated_data_labeling_service_get_evaluation_sync.py | Python | apache-2.0 | 1,513 | 0.000661
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for GetEvaluation
#
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-datalabeling
# [START datalabeling_v1beta1_generated_DataLabelingService_GetEvaluation_sync]
from google.cloud import datalabeling_v1beta1
def sample_get_evaluation():
    # Create a client
client = datalabeling_v1beta1.DataLabelingServiceClient()
# Initialize request argument(s)
request = datalabeling_v1beta1.GetEvaluationRequest(
name="name_value",
)
# Make the request
response = client.get_evaluation(request=request)
# Handle the response
print(response)
# [END datalabeling_v1beta1_generated_DataLabelingService_GetEvaluation_sync]
MongoEngine/mongoengine | mongoengine/fields.py | Python | mit | 90,689 | 0.001301
import datetime
import decimal
import inspect
import itertools
import re
import socket
import time
import uuid
from io import BytesIO
from operator import itemgetter
import gridfs
import pymongo
from bson import SON, Binary, DBRef, ObjectId
from bson.int64 import Int64
from pymongo import ReturnDocument
try:
import dateutil
except ImportError:
dateutil = None
else:
import dateutil.parser
from mongoengine.base import (
BaseDocument,
BaseField,
ComplexBaseField,
GeoJsonBaseField,
LazyReference,
ObjectIdField,
get_document,
)
from mongoengine.base.utils import LazyRegexCompiler
from mongoengine.common import _import_class
from mongoengine.connection import DEFAULT_CONNECTION_NAME, get_db
from mongoengine.document import Document, EmbeddedDocument
from mongoengine.errors import (
DoesNotExist,
InvalidQueryError,
ValidationError,
)
from mongoengine.queryset import DO_NOTHING
from mongoengine.queryset.base import BaseQuerySet
from mongoengine.queryset.transform import STRING_OPERATORS
try:
from PIL import Image, ImageOps
except ImportError:
Image = None
ImageOps = None
__all__ = (
"StringField",
"URLField",
"EmailField",
"IntField",
"LongField",
"FloatField",
"DecimalField",
"BooleanField",
"DateTimeField",
"DateField",
"ComplexDateTimeField",
"EmbeddedDocumentField",
"ObjectIdField",
"GenericEmbeddedDocumentField",
"DynamicField",
"ListField",
"SortedListField",
"EmbeddedDocumentListField",
"DictField",
"MapField",
"ReferenceField",
"CachedReferenceField",
"LazyReferenceField",
"GenericLazyReferenceField",
"GenericReferenceField",
"BinaryField",
"GridFSError",
"GridFSProxy",
"FileField",
"ImageGridFsProxy",
"ImproperlyConfigured",
"ImageField",
"GeoPointField",
"PointField",
"LineStringField",
"PolygonField",
"SequenceField",
"UUIDField",
"EnumField",
"MultiPointField",
"MultiLineStringField",
"MultiPolygonField",
"GeoJsonBaseField",
)
RECURSIVE_REFERENCE_CONSTANT = "self"
class StringField(BaseField):
"""A unicode string field."""
def __init__(self, regex=None, max_length=None, min_length=None, **kwargs):
"""
:param regex: (optional) A string pattern that will be applied during validation
:param max_length: (optional) A max length that will be applied during validation
:param min_length: (optional) A min length that will be applied during validation
:param kwargs: Keyword arguments passed into the parent :class:`~mongoengine.BaseField`
"""
self.regex = re.compile(regex) if regex else None
self.max_length = max_length
self.min_length = min_length
super().__init__(**kwargs)
def to_python(self, value):
if isinstance(value, str):
return value
try:
value = value.decode("utf-8")
except Exception:
pass
return value
def validate(self, value):
if not isinstance(value, str):
self.error("StringField only accepts string values")
if self.max_length is not None and len(value) > self.max_length:
self.error("String value is too long")
if self.min_length is not None and len(value) < self.min_length:
self.error("String value is too short")
if self.regex is not None and self.regex.match(value) is None:
self.error("String value did not match validation regex")
def lookup_member(self, member_name):
return None
def prepare_query_value(self, op, value):
if not isinstance(op, str):
return value
if op in STRING_OPERATORS:
case_insensitive = op.startswith("i")
op = op.lstrip("i")
flags = re.IGNORECASE if case_insensitive else 0
regex = r"%s"
if op == "startswith":
regex = r"^%s"
elif op == "endswith":
regex = r"%s$"
elif op == "exact":
regex = r"^%s$"
elif op == "wholeword":
regex = r"\b%s\b"
elif op == "regex":
regex = value
if op == "regex":
value = re.compile(regex, flags)
else:
# escape unsafe characters which could lead to a re.error
value = re.escape(value)
value = re.compile(regex % value, flags)
return super().prepare_query_value(op, value)
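# Illustrative note (added; not part of the original module): `prepare_query_value`
# above turns Django-style string operators into compiled regexes. A minimal sketch,
# assuming 'istartswith' is among STRING_OPERATORS:
#
#     field = StringField()
#     pattern = field.prepare_query_value("istartswith", "mongo")
#     # `pattern` behaves like re.compile(r"^mongo", re.IGNORECASE)
#     assert pattern.match("MongoEngine")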
class URLField(StringField):
"""A field that validates input as an URL."""
_URL_REGEX = LazyRegexCompiler(
r"^(?:[a-z0-9\.\-]*)://" # scheme is validated separately
r"(?:(?:[A-Z0-9](?:[A-Z0-9-_]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}(?<!-)\.?)|" # domain...
r"localhost|" # localhost...
r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|" # ...or ipv4
r"\[?[A-F0-9]*:[A-F0-9:]+\]?)" # ...or ipv6
r"(?::\d+)?" # optional port
r"(?:/?|[/?]\S+)$",
re.IGNORECASE,
)
_URL_SCHEMES = ["http", "https", "ftp", "ftps"]
def __init__(self, url_regex=None, schemes=None, **kwargs):
"""
:param url_regex: (optional) Overwrite the default regex used for validation
:param schemes: (optional) Overwrite the default URL schemes that are allowed
:param kwargs: Keyword arguments passed into the parent :class:`~mongoengine.StringField`
"""
self.url_regex = url_regex or self._URL_REGEX
self.schemes = schemes or self._URL_SCHEMES
super().__init__(**kwargs)
def validate(self, value):
# Check first if the scheme is valid
scheme = value.split("://")[0].lower()
if scheme not in self.schemes:
self.error(f"Invalid scheme {scheme} in URL: {value}")
# Then check full URL
if not self.url_regex.match(value):
self.error(f"Invalid URL: {value}")
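# Illustrative note (added): the scheme check runs before the regex, so a URL with an
# unsupported scheme fails fast. The field instance below is invented for the example:
#
#     link = URLField()
#     link.validate('https://example.org/docs')   # passes
#     link.validate('gopher://example.org')       # raises ValidationError (scheme not allowed)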
class EmailField(StringField):
"""A field that validates input as an email address."""
USER_REGEX = LazyRegexCompiler(
# `dot-atom` defined in RFC 5322 Section 3.2.3.
r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*\Z"
# `quoted-string` defined in RFC 5322 Section 3.2.4.
r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-\011\013\014\016-\177])*"\Z)',
re.IGNORECASE,
)
UTF8_USER_REGEX = LazyRegexCompiler(
(
# RFC 6531 Section 3.3 extends `atext` (used by dot-atom) to
# include `UTF8-non-ascii`.
r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z\u0080-\U0010FFFF]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z\u0080-\U0010FFFF]+)*\Z"
# `quoted-string`
r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-\011\013\014\016-\177])*"\Z)'
),
re.IGNORECASE | re.UNICODE,
)
DOMAIN_REGEX = LazyRegexCompiler(
r"((?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+)(?:[A-Z0-9-]{2,63}(?<!-))\Z",
re.IGNORECASE,
)
error_msg = "Invalid email address: %s"
def __init__(
self,
        domain_whitelist=None,
        allow_utf8_user=False,
allow_ip_domain=False,
*args,
**kwargs,
):
"""
:param domain_whitelist: (optional) list of valid domain names applied during validation
        :param allow_utf8_user: Allow user part of the email to contain utf8 char
:param allow_ip_domain: Allow domain part of the email to be an IPv4 or IPv6 address
:param kwargs: Keyword arguments passed into the parent :class:`~mongoengine.StringField`
"""
self.domain_whitelist = domain_whitelist or []
self.allow_utf8_user = allow_utf8_user
self.allow_ip_domain = allow_ip_domain
super().__init__(*args, **kwargs)
def validate_user_part(self, user_part):
"""Validate the user part of the email address. Return True if
valid and False otherwise.
"""
if self.allow_utf8_user:
return self.UTF8_USER_REGEX.match(user_part)
|
michel-rodrigues/ecommerce2
|
source/carts/migrations/0008_cart_tax_percentage.py
|
Python
|
gpl-3.0
| 489
| 0.002045
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-12-21 14:58
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('carts', '0007_auto_20161221_1337'),
]
operations = [
migrations.AddField(
            model_name='cart',
            name='tax_percentage',
            field=models.DecimalField(decimal_places=3, default=0.085, max_digits=20),
),
]
|
blopker/Color-Switch
|
colorswitch/http/downloaders/urllib.py
|
Python
|
mit
| 1,149
| 0.002611
|
from .downloader_base import DownloaderBase
from ... import logger
log = logger.get(__name__)
import traceback
import json
from urllib import request, error
try:
import ssl
SSL = True
except ImportError:
SSL = False
def is_available():
return SSL
class UrllibDownloader(DownloaderBase):
    """Downloader that uses the native Python HTTP library.
Does not verify HTTPS certificates... """
def get(self, url):
try:
log.debug('Urllib downloader getting url %s', url)
result = request.urlopen(url)
except error.URLError as e:
log.error('Urllib downloader failed: %s' % e.reason)
            traceback.print_exc()
            # Return early: the original fell through with a bytes object, which has
            # no getcode() and would raise AttributeError on the check below.
            return b''
if result.getcode() >= 400:
return b''
return result.read()
def get_json(self, url):
a = self.get(url)
if a:
try:
a = json.loads(a.decode('utf-8'))
except ValueError:
log.error('URL %s does not contain a JSON file.', url)
return False
return a
def get_file(self, url):
return self.get(url)
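# Hedged usage sketch (added; the URL is illustrative only and the base class is
# assumed to need no constructor arguments):
#
#     downloader = UrllibDownloader()
#     body = downloader.get('https://example.org/releases.json')      # b'' on any HTTP/URL error
#     data = downloader.get_json('https://example.org/releases.json')
#     # `data` is the decoded object on success, False when the body is not valid JSON,
#     # and an empty/falsy value when the download itself failed.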
|
sravan953/pulseq-gpi
|
pulseq2jemris/jemris_nodes/JMakeDelaySequence_GPI.py
|
Python
|
gpl-3.0
| 990
| 0.00404
|
import gpi
class ExternalNode(gpi.NodeAPI):
"""This Node provides allows the user to make a DelaySequence for Jemris."""
def initUI(self):
# Widgets
self.delay_labels = ['Name', 'Observe', 'ADCs', 'Aux1', 'Aux2', 'Aux3', 'Delay', 'DelayType', 'HardwareMode',
'PhaseLock', 'StartSeq', 'StopSeq', 'Vector']
[self.addWidget('StringBox', label) for label in self.delay_labels]
self.addWidget('PushButton', 'ComputeEvents', button_title="Compute events")
# IO Ports
self.addOutPort('DelaySequence', 'LIST')
return 0
def compute(self):
        if 'ComputeEvents' in self.widgetEvents() or '_INIT_EVENT_' in self.getEvents():
delay_seq = {'DelaySequence': True}
for label in self.delay_labels:
if self.getVal(label) != '':
                    delay_seq[label] = self.getVal(label)
self.setData('DelaySequence', [delay_seq])
return 0
|
cmaclell/pyAFM
|
pyafm/roll_up.py
|
Python
|
mit
| 7,059
| 0
|
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
import csv
from dateutil.parser import parse
def write_problem(steps, problem_views, kc_ops, row_count, kc_model_names,
out):
# variable to store rolled up steps
rollup = []
for s in steps:
# sort transactions within a step by time (should be sorted already,
# but just in case)
steps[s].sort(key=lambda x: x['time'])
# update variables for first attempt
student = steps[s][0]['anon student id']
problem_name = steps[s][0]['problem name']
step_name = s
step_start_time = steps[s][0]['time']
first_transaction_time = steps[s][0]['time']
correct_transaction_time = ""
step_end_time = steps[s][0]['time']
first_attempt = steps[s][0]['outcome'].lower()
incorrects = 0
corrects = 0
hints = 0
kc_sets = {kc_mod: set() for kc_mod in kc_model_names}
# update variables for non-first attempt transactions
for t in steps[s]:
step_end_time = t['time']
if t['outcome'].lower() == 'correct':
correct_transaction_time = t['time']
corrects += 1
elif t['outcome'].lower() == 'incorrect':
incorrects += 1
            elif t['outcome'].lower() == 'hint':
hints += 1
for kc_mod in kc_model_names:
for kc in t[kc_mod].split("~~"):
kc_sets[kc_mod].add(kc)
# for each rolled up step, we need to increment the KC counts.
kc_to_write = []
for kc_mod in kc_model_names:
            model_name = kc_mod[4:-1]
kcs = list(kc_sets[kc_mod])
kc_to_write.append("~~".join(kcs))
if model_name not in kc_ops:
kc_ops[model_name] = {}
ops = []
for kc in kcs:
if kc not in kc_ops[model_name]:
kc_ops[model_name][kc] = 0
kc_ops[model_name][kc] += 1
ops.append(str(kc_ops[model_name][kc]))
kc_to_write.append("~~".join(ops))
# add rolled up step to rollup
rolled_up_step = [str(row_count),
student,
problem_name,
str(problem_views),
step_name,
step_start_time,
first_transaction_time,
correct_transaction_time,
step_end_time,
first_attempt,
str(incorrects),
str(corrects),
str(hints)]
rolled_up_step.extend(kc_to_write)
row_count += 1
rollup.append(rolled_up_step)
# sort the rolled up steps by step start time
rollup.sort(key=lambda x: x[5])
for line_to_write in rollup:
out.write('\t'.join(line_to_write)+'\n')
return row_count
def transaction_to_student_step(datashop_file):
out_file = datashop_file.name[:-4]+'-rollup.txt'
students = {}
header = None
for row in csv.reader(datashop_file, delimiter='\t'):
if header is None:
header = row
continue
line = {}
kc_mods = {}
for i, h in enumerate(header):
if h[:4] == 'KC (':
line[h] = row[i]
if h not in kc_mods:
kc_mods[h] = []
if line[h] != "":
kc_mods[h].append(line[h])
continue
else:
h = h.lower()
line[h] = row[i]
if 'step name' in line:
pass
elif 'selection' in line and 'action' in line:
line['step name'] = line['selection'] + ' ' + line['action']
else:
raise Exception(
'No fields present to make step names, either add a "Step'
' Name" column or "Selection" and "Action" columns.')
if 'step name' in line and 'problem name' in line:
line['prob step'] = line['problem name'] + ' ' + line['step name']
for km in kc_mods:
line[km] = '~~'.join(kc_mods[km])
if line['anon student id'] not in students:
students[line['anon student id']] = []
students[line['anon student id']].append(line)
kc_model_names = list(set(kc_mods))
row_count = 0
with open(out_file, 'w') as out:
new_head = ['Row',
'Anon Student Id',
'Problem Name',
'Problem View',
'Step Name',
'Step Start Time',
'First Transaction Time',
'Correct Transaction Time',
'Step End Time',
'First Attempt',
'Incorrects',
'Corrects',
'Hints', ]
out.write('\t'.join(new_head))
for km in kc_model_names:
out.write('\t'+km+'\tOpportunity ('+km[4:])
out.write('\n')
stu_list = list(students.keys())
        stu_list.sort()  # sort in place; the bare sorted() call discarded its result
for stu in stu_list:
transactions = students[stu]
transactions = sorted(transactions, key=lambda k: parse(k['time']))
problem_views = {}
kc_ops = {}
row_count = 0
steps = {}
problem_name = ""
# Start iterating through the stuff.
for i, t in enumerate(transactions):
if problem_name != t['problem name']:
# we don't need to write the first row, because we don't
# have anything yet.
if i != 0:
if problem_name not in problem_views:
problem_views[problem_name] = 0
problem_views[problem_name] += 1
row_count = write_problem(steps,
problem_views[problem_name],
kc_ops, row_count,
kc_model_names, out)
steps = {}
if t['step name'] not in steps:
steps[t['step name']] = []
steps[t['step name']].append(t)
problem_name = t['problem name']
# need to write the last problem
if problem_name not in problem_views:
problem_views[problem_name] = 0
problem_views[problem_name] += 1
row_count = write_problem(steps, problem_views[problem_name],
kc_ops, row_count, kc_model_names, out)
steps = {}
print('transaction file rolled up into:', out_file)
return out_file
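# Hedged usage sketch (added; the export file name is invented):
#
#     with open('ds_transactions.txt') as datashop_file:
#         rollup_path = transaction_to_student_step(datashop_file)
#     # rollup_path points at '<input name minus .txt>-rollup.txt'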
|
outoftime/learnpad
|
tools/npx.py
|
Python
|
mit
| 171
| 0
|
#!/usr/bin/env python
from util import nodeenv_delegate
from setup import setup
if __name__ == "__main__":
setup(skip_dependencies=True)
    nodeenv_delegate("npx")
|
tedye/leetcode
|
Python/leetcode.059.spiral-matrix-ii.py
|
Python
|
mit
| 1,134
| 0.013228
|
class Solution(object):
def generateMatrix(self, n):
"""
:type n: int
:rtype: List[List[int]]
"""
if n <= 0:
return []
if n == 1:
return [[1]]
matrix = [[None] * n for _ in range(n)]
x = y = 0
direction = [(0,1),(1,0),(0,-1),(-1,0)]
count = 1
l = 0
r = n-1
u = 0
d = n-1
dc = 0
while l <= r or u <= d:
if l <= x <= r and u <= y <= d:
matrix[y][x] = count
count += 1
y += direction[dc&3][0]
x += direction[dc&3][1]
            elif x > r:
                u += 1
x -= 1
y += 1
dc += 1
            elif y > d:
                r -= 1
y -= 1
x -= 1
dc +=1
elif x < l:
d -= 1
x += 1
y -= 1
dc += 1
elif y < u:
l += 1
y += 1
x += 1
dc += 1
return matrix
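# Added sanity check (not part of the original submission):
if __name__ == "__main__":
    # Expected spiral for n = 3:
    # [[1, 2, 3],
    #  [8, 9, 4],
    #  [7, 6, 5]]
    print(Solution().generateMatrix(3))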
|
hbenniou/trunk
|
doc/sphinx/book/confReference.py
|
Python
|
gpl-2.0
| 26,536
| 0.030762
|
# -*- coding: utf-8 -*-
#
# Yade documentation build configuration file, created by
# sphinx-quickstart on Mon Nov 16 21:49:34 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# relevant posts to sphinx ML
# http://groups.google.com/group/sphinx-dev/browse_thread/thread/b4fbc8d31d230fc4
# http://groups.google.com/group/sphinx-dev/browse_thread/thread/118598245d5f479b
#####################
## custom yade roles
#####################
##
## http://docutils.sourceforge.net/docs/howto/rst-roles.html
import sys, os, re
from docutils import nodes
from sphinx import addnodes
from sphinx.roles import XRefRole
import docutils
#
# needed for creating hyperlink targets.
# it should be cleaned up and unified for both LaTeX and HTML via
# the pending_xref node which gets resolved to real link target
# by sphinx automatically once all docs have been processed.
#
# xrefs: http://groups.google.com/group/sphinx-dev/browse_thread/thread/d719d19307654548
#
#
import __builtin__
if 'latex' in sys.argv: __builtin__.writer='latex'
elif 'html' in sys.argv: __builtin__.writer='html'
elif 'epub' in sys.argv: __builtin__.writer='epub'
else: raise RuntimeError("Must have either 'latex' or 'html' on the command line (hack for reference styles)")
sys.path.append(os.path.abspath('./..'))
def yaderef_role(role,rawtext,text,lineno,inliner,options={},content=[]):
"Handle the :yref:`` role, by making hyperlink to yade.wrapper.*. It supports :yref:`Link text<link target>` syntax, like usual hyperlinking roles."
id=rawtext.split(':',2)[2][1:-1]
txt=id; explicitText=False
m=re.match('(.*)\s*<(.*)>\s*',id)
if m:
explicitText=True
txt,id=m.group(1),m.group(2)
id=id.replace('::','.')
#node=nodes.reference(rawtext,docutils.utils.unescape(txt),refuri='http://beta.arcig.cz/~eudoxos/yade/doxygen/?search=%s'%id,**options)
#node=nodes.reference(rawtext,docutils.utils.unescape(txt),refuri='yade.wrapper.html#yade.wrapper.%s'%id,**options)
return [mkYrefNode(id,txt,rawtext,role,explicitText,lineno,options)],[]
def yadesrc_role(role,rawtext,lineno,inliner,options={},content=[]):
"Handle the :ysrc:`` role, making hyperlink to git repository webpage with that path. Supports :ysrc:`Link text<file/name>` syntax, like usual hyperlinking roles. If target ends with ``/``, it is assumed to be a directory."
id=rawtext.split(':',2)[2][1:-1]
txt=id
m=re.match('(.*)\s*<(.*)>\s*',id)
if m:
txt,id=m.group(1),m.group(2)
return [nodes.reference(rawtext,docutils.utils.unescape(txt),refuri='https://github.com/yade/trunk/blob/master/%s'%id)],[] ### **options should be passed to nodes.reference as well
# map modules to their html (rst) filenames. Used for sub-modules, where e.g. SpherePack is yade._packSphere.SpherePack, but is documented from yade.pack.rst
moduleMap={
'yade._packPredicates':'yade.pack',
'yade._packSpheres':'yade.pack',
'yade._packObb':'yade.pack'
}
class YadeXRefRole(XRefRole):
#def process_link
def process_link(self, env, refnode, has_explicit_title, title, target):
print 'TARGET:','yade.wrapper.'+target
return '[['+title+']]','yade.wrapper.'+target
def mkYrefNode(target,text,rawtext,role,explicitText,lineno,options={}):
    """Create hyperlink to yade target. Targets starting with literal 'yade.' are absolute, but the leading 'yade.' will be stripped from the link text. Absolute targets are supposed to live in page named yade.[module].html, anchored at #yade.[module2].[rest of target], where [module2] is identical to [module], unless mapped over by moduleMap.
Other targets are supposed to live in yade.wrapper (such as c++ classes)."""
writer=__builtin__.writer # to make sure not shadowed by a local var
import string
if target.startswith('yade.'):
module='.'.join(target.split('.')[0:2])
module2=(module if module not in moduleMap.keys() else moduleMap[module])
if target==module: target='' # to reference the module itself
uri=('%%%s#%s'%(module2,target) if writer=='latex' else '%s.html#%s'%(module2,target))
if not explicitText and module!=module2:
text=module2+'.'+'.'.join(target.split('.')[2:])
text=string.replace(text,'yade.','',1)
    elif target.startswith('external:'):
exttarget=target.split(':',1)[1]
if not explicitText: text=exttarget
target=exttarget if '.' in exttarget else 'module-'+exttarget
uri=(('%%external#%s'%target) if writer=='latex' else 'external.html#%s'%target)
else:
        uri=(('%%yade.wrapper#yade.wrapper.%s'%target) if writer=='latex' else 'yade.wrapper.html#yade.wrapper.%s'%target)
#print writer,uri
return nodes.reference(rawtext,docutils.utils.unescape(text),refuri=uri,**options)
#return [refnode],[]
def ydefault_role(role,rawtext,text,lineno,inliner,options={},content=[]):
"Handle the :ydefault:`something` role. fixSignature handles it now in the member signature itself, this merely expands to nothing."
return [],[]
def yattrtype_role(role,rawtext,text,lineno,inliner,options={},content=[]):
"Handle the :yattrtype:`something` role. fixSignature handles it now in the member signature itself, this merely expands to nothing."
return [],[]
# FIXME: should return readable representation of bits of the number (yade.wrapper.AttrFlags enum)
def yattrflags_role(role,rawtext,text,lineno,inliner,options={},content=[]):
"Handle the :yattrflags:`something` role. fixSignature handles it now in the member signature itself."
return [],[]
from docutils.parsers.rst import roles
def yaderef_role_2(type,rawtext,text,lineno,inliner,options={},content=[]): return YadeXRefRole()('yref',rawtext,text,lineno,inliner,options,content)
roles.register_canonical_role('yref', yaderef_role)
roles.register_canonical_role('ysrc', yadesrc_role)
roles.register_canonical_role('ydefault', ydefault_role)
roles.register_canonical_role('yattrtype', yattrtype_role)
roles.register_canonical_role('yattrflags', yattrflags_role)
## http://sphinx.pocoo.org/config.html#confval-rst_epilog
rst_epilog = """
.. |yupdate| replace:: *(auto-updated)*
.. |ycomp| replace:: *(auto-computed)*
.. |ystatic| replace:: *(static)*
"""
import collections
def customExclude(app, what, name, obj, skip, options):
if name=='clone':
if 'Serializable.clone' in str(obj): return False
return True
#escape crash on non iterable __doc__ in some qt object
if hasattr(obj,'__doc__') and obj.__doc__ and not isinstance(obj.__doc__, collections.Iterable): return True
if hasattr(obj,'__doc__') and obj.__doc__ and ('|ydeprecated|' in obj.__doc__ or '|yhidden|' in obj.__doc__): return True
#if re.match(r'\b(__init__|__reduce__|__repr__|__str__)\b',name): return True
if name.startswith('_'):
if name=='__init__':
# skip boost classes with parameterless ctor (arg1=implicit self)
if obj.__doc__=="\n__init__( (object)arg1) -> None": return True
# skip undocumented ctors
if not obj.__doc__: return True
# skip default ctor for serializable, taking dict of attrs
if obj.__doc__=='\n__init__( (object)arg1) -> None\n\nobject __init__(tuple args, dict kwds)': return True
#for i,l in enumerate(obj.__doc__.split('\n')): print name,i,l,'##'
return False
return True
return False
def isBoostFunc(what,obj):
return what=='function' and obj.__repr__().startswith('<Boost.Python.function object at 0x')
def isBoostMethod(what,obj):
"I don't know how to distinguish boost and non-boost methods..."
return what=='method' and obj.__repr__().startswith('<unbound method ');
def replaceLaTeX(s):
# replace single non-escaped dollars $...$ by :math:`...`
# then \$ by single $
s=re.sub(r'(?<!\\)\$([^\$]+)(?<!\\)\$',r'\ :math:`\1`\ ',s)
return re.sub(r'\\\$',r'$',s)
def fixSrc(app,docname,source):
source[0]=replaceLaTeX(source[0])
def fixDocstring(app,what,name,obj,options,lines):
# remove empty default roles, which is not properly interpreted by docutils parser
for i in range(0,len(lines)):
lines[i]=lines[i].replace(':ydefault:``','')
lines[i]=lines[i].replace(':yattrtype:``','')
lines[i]=lines[i].replace(
|
zarnold/transitfeed
|
extensions/googletransit/pybcp47/__init__.py
|
Python
|
apache-2.0
| 637
| 0
|
#!/usr/bin/python2.5
# Copyright (C) 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from bcp47languageparser import *
|
ntthuy11/CodeFights
|
Arcade/04_Python/07_CaravanOfCollections/frequencyAnalysis.py
|
Python
|
mit
| 1,131
| 0.000884
|
""" You've recently read "The Gold-Bug" by Edgar Allan Poe, and were so impressed by the cryptogram in it that
you decided to try and decipher an encrypted text yourself. You asked your friend to encode a piece of text using
a substitution cipher, and now have an encryptedText that you'd like to decipher.
The encryption process in the story you read involves frequency analysis: it is known that letter 'e' is the
most frequent one in the English language, so it's pretty safe to assume that the most common character in the
encryptedText stands for 'e'. To begin with, implement a function that will find the most frequent character
in the given encryptedText.
Example
For encryptedText = "$~NmiNmim$/NVeirp@dlzrCCCCfFfQQQ", the output should be
frequencyAnalysis(encryptedText) = 'C'.
Letter 'C' appears in the text more than any other character (4 times), which is why it is the answer.
"""
from collections import Counter # "Counter" is what CodeFights asks for
def frequencyAnalysis(encryptedText):
    return max(Counter(encryptedText), key=Counter(encryptedText).get)  # CodeFights asks to change this line only
| |
RealTimeWeb/datasets
|
preprocess/medal_of_honor/fix.py
|
Python
|
gpl-2.0
| 3,268
| 0.004896
|
import json
with open('medalofhonor-old.json') as input:
data = json.load(input)
months = {
'January': 1,
'February': 2,
'March': 3,
'April': 4,
'May': 5,
'June': 6,
'July': 7,
'August': 8,
'September': 9,
'October': 10,
'November': 11,
'December': 12,
}
def force_int(num):
try:
return int(num)
except TypeError:
return -1
except ValueError:
return -1
def parse_locdate(record):
if ',' in record['Place / Date']:
try:
year, month, day = record['Place / Date'].rsplit(",", 1)[1].strip().split(" ")
year, day = int(year), int(day)
month = months[month]
return [year, month, day]
except KeyError:
return [int(record['Place / Date'][-4:]), -1, -1]
except (IndexError, ValueError) as e:
return [-1, -1, -1]
else:
return [-1, -1, -1]
def parse_birth(record):
if ',' in record['Born']:
date, location = record['Born'].split(",", 1)
try:
year, month, day = date.split(" ")
year, day = int(year), int(day)
month = months[month]
            return [year, month, day, location]
except:
return [-1, -1, -1, record['Born']]
else:
try:
return [-1, -1, int(record['Born']), ""]
except ValueError:
return [-1, -1, -1, record['Born']]
from pprint import pprint
[parse_locdate(record) for record in data]
new_data = [
{
'name': record['name'].title(),
'death': record['Departed'] == 'Yes',
'awarded': {
'date': {
'year': parse_locdate(record)[2],
'month': parse_locdate(record)[1],
'day': parse_locdate(record)[0],
'full': '{}-{}-{}'.format(parse_locdate(record)[2], parse_locdate(record)[1], parse_locdate(record)[0])
},
'location': {
'latitude': force_int(record['location'].get('latitude', 0)),
'longitude': force_int(record['location'].get('longitude', 0)),
'name': record['location'].get('name', 'Unknown'),
},
'General Order number': force_int(record['G.O. Number']),
'citation': record['citation'],
'issued': record['Date of Issue'],
'accredited to': record['Accredited To']
},
'birth': {
'date': {
'year': parse_birth(record)[2],
'month': parse_birth(record)[1],
'day': parse_birth(record)[0],
},
'location name': parse_birth(record)[3]
},
'military record': {
'rank': record['Rank'],
'division': record['Division'],
'organization': record['Organization'],
'company': record['Company'],
'entered service at': record['Entered Service At']
},
'metadata': {
'link': record['link']
}
} for record in data]
new_data = list(sorted(new_data, key=lambda r: r['awarded']['issued']))
with open('medal_of_honor.json', 'w') as output:
json.dump(new_data, output)
|
noselasd/testrunner
|
testrunner.py
|
Python
|
mit
| 21,002
| 0.005142
|
#!/usr/bin/env python
import sys
import re
import subprocess
import os
import optparse
import datetime
import inspect
import threading
from xml.sax.saxutils import XMLGenerator
from xml.sax.xmlreader import AttributesImpl
VERSION = '1.0.0'
ALL_TESTS = []
LOGFILE = None
CURRENT_SUITE = 'default'
DEFAULT_TEST_TIMEOUT = 30
#See README for detailed info
#Start a new test suite
def DefSuite(suite):
global CURRENT_SUITE
CURRENT_SUITE = suite
#Define a test - to be called in the testsuite files
def DefTest(cmd, name, success_codes=None, timeout=None):
if success_codes is None:
success_codes = [0]
if name in set(t.name for t in ALL_TESTS):
raise NameError('The test name ''%s'' is already defined' % name)
if not timeout:
timeout = int(DEFAULT_TEST_TIMEOUT)
#Figure out the file and line where the test is defined
frame = inspect.stack()[1]
cwd = os.path.dirname(inspect.getfile(frame[0])) or './'
test_location = {'cwd': cwd,
'filename': inspect.getfile(frame[0]),
'lineno': frame[0].f_lineno
}
t = TestCase(test_location, cmd, name, CURRENT_SUITE, success_codes, timeout)
ALL_TESTS.append(t)
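# Hedged usage sketch (added): a testsuite file would typically call the two helpers
# above; the suite name and commands here are invented, not taken from this repository.
#
#     DefSuite('smoke')
#     DefTest('echo hello', 'echo-works')                               # passes on exit code 0
#     DefTest('grep -q missing /etc/hosts', 'expect-miss', success_codes=[1])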
class SimpleEnum(object):
def __str__(self):
return self.__class__.__name__
    def __eq__(self, other):
        return self.__class__ == other.__class__
def __repr__(self):
return str(self)
class TestResult(object):
class PASS(SimpleEnum):
pass
class FAIL(SimpleEnum):
pass
class TIMEDOUT(SimpleEnum):
pass
    class NOTRUN(SimpleEnum):
pass
class TestFailure(object):
def __init__(self, test, msg):
self.test_name = test.name
self.result = test.result
self.msg = msg
def __str__(self):
return '%s %s:\n%s' % (self.result, self.test_name, self.msg)
def __repr__(self):
return str(self)
class MultiDelegate(object):
def __init__(self):
self.delegates = []
def __getattr__(self, name):
def handler(*args, **kwargs):
for d in self.delegates:
method = getattr(d, name)
method(*args, **kwargs)
return handler
class TerminalLog(object):
GREEN = '\033[92m'
RED = '\033[91m'
ENDC = '\033[0m'
def __init__(self, out = sys.stdout, verbose=False, show_command=False):
self.out = out
self.verbose = verbose
self.show_command = show_command
self.colorize = out.isatty()
def maybe_color(self, s, color):
if self.colorize:
return color + s + self.ENDC
else:
return s
def begin(self):
self.out.write(
'''## Testsuite started
## Invocation: %s
## Time: %s
''' % (' '.join(sys.argv), str(datetime.datetime.now())))
def start_suite(self, suite):
self.out.write('\n## Running testsuite: %s\n' % suite)
self.out.flush()
def start_test(self, test):
if self.show_command:
self.out.write(' Command: %s\n' % test.cmd)
self.out.write(' %-70s' % test.name)
self.out.flush()
def end_test(self, test):
if test.result == TestResult.PASS():
msg = self.maybe_color(str(test.result), self.GREEN)
elif test.result == TestResult.NOTRUN():
msg = str(test.result)
else:
msg = self.maybe_color(str(test.result), self.RED)
self.out.write('%s\n' % msg)
if self.verbose:
# might already shown the command
if test.errors:
self.out.write('Failed command: %s\n' % test.cmd)
for err in test.errors:
self.out.write('\n%s\n\n' % err)
self.out.flush()
def end(self, num_tests, num_failures):
self.out.write('\n')
if num_failures:
self.out.write(self.maybe_color('%d of %d tests failed\n' %
(num_failures, num_tests), self.RED))
else:
self.out.write(self.maybe_color('All %d tests passed\n' %
num_tests, self.GREEN))
if LOGFILE:
self.out.write('View complete log in the %s file.\n' % (LOGFILE))
self.out.flush()
class TextLog(object):
def __init__(self, logfile_name, verbose = False):
self.out = open(logfile_name, 'w')
self.logfile_name = logfile_name
self.verbose = verbose
def begin(self):
self.out.write(
'''## Testsuite started
## Time: %s
## Invocation: %s
''' % (str(datetime.datetime.now()), ' '.join(sys.argv)))
def start_suite(self, suite):
self.out.write('\n## Running testsuite: %s\n' % suite)
self.out.flush()
def start_test(self, test):
self.out.write('\n## Test: %s\n' % test.name)
self.out.write('## Command: %s\n' % test.cmd)
def end_test(self, test):
duration = timedelta_total_seconds(test.end_time - test.start_time)
self.out.write('## Duration: %f sec.\n' % duration)
self.out.write('## Result: %s\n' % test.result)
if test.errors:
self.out.write('## %s failures:\n' % str(test))
for err in test.errors:
self.out.write('\n%s\n' % err)
self.out.flush()
def end(self, num_tests, num_failures):
self.out.write('\n')
if num_failures:
self.out.write('%d of %d tests failed\n' % (num_failures, num_tests))
else:
self.out.write('All %d tests passed\n' % num_tests)
self.out.close()
class XMLLog(object):
def __init__(self, logfile_name):
self.out = open(logfile_name, 'w')
self.logfile_name = logfile_name
self.xml_doc = XMLGenerator(self.out, 'utf-8')
self.suite_started = False
def begin(self):
self.xml_doc.startDocument()
self.xml_doc.startElement('testsuites',AttributesImpl({}))
self.xml_doc.characters('\n')
self.xml_doc.startElement('invocation',AttributesImpl({}))
self.xml_doc.characters(' '.join(sys.argv))
self.xml_doc.endElement('invocation')
self.xml_doc.characters('\n')
def start_suite(self, suite):
if self.suite_started:
self.xml_doc.endElement('testsuite')
self.xml_doc.characters('\n')
self.suite_started = True
attrs = AttributesImpl({'name': suite})
self.xml_doc.startElement('testsuite', attrs)
self.xml_doc.characters('\n')
def start_test(self, test):
attrs = AttributesImpl({'name': test.name})
self.xml_doc.startElement('testcase', attrs)
self.xml_doc.characters('\n')
def end_test(self, test):
duration = timedelta_total_seconds(test.end_time - test.start_time)
self.xml_doc.startElement('duration',AttributesImpl({}))
self.xml_doc.characters(str(duration))
self.xml_doc.endElement('duration')
self.xml_doc.characters('\n')
attrs = AttributesImpl({})
self.xml_doc.startElement('result', attrs)
self.xml_doc.characters(str(test.result))
self.xml_doc.endElement('result')
self.xml_doc.characters('\n')
if test.errors:
self.xml_doc.startElement('errors', attrs)
self.xml_doc.characters('\n')
for err in test.errors:
self.xml_doc.startElement('error', attrs)
self.xml_doc.characters(str(err))
self.xml_doc.endElement('error')
self.xml_doc.characters('\n')
self.xml_doc.endElement('errors')
self.xml_doc.endElement('testcase')
self.xml_doc.characters('\n')
def end(self, num_tests, num_failures):
if self.suite_started:
self.xml_doc.endElement('testsuite')
self.xml_doc.characters('\n')
attrs = AttributesImpl({'tests': str(num_tests),
'failures': str(num_failures)})
self.xml_doc.startElement('result', attrs)
if num_failures:
self.xml_doc.characters(str(TestResult.FAIL()))
else
|
acrisci/i3ipc-glib
|
test/test_get_config.py
|
Python
|
gpl-3.0
| 347
| 0
|
from ipctest import IpcTest
from gi.repository import i3ipc
import pytest
@pytest.mark.skip(reason='TODO')
class TestGetConfig(IpcTest):
def test_get_config(self, i3):
config = i3.get_config()
        assert isinstance(config, i3ipc.ConfigReply)
with open('test/i3.config') as f:
assert config.config == f.read()
|
DESHRAJ/fjord
|
vendor/packages/nose/functional_tests/support/twist/test_twisted.py
|
Python
|
bsd-3-clause
| 304
| 0.003289
|
from twisted.trial import unittest
class TestTwisted(unittest.TestCase):
def test(self):
pass
def test_fail(self):
self.fail("I failed")
def test_error(self):
raise TypeError("oops, wrong type")
def test_skip(self):
        raise unittest.SkipTest('skip me')
|
CSchool/SchoolSite
|
CSchoolSite/main/migrations/0002_notification_queued.py
|
Python
|
apache-2.0
| 457
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-04 12:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
        ('main', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='notification',
name='queued',
field=models.BooleanField(db_index=True, default=False),
),
]
|
chirilo/remo
|
vendor-local/lib/python/rest_framework/renderers.py
|
Python
|
bsd-3-clause
| 24,971
| 0.000921
|
"""
Renderers are used to serialize a response into specific media types.
They give us a generic way of being able to handle various media types
on the response, such as JSON encoded data or HTML output.
REST framework also provides an HTML renderer that renders the browsable API.
"""
from __future__ import unicode_literals
import json
import django
from django import forms
from django.core.exceptions import ImproperlyConfigured
from django.core.paginator import Page
from django.http.multipartparser import parse_header
from django.template import Context, RequestContext, loader, Template
from django.test.client import encode_multipart
from django.utils import six
from rest_framework import exceptions, serializers, status, VERSION
from rest_framework.compat import SHORT_SEPARATORS, LONG_SEPARATORS, INDENT_SEPARATORS
from rest_framework.exceptions import ParseError
from rest_framework.settings import api_settings
from rest_framework.request import is_form_media_type, override_method
from rest_framework.utils import encoders
from rest_framework.utils.breadcrumbs import get_breadcrumbs
from rest_framework.utils.field_mapping import ClassLookupDict
def zero_as_none(value):
return None if value == 0 else value
class BaseRenderer(object):
"""
All renderers should extend this class, setting the `media_type`
and `format` attributes, and override the `.render()` method.
"""
media_type = None
format = None
charset = 'utf-8'
render_style = 'text'
def render(self, data, accepted_media_type=None, renderer_context=None):
raise NotImplementedError('Renderer class requires .render() to be implemented')
class JSONRenderer(BaseRenderer):
"""
Renderer which serializes to JSON.
"""
media_type = 'application/json'
format = 'json'
encoder_class = encoders.JSONEncoder
ensure_ascii = not api_settings.UNICODE_JSON
compact = api_settings.COMPACT_JSON
# We don't set a charset because JSON is a binary encoding,
# that can be encoded as utf-8, utf-16 or utf-32.
# See: http://www.ietf.org/rfc/rfc4627.txt
# Also: http://lucumr.pocoo.org/2013/7/19/application-mimetypes-and-encodings/
charset = None
def get_indent(self, accepted_media_type, renderer_context):
if accepted_media_type:
# If the media type looks like 'application/json; indent=4',
# then pretty print the result.
# Note that we coerce `indent=0` into `indent=None`.
base_media_type, params = parse_header(accepted_media_type.encode('ascii'))
try:
return zero_as_none(max(min(int(params['indent']), 8), 0))
except (KeyError, ValueError, TypeError):
pass
# If 'indent' is provided in the context, then pretty print the result.
# E.g. If we're being called by the BrowsableAPIRenderer.
return renderer_context.get('indent', None)
def render(self, data, accepted_media_type=None, renderer_context=None):
"""
Render `data` into JSON, returning a bytestring.
"""
if data is None:
return bytes()
renderer_context = renderer_context or {}
indent = self.get_indent(accepted_media_type, renderer_context)
if indent is None:
separators = SHORT_SEPARATORS if self.compact else LONG_SEPARATORS
else:
separators = INDENT_SEPARATORS
ret = json.dumps(
data, cls=self.encoder_class,
indent=indent, ensure_ascii=self.ensure_ascii,
separators=separators
)
# On python 2.x json.dumps() returns bytestrings if ensure_ascii=True,
# but if ensure_ascii=False, the return type is underspecified,
# and may (or may not) be unicode.
# On python 3.x json.dumps() returns unicode strings.
if isinstance(ret, six.text_type):
# We always fully escape \u2028 and \u2029 to ensure we output JSON
# that is a strict javascript subset. If bytes were returned
# by json.dumps() then we don't have these characters in any case.
# See: http://timelessrepo.com/json-isnt-a-javascript-subset
ret = ret.replace('\u2028', '\\u2028').replace('\u2029', '\\u2029')
return bytes(ret.encode('utf-8'))
return ret
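# Illustrative note (added): the indent negotiation above means the same payload can be
# rendered compactly or pretty-printed depending on the Accept header parameters.
#
#     renderer = JSONRenderer()
#     renderer.render({'a': 1})
#     # -> b'{"a":1}' or b'{"a": 1}', depending on the COMPACT_JSON setting
#     renderer.render({'a': 1}, accepted_media_type='application/json; indent=4')
#     # -> pretty-printed with a 4-space indent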
class TemplateHTMLRenderer(BaseRenderer):
"""
An HTML renderer for use with templates.
|
    The data supplied to the Response object should be a dictionary that will
be used as context for the template.
The template name is determined by (in order of preference):
1. An explicit `.template_name` attribute set on the response.
2. An explicit `.template_name` attribute set on this class.
3. The return result of calling `view.get_template_names()`.
For example:
data = {'users': User.objects.all()}
return Response(data, template_name='users.html')
For pre-rendered HTML, see StaticHTMLRenderer.
"""
media_type = 'text/html'
format = 'html'
template_name = None
exception_template_names = [
'%(status_code)s.html',
'api_exception.html'
]
charset = 'utf-8'
def render(self, data, accepted_media_type=None, renderer_context=None):
"""
Renders data to HTML, using Django's standard template rendering.
The template name is determined by (in order of preference):
1. An explicit .template_name set on the response.
2. An explicit .template_name set on this class.
3. The return result of calling view.get_template_names().
"""
renderer_context = renderer_context or {}
view = renderer_context['view']
request = renderer_context['request']
response = renderer_context['response']
if response.exception:
template = self.get_exception_template(response)
else:
template_names = self.get_template_names(response, view)
template = self.resolve_template(template_names)
context = self.resolve_context(data, request, response)
return template.render(context)
def resolve_template(self, template_names):
return loader.select_template(template_names)
def resolve_context(self, data, request, response):
if response.exception:
data['status_code'] = response.status_code
return RequestContext(request, data)
def get_template_names(self, response, view):
if response.template_name:
return [response.template_name]
elif self.template_name:
return [self.template_name]
elif hasattr(view, 'get_template_names'):
return view.get_template_names()
elif hasattr(view, 'template_name'):
return [view.template_name]
raise ImproperlyConfigured(
'Returned a template response with no `template_name` attribute set on either the view or response'
)
def get_exception_template(self, response):
template_names = [name % {'status_code': response.status_code}
for name in self.exception_template_names]
try:
# Try to find an appropriate error template
return self.resolve_template(template_names)
except Exception:
# Fall back to using eg '404 Not Found'
return Template('%d %s' % (response.status_code,
response.status_text.title()))
# Note, subclass TemplateHTMLRenderer simply for the exception behavior
class StaticHTMLRenderer(TemplateHTMLRenderer):
"""
An HTML renderer class that simply returns pre-rendered HTML.
The data supplied to the Response object should be a string representing
the pre-rendered HTML content.
For example:
data = '<html><body>example</body></html>'
return Response(data)
For template rendered HTML, see TemplateHTMLRenderer.
"""
media_type = 'text/html'
format = 'html'
charset = 'utf-8'
def render(self, data, accepted_media_type=None, renderer_context=None):
rendere
|
meidli/yabgp
|
yabgp/handler/default_handler.py
|
Python
|
apache-2.0
| 7,759
| 0.000516
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# import json
# replace with simplejson
import simplejson as json
import os
import time
import logging
import traceback
import sys
from oslo_config import cfg
from yabgp.common import constants as bgp_cons
from yabgp.handler import BaseHandler
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
MSG_PROCESS_OPTS = [
cfg.BoolOpt('write_disk',
default=True,
help='Whether the BGP message is written to disk'),
cfg.StrOpt('write_dir',
default=os.path.join(os.environ.get('HOME') or '.', 'data/bgp/'),
help='The BGP messages storage path'),
cfg.IntOpt('write_msg_max_size',
default=500,
help='The Max size of one BGP message file, the unit is MB'),
cfg.BoolOpt('write_keepalive',
default=False,
help='Whether write keepalive message to disk')
]
CONF.register_opts(MSG_PROCESS_OPTS, group='message')
class DefaultHandler(BaseHandler):
def __init__(self):
super(DefaultHandler, self).__init__()
'''
{<peer>: (<path>, <current file>)}
'''
self.peer_files = {}
'''
{<peer>: <seq number>}
'''
self.msg_sequence = {}
def init(self):
if CONF.message.write_disk:
self.init_msg_file(CONF.bgp.running_config['remote_addr'].lower())
def init_msg_file(self, peer_addr):
msg_file_path_for_peer = os.path.join(
CONF.message.write_dir,
peer_addr
)
if not os.path.exists(msg_file_path_for_peer):
os.makedirs(msg_file_path_for_peer)
LOG.info('Create dir %s for peer %s', msg_file_path_for_peer, peer_addr)
LOG.info('BGP message file path is %s', msg_file_path_for_peer)
if msg_file_path_for_peer and peer_addr not in self.peer_files:
msg_path = msg_file_path_for_peer + '/msg/'
if not os.path.exists(msg_path):
os.makedirs(msg_path)
# try get latest file and msg sequence if any
last_msg_seq, msg_file_name = DefaultHandler.get_last_seq_and_file(msg_path)
if not msg_file_name:
msg_file_name = "%s.msg" % time.time()
# store the message sequence
self.msg_sequence[peer_addr] = last_msg_seq + 1
msg_file = open(os.path.join(msg_path, msg_file_name), 'a')
msg_file.flush()
self.peer_files[peer_addr] = (msg_path, msg_file)
LOG.info('BGP message file %s', msg_file_name)
LOG.info('The last bgp message seq number is %s', last_msg_seq)
@staticmethod
def get_last_seq_and_file(msg_path):
"""
Get the last sequence number in the latest log file.
"""
LOG.info('get the last bgp message seq for this peer')
last_seq = 0
# first get the last file
file_list = os.listdir(msg_path)
if not file_list:
return last_seq, None
file_list.sort()
msg_file_name = file_list[-1]
try:
with open(msg_path + msg_file_name, 'r') as fh:
line = None
for line in fh:
pass
last = line
if line:
if last.startswith('['):
last_seq = eval(last)[1]
elif last.startswith('{'):
last_seq = json.loads(last)['seq']
except OSError:
LOG.error('Error when reading bgp message files')
except Exception as e:
LOG.debug(traceback.format_exc())
LOG.error(e)
sys.exit()
return last_seq, msg_file_name
def write_msg(self, peer, timestamp, msg_type, msg):
"""
write bgp message into local disk file
:param peer: peer address
:param timestamp: timestamp
:param msg_type: message type (0,1,2,3,4,5,6)
:param msg: message dict
:param msg_path: path to store messages on disk
:return:
"""
msg_path, msg_file = self.peer_files.get(peer.lower(), (None, None))
if msg_path:
msg_seq = self.msg_sequence[peer.lower()]
msg_record = {
't': timestamp,
'seq': msg_seq,
'type': msg_type
}
msg_record.update(msg)
try:
json.dump(msg_record, msg_file)
except Exception as e:
LOG.error(e)
LOG.info('raw message %s', msg)
msg_file.write('\n')
self.msg_sequence[peer.lower()] += 1
msg_file.flush()
os.fsync(msg_file.fileno())
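    # Added illustration of the JSON line appended by write_msg(); the field values
    # below are invented for the example:
    #
    #     {"t": 1500000000.0, "seq": 12, "type": 2, "msg": {...}}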
def check_file_size(self, peer):
"""if the size of the msg file is bigger than 'max_msg_file_size',
then save as and re-open a new file.
"""
msg_path, cur_file = self.peer_files.get(peer.lower(), (None, None))
if msg_path:
if os.path.getsize(cur_file.name) >= CONF.message.write_msg_max_size:
cur_file.close()
                msg_file_name = "%s.msg" % time.time()
LOG.info('Open a new message file %s', msg_file_name)
msg_file = open(os.path.join(msg_path + msg_file_name), 'a')
self.peer_files[peer.lower()] = (msg_path, msg_file)
return True
return False
def on_update_error(self, peer, timestamp, msg):
self.write_msg(
peer=peer.factory.peer_addr,
timestamp=timestamp,
            msg_type=6,
msg={'msg': msg}
)
def update_received(self, peer, timestamp, msg):
# write message to disk
self.write_msg(
peer=peer.factory.peer_addr,
timestamp=timestamp,
msg_type=bgp_cons.MSG_UPDATE,
msg={"msg": msg}
)
self.check_file_size(peer.factory.peer_addr)
def keepalive_received(self, peer, timestamp):
"""
keepalive message default handler
:param peer:
:param timestamp:
:return:
"""
if peer.msg_recv_stat['Keepalives'] == 1:
# do something with the connection establish event
pass
if CONF.message.write_keepalive:
# write bgp message
self.write_msg(
peer=peer.factory.peer_addr,
timestamp=timestamp,
msg_type=4,
msg={"msg": None}
)
def open_received(self, peer, timestamp, result):
# write bgp message
self.write_msg(
peer=peer.factory.peer_addr,
timestamp=timestamp,
msg_type=1,
msg={"msg": result}
)
def route_refresh_received(self, peer, msg, msg_type):
self.write_msg(
peer=peer.factory.peer_addr,
timestamp=time.time(),
msg_type=msg_type,
msg={"msg": msg}
)
def notification_received(self, peer, msg):
self.write_msg(
peer=peer.factory.peer_addr,
timestamp=time.time(),
msg_type=3,
msg={"msg": msg}
)
def on_connection_lost(self, peer):
self.write_msg(
peer=peer.factory.peer_addr,
timestamp=time.time(),
msg_type=bgp_cons.MSG_BGP_CLOSED,
msg={"msg": None}
)
def on_connection_failed(self, peer, msg):
self.write_msg(
peer=peer,
timestamp=time.time(),
msg_type=0,
msg={"msg": msg}
)
def on_established(self, peer, msg):
pass
|
msztolcman/ff
|
test/mocks/__init__.py
|
Python
|
mit
| 190
| 0.005263
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals, division
import os, os.path
import re
import sys
from pprint import pprint, pformat
|
ronaldahmed/robot-navigation
|
neural-navigation-with-lstm/MARCO/nltk/parser/__init__.py
|
Python
|
mit
| 52,728
| 0.002731
|
# Natural Language Toolkit: Parsers
#
# Copyright (C) 2001 University of Pennsylvania
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# Steven Bird <sb@ldc.upenn.edu>
# Scott Currie <sccurrie@seas.upenn.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
#
# $Id: __init__.py,v 1.1.1.2 2004/09/29 21:58:23 adastra Exp $
"""
Classes and interfaces for producing tree structures that represent
the internal organization of a text. This task is known as X{parsing}
the text, and the resulting tree structures are called the text's
X{parses}. Typically, the text is a single sentence, and the tree
structure represents the syntactic structure of the sentence.
However, parsers can also be used in other domains. For example,
parsers can be used to derive the morphological structure of the
morphemes that make up a word, or to derive the discourse structure
for a set of utterances.
Sometimes, a single piece of text can be represented by more than one
tree structure. Texts represented by more than one tree structure are
called X{ambiguous} texts. Note that there are actually two ways in
which a text can be ambiguous:
- The text has multiple correct parses.
- There is not enough information to decide which of several
candidate parses is correct.
However, the parser module does I{not} distinguish these two types of
ambiguity.
The parser module defines C{ParserI}, a standard interface for parsing
texts; and two simple implementations of that interface,
C{ShiftReduceParser} and C{RecursiveDescentParser}. It also contains
three sub-modules for specialized kinds of parsing:
- C{nltk.parser.chart} defines chart parsing, which uses dynamic
programming to efficiently parse texts.
- C{nltk.parser.chunk} defines chunk parsing, which identifies
non-overlapping linguistic groups in a text.
- C{nltk.parser.probabilistic} defines probabilistic parsing, which
associates a probability with each parse.
@group Interfaces: ParserI
@group Parsers: ShiftReduceParser, SteppingShiftReduceParser,
RecursiveDescentParser, SteppingRecursiveDescentParser
@sort: ParserI, ShiftReduceParser, SteppingShiftReduceParser,
RecursiveDescentParser, SteppingRecursiveDescentParser,
demo, chart, chunk, probabilistic
@see: C{nltk.cfg}
"""
from nltk import TaskI, PropertyIndirectionMixIn
from nltk.tree import Tree, ImmutableTree
from nltk.token import Token
from nltk.cfg import Nonterminal, CFG, CFGProduction, nonterminals
from nltk.chktype import chktype
import types
##//////////////////////////////////////////////////////
## Parser Interface
##//////////////////////////////////////////////////////
class ParserI(TaskI):
"""
A processing class for deriving trees that represent possible
structures for a sequence of tokens. These tree structures are
known as X{parses}. Typically, parsers are used to derive syntax
trees for sentences. But parsers can also be used to derive other
kinds of tree structure, such as morphological trees and discourse
structures.
@inprop: C{SUBTOKENS}: The list of subtokens to be parsed.
@outprop: C{TREE}: The parse tree. I{(generated by L{parse})}
@outprop: C{TREES}: A list of possible parse trees.
I{(generated by L{parse_n})}
"""
def parse(self, token):
"""
Derive a parse tree that represents the structure of the given
token's C{SUBTOKENS}, and output it to the token's C{TREE}
        property. If no parses are found, then output C{None}. If
multiple parses are found, then output the best parse.
The parsed trees derive a structure for the subtokens, but do
not modify them. In particular, the leaves of the subtree
should be equal to the list of subtokens.
@param token: The token whose subtokens should be parsed.
@type token: L{Token}
"""
raise NotImplementedError()
def get_parse(self, token):
"""
@return: A parse tree that represents the structure of the
given token's C{SUBTOKENS}. If no parse is found, then return
C{None}.
@rtype: L{Tree}
@param token: The token whose subtokens should be parsed.
@type token: L{Token}
"""
def get_parse_list(self, token):
"""
@return: A list of the parse trees that could represent the
structure of the given token's C{SUBTOKENS}. When possible,
this list should be sorted from most likely to least likely.
@rtype: C{list} of L{Tree}
@param token: The token whose subtokens should be parsed.
@type token: L{Token}
"""
def get_parse_probs(self, token):
"""
@return: A probability distribution over the parse trees that
could represent the structure of the given token's
C{SUBTOKENS}.
@rtype: L{ProbDistI}
@param token: The token whose subtokens should be parsed.
@type token: L{Token}
"""
    def get_parse_dict(self, token):
        """
        @return: A dictionary mapping from parse trees that could
represent the structure of the given token's C{SUBTOKENS} to
numeric scores.
@rtype: C{dict}
@param token: The token whose subtokens should be parsed.
@type token: L{Token}
"""
##//////////////////////////////////////////////////////
## Abstract Base Class for Parsers
##//////////////////////////////////////////////////////
class AbstractParser(ParserI, PropertyIndirectionMixIn):
"""
An abstract base class for parsers. C{AbstractParser} provides
a default implementation for:
- L{parse} (based on C{get_parse})
- L{get_parse_list} (based on C{get_parse})
- L{get_parse} (based on C{get_parse_list})
Note that subclasses must override either C{get_parse} or
C{get_parse_list} (or both), to avoid infinite recursion.
"""
def __init__(self, **property_names):
"""
Construct a new parser.
@type property_names: C{dict}
@param property_names: A dictionary that can be used to override
the default property names. Each entry maps from a
default property name to a new property name.
"""
# Make sure we're not directly instantiated:
if self.__class__ == AbstractParser:
raise AssertionError, "Abstract classes can't be instantiated"
PropertyIndirectionMixIn.__init__(self, **property_names)
def parse(self, token):
TREE = self.property('TREE')
token[TREE] = self.get_parse(token)
def get_parse(self, token):
trees = self.get_parse_list(token)
if len(trees) == 0: return None
else: return trees[0]
def get_parse_list(self, token):
tree = self.get_parse(token)
if tree is None: return []
else: return [tree]
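# Added note: a concrete subclass only needs to supply one of the two primitives; the
# defaults above derive the other. A hypothetical minimal parser (names invented):
#
#     class FirstWordParser(AbstractParser):
#         def get_parse(self, token):
#             subtoks = token[self.property('SUBTOKENS')]
#             return Tree('S', subtoks[:1]) if subtoks else None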
##//////////////////////////////////////////////////////
## Shift/Reduce Parser
##//////////////////////////////////////////////////////
class ShiftReduceParser(AbstractParser):
"""
A simple bottom-up CFG parser that uses two operations, "shift"
and "reduce", to find a single parse for a text.
C{ShiftReduceParser} maintains a stack, which records the
structure of a portion of the text. This stack is a list of
C{Token}s and C{Tree}s that collectively cover a portion of
the text. For example, while parsing the sentence "the dog saw
    the man" with a typical grammar, C{ShiftReduceParser} will produce
the following stack, which covers "the dog saw"::
[(NP: (Det: <'the'>) (N: <'dog'>)), (V: <'saw'>)]
C{ShiftReduceParser} attempts to extend the stack to cover the
entire text, and to combine the stack elements into a single tree,
producing a complete parse for the sentence.
    Initially, the stack is empty. It is extended to cover the text,
from left to right, by repeatedly applying two operations:
- X{shift} moves a token from the beginning of the text to the
end of the stack.
- X{reduce} uses a CFG production to com
|
gagnonlg/explore-ml
|
sngp/tf_import/__init__.py
|
Python
|
gpl-3.0
| 136
| 0
|
from .gaussian_process import RandomFeatureGaussianProcess, mean_field_logits
from .spectral_normalization import SpectralNormalization
|
Alexanderkorn/Automatisation
|
oude scripts/les 5/fibonacci-reeks.py
|
Python
|
gpl-3.0
| 415
| 0.007264
|
__author__ = 'alexander'
def fibonacci(n):
voorlaatste_cijfer = 1
laatste_cijfer = 1
print(voorlaatste_cijfer)
print(laatste_cijfer)
    for i in range(n-2):  # n - 2, because the first two numbers are already known
nieuw_cijfer = laatste_cijfer + voorlaatste_cijfer
print(nieuw_cijfer)
        voorlaatste_cijfer = laatste_cijfer
        laatste_cijfer = nieuw_cijfer
fibonacci(6)
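# Added illustration: the call above prints the first six Fibonacci numbers,
# one per line: 1, 1, 2, 3, 5, 8.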
|
antoinecarme/pyaf
|
tests/artificial/transf_RelativeDifference/trend_MovingAverage/cycle_7/ar_/test_artificial_32_RelativeDifference_MovingAverage_7__20.py
|
Python
|
bsd-3-clause
| 276
| 0.083333
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 7, transform = "RelativeDifference", sigma = 0.0, exog_count = 20, ar_order = 0);
|
zutshi/S3CAMX
|
src/example_list.py
|
Python
|
bsd-2-clause
| 2,248
| 0.003114
|
from __future__ import print_function
import configparser as cp
import fileOps as f
import err
import logging
#EXAMPLE_LIST_FN = 'example_list'
example_list_str = \
'''0_vanDerPol = ./examples/vanderpol_python/
1_vanDerPol = ./examples/vanderpol_m_file/
2_dc = ./examples/dc_controller_hand_coded/
3_dci = ./examples/dc_controller_hand_coded_input/
4_ex1a = ./examples/ex1a/
5_ex1b = ./examples/ex1b/
6_AbstractFuelControl = ./examples/abstractFuelControl/
7_AbstractFuelControl = ./examples/abstractFuelControlCombined/
8_fuzzy_invp = ./examples/fuzzy_invp/
9_heater = ./examples/heater/
10_GIF = ./examples/GI_fisher/'''
logger = logging.getLogger(__name__)
def get_example_list_old():
## ##!!##logger.debug('reading example listing: {}'.format(EXAMPLE_LIST_FN))
example_dict = cp.parse_config(example_list_str)
# TODO: Remedy below hack
# Ugly hack: parse_config() adds type = string
# To fix it, we just delete it.
del example_dict['type']
example_list = [0] * len(example_dict)
for (k, v) in example_dict.iteritems():
d = {}
(n, k) = k.split('_', 1) # split only on the first '_'
d['filename'] = k + '.tst'
d['path'] = v
d['description'] = v + k
example_list[int(n)] = d
return example_list
def get_example_list():
return crawl_examples()
def crawl_examples():
EXAMPLE_DICT = './examples/'
TST_FILE_GLOB_PATTERN = '*.tst'
example_list = []
sub_dir_list = f.get_sub_dir_listing(EXAMPLE_DICT)
for sub_dir in sub_dir_list:
file_name_list = f.get_file_list_matching(TST_FILE_GLOB_PATTERN, sub_dir)
if len(file_name_list) > 1:
raise err.Fatal('More than one .tst file found!! {}'.format(file_name_list))
if len(file_name_list) != 0:
file_path = f.get_abs_base_path(file_name_list[0])
system_name = f.get_file_name_from_path(file_name_list[0])
d = {}
d['filename'] = system_name
d['path'] = file_path
d['description'] = '{:-<50} {}'.format(system_name, file_path)
example_list.append(d)
return example_list
if __name__ == '__main__':
for i in crawl_examples():
print(i['description'])
|
AntonioMtn/NZBMegaSearch
|
werkzeug/contrib/kickstart.py
|
Python
|
gpl-2.0
| 11,308
| 0.000354
|
# -*- coding: utf-8 -*-
"""
werkzeug.contrib.kickstart
~~~~~~~~~~~~~~~~~~~~~~~~~~
This module provides some simple shortcuts to make using Werkzeug simpler
for small scripts.
These improvements include predefined `Request` and `Response` objects as
well as a predefined `Application` object which can be customized in child
    classes, of course. The `Request` and `Response` objects handle URL
generation as well as sessions via `werkzeug.contrib.sessions` and are
purely optional.
There is also some integration of template engines. The template loaders
    are, of course, not necessary to use the template engines in Werkzeug,
but they provide a common interface. Currently supported template engines
include Werkzeug's minitmpl and Genshi_. Support for other engines can be
added in a trivial way. These loaders provide a template interface
similar to the one used by Django_.
.. _Genshi: http://genshi.edgewall.org/
.. _Django: http://www.djangoproject.com/
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from os import path
from ..wrappers import Request as RequestBase, Response as ResponseBase
from ..templates import Template
from ..exceptions import HTTPException
from ..routing import RequestRedirect
__all__ = ['Request', 'Response', 'TemplateNotFound', 'TemplateLoader',
'GenshiTemplateLoader', 'Application']
from warnings import warn
warn(DeprecationWarning('werkzeug.contrib.kickstart is deprecated and '
'will be removed in Werkzeug 1.0'))
class Request(RequestBase):
"""A handy subclass of the base request that adds a URL builder.
    When supplied with a session store, it is also able to handle sessions.
"""
def __init__(self, environ, url_map,
session_store=None, cookie_name=None):
# call the parent for initialization
RequestBase.__init__(self, environ)
# create an adapter
self.url_adapter = url_map.bind_to_environ(environ)
# create all stuff for sessions
self.session_store = session_store
self.cookie_name = cookie_name
if session_store is not None and cookie_name is not None:
if cookie_name in self.cookies:
# get the session out of the storage
self.session = session_store.get(self.cookies[cookie_name])
else:
# create a new session
self.session = session_store.new()
def url_for(self, callback, **values):
return self.url_adapter.build(callback, values)
class Response(ResponseBase):
"""
A subclass of base response which sets the default mimetype to text/html.
    If the `Request` that came in is using Werkzeug sessions, this class
takes care of saving that session.
"""
default_mimetype = 'text/html'
def __call__(self, environ, start_response):
# get the request object
request = environ['werkzeug.request']
if request.session_store is not None:
            # save the session if necessary
request.session_store.save_if_modified(request.session)
# set the cookie for the browser if it is not there:
if request.cookie_name not in request.cookies:
self.set_cookie(request.cookie_name, request.session.sid)
# go on with normal response business
return ResponseBase.__call__(self, environ, start_response)
class Processor(object):
"""A request and response processor - it is what Django calls a
    middleware, but Werkzeug also includes straightforward support for real
WSGI middlewares, so another name was chosen.
The code of this processor is derived from the example in the Werkzeug
trac, called `Request and Response Processor
<http://dev.pocoo.org/projects/werkzeug/wiki/RequestResponseProcessor>`_
"""
def process_request(self, request):
return request
def process_response(self, request, response):
return response
def process_view(self, request, view_func, view_args, view_kwargs):
"""process_view() is called just before the Application calls the
function specified by view_func.
If this returns None, the Application processes the next Processor,
and if it returns something else (like a Response instance), that
will be returned without any further processing.
"""
return None
def process_exception(self, request, exception):
return None
class Application(object):
"""A generic WSGI application which can be used to start with Werkzeug in
an easy, straightforward way.
"""
def __init__(self, name, url_map, session=False, processors=None):
# save the name and the URL-map, as it'll be needed later on
self.name = name
self.url_map = url_map
# save the list of processors if supplied
self.processors = processors or []
# create an instance of the storage
if session:
self.store = session
else:
self.store = None
def __call__(self, environ, start_response):
# create a request - with or without session support
if self.store is not None:
request = Request(environ, self.url_map,
session_store=self.store, cookie_name='%s_sid' % self.name)
else:
request = Request(environ, self.url_map)
# apply the request processors
for processor in self.processors:
request = processor.process_request(request)
try:
# find the callback to which the URL is mapped
callback, args = request.url_adapter.match(request.path)
except (HTTPException, RequestRedirect), e:
response = e
else:
|
# check all view processors
for processor in self.processors:
action = processor.process_view(request, callback
|
, (), args)
if action is not None:
# it is overriding the default behaviour, this is
# short-circuiting the processing, so it returns here
return action(environ, start_response)
try:
response = callback(request, **args)
except Exception, exception:
# the callback raised some exception, need to process that
for processor in reversed(self.processors):
# filter it through the exception processor
action = processor.process_exception(request, exception)
if action is not None:
# the exception processor returned some action
return action(environ, start_response)
# still not handled by a exception processor, so re-raise
raise
# apply the response processors
for processor in reversed(self.processors):
response = processor.process_response(request, response)
# return the completely processed response
return response(environ, start_response)
def config_session(self, store, expiration='session'):
"""
Configures the setting for cookies. You can also disable cookies by
setting store to None.
"""
self.store = store
# expiration=session is the default anyway
# TODO: add settings to define the expiration date, the domain, the
        # path and maybe the secure parameter.
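A minimal usage sketch for the Application class above, assuming werkzeug's standard Map/Rule routing; the view function, names, and port are hypothetical and not part of the original module:

from werkzeug.routing import Map, Rule
from werkzeug.serving import run_simple

def index(request):
    # views receive the kickstart Request and return a Response
    return Response('Hello from the kickstart Application')

url_map = Map([Rule('/', endpoint=index)])  # endpoints are the view callbacks themselves
demo_app = Application('demo', url_map)

if __name__ == '__main__':
    run_simple('localhost', 5000, demo_app)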
class TemplateNotFound(IOError, LookupError):
"""
A template was not found by the template loader.
"""
def __init__(self, name):
IOError.__init__(self, name)
self.name = name
class TemplateLoader(object):
"""
A simple loader interface for the werkzeug minitmpl
template language.
"""
def __init__(self, search_path, encoding='utf-8'):
self.search_path = path.abspath(search_path)
self.encoding = encoding
def get_template(self, name):
"""Get a template from
|
avanzosc/avanzosc6.1
|
avanzosc_stpick_expectdate/__init__.py
|
Python
|
agpl-3.0
| 1,019
| 0.003925
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2008-2014 AvanzOSC (Daniel). All Rights Reserved
# Date: 20/02/2014
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License
|
as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You
|
should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
import wizard
|
ProjectSWGCore/NGECore2
|
scripts/loot/lootItems/re_junk/mark_v_vocab_module.py
|
Python
|
lgpl-3.0
| 418
| 0.064593
|
def itemTemplate():
return ['o
|
bject/tangible/loot/npc_loot/shared_softwa
|
re_module_orange_generic.iff']
def customItemName():
return 'Mark V Vocab Module'
def lootDescriptor():
return 'customattributes'
def customizationAttributes():
return ['/private/index_color_1']
def customizationValues():
return [0]
def stackable():
return 1
def junkDealerPrice():
return 28
def junkType():
return 0
|
stdweird/aquilon
|
upgrade/1.8.18/add_sandbox_startpoint.py
|
Python
|
apache-2.0
| 4,924
| 0.002437
|
#!/usr/bin/env python2.6
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2012,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Add start points for existing sandboxes
"""
import os, os.path
import sys
import logging
BINDIR = os.path.dirname(os.path.realpath(sys.argv[0]))
sys.path.append(os.path.join(BINDIR, "..", "..", "lib", "python2.6"))
import aquilon.aqdb.depends
import aquilon.worker.depends
from aquilon.config import C
|
onfig
from sqlalchemy.orm import defer
from sqlalchemy.sql import text
from sqlalchemy.exc import DatabaseError
from aquilon.aqdb.model import Base, Sandbox, Domain
from aquilon.aqdb.db_factory import DbFactory
from aquilon.worker.processes import r
|
un_git
db = DbFactory()
Base.metadata.bind = db.engine
session = db.Session()
config = Config()
def main():
print "Calculating sandbox base commits. This may take around 10 minutes."
logging.basicConfig(level=logging.WARNING)
kingdir = config.get("broker", "kingdir")
domains = session.query(Domain).all()
# Define preference order when multiple domains have the same commits.
# This is just cosmetics, but makes it easier to verify the output.
for idx, domain in enumerate(("prod", "qa", "secure-aquilon-prod",
"secure-aquilon-qa")):
dbdom = Domain.get_unique(session, domain, compel=True)
domains.remove(dbdom)
domains.insert(idx, dbdom)
base_commits = {}
q = session.query(Sandbox)
q = q.order_by('name')
# The base_commit column does not exist yet...
q = q.options(defer("base_commit"))
for sandbox in q:
base_domain = None
base_commit = None
min_ahead = None
commits = run_git(["rev-list", "refs/heads/" + sandbox.name], path=kingdir).split("\n")
for domain in domains:
merge_base = run_git(["merge-base", "refs/heads/" + sandbox.name,
"refs/heads/" + domain.name],
path=kingdir).strip()
# Number of commits since branching from the given domain
ahead = commits.index(merge_base)
if base_domain is None or ahead < min_ahead:
base_domain = domain
base_commit = merge_base
min_ahead = ahead
if min_ahead == 0:
break
print "{0: <40}: {1.name} (ahead {2})".format(sandbox, base_domain,
min_ahead)
base_commits[sandbox.name] = base_commit
session.expunge_all()
try:
if session.bind.dialect.name == 'oracle':
query = text("""
ALTER TABLE sandbox ADD base_commit VARCHAR2(40 CHAR)
""")
elif session.bind.dialect.name == 'postgresql':
query = text("""
ALTER TABLE sandbox ADD base_commit CHARACTER VARYING (40)
""")
print "\nExecuting: %s" % query
session.execute(query)
session.commit()
except DatabaseError:
# Allow the script to be re-run by not failing if the column already
# exists. If the column does not exist, then trying to update it will
# fail anyway.
print """
WARNING: Adding the sandbox.base_commit column has failed. If you're running
this script for the second time, then that's likely OK, otherwise you should
verify and correct the schema manually.
"""
session.rollback()
for sandbox in q:
sandbox.base_commit = base_commits[sandbox.name]
session.commit()
try:
if session.bind.dialect.name == 'oracle':
query = text("""
ALTER TABLE sandbox MODIFY (base_commit VARCHAR2(40 CHAR)
CONSTRAINT sandbox_base_commit_nn NOT NULL)
""")
elif session.bind.dialect.name == 'postgresql':
query = text("""
ALTER TABLE sandbox ALTER COLUMN base_commit SET NOT NULL
""")
print "\nExecuting: %s" % query
session.execute(query)
session.commit()
except DatabaseError:
print """
WARNING: Enabling the NOT NULL constraint for sandbox.base_commit column has
failed. If you're running this script for the second time, then that's likely
OK, otherwise you should verify and correct the schema manually.
"""
session.rollback()
if __name__ == '__main__':
main()
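The core of the script is the "how many commits is the sandbox ahead of its base domain" computation; a standalone sketch of that idea using plain subprocess calls instead of aquilon's run_git helper (repository path and branch names are hypothetical):

import subprocess

def commits_ahead(repo_path, branch, base_branch):
    def git(*args):
        out = subprocess.check_output(('git', '-C', repo_path) + args)
        return out.decode('utf-8').strip()
    commits = git('rev-list', 'refs/heads/' + branch).split('\n')
    merge_base = git('merge-base', 'refs/heads/' + branch, 'refs/heads/' + base_branch)
    # the position of the merge base in the branch history is the number of
    # commits the branch is ahead of the base
    return commits.index(merge_base)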
|
structrans/Canon
|
test/seq/test_seqreader.py
|
Python
|
mit
| 560
| 0
|
import pytest
from canon.seq.seqreader import SeqReader
from .. import resource
def test_read_seq():
reader = SeqReader(resource('seq/Quartz_500Mpa_.SEQ'))
reader.get_Om()
Z, _, N = reader.get_Zmap('orsnr___')
def test_merge_Zmap():
reader = SeqReader()
reader.read_seq(r
|
esource(
|
'seq/au30_a1_.SEQ'))
Z1, _, N1 = reader.get_Zmap('orsnr___')
reader.read_seq(resource('seq/au30_m1_.SEQ'))
Z2, _, N2 = reader.get_Zmap('orsnr___')
Z, N = SeqReader.merge_Zmap(Z1, Z2, N1, N2)
if __name__ == '__main__':
pytest.main()
|
rafaelmartins/blohg
|
blohg/vcs_backends/git/changectx.py
|
Python
|
gpl-2.0
| 4,270
| 0
|
# -*- coding: utf-8 -*-
"""
blohg.vcs_backends.git.changectx
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Model with classes to represent Git change context.
:copyright: (c) 2010-2013 by Rafael Goncalves Martins
:license: GPL-2, see LICENSE for more details.
"""
import time
from flask.helpers import locked_cached_property
from pygit2 import Repository, GIT_OBJ_BLOB, GIT_OBJ_TREE
from zlib import adler32
from blohg.vcs_backends.git.filectx import FileCtx
from blohg.vcs import ChangeCtx
class ChangeCtxDefault(ChangeCtx):
"""Class with the specific implementation details for the change context
of the default revision state of the repository. It inherits the common
implementation from the class :class:`ChangeCtxBase`.
"""
def __init__(self, repo_path):
self._repo_path = repo_path
self._repo = Repository(self._repo_path)
self._ctx = self._repo[self.revision_id]
@locked_cached_property
def files(self):
def r(_files, repo, tree, prefix=None):
for entry in tree:
obj = repo[entry.oid]
filename = prefix and (prefix + '/' + entry.name) or entry.name
if obj.type == GIT_OBJ_TREE:
r(_files, repo, obj, filename)
elif obj.type == GIT_OBJ_BLOB:
_files.append(filename)
else:
raise RuntimeError('Invalid object: %s' % filename)
f = []
r(f, self._repo, self._ctx.tree)
return sorted(f)
@locked_cached_property
def revision_id(self):
"""This property should be cached because the lookup_reference method
reloads itself.
"""
try:
ref = self._repo.lookup_reference('refs/heads/master')
except Exception:
raise RuntimeError('Branch "master" not found!')
return ref.target
def needs_reload(self):
try:
ref = self._repo.lookup_reference('refs/heads/master')
except Exception:
return True
return self.revision_id != ref.target
def filectx_needs_reload(self, filectx):
try:
ref = self._repo.lookup_reference('refs/heads/master')
except Exception:
raise RuntimeError('Branch "master" not found!')
return filectx._changectx.oid != ref.target
def published(self, date, now):
return date <= now
def etag(self, filectx):
return 'blohg-%i-%i-%s' % (filectx.mdate or filectx.date,
len(filectx.data),
adler32(filectx.path.encode('utf-8'))
& 0xffffffff)
def get_filectx(self, path):
return FileCtx(self._repo, self._ctx, path)
class ChangeCtxWorkingDir(ChangeCtxDefault):
"""Class with the specific implementation details for the change context
of the working dir of the repository. I
|
t inherits the common implementation
from the class :class:`ChangeCtxBase`.
"""
@locked_cached_property
def revision_id(self):
if self._repo.workdir is None:
raise RuntimeError('Bare repositories should be deployed with '
'REVISION_DEFAULT change context')
|
try:
return self._repo.head.target
except Exception:
raise RuntimeError('HEAD reference not found! Please do your '
'first commit.')
@locked_cached_property
def files(self):
return [entry.path for entry in self._repo.index]
def needs_reload(self):
"""This change context is mainly used by the command-line tool, and
        doesn't provide any reliable way to evaluate its "freshness". Always
reload.
"""
return True
def filectx_needs_reload(self, filectx):
return True
def published(self, date, now):
return True
def etag(self, filectx):
return 'blohg-%i-%i-%s' % (time.time(), len(filectx.data),
adler32(filectx.path.encode('utf-8'))
& 0xffffffff)
def get_filectx(self, path):
return FileCtx(self._repo, self._ctx, path, use_index=True)
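A minimal usage sketch for the change contexts above; the repository path is hypothetical and it assumes a Git repository with a master branch:

if __name__ == '__main__':
    ctx = ChangeCtxDefault('/path/to/blog-repo')
    print('revision:', ctx.revision_id)
    for name in ctx.files:
        print(name)
    print('needs reload?', ctx.needs_reload())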
|
anshulsharmanyu/twitter_plot
|
Twitter Map Cloud Assignment/googleMapsTweet/apps.py
|
Python
|
gpl-3.0
| 145
| 0.006897
|
from __future__ impor
|
t unicode_literals
from django.apps import AppConfig
class GooglemapstweetConfig(AppConf
|
ig):
name = 'googleMapsTweet'
|
jbedorf/tensorflow
|
tensorflow/compiler/tests/variable_ops_test.py
|
Python
|
apache-2.0
| 21,657
| 0.008635
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for reading and writing variables."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training.gradient_descent impor
|
t GradientDescentOptimizer
class VariableOpsTest(xla_test.XLATestCase):
"""Test cases for resource variable operators."""
def testWriteEmptyShape(self):
# Verifies that we can pass an uninitialized variable with an empty shape,
|
# assign it a value, and successfully return it.
for dtype in self.numeric_types:
with self.test_session() as sess, self.test_scope():
zeros = np.zeros([3, 0], dtype=dtype)
v = resource_variable_ops.ResourceVariable(zeros)
p = array_ops.placeholder(dtype)
x = v.assign(p)
with ops.control_dependencies([x]):
y = v.read_value()
self.assertAllClose(zeros, sess.run(y, {p: zeros}))
def testOneWriteOneOutput(self):
# Regression test for a bug where computations with one non-constant
# output and one variable update were mishandled.
for dtype in self.numeric_types:
init = np.array([[1, 2j], [3, 4]]).astype(dtype)
with self.test_session() as sess, self.test_scope():
v = resource_variable_ops.ResourceVariable(init)
sess.run(variables.variables_initializer([v]))
p = array_ops.placeholder(dtype)
x = v.assign_add(p)
with ops.control_dependencies([x]):
y = v.read_value()
self.assertAllClose(
np.array([[2, 1 + 2j], [4, 5]]).astype(dtype), sess.run(y, {p: 1}))
def testSparseRead0DIndices(self):
for dtype in self.numeric_types:
init = np.array([[0, 1, 2, 3], [4, 5, 6, 7], [8j, 9, 10,
11]]).astype(dtype)
with self.test_session() as sess, self.test_scope():
v = resource_variable_ops.ResourceVariable(init)
sess.run(variables.variables_initializer([v]))
x = v.sparse_read(2)
self.assertAllClose(
np.array([8j, 9, 10, 11]).astype(dtype), self.evaluate(x))
def testSparseRead1DIndices(self):
for dtype in self.numeric_types:
init = np.array([[0, 1, 2, 3], [4, 5, 6j, 7], [8, 9, 10,
11]]).astype(dtype)
with self.test_session() as sess, self.test_scope():
v = resource_variable_ops.ResourceVariable(init)
sess.run(variables.variables_initializer([v]))
x = v.sparse_read([2, 1])
self.assertAllClose(
np.array([[8, 9, 10, 11], [4, 5, 6j, 7]]).astype(dtype),
self.evaluate(x))
def testSparseRead2DIndices(self):
for dtype in self.numeric_types:
init = np.array([[0, 1, 2j, 3], [4, 5, 6, 7], [8, 9, 10,
11]]).astype(dtype)
with self.test_session() as sess, self.test_scope():
v = resource_variable_ops.ResourceVariable(init)
sess.run(variables.variables_initializer([v]))
x = v.sparse_read([[2, 1], [0, 2]])
self.assertAllClose(
np.array([[[8, 9, 10, 11], [4, 5, 6, 7]],
[[0, 1, 2j, 3], [8, 9, 10, 11]]]).astype(dtype),
self.evaluate(x))
def testSparseRead2DIndices3DTensor(self):
for dtype in self.numeric_types:
init = np.array([[[0, 1, 2], [3, 4, 5]], [[10, 11, 12], [13, 14, 15]],
[[20, 21, 22], [23, 24j, 25]],
[[30, 31, 32], [33, 34, 35]]]).astype(dtype)
with self.test_session() as sess, self.test_scope():
v = resource_variable_ops.ResourceVariable(init)
sess.run(variables.variables_initializer([v]))
x = v.sparse_read([[2, 1], [3, 0]])
self.assertAllClose(
np.array(
[[[[20, 21, 22], [23, 24j, 25]], [[10, 11, 12], [13, 14, 15]]],
[[[30, 31, 32], [33, 34, 35]], [[0, 1, 2], [3, 4, 5]]]
],).astype(dtype), self.evaluate(x))
def testShape(self):
for dtype in self.numeric_types:
init = np.ones([2, 3]).astype(dtype)
with self.test_session() as session, self.test_scope():
v = resource_variable_ops.ResourceVariable(init)
session.run(variables.variables_initializer([v]))
h = v.handle
s32, s64 = session.run([
resource_variable_ops.variable_shape(h),
resource_variable_ops.variable_shape(h, out_type=dtypes.int64)
])
self.assertEqual(s32.dtype, np.int32)
self.assertEqual(s64.dtype, np.int64)
self.assertAllEqual(s32, [2, 3])
self.assertAllEqual(s64, [2, 3])
def testReadWrite(self):
"""Tests initialization, reading, and writing a resource variable."""
for dtype in self.numeric_types:
with self.test_session() as session:
with self.test_scope():
with variable_scope.variable_scope("ascope", use_resource=True):
x = variable_scope.get_variable(
"x",
shape=[],
dtype=dtype,
initializer=init_ops.constant_initializer(2))
a = x.read_value()
with ops.control_dependencies([a]):
b = state_ops.assign(x, dtype(47))
with ops.control_dependencies([b]):
c = x.read_value()
with ops.control_dependencies([c]):
d = state_ops.assign_add(x, np.array(6 + 2j).astype(dtype))
with ops.control_dependencies([d]):
e = state_ops.assign_sub(x, dtype(3))
with ops.control_dependencies([e]):
f = x.read_value()
session.run(variables.global_variables_initializer())
v1, v2, v3 = session.run([a, c, f])
self.assertAllClose(dtype(2), v1)
self.assertAllClose(dtype(47), v2)
self.assertAllClose(np.array(50 + 2j).astype(dtype), v3)
def testTraining(self):
"""Tests a gradient descent step for a simple model."""
with self.test_session() as session:
with self.test_scope():
with variable_scope.variable_scope("ascope", use_resource=True):
w = variable_scope.get_variable(
"w",
shape=[4, 2],
dtype=dtypes.float32,
initializer=init_ops.constant_initializer(
np.array([[1, 2], [3, 4], [5, 6], [7, 8]], dtype=np.float32)))
b = variable_scope.get_variable(
"b",
shape=[2],
dtype=dtypes.float32,
initializer=init_ops.constant_initializer(
np.array([2, 3], dtype=np.float32)))
x = array_ops.placeholder(dtypes.float32, shape=[1, 4])
y = math_ops.matmul(x, w) + b
loss = math_ops.reduce_sum(y)
optimizer = GradientDescentOptimizer(0.1)
|
Eclipse-2017/waveforms
|
playback/inmarsat_playback.py
|
Python
|
gpl-3.0
| 26,718
| 0.010517
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
##################################################
# GNU Radio Python Flow Graph
# Title: Inmarsat Playback
# Generated: Mon Aug 21 21:42:34 2017
##################################################
if __name__ == '__main__':
import ctypes
import sys
if sys.platform.startswith('linux'):
try:
x11 = ctypes.cdll.LoadLibrary('libX11.so')
x11.XInitThreads()
except:
print "Warning: failed to XInitThreads()"
from PyQt4 import Qt
from gnuradio import analog
from gnuradio import blocks
from gnuradio import digital
from gnuradio import eng_notation
from gnuradio import filter
from gnuradio import gr
from gnuradio import qtgui
from gnuradio.eng_option import eng_option
from gnuradio.filter import firdes
from gnuradio.qtgui import Range, RangeWidget
from optparse import OptionParser
import mapper
import math
import sip
import sys
from gnuradio import qtgui
class inmarsat_playback(gr.top_block, Qt.QWidget):
def __init__(self):
gr.top_block.__init__(self, "Inmarsat Playback")
Qt.QWidget.__init__(self)
self.setWindowTitle("Inmarsat Playback")
qtgui.util.check_set_qss()
try:
self.setWindowIcon(Qt.QIcon.fromTheme('gnuradio-grc'))
except:
pass
self.top_scroll_layout = Qt.QVBoxLayout()
self.setLayout(self.top_scroll_layout)
self.top_scroll = Qt.QScrollArea()
self.top_scroll.setFrameStyle(Qt.QFrame.NoFrame)
self.top_scroll_layout.addWidget(self.top_scroll)
self.top_scroll.setWidgetResizable(True)
self.top_widget = Qt.QWidget()
self.top_scroll.setWidget(self.top_widget)
self.top_layout = Qt.QVBoxLayout(self.top_widget)
self.top_grid_layout = Qt.QGridLayout()
self.top_layout.addLayout(self.top_grid_layout)
self.settings = Qt.QSettings("GNU Radio", "inmarsat_playback")
self.restoreGeometry(self.settings.value("geometry").toByteArray())
##################################################
# Variables
##################################################
self.samp_rate = samp_rate = 250e3
self.interp = interp = 48
self.decim = decim = 250
self.baud = baud = 1200
self.samp_per_sym = samp_per_sym = (samp_rate/decim*interp)/baud
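        # with these defaults the resampled rate is 250e3/250*48 = 48 kHz, so
        # samp_per_sym = 48000/1200 = 40 samples per symbol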
self.alpha = alpha = 0.5
self.xlate_filter_taps = xlate_filter_taps = firdes.low_pass(1,samp_rate, samp_rate/2, 1000, firdes.WIN_HAMMING, 6.76)
self.rrc_filter_taps = rrc_filter_taps = firdes.root_raised_cosine(32, 1.0, 1.0/(samp_per_sym*32), alpha, int(samp_per_sym*32))
self.loop_bw = loop_bw = 300
self.freq_correct = freq_correct = -28.7e3
self.delay = delay = 0
self.cutoff = cutoff = 4800
self.cols = cols = 54
##################################################
# Blocks
##################################################
self._freq_correct_tool_bar = Qt.QToolBar(self)
self._freq_correct_tool_bar.addWidget(Qt.QLabel("freq_correct"+": "))
self._freq_correct_line_edit = Qt.QLineEdit(str(self.freq_correct))
self._freq_correct_tool_bar.addWidget(self._freq_correct_line_edit)
self._freq_correct_line_edit.returnPressed.connect(
lambda: self.set_freq_correct(eng_notation.str_to_num(str(self._freq_correct_line_edit.text().toAscii()))))
self.top_grid_layout.addWidget(self._freq_correct_tool_bar, 12,0,1,2)
self._delay_tool_bar = Qt.QToolBar(self)
self._delay_tool_bar.addWidget(Qt.QLabel("delay"+": "))
self._delay_line_edit = Qt.QLineEdit(str(self.delay))
self._delay_tool_bar.addWidget(self._delay_line_edit)
self._delay_line_edit.returnPressed.connect(
lambda: self.set_delay(int(str(self._delay_line_edit.text().toAscii()))))
self.top_grid_layout.addWidget(self._delay_tool_bar, 12,2,1,2)
self._cutoff_tool_bar = Qt.QToolBar(self)
self._cutoff_tool_bar.addWidget(Qt.QLabel("cutoff"+": "))
self._cutoff_line_edit = Qt.QLineEdit(str(self.cutoff))
self._cutoff_tool_bar.addWidget(self._cutoff_line_edit)
self._cutoff_line_edit.returnPressed.connect(
lambda: self.set_cutoff(eng_notation.str_to_num(str(self._
|
cutoff_line_edit.text().toAscii()))))
self.top_grid_layout.addWidget(self._cutoff_tool_bar, 13,0,1,2)
self._cols_range = Range(1, 500, 1, 54, 200)
self._cols
|
_win = RangeWidget(self._cols_range, self.set_cols, "cols", "counter_slider", float)
self.top_grid_layout.addWidget(self._cols_win, 12,4,1,4)
self.rational_resampler_xxx_0_0 = filter.rational_resampler_ccc(
interpolation=interp,
decimation=decim,
taps=None,
fractional_bw=None,
)
self.rational_resampler_xxx_0 = filter.rational_resampler_ccc(
interpolation=1,
decimation=1,
taps=None,
fractional_bw=None,
)
self.qtgui_waterfall_sink_x_0_0 = qtgui.waterfall_sink_c(
1024, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
samp_rate/decim*interp, #bw
"", #name
1 #number of inputs
)
self.qtgui_waterfall_sink_x_0_0.set_update_time(0.01)
self.qtgui_waterfall_sink_x_0_0.enable_grid(True)
self.qtgui_waterfall_sink_x_0_0.enable_axis_labels(True)
if not True:
self.qtgui_waterfall_sink_x_0_0.disable_legend()
if "complex" == "float" or "complex" == "msg_float":
self.qtgui_waterfall_sink_x_0_0.set_plot_pos_half(not True)
labels = ['', '', '', '', '',
'', '', '', '', '']
colors = [0, 0, 0, 0, 0,
0, 0, 0, 0, 0]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_waterfall_sink_x_0_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_waterfall_sink_x_0_0.set_line_label(i, labels[i])
self.qtgui_waterfall_sink_x_0_0.set_color_map(i, colors[i])
self.qtgui_waterfall_sink_x_0_0.set_line_alpha(i, alphas[i])
self.qtgui_waterfall_sink_x_0_0.set_intensity_range(-60, -10)
self._qtgui_waterfall_sink_x_0_0_win = sip.wrapinstance(self.qtgui_waterfall_sink_x_0_0.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_waterfall_sink_x_0_0_win, 4,4,4,4)
self.qtgui_waterfall_sink_x_0 = qtgui.waterfall_sink_c(
4096, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
samp_rate, #bw
"", #name
1 #number of inputs
)
self.qtgui_waterfall_sink_x_0.set_update_time(0.01)
self.qtgui_waterfall_sink_x_0.enable_grid(True)
self.qtgui_waterfall_sink_x_0.enable_axis_labels(True)
if not True:
self.qtgui_waterfall_sink_x_0.disable_legend()
if "complex" == "float" or "complex" == "msg_float":
self.qtgui_waterfall_sink_x_0.set_plot_pos_half(not True)
labels = ['', '', '', '', '',
'', '', '', '', '']
colors = [0, 0, 0, 0, 0,
0, 0, 0, 0, 0]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_waterfall_sink_x_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_waterfall_sink_x_0.set_line_label(i, labels[i])
self.qtgui_waterfall_sink_x_0.set_color_map(i, colors[i])
self.qtgui_waterfall_sink_x_0.set_line_alpha(i, alphas[i])
self.qtgui_waterfall_sink_x_0.set_intensity_range(-140, 10)
self._qtgui_waterfall_sink_x_0_win = sip.wrapinstance(self.qtgui_waterfall_sink_x_0.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_waterfall_sink_x_0_win, 4,0,4,4)
|
gregmuellegger/django-autofixture
|
autofixture_tests/tests/test_autodiscover.py
|
Python
|
bsd-3-clause
| 325
| 0
|
from django.contrib.auth.models import User
from django.test i
|
mport TestCase
import autofixture
autofixture.autodiscover()
class AutodiscoverTestCase(TestCase):
def test_builtin_fixtures(self):
from autofixture.autofixtures import UserFixture
self.as
|
sertEqual(autofixture.REGISTRY[User], UserFixture)
|
googlefonts/cu2qu
|
tests/pens_test.py
|
Python
|
apache-2.0
| 13,027
| 0.000077
|
from __future__ import print_function, division, absolute_import
import unittest
from cu2qu.pens import Cu2QuPen, Cu2QuPointPen
from . import CUBIC_GLYPHS, QUAD_GLYPHS
from .utils import DummyGlyph, DummyPointGlyph
from .utils import DummyPen, DummyPointPen
from fontTools.misc.loggingTools import CapturingLogHandler
from textwrap import dedent
import logging
MAX_ERR = 1.0
class _TestPenMixin(object):
"""Collection of tests that are shared by both the SegmentPen and the
PointPen test cases, plus some helper methods.
"""
maxDiff = None
def diff(self, expected, actual):
import difflib
expected = str(self.Glyph(expected)).splitlines(True)
actual = str(self.Glyph(actual)).splitlines(True)
diff = difflib.unified_diff(
expected, actual, fromfile='expected', tofile='actual')
return "".join(diff)
def convert_glyph(self, glyph, **kwargs):
# draw source glyph onto a new glyph using a Cu2Qu pen and return it
converted = self.Glyph()
pen = getattr(converted, self.pen_getter_name)()
quadpen = self.Cu2QuPen(pen, MAX_ERR, **kwargs)
getattr(glyph, self.draw_method_name)(quadpen)
return converted
def expect_glyph(self, source, expected):
converted = self.convert_glyph(source)
self.assertNotEqual(converted, source)
if not converted.approx(expected):
print(self.diff(expected, converted))
self.fail("converted glyph is different from expected")
def test_convert_simple_glyph(self):
self.expect_glyph(CUBIC_GLYPHS['a'], QUAD_GLYPHS['a'])
self.expect_glyph(CUBIC_GLYPHS['A'], QUAD_GLYPHS['A'])
def test_convert_composite_glyph(self):
source = CUBIC_GLYPHS['Aacute']
converted = self.convert_glyph(source)
# components don't change after quadratic conversion
self.assertEqual(converted, source)
def test_convert_mixed_glyph(self):
# this contains a mix of contours and components
self.expect_glyph(CUBIC_GLYPHS['Eacute'], QUAD_GLYPHS['Eacute'])
def test_reverse_direction(self):
for name in ('a', 'A', 'Eacute'):
|
source = CUBIC_GLYPHS[name]
normal_glyph = self.convert_glyph(sour
|
ce)
reversed_glyph = self.convert_glyph(source, reverse_direction=True)
            # the number of commands is the same, just their order is inverted
            self.assertEqual(
                len(normal_glyph.outline), len(reversed_glyph.outline))
self.assertNotEqual(normal_glyph, reversed_glyph)
def test_stats(self):
stats = {}
for name in CUBIC_GLYPHS.keys():
source = CUBIC_GLYPHS[name]
self.convert_glyph(source, stats=stats)
self.assertTrue(stats)
self.assertTrue('1' in stats)
self.assertEqual(type(stats['1']), int)
def test_addComponent(self):
pen = self.Pen()
quadpen = self.Cu2QuPen(pen, MAX_ERR)
quadpen.addComponent("a", (1, 2, 3, 4, 5.0, 6.0))
# components are passed through without changes
self.assertEqual(str(pen).splitlines(), [
"pen.addComponent('a', (1, 2, 3, 4, 5.0, 6.0))",
])
class TestCu2QuPen(unittest.TestCase, _TestPenMixin):
def __init__(self, *args, **kwargs):
super(TestCu2QuPen, self).__init__(*args, **kwargs)
self.Glyph = DummyGlyph
self.Pen = DummyPen
self.Cu2QuPen = Cu2QuPen
self.pen_getter_name = 'getPen'
self.draw_method_name = 'draw'
def test__check_contour_is_open(self):
msg = "moveTo is required"
quadpen = Cu2QuPen(DummyPen(), MAX_ERR)
with self.assertRaisesRegex(AssertionError, msg):
quadpen.lineTo((0, 0))
with self.assertRaisesRegex(AssertionError, msg):
quadpen.qCurveTo((0, 0), (1, 1))
with self.assertRaisesRegex(AssertionError, msg):
quadpen.curveTo((0, 0), (1, 1), (2, 2))
with self.assertRaisesRegex(AssertionError, msg):
quadpen.closePath()
with self.assertRaisesRegex(AssertionError, msg):
quadpen.endPath()
quadpen.moveTo((0, 0)) # now it works
quadpen.lineTo((1, 1))
quadpen.qCurveTo((2, 2), (3, 3))
quadpen.curveTo((4, 4), (5, 5), (6, 6))
quadpen.closePath()
def test__check_contour_closed(self):
msg = "closePath or endPath is required"
quadpen = Cu2QuPen(DummyPen(), MAX_ERR)
quadpen.moveTo((0, 0))
with self.assertRaisesRegex(AssertionError, msg):
quadpen.moveTo((1, 1))
with self.assertRaisesRegex(AssertionError, msg):
quadpen.addComponent("a", (1, 0, 0, 1, 0, 0))
# it works if contour is closed
quadpen.closePath()
quadpen.moveTo((1, 1))
quadpen.endPath()
quadpen.addComponent("a", (1, 0, 0, 1, 0, 0))
def test_qCurveTo_no_points(self):
quadpen = Cu2QuPen(DummyPen(), MAX_ERR)
quadpen.moveTo((0, 0))
with self.assertRaisesRegex(
AssertionError, "illegal qcurve segment point count: 0"):
quadpen.qCurveTo()
def test_qCurveTo_1_point(self):
pen = DummyPen()
quadpen = Cu2QuPen(pen, MAX_ERR)
quadpen.moveTo((0, 0))
quadpen.qCurveTo((1, 1))
self.assertEqual(str(pen).splitlines(), [
"pen.moveTo((0, 0))",
"pen.lineTo((1, 1))",
])
def test_qCurveTo_more_than_1_point(self):
pen = DummyPen()
quadpen = Cu2QuPen(pen, MAX_ERR)
quadpen.moveTo((0, 0))
quadpen.qCurveTo((1, 1), (2, 2))
self.assertEqual(str(pen).splitlines(), [
"pen.moveTo((0, 0))",
"pen.qCurveTo((1, 1), (2, 2))",
])
def test_curveTo_no_points(self):
quadpen = Cu2QuPen(DummyPen(), MAX_ERR)
quadpen.moveTo((0, 0))
with self.assertRaisesRegex(
AssertionError, "illegal curve segment point count: 0"):
quadpen.curveTo()
def test_curveTo_1_point(self):
pen = DummyPen()
quadpen = Cu2QuPen(pen, MAX_ERR)
quadpen.moveTo((0, 0))
quadpen.curveTo((1, 1))
self.assertEqual(str(pen).splitlines(), [
"pen.moveTo((0, 0))",
"pen.lineTo((1, 1))",
])
def test_curveTo_2_points(self):
pen = DummyPen()
quadpen = Cu2QuPen(pen, MAX_ERR)
quadpen.moveTo((0, 0))
quadpen.curveTo((1, 1), (2, 2))
self.assertEqual(str(pen).splitlines(), [
"pen.moveTo((0, 0))",
"pen.qCurveTo((1, 1), (2, 2))",
])
def test_curveTo_3_points(self):
pen = DummyPen()
quadpen = Cu2QuPen(pen, MAX_ERR)
quadpen.moveTo((0, 0))
quadpen.curveTo((1, 1), (2, 2), (3, 3))
self.assertEqual(str(pen).splitlines(), [
"pen.moveTo((0, 0))",
"pen.qCurveTo((0.75, 0.75), (2.25, 2.25), (3, 3))",
])
def test_curveTo_more_than_3_points(self):
# a 'SuperBezier' as described in fontTools.basePen.AbstractPen
pen = DummyPen()
quadpen = Cu2QuPen(pen, MAX_ERR)
quadpen.moveTo((0, 0))
quadpen.curveTo((1, 1), (2, 2), (3, 3), (4, 4))
self.assertEqual(str(pen).splitlines(), [
"pen.moveTo((0, 0))",
"pen.qCurveTo((0.75, 0.75), (1.625, 1.625), (2, 2))",
"pen.qCurveTo((2.375, 2.375), (3.25, 3.25), (4, 4))",
])
def test_addComponent(self):
pen = DummyPen()
quadpen = Cu2QuPen(pen, MAX_ERR)
quadpen.addComponent("a", (1, 2, 3, 4, 5.0, 6.0))
# components are passed through without changes
self.assertEqual(str(pen).splitlines(), [
"pen.addComponent('a', (1, 2, 3, 4, 5.0, 6.0))",
])
def test_ignore_single_points(self):
pen = DummyPen()
try:
logging.captureWarnings(True)
with CapturingLogHandler("py.warnings", level="WARNING") as log:
quadpen = Cu2QuPen(pen, MAX_ERR, ignore_single_points=True)
finally:
|
jrowan/zulip
|
zerver/tornado/event_queue.py
|
Python
|
apache-2.0
| 37,008
| 0.002972
|
# See http://zulip.readthedocs.io/en/latest/events-system.html for
# high-level documentation on how this system works.
from __future__ import absolute_import
from typing import cast, AbstractSet, Any, Callable, Dict, List, \
Mapping, MutableMapping, Optional, Iterable, Sequence, Set, Text, Union
from django.utils.translation import ugettext as _
from django.conf import settings
from django.utils.timezone import now as timezone_now
from collections import deque
import datetime
import os
import time
import socket
import logging
import ujson
import requests
import atexit
import sys
import signal
import tornado.autoreload
import tornado.ioloop
import random
import traceback
from zerver.models import UserProfile, Client
from zerver.decorator import RespondAsynchronously
from zerver.tornado.handlers import clear_handler_by_id, get_handler_by_id, \
finish_handler, handler_stats_string
from zerver.lib.utils import statsd
from zerver.middleware import async_request_restart
from zerver.lib.narrow import build_narrow_filter
from zerver.lib.queue import queue_json_publish
from zerver.lib.request import JsonableError
from zerver.lib.timestamp import timestamp_to_datetime
from zerver.tornado.descriptors import clear_descriptor_by_handler_id, set_descriptor_by_handler_id
import copy
import six
requests_client = requests.Session()
for host in ['127.0.0.1', 'localhost']:
if settings.TORNADO_SERVER and host in settings.TORNADO_SERVER:
# This seems like the only working solution to ignore proxy in
        # the requests library.
requests_client.trust_env = False
# The idle timeout used to be a week, but we found that in that
# situation, queues from dead browser sessions would grow quite large
# due to the accumulation of message data in those queues.
IDLE_EVENT_QUEUE_TIMEOUT_SECS = 60 * 10
EVENT_QUEUE_GC_FREQ_MSECS = 1000 * 60 * 5
# Capped limit for how long a client can request an event queue
# to live
MAX_QUEUE_TIMEOUT_SECS = 7 * 24 * 60 * 60
# The heartbeats effectively act as a server-side timeout for
# get_events(). The actual timeout value is randomized for each
# client connection based on the below value. We ensure that the
# maximum timeout value is 55 seconds, to deal with crappy home
# wireless routers that kill "inactive" http connections.
HEARTBEAT_MIN_FREQ_SECS = 45
class ClientDescriptor(object):
def __init__(self, user_profile_id, user_profile_email, realm_id, event_queue,
event_types, client_type_name, apply_markdown=True,
all_public_streams=False, lifespan_secs=0, narrow=[]):
# type: (int, Text, int, EventQueue, Optional[Sequence[str]], Text, bool, bool, int, Iterable[Sequence[Text]]) -> None
# These objects are serialized on shutdown and restored on restart.
# If fields are added or semantics are changed, temporary code must be
# added to load_event_queues() to update the restored objects.
# Additionally, the to_dict and from_dict methods must be updated
self.user_profile_id = user_profile_id
self.user_profile_email = user_profile_email
self.realm_id = realm_id
self.current_handler_id = None # type: Optional[int]
self.curren
|
t_client_name = None # type: Optional[Text]
self.event_queue = event_queue
self.queue_timeout = lifespan_secs
self.event_types = event_types
self.last_connection_time = time.time()
self.apply_markdown = apply_markdown
self.all_public_streams = all_public_streams
|
self.client_type_name = client_type_name
self._timeout_handle = None # type: Any # TODO: should be return type of ioloop.add_timeout
self.narrow = narrow
self.narrow_filter = build_narrow_filter(narrow)
# Clamp queue_timeout to between minimum and maximum timeouts
self.queue_timeout = max(IDLE_EVENT_QUEUE_TIMEOUT_SECS, min(self.queue_timeout, MAX_QUEUE_TIMEOUT_SECS))
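        # e.g. the default lifespan_secs=0 is raised to IDLE_EVENT_QUEUE_TIMEOUT_SECS
        # (10 minutes), and anything above MAX_QUEUE_TIMEOUT_SECS is capped at 7 days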
def to_dict(self):
# type: () -> Dict[str, Any]
# If you add a new key to this dict, make sure you add appropriate
# migration code in from_dict or load_event_queues to account for
# loading event queues that lack that key.
return dict(user_profile_id=self.user_profile_id,
user_profile_email=self.user_profile_email,
realm_id=self.realm_id,
event_queue=self.event_queue.to_dict(),
queue_timeout=self.queue_timeout,
event_types=self.event_types,
last_connection_time=self.last_connection_time,
apply_markdown=self.apply_markdown,
all_public_streams=self.all_public_streams,
narrow=self.narrow,
client_type_name=self.client_type_name)
def __repr__(self):
# type: () -> str
return "ClientDescriptor<%s>" % (self.event_queue.id,)
@classmethod
def from_dict(cls, d):
# type: (MutableMapping[str, Any]) -> ClientDescriptor
if 'user_profile_email' not in d:
# Temporary migration for the addition of the new user_profile_email field
from zerver.models import get_user_profile_by_id
d['user_profile_email'] = get_user_profile_by_id(d['user_profile_id']).email
if 'client_type' in d:
# Temporary migration for the rename of client_type to client_type_name
d['client_type_name'] = d['client_type']
ret = cls(d['user_profile_id'], d['user_profile_email'], d['realm_id'],
EventQueue.from_dict(d['event_queue']), d['event_types'],
d['client_type_name'], d['apply_markdown'], d['all_public_streams'],
d['queue_timeout'], d.get('narrow', []))
ret.last_connection_time = d['last_connection_time']
return ret
def prepare_for_pickling(self):
# type: () -> None
self.current_handler_id = None
self._timeout_handle = None
def add_event(self, event):
# type: (Dict[str, Any]) -> None
if self.current_handler_id is not None:
handler = get_handler_by_id(self.current_handler_id)
async_request_restart(handler._request)
self.event_queue.push(event)
self.finish_current_handler()
def finish_current_handler(self):
# type: () -> bool
if self.current_handler_id is not None:
err_msg = "Got error finishing handler for queue %s" % (self.event_queue.id,)
try:
finish_handler(self.current_handler_id, self.event_queue.id,
self.event_queue.contents(), self.apply_markdown)
except Exception:
logging.exception(err_msg)
finally:
self.disconnect_handler()
return True
return False
def accepts_event(self, event):
# type: (Mapping[str, Any]) -> bool
if self.event_types is not None and event["type"] not in self.event_types:
return False
if event["type"] == "message":
return self.narrow_filter(event)
return True
# TODO: Refactor so we don't need this function
def accepts_messages(self):
# type: () -> bool
return self.event_types is None or "message" in self.event_types
def idle(self, now):
# type: (float) -> bool
if not hasattr(self, 'queue_timeout'):
self.queue_timeout = IDLE_EVENT_QUEUE_TIMEOUT_SECS
return (self.current_handler_id is None and
now - self.last_connection_time >= self.queue_timeout)
def connect_handler(self, handler_id, client_name):
# type: (int, Text) -> None
self.current_handler_id = handler_id
self.current_client_name = client_name
set_descriptor_by_handler_id(handler_id, self)
self.last_connection_time = time.time()
def timeout_callback():
# type: () -> None
self._timeout_handle = None
# All clients get heartbeat events
self.add_event(dict(type='heartbeat'))
|
DCPUTeam/DCPUToolchain
|
docs/sphinxext/toolchain.py
|
Python
|
mit
| 335
| 0.020896
|
# Directi
|
ves using the toolchain
# documentation.
import docutils
def setup(app):
app.add_object_type("asmdirective", "asmdir");
app.add_object_type("asminstruction", "asminst");
app.add_obje
|
ct_type("ppexpressionop", "ppexprop");
app.add_object_type("ppdirective", "ppdir");
app.add_object_type("literal", "lit");
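# Each add_object_type(directivename, rolename) call registers a paired directive and
# role, so the toolchain docs can define e.g. ".. asmdirective:: NAME" and
# cross-reference it elsewhere with :asmdir:`NAME` (the example name is illustrative).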
|
great-expectations/great_expectations
|
tests/integration/docusaurus/connecting_to_your_data/database/redshift_yaml_example.py
|
Python
|
apache-2.0
| 3,566
| 0.001963
|
import os
from ruamel import yaml
import great_expectations as ge
from great_expectations.core.batch import BatchRequest, RuntimeBatchRequest
redshift_username = os.environ.get("REDSHIFT_USERNAME")
redshift_password = os.environ.get("REDSHIFT_PASSWORD")
redshift_host = os.environ.get("REDSHIFT_HOST")
redshift_port = os.environ.get("REDSHIFT_PORT")
redshift_database = os.environ.get("REDSHIFT_DATABASE")
redshift_sslmode = os.environ.get("REDSHIFT_SSLMODE")
CONNECTION_STRING = f"postgresql+psycopg2://{redshift_username}:{redshift_password}@{redshift_host}:{redshift_port}/{redshift_database}?sslmode={redshift_sslmode}"
# This utility is not for general use. It is only to support testing.
from tests.test_utils import load_data_into_test_database
load_data_into_test_database(
table_name="taxi_data",
csv_path="./data/yellow_tripdata_sample_2019-01.csv",
connection_string=CONNECTION_STRING,
)
context = ge.get_context()
datasource_yaml = f"""
name: my_redshift_datasource
class_name: Datasource
execution_engine:
class_name: SqlAlchemyExecutionEngine
connection_string: postgresql+psycopg2://<USER_NAME>:<PASSWORD>@<HOST>:<PORT>/<DATABASE>?sslmode=<SSLMODE>
data_connectors:
default_runtime_data_connector_name:
class_name: RuntimeDataConnector
batch_identifiers:
- default_identifier_name
default_inferred_data_connector_name:
class_name: InferredAssetSqlDataConnector
include_schema_name: true
"""
# Please note this override is only to provide good UX for docs and tests.
# In normal usage you'd set your path directly in the yaml above.
datasource_yaml = datasource_yaml.replace(
"postgresql+psycopg2://<USER_NAME>:<PASSWORD>@<HOST>:<PORT>/<DATABASE>?sslmode=<SSLMODE>",
CONNECTION_STRING,
)
context.te
|
st_yaml_config(datasource_yaml)
context.add_datasource(**yaml.load(datasource_yaml))
# First test for RuntimeBatchRequest using a query
batch_request = RuntimeBatchRequest(
datasource_name="my_redshift_datasource",
data_connector_name="default_runtime_data_connector_name",
data_asset_name="default_name", # this can be anything that identifies this data
runtime_parameters={"query": "SELECT * from taxi_data LIMIT 10"},
batch_identifiers={"default_identifie
|
r_name": "default_identifier"},
)
context.create_expectation_suite(
expectation_suite_name="test_suite", overwrite_existing=True
)
validator = context.get_validator(
batch_request=batch_request, expectation_suite_name="test_suite"
)
print(validator.head())
# NOTE: The following code is only for testing and can be ignored by users.
assert isinstance(validator, ge.validator.validator.Validator)
# Second test for BatchRequest naming a table
batch_request = BatchRequest(
datasource_name="my_redshift_datasource",
data_connector_name="default_inferred_data_connector_name",
data_asset_name="taxi_data", # this is the name of the table you want to retrieve
)
context.create_expectation_suite(
expectation_suite_name="test_suite", overwrite_existing=True
)
validator = context.get_validator(
batch_request=batch_request, expectation_suite_name="test_suite"
)
print(validator.head())
# NOTE: The following code is only for testing and can be ignored by users.
assert isinstance(validator, ge.validator.validator.Validator)
assert [ds["name"] for ds in context.list_datasources()] == ["my_redshift_datasource"]
assert "taxi_data" in set(
context.get_available_data_asset_names()["my_redshift_datasource"][
"default_inferred_data_connector_name"
]
)
|
mionch/django-getpaid
|
getpaid/backends/eservice/urls.py
|
Python
|
mit
| 488
| 0.010246
|
from django.conf.ur
|
ls import patterns, url
from django.views.decorators.csrf import csrf_exempt
from getpaid.backends.eservice.views import PendingView, SuccessView, FailureView
urlpatterns = patterns('',
url(r'^pending/$', csrf_exempt(PendingView.as_view()), name='getpaid-eservice-pending'),
url(r'^success/$', csrf_exempt(SuccessView.as_view()), name='getpaid-eservice-success'),
url(r'^failure/$', csrf_exempt(FailureView.as_view()), name='g
|
etpaid-eservice-failure'),
)
|
rembo10/headphones
|
lib/feedparser/util.py
|
Python
|
gpl-3.0
| 6,490
| 0.000308
|
# Copyright 2010-2021 Kurt McKee <contactme@kurtmckee.org>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import warnings
class FeedParserDict(dict):
keymap = {
'channel': 'feed',
'items': 'entries',
'guid': 'id',
'date': 'updated',
'date_parsed': 'updated_parsed',
'description': ['summary', 'subtitle'],
'description_detail': ['summary_detail', 'subtitle_detail'],
'url': ['href'],
'modified': 'updated',
'modified_parsed': 'updated_parsed',
'issued': 'published',
'issued_parsed': 'published_parsed',
'copyright': 'rights',
'copyright_detail': 'rights_detail',
'tagline': 'subtitle',
'tagline_detail': 'subtitle_detail',
}
def __getitem__(self, key):
"""
:return: A :class:`FeedParserDict`.
"""
if key == 'category':
try:
return dict.__getitem__(self, 'tags')[0]['term']
except IndexError:
raise KeyError("object doesn't have key 'category'")
elif key == 'enclosures':
norel = lambda link: FeedParserDict([(name, value) for (name, value) in link.items() if name != 'rel'])
return [
norel(link)
for link in dict.__getitem__(self, 'links')
if link['rel'] == 'enclosure'
]
elif key == 'license':
for link in dict.__getitem__(self, 'links'):
if link['rel'] == 'license' and 'href' in link:
return link['href']
elif key == 'updated':
# Temporarily help developers out by keeping the old
# broken behavior that was reported in issue 310.
# This fix was proposed in issue 328.
if (
not dict.__contains__(self, 'updated')
and dict.__contains__(self, 'published')
):
warnings.warn(
"To avoid breaking existing software while "
"fixing issue 310, a temporary mapping has been created "
"from `updated` to `published` if `updated` doesn't "
"exist. Thi
|
s fallback will be removed in a future version "
"of feedparser.
|
",
DeprecationWarning,
)
return dict.__getitem__(self, 'published')
return dict.__getitem__(self, 'updated')
elif key == 'updated_parsed':
if (
not dict.__contains__(self, 'updated_parsed')
and dict.__contains__(self, 'published_parsed')
):
warnings.warn(
"To avoid breaking existing software while "
"fixing issue 310, a temporary mapping has been created "
"from `updated_parsed` to `published_parsed` if "
"`updated_parsed` doesn't exist. This fallback will be "
"removed in a future version of feedparser.",
DeprecationWarning,
)
return dict.__getitem__(self, 'published_parsed')
return dict.__getitem__(self, 'updated_parsed')
else:
realkey = self.keymap.get(key, key)
if isinstance(realkey, list):
for k in realkey:
if dict.__contains__(self, k):
return dict.__getitem__(self, k)
elif dict.__contains__(self, realkey):
return dict.__getitem__(self, realkey)
return dict.__getitem__(self, key)
def __contains__(self, key):
if key in ('updated', 'updated_parsed'):
# Temporarily help developers out by keeping the old
# broken behavior that was reported in issue 310.
# This fix was proposed in issue 328.
return dict.__contains__(self, key)
try:
self.__getitem__(key)
except KeyError:
return False
else:
return True
has_key = __contains__
def get(self, key, default=None):
"""
:return: A :class:`FeedParserDict`.
"""
try:
return self.__getitem__(key)
except KeyError:
return default
def __setitem__(self, key, value):
key = self.keymap.get(key, key)
if isinstance(key, list):
key = key[0]
return dict.__setitem__(self, key, value)
def setdefault(self, k, default):
if k not in self:
self[k] = default
return default
return self[k]
def __getattr__(self, key):
# __getattribute__() is called first; this will be called
# only if an attribute was not already found
try:
return self.__getitem__(key)
except KeyError:
raise AttributeError("object has no attribute '%s'" % key)
def __hash__(self):
# This is incorrect behavior -- dictionaries shouldn't be hashable.
# Note to self: remove this behavior in the future.
return id(self)
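A small usage sketch of the keymap aliasing above; the feed data is invented for illustration:

d = FeedParserDict()
d['channel'] = FeedParserDict(title='Example feed')  # stored under the modern key 'feed'
d['links'] = [FeedParserDict(rel='enclosure', href='http://example.com/ep1.mp3')]

assert d['channel'] is d['feed']                # legacy key resolves via keymap
assert d.feed.title == 'Example feed'           # attribute access falls back to __getitem__
assert d.enclosures[0]['href'] == 'http://example.com/ep1.mp3'
assert 'rel' not in d.enclosures[0]             # 'rel' is stripped from enclosure links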
|
sebastic/QGIS
|
python/plugins/GdalTools/__init__.py
|
Python
|
gpl-2.0
| 1,251
| 0.000799
|
"""
/***************************************************************************
Name : GdalTools
Description : Integrate gdal tools into qgis
Date : 17/Sep/09
copyright : (C) 2009 by Lorenzo Masini and Giuseppe Sucameli (Faunalia)
email : lorenxo86@gmail.com - brush.tyler@gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as publi
|
shed by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
This script initializes the plu
|
gin, making it known to QGIS.
"""
def classFactory(iface):
# load GdalTools class from file GdalTools
from GdalTools import GdalTools
return GdalTools(iface)
|
noelevans/sandpit
|
decay_fns/decay_weightings.py
|
Python
|
mit
| 574
| 0.020906
|
import numpy as np
import pandas as pd
def decay_me
|
an(ol, half_life=5):
years = np.array([2012, 2013, 2014, 2016, 2017])
ratings = np.array([9, 11, 14, 11, 4])
today = 2016 + 1
print(ratings.mean())
elapsed_time = years - today
half_life = 2
weights = np.e ** -(elapsed_time * half_life)
    print(weights)
    print(weights / sum(weights))
    print(sum(ratings * weights) / sum(weights))
def main():
print(decay_mean(ratings, 2))
print(decay_mean(ratings, 5))
p
|
rint(ratings.mean())
if __name__ == '__main__':
main()
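A self-contained sketch of the exponentially decayed mean the script above experiments with, under the assumption that "half-life" means a rating loses half its weight every half_life years (function and variable names are made up):

import numpy as np

def decayed_mean(years, ratings, half_life, today):
    age = today - np.asarray(years, dtype=float)
    weights = 0.5 ** (age / half_life)  # weight halves every half_life years
    return np.sum(np.asarray(ratings) * weights) / np.sum(weights)

print(decayed_mean([2012, 2013, 2014, 2016, 2017], [9, 11, 14, 11, 4], 2, 2017))
print(decayed_mean([2012, 2013, 2014, 2016, 2017], [9, 11, 14, 11, 4], 5, 2017))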
|
puruckertom/poptox
|
poptox/generic/generic_batchinput.py
|
Python
|
unlicense
| 1,372
| 0.019679
|
import os
os.environ['DJANGO_SETTINGS_MODULE']='settings'
import webapp2 as webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext.webapp import template
import numpy as np
import cgi
import cgitb
cgitb.enable()
c
|
lass genericBatchInputPage(webapp.RequestHandler):
def get(self):
templatepath = os.path.dirname(__file__) + '/../templates/'
html = template.render(templatepath + '01pop_uberheader.html', {'title'})
html = html + template.render(templatepath + '02pop_uberintroblock_wmodellinks.html', {'model':'generic','page':'batchinput'})
html = html +
|
template.render (templatepath + '03pop_ubertext_links_left.html', {})
html = html + template.render(templatepath + '04uberbatchinput.html', {
'model':'generic',
'model_attributes':'generic Batch Input'})
html = html + template.render(templatepath + '04uberbatchinput_jquery.html', {})
html = html + template.render(templatepath + '05pop_ubertext_links_right.html', {})
html = html + template.render(templatepath + '06pop_uberfooter.html', {'links': ''})
self.response.out.write(html)
app = webapp.WSGIApplication([('/.*', genericBatchInputPage)], debug=True)
def main():
run_wsgi_app(app)
if __name__ == '__main__':
main()
|
jorgecarleitao/public-contracts
|
contracts/category_urls.py
|
Python
|
bsd-3-clause
| 977
| 0.007165
|
from django.conf.urls import patterns, url
from django.utils.translation import ugettext_lazy as _
from . import category_views
from . import feed
urlpatterns = patterns('',
url(r'(\d+)$', category_views.main_view, name='category'),
url(r'(\d+)/%s$' % _('contracts'), category_views.contracts, name='category_contracts'),
    url(r'(\d+)/%s/rss$' % _('contracts'), feed.CategoryContractsFeed(), name='category_contracts_rss'),
url(r'(\d+)/%s$' % _('contractors'), category_views.contractors, name='category_contractors'),
url(r'(\d+)/%s$' % _('contracted'), category_views.contracted, name='category_contracted'),
url(r'(\d+)/%s$' % _('tenders'), category_views.tenders, name='category_tenders'),
url(r'(\d+)/%s/rss$' % _('tenders'), feed.CategoryTendersFeed(), name='category_tenders_rss'),
)
|
yuanagain/seniorthesis
|
venv/lib/python2.7/site-packages/scipy/io/tests/test_idl.py
|
Python
|
mit
| 19,614
| 0.005302
|
from __future__ import division, print_function, absolute_import
from os import path
import warnings
DATA_PATH = path.join(path.dirname(__file__), 'data')
import numpy as np
from numpy.testing import (assert_equal, assert_array_equal, run_module_suite,
assert_)
from scipy.io.idl import readsav
def object_array(*args):
"""Constructs a numpy array of objects"""
array = np.empty(len(args), dtype=object)
for i in range(len(args)):
array[i] = args[i]
return array
def assert_identical(a, b):
"""Assert whether value AND type are the same"""
assert_equal(a, b)
if type(b) is str:
assert_equal(type(a), type(b))
else:
assert_equal(np.asarray(a).dtype.type, np.asarray(b).dtype.type)
def assert_array_identical(a, b):
"""Assert whether values AND type are the same"""
assert_array_equal(a, b)
assert_equal(a.dtype.type, b.dtype.type)
# Define vectorized ID function for pointer arrays
vect_id = np.vectorize(id)
class TestIdict:
def test_idict(self):
custom_dict = {'a': np.int16(999)}
original_id = id(custom_dict)
s = readsav(path.join(DATA_PATH, 'scalar_byte.sav'), idict=custom_dict, verbose=False)
assert_equal(original_id, id(s))
assert_('a' in s)
assert_identical(s['a'], np.int16(999))
assert_identical(s['i8u'], np.uint8(234))
class TestScalars:
# Test that scalar values are read in with the correct value and type
def test_byte(self):
s = readsav(path.join(DATA_PATH, 'scalar_byte.sav'), verbose=False)
assert_identical(s.i8u, np.uint8(234))
def test_int16(self):
s = readsav(path.join(DATA_PATH, 'scalar_int16.sav'), verbose=False)
assert_identical(s.i16s, np.int16(-23456))
def test_int32(self):
s = readsav(path.join(DATA_PATH, 'scalar_int32.sav'), verbose=False)
assert_identical(s.i32s, np.int32(-1234567890))
def test_float32(self):
s = readsav(path.join(DATA_PATH, 'scalar_float32.sav'), verbose=False)
assert_identical(s.f32, np.float32(-3.1234567e+37))
def test_float64(self):
s = readsav(path.join(DATA_PATH, 'scalar_float64.sav'), verbose=False)
assert_identical(s.f64, np.float64(-1.1976931348623157e+307))
def test_complex32(self):
s = readsav(path.join(DATA_PATH, 'scalar_complex32.sav'), verbose=False)
assert_identical(s.c32, np.complex64(3.124442e13-2.312442e31j))
def test_bytes(self):
s = readsav(path.join(DATA_PATH, 'scalar_string.sav'), verbose=False)
assert_identical(s.s, np.bytes_("The quick brown fox jumps over the lazy python"))
def test_structure(self):
pass
def test_complex64(self):
s = readsav(path.join(DATA_PATH, 'scalar_complex64.sav'), verbose=False)
assert_identical(s.c64, np.complex128(1.1987253647623157e+112-5.1987258887729157e+307j))
def test_heap_pointer(self):
pass
def test_object_reference(self):
pass
def test_uint16(self):
s = readsav(path.join(DATA_PATH, 'scalar_uint16.sav'), verbose=False)
assert_identical(s.i16u, np.uint16(65511))
def test_uint32(self):
s = readsav(path.join(DATA_PATH, 'scalar_uint32.sav'), verbose=False)
assert_identical(s.i32u, np.uint32(4294967233))
def test_int64(self):
s = readsav(path.join(DATA_PATH, 'scalar_int64.sav'), verbose=False)
assert_identical(s.i64s, np.int64(-9223372036854774567))
def test_uint64(self):
        s = readsav(path.join(DATA_PATH, 'scalar_uint64.sav'), verbose=False)
assert_identical(s.i64u, np.uint64(18446744073709529285))
class TestCompressed(TestScalars):
# Test that compressed .sav files can be read in
def test_compressed(self):
s = readsav(path.join(DATA_PATH, 'various_compressed.sav'), verbose=False)
assert_identical(s.i8u, np.uint8(234))
assert_identical(s.f32, np.float32(-3.1234567e+37))
assert_identical(s.c64, np.complex128(1.1987253647623157e+112-5.1987258887729157e+307j))
assert_equal(s.array5d.shape, (4, 3, 4, 6, 5))
assert_identical(s.arrays.a[0], np.array([1, 2, 3], dtype=np.int16))
assert_identical(s.arrays.b[0], np.array([4., 5., 6., 7.], dtype=np.float32))
assert_identical(s.arrays.c[0], np.array([np.complex64(1+2j), np.complex64(7+8j)]))
assert_identical(s.arrays.d[0], np.array([b"cheese", b"bacon", b"spam"], dtype=object))
class TestArrayDimensions:
# Test that multi-dimensional arrays are read in with the correct dimensions
def test_1d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_1d.sav'), verbose=False)
assert_equal(s.array1d.shape, (123, ))
def test_2d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_2d.sav'), verbose=False)
assert_equal(s.array2d.shape, (22, 12))
def test_3d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_3d.sav'), verbose=False)
assert_equal(s.array3d.shape, (11, 22, 12))
def test_4d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_4d.sav'), verbose=False)
assert_equal(s.array4d.shape, (4, 5, 8, 7))
def test_5d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_5d.sav'), verbose=False)
assert_equal(s.array5d.shape, (4, 3, 4, 6, 5))
def test_6d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_6d.sav'), verbose=False)
assert_equal(s.array6d.shape, (3, 6, 4, 5, 3, 4))
def test_7d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_7d.sav'), verbose=False)
assert_equal(s.array7d.shape, (2, 1, 2, 3, 4, 3, 2))
def test_8d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_8d.sav'), verbose=False)
assert_equal(s.array8d.shape, (4, 3, 2, 1, 2, 3, 5, 4))
class TestStructures:
def test_scalars(self):
s = readsav(path.join(DATA_PATH, 'struct_scalars.sav'), verbose=False)
assert_identical(s.scalars.a, np.array(np.int16(1)))
assert_identical(s.scalars.b, np.array(np.int32(2)))
assert_identical(s.scalars.c, np.array(np.float32(3.)))
assert_identical(s.scalars.d, np.array(np.float64(4.)))
assert_identical(s.scalars.e, np.array([b"spam"], dtype=object))
assert_identical(s.scalars.f, np.array(np.complex64(-1.+3j)))
def test_scalars_replicated(self):
s = readsav(path.join(DATA_PATH, 'struct_scalars_replicated.sav'), verbose=False)
assert_identical(s.scalars_rep.a, np.repeat(np.int16(1), 5))
assert_identical(s.scalars_rep.b, np.repeat(np.int32(2), 5))
assert_identical(s.scalars_rep.c, np.repeat(np.float32(3.), 5))
assert_identical(s.scalars_rep.d, np.repeat(np.float64(4.), 5))
assert_identical(s.scalars_rep.e, np.repeat(b"spam", 5).astype(object))
assert_identical(s.scalars_rep.f, np.repeat(np.complex64(-1.+3j), 5))
def test_scalars_replicated_3d(self):
s = readsav(path.join(DATA_PATH, 'struct_scalars_replicated_3d.sav'), verbose=False)
assert_identical(s.scalars_rep.a, np.repeat(np.int16(1), 24).reshape(4, 3, 2))
assert_identical(s.scalars_rep.b, np.repeat(np.int32(2), 24).reshape(4, 3, 2))
assert_identical(s.scalars_rep.c, np.repeat(np.float32(3.), 24).reshape(4, 3, 2))
assert_identical(s.scalars_rep.d, np.repeat(np.float64(4.), 24).reshape(4, 3, 2))
assert_identical(s.scalars_rep.e, np.repeat(b"spam", 24).reshape(4, 3, 2).astype(object))
assert_identical(s.scalars_rep.f, np.repeat(np.complex64(-1.+3j), 24).reshape(4, 3, 2))
def test_arrays(self):
s = readsav(path.join(DATA_PATH, 'struct_arrays.sav'), verbose=False)
assert_array_identical(s.arrays.a[0], np.array([1, 2, 3], dtype=np.int16))
assert_array_identical(s.arrays.b[0], np.array([4., 5., 6., 7.], dtype=np.float32))
assert_array_identical(s.arrays.c[0], np.array([np.complex64(1+2j), np.complex64(7+8j)]))
assert_array_identical(s.arrays.d[0], np.array([b"cheese", b"bacon", b"spam"], dtype=object))
def test_arrays_replicated(self):
|
ericholscher/django
|
django/test/client.py
|
Python
|
bsd-3-clause
| 22,448
| 0.000757
|
from __future__ import unicode_literals
import sys
import os
import re
import mimetypes
from copy import copy
from importlib import import_module
from io import BytesIO
from django.conf import settings
from django.contrib.auth import authenticate, login, logout, get_user_model
from django.core.handlers.base import BaseHandler
from django.core.handlers.wsgi import WSGIRequest
from django.core.signals import (request_started, request_finished,
got_request_exception)
from django.db import close_old_connections
from django.http import SimpleCookie, HttpRequest, QueryDict
from django.template import TemplateDoesNotExist
from django.test import signals
from django.utils.functional import curry
from django.utils.encoding import force_bytes, force_str
from django.utils.http import urlencode
from django.utils.itercompat import is_iterable
from django.utils import six
from django.utils.six.moves.urllib.parse import unquote, urlparse, urlsplit
from django.test.utils import ContextList
__all__ = ('Client', 'RequestFactory', 'encode_file', 'encode_multipart')
BOUNDARY = 'BoUnDaRyStRiNg'
MULTIPART_CONTENT = 'multipart/form-data; boundary=%s' % BOUNDARY
CONTENT_TYPE_RE = re.compile('.*; charset=([\w\d-]+);?')
class FakePayload(object):
"""
A wrapper around BytesIO that restricts what can be read since data from
the network can't be seeked and cannot be read outside of its content
length. This makes sure that views can't do anything under the test client
that wouldn't work in Real Life.
"""
def __init__(self, content=None):
self.__content = BytesIO()
self.__len = 0
self.read_started = False
if content is not None:
self.write(content)
def __len__(self):
return self.__len
def read(self, num_bytes=None):
if not self.read_started:
self.__content.seek(0)
self.read_started = True
if num_bytes is None:
num_bytes = self.__len or 0
        assert self.__len >= num_bytes, "Cannot read more than the available bytes from the HTTP incoming data."
content = self.__content.read(num_bytes)
self.__len -= num_bytes
return content
def write(self, content):
if self.read_started:
raise ValueError("Unable to write a payload after he's been read")
content = force_bytes(content)
self.__content.write(content)
self.__len += len(content)
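# Illustrative usage sketch (not part of Django): FakePayload only hands back bytes
# that were actually written into it, so a test view that over-reads fails loudly.
# The helper name below is hypothetical and exists purely as an example.
def _fake_payload_example():
    payload = FakePayload(b"hello")
    assert len(payload) == 5
    first = payload.read(2)   # b"he"
    rest = payload.read()     # the remaining b"llo"
    return first, rest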
def closing_iterator_wrapper(iterable, close):
try:
for item in iterable:
yield item
finally:
request_finished.disconnect(close_old_connections)
close() # will fire request_finished
request_finished.connect(close_old_connections)
class ClientHandler(BaseHandler):
"""
A HTTP Handler that can be used for testing purposes.
Uses the WSGI interface to compose requests, but returns
the raw HttpResponse object
"""
def __init__(self, enforce_csrf_checks=True, *args, **kwargs):
self.enforce_csrf_checks = enforce_csrf_checks
super(ClientHandler, self).__init__(*args, **kwargs)
def __call__(self, environ):
# Set up middleware if needed. We couldn't do this earlier, because
# settings weren't available.
if self._request_middleware is None:
self.load_middleware()
request_started.disconnect(close_old_connections)
request_started.send(sender=self.__class__)
request_started.connect(close_old_connections)
request = WSGIRequest(environ)
# sneaky little hack so that we can easily get round
# CsrfViewMiddleware. This makes life easier, and is probably
# required for backwards compatibility with external tests against
# admin views.
request._dont_enforce_csrf_checks = not self.enforce_csrf_checks
response = self.get_response(request)
# We're emulating a WSGI server; we must call the close method
# on completion.
if response.streaming:
response.streaming_content = closing_iterator_wrapper(
response.streaming_content, response.close)
else:
request_finished.disconnect(close_old_connections)
response.close() # will fire request_finished
request_finished.connect(close_old_connections)
return response
def store_rendered_templates(store, signal, sender, template, context, **kwargs):
"""
Stores templates and contexts that are rendered.
The context is copied so that it is an accurate representation at the time
of rendering.
"""
store.setdefault('templates', []).append(template)
store.setdefault('context', ContextList()).append(copy(context))
def encode_multipart(boundary, data):
"""
Encodes multipart POST data from a dictionary of form values.
The key will be used as the form data name; the value will be transmitted
as content. If the value is a file, the contents of the file will be sent
as an application/octet-stream; otherwise, str(value) will be sent.
"""
lines = []
to_bytes = lambda s: force_bytes(s, settings.DEFAULT_CHARSET)
# Not by any means perfect, but good enough for our purposes.
is_file = lambda thing: hasattr(thing, "read") and callable(thing.read)
# Each bit of the multipart form data could be either a form value or a
# file, or a *list* of form values and/or files. Remember that HTTP field
# names can be duplicated!
for (key, value) in data.items():
if is_file(value):
lines.extend(encode_file(boundary, key, value))
elif not isinstance(value, six.string_types) and is_iterable(value):
for item in value:
if is_file(item):
lines.extend(encode_file(boundary, key, item))
else:
lines.extend([to_bytes(val) for val in [
'--%s' % boundary,
'Content-Disposition: form-data; name="%s"' % key,
'',
item
]])
else:
lines.extend([to_bytes(val) for val in [
'--%s' % boundary,
'Content-Disposition: form-data; name="%s"' % key,
'',
value
]])
lines.extend([
to_bytes('--%s--' % boundary),
b'',
])
return b'\r\n'.join(lines)
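# Minimal usage sketch (assumes Django settings are already configured; the field names
# and values are illustrative): a plain dict of form fields becomes one
# multipart/form-data body delimited by BOUNDARY.
def _encode_multipart_example():
    body = encode_multipart(BOUNDARY, {'name': 'kermit', 'colours': ['green', 'grey']})
    return body.decode(settings.DEFAULT_CHARSET)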
def encode_file(boundary, key, file):
to_bytes = lambda s: force_bytes(s, settings.DEFAULT_CHARSET)
if hasattr(file, 'content_type'):
content_type = file.content_type
else:
content_type = mimetypes.guess_type(file.name)[0]
if content_type is None:
content_type = 'application/octet-stream'
return [
to_bytes('--%s' % boundary),
to_bytes('Content-Disposition: form-data; name="%s"; filename="%s"'
% (key, os.path.basename(file.name))),
to_bytes('Content-Type: %s' % content_type),
b'',
file.read()
]
class RequestFactory(object):
"""
Class that lets you create mock Request objects for use in testing.
Usage:
rf = RequestFactory()
get_request = rf.get('/hello/')
post_request = rf.post('/submit/', {'foo': 'bar'})
Once you have a request object you can pass it to any view function,
just as if that view had been hooked up using a URLconf.
"""
def __init__(self, **defaults):
self.defaults = defaults
self.cookies = SimpleCookie()
self.errors = BytesIO()
def _base_environ(self, **request):
"""
The base environment for a request.
"""
# This is a minimal valid WSGI environ dictionary, plus:
# - HTTP_COOKIE: for cookie support,
# - REMOTE_ADDR: often useful, see #8551.
# See http://www.python.org/dev/peps/pep-3333/#environ-variables
environ = {
'HTTP_COOKIE': self.cookies.output(header='', sep='; '),
'PATH_INFO': str('/'),
'REMOTE_ADDR': str('127.
|
ii0/bits
|
python/einj.py
|
Python
|
bsd-3-clause
| 23,447
| 0.004478
|
# Copyright (c) 2015, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Error Injection EINJ module."""
from __future__ import print_function
import acpi
import bits
import contextlib
from cpudetect import cpulib
import ctypes
import functools
import ttypager
# Create constants for each value in these dictionaries for readability. These
# names are too generic to put in the acpi module's namespace, but they make
# sense in the einj module.
globals().update(map(reversed, acpi._error_injection_action.iteritems()))
globals().update(map(reversed, acpi._error_injection_instruction.iteritems()))
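# For example, if acpi._error_injection_action maps 0x0 to 'BEGIN_INJECTION_OPERATION',
# the reversed mapping above binds BEGIN_INJECTION_OPERATION = 0x0 in this module's
# namespace; the concrete numeric values come from the ACPI tables, not from this file.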
read_mem = {
1: bits.readb,
2: bits.readw,
3: bits.readl,
4: bits.readq,
}
write_mem = {
1: bits.writeb,
2: bits.writew,
3: bits.writel,
4: bits.writeq,
}
out_port = {
1: bits.outb,
2: bits.outw,
3: bits.outl,
}
error_injection_command_status = {
0x0: 'SUCCESS',
0x1: 'UNKNOWN_FAILURE',
0x2: 'INVALID_ACCESS',
}
globals().update(map(reversed, error_injection_command_status.iteritems()))
# List of actions that can be executed with no custom processing
_action_simple = [
BEGIN_INJECTION_OPERATION,
END_OPERATION,
EXECUTE_OPERATION,
CHECK_BUSY_STATUS,
GET_COMMAND_STATUS,
]
def _execute_action(entry, value=None):
print("entry.injection_action = {:#x} ({})".format(entry.injection_action, acpi._error_injection_action.get(entry.injection_action, "Unknown")))
if entry.injection_action in _action_simple:
return _execute_instruction(entry)
elif entry.injection_action == GET_TRIGGER_ERROR_ACTION_TABLE:
return acpi.trigger_error_action(_execute_instruction(entry))
elif entry.injection_action == SET_ERROR_TYPE:
if value is None:
raise ValueError("action SET_ERROR_TYPE but no input parameter provided")
return _execute_instruction(entry, value.data)
elif entry.injection_action == GET_ERROR_TYPE:
_execute_instruction(entry)
return acpi.error_type_flags.from_address(entry.register_region.address)
elif entry.injection_action == SET_ERROR_TYPE_WITH_ADDRESS:
if value is None:
raise ValueError("action SET_ERROR_TYPE_WITH_ADDRESS but no input paramters provided")
error_type = value[0]
if error_type.processor_correctable or error_type.processor_uncorrectable_non_fatal or error_type.processor_uncorrectable_fatal:
error_type, flags, apicid = value
cpu_error = acpi.set_error_type_with_addr.from_address(entry.register_region.address)
if cpu_error.error_type.vendor_defined and cpu_error.vendor_error_type_extension_structure_offset:
vendor_err_addr = entry.register_region.address + cpu_error.vendor_error_type_extension_structure_offset
vendor_error_type_extension = acpi.set_error_type_with_addr.from_address(vendor_err_addr)
print(vendor_error_type_extension)
print('WRITE_REGISTER SET_ERROR_TYPE_WITH_ADDRESS address - {0:#x}'.format(entry.register_region.address))
cpu_error.error_type = error_type
cpu_error.flags = flags
cpu_error.apicid = apicid
print(cpu_error)
elif error_type.memory_correctable or error_type.memory_uncorrectable_non_fatal or error_type.memory_uncorrectable_fatal:
error_type, flags, mem_addr, mem_addr_range = value
mem_error = acpi.set_error_type_with_addr.from_address(entry.register_region.address)
print('WRITE_REGISTER SET_ERROR_TYPE_WITH_ADDRESS address - {0:#x}'.format(entry.register_region.address))
mem_error.error_type = error_type
mem_error.flags = flags
mem_error.memory_address = mem_addr
mem_error.memory_address_range = mem_addr_range
print(mem_error)
        elif error_type.pci_express_correctable or error_type.pci_express_uncorrectable_non_fatal or error_type.pci_express_uncorrectable_fatal:
            error_type, flags, segment, bus, device, function = value
pcie_error = acpi.set_error_type_with_addr.from_address(entry.register_region.address)
print('WRITE_REGISTER SET_ERROR_TYPE_WITH_ADDRESS address - {0:#x}'.format(entry.register_region.address))
pcie_error.error_type = error_type
pcie_error.flags = flags
pcie_error.pcie_sbdf.bits.function_num = function
pcie_error.pcie_sbdf.bits.device_num = device
pcie_error.pcie_sbdf.bits.bus_num = bus
pcie_error.pcie_sbdf.bits.pcie_segment = segment
print(pcie_error)
else:
raise ValueError("action SET_ERROR_TYPE_WITH_ADDRESS has unsupported error_type {}".format(error_type))
elif entry.injection_action == TRIGGER_ERROR:
# Execute the actions specified in the trigger action table.
trigger_table = get_trigger_action_table_op()
for entry in trigger_table.entries:
_execute_instruction(entry)
else:
raise ValueError("action is unsupported")
def _execute_instruction(entry, value=None):
print("entry.instruction = {:#x} ({})".format(entry.instruction, acpi._error_injection_instruction.get(entry.instruction, "Unknown")))
if entry.instruction is READ_REGISTER:
return _read_register(entry)
elif entry.instruction is READ_REGISTER_VALUE:
return _read_register_value(entry)
elif entry.instruction is WRITE_REGISTER_VALUE:
return _write_register(entry)
elif entry.instruction is WRITE_REGISTER:
return _write_register(entry, value)
elif entry.instruction is NOOP:
return None
def _read_register(entry):
if entry.register_region.address_space_id == acpi.ASID_SYSTEM_MEMORY:
print('READ_REGISTER address - {:#x}'.format(entry.register_region.address))
value = read_mem[entry.register_region.access_size](entry.register_region.address)
value = value >> entry.register_region.register_bit_offset
value = value & entry.mask
print('READ_REGISTER value - {:#x}'.format(value))
return value
return None
def _read_register_value(entry):
read_value = _read_register(entry)
read_value = read_value >> entry.register_region.register_bit_offset
read_value = read_value & entry.mask
print('entry.value - {:#x}'.format(entry.value))
return read_value == entry.value
def _write_register(entry, value=None):
if not value:
value = entry.value
if entry.register_region.address_space_id == acpi.ASID_SYSTEM_MEMORY:
print('WRITE_REGISTER address - {:#x}'.format(entry.register_region.address))
|
mikalstill/nova
|
nova/api/openstack/placement/exception.py
|
Python
|
apache-2.0
| 6,885
| 0.000436
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Exceptions for use in the Placement API."""
# NOTE(cdent): The exceptions are copied from nova.exception, where they
# were originally used. To prepare for extracting placement to its own
# repository we wish to no longer do that. Instead, exceptions used by
# placement should be in the placement hierarchy.
from oslo_log import log as logging
from nova.i18n import _
LOG = logging.getLogger(__name__)
class _BaseException(Exception):
"""Base Exception
To correctly use this class, inherit from it and define
a 'msg_fmt' property. That msg_fmt will get printf'd
with the keyword arguments provided to the constructor.
"""
msg_fmt = _("An unknown exception occurred.")
def __init__(self, message=None, **kwargs):
self.kwargs = kwargs
if not message:
try:
message = self.msg_fmt % kwargs
except Exception:
# NOTE(melwitt): This is done in a separate method so it can be
# monkey-patched during testing to make it a hard failure.
self._log_exception()
message = self.msg_fmt
self.message = message
super(_BaseException, self).__init__(message)
def _log_exception(self):
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception('Exception in string format operation')
for name, value in self.kwargs.items():
LOG.error("%s: %s" % (name, value)) # noqa
def format_message(self):
# Use the first argument to the python Exception object which
# should be our full exception message, (see __init__).
return self.args[0]
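# Usage sketch (hypothetical subclass, shown only to illustrate the msg_fmt contract
# described in the _BaseException docstring): keyword arguments passed to the
# constructor fill the printf-style format string.
class _ExampleQuotaExceeded(_BaseException):
    msg_fmt = _("Quota exceeded for %(resource_class)s on provider %(resource_provider)s.")
# e.g. raise _ExampleQuotaExceeded(resource_class='VCPU', resource_provider='compute1')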
class NotFound(_BaseException):
msg_fmt = _("Resource could not be found.")
class Exists(_BaseException):
msg_fmt = _("Resource already exists.")
class InvalidInventory(_BaseException):
msg_fmt = _("Inventory for '%(resource_class)s' on "
"resource provider '%(resource_provider)s' invalid.")
class CannotDeleteParentResourceProvider(_BaseException):
msg_fmt = _("Cannot delete resource provider that is a parent of "
"another. Delete child providers first.")
class ConcurrentUpdateDetected(_BaseException):
msg_fmt = _("Another thread concurrently updated the data. "
"Please retry your update")
class ResourceProviderConcurrentUpdateDetected(ConcurrentUpdateDetected):
msg_fmt = _("Another thread concurrently updated the resource provider "
"data. Please retry your update")
class InvalidAllocationCapacityExceeded(InvalidInventory):
msg_fmt = _("Unable to create allocation for '%(resource_class)s' on "
"resource provider '%(resource_provider)s'. The requested "
"amount would exceed the capacity.")
class InvalidAllocationConstraintsViolated(InvalidInventory):
msg_fmt = _("Unable to create allocation for '%(resource_class)s' on "
"resource provider '%(resource_provider)s'. The requested "
"amount would violate inventory constraints.")
class InvalidInventoryCapacity(InvalidInventory):
msg_fmt = _("Invalid inventory for '%(resource_class)s' on "
"resource provider '%(resource_provider)s'. "
"The reserved value is greater than or equal to total.")
class InvalidInventoryCapacityReservedCanBeTotal(InvalidInventoryCapacity):
msg_fmt = _("Invalid inventory for '%(resource_class)s' on "
"resource provider '%(resource_provider)s'. "
"The reserved value is greater than total.")
# An exception with this name is used on both sides of the placement/
# nova interaction.
class InventoryInUse(InvalidInventory):
# NOTE(mriedem): This message cannot change without impacting the
# nova.scheduler.client.report._RE_INV_IN_USE regex.
msg_fmt = _("Inventory for '%(resource_classes)s' on "
"resource provider '%(resource_provider)s' in use.")
class InventoryWithResourceClassNotFound(NotFound):
msg_fmt = _("No inventory of class %(resource_class)s found.")
class MaxDBRetriesExceeded(_BaseException):
msg_fmt = _("Max retries of DB transaction exceeded attempting to "
"perform %(action)s.")
class ObjectActionError(_BaseException):
msg_fmt = _('Object action %(action)s failed because: %(reason)s')
class PolicyNotAuthorized(_BaseException):
|
msg_fmt = _("Policy does not allow %(action)s to be performed.")
class ResourceClassCannotDeleteStandard
|
(_BaseException):
msg_fmt = _("Cannot delete standard resource class %(resource_class)s.")
class ResourceClassCannotUpdateStandard(_BaseException):
msg_fmt = _("Cannot update standard resource class %(resource_class)s.")
class ResourceClassExists(_BaseException):
msg_fmt = _("Resource class %(resource_class)s already exists.")
class ResourceClassInUse(_BaseException):
msg_fmt = _("Cannot delete resource class %(resource_class)s. "
"Class is in use in inventory.")
class ResourceClassNotFound(NotFound):
msg_fmt = _("No such resource class %(resource_class)s.")
# An exception with this name is used on both sides of the placement/
# nova interaction.
class ResourceProviderInUse(_BaseException):
msg_fmt = _("Resource provider has allocations.")
class TraitCannotDeleteStandard(_BaseException):
msg_fmt = _("Cannot delete standard trait %(name)s.")
class TraitExists(_BaseException):
msg_fmt = _("The Trait %(name)s already exists")
class TraitInUse(_BaseException):
msg_fmt = _("The trait %(name)s is in use by a resource provider.")
class TraitNotFound(NotFound):
msg_fmt = _("No such trait(s): %(names)s.")
class ProjectNotFound(NotFound):
msg_fmt = _("No such project(s): %(external_id)s.")
class ProjectExists(Exists):
msg_fmt = _("The project %(external_id)s already exists.")
class UserNotFound(NotFound):
msg_fmt = _("No such user(s): %(external_id)s.")
class UserExists(Exists):
msg_fmt = _("The user %(external_id)s already exists.")
class ConsumerNotFound(NotFound):
msg_fmt = _("No such consumer(s): %(uuid)s.")
class ConsumerExists(Exists):
msg_fmt = _("The consumer %(uuid)s already exists.")
|
macknowak/vrepsim
|
vrepsim/base.py
|
Python
|
gpl-3.0
| 699
| 0
|
# -*- coding: utf-8 -*-
"""Assorted base data structures.
Assorted base data structures provide a generic communicator with V-REP
simulator.
"""
from vrepsim.simulator import get_default_simulator
class Communicator(object):
"""Generic communicator with V-REP simulator."""
def __init__(self, vrep_sim):
if vrep_sim is not None:
self._vrep_sim = vrep_sim
else:
self._vrep_sim = get_default_simulator(raise_on_none=True)
@property
def client_id(self):
"""Client ID."""
return self._vrep_sim.client_id
@property
def vrep_sim(self):
"""Interface to V-REP remote API server."""
return self._vrep_sim
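# Usage sketch (illustrative; assumes a default simulator has been registered through
# vrepsim.simulator):
#     comm = Communicator(vrep_sim=None)   # falls back to get_default_simulator()
#     cid = comm.client_id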
|
Asana/boto
|
boto/rds2/layer1.py
|
Python
|
mit
| 159,859
| 0.00015
|
# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.rds2 import exceptions
from boto.compat import json
class RDSConnection(AWSQueryConnection):
"""
Amazon Relational Database Service
Amazon Relational Database Service (Amazon RDS) is a web service
that makes it easier to set up, operate, and scale a relational
database in the cloud. It provides cost-efficient, resizable
capacity for an industry-standard relational database and manages
common database administration tasks, freeing up developers to
focus on what makes their applications and businesses unique.
Amazon RDS gives you access to the capabilities of a familiar
MySQL or Oracle database server. This means the code,
applications, and tools you already use today with your existing
MySQL or Oracle databases work with Amazon RDS without
modification. Amazon RDS automatically backs up your database and
maintains the database software that powers your DB instance.
Amazon RDS is flexible: you can scale your database instance's
compute resources and storage capacity to meet your application's
demand. As with all Amazon Web Services, there are no up-front
investments, and you pay only for the resources you use.
This is the Amazon RDS API Reference . It contains a comprehensive
description of all Amazon RDS Query APIs and data types. Note that
this API is asynchronous and some actions may require polling to
determine when an action has been applied. See the parameter
description to determine if a change is applied immediately or on
the next instance reboot or during the maintenance window. For
more information on Amazon RDS concepts and usage scenarios, go to
the `Amazon RDS User Guide`_.
"""
APIVersion = "2014-10-31"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "rds.us-east-1.amazonaws.com"
ResponseError = JSONResponseError
_faults = {
"InvalidSubnet": exceptions.InvalidSubnet,
"DBParameterGroupQuotaEx
|
ceeded": exceptions.DBParameterGroupQuotaExceeded,
"DBSubnetGroupAlreadyExists": exceptions.DBSubnetGroupAlreadyExists,
"DBSubnetGroupQuotaExceeded": exceptions.DBSubnetGroupQuotaExceeded,
"InstanceQuotaExceeded": exceptions.InstanceQuotaExceeded,
"InvalidRestore": exceptions.InvalidRestore,
"InvalidDBParameterGroupState": exceptions.InvalidDBParameterGroupState,
"AuthorizationQuotaExceeded": exceptions.Aut
|
horizationQuotaExceeded,
"DBSecurityGroupAlreadyExists": exceptions.DBSecurityGroupAlreadyExists,
"InsufficientDBInstanceCapacity": exceptions.InsufficientDBInstanceCapacity,
"ReservedDBInstanceQuotaExceeded": exceptions.ReservedDBInstanceQuotaExceeded,
"DBSecurityGroupNotFound": exceptions.DBSecurityGroupNotFound,
"DBInstanceAlreadyExists": exceptions.DBInstanceAlreadyExists,
"ReservedDBInstanceNotFound": exceptions.ReservedDBInstanceNotFound,
"DBSubnetGroupDoesNotCoverEnoughAZs": exceptions.DBSubnetGroupDoesNotCoverEnoughAZs,
"InvalidDBSecurityGroupState": exceptions.InvalidDBSecurityGroupState,
"InvalidVPCNetworkState": exceptions.InvalidVPCNetworkState,
"ReservedDBInstancesOfferingNotFound": exceptions.ReservedDBInstancesOfferingNotFound,
"SNSTopicArnNotFound": exceptions.SNSTopicArnNotFound,
"SNSNoAuthorization": exceptions.SNSNoAuthorization,
"SnapshotQuotaExceeded": exceptions.SnapshotQuotaExceeded,
"OptionGroupQuotaExceeded": exceptions.OptionGroupQuotaExceeded,
"DBParameterGroupNotFound": exceptions.DBParameterGroupNotFound,
"SNSInvalidTopic": exceptions.SNSInvalidTopic,
"InvalidDBSubnetGroupState": exceptions.InvalidDBSubnetGroupState,
"DBSubnetGroupNotFound": exceptions.DBSubnetGroupNotFound,
"InvalidOptionGroupState": exceptions.InvalidOptionGroupState,
"SourceNotFound": exceptions.SourceNotFound,
"SubscriptionCategoryNotFound": exceptions.SubscriptionCategoryNotFound,
"EventSubscriptionQuotaExceeded": exceptions.EventSubscriptionQuotaExceeded,
"DBSecurityGroupNotSupported": exceptions.DBSecurityGroupNotSupported,
"InvalidEventSubscriptionState": exceptions.InvalidEventSubscriptionState,
"InvalidDBSubnetState": exceptions.InvalidDBSubnetState,
"InvalidDBSnapshotState": exceptions.InvalidDBSnapshotState,
"SubscriptionAlreadyExist": exceptions.SubscriptionAlreadyExist,
"DBSecurityGroupQuotaExceeded": exceptions.DBSecurityGroupQuotaExceeded,
"ProvisionedIopsNotAvailableInAZ": exceptions.ProvisionedIopsNotAvailableInAZ,
"AuthorizationNotFound": exceptions.AuthorizationNotFound,
"OptionGroupAlreadyExists": exceptions.OptionGroupAlreadyExists,
"SubscriptionNotFound": exceptions.SubscriptionNotFound,
"DBUpgradeDependencyFailure": exceptions.DBUpgradeDependencyFailure,
"PointInTimeRestoreNotEnabled": exceptions.PointInTimeRestoreNotEnabled,
"AuthorizationAlreadyExists": exceptions.AuthorizationAlreadyExists,
"DBSubnetQuotaExceeded": exceptions.DBSubnetQuotaExceeded,
"OptionGroupNotFound": exceptions.OptionGroupNotFound,
"DBParameterGroupAlreadyExists": exceptions.DBParameterGroupAlreadyExists,
"DBInstanceNotFound": exceptions.DBInstanceNotFound,
"ReservedDBInstanceAlreadyExists": exceptions.ReservedDBInstanceAlreadyExists,
"InvalidDBInstanceState": exceptions.InvalidDBInstanceState,
"DBSnapshotNotFound": exceptions.DBSnapshotNotFound,
"DBSnapshotAlreadyExists": exceptions.DBSnapshotAlreadyExists,
"StorageQuotaExceeded": exceptions.StorageQuotaExceeded,
"SubnetAlreadyInUse": exceptions.SubnetAlreadyInUse,
}
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
if 'host' not in kwargs:
kwargs['host'] = region.endpoint
super(RDSConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def add_source_identifier_to_subscription(self, subscription_name,
source_identifier):
"""
Adds a source identifier to an existing RDS event notification
subscription.
:type subscription_name: string
:param subscription_name: The name of the RDS event notification
subscription you want to add a source identifier to.
:type source_identifier: string
:param source_identifier:
The identifier of the event source to be added. An identifier must
begin with a letter and must contain only ASCII letters, digits,
and hyphe
|
Shaps/ansible
|
test/units/plugins/cache/test_cache.py
|
Python
|
gpl-3.0
| 5,922
| 0.001013
|
# (c) 2012-2015, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat import unittest, mock
from ansible.errors import AnsibleError
from ansible.plugins.cache import FactCache, CachePluginAdjudicator
from ansible.plugins.cache.base import BaseCacheModule
from ansible.plugins.cache.memory import CacheModule as MemoryCache
from ansible.plugins.loader import cache_loader
import pytest
class TestCachePluginAdjudicator:
# memory plugin cache
cache = CachePluginAdjudicator()
cache['cache_key'] = {'key1': 'value1', 'key2': 'value2'}
cache['cache_key_2'] = {'key': 'value'}
def test___setitem__(self):
self.cache['new_cache_key'] = {'new_key1': ['new_value1', 'new_value2']}
assert self.cache['new_cache_key'] == {'new_key1': ['new_value1', 'new_value2']}
def test_inner___setitem__(self):
self.cache['new_cache_key'] = {'new_key1': ['new_value1', 'new_value2']}
self.cache['new_cache_key']['new_key1'][0] = 'updated_value1'
assert self.cache['new_cache_key'] == {'new_key1': ['updated_value1', 'new_value2']}
def test___contains__(self):
assert 'cache_key' in self.cache
assert 'not_cache_key' not in self.cache
def test_get(self):
assert self.cache.get('cache_key') == {'key1': 'value1', 'key2': 'value2'}
def test_get_with_default(self):
assert self.cache.get('foo', 'bar') == 'bar'
def test_get_without_default(self):
assert self.cache.get('foo') is None
def test___getitem__(self):
with pytest.raises(KeyError) as err:
self.cache['foo']
def test_pop_with_default(self):
assert self.cache.pop('foo', 'bar') == 'bar'
def test_pop_without_default(self):
with pytest.raises(KeyError) as err:
assert self.cache.pop('foo')
def test_pop(self):
v = self.cache.pop('cache_key_2')
assert v == {'key': 'value'}
assert 'cache_key_2' not in self.cache
def test_update(self):
self.cache.update({'cache_key': {'key2': 'updatedvalue'}})
        assert self.cache['cache_key']['key2'] == 'updatedvalue'
class TestFactCache(unittest.TestCase):
def setUp(self):
with mock.patch('ansible.constants.CACHE_PLUGIN', 'memory'):
self.cache = FactCache()
def test_copy(self):
self.cache['avocado'] = 'fruit'
self.cache['daisy'] = 'flower'
a_copy = self.cache.copy()
self.assertEqual(type(a_copy), dict)
        self.assertEqual(a_copy, dict(avocado='fruit', daisy='flower'))
def test_plugin_load_failure(self):
# See https://github.com/ansible/ansible/issues/18751
# Note no fact_connection config set, so this will fail
with mock.patch('ansible.constants.CACHE_PLUGIN', 'json'):
self.assertRaisesRegexp(AnsibleError,
"Unable to load the facts cache plugin.*json.*",
FactCache)
def test_update(self):
self.cache.update({'cache_key': {'key2': 'updatedvalue'}})
assert self.cache['cache_key']['key2'] == 'updatedvalue'
def test_update_legacy(self):
self.cache.update('cache_key', {'key2': 'updatedvalue'})
assert self.cache['cache_key']['key2'] == 'updatedvalue'
def test_update_legacy_key_exists(self):
self.cache['cache_key'] = {'key': 'value', 'key2': 'value2'}
self.cache.update('cache_key', {'key': 'updatedvalue'})
assert self.cache['cache_key']['key'] == 'updatedvalue'
assert self.cache['cache_key']['key2'] == 'value2'
class TestAbstractClass(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_subclass_error(self):
class CacheModule1(BaseCacheModule):
pass
with self.assertRaises(TypeError):
CacheModule1() # pylint: disable=abstract-class-instantiated
class CacheModule2(BaseCacheModule):
def get(self, key):
super(CacheModule2, self).get(key)
with self.assertRaises(TypeError):
CacheModule2() # pylint: disable=abstract-class-instantiated
def test_subclass_success(self):
class CacheModule3(BaseCacheModule):
def get(self, key):
super(CacheModule3, self).get(key)
def set(self, key, value):
super(CacheModule3, self).set(key, value)
def keys(self):
super(CacheModule3, self).keys()
def contains(self, key):
super(CacheModule3, self).contains(key)
def delete(self, key):
super(CacheModule3, self).delete(key)
def flush(self):
super(CacheModule3, self).flush()
def copy(self):
super(CacheModule3, self).copy()
self.assertIsInstance(CacheModule3(), CacheModule3)
def test_memory_cachemodule(self):
self.assertIsInstance(MemoryCache(), MemoryCache)
def test_memory_cachemodule_with_loader(self):
self.assertIsInstance(cache_loader.get('memory'), MemoryCache)
|
pdm126/rss-crawler
|
dev.py
|
Python
|
mit
| 4,063
| 0.03618
|
#!/usr/bin/env python2.7
import feedparser
import sys
import string
# holds the feeds
feeds = []
Feed_list = []
kill = 'g'
tags = 'g'
# multiple feed list
Feed_list = [
'http://krebsonsecurity.com/feed/',
'http://www.tripwire.com/state-of-security/feed/',
'https://threatpost.com/feed'
]
# appends list of feeds via feed parser into big list, slow to load in
for url in Feed_list:
feeds.append(feedparser.parse(url))
def full_list():
# option e feed by chunk
for feed in feeds:
for post in feed.entries:
print '++++++'
print post.title
print post.description
print post.link
print '++++++'
            kill = raw_input('next? ')
if kill == 'y':
menu()
def latest_list():
# option d - line by line*
for feed in feeds:
for post in feed.entries:
print post.title
print ' '
print post.summary
print post.link
print '+++++'
            kill = raw_input('next? ')
            if kill == 'y':
                menu()
def quick_list():
# option c line by line
for feed in feeds:
for post in feed.entries:
            print post.title
print post.link
print '+++++'
            kill = raw_input('next? ')
if kill == 'y':
menu()
# deals with keywords
def keyword_title(term):
for feed in feeds:
for post in feed.entries:
if term in post.title:
print 'found keyword ' + term + ' on '
print post.title + '/n' + post.link
again1()
if term not in post.summary:
print 'not found'
again1()
def keyword_full(term):
for feed in feeds:
for post in feed.entries:
if term in post.summary:
print 'found keyword ' + term + ' on ' + post.link
again()
if term not in post.summary:
print 'not found'
again()
# tag listing
def tag_list():
for feed in feeds:
for post in feed.entries:
tags = str(post.tags)
tags = ''.join([c for c in tags if c not in ('{', '}', ':', '[', ']', ',' )])
tags = tags.replace( 'scheme', '' )
tags = tags.replace('term', '')
tags = tags.replace('scheme', '')
tags = tags.replace('None', '')
tags = tags.replace('label', '')
tags = tags.replace("u'", '')
tags = tags.replace("'", '')
tags = tags.replace(" ", '')
# {'term': u'threat intelligence', 'scheme': None, 'label': None}]
print tags
# repeaters
def again():
go_again = raw_input('again? ')
if go_again == 'y':
term = raw_input('Search term? ')
keyword_full(term)
if go_again == 'n':
menu()
def again1():
go_again1 = raw_input('again? ')
if go_again1 == 'y':
term = raw_input('Search term? ')
keyword_title(term)
if go_again1 == 'n':
menu()
def menu():
print 'Welcome Dr Falken'
print ' '
print 'a) Deep RSS Keyword Search'
print 'b) RSS title search'
print 'c) Top post of feeds'
print 'd) Summary List of Feeds'
print 'e) Full list of feeds'
print 'f) Full tag list'
choice = raw_input('please choose an option ')
print 'chosen ' + choice + ' good choice. '
if choice == 'a':
term = raw_input('Search term ?')
keyword_full(term)
if choice == 'b':
term = raw_input('Search term ?')
keyword_title(term)
if choice == 'c':
quick_list()
if choice == 'd':
latest_list()
if choice == 'e':
full_list()
if choice == 'f':
tag_list()
menu()
|
kyokyos/bioinform
|
HBV_APRI_FIB4.py
|
Python
|
unlicense
| 1,993
| 0.038821
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
Estimate liver fibrosis or cirrhosis from the APRI and FIB-4 scores.
This is a temporary script file.
"""
import math
# APRI stands for AST to Platelet Ratio Index.
# AST unit: iu/l
# PRI (platelet count) unit: 10**9/L
# An APRI above 2 suggests possible cirrhosis.
def APRI(AST, upper_AST, PRI):
    apri = ((AST * 1.0 / upper_AST) * 100) / PRI
    return apri
# FIB-4 is short for Fibrosis-4.
# age unit: years
# AST and ALT unit: U/L (U/L and iu/L are generally interchangeable; the former is the
# Chinese convention, the latter the international one).
def FIB4(age, AST, ALT, PRI):
    fib4 = (age * AST) / (PRI * math.sqrt(ALT))
    return fib4
# Interpret the liver condition from the two scores.
def Liver_condition(apri, fib4):
    if apri > 2:
        print("Cirrhosis may be present.")
        print("For chronic hepatitis B carriers, antiviral drug treatment should be considered.")
    if fib4 < 1.45:
        print("No significant liver fibrosis, or fibrosis below stage 2 (mild fibrosis).")
    if fib4 > 3.25:
        print("Liver fibrosis of stage 3-4 or above.")
# Disclaimer.
def Print_warming():
    print("As the algorithm is still being improved, the result is for reference only. "
          "Please follow up with a specialist in infectious diseases or hepatology.")
def Print_unit():
    print("The biochemical values come from liver function and routine blood tests.")
    print("AST unit: iu/l")
    print("ALT unit: U/L")
    print("PRI unit: 10**9/L")
    print("Age unit: years")
    print("U/L and iu/L are generally interchangeable; the former is the Chinese "
          "convention, the latter the international one.")
# Disclaimer.
Print_warming()
# Print the units of the biochemical values.
print("-" * 30)
Print_unit()
print("-" * 30)
print("")
print("")
# Read the input parameters.
print("Please enter the following values (for example 10, 23.5, and so on):")
AST = float(input("Aspartate aminotransferase (AST): "))
upper_AST = float(input("Upper limit of normal for AST: "))
ALT = float(input("Alanine aminotransferase (ALT): "))
PRI = float(input("Platelet count (PRI): "))
age = float(input("Age: "))
apri = APRI(AST, upper_AST, PRI)
fib4 = FIB4(age, AST, ALT, PRI)
print("-" * 30)
print("")
print("")
print("Estimated result:")
# Interpret the liver condition.
Liver_condition(apri, fib4)
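# Worked example with illustrative values (not from any patient): for AST = 80 iu/l,
# an AST upper limit of 40 iu/l and PRI = 100 (10**9/L),
#     APRI = ((80 / 40) * 100) / 100 = 2.0,
# i.e. right at the cirrhosis-suspicion threshold used above. With age = 50 years and
# ALT = 64 U/L,
#     FIB4 = (50 * 80) / (100 * sqrt(64)) = 4000 / 800 = 5.0,
# which is above the 3.25 cut-off for advanced fibrosis.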
|
dshyshov/MAVProxy
|
MAVProxy/modules/mavproxy_calibration.py
|
Python
|
gpl-3.0
| 3,874
| 0.003872
|
#!/usr/bin/env python
'''calibration command handling'''
import time, os
from pymavlink import mavutil
from MAVProxy.modules.lib import mp_module
class CalibrationModule(mp_module.MPModule):
def __init__(self, mpstate):
super(CalibrationModule, self).__init__(mpstate, "calibration")
self.add_command('ground', self.cmd_ground, 'do a ground start')
self.add_command('level', self.cmd_level, 'set level on a multicopter')
        self.add_command('compassmot', self.cmd_compassmot, 'do compass/motor interference calibration')
self.add_command('calpress', self.cmd_calpressure,'calibrate pressure sensors')
self.add_command('accelcal', self.cmd_accelcal, 'do 3D accelerometer calibration')
self.add_command('gyrocal', self.cmd_gyrocal, 'do gyro calibration')
self.accelcal_count = -1
        self.accelcal_wait_enter = False
self.compassmot_running = False
self.empty_input_count = 0
def cmd_ground(self, args):
'''do a ground start mode'''
self.master.calibrate_imu()
def cmd_level(self, args):
'''run a accel level'''
self.master.calibrate_level()
def cmd_accelcal(self, args):
'''do a full 3D accel calibration'''
mav = self.master
        # ask the APM to begin 3D calibration of accelerometers
mav.mav.command_long_send(mav.target_system, mav.target_component,
mavutil.mavlink.MAV_CMD_PREFLIGHT_CALIBRATION, 0,
0, 0, 0, 0, 1, 0, 0)
self.accelcal_count = 0
self.accelcal_wait_enter = False
def cmd_gyrocal(self, args):
'''do a full gyro calibration'''
mav = self.master
mav.mav.command_long_send(mav.target_system, mav.target_component,
mavutil.mavlink.MAV_CMD_PREFLIGHT_CALIBRATION, 0,
1, 0, 0, 0, 0, 0, 0)
def mavlink_packet(self, m):
'''handle mavlink packets'''
if self.accelcal_count != -1:
if m.get_type() == 'STATUSTEXT':
# handle accelcal packet
text = str(m.text)
if text.startswith('Place '):
self.accelcal_wait_enter = True
self.empty_input_count = self.mpstate.empty_input_count
def idle_task(self):
'''handle mavlink packets'''
if self.accelcal_count != -1:
if self.accelcal_wait_enter and self.empty_input_count != self.mpstate.empty_input_count:
self.accelcal_wait_enter = False
self.accelcal_count += 1
# tell the APM that user has done as requested
self.master.mav.command_ack_send(self.accelcal_count, 1)
if self.accelcal_count >= 6:
self.accelcal_count = -1
if self.compassmot_running:
if self.mpstate.empty_input_count != self.empty_input_count:
# user has hit enter, stop the process
self.compassmot_running = False
print("sending stop")
self.master.mav.command_ack_send(0, 1)
def cmd_compassmot(self, args):
'''do a compass/motor interference calibration'''
mav = self.master
print("compassmot starting")
mav.mav.command_long_send(mav.target_system, mav.target_component,
mavutil.mavlink.MAV_CMD_PREFLIGHT_CALIBRATION, 0,
0, 0, 0, 0, 0, 1, 0)
self.compassmot_running = True
self.empty_input_count = self.mpstate.empty_input_count
def cmd_calpressure(self, args):
'''calibrate pressure sensors'''
self.master.calibrate_pressure()
def init(mpstate):
'''initialise module'''
return CalibrationModule(mpstate)
|
cliftonmcintosh/openstates
|
openstates/hi/events.py
|
Python
|
gpl-3.0
| 3,151
| 0.001269
|
from openstates.utils import LXMLMixin
import datetime as dt
from pupa.scrape import Scraper, Event
from .utils import get_short_codes
from requests import HTTPError
import pytz
URL = "http://www.capitol.hawaii.gov/upcominghearings.aspx"
class HIEventScraper(Scraper, LXMLMixin):
def get_related_bills(self, href):
ret = []
try:
page = self.lxmlize(href)
except HTTPError:
return ret
bills = page.xpath(".//a[contains(@href, 'Bills')]")
for bill in bills:
try:
row = next(bill.iterancestors(tag='tr'))
except StopIteration:
continue
tds = row.xpath("./td")
descr = tds[1].text_content()
for i in ['\r\n', '\xa0']:
descr = descr.replace(i, '')
ret.append({"bill_id": bill.text_content(),
"type": "consideration",
"descr": descr})
return ret
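    # Each related bill is an anchor whose href contains 'Bills'; the description comes
    # from the second cell of its table row and is reused for the agenda item in scrape().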
def scrape(self):
tz = pytz.timezone("US/Eastern")
get_short_codes(self)
page = self.lxmlize(URL)
table = page.xpath(
"//table[@id='ctl00_ContentPlaceHolderCol1_GridView1']")[0]
for event in table.xpath(".//tr")[1:]:
tds = event.xpath("./td")
committee = tds[0].text_content().strip()
descr = [x.text_content() for x in tds[1].xpath(".//span")]
if len(descr) != 1:
raise Exception
descr = descr[0].replace('.', '').strip()
            when = tds[2].text_content().strip()
where = tds[3].text_content().strip()
notice = tds[4].xpath(".//a")[0]
notice_href = notice.attrib['href']
notice_name = notice.text
            when = dt.datetime.strptime(when, "%m/%d/%Y %I:%M %p")
when = pytz.utc.localize(when)
event = Event(name=descr, start_time=when, classification='committee-meeting',
description=descr, location_name=where, timezone=tz.zone)
if "/" in committee:
committees = committee.split("/")
else:
committees = [committee]
for committee in committees:
if "INFO" not in committee:
committee = self.short_ids.get("committee", {"chamber": "unknown",
"name": committee})
else:
committee = {
"chamber": "joint",
"name": committee,
}
event.add_committee(committee['name'], note='host')
event.add_source(URL)
event.add_document(notice_name,
notice_href,
media_type='text/html')
for bill in self.get_related_bills(notice_href):
a = event.add_agenda_item(description=bill['descr'])
a.add_bill(
bill['bill_id'],
note=bill['type']
)
yield event
|
sarvex/tensorflow
|
tensorflow/python/keras/tests/add_loss_correctness_test.py
|
Python
|
apache-2.0
| 16,393
| 0.007137
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests add_loss API correctness."""
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.keras import Input
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import layers
from tensorflow.python.keras import losses
from tensorflow.python.keras import Model
from tensorflow.python.keras import optimizer_v2
from tensorflow.python.keras import Sequential
from tensorflow.python.keras import testing_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.rmsprop import RMSPropOptimizer
MAE = losses.MeanAbsoluteError
mae = losses.mean_absolute_error
def get_ctl_train_step(model):
optimizer = optimizer_v2.gradient_descent.SGD(0.05)
def train_step(x, y, w=None):
with backprop.GradientTape() as tape:
if w is not None:
model([x, y, w])
else:
model([x, y])
loss = math_ops.reduce_sum(model.losses)
gradients = tape.gradient(loss, model.trainable_weights)
optimizer.apply_gradients(zip(gradients, model.trainable_weights))
return loss
return train_step
# TODO(psv): Add tests cases where a model is used in loss function but is
# not part of the training model.
class TestAddLossCorrectness(keras_parameterized.TestCase):
def setUp(self):
super(TestAddLossCorrectness, self).setUp()
self.x = np.array([[0.], [1.], [2.]], dtype='float32')
self.y = np.array([[0.5], [2.], [3.5]], dtype='float32')
self.w = np.array([[1.25], [0.5], [1.25]], dtype='float32')
@keras_parameterized.run_all_keras_modes
def test_loss_on_model_fit(self):
inputs = Input(shape=(1,))
targets = Input(shape=(1,))
outputs = testing_utils.Bias()(inputs)
model = Model([inputs, targets], outputs)
model.add_loss(MAE()(targets, outputs))
model.add_loss(math_ops.reduce_mean(mae(targets, outputs)))
model.compile(
optimizer_v2.gradient_descent.SGD(0.05),
run_eagerly=testing_utils.should_run_eagerly())
history = model.fit([self.x, self.y], batch_size=3, epochs=5)
self.assertAllClose(history.history['loss'], [2., 1.8, 1.6, 1.4, 1.2], 1e-3)
@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_loss_callable_on_model_fit(self):
model = testing_utils.get_model_from_layers([testing_utils.Bias()],
input_shape=(1,))
def callable_loss():
return math_ops.reduce_sum(model.weights)
model.add_loss(callable_loss)
model.compile(
optimizer_v2.gradient_descent.SGD(0.1),
run_eagerly=testing_utils.should_run_eagerly())
history = model.fit(self.x, batch_size=3, epochs=5)
self.assertAllClose(history.history['loss'], [0., -.1, -.2, -.3, -.4], 1e-3)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_loss_on_model_ctl(self):
def get_model_and_train_step():
inputs = Input(shape=(1,))
targets = Input(shape=(1,))
outputs = testing_utils.Bias()(inputs)
model = Model([inputs, targets], outputs)
model.add_loss(MAE()(targets, outputs))
model.add_loss(math_ops.reduce_mean(mae(targets, outputs)))
return get_ctl_train_step(model)
train_step = get_model_and_train_step()
loss = [train_step(self.x, self.y) for _ in range(5)]
self.assertAllClose(loss, [2., 1.8, 1.6, 1.4, 1.2], 1e-3)
train_step = def_function.function(get_model_and_train_step())
loss = [train_step(self.x, self.y) for _ in range(5)]
self.assertAllClose(loss, [2., 1.8, 1.6, 1.4, 1.2], 1e-3)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_loss_callable_on_model_ctl(self):
def get_model_and_train_step():
inputs = Input(shape=(1,))
targets = Input(shape=(1,))
outputs = testing_utils.Bias()(inputs)
model = Model([inputs, targets], outputs)
def callable_loss():
return math_ops.reduce_sum(model.weights)
model.add_loss(callable_loss)
return get_ctl_train_step(model)
train_step = get_model_and_train_step()
loss = [train_step(self.x, self.y) for _ in range(5)]
self.assertAllClose(loss, [0., -0.05, -0.1, -0.15, -0.2], 1e-3)
train_step = def_function.function(get_model_and_train_step())
loss = [train_step(self.x, self.y) for _ in range(5)]
self.assertAllClose(loss, [0., -0.05, -0.1, -0.15, -0.2], 1e-3)
@keras_parameterized.run_all_keras_modes
def test_loss_with_sample_weight_on_model_fit(self):
inputs = Input(shape=(1,))
targets = Input(shape=(1,))
sw = Input(shape=(1,))
outputs = testing_utils.Bias()(inputs)
model = Model([inputs, targets, sw], outputs)
model.add_loss(MAE()(targets, outputs, sw))
model.add_loss(3 * math_ops.reduce_mean(sw * mae(targets, outputs)))
model.compile(
optimizer_v2.gradient_descent.SGD(0.025),
run_eagerly=testing_utils.should_run_eagerly())
history = model.fit([self.x, self.y, self.w], batch_size=3, epochs=5)
self.assertAllClose(history.history['loss'], [4., 3.6, 3.2, 2.8, 2.4], 1e-3)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_loss_with_sample_weight_on_model_ctl(self):
def get_model_and_train_step():
inputs = Input(shape=(1,))
targets = Input(shape=(1,))
sw = Input(shape=(1,))
outputs = testing_utils.Bias()(inputs)
model = Model([inputs, targets, sw], outputs)
model.add_loss(MAE()(targets, outputs, sw))
model.add_loss(math_ops.reduce_mean(sw * mae(targets, outputs)))
return get_ctl_train_step(model)
train_step = get_model_and_train_step()
loss = [train_step(self.x, self.y, self.w) for _ in range(5)]
self.assertAllClose(loss, [2., 1.8, 1.6, 1.4, 1.2], 1e-3)
train_step = def_function.function(get_model_and_train_step())
loss = [train_step(self.x, self.y, self.w) for _ in range(5)]
self.assertAllClose(loss, [2., 1.8, 1.6, 1.4, 1.2], 1e-3)
@keras_parameterized.run_all_keras_modes
def test_loss_with_sample_weight_in_model_call(self):
class MyModel(Model):
def __init__(self):
super(MyModel, self).__init__()
self.bias = testing_utils.Bias()
def call(self, inputs):
outputs =
|
self.bias(inputs[0])
self.add_loss(MAE()(inputs[1], outputs, inputs[2]))
self.add_loss(math_ops.reduce_m
|
ean(inputs[2] * mae(inputs[1], outputs)))
return outputs
model = MyModel()
model.predict([self.x, self.y, self.w])
model.compile(
optimizer_v2.gradient_descent.SGD(0.05),
run_eagerly=testing_utils.should_run_eagerly())
history = model.fit([self.x, self.y, self.w], batch_size=3, epochs=5)
self.assertEqual(len(model.losses), 2)
self.assertAllClose(history.history['loss'], [2., 1.8, 1.6, 1.4, 1.2], 1e-3)
eval_out = model.evaluate([self.x, self.y, self.w])
self.assertAlmostEqual(eval_out, 1.0, 3)
@keras_parameterized.run_all_keras_modes
def test_loss_with_sample_weight_in_layer_call(self):
class MyLayer(layers.Layer):
def __init__(self):
super(MyLayer, self).__init__()
self.bias = testing_utils.Bias()
def call
|
ChinaNetCloud/nc-backup-py
|
nc-backup-py/tests/backup_main_commands_test.py
|
Python
|
apache-2.0
| 293
| 0.010239
|
import unittest
impor
|
t config_test
from backupcmd.commands import backupCommands
class BackupCommandsTestCase(unittest.TestCase):
"""Test commands passed to main script"""
def test_hyphen_r_option(self):
print 'Pending BackupComm
|
andsTestCase'
self.assertEqual(1,1)
|
anthraxx/pwndbg
|
pwndbg/commands/mprotect.py
|
Python
|
mit
| 2,402
| 0.000833
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import gdb
import pwndbg.chain
import pwndbg.commands
import pwndbg.enhance
import pwndbg.file
import pwndbg.which
import pwndbg.wrappers.checksec
import pwndbg.wrappers.readelf
from pwndbg.color import message
parser = argparse.ArgumentParser(description='Calls mprotect. x86_64 only.')
parser.add_argument('addr', help='Page-aligned address to call mprotect on.',
type=int)
parser.add_argument('length', help='Count of bytes to call mprotect on. Needs '
'to be multiple of page size.',
type=int)
parser.add_argument('prot', help='Prot string as in mprotect(2). Eg. '
'"PROT_READ|PROT_EXEC"', type=str)
SYS_MPROTECT = 0x7d
prot_dict = {
'PROT_NONE': 0x0,
'PROT_READ': 0x1,
'PROT_WRITE': 0x2,
'PROT_EXEC': 0x4,
}
def prot_str_to_val(protstr):
'''Heuristic to convert PROT_EXEC|PROT_WRITE to integer value.'''
prot_int = 0
for k in prot_dict:
if k in protstr:
prot_int |= prot_dict[k]
return prot_int
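# A quick illustration of the heuristic above: each PROT_* token present in
# the string contributes its bit from prot_dict, so for instance:
#
#   prot_str_to_val('PROT_READ|PROT_EXEC')  -> 0x5
#   prot_str_to_val('PROT_READ|PROT_WRITE') -> 0x3
#   prot_str_to_val('PROT_NONE')            -> 0x0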
@pwndbg.commands.ArgparsedCommand(parser)
@pwndbg.commands.OnlyWhenRunning
@pwndbg.commands.OnlyAmd64
def mprotect(addr, length, prot):
'''Only x86_64.'''
saved_rax = pwndbg.regs.rax
saved_rbx = pwndbg.regs.rbx
saved_rcx = pwndbg.regs.rcx
saved_rdx = pwndbg.regs.rdx
saved_rip = pwndbg.regs.rip
prot_int = prot_str_to_val(prot)
gdb.execute('set $rax={}'.format(SYS_MPROTECT))
gdb.execute('set $rbx={}'.format(addr))
gdb.execute('set $rcx={}'.format(length))
gdb.execute('set $rdx={}'.format(prot_int))
saved_instruction_2bytes = pwndbg.memory.read(pwndbg.regs.rip, 2)
# int 0x80
pwndbg.memory.write(pwndbg.regs.rip, b'\xcd\x80')
# execute syscall
gdb.execute('stepi')
|
print('mprotect returned {}'.format(pwndbg.regs.rax))
# restore registers and memory
pwndbg.memory.write(saved_rip, saved_instruction_2bytes)
gdb.execute('set $rax={}'.format(saved_rax))
gdb.execute('set $rbx={}'.format(saved_rbx))
gdb.execute('set $rcx={}'.format(saved_rcx))
gdb.execute('set $rdx={}'.format
|
(saved_rdx))
gdb.execute('set $rip={}'.format(saved_rip))
pwndbg.regs.rax = saved_rax
pwndbg.regs.rbx = saved_rbx
pwndbg.regs.rcx = saved_rcx
pwndbg.regs.rdx = saved_rdx
pwndbg.regs.rip = saved_rip
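# A hypothetical debugging session using the command above (the address and
# length are assumptions; the target page must already be mapped):
#
#   pwndbg> mprotect 0x555555554000 0x1000 PROT_READ|PROT_WRITE|PROT_EXEC
#
# On success the command prints "mprotect returned 0", since mprotect(2)
# returns 0 when the protection change succeeds.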
|
xuru/pyvisdk
|
pyvisdk/do/virtual_hardware_option.py
|
Python
|
mit
| 1,413
| 0.012031
|
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def VirtualHardwareOption(vim, *args, **kwargs):
'''The VirtualHardwareOption data object contains the options available for all
virtual devices.'''
obj = vim.client.factory.create('ns0:VirtualHardwareOption')
# do some validation checking...
if (len(args) + len(kwargs)) < 14:
raise IndexError('Expected at least 14 arguments got: %d' % len(args))
required = [ 'deviceListReadonly', 'hwVersion', 'memoryMB', 'numCoresPerSocket', 'numCPU',
'numCpuReadonly', 'numIDEControllers', 'numPCIControllers',
'numPS2Controllers', 'numSIOControllers', 'numUSBControllers',
'numUSBXHCIControllers', 'resourceConfigOption', 'virtualDeviceOption' ]
optional = [ 'licensingLimit', 'numSupportedWwnNodes', 'numSupportedWwnPorts',
'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
|
else:
raise InvalidArgumentError("Invalid argum
|
ent: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
|
VisTrails/VisTrails
|
vistrails/packages/pipelineEdit/__init__.py
|
Python
|
bsd-3-clause
| 2,254
| 0.011979
|
###############################################################################
##
## Copyright (C) 2014-2016, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
|
## All rights reserved.
## Contact: contact@
|
vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
"""This package implements a very simple Qt GUI that can load a
pipeline, change its parameters based on aliases, and execute them on
the spreadsheet."""
from __future__ import division
identifier = 'org.vistrails.vistrails.pipelineedit'
name = 'Pipeline Editor'
version = '0.0.2'
old_identifiers = ['edu.utah.sci.vistrails.pipelineedit']
|
statsmodels/statsmodels
|
statsmodels/multivariate/tests/test_factor.py
|
Python
|
bsd-3-clause
| 11,521
| 0.000868
|
# -*- coding: utf-8 -*-
import warnings
from statsmodels.compat.pandas import PD_LT_1_4
import os
import numpy as np
import pandas as pd
from statsmodels.multivariate.factor import Factor
from numpy.testing import (assert_equal, assert_array_almost_equal, assert_,
assert_raises, assert_array_equal,
assert_array_less, assert_allclose)
import pytest
try:
import matplotlib.pyplot as plt
missing_matplotlib = False
plt.switch_backend('Agg')
except ImportError:
missing_matplotlib = True
# Example data
# https://support.sas.com/documentation/cdl/en/statug/63033/HTML/default/
# viewer.htm#statug_introreg_sect012.htm
X = pd.DataFrame([['Minas Graes', 2.068, 2.070, 1.580, 1, 0],
['Minas Graes', 2.068, 2.074, 1.602, 2, 1],
['Minas Graes', 2.090, 2.090, 1.613, 3, 0],
['Minas Graes', 2.097, 2.093, 1.613, 4, 1],
['Minas Graes', 2.117, 2.125, 1.663, 5, 0],
['Minas Graes', 2.140, 2.146, 1.681, 6, 1],
['Matto Grosso', 2.045, 2.054, 1.580, 7, 0],
['Matto Grosso', 2.076, 2.088, 1.602, 8, 1],
['Matto Grosso', 2.090, 2.093, 1.643, 9, 0],
['Matto Grosso', 2.111, 2.114, 1.643, 10, 1],
['Santa Cruz', 2.093, 2.098, 1.653, 11, 0],
['Santa Cruz', 2.100, 2.106, 1.623, 12, 1],
['Santa Cruz', 2.104, 2.101, 1.653, 13, 0]],
columns=['Loc', 'Basal', 'Occ', 'Max', 'id', 'alt'])
def test_auto_col_name():
# Test auto generated variable names when endog_names is None
mod = Factor(None, 2, corr=np.eye(11), endog_names=None,
smc=False)
assert_array_equal(mod.endog_names,
['var00', 'var01', 'var02', 'var03', 'var04', 'var05',
'var06', 'var07', 'var08', 'var09', 'var10'])
def test_direct_corr_matrix():
# Test specifying the correlation matrix directly
mod = Factor(None, 2, corr=np.corrcoef(X.iloc[:, 1:-1], rowvar=0),
smc=False)
results = mod.fit(tol=1e-10)
a = np.array([[0.965392158864, 0.225880658666255],
[0.967587154301, 0.212758741910989],
[0.929891035996, -0.000603217967568],
[0.486822656362, -0.869649573289374]])
assert_array_almost_equal(results.loadings, a, decimal=8)
# Test set and get endog_names
mod.endog_names = X.iloc[:, 1:-1].columns
assert_array_equal(mod.endog_names, ['Basal', 'Occ', 'Max', 'id'])
# Test set endog_names with the wrong number of elements
assert_raises(ValueError, setattr, mod, 'endog_names',
X.iloc[:, :1].columns)
def test_unknown_fa_method_error():
# Test raising an error if an unknown FA method is specified in fa.method
mod = Factor(X.iloc[:, 1:-1], 2, method='ab')
assert_raises(ValueError, mod.fit)
def test_example_compare_to_R_output():
# Testing basic functions and compare to R output
# R code for producing the results:
# library(psych)
# library(GPArotation)
# Basal = c(2.068, 2.068, 2.09, 2.097, 2.117, 2.14, 2.045, 2.076, 2.09, 2.111, 2.093, 2.1, 2.104)
# Occ = c(2.07, 2.074, 2.09, 2.093, 2.125, 2.146, 2.054, 2.088, 2.093, 2.114, 2.098, 2.106, 2.101)
# Max = c(1.58, 1.602, 1.613, 1.613, 1.663, 1.681, 1.58, 1.602, 1.643, 1.643, 1.653, 1.623, 1.653)
# id = c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13)
# Y <- cbind(Basal, Occ, Max, id)
# a <- fa(Y, nfacto
|
rs=2, fm="pa", rotate="none", SMC=FALSE, min.err=1e-10)
# b <- cbind(a$loadings[,1], -a$loadings[,2])
# b
# a <- fa(Y, nfactors=2, fm="pa", rotate="Promax", SMC=TRUE, min.err=1e-10)
# b <- cbind(a$loadings[,1], a$loadings[,2])
# b
# a <- fa(Y, n
|
factors=2, fm="pa", rotate="Varimax", SMC=TRUE, min.err=1e-10)
# b <- cbind(a$loadings[,1], a$loadings[,2])
# b
# a <- fa(Y, nfactors=2, fm="pa", rotate="quartimax", SMC=TRUE, min.err=1e-10)
# b <- cbind(a$loadings[,1], -a$loadings[,2])
# b
# a <- fa(Y, nfactors=2, fm="pa", rotate="oblimin", SMC=TRUE, min.err=1e-10)
# b <- cbind(a$loadings[,1], a$loadings[,2])
# b
# No rotation without squared multiple correlations prior
# produce same results as in R `fa`
mod = Factor(X.iloc[:, 1:-1], 2, smc=False)
results = mod.fit(tol=1e-10)
a = np.array([[0.965392158864, 0.225880658666255],
[0.967587154301, 0.212758741910989],
[0.929891035996, -0.000603217967568],
[0.486822656362, -0.869649573289374]])
assert_array_almost_equal(results.loadings, a, decimal=8)
# No rotation WITH squared multiple correlations prior
# produce same results as in R `fa`
mod = Factor(X.iloc[:, 1:-1], 2, smc=True)
results = mod.fit()
a = np.array([[0.97541115, 0.20280987],
[0.97113975, 0.17207499],
[0.9618705, -0.2004196],
[0.37570708, -0.45821379]])
assert_array_almost_equal(results.loadings, a, decimal=8)
# Same as R GRArotation
results.rotate('varimax')
a = np.array([[0.98828898, -0.12587155],
[0.97424206, -0.15354033],
[0.84418097, -0.502714],
[0.20601929, -0.55558235]])
assert_array_almost_equal(results.loadings, a, decimal=8)
results.rotate('quartimax') # Same as R fa
a = np.array([[0.98935598, 0.98242714, 0.94078972, 0.33442284],
[0.117190049, 0.086943252, -0.283332952, -0.489159543]])
assert_array_almost_equal(results.loadings, a.T, decimal=8)
results.rotate('equamax') # Not the same as R fa
results.rotate('promax') # Not the same as R fa
results.rotate('biquartimin') # Not the same as R fa
results.rotate('oblimin') # Same as R fa
a = np.array([[1.02834170170, 1.00178840104, 0.71824931384,
-0.00013510048],
[0.06563421, 0.03096076, -0.39658839, -0.59261944]])
assert_array_almost_equal(results.loadings, a.T, decimal=8)
# Testing result summary string
results.rotate('varimax')
desired = (
""" Factor analysis results
=============================
Eigenvalues
-----------------------------
Basal Occ Max id
-----------------------------
2.9609 0.3209 0.0000 -0.0000
-----------------------------
-----------------------------
Communality
-----------------------------
Basal Occ Max id
-----------------------------
0.9926 0.9727 0.9654 0.3511
-----------------------------
-----------------------------
Pre-rotated loadings
-----------------------------------
factor 0 factor 1
-----------------------------------
Basal 0.9754 0.2028
Occ 0.9711 0.1721
Max 0.9619 -0.2004
id 0.3757 -0.4582
-----------------------------
-----------------------------
varimax rotated loadings
-----------------------------------
factor 0 factor 1
-----------------------------------
Basal 0.9883 -0.1259
Occ 0.9742 -0.1535
Max 0.8442 -0.5027
id 0.2060 -0.5556
=============================
""")
actual = results.summary().as_text()
actual = "\n".join(line.rstrip() for line in actual.splitlines()) + "\n"
assert_equal(actual, desired)
@pytest.mark.skipif(missing_matplotlib, reason='matplotlib not available')
def test_plots(close_figures):
mod = Factor(X.iloc[:, 1:], 3)
results = mod.fit()
results.rotate('oblimin')
fig = results.plot_scree()
fig_loadings = results.plot_loadings()
assert_equal(3, len(fig_loadings))
@pytest.mark.smoke
def test_getframe_smoke():
# mostly smoke tests for now
mod = Factor(X.iloc[:, 1:-1], 2, smc=True)
res = mod.fit()
df = res.get_loadings_frame(style='raw')
assert_(isinstance(df, pd.DataFrame))
lds = res.get_loadings_frame(style='strings', decimals=3, threshold=0.3)
# The Styler option require jinja2, skip if not available
try:
fr
|
twamarc/schemaorg
|
lib/rdflib_jsonld/serializer.py
|
Python
|
apache-2.0
| 12,215
| 0.001555
|
# -*- coding: utf-8 -*-
"""
This serialiser will output an RDF Graph as a JSON-LD formatted document. See:
http://json-ld.org/
Example usage::
>>> from rdflib.plugin import register, Serializer
>>> register('json-ld', Serializer, 'rdflib_jsonld.serializer', 'JsonLDSerializer')
>>> from rdflib import Graph
>>> testrdf = '''
... @prefix dc: <http://purl.org/dc/terms/> .
... <http://example.org/about>
... dc:title "Someone's Homepage"@en .
... '''
>>> g = Graph().parse(data=testrdf, format='n3')
>>> print(g.serialize(format='json-ld', indent=4).decode())
[
{
"@id": "http://example.org/about",
"http://purl.org/dc/terms/title": [
{
"@language": "en",
"@value": "Someone's Homepage"
}
]
}
]
"""
# NOTE: This code writes the entire JSON object into memory before serialising,
# but we should consider streaming the output to deal with arbitrarily large
# graphs.
import warnings
from rdflib.serializer import Serializer
from rdflib.graph import Graph
from rdflib.term import URIRef, Literal, BNode
from rdflib.namespace import RDF, XSD
from .context import Context, UNDEF
from .util import json
from .keys import CONTEXT, GRAPH, ID, VOCAB, LIST, SET, LANG
__all__ = ['JsonLDSerializer', 'from_rdf']
PLAIN_LITERAL_TYPES = set([XSD.boolean, XSD.integer, XSD.double, XSD.string])
class JsonLDSerializer(Serializer):
def __init__(self, store):
super(JsonLDSerializer, self).__init__(store)
def serialize(self, stream, base=None, encoding=None, **kwargs):
# TODO: docstring w. args and return value
encoding = encoding or 'utf-8'
if encoding not in ('utf-8', 'utf-16'):
warnings.warn("JSON should be encoded as unicode. " +
"Given encoding was: %s" % encoding)
context_data = kwargs.get('context')
use_native_types = kwargs.get('use_native_types', True)
use_rdf_type = kwargs.get('use_rdf_type', False)
auto_compact = kwargs.get('auto_compact', True)
indent = kwargs.get('indent', 2)
separators = kwargs.get('separators', (',', ': '))
sort_keys = kwargs.get('sort_keys', True)
ensure_ascii = kwargs.get('ensure_ascii', False)
obj = from_rdf(self.store, context_data, base,
use_native_types, use_rdf_type,
auto_compact=auto_compact)
data = json.dumps(obj, indent=indent, separators=separators,
sort_keys=sort_keys, ensure_ascii=ensure_ascii)
stream.write(data.encode(encoding, 'replace'))
def from_rdf(graph, context_data=None, base=None,
use_native_types=False, use_rdf_type=False,
auto_compact=False, startnode=None, index=False):
# TODO: docstring w. args and return value
# TODO: support for index and startnode
if not context_data and auto_compact:
context_data = dict(
(pfx, unicode(ns))
for (pfx, ns) in graph.namespaces() if pfx and
unicode(ns) != u"http://www.w3.org/XML/1998/namespace")
if isinstance(context_data, Context):
context = context_data
context_data = context.to_dict()
else:
context = Context(context_data, base=base)
converter = Converter(context, use_native_types, use_rdf_type)
result = converter.convert(graph)
if converter.context.active:
if isinstance(result, list):
result = {context.get_key(GRAPH): result}
result[CONTEXT] = context_data
return result
class Converter(object):
def __init__(self, context, use_native_types, use_rdf_type):
self.context = context
self.use_native_types = context.active or use_native_types
self.use_rdf_type = use_rdf_type
def convert(self, graph):
# TODO: bug in rdflib dataset parsing (nquads et al):
# plain triples end up in separate unnamed graphs (rdflib issue #436)
if graph.context_aware:
default_graph = Graph()
graphs = [default_graph]
for g in graph.contexts():
if isinstance(g.identifier, URIRef):
graphs.append(g)
|
else:
default_graph +
|
= g
else:
graphs = [graph]
context = self.context
objs = []
for g in graphs:
obj = {}
graphname = None
if isinstance(g.identifier, URIRef):
graphname = context.shrink_iri(g.identifier)
obj[context.id_key] = graphname
nodes = self.from_graph(g)
if not graphname and len(nodes) == 1:
obj.update(nodes[0])
else:
if not nodes:
continue
obj[context.graph_key] = nodes
if objs and objs[0].get(context.get_key(ID)) == graphname:
objs[0].update(obj)
else:
objs.append(obj)
if len(graphs) == 1 and len(objs) == 1 and not self.context.active:
default = objs[0]
items = default.get(context.graph_key)
if len(default) == 1 and items:
objs = items
elif len(objs) == 1 and self.context.active:
objs = objs[0]
return objs
def from_graph(self, graph):
nodemap = {}
for s in set(graph.subjects()):
## only iri:s and unreferenced (rest will be promoted to top if needed)
if isinstance(s, URIRef) or (isinstance(s, BNode)
and not any(graph.subjects(None, s))):
self.process_subject(graph, s, nodemap)
return nodemap.values()
def process_subject(self, graph, s, nodemap):
if isinstance(s, URIRef):
node_id = self.context.shrink_iri(s)
elif isinstance(s, BNode):
node_id = s.n3()
else:
node_id = None
#used_as_object = any(graph.subjects(None, s))
if node_id in nodemap:
return None
node = {}
node[self.context.id_key] = node_id
nodemap[node_id] = node
for p, o in graph.predicate_objects(s):
self.add_to_node(graph, s, p, o, node, nodemap)
return node
def add_to_node(self, graph, s, p, o, s_node, nodemap):
context = self.context
if isinstance(o, Literal):
datatype = unicode(o.datatype) if o.datatype else None
language = o.language
term = context.find_term(unicode(p), datatype, language=language)
else:
containers = [LIST, None] if graph.value(o, RDF.first) else [None]
for container in containers:
for coercion in (ID, VOCAB, UNDEF):
term = context.find_term(unicode(p), coercion, container)
if term:
break
if term:
break
node = None
use_set = not context.active
if term:
p_key = term.name
if term.type:
node = self.type_coerce(o, term.type)
elif term.language and o.language == term.language:
node = unicode(o)
elif context.language and (
term.language is None and o.language is None):
node = unicode(o)
if term.container == SET:
use_set = True
elif term.container == LIST:
node = [self.type_coerce(v, term.type) or self.to_raw_value(graph, s, v, nodemap)
for v in self.to_collection(graph, o)]
elif term.container == LANG and language:
value = s_node.setdefault(p_key, {})
values = value.get(language)
node = unicode(o)
if values:
if not isinstance(values, list):
value[language] = values = [values]
values.append(node)
else:
|
mortbauer/openfoam-extend-Breeder-other-scripting-PyFoam
|
PyFoam/Basics/STLFile.py
|
Python
|
gpl-2.0
| 6,542
| 0.024152
|
# ICE Revision: $Id$
"""Read a STL file and do simple manipulations"""
from os import path
from PyFoam.Error import error
from PyFoam.ThirdParty.six import next as iterNext
class STLFile(object):
"""Store a complete STL-file and do simple manipulations with it"""
noName="<no name given>"
def __init__(self,fName=None):
"""
@param fName: filename of the STL-file. If None then an empty file is created
"""
self._fp=None
if hasattr(fName, 'read'):
# seems to be a filehandle
self._fp=fName
if hasattr(fName,'name'):
self._filename=fName.name
else:
self._filename="<filehandle>"
else:
self._filename=fName
if self._fp==None:
if fName!=None:
self._contents=[l.strip() for l in open(fName).readlines()]
else:
self._contents=[]
else:
self._contents=[l.strip() for l in self._fp.readlines()]
self.resetInfo()
def resetInfo(self):
"""Set cached info to nothing"""
self._patchInfo=None
def filename(self):
"""The filename (without the full patch)"""
if self._filename==None:
return "<no filename given>"
else:
return path.basename(self._filename)
def expectedToken(self,l,token,i):
if l.strip().find(token)!=0:
error("'%s' expected in line %d of %s" % (token,i+1,self.filename()))
def erasePatches(self,patchNames):
"""Erase the patches in the list"""
processed=[]
keep=True
currentName=None
for l in self._contents:
nextState=keep
parts=l.split()
if len(parts)>0:
if parts[0]=="endsolid":
nextState=True
if currentName!=parts[1]:
error("Patch name",parts[1],"Expected",currentName)
currentName=None
elif parts[0]=="solid":
currentName=parts[1]
if currentName in patchNames:
keep=False
nextState=False
if keep:
processed.append(l)
keep=nextState
self._contents=processed
def mergePatches(self,patchNames,targetPatchName):
"""Merge the patches in the list and put them into a new patch"""
processed=[]
saved=[]
keep=True
currentName=None
for l in self._contents:
nextState=keep
parts=l.split()
if len(parts)>0:
if parts[0]=="endsolid":
nextState=True
if currentName!=parts[1]:
error("Patch
|
name",parts[1],"Expected",currentName)
|
currentName=None
elif parts[0]=="solid":
currentName=parts[1]
if currentName in patchNames:
keep=False
nextState=False
if keep:
processed.append(l)
elif len(parts)>0:
if parts[0] not in ["solid","endsolid"]:
saved.append(l)
keep=nextState
self._contents=processed
self._contents.append("solid "+targetPatchName)
self._contents+=saved
self._contents.append("endsolid "+targetPatchName)
def patchInfo(self):
"""Get info about the patches. A list of dictionaries with the relevant information"""
if self._patchInfo:
return self._patchInfo
self._patchInfo=[]
newPatch=True
e=enumerate(self._contents)
goOn=True
while goOn:
try:
i,l=iterNext(e)
if newPatch:
self.expectedToken(l,"solid",i)
info={}
if len(l.split())<2:
info["name"]=self.noName
else:
info["name"]=l.split()[1]
info["start"]=i+1
info["facets"]=0
info["min"]=[1e100]*3
info["max"]=[-1e100]*3
newPatch=False
elif l.strip().find("endsolid")==0:
info["end"]=i+1
self._patchInfo.append(info)
newPatch=True
else:
self.expectedToken(l,"facet normal",i)
i,l=iterNext(e)
self.expectedToken(l,"outer loop",i)
for v in range(3):
i,l=iterNext(e)
self.expectedToken(l,"vertex",i)
info["min"]=[min(m) for m in zip(info["min"],
[float(v) for v in l.strip().split()[1:4]])]
info["max"]=[max(m) for m in zip(info["max"],
[float(v) for v in l.strip().split()[1:4]])]
i,l=iterNext(e)
self.expectedToken(l,"endloop",i)
i,l=iterNext(e)
self.expectedToken(l,"endfacet",i)
info["facets"]+=1
except StopIteration:
goOn=False
if not newPatch:
error("File",self.filename(),"seems to be incomplete")
return self._patchInfo
def writeTo(self,fName):
"""Write to a file"""
if hasattr(fName, 'write'):
f=fName
else:
f=open(fName,"w")
f.write("\n".join(self._contents))
def __iter__(self):
for l in self._contents:
yield l
def __iadd__(self,other):
self.resetInfo()
fName=path.splitext(other.filename())[0]
moreThanOne=len(other.patchInfo())>1
nr=1
for l in other:
if l.strip().find("solid")==0 or l.strip().find("endsolid")==0:
parts=l.split()
if len(parts)==1:
l=parts[0]+" "+fName
if moreThanOne:
l+="_%04d" % nr
else:
l=parts[0]+" %s:%s" %(fName," ".join(parts[1:]))
if parts[0]=="solid":
nr+=1
self._contents.append(l)
return self
# Should work with Python3 and Python2
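# A hypothetical usage sketch of the class above (the file and patch names are
# assumptions, not files shipped with PyFoam):
#
#   stl = STLFile("wing.stl")
#   for patch in stl.patchInfo():
#       print(patch["name"], patch["facets"], patch["min"], patch["max"])
#   stl.mergePatches(["flap_upper", "flap_lower"], "flap")
#   stl.writeTo("wing_merged.stl")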
|
olduvaihand/ProjectEuler
|
src/python/problem074.py
|
Python
|
mit
| 1,088
| 0.001842
|
# -*- coding: utf-8 -*-
# ProjectEuler/src/python/problem074.py
#
# Digit factorial chains
# ======================
# Published on Friday, 16th July 2004, 06:00 pm
#
# The number 145 is well known for the property that the sum of the factorial
# of its
|
digits is equal to 145: 1! + 4! + 5! = 1 + 24 + 120 = 145 Perhaps less
# well known is 169, in that it produces the longest chain of numbers that link
# back to 169; it t
|
urns out that there are only three such loops that exist:
# 169 → 363601 → 1454 → 169; 871 → 45361 → 871; 872 → 45362 → 872. It is not
# difficult to prove that EVERY starting number will eventually get stuck in a
# loop. For example, 69 → 363600 → 1454 → 169 → 363601 (→ 1454);
# 78 → 45360 → 871 → 45361 (→ 871); 540 → 145 (→ 145). Starting with 69
# produces a chain of five non-repeating
# terms, but the longest non-repeating chain with a starting number below one
# million is sixty terms. How many chains, with a starting number below one
# million, contain exactly sixty non-repeating terms?
import projecteuler as pe
def main():
pass
if __name__ == "__main__":
main()
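# main() above is left as a stub; the following is a minimal sketch of the
# brute-force chain counting described in the header comment, not the
# repository's own solution.
from math import factorial

def chain_length_sketch(n, limit=60):
    """Count the non-repeating terms of the digit-factorial chain starting at n."""
    seen = []
    while n not in seen and len(seen) <= limit:
        seen.append(n)
        n = sum(factorial(int(d)) for d in str(n))
    return len(seen)

# e.g. sum(1 for n in range(1, 1000000) if chain_length_sketch(n) == 60)
# would give the count the problem asks for.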
|
yandex/mastermind
|
src/cocaine-app/indexes.py
|
Python
|
gpl-2.0
| 3,478
| 0.001438
|
from Queue import Queue
import elliptics
class SecondaryIndex(object):
def __init__(self, idx, key_tpl, meta_session):
self.idx = idx
self.key_tpl = key_tpl
self.meta_session = meta_session
def __iter__(self):
for idx in self.meta_session.find_all_indexes([self.idx]):
yield idx.indexes[0].data
def __setitem__(self, key, val):
eid = self.meta_session.transform(self.key_tpl % key)
self.meta_session.set_indexes(eid, [self.idx], [val])
def __getitem__(self, key):
eid = self.meta_session.transform(self.key_tpl % key)
return self.meta_session.list_indexes(eid).get()[0].data
def __delitem__(self, key):
eid = self.meta_session.transform(self.key_tpl % key)
self.meta_session.set_indexes(eid, [], [])
class TagSecondaryIndex(object):
BATCH_SIZE = 500
def __init__(self, main_idx, idx_tpl, key_tpl, meta_session, logger=None, namespace=None, batch_size=BATCH_SIZE):
self.main_idx = main_idx
self.idx_tpl = idx_tpl
self.key_tpl = key_tpl
self.meta_session = meta_session.clone()
if namespace:
self.meta_session.set_namespace(namespace)
self.batch_size = batch_size
self.logger = logger
def __iter__(self):
idxes = [idx.id for idx in
self.meta_session.clone().find_all_indexes([self.main_idx]).get()]
for data in self._iter_keys(idxes):
yield data
def tagged(sel
|
f, tag):
idxes = [idx.id for idx in
self.meta_session.clone().find_all_indexes([self.main_idx, self.idx_tpl % tag])]
self.logger.info('Received {0} records from tagged index {1}'.format(
|
len(idxes), self.idx_tpl % tag))
processed = 0
for data in self._iter_keys(idxes):
processed += 1
yield data
self.logger.info('Processed {0} records from tagged index {1}'.format(
processed, self.idx_tpl % tag))
def __setitem__(self, key, val):
eid = self.meta_session.transform(self.key_tpl % key)
self.meta_session.clone().write_data(eid, val).get()
def __getitem__(self, key):
eid = self.meta_session.transform(self.key_tpl % key)
return self.meta_session.clone().read_latest(eid).get()[0].data
def set_tag(self, key, tag=None):
eid = self.meta_session.transform(self.key_tpl % key)
tags = [self.main_idx]
if tag:
tags.append(self.idx_tpl % tag)
self.meta_session.clone().set_indexes(eid, tags, [''] * len(tags))
def _fetch_response_data(self, req):
data = None
try:
result = req[1]
result.wait()
data = result.get()[0].data
except Exception as e:
self.logger.error('Failed to fetch record from tagged index: {0} ({1})'.format(req[0], e))
return data
def _iter_keys(self, keys):
if not keys:
return
q = Queue(self.batch_size)
s = self.meta_session.clone()
for k in keys:
if not q.full():
q.put((k, s.read_latest(k)))
else:
data = self._fetch_response_data(q.get())
q.put((k, s.read_latest(k)))
if data:
yield data
while q.qsize():
data = self._fetch_response_data(q.get())
if data:
yield data
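# A hypothetical usage sketch (the index names, key template, session and
# logger objects are assumptions, not values used by mastermind itself):
#
#   couples = TagSecondaryIndex('mastermind:couples', 'mastermind:couples:%s',
#                               'couple:%s', meta_session, logger=logger)
#   couples['1:2:3'] = '{"status": "OK"}'
#   couples.set_tag('1:2:3', tag='2014-01')
#   for record in couples.tagged('2014-01'):
#       print(record)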
|
xbmc/atv2
|
xbmc/lib/libPython/Python/Lib/test/test_warnings.py
|
Python
|
gpl-2.0
| 3,221
| 0.003105
|
import warnings
import os
import unittest
from test import test_support
# The warnings module isn't easily tested, because it relies on module
# globals to store configuration information.
|
setUp() and tearDown()
# preserve the current settings to avoid bashing them while running tests.
# To capture the warning messages, a replacement for showwarning() is
# used to save warning information in a global variable.
class WarningMessage:
"Holds results of latest
|
showwarning() call"
pass
def showwarning(message, category, filename, lineno, file=None):
msg.message = str(message)
msg.category = category.__name__
msg.filename = os.path.basename(filename)
msg.lineno = lineno
class TestModule(unittest.TestCase):
def setUp(self):
global msg
msg = WarningMessage()
self._filters = warnings.filters[:]
self._showwarning = warnings.showwarning
warnings.showwarning = showwarning
self.ignored = [w[2].__name__ for w in self._filters
if w[0]=='ignore' and w[1] is None and w[3] is None]
def tearDown(self):
warnings.filters = self._filters[:]
warnings.showwarning = self._showwarning
def test_warn_default_category(self):
for i in range(4):
text = 'multi %d' %i # Different text on each call
warnings.warn(text)
self.assertEqual(msg.message, text)
self.assertEqual(msg.category, 'UserWarning')
def test_warn_specific_category(self):
text = 'None'
# XXX OverflowWarning should go away for Python 2.5.
for category in [DeprecationWarning, FutureWarning, OverflowWarning,
PendingDeprecationWarning, RuntimeWarning,
SyntaxWarning, UserWarning, Warning]:
if category.__name__ in self.ignored:
text = 'filtered out' + category.__name__
warnings.warn(text, category)
self.assertNotEqual(msg.message, text)
else:
text = 'unfiltered %s' % category.__name__
warnings.warn(text, category)
self.assertEqual(msg.message, text)
self.assertEqual(msg.category, category.__name__)
def test_filtering(self):
warnings.filterwarnings("error", "", Warning, "", 0)
self.assertRaises(UserWarning, warnings.warn, 'convert to error')
warnings.resetwarnings()
text = 'handle normally'
warnings.warn(text)
self.assertEqual(msg.message, text)
self.assertEqual(msg.category, 'UserWarning')
warnings.filterwarnings("ignore", "", Warning, "", 0)
text = 'filtered out'
warnings.warn(text)
self.assertNotEqual(msg.message, text)
warnings.resetwarnings()
warnings.filterwarnings("error", "hex*", Warning, "", 0)
self.assertRaises(UserWarning, warnings.warn, 'hex/oct')
text = 'nonmatching text'
warnings.warn(text)
self.assertEqual(msg.message, text)
self.assertEqual(msg.category, 'UserWarning')
def test_main(verbose=None):
test_support.run_unittest(TestModule)
if __name__ == "__main__":
test_main(verbose=True)
|
rwl/openpowersystem
|
cpsm/load_model/season.py
|
Python
|
agpl-3.0
| 1,789
| 0.00559
|
#------------------------------------------------------------------------------
# Copyright (C) 2009 Richard Lincoln
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation; version 2 dated June, 1991.
#
# This software is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#------------------------------------------------------------------------------
""" A specified time period of the year, e.g., Spring, Summer, Fall, Winter
"""
# <<< imports
# @generated
from cpsm.element import Element
from cpsm.load_model import SeasonName
from google.appengi
|
ne.ext import db
# >>> imports
class Season(Element):
""" A specified time period of the year, e.g., Spring, Summer, Fall, Winter
"""
# <<< season.attributes
# @generated
# Date season ends
end_date = db.DateTimeProperty()
# Date season starts
start_date = db.DateTimeProperty()
# Name of the Season
name = SeasonName
# >>> season.attributes
# <<< season.references
# @generated
# Virtual property. Schedules that use this Season.
|
pass # season_day_type_schedules
# >>> season.references
# <<< season.operations
# @generated
# >>> season.operations
# EOF -------------------------------------------------------------------------
|