code stringlengths 2 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int32 2 1.05M |
|---|---|---|---|---|---|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import os
import json
from subprocess import call
import cairis.core.BorgFactory
from cairis.core.Borg import Borg
from cairis.core.RoleParameters import RoleParameters
from cairis.core.TemplateAssetParameters import TemplateAssetParameters
from cairis.core.TemplateGoalParameters import TemplateGoalParameters
from cairis.core.ValueTypeParameters import ValueTypeParameters
from cairis.core.ComponentParameters import ComponentParameters
from cairis.core.ConnectorParameters import ConnectorParameters
from cairis.core.ComponentViewParameters import ComponentViewParameters
__author__ = 'Shamal Faily'
class ComponentViewTest(unittest.TestCase):
    """Round-trips an architectural pattern (component view) through the
    CAIRIS database: add, read back, update, then delete.

    Fixture data comes from test/componentviews.json; the database is
    re-initialised from initdb.sh before every test.
    """

    def setUp(self):
        # Reset the CAIRIS database to a clean state before each test run.
        call([os.environ['CAIRIS_CFG_DIR'] + "/initdb.sh"])
        cairis.core.BorgFactory.initialise()
        # Load the JSON fixture describing roles, value types, template
        # assets/goals and the architectural patterns under test.
        f = open(os.environ['CAIRIS_SRC'] + '/test/componentviews.json')
        d = json.load(f)
        f.close()
        self.theRequirements = []
        self.theRoles = []
        self.iRoles = d['roles']
        for i in self.iRoles:
            self.theRoles.append(RoleParameters(i["theName"], i["theType"], i["theShortCode"], i["theDescription"],[]))
        # All four value-type categories are collected into one list; the
        # third argument distinguishes the category.
        self.theMetricTypes = []
        self.iAccessRights = d['access_rights']
        for i in self.iAccessRights:
            self.theMetricTypes.append(ValueTypeParameters(i["theName"], i["theDescription"], 'access_right','',i["theValue"],i["theRationale"]))
        self.iSurfaceTypes = d['surface_types']
        for i in self.iSurfaceTypes:
            self.theMetricTypes.append(ValueTypeParameters(i["theName"], i["theDescription"], 'surface_type','',i["theValue"],i["theRationale"]))
        self.iProtocols = d['protocols']
        for i in self.iProtocols:
            self.theMetricTypes.append(ValueTypeParameters(i["theName"], i["theDescription"], 'protocol','',i["theValue"],i["theRationale"]))
        self.iPrivileges = d['privileges']
        for i in self.iPrivileges:
            self.theMetricTypes.append(ValueTypeParameters(i["theName"], i["theDescription"], 'privilege','',i["theValue"],i["theRationale"]))
        self.theAssets = []
        # Default security property values and rationales shared by all
        # template assets in the fixture.
        spValues = [0,0,0,0,0,0,0,0,]
        srValues = ['None','None','None','None','None','None','None','None']
        self.iTemplateAssets = d['template_assets']
        for i in self.iTemplateAssets:
            self.theAssets.append(TemplateAssetParameters(i["theName"], i["theShortCode"], i["theDescription"], i["theSignificance"],i["theType"],i["theSurfaceType"],i["theAccessRight"],spValues,srValues,[],[]))
        self.theGoals = []
        self.iTemplateGoals = d['template_goals']
        for i in self.iTemplateGoals:
            self.theGoals.append(TemplateGoalParameters(i["theName"],i["theDefinition"],i["theRationale"],i["theConcerns"],i["theResponsibilities"]))
        self.iComponentViews = d['architectural_patterns']

    def testComponentView(self):
        # Build parameter objects for the first architectural pattern only.
        cvName = self.iComponentViews[0]["theName"]
        cvSyn = self.iComponentViews[0]["theSynopsis"]
        theComponents = []
        for c in self.iComponentViews[0]["theComponents"]:
            cName = c["theName"]
            cDesc = c["theDescription"]
            cInts = []
            for i in c["theInterfaces"]:
                cInts.append((i["theName"],i["theType"],i["theAccessRight"],i["thePrivilege"]))
            cStructs = []
            for cs in c["theStructure"]:
                cStructs.append((cs["theHeadAsset"],cs["theHeadAdornment"],cs["theHeadNav"],cs["theHeadNry"],cs["theHeadRole"],cs["theTailRole"],cs["theTailNry"],cs["theTailNav"],cs["theTailAdornment"],cs["theTailAsset"]))
            cReqs = []
            cGoals = []
            for i in c["theGoals"]:
                cGoals.append(i)
            cGoalAssocs = []
            for cga in c["theGoalAssociations"]:
                cGoalAssocs.append((cga["theGoalName"],cga["theSubGoalName"],cga["theRefType"],'None'))
            theComponents.append(ComponentParameters(cName,cDesc,cInts,cStructs,cReqs,cGoals,cGoalAssocs))
        theConnectors = []
        for conn in self.iComponentViews[0]["theConnectors"]:
            theConnectors.append(ConnectorParameters(conn["theConnectorName"],cvName,conn["theFromComponent"],conn["theFromRole"],conn["theFromInterface"],conn["theToComponent"],conn["theToInterface"],conn["theToRole"],conn["theAssetName"],conn["theProtocol"],conn["theAccessRight"]))
        icvp = ComponentViewParameters(cvName,cvSyn,self.theMetricTypes,self.theRoles,self.theAssets,self.theRequirements,self.theGoals,theComponents,theConnectors)
        b = Borg()
        # Add the view, read it back, and check it round-tripped.
        b.dbProxy.addComponentView(icvp)
        ocvps = b.dbProxy.getComponentViews()
        ocvp = ocvps[cvName]
        self.assertEqual(icvp.name(), ocvp.name())
        self.assertEqual(icvp.synopsis(), ocvp.synopsis())
        # NOTE(review): 3.0 is the attack surface expected for the fixture's
        # 'Policy Manager' component -- this value is tied to the contents of
        # componentviews.json.
        self.assertEqual(b.dbProxy.componentAttackSurface('Policy Manager'),3.0)
        cg = b.dbProxy.componentGoalModel('Policy Manager')
        # Update the synopsis and verify the change is persisted.
        icvp.setId(ocvp.id())
        icvp.theSynopsis = 'revised synopsis'
        b.dbProxy.updateComponentView(icvp)
        ocvps = b.dbProxy.getComponentViews()
        ocvp = ocvps[cvName]
        self.assertEqual(icvp.name(), ocvp.name())
        self.assertEqual(ocvp.synopsis(), 'revised synopsis')
        b.dbProxy.deleteComponentView(ocvp.id())

    def tearDown(self):
        # No per-test teardown; setUp re-initialises the database instead.
        pass
if __name__ == '__main__':
    # Allow this test module to be executed directly as a script.
    unittest.main()
| failys/CAIRIS | cairis/test/test_ArchitecturalPattern.py | Python | apache-2.0 | 5,840 |
#!/usr/bin/env python
from setuptools import setup
from setuptools.command.sdist import sdist as _sdist
import re
import sys
import time
import codecs
import subprocess
if sys.version < "2.2.3":
    # Extremely old Pythons don't support these metadata fields; blank them
    # out so setup() doesn't choke.  NOTE(review): lexical comparison of
    # version strings is fragile (e.g. "2.10" < "2.2.3") -- confirm intent.
    from distutils.dist import DistributionMetadata
    DistributionMetadata.classifiers = None
    DistributionMetadata.download_url = None
# Workaround for problems caused by this import
# It's either this or hardcoding the version.
# from pyrax.version import version
with open("pyrax/version.py", "rt") as vfile:
    version_text = vfile.read()
# NOTE(review): the pattern is not compiled with re.MULTILINE, so '$' only
# matches at the very end of the file text; this assumes the version
# assignment is the last line of pyrax/version.py -- confirm.
vmatch = re.search(r'version ?= ?"(.+)"$', version_text)
version = vmatch.groups()[0]
# When set to '0' this expands in the RPM SPEC file to a unique date-base string
# Set to another value when cutting official release RPMS, then change back to
# zero for the next development cycle
release = '0'
class sdist(_sdist):
    """Custom sdist command, to prep the pyrax.spec file.

    Expands the @VERSION@ and @RELEASE@ macros in pyrax.spec.in into
    pyrax.spec before delegating to the stock setuptools sdist.
    """

    def run(self):
        global version
        global release
        # Create a development release string for later use
        git_head = subprocess.Popen("git log -1 --pretty=format:%h",
                                    shell=True,
                                    stdout=subprocess.PIPE).communicate()[0].strip()
        date = time.strftime("%Y%m%d%H%M%S", time.gmtime())
        git_release = "%sgit%s" % (date, git_head)
        # If this is a development release ('0...'), append date+githash to
        # %{release} once, up front.  The old code mutated the global inside
        # the line loop, so a spec template with several @RELEASE@ lines
        # compounded the suffix on each occurrence.
        if release.startswith('0'):
            release += '.' + git_release
        # Expand macros in pyrax.spec.in.  `xreadlines()` was deprecated in
        # Python 2.3 and removed in Python 3; iterating the file object is
        # the portable equivalent.  Context managers guarantee both handles
        # are closed even if an expansion fails.
        with open('pyrax.spec.in', 'r') as spec_in:
            with open('pyrax.spec', 'w') as spec:
                for line in spec_in:
                    if "@VERSION@" in line:
                        line = line.replace("@VERSION@", version)
                    elif "@RELEASE@" in line:
                        line = line.replace("@RELEASE@", release)
                    spec.write(line)
        # Run parent constructor
        _sdist.run(self)
# Get the long description from the relevant file; fall back to an empty
# string when README.rst is missing or unreadable.
try:
    f = codecs.open('README.rst', encoding='utf-8')
    long_description = f.read()
    f.close()
except Exception:
    # A bare `except:` also swallowed SystemExit/KeyboardInterrupt; catching
    # Exception keeps the intended best-effort fallback (missing or
    # undecodable README just yields an empty description) without masking
    # interpreter exits.
    long_description = ''
# Packages needed only to run the test suite.
testing_requires = ["mock"]
setup(
    name="pyrax",
    version=version,
    description="Python language bindings for OpenStack Clouds.",
    long_description=long_description,
    author="Rackspace",
    author_email="sdk-support@rackspace.com",
    url="https://github.com/rackspace/pyrax",
    license='Apache License, Version 2.0',
    keywords="pyrax rackspace cloud openstack",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python :: 2",
        "Operating System :: OS Independent",
    ],
    # NOTE(review): test-only dependencies are appended to install_requires,
    # so every user installs `mock` -- confirm this is intentional rather
    # than using tests_require/extras_require.
    install_requires=[
        "python-novaclient>=2.13.0",
        "rackspace-novaclient",
        "keyring",
        "requests>=2.2.1",
        "six>=1.5.2",
    ] + testing_requires,
    packages=[
        "pyrax",
        "pyrax/identity",
    ],
    # Hook the custom spec-generating sdist command defined above.
    cmdclass={'sdist': sdist}
)
| rackerlabs/heat-pyrax | setup.py | Python | apache-2.0 | 3,121 |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.v2 as paddle
# Invoked purely for its side effect; the returned reader is discarded.
# NOTE(review): presumably train() downloads/caches the MNIST training set
# on first call -- confirm against the paddle.dataset.mnist implementation.
paddle.dataset.mnist.train()
| PaddlePaddle/cloud | k8s/raw_job/prepare_data.py | Python | apache-2.0 | 670 |
import datetime
from django.db import models
from django.core.exceptions import ValidationError
def validate_year(year):
    """Model-field validator: accept ``None`` or a year from 1800 to now."""
    if year is None:
        return  # Years can be null
    current_year = datetime.datetime.now().year
    if not (1800 <= year <= current_year):
        raise ValidationError("Not a valid year.")
def validate_day_of_month(day):
    """Model-field validator: accept ``None`` or a calendar day in 1..31."""
    if day is None:
        return  # Days can be null
    if not 1 <= day <= 31:
        raise ValidationError("Not a valid day.")
class Specimen(models.Model):
    """
    A model of a herbarium_data specimen entry.
    """
    # Choices for month_collected: (stored integer, display name).
    MONTH_CHOICES = (
        (1, "January"),
        (2, "February"),
        (3, "March"),
        (4, "April"),
        (5, "May"),
        (6, "June"),
        (7, "July"),
        (8, "August"),
        (9, "September"),
        (10, "October"),
        (11, "November"),
        (12, "December")
    )
    # Generated Attributes
    def latin_name(self):
        # Binomial name, e.g. "Quercus alba".
        return "{} {}".format(self.genus, self.species)
    def date_collected_str(self):
        # Human-readable collection date at whatever precision is recorded.
        # NOTE(review): returns a str when month (and optionally day) are
        # known, the raw year_collected value when only the year is known,
        # and None when nothing is recorded -- callers must tolerate the
        # mixed return types.
        if self.year_collected:
            if self.month_collected:
                if self.day_collected:
                    return "{} {}, {}".format(self.get_month_collected_display(), self.day_collected,
                                              self.year_collected)
                else:
                    return "{}, {}".format(self.get_month_collected_display(), self.year_collected)
            else:
                return self.year_collected
        return None
    date_collected_str.short_description = "Date Collected"
    # Schema Attributes
    created = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)
    dataset = models.CharField(max_length=50, default="")
    # Genus/species are indexed since they are the primary search keys.
    genus = models.CharField(max_length=50, default="", db_index=True)
    species = models.CharField(max_length=50, default="", db_index=True)
    common_name = models.CharField(max_length=255, default="")
    dth = models.CharField(max_length=10, default="")
    accession = models.CharField(max_length=20, default="")
    # Collection date components; each may be null (validators above).
    year_collected = models.PositiveSmallIntegerField(null=True, validators=[validate_year])
    month_collected = models.PositiveSmallIntegerField(null=True, choices=MONTH_CHOICES)
    day_collected = models.PositiveSmallIntegerField(null=True, validators=[validate_day_of_month])
    collectors = models.TextField(default="")
    # NOTE(review): NullBooleanField is deprecated in modern Django in favour
    # of BooleanField(null=True) -- consider migrating.
    map_included = models.NullBooleanField()
    map_reference = models.CharField(max_length=255, default="")
    county = models.CharField(max_length=127, default="")
    township = models.CharField(max_length=127, default="")
    country = models.CharField(max_length=127, default="")
    location = models.CharField(max_length=127, default="")
    habitat = models.CharField(max_length=127, default="")
    notes = models.TextField(default="")
    # NOTE(review): null=True without blank=True makes the image required in
    # forms but optional in the database -- confirm that is intended.
    image = models.ImageField(null=True)
    def __repr__(self):
        return "<Specimen {} | {} {}>".format(self.accession, self.genus, self.species)
    def __str__(self):
        return "Specimen {}: {} {}".format(self.accession, self.genus, self.species)
| qubs/climate-data-api | herbarium_data/models.py | Python | apache-2.0 | 3,109 |
import os
import chardet
from humanfriendly import format_size
import pygments
import pygments.lexers
import pygments.lexers.special
import pygments.formatters
from pygments.util import ClassNotFound
from mako.lookup import TemplateLookup
from mfr.core import extension
from mfr.extensions.codepygments import settings
from mfr.extensions.codepygments import exceptions
class CodePygmentsRenderer(extension.BaseRenderer):
    """Renders source-code files to HTML using Pygments syntax highlighting."""

    # Fallback lexer when no file-type lexer can be found: plain text.
    DEFAULT_LEXER = pygments.lexers.special.TextLexer

    # Mako template that wraps the highlighted body into a full page.
    TEMPLATE = TemplateLookup(
        directories=[
            os.path.join(os.path.dirname(__file__), 'templates')
        ]).get_template('viewer.mako')

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.metrics.add('pygments_version', pygments.__version__)

    def render(self):
        """Return the rendered HTML page for the file at ``self.file_path``.

        Raises:
            exceptions.FileTooLargeError: file exceeds settings.MAX_SIZE.
            exceptions.FileDecodingError: contents cannot be decoded.
        """
        file_size = os.path.getsize(self.file_path)
        if file_size > settings.MAX_SIZE:
            raise exceptions.FileTooLargeError(
                'Text files larger than {} are not rendered. Please download '
                'the file to view.'.format(format_size(settings.MAX_SIZE, binary=True)),
                file_size=file_size,
                max_size=settings.MAX_SIZE,
                extension=self.metadata.ext,
            )
        with open(self.file_path, 'rb') as fp:
            body = self._render_html(fp, self.metadata.ext)
        return self.TEMPLATE.render(base=self.assets_url, body=body)

    @property
    def file_required(self):
        # The local file must be present before render() can run.
        return True

    @property
    def cache_result(self):
        # Output is deterministic for a given file, so it may be cached.
        return True

    def _render_html(self, fp, ext, *args, **kwargs):
        """Generate an html representation of the file

        :param fp: File pointer (opened in binary mode)
        :param ext: File name extension (used to pick a lexer)
        :return: Content html
        :raises exceptions.FileDecodingError: if no usable encoding is found
        """
        formatter = pygments.formatters.HtmlFormatter()
        data = fp.read()
        # Try UTF-8 first; fall back to chardet's guess on failure.
        content, encoding = None, 'utf-8'
        try:
            content = data.decode(encoding)
        except UnicodeDecodeError:
            detected_encoding = chardet.detect(data)
            # chardet may report `encoding: None` when detection fails.
            encoding = detected_encoding.get('encoding', None)
            if encoding is None:
                raise exceptions.FileDecodingError(
                    message='Unable to detect encoding of source file.',
                    extension=ext,
                    category='undetectable_encoding',
                    code=400,
                )
            try:
                content = data.decode(encoding)
            except UnicodeDecodeError as err:
                raise exceptions.FileDecodingError(
                    message='Unable to decode file as {}.'.format(encoding),
                    extension=ext,
                    category='undecodable',
                    original_exception=err,
                    code=400,
                )
        # Defensive: decode() never returns None, so this indicates a bug.
        if content is None:
            raise exceptions.FileDecodingError(
                message='File decoded to undefined using encoding "{}"'.format(encoding),
                extension=ext,
                category='decoded_to_undefined',
                code=500,
            )
        self.metrics.merge({'encoding': encoding, 'default_lexer': False})
        try:
            # check if there is a lexer available for more obscure file types
            if ext in settings.lexer_lib.keys():
                lexer = pygments.lexers.get_lexer_by_name(settings.lexer_lib[ext])
            else:
                lexer = pygments.lexers.guess_lexer_for_filename(ext, content)
        except ClassNotFound:
            # No lexer matched -- fall back to rendering as plain text.
            self.metrics.add('default_lexer', True)
            lexer = self.DEFAULT_LEXER()
        self.metrics.add('lexer', lexer.name)
        return pygments.highlight(content, lexer, formatter)
| CenterForOpenScience/modular-file-renderer | mfr/extensions/codepygments/render.py | Python | apache-2.0 | 3,749 |
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.url import urljoin_rfc
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
class GakSpider(BaseSpider):
    """Crawls www.gak.co.uk: follows the tab-menu category links, then
    scrapes product name, URL and price from each category's listings."""

    name = 'gak.co.uk'
    allowed_domains = ['gak.co.uk']
    start_urls = ['http://www.gak.co.uk/']

    def parse(self, response):
        # Follow every top-level category link in the tab menu.
        hxs = HtmlXPathSelector(response)
        relative_urls = hxs.select('//div[@class="tabs_menu"]/ul/li/a/@href').extract()
        for relative_url in relative_urls:
            url = urljoin_rfc('http://www.gak.co.uk/',
                              relative_url, response.encoding)
            yield Request(url, callback=self.parse_products)

    def parse_products(self, response):
        # Each product appears as a div.snapshot on the category page.
        hxs = HtmlXPathSelector(response)
        products = hxs.select('//div[@class="snapshot"]')
        for product in products:
            loader = ProductLoader(item=Product(), selector=product)
            loader.add_xpath('name', 'div/a/text()')
            relative_url = product.select('div[@class="dsc"]/a/@href').extract()[0]
            url = urljoin_rfc('http://www.gak.co.uk/',
                              relative_url, response.encoding)
            loader.add_value('url', url)
            # Products with no visible price element are loaded as 0.0.
            price = 0.0
            if product.select('div/div/span/text()'):
                price = product.select('div/div/span/text()').extract()[0]
            loader.add_value('price', price)
            yield loader.load_item()
| 0--key/lib | portfolio/Python/scrapy/soundslive/gak_spider.py | Python | apache-2.0 | 1,558 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""add_certifications_columns_to_slice
Revision ID: f9847149153d
Revises: 0ca9e5f1dacd
Create Date: 2021-11-03 14:07:09.905194
"""
# revision identifiers, used by Alembic.
import sqlalchemy as sa
from alembic import op
# Alembic revision identifiers for this migration and its parent.
revision = "f9847149153d"
down_revision = "0ca9e5f1dacd"
def upgrade():
    """Add nullable certification metadata columns to the `slices` table."""
    # batch_alter_table emits backend-appropriate ALTER operations (and the
    # copy-and-move workaround where the backend needs it).
    with op.batch_alter_table("slices") as batch_op:
        batch_op.add_column(sa.Column("certified_by", sa.Text(), nullable=True))
        batch_op.add_column(
            sa.Column("certification_details", sa.Text(), nullable=True)
        )
def downgrade():
    """Remove the certification columns added by this migration."""
    with op.batch_alter_table("slices") as batch_op:
        batch_op.drop_column("certified_by")
        batch_op.drop_column("certification_details")
| apache/incubator-superset | superset/migrations/versions/f9847149153d_add_certifications_columns_to_slice.py | Python | apache-2.0 | 1,499 |
from django.contrib import admin
from cobra.core.loading import get_model
| lyoniionly/django-cobra | src/cobra/apps/summary/admin.py | Python | apache-2.0 | 76 |
#!/usr/bin/env python
import glob
import os
import shlex
import sys
# Resolve the Node source root relative to this script's location.
script_dir = os.path.dirname(__file__)
node_root = os.path.normpath(os.path.join(script_dir, os.pardir))
# Make the bundled copy of GYP importable before importing it.
sys.path.insert(0, os.path.join(node_root, 'tools', 'gyp', 'pylib'))
import gyp
# Directory within which we want all generated files (including Makefiles)
# to be written.
output_dir = os.path.join(os.path.abspath(node_root), 'out')
def run_gyp(args):
    """Invoke GYP with *args* and exit the process on failure.

    Exits with GYP's non-zero return code so callers (build wrappers)
    can detect the failure.
    """
    rc = gyp.main(args)
    if rc != 0:
        # Parenthesised form is valid under both Python 2 and Python 3; the
        # bare `print` statement is a syntax error on Python 3.
        print('Error running GYP')
        sys.exit(rc)
if __name__ == '__main__':
    args = sys.argv[1:]

    # GYP bug.
    # On msvs it will crash if it gets an absolute path.
    # On Mac/make it will crash if it doesn't get an absolute path.
    if sys.platform == 'win32':
        args.append(os.path.join(node_root, 'node.gyp'))
        common_fn = os.path.join(node_root, 'common.gypi')
        options_fn = os.path.join(node_root, 'config.gypi')
        options_fips_fn = os.path.join(node_root, 'config_fips.gypi')
    else:
        args.append(os.path.join(os.path.abspath(node_root), 'node.gyp'))
        common_fn = os.path.join(os.path.abspath(node_root), 'common.gypi')
        options_fn = os.path.join(os.path.abspath(node_root), 'config.gypi')
        options_fips_fn = os.path.join(os.path.abspath(node_root), 'config_fips.gypi')

    # Include each optional gypi overlay only if it exists on disk.
    if os.path.exists(common_fn):
        args.extend(['-I', common_fn])

    if os.path.exists(options_fn):
        args.extend(['-I', options_fn])

    if os.path.exists(options_fips_fn):
        args.extend(['-I', options_fips_fn])

    args.append('--depth=' + node_root)

    # There's a bug with windows which doesn't allow this feature.
    if sys.platform != 'win32' and 'ninja' not in args:
        # Tell gyp to write the Makefiles into output_dir
        args.extend(['--generator-output', output_dir])

        # Tell make to write its output into the same dir
        args.extend(['-Goutput_dir=' + output_dir])

    args.append('-Dcomponent=static_library')
    args.append('-Dlibrary=static_library')

    gyp_args = list(args)
    run_gyp(gyp_args)
| dreamllq/node | tools/gyp_node.py | Python | apache-2.0 | 1,983 |
# Copyright 2017 Starbot Discord Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from api.database.DAL import SQLite
from api.database.db import DB
def db_open(db_in):
    """Open *db_in* via the backend selected by ``DB.type`` (SQLite only)."""
    if DB.type != "SQLite":
        return
    SQLite.db_open(db_in)
def db_close(db_in):
    """Close *db_in* via the backend selected by ``DB.type`` (SQLite only)."""
    if DB.type != "SQLite":
        return
    SQLite.close(db_in)
def db_create_table(db_in, tablename):
    """Create *tablename* in *db_in* via the configured backend."""
    if DB.type != "SQLite":
        return
    SQLite.db_create_table(db_in, tablename)
def db_insert(db_in, table, dict_in):
    """Insert *dict_in* into *table*; returns the backend's result, or
    ``None`` when no backend matches ``DB.type``."""
    if DB.type != "SQLite":
        return None
    return SQLite.db_insert(db_in, table, dict_in)
def db_get_contents_of_table(db_in, table, rows):
    """Fetch rows from *table*; returns the backend's result, or ``None``
    when no backend matches ``DB.type``."""
    if DB.type != "SQLite":
        return None
    return SQLite.db_get_contents_of_table(db_in, table, rows)
def db_get_latest_id(db_in, table):
    """Return the latest id in *table*; ``None`` when no backend matches
    ``DB.type``."""
    if DB.type != "SQLite":
        return None
    return SQLite.db_get_latest_id(db_in, table)
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training-related utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import atexit
import collections
from collections import OrderedDict
import functools
import multiprocessing.pool
import threading
import time
import numpy as np
import six
from six.moves import zip # pylint: disable=redefined-builtin
from tensorflow.python import tf2
from tensorflow.python.data.experimental.ops import cardinality
from tensorflow.python.data.experimental.ops.distribute_options import AutoShardPolicy
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.ops import readers
from tensorflow.python.eager import context
from tensorflow.python.framework import composite_tensor_utils
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import smart_cond
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import callbacks as cbks
from tensorflow.python.keras import losses
from tensorflow.python.keras import metrics as metrics_module
from tensorflow.python.keras.utils import data_utils
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils import losses_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.losses import util as tf_losses_utils
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.compat import collections_abc
@six.add_metaclass(abc.ABCMeta)
class Aggregator(object):
  """Abstract base class used to aggregate batch-level outputs of a loop.

  Callers invoke `create` on the first batch's outputs, `aggregate` on every
  batch, then `finalize` once to produce `results`.

  Attributes:
    use_steps: Whether the loop is using `step` or `batch_size`.
    num_samples: Total number of samples: `batch_size * num_batches`.
    steps: Total number of steps.
    batch_size: Batch size. It is used for validation checks between inputs and
      outputs.
    results: What to return at the end of the aggregation loop.
  """

  def __init__(self, use_steps, num_samples=None, steps=None, batch_size=None):
    self.use_steps = use_steps
    self.num_samples = num_samples
    self.steps = steps
    self.batch_size = batch_size
    self.results = []

  @abc.abstractmethod
  def create(self, batch_outs):
    """Creates the initial results from the first batch outputs.

    Arguments:
      batch_outs: A list of batch-level outputs.
    """
    raise NotImplementedError('Must be implemented in subclasses.')

  @abc.abstractmethod
  def aggregate(self, batch_outs, batch_start=None, batch_end=None):
    """Aggregates batch-level results into total results.

    Arguments:
      batch_outs: A list of batch-level outputs.
      batch_start: The start index of this batch. Always `None` if `use_steps`
        is `True`.
      batch_end: The end index of this batch. Always `None` if `use_steps` is
        `True`.
    """
    raise NotImplementedError('Must be implemented in subclasses.')

  @abc.abstractmethod
  def finalize(self):
    """Prepares the total results to be returned."""
    raise NotImplementedError('Must be implemented in subclasses.')
class MetricsAggregator(Aggregator):
  """Aggregator that calculates loss and metrics info.

  Expects each `batch_outs` to hold the batch loss at index 0 followed by
  metric values (see comments in `aggregate`).

  Attributes:
    use_steps: Whether the loop is using `step` or `batch_size`.
    num_samples: Total number of samples: `batch_size*num_batches`.
    steps: Total number of steps, ie number of times to iterate over a dataset
      to cover all samples.
  """

  def __init__(self, use_steps, num_samples=None, steps=None):
    super(MetricsAggregator, self).__init__(
        use_steps=use_steps,
        num_samples=num_samples,
        steps=steps,
        batch_size=None)

  def create(self, batch_outs):
    # One running value per output (loss + each metric).
    self.results = [0.] * len(batch_outs)

  def aggregate(self, batch_outs, batch_start=None, batch_end=None):
    # Loss.
    if self.use_steps:
      # Step mode: plain sum; divided by `steps` in `finalize`.
      self.results[0] += batch_outs[0]
    else:
      # Sample mode: weight by the batch's sample count so a short final
      # batch averages correctly.
      self.results[0] += batch_outs[0] * (batch_end - batch_start)
    # Metrics (always stateful, just grab current values.)
    self.results[1:] = batch_outs[1:]

  def finalize(self):
    if not self.results:
      raise ValueError('Empty training data.')
    # Turn the accumulated loss sum into a mean.
    self.results[0] /= (self.num_samples or self.steps)
class ConcatAggregator(Aggregator):
  """Combine tensor-likes which cannot be merged on the fly.

  This class expects to aggregate a single tensor-like rather than a nested
  structure of tensor-likes.
  """

  def __init__(self, batch_size):
    # Whether the aggregated value is a composite tensor (set in `create`).
    self.composite = None
    super(ConcatAggregator, self).__init__(
        use_steps=True, num_samples=None, steps=None, batch_size=batch_size)

  def create(self, batch_element):
    self.composite = composite_tensor_utils.is_composite_or_composite_value(
        batch_element)

  def aggregate(self, batch_element, batch_start=None, batch_end=None):
    # TODO(psv): Add num_samples check here to detect when output batch
    # #samples is < batch size and != input batch #samples.
    if self.batch_size and self.batch_size < batch_element.shape[0]:
      raise ValueError(
          'Mismatch between expected batch size and model output batch size. '
          'Output shape = {}, expected output shape = shape {}'.format(
              batch_element.shape,
              (self.batch_size,) + batch_element.shape[1:]))
    # Batches are buffered and concatenated once, in `finalize`.
    self.results.append(batch_element)

  def finalize(self):
    # Special case of single batch inference which skips a copy.
    if len(self.results) == 1:
      self.results = self.results[0]

    elif self.composite:
      # TODO(taylorrobie): efficiently concatenate.
      results = self.results[0]
      for r in self.results[1:]:
        results = composite_tensor_utils.append_composite_tensor(results, r)
      self.results = results

    else:
      self.results = np.concatenate(self.results, axis=0)

    # Materialise an EagerTensor result as a NumPy array.
    if isinstance(self.results, ops.EagerTensor):
      self.results = self.results._numpy()  # pylint: disable=protected-access
_COPY_THREADS = 4
_COPY_POOL = None


def get_copy_pool():
  """Shared threadpool for copying arrays.

  Pool instantiation takes ~ 2ms, so a singleton pool is used rather than
  creating a pool per SliceAggregator.

  Returns:
    The global copy threadpool.
  """
  global _COPY_POOL
  if _COPY_POOL is not None:
    return _COPY_POOL
  # Lazily create the pool on first use and close it at interpreter exit.
  _COPY_POOL = multiprocessing.pool.ThreadPool(_COPY_THREADS)
  atexit.register(_COPY_POOL.close)
  return _COPY_POOL
class SliceAggregator(Aggregator):
  """Combine arrays where the final size is known.

  This class expects to aggregate a single tensor-like rather than a nested
  structure of tensor-likes.

  NumPy copies are an operation that threads handle quite well because all of
  the heavy lifting is in c and does not need the GIL. Moreover, we can perform
  lock-free writes to the same buffer in multiple threads because the nature of
  result aggregation guarantees that either the indices are disjoint or the
  aggregator will throw an exception in finalize. Moreover, because aggregation
  is performed on the slowest varying dimension, assignments for a given batch
  will write to contiguous blocks of memory, further minimizing contention.

  There is, however, some scheduling and context switching overhead which will
  offset the gains from pipelining the slice assignment. Below a given threshold
  it is faster to simply assign in the main thread rather than enqueue the
  assignment in a side thread. The exact threshold will vary from system to
  system, but the time is not very sensitive to the exact transition so a value
  of 2 ** 14 was chosen which should be reasonable on most systems.
  """

  # Below this element count the copy is done inline; above it, on the pool.
  _BINARY_SIZE_THRESHOLD = 2 ** 14
  # Upper bound (seconds) on waiting for async copies in `finalize`.
  _MAX_COPY_SECONDS = 300

  def __init__(self, num_samples, batch_size):
    # Events that signal completion of each enqueued background copy.
    self._async_copies = []
    self._pool = get_copy_pool()
    # Exceptions raised in worker threads, re-raised on the main thread.
    self._errors = []
    super(SliceAggregator, self).__init__(
        use_steps=False,
        num_samples=num_samples,
        steps=None,
        batch_size=batch_size)

  def create(self, batch_element):
    # This step does not need to be pipelined because NumPy empty array
    # initialization is effectively instantaneous.
    shape = (self.num_samples,) + batch_element.shape[1:]
    dtype = batch_element.dtype
    if isinstance(batch_element, ops.EagerTensor):
      dtype = dtype.as_numpy_dtype

    self.results = np.empty(shape=shape, dtype=dtype)

  def aggregate(self, batch_element, batch_start, batch_end):
    # Fail early.
    if self._errors:
      six.reraise(type(self._errors[0]), self._errors[0])

    # In the special case of single batch inference, no copy is needed.
    if batch_end - batch_start == self.num_samples:
      if self.num_samples != batch_element.shape[0]:
        raise ValueError(
            'Mismatch between expected batch size and model output batch size. '
            'Output shape = {}, expected output shape = shape {}'.format(
                batch_element.shape, self.results.shape))

      self.results = batch_element
      return

    # This is an approximate threshold, so we don't need to consider the number
    # of bytes per element.
    num_elements = np.prod(batch_element.shape)
    if num_elements < self._BINARY_SIZE_THRESHOLD:
      self.results[batch_start:batch_end] = batch_element
    else:
      # Large slice: hand the copy to the shared threadpool and record an
      # event so `finalize` can wait for it.
      is_finished = threading.Event()
      self._pool.apply_async(
          self._slice_assign,
          args=(batch_element, batch_start, batch_end, is_finished))
      self._async_copies.append(is_finished)

  def _slice_assign(self, batch_element, batch_start, batch_end, is_finished):
    try:
      self.results[batch_start:batch_end] = batch_element

    except Exception as e:  # pylint: disable=broad-except
      # `_slice_assign` should only be called in threads and exceptions raised
      # in threads do not carry over to the main thread. So instead we perform a
      # a broad catch in the thread and then store the exception to be re-raised
      # in the main thread.
      self._errors.append(e)

    finally:
      is_finished.set()

  def finalize(self):
    # Wait for all outstanding background copies, within a global time budget.
    start_time = time.time()
    for is_finished in self._async_copies:
      timeout = max([0., self._MAX_COPY_SECONDS - (time.time() - start_time)])
      if not is_finished.wait(timeout):
        raise ValueError('Timed out waiting for copy to complete.')

    if self._errors:
      six.reraise(self._errors[0].__class__, self._errors[0])
class OutputsAggregator(Aggregator):
  """Aggregator that concatenates outputs.

  Handles a nested structure of outputs by delegating each leaf to a
  `ConcatAggregator` or `SliceAggregator`.
  """

  # Shallow structure of the outputs, captured from the first batch.
  _structure = None

  def create(self, batch_outs):
    # SparseTensorValue is a named tuple which nest will flatten, so we need
    # to guard it to properly handle the structure.
    self._structure = nest.get_traverse_shallow_structure(
        lambda x: not composite_tensor_utils.is_composite_or_composite_value(x),
        batch_outs)
    batch_outs = nest.flatten_up_to(self._structure, batch_outs)

    for batch_element in batch_outs:
      if composite_tensor_utils.is_composite_or_composite_value(batch_element):
        # If the output is not a ndarray, it will be either a composite tensor
        # or a composite tensor's Value object. In either case, we can't
        # allocate an array to hold the object - we'll handle it later.
        self.results.append(ConcatAggregator(self.batch_size))
      elif isinstance(batch_element, (np.ndarray, ops.EagerTensor)):
        # Dense output: concatenate in step mode, slice-assign otherwise.
        self.results.append(
            (ConcatAggregator(self.batch_size) if self.use_steps else
             SliceAggregator(self.num_samples, self.batch_size)))
      else:
        # This is not a ndarray, a CompositeTensor, or a CompositeTensorValue.
        # Fail fast rather than trying to concatenate it.
        raise RuntimeError('Attempted to aggregate unsupported object {}.'
                           .format(batch_element))

      self.results[-1].create(batch_element)

  def aggregate(self, batch_outs, batch_start=None, batch_end=None):
    # Delegate each flattened output to its per-leaf aggregator.
    batch_outs = nest.flatten_up_to(self._structure, batch_outs)
    for batch_element, result in zip(batch_outs, self.results):
      result.aggregate(batch_element, batch_start, batch_end)

  def finalize(self):
    for result in self.results:
      result.finalize()
    self.results = [i.results for i in self.results]
    # Re-nest the flat results into the original output structure.
    self.results = nest.pack_sequence_as(self._structure, self.results)
def get_progbar(model, count_mode, include_metrics=True):
  """Get Progbar.

  Arguments:
    model: Model whose `metrics_names` (minus `loss`) become the stateful
      metrics of the progress bar.
    count_mode: Passed through to `ProgbarLogger`.
    include_metrics: If False, no stateful metric names are forwarded.

  Returns:
    A configured `cbks.ProgbarLogger`.
  """
  stateful_metric_names = None
  if include_metrics:
    stateful_metric_names = getattr(model, 'metrics_names', None)
    if stateful_metric_names:
      # Exclude `loss`, which is always the first entry.
      stateful_metric_names = stateful_metric_names[1:]
  return cbks.ProgbarLogger(count_mode, stateful_metrics=stateful_metric_names)
def slice_arrays(arrays, indices, contiguous=True):
  """Slices batches out of provided arrays (workaround for eager tensors).

  Unfortunately eager tensors don't have the same slicing behavior as
  Numpy arrays (they follow the same slicing behavior as symbolic TF tensors),
  hence we cannot use `generic_utils.slice_arrays` directly
  and we have to implement this workaround based on `concat`. This has a
  performance cost.

  Arguments:
    arrays: Single array or list of arrays.
    indices: List of indices in the array that should be included in the output
      batch.
    contiguous: Boolean flag indicating whether the indices are contiguous.

  Returns:
    Slice of data (either single array or list of arrays).
  """
  was_single_array = not isinstance(arrays, list)
  if was_single_array:
    arrays = [arrays]

  if any(tensor_util.is_tensor(x) for x in arrays):
    if contiguous:
      # Contiguous indices allow a single cheap slice per tensor.
      start, stop = indices[0], indices[-1] + 1
      slices = [x[start:stop] for x in arrays]
    else:
      # Gather one-element slices and concat them (expensive path).
      entries = [[x[i:i + 1] for i in indices] for x in arrays]
      slices = [array_ops.concat(x, axis=0) for x in entries]
  else:
    slices = generic_utils.slice_arrays(arrays, indices)

  return slices[0] if was_single_array else slices
def check_num_samples(ins, batch_size=None, steps=None, steps_name='steps'):
  """Determine the number of samples provided for training and evaluation.

  The number of samples is not defined when running with `steps`,
  in which case the number of samples is set to `None`.

  Arguments:
    ins: List of tensors to be fed to the Keras function.
    batch_size: Integer batch size or `None` if not defined.
    steps: Total number of steps (batches of samples) before declaring
      `_predict_loop` finished. Ignored with the default value of `None`.
    steps_name: The public API's parameter name for `steps`.

  Raises:
    ValueError: when `steps` is `None` and the attribute `ins.shape`
    does not exist. Also raises ValueError when `steps` is not `None`
    and `batch_size` is not `None` because they are mutually
    exclusive.

  Returns:
    When steps is `None`, returns the number of samples to be
    processed based on the size of the first dimension of the
    first input numpy array. When steps is not `None` and
    `batch_size` is `None`, returns `None`.
  """
  if steps is not None:
    if batch_size is not None:
      raise ValueError(
          'If {} is set, the `batch_size` must be None.'.format(steps_name))
  if check_steps_argument(ins, steps, steps_name):
    return None

  first_input = ins[0]
  if hasattr(first_input, 'shape'):
    return int(first_input.shape[0])
  return None  # Edge case where ins == [static_learning_phase]
def standardize_single_array(x, expected_shape=None):
  """Expand data of shape (x,) to (x, 1), unless len(expected_shape)==1."""
  if x is None:
    return None

  if composite_tensor_utils.is_composite_or_composite_value(x):
    # Composite tensors (and their value objects) are passed through as-is.
    return x

  if isinstance(x, int):
    raise ValueError(
        'Expected an array data type but received an integer: {}'.format(x))

  is_rank_one = x.shape is not None and len(x.shape) == 1
  caller_wants_rank_one = expected_shape is not None and len(expected_shape) == 1
  if is_rank_one and not caller_wants_rank_one:
    # Promote (n,) -> (n, 1) with the appropriate backend.
    if tensor_util.is_tensor(x):
      return array_ops.expand_dims(x, axis=1)
    return np.expand_dims(x, 1)
  return x
def standardize_input_data(data,
                           names,
                           shapes=None,
                           check_batch_axis=True,
                           exception_prefix=''):
  """Normalizes inputs and targets provided by users.

  Users may pass data as a list of arrays, dictionary of arrays,
  or as a single array. We normalize this to an ordered list of
  arrays (same order as `names`), while checking that the provided
  arrays have shapes that match the network's expectations.

  Arguments:
      data: User-provided input data (polymorphic).
      names: List of expected array names.
      shapes: Optional list of expected array shapes.
      check_batch_axis: Boolean; whether to check that the batch axis of the
          arrays matches the expected value found in `shapes`.
      exception_prefix: String prefix used for exception formatting.

  Returns:
      List of standardized input arrays (one array per model input).

  Raises:
      ValueError: in case of improperly formatted user-provided data.
  """
  try:
    data_len = len(data)
  except TypeError:
    # For instance if data is `None` or a symbolic Tensor.
    data_len = None
  if not names:
    if data_len and not isinstance(data, dict):
      raise ValueError(
          'Error when checking model ' + exception_prefix + ': '
          'expected no data, but got:', data)
    return []
  if data is None:
    return [None for _ in range(len(names))]
  if isinstance(data, dict):
    try:
      # `DataFrame`s are unwrapped to their underlying values; other entries
      # pass through. Order follows `names`.
      data = [
          data[x].values
          if data[x].__class__.__name__ == 'DataFrame' else data[x]
          for x in names
      ]
    except KeyError as e:
      raise ValueError('No data provided for "' + e.args[0] + '". Need data '
                       'for each key in: ' + str(names))
  elif isinstance(data, (list, tuple)):
    if isinstance(data[0], (list, tuple)):
      # Nested lists: one list of values per input.
      data = [np.asarray(d) for d in data]
    elif len(names) == 1 and isinstance(data[0], (float, int)):
      # Single-input model fed a flat list of scalars.
      data = [np.asarray(data)]
    else:
      data = [
          x.values if x.__class__.__name__ == 'DataFrame' else x for x in data
      ]
  else:
    # Single array / DataFrame: wrap into a one-element list.
    data = data.values if data.__class__.__name__ == 'DataFrame' else data
    data = [data]
  if shapes is not None:
    data = [
        standardize_single_array(x, shape) for (x, shape) in zip(data, shapes)
    ]
  else:
    data = [standardize_single_array(x) for x in data]
  if len(data) != len(names):
    if data and hasattr(data[0], 'shape'):
      raise ValueError('Error when checking model ' + exception_prefix +
                       ': the list of Numpy arrays that you are passing to '
                       'your model is not the size the model expected. '
                       'Expected to see ' + str(len(names)) + ' array(s), ' +
                       'for inputs ' + str(names) + ' but instead got the '
                       'following list of ' + str(len(data)) + ' arrays: ' +
                       str(data)[:200] + '...')
    elif len(names) > 1:
      raise ValueError('Error when checking model ' + exception_prefix +
                       ': you are passing a list as input to your model, '
                       'but the model expects a list of ' + str(len(names)) +
                       ' Numpy arrays instead. The list you passed was: ' +
                       str(data)[:200])
    elif len(data) == 1 and not hasattr(data[0], 'shape'):
      raise TypeError('Error when checking model ' + exception_prefix +
                      ': data should be a Numpy array, or list/dict of '
                      'Numpy arrays. Found: ' + str(data)[:200] + '...')
    elif len(names) == 1:
      data = [np.asarray(data)]
  # Check shapes compatibility.
  if shapes:
    for i in range(len(names)):
      if shapes[i] is not None:
        if tensor_util.is_tensor(data[i]):
          tensorshape = data[i].shape
          if not tensorshape:
            # Unknown rank: nothing to validate.
            continue
          data_shape = tuple(tensorshape.as_list())
        elif composite_tensor_utils.is_composite_or_composite_value(data[i]):
          tensorshape = composite_tensor_utils.get_shape(data[i])
          data_shape = tuple(tensorshape.as_list())
        else:
          data_shape = data[i].shape
        shape = shapes[i]
        if len(data_shape) != len(shape):
          raise ValueError('Error when checking ' + exception_prefix +
                           ': expected ' + names[i] + ' to have ' +
                           str(len(shape)) + ' dimensions, but got array '
                           'with shape ' + str(data_shape))
        if not check_batch_axis:
          # Skip the batch dimension in the comparison below.
          data_shape = data_shape[1:]
          shape = shape[1:]
        for dim, ref_dim in zip(data_shape, shape):
          # `None` in either shape means "any size is acceptable".
          if ref_dim != dim and ref_dim is not None and dim is not None:
            raise ValueError('Error when checking ' + exception_prefix +
                             ': expected ' + names[i] + ' to have shape ' +
                             str(shape) + ' but got array with shape ' +
                             str(data_shape))
  return data
def standardize_sample_or_class_weights(x_weight, output_names, weight_type):
  """Maps `sample_weight` or `class_weight` to model outputs.

  Arguments:
      x_weight: User-provided `sample_weight` or `class_weight` argument.
      output_names: List of output names (strings) in the model.
      weight_type: A string used purely for exception printing.

  Returns:
      A list of `sample_weight` or `class_weight` where there are exactly
          one element per model output.

  Raises:
      ValueError: In case of invalid user-provided argument.
  """
  if x_weight is None or (isinstance(x_weight, (list, tuple)) and
                          len(x_weight) == 0):  # pylint: disable=g-explicit-length-test
    return [None for _ in output_names]
  if len(output_names) == 1:
    if isinstance(x_weight, (list, tuple)) and len(x_weight) == 1:
      return x_weight
    if isinstance(x_weight, dict) and output_names[0] in x_weight:
      return [x_weight[output_names[0]]]
    else:
      # Any other value is taken as the weight for the single output.
      return [x_weight]
  if isinstance(x_weight, (list, tuple)):
    if len(x_weight) != len(output_names):
      # Fixed missing space: the message previously rendered '`array'.
      raise ValueError('Provided `' + weight_type + '` was a list of ' +
                       str(len(x_weight)) + ' elements, but the model has ' +
                       str(len(output_names)) + ' outputs. '
                       'You should provide one `' + weight_type + '` '
                       'array per model output.')
    return x_weight
  # `collections.Mapping` is deprecated and removed in Python 3.10; use the
  # `collections.abc` alias, consistent with `get_loss_function` below.
  if isinstance(x_weight, collections_abc.Mapping):
    generic_utils.check_for_unexpected_keys(weight_type, x_weight, output_names)
    x_weights = []
    for name in output_names:
      x_weights.append(x_weight.get(name))
    return x_weights
  else:
    raise TypeError('The model has multiple outputs, so `' + weight_type + '` '
                    'should be either a list or a dict. '
                    'Provided `' + weight_type + '` type not understood: ' +
                    str(x_weight))
def standardize_class_weights(class_weight, output_names):
  """Maps a user-provided `class_weight` to one entry per model output."""
  return standardize_sample_or_class_weights(
      class_weight, output_names, 'class_weight')
def standardize_sample_weights(sample_weight, output_names):
  """Maps a user-provided `sample_weight` to one entry per model output."""
  return standardize_sample_or_class_weights(
      sample_weight, output_names, 'sample_weight')
def handle_partial_sample_weights(outputs, sample_weights, sample_weight_modes,
                                  check_all_flat=False):
  """Adds 1.0 as sample weights for the outputs for which there is no weight.

  Args:
      outputs: List of model outputs.
      sample_weights: List of sample weight inputs.
      sample_weight_modes: List of sample weight modes or None.
      check_all_flat: Ensure that inputs are not nested structures. This is not
          a free check, so we may not want to run it eagerly every iteration.

  Returns:
      Tuple of sample weights, one sample weight for every output, and booleans
      describing the raw sample weights.
  """
  any_sample_weight = sample_weights is not None and any(
      w is not None for w in sample_weights)
  partial_sample_weight = any_sample_weight and any(
      w is None for w in sample_weights)
  # No weights at all: nothing to fill in.
  if not any_sample_weight:
    return None, any_sample_weight, partial_sample_weight
  # Every output already has a weight: pass through unchanged.
  if not partial_sample_weight:
    return sample_weights, any_sample_weight, partial_sample_weight
  if check_all_flat:
    # Flattening a flat structure is a no-op, so comparing against the
    # flattened version proves the inputs are not nested.
    nest.assert_same_structure(
        list_to_tuple(sample_weights),
        list_to_tuple(nest.flatten(sample_weights)))
    nest.assert_same_structure(
        list_to_tuple(outputs),
        list_to_tuple(nest.flatten(outputs)))
    if sample_weight_modes is not None:
      nest.assert_same_structure(
          sample_weight_modes, nest.flatten(sample_weight_modes))
  new_sample_weights = []
  for i, sw in enumerate(sample_weights):
    if sw is None:
      # Fill in all-ones weights shaped to match the output: (batch,) for
      # samplewise weighting, (batch, timesteps) for 'temporal' mode.
      as_numpy = isinstance(outputs[i], np.ndarray)
      output = outputs[i]
      output_shape = output.shape if as_numpy else array_ops.shape(output)
      is_temporal = (
          sample_weight_modes is not None and
          sample_weight_modes[i] == 'temporal')
      sw_shape = (output_shape[0],
                  output_shape[1]) if is_temporal else (output_shape[0],)
      new_sample_weights.append(
          np.ones(sw_shape) if as_numpy else array_ops.ones(sw_shape))
    else:
      new_sample_weights.append(sw)
  return (list_to_tuple(new_sample_weights),
          any_sample_weight, partial_sample_weight)
def check_array_lengths(inputs, targets, weights=None):
  """Does user input validation for numpy arrays.

  Arguments:
      inputs: list of Numpy arrays of inputs.
      targets: list of Numpy arrays of targets.
      weights: list of Numpy arrays of sample weights.

  Raises:
      ValueError: in case of incorrectly formatted data.
  """
  def is_tensor_or_composite_tensor(x):
    # Symbolic/composite tensors have no concrete length here, so they are
    # excluded from the sample-count comparisons below.
    return tensor_util.is_tensor(
        x) or composite_tensor_utils.is_composite_or_composite_value(x)
  def set_of_lengths(x):
    # Returns a set with the variation between
    # different shapes, with None => 0
    # (`{}` is an empty dict, not a set, but it is falsy and has len 0, which
    # is all the checks below rely on.)
    if x is None:
      return {}
    else:
      return set([
          y.shape[0]
          for y in x
          if y is not None and not is_tensor_or_composite_tensor(y)
      ])
  set_x = set_of_lengths(inputs)
  set_y = set_of_lengths(targets)
  set_w = set_of_lengths(weights)
  if len(set_x) > 1:
    raise ValueError('All input arrays (x) should have '
                     'the same number of samples. Got array shapes: ' +
                     str([x.shape for x in inputs]))
  if len(set_y) > 1:
    raise ValueError('All target arrays (y) should have '
                     'the same number of samples. Got array shapes: ' +
                     str([y.shape for y in targets]))
  if set_x and set_y and list(set_x)[0] != list(set_y)[0]:
    raise ValueError('Input arrays should have '
                     'the same number of samples as target arrays. '
                     'Found ' + str(list(set_x)[0]) + ' input samples '
                     'and ' + str(list(set_y)[0]) + ' target samples.')
  if len(set_w) > 1:
    raise ValueError('All sample_weight arrays should have '
                     'the same number of samples. Got array shapes: ' +
                     str([w.shape for w in weights]))
  # NOTE(review): `set_y` holds target sample counts and `set_w` holds
  # sample_weight counts, but the message below labels them 'input samples'
  # and 'target samples' — the labels look mislabeled. Confirm before
  # changing this user-facing text.
  if set_y and set_w and list(set_y)[0] != list(set_w)[0]:
    raise ValueError('Sample_weight arrays should have '
                     'the same number of samples as target arrays. Got ' +
                     str(list(set_y)[0]) + ' input samples and ' +
                     str(list(set_w)[0]) + ' target samples.')
def check_loss_and_target_compatibility(targets, loss_fns, output_shapes):
  """Does validation on the compatibility of targets and loss functions.

  This helps prevent users from using loss functions incorrectly. This check
  is purely for UX purposes.

  Arguments:
      targets: list of Numpy arrays of targets.
      loss_fns: list of loss functions.
      output_shapes: list of shapes of model outputs.

  Raises:
      ValueError: if a loss function or target array
          is incompatible with an output.
  """
  # Losses for which target shape must exactly match output shape.
  key_loss_fns = {
      losses.mean_squared_error, losses.binary_crossentropy,
      losses.categorical_crossentropy
  }
  key_loss_classes = (losses.MeanSquaredError, losses.BinaryCrossentropy,
                      losses.CategoricalCrossentropy)
  for y, loss, shape in zip(targets, loss_fns, output_shapes):
    if y is None or loss is None or tensor_util.is_tensor(y):
      # Symbolic targets (or absent ones) cannot be shape-checked here.
      continue
    if losses.is_categorical_crossentropy(loss):
      # A trailing dimension of 1 strongly suggests integer class labels,
      # which require sparse_categorical_crossentropy instead.
      if y.shape[-1] == 1:
        raise ValueError('You are passing a target array of shape ' +
                         str(y.shape) +
                         ' while using as loss `categorical_crossentropy`. '
                         '`categorical_crossentropy` expects '
                         'targets to be binary matrices (1s and 0s) '
                         'of shape (samples, classes). '
                         'If your targets are integer classes, '
                         'you can convert them to the expected format via:\n'
                         '```\n'
                         'from keras.utils import to_categorical\n'
                         'y_binary = to_categorical(y_int)\n'
                         '```\n'
                         '\n'
                         'Alternatively, you can use the loss function '
                         '`sparse_categorical_crossentropy` instead, '
                         'which does expect integer targets.')
    is_loss_wrapper = isinstance(loss, losses.LossFunctionWrapper)
    if (isinstance(loss, key_loss_classes) or (is_loss_wrapper and
                                               (loss.fn in key_loss_fns))):
      # Compare all non-batch dimensions of target and output.
      for target_dim, out_dim in zip(y.shape[1:], shape[1:]):
        if out_dim is not None and target_dim != out_dim:
          loss_name = loss.name
          if loss_name is None:
            loss_type = loss.fn if is_loss_wrapper else type(loss)
            loss_name = loss_type.__name__
          raise ValueError('A target array with shape ' + str(y.shape) +
                           ' was passed for an output of shape ' + str(shape) +
                           ' while using as loss `' + loss_name + '`. '
                           'This loss expects targets to have the same shape '
                           'as the output.')
def collect_per_output_metric_info(metrics,
                                   output_names,
                                   output_shapes,
                                   loss_fns,
                                   is_weighted=False):
  """Maps metric names and functions to model outputs.

  Arguments:
      metrics: a list or a list of lists or a dict of metric functions.
      output_names: a list of the names (strings) of model outputs.
      output_shapes: a list of the shapes (strings) of model outputs.
      loss_fns: a list of the loss functions corresponding to the model
          outputs.
      is_weighted: Boolean indicating whether the given metrics are weighted.

  Returns:
      A list (one entry per model output) of dicts.
      For instance, if the model has 2 outputs, and for the first output
      we want to compute "binary_accuracy" and "binary_crossentropy",
      and just "binary_accuracy" for the second output,
      the list would look like: `[{
          'acc': binary_accuracy(),
          'ce': binary_crossentropy(),
        }, {
          'acc': binary_accuracy(),
        }]`

  Raises:
      TypeError: if an incorrect type is passed for the `metrics` argument.
  """
  if not metrics:
    return [{} for _ in output_names]

  if isinstance(metrics, list):
    any_sub_list = any(isinstance(m, list) for m in metrics)
    if any_sub_list:
      if len(metrics) != len(output_names):
        raise ValueError('When passing a list of lists as `metrics`, '
                         'it should have one entry per model output. '
                         'The model has ' + str(len(output_names)) +
                         ' outputs, but you passed metrics=' + str(metrics))
      # User has provided a list of len = len(outputs).
      nested_metrics = [generic_utils.to_list(m) for m in metrics]
    else:
      # If it is a single list we then apply all metrics to all outputs.
      if len(output_names) > 1:
        nested_metrics = []
        for _ in output_names:
          nested_metrics.append(
              [metrics_module.clone_metric(m) for m in metrics])
      else:
        nested_metrics = [metrics]
  # `collections.Mapping` is deprecated and removed in Python 3.10; use the
  # `collections.abc` alias, consistent with `get_loss_function` below.
  elif isinstance(metrics, collections_abc.Mapping):
    generic_utils.check_for_unexpected_keys('metrics', metrics, output_names)
    nested_metrics = []
    for name in output_names:
      output_metrics = generic_utils.to_list(metrics.get(name, []))
      nested_metrics.append(output_metrics)
  else:
    raise TypeError('Type of `metrics` argument not understood. '
                    'Expected a list or dictionary, found: ' + str(metrics))

  per_output_metrics = []
  for i, metrics in enumerate(nested_metrics):
    metrics_dict = OrderedDict()
    for metric in metrics:
      metric_name = get_metric_name(metric, is_weighted)
      metric_fn = get_metric_function(
          metric, output_shape=output_shapes[i], loss_fn=loss_fns[i])
      # If the metric function is not stateful, we create a stateful version.
      if not isinstance(metric_fn, metrics_module.Metric):
        metric_fn = metrics_module.MeanMetricWrapper(
            metric_fn, name=metric_name)
      metrics_dict[metric_name] = metric_fn
    per_output_metrics.append(metrics_dict)

  return per_output_metrics
def batch_shuffle(index_array, batch_size):
  """Shuffles an array in a batch-wise fashion.

  Useful for shuffling HDF5 arrays
  (where one cannot access arbitrary indices).

  Arguments:
      index_array: array of indices to be shuffled.
      batch_size: integer.

  Returns:
      The `index_array` array, shuffled in a batch-wise fashion.
  """
  num_full_batches = len(index_array) // batch_size
  cutoff = num_full_batches * batch_size
  # Reshaping requires a length that is cleanly divisible by batch_size, so
  # stash the trailing remainder and re-append it after shuffling.
  remainder = index_array[cutoff:]
  batches = index_array[:cutoff].reshape((num_full_batches, batch_size))
  # Shuffling the 2D view permutes whole batches, preserving intra-batch order.
  np.random.shuffle(batches)
  return np.append(batches.flatten(), remainder)
def standardize_weights(y,
                        sample_weight=None,
                        class_weight=None,
                        sample_weight_mode=None):
  """Performs sample weight validation and standardization.

  Everything gets normalized to a single sample-wise (or timestep-wise)
  weight array. If both `sample_weight` and `class_weight` are provided,
  the weights are multiplied.

  Arguments:
      y: Numpy array or Tensor of model targets to be weighted.
      sample_weight: User-provided `sample_weight` argument.
      class_weight: User-provided `class_weight` argument.
      sample_weight_mode: One of `None` or `"temporal"`. `"temporal"` indicated
          that we expect 2D weight data that will be applied to the last 2
          dimensions of the targets (i.e. we are weighting timesteps, not
          samples).

  Returns:
      A numpy array of target weights, one entry per sample to weight.

  Raises:
      ValueError: In case of invalid user-provided arguments.
  """
  # Iterator may return sample_weight as 1-tuple
  if isinstance(sample_weight, tuple):
    sample_weight = sample_weight[0]
  if sample_weight_mode is not None and sample_weight_mode != 'samplewise':
    if sample_weight_mode != 'temporal':
      # Fixed malformed message: it previously began with a stray '"' as
      # '"sample_weight_mode '.
      raise ValueError('`sample_weight_mode` '
                       'should be None or "temporal". '
                       'Found: ' + str(sample_weight_mode))
    if len(y.shape) < 3:
      raise ValueError('Found a sample_weight array for '
                       'an input with shape ' + str(y.shape) + '. '
                       'Timestep-wise sample weighting (use of '
                       'sample_weight_mode="temporal") is restricted to '
                       'outputs that are at least 3D, i.e. that have '
                       'a time dimension.')
    if sample_weight is not None and len(sample_weight.shape) != 2:
      raise ValueError('Found a sample_weight array with shape ' +
                       str(sample_weight.shape) + '. '
                       'In order to use timestep-wise sample weighting, '
                       'you should pass a 2D sample_weight array.')
  else:
    if sample_weight is not None and len(sample_weight.shape) != 1:
      raise ValueError('Found a sample_weight array with shape {}. In order to '
                       'use timestep-wise sample weights, you should specify '
                       'sample_weight_mode="temporal" in compile(); found "{}" '
                       'instead. If you just mean to use sample-wise weights, '
                       'make sure your sample_weight array is 1D.'
                       .format(sample_weight.shape, sample_weight_mode))

  if sample_weight is not None:
    if len(sample_weight.shape) > len(y.shape):
      # Fixed missing spaces: the message previously ran words together
      # ('shape(2,).Expected').
      raise ValueError('Found a sample_weight with shape ' +
                       str(sample_weight.shape) + '. '
                       'Expected sample_weight with rank '
                       'less than or equal to ' + str(len(y.shape)))

    if (not tensor_util.is_tensor(sample_weight) and
        y.shape[:sample_weight.ndim] != sample_weight.shape):
      raise ValueError('Found a sample_weight array with shape ' +
                       str(sample_weight.shape) + ' for an input with shape ' +
                       str(y.shape) + '. '
                       'sample_weight cannot be broadcast.')

  # Class weights applied per-sample.
  class_sample_weight = None
  if isinstance(class_weight, dict):
    if len(y.shape) > 2:
      raise ValueError('`class_weight` not supported for '
                       '3+ dimensional targets.')

    if tensor_util.is_tensor(y):
      # Few classes are expected, so densifying is reasonable.
      keys = np.array(sorted(class_weight.keys()))
      values = np.array([class_weight[i] for i in keys])
      # NaN marks classes without a weight; check_numerics below turns any
      # lookup of such a class into an explicit error.
      weight_vector = np.zeros(np.max(keys) + 1)
      weight_vector[:] = np.nan
      weight_vector[keys] = values

      y_classes = smart_cond.smart_cond(
          len(y.shape.as_list()) == 2 and K.shape(y)[1] > 1,
          lambda: K.argmax(y, axis=1),
          lambda: math_ops.cast(K.reshape(y, (-1,)), dtypes.int64)
      )
      class_sample_weight = array_ops.gather(weight_vector, y_classes)
      gen_array_ops.check_numerics(
          class_sample_weight,
          'Invalid classes or class weights detected. NaN values indicate that '
          'an appropriate class weight could not be determined.')
      class_sample_weight = math_ops.cast(class_sample_weight, K.floatx())
      if sample_weight is not None:
        sample_weight = math_ops.cast(
            ops.convert_to_tensor_v2(sample_weight), K.floatx())
    else:
      # Reduce one-hot (or single-column) 2D targets to class indices.
      y_classes = y
      if len(y.shape) == 2:
        if y.shape[1] > 1:
          y_classes = np.argmax(y, axis=1)
        elif y.shape[1] == 1:
          y_classes = np.reshape(y, y.shape[0])

      class_sample_weight = np.asarray(
          [class_weight[cls] for cls in y_classes if cls in class_weight])

      if len(class_sample_weight) != len(y_classes):
        # subtract the sets to pick all missing classes
        existing_classes = set(y_classes)
        existing_class_weight = set(class_weight.keys())
        raise ValueError(
            '`class_weight` must contain all classes in the data.'
            ' The classes %s exist in the data but not in '
            '`class_weight`.' % (existing_classes - existing_class_weight))

  if class_sample_weight is not None and sample_weight is not None:
    # Multiply weights if both are provided.
    return class_sample_weight * sample_weight
  if sample_weight is not None:
    return sample_weight
  if class_sample_weight is not None:
    return class_sample_weight
  return None
def has_symbolic_tensors(ls):
  """Returns whether `ls` holds tensors while running in graph mode."""
  # Eager tensors are never "symbolic", so short-circuit in eager mode.
  return (not context.executing_eagerly()) and has_tensors(ls)
def has_tensors(ls):
  """Returns whether `ls` is a tensor or a container holding one."""
  if isinstance(ls, dict):
    return any(tensor_util.is_tensor(v) for v in ls.values())
  if isinstance(ls, (list, tuple)):
    return any(tensor_util.is_tensor(item) for item in ls)
  return tensor_util.is_tensor(ls)
def get_metric_name(metric, weighted=False):
  """Returns the name corresponding to the given metric input.

  Arguments:
    metric: Metric function name or reference.
    weighted: Boolean indicating if the given metric is weighted.

  Returns:
      The metric name.
  """
  if tf2.enabled():
    # We keep the string that the user has set in compile as the metric name.
    if isinstance(metric, six.string_types):
      return metric

    metric = metrics_module.get(metric)
    return metric.name if hasattr(metric, 'name') else metric.__name__

  # Legacy (TF1) naming: known aliases get short suffixes, everything else
  # is resolved through the metrics module.
  prefix = 'weighted_' if weighted else ''
  if metric in ('accuracy', 'acc'):
    suffix = 'acc'
  elif metric in ('crossentropy', 'ce'):
    suffix = 'ce'
  else:
    metric_fn = metrics_module.get(metric)
    # Get metric name as string
    if hasattr(metric_fn, 'name'):
      suffix = metric_fn.name
    else:
      suffix = metric_fn.__name__
  return prefix + suffix
def get_metric_function(metric, output_shape=None, loss_fn=None):
  """Returns the metric function corresponding to the given metric input.

  Arguments:
    metric: Metric function name or reference.
    output_shape: The shape of the output that this metric will be calculated
      for.
    loss_fn: The loss function used.

  Returns:
      The metric function.
  """
  if metric not in ['accuracy', 'acc', 'crossentropy', 'ce']:
    return metrics_module.get(metric)

  wants_accuracy = metric in ('accuracy', 'acc')

  is_sparse_categorical_crossentropy = (
      isinstance(loss_fn, losses.SparseCategoricalCrossentropy) or
      (isinstance(loss_fn, losses.LossFunctionWrapper) and
       loss_fn.fn == losses.sparse_categorical_crossentropy))

  is_binary_crossentropy = (
      isinstance(loss_fn, losses.BinaryCrossentropy) or
      (isinstance(loss_fn, losses.LossFunctionWrapper) and
       loss_fn.fn == losses.binary_crossentropy))

  if output_shape[-1] == 1 or is_binary_crossentropy:
    return (metrics_module.binary_accuracy if wants_accuracy
            else metrics_module.binary_crossentropy)
  if is_sparse_categorical_crossentropy:
    return (metrics_module.sparse_categorical_accuracy if wants_accuracy
            else metrics_module.sparse_categorical_crossentropy)
  # If the output_shape[-1] is not 1, then we know output is `categorical`.
  # We assume it is sparse categorical only if loss is explicitly given
  # as sparse categorical crossentropy loss.
  return (metrics_module.categorical_accuracy if wants_accuracy
          else metrics_module.categorical_crossentropy)
def call_metric_function(metric_fn,
                         y_true,
                         y_pred=None,
                         weights=None,
                         mask=None):
  """Invokes metric function and returns the metric result tensor."""
  # NOTE(review): when `mask` is provided, `y_pred` must be non-None because
  # the mask is cast to `y_pred.dtype` — confirm callers guarantee this.
  if mask is not None:
    mask = math_ops.cast(mask, y_pred.dtype)
    if weights is None:
      # Use mask as sample weight.
      weights = mask
    else:
      # Update dimensions of weights to match with mask.
      weights = math_ops.cast(weights, dtype=y_pred.dtype)
      mask, _, weights = tf_losses_utils.squeeze_or_expand_dimensions(
          mask, sample_weight=weights)
      # Masked-out positions contribute zero weight.
      weights *= mask
  if y_pred is not None:
    return metric_fn(y_true, y_pred, sample_weight=weights)
  # `Mean` metric only takes a single value.
  return metric_fn(y_true, sample_weight=weights)
def get_loss_function(loss):
  """Returns the loss corresponding to the loss input in `compile` API."""
  if loss is None or isinstance(loss, losses.Loss):
    return loss

  if tf_inspect.isclass(loss) and issubclass(loss, losses.Loss):
    # It is not safe to assume that the loss takes no constructor arguments.
    # (Fixed a garbled '""' that previously appeared in this message.)
    raise ValueError(
        'Received uninstantiated Loss class: {}\nPlease call loss classes '
        'before passing them to Model.compile.'.format(loss))

  # Deserialize loss configuration, if needed.
  if isinstance(loss, collections_abc.Mapping):
    loss = losses.get(loss)

  # Custom callable class.
  if callable(loss) and not hasattr(loss, '__name__'):
    return loss

  # Wrap loss function with signature `(y_true, y_pred, **kwargs)`
  # in `LossFunctionWrapper` class.
  loss_fn = losses.get(loss)

  # For losses which are given as strings/functions in the compile API,
  # we always set the loss reduction type to be `SUM_OVER_BATCH_SIZE`
  # (both in distribution strategy context and otherwise).
  return losses.LossFunctionWrapper(
      loss_fn,
      name=loss_fn.__name__,
      reduction=losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE)
class RespectCompiledTrainableState(object):
  """Set and restore trainable state if it has changed since compile.

  The Keras API guarantees that the value of each Layer's `trainable` property
  at `Model.compile` time will be used when training that model. To honor this,
  it may be necessary to set the trainable values of layers to their
  compile-time values before entering a training endpoint, and to restore the
  current values when returning from it.

  However, the trainable state of a layer changes quite infrequently, if ever,
  for many kinds of workflows, and updating every layer in a model is an
  expensive operation. This scope therefore only performs the set/unset
  bookkeeping when some layer's trainable state actually differs from its
  compile-time value.
  """

  def __init__(self, model):
    self._model = model
    self._current_trainable_state = None
    self._compiled_trainable_state = None
    self._should_set_trainable = False

  def __enter__(self):
    self._current_trainable_state = self._model._get_trainable_state()  # pylint: disable=protected-access
    self._compiled_trainable_state = self._model._compiled_trainable_state  # pylint: disable=protected-access

    # Any layer whose current trainable value differs from its compile-time
    # value triggers the (expensive) set/restore dance.
    self._should_set_trainable = any(
        layer in self._current_trainable_state and
        self._current_trainable_state[layer] != trainable
        for layer, trainable in self._compiled_trainable_state.items())

    if self._should_set_trainable:
      self._model._set_trainable_state(self._compiled_trainable_state)  # pylint: disable=protected-access

  def __exit__(self, type_arg, value_arg, traceback_arg):
    # Undo the __enter__ change, restoring the caller-visible state.
    if self._should_set_trainable:
      self._model._set_trainable_state(self._current_trainable_state)  # pylint: disable=protected-access
    return False  # False values do not suppress exceptions
def validate_dataset_input(x, y, sample_weight, validation_split=None):
  """Validates user input arguments when a dataset iterator is passed.

  Arguments:
    x: Input data. A `tf.data` dataset or iterator.
    y: Target data. It could be either Numpy array(s) or TensorFlow tensor(s).
      Expected to be `None` when `x` is a dataset iterator.
    sample_weight: An optional sample-weight array passed by the user to weight
      the importance of each sample in `x`. Expected to be `None` when `x` is a
      dataset iterator
    validation_split: Float between 0 and 1. Fraction of the training data to be
      used as validation data. Expected to be `None` when `x` is a dataset
      iterator.

  Raises:
    ValueError: if argument `y` or `sample_weight` or `validation_split` are
        provided by user.
  """
  if y is not None:
    raise ValueError('You passed a dataset or dataset iterator (%s) as '
                     'input `x` to your model. In that case, you should '
                     'not specify a target (`y`) argument, since the dataset '
                     'or dataset iterator generates both input data and '
                     'target data. '
                     'Received: %s' % (x, y))
  if sample_weight is not None:
    # Note: the explicit trailing spaces keep the concatenated message
    # readable ("you can", "your dataset"); the original was missing them.
    raise ValueError('`sample_weight` argument is not supported when input '
                     '`x` is a dataset or a dataset iterator. Instead, you '
                     'can provide sample_weight as the third element of your '
                     'dataset, i.e. (inputs, targets, sample_weight). '
                     'Received: x=%s, sample_weight=%s' % (x, sample_weight))
  if validation_split is not None and validation_split != 0.0:
    raise ValueError(
        '`validation_split` argument is not supported when '
        'input `x` is a dataset or a dataset iterator. '
        'Received: x=%s, validation_split=%f' % (x, validation_split))
def validate_input_types(inp, orig_inp, allow_dict=True, field_name='inputs'):
  """Helper function to validate either inputs or targets."""
  bad_structure_msg = (
      'Please provide as model inputs either a single array or a list of '
      'arrays. You passed: {}={}')
  if isinstance(inp, (list, tuple)):
    # Every element of a list/tuple input must be an ndarray or a tensor.
    all_arrays = all(
        isinstance(element, np.ndarray) or tensor_util.is_tensor(element)
        for element in inp)
    if not all_arrays:
      raise ValueError(bad_structure_msg.format(field_name, str(orig_inp)))
  elif isinstance(inp, dict):
    if not allow_dict:
      raise ValueError(
          'You cannot pass a dictionary as model {}.'.format(field_name))
  elif not (isinstance(inp, np.ndarray) or tensor_util.is_tensor(inp)):
    raise ValueError(bad_structure_msg.format(field_name, orig_inp))
def check_generator_arguments(y=None, sample_weight=None,
                              validation_split=None):
  """Validates arguments passed when using a generator.

  Arguments:
    y: Target data. Expected to be `None` (targets come from the generator).
    sample_weight: Sample-weight array. Expected to be `None` (weights come
      from the generator).
    validation_split: Fraction of data for validation. Not supported with
      generators.

  Raises:
    ValueError: If any of the arguments is provided.
  """
  if y is not None:
    # The trailing space after "data is " keeps the concatenated message
    # readable; the original rendered "data isa generator".
    raise ValueError('`y` argument is not supported when data is '
                     'a generator or Sequence instance. Instead pass targets'
                     ' as the second element of the generator.')
  if sample_weight is not None:
    raise ValueError('`sample_weight` argument is not supported when data is '
                     'a generator or Sequence instance. Instead pass sample'
                     ' weights as the third element of the generator.')
  if validation_split:
    raise ValueError('If your data is in the form of a Python generator, '
                     'you cannot use `validation_split`.')
def check_steps_argument(input_data, steps, steps_name):
  """Validates `steps` argument based on input data's type.

  The cases when `steps` value must be provided are when
    1. input data passed is an iterator.
    2. model was built on top of symbolic tensors, input data is not
       required and is `None`.
    3. input data passed is a symbolic tensor.

  Arguments:
      input_data: Input data. Can be Numpy array(s) or TensorFlow tensor(s) or
        tf.data.Dataset iterator or `None`.
      steps: Integer or `None`. Total number of steps (batches of samples) to
        execute.
      steps_name: The public API's parameter name for `steps`.

  Returns:
    boolean, True if `steps` argument is required, else False.

  Raises:
    ValueError: if `steps` argument is required for given input data type
      but not provided.
  """
  is_x_iterator = isinstance(
      input_data, (iterator_ops.Iterator, iterator_ops.OwnedIterator))
  # Cases 1-3 above: missing data, an iterator, symbolic tensors, or an empty
  # list all require an explicit `steps` value.
  if (input_data is None or is_x_iterator or has_symbolic_tensors(input_data) or
      (isinstance(input_data, list) and not input_data)):
    if steps is None:
      input_type_str = 'a Dataset iterator' if is_x_iterator else 'data tensors'
      raise ValueError('When using {input_type} as input to a model, you should'
                       ' specify the `{steps_name}` argument.'.format(
                           input_type=input_type_str, steps_name=steps_name))
    return True

  # Datasets are consumed step-wise as well, but can infer their own size.
  if isinstance(input_data, (dataset_ops.DatasetV1, dataset_ops.DatasetV2)):
    return True

  if steps is not None:
    list_types = (np.ndarray, list, tuple)
    # Array-like inputs are consumed via `batch_size`; warn that `steps`
    # will be ignored for them.
    if (isinstance(input_data, list_types) or
        (isinstance(input_data, dict) and
         any(isinstance(v, list_types) for v in input_data.values()))):
      logging.warning('When passing input data as arrays, do not specify '
                      '`steps_per_epoch`/`steps` argument. '
                      'Please use `batch_size` instead.')
  return False
def cast_single_tensor(x, dtype=None):
  """Casts `x` to `dtype` if `x` is floating point.

  Numpy arrays are first converted to tensors. Non-floating inputs are
  returned unchanged. When `dtype` is None the Keras default float type
  is used.
  """
  if isinstance(x, np.ndarray):
    x = ops.convert_to_tensor_v2(x)
  dtype = dtype or K.floatx()
  if not x.dtype.is_floating:
    return x
  return math_ops.cast(x, dtype=dtype)
def cast_if_floating_dtype_and_mismatch(targets, outputs):
  """Returns target data tensors using correct datatype.

  Checks that each target and output pair are the same datatype. If not, casts
  the target to the output's datatype.

  Args:
    targets: tensor or list of targets.
    outputs: tensor or list of outputs.

  Returns:
    Targets in appropriate datatype.
  """
  if tensor_util.is_tensor(targets):
    # A single tensor target corresponds to the sole output.
    return cast_single_tensor(targets, dtype=outputs[0].dtype)

  def _match_dtype(target, out):
    # Align one target with the dtype of its paired output.
    if isinstance(target, np.ndarray):
      target = ops.convert_to_tensor_v2(target)
    if target.dtype != out.dtype:
      return cast_single_tensor(target, dtype=out.dtype)
    return target

  return [_match_dtype(target, out) for target, out in zip(targets, outputs)]
def cast_if_floating_dtype(x, dtype=None):
  """Casts the given data tensors to the default floating point type.

  Casts only if the input is already a floating point type.

  Args:
    x: tensor or list/tuple of tensors.
    dtype: The dtype to which Tensors should be cast.

  Returns:
    Converted input.
  """
  def _cast(tensor):
    return cast_single_tensor(tensor, dtype=dtype)

  return nest.map_structure(_cast, x)
def cast_to_model_input_dtypes(x, model):
  """Casts the given data tensors to the dtypes of the model inputs.

  Args:
    x: tensor or list/tuple of tensors.
    model: The model.

  Returns:
    Converted input. Each tensor is casted to the corresponding input in
    `model.inputs`.
  """
  def _dtype_of(tensor):
    return tensor.dtype

  input_dtypes = nest.map_structure(_dtype_of, model.inputs)
  return nest.map_structure(math_ops.cast, x, input_dtypes)
def prepare_sample_weight_modes(training_endpoints, sample_weight_mode):
  """Prepares sample weight modes for the model.

  Args:
    training_endpoints: List of model _TrainingEndpoints.
    sample_weight_mode: sample weight mode user input passed from compile API.
      May be a single mode, a list/tuple with one mode per output, or a dict
      keyed by output name.

  Raises:
    ValueError: In case of invalid `sample_weight_mode` input.
  """
  # Local import: `collections.Mapping` (used previously) was removed in
  # Python 3.10; `collections.abc.Mapping` is the supported spelling.
  from collections import abc

  if isinstance(sample_weight_mode, abc.Mapping):
    generic_utils.check_for_unexpected_keys(
        'sample_weight_mode', sample_weight_mode,
        [e.output_name for e in training_endpoints])

    for end_point in training_endpoints:
      if not end_point.should_skip_target_weights():
        if end_point.output_name not in sample_weight_mode:
          raise ValueError('Output ' + end_point.output_name +
                           ' missing from `_sample_weight_modes` dictionary')
        else:
          end_point.sample_weight_mode = sample_weight_mode.get(
              end_point.output_name)
  elif isinstance(sample_weight_mode, (list, tuple)):
    if len(sample_weight_mode) != len(training_endpoints):
      raise ValueError('When passing a list as sample_weight_mode, '
                       'it should have one entry per model output. '
                       'The model has ' + str(len(training_endpoints)) +
                       ' outputs, but you passed ' +
                       str(len(sample_weight_mode)) + ' _sample_weight_modes.')
    for mode, endpoint in zip(sample_weight_mode, training_endpoints):
      if not endpoint.should_skip_target_weights():
        endpoint.sample_weight_mode = mode
  else:
    # A single mode (string or None) applies to every output.
    for endpoint in training_endpoints:
      if not endpoint.should_skip_target_weights():
        endpoint.sample_weight_mode = sample_weight_mode
def prepare_loss_functions(loss, output_names):
  """Converts loss to a list of loss functions.

  Arguments:
      loss: String (name of objective function), objective function or
        `tf.losses.Loss` instance. See `tf.losses`. If the model has multiple
        outputs, you can use a different loss on each output by passing a
        dictionary or a list of losses. The loss value that will be minimized by
        the model will then be the sum of all individual losses.
      output_names: List of model output names.

  Returns:
      A list of loss objective functions.

  Raises:
      ValueError: If loss is a dict with keys not in model output names,
          or if loss is a list with len not equal to model outputs.
  """
  if isinstance(loss, collections_abc.Mapping):
    # Dict input: one loss per named output. Outputs absent from the dict
    # get `None` (no loss) after logging a warning.
    generic_utils.check_for_unexpected_keys('loss', loss, output_names)
    loss_functions = []
    for name in output_names:
      if name not in loss:
        logging.warning(
            'Output {0} missing from loss dictionary. We assume '
            'this was done on purpose. The fit and evaluate APIs will not be '
            'expecting any data to be passed to {0}.'.format(name))
      loss_functions.append(get_loss_function(loss.get(name, None)))
  elif isinstance(loss, six.string_types):
    # Single loss name replicated across all outputs. Note: this branch must
    # precede the Sequence check, since strings are Sequences.
    loss_functions = [get_loss_function(loss) for _ in output_names]
  elif isinstance(loss, collections_abc.Sequence):
    if len(loss) != len(output_names):
      raise ValueError('When passing a list as loss, it should have one entry '
                       'per model outputs. The model has {} outputs, but you '
                       'passed loss={}'.format(len(output_names), loss))
    loss_functions = nest.map_structure(get_loss_function, loss)
  else:
    # Single loss object/callable replicated across all outputs.
    loss_functions = [get_loss_function(loss) for _ in range(len(output_names))]

  return loss_functions
def prepare_loss_weights(training_endpoints, loss_weights=None):
  """Converts loss weights to a list of loss weights.

  The result loss weights will be populated on the training endpoint.

  Arguments:
      training_endpoints: List of model training endpoints.
      loss_weights: Optional list or dictionary specifying scalar coefficients
        (Python floats) to weight the loss contributions of different model
        outputs. The loss value that will be minimized by the model will then be
        the *weighted sum* of all individual losses, weighted by the
        `loss_weights` coefficients. If a list, it is expected to have a 1:1
        mapping to the model's outputs. If a dict, it is expected to map
        output names (strings) to scalar coefficients.

  Raises:
      ValueError: If loss weight is a dict with key not in model output names,
          or if loss is a list with len not equal to model outputs.
      TypeError: If `loss_weights` is neither None, a mapping, nor a list.
  """
  # Local import: `collections.Mapping` (used previously) was removed in
  # Python 3.10; `collections.abc.Mapping` is the supported spelling.
  from collections import abc

  if loss_weights is None:
    # Default: every output contributes with weight 1.
    for e in training_endpoints:
      e.loss_weight = 1.
  elif isinstance(loss_weights, abc.Mapping):
    generic_utils.check_for_unexpected_keys(
        'loss_weights', loss_weights,
        [e.output_name for e in training_endpoints])
    for e in training_endpoints:
      e.loss_weight = loss_weights.get(e.output_name, 1.)
  elif isinstance(loss_weights, list):
    if len(loss_weights) != len(training_endpoints):
      raise ValueError('When passing a list as loss_weights, '
                       'it should have one entry per model output. '
                       'The model has ' + str(len(training_endpoints)) +
                       ' outputs, but you passed loss_weights=' +
                       str(loss_weights))
    for w, e in zip(loss_weights, training_endpoints):
      e.loss_weight = w
  else:
    # Previous message said "expected a list of dicts", which described
    # neither accepted form; a list OR a dict is what is accepted.
    raise TypeError('Could not interpret loss_weights argument: ' +
                    str(loss_weights) + ' - expected a list or a dict.')
# TODO(rohanj): This is a hack to get around not depending on feature_column and
# create a cyclical dependency. Figure out a cleaner solution
def is_feature_layer(layer):
  """Returns whether `layer` is a FeatureLayer or not."""
  # FeatureLayers advertise themselves via a private marker attribute;
  # objects without the marker are treated as regular layers.
  return getattr(layer, '_is_feature_layer', False)
def is_eager_dataset_or_iterator(data):
  """True iff executing eagerly and `data` is a tf.data dataset/iterator."""
  if not context.executing_eagerly():
    return False
  return isinstance(data, (dataset_ops.DatasetV1, dataset_ops.DatasetV2,
                           iterator_ops.OwnedIterator))
# pylint: disable=protected-access
def assert_not_batched(dataset):
  """Asserts that `dataset` is not batched.

  The algorithm used by this method is sound but not complete. In other words,
  if the method fails to establish the assertion, it does not mean the dataset
  is batched.

  Example usage:
  ```python
  try:
    assert_not_batched(dataset)
    # safe to assume `dataset` is not batched here
  except ValueError:
    # make no assumptions about `dataset`
  ```

  Args:
    dataset: The dataset to analyze.

  Raises:
    ValueError: If the method cannot establish the assertion.
  """
  if isinstance(dataset, dataset_ops.DatasetV1Adapter):
    # Unwrap the V1 compatibility adapter and analyze the wrapped dataset.
    return assert_not_batched(dataset._dataset)
  else:
    # Transformations known not to introduce batching. Note BatchDataset and
    # PaddedBatchDataset are deliberately absent.
    whitelisted_types = [
        dataset_ops._OptionsDataset,
        dataset_ops.ConcatenateDataset,
        dataset_ops.CacheDataset,
        dataset_ops.FilterDataset,
        dataset_ops.MapDataset,
        dataset_ops.ParallelMapDataset,
        dataset_ops.PrefetchDataset,
        dataset_ops.RangeDataset,
        dataset_ops.RepeatDataset,
        dataset_ops.ShuffleDataset,
        dataset_ops.SkipDataset,
        dataset_ops.SparseTensorSliceDataset,
        dataset_ops.TakeDataset,
        dataset_ops.TensorDataset,
        dataset_ops.TensorSliceDataset,
        dataset_ops.ZipDataset,
        readers.FixedLengthRecordDatasetV2,
        readers.TextLineDatasetV2,
        readers.TFRecordDatasetV2,
    ]
    for ty in whitelisted_types:
      if isinstance(dataset, ty):
        # A safe transformation is unbatched iff all of its inputs are.
        for input_dataset in dataset._inputs():
          assert_not_batched(input_dataset)
        return
    raise ValueError('Could not assert that dataset is not batched.')
# pylint: disable=protected-access
def assert_not_shuffled(dataset):
  """Asserts that `dataset` is not shuffled.

  The algorithm used by this method is sound but not complete. In other words,
  if the method fails to establish the assertion, it does not mean the dataset
  is shuffled.

  Example usage:
  ```python
  try:
    assert_not_shuffled(dataset)
    # safe to assume `dataset` is not shuffled here
  except ValueError:
    # make no assumptions about `dataset`
  ```

  Args:
    dataset: The dataset to analyze.

  Raises:
    ValueError: If the method cannot establish the assertion.
  """
  if isinstance(dataset, dataset_ops.DatasetV1Adapter):
    # Unwrap the V1 compatibility adapter and analyze the wrapped dataset.
    return assert_not_shuffled(dataset._dataset)
  else:
    # Transformations known not to introduce shuffling. Note ShuffleDataset
    # is deliberately absent.
    whitelisted_types = [
        dataset_ops._OptionsDataset,
        dataset_ops.BatchDataset,
        dataset_ops.ConcatenateDataset,
        dataset_ops.CacheDataset,
        dataset_ops.FilterDataset,
        dataset_ops.MapDataset,
        dataset_ops.PaddedBatchDataset,
        dataset_ops.ParallelMapDataset,
        dataset_ops.PrefetchDataset,
        dataset_ops.RangeDataset,
        dataset_ops.RepeatDataset,
        dataset_ops.SkipDataset,
        dataset_ops.SparseTensorSliceDataset,
        dataset_ops.TakeDataset,
        dataset_ops.TensorDataset,
        dataset_ops.TensorSliceDataset,
        dataset_ops.WindowDataset,
        dataset_ops.ZipDataset,
        readers.FixedLengthRecordDatasetV2,
        readers.TextLineDatasetV2,
        readers.TFRecordDatasetV2,
    ]
    for ty in whitelisted_types:
      if isinstance(dataset, ty):
        # A safe transformation is unshuffled iff all of its inputs are.
        for input_dataset in dataset._inputs():
          assert_not_shuffled(input_dataset)
        return
    raise ValueError('Could not assert that dataset is not shuffled.')
def verify_dataset_shuffled(x):
  """Verifies that the dataset is shuffled.

  Logs a warning (it does not raise) when it can be proven that the
  dataset is not shuffled.

  Args:
    x: Dataset passed as an input to the model.
  """
  assert isinstance(x, dataset_ops.DatasetV2)
  try:
    assert_not_shuffled(x)
  except ValueError:
    # Dataset may or may not be shuffled.
    return
  else:
    # `assert_not_shuffled` succeeded, i.e. the dataset provably lacks a
    # shuffle transformation.
    logging.warning('Expected a shuffled dataset but input dataset `x` is '
                    'not shuffled. Please invoke `shuffle()` on input dataset.')
def is_dataset_or_iterator(data):
  """Returns True if `data` is a tf.data Dataset (v1/v2) or iterator."""
  dataset_types = (dataset_ops.DatasetV1, dataset_ops.DatasetV2,
                   iterator_ops.Iterator, iterator_ops.OwnedIterator)
  return isinstance(data, dataset_types)
def get_iterator(dataset):
  """Create and initialize an iterator from a dataset."""
  if context.executing_eagerly():
    # One-shot iterators need no explicit initialization in eager mode.
    return dataset_ops.make_one_shot_iterator(dataset)
  iterator = dataset_ops.make_initializable_iterator(dataset)
  initialize_iterator(iterator)
  return iterator
def initialize_iterator(iterator):
  """Runs the iterator's initializer op (no-op in eager mode)."""
  if context.executing_eagerly():
    return
  init_op = iterator.initializer
  K.get_session((init_op,)).run(init_op)
def extract_tensors_from_dataset(dataset):
  """Extract a tuple of tensors `inputs, targets, sample_weight` from a dataset.

  Arguments:
    dataset: Dataset instance.

  Returns:
    Tuple of tensors `x, y, weights`. `y` and `weights` entry may be None.
  """
  return unpack_iterator_input(get_iterator(dataset))
def unpack_iterator_input(iterator):
  """Convert a dataset iterator to a tuple of tensors `x, y, sample_weights`.

  Arguments:
    iterator: Instance of a dataset iterator.

  Returns:
    Tuple of tensors `x, y, weights`. `y` and `weights` entry may be None.
  """
  try:
    next_element = iterator.get_next()
  except errors.OutOfRangeError:
    raise RuntimeError('Your dataset iterator ran out of data; '
                       'Make sure that your dataset can generate '
                       'required number of samples.')

  if not isinstance(next_element, (list, tuple)):
    # Bare element: inputs only, no targets or weights.
    return next_element, None, None

  num_parts = len(next_element)
  if num_parts == 2:
    x, y = next_element
    return x, y, None
  if num_parts == 3:
    x, y, weights = next_element
    return x, y, weights
  raise ValueError(
      'Please provide model inputs as a list or tuple of 2 or 3 '
      'elements: (input, target) or (input, target, sample_weights) '
      'Received %s' % next_element)
def infer_steps_for_dataset(model,
                            dataset,
                            steps,
                            epochs=1,
                            steps_name='steps'):
  """Infers steps_per_epoch needed to loop through a dataset.

  Arguments:
      model: Keras model instance.
      dataset: Input data of type tf.data.Dataset.
      steps: Number of steps to draw from the dataset (may be None if unknown).
      epochs: Number of times to iterate over the dataset.
      steps_name: The string name of the steps argument, either `steps`,
        `validation_steps`, or `steps_per_epoch`. Only used for error message
        formatting.

  Returns:
    Integer or `None`. Inferred number of steps to loop through the dataset.
    `None` is returned if 1) the size of the dataset is unknown and `steps` was
    not specified, or 2) this is multi-worker training and auto sharding is
    enabled.

  Raises:
    ValueError: In case of invalid argument values.
  """
  assert isinstance(dataset, dataset_ops.DatasetV2)
  if (model._in_multi_worker_mode() and
      (dataset.options().experimental_distribute.auto_shard_policy !=
       AutoShardPolicy.OFF)):
    # If the dataset would be auto-sharded, we should not infer a local
    # steps_per_epoch due to the possible inbalanced sharding between workers.
    return None

  # `size >= 0` means cardinality is statically known; negative sentinel
  # values indicate infinite or unknown cardinality.
  size = K.get_value(cardinality.cardinality(dataset))
  if size == cardinality.INFINITE and steps is None:
    raise ValueError('When passing an infinitely repeating dataset, you '
                     'must specify the `%s` argument.' % (steps_name,))
  if size >= 0:
    # Reject requests that draw more batches than the dataset contains.
    if steps is not None and steps * epochs > size:
      if epochs > 1:
        raise ValueError('The dataset you passed contains %s batches, but you '
                         'passed `epochs=%s` and `%s=%s`, which is a total of '
                         '%s steps. We cannot draw that many steps from this '
                         'dataset. We suggest to set `%s=%s`.' %
                         (size, epochs, steps_name, steps, steps * epochs,
                          steps_name, size // epochs))
      else:
        raise ValueError('The dataset you passed contains %s batches, but you '
                         'passed `%s=%s`. We cannot draw that many steps from '
                         'this dataset. We suggest to set `%s=%s`.' %
                         (size, steps_name, steps, steps_name, size))
  if steps is None:
    if size >= 0:
      return size
    return None
  return steps
class ModelInputs(object):
  """Encapsulates model inputs.

  Allows for transforming model inputs while keeping the same structure.
  """

  def __init__(self, inputs):
    """Flattens `inputs` (dict, list/tuple, or single value) and names them.

    Args:
      inputs: Model input(s): a dict keyed by input name, a list/tuple of
        inputs, or a single input.
    """
    self._inputs = inputs
    self._is_dict = isinstance(self._inputs, dict)
    self._is_single_input = not isinstance(self._inputs, (list, tuple, dict))

    self._flattened_inputs = []
    self._input_names = []

    if self._is_dict:
      # Sort keys so the flattened order is deterministic.
      for k in sorted(self._inputs.keys()):
        self._flattened_inputs.append(self._inputs[k])
        self._input_names.append(k)
    else:
      self._flattened_inputs = nest.flatten(self._inputs)
      # Positional inputs are named input_1, input_2, ...
      self._input_names = [
          'input_%d' % (i + 1) for i in range(len(self._flattened_inputs))
      ]

  def get_input_names(self):
    """Returns keys to name inputs by.

    In case inputs provided were a list, tuple or single entry, we make up a
    key 'input_%d'. For dictionary case, we return a sorted list of keys.
    """
    return self._input_names

  def get_symbolic_inputs(self, return_single_as_list=False):
    """Returns inputs to be set as self.inputs for a model."""
    # TODO(karmel): There is a side-effect here where what you get
    # with as_list and as_dict depends on whether you have called this
    # method first, since it modifies in place.
    for i, (k, v) in enumerate(zip(self._input_names, self._flattened_inputs)):
      if isinstance(v, (list, float, int)):
        v = np.asarray(v)
        if v.ndim == 1:
          # Rank-1 values are treated as a batch of scalar features.
          v = np.expand_dims(v, 1)

      if isinstance(v, (np.ndarray, ops.EagerTensor)):
        # We fix the placeholder shape except the batch size.
        # This is suboptimal, but it is the best we can do with the info
        # we have. The user should call `model._set_inputs(placeholders)`
        # to specify custom placeholders if the need arises.
        shape = (None,) + tuple(v.shape[1:])
        if shape == (None,):
          shape = (None, 1)
        dtype = dtypes.as_dtype(v.dtype)
        if dtype.is_floating:
          # Standardize floating inputs on the Keras default float type.
          dtype = K.floatx()
        v = K.placeholder(shape=shape, name=k, dtype=dtype)
      elif isinstance(v, tensor_spec.TensorSpec):
        shape = (None,) + tuple(v.shape.as_list()[1:])
        if shape == (None,):
          shape = (None, 1)
        v = K.placeholder(shape=shape, name=k, dtype=v.dtype)

      self._flattened_inputs[i] = v

    if self._is_dict:
      return dict(zip(self._input_names, self._flattened_inputs))
    if self._is_single_input and not return_single_as_list:
      return self._flattened_inputs[0]
    return self._flattened_inputs

  def as_dict(self):
    """An iterable over a dictionary version of inputs."""
    for k, v in zip(self._input_names, self._flattened_inputs):
      yield k, v

  def as_list(self):
    """Returning the inputs as a list."""
    return self._flattened_inputs
# Allow use of methods not exposed to the user.
# pylint: disable=protected-access
def get_input_shape_and_dtype(layer):
  """Retrieves input shape and input dtype of layer if applicable.

  Args:
    layer: Layer (or model) instance.

  Returns:
    Tuple (input_shape, input_dtype). Both could be None if the layer
      does not have a defined input shape.

  Raises:
    ValueError: in case an empty Sequential or Functional model is passed.
  """
  def _is_graph_model(candidate):
    if getattr(candidate, '_is_graph_network', False):
      return True
    return candidate.__class__.__name__ == 'Sequential'

  # Drill into nested models: the first layer of the deepest model carries
  # the input shape and dtype. Subclassed Models may not have been built,
  # so they can't be checked.
  while _is_graph_model(layer):
    if not layer.layers:
      raise ValueError('An empty Model cannot be used as a Layer.')
    layer = layer.layers[0]

  if hasattr(layer, '_batch_input_shape'):
    return layer._batch_input_shape, layer.dtype
  return None, None
# pylint: enable=protected-access
def get_static_batch_size(layer):
  """Gets the static batch size of a Layer.

  Arguments:
    layer: a `Layer` instance.

  Returns:
    The static batch size of a Layer, or None when no input shape is defined.
  """
  batch_input_shape, _ = get_input_shape_and_dtype(layer)
  if batch_input_shape is None:
    return None
  return tensor_shape.as_dimension(batch_input_shape[0]).value
def generic_output_names(outputs_list):
  """Returns default names ('output_1', 'output_2', ...) for model outputs."""
  return ['output_%d' % idx for idx in range(1, len(outputs_list) + 1)]
def convert_eager_tensors_to_numpy(structure):
  """Convert every EagerTensor in `structure` to NumPy.

  Arguments:
    structure: An arbitrary structure of elements to be converted to NumPy
      arrays.

  Returns:
    An identical structure with EagerTensors converted to NumPy arrays.
  """
  def _to_numpy(element):
    # Non-tensor leaves pass through unchanged.
    return element.numpy() if isinstance(element, ops.EagerTensor) else element

  return nest.map_structure(_to_numpy, structure)
def list_to_tuple(maybe_list):
  """Datasets will stack the list of tensor, so switch them to tuples."""
  return tuple(maybe_list) if isinstance(maybe_list, list) else maybe_list
def should_run_validation(validation_freq, epoch):
  """Checks if validation should be run this epoch.

  Arguments:
    validation_freq: Integer or list. If an integer, specifies how many training
      epochs to run before a new validation run is performed. If a list,
      specifies the epochs on which to run validation.
    epoch: Integer, the number of the training epoch just completed.

  Returns:
    Bool, True if validation should be run.

  Raises:
    ValueError: if `validation_freq` is an Integer and less than 1, or if
      it is neither an Integer nor a Sequence.
  """
  # `epoch` is 0-indexed internally but 1-indexed in the public API.
  one_indexed_epoch = epoch + 1

  if isinstance(validation_freq, int):
    if validation_freq < 1:
      raise ValueError('`validation_freq` can not be less than 1.')
    return one_indexed_epoch % validation_freq == 0

  if isinstance(validation_freq, collections_abc.Container):
    return one_indexed_epoch in validation_freq
  raise ValueError('`validation_freq` must be an Integer or '
                   '`collections_abc.Container` (e.g. list, tuple, etc.)')
def split_training_and_validation_data(x, y, sample_weights, validation_split):
  """Split input data into train/eval section based on validation_split.

  Args:
    x: Input data (indexable; `x[0]` is the first input array/list).
    y: Target data, sliced the same way as `x`.
    sample_weights: Sample-weight arrays, or a falsy value when absent.
    validation_split: Float in (0, 1); fraction of samples (taken from the
      end) used for validation.

  Returns:
    Tuple `(x, y, sample_weights, val_x, val_y, val_sample_weights)`.

  Raises:
    ValueError: If `x` holds symbolic tensors, which cannot be sliced.
  """
  if has_symbolic_tensors(x):
    raise ValueError('If your data is in the form of symbolic tensors, '
                     'you cannot use `validation_split`.')
  # Compute the split index from the first input's sample count; the last
  # `validation_split` fraction of samples becomes the validation set.
  if hasattr(x[0], 'shape'):
    split_at = int(x[0].shape[0] * (1. - validation_split))
  else:
    split_at = int(len(x[0]) * (1. - validation_split))
  x, val_x = (generic_utils.slice_arrays(x, 0, split_at),
              generic_utils.slice_arrays(x, split_at))
  y, val_y = (generic_utils.slice_arrays(y, 0, split_at),
              generic_utils.slice_arrays(y, split_at))
  if sample_weights:
    sample_weights, val_sample_weights = (
        generic_utils.slice_arrays(sample_weights, 0, split_at),
        generic_utils.slice_arrays(sample_weights, split_at),
    )
  else:
    val_sample_weights = None
  return x, y, sample_weights, val_x, val_y, val_sample_weights
def unpack_validation_data(validation_data, raise_if_ambiguous=True):
  """Unpack validation data based input type.

  The validation data is not touched if its dataset or dataset iterator.
  For other type of input (Numpy or tensor), it will be unpacked into tuple of
  3 which is x, y and sample weights.

  Args:
    validation_data: dataset, dataset iterator, or numpy, tensor tuple.
    raise_if_ambiguous: boolean on whether to fail if validation_data cannot be
      parsed. Otherwise simply return validation_data, None, None and defer the
      decision to the caller.

  Returns:
    tuple of 3, (x, y, sample_weights) for numpy and tensor input.
  """
  if (isinstance(validation_data, (iterator_ops.Iterator,
                                   iterator_ops.OwnedIterator,
                                   dataset_ops.DatasetV2,
                                   data_utils.Sequence))
      or not hasattr(validation_data, '__len__')):
    # Datasets/iterators/sequences generate their own targets and weights.
    val_x = validation_data
    val_y = None
    val_sample_weight = None
  elif len(validation_data) == 2:
    try:
      val_x, val_y = validation_data  # pylint: disable=unpacking-non-sequence
      val_sample_weight = None
    except ValueError:
      val_x, val_y, val_sample_weight = validation_data, None, None
  elif len(validation_data) == 3:
    try:
      val_x, val_y, val_sample_weight = validation_data  # pylint: disable=unpacking-non-sequence
    except ValueError:
      val_x, val_y, val_sample_weight = validation_data, None, None
  else:
    if raise_if_ambiguous:
      # Fixed message: previously read "a dataset or a dataset or a dataset
      # iterator" due to a duplicated fragment.
      raise ValueError(
          'When passing a `validation_data` argument, '
          'it must contain either 2 items (x_val, y_val), '
          'or 3 items (x_val, y_val, val_sample_weights), '
          'or alternatively it could be a dataset or a '
          'dataset iterator. '
          'However we received `validation_data=%s`' % validation_data)
    val_x, val_y, val_sample_weight = validation_data, None, None
  return val_x, val_y, val_sample_weight
class TrainingLoop(object):
  """TrainingLoop is a wrapper class around the training logic.

  This class is trying to encapsulate the different logic of fit/eval/predict
  with regard to different data input and model condition.

  Note that TrainingLoop is stateless, which means it doesn't contain any
  internal field and can be reused with different model and inputs.
  """

  def fit(self,
          model,
          x=None,
          y=None,
          batch_size=None,
          epochs=1,
          verbose=1,
          callbacks=None,
          validation_split=0.,
          validation_data=None,
          shuffle=True,
          class_weight=None,
          sample_weight=None,
          initial_epoch=0,
          steps_per_epoch=None,
          validation_steps=None,
          validation_freq=1,
          **kwargs):
    """Train the model with the inputs and targets."""
    raise NotImplementedError()

  def evaluate(self,
               model,
               x=None,
               y=None,
               batch_size=None,
               verbose=1,
               sample_weight=None,
               steps=None,
               callbacks=None,
               **kwargs):
    """Returns the loss value & metrics values for the model in test mode."""
    raise NotImplementedError()

  def predict(self,
              model,
              x,
              batch_size=None,
              verbose=0,
              steps=None,
              callbacks=None,
              **kwargs):
    """Abstract hook for generating model outputs; see concrete subclasses."""
    raise NotImplementedError()
| xzturn/tensorflow | tensorflow/python/keras/engine/training_utils.py | Python | apache-2.0 | 81,689 |
import numpy as np
import copy
from copy import deepcopy
#normalize the list into range [-1,1]
def linear_normalization(data):
normalization = deepcopy(data)
for index, datai in enumerate(normalization):
datai = float(datai)
normalization[index] = datai
max = np.max(normalization)
min = np.min(normalization)
for index, datai in enumerate(data):
datai = 2*(datai-min)/(max-min)-1
normalization[index] = datai
return normalization
#normalize the list by substracting mean and divided by standard deviation (zscore)
#normalize the list by substracting mean and divided by standard deviation (zscore)
def z_score(data):
    """Standardize `data`: subtract the mean, divide by the population std.

    Returns a new list; the input sequence is left untouched.
    """
    center = np.mean(data)
    spread = np.std(data)
    return [(value - center) / spread for value in data]
# Demo: normalize a small sample list with both schemes (runs on import).
samplelist = [1, 2, 3, 4, 5]
linearsample = linear_normalization(samplelist)  # values rescaled into [-1, 1]
zscoresample = z_score(samplelist)  # zero-mean, unit-variance values
| kinglogxzl/rqalpha | build/lib/rqalpha/data/dtsk_python_interface/utility/normalization.py | Python | apache-2.0 | 958 |
import binascii
import StringIO
class PKCS7Encoder(object):
    '''
    RFC 2315: PKCS#7 page 21
    Some content-encryption algorithms assume the
    input length is a multiple of k octets, where k > 1, and
    let the application define a method for handling inputs
    whose lengths are not a multiple of k octets. For such
    algorithms, the method shall be to pad the input at the
    trailing end with k - (l mod k) octets all having value k -
    (l mod k), where l is the length of the input. In other
    words, the input is padded at the trailing end with one of
    the following strings:

             01 -- if l mod k = k-1
            02 02 -- if l mod k = k-2
                        .
                        .
                        .
          k k ... k k -- if l mod k = 0

    The padding can be removed unambiguously since all input is
    padded and no padding string is a suffix of another. This
    padding method is well-defined if and only if k < 256;
    methods for larger k are an open issue for further study.

    NOTE(review): this class targets Python 2 (`StringIO`, `xrange`,
    byte-string indexing yielding 1-char strings) — confirm before running
    under Python 3.
    '''
    def __init__(self, k=16):
        # k is the cipher block size in octets; PKCS#7 requires k < 256.
        self.k = k

    ## @param text The padded text for which the padding is to be removed.
    # @exception ValueError Raised when the input padding is missing or corrupt.
    def decode(self, text):
        '''
        Remove the PKCS#7 padding from a text string
        '''
        nl = len(text)
        # Last octet's value equals the number of padding octets.
        val = int(binascii.hexlify(text[-1]), 16)
        if val > self.k:
            raise ValueError('Input is not padded or padding is corrupt')
        # NOTE(review): only the final byte is validated; val == 0 and
        # padding bytes that disagree with the final byte are not detected.
        # Full PKCS#7 validation would check all `val` trailing bytes.
        l = nl - val
        return text[:l]

    ## @param text The text to encode.
    def encode(self, text):
        '''
        Pad an input string according to PKCS#7
        '''
        l = len(text)
        output = StringIO.StringIO()
        # Number of padding octets; always in 1..k (a full block when the
        # input length is already a multiple of k).
        val = self.k - (l % self.k)
        for _ in xrange(val):
            # Accumulate the padding as hex text, then convert to bytes once.
            output.write('%02x' % val)
        return text + binascii.unhexlify(output.getvalue())
| mooosu/python-utils | encrypt/pkcs7.py | Python | apache-2.0 | 1,944 |
"""Listens to Treadmill server events.
There is single event manager process per server node.
Each server subscribes to the content of /servers/<servername> Zookeeper node.
The content contains the list of all apps currently scheduled to run on the
server.
Applications that are scheduled to run on the server are mirrored in the
'cache' directory.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import glob
import io
import logging
import os
import time
import kazoo
import kazoo.client
from treadmill import appenv
from treadmill import context
from treadmill import fs
from treadmill import sysinfo
from treadmill import utils
from treadmill import yamlwrapper as yaml
from treadmill import zknamespace as z
from treadmill import zkutils
_LOGGER = logging.getLogger(__name__)

# Interval between placement checks / watchdog heartbeats, in seconds.
_HEARTBEAT_SEC = 30
# Watchdog lease expires after four missed heartbeats.
_WATCHDOG_TIMEOUT_SEC = _HEARTBEAT_SEC * 4

# Name of the readiness marker file — presumably created in the cache
# directory by `_cache_notify`; confirm against the rest of the module.
READY_FILE = '.ready'
class EventMgr:
    """Mirror Zookeeper scheduler event into node app cache events.

    Watches the server's presence and placement nodes in Zookeeper and
    keeps the local ``cache`` directory in sync with the list of app
    instances scheduled on this server.  A ``.ready`` marker file in the
    cache directory signals downstream consumers that the cache reflects
    live Zookeeper state.
    """

    # No per-instance __dict__: the service only carries the app
    # environment handle and the local hostname.
    __slots__ = (
        'tm_env',
        '_hostname',
    )

    def __init__(self, root):
        """Initialize the event manager.

        :param ``str`` root:
            Path to the Treadmill application environment root.
        """
        _LOGGER.info('init eventmgr: %s', root)
        self.tm_env = appenv.AppEnvironment(root=root)
        self._hostname = sysinfo.hostname()

    @property
    def name(self):
        """Name of the EventMgr service.
        """
        return self.__class__.__name__

    def run(self, once=False):
        """Establish connection to Zookeeper and subscribes to node events.

        :param ``bool`` once:
            If ``True``, exit after a single heartbeat iteration instead
            of looping forever.
        """
        # Setup the watchdog.  If this service stops heartbeating for
        # _WATCHDOG_TIMEOUT_SEC, the node watchdog fires.
        watchdog_lease = self.tm_env.watchdogs.create(
            name='svc-{svc_name}'.format(svc_name=self.name),
            timeout='{hb:d}s'.format(hb=_WATCHDOG_TIMEOUT_SEC),
            content='Service %r failed' % self.name
        )
        # Start the timer
        watchdog_lease.heartbeat()

        zkclient = context.GLOBAL.zk.conn
        zkclient.add_listener(zkutils.exit_on_lost)

        # The cache is "ready" only when BOTH the presence node and the
        # placement node for this host exist in Zookeeper.
        presence_ready = zkclient.handler.event_object()
        presence_ready.clear()

        placement_ready = zkclient.handler.event_object()
        placement_ready.clear()

        def _is_ready():
            # Combined readiness of presence and placement.
            return presence_ready.is_set() and placement_ready.is_set()

        @zkclient.DataWatch(z.path.server_presence(self._hostname))
        @utils.exit_on_unhandled
        def _server_presence_watch(data, _stat, event):
            """Watch server presence."""
            if data is None and event is None:
                # Initial callback with no node present yet.
                _LOGGER.info('Presence node not found, waiting.')
                presence_ready.clear()
            elif event is not None and event.type == 'DELETED':
                _LOGGER.info('Presence node deleted.')
                presence_ready.clear()
            else:
                _LOGGER.info('Presence node found.')
                presence_ready.set()
            self._cache_notify(_is_ready())
            # Returning True keeps the DataWatch registered.
            return True

        @utils.exit_on_unhandled
        def _app_watch(apps):
            """Watch application placement."""
            # Until the first placement event is processed, also
            # re-validate entries already sitting in the cache directory.
            self._synchronize(
                zkclient, apps, check_existing=not placement_ready.is_set()
            )
            return True

        def _check_placement():
            # Register the placement ChildrenWatch once the node exists;
            # polled from the heartbeat loop below.
            if placement_ready.is_set():
                return

            if zkclient.exists(z.path.placement(self._hostname)):
                _LOGGER.info('Placement node found.')
                zkclient.ChildrenWatch(
                    z.path.placement(self._hostname), _app_watch
                )
                placement_ready.set()
                self._cache_notify(_is_ready())
            else:
                _LOGGER.info('Placement node not found, waiting.')

        while True:
            _check_placement()

            # Refresh watchdog
            watchdog_lease.heartbeat()
            time.sleep(_HEARTBEAT_SEC)
            # Re-publish readiness every heartbeat so a restarted
            # consumer (see _cache_notify) is notified again.
            self._cache_notify(_is_ready())

            if once:
                break

        # Graceful shutdown.
        _LOGGER.info('service shutdown.')
        watchdog_lease.remove()

    def _synchronize(self, zkclient, expected, check_existing=False):
        """Synchronize local app cache with the expected list.

        :param ``list`` expected:
            List of instances expected to be running on the server.
        :param ``bool`` check_existing:
            Whether to check if the already existing entries are up to date.
        """
        expected_set = set(expected)
        # Note: glob('*') does not match dotfiles, so the '.ready'
        # marker is never mistaken for an app manifest.
        current_set = {
            os.path.basename(manifest)
            for manifest in glob.glob(os.path.join(self.tm_env.cache_dir, '*'))
        }
        extra = current_set - expected_set
        missing = expected_set - current_set
        existing = current_set & expected_set

        _LOGGER.info('expected : %s', ','.join(expected_set))
        _LOGGER.info('actual : %s', ','.join(current_set))
        _LOGGER.info('extra : %s', ','.join(extra))
        _LOGGER.info('missing : %s', ','.join(missing))

        # If app is extra, remove the entry from the cache
        for app in extra:
            manifest = os.path.join(self.tm_env.cache_dir, app)
            os.unlink(manifest)

        # If app is missing, fetch its manifest in the cache
        for app in missing:
            self._cache(zkclient, app)

        if check_existing:
            _LOGGER.info('existing : %s', ','.join(existing))
            for app in existing:
                self._cache(zkclient, app, check_existing=True)

    def _cache(self, zkclient, app, check_existing=False):
        """Read the manifest and placement data from Zk and store it as YAML in
        <cache>/<app>.

        :param ``str`` app:
            Instance name.
        :param ``bool`` check_existing:
            Whether to check if the file already exists and is up to date.
        """
        placement_node = z.path.placement(self._hostname, app)
        try:
            placement_data, placement_metadata = zkutils.get_with_metadata(
                zkclient, placement_node
            )
            # Zookeeper ctime is in milliseconds; convert to seconds to
            # compare against the local file's st_ctime.
            placement_time = placement_metadata.ctime / 1000.0
        except kazoo.exceptions.NoNodeError:
            _LOGGER.info('Placement %s/%s not found', self._hostname, app)
            return

        manifest_file = os.path.join(self.tm_env.cache_dir, app)
        if check_existing:
            try:
                manifest_time = os.stat(manifest_file).st_ctime
            except FileNotFoundError:
                manifest_time = None
            # Skip the rewrite when the cached manifest is at least as
            # new as the placement node.
            if manifest_time and manifest_time >= placement_time:
                _LOGGER.info('%s is up to date', manifest_file)
                return

        app_node = z.path.scheduled(app)
        try:
            manifest = zkutils.get(zkclient, app_node)
            # TODO: need a function to parse instance id from name.
            manifest['task'] = app[app.index('#') + 1:]

            if placement_data is not None:
                manifest.update(placement_data)

            # write_safe writes to a temp file and renames, so readers
            # never see a partially written manifest.
            fs.write_safe(
                manifest_file,
                lambda f: yaml.dump(manifest, stream=f),
                prefix='.%s-' % app,
                mode='w',
                permission=0o644
            )
            _LOGGER.info('Created cache manifest: %s', manifest_file)

        except kazoo.exceptions.NoNodeError:
            _LOGGER.info('App %s not found', app)

    def _cache_notify(self, is_ready):
        """Send a cache status notification event.

        Note: this needs to be an event, not a once time state change so
        that if appcfgmgr restarts after we enter the ready state, it will
        still get notified that we are ready.

        :params ``bool`` is_ready:
            True if the cache folder is ready.
        """
        _LOGGER.debug('cache notify (ready: %r)', is_ready)
        ready_file = os.path.join(self.tm_env.cache_dir, READY_FILE)

        if is_ready:
            # Mark the cache folder as ready.
            with io.open(ready_file, 'w'):
                pass
        else:
            # Mark the cache folder as outdated.
            fs.rm_safe(ready_file)
| Morgan-Stanley/treadmill | lib/python/treadmill/eventmgr.py | Python | apache-2.0 | 8,107 |
"""Config flow for Mailgun."""
from homeassistant.helpers import config_entry_flow
from .const import DOMAIN
# Register a webhook-based config flow for the Mailgun integration.
# NOTE(review): presumably the dict values are substituted into the
# flow's description placeholders shown in the UI — verify against
# config_entry_flow.register_webhook_flow.
config_entry_flow.register_webhook_flow(
    DOMAIN,
    "Mailgun Webhook",
    {
        "mailgun_url": "https://documentation.mailgun.com/en/latest/user_manual.html#webhooks",  # noqa: E501 pylint: disable=line-too-long
        "docs_url": "https://www.home-assistant.io/components/mailgun/",
    },
)
| fbradyirl/home-assistant | homeassistant/components/mailgun/config_flow.py | Python | apache-2.0 | 415 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Synchronize replicas for training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import types_pb2
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import optimizer
from tensorflow.python.training import queue_runner
# Please note that the gradients from replicas are averaged instead of summed
# (as in the old sync_replicas_optimizer) so you need to increase the learning
# rate according to the number of replicas. This change is introduced to be
# consistent with how gradients are aggregated (averaged) within a batch in a
# replica.
class SyncReplicasOptimizerV2(optimizer.Optimizer):
  """Class to synchronize, aggregate gradients and pass them to the optimizer.

  In a typical asynchronous training environment, it's common to have some
  stale gradients. For example, with a N-replica asynchronous training,
  gradients will be applied to the variables N times independently. Depending
  on each replica's training speed, some gradients might be calculated from
  copies of the variable from several steps back (N-1 steps on average). This
  optimizer avoids stale gradients by collecting gradients from all replicas,
  averaging them, then applying them to the variables in one shot, after
  which replicas can fetch the new variables and continue.

  The following accumulators/queue are created:

  * N `gradient accumulators`, one per variable to train. Gradients are pushed
    to them and the chief worker will wait until enough gradients are collected
    and then average them before applying to variables. The accumulator will
    drop all stale gradients (more details in the accumulator op).
  * 1 `token` queue where the optimizer pushes the new global_step value after
    all variables are updated.

  The following local variable is created:
  * `sync_rep_local_step`, one per replica. Compared against the global_step in
    each accumulator to check for staleness of the gradients.

  The optimizer adds nodes to the graph to collect gradients and pause the
  trainers until variables are updated.
  For the Parameter Server job:

  1. An accumulator is created for each variable, and each replica pushes the
     gradients into the accumulators instead of directly applying them to the
     variables.
  2. Each accumulator averages once enough gradients (replicas_to_aggregate)
     have been accumulated.
  3. Apply the averaged gradients to the variables.
  4. Only after all variables have been updated, increment the global step.
  5. Only after step 4, pushes `global_step` in the `token_queue`, once for
     each worker replica. The workers can now fetch the global step, use it to
     update its local_step variable and start the next batch.

  For the replicas:

  1. Start a step: fetch variables and compute gradients.
  2. Once the gradients have been computed, push them into gradient
     accumulators. Each accumulator will check the staleness and drop the stale.
  3. After pushing all the gradients, dequeue an updated value of global_step
     from the token queue and record that step to its local_step variable. Note
     that this is effectively a barrier.
  4. Start the next batch.

  ### Usage

  ```python
  # Create any optimizer to update the variables, say a simple SGD:
  opt = GradientDescentOptimizer(learning_rate=0.1)

  # Wrap the optimizer with sync_replicas_optimizer with 50 replicas: at each
  # step the optimizer collects 50 gradients before applying to variables.
  # Note that if you want to have 2 backup replicas, you can change
  # total_num_replicas=52 and make sure this number matches how many physical
  # replicas you started in your job.
  opt = tf.SyncReplicasOptimizerV2(opt, replicas_to_aggregate=50,
                                   total_num_replicas=50)

  # Some models have startup_delays to help stabilize the model but when using
  # sync_replicas training, set it to 0.

  # Now you can call `minimize()` or `compute_gradients()` and
  # `apply_gradients()` normally
  grads = opt.minimize(total_loss, global_step=self.global_step)

  # You can now call get_init_tokens_op() and get_chief_queue_runner().
  # Note that get_init_tokens_op() must be called before creating session
  # because it modifies the graph by adding new nodes.
  init_token_op = opt.get_init_tokens_op()
  chief_queue_runner = opt.get_chief_queue_runner()
  ```

  In the training program, every worker will run the train_op as if not
  synchronized. But one worker (usually the chief) will need to execute the
  chief_queue_runner and get_init_tokens_op from this optimizer.

  ```python
  # When you create the supervisor, you need to add the local_init_op and
  # ready_for_local_init_op to make sure the local_step is initialized to the
  # global_step. Here is an example:
  sv = tf.Supervisor(graph=g,
                     is_chief=is_chief,
                     # This initialize local step.
                     local_init_op=local_init_op,
                     # This makes sure global step is initialized before using.
                     ready_for_local_init_op=ready_for_local_init_op,
                     saver=model.saver)

  # After the session is created by the Supervisor and before the main while
  # loop:
  if is_chief and FLAGS.sync_replicas:
    sv.start_queue_runners(sess, [chief_queue_runner])
    # Insert initial tokens to the queue.
    sess.run(init_token_op)
  ```

  @@__init__
  @@compute_gradients
  @@apply_gradients
  @@get_chief_queue_runner
  @@get_init_tokens_op
  """

  def __init__(self,
               opt,
               replicas_to_aggregate,
               total_num_replicas=None,
               variable_averages=None,
               variables_to_average=None,
               use_locking=False,
               name="sync_replicas"):
    """Construct a sync_replicas optimizer.

    Args:
      opt: The actual optimizer that will be used to compute and apply the
        gradients. Must be one of the Optimizer classes.
      replicas_to_aggregate: number of replicas to aggregate for each variable
        update.
      total_num_replicas: Total number of tasks/workers/replicas, could be
        different from replicas_to_aggregate.
        If total_num_replicas > replicas_to_aggregate: it is backup_replicas +
        replicas_to_aggregate.
        If total_num_replicas < replicas_to_aggregate: Replicas compute
        multiple batches per update to variables.
      variable_averages: Optional `ExponentialMovingAverage` object, used to
        maintain moving averages for the variables passed in
        `variables_to_average`.
      variables_to_average: a list of variables that need to be averaged. Only
        needed if variable_averages is passed in.
      use_locking: If True use locks for update operation.
      name: string. Optional name of the returned operation.
    """
    # Default: no backup replicas and no multi-batch updates.
    if total_num_replicas is None:
      total_num_replicas = replicas_to_aggregate

    super(SyncReplicasOptimizerV2, self).__init__(use_locking, name)
    logging.info(
        "SyncReplicasV2: replicas_to_aggregate=%s; total_num_replicas=%s",
        replicas_to_aggregate, total_num_replicas)
    self._opt = opt
    self._replicas_to_aggregate = replicas_to_aggregate
    self._gradients_applied = False
    self._variable_averages = variable_averages
    self._variables_to_average = variables_to_average
    self._total_num_replicas = total_num_replicas
    # Enough tokens per step to unblock every replica, whichever of the
    # two counts is larger.
    self._tokens_per_step = max(total_num_replicas, replicas_to_aggregate)
    self._global_step = None
    self._sync_token_queue = None

    # The synchronization op will be executed in a queue runner which should
    # only be executed by one of the replicas (usually the chief).
    self._chief_queue_runner = None

    # Remember which accumulator is on which device to set the initial step in
    # the accumulator to be global step. This list contains list of the
    # following format: (accumulator, device).
    self._accumulator_list = []

  def compute_gradients(self, *args, **kwargs):
    """Compute gradients of "loss" for the variables in "var_list".

    This simply wraps the compute_gradients() from the real optimizer. The
    gradients will be aggregated in the apply_gradients() so that user can
    modify the gradients like clipping with per replica global norm if needed.
    The global norm with aggregated gradients can be bad as one replica's huge
    gradients can hurt the gradients from other replicas.

    Args:
      *args: Arguments for compute_gradients().
      **kwargs: Keyword arguments for compute_gradients().

    Returns:
      A list of (gradient, variable) pairs.
    """
    return self._opt.compute_gradients(*args, **kwargs)

  def apply_gradients(self, grads_and_vars, global_step=None, name=None):
    """Apply gradients to variables.

    This contains most of the synchronization implementation and also wraps the
    apply_gradients() from the real optimizer.

    Args:
      grads_and_vars: List of (gradient, variable) pairs as returned by
        compute_gradients().
      global_step: Optional Variable to increment by one after the
        variables have been updated.
      name: Optional name for the returned operation. Default to the
        name passed to the Optimizer constructor.

    Returns:
      train_op: The op to dequeue a token so the replicas can exit this batch
        and start the next one. This is executed by each replica.

    Raises:
      ValueError: If the grads_and_vars is empty.
      ValueError: If global step is not provided, the staleness cannot be
        checked.
    """
    if not grads_and_vars:
      raise ValueError("Must supply at least one variable")

    if global_step is None:
      raise ValueError("Global step is required to check staleness")

    self._global_step = global_step
    train_ops = []
    aggregated_grad = []
    var_list = []

    # Each replica's local step; compared against the accumulators'
    # global step to detect and drop stale gradients.
    self._local_step = variables.Variable(
        initial_value=0,
        trainable=False,
        collections=[ops.GraphKeys.LOCAL_VARIABLES],
        name="sync_rep_local_step")
    self.local_step_init_op = state_ops.assign(self._local_step, global_step)
    chief_init_ops = [self.local_step_init_op]
    # Local init must wait for all (global) variables to be initialized,
    # since local_step is seeded from global_step.
    self.ready_for_local_init_op = variables.report_uninitialized_variables(
        variables.all_variables())

    with ops.name_scope(None, self._name):
      for grad, var in grads_and_vars:
        var_list.append(var)
        # Accumulators are co-located with their variables (usually on
        # the parameter servers).
        with ops.device(var.device):
          # Dense gradients.
          if grad is None:
            aggregated_grad.append(None)  # pass-through.
            continue
          elif isinstance(grad, ops.Tensor):
            grad_accum = data_flow_ops.ConditionalAccumulator(
                grad.dtype,
                shape=var.get_shape(),
                shared_name=var.name + "/grad_accum")
            train_ops.append(grad_accum.apply_grad(
                grad, local_step=self._local_step))
            aggregated_grad.append(grad_accum.take_grad(
                self._replicas_to_aggregate))
          else:
            if not isinstance(grad, ops.IndexedSlices):
              raise ValueError("Unknown grad type!")
            grad_accum = data_flow_ops.SparseConditionalAccumulator(
                grad.dtype, shape=(), shared_name=var.name + "/grad_accum")
            train_ops.append(grad_accum.apply_indexed_slices_grad(
                grad, local_step=self._local_step))
            aggregated_grad.append(grad_accum.take_indexed_slices_grad(
                self._replicas_to_aggregate))

          self._accumulator_list.append((grad_accum, var.device))

      aggregated_grads_and_vars = zip(aggregated_grad, var_list)

      # sync_op will be assigned to the same device as the global step.
      with ops.device(global_step.device), ops.name_scope(""):
        update_op = self._opt.apply_gradients(aggregated_grads_and_vars,
                                              global_step)

      # Create token queue.
      with ops.device(global_step.device), ops.name_scope(""):
        sync_token_queue = (
            data_flow_ops.FIFOQueue(-1,
                                    global_step.dtype.base_dtype,
                                    shapes=(),
                                    shared_name="sync_token_q"))
        self._sync_token_queue = sync_token_queue

        # dummy_queue is passed to the queue runner. Don't use the real queues
        # because the queue runner doesn't automatically reopen it once it
        # closed queues in PS devices.
        dummy_queue = (
            data_flow_ops.FIFOQueue(1,
                                    types_pb2.DT_INT32,
                                    shapes=(),
                                    shared_name="dummy_queue"))
      with ops.device(global_step.device), ops.name_scope(""):
        # Replicas have to wait until they can get a token from the token queue.
        with ops.control_dependencies(train_ops):
          token = sync_token_queue.dequeue()
        train_op = state_ops.assign(self._local_step, token)

        with ops.control_dependencies([update_op]):
          # Sync_op needs to insert tokens to the token queue at the end of the
          # step so the replicas can fetch them to start the next step.
          # Using ref() so the freshly updated (post-apply) global_step
          # value is enqueued, not a cached identity read.
          tokens = array_ops.fill([self._tokens_per_step], global_step.ref())
          sync_op = sync_token_queue.enqueue_many((tokens,))

        if self._variable_averages is not None:
          with ops.control_dependencies([sync_op]), ops.name_scope(""):
            sync_op = self._variable_averages.apply(
                self._variables_to_average)

        self._chief_queue_runner = queue_runner.QueueRunner(dummy_queue,
                                                            [sync_op])
      # The chief also seeds each accumulator's notion of the current
      # global step before training starts.
      for accum, dev in self._accumulator_list:
        with ops.device(dev):
          chief_init_ops.append(
              accum.set_global_step(
                  global_step, name="SetGlobalStep"))
      self.chief_init_op = control_flow_ops.group(*(chief_init_ops))
      self._gradients_applied = True
      return train_op

  def get_chief_queue_runner(self):
    """Returns the QueueRunner for the chief to execute.

    This includes the operations to synchronize replicas: aggregate gradients,
    apply to variables, increment global step, insert tokens to token queue.

    Note that this can only be called after calling apply_gradients() which
    actually generates this queuerunner.

    Returns:
      A `QueueRunner` for chief to execute.

    Raises:
      ValueError: If this is called before apply_gradients().
    """
    if self._gradients_applied is False:
      raise ValueError("Should be called after apply_gradients().")

    return self._chief_queue_runner

  def get_slot(self, *args, **kwargs):
    """Return a slot named "name" created for "var" by the Optimizer.

    This simply wraps the get_slot() from the actual optimizer.

    Args:
      *args: Arguments for get_slot().
      **kwargs: Keyword arguments for get_slot().

    Returns:
      The `Variable` for the slot if it was created, `None` otherwise.
    """
    return self._opt.get_slot(*args, **kwargs)

  def get_slot_names(self, *args, **kwargs):
    """Return a list of the names of slots created by the `Optimizer`.

    This simply wraps the get_slot_names() from the actual optimizer.

    Args:
      *args: Arguments for get_slot().
      **kwargs: Keyword arguments for get_slot().

    Returns:
      A list of strings.
    """
    return self._opt.get_slot_names(*args, **kwargs)

  def get_init_tokens_op(self, num_tokens=-1):
    """Returns the op to fill the sync_token_queue with the tokens.

    This is supposed to be executed in the beginning of the chief/sync thread
    so that even if the total_num_replicas is less than replicas_to_aggregate,
    the model can still proceed as the replicas can compute multiple steps per
    variable update. Make sure:
    `num_tokens >= replicas_to_aggregate - total_num_replicas`.

    Args:
      num_tokens: Number of tokens to add to the queue. -1 (the default)
        means "one per aggregated replica".

    Returns:
      An op for the chief/sync replica to fill the token queue.

    Raises:
      ValueError: If this is called before apply_gradients().
      ValueError: If num_tokens are smaller than replicas_to_aggregate -
        total_num_replicas.
    """
    if self._gradients_applied is False:
      raise ValueError(
          "get_init_tokens_op() should be called after apply_gradients().")

    # Minimum tokens needed so the first step can complete when there
    # are fewer physical replicas than gradients to aggregate.
    tokens_needed = self._replicas_to_aggregate - self._total_num_replicas
    if num_tokens == -1:
      num_tokens = self._replicas_to_aggregate
    elif num_tokens < tokens_needed:
      raise ValueError(
          "Too few tokens to finish the first step: %d (given) vs %d (needed)" %
          (num_tokens, tokens_needed))

    if num_tokens > 0:
      with ops.device(self._global_step.device), ops.name_scope(""):
        tokens = array_ops.fill([num_tokens],
                                self._global_step.ref())
        init_tokens = self._sync_token_queue.enqueue_many((tokens,))
    else:
      init_tokens = control_flow_ops.no_op(name="no_init_tokens")

    return init_tokens
# Please switch to v2 if you are still using the old sync optimizer. V2
# is much more efficient and stable. It also removed 100% of the stale
# gradients which is not possible in this implementation without significant
# overhead. This is kept here just for backward compatibility and will be
# DEPRECATED later.
class SyncReplicasOptimizer(optimizer.Optimizer):
"""Class to synchronize, aggregate gradients and pass them to the optimizer.
In a typical asynchronous training environment, it's common to have some
stale gradients. For example, with a N-replica asynchronous training,
gradients will be applied to the variables N times independently. Depending
on each replica's training speed, some gradients might be calculated from
copies of the variable from several steps back (N-1 steps on average). This
optimizer avoids stale gradients by collecting gradients from all replicas,
summing them, then applying them to the variables in one shot, after
which replicas can fetch the new variables and continue.
The following queues are created:
<empty line>
* N `gradient` queues, one per variable to train. Gradients are pushed to
these queues and the chief worker will dequeue_many and then sum them
before applying to variables.
* 1 `token` queue where the optimizer pushes the new global_step value after
all gradients have been applied.
The following variables are created:
* N `local_step`, one per replica. Compared against global step to check for
staleness of the gradients.
This adds nodes to the graph to collect gradients and pause the trainers until
variables are updated.
For the PS:
<empty line>
1. A queue is created for each variable, and each replica now pushes the
gradients into the queue instead of directly applying them to the
variables.
2. For each gradient_queue, pop and sum the gradients once enough
replicas (replicas_to_aggregate) have pushed gradients to the queue.
3. Apply the aggregated gradients to the variables.
4. Only after all variables have been updated, increment the global step.
5. Only after step 4, clear all the gradients in the queues as they are
stale now (could happen when replicas are restarted and push to the queues
multiple times, or from the backup replicas).
6. Only after step 5, pushes `global_step` in the `token_queue`, once for
each worker replica. The workers can now fetch it to its local_step variable
and start the next batch.
For the replicas:
<empty line>
1. Start a step: fetch variables and compute gradients.
2. Once the gradients have been computed, push them into `gradient_queue` only
if local_step equals global_step, otherwise the gradients are just dropped.
This avoids stale gradients.
3. After pushing all the gradients, dequeue an updated value of global_step
from the token queue and record that step to its local_step variable. Note
that this is effectively a barrier.
4. Start the next batch.
### Usage
```python
# Create any optimizer to update the variables, say a simple SGD:
opt = GradientDescentOptimizer(learning_rate=0.1)
# Wrap the optimizer with sync_replicas_optimizer with 50 replicas: at each
# step the optimizer collects 50 gradients before applying to variables.
opt = tf.SyncReplicasOptimizer(opt, replicas_to_aggregate=50,
replica_id=task_id, total_num_replicas=50)
# Note that if you want to have 2 backup replicas, you can change
# total_num_replicas=52 and make sure this number matches how many physical
# replicas you started in your job.
# Some models have startup_delays to help stabilize the model but when using
# sync_replicas training, set it to 0.
# Now you can call `minimize()` or `compute_gradients()` and
# `apply_gradients()` normally
grads = opt.minimize(total_loss, global_step=self.global_step)
# You can now call get_init_tokens_op() and get_chief_queue_runner().
# Note that get_init_tokens_op() must be called before creating session
# because it modifies the graph.
init_token_op = opt.get_init_tokens_op()
chief_queue_runner = opt.get_chief_queue_runner()
```
In the training program, every worker will run the train_op as if not
synchronized. But one worker (usually the chief) will need to execute the
chief_queue_runner and get_init_tokens_op generated from this optimizer.
```python
# After the session is created by the Supervisor and before the main while
# loop:
if is_chief and FLAGS.sync_replicas:
sv.start_queue_runners(sess, [chief_queue_runner])
# Insert initial tokens to the queue.
sess.run(init_token_op)
```
@@__init__
@@compute_gradients
@@apply_gradients
@@get_chief_queue_runner
@@get_init_tokens_op
"""
def __init__(self,
opt,
replicas_to_aggregate,
variable_averages=None,
variables_to_average=None,
replica_id=None,
total_num_replicas=0,
use_locking=False,
name="sync_replicas"):
"""Construct a sync_replicas optimizer.
Args:
opt: The actual optimizer that will be used to compute and apply the
gradients. Must be one of the Optimizer classes.
replicas_to_aggregate: number of replicas to aggregate for each variable
update.
variable_averages: Optional `ExponentialMovingAverage` object, used to
maintain moving averages for the variables passed in
`variables_to_average`.
variables_to_average: a list of variables that need to be averaged. Only
needed if variable_averages is passed in.
replica_id: This is the task/worker/replica ID. Needed as index to access
local_steps to check staleness. Must be in the interval:
[0, total_num_replicas)
total_num_replicas: Total number of tasks/workers/replicas, could be
different from replicas_to_aggregate.
If total_num_replicas > replicas_to_aggregate: it is backup_replicas +
replicas_to_aggregate.
If total_num_replicas < replicas_to_aggregate: Replicas compute
multiple batches per update to variables.
use_locking: If True use locks for update operation.
name: string. Optional name of the returned operation.
"""
if total_num_replicas == 0:
total_num_replicas = replicas_to_aggregate
super(SyncReplicasOptimizer, self).__init__(use_locking, name)
logging.info("""TO BE DEPRECATED!!!
This version will be deprecated. Please switch to V2 at your
earliest convenience.""")
logging.info(
"SyncReplicas enabled: replicas_to_aggregate=%s; total_num_replicas=%s",
replicas_to_aggregate, total_num_replicas)
self._opt = opt
self._replicas_to_aggregate = replicas_to_aggregate
self._gradients_applied = False
self._variable_averages = variable_averages
self._variables_to_average = variables_to_average
self._replica_id = replica_id
self._total_num_replicas = total_num_replicas
self._tokens_per_step = max(total_num_replicas, replicas_to_aggregate)
self._global_step = None
self._sync_token_queue = None
# This will be executed in a queue runner and includes the synchronization
# operations done by the chief.
self._chief_queue_runner = None
# Remember which queue is on which device for the "clear" operation.
# This list contains list of the following format: (grad_queue, device).
self._one_element_queue_list = []
# Sparse gradients queue has both value and index
self._sparse_grad_queues_and_devs = []
# clean_up_op will be executed when the chief is about to restart.
# If chief restarts, it is possible that some variables have already been
# updated before and when chief comes back, these variables will not be
# updated again as the workers have already computed the gradients for
# them.
# But chief still waits for all variables to be updated, which will hang
# the training.
# To avoid such hang, every time the chief is about to die, it will call
# abort_op to kill the PS with the token_queue so all replicas will also
# restart.
# TODO(jmchen): When training restarts, the variables are restored from the
# previous checkpoint. As such all the gradients in all the queues should be
# removed as they are computed from potentially different variables.
# Currently this is not done.
self._clean_up_op = None
def compute_gradients(self, *args, **kwargs):
"""Compute gradients of "loss" for the variables in "var_list".
This simply wraps the compute_gradients() from the real optimizer. The
gradients will be aggregated in the apply_gradients() so that user can
modify the gradients like clipping with per replica global norm if needed.
The global norm with aggregated gradients can be bad as one replica's huge
gradients can hurt the gradients from other replicas.
Args:
*args: Arguments for compute_gradients().
**kwargs: Keyword arguments for compute_gradients().
Returns:
A list of (gradient, variable) pairs.
"""
return self._opt.compute_gradients(*args, **kwargs)
def _aggregate_sparse_grad(self, grad, var, train_ops):
"""Aggregate sparse gradients.
Args:
grad: The sparse gradient to aggregate.
var: The variable to apply this gradient to.
train_ops: The train_ops for the worker to run.
Returns:
aggregated_grad: Aggregated grad.
"""
# Sparse gradients have to be inserted as one pair of (value,
# indice) as an element instead of the whole "indexedslice" because
# their shapes are not deterministic.
sparse_grad_queue = (data_flow_ops.FIFOQueue(
-1,
(grad.values.dtype, grad.indices.dtype),
shapes=(var.get_shape().as_list()[1:], ()),
shared_name="sparse_grad_q_%s" % var.name))
self._sparse_grad_queues_and_devs.append((sparse_grad_queue, var.device))
# Sparse token is inserted after the "enqueue_many" finishes. This
# is needed to make sure enough sparse gradients have been enqueued
# before applying them to the variables.
sparse_token_queue = (data_flow_ops.FIFOQueue(
self._replicas_to_aggregate * 2,
types_pb2.DT_INT32,
shapes=(),
shared_name="sparse_token_q_%s" % var.name))
self._one_element_queue_list.append((sparse_token_queue, var.device))
enqueue_spares_op = sparse_grad_queue.enqueue_many([grad.values,
grad.indices])
with ops.control_dependencies([enqueue_spares_op]):
train_ops.append(sparse_token_queue.enqueue((1,)))
with ops.control_dependencies([sparse_token_queue.dequeue_many(
self._replicas_to_aggregate)]):
values, indices = sparse_grad_queue.dequeue_many(sparse_grad_queue.size())
concat_grad = ops.IndexedSlices(values, indices, grad.dense_shape)
# Sum the gradients of the same variables in the sparse layers so
# that each variable is only updated once. Note that with 2
# gradients g1 and g2 from 2 replicas for the same variable,
# apply(g1+g2) is different from apply(g1) and then apply(g2) when
# the optimizer is complex like Momentum or Adagrad.
values = concat_grad.values
indices = concat_grad.indices
new_indices, indx = array_ops.unique(indices)
num_indices = array_ops.shape(new_indices)[0]
sum_values = math_ops.unsorted_segment_sum(values, indx, num_indices)
return ops.IndexedSlices(sum_values, new_indices, concat_grad.dense_shape)
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
"""Apply gradients to variables.
This contains most of the synchronization implementation and also wraps the
apply_gradients() from the real optimizer.
Args:
grads_and_vars: List of (gradient, variable) pairs as returned by
compute_gradients().
global_step: Optional Variable to increment by one after the
variables have been updated.
name: Optional name for the returned operation. Default to the
name passed to the Optimizer constructor.
Returns:
train_op: The op to dequeue a token so the replicas can exit this batch
and start the next one. This is executed by each replica.
Raises:
ValueError: If the grads_and_vars is empty.
ValueError: If global step is not provided, the staleness cannot be
checked.
"""
if not grads_and_vars:
raise ValueError("Must supply at least one variable")
if global_step is None:
raise ValueError("Global step is required to check staleness")
self._global_step = global_step
train_ops = []
aggregated_grad = []
inputs = []
var_list = []
for x in grads_and_vars:
inputs.extend(list(x))
with ops.device(global_step.device):
self._local_steps = variables.Variable(
array_ops.zeros(
[self._total_num_replicas],
dtype=global_step.dtype),
trainable=False,
name="local_steps")
# Check staleness. Note that this has to be ref(), otherwise identity will
# be accessed and it will be old values.
local_step = array_ops.slice(self._local_steps.ref(),
array_ops.reshape(self._replica_id, (1,)),
[1],
name="get_local_step")
local_step = array_ops.reshape(local_step, ())
is_stale = math_ops.less(local_step, global_step)
with ops.name_scope(name, self._name, inputs) as name:
for grad, var in grads_and_vars:
var_list.append(var)
with ops.device(var.device):
if isinstance(grad, ops.Tensor):
gradient_queue = (data_flow_ops.FIFOQueue(self._tokens_per_step * 2,
grad.dtype,
shapes=var.get_shape(),
shared_name=var.name))
self._one_element_queue_list.append((gradient_queue, var.device))
train_ops.append(gradient_queue.enqueue([grad]))
# Aggregate all gradients
gradients = gradient_queue.dequeue_many(
self._replicas_to_aggregate)
aggregated_grad.append(math_ops.reduce_sum(gradients, [0]))
elif grad is None:
aggregated_grad.append(None) # pass-through.
else:
if not isinstance(grad, ops.IndexedSlices):
raise ValueError("Unknown grad type!")
aggregated_grad.append(self._aggregate_sparse_grad(grad, var,
train_ops))
aggregated_grads_and_vars = zip(aggregated_grad, var_list)
# sync_op will be assigned to the same device as the global step.
with ops.device(global_step.device), ops.name_scope(""):
update_op = self._opt.apply_gradients(aggregated_grads_and_vars,
global_step)
# Create token queue.
with ops.device(global_step.device), ops.name_scope(""):
sync_token_queue = (
data_flow_ops.FIFOQueue(-1,
global_step.dtype.base_dtype,
shapes=(),
shared_name="sync_token_q"))
self._sync_token_queue = sync_token_queue
# dummy_queue is passed to the queue runner. Don't use the real queues
# because the queue runner doesn't automatically reopen it once it
# closed queues in PS devices.
dummy_queue = (
data_flow_ops.FIFOQueue(1,
types_pb2.DT_INT32,
shapes=(),
shared_name="dummy_queue"))
# Clear all the gradients queues in case there are stale gradients.
clear_queue_ops = []
with ops.control_dependencies([update_op]):
for queue, dev in self._one_element_queue_list:
with ops.device(dev):
stale_grads = queue.dequeue_many(queue.size())
clear_queue_ops.append(stale_grads)
for queue, dev in self._sparse_grad_queues_and_devs:
with ops.device(dev):
_, stale_indices = queue.dequeue_many(queue.size())
clear_queue_ops.append(stale_indices)
with ops.device(global_step.device):
self._clean_up_op = control_flow_ops.abort(
error_msg="From sync_replicas")
# According to the staleness, select between the enqueue op (real_grad)
# or no-op (no_op_grad). Effectively dropping all the stale gradients.
no_op_grad = lambda: [control_flow_ops.no_op(name="no_grad_enqueue")]
real_grad = lambda: [control_flow_ops.group(*train_ops)]
final_train_ops = control_flow_ops.cond(is_stale, no_op_grad, real_grad)
with ops.device(global_step.device), ops.name_scope(""):
# Replicas have to wait until they can get a token from the token queue.
with ops.control_dependencies([final_train_ops]):
token = sync_token_queue.dequeue()
train_op = state_ops.scatter_update(self._local_steps,
self._replica_id,
token, name=name)
with ops.control_dependencies(clear_queue_ops):
# Sync_op needs to insert tokens to the token queue at the end of the
# step so the replicas can fetch them to start the next step.
# Note that ref() is used to avoid reading from the identity with old
# the step.
tokens = array_ops.fill([self._tokens_per_step], global_step.ref())
sync_op = sync_token_queue.enqueue_many((tokens,))
if self._variable_averages is not None:
with ops.control_dependencies([sync_op]), ops.name_scope(""):
sync_op = self._variable_averages.apply(
self._variables_to_average)
self._chief_queue_runner = queue_runner.QueueRunner(dummy_queue,
[sync_op])
self._gradients_applied = True
return train_op
def get_chief_queue_runner(self):
"""Returns the QueueRunner for the chief to execute.
This includes the operations to synchronize replicas: aggregate gradients,
apply to variables, increment global step, insert tokens to token queue.
Note that this can only be called after calling apply_gradients() which
actually generates this queuerunner.
Returns:
A `QueueRunner` for chief to execute.
Raises:
ValueError: If this is called before apply_gradients().
"""
if self._gradients_applied is False:
raise ValueError("Should be called after apply_gradients().")
return self._chief_queue_runner
def get_slot(self, *args, **kwargs):
"""Return a slot named "name" created for "var" by the Optimizer.
This simply wraps the get_slot() from the actual optimizer.
Args:
*args: Arguments for get_slot().
**kwargs: Keyword arguments for get_slot().
Returns:
The `Variable` for the slot if it was created, `None` otherwise.
"""
return self._opt.get_slot(*args, **kwargs)
def get_slot_names(self, *args, **kwargs):
"""Return a list of the names of slots created by the `Optimizer`.
This simply wraps the get_slot_names() from the actual optimizer.
Args:
*args: Arguments for get_slot().
**kwargs: Keyword arguments for get_slot().
Returns:
A list of strings.
"""
return self._opt.get_slot_names(*args, **kwargs)
def get_clean_up_op(self):
"""Returns the clean up op for the chief to execute before exit.
This includes the operation to abort the device with the token queue so all
other replicas can also restart. This can avoid potential hang when chief
restarts.
Note that this can only be called after calling apply_gradients().
Returns:
A clean_up_op for chief to execute before exits.
Raises:
ValueError: If this is called before apply_gradients().
"""
if self._gradients_applied is False:
raise ValueError(
"get_clean_up_op() should be called after apply_gradients().")
return self._clean_up_op
def get_init_tokens_op(self, num_tokens=-1):
"""Returns the op to fill the sync_token_queue with the tokens.
This is supposed to be executed in the beginning of the chief/sync thread
so that even if the total_num_replicas is less than replicas_to_aggregate,
the model can still proceed as the replicas can compute multiple steps per
variable update. Make sure:
`num_tokens >= replicas_to_aggregate - total_num_replicas`.
Args:
num_tokens: Number of tokens to add to the queue.
Returns:
An op for the chief/sync replica to fill the token queue.
Raises:
ValueError: If this is called before apply_gradients().
ValueError: If num_tokens are smaller than replicas_to_aggregate -
total_num_replicas.
"""
if self._gradients_applied is False:
raise ValueError(
"get_init_tokens_op() should be called after apply_gradients().")
tokens_needed = self._replicas_to_aggregate - self._total_num_replicas
if num_tokens == -1:
num_tokens = self._replicas_to_aggregate
elif num_tokens < tokens_needed:
raise ValueError(
"Too few tokens to finish the first step: %d (given) vs %d (needed)" %
(num_tokens, tokens_needed))
if num_tokens > 0:
with ops.device(self._global_step.device), ops.name_scope(""):
tokens = array_ops.fill([num_tokens],
self._global_step.ref())
init_tokens = self._sync_token_queue.enqueue_many((tokens,))
else:
init_tokens = control_flow_ops.no_op(name="no_init_tokens")
return init_tokens
| XueqingLin/tensorflow | tensorflow/python/training/sync_replicas_optimizer.py | Python | apache-2.0 | 40,433 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf import settings # noqa
from django.core.urlresolvers import reverse # noqa
from django.utils.translation import ugettext_lazy as _ # noqa
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.api import base
from openstack_dashboard.api import cinder
from openstack_dashboard.api import keystone
from openstack_dashboard.api import nova
from openstack_dashboard.usage import quotas
INDEX_URL = "horizon:admin:projects:index"
ADD_USER_URL = "horizon:admin:projects:create_user"
PROJECT_GROUP_ENABLED = keystone.VERSIONS.active >= 3
PROJECT_USER_MEMBER_SLUG = "update_members"
PROJECT_GROUP_MEMBER_SLUG = "update_group_members"
class UpdateProjectQuotaAction(workflows.Action):
ifcb_label = _("Injected File Content Bytes")
metadata_items = forms.IntegerField(min_value=-1,
label=_("Metadata Items"))
cores = forms.IntegerField(min_value=-1, label=_("VCPUs"))
instances = forms.IntegerField(min_value=-1, label=_("Instances"))
injected_files = forms.IntegerField(min_value=-1,
label=_("Injected Files"))
injected_file_content_bytes = forms.IntegerField(min_value=-1,
label=ifcb_label)
volumes = forms.IntegerField(min_value=-1, label=_("Volumes"))
snapshots = forms.IntegerField(min_value=-1, label=_("Snapshots"))
gigabytes = forms.IntegerField(min_value=-1, label=_("Gigabytes"))
ram = forms.IntegerField(min_value=-1, label=_("RAM (MB)"))
floating_ips = forms.IntegerField(min_value=-1, label=_("Floating IPs"))
fixed_ips = forms.IntegerField(min_value=-1, label=_("Fixed IPs"))
security_groups = forms.IntegerField(min_value=-1,
label=_("Security Groups"))
security_group_rules = forms.IntegerField(min_value=-1,
label=_("Security Group Rules"))
# Neutron
floatingip = forms.IntegerField(min_value=-1, label=_("Floating IPs"))
network = forms.IntegerField(min_value=-1, label=_("Networks"))
port = forms.IntegerField(min_value=-1, label=_("Ports"))
router = forms.IntegerField(min_value=-1, label=_("Routers"))
subnet = forms.IntegerField(min_value=-1, label=_("Subnets"))
def __init__(self, request, *args, **kwargs):
super(UpdateProjectQuotaAction, self).__init__(request,
*args,
**kwargs)
disabled_quotas = quotas.get_disabled_quotas(request)
for field in disabled_quotas:
if field in self.fields:
self.fields[field].required = False
self.fields[field].widget = forms.HiddenInput()
class Meta:
name = _("Quota")
slug = 'update_quotas'
help_text = _("From here you can set quotas "
"(max limits) for the project.")
class UpdateProjectQuota(workflows.Step):
action_class = UpdateProjectQuotaAction
depends_on = ("project_id",)
contributes = quotas.QUOTA_FIELDS
class CreateProjectInfoAction(workflows.Action):
# Hide the domain_id and domain_name by default
domain_id = forms.CharField(label=_("Domain ID"),
required=False,
widget=forms.HiddenInput())
domain_name = forms.CharField(label=_("Domain Name"),
required=False,
widget=forms.HiddenInput())
name = forms.CharField(label=_("Name"))
description = forms.CharField(widget=forms.widgets.Textarea(),
label=_("Description"),
required=False)
enabled = forms.BooleanField(label=_("Enabled"),
required=False,
initial=True)
def __init__(self, request, *args, **kwargs):
super(CreateProjectInfoAction, self).__init__(request,
*args,
**kwargs)
# For keystone V3, display the two fields in read-only
if keystone.VERSIONS.active >= 3:
readonlyInput = forms.TextInput(attrs={'readonly': 'readonly'})
self.fields["domain_id"].widget = readonlyInput
self.fields["domain_name"].widget = readonlyInput
class Meta:
name = _("Project Info")
help_text = _("From here you can create a new "
"project to organize users.")
class CreateProjectInfo(workflows.Step):
action_class = CreateProjectInfoAction
contributes = ("domain_id",
"domain_name",
"project_id",
"name",
"description",
"enabled")
class UpdateProjectMembersAction(workflows.MembershipAction):
def __init__(self, request, *args, **kwargs):
super(UpdateProjectMembersAction, self).__init__(request,
*args,
**kwargs)
err_msg = _('Unable to retrieve user list. Please try again later.')
# Use the domain_id from the project
domain_id = self.initial.get("domain_id", None)
project_id = ''
if 'project_id' in self.initial:
project_id = self.initial['project_id']
# Get the default role
try:
default_role = api.keystone.get_default_role(self.request)
# Default role is necessary to add members to a project
if default_role is None:
default = getattr(settings,
"OPENSTACK_KEYSTONE_DEFAULT_ROLE", None)
msg = _('Could not find default role "%s" in Keystone') % \
default
raise exceptions.NotFound(msg)
except Exception:
exceptions.handle(self.request,
err_msg,
redirect=reverse(INDEX_URL))
default_role_name = self.get_default_role_field_name()
self.fields[default_role_name] = forms.CharField(required=False)
self.fields[default_role_name].initial = default_role.id
# Get list of available users
all_users = []
try:
all_users = api.keystone.user_list(request,
domain=domain_id)
except Exception:
exceptions.handle(request, err_msg)
users_list = [(user.id, user.name) for user in all_users]
# Get list of roles
role_list = []
try:
role_list = api.keystone.role_list(request)
except Exception:
exceptions.handle(request,
err_msg,
redirect=reverse(INDEX_URL))
for role in role_list:
field_name = self.get_member_field_name(role.id)
label = role.name
self.fields[field_name] = forms.MultipleChoiceField(required=False,
label=label)
self.fields[field_name].choices = users_list
self.fields[field_name].initial = []
# Figure out users & roles
if project_id:
try:
project_members = api.keystone.user_list(request,
project=project_id)
except Exception:
exceptions.handle(request, err_msg)
for user in project_members:
try:
roles = api.keystone.roles_for_user(self.request,
user.id,
project_id)
except Exception:
exceptions.handle(request,
err_msg,
redirect=reverse(INDEX_URL))
for role in roles:
field_name = self.get_member_field_name(role.id)
self.fields[field_name].initial.append(user.id)
class Meta:
name = _("Project Members")
slug = PROJECT_USER_MEMBER_SLUG
class UpdateProjectMembers(workflows.UpdateMembersStep):
action_class = UpdateProjectMembersAction
available_list_title = _("All Users")
members_list_title = _("Project Members")
no_available_text = _("No users found.")
no_members_text = _("No users.")
def contribute(self, data, context):
if data:
try:
roles = api.keystone.role_list(self.workflow.request)
except Exception:
exceptions.handle(self.workflow.request,
_('Unable to retrieve user list.'))
post = self.workflow.request.POST
for role in roles:
field = self.get_member_field_name(role.id)
context[field] = post.getlist(field)
return context
class UpdateProjectGroupsAction(workflows.MembershipAction):
def __init__(self, request, *args, **kwargs):
super(UpdateProjectGroupsAction, self).__init__(request,
*args,
**kwargs)
err_msg = _('Unable to retrieve group list. Please try again later.')
# Use the domain_id from the project
domain_id = self.initial.get("domain_id", None)
project_id = ''
if 'project_id' in self.initial:
project_id = self.initial['project_id']
# Get the default role
try:
default_role = api.keystone.get_default_role(self.request)
# Default role is necessary to add members to a project
if default_role is None:
default = getattr(settings,
"OPENSTACK_KEYSTONE_DEFAULT_ROLE", None)
msg = _('Could not find default role "%s" in Keystone') % \
default
raise exceptions.NotFound(msg)
except Exception:
exceptions.handle(self.request,
err_msg,
redirect=reverse(INDEX_URL))
default_role_name = self.get_default_role_field_name()
self.fields[default_role_name] = forms.CharField(required=False)
self.fields[default_role_name].initial = default_role.id
# Get list of available groups
all_groups = []
try:
all_groups = api.keystone.group_list(request,
domain=domain_id)
except Exception:
exceptions.handle(request, err_msg)
groups_list = [(group.id, group.name) for group in all_groups]
# Get list of roles
role_list = []
try:
role_list = api.keystone.role_list(request)
except Exception:
exceptions.handle(request,
err_msg,
redirect=reverse(INDEX_URL))
for role in role_list:
field_name = self.get_member_field_name(role.id)
label = role.name
self.fields[field_name] = forms.MultipleChoiceField(required=False,
label=label)
self.fields[field_name].choices = groups_list
self.fields[field_name].initial = []
# Figure out groups & roles
if project_id:
for group in all_groups:
try:
roles = api.keystone.roles_for_group(self.request,
group=group.id,
project=project_id)
except Exception:
exceptions.handle(request,
err_msg,
redirect=reverse(INDEX_URL))
for role in roles:
field_name = self.get_member_field_name(role.id)
self.fields[field_name].initial.append(group.id)
class Meta:
name = _("Project Groups")
slug = PROJECT_GROUP_MEMBER_SLUG
class UpdateProjectGroups(workflows.UpdateMembersStep):
action_class = UpdateProjectGroupsAction
available_list_title = _("All Groups")
members_list_title = _("Project Groups")
no_available_text = _("No groups found.")
no_members_text = _("No groups.")
def contribute(self, data, context):
if data:
try:
roles = api.keystone.role_list(self.workflow.request)
except Exception:
exceptions.handle(self.workflow.request,
_('Unable to retrieve role list.'))
post = self.workflow.request.POST
for role in roles:
field = self.get_member_field_name(role.id)
context[field] = post.getlist(field)
return context
class CreateProject(workflows.Workflow):
slug = "create_project"
name = _("Create Project")
finalize_button_name = _("Create Project")
success_message = _('Created new project "%s".')
failure_message = _('Unable to create project "%s".')
success_url = "horizon:admin:projects:index"
default_steps = (CreateProjectInfo,
UpdateProjectMembers,
UpdateProjectQuota)
def __init__(self, request=None, context_seed=None, entry_point=None,
*args, **kwargs):
if PROJECT_GROUP_ENABLED:
self.default_steps = (CreateProjectInfo,
UpdateProjectMembers,
UpdateProjectGroups,
UpdateProjectQuota)
super(CreateProject, self).__init__(request=request,
context_seed=context_seed,
entry_point=entry_point,
*args,
**kwargs)
def format_status_message(self, message):
return message % self.context.get('name', 'unknown project')
def handle(self, request, data):
# create the project
domain_id = data['domain_id']
try:
desc = data['description']
self.object = api.keystone.tenant_create(request,
name=data['name'],
description=desc,
enabled=data['enabled'],
domain=domain_id)
except Exception:
exceptions.handle(request, ignore=True)
return False
project_id = self.object.id
# update project members
users_to_add = 0
try:
available_roles = api.keystone.role_list(request)
member_step = self.get_step(PROJECT_USER_MEMBER_SLUG)
# count how many users are to be added
for role in available_roles:
field_name = member_step.get_member_field_name(role.id)
role_list = data[field_name]
users_to_add += len(role_list)
# add new users to project
for role in available_roles:
field_name = member_step.get_member_field_name(role.id)
role_list = data[field_name]
users_added = 0
for user in role_list:
api.keystone.add_tenant_user_role(request,
project=project_id,
user=user,
role=role.id)
users_added += 1
users_to_add -= users_added
except Exception:
if PROJECT_GROUP_ENABLED:
group_msg = _(", add project groups")
else:
group_msg = ""
exceptions.handle(request, _('Failed to add %(users_to_add)s '
'project members%(group_msg)s and '
'set project quotas.')
% {'users_to_add': users_to_add,
'group_msg': group_msg})
if PROJECT_GROUP_ENABLED:
# update project groups
groups_to_add = 0
try:
available_roles = api.keystone.role_list(request)
member_step = self.get_step(PROJECT_GROUP_MEMBER_SLUG)
# count how many groups are to be added
for role in available_roles:
field_name = member_step.get_member_field_name(role.id)
role_list = data[field_name]
groups_to_add += len(role_list)
# add new groups to project
for role in available_roles:
field_name = member_step.get_member_field_name(role.id)
role_list = data[field_name]
groups_added = 0
for group in role_list:
api.keystone.add_group_role(request,
role=role.id,
group=group,
project=project_id)
groups_added += 1
groups_to_add -= groups_added
except Exception:
exceptions.handle(request, _('Failed to add %s project groups '
'and update project quotas.'
% groups_to_add))
# Update the project quota.
nova_data = dict(
[(key, data[key]) for key in quotas.NOVA_QUOTA_FIELDS])
try:
nova.tenant_quota_update(request, project_id, **nova_data)
if base.is_service_enabled(request, 'volume'):
cinder_data = dict([(key, data[key]) for key in
quotas.CINDER_QUOTA_FIELDS])
cinder.tenant_quota_update(request,
project_id,
**cinder_data)
if api.base.is_service_enabled(request, 'network') and \
api.neutron.is_quotas_extension_supported(request):
neutron_data = dict([(key, data[key]) for key in
quotas.NEUTRON_QUOTA_FIELDS])
api.neutron.tenant_quota_update(request,
project_id,
**neutron_data)
except Exception:
exceptions.handle(request, _('Unable to set project quotas.'))
return True
class UpdateProjectInfoAction(CreateProjectInfoAction):
enabled = forms.BooleanField(required=False, label=_("Enabled"))
class Meta:
name = _("Project Info")
slug = 'update_info'
help_text = _("From here you can edit the project details.")
class UpdateProjectInfo(workflows.Step):
action_class = UpdateProjectInfoAction
depends_on = ("project_id",)
contributes = ("domain_id",
"domain_name",
"name",
"description",
"enabled")
class UpdateProject(workflows.Workflow):
slug = "update_project"
name = _("Edit Project")
finalize_button_name = _("Save")
success_message = _('Modified project "%s".')
failure_message = _('Unable to modify project "%s".')
success_url = "horizon:admin:projects:index"
default_steps = (UpdateProjectInfo,
UpdateProjectMembers,
UpdateProjectQuota)
def __init__(self, request=None, context_seed=None, entry_point=None,
*args, **kwargs):
if PROJECT_GROUP_ENABLED:
self.default_steps = (UpdateProjectInfo,
UpdateProjectMembers,
UpdateProjectGroups,
UpdateProjectQuota)
super(UpdateProject, self).__init__(request=request,
context_seed=context_seed,
entry_point=entry_point,
*args,
**kwargs)
def format_status_message(self, message):
return message % self.context.get('name', 'unknown project')
def handle(self, request, data):
# FIXME(gabriel): This should be refactored to use Python's built-in
# sets and do this all in a single "roles to add" and "roles to remove"
# pass instead of the multi-pass thing happening now.
project_id = data['project_id']
domain_id = ''
# update project info
try:
project = api.keystone.tenant_update(
request,
project_id,
name=data['name'],
description=data['description'],
enabled=data['enabled'])
# Use the domain_id from the project if available
domain_id = getattr(project, "domain_id", None)
except Exception:
exceptions.handle(request, ignore=True)
return False
# update project members
users_to_modify = 0
# Project-user member step
member_step = self.get_step(PROJECT_USER_MEMBER_SLUG)
try:
# Get our role options
available_roles = api.keystone.role_list(request)
# Get the users currently associated with this project so we
# can diff against it.
project_members = api.keystone.user_list(request,
project=project_id)
users_to_modify = len(project_members)
for user in project_members:
# Check if there have been any changes in the roles of
# Existing project members.
current_roles = api.keystone.roles_for_user(self.request,
user.id,
project_id)
current_role_ids = [role.id for role in current_roles]
for role in available_roles:
field_name = member_step.get_member_field_name(role.id)
# Check if the user is in the list of users with this role.
if user.id in data[field_name]:
# Add it if necessary
if role.id not in current_role_ids:
# user role has changed
api.keystone.add_tenant_user_role(
request,
project=project_id,
user=user.id,
role=role.id)
else:
# User role is unchanged, so remove it from the
# remaining roles list to avoid removing it later.
index = current_role_ids.index(role.id)
current_role_ids.pop(index)
# Prevent admins from doing stupid things to themselves.
is_current_user = user.id == request.user.id
is_current_project = project_id == request.user.tenant_id
admin_roles = [role for role in current_roles
if role.name.lower() == 'admin']
if len(admin_roles):
removing_admin = any([role.id in current_role_ids
for role in admin_roles])
else:
removing_admin = False
if is_current_user and is_current_project and removing_admin:
# Cannot remove "admin" role on current(admin) project
msg = _('You cannot revoke your administrative privileges '
'from the project you are currently logged into. '
'Please switch to another project with '
'administrative privileges or remove the '
'administrative role manually via the CLI.')
messages.warning(request, msg)
# Otherwise go through and revoke any removed roles.
else:
for id_to_delete in current_role_ids:
api.keystone.remove_tenant_user_role(
request,
project=project_id,
user=user.id,
role=id_to_delete)
users_to_modify -= 1
# Grant new roles on the project.
for role in available_roles:
field_name = member_step.get_member_field_name(role.id)
# Count how many users may be added for exception handling.
users_to_modify += len(data[field_name])
for role in available_roles:
users_added = 0
field_name = member_step.get_member_field_name(role.id)
for user_id in data[field_name]:
if not filter(lambda x: user_id == x.id, project_members):
api.keystone.add_tenant_user_role(request,
project=project_id,
user=user_id,
role=role.id)
users_added += 1
users_to_modify -= users_added
except Exception:
if PROJECT_GROUP_ENABLED:
group_msg = _(", update project groups")
else:
group_msg = ""
exceptions.handle(request, _('Failed to modify %(users_to_modify)s'
' project members%(group_msg)s and '
'update project quotas.')
% {'users_to_modify': users_to_modify,
'group_msg': group_msg})
return True
if PROJECT_GROUP_ENABLED:
# update project groups
groups_to_modify = 0
member_step = self.get_step(PROJECT_GROUP_MEMBER_SLUG)
try:
# Get the groups currently associated with this project so we
# can diff against it.
project_groups = api.keystone.group_list(request,
domain=domain_id,
project=project_id)
groups_to_modify = len(project_groups)
for group in project_groups:
# Check if there have been any changes in the roles of
# Existing project members.
current_roles = api.keystone.roles_for_group(
self.request,
group=group.id,
project=project_id)
current_role_ids = [role.id for role in current_roles]
for role in available_roles:
# Check if the group is in the list of groups with
# this role.
field_name = member_step.get_member_field_name(role.id)
if group.id in data[field_name]:
# Add it if necessary
if role.id not in current_role_ids:
# group role has changed
api.keystone.add_group_role(
request,
role=role.id,
group=group.id,
project=project_id)
else:
# Group role is unchanged, so remove it from
# the remaining roles list to avoid removing it
# later.
index = current_role_ids.index(role.id)
current_role_ids.pop(index)
# Revoke any removed roles.
for id_to_delete in current_role_ids:
api.keystone.remove_group_role(request,
role=id_to_delete,
group=group.id,
project=project_id)
groups_to_modify -= 1
# Grant new roles on the project.
for role in available_roles:
field_name = member_step.get_member_field_name(role.id)
# Count how many groups may be added for error handling.
groups_to_modify += len(data[field_name])
for role in available_roles:
groups_added = 0
field_name = member_step.get_member_field_name(role.id)
for group_id in data[field_name]:
if not filter(lambda x: group_id == x.id,
project_groups):
api.keystone.add_group_role(request,
role=role.id,
group=group_id,
project=project_id)
groups_added += 1
groups_to_modify -= groups_added
except Exception:
exceptions.handle(request, _('Failed to modify %s project '
'members, update project groups '
'and update project quotas.'
% groups_to_modify))
return True
# update the project quota
nova_data = dict(
[(key, data[key]) for key in quotas.NOVA_QUOTA_FIELDS])
try:
nova.tenant_quota_update(request,
project_id,
**nova_data)
if base.is_service_enabled(request, 'volume'):
cinder_data = dict([(key, data[key]) for key in
quotas.CINDER_QUOTA_FIELDS])
cinder.tenant_quota_update(request,
project_id,
**cinder_data)
if api.base.is_service_enabled(request, 'network') and \
api.neutron.is_quotas_extension_supported(request):
neutron_data = dict([(key, data[key]) for key in
quotas.NEUTRON_QUOTA_FIELDS])
api.neutron.tenant_quota_update(request,
project_id,
**neutron_data)
return True
except Exception:
exceptions.handle(request, _('Modified project information and '
'members, but unable to modify '
'project quotas.'))
return True
| netscaler/horizon | openstack_dashboard/dashboards/admin/projects/workflows.py | Python | apache-2.0 | 33,143 |
#!/usr/bin/env python
#
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
from threading import Thread
LOG = logging.getLogger(__name__)
_TASKS = {}
class _Task(Thread):
def __init__(self, tid, interval, repeat, func, *args, **kwargs):
super(_Task, self).__init__()
self.tid = tid
self.interval = interval
self.repeat = repeat
self.func = func
self.args = args
self.kwargs = kwargs
self._stop = False
_TASKS[tid] = self
self.start()
def run(self):
for dummy in range(self.repeat):
if self._stop:
break
retval = self.func(*self.args, **self.kwargs)
if retval is not None:
break
time.sleep(self.interval)
_TASKS.pop(self.tid, None)
def stop(self):
self._stop = True
def start(tid, interval, repeat, func, *args, **kwargs):
"""
Start a new task
"""
LOG.info('start(tid=%s, interval=%s, repeat=%s, func=%s, args=%s, '
'kwargs=%s)', tid, interval, repeat, func.__name__, args, kwargs)
_Task(tid, interval, repeat, func, *args, **kwargs)
def stop(tid):
"""
Stop a running task
"""
LOG.info('stop(tid=%s)', tid)
t = _TASKS.get(tid, None)
if t is not None:
t.stop()
def stop_all(wait=False):
    """
    Stop all running tasks

    :param wait: if True, block until every task has finished and
        deregistered itself from the task table.
    """
    LOG.info('stop_all()')
    # Iterate over a snapshot of the keys: each task removes itself from
    # _TASKS as it finishes, and mutating the dict while iterating over
    # it raises RuntimeError.
    for tid in list(_TASKS):
        stop(tid)
    if wait:
        while _TASKS:
            time.sleep(0.5)
| dtroyer/dwarf | dwarf/task.py | Python | apache-2.0 | 2,171 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Built-in regularizers.
"""
# pylint: disable=invalid-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import six
from tensorflow.python.keras import backend
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.ops import math_ops
from tensorflow.python.util.tf_export import keras_export
def _check_penalty_number(x):
"""check penalty number availability, raise ValueError if failed."""
if not isinstance(x, (float, int)):
raise ValueError(('Value: {} is not a valid regularization penalty number, '
'expected an int or float value').format(x))
if math.isinf(x) or math.isnan(x):
raise ValueError(
('Value: {} is not a valid regularization penalty number, '
'a positive/negative infinity or NaN is not a property value'
).format(x))
def _none_to_default(inputs, default):
return default if inputs is None else default
@keras_export('keras.regularizers.Regularizer')
class Regularizer(object):
  """Regularizer base class.

  A regularizer computes a scalar penalty from layer parameters or layer
  activity; Keras sums these penalties into the loss that the network
  optimizes.  Layers such as `Dense` and the `Conv1D`/`Conv2D`/`Conv3D`
  family expose `kernel_regularizer`, `bias_regularizer` and
  `activity_regularizer` keyword arguments, and every layer (including
  custom layers) exposes `activity_regularizer` as a settable property.
  Activity penalties are divided by the input batch size so that their
  weighting relative to the weight penalties is batch-size independent.
  A layer's accumulated penalties are available via `layer.losses` after
  the layer has been called on inputs.

  Built-in penalties: `tf.keras.regularizers.L1`, `L2` and `L1L2`.
  A regularizer instance can also be called directly on a tensor to
  compute its penalty.

  Custom regularizers may be plain one-argument callables mapping a
  weight tensor to a scalar tensor, or subclasses of this class that
  implement `__call__` (and `get_config` when serialization is needed).
  Registration via `tf.keras.utils.register_keras_serializable` (TF 2.1+)
  is required only for Keras `model_to_estimator`, HDF5 saving/loading,
  model cloning, some visualization utilities and JSON export; on older
  TensorFlow versions pass custom regularizers through the
  `custom_objects` argument instead.
  """

  def __call__(self, x):
    """Compute a regularization penalty from an input tensor."""
    # The base class applies no penalty.
    return 0.

  @classmethod
  def from_config(cls, config):
    """Creates a regularizer from its config.

    Reverse of `get_config`: instantiates the same regularizer from the
    config dictionary.  Used by Keras `model_to_estimator`, HDF5
    saving/loading, model cloning, some visualization utilities, and
    JSON export.

    Args:
      config: A Python dictionary, typically the output of get_config.

    Returns:
      A regularizer instance.
    """
    return cls(**config)

  def get_config(self):
    """Returns the config of the regularizer.

    The config is a serializable Python dictionary of all configuration
    parameters, sufficient to reinstantiate the regularizer (without any
    saved state).  Overriding it is optional for plain training,
    SavedModel export and weight checkpoints, but required for
    `model_to_estimator`, HDF5 saving/loading, model cloning, some
    visualization utilities, and JSON export.

    Returns:
      Python dictionary.
    """
    raise NotImplementedError(str(self) + ' does not implement get_config()')
@keras_export('keras.regularizers.L1L2')
class L1L2(Regularizer):
  """A regularizer that applies both L1 and L2 regularization penalties.

  Computes `l1 * reduce_sum(abs(x)) + l2 * reduce_sum(square(x))`.

  May be passed to a layer as the string identifier `'l1_l2'`, in which
  case the defaults `l1=0.01` and `l2=0.01` are used.

  Attributes:
    l1: Float; L1 regularization factor.
    l2: Float; L2 regularization factor.
  """

  def __init__(self, l1=0., l2=0.):  # pylint: disable=redefined-outer-name
    # The default values differ from l1_l2() for backwards compatibility:
    # e.g. L1L2(l2=0.1) applies only an L2 penalty and no L1 term.
    factors = []
    for factor in (l1, l2):
      factor = 0. if factor is None else factor
      _check_penalty_number(factor)
      factors.append(backend.cast_to_floatx(factor))
    self.l1, self.l2 = factors

  def __call__(self, x):
    penalty = backend.constant(0., dtype=x.dtype)
    if self.l1:
      penalty += self.l1 * math_ops.reduce_sum(math_ops.abs(x))
    if self.l2:
      penalty += self.l2 * math_ops.reduce_sum(math_ops.square(x))
    return penalty

  def get_config(self):
    return dict(l1=float(self.l1), l2=float(self.l2))
@keras_export('keras.regularizers.L1', 'keras.regularizers.l1')
class L1(Regularizer):
  """A regularizer that applies a L1 regularization penalty.

  Computes `l1 * reduce_sum(abs(x))`.

  May be passed to a layer as the string identifier `'l1'`, in which
  case the default factor `l1=0.01` is used.

  Attributes:
    l1: Float; L1 regularization factor.
  """

  def __init__(self, l1=0.01, **kwargs):  # pylint: disable=redefined-outer-name
    # 'l' is accepted as a legacy alias for the penalty factor.
    factor = kwargs.pop('l', l1)
    if kwargs:
      raise TypeError('Argument(s) not recognized: %s' % (kwargs,))
    if factor is None:
      factor = 0.01
    _check_penalty_number(factor)
    self.l1 = backend.cast_to_floatx(factor)

  def __call__(self, x):
    return self.l1 * math_ops.reduce_sum(math_ops.abs(x))

  def get_config(self):
    return dict(l1=float(self.l1))
@keras_export('keras.regularizers.L2', 'keras.regularizers.l2')
class L2(Regularizer):
  """A regularizer that applies a L2 regularization penalty.

  Computes `l2 * reduce_sum(square(x))`.

  May be passed to a layer as the string identifier `'l2'`, in which
  case the default factor `l2=0.01` is used.

  Attributes:
    l2: Float; L2 regularization factor.
  """

  def __init__(self, l2=0.01, **kwargs):  # pylint: disable=redefined-outer-name
    # 'l' is accepted as a legacy alias for the penalty factor.
    factor = kwargs.pop('l', l2)
    if kwargs:
      raise TypeError('Argument(s) not recognized: %s' % (kwargs,))
    if factor is None:
      factor = 0.01
    _check_penalty_number(factor)
    self.l2 = backend.cast_to_floatx(factor)

  def __call__(self, x):
    return self.l2 * math_ops.reduce_sum(math_ops.square(x))

  def get_config(self):
    return dict(l2=float(self.l2))
@keras_export('keras.regularizers.l1_l2')
def l1_l2(l1=0.01, l2=0.01):  # pylint: disable=redefined-outer-name
  r"""Create a regularizer that applies both L1 and L2 penalties.

  The combined penalty is
  `l1 * reduce_sum(abs(x)) + l2 * reduce_sum(square(x))`.

  Args:
    l1: Float; L1 regularization factor.
    l2: Float; L2 regularization factor.

  Returns:
    An L1L2 Regularizer with the given regularization factors.
  """
  return L1L2(l1=l1, l2=l2)
# Deserialization aliases: lowercase 'l1'/'l2' names in saved configs
# resolve to the L1/L2 classes through the globals() lookup performed by
# `deserialize` below.
l1 = L1
l2 = L2
@keras_export('keras.regularizers.serialize')
def serialize(regularizer):
  """Return a serializable representation of `regularizer`."""
  return serialize_keras_object(regularizer)
@keras_export('keras.regularizers.deserialize')
def deserialize(config, custom_objects=None):
  """Instantiate a regularizer from a config dict or string identifier."""
  if config == 'l1_l2':
    # Special case necessary since the defaults used for "l1_l2" (string)
    # differ from those of the L1L2 class.
    return L1L2(l1=0.01, l2=0.01)
  return deserialize_keras_object(
      config,
      module_objects=globals(),
      custom_objects=custom_objects,
      printable_module_name='regularizer')
@keras_export('keras.regularizers.get')
def get(identifier):
  """Retrieve a regularizer instance from a config or identifier.

  Accepts None (returned unchanged), a config dict, a string identifier,
  or a callable (returned unchanged); anything else raises ValueError.
  """
  if identifier is None:
    return None
  if isinstance(identifier, dict):
    return deserialize(identifier)
  if isinstance(identifier, six.string_types):
    return deserialize(str(identifier))
  if callable(identifier):
    return identifier
  raise ValueError(
      'Could not interpret regularizer identifier: {}'.format(identifier))
| annarev/tensorflow | tensorflow/python/keras/regularizers.py | Python | apache-2.0 | 12,860 |
import arrow
from dateutil import tz
import flask
import CONFIG
START_TIME = CONFIG.START_TIME
END_TIME = CONFIG.END_TIME
def get_busy_times(events):
    """
    Gets a list of busy times calculated from the list of events.
    :param events: a list of calendar events.
    :return: a list of busy times in ascending order.
    """
    interval_start = arrow.get(flask.session['begin_date']).replace(
        hours=+START_TIME)
    interval_end = arrow.get(flask.session['end_date']).replace(
        hours=+END_TIME)
    return get_busy_list(get_busy_dict(events, interval_start, interval_end))
def get_busy_dict(events, begin_date, end_date):
    """
    Fills a dictionary with possible busy times from the list of events.

    Only non-transparent ("busy") events are kept.  All-day events are
    clipped to the working hours (START_TIME..END_TIME) within the
    selected interval; timed events are kept when both endpoints fall
    inside the interval, clipped to the working hours of their day.

    :param events: a list of calendar events.
    :param begin_date: is the start of the selected time interval.
    :param end_date: is the end of the selected time interval.
    :return: a dict of events keyed by their (possibly clipped) start
        dateTime string.
    """
    busy_dict = {}
    for event in events:
        available = is_available(event)
        event_start, event_end, is_all_day = get_start_end_datetime(event)
        # Working-hours bounds on the days the event starts and ends.
        day_start = event_start.replace(hour=START_TIME, minute=0)
        day_end = event_end.replace(hour=END_TIME, minute=0)

        # all day events that either begin or end in the time interval
        if ((begin_date <= event_start <= end_date or
             begin_date <= event_end <= end_date) and
                not available and is_all_day):
            if day_start < begin_date:
                event['start']['dateTime'] = begin_date.isoformat()
            else:
                event['start']['dateTime'] = day_start.isoformat()
            if event_end > end_date:
                event['end']['dateTime'] = end_date.isoformat()
            else:
                # All-day events end at midnight of the following day, so
                # step back one day to the last included working day.
                event['end']['dateTime'] = day_end.replace(days=-1).isoformat()
            busy_dict[event['start']['dateTime']] = event

        # events completely within individual days and the time interval
        elif (begin_date <= event_start <= end_date and
              begin_date <= event_end <= end_date and
              not available and not is_all_day):
            if event_start < day_start:
                event['start']['dateTime'] = day_start.isoformat()
            if event_end > day_end:
                event['end']['dateTime'] = day_end.isoformat()
            busy_dict[event['start']['dateTime']] = event
    return busy_dict
def get_busy_list(busy_dict):
    """
    Removes or combines the possible busy times from the busy dictionary
    and returns a sorted list.

    Performs a pairwise (O(n^2)) comparison of the candidate busy events:
    an event fully contained in another is dropped; an event ending at
    17:00 whose successor starts at 09:00 on the next working day is
    merged across the overnight gap; and back-to-back events (one ends
    exactly when the other starts) are merged.

    NOTE(review): the '17:00'/'09:00' literals presumably mirror
    CONFIG.END_TIME/START_TIME -- confirm they stay in sync.

    :param busy_dict: a dict of events representing possible busy times.
    :return: a sorted list of events representing busy times.
    """
    busy = []
    remove_list = []
    for i in sorted(busy_dict):
        for j in sorted(busy_dict):
            event = busy_dict[i]
            event_start = arrow.get(event['start']['dateTime'])
            event_end = arrow.get(event['end']['dateTime'])
            event_end_time = event_end.format('HH:mm')
            other_event = busy_dict[j]
            other_event_start = arrow.get(other_event['start']['dateTime'])
            other_event_end = arrow.get(other_event['end']['dateTime'])
            other_event_start_time = other_event_start.format('HH:mm')
            # The other event's start shifted back to the previous day's
            # end-of-work hour; used by the overnight merge test below.
            other_event_start_mod = other_event_start.replace(days=-1,
                                                              hour=END_TIME)
            if event != other_event:
                # Drop events fully contained within another event.
                if (other_event_start >= event_start and
                        other_event_end <= event_end):
                    remove_list.append(other_event)
                # Merge across the overnight gap between working days.
                if (event_end_time == '17:00' and
                        other_event_start_time == '09:00' and
                        event_end == other_event_start_mod):
                    event['end']['dateTime'] = other_event['end']['dateTime']
                    remove_list.append(other_event)
                # Merge strictly adjacent events.
                if event_end == other_event_start:
                    event['end']['dateTime'] = other_event['end']['dateTime']
                    remove_list.append(other_event)
    for i in sorted(busy_dict):
        if busy_dict[i] not in remove_list:
            busy.append(busy_dict[i])
    return busy
def get_events(service):
    """
    Gets a list of events from the Google calendar service.
    :param service: is the Google service from where the calendar is retrieved.
    :return: a list of events from every checked calendar.
    """
    events = []
    for calendar_id in flask.session['checked_calendars']:
        listing = service.events().list(calendarId=calendar_id).execute()
        events.extend(listing['items'])
    return events
def is_available(event):
    """
    Checks if the event has the transparency attribute.

    Presence of a 'transparency' key is treated as the event not blocking
    time (i.e. the person is available during it).

    :param event: is the event to check.
    :return: True if it is transparent and False if not
    """
    return 'transparency' in event
def get_start_end_datetime(event):
    """
    Gets the event's start and end as arrow objects.

    Timed events carry a 'dateTime' key; all-day events carry only a
    'date' key, in which case the all-day flag is set.

    :param event: is the event to check.
    :return: a 3-tuple (start, end, is_all_day) with arrow objects.
    """
    if 'dateTime' in event['start']:
        key, is_all_day = 'dateTime', False
    else:
        key, is_all_day = 'date', True
    event_start = arrow.get(event['start'][key]).replace(tzinfo=tz.tzlocal())
    event_end = arrow.get(event['end'][key]).replace(tzinfo=tz.tzlocal())
    return event_start, event_end, is_all_day
| hkhamm/proj7-freetimes | busy_times.py | Python | artistic-2.0 | 6,156 |
import os
import unittest
from tethys_apps.static_finders import TethysStaticFinder
class TestTethysStaticFinder(unittest.TestCase):
    """Exercises TethysStaticFinder's find/find_location/list behaviour
    against the bundled test_app's public/ static directory."""

    def setUp(self):
        # <src>/tests/apps/tethysapp-test_app/tethysapp/test_app/public
        self.src_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
        self.root = os.path.join(self.src_dir, 'tests', 'apps', 'tethysapp-test_app',
                                 'tethysapp', 'test_app', 'public')

    def tearDown(self):
        pass

    def test_init(self):
        pass

    def test_find(self):
        """find() resolves an app-prefixed path to an absolute file path."""
        finder = TethysStaticFinder()
        found = finder.find('test_app/css/main.css')
        self.assertEqual(os.path.join(self.root, 'css/main.css'), found)

    def test_find_all(self):
        """find(all=True) returns every matching location."""
        finder = TethysStaticFinder()
        found = finder.find('test_app/css/main.css', all=True)
        self.assertIn(os.path.join(self.root, 'css/main.css'), found)

    def test_find_location_with_no_prefix(self):
        """Without a prefix the path is joined directly onto root."""
        finder = TethysStaticFinder()
        location = finder.find_location(self.root, 'css/main.css', None)
        self.assertEqual(os.path.join(self.root, 'css/main.css'), location)

    def test_find_location_with_prefix_not_in_path(self):
        """A prefix the path does not start with yields None."""
        finder = TethysStaticFinder()
        location = finder.find_location(self.root, 'css/main.css', 'tethys_app')
        self.assertIsNone(location)

    def test_find_location_with_prefix_in_path(self):
        """A matching prefix is stripped before joining onto root."""
        finder = TethysStaticFinder()
        location = finder.find_location(self.root, 'tethys_app/css/main.css',
                                        'tethys_app')
        self.assertEqual(os.path.join(self.root, 'css/main.css'), location)

    def test_list(self):
        """list() yields the test_app's static files."""
        finder = TethysStaticFinder()
        app_paths = []
        for path, storage in finder.list(''):
            if 'test_app' in storage.location:
                app_paths.append(path)
        self.assertIn('js/main.js', app_paths)
        self.assertIn('images/icon.gif', app_paths)
        self.assertIn('css/main.css', app_paths)
| tethysplatform/tethys | tests/unit_tests/test_tethys_apps/test_static_finders.py | Python | bsd-2-clause | 2,361 |
import re
from streamlink.plugin import Plugin
from streamlink.plugin.api import validate
from streamlink.stream import HDSStream, HLSStream, HTTPStream
# JSON media/config endpoint, filled with the numeric media id.
MEDIA_URL = "http://www.ardmediathek.de/play/media/{0}"
# Player SWF passed as pvswf= for HDS manifest parsing.
SWF_URL = "http://www.ardmediathek.de/ard/static/player/base/flash/PluginFlash.swf"
# Query string appended to HDS/SMIL stream URLs.
HDCORE_PARAMETER = "?hdcore=3.3.0"
# Maps the JSON "_quality" codes to human-readable stream names.
QUALITY_MAP = {
    "auto": "auto",
    3: "544p",
    2: "360p",
    1: "288p",
    0: "144p"
}

# Matches the site URLs this plugin supports.
_url_re = re.compile(r"http(s)?://(?:(\w+\.)?ardmediathek.de/tv|mediathek.daserste.de/)")
# Extracts the numeric media id from the page markup.
_media_id_re = re.compile(r"/play/(?:media|config)/(\d+)")
# Shape of the media JSON: a list of media entries, each holding a list
# of stream descriptors ("_stream" may be a string or list of strings).
_media_schema = validate.Schema({
    "_mediaArray": [{
        "_mediaStreamArray": [{
            validate.optional("_server"): validate.text,
            "_stream": validate.any(validate.text, [validate.text]),
            "_quality": validate.any(int, validate.text)
        }]
    }]
})
# Shape of a SMIL config: base URL, CDN name and the video source paths.
_smil_schema = validate.Schema(
    validate.union({
        "base": validate.all(
            validate.xml_find("head/meta"),
            validate.get("base"),
            validate.url(scheme="http")
        ),
        "cdn": validate.all(
            validate.xml_find("head/meta"),
            validate.get("cdn")
        ),
        "videos": validate.all(
            validate.xml_findall("body/seq/video"),
            [validate.get("src")]
        )
    })
)
class ard_mediathek(Plugin):
    """Streamlink plugin for ardmediathek.de / mediathek.daserste.de."""

    @classmethod
    def can_handle_url(cls, url):
        return _url_re.match(url)

    def _get_http_streams(self, info):
        """Yield (name, HTTPStream) pairs for a progressive-HTTP entry."""
        name = QUALITY_MAP.get(info["_quality"], "vod")
        urls = info["_stream"]
        if not isinstance(info["_stream"], list):
            urls = [urls]
        for url in urls:
            stream = HTTPStream(self.session, url)
            yield name, stream

    def _get_hds_streams(self, info):
        # Needs the hdcore parameter added
        url = info["_stream"] + HDCORE_PARAMETER
        return HDSStream.parse_manifest(self.session, url, pvswf=SWF_URL).items()

    def _get_hls_streams(self, info):
        return HLSStream.parse_variant_playlist(self.session, info["_stream"]).items()

    def _get_smil_streams(self, info):
        """Resolve a SMIL config document into HDS streams."""
        res = self.session.http.get(info["_stream"])
        smil = self.session.http.xml(res, "SMIL config", schema=_smil_schema)
        for video in smil["videos"]:
            url = "{0}/{1}{2}".format(smil["base"], video, HDCORE_PARAMETER)
            streams = HDSStream.parse_manifest(self.session, url, pvswf=SWF_URL, is_akamai=smil["cdn"] == "akamai")
            for stream in streams.items():
                yield stream

    def _get_streams(self):
        res = self.session.http.get(self.url)
        match = _media_id_re.search(res.text)
        if match:
            media_id = match.group(1)
        else:
            return
        self.logger.debug("Found media id: {0}", media_id)
        res = self.session.http.get(MEDIA_URL.format(media_id))
        media = self.session.http.json(res, schema=_media_schema)

        for media in media["_mediaArray"]:
            for stream in media["_mediaStreamArray"]:
                stream_ = stream["_stream"]
                if isinstance(stream_, list):
                    if not stream_:
                        continue
                    stream_ = stream_[0]

                # Note: the .f4m/.smil/.m3u8 checks must precede the
                # generic "http" prefix check, since those URLs also
                # start with "http".
                if stream_.endswith(".f4m"):
                    parser = self._get_hds_streams
                    parser_name = "HDS"
                elif stream_.endswith(".smil"):
                    parser = self._get_smil_streams
                    parser_name = "SMIL"
                elif stream_.endswith(".m3u8"):
                    parser = self._get_hls_streams
                    parser_name = "HLS"
                elif stream_.startswith("http"):
                    parser = self._get_http_streams
                    parser_name = "HTTP"
                else:
                    # Bug fix: an unrecognized URL previously fell
                    # through and either raised NameError (first entry)
                    # or silently reused the previous entry's parser.
                    continue

                try:
                    for s in parser(stream):
                        yield s
                except IOError as err:
                    self.logger.error("Failed to extract {0} streams: {1}",
                                      parser_name, err)
# Entry point name discovered by Streamlink's plugin loader.
__plugin__ = ard_mediathek
| back-to/streamlink | src/streamlink/plugins/ard_mediathek.py | Python | bsd-2-clause | 4,132 |
import dmx
import socket
# Single moving-head wash fixture at DMX base address 1, full white at
# full intensity.
wash = dmx.EuroliteMovingHeadWash(base=1, color=(1, 1, 1), intensity=1)
# DMX controller on the given USB serial port, with debug output enabled.
controller = dmx.DMXController(debug=True, fixtures=[wash], port='/dev/tty.usbmodem1421')
controller.enabled = True
# Listen for plain-text control datagrams ("<command> <argument>") on UDP 9000.
server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server.bind(('localhost', 9000))
while True:
    message = server.recv(1000)
    # NOTE(review): on Python 3 recv() returns bytes, so split(' ') with a
    # str separator would raise TypeError -- this script appears to target
    # Python 2; confirm before porting.
    command, argument = message.strip().split(' ', 1)
    if command == 'color':
        # argument is a hex RRGGBB value; scale each channel to 0..1.
        rgb = int(argument, 16)
        wash.color = (rgb >> 16) / 255., ((rgb >> 8) & 0xff) / 255., (rgb & 0xff) / 255.
    elif command == 'tilt':
        wash.tilt = int(argument)
    elif command == 'pan':
        wash.pan = int(argument)
| jonathanhogg/coderdojo-sequencer | light_controller/light_controller.py | Python | bsd-2-clause | 678 |
class JoommfError(Exception):
    """Generic exception raised by the joommf package."""
    pass
| fangohr/oommf-python | joommf/exceptions.py | Python | bsd-2-clause | 39 |
"""Two dimensional checkerboard lattice with real hoppings"""
import pybinding as pb
import matplotlib.pyplot as plt
from math import pi
pb.pltutils.use_style()
def checkerboard(d=0.2, delta=1.1, t=0.6):
    """Build a two-dimensional checkerboard lattice.

    :param d: lattice constant (length of the a1/a2 lattice vectors).
    :param delta: onsite energy magnitude; sublattice A gets -delta, B +delta.
    :param t: hopping energy between neighbouring A and B sites.
    :return: the assembled pb.Lattice.
    """
    lattice = pb.Lattice(a1=[d, 0], a2=[0, d])
    lattice.add_sublattices(('A', [0, 0], -delta),
                            ('B', [d / 2, d / 2], delta))
    # The four nearest-neighbour A->B hoppings in the surrounding cells.
    hoppings = [([0, 0], 'A', 'B', t),
                ([0, -1], 'A', 'B', t),
                ([-1, 0], 'A', 'B', t),
                ([-1, -1], 'A', 'B', t)]
    lattice.add_hoppings(*hoppings)
    return lattice
# Preview the lattice and its Brillouin zone.
lattice = checkerboard()
lattice.plot()
plt.show()
lattice.plot_brillouin_zone()
plt.show()
# Band structure along a path of k-points for the infinite
# (translationally symmetric) lattice.
model = pb.Model(checkerboard(), pb.translational_symmetry())
solver = pb.solver.lapack(model)
bands = solver.calc_bands([0, 0], [0, 5*pi], [5*pi, 5*pi], [0, 0])
bands.plot()
plt.show()
| dean0x7d/pybinding | docs/examples/lattice/checkerboard.py | Python | bsd-2-clause | 801 |
import os
from unittest import skip
from django.core.files.base import ContentFile
from django.core.urlresolvers import reverse
from onadata.apps.main.views import show, form_photos, update_xform, profile,\
enketo_preview
from onadata.apps.logger.models import XForm
from onadata.apps.logger.views import download_xlsform, download_jsonform,\
download_xform, delete_xform
from onadata.apps.viewer.models.parsed_instance import ParsedInstance
from onadata.apps.viewer.views import export_list, map_view
from onadata.libs.utils.logger_tools import publish_xml_form
from onadata.libs.utils.user_auth import http_auth_string
from test_base import TestBase
class TestFormShow(TestBase):
    def setUp(self):
        """Create a logged-in user, publish the transportation form and
        cache its `show` URL for the tests below."""
        TestBase.setUp(self)
        self._create_user_and_login()
        self._publish_transportation_form()
        self.url = reverse(show, kwargs={
            'username': self.user.username,
            'id_string': self.xform.id_string
        })
def test_show_form_name(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, self.xform.id_string)
def test_hide_from_anon(self):
response = self.anon.get(self.url)
self.assertEqual(response.status_code, 302)
def test_hide_from_not_user(self):
self._create_user_and_login("jo")
response = self.client.get(self.url)
self.assertEqual(response.status_code, 302)
def test_show_to_anon_if_public(self):
self.xform.shared = True
self.xform.save()
response = self.anon.get(self.url)
self.assertEqual(response.status_code, 200)
def test_dl_xlsx_xlsform(self):
self._publish_xlsx_file()
response = self.client.get(reverse(download_xlsform, kwargs={
'username': self.user.username,
'id_string': 'exp_one'
}))
self.assertEqual(response.status_code, 200)
self.assertEqual(
response['Content-Disposition'],
"attachment; filename=exp_one.xlsx")
def test_dl_xls_to_anon_if_public(self):
self.xform.shared = True
self.xform.save()
response = self.anon.get(reverse(download_xlsform, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
}))
self.assertEqual(response.status_code, 200)
def test_dl_xls_for_basic_auth(self):
extra = {
'HTTP_AUTHORIZATION':
http_auth_string(self.login_username, self.login_password)
}
response = self.anon.get(reverse(download_xlsform, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
}), **extra)
self.assertEqual(response.status_code, 200)
def test_dl_json_to_anon_if_public(self):
self.xform.shared = True
self.xform.save()
response = self.anon.get(reverse(download_jsonform, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
}))
self.assertEqual(response.status_code, 200)
def test_dl_jsonp_to_anon_if_public(self):
self.xform.shared = True
self.xform.save()
callback = 'jsonpCallback'
response = self.anon.get(reverse(download_jsonform, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
}), {'callback': callback})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content.startswith(callback + '('), True)
self.assertEqual(response.content.endswith(')'), True)
def test_dl_json_for_basic_auth(self):
extra = {
'HTTP_AUTHORIZATION':
http_auth_string(self.login_username, self.login_password)
}
response = self.anon.get(reverse(download_jsonform, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
}), **extra)
self.assertEqual(response.status_code, 200)
def test_dl_json_for_cors_options(self):
response = self.anon.options(reverse(download_jsonform, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
}))
allowed_headers = ['Accept', 'Origin', 'X-Requested-With',
'Authorization']
control_headers = response['Access-Control-Allow-Headers']
provided_headers = [h.strip() for h in control_headers.split(',')]
self.assertListEqual(allowed_headers, provided_headers)
self.assertEqual(response['Access-Control-Allow-Methods'], 'GET')
self.assertEqual(response['Access-Control-Allow-Origin'], '*')
def test_dl_xform_to_anon_if_public(self):
self.xform.shared = True
self.xform.save()
response = self.anon.get(reverse(download_xform, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
}))
self.assertEqual(response.status_code, 200)
def test_dl_xform_for_basic_auth(self):
extra = {
'HTTP_AUTHORIZATION':
http_auth_string(self.login_username, self.login_password)
}
response = self.anon.get(reverse(download_xform, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
}), **extra)
self.assertEqual(response.status_code, 200)
def test_dl_xform_for_authenticated_non_owner(self):
self._create_user_and_login('alice', 'alice')
response = self.client.get(reverse(download_xform, kwargs={
'username': 'bob',
'id_string': self.xform.id_string
}))
self.assertEqual(response.status_code, 200)
def test_show_private_if_shared_but_not_data(self):
self.xform.shared = True
self.xform.save()
response = self.anon.get(self.url)
self.assertContains(response, 'PRIVATE')
def test_show_link_if_shared_and_data(self):
self.xform.shared = True
self.xform.shared_data = True
self.xform.save()
self._submit_transport_instance()
response = self.anon.get(self.url)
self.assertContains(response, reverse(export_list, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': 'csv'
}))
    def test_show_link_if_owner(self):
        """The owner sees CSV/XLS export links; the map link appears only
        for forms with geopoints that have received a submission."""
        self._submit_transport_instance()
        response = self.client.get(self.url)
        self.assertContains(response, reverse(export_list, kwargs={
            'username': self.user.username,
            'id_string': self.xform.id_string,
            'export_type': 'csv'
        }))
        self.assertContains(response, reverse(export_list, kwargs={
            'username': self.user.username,
            'id_string': self.xform.id_string,
            'export_type': 'xls'
        }))
        # The transportation form has no geopoints, so no map link.
        self.assertNotContains(response, reverse(map_view, kwargs={
            'username': self.user.username,
            'id_string': self.xform.id_string
        }))

        # check that a form with geopoints has the map url
        response = self._publish_xls_file(
            os.path.join(
                os.path.dirname(__file__), "fixtures", "gps", "gps.xls"))
        self.assertEqual(response.status_code, 200)
        self.xform = XForm.objects.latest('date_created')

        show_url = reverse(show, kwargs={
            'username': self.user.username,
            'id_string': self.xform.id_string
        })
        map_url = reverse(map_view, kwargs={
            'username': self.user.username,
            'id_string': self.xform.id_string
        })
        response = self.client.get(show_url)
        # check that map url doesnt show before we have submissions
        self.assertNotContains(response, map_url)

        # make a submission
        self._make_submission(
            os.path.join(
                os.path.dirname(__file__), "fixtures", "gps", "instances",
                "gps_1980-01-23_20-52-08.xml")
        )
        self.assertEqual(self.response.status_code, 201)
        # get new show view
        response = self.client.get(show_url)
        self.assertContains(response, map_url)
def test_user_sees_edit_btn(self):
response = self.client.get(self.url)
self.assertContains(response, 'edit</a>')
def test_user_sees_settings(self):
response = self.client.get(self.url)
self.assertContains(response, 'Settings')
def test_anon_no_edit_btn(self):
self.xform.shared = True
self.xform.save()
response = self.anon.get(self.url)
self.assertNotContains(response, 'edit</a>')
def test_anon_no_toggle_data_share_btn(self):
self.xform.shared = True
self.xform.save()
response = self.anon.get(self.url)
self.assertNotContains(response, 'PUBLIC</a>')
self.assertNotContains(response, 'PRIVATE</a>')
def test_show_add_sourc_doc_if_owner(self):
    """Owner sees the 'Source document:' upload field."""
    response = self.client.get(self.url)
    self.assertContains(response, 'Source document:')

def test_show_add_supporting_docs_if_owner(self):
    """Owner sees the 'Supporting document:' upload field."""
    response = self.client.get(self.url)
    self.assertContains(response, 'Supporting document:')

def test_show_add_supporting_media_if_owner(self):
    """Owner sees the 'Media upload:' field."""
    response = self.client.get(self.url)
    self.assertContains(response, 'Media upload:')

def test_show_add_mapbox_layer_if_owner(self):
    """Owner sees the mapbox layer 'JSONP url:' field."""
    response = self.client.get(self.url)
    self.assertContains(response, 'JSONP url:')

def test_hide_add_supporting_docs_if_not_owner(self):
    """Non-owners of a shared form see no upload controls."""
    self.xform.shared = True
    self.xform.save()
    response = self.anon.get(self.url)
    self.assertNotContains(response, 'Upload')
def test_load_photo_page(self):
    """The form's photo gallery page loads for the owner."""
    response = self.client.get(reverse(form_photos, kwargs={
        'username': self.user.username,
        'id_string': self.xform.id_string}))
    self.assertEqual(response.status_code, 200)

def test_load_from_uuid(self):
    """Loading a form by uuid redirects to its canonical show URL."""
    self.xform = XForm.objects.get(pk=self.xform.id)
    response = self.client.get(reverse(show, kwargs={
        'uuid': self.xform.uuid}))
    self.assertEqual(response.status_code, 302)
    self.assertEqual(response['Location'],
                     '%s%s' % (self.base_url, self.url))
def test_xls_replace_markup(self):
    """
    Check that update form is only shown when there are no submissions
    and the user is the owner
    """
    # when we have 0 submissions, update markup exists
    self.xform.shared = True
    self.xform.save()
    dashboard_url = reverse(profile, kwargs={
        'username': 'bob'
    })
    # owner viewing their dashboard sees the replace link
    response = self.client.get(dashboard_url)
    self.assertContains(
        response, 'href="#replace-transportation_2011_07_25"')
    # a non owner can't see the markup
    response = self.anon.get(self.url)
    self.assertNotContains(
        response, 'href="#replace-transportation_2011_07_25"')
    # when we have a submission, we cant update the xls form
    self._submit_transport_instance()
    response = self.client.get(dashboard_url)
    self.assertNotContains(
        response, 'href="#replace-transportation_2011_07_25"')

def test_non_owner_cannot_replace_form(self):
    """
    Test that a non owner cannot replace a shared xls form
    """
    xform_update_url = reverse(update_xform, kwargs={
        'username': self.user.username,
        'id_string': self.xform.id_string
    })
    self.xform.shared = True
    self.xform.save()
    # create and login another user
    self._create_user_and_login('peter', 'peter')
    response = self.client.post(xform_update_url)
    # since we are logged in, we'll be re-directed to our profile page
    self.assertRedirects(response, self.base_url,
                         status_code=302, target_status_code=302)
def test_replace_xform(self):
    """Replacing an XLS form updates the form in place.

    Posting an updated xls to update_xform must not create a new XForm
    row, and the replacement survey content (the ``preferred_means``
    question, present only in the updated file) must be loaded.
    """
    xform_update_url = reverse(update_xform, kwargs={
        'username': self.user.username,
        'id_string': self.xform.id_string
    })
    count = XForm.objects.count()
    xls_path = os.path.join(self.this_directory, "fixtures",
                            "transportation", "transportation_updated.xls")
    # .xls files are binary; open in binary mode so the upload is not
    # corrupted by text-mode newline/encoding translation.
    with open(xls_path, "rb") as xls_file:
        post_data = {'xls_file': xls_file}
        self.client.post(xform_update_url, post_data)
    # the update replaces the existing form rather than adding one
    self.assertEqual(XForm.objects.count(), count)
    self.xform = XForm.objects.order_by('id').reverse()[0]
    data_dictionary = self.xform.data_dictionary()
    # look for the preferred_means question
    # which is only in the updated xls
    is_updated_form = len([e.name for e in data_dictionary.survey_elements
                           if e.name == u'preferred_means']) > 0
    self.assertTrue(is_updated_form)
def test_update_form_doesnt_truncate_to_50_chars(self):
    """Replacing a form whose id_string exceeds 50 chars keeps it intact."""
    count = XForm.objects.count()
    xls_path = os.path.join(
        self.this_directory,
        "fixtures",
        "transportation",
        "transportation_with_long_id_string.xls")
    self._publish_xls_file_and_set_xform(xls_path)
    # Update the form
    xform_update_url = reverse(update_xform, kwargs={
        'username': self.user.username,
        'id_string': self.xform.id_string
    })
    updated_xls_path = os.path.join(
        self.this_directory,
        "fixtures",
        "transportation",
        "transportation_with_long_id_string_updated.xls")
    # .xls files are binary; open in binary mode so the upload is not
    # corrupted by text-mode newline/encoding translation.
    with open(updated_xls_path, "rb") as xls_file:
        post_data = {'xls_file': xls_file}
        self.client.post(xform_update_url, post_data)
    # Count should stay the same: one form published above, none added
    # by the update.
    self.assertEqual(XForm.objects.count(), count + 1)
    self.xform = XForm.objects.order_by('id').reverse()[0]
    data_dictionary = self.xform.data_dictionary()
    # look for the preferred_means question
    # which is only in the updated xls
    is_updated_form = len([e.name for e in data_dictionary.survey_elements
                           if e.name == u'preferred_means']) > 0
    self.assertTrue(is_updated_form)
def test_xform_delete(self):
    """Owner can delete their own form via delete_xform."""
    id_string = self.xform.id_string
    form_exists = XForm.objects.filter(
        user=self.user, id_string=id_string).count() == 1
    self.assertTrue(form_exists)
    xform_delete_url = reverse(delete_xform, kwargs={
        'username': self.user.username,
        'id_string': id_string
    })
    self.client.post(xform_delete_url)
    form_deleted = XForm.objects.filter(
        user=self.user, id_string=id_string).count() == 0
    self.assertTrue(form_deleted)

def test_non_owner_cant_delete_xform(self):
    """Posting to another user's delete URL must have no effect."""
    id_string = self.xform.id_string
    form_exists = XForm.objects.filter(
        user=self.user, id_string=id_string).count() == 1
    self.assertTrue(form_exists)
    xform_delete_url = reverse(delete_xform, kwargs={
        'username': self.user.username,
        'id_string': id_string
    })
    # save current user before we re-assign
    bob = self.user
    self._create_user_and_login('alice', 'alice')
    self.client.post(xform_delete_url)
    # bob's form must still exist
    form_deleted = XForm.objects.filter(
        user=bob, id_string=id_string).count() == 0
    self.assertFalse(form_deleted)
def test_xform_delete_cascades_mongo_instances(self):
    """Deleting a form should also remove its submissions from mongo."""
    initial_mongo_count = ParsedInstance.query_mongo(
        self.user.username, self.xform.id_string, '{}', '[]', '{}',
        count=True)[0]["count"]
    # submit instance
    for i in range(len(self.surveys)):
        self._submit_transport_instance(i)
    # check mongo record exists
    mongo_count = ParsedInstance.query_mongo(
        self.user.username, self.xform.id_string, '{}', '[]', '{}',
        count=True)[0]["count"]
    self.assertEqual(mongo_count, initial_mongo_count + len(self.surveys))
    # delete form
    xform_delete_url = reverse(delete_xform, kwargs={
        'username': self.user.username,
        'id_string': self.xform.id_string
    })
    self.client.post(xform_delete_url)
    # count should be back to the pre-submission baseline
    mongo_count = ParsedInstance.query_mongo(
        self.user.username, self.xform.id_string, '{}', '[]', '{}',
        count=True)[0]["count"]
    self.assertEqual(mongo_count, initial_mongo_count)
def test_enketo_preview(self):
    """Owner is redirected (302) to the enketo preview service."""
    url = reverse(
        enketo_preview, kwargs={'username': self.user.username,
                                'id_string': self.xform.id_string})
    response = self.client.get(url)
    self.assertEqual(response.status_code, 302)

def test_enketo_preview_works_on_shared_forms(self):
    """Anonymous users can preview a shared form."""
    self.xform.shared = True
    self.xform.save()
    url = reverse(
        enketo_preview, kwargs={'username': self.user.username,
                                'id_string': self.xform.id_string})
    response = self.anon.get(url)
    self.assertEqual(response.status_code, 302)

# TODO PLD disabling this test
@skip('Insensitivity is not enforced upon creation of id_strings.')
def test_form_urls_case_insensitive(self):
    """Form URLs would resolve regardless of case (currently skipped)."""
    url = reverse(show, kwargs={
        'username': self.user.username.upper(),
        'id_string': self.xform.id_string.upper()
    })
    response = self.client.get(url)
    self.assertEqual(response.status_code, 200)
def test_publish_xml_xlsform_download(self):
    """A form published from raw XML has no XLS file to download."""
    count = XForm.objects.count()
    path = os.path.join(
        self.this_directory, '..', '..', 'api', 'tests', 'fixtures',
        'forms', 'contributions', 'contributions.xml')
    # use a context manager so the file handle is closed even if
    # reading raises
    with open(path) as f:
        xml_file = ContentFile(f.read())
    xml_file.name = 'contributions.xml'
    self.xform = publish_xml_form(xml_file, self.user)
    self.assertTrue(XForm.objects.count() > count)
    response = self.client.get(reverse(download_xlsform, kwargs={
        'username': self.user.username,
        'id_string': 'contributions'
    }), follow=True)
    self.assertContains(response, 'No XLS file for your form ')
| jomolinare/kobocat | onadata/apps/main/tests/test_form_show.py | Python | bsd-2-clause | 18,586 |
#
# This is Seisflows
#
# See LICENCE file
#
###############################################################################
# Import system modules
import sys
import traceback
from glob import glob
from os.path import abspath, basename, dirname, exists
# Import Numpy
import numpy as np
# Local imports
from seisflows.config import ParameterError
from seisflows.workflow.base import base
# Global handles registered by the seisflows bootstrap machinery:
# run parameters, configured paths, and the active preprocessing module.
PAR = sys.modules['seisflows_parameters']
PATH = sys.modules['seisflows_paths']
preprocess = sys.modules['seisflows_preprocess']
class test_preprocess(base):
    """ Signal processing integration test

    Exercises the active preprocessing module end to end: reading,
    writing, the optional normalize/filter/mute stages, and (when both
    data and synthetics are configured) misfit and adjoint computation.
    Each stage prints its status and exits the process on failure.
    """

    def check(self):
        """ Checks parameters and paths
        """
        # data file format
        if 'FORMAT' not in PAR:
            raise ParameterError(PAR, 'FORMAT')

        # data normalization option
        if 'NORMALIZE' not in PAR:
            setattr(PAR, 'NORMALIZE', None)

        # data muting option
        if 'MUTE' not in PAR:
            setattr(PAR, 'MUTE', None)

        # data filtering option
        if 'FILTER' not in PAR:
            setattr(PAR, 'FILTER', None)

        # observed data are mandatory and must exist on disk
        if 'DATA' not in PATH:
            raise Exception
        if not exists(PATH.DATA):
            raise Exception

        # synthetics are optional; empty string means "not provided"
        if 'SYNTHETICS' not in PATH:
            setattr(PATH, 'SYNTHETICS', '')
        if PATH.SYNTHETICS:
            assert exists(PATH.SYNTHETICS)

        if 'WORKDIR' not in PATH:
            setattr(PATH, 'WORKDIR', abspath('.'))

    def main(self):
        """ Tests data processing methods
        """
        print 'testing reader...'
        data = self.test_reader()

        print 'testing writer...'
        self.test_writer(data)

        if PAR.NORMALIZE:
            print 'testing normalizing...'
            self.test_normalize(data)

        if PAR.FILTER:
            print 'testing filtering...'
            self.test_filter(data)

        if PAR.MUTE:
            print 'testing muting...'
            self.test_mute(data)

        # misfit/adjoint require observed data and synthetics
        if PAR.MISFIT and \
           PATH.DATA and \
           PATH.SYNTHETICS:
            dat = preprocess.reader(dirname(PATH.DATA),
                                    basename(PATH.DATA))
            syn = preprocess.reader(dirname(PATH.SYNTHETICS),
                                    basename(PATH.SYNTHETICS))

            print 'testing misfit...'
            self.test_misfit(dat, syn)

            print 'testing adjoint...'
            self.test_adjoint(dat, syn)

        print 'SUCCESS\n'

    def test_reader(self):
        """ Sets up the preprocessing module and reads the input data;
            exits the process on failure
        """
        try:
            preprocess.setup()
        except Exception, e:
            print 'setup FAILED\n'
            sys.exit(-1)
        try:
            data = preprocess.reader(dirname(PATH.DATA),
                                     basename(PATH.DATA))
        except Exception, e:
            print 'reader FAILED'
            sys.exit(-1)
        else:
            print ''
            return data

    def test_writer(self, data):
        """ Writes the freshly read data back out; exits on failure
        """
        try:
            if PAR.FORMAT in ['SU', 'su']:
                extension = '.su'
            else:
                extension = ''
            preprocess.writer(data, PATH.WORKDIR, 'output_data'+extension)
        except Exception, e:
            print 'writer FAILED\n'
            print e.message
            print e.__class__.__name__
            traceback.print_exc(e)
            sys.exit(-1)
        else:
            print ''

    def test_normalize(self, dat):
        """ Applies trace normalization and saves the result
        """
        try:
            out = preprocess.apply_normalize(dat)
        except Exception, e:
            print 'normalization FAILED\n'
            print e.message
            print e.__class__.__name__
            traceback.print_exc(e)
            sys.exit(-1)
        else:
            self.save(out, 'output_data_normalized')
            print ''

    def test_filter(self, dat):
        """ Applies the configured filter and saves the result
        """
        try:
            out = preprocess.apply_filter(dat)
        except Exception, e:
            print 'filtering FAILED\n'
            print e.message
            print e.__class__.__name__
            traceback.print_exc(e)
            sys.exit(-1)
        else:
            self.save(out, 'output_data_filtered')
            print ''

    def test_mute(self, dat):
        """ Applies muting and saves the result
        """
        try:
            out = preprocess.apply_mute(dat)
        except Exception, e:
            print 'muting FAILED\n'
            print e.message
            print e.__class__.__name__
            traceback.print_exc(e)
            sys.exit(-1)
        else:
            self.save(out, 'output_data_muted')
            print ''

    def test_misfit(self, dat, syn):
        """ Evaluates the misfit between data and synthetics, one value
            per trace, and writes them to PATH.WORKDIR/output_misfit
        """
        nt, dt, _ = preprocess.get_time_scheme(syn)
        nn, _ = preprocess.get_network_size(syn)

        rsd = []
        for ii in range(nn):
            rsd.append(preprocess.misfit(syn[ii].data, dat[ii].data, nt, dt))

        filename = PATH.WORKDIR+'/'+'output_misfit'
        np.savetxt(filename, rsd)
        print ''

    def test_adjoint(self, dat, syn):
        """ Computes adjoint traces from data and synthetics and saves
            them as 'output_adjoint'
        """
        nt, dt, _ = preprocess.get_time_scheme(syn)
        nn, _ = preprocess.get_network_size(syn)

        # note: adj aliases syn, whose traces are overwritten in place
        adj = syn
        for ii in range(nn):
            adj[ii].data = preprocess.adjoint(syn[ii].data, dat[ii].data, nt,
                                              dt)

        self.save(adj, 'output_adjoint')
        print ''

    def save(self, data, filename):
        """ Writes data with the file extension implied by PAR.FORMAT
        """
        if PAR.FORMAT in ['SU', 'su']:
            extension = '.su'
        else:
            extension = ''
        preprocess.writer(data, PATH.WORKDIR, filename+extension)
| rmodrak/seisflows | seisflows/workflow/test_preprocess.py | Python | bsd-2-clause | 5,497 |
import collections
import json
import os
import sys
import hashlib
import logging
from .utils import cached_property, get_resource
from .graphics import Image
def load_json(filename):
    """Load a JSON file that must contain an object (dict) at top level.

    Returns an empty dict when the file is missing or unreadable (e.g.
    an empty filename). Raises ValueError when the file is not valid
    JSON, or when it is valid JSON but not a dictionary.
    """
    try:
        with open(filename, 'r') as fp:
            result = json.load(fp)
    except ValueError:
        raise ValueError('Failed to load %s because it\'s not a valid JSON file' % filename)
    except IOError:
        #either non-existent file or empty filename
        return {}
    # Do the type check OUTSIDE the try block: previously the ValueError
    # raised here was caught by the "invalid JSON" handler above and
    # re-raised with a misleading message.
    if not isinstance(result, dict):
        raise ValueError('Failed to load %s because it should contain a dictionary object, not an array.' % filename)
    return result
def save_json(filename, obj):
    """Serialise *obj* as JSON and write it to *filename*."""
    with open(filename, 'w') as fp:
        fp.write(json.dumps(obj))
class SettingsDict(collections.MutableMapping):
    '''
    Represents the tingapp.settings dict-like object.

    The settings are loaded lazily from three files in the app bundle:

    - default_settings.json
        Default settings as defined by the app creator.
    - settings.json
        Settings as set by a user when installing the app
        (via Tide, for example).
    - local_settings.json
        Settings written by the app itself.

    Settings in later files override earlier ones. Changes are always
    saved to the local_settings.json file.
    '''
    def __init__(self, path):
        # note: we deliberately do NOT initialise self.dct or
        # self.local_settings here - this ensures an error is raised in
        # the event that they are accessed before self.load()
        self.loaded = False
        self.path = path

    def _ensure_loaded(self):
        # lazily load the three settings files on first access; every
        # mapping operation funnels through here (previously this
        # two-line check was duplicated in each dunder method)
        if not self.loaded:
            self.load()

    def __contains__(self, item):
        self._ensure_loaded()
        return item in self.dct

    def __len__(self):
        self._ensure_loaded()
        return len(self.dct)

    def __getitem__(self, key):
        self._ensure_loaded()
        return self.dct[key]

    def __setitem__(self, key, value):
        self._ensure_loaded()
        self.dct[key] = value
        # writes go to the app-local overlay and are persisted at once
        self.local_settings[key] = value
        self.save()

    def __delitem__(self, key):
        self._ensure_loaded()
        # NOTE(review): this removes the key only from the local
        # overlay - the merged view (self.dct) keeps its value until
        # the next load(), and the deletion is not persisted via
        # save(). Presumably intentional (revert to defaults), but
        # worth confirming.
        del self.local_settings[key]

    def __iter__(self):
        self._ensure_loaded()
        return iter(self.dct)

    def load(self):
        """Build the merged settings dict from the three JSON files."""
        self.dct = load_json(os.path.join(self.path, 'default_settings.json'))
        self.dct.update(load_json(os.path.join(self.path, 'settings.json')))
        self.local_settings = load_json(os.path.join(self.path, 'local_settings.json'))
        self.dct.update(self.local_settings)
        self.loaded = True

    def save(self):
        """Persist the app-written overlay to local_settings.json."""
        save_json(os.path.join(self.path, 'local_settings.json'), self.local_settings)
def generic_icon(name):
    """Build a fallback 96x96 app icon.

    The tile colour is chosen deterministically from an md5 digest of
    the app name, and the app's first letter is drawn on top.
    """
    palette = [
        'blue', 'teal', 'green', 'olive', 'yellow', 'orange', 'red',
        'fuchsia', 'purple', 'maroon'
    ]
    digest = int(hashlib.md5(name).hexdigest(), 16)
    colour = palette[digest % len(palette)]
    initial = name[0].lower()

    icon = Image(size=(96, 96))
    icon.fill(color=colour)
    icon.image(get_resource('default-icon-texture-96.png'))

    # nudge the glyph so ascenders/descenders look vertically centred
    offset = 0
    if initial in ('g', 'p', 'q', 'y'):
        offset -= 8
    if initial in ('b', 'd', 'f', 'h', 'k', 'l', 't'):
        offset += 6

    icon.text(initial,
              xy=(52, 41 + offset),
              color='white',
              font=get_resource('MiniSet2.ttf'),
              font_size=70)

    # the generated glyphs run a little large compared to the real
    # icons, so redraw slightly smaller
    scaled = Image(size=(96, 96))
    scaled.image(icon, scale=0.9)
    return scaled
class TingApp(object):
    """Inspect a tingbot app bundle: its metadata, settings and icon."""

    def __init__(self, path=None):
        """path is the root path of the app you want to inspect
        if path is None, then will let you inspect the current app"""
        if path is None:
            # default to the directory containing the running script
            path = os.path.dirname(os.path.abspath(sys.argv[0]))
        self.path = path
        self.settings = SettingsDict(path)

    @cached_property
    def info(self):
        # app metadata as written by the bundle author ({} if missing)
        return load_json(os.path.join(self.path, 'app.tbinfo'))

    @property
    def name(self):
        # fall back to the bundle directory name when no name is set
        if 'name' in self.info and self.info['name'] != '':
            return self.info['name']
        else:
            return os.path.basename(self.path)

    @cached_property
    def icon(self):
        # load icon.png when present and readable, otherwise build a
        # generated placeholder; always returns a 96x96 image
        icon_path = os.path.join(self.path, 'icon.png')

        if not os.path.isfile(icon_path):
            return generic_icon(self.name)

        try:
            icon = Image.load(icon_path)
        except:
            logging.exception('Failed to load icon at %s', icon_path)
            return generic_icon(self.name)

        if icon.size != (96, 96):
            # resize the icon by redrawing in the correct size
            resized_icon = Image(size=(96, 96))
            resized_icon.image(icon, scale='shrinkToFit')
            return resized_icon

        return icon

# the app the current process belongs to
app = TingApp()
| furbrain/tingbot-python | tingbot/tingapp.py | Python | bsd-2-clause | 5,174 |
"""
.. _ex-morph-surface:
=============================
Morph surface source estimate
=============================
This example demonstrates how to morph an individual subject's
:class:`mne.SourceEstimate` to a common reference space. We achieve this using
:class:`mne.SourceMorph`. Pre-computed data will be morphed based on
a spherical representation of the cortex computed using the spherical
registration of :ref:`FreeSurfer <tut-freesurfer>`
(https://surfer.nmr.mgh.harvard.edu/fswiki/SurfaceRegAndTemplates) [1]_. This
transform will be used to morph the surface vertices of the subject towards the
reference vertices. Here we will use 'fsaverage' as a reference space (see
https://surfer.nmr.mgh.harvard.edu/fswiki/FsAverage).
The transformation will be applied to the surface source estimate. A plot
depicting the successful morph will be created for the spherical and inflated
surface representation of ``'fsaverage'``, overlaid with the morphed surface
source estimate.
References
----------
.. [1] Greve D. N., Van der Haegen L., Cai Q., Stufflebeam S., Sabuncu M.
R., Fischl B., Brysbaert M.
A Surface-based Analysis of Language Lateralization and Cortical
Asymmetry. Journal of Cognitive Neuroscience 25(9), 1477-1492, 2013.
.. note:: For background information about morphing see :ref:`ch_morph`.
"""
# Author: Tommy Clausner <tommy.clausner@gmail.com>
#
# License: BSD (3-clause)
import os

import mne
from mne.datasets import sample

print(__doc__)

###############################################################################
# Setup paths

# data_path() downloads the MNE sample dataset on first use
sample_dir_raw = sample.data_path()
sample_dir = os.path.join(sample_dir_raw, 'MEG', 'sample')
subjects_dir = os.path.join(sample_dir_raw, 'subjects')

fname_stc = os.path.join(sample_dir, 'sample_audvis-meg')

###############################################################################
# Load example data

# Read stc from file
stc = mne.read_source_estimate(fname_stc, subject='sample')
###############################################################################
# Setting up SourceMorph for SourceEstimate
# -----------------------------------------
#
# In MNE surface source estimates represent the source space simply as
# lists of vertices (see
# :ref:`tut-source-estimate-class`).
# This list can either be obtained from
# :class:`mne.SourceSpaces` (src) or from the ``stc`` itself.
#
# Since the default ``spacing`` (resolution of surface mesh) is ``5`` and
# ``subject_to`` is set to 'fsaverage', :class:`mne.SourceMorph` will use
# default ico-5 ``fsaverage`` vertices to morph, which are the special
# values ``[np.arange(10242)] * 2``.
#
# .. note:: This is not generally true for other subjects! The set of vertices
# used for ``fsaverage`` with ico-5 spacing was designed to be
# special. ico-5 spacings for other subjects (or other spacings
# for fsaverage) must be calculated and will not be consecutive
# integers.
#
# If src was not defined, the morph will actually not be precomputed, because
# we lack the vertices *from* that we want to compute. Instead the morph will
# be set up and when applying it, the actual transformation will be computed on
# the fly.
#
# Initialize SourceMorph for SourceEstimate
morph = mne.compute_source_morph(stc, subject_from='sample',
                                 subject_to='fsaverage',
                                 subjects_dir=subjects_dir)

###############################################################################
# Apply morph to (Vector) SourceEstimate
# --------------------------------------
#
# The morph will be applied to the source estimate data, by giving it as the
# first argument to the morph we computed above.

stc_fsaverage = morph.apply(stc)
###############################################################################
# Plot results
# ------------
# Define plotting parameters
surfer_kwargs = dict(
    hemi='lh', subjects_dir=subjects_dir,
    clim=dict(kind='value', lims=[8, 12, 15]), views='lateral',
    initial_time=0.09, time_unit='s', size=(800, 800),
    smoothing_steps=5)

# Same morphed data, shown on two surface representations of fsaverage.

# As spherical surface
brain = stc_fsaverage.plot(surface='sphere', **surfer_kwargs)

# Add title
brain.add_text(0.1, 0.9, 'Morphed to fsaverage (spherical)', 'title',
               font_size=16)

###############################################################################
# As inflated surface
brain_inf = stc_fsaverage.plot(surface='inflated', **surfer_kwargs)

# Add title
brain_inf.add_text(0.1, 0.9, 'Morphed to fsaverage (inflated)', 'title',
                   font_size=16)
###############################################################################
# Reading and writing SourceMorph from and to disk
# ------------------------------------------------
#
# An instance of SourceMorph can be saved, by calling
# :meth:`morph.save <mne.SourceMorph.save>`.
#
# This method allows for specification of a filename under which the ``morph``
# will be save in ".h5" format. If no file extension is provided, "-morph.h5"
# will be appended to the respective defined filename::
#
# >>> morph.save('my-file-name')
#
# Reading a saved source morph can be achieved by using
# :func:`mne.read_source_morph`::
#
# >>> morph = mne.read_source_morph('my-file-name-morph.h5')
#
# Once the environment is set up correctly, no information such as
# ``subject_from`` or ``subjects_dir`` must be provided, since it can be
# inferred from the data and use morph to 'fsaverage' by default. SourceMorph
# can further be used without creating an instance and assigning it to a
# variable. Instead :func:`mne.compute_source_morph` and
# :meth:`mne.SourceMorph.apply` can be
# easily chained into a handy one-liner. Taking this together the shortest
# possible way to morph data directly would be:
# compute the morph and immediately apply it to the same stc
stc_fsaverage = mne.compute_source_morph(stc,
                                         subjects_dir=subjects_dir).apply(stc)
| mne-tools/mne-tools.github.io | 0.20/_downloads/e414d894f3f4079b3e5897dd9c691af7/plot_morph_surface_stc.py | Python | bsd-3-clause | 5,938 |
from _pydev_imps._pydev_saved_modules import threading
def wrapper(fun):
    """Wrap *fun* so a post-run hook fires after each call.

    Used by the debugger to patch callables such as ``Thread.run`` so it
    can observe when they finish.
    """
    def pydev_after_run_call():
        # placeholder hook; intentionally empty
        pass

    def inner(*args, **kwargs):
        # propagate the wrapped callable's return value - the original
        # implementation discarded it, which broke wrapping of any
        # callable whose result matters
        result = fun(*args, **kwargs)
        pydev_after_run_call()
        return result
    return inner
def wrap_attr(obj, attr):
    """Replace ``obj.attr`` with a wrapped version and flag the object
    as patched so it is not wrapped a second time."""
    original = getattr(obj, attr)
    setattr(obj, attr, wrapper(original))
    obj._pydev_run_patched = True
class ObjectWrapper(object):
    """Transparent proxy around an arbitrary object.

    Every method call on the proxy is bracketed by ``call_begin``/
    ``call_end`` (no-ops here; subclasses override them to observe the
    calls). Fluent APIs keep going through the proxy: when a method
    returns the wrapped object itself, the proxy is returned instead.
    """
    def __init__(self, obj):
        self.wrapped_object = obj
        try:
            import functools
            functools.update_wrapper(self, obj)
        except:
            pass

    def __getattr__(self, attr):
        orig_attr = getattr(self.wrapped_object, attr)
        if callable(orig_attr):
            def patched_attr(*args, **kwargs):
                self.call_begin(attr)
                result = orig_attr(*args, **kwargs)
                self.call_end(attr)
                # identity check (was ==, which could trigger arbitrary
                # __eq__): only substitute the proxy when the method
                # really returned the wrapped object itself
                if result is self.wrapped_object:
                    return self
                return result
            return patched_attr
        else:
            return orig_attr

    def call_begin(self, attr):
        pass

    def call_end(self, attr):
        pass

    def __enter__(self):
        self.call_begin("__enter__")
        result = self.wrapped_object.__enter__()
        self.call_end("__enter__")
        # keep proxying inside the with-block; the original returned
        # None here, losing the acquired object entirely
        return self if result is self.wrapped_object else result

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.call_begin("__exit__")
        # propagate the wrapped __exit__'s return value: a true value
        # suppresses the in-flight exception, which the original
        # implementation silently dropped; also fire call_end, which
        # the original forgot for __exit__
        result = self.wrapped_object.__exit__(exc_type, exc_val, exc_tb)
        self.call_end("__exit__")
        return result
def factory_wrapper(fun):
    """Decorate factory *fun* so every object it creates comes back
    wrapped in an ObjectWrapper."""
    def inner(*args, **kwargs):
        return ObjectWrapper(fun(*args, **kwargs))
    return inner
def wrap_threads():
    """Monkey-patch the threading/queue factories so every lock and
    queue the program creates is wrapped in an ObjectWrapper (lets the
    debugger's concurrency analyser observe acquire/release calls)."""
    # TODO: add wrappers for thread and _thread
    # import _thread as mod
    # print("Thread imported")
    # mod.start_new_thread = wrapper(mod.start_new_thread)
    import threading
    threading.Lock = factory_wrapper(threading.Lock)
    threading.RLock = factory_wrapper(threading.RLock)

    # queue patching
    try:
        # Python 3 module name
        import queue  # @UnresolvedImport
        queue.Queue = factory_wrapper(queue.Queue)
    except:
        # Python 2 fallback
        import Queue
        Queue.Queue = factory_wrapper(Queue.Queue)
| SlicerRt/SlicerDebuggingTools | PyDevRemoteDebug/ptvsd-4.1.3/ptvsd/_vendored/pydevd/pydevd_concurrency_analyser/pydevd_thread_wrappers.py | Python | bsd-3-clause | 2,233 |
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Wrapper for `python -m` to make running tools simpler.
A tool is defined as a python module with a __main__.py file. This latter file
is run by the present script.
In particular, allows gclient to change directories when running hooks for
infra.
"""
# this wrapper only makes sense as an entry point, never as an import
assert __name__ == '__main__'

import imp
import os
import sys

# this script, its directory, and the virtualenv it bootstraps
RUNPY_PATH = os.path.abspath(__file__)
ROOT_PATH = os.path.dirname(RUNPY_PATH)
ENV_PATH = os.path.join(ROOT_PATH, 'ENV')

# Do not want to mess with sys.path, load the module directly.
run_helper = imp.load_source(
    'run_helper', os.path.join(ROOT_PATH, 'bootstrap', 'run_helper.py'))

sys.exit(run_helper.run_py_main(sys.argv[1:], RUNPY_PATH, ENV_PATH, 'infra'))
| nicko96/Chrome-Infra | run.py | Python | bsd-3-clause | 872 |
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# create pipeline
#
# read the PLOT3D combustor dataset (structured grid + solution)
pl3d = vtk.vtkMultiBlockPLOT3DReader()
pl3d.SetXYZFileName(VTK_DATA_ROOT + "/Data/combxyz.bin")
pl3d.SetQFileName(VTK_DATA_ROOT + "/Data/combq.bin")
pl3d.SetScalarFunctionNumber(100)
pl3d.SetVectorFunctionNumber(202)
pl3d.Update()
output = pl3d.GetOutput().GetBlock(0)

# create a crazy implicit function
# (union of two overlapping spheres placed around the dataset centre)
center = output.GetCenter()
sphere = vtk.vtkSphere()
sphere.SetCenter(center)
sphere.SetRadius(2.0)
sphere2 = vtk.vtkSphere()
sphere2.SetCenter(center[0] + 4.0, center[1], center[2])
sphere2.SetRadius(4.0)
boolOp = vtk.vtkImplicitBoolean()
boolOp.SetOperationTypeToUnion()
boolOp.AddFunction(sphere)
boolOp.AddFunction(sphere2)

# clip the structured grid to produce a tetrahedral mesh
clip = vtk.vtkClipDataSet()
clip.SetInputData(output)
clip.SetClipFunction(boolOp)
clip.InsideOutOn()

# extract the outer surface of the clipped volume for rendering
gf = vtk.vtkGeometryFilter()
gf.SetInputConnection(clip.GetOutputPort())
clipMapper = vtk.vtkPolyDataMapper()
clipMapper.SetInputConnection(gf.GetOutputPort())
clipActor = vtk.vtkActor()
clipActor.SetMapper(clipMapper)

# wireframe outline of the original structured grid, for context
outline = vtk.vtkStructuredGridOutlineFilter()
outline.SetInputData(output)
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)

# Create the RenderWindow, Renderer and both Actors
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)

# Add the actors to the renderer, set the background and size
#
ren1.AddActor(clipActor)
ren1.AddActor(outlineActor)
ren1.SetBackground(1, 1, 1)
renWin.SetSize(250, 250)
ren1.SetBackground(0.1, 0.2, 0.4)

# fixed camera so the regression image is reproducible
cam1 = ren1.GetActiveCamera()
cam1.SetClippingRange(3.95297, 50)
cam1.SetFocalPoint(8.88908, 0.595038, 29.3342)
cam1.SetPosition(-12.3332, 31.7479, 41.2387)
cam1.SetViewUp(0.060772, -0.319905, 0.945498)
iren.Initialize()

# render the image
#
renWin.Render()
#iren.Start()
| hlzz/dotfiles | graphics/VTK-7.0.0/Filters/General/Testing/Python/clipComb.py | Python | bsd-3-clause | 2,190 |
# Copyright (C) 2015 Henrique Pereira Coutada Miranda
# All rights reserved.
#
# This file is part of yambopy
#
#
"""
Create, read and write yambo input files
Read, modify and write yambo databases
Analyse results from yambo calculations
Modules:
io
- YamboIn: read, write and manipulate yambo input files
- YamboOut: read yambo output files and save in .json
dbs
- YamboSaveDB: read information in the ns.db1
- YamboLatticeDB: read lattice parameters, symmetries and k-points from ns.db1
- YamboDipolesDB: dipole matrix elements from ndb.dip*
- YamboStaticScreeningDB: static dielectric screening from ndb.em1s*
- YamboElectronsDB: read the electronic states from ns.db1
- YamboQPDB: read the quasiparticle energies db ndb.QP
- YamboGreenDB: read the Green's functions calculated using yambo
bse
- YamboExcitonWaveFunctionXSF: read the excitonic wavefunction in XSF format
- YamboExcitonWeight: read the excitonic weights from the ypp output file
- YamboBSEAbsorptionSpectra: generate a .json file with the bse absorption calculation (including information about the excitons)
analyse:
- YamboAnalyser: read .json files generated with yamboout and plot them together
- recipes: user contributed scripts
"""
import numpy as np
from yambopy.jsonencoder import *
from yambopy.plot import *
from yambopy.units import *
#lattice stuff
from yambopy.lattice import *
#yambo databases
from yambopy.dbs.savedb import *
from yambopy.dbs.dipolesdb import *
from yambopy.dbs.qpdb import *
from yambopy.dbs.em1sdb import *
from yambopy.dbs.greendb import *
from yambopy.dbs.latticedb import *
from yambopy.dbs.electronsdb import *
from yambopy.dbs.rtdb import *
from yambopy.dbs.excitondb import *
from yambopy.dbs.wfdb import *
from yambopy.dbs.elphondb import *
#input/output files
from yambopy.io.inputfile import *
from yambopy.io.outputfile import *
#bse/excitons files
from yambopy.bse.excitonwf import *
from yambopy.bse.excitonweight import *
from yambopy.bse.bse_absorption import *
#analyse stuff
from yambopy.analyse import *
from yambopy.recipes import *
| henriquemiranda/yambopy | yambopy/__init__.py | Python | bsd-3-clause | 2,164 |
"""Test the split module"""
from __future__ import division
import warnings
import pytest
import numpy as np
from scipy.sparse import coo_matrix, csc_matrix, csr_matrix
from scipy import stats
from itertools import combinations
from itertools import combinations_with_replacement
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.validation import _num_samples
from sklearn.utils.mocking import MockDataFrame
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GroupKFold
from sklearn.model_selection import TimeSeriesSplit
from sklearn.model_selection import LeaveOneOut
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.model_selection import LeavePOut
from sklearn.model_selection import LeavePGroupsOut
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import GroupShuffleSplit
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import PredefinedSplit
from sklearn.model_selection import check_cv
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RepeatedKFold
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.linear_model import Ridge
from sklearn.model_selection._split import _validate_shuffle_split
from sklearn.model_selection._split import _CVIterableWrapper
from sklearn.model_selection._split import _build_repr
from sklearn.model_selection._split import CV_WARNING
from sklearn.model_selection._split import NSPLIT_WARNING
from sklearn.datasets import load_digits
from sklearn.datasets import make_classification
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.utils.fixes import comb
from sklearn.svm import SVC
# Shared fixtures for the split tests below.
X = np.ones(10)
y = np.arange(10) // 2
P_sparse = coo_matrix(np.eye(5))

# A variety of group labellings (int arrays, lists, strings) used to
# exercise the group-aware splitters.
test_groups = (
    np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
    np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
    np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
    np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
    [1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3],
    ['1', '1', '1', '1', '2', '2', '2', '3', '3', '3', '3', '3'])
digits = load_digits()
class MockClassifier(object):
    """Dummy classifier to test the cross-validation"""

    def __init__(self, a=0, allow_nd=False):
        # a: arbitrary hyper-parameter (also drives score());
        # allow_nd: when True, fit/predict flatten >2-D inputs
        self.a = a
        self.allow_nd = allow_nd

    def fit(self, X, Y=None, sample_weight=None, class_prior=None,
            sparse_sample_weight=None, sparse_param=None, dummy_int=None,
            dummy_str=None, dummy_obj=None, callback=None):
        """The dummy arguments are to test that this fit function can
        accept non-array arguments through cross-validation, such as:
            - int
            - str (this is actually array-like)
            - object
            - function
        """
        self.dummy_int = dummy_int
        self.dummy_str = dummy_str
        self.dummy_obj = dummy_obj
        if callback is not None:
            callback(self)

        if self.allow_nd:
            X = X.reshape(len(X), -1)
        if X.ndim >= 3 and not self.allow_nd:
            raise ValueError('X cannot be d')
        # each forwarded fit_param must arrive with a shape matching the
        # (possibly cross-validation-sliced) data
        if sample_weight is not None:
            assert_true(sample_weight.shape[0] == X.shape[0],
                        'MockClassifier extra fit_param sample_weight.shape[0]'
                        ' is {0}, should be {1}'.format(sample_weight.shape[0],
                                                        X.shape[0]))
        if class_prior is not None:
            assert_true(class_prior.shape[0] == len(np.unique(y)),
                        'MockClassifier extra fit_param class_prior.shape[0]'
                        ' is {0}, should be {1}'.format(class_prior.shape[0],
                                                        len(np.unique(y))))
        if sparse_sample_weight is not None:
            fmt = ('MockClassifier extra fit_param sparse_sample_weight'
                   '.shape[0] is {0}, should be {1}')
            assert_true(sparse_sample_weight.shape[0] == X.shape[0],
                        fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
        if sparse_param is not None:
            fmt = ('MockClassifier extra fit_param sparse_param.shape '
                   'is ({0}, {1}), should be ({2}, {3})')
            assert_true(sparse_param.shape == P_sparse.shape,
                        fmt.format(sparse_param.shape[0],
                                   sparse_param.shape[1],
                                   P_sparse.shape[0], P_sparse.shape[1]))
        return self

    def predict(self, T):
        # "prediction" is simply the first feature column
        if self.allow_nd:
            T = T.reshape(len(T), -1)
        return T[:, 0]

    def score(self, X=None, Y=None):
        # deterministic score depending only on the hyper-parameter a
        return 1. / (1 + np.abs(self.a))

    def get_params(self, deep=False):
        return {'a': self.a, 'allow_nd': self.allow_nd}
@ignore_warnings
def test_cross_validator_with_default_params():
    """Smoke-test the cross-validators with default parameters.

    For each splitter: get_n_splits returns the expected count, 1d and 2d
    X produce identical splits, the yielded train/test indices are
    integers, the repr matches, and get_n_splits raises ValueError when X
    is required but None.
    """
    n_samples = 4
    n_unique_groups = 4
    n_splits = 2
    p = 2
    n_shuffle_splits = 10  # (the default value)
    X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    X_1d = np.array([1, 2, 3, 4])
    y = np.array([1, 1, 2, 2])
    groups = np.array([1, 2, 3, 4])
    loo = LeaveOneOut()
    lpo = LeavePOut(p)
    kf = KFold(n_splits)
    skf = StratifiedKFold(n_splits)
    lolo = LeaveOneGroupOut()
    lopo = LeavePGroupsOut(p)
    ss = ShuffleSplit(random_state=0)
    ps = PredefinedSplit([1, 1, 2, 2])  # n_splits = np of unique folds = 2
    loo_repr = "LeaveOneOut()"
    lpo_repr = "LeavePOut(p=2)"
    kf_repr = "KFold(n_splits=2, random_state=None, shuffle=False)"
    skf_repr = "StratifiedKFold(n_splits=2, random_state=None, shuffle=False)"
    lolo_repr = "LeaveOneGroupOut()"
    lopo_repr = "LeavePGroupsOut(n_groups=2)"
    ss_repr = ("ShuffleSplit(n_splits=10, random_state=0, "
               "test_size='default',\n       train_size=None)")
    ps_repr = "PredefinedSplit(test_fold=array([1, 1, 2, 2]))"
    n_splits_expected = [n_samples, comb(n_samples, p), n_splits, n_splits,
                         n_unique_groups, comb(n_unique_groups, p),
                         n_shuffle_splits, 2]
    for i, (cv, cv_repr) in enumerate(zip(
            [loo, lpo, kf, skf, lolo, lopo, ss, ps],
            [loo_repr, lpo_repr, kf_repr, skf_repr, lolo_repr, lopo_repr,
             ss_repr, ps_repr])):
        # Test if get_n_splits works correctly
        assert_equal(n_splits_expected[i], cv.get_n_splits(X, y, groups))
        # Test if the cross-validator works as expected even if
        # the data is 1d
        np.testing.assert_equal(list(cv.split(X, y, groups)),
                                list(cv.split(X_1d, y, groups)))
        # Test that train, test indices returned are integers
        for train, test in cv.split(X, y, groups):
            assert_equal(np.asarray(train).dtype.kind, 'i')
            # BUG FIX: the second assertion previously re-checked `train`;
            # the `test` indices must be validated as well.
            assert_equal(np.asarray(test).dtype.kind, 'i')
        # Test if the repr works without any errors
        assert_equal(cv_repr, repr(cv))
    # ValueError for get_n_splits methods
    msg = "The 'X' parameter should not be None."
    assert_raise_message(ValueError, msg,
                         loo.get_n_splits, None, y, groups)
    assert_raise_message(ValueError, msg,
                         lpo.get_n_splits, None, y, groups)
@pytest.mark.filterwarnings('ignore: You should specify a value')  # 0.22
def test_2d_y():
    """Smoke test: every splitter must accept 1d and column-vector (2d) y.

    Multilabel y either splits cleanly or is rejected with the standard
    target-type ValueError message.
    """
    # smoke test for 2d y and multi-label
    n_samples = 30
    rng = np.random.RandomState(1)
    X = rng.randint(0, 3, size=(n_samples, 2))
    y = rng.randint(0, 3, size=(n_samples,))
    y_2d = y.reshape(-1, 1)
    y_multilabel = rng.randint(0, 2, size=(n_samples, 3))
    groups = rng.randint(0, 3, size=(n_samples,))
    splitters = [LeaveOneOut(), LeavePOut(p=2), KFold(), StratifiedKFold(),
                 RepeatedKFold(), RepeatedStratifiedKFold(),
                 ShuffleSplit(), StratifiedShuffleSplit(test_size=.5),
                 GroupShuffleSplit(), LeaveOneGroupOut(),
                 LeavePGroupsOut(n_groups=2), GroupKFold(), TimeSeriesSplit(),
                 PredefinedSplit(test_fold=groups)]
    for splitter in splitters:
        list(splitter.split(X, y, groups))
        list(splitter.split(X, y_2d, groups))
        try:
            list(splitter.split(X, y_multilabel, groups))
        except ValueError as e:
            # A rejection is only acceptable with the standard message.
            allowed_target_types = ('binary', 'multiclass')
            msg = "Supported target types are: {}. Got 'multilabel".format(
                allowed_target_types)
            assert msg in str(e)
def check_valid_split(train, test, n_samples=None):
    """Assert that *train* and *test* index collections form a valid split.

    The two sides must be disjoint; when *n_samples* is given, together
    they must also cover every index in ``range(n_samples)``.
    """
    # Sets give more informative assertion failure messages than arrays.
    train_set = set(train)
    test_set = set(test)
    # A sample must never appear on both sides of the split.
    assert_equal(train_set.intersection(test_set), set())
    if n_samples is not None:
        # Together, the two sides must account for every sample index.
        assert_equal(train_set.union(test_set), set(range(n_samples)))
def check_cv_coverage(cv, X, y, groups, expected_n_splits=None):
    """Check that *cv* yields valid splits whose test folds cover all data.

    When *expected_n_splits* is given, also assert that get_n_splits
    reports it; otherwise trust get_n_splits and only verify that the
    iterator yields that many splits.
    """
    n_samples = _num_samples(X)
    reported_n_splits = cv.get_n_splits(X, y, groups)
    if expected_n_splits is not None:
        assert_equal(reported_n_splits, expected_n_splits)
    else:
        expected_n_splits = reported_n_splits
    seen_test_indices = set()
    n_iterations = 0
    for train, test in cv.split(X, y, groups):
        # Every individual split must be disjoint and complete.
        check_valid_split(train, test, n_samples=n_samples)
        n_iterations += 1
        seen_test_indices.update(test)
    # The iterator must yield exactly as many splits as promised, and the
    # accumulated test folds must cover the whole dataset.
    assert_equal(n_iterations, expected_n_splits)
    if n_samples is not None:
        assert_equal(seen_test_indices, set(range(n_samples)))
def test_kfold_valueerrors():
    """KFold / StratifiedKFold must reject impossible configurations and
    warn when the least populated class is too small."""
    X1 = np.array([[1, 2], [3, 4], [5, 6]])
    X2 = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]])
    # Check that errors are raised if there is not enough samples
    # BUG FIX: this line was a bare tuple `(ValueError, next, ...)` that
    # asserted nothing; wrap it in assert_raises as clearly intended.
    assert_raises(ValueError, next, KFold(4).split(X1))
    # Check that a warning is raised if the least populated class has too few
    # members.
    y = np.array([3, 3, -1, -1, 3])
    skf_3 = StratifiedKFold(3)
    assert_warns_message(Warning, "The least populated class",
                         next, skf_3.split(X2, y))
    # Check that despite the warning the folds are still computed even
    # though all the classes are not necessarily represented at on each
    # side of the split at each split
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        check_cv_coverage(skf_3, X2, y, groups=None, expected_n_splits=3)
    # Check that errors are raised if all n_groups for individual
    # classes are less than n_splits.
    y = np.array([3, 3, -1, -1, 2])
    assert_raises(ValueError, next, skf_3.split(X2, y))
    # Error when number of folds is <= 1
    assert_raises(ValueError, KFold, 0)
    assert_raises(ValueError, KFold, 1)
    error_string = ("k-fold cross-validation requires at least one"
                    " train/test split")
    assert_raise_message(ValueError, error_string,
                         StratifiedKFold, 0)
    assert_raise_message(ValueError, error_string,
                         StratifiedKFold, 1)
    # When n_splits is not integer:
    assert_raises(ValueError, KFold, 1.5)
    assert_raises(ValueError, KFold, 2.0)
    assert_raises(ValueError, StratifiedKFold, 1.5)
    assert_raises(ValueError, StratifiedKFold, 2.0)
    # When shuffle is not a bool:
    assert_raises(TypeError, KFold, n_splits=4, shuffle=None)
def test_kfold_indices():
    """KFold test folds must jointly cover every sample index."""
    # Evenly divisible case: 18 samples into 3 folds.
    check_cv_coverage(KFold(3), np.ones(18), y=None, groups=None,
                      expected_n_splits=3)
    # Uneven case: 17 samples cannot form equal-sized folds, but the
    # coverage guarantee must still hold.
    X2 = np.ones(17)
    check_cv_coverage(KFold(3), X2, y=None, groups=None, expected_n_splits=3)
    # get_n_splits simply reports the requested number of folds.
    assert_equal(5, KFold(5).get_n_splits(X2))
def test_kfold_no_shuffle():
    """Without shuffling, KFold must keep samples in their original order."""
    X2 = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]
    # 4 samples, 2 folds: contiguous halves taken in order.
    expected_4 = [([2, 3], [0, 1]), ([0, 1], [2, 3])]
    for (exp_train, exp_test), (train, test) in zip(
            expected_4, KFold(2).split(X2[:-1])):
        assert_array_equal(train, exp_train)
        assert_array_equal(test, exp_test)
    # 5 samples, 2 folds: the first test fold receives the extra sample.
    expected_5 = [([3, 4], [0, 1, 2]), ([0, 1, 2], [3, 4])]
    for (exp_train, exp_test), (train, test) in zip(
            expected_5, KFold(2).split(X2)):
        assert_array_equal(train, exp_train)
        assert_array_equal(test, exp_test)
def test_stratified_kfold_no_shuffle():
    """Unshuffled StratifiedKFold must keep the data order predictable and
    accept string labels identically to integer labels."""
    # Manually check that StratifiedKFold preserves the data ordering as much
    # as possible on toy datasets in order to avoid hiding sample dependencies
    # when possible
    X, y = np.ones(4), [1, 1, 0, 0]
    splits = StratifiedKFold(2).split(X, y)
    train, test = next(splits)
    assert_array_equal(test, [0, 2])
    assert_array_equal(train, [1, 3])
    train, test = next(splits)
    assert_array_equal(test, [1, 3])
    assert_array_equal(train, [0, 2])
    X, y = np.ones(7), [1, 1, 1, 0, 0, 0, 0]
    splits = StratifiedKFold(2).split(X, y)
    train, test = next(splits)
    assert_array_equal(test, [0, 1, 3, 4])
    assert_array_equal(train, [2, 5, 6])
    train, test = next(splits)
    assert_array_equal(test, [2, 5, 6])
    assert_array_equal(train, [0, 1, 3, 4])
    # Check if get_n_splits returns the number of folds
    assert_equal(5, StratifiedKFold(5).get_n_splits(X, y))
    # Make sure string labels are also supported
    X = np.ones(7)
    y1 = ['1', '1', '1', '0', '0', '0', '0']
    y2 = [1, 1, 1, 0, 0, 0, 0]
    np.testing.assert_equal(
        list(StratifiedKFold(2).split(X, y1)),
        list(StratifiedKFold(2).split(X, y2)))
def test_stratified_kfold_ratios():
    """Every StratifiedKFold split must reproduce the overall class ratios.

    Checked with shuffling both off and on.
    """
    n_samples = 1000
    X = np.ones(n_samples)
    y = np.array([4] * int(0.10 * n_samples) +
                 [0] * int(0.89 * n_samples) +
                 [1] * int(0.01 * n_samples))
    # (label, expected proportion) pairs for the three classes above.
    class_ratios = [(4, 0.10), (0, 0.89), (1, 0.01)]
    for shuffle in (False, True):
        for train, test in StratifiedKFold(5, shuffle=shuffle).split(X, y):
            for subset in (train, test):
                for label, ratio in class_ratios:
                    observed = np.sum(y[subset] == label) / len(subset)
                    assert_almost_equal(observed, ratio, 2)
def test_kfold_balance():
    """KFold fold sizes may differ by at most one sample and must sum to
    the number of samples."""
    for n in range(11, 17):
        sizes = [len(test) for _, test in KFold(5).split(X=np.ones(n))]
        assert_true((np.max(sizes) - np.min(sizes)) <= 1)
        assert_equal(np.sum(sizes), n)
def test_stratifiedkfold_balance():
    """StratifiedKFold folds are balanced (sizes differ by at most one)
    whenever the stratification allows it, shuffled or not."""
    X = np.ones(17)
    y = [0] * 3 + [1] * 14
    for shuffle in (True, False):
        cv = StratifiedKFold(3, shuffle=shuffle)
        for i in range(11, 17):
            sizes = [len(test) for _, test in cv.split(X[:i], y[:i])]
            assert_true((np.max(sizes) - np.min(sizes)) <= 1)
            assert_equal(np.sum(sizes), i)
def test_shuffle_kfold():
    """Shuffled KFold: train folds differ across seeds (and from the
    unshuffled splitter) while test folds still cover all samples."""
    # Check the indices are shuffled properly
    kf = KFold(3)
    kf2 = KFold(3, shuffle=True, random_state=0)
    kf3 = KFold(3, shuffle=True, random_state=1)
    X = np.ones(300)
    all_folds = np.zeros(300)
    for (tr1, te1), (tr2, te2), (tr3, te3) in zip(
            kf.split(X), kf2.split(X), kf3.split(X)):
        for tr_a, tr_b in combinations((tr1, tr2, tr3), 2):
            # Assert that there is no complete overlap
            assert_not_equal(len(np.intersect1d(tr_a, tr_b)), len(tr1))
        # Set all test indices in successive iterations of kf2 to 1
        all_folds[te2] = 1
    # Check that all indices are returned in the different test folds
    assert_equal(sum(all_folds), 300)
def test_shuffle_kfold_stratifiedkfold_reproducibility():
    """With a fixed random_state, repeated split() calls must be identical;
    without one, successive split() calls must differ."""
    # Check that when the shuffle is True multiple split calls produce the
    # same split when random_state is set
    X = np.ones(15)  # Divisible by 3
    y = [0] * 7 + [1] * 8
    X2 = np.ones(16)  # Not divisible by 3
    y2 = [0] * 8 + [1] * 8
    kf = KFold(3, shuffle=True, random_state=0)
    skf = StratifiedKFold(3, shuffle=True, random_state=0)
    for cv in (kf, skf):
        np.testing.assert_equal(list(cv.split(X, y)), list(cv.split(X, y)))
        np.testing.assert_equal(list(cv.split(X2, y2)), list(cv.split(X2, y2)))
    kf = KFold(3, shuffle=True)
    skf = StratifiedKFold(3, shuffle=True)
    for cv in (kf, skf):
        for data in zip((X, X2), (y, y2)):
            # Test if the two splits are different cv
            for (_, test_a), (_, test_b) in zip(cv.split(*data),
                                                cv.split(*data)):
                # cv.split(...) returns an array of tuples, each tuple
                # consisting of an array with train indices and test indices
                # NOTE(review): `message=` is pytest's deprecated kwarg (the
                # text shown when NO exception is raised); it is not a match
                # pattern -- confirm against the pinned pytest version.
                with pytest.raises(AssertionError,
                                   message="The splits for data, are same even"
                                   " when random state is not set"):
                    np.testing.assert_array_equal(test_a, test_b)
def test_shuffle_stratifiedkfold():
    """Different seeds must shuffle differently while keeping full
    test-fold coverage of the samples."""
    X_40 = np.ones(40)
    y = [0] * 20 + [1] * 20
    cv_seed0 = StratifiedKFold(5, shuffle=True, random_state=0)
    cv_seed1 = StratifiedKFold(5, shuffle=True, random_state=1)
    for (_, test0), (_, test1) in zip(cv_seed0.split(X_40, y),
                                      cv_seed1.split(X_40, y)):
        # Corresponding folds from different seeds must not coincide.
        assert_not_equal(set(test0), set(test1))
    # Shuffling must not break the coverage guarantees of the folds.
    check_cv_coverage(cv_seed0, X_40, y, groups=None, expected_n_splits=5)
def test_kfold_can_detect_dependent_samples_on_digits():  # see #2372
    """Unshuffled (Stratified)KFold must expose the sample dependency in
    the digits data that shuffled KFold hides behind inflated scores."""
    # The digits samples are dependent: they are apparently grouped by authors
    # although we don't have any information on the groups segment locations
    # for this data. We can highlight this fact by computing k-fold cross-
    # validation with and without shuffling: we observe that the shuffling case
    # wrongly makes the IID assumption and is therefore too optimistic: it
    # estimates a much higher accuracy (around 0.93) than that the non
    # shuffling variant (around 0.81).
    X, y = digits.data[:600], digits.target[:600]
    model = SVC(C=10, gamma=0.005)
    n_splits = 3
    cv = KFold(n_splits=n_splits, shuffle=False)
    mean_score = cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(0.92, mean_score)
    assert_greater(mean_score, 0.80)
    # Shuffling the data artificially breaks the dependency and hides the
    # overfitting of the model with regards to the writing style of the authors
    # by yielding a seriously overestimated score:
    cv = KFold(n_splits, shuffle=True, random_state=0)
    mean_score = cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(mean_score, 0.92)
    cv = KFold(n_splits, shuffle=True, random_state=1)
    mean_score = cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(mean_score, 0.92)
    # Similarly, StratifiedKFold should try to shuffle the data as little
    # as possible (while respecting the balanced class constraints)
    # and thus be able to detect the dependency by not overestimating
    # the CV score either. As the digits dataset is approximately balanced
    # the estimated mean score is close to the score measured with
    # non-shuffled KFold
    cv = StratifiedKFold(n_splits)
    mean_score = cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(0.93, mean_score)
    assert_greater(mean_score, 0.80)
def test_shuffle_split():
    """Equivalent test_size specifications (float fraction 0.2 of the
    10-sample module-level X, Python int 2, numpy int, six integer types)
    must all yield identical splits."""
    ss1 = ShuffleSplit(test_size=0.2, random_state=0).split(X)
    ss2 = ShuffleSplit(test_size=2, random_state=0).split(X)
    ss3 = ShuffleSplit(test_size=np.int32(2), random_state=0).split(X)
    for typ in six.integer_types:
        ss4 = ShuffleSplit(test_size=typ(2), random_state=0).split(X)
        # NOTE(review): ss1-ss3 are generators and are consumed on the first
        # pass; on Python 2 (where six.integer_types has two entries) the
        # second pass compares nothing -- confirm this is acceptable.
        for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
            assert_array_equal(t1[0], t2[0])
            assert_array_equal(t2[0], t3[0])
            assert_array_equal(t3[0], t4[0])
            assert_array_equal(t1[1], t2[1])
            assert_array_equal(t2[1], t3[1])
            assert_array_equal(t3[1], t4[1])
@ignore_warnings
def test_stratified_shuffle_split_init():
    """StratifiedShuffleSplit must reject configurations where a class or
    one side of the split cannot be represented.

    Positional arguments below are (n_splits, test_size, train_size).
    """
    X = np.arange(7)
    y = np.asarray([0, 1, 1, 1, 2, 2, 2])
    # Check that error is raised if there is a class with only one sample
    assert_raises(ValueError, next,
                  StratifiedShuffleSplit(3, 0.2).split(X, y))
    # Check that error is raised if the test set size is smaller than n_classes
    assert_raises(ValueError, next, StratifiedShuffleSplit(3, 2).split(X, y))
    # Check that error is raised if the train set size is smaller than
    # n_classes
    assert_raises(ValueError, next,
                  StratifiedShuffleSplit(3, 3, 2).split(X, y))
    X = np.arange(9)
    y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
    # Check that errors are raised if there is not enough samples
    assert_raises(ValueError, StratifiedShuffleSplit, 3, 0.5, 0.6)
    assert_raises(ValueError, next,
                  StratifiedShuffleSplit(3, 8, 0.6).split(X, y))
    assert_raises(ValueError, next,
                  StratifiedShuffleSplit(3, 0.6, 8).split(X, y))
    # Train size or test size too small
    assert_raises(ValueError, next,
                  StratifiedShuffleSplit(train_size=2).split(X, y))
    assert_raises(ValueError, next,
                  StratifiedShuffleSplit(test_size=2).split(X, y))
def test_stratified_shuffle_split_respects_test_size():
    """Integer train_size/test_size must be honoured exactly on every
    StratifiedShuffleSplit iteration."""
    y = np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2])
    n_test, n_train = 5, 10
    splitter = StratifiedShuffleSplit(6, test_size=n_test,
                                      train_size=n_train, random_state=0)
    for train, test in splitter.split(np.ones(len(y)), y):
        assert_equal(len(train), n_train)
        assert_equal(len(test), n_test)
def test_stratified_shuffle_split_iter():
    """StratifiedShuffleSplit: each split partitions the data, matches the
    requested sizes, and keeps class proportions, for many label layouts
    (arrays, lists, strings, heavily unbalanced classes)."""
    ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
          np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
          np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2] * 2),
          np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
          np.array([-1] * 800 + [1] * 50),
          np.concatenate([[i] * (100 + i) for i in range(11)]),
          [1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3],
          ['1', '1', '1', '1', '2', '2', '2', '3', '3', '3', '3', '3'],
          ]
    for y in ys:
        sss = StratifiedShuffleSplit(6, test_size=0.33,
                                     random_state=0).split(np.ones(len(y)), y)
        y = np.asanyarray(y)  # To make it indexable for y[train]
        # this is how test-size is computed internally
        # in _validate_shuffle_split
        test_size = np.ceil(0.33 * len(y))
        train_size = len(y) - test_size
        for train, test in sss:
            # Both sides must contain every class present in y.
            assert_array_equal(np.unique(y[train]), np.unique(y[test]))
            # Checks if folds keep classes proportions
            p_train = (np.bincount(np.unique(y[train],
                                   return_inverse=True)[1]) /
                       float(len(y[train])))
            p_test = (np.bincount(np.unique(y[test],
                                  return_inverse=True)[1]) /
                      float(len(y[test])))
            assert_array_almost_equal(p_train, p_test, 1)
            assert_equal(len(train) + len(test), y.size)
            assert_equal(len(train), train_size)
            assert_equal(len(test), test_size)
            assert_array_equal(np.lib.arraysetops.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
    """Across many StratifiedShuffleSplit draws, each index must be chosen
    with approximately its expected (binomial) frequency."""
    # Test the StratifiedShuffleSplit, indices are drawn with a
    # equal chance
    n_folds = 5
    n_splits = 1000
    def assert_counts_are_ok(idx_counts, p):
        # Here we test that the distribution of the counts
        # per index is close enough to a binomial
        threshold = 0.05 / n_splits
        bf = stats.binom(n_splits, p)
        for count in idx_counts:
            prob = bf.pmf(count)
            assert_true(prob > threshold,
                        "An index is not drawn with chance corresponding "
                        "to even draws")
    for n_samples in (6, 22):
        groups = np.array((n_samples // 2) * [0, 1])
        splits = StratifiedShuffleSplit(n_splits=n_splits,
                                        test_size=1. / n_folds,
                                        random_state=0)
        # Tally, per sample index, how often it lands in train/test.
        train_counts = [0] * n_samples
        test_counts = [0] * n_samples
        n_splits_actual = 0
        for train, test in splits.split(X=np.ones(n_samples), y=groups):
            n_splits_actual += 1
            for counter, ids in [(train_counts, train), (test_counts, test)]:
                for id in ids:
                    counter[id] += 1
        assert_equal(n_splits_actual, n_splits)
        n_train, n_test = _validate_shuffle_split(
            n_samples, test_size=1. / n_folds, train_size=1. - (1. / n_folds))
        # Sanity checks on the last drawn split and the class setup.
        assert_equal(len(train), n_train)
        assert_equal(len(test), n_test)
        assert_equal(len(set(train).intersection(test)), 0)
        group_counts = np.unique(groups)
        assert_equal(splits.test_size, 1.0 / n_folds)
        assert_equal(n_train + n_test, len(groups))
        assert_equal(len(group_counts), 2)
        # Expected per-index inclusion probabilities for train and test.
        ex_test_p = float(n_test) / n_samples
        ex_train_p = float(n_train) / n_samples
        assert_counts_are_ok(train_counts, ex_train_p)
        assert_counts_are_ok(test_counts, ex_test_p)
def test_stratified_shuffle_split_overlap_train_test_bug():
    """Non-regression test: train and test must be a disjoint, complete
    partition of the samples.

    See https://github.com/scikit-learn/scikit-learn/issues/6121 for the
    original bug report.
    """
    y = [0, 1, 2, 3] * 3 + [4, 5] * 5
    splitter = StratifiedShuffleSplit(n_splits=1,
                                      test_size=0.5, random_state=0)
    train, test = next(splitter.split(X=np.ones_like(y), y=y))
    # The two sides must not share any index...
    assert_array_equal(np.intersect1d(train, test), [])
    # ...and together must cover every sample exactly once.
    assert_array_equal(np.union1d(train, test), np.arange(len(y)))
def test_stratified_shuffle_split_multilabel():
    """Multilabel y where column 0 determines the whole row: splits must
    partition the samples and stratify on entire rows (issue #9037)."""
    # fix for issue 9037
    for y in [np.array([[0, 1], [1, 0], [1, 0], [0, 1]]),
              np.array([[0, 1], [1, 1], [1, 1], [0, 1]])]:
        X = np.ones_like(y)
        sss = StratifiedShuffleSplit(n_splits=1, test_size=0.5, random_state=0)
        train, test = next(sss.split(X=X, y=y))
        y_train = y[train]
        y_test = y[test]
        # no overlap
        assert_array_equal(np.intersect1d(train, test), [])
        # complete partition
        assert_array_equal(np.union1d(train, test), np.arange(len(y)))
        # correct stratification of entire rows
        # (by design, here y[:, 0] uniquely determines the entire row of y)
        expected_ratio = np.mean(y[:, 0])
        assert_equal(expected_ratio, np.mean(y_train[:, 0]))
        assert_equal(expected_ratio, np.mean(y_test[:, 0]))
def test_stratified_shuffle_split_multilabel_many_labels():
    """Row-wise stratification must survive numpy's ellipsis truncation of
    very wide label rows (regression test for PR #9922)."""
    # fix in PR #9922: for multilabel data with > 1000 labels, str(row)
    # truncates with an ellipsis for elements in positions 4 through
    # len(row) - 4, so labels were not being correctly split using the powerset
    # method for transforming a multilabel problem to a multiclass one; this
    # test checks that this problem is fixed.
    row_with_many_zeros = [1, 0, 1] + [0] * 1000 + [1, 0, 1]
    row_with_many_ones = [1, 0, 1] + [1] * 1000 + [1, 0, 1]
    y = np.array([row_with_many_zeros] * 10 + [row_with_many_ones] * 100)
    X = np.ones_like(y)
    sss = StratifiedShuffleSplit(n_splits=1, test_size=0.5, random_state=0)
    train, test = next(sss.split(X=X, y=y))
    y_train = y[train]
    y_test = y[test]
    # correct stratification of entire rows
    # (by design, here y[:, 4] uniquely determines the entire row of y)
    expected_ratio = np.mean(y[:, 4])
    assert_equal(expected_ratio, np.mean(y_train[:, 4]))
    assert_equal(expected_ratio, np.mean(y_test[:, 4]))
def test_predefinedsplit_with_kfold_split():
    """PredefinedSplit built from a KFold fold assignment must reproduce
    KFold's splits exactly (uses the module-level 10-sample X)."""
    # Check that PredefinedSplit can reproduce a split generated by Kfold.
    folds = np.full(10, -1.)
    kf_train = []
    kf_test = []
    for i, (train_ind, test_ind) in enumerate(KFold(5, shuffle=True).split(X)):
        kf_train.append(train_ind)
        kf_test.append(test_ind)
        # Record, for each sample, which fold it was tested in.
        folds[test_ind] = i
    ps_train = []
    ps_test = []
    ps = PredefinedSplit(folds)
    # n_splits is simply the no of unique folds
    assert_equal(len(np.unique(folds)), ps.get_n_splits())
    for train_ind, test_ind in ps.split():
        ps_train.append(train_ind)
        ps_test.append(test_ind)
    assert_array_equal(ps_train, kf_train)
    assert_array_equal(ps_test, kf_test)
def test_group_shuffle_split():
    """GroupShuffleSplit: whole groups stay on one side of the split, the
    split partitions the data, and the group counts match test_size up to
    rounding (checked over all module-level test_groups layouts)."""
    for groups_i in test_groups:
        X = y = np.ones(len(groups_i))
        n_splits = 6
        test_size = 1. / 3
        slo = GroupShuffleSplit(n_splits, test_size=test_size, random_state=0)
        # Make sure the repr works
        repr(slo)
        # Test that the length is correct
        assert_equal(slo.get_n_splits(X, y, groups=groups_i), n_splits)
        l_unique = np.unique(groups_i)
        l = np.asarray(groups_i)
        for train, test in slo.split(X, y, groups=groups_i):
            # First test: no train group is in the test set and vice versa
            l_train_unique = np.unique(l[train])
            l_test_unique = np.unique(l[test])
            assert_false(np.any(np.in1d(l[train], l_test_unique)))
            assert_false(np.any(np.in1d(l[test], l_train_unique)))
            # Second test: train and test add up to all the data
            assert_equal(l[train].size + l[test].size, l.size)
            # Third test: train and test are disjoint
            assert_array_equal(np.intersect1d(train, test), [])
            # Fourth test:
            # unique train and test groups are correct, +- 1 for rounding error
            assert_true(abs(len(l_test_unique) -
                            round(test_size * len(l_unique))) <= 1)
            assert_true(abs(len(l_train_unique) -
                            round((1.0 - test_size) * len(l_unique))) <= 1)
def test_leave_one_p_group_out():
    """LeaveOneGroupOut / LeavePGroupsOut: repr, split counts, group
    exclusivity of train/test, and validation of the `groups` argument."""
    logo = LeaveOneGroupOut()
    lpgo_1 = LeavePGroupsOut(n_groups=1)
    lpgo_2 = LeavePGroupsOut(n_groups=2)
    # Make sure the repr works
    assert_equal(repr(logo), 'LeaveOneGroupOut()')
    assert_equal(repr(lpgo_1), 'LeavePGroupsOut(n_groups=1)')
    assert_equal(repr(lpgo_2), 'LeavePGroupsOut(n_groups=2)')
    assert_equal(repr(LeavePGroupsOut(n_groups=3)),
                 'LeavePGroupsOut(n_groups=3)')
    for j, (cv, p_groups_out) in enumerate(((logo, 1), (lpgo_1, 1),
                                            (lpgo_2, 2))):
        for i, groups_i in enumerate(test_groups):
            n_groups = len(np.unique(groups_i))
            # Leaving 1 group out yields n_groups splits; leaving 2 out
            # yields C(n_groups, 2) splits.
            n_splits = (n_groups if p_groups_out == 1
                        else n_groups * (n_groups - 1) / 2)
            X = y = np.ones(len(groups_i))
            # Test that the length is correct
            assert_equal(cv.get_n_splits(X, y, groups=groups_i), n_splits)
            groups_arr = np.asarray(groups_i)
            # Split using the original list / array / list of string groups_i
            for train, test in cv.split(X, y, groups=groups_i):
                # First test: no train group is in the test set and vice versa
                assert_array_equal(np.intersect1d(groups_arr[train],
                                                  groups_arr[test]).tolist(),
                                   [])
                # Second test: train and test add up to all the data
                assert_equal(len(train) + len(test), len(groups_i))
                # Third test:
                # The number of groups in test must be equal to p_groups_out
                # BUG FIX: previously `assert_true(shape[0], p_groups_out)`,
                # which always passed because the second argument of
                # assert_true is only the failure message; assert equality.
                assert_equal(np.unique(groups_arr[test]).shape[0],
                             p_groups_out)
    # check get_n_splits() with dummy parameters
    assert_equal(logo.get_n_splits(None, None, ['a', 'b', 'c', 'b', 'c']), 3)
    assert_equal(logo.get_n_splits(groups=[1.0, 1.1, 1.0, 1.2]), 3)
    assert_equal(lpgo_2.get_n_splits(None, None, np.arange(4)), 6)
    assert_equal(lpgo_1.get_n_splits(groups=np.arange(4)), 4)
    # raise ValueError if a `groups` parameter is illegal
    with assert_raises(ValueError):
        logo.get_n_splits(None, None, [0.0, np.nan, 0.0])
    with assert_raises(ValueError):
        lpgo_2.get_n_splits(None, None, [0.0, np.inf, 0.0])
    msg = "The 'groups' parameter should not be None."
    assert_raise_message(ValueError, msg,
                         logo.get_n_splits, None, None, None)
    assert_raise_message(ValueError, msg,
                         lpgo_1.get_n_splits, None, None, None)
def test_leave_group_out_changing_groups():
    """LeaveOneGroupOut / LeavePGroupsOut should not be affected by later
    mutation of a groups array after split() has been called."""
    # Check that LeaveOneGroupOut and LeavePGroupsOut work normally if
    # the groups variable is changed before calling split
    groups = np.array([0, 1, 2, 1, 1, 2, 0, 0])
    X = np.ones(len(groups))
    groups_changing = np.array(groups, copy=True)
    lolo = LeaveOneGroupOut().split(X, groups=groups)
    # NOTE(review): `groups_changing` is created and mutated below but never
    # passed to any splitter -- all four split() calls use `groups`, so the
    # mutation is not actually exercised. Confirm the intended wiring.
    lolo_changing = LeaveOneGroupOut().split(X, groups=groups)
    lplo = LeavePGroupsOut(n_groups=2).split(X, groups=groups)
    lplo_changing = LeavePGroupsOut(n_groups=2).split(X, groups=groups)
    groups_changing[:] = 0
    for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
        for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
            assert_array_equal(train, train_chan)
            assert_array_equal(test, test_chan)
    # n_splits = no of 2 (p) group combinations of the unique groups = 3C2 = 3
    assert_equal(
        3, LeavePGroupsOut(n_groups=2).get_n_splits(X, y=X,
                                                    groups=groups))
    # n_splits = no of unique groups (C(uniq_lbls, 1) = n_unique_groups)
    assert_equal(3, LeaveOneGroupOut().get_n_splits(X, y=X,
                                                    groups=groups))
def test_leave_one_p_group_out_error_on_fewer_number_of_groups():
    """Clear errors when `groups` has too few samples or unique groups."""
    X = y = groups = np.ones(0)
    assert_raise_message(ValueError, "Found array with 0 sample(s)", next,
                         LeaveOneGroupOut().split(X, y, groups))
    X = y = groups = np.ones(1)
    # The expected message embeds str(groups) via .format().
    msg = ("The groups parameter contains fewer than 2 unique groups ({}). "
           "LeaveOneGroupOut expects at least 2.").format(groups)
    assert_raise_message(ValueError, msg, next,
                         LeaveOneGroupOut().split(X, y, groups))
    X = y = groups = np.ones(1)
    msg = ("The groups parameter contains fewer than (or equal to) n_groups "
           "(3) numbers of unique groups ({}). LeavePGroupsOut expects "
           "that at least n_groups + 1 (4) unique groups "
           "be present").format(groups)
    assert_raise_message(ValueError, msg, next,
                         LeavePGroupsOut(n_groups=3).split(X, y, groups))
    # Same error when n_unique_groups == n_groups (3 unique, 3 left out).
    X = y = groups = np.arange(3)
    msg = ("The groups parameter contains fewer than (or equal to) n_groups "
           "(3) numbers of unique groups ({}). LeavePGroupsOut expects "
           "that at least n_groups + 1 (4) unique groups "
           "be present").format(groups)
    assert_raise_message(ValueError, msg, next,
                         LeavePGroupsOut(n_groups=3).split(X, y, groups))
@ignore_warnings
def test_repeated_cv_value_errors():
    """Repeated splitters must reject n_repeats values that are zero or
    not integral."""
    for splitter_cls in (RepeatedKFold, RepeatedStratifiedKFold):
        for bad_n_repeats in (0, 1.5):
            assert_raises(ValueError, splitter_cls, n_repeats=bad_n_repeats)
def test_repeated_kfold_determinstic_split():
    """With a fixed random_state, RepeatedKFold must yield the same splits
    on every call to split(), and exactly n_splits * n_repeats of them."""
    X = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]
    random_state = 258173307
    rkf = RepeatedKFold(
        n_splits=2,
        n_repeats=2,
        random_state=random_state)
    # split should produce same and deterministic splits on
    # each call
    for _ in range(3):
        splits = rkf.split(X)
        train, test = next(splits)
        assert_array_equal(train, [2, 4])
        assert_array_equal(test, [0, 1, 3])
        train, test = next(splits)
        assert_array_equal(train, [0, 1, 3])
        assert_array_equal(test, [2, 4])
        # Second repetition: a different partition of the same data.
        train, test = next(splits)
        assert_array_equal(train, [0, 1])
        assert_array_equal(test, [2, 3, 4])
        train, test = next(splits)
        assert_array_equal(train, [2, 3, 4])
        assert_array_equal(test, [0, 1])
        # 2 splits x 2 repeats -> the generator is now exhausted.
        assert_raises(StopIteration, next, splits)
def test_get_n_splits_for_repeated_kfold():
    """RepeatedKFold.get_n_splits() must equal n_splits * n_repeats."""
    rkf = RepeatedKFold(3, 4)
    assert_equal(3 * 4, rkf.get_n_splits())
def test_get_n_splits_for_repeated_stratified_kfold():
    """RepeatedStratifiedKFold.get_n_splits() must equal
    n_splits * n_repeats."""
    rskf = RepeatedStratifiedKFold(3, 4)
    assert_equal(3 * 4, rskf.get_n_splits())
def test_repeated_stratified_kfold_determinstic_split():
    """With a fixed random_state, RepeatedStratifiedKFold must yield the
    same splits on every call, and exactly n_splits * n_repeats of them."""
    X = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]
    y = [1, 1, 1, 0, 0]
    random_state = 1944695409
    rskf = RepeatedStratifiedKFold(
        n_splits=2,
        n_repeats=2,
        random_state=random_state)
    # split should produce same and deterministic splits on
    # each call
    for _ in range(3):
        splits = rskf.split(X, y)
        train, test = next(splits)
        assert_array_equal(train, [1, 4])
        assert_array_equal(test, [0, 2, 3])
        train, test = next(splits)
        assert_array_equal(train, [0, 2, 3])
        assert_array_equal(test, [1, 4])
        # Second repetition: a different partition of the same data.
        train, test = next(splits)
        assert_array_equal(train, [2, 3])
        assert_array_equal(test, [0, 1, 4])
        train, test = next(splits)
        assert_array_equal(train, [0, 1, 4])
        assert_array_equal(test, [2, 3])
        # 2 splits x 2 repeats -> the generator is now exhausted.
        assert_raises(StopIteration, next, splits)
def test_train_test_split_errors():
    """train_test_split must validate sizes, types, and option
    combinations."""
    assert_raises(ValueError, train_test_split)
    with warnings.catch_warnings():
        # JvR: Currently, a future warning is raised if test_size is not
        # given. As that is the point of this test, ignore the future warning
        warnings.filterwarnings("ignore", category=FutureWarning)
        assert_raises(ValueError, train_test_split, range(3), train_size=1.1)
    assert_raises(ValueError, train_test_split, range(3), test_size=0.6,
                  train_size=0.6)
    assert_raises(ValueError, train_test_split, range(3),
                  test_size=np.float32(0.6), train_size=np.float32(0.6))
    assert_raises(ValueError, train_test_split, range(3),
                  test_size="wrong_type")
    assert_raises(ValueError, train_test_split, range(3), test_size=2,
                  train_size=4)
    # Unknown keyword arguments are a TypeError.
    assert_raises(TypeError, train_test_split, range(3),
                  some_argument=1.1)
    # Inputs of inconsistent lengths are rejected.
    assert_raises(ValueError, train_test_split, range(3), range(42))
    # stratify requires shuffling.
    assert_raises(ValueError, train_test_split, range(10),
                  shuffle=False, stratify=True)
def test_train_test_split():
    """End-to-end checks of train_test_split: X/y correspondence, list
    pass-through, nd-arrays, stratification, and unshuffled splits."""
    X = np.arange(100).reshape((10, 10))
    X_s = coo_matrix(X)
    y = np.arange(10)
    # simple test
    split = train_test_split(X, y, test_size=None, train_size=.5)
    X_train, X_test, y_train, y_test = split
    assert_equal(len(y_test), len(y_train))
    # test correspondence of X and y
    # (row i of X starts with 10 * i, so X[:, 0] == y * 10 by construction)
    assert_array_equal(X_train[:, 0], y_train * 10)
    assert_array_equal(X_test[:, 0], y_test * 10)
    # don't convert lists to anything else by default
    split = train_test_split(X, X_s, y.tolist())
    X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
    assert_true(isinstance(y_train, list))
    assert_true(isinstance(y_test, list))
    # allow nd-arrays
    X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
    y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
    split = train_test_split(X_4d, y_3d)
    assert_equal(split[0].shape, (7, 5, 3, 2))
    assert_equal(split[1].shape, (3, 5, 3, 2))
    assert_equal(split[2].shape, (7, 7, 11))
    assert_equal(split[3].shape, (3, 7, 11))
    # test stratification option
    y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
    for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
                                       [2, 4, 2, 4, 6]):
        train, test = train_test_split(y, test_size=test_size,
                                       stratify=y,
                                       random_state=0)
        assert_equal(len(test), exp_test_size)
        assert_equal(len(test) + len(train), len(y))
        # check the 1:1 ratio of ones and twos in the data is preserved
        assert_equal(np.sum(train == 1), np.sum(train == 2))
    # test unshuffled split
    y = np.arange(10)
    for test_size in [2, 0.2]:
        train, test = train_test_split(y, shuffle=False, test_size=test_size)
        assert_array_equal(test, [8, 9])
        assert_array_equal(train, [0, 1, 2, 3, 4, 5, 6, 7])
@ignore_warnings
def train_test_split_pandas():
    # NOTE(review): the name lacks the ``test_`` prefix, so pytest/nose will
    # never collect this function -- presumably an oversight; confirm and
    # rename in a separate change.
    # NOTE(review): relies on a module-level ``X`` defined elsewhere in this
    # test module.
    # check train_test_split doesn't destroy pandas dataframe
    types = [MockDataFrame]
    try:
        from pandas import DataFrame
        types.append(DataFrame)
    except ImportError:
        # pandas is an optional dependency; only the mock is exercised then.
        pass
    for InputFeatureType in types:
        # X dataframe
        X_df = InputFeatureType(X)
        X_train, X_test = train_test_split(X_df)
        # the container type must be preserved round-trip
        assert_true(isinstance(X_train, InputFeatureType))
        assert_true(isinstance(X_test, InputFeatureType))
def train_test_split_sparse():
    # NOTE(review): the name lacks the ``test_`` prefix, so pytest/nose will
    # never collect this function -- confirm and rename separately.
    # check that train_test_split converts scipy sparse matrices
    # to csr, as stated in the documentation
    X = np.arange(100).reshape((10, 10))
    sparse_types = [csr_matrix, csc_matrix, coo_matrix]
    for InputFeatureType in sparse_types:
        X_s = InputFeatureType(X)
        X_train, X_test = train_test_split(X_s)
        # whatever the input sparse format, the output must be CSR
        assert_true(isinstance(X_train, csr_matrix))
        assert_true(isinstance(X_test, csr_matrix))
def train_test_split_mock_pandas():
    # NOTE(review): missing ``test_`` prefix -- not collected by the runner.
    # Uses the module-level ``X`` fixture defined elsewhere in this module.
    # X mock dataframe
    X_df = MockDataFrame(X)
    X_train, X_test = train_test_split(X_df)
    assert_true(isinstance(X_train, MockDataFrame))
    assert_true(isinstance(X_test, MockDataFrame))
    # NOTE(review): the results of this second split are never asserted on;
    # dead code unless an array-conversion check was intended here.
    X_train_arr, X_test_arr = train_test_split(X_df)
def train_test_split_list_input():
    # NOTE(review): missing ``test_`` prefix -- not collected by the runner.
    # Check that when y is a list / list of string labels, it works.
    X = np.ones(7)
    y1 = ['1'] * 4 + ['0'] * 3
    y2 = np.hstack((np.ones(4), np.zeros(3)))
    y3 = y2.tolist()
    for stratify in (True, False):
        X_train1, X_test1, y_train1, y_test1 = train_test_split(
            X, y1, stratify=y1 if stratify else None, random_state=0)
        X_train2, X_test2, y_train2, y_test2 = train_test_split(
            X, y2, stratify=y2 if stratify else None, random_state=0)
        X_train3, X_test3, y_train3, y_test3 = train_test_split(
            X, y3, stratify=y3 if stratify else None, random_state=0)
        # with a fixed seed all three label encodings must split identically
        np.testing.assert_equal(X_train1, X_train2)
        np.testing.assert_equal(y_train2, y_train3)
        np.testing.assert_equal(X_test1, X_test3)
        np.testing.assert_equal(y_test3, y_test2)
@ignore_warnings
def test_shufflesplit_errors():
    """Invalid test/train sizes must raise ValueError, either at construction
    time (floats / malformed values) or at split time (integer sizes)."""
    # Fractional or malformed sizes are rejected as soon as the splitter
    # is instantiated.
    for bad_kwargs in ({'test_size': None, 'train_size': None},
                       {'test_size': 2.0},
                       {'test_size': 1.0},
                       {'test_size': 0.1, 'train_size': 0.95},
                       {'train_size': 1j}):
        assert_raises(ValueError, ShuffleSplit, **bad_kwargs)
    # Integer sizes can only be validated against the data, so the error
    # surfaces when the first split is requested.
    for splitter in (ShuffleSplit(test_size=11),
                     ShuffleSplit(test_size=10),
                     ShuffleSplit(test_size=8, train_size=3)):
        assert_raises(ValueError, next, splitter.split(X))
def test_shufflesplit_reproducible():
    """A seeded ShuffleSplit must yield identical train sets on every pass."""
    ss = ShuffleSplit(random_state=21)
    first_pass = [train for train, _ in ss.split(X)]
    second_pass = [train for train, _ in ss.split(X)]
    assert_array_equal(first_pass, second_pass)
def test_stratifiedshufflesplit_list_input():
    """String-label and plain-list targets must stratify exactly like the
    equivalent numpy array."""
    sss = StratifiedShuffleSplit(test_size=2, random_state=42)
    X = np.ones(7)
    y_array = np.hstack((np.ones(4), np.zeros(3)))
    y_str_list = ['1'] * 4 + ['0'] * 3
    y_num_list = y_array.tolist()
    # The seeded splitter is deterministic, so the array result can serve
    # as the reference for both list encodings.
    reference = list(sss.split(X, y_array))
    for y in (y_str_list, y_num_list):
        np.testing.assert_equal(list(sss.split(X, y)), reference)
def test_train_test_split_allow_nans():
    """Splitting must succeed even when the feature matrix contains NaNs."""
    features = np.arange(200, dtype=np.float64).reshape(10, -1)
    features[2, :] = np.nan
    labels = np.repeat([0, 1], features.shape[0] / 2)
    # The call itself is the assertion: it must not raise on NaN input.
    train_test_split(features, labels, test_size=0.2, random_state=42)
def test_check_cv():
    """check_cv must select StratifiedKFold for 1d/2d classification targets
    and plain KFold for regression, multilabel and multioutput targets."""
    X = np.ones(9)
    cv = check_cv(3, classifier=False)
    # Use numpy.testing.assert_equal which recursively compares
    # lists of lists
    np.testing.assert_equal(list(KFold(3).split(X)), list(cv.split(X)))
    y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
    cv = check_cv(3, y_binary, classifier=True)
    np.testing.assert_equal(list(StratifiedKFold(3).split(X, y_binary)),
                            list(cv.split(X, y_binary)))
    y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
    cv = check_cv(3, y_multiclass, classifier=True)
    np.testing.assert_equal(list(StratifiedKFold(3).split(X, y_multiclass)),
                            list(cv.split(X, y_multiclass)))
    # also works with 2d multiclass
    y_multiclass_2d = y_multiclass.reshape(-1, 1)
    cv = check_cv(3, y_multiclass_2d, classifier=True)
    np.testing.assert_equal(list(StratifiedKFold(3).split(X, y_multiclass_2d)),
                            list(cv.split(X, y_multiclass_2d)))
    # sanity check: for this target the stratified and unstratified folds
    # genuinely differ, so the assertions above are not vacuous
    assert_false(np.all(
        next(StratifiedKFold(3).split(X, y_multiclass_2d))[0] ==
        next(KFold(3).split(X, y_multiclass_2d))[0]))
    X = np.ones(5)
    y_multilabel = np.array([[0, 0, 0, 0], [0, 1, 1, 0], [0, 0, 0, 1],
                             [1, 1, 0, 1], [0, 0, 1, 0]])
    # multilabel targets cannot be stratified -> plain KFold
    cv = check_cv(3, y_multilabel, classifier=True)
    np.testing.assert_equal(list(KFold(3).split(X)), list(cv.split(X)))
    y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
    cv = check_cv(3, y_multioutput, classifier=True)
    np.testing.assert_equal(list(KFold(3).split(X)), list(cv.split(X)))
    # unrecognized cv values are rejected
    assert_raises(ValueError, check_cv, cv="lolo")
def test_cv_iterable_wrapper():
    """check_cv must memoize a one-shot CV iterator so its splits can be
    re-iterated consistently."""
    # NOTE(review): X and y are module-level fixtures defined elsewhere in
    # this test module.
    kf_iter = KFold(n_splits=5).split(X, y)
    kf_iter_wrapped = check_cv(kf_iter)
    # Since the wrapped iterable is enlisted and stored,
    # split can be called any number of times to produce
    # consistent results.
    np.testing.assert_equal(list(kf_iter_wrapped.split(X, y)),
                            list(kf_iter_wrapped.split(X, y)))
    # If the splits are randomized, successive calls to split yields different
    # results
    kf_randomized_iter = KFold(n_splits=5, shuffle=True).split(X, y)
    kf_randomized_iter_wrapped = check_cv(kf_randomized_iter)
    # numpy's assert_array_equal properly compares nested lists
    np.testing.assert_equal(list(kf_randomized_iter_wrapped.split(X, y)),
                            list(kf_randomized_iter_wrapped.split(X, y)))
    # The shuffled wrapper must disagree with the unshuffled one; compare via
    # try/except because assert_equal raises rather than returning a bool.
    try:
        np.testing.assert_equal(list(kf_iter_wrapped.split(X, y)),
                                list(kf_randomized_iter_wrapped.split(X, y)))
        splits_are_equal = True
    except AssertionError:
        splits_are_equal = False
    assert_false(splits_are_equal, "If the splits are randomized, "
                 "successive calls to split should yield different results")
def test_group_kfold():
    """GroupKFold must build approximately balanced folds in which every
    group lands in exactly one fold, keep train/test group-disjoint, accept
    list inputs, and reject n_splits > n_groups.

    Fix: removed a no-op expression statement ``len(np.unique(groups))``
    whose result was discarded (dead code).
    """
    rng = np.random.RandomState(0)

    # Parameters of the test
    n_groups = 15
    n_samples = 1000
    n_splits = 5

    X = y = np.ones(n_samples)

    # Construct the test data
    tolerance = 0.05 * n_samples  # 5 percent error allowed
    groups = rng.randint(0, n_groups, n_samples)
    ideal_n_groups_per_fold = n_samples // n_splits

    # Get the test fold indices from the test set indices of each fold
    folds = np.zeros(n_samples)
    lkf = GroupKFold(n_splits=n_splits)
    for i, (_, test) in enumerate(lkf.split(X, y, groups)):
        folds[test] = i

    # Check that folds have approximately the same size
    assert_equal(len(folds), len(groups))
    for i in np.unique(folds):
        assert_greater_equal(tolerance,
                             abs(sum(folds == i) - ideal_n_groups_per_fold))

    # Check that each group appears only in 1 fold
    for group in np.unique(groups):
        assert_equal(len(np.unique(folds[groups == group])), 1)

    # Check that no group is on both sides of the split
    groups = np.asarray(groups, dtype=object)
    for train, test in lkf.split(X, y, groups):
        assert_equal(len(np.intersect1d(groups[train], groups[test])), 0)

    # Construct the test data (second pass: string groups, unbalanced sizes)
    groups = np.array(['Albert', 'Jean', 'Bertrand', 'Michel', 'Jean',
                       'Francis', 'Robert', 'Michel', 'Rachel', 'Lois',
                       'Michelle', 'Bernard', 'Marion', 'Laura', 'Jean',
                       'Rachel', 'Franck', 'John', 'Gael', 'Anna', 'Alix',
                       'Robert', 'Marion', 'David', 'Tony', 'Abel', 'Becky',
                       'Madmood', 'Cary', 'Mary', 'Alexandre', 'David',
                       'Francis', 'Barack', 'Abdoul', 'Rasha', 'Xi', 'Silvia'])
    n_groups = len(np.unique(groups))
    n_samples = len(groups)
    n_splits = 5
    tolerance = 0.05 * n_samples  # 5 percent error allowed
    ideal_n_groups_per_fold = n_samples // n_splits
    X = y = np.ones(n_samples)

    # Get the test fold indices from the test set indices of each fold
    folds = np.zeros(n_samples)
    for i, (_, test) in enumerate(lkf.split(X, y, groups)):
        folds[test] = i

    # Check that folds have approximately the same size
    assert_equal(len(folds), len(groups))
    for i in np.unique(folds):
        assert_greater_equal(tolerance,
                             abs(sum(folds == i) - ideal_n_groups_per_fold))

    # Check that each group appears only in 1 fold
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", DeprecationWarning)
        for group in np.unique(groups):
            assert_equal(len(np.unique(folds[groups == group])), 1)

    # Check that no group is on both sides of the split
    groups = np.asarray(groups, dtype=object)
    for train, test in lkf.split(X, y, groups):
        assert_equal(len(np.intersect1d(groups[train], groups[test])), 0)

    # groups can also be a list
    cv_iter = list(lkf.split(X, y, groups.tolist()))
    for (train1, test1), (train2, test2) in zip(lkf.split(X, y, groups),
                                                cv_iter):
        assert_array_equal(train1, train2)
        assert_array_equal(test1, test2)

    # Should fail if there are more folds than groups
    groups = np.array([1, 1, 1, 2, 2])
    X = y = np.ones(len(groups))
    assert_raises_regexp(ValueError, "Cannot have number of splits.*greater",
                         next, GroupKFold(n_splits=3).split(X, y, groups))
def test_time_series_cv():
    """TimeSeriesSplit: ordering preserved, train window grows with each
    split, and get_n_splits matches the number of generated splits."""
    X = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14]]
    # Should fail if there are more folds than samples
    assert_raises_regexp(ValueError, "Cannot have number of folds.*greater",
                         next,
                         TimeSeriesSplit(n_splits=7).split(X))
    tscv = TimeSeriesSplit(2)
    # Manually check that Time Series CV preserves the data
    # ordering on toy datasets
    splits = tscv.split(X[:-1])
    train, test = next(splits)
    assert_array_equal(train, [0, 1])
    assert_array_equal(test, [2, 3])
    train, test = next(splits)
    # train always extends the previous window; test follows it in order
    assert_array_equal(train, [0, 1, 2, 3])
    assert_array_equal(test, [4, 5])
    splits = TimeSeriesSplit(2).split(X)
    train, test = next(splits)
    assert_array_equal(train, [0, 1, 2])
    assert_array_equal(test, [3, 4])
    train, test = next(splits)
    assert_array_equal(train, [0, 1, 2, 3, 4])
    assert_array_equal(test, [5, 6])
    # Check get_n_splits returns the correct number of splits
    splits = TimeSeriesSplit(2).split(X)
    n_splits_actual = len(list(splits))
    assert_equal(n_splits_actual, tscv.get_n_splits())
    assert_equal(n_splits_actual, 2)
def _check_time_series_max_train_size(splits, check_splits, max_train_size):
    """Check that *check_splits* equals *splits* with each training fold
    truncated to its most recent *max_train_size* samples."""
    paired = zip(splits, check_splits)
    for (full_train, full_test), (capped_train, capped_test) in paired:
        # The test folds are unaffected by the training-size cap.
        assert_array_equal(full_test, capped_test)
        assert_true(len(capped_train) <= max_train_size)
        # The capped training fold must be the *tail* of the full one.
        keep_from = max(len(full_train) - max_train_size, 0)
        assert_array_equal(capped_train, full_train[keep_from:])
def test_time_series_max_train_size():
    """max_train_size must cap each TimeSeriesSplit training fold to its
    most recent samples.

    Fixes two defects in the original:
    * ``splits`` was a one-shot generator consumed by the first check, so
      the second and third checks zipped against an exhausted iterator and
      asserted nothing.
    * the third case is documented as "each fold smaller than
      max_train_size", so the expected cap must be 5, not 2.
    """
    X = np.zeros((6, 1))

    def fresh_splits():
        # .split() returns a generator; build a new one for every check.
        return TimeSeriesSplit(n_splits=3).split(X)

    check_splits = TimeSeriesSplit(n_splits=3, max_train_size=3).split(X)
    _check_time_series_max_train_size(fresh_splits(), check_splits,
                                      max_train_size=3)
    # Test for the case where the size of a fold is greater than max_train_size
    check_splits = TimeSeriesSplit(n_splits=3, max_train_size=2).split(X)
    _check_time_series_max_train_size(fresh_splits(), check_splits,
                                      max_train_size=2)
    # Test for the case where the size of each fold is less than max_train_size
    check_splits = TimeSeriesSplit(n_splits=3, max_train_size=5).split(X)
    _check_time_series_max_train_size(fresh_splits(), check_splits,
                                      max_train_size=5)
@pytest.mark.filterwarnings('ignore: You should specify a value')  # 0.22
def test_nested_cv():
    # Test if nested cross validation works with different combinations of cv
    rng = np.random.RandomState(0)
    X, y = make_classification(n_samples=15, n_classes=2, random_state=0)
    groups = rng.randint(0, 5, 15)
    cvs = [LeaveOneGroupOut(), LeaveOneOut(), GroupKFold(), StratifiedKFold(),
           StratifiedShuffleSplit(n_splits=3, random_state=0)]
    # Every (inner, outer) pairing must run end to end without raising
    # (error_score='raise' turns silent fit failures into test failures).
    for inner_cv, outer_cv in combinations_with_replacement(cvs, 2):
        gs = GridSearchCV(Ridge(), param_grid={'alpha': [1, .1]},
                          cv=inner_cv, error_score='raise', iid=False)
        # groups are forwarded both to the outer CV and to the inner fit
        cross_val_score(gs, X=X, y=y, groups=groups, cv=outer_cv,
                        fit_params={'groups': groups})
def test_train_test_default_warning():
    """Passing only train_size (leaving test_size unset) must emit a
    FutureWarning for every shuffle-based splitter and for train_test_split."""
    for splitter_cls in (ShuffleSplit, GroupShuffleSplit,
                         StratifiedShuffleSplit):
        assert_warns(FutureWarning, splitter_cls, train_size=0.75)
    assert_warns(FutureWarning, train_test_split, range(3), train_size=0.75)
def test_nsplit_default_warn():
    """Relying on the default n_splits must warn; passing it explicitly
    must not. These warnings are scheduled for removal in 0.22."""
    for cv_cls in (KFold, GroupKFold, StratifiedKFold, TimeSeriesSplit):
        assert_warns_message(FutureWarning, NSPLIT_WARNING, cv_cls)
        assert_no_warnings(cv_cls, n_splits=5)
def test_check_cv_default_warn():
    """check_cv must warn when cv is omitted or None, and stay silent when
    an explicit value is given. Removal planned for 0.22."""
    for args in ((), (None,)):
        assert_warns_message(FutureWarning, CV_WARNING, check_cv, *args)
    assert_no_warnings(check_cv, cv=5)
def test_build_repr():
    """_build_repr must render the constructor arguments of a splitter-like
    object in a stable ``Name(a=..., b=...)`` format."""
    class MockSplitter:
        def __init__(self, a, b=0, c=None):
            self.a, self.b, self.c = a, b, c

        def __repr__(self):
            return _build_repr(self)

    expected = "MockSplitter(a=5, b=6, c=None)"
    assert_equal(repr(MockSplitter(5, 6)), expected)
| vortex-ape/scikit-learn | sklearn/model_selection/tests/test_split.py | Python | bsd-3-clause | 57,882 |
from bokeh.io import save
from bokeh.models import MultiSelect, Tabs
# Build a MultiSelect widget and embed it as the sole pane of a Tabs layout.
select = MultiSelect(options=["First option", "Second option"])
tabs = Tabs(tabs=[("A tab", select)], width=300)
# Write the standalone HTML document for this integration example.
save(tabs)
| ericmjl/bokeh | examples/integration/widgets/tabs_with_multiselect.py | Python | bsd-3-clause | 195 |
from django.contrib.contenttypes.models import ContentType
import factory
from glitter.models import Version
from glitter.pages.models import Page
class PageFactory(factory.DjangoModelFactory):
    """Build glitter ``Page`` instances with unique, sequence-based fields."""
    # Sequences guarantee uniqueness across instances: /page-0/, /page-1/, ...
    url = factory.Sequence(lambda n: '/page-{}/'.format(n))
    title = factory.Sequence(lambda n: 'Page {}'.format(n))

    class Meta:
        model = Page
class VersionFactory(factory.DjangoModelFactory):
    """Abstract base factory for ``Version`` rows tied to a generic object.

    Subclasses supply ``content_object``; this base derives the generic
    foreign key fields (``object_id`` / ``content_type``) from it.
    """
    object_id = factory.SelfAttribute('content_object.id')
    # Resolve the ContentType lazily so it matches the concrete model used.
    content_type = factory.LazyAttribute(
        lambda o: ContentType.objects.get_for_model(o.content_object),
    )

    class Meta:
        # content_object is only a helper input, not a model field.
        exclude = ('content_object',)
        abstract = True
class PageVersionFactory(VersionFactory):
    """Version factory bound to a freshly created ``Page``."""
    content_object = factory.SubFactory(PageFactory)

    class Meta:
        model = Version

    @factory.post_generation
    def set_version(self, create, extracted, **kwargs):
        # When invoked with set_version=True, mark this version as the
        # page's current one (and persist the page in "create" mode).
        if extracted:
            page = self.content_object
            page.current_version = self
            if create:
                page.save()
| developersociety/django-glitter | glitter/tests/factories.py | Python | bsd-3-clause | 1,042 |
# pylint: disable=W0611
# flake8: noqa
from pandas.core.arrays.sparse import SparseArray, SparseDtype
from pandas.core.sparse.series import SparseSeries
from pandas.core.sparse.frame import SparseDataFrame
| harisbal/pandas | pandas/core/sparse/api.py | Python | bsd-3-clause | 206 |
import scrapy
from scrapy.crawler import CrawlerProcess
class IPv6Spider(scrapy.Spider):
    """Spider whose only start URL is the IPv6 loopback address.

    NOTE(review): presumably used to exercise IPv6 name resolution in the
    crawler -- confirm against the test harness that runs this script.
    """
    name = "ipv6_spider"
    # Bracketed IPv6 literal per RFC 3986 host syntax.
    start_urls = ["http://[::1]"]
# Retries disabled so a resolution/connection failure surfaces immediately.
process = CrawlerProcess(settings={"RETRY_ENABLED": False})
process.crawl(IPv6Spider)
# start() runs the reactor and blocks until the crawl finishes.
process.start()
| starrify/scrapy | tests/CrawlerProcess/default_name_resolver.py | Python | bsd-3-clause | 254 |
#
# This file is part of GreatFET
#
from ..interface import GreatFETInterface
class PatternGenerator(GreatFETInterface):
    """
    Class that supports using the GreatFET as a simple pattern generator.
    """

    def __init__(self, board, sample_rate=1e6, bus_width=8):
        """ Set up a GreatFET pattern generator object.

        board       -- The GreatFET board object to drive.
        sample_rate -- Scan-out rate, in samples per second (coerced to int).
        bus_width   -- Number of parallel output signals.
        """
        # Grab a reference to the board and its pattern-gen API.
        self.board = board
        self.api = board.apis.pattern_generator
        # Grab a reference to the user's bus parameters.
        self.sample_rate = int(sample_rate)
        self.bus_width = bus_width
        # FIXME: These should be read from the board, rather than hardcoded!
        # Chunk size for sample uploads; on-board sample memory capacity.
        self.upload_chunk_size = 2048
        self.samples_max = 32 * 1024

    def set_sample_rate(self, sample_rate):
        """ Updates the generator's sample rates. """
        self.sample_rate = int(sample_rate)

    def _upload_samples(self, samples):
        """ Uploads a collection of samples into the board's sample memory; precedes scan-out of those samples. """
        # Iterate over the full set of provided samples, uploading them in
        # chunks so each transfer stays within upload_chunk_size.
        for offset in range(0, len(samples), self.upload_chunk_size):
            chunk = samples[offset:offset + self.upload_chunk_size]
            self.api.upload_samples(offset, chunk)

    def scan_out_pattern(self, samples, repeat=True):
        """ Sends a collection of fixed samples to the board, and then
        instructs it to scan them out (repeatedly, when `repeat` is set). """
        samples = bytes(samples)
        # Upload the samples to be scanned out...
        self._upload_samples(samples)
        # ... and then trigger the scan-out itself.
        self.api.generate_pattern(self.sample_rate, self.bus_width, len(samples), repeat)

    def stop(self):
        """ Stops the board from scanning out any further samples. """
        self.api.stop()

    def dump_sgpio_config(self, include_unused=False):
        """ Debug function; returns the board's dumped SGPIO configuration. """
        self.api.dump_sgpio_configuration(include_unused)
        return self.board.read_debug_ring()
| dominicgs/GreatFET-experimental | host/greatfet/interfaces/pattern_generator.py | Python | bsd-3-clause | 2,137 |
"""Example macro."""
revision = "$Rev$"
url = "$URL$"
#
# The following shows the code for macro, old-style.
#
# The `execute` function serves no purpose other than to illustrate
# the example, it will not be used anymore.
#
# ---- (ignore in your own macro) ----
# --
from trac.util import escape
def execute(hdf, txt, env):
    """Render the macro (old-style API): greet and echo the escaped args."""
    # hdf is only provided when the macro is invoked from a wiki page.
    if hdf:
        hdf['wiki.macro.greeting'] = 'Hello World'
    # txt is None when the macro is called without parentheses; substitute a
    # placeholder, then escape the user-supplied text because it could carry
    # malicious HTML/JavaScript.
    shown = escape(txt or 'No arguments')
    return 'Hello World, args = ' + shown
# --
# ---- (ignore in your own macro) ----
#
# The following is the converted new-style macro
#
# ---- (reuse for your own macro) ----
# --
from trac.util.translation import cleandoc_
from trac.wiki.macros import WikiMacroBase
class HelloWorldMacro(WikiMacroBase):
    # Bug fix: ``cleandoc_`` was used here without ever being imported in
    # this file (NameError at import time); it lives in
    # trac.util.translation and is now imported at the top of the file.
    _description = cleandoc_(
    """Simple HelloWorld macro.

    Note that the name of the class is meaningful:
    - it must end with "Macro"
    - what comes before "Macro" ends up being the macro name

    The documentation of the class (i.e. what you're reading)
    will become the documentation of the macro, as shown by
    the !MacroList macro (usually used in the TracWikiMacros page).
    """)

    def expand_macro(self, formatter, name, args):
        """Return some output that will be displayed in the Wiki content.

        `name` is the actual name of the macro (no surprise, here it'll be
        `'HelloWorld'`),
        `args` is the text enclosed in parenthesis at the call of the macro.
        Note that if there are ''no'' parenthesis (like in, e.g.
        [[HelloWorld]]), then `args` is `None`.
        """
        # No HTML escaping needed: the template engine (Genshi) escapes the
        # returned data for us.
        return 'Hello World, args = ' + unicode(args)
# Note that there's no need to HTML escape the returned data,
# as the template engine (Genshi) will do it for us.
# --
# ---- (reuse for your own macro) ----
| moreati/trac-gitsvn | sample-plugins/HelloWorld.py | Python | bsd-3-clause | 2,080 |
# -*- coding: utf-8 -*-
"""
flaskext.sqlalchemy
~~~~~~~~~~~~~~~~~~~
Adds basic SQLAlchemy support to your application.
:copyright: (c) 2014 by Armin Ronacher, Daniel Neuhäuser.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement, absolute_import
import os
import re
import sys
import time
import functools
import warnings
import sqlalchemy
from math import ceil
from functools import partial
from flask import _request_ctx_stack, abort, has_request_context, request
from flask.signals import Namespace
from operator import itemgetter
from threading import Lock
from sqlalchemy import orm, event, inspect
from sqlalchemy.orm.exc import UnmappedClassError
from sqlalchemy.orm.session import Session as SessionBase
from sqlalchemy.engine.url import make_url
from sqlalchemy.ext.declarative import declarative_base, DeclarativeMeta
from flask_sqlalchemy._compat import iteritems, itervalues, xrange, \
string_types
# the best timer function for the platform
# (on this py2-era codebase time.clock has the higher resolution on Windows)
if sys.platform == 'win32':
    _timer = time.clock
else:
    _timer = time.time

# Flask 0.9+ provides an application context stack; older versions do not.
try:
    from flask import _app_ctx_stack
except ImportError:
    _app_ctx_stack = None


__version__ = '2.0'

# Which stack should we use?  _app_ctx_stack is new in 0.9
connection_stack = _app_ctx_stack or _request_ctx_stack


# NOTE(review): presumably used to split CamelCase class names into words
# (e.g. for deriving table names); its use is outside this chunk -- confirm.
_camelcase_re = re.compile(r'([A-Z]+)(?=[a-z0-9])')

_signals = Namespace()
# Fired around a session commit with the accumulated list of model changes.
models_committed = _signals.signal('models-committed')
before_models_committed = _signals.signal('before-models-committed')
def _make_table(db):
    """Return a Table factory bound to *db* (exposed as ``db.Table``).

    The wrapper mimics ``sqlalchemy.Table`` but injects ``db.metadata``
    when the caller omits the metadata positional argument, and ensures a
    ``bind_key`` entry exists in the table's ``info`` dict.
    """
    def _make_table(*args, **kwargs):
        # If the second positional arg is already a Column, the caller left
        # out the metadata; splice in the db-wide MetaData object.
        if len(args) > 1 and isinstance(args[1], db.Column):
            args = (args[0], db.metadata) + args[1:]
        info = kwargs.pop('info', None) or {}
        # get_bind() later reads info['bind_key'] to pick the engine.
        info.setdefault('bind_key', None)
        kwargs['info'] = info
        return sqlalchemy.Table(*args, **kwargs)
    return _make_table
def _set_default_query_class(d):
    """Fill in BaseQuery as the query_class when the caller didn't pick one."""
    if 'query_class' in d:
        return
    d['query_class'] = BaseQuery
def _wrap_with_default_query_class(fn):
    """Wrap *fn* so calls default ``query_class`` (and a backref's options)
    to BaseQuery."""
    @functools.wraps(fn)
    def newfn(*args, **kwargs):
        _set_default_query_class(kwargs)
        if "backref" in kwargs:
            backref = kwargs['backref']
            if isinstance(backref, string_types):
                # NOTE(review): this rebinds the local only -- the
                # (name, {}) tuple and its defaulted dict are never written
                # back to kwargs, so for plain-string backrefs the default
                # is effectively dropped. Possibly intentional (SQLAlchemy
                # accepts a bare string backref); confirm before "fixing".
                backref = (backref, {})
            _set_default_query_class(backref[1])
        return fn(*args, **kwargs)
    return newfn
def _include_sqlalchemy(obj):
    """Copy the public sqlalchemy / sqlalchemy.orm API onto *obj*.

    This is what makes ``db.Column``, ``db.relationship`` etc. available.
    Pre-existing attributes on *obj* win, and a few callables are then
    replaced with wrappers that inject Flask-SQLAlchemy defaults.
    """
    for module in sqlalchemy, sqlalchemy.orm:
        for key in module.__all__:
            if not hasattr(obj, key):
                setattr(obj, key, getattr(module, key))
    # Note: obj.Table does not attempt to be a SQLAlchemy Table class.
    obj.Table = _make_table(obj)
    obj.relationship = _wrap_with_default_query_class(obj.relationship)
    obj.relation = _wrap_with_default_query_class(obj.relation)
    obj.dynamic_loader = _wrap_with_default_query_class(obj.dynamic_loader)
    obj.event = event
class _DebugQueryTuple(tuple):
statement = property(itemgetter(0))
parameters = property(itemgetter(1))
start_time = property(itemgetter(2))
end_time = property(itemgetter(3))
context = property(itemgetter(4))
@property
def duration(self):
return self.end_time - self.start_time
def __repr__(self):
return '<query statement="%s" parameters=%r duration=%.03f>' % (
self.statement,
self.parameters,
self.duration
)
def _calling_context(app_path):
    """Walk up the call stack to find where inside the application package
    the current query originated.

    Returns ``"filename:lineno (funcname)"`` for the first frame whose
    module is *app_path* or a submodule of it, or ``'<unknown>'`` when no
    such frame exists.
    """
    # Start one frame above this helper; walk towards the stack root.
    frm = sys._getframe(1)
    while frm.f_back is not None:
        name = frm.f_globals.get('__name__')
        if name and (name == app_path or name.startswith(app_path + '.')):
            funcname = frm.f_code.co_name
            return '%s:%s (%s)' % (
                frm.f_code.co_filename,
                frm.f_lineno,
                funcname
            )
        frm = frm.f_back
    return '<unknown>'
class SignallingSession(SessionBase):
    """The signalling session is the default session that Flask-SQLAlchemy
    uses. It extends the default session system with bind selection and
    modification tracking.

    If you want to use a different session you can override the
    :meth:`SQLAlchemy.create_session` function.

    .. versionadded:: 2.0
    """

    def __init__(self, db, autocommit=False, autoflush=True, **options):
        #: The application that this session belongs to.
        self.app = app = db.get_app()
        track_modifications = app.config['SQLALCHEMY_TRACK_MODIFICATIONS']
        bind = options.pop('bind', None) or db.engine
        # None (unset) keeps modification tracking on, matching the
        # historical default; only an explicit False disables it.
        if track_modifications is None or track_modifications:
            _SessionSignalEvents.register(self)
        SessionBase.__init__(
            self, autocommit=autocommit, autoflush=autoflush,
            bind=bind, binds=db.get_binds(self.app), **options
        )

    def get_bind(self, mapper=None, clause=None):
        """Return the engine for *mapper*, honoring a table-level
        ``bind_key`` when one is set; otherwise defer to SQLAlchemy."""
        # mapper is None if someone tries to just get a connection
        if mapper is not None:
            info = getattr(mapper.mapped_table, 'info', {})
            bind_key = info.get('bind_key')
            if bind_key is not None:
                state = get_state(self.app)
                return state.db.get_engine(self.app, bind=bind_key)
        return SessionBase.get_bind(self, mapper, clause)
class _SessionSignalEvents(object):
    """Static helpers wiring session lifecycle events to the model signals.

    Per-session changes accumulate in ``session._model_changes`` and are
    flushed out via ``before_models_committed`` / ``models_committed``.
    """

    @classmethod
    def register(cls, session):
        if not hasattr(session, '_model_changes'):
            session._model_changes = {}
        event.listen(session, 'before_flush', cls.record_ops)
        event.listen(session, 'before_commit', cls.record_ops)
        event.listen(session, 'before_commit', cls.before_commit)
        event.listen(session, 'after_commit', cls.after_commit)
        event.listen(session, 'after_rollback', cls.after_rollback)

    @classmethod
    def unregister(cls, session):
        if hasattr(session, '_model_changes'):
            del session._model_changes
        event.remove(session, 'before_flush', cls.record_ops)
        event.remove(session, 'before_commit', cls.record_ops)
        event.remove(session, 'before_commit', cls.before_commit)
        event.remove(session, 'after_commit', cls.after_commit)
        event.remove(session, 'after_rollback', cls.after_rollback)

    @staticmethod
    def record_ops(session, flush_context=None, instances=None):
        # Sessions without change tracking (unregistered) are skipped.
        try:
            d = session._model_changes
        except AttributeError:
            return
        # Keyed by identity so a later operation overwrites an earlier one
        # for the same object.
        for targets, operation in ((session.new, 'insert'), (session.dirty, 'update'), (session.deleted, 'delete')):
            for target in targets:
                state = inspect(target)
                # Pending objects have no identity yet; fall back to id().
                key = state.identity_key if state.has_identity else id(target)
                d[key] = (target, operation)

    @staticmethod
    def before_commit(session):
        try:
            d = session._model_changes
        except AttributeError:
            return
        if d:
            before_models_committed.send(session.app, changes=list(d.values()))

    @staticmethod
    def after_commit(session):
        try:
            d = session._model_changes
        except AttributeError:
            return
        if d:
            models_committed.send(session.app, changes=list(d.values()))
            d.clear()

    @staticmethod
    def after_rollback(session):
        try:
            d = session._model_changes
        except AttributeError:
            return
        # A rollback discards any recorded-but-uncommitted changes.
        d.clear()
class _EngineDebuggingSignalEvents(object):
    """Sets up handlers for two events that let us track the execution time of queries."""

    def __init__(self, engine, import_name):
        self.engine = engine
        # Root package of the application; used to locate the calling frame.
        self.app_package = import_name

    def register(self):
        event.listen(self.engine, 'before_cursor_execute', self.before_cursor_execute)
        event.listen(self.engine, 'after_cursor_execute', self.after_cursor_execute)

    def before_cursor_execute(self, conn, cursor, statement,
                              parameters, context, executemany):
        # Only time queries issued while an app/request context is active.
        if connection_stack.top is not None:
            context._query_start_time = _timer()

    def after_cursor_execute(self, conn, cursor, statement,
                             parameters, context, executemany):
        ctx = connection_stack.top
        if ctx is not None:
            # Lazily attach the per-context query list on first use.
            queries = getattr(ctx, 'sqlalchemy_queries', None)
            if queries is None:
                queries = []
                setattr(ctx, 'sqlalchemy_queries', queries)
            queries.append(_DebugQueryTuple((
                statement, parameters, context._query_start_time, _timer(),
                _calling_context(self.app_package))))
def get_debug_queries():
    """In debug mode Flask-SQLAlchemy will log all the SQL queries sent
    to the database. This information is available until the end of request
    which makes it possible to easily ensure that the SQL generated is the
    one expected on errors or in unittesting. If you don't want to enable
    the DEBUG mode for your unittests you can also enable the query
    recording by setting the ``'SQLALCHEMY_RECORD_QUERIES'`` config variable
    to `True`. This is automatically enabled if Flask is in testing mode.

    The value returned will be a list of named tuples with the following
    attributes:

    `statement`
        The SQL statement issued

    `parameters`
        The parameters for the SQL statement

    `start_time` / `end_time`
        Time the query started / the results arrived. Please keep in mind
        that the timer function used depends on your platform. These
        values are only useful for sorting or comparing. They do not
        necessarily represent an absolute timestamp.

    `duration`
        Time the query took in seconds

    `context`
        A string giving a rough estimation of where in your application
        query was issued. The exact format is undefined so don't try
        to reconstruct filename or function name.
    """
    # Populated by _EngineDebuggingSignalEvents on the active context stack.
    return getattr(connection_stack.top, 'sqlalchemy_queries', [])
class Pagination(object):
    """Internal helper class returned by :meth:`BaseQuery.paginate`. You
    can also construct it from any other SQLAlchemy query object if you are
    working with other libraries. Additionally it is possible to pass `None`
    as query object in which case the :meth:`prev` and :meth:`next` will
    no longer work.
    """

    def __init__(self, query, page, per_page, total, items):
        #: the unlimited query object that was used to create this
        #: pagination object.
        self.query = query
        #: the current page number (1 indexed)
        self.page = page
        #: the number of items to be displayed on a page.
        self.per_page = per_page
        #: the total number of items matching the query
        self.total = total
        #: the items for the current page
        self.items = items

    @property
    def pages(self):
        """The total number of pages"""
        # Guard against division by zero when per_page is 0.
        if self.per_page == 0:
            pages = 0
        else:
            pages = int(ceil(self.total / float(self.per_page)))
        return pages

    def prev(self, error_out=False):
        """Returns a :class:`Pagination` object for the previous page."""
        assert self.query is not None, 'a query object is required ' \
                                       'for this method to work'
        return self.query.paginate(self.page - 1, self.per_page, error_out)

    @property
    def prev_num(self):
        """Number of the previous page."""
        return self.page - 1

    @property
    def has_prev(self):
        """True if a previous page exists"""
        return self.page > 1

    def next(self, error_out=False):
        """Returns a :class:`Pagination` object for the next page."""
        assert self.query is not None, 'a query object is required ' \
                                       'for this method to work'
        return self.query.paginate(self.page + 1, self.per_page, error_out)

    @property
    def has_next(self):
        """True if a next page exists."""
        return self.page < self.pages

    @property
    def next_num(self):
        """Number of the next page"""
        return self.page + 1

    def iter_pages(self, left_edge=2, left_current=2,
                   right_current=5, right_edge=2):
        """Iterates over the page numbers in the pagination. The four
        parameters control the thresholds how many numbers should be produced
        from the sides. Skipped page numbers are represented as `None`.
        This is how you could render such a pagination in the templates:

        .. sourcecode:: html+jinja

            {% macro render_pagination(pagination, endpoint) %}
              <div class=pagination>
              {%- for page in pagination.iter_pages() %}
                {% if page %}
                  {% if page != pagination.page %}
                    <a href="{{ url_for(endpoint, page=page) }}">{{ page }}</a>
                  {% else %}
                    <strong>{{ page }}</strong>
                  {% endif %}
                {% else %}
                  <span class=ellipsis>…</span>
                {% endif %}
              {%- endfor %}
              </div>
            {% endmacro %}
        """
        # Yield a page number when it falls in the left edge, the window
        # around the current page, or the right edge; emit a single None
        # for each gap between those regions.
        last = 0
        for num in xrange(1, self.pages + 1):
            if num <= left_edge or \
               (num > self.page - left_current - 1 and \
                num < self.page + right_current) or \
               num > self.pages - right_edge:
                if last + 1 != num:
                    yield None
                yield num
                last = num
class BaseQuery(orm.Query):
    """The default query object used for models, and exposed as
    :attr:`~SQLAlchemy.Query`. This can be subclassed and
    replaced for individual models by setting the :attr:`~Model.query_class`
    attribute. This is a subclass of a standard SQLAlchemy
    :class:`~sqlalchemy.orm.query.Query` class and has all the methods of a
    standard query as well.
    """

    def get_or_404(self, ident):
        """Like :meth:`get` but aborts with 404 if not found instead of
        returning `None`.
        """
        rv = self.get(ident)
        if rv is None:
            abort(404)
        return rv

    def first_or_404(self):
        """Like :meth:`first` but aborts with 404 if not found instead of
        returning `None`.
        """
        rv = self.first()
        if rv is None:
            abort(404)
        return rv

    def paginate(self, page=None, per_page=None, error_out=True):
        """Returns `per_page` items from page `page`. By default it will
        abort with 404 if no items were found and the page was larger than
        1. This behavor can be disabled by setting `error_out` to `False`.

        If page or per_page are None, they will be retrieved from the
        request query. If the values are not ints and ``error_out`` is
        true, it will abort with 404. If there is no request or they
        aren't in the query, they default to page 1 and 20
        respectively.

        Returns an :class:`Pagination` object.
        """
        if has_request_context():
            # Fall back to ?page= / ?per_page= query-string arguments when
            # the caller didn't pass explicit values.
            if page is None:
                try:
                    page = int(request.args.get('page', 1))
                except (TypeError, ValueError):
                    if error_out:
                        abort(404)
                    page = 1
            if per_page is None:
                try:
                    per_page = int(request.args.get('per_page', 20))
                except (TypeError, ValueError):
                    if error_out:
                        abort(404)
                    per_page = 20
        else:
            # Outside a request, just apply the documented defaults.
            if page is None:
                page = 1
            if per_page is None:
                per_page = 20
        if error_out and page < 1:
            abort(404)
        items = self.limit(per_page).offset((page - 1) * per_page).all()
        if not items and page != 1 and error_out:
            abort(404)
        # No need to count if we're on the first page and there are fewer
        # items than we expected.
        if page == 1 and len(items) < per_page:
            total = len(items)
        else:
            total = self.order_by(None).count()
        return Pagination(self, page, per_page, total, items)
class _QueryProperty(object):
def __init__(self, sa):
self.sa = sa
def __get__(self, obj, type):
try:
mapper = orm.class_mapper(type)
if mapper:
return type.query_class(mapper, session=self.sa.session())
except UnmappedClassError:
return None
def _record_queries(app):
if app.debug:
return True
rq = app.config['SQLALCHEMY_RECORD_QUERIES']
if rq is not None:
return rq
return bool(app.config.get('TESTING'))
class _EngineConnector(object):
def __init__(self, sa, app, bind=None):
self._sa = sa
self._app = app
self._engine = None
self._connected_for = None
self._bind = bind
self._lock = Lock()
def get_uri(self):
if self._bind is None:
return self._app.config['SQLALCHEMY_DATABASE_URI']
binds = self._app.config.get('SQLALCHEMY_BINDS') or ()
assert self._bind in binds, \
'Bind %r is not specified. Set it in the SQLALCHEMY_BINDS ' \
'configuration variable' % self._bind
return binds[self._bind]
def get_engine(self):
with self._lock:
uri = self.get_uri()
echo = self._app.config['SQLALCHEMY_ECHO']
if (uri, echo) == self._connected_for:
return self._engine
info = make_url(uri)
options = {'convert_unicode': True}
self._sa.apply_pool_defaults(self._app, options)
self._sa.apply_driver_hacks(self._app, info, options)
if echo:
options['echo'] = True
self._engine = rv = sqlalchemy.create_engine(info, **options)
if _record_queries(self._app):
_EngineDebuggingSignalEvents(self._engine,
self._app.import_name).register()
self._connected_for = (uri, echo)
return rv
def _should_set_tablename(bases, d):
    """Decide whether a ``__tablename__`` should be auto-generated.

    The class dict is checked first, then each base in definition order.
    Abstract classes and classes that already carry a table or tablename
    never get a generated name; a primary key column (on the class or on
    a base without its own table) means a name is required.

    :param bases: base classes of new class
    :param d: new class dict
    :return: True if tablename should be set
    """
    if '__tablename__' in d or '__table__' in d or '__abstract__' in d:
        return False

    # A primary key declared directly on the class needs a table name.
    if any(isinstance(v, sqlalchemy.Column) and v.primary_key
           for v in itervalues(d)):
        return True

    for base in bases:
        # A base that already has a table (or a name for one) stops
        # generation: this is single-table inheritance.
        if hasattr(base, '__tablename__') or hasattr(base, '__table__'):
            return False
        for name in dir(base):
            attr = getattr(base, name)
            if isinstance(attr, sqlalchemy.Column) and attr.primary_key:
                return True
class _BoundDeclarativeMeta(DeclarativeMeta):
    """Declarative metaclass that derives ``__tablename__`` from the
    class name (``CamelCase`` -> ``camel_case``) when needed and records
    ``__bind_key__`` in the table info.
    """

    def __new__(cls, name, bases, d):
        if _should_set_tablename(bases, d):
            def _join(match):
                # Lowercase the run of capitals and insert underscores at
                # the word boundaries.
                word = match.group().lower()
                if len(word) > 1:
                    return '_%s_%s' % (word[:-1], word[-1])
                return '_' + word
            d['__tablename__'] = _camelcase_re.sub(_join, name).lstrip('_')
        return DeclarativeMeta.__new__(cls, name, bases, d)

    def __init__(self, name, bases, d):
        bind_key = d.pop('__bind_key__', None)
        DeclarativeMeta.__init__(self, name, bases, d)
        if bind_key is not None:
            self.__table__.info['bind_key'] = bind_key
def get_state(app):
    """Return the :class:`_SQLAlchemyState` registered on *app*."""
    assert 'sqlalchemy' in app.extensions, \
        'The sqlalchemy extension was not registered to the current ' \
        'application. Please make sure to call init_app() first.'
    return app.extensions['sqlalchemy']
class _SQLAlchemyState(object):
"""Remembers configuration for the (db, app) tuple."""
def __init__(self, db, app):
self.db = db
self.app = app
self.connectors = {}
class Model(object):
    """Baseclass for custom user models.

    Used as the ``cls`` for :func:`declarative_base` in
    :meth:`SQLAlchemy.make_declarative_base`.
    """

    #: the query class used. The :attr:`query` attribute is an instance
    #: of this class. By default a :class:`BaseQuery` is used.
    query_class = BaseQuery

    #: an instance of :attr:`query_class`. Can be used to query the
    #: database for instances of this model.
    #: (Replaced by a _QueryProperty descriptor on the declarative base.)
    query = None
class SQLAlchemy(object):
    """This class is used to control the SQLAlchemy integration to one
    or more Flask applications.  Depending on how you initialize the
    object it is usable right away or will attach as needed to a
    Flask application.

    There are two usage modes which work very similarly.  One is binding
    the instance to a very specific Flask application::

        app = Flask(__name__)
        db = SQLAlchemy(app)

    The second possibility is to create the object once and configure the
    application later to support it::

        db = SQLAlchemy()

        def create_app():
            app = Flask(__name__)
            db.init_app(app)
            return app

    The difference between the two is that in the first case methods like
    :meth:`create_all` and :meth:`drop_all` will work all the time but in
    the second case a :meth:`flask.Flask.app_context` has to exist.

    By default Flask-SQLAlchemy will apply some backend-specific settings
    to improve your experience with them.  As of SQLAlchemy 0.6 SQLAlchemy
    will probe the library for native unicode support.  If it detects
    unicode it will let the library handle that, otherwise do that itself.
    Sometimes this detection can fail in which case you might want to set
    `use_native_unicode` (or the ``SQLALCHEMY_NATIVE_UNICODE`` configuration
    key) to `False`.  Note that the configuration key overrides the
    value you pass to the constructor.

    This class also provides access to all the SQLAlchemy functions and
    classes from the :mod:`sqlalchemy` and :mod:`sqlalchemy.orm` modules.
    So you can declare models like this::

        class User(db.Model):
            username = db.Column(db.String(80), unique=True)
            pw_hash = db.Column(db.String(80))

    You can still use :mod:`sqlalchemy` and :mod:`sqlalchemy.orm` directly,
    but note that Flask-SQLAlchemy customizations are available only through
    an instance of this :class:`SQLAlchemy` class.  Query classes default to
    :class:`BaseQuery` for `db.Query`, `db.Model.query_class`, and the
    default query_class for `db.relationship` and `db.backref`.  If you use
    these interfaces through :mod:`sqlalchemy` and :mod:`sqlalchemy.orm`
    directly, the default query class will be that of :mod:`sqlalchemy`.

    .. admonition:: Check types carefully

       Don't perform type or `isinstance` checks against `db.Table`, which
       emulates `Table` behavior but is not a class. `db.Table` exposes the
       `Table` interface, but is a function which allows omission of metadata.

    You may also define your own SessionExtension instances as well when
    defining your SQLAlchemy class instance. You may pass your custom
    instances to the `session_extensions` keyword. This can be either a
    single SessionExtension instance, or a list of SessionExtension
    instances. In the following use case we use the VersionedListener
    from the SQLAlchemy versioning examples.::

        from history_meta import VersionedMeta, VersionedListener

        app = Flask(__name__)
        db = SQLAlchemy(app, session_extensions=[VersionedListener()])

        class User(db.Model):
            __metaclass__ = VersionedMeta
            username = db.Column(db.String(80), unique=True)
            pw_hash = db.Column(db.String(80))

    The `session_options` parameter can be used to override session
    options.  If provided it's a dict of parameters passed to the
    session's constructor.

    .. versionadded:: 0.10
       The `session_options` parameter was added.

    .. versionadded:: 0.16
       `scopefunc` is now accepted on `session_options`. It allows specifying
       a custom function which will define the SQLAlchemy session's scoping.

    .. versionadded:: 2.1
       The `metadata` parameter was added. This allows for setting custom
       naming conventions among other, non-trivial things.
    """

    def __init__(self, app=None, use_native_unicode=True, session_options=None, metadata=None):
        if session_options is None:
            session_options = {}
        # Scope sessions to the current app/request context by default.
        session_options.setdefault('scopefunc', connection_stack.__ident_func__)
        self.use_native_unicode = use_native_unicode
        self.session = self.create_scoped_session(session_options)
        self.Model = self.make_declarative_base(metadata)
        self.Query = BaseQuery
        self._engine_lock = Lock()
        self.app = app
        # Re-export sqlalchemy / sqlalchemy.orm names on this instance.
        _include_sqlalchemy(self)

        if app is not None:
            self.init_app(app)

    @property
    def metadata(self):
        """Returns the metadata"""
        return self.Model.metadata

    def create_scoped_session(self, options=None):
        """Helper factory method that creates a scoped session.  It
        internally calls :meth:`create_session`.
        """
        if options is None:
            options = {}
        scopefunc = options.pop('scopefunc', None)
        return orm.scoped_session(partial(self.create_session, options),
                                  scopefunc=scopefunc)

    def create_session(self, options):
        """Creates the session.  The default implementation returns a
        :class:`SignallingSession`.

        .. versionadded:: 2.0
        """
        return SignallingSession(self, **options)

    def make_declarative_base(self, metadata=None):
        """Creates the declarative base.

        The returned base uses :class:`Model` as its base class, the
        tablename-generating metaclass and a :class:`_QueryProperty`
        bound to this extension for its ``query`` attribute.
        """
        base = declarative_base(cls=Model, name='Model',
                                metadata=metadata,
                                metaclass=_BoundDeclarativeMeta)
        base.query = _QueryProperty(self)
        return base

    def init_app(self, app):
        """This callback can be used to initialize an application for the
        use with this database setup.  Never use a database in the context
        of an application not initialized that way or connections will
        leak.
        """
        app.config.setdefault('SQLALCHEMY_DATABASE_URI', 'sqlite://')
        app.config.setdefault('SQLALCHEMY_BINDS', None)
        app.config.setdefault('SQLALCHEMY_NATIVE_UNICODE', None)
        app.config.setdefault('SQLALCHEMY_ECHO', False)
        app.config.setdefault('SQLALCHEMY_RECORD_QUERIES', None)
        app.config.setdefault('SQLALCHEMY_POOL_SIZE', None)
        app.config.setdefault('SQLALCHEMY_POOL_TIMEOUT', None)
        app.config.setdefault('SQLALCHEMY_POOL_RECYCLE', None)
        app.config.setdefault('SQLALCHEMY_MAX_OVERFLOW', None)
        app.config.setdefault('SQLALCHEMY_COMMIT_ON_TEARDOWN', False)
        track_modifications = app.config.setdefault('SQLALCHEMY_TRACK_MODIFICATIONS', None)

        if track_modifications is None:
            warnings.warn('SQLALCHEMY_TRACK_MODIFICATIONS adds significant overhead and will be disabled by default in the future. Set it to True to suppress this warning.')

        if not hasattr(app, 'extensions'):
            app.extensions = {}
        app.extensions['sqlalchemy'] = _SQLAlchemyState(self, app)

        # Pick the newest teardown hook the running Flask version offers.
        # 0.9 and later
        if hasattr(app, 'teardown_appcontext'):
            teardown = app.teardown_appcontext
        # 0.7 to 0.8
        elif hasattr(app, 'teardown_request'):
            teardown = app.teardown_request
        # Older Flask versions
        else:
            if app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN']:
                raise RuntimeError("Commit on teardown requires Flask >= 0.7")
            teardown = app.after_request

        @teardown
        def shutdown_session(response_or_exc):
            # Optionally commit on clean teardown, then always remove the
            # scoped session so connections don't leak across requests.
            if app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN']:
                if response_or_exc is None:
                    self.session.commit()
            self.session.remove()
            return response_or_exc

    def apply_pool_defaults(self, app, options):
        """Copy configured pool settings into the engine *options* dict,
        skipping keys that were left unset (None).
        """
        def _setdefault(optionkey, configkey):
            value = app.config[configkey]
            if value is not None:
                options[optionkey] = value
        _setdefault('pool_size', 'SQLALCHEMY_POOL_SIZE')
        _setdefault('pool_timeout', 'SQLALCHEMY_POOL_TIMEOUT')
        _setdefault('pool_recycle', 'SQLALCHEMY_POOL_RECYCLE')
        _setdefault('max_overflow', 'SQLALCHEMY_MAX_OVERFLOW')

    def apply_driver_hacks(self, app, info, options):
        """This method is called before engine creation and used to inject
        driver specific hacks into the options.  The `options` parameter is
        a dictionary of keyword arguments that will then be used to call
        the :func:`sqlalchemy.create_engine` function.

        The default implementation provides some saner defaults for things
        like pool sizes for MySQL and sqlite.  Also it injects the setting of
        `SQLALCHEMY_NATIVE_UNICODE`.
        """
        if info.drivername.startswith('mysql'):
            info.query.setdefault('charset', 'utf8')
            # Google App Engine's driver manages its own pool.
            if info.drivername != 'mysql+gaerdbms':
                options.setdefault('pool_size', 10)
                options.setdefault('pool_recycle', 7200)
        elif info.drivername == 'sqlite':
            pool_size = options.get('pool_size')
            detected_in_memory = False
            # we go to memory and the pool size was explicitly set to 0
            # which is fail.  Let the user know that
            if info.database in (None, '', ':memory:'):
                detected_in_memory = True
                from sqlalchemy.pool import StaticPool
                options['poolclass'] = StaticPool
                if 'connect_args' not in options:
                    options['connect_args'] = {}
                options['connect_args']['check_same_thread'] = False
                if pool_size == 0:
                    raise RuntimeError('SQLite in memory database with an '
                                       'empty queue not possible due to data '
                                       'loss.')
            # if pool size is None or explicitly set to 0 we assume the
            # user did not want a queue for this sqlite connection and
            # hook in the null pool.
            elif not pool_size:
                from sqlalchemy.pool import NullPool
                options['poolclass'] = NullPool

            # if it's not an in memory database we make the path absolute.
            if not detected_in_memory:
                info.database = os.path.join(app.root_path, info.database)

        unu = app.config['SQLALCHEMY_NATIVE_UNICODE']
        if unu is None:
            unu = self.use_native_unicode
        if not unu:
            options['use_native_unicode'] = False

    @property
    def engine(self):
        """Gives access to the engine.  If the database configuration is bound
        to a specific application (initialized with an application) this will
        always return a database connection.  If however the current application
        is used this might raise a :exc:`RuntimeError` if no application is
        active at the moment.
        """
        return self.get_engine(self.get_app())

    def make_connector(self, app, bind=None):
        """Creates the connector for a given state and bind."""
        return _EngineConnector(self, app, bind)

    def get_engine(self, app, bind=None):
        """Returns a specific engine.

        Connectors are cached per bind on the application state.

        .. versionadded:: 0.12
        """
        with self._engine_lock:
            state = get_state(app)
            connector = state.connectors.get(bind)
            if connector is None:
                connector = self.make_connector(app, bind)
                state.connectors[bind] = connector
            return connector.get_engine()

    def get_app(self, reference_app=None):
        """Helper method that implements the logic to look up an application.

        Resolution order: explicit argument, the app bound at construction
        time, then the current application context.
        """
        if reference_app is not None:
            return reference_app
        if self.app is not None:
            return self.app
        ctx = connection_stack.top
        if ctx is not None:
            return ctx.app
        raise RuntimeError('application not registered on db '
                           'instance and no application bound '
                           'to current context')

    def get_tables_for_bind(self, bind=None):
        """Returns a list of all tables relevant for a bind."""
        result = []
        for table in itervalues(self.Model.metadata.tables):
            if table.info.get('bind_key') == bind:
                result.append(table)
        return result

    def get_binds(self, app=None):
        """Returns a dictionary with a table->engine mapping.

        This is suitable for use of sessionmaker(binds=db.get_binds(app)).
        """
        app = self.get_app(app)
        binds = [None] + list(app.config.get('SQLALCHEMY_BINDS') or ())
        retval = {}
        for bind in binds:
            engine = self.get_engine(app, bind)
            tables = self.get_tables_for_bind(bind)
            retval.update(dict((table, engine) for table in tables))
        return retval

    def _execute_for_all_tables(self, app, bind, operation, skip_tables=False):
        # Run a metadata-level operation (create_all/drop_all/reflect) on
        # every requested bind; '__all__' expands to None + configured binds.
        app = self.get_app(app)

        if bind == '__all__':
            binds = [None] + list(app.config.get('SQLALCHEMY_BINDS') or ())
        elif isinstance(bind, string_types) or bind is None:
            binds = [bind]
        else:
            binds = bind

        for bind in binds:
            extra = {}
            if not skip_tables:
                tables = self.get_tables_for_bind(bind)
                extra['tables'] = tables
            op = getattr(self.Model.metadata, operation)
            op(bind=self.get_engine(app, bind), **extra)

    def create_all(self, bind='__all__', app=None):
        """Creates all tables.

        .. versionchanged:: 0.12
           Parameters were added
        """
        self._execute_for_all_tables(app, bind, 'create_all')

    def drop_all(self, bind='__all__', app=None):
        """Drops all tables.

        .. versionchanged:: 0.12
           Parameters were added
        """
        self._execute_for_all_tables(app, bind, 'drop_all')

    def reflect(self, bind='__all__', app=None):
        """Reflects tables from the database.

        .. versionchanged:: 0.12
           Parameters were added
        """
        self._execute_for_all_tables(app, bind, 'reflect', skip_tables=True)

    def __repr__(self):
        """Show the class name and the URI of the bound database (if any)."""
        app = None
        if self.app is not None:
            app = self.app
        else:
            ctx = connection_stack.top
            if ctx is not None:
                app = ctx.app
        return '<%s engine=%r>' % (
            self.__class__.__name__,
            app and app.config['SQLALCHEMY_DATABASE_URI'] or None
        )
| heejongahn/flask-sqlalchemy | flask_sqlalchemy/__init__.py | Python | bsd-3-clause | 35,980 |
import datetime
import logging
from collections import defaultdict
from billy.core import db
from billy.core import settings
from billy.utils import term_for_session
from billy.reports.utils import (update_common, get_quality_exceptions,
combine_reports)
logger = logging.getLogger('billy')
def _bill_report_dict():
return {'upper_count': 0,
'lower_count': 0,
'bill_types': defaultdict(int),
'_updated_this_year_count': 0,
'_updated_this_month_count': 0,
'_updated_today_count': 0,
'actions_unsorted': set(),
'actionless_count': 0,
'action_count': 0,
'actions_per_type': defaultdict(int),
'actions_per_actor': defaultdict(int),
'actions_per_month': defaultdict(int),
'sponsorless_count': 0,
'_sponsor_count': 0,
'_sponsors_with_id_count': 0,
'sponsors_per_type': defaultdict(int),
'_subjects_count': 0,
'bills_per_subject': defaultdict(int),
'versionless_count': 0,
'version_count': 0,
'unmatched_sponsors': set(),
'progress_meter_gaps': set(),
}
def scan_bills(abbr):
    """Scan every bill for jurisdiction *abbr* and accumulate quality stats.

    Returns a dict with duplicate source/version URLs, 'other' action and
    uncategorized subject tallies, and a per-session report mapping
    (each value produced by :func:`_bill_report_dict`).
    """
    duplicate_sources = defaultdict(int)
    duplicate_versions = defaultdict(int)
    other_actions = defaultdict(int)
    uncategorized_subjects = defaultdict(int)
    sessions = defaultdict(_bill_report_dict)

    # load exception data into sets of ids indexed by exception type
    quality_exceptions = get_quality_exceptions(abbr)

    for bill in db.bills.find({settings.LEVEL_FIELD: abbr}):
        session_d = sessions[bill['session']]

        # chamber count & bill_types
        if bill['chamber'] == 'lower':
            session_d['lower_count'] += 1
        elif bill['chamber'] == 'upper':
            session_d['upper_count'] += 1
        for type in bill['type']:
            session_d['bill_types'][type] += 1

        update_common(bill, session_d)

        # actions
        last_date = datetime.datetime(1900, 1, 1)
        for action in bill['actions']:
            date = action['date']
            if date < last_date:
                session_d['actions_unsorted'].add(bill['_id'])
            # Bug fix: remember the previous action's date. Without this
            # assignment last_date stayed at 1900 and the out-of-order
            # check above could never fire.
            last_date = date
            session_d['action_count'] += 1
            for type in action['type']:
                session_d['actions_per_type'][type] += 1
            if 'other' in action['type']:
                other_actions[action['action']] += 1
            session_d['actions_per_actor'][action['actor']] += 1
            session_d['actions_per_month'][date.strftime('%Y-%m')] += 1

        # handle no_actions bills
        if not bill['actions']:
            if bill['_id'] not in quality_exceptions['bills:no_actions']:
                session_d['actionless_count'] += 1
            else:
                quality_exceptions['bills:no_actions'].remove(bill['_id'])

        # sponsors
        for sponsor in bill['sponsors']:
            session_d['_sponsor_count'] += 1
            if sponsor.get('leg_id') or sponsor.get('committee_id'):
                session_d['_sponsors_with_id_count'] += 1
            else:
                # keep list of unmatched sponsors
                session_d['unmatched_sponsors'].add(
                    (term_for_session(abbr, bill['session']), bill['chamber'],
                     sponsor['name'])
                )
            session_d['sponsors_per_type'][sponsor['type']] += 1

        # handle no sponsors bills
        if not bill['sponsors']:
            if bill['_id'] not in quality_exceptions['bills:no_sponsors']:
                session_d['sponsorless_count'] += 1
            else:
                quality_exceptions['bills:no_sponsors'].remove(bill['_id'])

        # subjects
        for subj in bill.get('scraped_subjects', []):
            uncategorized_subjects[subj] += 1
        if bill.get('subjects'):
            session_d['_subjects_count'] += 1
            for subject in bill['subjects']:
                session_d['bills_per_subject'][subject] += 1

        # sources
        for source in bill['sources']:
            duplicate_sources[source['url']] += 1

        # versions
        if not bill['versions']:
            # total num of bills w/o versions
            if bill['_id'] not in quality_exceptions['bills:no_versions']:
                session_d['versionless_count'] += 1
            else:
                quality_exceptions['bills:no_versions'].remove(bill['_id'])
        else:
            # total num of versions
            session_d['version_count'] += len(bill['versions'])
            for doc in bill['versions']:
                duplicate_versions[doc['url']] += 1
        # TODO: add duplicate document detection back in?

        # Check for progress meter gaps.
        progress_meter_gaps = session_d['progress_meter_gaps']
        action_dates = bill['action_dates']
        bill_chamber = bill['chamber']
        other_chamber = dict(lower='upper', upper='lower')[bill_chamber]

        # Check for bills that were signed but didn't pass both chambers.
        if bill['type'] == 'bill':
            if action_dates['signed']:
                if not action_dates['passed_upper']:
                    progress_meter_gaps.add(bill['_id'])
                elif not action_dates['passed_lower']:
                    progress_meter_gaps.add(bill['_id'])
        else:
            # Check for nonbills that were signed but didn't pass their
            # house of origin.
            if action_dates['signed']:
                if not action_dates['passed_' + bill_chamber]:
                    progress_meter_gaps.add(bill['_id'])
            if action_dates['passed_' + other_chamber]:
                if not action_dates['passed_' + bill_chamber]:
                    progress_meter_gaps.add(bill['_id'])

    dup_version_urls = []
    dup_source_urls = []
    for url, n in duplicate_versions.iteritems():
        if n > 1:
            dup_version_urls.append(url)
    for url, n in duplicate_sources.iteritems():
        if n > 1:
            dup_source_urls.append(url)

    # do logging of unnecessary exceptions
    for qe_type, qes in quality_exceptions.iteritems():
        if qes:
            logger.warning('unnecessary {0} exceptions for {1} bills: \n {2}'
                           .format(qe_type, len(qes), '\n '.join(qes)))

    return {'duplicate_versions': dup_version_urls,
            'duplicate_sources': dup_source_urls,
            'other_actions': other_actions.items(),
            'uncategorized_subjects': uncategorized_subjects.items(),
            'sessions': sessions,
            # NOTE(review): per-session gaps live in each session report's
            # 'progress_meter_gaps' set; this top-level list has always been
            # empty -- confirm that downstream consumers expect that.
            'progress_meter_gaps': []
            }
def calculate_percentages(report):
    """Convert the raw counters in *report* (in place) to percentages,
    popping the private ``_*`` counters as they are consumed.
    """
    # general bill stuff
    bill_count = float(report['upper_count'] + report['lower_count']) / 100
    if bill_count:
        for dest, src in (('updated_this_year', '_updated_this_year_count'),
                          ('updated_this_month', '_updated_this_month_count'),
                          ('updated_today', '_updated_today_count'),
                          ('have_subjects', '_subjects_count')):
            report[dest] = report.pop(src) / bill_count

    # actions
    action_count = float(report['action_count']) / 100
    if action_count:
        for key in ('actions_per_type', 'actions_per_actor',
                    'actions_per_month'):
            for k in report[key]:
                report[key][k] /= action_count

    # sponsors
    sponsor_count = float(report.pop('_sponsor_count')) / 100
    if sponsor_count:
        report['sponsors_with_id'] = (
            report.pop('_sponsors_with_id_count') / sponsor_count)
        for k in report['sponsors_per_type']:
            report['sponsors_per_type'][k] /= sponsor_count
def bill_report(abbr):
    """Build the complete bill quality report for one jurisdiction."""
    report = scan_bills(abbr)
    sessions = report['sessions']
    # Roll all per-session reports up into one combined report.
    combined = combine_reports(sessions, _bill_report_dict())
    for session_report in sessions.itervalues():
        calculate_percentages(session_report)
    calculate_percentages(combined)
    report.update(combined)
    return report
| loandy/billy | billy/reports/bills.py | Python | bsd-3-clause | 8,433 |
from datetime import datetime
from django.core.exceptions import ValidationError
from django.db import models
def validate_answer_to_universe(value):
    """Field validator that only accepts 42; raises with code 'not42'."""
    if value != 42:
        raise ValidationError('This is not the answer to life, universe and everything!', code='not42')
class ModelToValidate(models.Model):
    """Model exercising many field types, validators and a custom clean()."""
    name = models.CharField(max_length=100)
    created = models.DateTimeField(default=datetime.now)
    # Stored under a custom column name to exercise db_column handling.
    number = models.IntegerField(db_column='number_val')
    # Self-referential FK restricted to parents whose number is 10.
    parent = models.ForeignKey(
        'self',
        models.SET_NULL,
        blank=True, null=True,
        limit_choices_to={'number': 10},
    )
    email = models.EmailField(blank=True)
    # FK that targets a non-pk unique column on UniqueFieldsModel.
    ufm = models.ForeignKey(
        'UniqueFieldsModel',
        models.SET_NULL,
        to_field='unique_charfield',
        blank=True, null=True,
    )
    url = models.URLField(blank=True)
    f_with_custom_validator = models.IntegerField(blank=True, null=True, validators=[validate_answer_to_universe])
    # Validators may be supplied as any iterable, not just a list.
    f_with_iterable_of_validators = models.IntegerField(blank=True, null=True,
                                                        validators=(validate_answer_to_universe,))
    slug = models.SlugField(blank=True)

    def clean(self):
        """Model-level validation on top of the field validators."""
        super(ModelToValidate, self).clean()
        if self.number == 11:
            raise ValidationError('Invalid number supplied!')
class UniqueFieldsModel(models.Model):
    """Model with individually-unique fields (no unique_together)."""
    unique_charfield = models.CharField(max_length=100, unique=True)
    unique_integerfield = models.IntegerField(unique=True)
    non_unique_field = models.IntegerField()
class CustomPKModel(models.Model):
    """Model with an explicitly declared primary key field."""
    my_pk_field = models.CharField(max_length=100, primary_key=True)
class UniqueTogetherModel(models.Model):
    """Model with two unique_together constraints (tuple and list forms)."""
    cfield = models.CharField(max_length=100)
    ifield = models.IntegerField()
    efield = models.EmailField()

    class Meta:
        # ifield participates in both composite constraints.
        unique_together = (('ifield', 'cfield',), ['ifield', 'efield'])
class UniqueForDateModel(models.Model):
    """Model exercising unique_for_date/year/month lookups."""
    start_date = models.DateField()
    end_date = models.DateTimeField()
    # count must be unique per start_date day and per end_date year.
    count = models.IntegerField(unique_for_date="start_date", unique_for_year="end_date")
    order = models.IntegerField(unique_for_month="end_date")
    name = models.CharField(max_length=100)
class CustomMessagesModel(models.Model):
    """Model with per-field custom error messages keyed by error code."""
    other = models.IntegerField(blank=True, null=True)
    number = models.IntegerField(
        db_column='number_val',
        # 'not42' matches the code raised by validate_answer_to_universe.
        error_messages={'null': 'NULL', 'not42': 'AAARGH', 'not_equal': '%s != me'},
        validators=[validate_answer_to_universe]
    )
class Author(models.Model):
    """Minimal related model targeted by Article.author."""
    name = models.CharField(max_length=100)
class Article(models.Model):
    """Model whose clean() fills in a missing pub_date."""
    title = models.CharField(max_length=100)
    author = models.ForeignKey(Author, models.CASCADE)
    pub_date = models.DateTimeField(blank=True)

    def clean(self):
        # Default the publication date to "now" when left blank.
        if self.pub_date is None:
            self.pub_date = datetime.now()
class Post(models.Model):
    """Post with unique_for_date/year/month constraints on its fields."""
    title = models.CharField(max_length=50, unique_for_date='posted', blank=True)
    slug = models.CharField(max_length=50, unique_for_year='posted', blank=True)
    subtitle = models.CharField(max_length=50, unique_for_month='posted', blank=True)
    posted = models.DateField()

    def __str__(self):
        # Bug fix: this model has no ``name`` field, so ``self.name``
        # raised AttributeError; use the title instead.
        return self.title
class FlexibleDatePost(models.Model):
    """Like Post, but with a nullable date so the unique_for_* checks
    must cope with a missing reference date."""
    title = models.CharField(max_length=50, unique_for_date='posted', blank=True)
    slug = models.CharField(max_length=50, unique_for_year='posted', blank=True)
    subtitle = models.CharField(max_length=50, unique_for_month='posted', blank=True)
    posted = models.DateField(blank=True, null=True)
class UniqueErrorsModel(models.Model):
    """Model overriding the 'unique' error message per field."""
    name = models.CharField(max_length=100, unique=True, error_messages={'unique': 'Custom unique name message.'})
    no = models.IntegerField(unique=True, error_messages={'unique': 'Custom unique number message.'})
class GenericIPAddressTestModel(models.Model):
    """Model covering GenericIPAddressField protocol variants."""
    generic_ip = models.GenericIPAddressField(blank=True, null=True, unique=True)
    v4_ip = models.GenericIPAddressField(blank=True, null=True, protocol="ipv4")
    v6_ip = models.GenericIPAddressField(blank=True, null=True, protocol="ipv6")
    ip_verbose_name = models.GenericIPAddressField("IP Address Verbose", blank=True, null=True)
class GenericIPAddrUnpackUniqueTest(models.Model):
    """Model combining unique=True with unpack_ipv4 normalization."""
    generic_v4unpack_ip = models.GenericIPAddressField(null=True, blank=True, unique=True, unpack_ipv4=True)
# A model can't have multiple AutoFields
# Refs #12467.
# Defining the class itself raises at import time; capture the
# AssertionError and verify its exact message below.
assertion_error = None
try:
    class MultipleAutoFields(models.Model):
        auto1 = models.AutoField(primary_key=True)
        auto2 = models.AutoField(primary_key=True)
except AssertionError as exc:
    assertion_error = exc
assert str(assertion_error) == "A model can't have more than one AutoField."
| mattseymour/django | tests/validation/models.py | Python | bsd-3-clause | 4,761 |
import pytest
from pandas._libs.tslibs.frequencies import get_freq
from pandas._libs.tslibs.period import period_asfreq, period_ordinal
@pytest.mark.parametrize(
    "freq1,freq2,expected",
    [
        ("D", "H", 24),
        ("D", "T", 1440),
        ("D", "S", 86400),
        ("D", "L", 86400000),
        ("D", "U", 86400000000),
        ("D", "N", 86400000000000),
        ("H", "T", 60),
        ("H", "S", 3600),
        ("H", "L", 3600000),
        ("H", "U", 3600000000),
        ("H", "N", 3600000000000),
        ("T", "S", 60),
        ("T", "L", 60000),
        ("T", "U", 60000000),
        ("T", "N", 60000000000),
        ("S", "L", 1000),
        ("S", "U", 1000000),
        ("S", "N", 1000000000),
        ("L", "U", 1000),
        ("L", "N", 1000000),
        ("U", "N", 1000),
    ],
)
def test_intra_day_conversion_factors(freq1, freq2, expected):
    # One period of freq1 equals `expected` periods of the finer freq2
    # (day -> hour -> minute -> ... -> nanosecond).
    assert period_asfreq(1, get_freq(freq1), get_freq(freq2), False) == expected
@pytest.mark.parametrize(
    "freq,expected", [("A", 0), ("M", 0), ("W", 1), ("D", 0), ("B", 0)]
)
def test_period_ordinal_start_values(freq, expected):
    # information for Jan. 1, 1970: the epoch maps to ordinal 0 for most
    # frequencies (weekly starts at 1).
    assert period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0, get_freq(freq)) == expected
@pytest.mark.parametrize(
    "dt,expected",
    [
        ((1970, 1, 4, 0, 0, 0, 0, 0), 1),
        ((1970, 1, 5, 0, 0, 0, 0, 0), 2),
        ((2013, 10, 6, 0, 0, 0, 0, 0), 2284),
        ((2013, 10, 7, 0, 0, 0, 0, 0), 2285),
    ],
)
def test_period_ordinal_week(dt, expected):
    # Weekly ordinals: consecutive dates on either side of a week
    # boundary map to consecutive ordinals.
    args = dt + (get_freq("W"),)
    assert period_ordinal(*args) == expected
@pytest.mark.parametrize(
    "day,expected",
    [
        # Thursday (Oct. 3, 2013).
        (3, 11415),
        # Friday (Oct. 4, 2013).
        (4, 11416),
        # Saturday (Oct. 5, 2013).
        (5, 11417),
        # Sunday (Oct. 6, 2013).
        (6, 11417),
        # Monday (Oct. 7, 2013).
        (7, 11417),
        # Tuesday (Oct. 8, 2013).
        (8, 11418),
    ],
)
def test_period_ordinal_business_day(day, expected):
    # Business-day ordinals: the weekend collapses onto the following
    # Monday's ordinal.
    args = (2013, 10, day, 0, 0, 0, 0, 0, get_freq("B"))
    assert period_ordinal(*args) == expected
| toobaz/pandas | pandas/tests/tslibs/test_period_asfreq.py | Python | bsd-3-clause | 2,130 |
#
# Gordon McMillan (as inspired and influenced by Greg Stein)
#
# subclasses may not need marshal or struct, but since they're
# builtin, importing is safe.
#
# While an Archive is really an abstraction for any "filesystem
# within a file", it is tuned for use with imputil.FuncImporter.
# This assumes it contains python code objects, indexed by the
# the internal name (ie, no '.py').
# See carchive.py for a more general archive (contains anything)
# that can be understood by a C program.
#archive_rt is a stripped down version of MEInc.Dist.archive.
#It has had all building logic removed.
#It's purpose is to bootstrap the Python installation.
import marshal
import struct
class Archive:
    """ A base class for a repository of python code objects.
        The extract method is used by imputil.ArchiveImporter
        to get code objects by name (fully qualified name), so
        an enduser "import a.b" would become
          extract('a.__init__')
          extract('a.b')
    """
    MAGIC = 'PYL\0'
    HDRLEN = 12  # default is MAGIC followed by python's magic, int pos of toc
    TOCPOS = 8   # offset (relative to start) of the int pointing at the TOC
    TRLLEN = 0   # default - no trailer
    TOCTMPLT = {}  # template for an empty TOC (dict, or a callable factory)
    os = None    # lazily-bound os module (see commented-out build code)
def __init__(self, path=None, start=0):
"Initialize an Archive. If path is omitted, it will be an empty Archive."
self.toc = None
self.path = path
self.start = start
import imp
self.pymagic = imp.get_magic()
if path is not None:
self.lib = open(self.path, 'rb')
self.checkmagic()
self.loadtoc()
####### Sub-methods of __init__ - override as needed #############
def checkmagic(self):
""" Overridable.
Check to see if the file object self.lib actually has a file
we understand.
"""
self.lib.seek(self.start) #default - magic is at start of file
if self.lib.read(len(self.MAGIC)) != self.MAGIC:
raise RuntimeError, "%s is not a valid %s archive file" \
% (self.path, self.__class__.__name__)
if self.lib.read(len(self.pymagic)) != self.pymagic:
raise RuntimeError, "%s has version mismatch to dll" % (self.path)
def loadtoc(self):
""" Overridable.
Default: After magic comes an int (4 byte native) giving the
position of the TOC within self.lib.
Default: The TOC is a marshal-able string.
"""
self.lib.seek(self.start + self.TOCPOS)
(offset,) = struct.unpack('=i', self.lib.read(4))
self.lib.seek(self.start + offset)
self.toc = marshal.load(self.lib)
######## This is what is called by FuncImporter #######
## Since an Archive is flat, we ignore parent and modname.
def get_code(self, parent, modname, fqname):
print "parent: ", parent
print "modname: ", modname
print "fqname: ", fqname
return self.extract(fqname) # None if not found, (ispkg, code) otherwise
if rslt is None:
return None
ispkg, code = rslt
if ispkg:
return ispkg, code, {'__path__': []}
return rslt
####### Core method - Override as needed #########
def extract(self, name):
""" Get the object corresponding to name, or None.
For use with imputil ArchiveImporter, object is a python code object.
'name' is the name as specified in an 'import name'.
'import a.b' will become:
extract('a') (return None because 'a' is not a code object)
extract('a.__init__') (return a code object)
extract('a.b') (return a code object)
Default implementation:
self.toc is a dict
self.toc[name] is pos
self.lib has the code object marshal-ed at pos
"""
ispkg, pos = self.toc.get(name, (0, None))
if pos is None:
return None
self.lib.seek(self.start + pos)
return ispkg, marshal.load(self.lib)
########################################################################
# Informational methods
    def contents(self):
        """Return a list of the contents
        (the importable entry names, i.e. the TOC keys).
        Default implementation assumes self.toc is a dict like object.
        Not required by ArchiveImporter.
        """
        return self.toc.keys()
########################################################################
# Building
####### Top level method - shouldn't need overriding #######
## def build(self, path, lTOC):
## """Create an archive file of name 'path'.
## lTOC is a 'logical TOC' - a list of (name, path, ...)
## where name is the internal name, eg 'a'
## and path is a file to get the object from, eg './a.pyc'.
## """
## self.path = path
## self.lib = open(path, 'wb')
## #reserve space for the header
## if self.HDRLEN:
## self.lib.write('\0'*self.HDRLEN)
##
## #create an empty toc
##
## if type(self.TOCTMPLT) == type({}):
## self.toc = {}
## else: # assume callable
## self.toc = self.TOCTMPLT()
##
## for tocentry in lTOC:
## self.add(tocentry) # the guts of the archive
##
## tocpos = self.lib.tell()
## self.save_toc(tocpos)
## if self.TRLLEN:
## self.save_trailer(tocpos)
## if self.HDRLEN:
## self.update_headers(tocpos)
## self.lib.close()
##
##
## ####### manages keeping the internal TOC and the guts in sync #######
## def add(self, entry):
## """Override this to influence the mechanics of the Archive.
## Assumes entry is a seq beginning with (nm, pth, ...) where
## nm is the key by which we'll be asked for the object.
## pth is the name of where we find the object. Overrides of
## get_obj_from can make use of further elements in entry.
## """
## if self.os is None:
## import os
## self.os = os
## nm = entry[0]
## pth = entry[1]
## ispkg = self.os.path.splitext(self.os.path.basename(pth))[0] == '__init__'
## self.toc[nm] = (ispkg, self.lib.tell())
## f = open(entry[1], 'rb')
## f.seek(8) #skip magic and timestamp
## self.lib.write(f.read())
##
## def save_toc(self, tocpos):
## """Default - toc is a dict
## Gets marshaled to self.lib
## """
## marshal.dump(self.toc, self.lib)
##
## def save_trailer(self, tocpos):
## """Default - not used"""
## pass
##
## def update_headers(self, tocpos):
## """Default - MAGIC + Python's magic + tocpos"""
## self.lib.seek(self.start)
## self.lib.write(self.MAGIC)
## self.lib.write(self.pymagic)
## self.lib.write(struct.pack('=i', tocpos))
##############################################################
#
# ZlibArchive - an archive with compressed entries
#
class ZlibArchive(Archive):
    """An Archive whose entries are zlib-compressed marshalled code
    objects (the PYZ format)."""
    MAGIC = 'PYZ\0'
    TOCPOS = 8
    HDRLEN = 12
    TRLLEN = 0
    TOCTMPLT = {}
    LEVEL = 9       # zlib compression level used when building
    def __init__(self, path=None, offset=0):
        Archive.__init__(self, path, offset)
        # dynamic import so not imported if not needed
        global zlib
        import zlib
    def extract(self, name):
        # TOC entries here are (ispkg, position, compressed-length)
        # triples, unlike the base class's (ispkg, position) pairs.
        (ispkg, pos, lngth) = self.toc.get(name, (0, None, 0))
        if pos is None:
            return None
        self.lib.seek(self.start + pos)
        return ispkg, marshal.loads(zlib.decompress(self.lib.read(lngth)))
##    def add(self, entry):
##        if self.os is None:
##            import os
##            self.os = os
##        nm = entry[0]
##        pth = entry[1]
##        ispkg = self.os.path.splitext(self.os.path.basename(pth))[0] == '__init__'
##        f = open(pth, 'rb')
##        f.seek(8)	#skip magic and timestamp
##        obj = zlib.compress(f.read(), self.LEVEL)
##        self.toc[nm] = (ispkg, self.lib.tell(), len(obj))
##        self.lib.write(obj)
##
| toontownfunserver/Panda3D-1.9.0 | direct/pyinst/archive_rt.py | Python | bsd-3-clause | 7,405 |
""" pyvalence
"""
__version__ = '0.0.1.3'
| blakeboswell/valence | pyvalence/__init__.py | Python | bsd-3-clause | 44 |
#
# Copyright (c) 2012-2016 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#
from antlr4.RuleContext import RuleContext
from antlr4.Token import Token
from antlr4.error.ErrorListener import ProxyErrorListener, ConsoleErrorListener
# need forward delcaration
RecognitionException = None
class Recognizer(object):
    """Common base for generated lexers and parsers: error-listener
    management, version checking, and token/rule name lookup maps."""

    # Caches shared by all recognizer instances. They are keyed by a
    # tuple of the token/rule names because generated recognizers expose
    # those names as lists, which are unhashable.
    tokenTypeMapCache = dict()
    ruleIndexMapCache = dict()

    def __init__(self):
        self._listeners = [ ConsoleErrorListener.INSTANCE ]
        self._interp = None
        self._stateNumber = -1

    def extractVersion(self, version):
        """Split a version string into (major, minor) strings.
        The minor part ends at the second '.', at a '-', or at the end of
        the string, so "1.2", "1.2.3" and "1.2-snapshot" all yield
        ("1", "2")."""
        pos = version.find(".")
        major = version[0:pos]
        version = version[pos+1:]
        pos = version.find(".")
        if pos==-1:
            pos = version.find("-")
        if pos==-1:
            pos = len(version)
        minor = version[0:pos]
        return major, minor

    def checkVersion(self, toolVersion):
        """Print a warning when the generating tool's major.minor version
        differs from this runtime's version."""
        runtimeVersion = "4.6.1"
        rvmajor, rvminor = self.extractVersion(runtimeVersion)
        tvmajor, tvminor = self.extractVersion(toolVersion)
        if rvmajor!=tvmajor or rvminor!=tvminor:
            print("ANTLR runtime and generated code versions disagree: "+runtimeVersion+"!="+toolVersion)

    def addErrorListener(self, listener):
        self._listeners.append(listener)

    def removeErrorListener(self, listener):
        self._listeners.remove(listener)

    def removeErrorListeners(self):
        self._listeners = []

    def getTokenTypeMap(self):
        """Return a dict mapping token names to token types (cached).
        Raises UnsupportedOperationException when the recognizer exposes
        no token names."""
        tokenNames = self.getTokenNames()
        if tokenNames is None:
            from antlr4.error.Errors import UnsupportedOperationException
            raise UnsupportedOperationException("The current recognizer does not provide a list of token names.")
        key = tuple(tokenNames)  # BUGFIX: a list cache key raised TypeError (unhashable)
        result = self.tokenTypeMapCache.get(key, None)
        if result is None:
            # BUGFIX: zip() returns an iterator in Python 3, so the
            # "EOF" item assignment below (and callers' .get()) crashed;
            # materialize a real dict.
            result = dict(zip(tokenNames, range(0, len(tokenNames))))
            result["EOF"] = Token.EOF
            self.tokenTypeMapCache[key] = result
        return result

    # Get a map from rule names to rule indexes.
    #
    # <p>Used for XPath and tree pattern compilation.</p>
    #
    def getRuleIndexMap(self):
        ruleNames = self.getRuleNames()
        if ruleNames is None:
            from antlr4.error.Errors import UnsupportedOperationException
            raise UnsupportedOperationException("The current recognizer does not provide a list of rule names.")
        key = tuple(ruleNames)  # BUGFIX: hashable cache key (see getTokenTypeMap)
        result = self.ruleIndexMapCache.get(key, None)
        if result is None:
            # BUGFIX: build a real dict instead of a lazy zip iterator.
            result = dict(zip(ruleNames, range(0, len(ruleNames))))
            self.ruleIndexMapCache[key] = result
        return result

    def getTokenType(self, tokenName:str):
        """Return the token type for tokenName, or Token.INVALID_TYPE."""
        ttype = self.getTokenTypeMap().get(tokenName, None)
        if ttype is not None:
            return ttype
        else:
            return Token.INVALID_TYPE

    # What is the error header, normally line/character position information?#
    def getErrorHeader(self, e:RecognitionException):
        line = e.getOffendingToken().line
        column = e.getOffendingToken().column
        # BUGFIX: line/column are ints; concatenating them directly to a
        # str raised TypeError, so convert explicitly.
        return "line "+str(line)+":"+str(column)

    # How should a token be displayed in an error message? The default
    # is to display just the text, but during development you might
    # want to have a lot of information spit out. Override in that case
    # to use t.toString() (which, for CommonToken, dumps everything about
    # the token). This is better than forcing you to override a method in
    # your token objects because you don't have to go modify your lexer
    # so that it creates a new Java type.
    #
    # @deprecated This method is not called by the ANTLR 4 Runtime. Specific
    # implementations of {@link ANTLRErrorStrategy} may provide a similar
    # feature when necessary. For example, see
    # {@link DefaultErrorStrategy#getTokenErrorDisplay}.
    #
    def getTokenErrorDisplay(self, t:Token):
        if t is None:
            return "<no token>"
        s = t.text
        if s is None:
            if t.type==Token.EOF:
                s = "<EOF>"
            else:
                s = "<" + str(t.type) + ">"
        # escape whitespace so the token renders on one line
        s = s.replace("\n","\\n")
        s = s.replace("\r","\\r")
        s = s.replace("\t","\\t")
        return "'" + s + "'"

    def getErrorListenerDispatch(self):
        """Return a proxy that fans events out to all registered listeners."""
        return ProxyErrorListener(self._listeners)

    # subclass needs to override these if there are sempreds or actions
    # that the ATN interp needs to execute
    def sempred(self, localctx:RuleContext, ruleIndex:int, actionIndex:int):
        return True

    def precpred(self, localctx:RuleContext , precedence:int):
        return True

    @property
    def state(self):
        return self._stateNumber

    # Indicate that the recognizer has changed internal state that is
    # consistent with the ATN state passed in. This way we always know
    # where we are in the ATN as the parser goes along. The rule
    # context objects form a stack that lets us see the stack of
    # invoking rules. Combine this and we have complete ATN
    # configuration information.
    @state.setter
    def state(self, atnState:int):
        self._stateNumber = atnState
del RecognitionException
import unittest
class Test(unittest.TestCase):
    def testVersion(self):
        # extractVersion must yield ("1", "2") whether the version string
        # is bare, carries a patch level, or carries a snapshot suffix.
        for version in ("1.2", "1.2.3", "1.2-snapshot"):
            major, minor = Recognizer().extractVersion(version)
            self.assertEqual("1", major)
            self.assertEqual("2", minor)
| Pursuit92/antlr4 | runtime/Python3/src/antlr4/Recognizer.py | Python | bsd-3-clause | 5,808 |
''' Provide basic Bokeh server objects that use a Tornado ``HTTPServer`` and
``BokeTornado`` Tornado Application to service Bokeh Server Applications.
There are two public classes in this module:
:class:`~bokeh.server.server.BaseServer`
This is a lightweight class to explicitly coordinate the components needed
to run a Bokeh server (A :class:`~bokeh.server.tornado.BokehTornado`
instance, and Tornado ``HTTPServer`` and a Tornado ``IOLoop``)
:class:`~bokeh.server.server.Server`
This higher-level convenience class only needs to be configured with Bokeh
:class:`~bokeh.application.application.Application` instances, and will
automatically create and coordinate the lower level Tornado components.
'''
from __future__ import absolute_import, print_function
import atexit
import logging
log = logging.getLogger(__name__)
import signal
import sys
import tornado
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from .. import __version__
from ..application import Application
from ..core.properties import Bool, Int, List, String
from ..resources import DEFAULT_SERVER_PORT
from ..util.options import Options
from .util import bind_sockets, create_hosts_whitelist
from .tornado import BokehTornado
# This class itself is intentionally undocumented (it is used to generate
# documentation elsewhere)
class _ServerOpts(Options):
num_procs = Int(default=1, help="""
The number of worker processes to start for the HTTP server. If an explicit
``io_loop`` is also configured, then ``num_procs=1`` is the only compatible
value. Use ``BaseServer`` to coordinate an explicit ``IOLoop`` with a
multi-process HTTP server.
A value of 0 will auto detect number of cores.
Note that due to limitations inherent in Tornado, Windows does not support
``num_procs`` values greater than one! In this case consider running
multiple Bokeh server instances behind a load balancer.
""")
address = String(default=None, help="""
The address the server should listen on for HTTP requests.
""")
port = Int(default=DEFAULT_SERVER_PORT, help="""
The port number the server should listen on for HTTP requests.
""")
prefix = String(default="", help="""
A URL prefix to use for all Bokeh server paths.
""")
allow_websocket_origin = List(String, default=None, help="""
A list of hosts that can connect to the websocket.
This is typically required when embedding a Bokeh server app in an external
web site using :func:`~bokeh.embed.server_document` or similar.
If None, "localhost" is used.
""")
use_xheaders = Bool(default=False, help="""
Whether to have the Bokeh server override the remote IP and URI scheme
and protocol for all requests with ``X-Real-Ip``, ``X-Forwarded-For``,
``X-Scheme``, ``X-Forwarded-Proto`` headers (if they are provided).
""")
class BaseServer(object):
    ''' Explicitly coordinate the lower-level Tornado components required to
    run a Bokeh server:
    * A Tornado ``IOLoop`` to run the Bokeh server machinery.
    * a ``BokehTornado`` Tornado application that defines the Bokeh server
    machinery.
    * a Tornado ``HTTPServer`` to direct HTTP requests
    All three of these components must be passed to ``BaseServer``, which will
    initialize the ``BokehTornado`` instance on the ``io_loop``. The
    ``http_server`` must have been previously created and initialized with the
    ``BokehTornado`` instance.
    '''
    def __init__(self, io_loop, tornado_app, http_server):
        ''' Create a ``BaseServer`` instance.
        Args:
            io_loop (IOLoop) :
                A Tornado ``IOLoop`` to run the Bokeh Tornado application on.
            tornado_app (BokehTornado) :
                An instance of the Bokeh Tornado application that generates
                Bokeh Documents and Sessions.
            http_server (HTTPServer) :
                A Tornado ``HTTPServer`` to service HTTP requests for Bokeh
                applications. Should have already be configured with the
                ``tornado_app`` when created.
        '''
        self._started = False
        self._stopped = False
        self._http = http_server
        self._loop = io_loop
        self._tornado = tornado_app
        self._tornado.initialize(io_loop)
    @property
    def io_loop(self):
        ''' The Tornado ``IOLoop`` that this Bokeh Server is running on.
        '''
        return self._loop
    def start(self):
        ''' Install the Bokeh Server and its background tasks on a Tornado
        ``IOLoop``.
        This method does *not* block and does *not* affect the state of the
        Tornado ``IOLoop`` You must start and stop the loop yourself, i.e.
        this method is typically useful when you are already explicitly
        managing an ``IOLoop`` yourself.
        To start a Bokeh server and immediately "run forever" in a blocking
        manner, see :func:`~bokeh.server.server.BaseServer.run_until_shutdown`.
        '''
        assert not self._started, "Already started"
        self._started = True
        self._tornado.start()
    def stop(self, wait=True):
        ''' Stop the Bokeh Server.
        This stops and removes all Bokeh Server ``IOLoop`` callbacks, as well
        as stops the ``HTTPServer`` that this instance was configured with.
        Args:
            wait (bool):
                Whether to wait for orderly cleanup (default: True)
        Returns:
            None
        '''
        assert not self._stopped, "Already stopped"
        self._stopped = True
        self._tornado.stop(wait)
        self._http.stop()
    def unlisten(self):
        ''' Stop listening on ports. The server will no longer be usable after
        calling this function.
        Returns:
            None
        '''
        self._http.close_all_connections()
        self._http.stop()
    def run_until_shutdown(self):
        ''' Run the Bokeh Server until shutdown is requested by the user,
        either via a Keyboard interrupt (Ctrl-C) or SIGTERM.
        Calling this method will start the Tornado ``IOLoop`` and block
        all execution in the calling process.
        Returns:
            None
        '''
        if not self._started:
            self.start()
        # Install shutdown hooks
        atexit.register(self._atexit)
        signal.signal(signal.SIGTERM, self._sigterm)
        try:
            self._loop.start()
        except KeyboardInterrupt:
            print("\nInterrupted, shutting down")
        self.stop()
    def get_session(self, app_path, session_id):
        ''' Get an active session by application path and session ID.
        Args:
            app_path (str) :
                The configured application path for the application to return
                a session for.
            session_id (str) :
                The session ID of the session to retrieve.
        Returns:
            ServerSession
        '''
        return self._tornado.get_session(app_path, session_id)
    def get_sessions(self, app_path=None):
        ''' Gets all currently active sessions for applications.
        Args:
            app_path (str, optional) :
                The configured application path for the application to return
                sessions for. If None, return active sessions for all
                applications. (default: None)
        Returns:
            list[ServerSession]
        '''
        if app_path is not None:
            return self._tornado.get_sessions(app_path)
        all_sessions = []
        for path in self._tornado.app_paths:
            all_sessions += self._tornado.get_sessions(path)
        return all_sessions
    def show(self, app_path, browser=None, new='tab'):
        ''' Opens an app in a browser window or tab.
        This method is useful for testing or running Bokeh server applications
        on a local machine but should not be called when running Bokeh server
        for an actual deployment.
        Args:
            app_path (str) : the app path to open
                The part of the URL after the hostname:port, with leading slash.
            browser (str, optional) : browser to show with (default: None)
                For systems that support it, the **browser** argument allows
                specifying which browser to display in, e.g. "safari", "firefox",
                "opera", "windows-default" (see the ``webbrowser`` module
                documentation in the standard lib for more details).
            new (str, optional) : window or tab (default: "tab")
                If ``new`` is 'tab', then opens a new tab.
                If ``new`` is 'window', then opens a new window.
        Returns:
            None
        '''
        if not app_path.startswith("/"):
            raise ValueError("app_path must start with a /")
        # NOTE(review): address/port/prefix are only defined on the Server
        # subclass, not on BaseServer itself -- confirm show() is only ever
        # invoked on Server instances.
        address_string = 'localhost'
        if self.address is not None and self.address != '':
            address_string = self.address
        url = "http://%s:%d%s%s" % (address_string, self.port, self.prefix, app_path)
        from bokeh.util.browser import view
        view(url, browser=browser, new=new)
    # guard so the atexit hook runs its cleanup at most once
    _atexit_ran = False
    def _atexit(self):
        if self._atexit_ran:
            return
        self._atexit_ran = True
        log.debug("Shutdown: cleaning up")
        if not self._stopped:
            self.stop(wait=False)
    def _sigterm(self, signum, frame):
        print("Received signal %d, shutting down" % (signum,))
        # Tell self._loop.start() to return.
        self._loop.add_callback_from_signal(self._loop.stop)
class Server(BaseServer):
    ''' A high level convenience class to run a Bokeh server.
    This class can automatically coordinate the three base-level
    components required to run a Bokeh server:
    * A Tornado ``IOLoop`` to run the Bokeh server machinery.
    * a ``BokehTornado`` Tornado application that defines the Bokeh server
    machinery.
    * a Tornado ``HTTPServer`` to direct HTTP requests
    This high level ``Server`` class has some limitations. In particular, it is
    not possible to set an explicit ``io_loop`` and ``num_procs`` other than 1
    at the same time. To do that, it is necessary to use ``BaseServer`` and
    coordinate the three components above explicitly.
    '''
    def __init__(self, applications, io_loop=None, http_server_kwargs=None, **kwargs):
        ''' Create a ``Server`` instance.
        Args:
            applications (dict[str, Application] or Application or callable) :
                A mapping from URL paths to Application instances, or a single
                Application to put at the root URL.
                The Application is a factory for Documents, with a new Document
                initialized for each Session. Each application is identified
                by a path that corresponds to a URL, like "/" or "/myapp"
                If a single Application is provided, it is mapped to the URL
                path "/" automatically.
                As a convenience, a callable may also be provided, in which
                case an Application will be created for it using FunctionHandler.
            io_loop (IOLoop, optional) :
                An explicit Tornado ``IOLoop`` to run Bokeh Server code on. If
                None, ``IOLoop.current()`` will be used (default: None)
            http_server_kwargs (dict, optional) :
                Extra arguments passed to ``tornado.httpserver.HTTPServer``.
                E.g. ``max_buffer_size`` to specify the maximum upload size.
                More details can be found at:
                http://www.tornadoweb.org/en/stable/httpserver.html#http-server
                If None, no extra arguments are passed (default: None)
        Additionally, the following options may be passed to configure the
        operation of ``Server``:
        .. bokeh-options:: _ServerOpts
            :module: bokeh.server.server
        Any remaining keyword arguments will be passed as-is to
        ``BokehTornado``.
        '''
        log.info("Starting Bokeh server version %s (running on Tornado %s)" % (__version__, tornado.version))
        from bokeh.application.handlers.function import FunctionHandler
        # normalize the 'applications' argument into a path->Application dict
        if callable(applications):
            applications = Application(FunctionHandler(applications))
        if isinstance(applications, Application):
            applications = { '/' : applications }
        for k, v in list(applications.items()):
            if callable(v):
                applications[k] = Application(FunctionHandler(v))
        opts = _ServerOpts(kwargs)
        self._port = opts.port
        self._address = opts.address
        self._prefix = opts.prefix
        if opts.num_procs != 1:
            assert all(app.safe_to_fork for app in applications.values()), (
                      'User application code has run before attempting to start '
                      'multiple processes. This is considered an unsafe operation.')
        if opts.num_procs > 1 and io_loop is not None:
            raise RuntimeError(
                "Setting both num_procs and io_loop in Server is incompatible. Use BaseServer to coordinate an explicit IOLoop and multi-process HTTPServer"
            )
        if opts.num_procs > 1 and sys.platform == "win32":
            raise RuntimeError("num_procs > 1 not supported on Windows")
        if http_server_kwargs is None:
            http_server_kwargs = {}
        http_server_kwargs.setdefault('xheaders', opts.use_xheaders)
        # bind first so an actual port is known even when port 0 was requested
        sockets, self._port = bind_sockets(self.address, self.port)
        extra_websocket_origins = create_hosts_whitelist(opts.allow_websocket_origin, self.port)
        try:
            tornado_app = BokehTornado(applications, extra_websocket_origins=extra_websocket_origins, prefix=self.prefix, **kwargs)
            http_server = HTTPServer(tornado_app, **http_server_kwargs)
            http_server.start(opts.num_procs)
            http_server.add_sockets(sockets)
        except Exception:
            # don't leak the bound sockets when startup fails
            for s in sockets:
                s.close()
            raise
        # Can only refer to IOLoop after HTTPServer.start() is called, see #5524
        if io_loop is None:
            io_loop = IOLoop.current()
        super(Server, self).__init__(io_loop, tornado_app, http_server)
    @property
    def prefix(self):
        ''' The configured URL prefix to use for all Bokeh server paths.
        '''
        return self._prefix
    @property
    def port(self):
        ''' The configured port number that the server listens on for HTTP
        requests.
        '''
        return self._port
    @property
    def address(self):
        ''' The configured address that the server listens on for HTTP
        requests.
        '''
        return self._address
| Karel-van-de-Plassche/bokeh | bokeh/server/server.py | Python | bsd-3-clause | 14,928 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Functions related to blackbody radiation."""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
# LOCAL
from ..modeling import blackbody as _bb
from ..utils.decorators import deprecated
__all__ = ['blackbody_nu', 'blackbody_lambda']
# Units
FNU = _bb.FNU
FLAM = _bb.FLAM
@deprecated('2.0', alternative='astropy.modeling.blackbody.blackbody_nu')
def blackbody_nu(in_x, temperature):
    """Calculate blackbody flux per steradian, :math:`B_{\\nu}(T)`.
    Deprecated thin wrapper that delegates to
    `~astropy.modeling.blackbody.blackbody_nu`.
    .. note:: Use `numpy.errstate` to suppress Numpy warnings, if desired.
    .. warning:: Output values might contain ``nan`` and ``inf``.
    Parameters
    ----------
    in_x : number, array-like, or `~astropy.units.Quantity`
        Frequency, wavelength, or wave number.
        If not a Quantity, it is assumed to be in Hz.
    temperature : number, array-like, or `~astropy.units.Quantity`
        Blackbody temperature.
        If not a Quantity, it is assumed to be in Kelvin.
    Returns
    -------
    flux : `~astropy.units.Quantity`
        Blackbody monochromatic flux in
        :math:`erg \\; cm^{-2} s^{-1} Hz^{-1} sr^{-1}`.
    Raises
    ------
    ValueError
        Invalid temperature.
    ZeroDivisionError
        Wavelength is zero (when converting to frequency).
    """
    # Forward to the maintained implementation in astropy.modeling.
    flux = _bb.blackbody_nu(in_x, temperature)
    return flux
@deprecated('2.0', alternative='astropy.modeling.blackbody.blackbody_lambda')
def blackbody_lambda(in_x, temperature):
    """Like :func:`blackbody_nu` but for :math:`B_{\\lambda}(T)`.
    Deprecated thin wrapper that delegates to
    `~astropy.modeling.blackbody.blackbody_lambda`.
    Parameters
    ----------
    in_x : number, array-like, or `~astropy.units.Quantity`
        Frequency, wavelength, or wave number.
        If not a Quantity, it is assumed to be in Angstrom.
    temperature : number, array-like, or `~astropy.units.Quantity`
        Blackbody temperature.
        If not a Quantity, it is assumed to be in Kelvin.
    Returns
    -------
    flux : `~astropy.units.Quantity`
        Blackbody monochromatic flux in
        :math:`erg \\; cm^{-2} s^{-1} \\mathring{A}^{-1} sr^{-1}`.
    """
    # Forward to the maintained implementation in astropy.modeling.
    flux = _bb.blackbody_lambda(in_x, temperature)
    return flux
| AustereCuriosity/astropy | astropy/analytic_functions/blackbody.py | Python | bsd-3-clause | 2,225 |
# GoodFETclient to interface zigduino/atmel128 radio
# forked by bx from code by neighbor Travis Goodspeed
from GoodFETAVR import GoodFETAVR
import sys, binascii, os, array, time, glob, struct
fmt = ("B", "<H", None, "<L")
class GoodFETatmel128rfa1(GoodFETAVR):
ATMELRADIOAPP = 0x53
autocrc = 0
verbose = False
connected = 0
enable_AACK = False
def serInit(self, port=None, timeout=2, attemptlimit=None):
if port==None:
port=os.environ.get("GOODFET");
self.pyserInit(port, timeout, attemptlimit)
def pyserInit(self, port, timeout, attemptlimit):
"""Open the serial port"""
if self.connected == 0:
if (not (attemptlimit == None)) and (attemptlimit <= 1):
# it always takes at least 2 tries
attemptlimit == 2
# Make timeout None to wait forever, 0 for non-blocking mode.
import serial;
if os.name=='nt' and sys.version.find('64 bit')!=-1:
print "WARNING: PySerial requires a 32-bit Python build in Windows.";
if port is None and os.environ.get("GOODFET")!=None:
glob_list = glob.glob(os.environ.get("GOODFET"));
if len(glob_list) > 0:
port = glob_list[0];
else:
port = os.environ.get("GOODFET");
if port is None:
glob_list = glob.glob("/dev/tty.usbserial*");
if len(glob_list) > 0:
port = glob_list[0];
if port is None:
glob_list = glob.glob("/dev/ttyUSB*");
if len(glob_list) > 0:
port = glob_list[0];
if port is None:
glob_list = glob.glob("/dev/ttyU0");
if len(glob_list) > 0:
port = glob_list[0];
if port is None and os.name=='nt':
from scanwin32 import winScan;
scan=winScan();
for order,comport,desc,hwid in sorted(scan.comports()):
try:
if hwid.index('FTDI')==0:
port=comport;
#print "Using FTDI port %s" % port
except:
#Do nothing.
a=1;
baud=115200;
self.serialport = serial.Serial(
port,
baud,
parity = serial.PARITY_NONE,
timeout=timeout
)
self.verb=0;
self.data=""
attempts=0;
self.connected=0;
while self.connected==0:
self.serialport.setDTR(False)
while self.verb!=0x7F or self.data!="http://goodfet.sf.net/":
if attemptlimit is not None and attempts >= attemptlimit:
return
attempts=attempts+1;
self.readcmd(); #Read the first command.
if self.verbose:
print "Got %02x,%02x:'%s'" % (self.app,self.verb,self.data);
#Here we have a connection, but maybe not a good one.
#print "We have a connection."
for foo in range(1,30):
time.sleep(1)
if not self.monitorecho():
self.connected = 0
if self.verbose:
print "Comm error on try %i." % (foo)
else:
self.connected = 1
break
if self.verbose:
print "Connected after %02i attempts." % attempts;
self.serialport.timeout = 12;
def serClose(self):
self.connected = 0
self.serialport.close()
def writecmd(self, app, verb, count=0, data=[]):
"""Write a command and some data to the GoodFET."""
self.serialport.write(chr(app));
self.serialport.write(chr(verb));
if self.verbose:
print "Tx: ( 0x%02x, 0x%02x, %d )" % ( app, verb, count )
if count > 0:
if(isinstance(data,list)):
old = data
data = []
for i in range(0,count):
data += chr(old[i]);
outstr=''.join(data);
#little endian 16-bit length
count = len(outstr)
self.serialport.write(chr(count&0xFF));
self.serialport.write(chr(count>>8));
if count > 0:
if self.verbose:
print "sending: %s" %outstr.encode("hex")
self.serialport.write(outstr);
if not self.besilent:
out = self.readcmd()
if out and self.verbose:
print "read: " + out.encode("hex")
return out
else:
return None
def readcmd(self):
"""Read a reply from the GoodFET."""
app = self.serialport.read(1)
if len(app) < 1:
if self.verbose:
print "Rx: None"
self.app = 0
self.verb = 0
self.count = 0
self.data = ""
return
self.app=ord(app);
v = self.serialport.read(1);
if v:
self.verb = ord(v)
else:
self.verb = 0
c1 = self.serialport.read(1)
c2 = self.serialport.read(1)
if (c1 and c2):
self.count= ord(c1) + (ord(c2)<<8)
else:
self.count = 0
if self.verbose:
print "Rx: ( 0x%02x, 0x%02x, %i )" % ( self.app, self.verb, self.count )
#Debugging string; print, but wait.
if self.app==0xFF:
if self.verb==0xFF:
print "# DEBUG %s" % self.serialport.read(self.count)
elif self.verb==0xFE:
print "# DEBUG 0x%x" % struct.unpack(fmt[self.count-1], self.serialport.read(self.count))[0]
elif self.verb==0xFD:
#Do nothing, just wait so there's no timeout.
print "# NOP.";
return ""
else:
self.data=self.serialport.read(self.count);
return self.data;
def RF_setchannel(self, chan):
if (chan < 11) or (chan > 26):
print "Channel out of range"
else:
self.poke(0x8, chan)
def peek(self,reg,bytes=1):
"""Read a Register. """
#Automatically calibrate the len.
if bytes != 1:
print "Warning, currently cannot poke more than 1 byte"
bytes = 1
data = [reg, 0, bytes%255, bytes>>8] #+ ([0]*bytes)
self.data = None
self.writecmd(self.ATMELRADIOAPP,0x02,len(data),data);
toret=0;
#print self.data.encode("hex")
if self.data:
#for i in range(0,bytes):
# toret=toret|(ord(self.data[i+1])<<(8*i));
#return toret;
# right now only works with a byte of data
return ord(self.data)
else:
return -1
def poke(self,reg,val,bytes=1): # todo, support >1 byte
"""Write an Register."""
data = [reg, 0] #+ ([0]*bytes)
data=[reg, 0]
if bytes != 1:
print "Warning, currently cannot poke more than 1 byte"
bytes = 1
for i in range(0,bytes):
data=data+[(val>>(8*i))&0xFF];
self.writecmd(self.ATMELRADIOAPP,0x03,len(data),data);
newval = self.peek(reg,bytes)
if newval!=val:
print "Warning, failed to set r%02x=%02x, got %02x." %(
reg,
val,
newval);
return;
def setup(self):
self.RF_setup()
def RF_setup(self):
self.writecmd(self.ATMELRADIOAPP, 0x10, 0, None)
def RF_rxpacket(self):
"""Get a packet from the radio. Returns None if none is waiting."""
#doto: check if packet has arrived, flush if not new
self.writecmd(self.ATMELRADIOAPP, 0x80, 0, None)
data=self.data;
self.packetlen = len(data)
if (self.packetlen > 0):
return data;
else:
return None
def RF_txpacket(self, payload):
if type(payload) == list: #convert to string
import array
payload = array.array('B', payload).tostring()
self.writecmd(self.ATMELRADIOAPP, 0x81, len(payload), payload)
def RF_getrssi(self):
"""Returns the received signal strength"""
base = -90
val = self.peek(0x7) & 0x7f # read rssi bits
if val == 0:
return base - 1
elif val < 0x53:
return val + base
else:
return 0x53 + base
def RF_enable_AACK(self, enable = True):
if (enable and (not self.enable_AACK)):
self.enable_AACK = True
self.writecmd(self.ATMELRADIOAPP, 0x84)
elif ((not enable) and self.enable_AACK):
self.enable_AACK = False
self.writecmd(self.ATMELRADIOAPP, 0x85)
def RF_autocrc(self, autocrc=1):
self.autocrc = autocrc
if autocrc:
self.writecmd(self.ATMELRADIOAPP, 0x86)
else:
self.writecmd(self.ATMELRADIOAPP, 0x87)
| rfmcpherson/killerbee | killerbee/GoodFETatmel128.py | Python | bsd-3-clause | 9,329 |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: chromiumos/metrics.proto
# NOTE(review): machine-generated module -- change chromiumos/metrics.proto
# and regenerate with protoc instead of editing this file by hand.
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
  name='chromiumos/metrics.proto',
  package='chromiumos',
  syntax='proto3',
  serialized_options=_b('Z4go.chromium.org/chromiumos/infra/proto/go/chromiumos'),
  serialized_pb=_b('\n\x18\x63hromiumos/metrics.proto\x12\nchromiumos\"i\n\x0bMetricEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1e\n\x16timestamp_milliseconds\x18\x02 \x01(\x03\x12\x1d\n\x15\x64uration_milliseconds\x18\x03 \x01(\x04\x12\r\n\x05gauge\x18\x04 \x01(\x04\x42\x36Z4go.chromium.org/chromiumos/infra/proto/go/chromiumosb\x06proto3')
)
_METRICEVENT = _descriptor.Descriptor(
  name='MetricEvent',
  full_name='chromiumos.MetricEvent',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='name', full_name='chromiumos.MetricEvent.name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='timestamp_milliseconds', full_name='chromiumos.MetricEvent.timestamp_milliseconds', index=1,
      number=2, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='duration_milliseconds', full_name='chromiumos.MetricEvent.duration_milliseconds', index=2,
      number=3, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='gauge', full_name='chromiumos.MetricEvent.gauge', index=3,
      number=4, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=40,
  serialized_end=145,
)
DESCRIPTOR.message_types_by_name['MetricEvent'] = _METRICEVENT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
MetricEvent = _reflection.GeneratedProtocolMessageType('MetricEvent', (_message.Message,), dict(
  DESCRIPTOR = _METRICEVENT,
  __module__ = 'chromiumos.metrics_pb2'
  # @@protoc_insertion_point(class_scope:chromiumos.MetricEvent)
  ))
_sym_db.RegisterMessage(MetricEvent)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| endlessm/chromium-browser | third_party/chromite/api/gen/chromiumos/metrics_pb2.py | Python | bsd-3-clause | 3,384 |
__description__ = "Zookeeper"
__config__ = {}
| samuel/kokki | kokki/cookbooks/zookeeper/metadata.py | Python | bsd-3-clause | 47 |
from datetime import timedelta
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand
from django.db.models import Count, F
from django.utils.timezone import now
from hc.accounts.models import Profile
class Command(BaseCommand):
    help = """Prune old, inactive user accounts.

    Conditions for removing an user account:
    - created 1 month ago and never logged in. Does not belong
      to any team.
      Use case: visitor types in their email at the website but
      never follows through with login.
    """

    def handle(self, *args, **options):
        """Delete never-used accounts and profiles flagged for deletion."""
        cutoff = now() - timedelta(days=30)

        # Accounts created before the cutoff that never logged in and
        # belong to no team.
        stale_users = (
            User.objects.order_by("id")
            .annotate(n_teams=Count("memberships"))
            .filter(date_joined__lt=cutoff, last_login=None, n_teams=0)
        )
        _, deletion_summary = stale_users.delete()
        pruned = deletion_summary.get("auth.User", 0)
        self.stdout.write("Pruned %d never-logged-in user accounts." % pruned)

        # Profiles whose deletion notice is older than the cutoff, excluding
        # owners who logged in again after the notice was sent.
        doomed_profiles = (
            Profile.objects.order_by("id")
            .filter(deletion_notice_date__lt=cutoff)
            .exclude(user__last_login__gt=F("deletion_notice_date"))
        )
        for profile in doomed_profiles:
            self.stdout.write("Deleting inactive %s" % profile.user.email)
            profile.user.delete()

        return "Done!"
| healthchecks/healthchecks | hc/accounts/management/commands/pruneusers.py | Python | bsd-3-clause | 1,501 |
"""
URLs used in the unit tests for django-registration.
You should not attempt to use these URLs in any sort of real or
development environment; instead, use
``registration/backends/default/urls.py``. This URLconf includes those
URLs, and also adds several additional URLs which serve no purpose
other than to test that optional keyword arguments are properly
handled.
"""
from django.conf.urls.defaults import *
from django.views.generic.simple import direct_to_template
from ..views import activate
from ..views import register
# Test-only URLconf: each pattern overrides one optional keyword argument of
# the activate/register views to verify it is honored (see module docstring).
urlpatterns = patterns('',
    # Test the 'activate' view with custom template
    # name.
    url(r'^activate-with-template-name/(?P<activation_key>\w+)/$',
        activate,
        {'template_name': 'registration/test_template_name.html',
         'backend': 'registration.backends.default.DefaultBackend'},
        name='registration_test_activate_template_name'),
    # Test the 'activate' view with
    # extra_context_argument.
    url(r'^activate-extra-context/(?P<activation_key>\w+)/$',
        activate,
        {'extra_context': {'foo': 'bar', 'callable': lambda: 'called'},
         'backend': 'registration.backends.default.DefaultBackend'},
        name='registration_test_activate_extra_context'),
    # Test the 'activate' view with success_url argument.
    url(r'^activate-with-success-url/(?P<activation_key>\w+)/$',
        activate,
        {'success_url': 'registration_test_custom_success_url',
         'backend': 'registration.backends.default.DefaultBackend'},
        name='registration_test_activate_success_url'),
    # Test the 'register' view with custom template
    # name.
    url(r'^register-with-template-name/$',
        register,
        {'template_name': 'registration/test_template_name.html',
         'backend': 'registration.backends.default.DefaultBackend'},
        name='registration_test_register_template_name'),
    # Test the'register' view with extra_context
    # argument.
    url(r'^register-extra-context/$',
        register,
        {'extra_context': {'foo': 'bar', 'callable': lambda: 'called'},
         'backend': 'registration.backends.default.DefaultBackend'},
        name='registration_test_register_extra_context'),
    # Test the 'register' view with custom URL for
    # closed registration.
    url(r'^register-with-disallowed-url/$',
        register,
        {'disallowed_url': 'registration_test_custom_disallowed',
         'backend': 'registration.backends.default.DefaultBackend'},
        name='registration_test_register_disallowed_url'),
    # Set up a pattern which will correspond to the
    # custom 'disallowed_url' above.
    url(r'^custom-disallowed/$',
        direct_to_template,
        {'template': 'registration/registration_closed.html'},
        name='registration_test_custom_disallowed'),
    # Test the 'register' view with custom redirect
    # on successful registration.
    url(r'^register-with-success_url/$',
        register,
        {'success_url': 'registration_test_custom_success_url',
         'backend': 'registration.backends.default.DefaultBackend'},
        name='registration_test_register_success_url'
        ),
    # Pattern for custom redirect set above.
    url(r'^custom-success/$',
        direct_to_template,
        {'template': 'registration/test_template_name.html'},
        name='registration_test_custom_success_url'),
    # Fall through to the default backend's URLs for everything else.
    (r'', include('registration.backends.default.urls')),
)
| husarion/django-registration | registration/tests/urls.py | Python | bsd-3-clause | 4,623 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
from django.conf import settings
import djangobmf.storage
import djangobmf.fields.file
import django.utils.timezone
import djangobmf.utils.generate_filename
class Migration(migrations.Migration):
    """Auto-generated squashed migration for the djangobmf core app.

    Collapses migrations 0001_squashed_0_2_0 through 0013_update_document
    (see ``replaces``) into a single schema-creation step. Do not edit the
    operations by hand; field definitions are order-sensitive.
    """

    replaces = [('djangobmf', '0001_squashed_0_2_0'), ('djangobmf', '0002_dashboard_update'), ('djangobmf', '0003_delete_workspace'), ('djangobmf', '0004_added_active_field'), ('djangobmf', '0005_added_unique_together'), ('djangobmf', '0006_report_settings'), ('djangobmf', '0007_update_renderer'), ('djangobmf', '0008_renderer_filefields'), ('djangobmf', '0009_notification_rename'), ('djangobmf', '0010_notification_db_optimization'), ('djangobmf', '0011_added_numberrange'), ('djangobmf', '0012_delete_dashboard'), ('djangobmf', '0013_update_document')]

    dependencies = [
        migrations.swappable_dependency(settings.BMF_CONTRIB_CUSTOMER),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('contenttypes', '0002_remove_content_type_name'),
        migrations.swappable_dependency(settings.BMF_CONTRIB_PROJECT),
    ]

    operations = [
        migrations.CreateModel(
            name='Activity',
            fields=[
                ('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),
                ('topic', models.CharField(verbose_name='Topic', blank=True, null=True, max_length=100)),
                ('text', models.TextField(verbose_name='Text', blank=True, null=True)),
                ('action', models.PositiveSmallIntegerField(verbose_name='Action', default=1, choices=[(1, 'Comment'), (2, 'Created'), (3, 'Updated'), (4, 'Workflow'), (5, 'File')], editable=False, null=True)),
                ('template', models.CharField(verbose_name='Template', editable=False, null=True, max_length=100)),
                ('parent_id', models.PositiveIntegerField()),
                ('modified', models.DateTimeField(auto_now=True, verbose_name='Modified')),
                ('parent_ct', models.ForeignKey(related_name='bmf_history_parent', to='contenttypes.ContentType')),
                ('user', models.ForeignKey(null=True, blank=True, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'get_latest_by': 'modified',
                'verbose_name': 'Activity',
                'ordering': ('-modified',),
                'verbose_name_plural': 'Activity',
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Configuration',
            fields=[
                ('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),
                ('app_label', models.CharField(verbose_name='Application', editable=False, null=True, max_length=100)),
                ('field_name', models.CharField(verbose_name='Fieldname', editable=False, null=True, max_length=100)),
                ('value', models.TextField(verbose_name='Value', null=True)),
                ('active', models.BooleanField(verbose_name='Active', default=True)),
            ],
            options={
                'ordering': ['app_label', 'field_name'],
                'verbose_name_plural': 'Configurations',
                'abstract': False,
                'verbose_name': 'Configuration',
                'default_permissions': ('change',),
            },
        ),
        migrations.CreateModel(
            name='Document',
            fields=[
                ('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),
                ('name', models.CharField(verbose_name='Name', blank=True, null=True, max_length=120)),
                ('mimetype', models.CharField(verbose_name='Mimetype', editable=False, null=True, max_length=120)),
                ('encoding', models.CharField(verbose_name='Encoding', editable=False, null=True, max_length=60)),
                ('description', models.TextField(verbose_name='Description', blank=True, null=True)),
                ('file', models.FileField(upload_to=djangobmf.utils.generate_filename.generate_filename, storage=djangobmf.storage.Storage(), verbose_name='File')),
                ('size', models.PositiveIntegerField(editable=False, blank=True, null=True)),
                ('sha1', models.CharField(verbose_name='SHA1', editable=False, null=True, max_length=40)),
                ('is_static', models.BooleanField(editable=False, default=True)),
                ('file_exists', models.BooleanField(default=True)),
                ('content_id', models.PositiveIntegerField(editable=False, blank=True, null=True)),
                ('modified', models.DateTimeField(auto_now=True, verbose_name='Modified', null=True)),
                ('created', models.DateTimeField(auto_now_add=True, verbose_name='Created', null=True)),
                ('content_type', models.ForeignKey(related_name='bmf_document', null=True, editable=False, on_delete=django.db.models.deletion.SET_NULL, blank=True, to='contenttypes.ContentType')),
                ('created_by', models.ForeignKey(related_name='+', null=True, editable=False, on_delete=django.db.models.deletion.SET_NULL, blank=True, to=settings.AUTH_USER_MODEL)),
                ('customer', models.ForeignKey(related_name='documents', null=True, editable=False, on_delete=django.db.models.deletion.SET_NULL, blank=True, to=settings.BMF_CONTRIB_CUSTOMER)),
                ('modified_by', models.ForeignKey(related_name='+', null=True, editable=False, on_delete=django.db.models.deletion.SET_NULL, blank=True, to=settings.AUTH_USER_MODEL)),
                ('project', models.ForeignKey(related_name='documents', null=True, editable=False, on_delete=django.db.models.deletion.SET_NULL, blank=True, to=settings.BMF_CONTRIB_PROJECT)),
            ],
            options={
                'get_latest_by': 'modified',
                'verbose_name': 'Document',
                'permissions': [('view_document', 'Can view documents')],
                'verbose_name_plural': 'Documents',
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Notification',
            fields=[
                ('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),
                ('watch_id', models.PositiveIntegerField(null=True, db_index=True)),
                ('triggered', models.BooleanField(verbose_name='Triggered', default=True, editable=False, db_index=True)),
                ('unread', models.BooleanField(verbose_name='Unread', default=True, editable=False, db_index=True)),
                ('last_seen_object', models.PositiveIntegerField(null=True)),
                ('new_entry', models.BooleanField(verbose_name='New entry', default=False, db_index=True)),
                ('comments', models.BooleanField(verbose_name='Comment written', default=False, db_index=True)),
                ('files', models.BooleanField(verbose_name='File added', default=False, db_index=True)),
                ('detectchanges', models.BooleanField(verbose_name='Object changed', default=False, db_index=True)),
                ('workflow', models.BooleanField(verbose_name='Workflowstate changed', default=False, db_index=True)),
                ('modified', models.DateTimeField(verbose_name='Modified', default=django.utils.timezone.now, editable=False, null=True)),
                ('user', models.ForeignKey(null=True, blank=True, to=settings.AUTH_USER_MODEL)),
                ('watch_ct', models.ForeignKey(to='contenttypes.ContentType', null=True)),
            ],
            options={
                'ordering': ('-modified',),
                'verbose_name_plural': 'Watched activities',
                'abstract': False,
                'verbose_name': 'Watched activity',
                'get_latest_by': 'modified',
                'default_permissions': (),
            },
        ),
        migrations.CreateModel(
            name='NumberRange',
            fields=[
                ('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),
                ('period_start', models.DateField(null=True, db_index=True)),
                ('period_final', models.DateField(null=True, db_index=True)),
                ('counter', models.PositiveIntegerField(default=1, null=True)),
                ('ct', models.ForeignKey(related_name='+', null=True, editable=False, to='contenttypes.ContentType')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Renderer',
            fields=[
                ('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),
                ('name', models.CharField(verbose_name='Name', max_length=20)),
                ('size', models.CharField(verbose_name='Size', default='A4/A', max_length=20)),
                ('letter', models.BooleanField(verbose_name='Letter', default=True)),
                ('extra', models.BooleanField(verbose_name='Extra', default=False)),
                ('letter_margin_right', models.PositiveIntegerField(verbose_name='Letter margin right', default=10)),
                ('letter_margin_bottom', models.PositiveIntegerField(verbose_name='Letter margin bottom', default=40)),
                ('letter_extra_right', models.PositiveIntegerField(verbose_name='Letter extra right', default=10)),
                ('letter_extra_top', models.PositiveIntegerField(verbose_name='Letter extra top', default=10)),
                ('letter_footer_right', models.PositiveIntegerField(verbose_name='Letter footer height', default=10)),
                ('page_margin_right', models.PositiveIntegerField(verbose_name='Letter margin right', default=10)),
                ('page_margin_bottom', models.PositiveIntegerField(verbose_name='Letter margin bottom', default=15)),
                ('page_margin_top', models.PositiveIntegerField(verbose_name='Letter margin top', default=20)),
                ('modified', models.DateTimeField(auto_now=True, verbose_name='Modified')),
                ('letter_background', djangobmf.fields.file.FileField(related_name='+', null=True, verbose_name='Letter background', blank=True, to='djangobmf.Document')),
                ('page_background', djangobmf.fields.file.FileField(related_name='+', null=True, verbose_name='Page background', blank=True, to='djangobmf.Document')),
            ],
            options={
                'verbose_name': 'Renderer',
                'get_latest_by': 'modified',
                'verbose_name_plural': 'Renderer',
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Report',
            fields=[
                ('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),
                ('key', models.CharField(editable=False, null=True, db_index=True, verbose_name='Key', blank=True, max_length=255)),
                ('modified', models.DateTimeField(auto_now=True, verbose_name='Modified')),
                ('contenttype', models.ForeignKey(help_text='Connect a Report to an BMF-Model', related_name='bmf_report', null=True, editable=False, blank=True, to='contenttypes.ContentType')),
                ('renderer', models.ForeignKey(help_text='Connect a Report to an Renderer', related_name='reports', null=True, on_delete=django.db.models.deletion.SET_NULL, blank=True, to='djangobmf.Renderer')),
            ],
            options={
                'verbose_name': 'Report',
                'get_latest_by': 'modified',
                'verbose_name_plural': 'Reports',
                'abstract': False,
            },
        ),
        migrations.AlterUniqueTogether(
            name='configuration',
            unique_together=set([('app_label', 'field_name')]),
        ),
        migrations.AlterUniqueTogether(
            name='numberrange',
            unique_together=set([('ct', 'period_start', 'period_final')]),
        ),
        migrations.AlterUniqueTogether(
            name='notification',
            unique_together=set([('user', 'watch_ct', 'watch_id')]),
        ),
    ]
| django-bmf/django-bmf | djangobmf/migrations/0001_squashed_0_2_9.py | Python | bsd-3-clause | 12,381 |
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 3 21:08:49 2017
Author: Josef Perktold
"""
import numpy as np
from numpy.testing import assert_allclose
# load data into module namespace
from statsmodels.datasets.cpunish import load
from statsmodels.discrete.discrete_model import (
NegativeBinomial,
NegativeBinomialP,
Poisson,
)
import statsmodels.discrete.tests.results.results_count_margins as res_stata
from statsmodels.tools.tools import add_constant
# Load the cpunish dataset and massage it into the design used by all tests
# in this module.
cpunish_data = load()
cpunish_data.exog = np.asarray(cpunish_data.exog)
cpunish_data.endog = np.asarray(cpunish_data.endog)
# Log-transform column 3 of the regressors.
cpunish_data.exog[:,3] = np.log(cpunish_data.exog[:,3])
exog = add_constant(cpunish_data.exog, prepend=False)
endog = cpunish_data.endog - 1  # avoid zero-truncation
# Rescale each column by its (rounded) maximum so regressors are O(1).
exog /= np.round(exog.max(0), 3)
class CheckMarginMixin(object):
    """Shared assertions comparing computed marginal effects to reference
    results (``res1``) over the rows selected by ``res1_slice``."""

    # Multiplier loosening the tolerances for models that agree less closely
    # with the reference values.
    rtol_fac = 1

    def test_margins_table(self):
        expected = self.res1
        rows = self.res1_slice
        scale = self.rtol_fac
        computed = self.margeff
        assert_allclose(computed.margeff, expected.params[rows], rtol=1e-5 * scale)
        assert_allclose(computed.margeff_se, expected.bse[rows], rtol=1e-6 * scale)
        assert_allclose(computed.pvalues, expected.pvalues[rows], rtol=5e-6 * scale)
        assert_allclose(computed.conf_int(), expected.margins_table[rows, 4:6],
                        rtol=1e-6 * scale)
class TestPoissonMargin(CheckMarginMixin):
    """Poisson marginal effects (continuous) vs. Stata reference."""

    @classmethod
    def setup_class(cls):
        # Start near the optimum; convergence from the default start_params
        # is not under test here.
        sp = [14.1709, 0.7085, -3.4548, -0.539, 3.2368, -7.9299,
              -5.0529]
        model = Poisson(endog, exog)
        fitted = model.fit(start_params=sp)
        cls.res = fitted
        cls.margeff = fitted.get_margeff()
        cls.rtol_fac = 1
        cls.res1_slice = slice(None, None, None)
        cls.res1 = res_stata.results_poisson_margins_cont
class TestPoissonMarginDummy(CheckMarginMixin):
    """Poisson marginal effects with dummy treatment vs. Stata reference."""

    @classmethod
    def setup_class(cls):
        # Start near the optimum; convergence from the default start_params
        # is not under test here.
        sp = [14.1709, 0.7085, -3.4548, -0.539, 3.2368, -7.9299,
              -5.0529]
        fitted = Poisson(endog, exog).fit(start_params=sp)
        cls.res = fitted
        cls.margeff = fitted.get_margeff(dummy=True)
        cls.res1_slice = [0, 1, 2, 3, 5, 6]
        cls.res1 = res_stata.results_poisson_margins_dummy
class TestNegBinMargin(CheckMarginMixin):
    """Negative binomial marginal effects (continuous) vs. Stata reference."""

    @classmethod
    def setup_class(cls):
        # Start near the optimum; convergence from the default start_params
        # is not under test here.
        sp = [13.1996, 0.8582, -2.8005, -1.5031, 2.3849, -8.5552,
              -2.88, 1.14]
        fitted = NegativeBinomial(endog, exog).fit(start_params=sp,
                                                   method='nm', maxiter=2000)
        cls.res = fitted
        cls.margeff = fitted.get_margeff()
        cls.res1_slice = slice(None, None, None)
        cls.res1 = res_stata.results_negbin_margins_cont
        # negbin has lower agreement with Stata in this case
        cls.rtol_fac = 5e1
class TestNegBinMarginDummy(CheckMarginMixin):
    """Negative binomial marginal effects with dummy treatment vs. Stata
    reference results."""

    @classmethod
    def setup_class(cls):
        # here we do not need to check convergence from default start_params
        start_params = [13.1996, 0.8582, -2.8005, -1.5031, 2.3849, -8.5552,
                        -2.88, 1.14]
        mod = NegativeBinomial(endog, exog)
        res = mod.fit(start_params=start_params, method='nm', maxiter=2000)
        marge = res.get_margeff(dummy=True)
        cls.res = res
        cls.margeff = marge
        # Fixed: was the accidental duplicated assignment
        # `cls.res1_slice = cls.res1_slice = [...]`.
        cls.res1_slice = [0, 1, 2, 3, 5, 6]
        cls.res1 = res_stata.results_negbin_margins_dummy
        # negbin has lower agreement with Stata in this case
        cls.rtol_fac = 5e1
class TestNegBinPMargin(CheckMarginMixin):
    # this is the same as the nb2 version above for NB-P, p=2

    @classmethod
    def setup_class(cls):
        # Start near the optimum; convergence from the default start_params
        # is not under test here.
        sp = [13.1996, 0.8582, -2.8005, -1.5031, 2.3849, -8.5552,
              -2.88, 1.14]
        model = NegativeBinomialP(endog, exog)  # checks also that default p=2
        fitted = model.fit(start_params=sp, method='nm', maxiter=2000)
        cls.res = fitted
        cls.margeff = fitted.get_margeff()
        cls.res1_slice = slice(None, None, None)
        cls.res1 = res_stata.results_negbin_margins_cont
        # negbin has lower agreement with Stata in this case
        cls.rtol_fac = 5e1
| bashtage/statsmodels | statsmodels/discrete/tests/test_margins.py | Python | bsd-3-clause | 4,664 |
import json
import os
import types
from . import constants
from .constants import PACKAGE_ANY
from .errorbundler import ErrorBundle
# This is necessary. Do not remove it unless you know exactly what
# you are doing.
import loader # noqa
import submain
def validate(path, format='json',
             approved_applications=os.path.join(os.path.dirname(__file__),
                                                'app_versions.json'),
             determined=True,
             listed=True,
             expectation=PACKAGE_ANY,
             for_appversions=None,
             overrides=None,
             timeout=-1,
             compat_test=False,
             **kw):
    """
    Perform validation in one easy step!

    `path`:
        *Required*
        A file system path to the package to be validated.
    `format`:
        The format to return the results in. Defaults to "json". Currently, any
        other format will simply return the error bundle.
    `approved_applications`:
        Path to the list of approved application versions, or a dict already
        containing those lists.
    `determined`:
        If set to `False`, validation will halt at the end of the first tier
        that raises errors.
    `listed`:
        Whether the app is headed for the app marketplace or AMO. Defaults to
        `True`.
    `expectation`:
        The type of package that should be expected. Must be a symbolic
        constant from validator.constants (i.e.:
        validator.constants.PACKAGE_*). Defaults to PACKAGE_ANY.
    `for_appversions`:
        A dict of app GUIDs referencing lists of versions. Determines which
        version-dependant tests should be run.
    `overrides`:
        Passed through to the ErrorBundle.
    `timeout`:
        Number of seconds before aborting addon validation, or -1 to
        run with no timeout.
    `compat_test`:
        A flag to signal the validator to skip tests which should not be run
        during compatibility bumps. Defaults to `False`.
    """
    # Collects all errors/warnings produced during validation.
    bundle = ErrorBundle(listed=listed, determined=determined,
                         overrides=overrides, for_appversions=for_appversions)
    bundle.save_resource('is_compat_test', compat_test)

    if isinstance(approved_applications, types.StringTypes):
        # Load up the target applications if the approved applications is a
        # path (string).
        with open(approved_applications) as approved_apps:
            apps = json.load(approved_apps)
    elif isinstance(approved_applications, dict):
        # If the lists of approved applications are already in a dict, just use
        # that instead of trying to pull from a file.
        apps = approved_applications
    else:
        raise ValueError('Unknown format for `approved_applications`.')

    # Replace the module-global registry in place so other modules holding a
    # reference to it see the update. NOTE(review): not thread-safe — confirm
    # validate() is never run concurrently in one process.
    constants.APPROVED_APPLICATIONS.clear()
    constants.APPROVED_APPLICATIONS.update(apps)

    # Run the actual validation tiers against the package.
    submain.prepare_package(bundle, path, expectation,
                            for_appversions=for_appversions,
                            timeout=timeout)

    return format_result(bundle, format)
def format_result(bundle, format):
    """Serialize `bundle` according to `format`.

    `format` of None returns the bundle unchanged; 'json' renders it via
    the bundle's `render_json()`.
    """
    if format is None:
        return bundle
    renderers = {'json': lambda b: b.render_json()}
    return renderers[format](bundle)
| muffinresearch/amo-validator | validator/validate.py | Python | bsd-3-clause | 3,193 |
import unittest
from conans.test.utils.tools import TestClient
from conans.util.files import load, save
class ConditionalReqsTest(unittest.TestCase):
    """Verify that settings are available inside requirements() of a
    test_package recipe."""

    def conditional_requirements_test(self):
        # Package recipe declaring a custom 'product' setting (registered in
        # settings.yml below before the test runs).
        conanfile = """from conans import ConanFile

class TestConanLib(ConanFile):
    name = "Hello"
    version = "0.1"
    settings = "os", "build_type", "product"
"""
        # test_package recipe whose requirements() logs the settings it sees,
        # proving they are propagated when requirements are evaluated.
        test_conanfile = '''
from conans import ConanFile

class TestConanLib(ConanFile):
    requires = "Hello/0.1@lasote/testing"
    settings = "os", "build_type", "product"

    def requirements(self):
        self.output.info("Conditional test requirement: %s, %s, %s"
                         % (self.settings.os, self.settings.build_type, self.settings.product))

    def test(self):
        pass
'''
        client = TestClient()
        settings_path = client.client_cache.settings_path
        client.client_cache.settings  # NOTE(review): bare attribute access; presumably triggers lazy initialization — confirm
        settings = load(settings_path)
        # Register the custom 'product' setting so the recipes above are valid.
        settings += "\nproduct: [onion, potato]"
        save(settings_path, settings)
        client.save({"conanfile.py": conanfile,
                     "test_package/conanfile.py": test_conanfile})
        client.run("create . lasote/testing -s os=Windows -s product=onion -s build_type=Release")
        # requirements() must have seen the settings passed on the command line.
        self.assertIn("PROJECT: Conditional test requirement: Windows, Release, onion",
                      client.user_io.out)
| birsoyo/conan | conans/test/functional/conditional_test_req.py | Python | mit | 1,380 |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test node handling
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import http.client
import urllib.parse
class NodeHandlingTest (BitcoinTestFramework):
    """Functional test for node-management RPCs: setban / listbanned /
    clearbanned (including banlist persistence across a restart) and
    disconnectnode."""

    def run_test(self):
        ###########################
        # setban/listbanned tests #
        ###########################
        assert_equal(len(self.nodes[2].getpeerinfo()), 4) #we should have 4 nodes at this point
        self.nodes[2].setban("127.0.0.1", "add")
        time.sleep(3) #wait till the nodes are disconected
        assert_equal(len(self.nodes[2].getpeerinfo()), 0) #all nodes must be disconnected at this point
        assert_equal(len(self.nodes[2].listbanned()), 1)
        self.nodes[2].clearbanned()
        assert_equal(len(self.nodes[2].listbanned()), 0)
        self.nodes[2].setban("127.0.0.0/24", "add")
        assert_equal(len(self.nodes[2].listbanned()), 1)
        try:
            self.nodes[2].setban("127.0.0.1", "add") #throws exception because 127.0.0.1 is within range 127.0.0.0/24
        except:
            pass
        assert_equal(len(self.nodes[2].listbanned()), 1) #still only one banned ip because 127.0.0.1 is within the range of 127.0.0.0/24
        try:
            self.nodes[2].setban("127.0.0.1", "remove")
        except:
            pass
        assert_equal(len(self.nodes[2].listbanned()), 1)
        self.nodes[2].setban("127.0.0.0/24", "remove")
        assert_equal(len(self.nodes[2].listbanned()), 0)
        self.nodes[2].clearbanned()
        assert_equal(len(self.nodes[2].listbanned()), 0)

        ##test persisted banlist
        self.nodes[2].setban("127.0.0.0/32", "add")
        self.nodes[2].setban("127.0.0.0/24", "add")
        self.nodes[2].setban("192.168.0.1", "add", 1) #ban for 1 seconds
        self.nodes[2].setban("2001:4d48:ac57:400:cacf:e9ff:fe1d:9c63/19", "add", 1000) #ban for 1000 seconds
        listBeforeShutdown = self.nodes[2].listbanned()
        assert_equal("192.168.0.1/32", listBeforeShutdown[2]['address']) #must be here
        time.sleep(2) #make 100% sure we expired 192.168.0.1 node time
        #stop node
        stop_node(self.nodes[2], 2)
        # After the restart the expired 192.168.0.1 entry must be gone while
        # the unexpired bans survive.
        self.nodes[2] = start_node(2, self.options.tmpdir)
        listAfterShutdown = self.nodes[2].listbanned()
        assert_equal("127.0.0.0/24", listAfterShutdown[0]['address'])
        assert_equal("127.0.0.0/32", listAfterShutdown[1]['address'])
        assert_equal("/19" in listAfterShutdown[2]['address'], True)

        ###########################
        # RPC disconnectnode test #
        ###########################
        url = urllib.parse.urlparse(self.nodes[1].url)
        self.nodes[0].disconnectnode(url.hostname+":"+str(p2p_port(1)))
        time.sleep(2) #disconnecting a node needs a little bit of time
        for node in self.nodes[0].getpeerinfo():
            assert(node['addr'] != url.hostname+":"+str(p2p_port(1)))
        connect_nodes_bi(self.nodes,0,1) #reconnect the node
        found = False
        for node in self.nodes[0].getpeerinfo():
            if node['addr'] == url.hostname+":"+str(p2p_port(1)):
                found = True
        assert(found)
# Run the test directly when executed as a script.
if __name__ == '__main__':
    NodeHandlingTest ().main ()
| terracoin/terracoin | qa/rpc-tests/nodehandling.py | Python | mit | 3,440 |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Enums/PokemonRarity.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
# Standard protoc-generated boilerplate: default symbol database, file
# descriptor for POGOProtos/Enums/PokemonRarity.proto, and the enum wrapper.
_sym_db = _symbol_database.Default()

DESCRIPTOR = _descriptor.FileDescriptor(
  name='POGOProtos/Enums/PokemonRarity.proto',
  package='POGOProtos.Enums',
  syntax='proto3',
  serialized_pb=_b('\n$POGOProtos/Enums/PokemonRarity.proto\x12\x10POGOProtos.Enums*6\n\rPokemonRarity\x12\n\n\x06NORMAL\x10\x00\x12\r\n\tLEGENDARY\x10\x01\x12\n\n\x06MYTHIC\x10\x02\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# Descriptor for the PokemonRarity enum (NORMAL / LEGENDARY / MYTHIC).
_POKEMONRARITY = _descriptor.EnumDescriptor(
  name='PokemonRarity',
  full_name='POGOProtos.Enums.PokemonRarity',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='NORMAL', index=0, number=0,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='LEGENDARY', index=1, number=1,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='MYTHIC', index=2, number=2,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=58,
  serialized_end=112,
)
_sym_db.RegisterEnumDescriptor(_POKEMONRARITY)

# Public enum wrapper plus module-level aliases for each enum value.
PokemonRarity = enum_type_wrapper.EnumTypeWrapper(_POKEMONRARITY)
NORMAL = 0
LEGENDARY = 1
MYTHIC = 2

DESCRIPTOR.enum_types_by_name['PokemonRarity'] = _POKEMONRARITY
# @@protoc_insertion_point(module_scope)
| polzy/PokeManager | pogo/POGOProtos/Enums/PokemonRarity_pb2.py | Python | mit | 1,831 |
#!/usr/bin/python
# (C) 2015 Muthiah Annamalai, <ezhillang@gmail.com>
# Ezhil Language Foundation
#
from __future__ import print_function
import sys
import codecs
import tamil
import json
sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
class WordList:
    """Dump the word lists contained in the v01.json..v24.json files."""

    @staticmethod
    def extract_words(filename):
        # The keys of the JSON object are the words; print them sorted.
        ht = json.load( codecs.open(filename,'r','utf-8') )
        for word in sorted(ht.keys()):
            print(word)
        return

    @staticmethod
    def pull_words_from_json():
        # Process volumes v01.json through v24.json in order.
        for itr in range(1,25):
            filename = u"v%02d.json"%itr
            WordList.extract_words(filename)
        return
class WordFilter:
    """Filter dictionary words by their length in Tamil letters."""

    @staticmethod
    def filter_and_save(word_size=4):
        """Collect words from 'tamilvu_dictionary_words.txt' whose length
        (in Tamil letters, ignoring spaces) equals `word_size`, and write
        them to 'word_filter_NN.txt'."""
        match_word_length = lambda word: len(tamil.utf8.get_letters(word.strip().replace(' ',''))) == word_size
        filename = u'tamilvu_dictionary_words.txt'
        matches = []
        with codecs.open(filename,'r','utf-8') as fp:
            # Materialize with list(): on Python 3 filter() returns a lazy
            # iterator, and the len(matches) below would raise TypeError.
            matches = list(filter(match_word_length, fp.readlines()))
        with codecs.open('word_filter_%02d.txt'%word_size,'w','utf-8') as fp:
            for word in matches:
                fp.write(u'%s\n'%word.replace(' ','').strip())
        print(u'we found %d words of length %d\n'%(len(matches),word_size))
        return
if __name__ == u"__main__":
    # WordList.pull_words_from_json()
    # Generate a filtered word-list file for each length from 3 to 19.
    for wlen in range(3,20):
        WordFilter.filter_and_save( wlen )
| arcturusannamalai/open-tamil | solthiruthi/data/tamilvu_wordlist.py | Python | mit | 1,438 |
from __future__ import absolute_import
from __future__ import print_function
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Merge
from keras.utils import np_utils
import numpy as np
# Tiny hyper-parameters: these are one-epoch smoke tests, not real training runs.
nb_classes = 10
batch_size = 128
nb_epoch = 1
max_train_samples = 5000
max_test_samples = 1000
np.random.seed(1337) # for reproducibility
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000,784)[:max_train_samples]
X_test = X_test.reshape(10000,784)[:max_test_samples]
X_train = X_train.astype("float32")
X_test = X_test.astype("float32")
X_train /= 255
X_test /= 255
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)[:max_train_samples]
Y_test = np_utils.to_categorical(y_test, nb_classes)[:max_test_samples]
#########################
# sequential model test #
#########################
# Exercise every fit() option combination on a small MLP, then check that
# evaluate/predict/predict_classes/get_config all run without error.
print('Test sequential')
model = Sequential()
model.add(Dense(784, 50))
model.add(Activation('relu'))
model.add(Dense(50, 10))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=(X_test, Y_test))
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_data=(X_test, Y_test))
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_split=0.1)
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_split=0.1)
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False)
score = model.evaluate(X_train, Y_train, verbose=0)
print('score:', score)
# NOTE(review): `score` is the value returned by evaluate(); the `<` check
# assumes it stays above 0.25 after one epoch -- confirm intended semantics.
if score < 0.25:
    raise Exception('Score too low, learning issue.')
preds = model.predict(X_test, verbose=0)
classes = model.predict_classes(X_test, verbose=0)
model.get_config(verbose=1)
###################
# merge test: sum #
###################
# Two parallel Dense branches merged element-wise (sum); both branches are
# fed the same data, so the input is a 2-element list.
print('Test merge: sum')
left = Sequential()
left.add(Dense(784, 50))
left.add(Activation('relu'))
right = Sequential()
right.add(Dense(784, 50))
right.add(Activation('relu'))
model = Sequential()
model.add(Merge([left, right], mode='sum'))
model.add(Dense(50, 10))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
model.fit([X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=([X_test, X_test], Y_test))
model.fit([X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_data=([X_test, X_test], Y_test))
model.fit([X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_split=0.1)
model.fit([X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_split=0.1)
model.fit([X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
model.fit([X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False)
score = model.evaluate([X_train, X_train], Y_train, verbose=0)
print('score:', score)
if score < 0.22:
    raise Exception('Score too low, learning issue.')
preds = model.predict([X_test, X_test], verbose=0)
classes = model.predict_classes([X_test, X_test], verbose=0)
model.get_config(verbose=1)
######################
# merge test: concat #
######################
# Branch outputs are concatenated, so the next Dense layer takes 50*2 inputs.
print('Test merge: concat')
left = Sequential()
left.add(Dense(784, 50))
left.add(Activation('relu'))
right = Sequential()
right.add(Dense(784, 50))
right.add(Activation('relu'))
model = Sequential()
model.add(Merge([left, right], mode='concat'))
model.add(Dense(50*2, 10))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
model.fit([X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=([X_test, X_test], Y_test))
model.fit([X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_data=([X_test, X_test], Y_test))
model.fit([X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_split=0.1)
model.fit([X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_split=0.1)
model.fit([X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
model.fit([X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False)
score = model.evaluate([X_train, X_train], Y_train, verbose=0)
print('score:', score)
if score < 0.22:
    raise Exception('Score too low, learning issue.')
preds = model.predict([X_test, X_test], verbose=0)
classes = model.predict_classes([X_test, X_test], verbose=0)
model.get_config(verbose=1)
##########################
# test merge recursivity #
##########################
# A Merge fed by another Merge (left+right -> intermediate, then
# intermediate+righter), plus a save/load weight round-trip at the end.
print('Test merge recursivity')
left = Sequential()
left.add(Dense(784, 50))
left.add(Activation('relu'))
right = Sequential()
right.add(Dense(784, 50))
right.add(Activation('relu'))
righter = Sequential()
righter.add(Dense(784, 50))
righter.add(Activation('relu'))
intermediate = Sequential()
intermediate.add(Merge([left, right], mode='sum'))
intermediate.add(Dense(50, 50))
intermediate.add(Activation('relu'))
model = Sequential()
model.add(Merge([intermediate, righter], mode='sum'))
model.add(Dense(50, 10))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
model.fit([X_train, X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=([X_test, X_test, X_test], Y_test))
model.fit([X_train, X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_data=([X_test, X_test, X_test], Y_test))
model.fit([X_train, X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_split=0.1)
model.fit([X_train, X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_split=0.1)
model.fit([X_train, X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
model.fit([X_train, X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False)
score = model.evaluate([X_train, X_train, X_train], Y_train, verbose=0)
print('score:', score)
if score < 0.19:
    raise Exception('Score too low, learning issue.')
preds = model.predict([X_test, X_test, X_test], verbose=0)
classes = model.predict_classes([X_test, X_test, X_test], verbose=0)
model.get_config(verbose=1)
# weight round-trip: score after reload should match the score above
model.save_weights('temp.h5')
model.load_weights('temp.h5')
score = model.evaluate([X_train, X_train, X_train], Y_train, verbose=0)
print('score:', score)
######################
# test merge overlap #
######################
# The SAME branch object is used twice in the Merge, so the model takes a
# single (non-list) input shared by both sides.
print('Test merge overlap')
left = Sequential()
left.add(Dense(784, 50))
left.add(Activation('relu'))
model = Sequential()
model.add(Merge([left, left], mode='sum'))
model.add(Dense(50, 10))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=(X_test, Y_test))
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_data=(X_test, Y_test))
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_split=0.1)
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_split=0.1)
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False)
score = model.evaluate(X_train, Y_train, verbose=0)
print('score:', score)
if score < 0.22:
    raise Exception('Score too low, learning issue.')
preds = model.predict(X_test, verbose=0)
classes = model.predict_classes(X_test, verbose=0)
model.get_config(verbose=1)
| kfoss/keras | tests/manual/check_models.py | Python | mit | 8,592 |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
from future.moves.urllib.parse import urlparse
import json
import logging
from collections import MutableSet
import requests
from requests import RequestException
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
log = logging.getLogger('sonarr_list')
class SonarrSet(MutableSet):
    """Mutable-set view of the TV series managed by a Sonarr server.

    Items are flexget ``Entry`` objects.  Membership is decided by
    :meth:`_find_entry`, which matches on any of the ids in
    ``supported_ids`` and falls back to a case-insensitive title match.
    """

    supported_ids = ['tvdb_id', 'tvrage_id', 'tvmaze_id', 'imdb_id', 'slug', 'sonarr_id']
    schema = {
        'type': 'object',
        'properties': {
            'base_url': {'type': 'string'},
            'port': {'type': 'number', 'default': 80},
            'api_key': {'type': 'string'},
            'include_ended': {'type': 'boolean', 'default': True},
            'only_monitored': {'type': 'boolean', 'default': True},
            'include_data': {'type': 'boolean', 'default': False}
        },
        'required': ['api_key', 'base_url'],
        'additionalProperties': False
    }

    def series_request_builder(self, base_url, port, api_key):
        """Return ``(url, headers)`` for Sonarr's /api/series endpoint."""
        parsedurl = urlparse(base_url)
        log.debug('Received series list request')
        url = '%s://%s:%s%s/api/series' % (parsedurl.scheme, parsedurl.netloc, port, parsedurl.path)
        headers = {'X-Api-Key': api_key}
        return url, headers

    def lookup_request(self, base_url, port, api_key):
        """Return ``(url, headers)`` for the series lookup endpoint.

        The search term is appended to the returned url by the caller.
        """
        parsedurl = urlparse(base_url)
        log.debug('Received series lookup request')
        url = '%s://%s:%s%s/api/series/lookup?term=' % (parsedurl.scheme, parsedurl.netloc, port, parsedurl.path)
        headers = {'X-Api-Key': api_key}
        return url, headers

    def profile_list_request(self, base_url, port, api_key):
        """Return ``(url, headers)`` for Sonarr's /api/profile endpoint."""
        parsedurl = urlparse(base_url)
        log.debug('Received profile list request')
        url = '%s://%s:%s%s/api/profile' % (parsedurl.scheme, parsedurl.netloc, port, parsedurl.path)
        headers = {'X-Api-Key': api_key}
        return url, headers

    def rootfolder_request(self, base_url, port, api_key):
        """Return ``(url, headers)`` for Sonarr's /api/Rootfolder endpoint."""
        parsedurl = urlparse(base_url)
        log.debug('Received rootfolder list request')
        url = '%s://%s:%s%s/api/Rootfolder' % (parsedurl.scheme, parsedurl.netloc, port, parsedurl.path)
        headers = {'X-Api-Key': api_key}
        return url, headers

    def get_json(self, url, headers):
        """GET ``url`` and return decoded JSON; raise PluginError on any failure."""
        try:
            response = requests.get(url, headers=headers)
            if response.status_code == 200:
                return response.json()
            else:
                raise plugin.PluginError('Invalid response received from Sonarr: %s' % response.content)
        except RequestException as e:
            raise plugin.PluginError('Unable to connect to Sonarr at %s. Error: %s' % (url, e))

    def post_json(self, url, headers, data):
        """POST ``data`` to ``url`` and return decoded JSON; raise PluginError on any failure."""
        try:
            response = requests.post(url, headers=headers, data=data)
            if response.status_code == 201:
                return response.json()
            else:
                raise plugin.PluginError('Invalid response received from Sonarr: %s' % response.content)
        except RequestException as e:
            raise plugin.PluginError('Unable to connect to Sonarr at %s. Error: %s' % (url, e))

    def request_builder(self, base_url, request_type, port, api_key):
        """Dispatch to the URL builder matching ``request_type``."""
        if request_type == 'series':
            return self.series_request_builder(base_url, port, api_key)
        elif request_type == 'profile':
            return self.profile_list_request(base_url, port, api_key)
        elif request_type == 'lookup':
            return self.lookup_request(base_url, port, api_key)
        elif request_type == 'rootfolder':
            return self.rootfolder_request(base_url, port, api_key)
        else:
            raise plugin.PluginError('Received unknown API request, aborting.')

    def translate_quality(self, quality_name):
        """
        Translate Sonnar's qualities to ones recognize by Flexget
        """
        if quality_name == 'Raw-HD':  # No better match yet in Flexget
            return 'remux'
        elif quality_name == 'DVD':  # No better match yet in Flexget
            return 'dvdrip'
        else:
            return quality_name.replace('-', ' ').lower()

    def quality_requirement_builder(self, quality_profile):
        """Return (allowed_qualities, cutoff) in flexget terms for a Sonarr profile."""
        allowed_qualities = [self.translate_quality(quality['quality']['name']) for quality in quality_profile['items']
                             if quality['allowed']]
        cutoff = self.translate_quality(quality_profile['cutoff']['name'])
        return allowed_qualities, cutoff

    def list_entries(self):
        """Fetch all series from Sonarr and convert them into flexget entries."""
        series_url, series_headers = self.request_builder(self.config.get('base_url'), 'series',
                                                          self.config.get('port'), self.config['api_key'])
        # renamed from `json` so the module-level json import is not shadowed
        series_json = self.get_json(series_url, series_headers)
        # Retrieves Sonarr's profile list if include_data is set to true
        profiles_json = None
        if self.config.get('include_data'):
            profile_url, profile_headers = self.request_builder(self.config.get('base_url'), 'profile',
                                                                self.config.get('port'),
                                                                self.config['api_key'])
            profiles_json = self.get_json(profile_url, profile_headers)
        entries = []
        for show in series_json:
            fg_qualities = ''  # Initializes the quality parameter
            fg_cutoff = ''
            path = None
            if not show['monitored'] and self.config.get(
                    'only_monitored'):  # Checks if to retrieve just monitored shows
                continue
            if show['status'] == 'ended' and not self.config.get('include_ended'):  # Checks if to retrieve ended shows
                continue
            if self.config.get('include_data') and profiles_json:  # Check if to retrieve quality & path
                path = show.get('path')
                for profile in profiles_json:
                    if profile['id'] == show['profileId']:  # Get show's profile data from all possible profiles
                        fg_qualities, fg_cutoff = self.quality_requirement_builder(profile)
            # NOTE(review): Sonarr's payload uses camelCase keys elsewhere
            # ('tvdbId', 'titleSlug'); confirm 'imdbid' really is lowercase.
            entry = Entry(title=show['title'],
                          url='',
                          series_name=show['title'],
                          tvdb_id=show.get('tvdbId'),
                          tvrage_id=show.get('tvRageId'),
                          tvmaze_id=show.get('tvMazeId'),
                          imdb_id=show.get('imdbid'),
                          slug=show.get('titleSlug'),
                          sonarr_id=show.get('id'),
                          configure_series_target=fg_cutoff)
            if len(fg_qualities) > 1:
                entry['configure_series_qualities'] = fg_qualities
            elif len(fg_qualities) == 1:
                entry['configure_series_quality'] = fg_qualities[0]
            else:
                entry['configure_series_quality'] = fg_qualities
            if path:
                entry['configure_series_path'] = path
            if entry.isvalid():
                log.debug('returning entry %s', entry)
                entries.append(entry)
            else:
                log.error('Invalid entry created? %s', entry)
                continue
        return entries

    def add_show(self, entry):
        """Look ``entry`` up on Sonarr and add it; return Sonarr's response, or None if no match."""
        log.debug('searching for show match for %s using Sonarr', entry)
        lookup_series_url, lookup_series_headers = self.request_builder(self.config.get('base_url'), 'lookup',
                                                                        self.config.get('port'), self.config['api_key'])
        if entry.get('tvdb_id'):
            lookup_series_url += 'tvdb:%s' % entry.get('tvdb_id')
        else:
            lookup_series_url += entry.get('title')
        lookup_results = self.get_json(lookup_series_url, headers=lookup_series_headers)
        if not lookup_results:
            log.debug('could not find series match to %s', entry)
            return
        else:
            if len(lookup_results) > 1:
                log.debug('got multiple results for Sonarr, using first one')
        show = lookup_results[0]
        log.debug('using show %s', show)
        # Getting rootfolder
        rootfolder_series_url, rootfolder_series_headers = self.request_builder(self.config.get('base_url'),
                                                                                'rootfolder', self.config.get('port'),
                                                                                self.config['api_key'])
        rootfolder = self.get_json(rootfolder_series_url, headers=rootfolder_series_headers)
        # Setting defaults for Sonarr
        show['profileId'] = 1
        # BUG FIX: this key used to be 'qualityProfileId ' (trailing space),
        # which Sonarr's API silently ignores, so the quality profile was
        # never actually applied to the add request.
        show['qualityProfileId'] = 1
        show['rootFolderPath'] = rootfolder[0]['path']
        series_url, series_headers = self.request_builder(self.config.get('base_url'), 'series',
                                                          self.config.get('port'), self.config['api_key'])
        log.debug('adding show %s to sonarr', show)
        returned_show = self.post_json(series_url, headers=series_headers, data=json.dumps(show))
        return returned_show

    def remove_show(self, show):
        """Delete ``show`` (matched entry carrying 'sonarr_id') from Sonarr."""
        delete_series_url, delete_series_headers = self.request_builder(self.config.get('base_url'), 'series',
                                                                        self.config.get('port'), self.config['api_key'])
        delete_series_url += '/%s' % show.get('sonarr_id')
        requests.delete(delete_series_url, headers=delete_series_headers)

    @property
    def shows(self):
        # Lazily fetched and cached; invalidated by add() after a mutation.
        if self._shows is None:
            self._shows = self.list_entries()
        return self._shows

    def _find_entry(self, entry):
        """Return the cached entry matching ``entry`` by id or title, else None."""
        for sb_entry in self.shows:
            if any(entry.get(id) is not None and entry[id] == sb_entry[id] for id in self.supported_ids):
                return sb_entry
            if entry.get('title').lower() == sb_entry.get('title').lower():
                return sb_entry

    def _from_iterable(self, it):
        # TODO: is this the right answer? the returned object won't have our custom __contains__ logic
        return set(it)

    def __init__(self, config):
        self.config = config
        self._shows = None

    def __iter__(self):
        return (entry for entry in self.shows)

    def __len__(self):
        return len(self.shows)

    def __contains__(self, entry):
        return self._find_entry(entry) is not None

    def add(self, entry):
        """Add ``entry`` to Sonarr unless an equivalent show already exists."""
        if not self._find_entry(entry):
            show = self.add_show(entry)
            self._shows = None
            # BUG FIX: add_show() returns None when no match was found;
            # previously show['title'] raised TypeError in that case.
            if show:
                log.verbose('Successfully added show %s to Sonarr', show['title'])
        else:
            log.debug('entry %s already exists in Sonarr list', entry)

    def discard(self, entry):
        """Remove the show matching ``entry`` from Sonarr, if present."""
        show = self._find_entry(entry)
        if not show:
            log.debug('Did not find matching show in Sonarr for %s, skipping', entry)
            return
        self.remove_show(show)
        log.verbose('removed show %s from Sonarr', show['title'])

    @property
    def immutable(self):
        return False

    @property
    def online(self):
        """ Set the online status of the plugin, online plugin should be treated differently in certain situations,
        like test mode"""
        return True

    def get(self, entry):
        return self._find_entry(entry)
class SonarrList(object):
    """`sonarr_list` plugin entry point: exposes a Sonarr server as a flexget list."""
    schema = SonarrSet.schema
    @staticmethod
    def get_list(config):
        """Return the mutable set interface used by list plugins."""
        return SonarrSet(config)
    def on_task_input(self, task, config):
        """Produce all Sonarr series as entries when used as a task input."""
        return list(SonarrSet(config))
@event('plugin.register')
def register_plugin():
    # Register SonarrList under the `sonarr_list` name, in the list-interface group.
    plugin.register(SonarrList, 'sonarr_list', api_ver=2, groups=['list'])
| oxc/Flexget | flexget/plugins/list/sonarr_list.py | Python | mit | 11,933 |
from __future__ import print_function
import numpy as np
import scipy.stats, scipy.optimize
import acq4.pyqtgraph as pg
class StageCalibration(object):
    """Calibrate a motorized stage against camera imagery.

    The stage is stepped through a row of positions while camera frames are
    captured; the apparent image shift between frames (FFT registration)
    gives the measured motion, and the residual positioning error is fit
    with a sum of sinusoids.
    """

    def __init__(self, stage):
        self.stage = stage
        # When not None: a timestamp after which newly arrived frames are
        # trusted to show the fully stopped stage.
        self.framedelay = None

    def calibrate(self, camera):
        """Begin a calibration run; frames are processed via the newFrame callback."""
        import imreg_dft # FFT image registration by Chris Gohlke; available via pip
        n = 300    # number of stage positions to visit
        dx = 10e-6 # step size along x, in meters
        self.move = None
        self.camera = camera
        self.offsets = np.empty((n, 2))
        self.frames = []
        self.index = 0
        # current stage position
        pos = self.stage.getPosition()
        # where to move on each update
        self.positions = np.zeros((n, 2))
        self.positions[:,0] = pos[0] + np.arange(n) * dx
        self.positions[:,1] = pos[1]
        camera.sigNewFrame.connect(self.newFrame)

    def newFrame(self, frame):
        """Camera callback: drop frames taken while the stage moves or settles."""
        try:
            if self.move is not None and not self.move.isDone():
                # stage is still moving; discard frame
                return
            if self.framedelay is None:
                # stage has stopped; discard 2 more frames to be sure
                # we get the right image.
                self.framedelay = pg.ptime.time() + 1./frame.info()['fps']
            elif self.framedelay < frame.info()['time']:
                # now we are ready to keep this frame.
                self.framedelay = None
                self.processFrame(frame)
        except Exception:
            # NOTE(review): relies on acq4's pyqtgraph exposing pg.disconnect();
            # stock pyqtgraph has no such helper -- confirm.
            pg.disconnect(self.camera.sigNewFrame, self.newFrame)
            raise

    def processFrame(self, frame):
        """Record one settled frame, start the next move, and measure the image offset."""
        # BUG FIX: imreg_dft was previously imported only inside calibrate();
        # that binding is local to calibrate(), so the reference below raised
        # NameError on the first processed frame.
        import imreg_dft
        self.frames.append(frame)
        index = self.index
        # update index for next iteration
        self.index += 1
        # decide whether to move the stage
        finished = self.index >= self.positions.shape[0]
        if not finished:
            self.move = self.stage.moveTo(self.positions[self.index], 'slow')
        # calculate offset (while stage moves no next location)
        if index == 0:
            offset = (0, 0)
        else:
            # compare against a frame up to 10 steps back to keep shifts small
            compareIndex = max(0, index-10)
            offset, _ = imreg_dft.translation(frame.getImage(), self.frames[compareIndex].getImage())
            px = self.camera.getPixelSize()
            offset = self.offsets[compareIndex] + offset.astype(float) * [px.x(), px.y()]
        self.offsets[index] = offset
        # finish up if there are no more positions
        if finished:
            pg.disconnect(self.camera.sigNewFrame, self.newFrame)
            self.analyze()

    def analyze(self):
        """Fit measured offsets: a linear step scale plus a sinusoidal residual."""
        # frames = []
        # for frame in self.frames:
        #     frames.append(frame.getImage()[np.newaxis, ...])
        # self.frameArray = np.concatenate(frames, axis=0)
        # self.imageView = pg.image(self.frameArray)
        # linear regression to determine scale between stage steps and camera microns
        x = ((self.positions - self.positions[0])**2).sum(axis=1)**0.5
        y = (self.offsets**2).sum(axis=1)**0.5
        slope, yint, r, p, stdev = scipy.stats.linregress(x, y)
        # subtract linear approximation to get residual error
        y1 = x * slope + yint
        self.xvals = x
        self.error = y - y1
        self.errorPlot = pg.plot(x, self.error, title='X axis error (slope = %0.2f um/step)' % (slope*1e6), labels={'left': ('Error', 'm'), 'bottom': ('position', 'steps')})
        # fit residual to combination of sine waves (4 harmonics, shared phase/frequency)
        def fn(p, x):
            return (p[2] * np.sin((x + p[0]) * 1 * p[1]) +
                    p[3] * np.sin((x + p[0]) * 2 * p[1]) +
                    p[4] * np.sin((x + p[0]) * 3 * p[1]) +
                    p[5] * np.sin((x + p[0]) * 4 * p[1]))
        def erf(p, x, y):
            return fn(p, x) - y
        f0 = 6 * np.pi / x.max() # guess there are 3 cycles in the data
        amp = self.error.max()
        self.fit = scipy.optimize.leastsq(erf, [0, f0, amp, amp, amp, amp], (x, self.error))[0]
        self.errorPlot.plot(x, fn(self.fit, x), pen='g')
| pbmanis/acq4 | acq4/devices/Stage/calibration.py | Python | mit | 4,016 |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
from past.builtins import basestring
import logging
from flexget import options, plugin
from flexget.event import event
from flexget.terminal import console
log = logging.getLogger('try_regexp')
class PluginTryRegexp(object):
    """
    This plugin allows user to test regexps for a task.
    """

    def __init__(self):
        self.abort = False

    def matches(self, entry, regexp):
        """Return (True, field) for the first entry string field matching regexp,
        or (False, None) when nothing matches."""
        import re
        for field, value in entry.items():
            if not isinstance(value, basestring):
                continue
            if re.search(regexp, value, re.IGNORECASE | re.UNICODE):
                return (True, field)
        return (False, None)

    def on_task_filter(self, task, config):
        """Interactive loop: prompt for regexps and report which entries match."""
        # BUG FIX: `re` was previously imported only inside matches(); that
        # binding is local to matches(), so the `except re.error` below raised
        # NameError whenever an invalid pattern was entered.
        import re
        if not task.options.try_regexp:
            return
        if self.abort:
            return
        console('-' * 79)
        console('Hi there, welcome to try regexps in realtime!')
        console('Press ^D or type \'exit\' to continue. Type \'continue\' to continue non-interactive execution.')
        console('Task \'%s\' has %s entries, enter regexp to see what matches it.' % (task.name, len(task.entries)))
        while (True):
            try:
                s = input('--> ')
                if s == 'exit':
                    break
                if s == 'abort' or s == 'continue':
                    self.abort = True
                    break
            except EOFError:
                break
            count = 0
            for entry in task.entries:
                try:
                    match, field = self.matches(entry, s)
                    if match:
                        console('Title: %-40s URL: %-30s From: %s' % (entry['title'], entry['url'], field))
                        count += 1
                except re.error:
                    console('Invalid regular expression')
                    break
            console('%s of %s entries matched' % (count, len(task.entries)))
        console('Bye!')
@event('plugin.register')
def register_plugin():
    # Registered as a builtin so --try-regexp works on every task execution.
    plugin.register(PluginTryRegexp, '--try-regexp', builtin=True, api_ver=2)
@event('options.register')
def register_parser_arguments():
    # Add the --try-regexp flag to the `execute` sub-command's parser.
    options.get_parser('execute').add_argument('--try-regexp', action='store_true', dest='try_regexp', default=False,
                                               help='try regular expressions interactively')
| oxc/Flexget | flexget/plugins/cli/try_regexp.py | Python | mit | 2,592 |
# Copyright (c) 2015-2016 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import click
from molecule import util
from molecule.command import base
from molecule.dependency import ansible_galaxy
from molecule.dependency import shell
class Dependency(base.Base):
    """Resolve a role's dependencies before a converge."""

    def execute(self, exit=True):
        """Download the role's dependencies unless they are already installed.

        Supports the 'galaxy' and 'shell' dependency drivers; any other
        driver name is silently skipped.  (The original docstring claimed
        the return value came from AnsiblePlaybook.execute; it does not.)

        :param exit: (Unused) Provided to complete method signature.
        :return: Always the tuple ``(None, None)``.
        """
        debug = self.args.get('debug')

        if self.molecule.state.installed_deps:
            return (None, None)

        dependency_name = self.molecule.dependency
        # driver name -> (config key that must be present, executor class);
        # the two branches previously duplicated this logic verbatim.
        drivers = {
            'galaxy': ('requirements_file', ansible_galaxy.AnsibleGalaxy),
            'shell': ('command', shell.Shell),
        }
        if dependency_name in drivers:
            required_key, executor_class = drivers[dependency_name]
            dd = self.molecule.config.config.get('dependency')
            if dd.get(required_key):
                msg = "Downloading dependencies with '{}'...".format(
                    dependency_name)
                util.print_info(msg)
                executor = executor_class(
                    self.molecule.config.config, debug=debug)
                executor.execute()
                self.molecule.state.change_state('installed_deps', True)

        return (None, None)
@click.command()
@click.pass_context
def dependency(ctx):  # pragma: no cover
    """ Perform dependent actions on the current role. """
    d = Dependency(ctx.obj.get('args'), {})
    # BUG FIX: a bare `d.execute` statement used to sit here -- an attribute
    # access without a call, i.e. a silent no-op -- and has been removed.
    util.sysexit(d.execute()[0])
| rgreinho/molecule | molecule/command/dependency.py | Python | mit | 2,997 |
from direct.distributed.DistributedObjectGlobal import DistributedObjectGlobal
from direct.directnotify.DirectNotifyGlobal import directNotify
from otp.uberdog.RejectCode import RejectCode
from otp.otpbase import OTPGlobals
from otp.otpbase import OTPLocalizer
class AvatarFriendsManager(DistributedObjectGlobal):
    """Client-side global object mirroring the local avatar's friends,
    pending invitations and ignore list, broadcasting changes as
    messenger events."""
    notify = directNotify.newCategory('AvatarFriendsManager')

    def __init__(self, cr):
        DistributedObjectGlobal.__init__(self, cr)
        self.reset()

    def reset(self):
        """Drop all cached friend, invitation and ignore state."""
        self.avatarFriendsList = set()
        self.avatarId2Info = {}
        self.invitedAvatarsList = []
        self.ignoredAvatarList = []

    def addIgnore(self, avId):
        """Start ignoring avId; no-op if already ignored."""
        if avId in self.ignoredAvatarList:
            return
        self.ignoredAvatarList.append(avId)
        base.cr.centralLogger.writeClientEvent('ignoring %s' % (avId,))
        messenger.send('AvatarIgnoreChange')

    def removeIgnore(self, avId):
        """Stop ignoring avId; no-op if not currently ignored."""
        if avId not in self.ignoredAvatarList:
            return
        self.ignoredAvatarList.remove(avId)
        base.cr.centralLogger.writeClientEvent('stopped ignoring %s' % (avId,))
        messenger.send('AvatarIgnoreChange')

    def checkIgnored(self, avId):
        """Truthy when avId is non-zero and currently ignored."""
        return avId and avId in self.ignoredAvatarList

    def sendRequestInvite(self, avId):
        """Ask the server to befriend avId; remember the outstanding invite."""
        self.notify.debugCall()
        self.sendUpdate('requestInvite', [avId])
        self.invitedAvatarsList.append(avId)

    def sendRequestRemove(self, avId):
        """Ask the server to unfriend avId; forget any outstanding invite."""
        self.notify.debugCall()
        self.sendUpdate('requestRemove', [avId])
        if avId in self.invitedAvatarsList:
            self.invitedAvatarsList.remove(avId)

    def friendConsidering(self, avId):
        # Server callback: the invited avatar is thinking it over.
        self.notify.debugCall()
        messenger.send(OTPGlobals.AvatarFriendConsideringEvent, [1, avId])

    def invitationFrom(self, avId, avatarName):
        # Server callback: another avatar has invited us.
        self.notify.debugCall()
        messenger.send(OTPGlobals.AvatarFriendInvitationEvent, [avId, avatarName])

    def retractInvite(self, avId):
        # Server callback: the remote side withdrew their invitation.
        self.notify.debugCall()
        messenger.send(OTPGlobals.AvatarFriendRetractInviteEvent, [avId])
        if avId in self.invitedAvatarsList:
            self.invitedAvatarsList.remove(avId)

    def rejectInvite(self, avId, reason):
        # Server callback: our invitation was declined.
        self.notify.debugCall()
        messenger.send(OTPGlobals.AvatarFriendRejectInviteEvent, [avId, reason])
        if avId in self.invitedAvatarsList:
            self.invitedAvatarsList.remove(avId)

    def rejectRemove(self, avId, reason):
        # Server callback: our removal request was declined.
        self.notify.debugCall()
        messenger.send(OTPGlobals.AvatarFriendRejectRemoveEvent, [avId, reason])

    def updateAvatarFriend(self, avId, info):
        """Insert or refresh cached info for avId, firing the matching events."""
        # Back-fill the avatar id on the info blob when the server left it unset.
        if hasattr(info, 'avatarId') and not info.avatarId and avId:
            info.avatarId = avId
        if avId not in self.avatarFriendsList:
            # Brand-new friend: cache and announce the addition first.
            self.avatarFriendsList.add(avId)
            self.avatarId2Info[avId] = info
            messenger.send(OTPGlobals.AvatarFriendAddEvent, [avId, info])
        if self.avatarId2Info[avId].onlineYesNo != info.onlineYesNo:
            # Online status changed; tell the chat assistant.
            base.talkAssistant.receiveFriendUpdate(avId, info.getName(), info.onlineYesNo)
        self.avatarId2Info[avId] = info
        messenger.send(OTPGlobals.AvatarFriendUpdateEvent, [avId, info])
        if avId in self.invitedAvatarsList:
            # One of our outstanding invitations was accepted.
            self.invitedAvatarsList.remove(avId)
            messenger.send(OTPGlobals.AvatarNewFriendAddEvent, [avId])

    def removeAvatarFriend(self, avId):
        """Forget avId entirely and announce the removal."""
        self.avatarFriendsList.remove(avId)
        self.avatarId2Info.pop(avId, None)
        messenger.send(OTPGlobals.AvatarFriendRemoveEvent, [avId])

    def setFriends(self, avatarIds):
        # Distributed-update hook; currently only logged.
        self.notify.debugCall()

    def isFriend(self, avId):
        return self.isAvatarFriend(avId)

    def isAvatarFriend(self, avId):
        return avId in self.avatarFriendsList

    def getFriendInfo(self, avId):
        return self.avatarId2Info.get(avId)

    def countTrueFriends(self):
        """Number of friends whose friendship allows open chat."""
        return sum(1 for info in self.avatarId2Info.values()
                   if info.openChatFriendshipYesNo)
| ToonTownInfiniteRepo/ToontownInfinite | otp/friends/AvatarFriendsManager.py | Python | mit | 4,080 |
#!/usr/bin/env python3
# Generate the mapping from file extensions to syntax-highlighter languages
# used by the file viewer, combining PrismJS component metadata with
# GitHub's language map (see the URL constants below).
import os.path
import sys
import urllib.request
import re
import gettext
import json
import subprocess
from tempfile import TemporaryDirectory
import xml.etree.ElementTree as ET
# Upstream metadata sources: Prism's language components and GitHub's
# extension -> language map.
PRISM_COMPONENTS_URL = 'https://github.com/PrismJS/prism/raw/master/components.json'
LANGUAGE_MAP_URL = 'https://github.com/blakeembrey/language-map/raw/master/languages.json'
# resolve ambiguities:
OVERRIDES = {
    'h' : 'cpp',
    'inc' : 'php',
    'cake' : 'coffeescript',
    'es' : 'javascript',
    'fcgi' : 'lua',
    'cgi' : 'perl',
    'pl' : 'perl',
    'pro' : 'perl',
    'ts' : 'typescript',
    'tsx' : 'typescript',
    'sch' : 'scheme',
    'cs' : 'csharp',
    'st' : 'smalltalk',
}
# known irrelevant languages:
BLACKLIST = set([
    'glsl', 'nginx', 'apacheconf', 'matlab', 'opencl', 'puppet', 'reason', 'renpy',
    'plsql', 'sql', 'tex',
])
# ...and extensions:
BLACKLIST_EXT = set([
    'spec', 'pluginspec', 'ml',
])
# Sentinels delimiting the generated region inside src/fileviewer.extensions.h.
MARKER_BEGIN = "// Code generated with scripts/extract-fileviewer-mappings.py begins here"
MARKER_END = "// Code generated with scripts/extract-fileviewer-mappings.py ends here"
# Download both datasets, then index every Prism language under its canonical
# name and all of its aliases.
prism_langs = json.loads(urllib.request.urlopen(PRISM_COMPONENTS_URL).read().decode('utf-8'))['languages']
del prism_langs['meta']  # 'meta' is bookkeeping in components.json, not a language
language_map = json.loads(urllib.request.urlopen(LANGUAGE_MAP_URL).read().decode('utf-8'))
prism_known = {}
for lang, data in prism_langs.items():
    prism_known[lang] = lang
    for a in data.get('alias', []):
        prism_known[a] = lang
# Build extension -> Prism language, resolving conflicts via OVERRIDES and
# dropping blacklisted languages/extensions.
ext_to_lang = {}
for lang, data in language_map.items():
    lang = lang.lower()
    lango = lang  # NOTE(review): unused; presumably a leftover -- confirm and remove
    if not lang in prism_known:
        # Try the aliases until one is known to Prism.
        for a in data.get('aliases', []):
            if a in prism_known:
                lang = a
                break
    if lang not in prism_known:
        continue
    if lang in BLACKLIST:
        continue
    for ext in data.get('extensions', []):
        assert ext[0] == '.'
        ext = ext[1:].lower()
        if ext in BLACKLIST_EXT:
            continue
        if ext != lang:
            if ext in ext_to_lang:
                if ext in OVERRIDES:
                    ext_to_lang[ext] = OVERRIDES[ext]
                else:
                    sys.stderr.write(f'SKIPPING due to extension conflict: {ext} both {lang} and {ext_to_lang[ext]}\n')
                # NOTE(review): this unconditional assignment overwrites the
                # OVERRIDES resolution above and makes the SKIPPING warning
                # ineffective -- confirm the intended conflict handling.
                ext_to_lang[ext] = lang
        else:
            ext_to_lang[ext] = lang
# Emit the generated C initializer table and splice it between the markers in
# src/fileviewer.extensions.h; must be run from the project root.
output = f'{MARKER_BEGIN}\n\n'
for ext in sorted(ext_to_lang.keys()):
    lang = ext_to_lang[ext]
    output += f'{{ "{ext}", "{lang}" }},\n'
output += f'\n{MARKER_END}\n'
if os.path.isfile("src/fileviewer.extensions.h"):
    outfname = "src/fileviewer.extensions.h"
else:
    raise RuntimeError("run this script from project root directory")
with open(outfname, "rt") as f:
    orig_content = f.read()
# NOTE(review): the markers are interpolated into the pattern without
# re.escape(); the '.' and '/' they contain happen to be harmless here,
# but escaping would be safer -- confirm.
content = re.sub('%s(.*?)%s' % (MARKER_BEGIN, MARKER_END),
                 output,
                 orig_content,
                 0,
                 re.DOTALL)
with open(outfname, "wt") as f:
    f.write(content)
print(output)
sys.stderr.write(f'Generated code written to {outfname}\n')
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-13 18:08
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration for django-cas-ng: creates the two
    # ticket-tracking tables. Do not edit by hand once applied.

    initial = True

    dependencies = [
        # Works against whatever user model the project declared via AUTH_USER_MODEL.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='ProxyGrantingTicket',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('session_key', models.CharField(blank=True, max_length=255, null=True)),
                ('pgtiou', models.CharField(blank=True, max_length=255, null=True)),
                ('pgt', models.CharField(blank=True, max_length=255, null=True)),
                ('date', models.DateTimeField(auto_now_add=True)),
                # related_name='+' disables the reverse accessor on the user model.
                ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='SessionTicket',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('session_key', models.CharField(max_length=255)),
                ('ticket', models.CharField(max_length=255)),
            ],
        ),
        # At most one proxy-granting ticket per (session, user) pair.
        migrations.AlterUniqueTogether(
            name='proxygrantingticket',
            unique_together=set([('session_key', 'user')]),
        ),
    ]
| bgroff/django-cas-ng | django_cas_ng/migrations/0001_initial.py | Python | mit | 1,628 |
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test replace by fee code
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
import binascii
# Maximum number of transactions a single replacement may evict from the
# mempool (the anti-DoS limit enforced by the RBF code under test).
MAX_REPLACEMENT_LIMIT = 100
def satoshi_round(amount):
    """Round *amount* down to 8 decimal places (one satoshi) as a Decimal."""
    one_satoshi = Decimal('0.00000001')
    return Decimal(amount).quantize(one_satoshi, rounding=ROUND_DOWN)
def txToHex(tx):
    """Return the serialized transaction *tx* as a lowercase hex string."""
    raw = tx.serialize()
    return binascii.hexlify(raw).decode('utf-8')
def make_utxo(node, amount, confirmed=True, scriptPubKey=CScript([1])):
    """Create a txout with a given amount and scriptPubKey; return its COutPoint.

    Mines coins as needed.

    node         - RPC proxy of the node used to fund and relay the transactions.
    amount       - value of the created txout, in satoshis.
    confirmed    - txouts created will be confirmed in the blockchain;
                   unconfirmed otherwise.
    scriptPubKey - script the created txout pays to.
    """
    fee = 1*COIN
    # Mine until the wallet can cover amount + fee (getbalance is denominated
    # in BTC, hence the /COIN conversion).
    while node.getbalance() < satoshi_round((amount + fee)/COIN):
        node.generate(100)

    new_addr = node.getnewaddress()
    txid = node.sendtoaddress(new_addr, satoshi_round((amount+fee)/COIN))
    tx1 = node.getrawtransaction(txid, 1)
    txid = int(txid, 16)

    # Locate the funding transaction's output that pays our fresh address.
    i = None
    for i, txout in enumerate(tx1['vout']):
        if txout['scriptPubKey']['addresses'] == [new_addr]:
            break
    assert i is not None

    # Spend that output into a single txout with the requested value/script.
    tx2 = CTransaction()
    tx2.vin = [CTxIn(COutPoint(txid, i))]
    tx2.vout = [CTxOut(amount, scriptPubKey)]
    tx2.rehash()

    # (Fixed: the original serialized tx2 to hex twice, once as a discarded
    # no-op statement; use the module's txToHex helper once instead.)
    signed_tx = node.signrawtransaction(txToHex(tx2))

    txid = node.sendrawtransaction(signed_tx['hex'], True)

    # If requested, ensure txouts are confirmed.
    if confirmed:
        mempool_size = len(node.getrawmempool())
        while mempool_size > 0:
            node.generate(1)
            new_size = len(node.getrawmempool())
            # Error out if we have something stuck in the mempool, as this
            # would likely be a bug.
            assert(new_size < mempool_size)
            mempool_size = new_size

    return COutPoint(int(txid, 16), 0)
class ReplaceByFeeTest(BitcoinTestFramework):
    # Functional test of the BIP125 replace-by-fee (RBF) mempool policy.
    # Runs a single node with permissive relay/ancestor limits so the large
    # replacement chains and trees built below are accepted.

    def setup_network(self):
        self.nodes = []
        self.nodes.append(start_node(0, self.options.tmpdir, ["-maxorphantx=1000", "-debug",
                                                              "-relaypriority=0", "-whitelist=127.0.0.1",
                                                              "-limitancestorcount=50",
                                                              "-limitancestorsize=101",
                                                              "-limitdescendantcount=200",
                                                              "-limitdescendantsize=101"
                                                              ]))
        self.is_network_split = False

    def run_test(self):
        # Warm-up call: makes sure the wallet has mature, spendable coins.
        make_utxo(self.nodes[0], 1*COIN)

        print "Running test simple doublespend..."
        self.test_simple_doublespend()

        print "Running test doublespend chain..."
        self.test_doublespend_chain()

        print "Running test doublespend tree..."
        self.test_doublespend_tree()

        print "Running test replacement feeperkb..."
        self.test_replacement_feeperkb()

        print "Running test spends of conflicting outputs..."
        self.test_spends_of_conflicting_outputs()

        print "Running test new unconfirmed inputs..."
        self.test_new_unconfirmed_inputs()

        print "Running test too many replacements..."
        self.test_too_many_replacements()

        print "Running test opt-in..."
        self.test_opt_in()

        print "Running test prioritised transactions..."
        self.test_prioritised_transactions()

        print "Passed\n"

    def test_simple_doublespend(self):
        """Simple doublespend"""
        tx0_outpoint = make_utxo(self.nodes[0], 1.1*COIN)

        tx1a = CTransaction()
        tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
        tx1a_hex = txToHex(tx1a)
        tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)

        # Should fail because we haven't changed the fee
        tx1b = CTransaction()
        tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        tx1b.vout = [CTxOut(1*COIN, CScript([b'b']))]
        tx1b_hex = txToHex(tx1b)

        try:
            tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
        except JSONRPCException as exp:
            assert_equal(exp.error['code'], -26) # insufficient fee
        else:
            assert(False)

        # Extra 0.1 BTC fee
        tx1b = CTransaction()
        tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        tx1b.vout = [CTxOut(0.9*COIN, CScript([b'b']))]
        tx1b_hex = txToHex(tx1b)
        tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)

        # The replacement must have evicted the original from the mempool.
        mempool = self.nodes[0].getrawmempool()

        assert (tx1a_txid not in mempool)
        assert (tx1b_txid in mempool)

        assert_equal(tx1b_hex, self.nodes[0].getrawtransaction(tx1b_txid))

    def test_doublespend_chain(self):
        """Doublespend of a long chain"""

        initial_nValue = 50*COIN
        tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)

        # Build a chain of 1-in-1-out txs, each paying a 1 BTC fee, until
        # 40 BTC of cumulative fees have been spent.
        prevout = tx0_outpoint
        remaining_value = initial_nValue
        chain_txids = []
        while remaining_value > 10*COIN:
            remaining_value -= 1*COIN
            tx = CTransaction()
            tx.vin = [CTxIn(prevout, nSequence=0)]
            tx.vout = [CTxOut(remaining_value, CScript([1]))]
            tx_hex = txToHex(tx)
            txid = self.nodes[0].sendrawtransaction(tx_hex, True)
            chain_txids.append(txid)
            prevout = COutPoint(int(txid, 16), 0)

        # Whether the double-spend is allowed is evaluated by including all
        # child fees - 40 BTC - so this attempt is rejected.
        dbl_tx = CTransaction()
        dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        dbl_tx.vout = [CTxOut(initial_nValue - 30*COIN, CScript([1]))]
        dbl_tx_hex = txToHex(dbl_tx)

        try:
            self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
        except JSONRPCException as exp:
            assert_equal(exp.error['code'], -26) # insufficient fee
        else:
            assert(False) # transaction mistakenly accepted!

        # Accepted with sufficient fee
        dbl_tx = CTransaction()
        dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        dbl_tx.vout = [CTxOut(1*COIN, CScript([1]))]
        dbl_tx_hex = txToHex(dbl_tx)
        self.nodes[0].sendrawtransaction(dbl_tx_hex, True)

        # The whole chain must have been evicted.
        mempool = self.nodes[0].getrawmempool()
        for doublespent_txid in chain_txids:
            assert(doublespent_txid not in mempool)

    def test_doublespend_tree(self):
        """Doublespend of a big tree of transactions"""

        initial_nValue = 50*COIN
        tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)

        # Generator that fans `prevout` out into a tree of transactions,
        # `tree_width` outputs per node, yielding each tx as it is sent.
        # _total_txs is a single-element list used as a mutable counter
        # shared across the recursion.
        def branch(prevout, initial_value, max_txs, tree_width=5, fee=0.0001*COIN, _total_txs=None):
            if _total_txs is None:
                _total_txs = [0]
            if _total_txs[0] >= max_txs:
                return

            txout_value = (initial_value - fee) // tree_width
            if txout_value < fee:
                return

            vout = [CTxOut(txout_value, CScript([i+1]))
                    for i in range(tree_width)]
            tx = CTransaction()
            tx.vin = [CTxIn(prevout, nSequence=0)]
            tx.vout = vout
            tx_hex = txToHex(tx)

            assert(len(tx.serialize()) < 100000)
            txid = self.nodes[0].sendrawtransaction(tx_hex, True)
            yield tx
            _total_txs[0] += 1

            txid = int(txid, 16)

            for i, txout in enumerate(tx.vout):
                for x in branch(COutPoint(txid, i), txout_value,
                                max_txs,
                                tree_width=tree_width, fee=fee,
                                _total_txs=_total_txs):
                    yield x

        fee = 0.0001*COIN
        n = MAX_REPLACEMENT_LIMIT
        tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee))
        assert_equal(len(tree_txs), n)

        # Attempt double-spend, will fail because too little fee paid
        dbl_tx = CTransaction()
        dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        dbl_tx.vout = [CTxOut(initial_nValue - fee*n, CScript([1]))]
        dbl_tx_hex = txToHex(dbl_tx)
        try:
            self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
        except JSONRPCException as exp:
            assert_equal(exp.error['code'], -26) # insufficient fee
        else:
            assert(False)

        # 1 BTC fee is enough
        dbl_tx = CTransaction()
        dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        dbl_tx.vout = [CTxOut(initial_nValue - fee*n - 1*COIN, CScript([1]))]
        dbl_tx_hex = txToHex(dbl_tx)
        self.nodes[0].sendrawtransaction(dbl_tx_hex, True)

        mempool = self.nodes[0].getrawmempool()

        for tx in tree_txs:
            tx.rehash()
            assert (tx.hash not in mempool)

        # Try again, but with more total transactions than the "max txs
        # double-spent at once" anti-DoS limit.
        for n in (MAX_REPLACEMENT_LIMIT+1, MAX_REPLACEMENT_LIMIT*2):
            fee = 0.0001*COIN
            tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
            tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee))
            assert_equal(len(tree_txs), n)

            dbl_tx = CTransaction()
            dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
            dbl_tx.vout = [CTxOut(initial_nValue - 2*fee*n, CScript([1]))]
            dbl_tx_hex = txToHex(dbl_tx)
            try:
                self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
            except JSONRPCException as exp:
                assert_equal(exp.error['code'], -26)
                assert_equal("too many potential replacements" in exp.error['message'], True)
            else:
                assert(False)

            # The tree must have been left intact; getrawtransaction raises
            # if any of them had been evicted.
            for tx in tree_txs:
                tx.rehash()
                self.nodes[0].getrawtransaction(tx.hash)

    def test_replacement_feeperkb(self):
        """Replacement requires fee-per-KB to be higher"""
        tx0_outpoint = make_utxo(self.nodes[0], 1.1*COIN)

        tx1a = CTransaction()
        tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
        tx1a_hex = txToHex(tx1a)
        tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)

        # Higher fee, but the fee per KB is much lower, so the replacement is
        # rejected.  The huge 999000-byte output script inflates the tx size.
        tx1b = CTransaction()
        tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        tx1b.vout = [CTxOut(0.001*COIN, CScript([b'a'*999000]))]
        tx1b_hex = txToHex(tx1b)

        try:
            tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
        except JSONRPCException as exp:
            assert_equal(exp.error['code'], -26) # insufficient fee
        else:
            assert(False)

    def test_spends_of_conflicting_outputs(self):
        """Replacements that spend conflicting tx outputs are rejected"""
        utxo1 = make_utxo(self.nodes[0], 1.2*COIN)
        utxo2 = make_utxo(self.nodes[0], 3.0*COIN)

        tx1a = CTransaction()
        tx1a.vin = [CTxIn(utxo1, nSequence=0)]
        tx1a.vout = [CTxOut(1.1*COIN, CScript([b'a']))]
        tx1a_hex = txToHex(tx1a)
        tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)

        tx1a_txid = int(tx1a_txid, 16)

        # Direct spend an output of the transaction we're replacing.
        tx2 = CTransaction()
        tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0)]
        tx2.vin.append(CTxIn(COutPoint(tx1a_txid, 0), nSequence=0))
        tx2.vout = tx1a.vout
        tx2_hex = txToHex(tx2)

        try:
            tx2_txid = self.nodes[0].sendrawtransaction(tx2_hex, True)
        except JSONRPCException as exp:
            assert_equal(exp.error['code'], -26)
        else:
            assert(False)

        # Spend tx1a's output to test the indirect case.
        tx1b = CTransaction()
        tx1b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)]
        tx1b.vout = [CTxOut(1.0*COIN, CScript([b'a']))]
        tx1b_hex = txToHex(tx1b)
        tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
        tx1b_txid = int(tx1b_txid, 16)

        tx2 = CTransaction()
        tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0),
                   CTxIn(COutPoint(tx1b_txid, 0))]
        tx2.vout = tx1a.vout
        tx2_hex = txToHex(tx2)

        try:
            tx2_txid = self.nodes[0].sendrawtransaction(tx2_hex, True)
        except JSONRPCException as exp:
            assert_equal(exp.error['code'], -26)
        else:
            assert(False)

    def test_new_unconfirmed_inputs(self):
        """Replacements that add new unconfirmed inputs are rejected"""
        confirmed_utxo = make_utxo(self.nodes[0], 1.1*COIN)
        unconfirmed_utxo = make_utxo(self.nodes[0], 0.1*COIN, False)

        tx1 = CTransaction()
        tx1.vin = [CTxIn(confirmed_utxo)]
        tx1.vout = [CTxOut(1.0*COIN, CScript([b'a']))]
        tx1_hex = txToHex(tx1)
        tx1_txid = self.nodes[0].sendrawtransaction(tx1_hex, True)

        # Same confirmed input plus a new *unconfirmed* one -> must be rejected.
        tx2 = CTransaction()
        tx2.vin = [CTxIn(confirmed_utxo), CTxIn(unconfirmed_utxo)]
        tx2.vout = tx1.vout
        tx2_hex = txToHex(tx2)

        try:
            tx2_txid = self.nodes[0].sendrawtransaction(tx2_hex, True)
        except JSONRPCException as exp:
            assert_equal(exp.error['code'], -26)
        else:
            assert(False)

    def test_too_many_replacements(self):
        """Replacements that evict too many transactions are rejected"""
        # Try directly replacing more than MAX_REPLACEMENT_LIMIT
        # transactions

        # Start by creating a single transaction with many outputs
        initial_nValue = 10*COIN
        utxo = make_utxo(self.nodes[0], initial_nValue)
        fee = 0.0001*COIN
        split_value = int((initial_nValue-fee)/(MAX_REPLACEMENT_LIMIT+1))
        actual_fee = initial_nValue - split_value*(MAX_REPLACEMENT_LIMIT+1)

        outputs = []
        for i in range(MAX_REPLACEMENT_LIMIT+1):
            outputs.append(CTxOut(split_value, CScript([1])))

        splitting_tx = CTransaction()
        splitting_tx.vin = [CTxIn(utxo, nSequence=0)]
        splitting_tx.vout = outputs
        splitting_tx_hex = txToHex(splitting_tx)

        txid = self.nodes[0].sendrawtransaction(splitting_tx_hex, True)
        txid = int(txid, 16)

        # Now spend each of those outputs individually
        for i in range(MAX_REPLACEMENT_LIMIT+1):
            tx_i = CTransaction()
            tx_i.vin = [CTxIn(COutPoint(txid, i), nSequence=0)]
            tx_i.vout = [CTxOut(split_value-fee, CScript([b'a']))]
            tx_i_hex = txToHex(tx_i)
            self.nodes[0].sendrawtransaction(tx_i_hex, True)

        # Now create doublespend of the whole lot; should fail.
        # Need a big enough fee to cover all spending transactions and have
        # a higher fee rate
        double_spend_value = (split_value-100*fee)*(MAX_REPLACEMENT_LIMIT+1)
        inputs = []
        for i in range(MAX_REPLACEMENT_LIMIT+1):
            inputs.append(CTxIn(COutPoint(txid, i), nSequence=0))
        double_tx = CTransaction()
        double_tx.vin = inputs
        double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))]
        double_tx_hex = txToHex(double_tx)

        try:
            self.nodes[0].sendrawtransaction(double_tx_hex, True)
        except JSONRPCException as exp:
            assert_equal(exp.error['code'], -26)
            assert_equal("too many potential replacements" in exp.error['message'], True)
        else:
            assert(False)

        # If we remove an input, it should pass
        double_tx = CTransaction()
        double_tx.vin = inputs[0:-1]
        double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))]
        double_tx_hex = txToHex(double_tx)
        self.nodes[0].sendrawtransaction(double_tx_hex, True)

    def test_opt_in(self):
        """ Replacing should only work if orig tx opted in """
        tx0_outpoint = make_utxo(self.nodes[0], 1.1*COIN)

        # Create a non-opting in transaction
        # (nSequence=0xffffffff means "final": no BIP125 opt-in)
        tx1a = CTransaction()
        tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0xffffffff)]
        tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
        tx1a_hex = txToHex(tx1a)
        tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)

        # Shouldn't be able to double-spend
        tx1b = CTransaction()
        tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        tx1b.vout = [CTxOut(0.9*COIN, CScript([b'b']))]
        tx1b_hex = txToHex(tx1b)

        try:
            tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
        except JSONRPCException as exp:
            assert_equal(exp.error['code'], -26)
        else:
            print tx1b_txid
            assert(False)

        tx1_outpoint = make_utxo(self.nodes[0], 1.1*COIN)

        # Create a different non-opting in transaction
        # (0xfffffffe is still above the BIP125 opt-in threshold)
        tx2a = CTransaction()
        tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0xfffffffe)]
        tx2a.vout = [CTxOut(1*COIN, CScript([b'a']))]
        tx2a_hex = txToHex(tx2a)
        tx2a_txid = self.nodes[0].sendrawtransaction(tx2a_hex, True)

        # Still shouldn't be able to double-spend
        tx2b = CTransaction()
        tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)]
        tx2b.vout = [CTxOut(0.9*COIN, CScript([b'b']))]
        tx2b_hex = txToHex(tx2b)

        try:
            tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, True)
        except JSONRPCException as exp:
            assert_equal(exp.error['code'], -26)
        else:
            assert(False)

        # Now create a new transaction that spends from tx1a and tx2a
        # opt-in on one of the inputs
        # Transaction should be replaceable on either input

        tx1a_txid = int(tx1a_txid, 16)
        tx2a_txid = int(tx2a_txid, 16)

        tx3a = CTransaction()
        tx3a.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0xffffffff),
                    CTxIn(COutPoint(tx2a_txid, 0), nSequence=0xfffffffd)]
        tx3a.vout = [CTxOut(0.9*COIN, CScript([b'c'])), CTxOut(0.9*COIN, CScript([b'd']))]
        tx3a_hex = txToHex(tx3a)

        self.nodes[0].sendrawtransaction(tx3a_hex, True)

        tx3b = CTransaction()
        tx3b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)]
        tx3b.vout = [CTxOut(0.5*COIN, CScript([b'e']))]
        tx3b_hex = txToHex(tx3b)

        tx3c = CTransaction()
        tx3c.vin = [CTxIn(COutPoint(tx2a_txid, 0), nSequence=0)]
        tx3c.vout = [CTxOut(0.5*COIN, CScript([b'f']))]
        tx3c_hex = txToHex(tx3c)

        self.nodes[0].sendrawtransaction(tx3b_hex, True)
        # If tx3b was accepted, tx3c won't look like a replacement,
        # but make sure it is accepted anyway
        self.nodes[0].sendrawtransaction(tx3c_hex, True)

    def test_prioritised_transactions(self):
        """Fee deltas set via prioritisetransaction are honoured by RBF."""
        # Ensure that fee deltas used via prioritisetransaction are
        # correctly used by replacement logic

        # 1. Check that feeperkb uses modified fees
        tx0_outpoint = make_utxo(self.nodes[0], 1.1*COIN)

        tx1a = CTransaction()
        tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
        tx1a_hex = txToHex(tx1a)
        tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)

        # Higher fee, but the actual fee per KB is much lower.
        tx1b = CTransaction()
        tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        tx1b.vout = [CTxOut(0.001*COIN, CScript([b'a'*740000]))]
        tx1b_hex = txToHex(tx1b)

        # Verify tx1b cannot replace tx1a.
        try:
            tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
        except JSONRPCException as exp:
            assert_equal(exp.error['code'], -26)
        else:
            assert(False)

        # Use prioritisetransaction to set tx1a's fee to 0.
        self.nodes[0].prioritisetransaction(tx1a_txid, 0, int(-0.1*COIN))

        # Now tx1b should be able to replace tx1a
        tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)

        assert(tx1b_txid in self.nodes[0].getrawmempool())

        # 2. Check that absolute fee checks use modified fee.
        tx1_outpoint = make_utxo(self.nodes[0], 1.1*COIN)

        tx2a = CTransaction()
        tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0)]
        tx2a.vout = [CTxOut(1*COIN, CScript([b'a']))]
        tx2a_hex = txToHex(tx2a)
        tx2a_txid = self.nodes[0].sendrawtransaction(tx2a_hex, True)

        # Lower fee, but we'll prioritise it
        tx2b = CTransaction()
        tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)]
        tx2b.vout = [CTxOut(1.01*COIN, CScript([b'a']))]
        tx2b.rehash()
        tx2b_hex = txToHex(tx2b)

        # Verify tx2b cannot replace tx2a.
        try:
            tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, True)
        except JSONRPCException as exp:
            assert_equal(exp.error['code'], -26)
        else:
            assert(False)

        # Now prioritise tx2b to have a higher modified fee
        self.nodes[0].prioritisetransaction(tx2b.hash, 0, int(0.1*COIN))

        # tx2b should now be accepted
        tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, True)

        assert(tx2b_txid in self.nodes[0].getrawmempool())
# Script entry point: run the full replace-by-fee test suite.
if __name__ == '__main__':
    ReplaceByFeeTest().main()
| accraze/bitcoin | qa/rpc-tests/replace-by-fee.py | Python | mit | 21,993 |
import copy
from django.conf import settings
from django.test import override_settings
def override_openstack_settings(**kwargs):
    """Return a test decorator/context manager that applies *kwargs* on top of
    a deep copy of the WALDUR_OPENSTACK settings dict."""
    merged = copy.deepcopy(settings.WALDUR_OPENSTACK)
    merged.update(kwargs)
    return override_settings(WALDUR_OPENSTACK=merged)
| opennode/nodeconductor-assembly-waldur | src/waldur_openstack/openstack/tests/helpers.py | Python | mit | 282 |
from __future__ import print_function
from __future__ import unicode_literals
import re
from netmiko.cisco_base_connection import CiscoSSHConnection
class CiscoXrSSH(CiscoSSHConnection):
    """Netmiko connection driver for Cisco IOS-XR devices over SSH."""

    def session_preparation(self):
        """Prepare the session after the connection has been established."""
        self.set_base_prompt()
        self.disable_paging()
        # 511 is the widest terminal IOS-XR accepts; avoids wrapped command echo.
        self.set_terminal_width(command='terminal width 511')

    def send_config_set(self, config_commands=None, exit_config_mode=True, **kwargs):
        """IOS-XR requires you not exit from configuration mode.

        exit_config_mode is accepted for signature compatibility but always
        forced to False: leaving config mode before 'commit' would discard
        the candidate configuration.
        """
        return super(CiscoXrSSH, self).send_config_set(config_commands=config_commands,
                                                       exit_config_mode=False, **kwargs)

    def commit(self, confirm=False, confirm_delay=None, comment='', label='', delay_factor=1):
        """
        Commit the candidate configuration.

        default (no options):
            command_string = commit
        confirm and confirm_delay:
            command_string = commit confirmed <confirm_delay>
        label (which is a label name):
            command_string = commit label <label>
        comment:
            command_string = commit comment <comment>

        supported combinations
        label and confirm:
            command_string = commit label <label> confirmed <confirm_delay>
        label and comment:
            command_string = commit label <label> comment <comment>

        All other combinations will result in an exception.

        failed commit message:
        % Failed to commit one or more configuration items during a pseudo-atomic operation. All
        changes made have been reverted. Please issue 'show configuration failed [inheritance]'
        from this session to view the errors

        message XR shows if other commits occurred:
        One or more commits have occurred from other configuration sessions since this session
        started or since the last commit was made from this session. You can use the 'show
        configuration commit changes' command to browse the changes.

        Exit of configuration mode with pending changes will cause the changes to be discarded and
        an exception to be generated.
        """
        delay_factor = self.select_delay_factor(delay_factor)
        # confirm and confirm_delay must be supplied together; comment and
        # confirm are mutually exclusive (see supported combinations above).
        if confirm and not confirm_delay:
            raise ValueError("Invalid arguments supplied to XR commit")
        if confirm_delay and not confirm:
            raise ValueError("Invalid arguments supplied to XR commit")
        if comment and confirm:
            raise ValueError("Invalid arguments supplied to XR commit")

        # wrap the comment in quotes
        if comment:
            if '"' in comment:
                raise ValueError("Invalid comment contains double quote")
            comment = '"{0}"'.format(comment)

        label = str(label)
        # Substrings of the device output that indicate a failed commit.
        error_marker = 'Failed to'
        alt_error_marker = 'One or more commits have occurred from other'

        # Select proper command string based on arguments provided
        if label:
            if comment:
                command_string = 'commit label {0} comment {1}'.format(label, comment)
            elif confirm:
                command_string = 'commit label {0} confirmed {1}'.format(label, str(confirm_delay))
            else:
                command_string = 'commit label {0}'.format(label)
        elif confirm:
            command_string = 'commit confirmed {0}'.format(str(confirm_delay))
        elif comment:
            command_string = 'commit comment {0}'.format(comment)
        else:
            command_string = 'commit'

        # Enter config mode (if necessary)
        output = self.config_mode()
        output += self.send_command_expect(command_string, strip_prompt=False, strip_command=False,
                                           delay_factor=delay_factor)
        if error_marker in output:
            raise ValueError("Commit failed with the following errors:\n\n{0}".format(output))
        if alt_error_marker in output:
            # Other commits occurred, don't proceed with commit
            # ("no" answers the device's confirmation prompt).
            output += self.send_command_timing("no", strip_prompt=False, strip_command=False,
                                               delay_factor=delay_factor)
            raise ValueError("Commit failed with the following errors:\n\n{0}".format(output))

        return output

    def exit_config_mode(self, exit_config='end'):
        """Exit configuration mode, answering 'no' to any uncommitted-changes
        prompt so pending changes are discarded rather than committed."""
        output = ''
        if self.check_config_mode():
            output = self.send_command_timing(exit_config, strip_prompt=False, strip_command=False)
            if "Uncommitted changes found" in output:
                output += self.send_command_timing('no\n', strip_prompt=False, strip_command=False)
            if self.check_config_mode():
                raise ValueError("Failed to exit configuration mode")
        return output

    @staticmethod
    def normalize_linefeeds(a_string):
        r"""Convert '\r\n', '\r\r\n', '\n\r', or '\r' to '\n'."""
        newline = re.compile(r'(\r\r\n|\r\n|\n\r|\r)')
        return newline.sub('\n', a_string)
| shamanu4/netmiko | netmiko/cisco/cisco_xr_ssh.py | Python | mit | 5,165 |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test pruning code
# ********
# WARNING:
# This test uses 4GB of disk space.
# This test takes 30 mins or more (up to 2 hours)
# ********
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
import os
# Number of most-recent blocks a pruning node must always keep on disk.
MIN_BLOCKS_TO_KEEP = 288

# Rescans start at the earliest block up to 2 hours before a key timestamp, so
# the manual prune RPC avoids pruning blocks in the same window to be
# compatible with pruning based on key creation time.
RESCAN_WINDOW = 2 * 60 * 60
def calc_usage(blockdir):
    """Return the total size, in MiB, of the plain files directly inside
    *blockdir* (which must include a trailing path separator)."""
    total_bytes = 0
    for name in os.listdir(blockdir):
        path = blockdir + name
        if os.path.isfile(path):
            total_bytes += os.path.getsize(path)
    return total_bytes / (1024. * 1024.)
class PruneTest(BitcoinTestFramework):
    def __init__(self):
        """Configure the test: six nodes, started from a clean regtest chain."""
        super().__init__()
        self.setup_clean_chain = True
        self.num_nodes = 6

        # Cache for utxos, as the listunspent may take a long time later in the test
        self.utxo_cache_0 = []
        self.utxo_cache_1 = []
    def setup_network(self):
        """Start the six nodes with their role-specific options and connect them.

        Nodes 0/1 mine, node 2 prunes automatically (-prune=550), nodes 3/4
        are later restarted for manual pruning, node 5 tests the wallet in
        prune mode and stays disconnected.
        """
        self.nodes = []
        self.is_network_split = False

        # Create nodes 0 and 1 to mine
        self.nodes.append(start_node(0, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900))
        self.nodes.append(start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900))

        # Create node 2 to test pruning
        self.nodes.append(start_node(2, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-prune=550"], timewait=900))
        self.prunedir = self.options.tmpdir+"/node2/regtest/blocks/"

        # Create nodes 3 and 4 to test manual pruning (they will be re-started with manual pruning later)
        self.nodes.append(start_node(3, self.options.tmpdir, ["-debug=0","-maxreceivebuffer=20000","-blockmaxsize=999000"], timewait=900))
        self.nodes.append(start_node(4, self.options.tmpdir, ["-debug=0","-maxreceivebuffer=20000","-blockmaxsize=999000"], timewait=900))

        # Create nodes 5 to test wallet in prune mode, but do not connect
        self.nodes.append(start_node(5, self.options.tmpdir, ["-debug=0", "-prune=550"]))

        # Determine default relay fee
        self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"]

        connect_nodes(self.nodes[0], 1)
        connect_nodes(self.nodes[1], 2)
        connect_nodes(self.nodes[2], 0)
        connect_nodes(self.nodes[0], 3)
        connect_nodes(self.nodes[0], 4)
        sync_blocks(self.nodes[0:5])
    def create_big_chain(self):
        """Mine enough block data (>550MiB) to later trigger pruning on node 2."""
        # Start by creating some coinbases we can spend later
        self.nodes[1].generate(200)
        sync_blocks(self.nodes[0:2])
        self.nodes[0].generate(150)
        # Then mine enough full blocks to create more than 550MiB of data
        for i in range(645):
            mine_large_block(self.nodes[0], self.utxo_cache_0)

        sync_blocks(self.nodes[0:5])
    def test_height_min(self):
        """Check the first block file survives until enough blocks are mined,
        then gets pruned and disk usage drops below the 550MiB target."""
        if not os.path.isfile(self.prunedir+"blk00000.dat"):
            raise AssertionError("blk00000.dat is missing, pruning too early")
        print("Success")
        print("Though we're already using more than 550MiB, current usage:", calc_usage(self.prunedir))
        print("Mining 25 more blocks should cause the first block file to be pruned")
        # Pruning doesn't run until we're allocating another chunk, 20 full blocks past the height cutoff will ensure this
        for i in range(25):
            mine_large_block(self.nodes[0], self.utxo_cache_0)

        # Poll (with a 30s timeout) for the first block file to disappear.
        waitstart = time.time()
        while os.path.isfile(self.prunedir+"blk00000.dat"):
            time.sleep(0.1)
            if time.time() - waitstart > 30:
                raise AssertionError("blk00000.dat not pruned when it should be")

        print("Success")
        usage = calc_usage(self.prunedir)
        print("Usage should be below target:", usage)
        if (usage > 550):
            raise AssertionError("Pruning target not being met")
    def create_chain_with_staleblocks(self):
        """Repeatedly fork the chain so node 2 accumulates stale block data."""
        # Create stale blocks in manageable sized chunks
        print("Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds")

        for j in range(12):
            # Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain
            # Node 2 stays connected, so it hears about the stale blocks and then reorg's when node0 reconnects
            # Stopping node 0 also clears its mempool, so it doesn't have node1's transactions to accidentally mine
            self.stop_node(0)
            self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900)
            # Mine 24 blocks in node 1
            for i in range(24):
                if j == 0:
                    mine_large_block(self.nodes[1], self.utxo_cache_1)
                else:
                    self.nodes[1].generate(1) #tx's already in mempool from previous disconnects

            # Reorg back with 25 block chain from node 0
            for i in range(25):
                mine_large_block(self.nodes[0], self.utxo_cache_0)

            # Create connections in the order so both nodes can see the reorg at the same time
            connect_nodes(self.nodes[1], 0)
            connect_nodes(self.nodes[2], 0)
            sync_blocks(self.nodes[0:3])

        print("Usage can be over target because of high stale rate:", calc_usage(self.prunedir))
    def reorg_test(self):
        """Force node 2 through a 288-deep reorg, then mine until pruning
        brings usage back under target.

        Returns (invalidheight, badhash): the fork height and the hash of the
        first invalidated block, used later by reorg_back().
        """
        # Node 1 will mine a 300 block chain starting 287 blocks back from Node 0 and Node 2's tip
        # This will cause Node 2 to do a reorg requiring 288 blocks of undo data to the reorg_test chain
        # Reboot node 1 to clear its mempool (hopefully make the invalidate faster)
        # Lower the block max size so we don't keep mining all our big mempool transactions (from disconnected blocks)
        self.stop_node(1)
        self.nodes[1]=start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=900)

        height = self.nodes[1].getblockcount()
        print("Current block height:", height)

        invalidheight = height-287
        badhash = self.nodes[1].getblockhash(invalidheight)
        print("Invalidating block at height:",invalidheight,badhash)
        self.nodes[1].invalidateblock(badhash)

        # We've now switched to our previously mined-24 block fork on node 1, but thats not what we want
        # So invalidate that fork as well, until we're on the same chain as node 0/2 (but at an ancestor 288 blocks ago)
        mainchainhash = self.nodes[0].getblockhash(invalidheight - 1)
        curhash = self.nodes[1].getblockhash(invalidheight - 1)
        while curhash != mainchainhash:
            self.nodes[1].invalidateblock(curhash)
            curhash = self.nodes[1].getblockhash(invalidheight - 1)

        assert(self.nodes[1].getblockcount() == invalidheight - 1)
        print("New best height", self.nodes[1].getblockcount())

        # Reboot node1 to clear those giant tx's from mempool
        self.stop_node(1)
        self.nodes[1]=start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=900)

        print("Generating new longer chain of 300 more blocks")
        self.nodes[1].generate(300)

        print("Reconnect nodes")
        connect_nodes(self.nodes[0], 1)
        connect_nodes(self.nodes[2], 1)
        sync_blocks(self.nodes[0:3], timeout=120)

        print("Verify height on node 2:",self.nodes[2].getblockcount())
        print("Usage possibly still high bc of stale blocks in block files:", calc_usage(self.prunedir))

        print("Mine 220 more blocks so we have requisite history (some blocks will be big and cause pruning of previous chain)")
        for i in range(22):
            # This can be slow, so do this in multiple RPC calls to avoid
            # RPC timeouts.
            self.nodes[0].generate(10) #node 0 has many large tx's in its mempool from the disconnects
        sync_blocks(self.nodes[0:3], timeout=300)

        usage = calc_usage(self.prunedir)
        print("Usage should be below target:", usage)
        if (usage > 550):
            raise AssertionError("Pruning target not being met")

        return invalidheight,badhash
def reorg_back(self):
    """Force node 2 to reorg back onto the original main chain,
    re-downloading blocks that it had previously pruned away."""
    # Verify that a block on the old main chain fork has been pruned away
    assert_raises_jsonrpc(-1, "Block not available (pruned data)", self.nodes[2].getblock, self.forkhash)
    print("Will need to redownload block",self.forkheight)

    # Verify that we have enough history to reorg back to the fork point
    # Although this is more than 288 blocks, because this chain was written more recently
    # and only its other 299 small and 220 large block are in the block files after it,
    # its expected to still be retained
    self.nodes[2].getblock(self.nodes[2].getblockhash(self.forkheight))

    first_reorg_height = self.nodes[2].getblockcount()
    curchainhash = self.nodes[2].getblockhash(self.mainchainheight)
    self.nodes[2].invalidateblock(curchainhash)
    goalbestheight = self.mainchainheight
    goalbesthash = self.mainchainhash2

    # As of 0.10 the current block download logic is not able to reorg to the original chain created in
    # create_chain_with_stale_blocks because it doesn't know of any peer thats on that chain from which to
    # redownload its missing blocks.
    # Invalidate the reorg_test chain in node 0 as well, it can successfully switch to the original chain
    # because it has all the block data.
    # However it must mine enough blocks to have a more work chain than the reorg_test chain in order
    # to trigger node 2's block download logic.
    # At this point node 2 is within 288 blocks of the fork point so it will preserve its ability to reorg
    if self.nodes[2].getblockcount() < self.mainchainheight:
        blocks_to_mine = first_reorg_height + 1 - self.mainchainheight
        print("Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed:", blocks_to_mine)
        self.nodes[0].invalidateblock(curchainhash)
        assert(self.nodes[0].getblockcount() == self.mainchainheight)
        assert(self.nodes[0].getbestblockhash() == self.mainchainhash2)
        goalbesthash = self.nodes[0].generate(blocks_to_mine)[-1]
        goalbestheight = first_reorg_height + 1

    print("Verify node 2 reorged back to the main chain, some blocks of which it had to redownload")
    # Poll (up to 900s) for node 2 to catch up to the goal height.
    waitstart = time.time()
    while self.nodes[2].getblockcount() < goalbestheight:
        time.sleep(0.1)
        if time.time() - waitstart > 900:
            raise AssertionError("Node 2 didn't reorg to proper height")
    assert(self.nodes[2].getbestblockhash() == goalbesthash)
    # Verify we can now have the data for a block previously pruned
    assert(self.nodes[2].getblock(self.forkhash)["height"] == self.forkheight)
def manual_test(self, node_number, use_timestamp):
    """Exercise the pruneblockchain RPC on node *node_number*.

    Prune targets are addressed by block height, or by block timestamp
    when *use_timestamp* is True."""
    # at this point, node has 995 blocks and has not yet run in prune mode
    node = self.nodes[node_number] = start_node(node_number, self.options.tmpdir, ["-debug=0"], timewait=900)
    assert_equal(node.getblockcount(), 995)
    assert_raises_jsonrpc(-1, "not in prune mode", node.pruneblockchain, 500)
    self.stop_node(node_number)

    # now re-start in manual pruning mode
    node = self.nodes[node_number] = start_node(node_number, self.options.tmpdir, ["-debug=0","-prune=1"], timewait=900)
    assert_equal(node.getblockcount(), 995)

    def height(index):
        # Translate a block index into the pruneblockchain argument:
        # either the block's timestamp (plus rescan window) or the height itself.
        if use_timestamp:
            return node.getblockheader(node.getblockhash(index))["time"] + RESCAN_WINDOW
        else:
            return index

    def prune(index, expected_ret=None):
        ret = node.pruneblockchain(height(index))
        # Check the return value. When use_timestamp is True, just check
        # that the return value is less than or equal to the expected
        # value, because when more than one block is generated per second,
        # a timestamp will not be granular enough to uniquely identify an
        # individual block.
        if expected_ret is None:
            expected_ret = index
        if use_timestamp:
            assert_greater_than(ret, 0)
            assert_greater_than(expected_ret + 1, ret)
        else:
            assert_equal(ret, expected_ret)

    def has_block(index):
        # True while blk{index:05}.dat still exists in the node's datadir.
        return os.path.isfile(self.options.tmpdir + "/node{}/regtest/blocks/blk{:05}.dat".format(node_number, index))

    # should not prune because chain tip of node 3 (995) < PruneAfterHeight (1000)
    assert_raises_jsonrpc(-1, "Blockchain is too short for pruning", node.pruneblockchain, height(500))

    # mine 6 blocks so we are at height 1001 (i.e., above PruneAfterHeight)
    node.generate(6)
    assert_equal(node.getblockchaininfo()["blocks"], 1001)

    # negative heights should raise an exception
    assert_raises_jsonrpc(-8, "Negative", node.pruneblockchain, -10)

    # height=100 too low to prune first block file so this is a no-op
    prune(100)
    if not has_block(0):
        raise AssertionError("blk00000.dat is missing when should still be there")

    # Does nothing
    node.pruneblockchain(height(0))
    if not has_block(0):
        raise AssertionError("blk00000.dat is missing when should still be there")

    # height=500 should prune first file
    prune(500)
    if has_block(0):
        raise AssertionError("blk00000.dat is still there, should be pruned by now")
    if not has_block(1):
        raise AssertionError("blk00001.dat is missing when should still be there")

    # height=650 should prune second file
    prune(650)
    if has_block(1):
        raise AssertionError("blk00001.dat is still there, should be pruned by now")

    # height=1000 should not prune anything more, because tip-288 is in blk00002.dat.
    prune(1000, 1001 - MIN_BLOCKS_TO_KEEP)
    # NOTE(review): this branch expects blk00002.dat to EXIST, but the
    # error message reads as if it should have been pruned — confirm the
    # message text is inverted.
    if not has_block(2):
        raise AssertionError("blk00002.dat is still there, should be pruned by now")

    # advance the tip so blk00002.dat and blk00003.dat can be pruned (the last 288 blocks should now be in blk00004.dat)
    node.generate(288)
    prune(1000)
    if has_block(2):
        raise AssertionError("blk00002.dat is still there, should be pruned by now")
    if has_block(3):
        raise AssertionError("blk00003.dat is still there, should be pruned by now")

    # stop node, start back up with auto-prune at 550MB, make sure still runs
    self.stop_node(node_number)
    self.nodes[node_number] = start_node(node_number, self.options.tmpdir, ["-debug=0","-prune=550"], timewait=900)

    print("Success")
def wallet_test(self):
    """Check that pruning-node wallets survive restart and rescan."""
    # check that the pruning node's wallet is still in good shape
    print("Stop and start pruning node to trigger wallet rescan")
    self.stop_node(2)
    start_node(2, self.options.tmpdir, ["-debug=1","-prune=550"])
    print("Success")

    # check that the wallet loads successfully when restarting a pruned node after IBD.
    # this was reported to fail in #7494.
    print ("Syncing node 5 to test wallet")
    connect_nodes(self.nodes[0], 5)
    nds = [self.nodes[0], self.nodes[5]]
    sync_blocks(nds, wait=5, timeout=300)
    self.stop_node(5) #stop and start to trigger rescan
    start_node(5, self.options.tmpdir, ["-debug=1","-prune=550"])
    print ("Success")
def run_test(self):
    """Drive the full pruning scenario; the inline ASCII diagrams show
    the expected chain topology at each stage."""
    print("Warning! This test requires 4GB of disk space and takes over 30 mins (up to 2 hours)")
    print("Mining a big blockchain of 995 blocks")
    self.create_big_chain()
    # Chain diagram key:
    # * blocks on main chain
    # +,&,$,@ blocks on other forks
    # X invalidated block
    # N1 Node 1
    #
    # Start by mining a simple chain that all nodes have
    # N0=N1=N2 **...*(995)

    # stop manual-pruning node with 995 blocks
    self.stop_node(3)
    self.stop_node(4)

    print("Check that we haven't started pruning yet because we're below PruneAfterHeight")
    self.test_height_min()
    # Extend this chain past the PruneAfterHeight
    # N0=N1=N2 **...*(1020)

    print("Check that we'll exceed disk space target if we have a very high stale block rate")
    self.create_chain_with_staleblocks()
    # Disconnect N0
    # And mine a 24 block chain on N1 and a separate 25 block chain on N0
    # N1=N2 **...*+...+(1044)
    # N0 **...**...**(1045)
    #
    # reconnect nodes causing reorg on N1 and N2
    # N1=N2 **...*(1020) *...**(1045)
    # \
    # +...+(1044)
    #
    # repeat this process until you have 12 stale forks hanging off the
    # main chain on N1 and N2
    # N0 *************************...***************************(1320)
    #
    # N1=N2 **...*(1020) *...**(1045) *.. ..**(1295) *...**(1320)
    # \ \ \
    # +...+(1044) &.. $...$(1319)

    # Save some current chain state for later use
    self.mainchainheight = self.nodes[2].getblockcount() #1320
    self.mainchainhash2 = self.nodes[2].getblockhash(self.mainchainheight)

    print("Check that we can survive a 288 block reorg still")
    (self.forkheight,self.forkhash) = self.reorg_test() #(1033, )
    # Now create a 288 block reorg by mining a longer chain on N1
    # First disconnect N1
    # Then invalidate 1033 on main chain and 1032 on fork so height is 1032 on main chain
    # N1 **...*(1020) **...**(1032)X..
    # \
    # ++...+(1031)X..
    #
    # Now mine 300 more blocks on N1
    # N1 **...*(1020) **...**(1032) @@...@(1332)
    # \ \
    # \ X...
    # \ \
    # ++...+(1031)X.. ..
    #
    # Reconnect nodes and mine 220 more blocks on N1
    # N1 **...*(1020) **...**(1032) @@...@@@(1552)
    # \ \
    # \ X...
    # \ \
    # ++...+(1031)X.. ..
    #
    # N2 **...*(1020) **...**(1032) @@...@@@(1552)
    # \ \
    # \ *...**(1320)
    # \ \
    # ++...++(1044) ..
    #
    # N0 ********************(1032) @@...@@@(1552)
    # \
    # *...**(1320)

    print("Test that we can rerequest a block we previously pruned if needed for a reorg")
    self.reorg_back()
    # Verify that N2 still has block 1033 on current chain (@), but not on main chain (*)
    # Invalidate 1033 on current chain (@) on N2 and we should be able to reorg to
    # original main chain (*), but will require redownload of some blocks
    # In order to have a peer we think we can download from, must also perform this invalidation
    # on N0 and mine a new longest chain to trigger.
    # Final result:
    # N0 ********************(1032) **...****(1553)
    # \
    # X@...@@@(1552)
    #
    # N2 **...*(1020) **...**(1032) **...****(1553)
    # \ \
    # \ X@...@@@(1552)
    # \
    # +..
    #
    # N1 doesn't change because 1033 on main chain (*) is invalid

    print("Test manual pruning with block indices")
    self.manual_test(3, use_timestamp=False)

    print("Test manual pruning with timestamps")
    self.manual_test(4, use_timestamp=True)

    print("Test wallet re-scan")
    self.wallet_test()

    print("Done")
if __name__ == '__main__':
    # Entry point: run the pruning regression test.
    PruneTest().main()
| psionin/smartcoin | qa/rpc-tests/pruning.py | Python | mit | 21,037 |
"""Supporting definitions for the Python regression tests."""

# Refuse direct top-level import: the module relies on being part of
# the test package.
if __name__ != 'test_support':
    raise ImportError, 'test_support must be imported from the test package'

import sys
class Error(Exception):
    """Base class for regression test exceptions."""
class TestFailed(Error):
    """Test failed."""
class TestSkipped(Error):
    """Test skipped.

    This can be raised to indicate that a test was deliberately
    skipped, but not because a feature wasn't available.  For
    example, if some resource can't be used, such as the network
    appears to be unavailable, this should be raised instead of
    TestFailed.
    """
class ResourceDenied(TestSkipped):
    """Test skipped because it requested a disallowed resource.

    This is raised when a test calls requires() for a resource that
    has not been enabled.  It is used to distinguish between expected
    and unexpected skips.
    """
verbose = 1              # Flag set to 0 by regrtest.py
use_resources = None     # Flag set to [] by regrtest.py
max_memuse = 0           # Disable bigmem tests (they will still be run with
                         # small sizes, to make sure they work.)

# _original_stdout is meant to hold stdout at the time regrtest began.
# This may be "the real" stdout, or IDLE's emulation of stdout, or whatever.
# The point is to have some flavor of stdout the user can actually see.
_original_stdout = None
def record_original_stdout(stdout):
    """Remember *stdout* as the stream that was active when regrtest
    started, for later retrieval by get_original_stdout()."""
    global _original_stdout
    _original_stdout = stdout
def get_original_stdout():
    """Return the stdout recorded at regrtest start; fall back to the
    current sys.stdout when nothing was recorded."""
    if _original_stdout:
        return _original_stdout
    return sys.stdout
def unload(name):
    """Drop module *name* from sys.modules; a miss is not an error."""
    sys.modules.pop(name, None)
def unlink(filename):
    """Best-effort removal of *filename*; missing files are ignored."""
    import os
    try:
        os.remove(filename)
    except OSError:
        # Most commonly the file simply does not exist.
        pass
def forget(modname):
    '''"Forget" a module: remove it from sys.modules and delete any
    compiled (.pyc/.pyo) files found along sys.path.'''
    unload(modname)
    import os
    base = modname + os.extsep
    for dirname in sys.path:
        # Remove both compiled forms independently: either one may
        # exist without the other.
        unlink(os.path.join(dirname, base + 'pyc'))
        unlink(os.path.join(dirname, base + 'pyo'))
def is_resource_enabled(resource):
    """Return True when *resource* is in the set enabled by regrtest.py."""
    if use_resources is None:
        return False
    return resource in use_resources
def requires(resource, msg=None):
    """Raise ResourceDenied unless *resource* has been enabled.

    Calls made from a __main__ module are always allowed, so running a
    test file directly never skips."""
    # see if the caller's module is __main__ - if so, treat as if
    # the resource was set
    caller_globals = sys._getframe().f_back.f_globals
    if caller_globals.get("__name__") == "__main__":
        return
    if is_resource_enabled(resource):
        return
    if msg is None:
        msg = "Use of the `%s' resource not enabled" % resource
    raise ResourceDenied(msg)
def bind_port(sock, host='', preferred_port=54321):
    """Try to bind the sock to a port.  If we are running multiple
    tests and we don't try multiple ports, the test can fails.  This
    makes the test more robust."""
    import socket, errno

    # some random ports that hopefully no one is listening on.
    for port in [preferred_port, 9907, 10243, 32999]:
        try:
            sock.bind((host, port))
            return port
        except socket.error, (err, msg):
            # "Address already in use" is expected when racing other
            # test runs; any other socket error is a real failure.
            if err != errno.EADDRINUSE:
                raise
            print >>sys.__stderr__, \
                ' WARNING: failed to listen on port %d, trying another' % port
    raise TestFailed, 'unable to find port to listen on'
# Relative tolerance used by fcmp() for fuzzy float comparison.
FUZZ = 1e-6

def fcmp(x, y): # fuzzy comparison function
    """cmp()-style comparison that treats floats equal within a relative
    FUZZ tolerance; tuples/lists are compared element-wise."""
    if type(x) == type(0.0) or type(y) == type(0.0):
        try:
            # coerce() may fail for uncomparable mixed types; fall
            # through to plain cmp() in that case.
            x, y = coerce(x, y)
            fuzz = (abs(x) + abs(y)) * FUZZ
            if abs(x-y) <= fuzz:
                return 0
        except:
            pass
    elif type(x) == type(y) and type(x) in (type(()), type([])):
        for i in range(min(len(x), len(y))):
            outcome = fcmp(x[i], y[i])
            if outcome != 0:
                return outcome
        # Equal prefixes: the shorter sequence compares smaller.
        return cmp(len(x), len(y))
    return cmp(x, y)
# Probe whether this build has unicode support.
try:
    unicode
    have_unicode = 1
except NameError:
    have_unicode = 0

# True when running under Jython.
is_jython = sys.platform.startswith('java')
import os

# Filename used for testing
if os.name == 'java':
    # Jython disallows @ in module names
    TESTFN = '$test'
elif os.name == 'riscos':
    TESTFN = 'testfile'
else:
    TESTFN = '@test'
# Unicode name only used if TEST_FN_ENCODING exists for the platform.
if have_unicode:
    # Assuming sys.getfilesystemencoding()!=sys.getdefaultencoding()
    # TESTFN_UNICODE is a filename that can be encoded using the
    # file system encoding, but *not* with the default (ascii) encoding
    if isinstance('', unicode):
        # python -U
        # XXX perhaps unicode() should accept Unicode strings?
        TESTFN_UNICODE = "@test-\xe0\xf2"
    else:
        # 2 latin characters.
        TESTFN_UNICODE = unicode("@test-\xe0\xf2", "latin-1")
    TESTFN_ENCODING = sys.getfilesystemencoding()
    # TESTFN_UNICODE_UNENCODEABLE is a filename that should *not* be
    # able to be encoded by *either* the default or filesystem encoding.
    # This test really only makes sense on Windows NT platforms
    # which have special Unicode support in posixmodule.
    if (not hasattr(sys, "getwindowsversion") or
            sys.getwindowsversion()[3] < 2): # 0=win32s or 1=9x/ME
        TESTFN_UNICODE_UNENCODEABLE = None
    else:
        # Japanese characters (I think - from bug 846133)
        TESTFN_UNICODE_UNENCODEABLE = eval('u"@test-\u5171\u6709\u3055\u308c\u308b"')
        try:
            # XXX - Note - should be using TESTFN_ENCODING here - but for
            # Windows, "mbcs" currently always operates as if in
            # errors=ignore' mode - hence we get '?' characters rather than
            # the exception.  'Latin1' operates as we expect - ie, fails.
            # See [ 850997 ] mbcs encoding ignores errors
            TESTFN_UNICODE_UNENCODEABLE.encode("Latin1")
        except UnicodeEncodeError:
            pass
        else:
            print \
                'WARNING: The filename %r CAN be encoded by the filesystem.  ' \
                'Unicode filename tests may not be effective' \
                % TESTFN_UNICODE_UNENCODEABLE
# Make sure we can write to TESTFN, try in /tmp if we can't
fp = None
try:
    fp = open(TESTFN, 'w+')
except IOError:
    TMP_TESTFN = os.path.join('/tmp', TESTFN)
    try:
        fp = open(TMP_TESTFN, 'w+')
        TESTFN = TMP_TESTFN
        del TMP_TESTFN
    except IOError:
        print ('WARNING: tests will fail, unable to write to: %s or %s' %
               (TESTFN, TMP_TESTFN))
if fp is not None:
    fp.close()
    unlink(TESTFN)
# Drop the probe temporaries so they don't leak through 'import *'.
del os, fp
def findfile(file, here=__file__):
    """Try to find a file on sys.path and the working directory.  If it is not
    found the argument passed to the function is returned (this does not
    necessarily signal failure; could still be the legitimate path)."""
    import os
    if os.path.isabs(file):
        return file
    # Search the directory of *here* first, then sys.path.
    for directory in [os.path.dirname(here)] + sys.path:
        candidate = os.path.join(directory, file)
        if os.path.exists(candidate):
            return candidate
    return file
def verify(condition, reason='test failed'):
    """Verify that condition is true.  If not, raise TestFailed.

    The optional argument reason can be given to provide
    a better error text.
    """
    if condition:
        return
    raise TestFailed(reason)
def vereq(a, b):
    """Raise TestFailed if a == b is false.

    This is better than verify(a == b) because, in case of failure, the
    error message incorporates repr(a) and repr(b) so you can see the
    inputs.

    Note that "not (a == b)" isn't necessarily the same as "a != b"; the
    former is tested.
    """
    if not (a == b):
        raise TestFailed, "%r == %r" % (a, b)
def sortdict(dict):
    """Like repr(dict), but with items in sorted key order.

    Produces stable output for doctests, where the arbitrary ordering
    of a plain dict repr would be flaky.
    """
    # sorted() replaces the items()/list.sort() two-step: same ordering,
    # no intermediate mutation, and it works on any items iterable.
    reprpairs = ["%r: %r" % pair for pair in sorted(dict.items())]
    withcommas = ", ".join(reprpairs)
    return "{%s}" % withcommas
def check_syntax(statement):
    """Verify that compiling *statement* raises SyntaxError; warn when
    it unexpectedly compiles cleanly."""
    try:
        compile(statement, '<string>', 'exec')
    except SyntaxError:
        pass
    else:
        print 'Missing SyntaxError: "%s"' % statement
def open_urlresource(url):
    """Return an open file for the resource at *url*.

    A locally cached copy (current or parent directory) is preferred;
    otherwise the file is downloaded, which requires the 'urlfetch'
    resource to be enabled."""
    import urllib, urlparse
    import os.path

    filename = urlparse.urlparse(url)[2].split('/')[-1] # '/': it's URL!

    # Use a cached copy when one exists nearby.
    for path in [os.path.curdir, os.path.pardir]:
        fn = os.path.join(path, filename)
        if os.path.exists(fn):
            return open(fn)

    requires('urlfetch')
    print >> get_original_stdout(), '\tfetching %s ...' % url
    fn, _ = urllib.urlretrieve(url, filename)
    return open(fn)
#=======================================================================
# Decorator for running a function in a different locale, correctly resetting
# it afterwards.

def run_with_locale(catstr, *locales):
    """Decorator factory: run the wrapped function with the first of
    *locales* that can be set for category *catstr* (e.g. 'LC_NUMERIC'),
    restoring the original locale afterwards.  If the locale module or
    the category is unavailable the function runs unchanged."""
    def decorator(func):
        def inner(*args, **kwds):
            try:
                import locale
                category = getattr(locale, catstr)
                # Query form (no second argument) returns the current
                # locale so it can be restored in the finally clause.
                orig_locale = locale.setlocale(category)
            except AttributeError:
                # if the test author gives us an invalid category string
                raise
            except:
                # cannot retrieve original locale, so do nothing
                locale = orig_locale = None
            else:
                for loc in locales:
                    try:
                        locale.setlocale(category, loc)
                        break
                    except:
                        pass

            # now run the function, resetting the locale on exceptions
            try:
                return func(*args, **kwds)
            finally:
                if locale and orig_locale:
                    locale.setlocale(category, orig_locale)
        inner.func_name = func.func_name
        inner.__doc__ = func.__doc__
        return inner
    return decorator
#=======================================================================
# Big-memory-test support. Separate from 'resources' because memory use should be configurable.

# Some handy shorthands. Note that these are used for byte-limits as well
# as size-limits, in the various bigmem tests
_1M = 1024*1024
_1G = 1024 * _1M
_2G = 2 * _1G

# Hack to get at the maximum value an internal index can take.
class _Dummy:
    def __getslice__(self, i, j):
        # An unbounded slice endpoint is clamped by the interpreter to
        # the maximum Py_ssize_t, which is exactly what we return.
        return j
MAX_Py_ssize_t = _Dummy()[:]
def set_memlimit(limit):
    """Parse a human-readable memory limit such as '2.5G' or '4096 Mb'
    and store the byte count in the module-global max_memuse."""
    import re
    global max_memuse
    multipliers = {
        'k': 1024,
        'm': _1M,
        'g': _1G,
        't': 1024*_1G,
    }
    m = re.match(r'(\d+(\.\d+)?) (K|M|G|T)b?$', limit,
                 re.IGNORECASE | re.VERBOSE)
    if m is None:
        raise ValueError('Invalid memory limit %r' % (limit,))
    memlimit = int(float(m.group(1)) * multipliers[m.group(3).lower()])
    # Clamp to the largest value an internal index can hold.
    if memlimit > MAX_Py_ssize_t:
        memlimit = MAX_Py_ssize_t
    if memlimit < _2G - 1:
        raise ValueError('Memory limit %r too low to be useful' % (limit,))
    max_memuse = memlimit
def bigmemtest(minsize, memuse, overhead=5*_1M):
    """Decorator for bigmem tests.

    'minsize' is the minimum useful size for the test (in arbitrary,
    test-interpreted units.) 'memuse' is the number of 'bytes per size' for
    the test, or a good estimate of it. 'overhead' specifies fixed overhead,
    independant of the testsize, and defaults to 5Mb.

    The decorator tries to guess a good value for 'size' and passes it to
    the decorated test function. If minsize * memuse is more than the
    allowed memory use (as defined by max_memuse), the test is skipped.
    Otherwise, minsize is adjusted upward to use up to max_memuse.
    """
    def decorator(f):
        def wrapper(self):
            if not max_memuse:
                # If max_memuse is 0 (the default),
                # we still want to run the tests with size set to a few kb,
                # to make sure they work. We still want to avoid using
                # too much memory, though, but we do that noisily.
                maxsize = 5147
                self.failIf(maxsize * memuse + overhead > 20 * _1M)
            else:
                maxsize = int((max_memuse - overhead) / memuse)
                if maxsize < minsize:
                    # Really ought to print 'test skipped' or something
                    if verbose:
                        sys.stderr.write("Skipping %s because of memory "
                                         "constraint\n" % (f.__name__,))
                    return
                # Try to keep some breathing room in memory use
                maxsize = max(maxsize - 50 * _1M, minsize)
            return f(self, maxsize)
        # Expose the sizing parameters for regrtest's bigmem bookkeeping.
        wrapper.minsize = minsize
        wrapper.memuse = memuse
        wrapper.overhead = overhead
        return wrapper
    return decorator
def bigaddrspacetest(f):
    """Decorator for tests that fill the address space."""
    def wrapper(self):
        # Only run when the configured limit covers the whole address
        # space; otherwise skip (noisily when verbose).
        if max_memuse >= MAX_Py_ssize_t:
            return f(self)
        if verbose:
            sys.stderr.write("Skipping %s because of memory "
                             "constraint\n" % (f.__name__,))
    return wrapper
#=======================================================================
# Preliminary PyUNIT integration.
import unittest
class BasicTestRunner:
    """Minimal runner: execute the suite and hand back the raw result."""

    def run(self, test):
        outcome = unittest.TestResult()
        test(outcome)
        return outcome
def run_suite(suite, testclass=None):
    """Run tests from a unittest.TestSuite-derived class."""
    if verbose:
        runner = unittest.TextTestRunner(sys.stdout, verbosity=2)
    else:
        runner = BasicTestRunner()

    result = runner.run(suite)
    if result.wasSuccessful():
        return

    errs, fails = result.errors, result.failures
    # A single error or single failure is reported directly; anything
    # else gets a summary message.
    if len(errs) == 1 and not fails:
        raise TestFailed(errs[0][1])
    if len(fails) == 1 and not errs:
        raise TestFailed(fails[0][1])
    if testclass is None:
        msg = "errors occurred; run in verbose mode for details"
    else:
        msg = "errors occurred in %s.%s" \
              % (testclass.__module__, testclass.__name__)
    raise TestFailed(msg)
def run_unittest(*classes):
    """Run tests from unittest.TestCase-derived classes."""
    suite = unittest.TestSuite()
    for cls in classes:
        # Suites and ready-made cases are added as-is; bare TestCase
        # classes are expanded into a suite of their test methods.
        if isinstance(cls, (unittest.TestSuite, unittest.TestCase)):
            suite.addTest(cls)
        else:
            suite.addTest(unittest.makeSuite(cls))
    if len(classes) == 1:
        testclass = classes[0]
    else:
        testclass = None
    run_suite(suite, testclass)
#=======================================================================
# doctest driver.

def run_doctest(module, verbosity=None):
    """Run doctest on the given module.  Return (#failures, #tests).

    If optional argument verbosity is not specified (or is None), pass
    test_support's belief about verbosity on to doctest.  Else doctest's
    usual behavior is used (it searches sys.argv for -v).
    """
    import doctest

    if verbosity is None:
        verbosity = verbose
    else:
        verbosity = None

    # Direct doctest output (normally just errors) to real stdout; doctest
    # output shouldn't be compared by regrtest.
    save_stdout = sys.stdout
    sys.stdout = get_original_stdout()
    try:
        f, t = doctest.testmod(module, verbose=verbosity)
        if f:
            raise TestFailed("%d of %d doctests failed" % (f, t))
    finally:
        sys.stdout = save_stdout
    if verbose:
        print 'doctest (%s) ... %d tests with zero failures' % (module.__name__, t)
    return f, t
#=======================================================================
# Threading support to prevent reporting refleaks when running regrtest.py -R

def threading_setup():
    """Snapshot the (active, limbo) thread counts for a later
    threading_cleanup() leak check."""
    import threading
    active_count = len(threading._active)
    limbo_count = len(threading._limbo)
    return active_count, limbo_count
def threading_cleanup(num_active, num_limbo):
    """Wait (up to ~1 second per counter) for the active and limbo
    thread counts to return to the snapshot values recorded by
    threading_setup()."""
    import threading
    import time

    _MAX_COUNT = 10
    for _attempt in range(_MAX_COUNT):
        if len(threading._active) == num_active:
            break
        time.sleep(0.1)

    for _attempt in range(_MAX_COUNT):
        if len(threading._limbo) == num_limbo:
            break
        time.sleep(0.1)
def reap_children():
    """Use this function at the end of test_main() whenever sub-processes
    are started.  This will help ensure that no extra children (zombies)
    stick around to hog resources and create problems when looking
    for refleaks.
    """
    import os
    # Reap all our dead child processes so we don't leave zombies around.
    # These hog resources and might be causing some of the buildbots to die.
    if not hasattr(os, 'waitpid'):
        return
    any_process = -1
    while True:
        try:
            # This will raise an exception on Windows.  That's ok.
            pid, status = os.waitpid(any_process, os.WNOHANG)
        except:
            break
        if pid == 0:
            # No more dead children to collect right now.
            break
| ruamel/ordereddict | test/unit/test_support.py | Python | mit | 17,653 |
#====================================================================================================
# C L A S S E S concerning the site description
#====================================================================================================
#---------------------------------------------------------------------------------------------------
"""
Class: SiteProperties(siteName='')
Each site will be fully described for our application in this class.
"""
#---------------------------------------------------------------------------------------------------
import time, statistics
class SiteProperties:
"A SiteProperties defines all needed site properties."
def __init__(self, siteName):
self.name = siteName
self.datasetRanks = {}
self.rankSum = 0
self.datasetSizes = {}
self.dsetIsValid = {}
self.dsetIsCustodial = {}
self.dsetLastCopy = {}
self.dsetIsPartial = {}
self.deprecated = {}
self.dsetReqTime = {}
self.dsetUpdTime = {}
self.dsetIsDone = {}
self.dsetNotUsedOnTape = {}
self.wishList = []
self.datasetsToDelete = []
self.protectedList = []
self.siteSizeGbV = 0
self.spaceTakenV = 0
self.spaceNotUsed = 0
self.spaceLCp = 0
self.space2free = 0
self.deleted = 0
self.protected = 0
self.globalDsetIndex = 0
self.epochTime = int(time.time())
def addDataset(self,dset,rank,size,valid,partial,custodial,depr,reqtime,updtime,wasused,isdone):
self.dsetIsValid[dset] = valid
self.dsetIsPartial[dset] = partial
self.dsetIsCustodial[dset] = custodial
self.datasetRanks[dset] = rank
self.datasetSizes[dset] = size
if depr:
self.deprecated[dset] = depr
self.spaceTakenV = self.spaceTakenV + size
self.dsetIsDone[dset] = isdone
self.dsetReqTime[dset] = reqtime
self.dsetUpdTime[dset] = updtime
self.rankSum = self.rankSum + rank*size
if wasused == 0:
self.spaceNotUsed = self.spaceNotUsed + size
def makeWishList(self, dataPropers, ncopyMin, banInvalid=True):
space = 0
self.wishList = []
space2free = self.space2free
addedExtra = 0
counter = 0
for datasetName in sorted(self.datasetRanks.keys(), cmp=self.compare):
counter = counter + 1
if counter < self.globalDsetIndex:
continue
if space > (space2free-self.deleted):
break
if datasetName in self.datasetsToDelete:
continue
if datasetName in self.protectedList:
continue
#custodial set can't be on deletion wish list
if self.dsetIsCustodial[datasetName] :
continue
#if dataPropers[datasetName].daysSinceUsed() > 540:
if dataPropers[datasetName].isFullOnTape():
#delta = (self.epochTime - self.dsetUpdTime[datasetName])/(60*60*24)
if dataPropers[datasetName].getGlobalRank() > 500:
#if delta > 500:
space = space + self.datasetSizes[datasetName]
self.wishList.append(datasetName)
dataPropers[datasetName].kickFromPool = True
print "exp at " + self.name + ": " + datasetName
#print datasetName
#addedExtra = addedExtra + 1
continue
if "/RECO" in datasetName:
delta = (self.epochTime - self.dsetUpdTime[datasetName])/(60*60*24)
#if dataPropers[datasetName].daysSinceUsed() > 180 and delta>180:
if delta > 180:
space = space + self.datasetSizes[datasetName]
self.wishList.append(datasetName)
dataPropers[datasetName].kickFromPool = True
print "RECO " + self.name + ": " + datasetName
continue
else:
continue
#non-valid dataset can't be on deletion list
if banInvalid == True:
if not self.dsetIsValid[datasetName]:
continue
dataPr = dataPropers[datasetName]
if dataPr.nSites() > ncopyMin:
space = space + self.datasetSizes[datasetName]
self.wishList.append(datasetName)
self.globalDsetIndex = counter
def hasMoreToDelete(self, dataPropers, ncopyMin, banInvalid):
counter = 0
if self.globalDsetIndex >= len(self.datasetRanks.keys()):
return False
for datasetName in sorted(self.datasetRanks.keys(), cmp=self.compare):
counter = counter + 1
if counter < self.globalDsetIndex:
continue
if '/MINIAOD' in datasetName:
ncopyMinTemp = 3
else:
ncopyMinTemp = ncopyMin
if datasetName in self.datasetsToDelete:
continue
if datasetName in self.protectedList:
continue
#custodial set can't be on deletion wish list
if self.dsetIsCustodial[datasetName] :
continue
#non-valid dataset can't be on deletion list
if banInvalid == True:
if not self.dsetIsValid[datasetName]:
continue
if datasetName in self.wishList:
continue
dataPr = dataPropers[datasetName]
if dataPr.nSites() <= ncopyMinTemp:
continue
return True
return False
def onWishList(self,dset):
if dset in self.wishList:
return True
return False
def onProtectedList(self,dset):
if dset in self.protectedList:
return True
return False
def wantToDelete(self):
if self.deleted < self.space2free:
return True
else:
return False
def grantWish(self,dset):
if dset in self.protectedList:
return False
if dset in self.datasetsToDelete:
return False
#if self.deleted > self.space2free:
# return False
self.datasetsToDelete.append(dset)
self.deleted = self.deleted + self.datasetSizes[dset]
return True
def revokeWish(self,dset):
if dset in self.datasetsToDelete:
self.datasetsToDelete.remove(dset)
self.deleted = self.deleted - self.datasetSizes[dset]
def canBeLastCopy(self,dset,banInvalid):
if not banInvalid:
return True
#can't be partial dataset
if dset not in self.dsetIsPartial:
return False
if self.dsetIsPartial[dset] :
return False
#can't be non-valid dataset
if not self.dsetIsValid[dset]:
return False
return True
def pinDataset(self,dset):
if dset in self.datasetsToDelete:
return False
#can't pin partial dataset
if self.dsetIsPartial[dset] :
return False
#can't pin non-valid dataset
if not self.dsetIsValid[dset]:
return False
self.protectedList.append(dset)
self.protected = self.protected + self.datasetSizes[dset]
if dset in self.wishList:
self.wishList.remove(dset)
return True
def lastCopySpace(self,datasets,nCopyMin):
space = 0
self.dsetLastCopy = {}
for dset in self.datasetSizes.keys():
if dset in self.datasetsToDelete:
continue
dataset = datasets[dset]
remaining = dataset.nSites() - dataset.nBeDeleted()
if remaining <= nCopyMin:
self.dsetLastCopy[dset] = 1
space = space + self.datasetSizes[dset]
self.spaceLCp = space
def setSiteSize(self,size):
self.siteSizeGbV = size
def siteSizeGb(self):
return self.siteSizeGbV
def dsetRank(self,set):
return self.datasetRanks[set]
def dsetSize(self,set):
return self.datasetSizes[set]
def isPartial(self,set):
return self.dsetIsPartial[set]
def siteName(self):
return self.name
def spaceTaken(self):
return self.spaceTakenV
def spaceDeleted(self):
return self.deleted
def spaceProtected(self):
return self.protected
def spaceFree(self):
return self.siteSizeGbV - (self.spaceTakenV - self.deleted)
def spaceLastCp(self):
return self.spaceLCp
def isDeprecated(self,dset):
if dset in self.deprecated:
return True
return False
def spaceDeprecated(self):
size = 0
for dset in self.deprecated:
size = size + self.datasetSizes[dset]
return size
def spaceIncomplete(self):
size = 0;
for dset in self.dsetIsPartial:
if self.dsetIsPartial[dset]:
size = size + self.datasetSizes[dset]
return size
def spaceCustodial(self):
size = 0;
for dset in self.dsetIsCustodial:
if self.dsetIsCustodial[dset]:
size = size + self.datasetSizes[dset]
return size
def spaceUtouchable(self):
size = 0
for dset in self.dsetLastCopy:
size = size + self.datasetSizes[dset]
for dset in self.dsetIsCustodial:
if dset in self.dsetLastCopy:
continue
if self.dsetIsCustodial[dset]:
size = size + self.datasetSizes[dset]
return size
def nsetsDeprecated(self):
nsets = 0
for dset in self.deprecated:
nsets = nsets + 1
return nsets
def hasDataset(self,dset):
if dset in self.datasetRanks:
return True
else:
return False
def willDelete(self,dset):
if dset in self.datasetsToDelete:
return True
else:
return False
def allSets(self):
return sorted(self.datasetRanks.keys(), cmp=self.compare)
def delTargets(self):
return sorted(self.datasetsToDelete, cmp=self.compare)
def protectedSets(self):
return sorted(self.protectedList, cmp=self.compare)
    def setSpaceToFree(self,size):
        # Record how much space [GB] should be freed at this site.
        self.space2free = size
    def reqTime(self,dset):
        # Epoch time [s] at which the dataset transfer was requested.
        return self.dsetReqTime[dset]
    def dsetLoadTime(self,dset):
        # Transfer duration [s]: last-update time minus request time.
        return (self.dsetUpdTime[dset] - self.dsetReqTime[dset])
    def spaceUnused(self):
        # Space [GB] not used by any tracked dataset.
        return self.spaceNotUsed
    def siteRank(self):
        # Size-weighted average dataset rank; 0 for an empty site.
        if self.spaceTakenV == 0:
            return 0
        return self.rankSum/self.spaceTakenV
    def medianRank(self):
        # Median dataset rank (uses the statistics module, imported at
        # module level elsewhere); 0 when the site hosts nothing.
        if len(self.datasetRanks.values()) > 0:
            return statistics.median(self.datasetRanks.values())
        return 0
    def dsetIsStuck(self,dset):
        # 1 when the transfer is unfinished and was requested more than
        # 14 days ago, else 0.
        if self.dsetIsDone[dset] == 0:
            reqtime = self.dsetReqTime[dset]
            if (self.epochTime - reqtime) > 60*60*24*14:
                return 1
        return 0
    def considerForStats(self,dset):
        # Exclude a transfer from the download statistics when it took more
        # than 14 days, has a non-positive duration (bad timestamps), or
        # was requested more than 90 days ago.
        if self.dsetLoadTime(dset) > 60*60*24*14:
            return False
        if self.dsetLoadTime(dset) <= 0:
            return False
        if (self.epochTime - self.dsetReqTime[dset]) > 60*60*24*90:
            return False
        return True
    def getDownloadStats(self):
        # Aggregate download statistics for this site.
        # Returns (speed [GB/day], transferred volume [GB], number stuck).
        loadSize = 0
        loadTime = 0
        stuck = 0
        for dset in self.datasetSizes:
            if self.dsetIsStuck(dset) == 1:
                stuck = stuck + 1
                continue
            if not self.considerForStats(dset):
                continue
            # Only datasets larger than 10 GB contribute to the estimate.
            if self.datasetSizes[dset] > 10:
                loadSize = loadSize + self.datasetSizes[dset]
                loadTime = loadTime + self.dsetLoadTime(dset)
        speed = 0
        if loadTime > 0:
            # Convert GB/s into GB/day.
            speed = loadSize/loadTime*(60*60*24)
        return (speed, loadSize, stuck)
    def getAverage(self,array):
        # Iteratively clipped mean: start from the median and the full
        # range, then repeatedly average only entries within 1.6*RMS of
        # the previous mean until the mean moves by less than 1%.
        # Python 2 code: integer '/' and a bare print statement below.
        if len(array) < 3: return 0
        sortA = sorted(array)
        diff = 100
        prevMean = sortA[len(sortA)/2]
        prevRms = sortA[len(sortA)-1] - sortA[0]
        print sortA # NOTE(review): looks like leftover debug output -- confirm
        while diff > 0.01:
            ave = 0
            aveSq = 0
            nit = 0
            # NOTE(review): range starts at 1, so sortA[0] never enters the
            # average -- possibly an off-by-one; confirm intent.
            for i in range(1, len(sortA)):
                if abs(sortA[i] - prevMean) > 1.6*prevRms:
                    continue
                ave = ave + sortA[i]
                aveSq = aveSq + sortA[i]*sortA[i]
                nit = nit + 1
            ave = ave/nit # NOTE(review): ZeroDivisionError if all entries clipped
            rms = math.sqrt(aveSq/nit - ave*ave)
            diff = abs(ave - prevMean)/prevMean
            prevMean = ave
            prevRms = rms
        return prevMean
    def compare(self,item1, item2):
        # Python 2 cmp-style comparator ordering datasets by *descending*
        # rank (higher-ranked dataset sorts first).
        r1 = self.datasetRanks[item1]
        r2 = self.datasetRanks[item2]
        if r1 < r2:
            return 1
        elif r1 > r2:
            return -1
        else:
            return 0
| sidnarayanan/IntelROCCS | Detox/python/siteProperties.py | Python | mit | 12,964 |
""" This test need a set of pins which can be set as inputs and have no external
pull up or pull down connected.
"""
from machine import Pin
import os
mch = os.uname().machine
if 'LaunchPad' in mch:
pin_map = ['GP24', 'GP12', 'GP14', 'GP15', 'GP16', 'GP17', 'GP28', 'GP8', 'GP6', 'GP30', 'GP31', 'GP3', 'GP0', 'GP4', 'GP5']
max_af_idx = 15
elif 'WiPy' in mch:
pin_map = ['GP23', 'GP24', 'GP12', 'GP13', 'GP14', 'GP9', 'GP17', 'GP28', 'GP22', 'GP8', 'GP30', 'GP31', 'GP0', 'GP4', 'GP5']
max_af_idx = 15
else:
raise Exception('Board not supported!')
def test_noinit():
    """Read each pin in the map without any explicit initialisation."""
    for name in pin_map:
        Pin(name).value()
def test_pin_read(pull):
    # enable the pull resistor on all pins, then read the value
    for p in pin_map:
        pin = Pin(p, mode=Pin.IN, pull=pull)
    for p in pin_map:
        print(pin())
        # NOTE(review): `pin` is the last pin constructed above, so every
        # iteration reads that same pin; confirm against the .exp file
        # whether per-pin reads were intended.
def test_pin_af():
    # Cycle every pin through each of its alternate functions (indices up
    # to max_af_idx), in both push-pull and open-drain ALT modes.
    for p in pin_map:
        for af in Pin(p).alt_list():
            if af[1] <= max_af_idx:
                Pin(p, mode=Pin.ALT, alt=af[1])
                Pin(p, mode=Pin.ALT_OPEN_DRAIN, alt=af[1])
# NOTE: this is a MicroPython hardware regression test; the printed output
# is diffed against a .exp file, so print statements and their order must
# not change.
# test un-initialized pins
test_noinit()
# test with pull-up and pull-down
test_pin_read(Pin.PULL_UP)
test_pin_read(Pin.PULL_DOWN)
# test all constructor combinations
pin = Pin(pin_map[0])
pin = Pin(pin_map[0], mode=Pin.IN)
pin = Pin(pin_map[0], mode=Pin.OUT)
pin = Pin(pin_map[0], mode=Pin.IN, pull=Pin.PULL_DOWN)
pin = Pin(pin_map[0], mode=Pin.IN, pull=Pin.PULL_UP)
pin = Pin(pin_map[0], mode=Pin.OPEN_DRAIN, pull=Pin.PULL_UP)
pin = Pin(pin_map[0], mode=Pin.OUT, pull=Pin.PULL_DOWN)
pin = Pin(pin_map[0], mode=Pin.OUT, pull=None)
pin = Pin(pin_map[0], mode=Pin.OUT, pull=Pin.PULL_UP)
pin = Pin(pin_map[0], mode=Pin.OUT, pull=Pin.PULL_UP, drive=pin.LOW_POWER)
pin = Pin(pin_map[0], mode=Pin.OUT, pull=Pin.PULL_UP, drive=pin.MED_POWER)
pin = Pin(pin_map[0], mode=Pin.OUT, pull=Pin.PULL_UP, drive=pin.HIGH_POWER)
pin = Pin(pin_map[0], mode=Pin.OUT, drive=pin.LOW_POWER)
pin = Pin(pin_map[0], Pin.OUT, Pin.PULL_DOWN)
pin = Pin(pin_map[0], Pin.ALT, Pin.PULL_UP)
pin = Pin(pin_map[0], Pin.ALT_OPEN_DRAIN, Pin.PULL_UP)
test_pin_af() # try the entire af range on all pins
# test pin init and printing
pin = Pin(pin_map[0])
pin.init(mode=Pin.IN)
print(pin)
pin.init(Pin.IN, Pin.PULL_DOWN)
print(pin)
pin.init(mode=Pin.OUT, pull=Pin.PULL_UP, drive=pin.LOW_POWER)
print(pin)
pin.init(mode=Pin.OUT, pull=Pin.PULL_UP, drive=pin.HIGH_POWER)
print(pin)
# test value in OUT mode
pin = Pin(pin_map[0], mode=Pin.OUT)
pin.value(0)
pin.toggle() # test toggle
print(pin())
pin.toggle() # test toggle again
print(pin())
# test different value settings
pin(1)
print(pin.value())
pin(0)
print(pin.value())
pin.value(1)
print(pin())
pin.value(0)
print(pin())
# test all getters and setters
pin = Pin(pin_map[0], mode=Pin.OUT)
# mode
print(pin.mode() == Pin.OUT)
pin.mode(Pin.IN)
print(pin.mode() == Pin.IN)
# pull
pin.pull(None)
print(pin.pull() == None)
pin.pull(Pin.PULL_DOWN)
print(pin.pull() == Pin.PULL_DOWN)
# drive
pin.drive(Pin.MED_POWER)
print(pin.drive() == Pin.MED_POWER)
pin.drive(Pin.HIGH_POWER)
print(pin.drive() == Pin.HIGH_POWER)
# id
print(pin.id() == pin_map[0])
# all the next ones MUST raise
try:
    pin = Pin(pin_map[0], mode=Pin.OUT, pull=Pin.PULL_UP, drive=pin.IN) # incorrect drive value
except Exception:
    print('Exception')
try:
    pin = Pin(pin_map[0], mode=Pin.LOW_POWER, pull=Pin.PULL_UP) # incorrect mode value
except Exception:
    print('Exception')
try:
    pin = Pin(pin_map[0], mode=Pin.IN, pull=Pin.HIGH_POWER) # incorrect pull value
except Exception:
    print('Exception')
try:
    pin = Pin('A0', Pin.OUT, Pin.PULL_DOWN) # incorrect pin id
except Exception:
    print('Exception')
try:
    pin = Pin(pin_map[0], Pin.IN, Pin.PULL_UP, alt=0) # af specified in GPIO mode
except Exception:
    print('Exception')
try:
    pin = Pin(pin_map[0], Pin.OUT, Pin.PULL_UP, alt=7) # af specified in GPIO mode
except Exception:
    print('Exception')
try:
    pin = Pin(pin_map[0], Pin.ALT, Pin.PULL_UP, alt=0) # incorrect af
except Exception:
    print('Exception')
try:
    pin = Pin(pin_map[0], Pin.ALT_OPEN_DRAIN, Pin.PULL_UP, alt=-1) # incorrect af
except Exception:
    print('Exception')
try:
    pin = Pin(pin_map[0], Pin.ALT_OPEN_DRAIN, Pin.PULL_UP, alt=16) # incorrect af
except Exception:
    print('Exception')
try:
    pin.mode(Pin.PULL_UP) # incorrect pin mode
except Exception:
    print('Exception')
try:
    pin.pull(Pin.OUT) # incorrect pull
except Exception:
    print('Exception')
try:
    pin.drive(Pin.IN) # incorrect drive strength
except Exception:
    print('Exception')
try:
    pin.id('ABC') # id cannot be set
except Exception:
    print('Exception')
| feilongfl/micropython | tests/wipy/pin.py | Python | mit | 4,685 |
#!/usr/bin/env python
# **********************************************************************
#
# Copyright (c) 2003-2015 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
import os, sys
# Candidate relative locations of the Ice source-tree top level.
path = [ ".", "..", "../..", "../../..", "../../../.." ]
head = os.path.dirname(sys.argv[0])
if len(head) > 0:
    # Make the candidates relative to the script's own directory.
    path = [os.path.join(head, p) for p in path]
# Keep only candidates that actually contain scripts/TestUtil.py.
path = [os.path.abspath(p) for p in path if os.path.exists(os.path.join(p, "scripts", "TestUtil.py")) ]
if len(path) == 0:
    raise RuntimeError("can't find toplevel directory!")
sys.path.append(os.path.join(path[0], "scripts"))
import TestUtil
print("tests with regular server.")
TestUtil.clientServerTest()
print("tests with collocated server.")
TestUtil.collocatedTest()
| chujieyang/ice | python/test/Ice/ami/run.py | Python | gpl-2.0 | 907 |
"""
This module implements Exception classes
http://www.python.org/dev/peps/pep-0249/
"""
class Error(StandardError):
    """
    Base class for all driver error exceptions (PEP 249).
    Stores a numeric error id (-1 when not given) and an optional message.
    """
    def __init__(self, err_id = None, err_msg = None):
        # A falsy err_id (None or 0) is normalised to -1.
        self._err_id = err_id or -1
        self._err_msg = err_msg
    #def __str__(self):
    #    return '%d: %s' % (self._err_id, self._err_msg)
class Warning(StandardError):
    """
    Warnings exception (PEP 249).  Note: shadows the built-in Warning
    inside this module.
    """
    pass
class InterfaceError(Error):
    """
    Exception for errors related to the database interface itself
    rather than the database (PEP 249).
    """
    pass
class DatabaseError(Error):
    """
    Exception for errors related to the database (PEP 249); base of the
    more specific database error classes below.
    """
    pass
class InternalError(DatabaseError):
    """
    Exception for internal database errors, e.g. invalid cursor or
    out-of-sync transaction state (PEP 249).
    """
    pass
class OperationalError(DatabaseError):
    """
    Exception for errors related to the database's operation, not
    necessarily under the programmer's control (PEP 249).
    """
    pass
class ProgrammingError(DatabaseError):
    """
    Exception for programming errors, e.g. bad SQL or wrong number of
    bound parameters (PEP 249).
    """
    pass
class IntegrityError(DatabaseError):
    """
    Exception for violations of relational integrity, e.g. foreign-key
    failures (PEP 249).
    """
    pass
class DataError(DatabaseError):
    """
    Exception for problems with the processed data, e.g. division by
    zero or a value out of range (PEP 249).
    """
    pass
class NotSupportedError(DatabaseError):
    """
    Exception for database operations the driver or server does not
    support (PEP 249).
    """
    pass
| cubrid-talks/CUBRIDPy | trunk/CUBRIDPy/errors.py | Python | gpl-2.0 | 1,270 |
###############################################################################
# Name: ed_print.py #
# Purpose: Editra's printer class #
# Author: Cody Precord <cprecord@editra.org> #
# Copyright: (c) 2007 Cody Precord <staff@editra.org> #
# License: wxWindows License #
###############################################################################
"""
Printer class for creating and managing printouts from a StyledTextCtrl.
Classes:
- L{EdPrinter}: Class for managing printing and providing print dialogs
- L{EdPrintout}: Scales and renders the given document to a printer.
@summary: Printer Classes for printing text from an STC
"""
__author__ = "Cody Precord <cprecord@editra.org>"
__cvsid__ = "$Id: ed_print.py 67499 2011-04-15 20:33:40Z CJP $"
__revision__ = "$Revision: 67499 $"
#--------------------------------------------------------------------------#
# Imports
import wx
import wx.stc
# Editra Imports
import ed_glob
import util
import extern.stcprint as stcprint
# Shorthand used for translatable UI strings below.
_ = wx.GetTranslation
#--------------------------------------------------------------------------#
# Globals
# Map Editra's print-mode identifiers onto the wx.stc print render modes.
COLOURMODES = { ed_glob.PRINT_BLACK_WHITE : wx.stc.STC_PRINT_BLACKONWHITE,
                ed_glob.PRINT_COLOR_WHITE : wx.stc.STC_PRINT_COLOURONWHITE,
                ed_glob.PRINT_COLOR_DEF : wx.stc.STC_PRINT_COLOURONWHITEDEFAULTBG,
                ed_glob.PRINT_INVERT : wx.stc.STC_PRINT_INVERTLIGHT,
                ed_glob.PRINT_NORMAL : wx.stc.STC_PRINT_NORMAL }
#--------------------------------------------------------------------------#
class EdPrinter(object):
    """Printer Class for the editor
    @note: current font size is fixed at 12 point for printing
    """
    def __init__(self, parent, mode=ed_glob.PRINT_NORMAL):
        """Initializes the Printer
        @param parent: parent window
        @keyword mode: printer mode (one of the COLOURMODES keys)
        """
        super(EdPrinter, self).__init__()
        # Attributes
        self.stc = None
        self.title = wx.EmptyString
        self.parent = parent
        self.print_mode = mode
        self.print_data = wx.PrintData()
        # (top-left, bottom-right) page margins; NOTE(review): units are
        # presumably millimetres per wx page-setup convention -- confirm.
        self.margins = (wx.Point(15,15), wx.Point(15,15))
    def CreatePrintout(self):
        """Creates a printout of the current stc window
        @return: a printout object
        @precondition: SetStc() has been called
        """
        colour = COLOURMODES[self.print_mode]
        dlg_data = wx.PageSetupDialogData(self.print_data)
        dlg_data.SetPrintData(self.print_data)
        dlg_data.SetMarginTopLeft(self.margins[0])
        dlg_data.SetMarginBottomRight(self.margins[1])
        fname = self.stc.GetFileName()
        printout = stcprint.STCPrintout(self.stc, page_setup_data=dlg_data,
                                        print_mode=colour, title=self.title,
                                        job_title=fname)
        return printout
    def PageSetup(self):
        """Opens a print setup dialog and save print settings.
        @return: None
        """
        dlg_data = wx.PageSetupDialogData(self.print_data)
        dlg_data.SetPrintData(self.print_data)
        dlg_data.SetDefaultMinMargins(True)
        dlg_data.SetMarginTopLeft(self.margins[0])
        dlg_data.SetMarginBottomRight(self.margins[1])
        print_dlg = wx.PageSetupDialog(self.parent, dlg_data)
        if print_dlg.ShowModal() == wx.ID_OK:
            # Persist the user's choices for later printouts.
            self.print_data = wx.PrintData(dlg_data.GetPrintData())
            self.print_data.SetPaperId(dlg_data.GetPaperId())
            self.margins = (dlg_data.GetMarginTopLeft(),
                            dlg_data.GetMarginBottomRight())
        print_dlg.Destroy()
    def Preview(self):
        """Preview the Print
        @return: None
        """
        # wx.PrintPreview takes ownership of both printout objects.
        printout = self.CreatePrintout()
        printout2 = self.CreatePrintout()
        preview = wx.PrintPreview(printout, printout2, self.print_data)
        preview.SetZoom(150)
        if preview.IsOk():
            pre_frame = wx.PreviewFrame(preview, self.parent,
                                        _("Print Preview"))
            # Size the frame to the editor's width and almost full
            # display height.
            dsize = wx.GetDisplaySize()
            pre_frame.SetInitialSize((self.stc.GetSize()[0],
                                      dsize.GetHeight() - 100))
            pre_frame.Initialize()
            pre_frame.Show()
        else:
            wx.MessageBox(_("Failed to create print preview"),
                          _("Print Error"),
                          style=wx.ICON_ERROR|wx.OK)
    def Print(self):
        """Prints the document
        @postcondition: the current document is printed
        """
        pdd = wx.PrintDialogData(self.print_data)
        printer = wx.Printer(pdd)
        printout = self.CreatePrintout()
        result = printer.Print(self.parent, printout)
        if result:
            # Keep any settings the user changed in the print dialog.
            dlg_data = printer.GetPrintDialogData()
            self.print_data = wx.PrintData(dlg_data.GetPrintData())
        elif printer.GetLastError() == wx.PRINTER_ERROR:
            wx.MessageBox(_("There was an error when printing.\n"
                            "Check that your printer is properly connected."),
                          _("Printer Error"),
                          style=wx.ICON_ERROR|wx.OK)
        printout.Destroy()
    def SetColourMode(self, mode):
        """Sets the color mode that the text is to be rendered with
        @param mode: mode to set the printer to use
        @return: whether mode was set or not
        @rtype: boolean
        """
        if mode in COLOURMODES:
            self.print_mode = mode
            ret = True
        else:
            ret = False
        return ret
    def SetStc(self, stc):
        """Set the stc we are printing for
        @param stc: instance of wx.stc.StyledTextCtrl
        @note: MUST be called prior to any other print operations
        """
        self.stc = stc
| garrettcap/Bulletproof-Backup | wx/tools/Editra/src/ed_print.py | Python | gpl-2.0 | 6,013 |
import pytest
from cfme.physical.provider.lenovo import LenovoProvider
from cfme.utils.appliance.implementations.ui import navigate_to
# Run these UI tests only against Lenovo physical-infrastructure providers.
pytestmark = [pytest.mark.tier(3), pytest.mark.provider([LenovoProvider], scope="module")]
@pytest.fixture(scope="module")
def physical_server(appliance, provider, setup_provider_modscope):
    # Get and return the first physical server
    # (module-scoped, so one server is shared by every test below).
    physical_servers = appliance.collections.physical_servers.all(provider)
    yield physical_servers[0]
def test_physical_server_details(physical_server):
    """Navigate to the physical server details page and verify that the page is displayed

    Polarion:
        assignee: rhcf3_machine
        casecomponent: Infra
        initialEstimate: 1/4h
    """
    details_view = navigate_to(physical_server, 'Details')
    assert details_view.is_displayed
def test_physical_server_details_dropdowns(physical_server):
    """Navigate to the physical server details page and verify that the menus are present

    Polarion:
        assignee: rhcf3_machine
        casecomponent: Infra
        initialEstimate: 1/4h
    """
    view = navigate_to(physical_server, 'Details')

    assert "Refresh Relationships and Power States" in view.toolbar.configuration.items

    power_items = view.toolbar.power.items
    for entry in ("Power On",
                  "Power Off",
                  "Power Off Immediately",
                  "Restart",
                  "Restart Immediately",
                  "Restart to System Setup",
                  "Restart Management Controller"):
        assert entry in power_items

    identify_items = view.toolbar.identify.items
    for entry in ("Blink LED", "Turn On LED", "Turn Off LED"):
        assert entry in identify_items

    policy_items = view.toolbar.policy.items
    for entry in ("Manage Policies", "Edit Tags"):
        assert entry in policy_items

    assert "Provision Physical Server" in view.toolbar.lifecycle.items
    assert "Timelines" in view.toolbar.monitoring.items
def test_network_devices(physical_server):
    """Navigate to the Network Devices page and verify that the page is displayed

    Polarion:
        assignee: rhcf3_machine
        casecomponent: Infra
        initialEstimate: 1/4h
    """
    device_count = physical_server.num_network_devices()
    view = navigate_to(physical_server, 'NetworkDevices')
    # The page only renders when the server reports at least one device.
    if device_count != "0":
        assert view.is_displayed
    else:
        assert not view.is_displayed
def test_storage_devices(physical_server):
    """Navigate to the Storage Devices page and verify that the page is displayed

    Polarion:
        assignee: rhcf3_machine
        casecomponent: Infra
        initialEstimate: 1/4h
    """
    device_count = physical_server.num_storage_devices()
    view = navigate_to(physical_server, 'StorageDevices')
    # The page only renders when the server reports at least one device.
    if device_count != "0":
        assert view.is_displayed
    else:
        assert not view.is_displayed
def test_physical_server_details_stats(physical_server):
    """Navigate to the physical server details page and verify that the stats match

    Polarion:
        assignee: rhcf3_machine
        casecomponent: Infra
        initialEstimate: 1/4h
    """
    # Compares the stats shown in the UI against the provider's data.
    physical_server.validate_stats(ui=True)
| nachandr/cfme_tests | cfme/tests/physical_infrastructure/ui/test_physical_server_details.py | Python | gpl-2.0 | 3,611 |
#!/usr/bin/env python
'''
Copyright (C) 2001-2002 Matt Chisholm matt@theory.org
Copyright (C) 2008 Joel Holdsworth joel@airwebreathe.org.uk
for AP
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
'''
# standard library
import copy
import math
import cmath
import string
import random
import os
import sys
import re
# local library
import inkex
import simplestyle
import render_alphabetsoup_config
import bezmisc
import simplepath
inkex.localize()
# Grammar tables and rendering parameters supplied by the config module.
syntax = render_alphabetsoup_config.syntax
alphabet = render_alphabetsoup_config.alphabet
units = render_alphabetsoup_config.units
font = render_alphabetsoup_config.font
# Loads a super-path from a given SVG file
def loadPath( svgPath ):
    """Load the first <svg:path> from *svgPath* (relative to the extension
    directory) and return (parsed_path, width, height).  Returns
    (None, 0, 0) when the file contains no path element."""
    extensionDir = os.path.normpath(
                       os.path.join( os.getcwd(), os.path.dirname(__file__) )
                   )
    # __file__ is better then sys.argv[0] because this file may be a module
    # for another one.
    tree = inkex.etree.parse( extensionDir + "/" + svgPath )
    root = tree.getroot()
    pathElement = root.find('{http://www.w3.org/2000/svg}path')
    if pathElement is None:  # identity test is the idiomatic None check
        return None, 0, 0
    d = pathElement.get("d")
    width = float(root.get("width"))
    height = float(root.get("height"))
    return simplepath.parsePath(d), width, height # Currently we only support a single path
def combinePaths( pathA, pathB ):
    """Concatenate two parsed simplepaths; either argument may be None
    (treated as empty).  Returns None only when both are None."""
    # Identity comparison with None is the idiomatic (and safe) test.
    if pathA is None and pathB is None:
        return None
    elif pathA is None:
        return pathB
    elif pathB is None:
        return pathA
    else:
        return pathA + pathB
def reverseComponent(c):
    """Reverse a single subpath in place-order.

    Consumes (pops) the input list `c` and returns a new command list that
    walks the same geometry from the far end back to the start; cubic
    'C' segments get their control points swapped accordingly."""
    out = []
    prev = c.pop()
    out.append(['M', prev[1][-2:]])
    while c:
        cur = c.pop()
        if prev[0] == 'C':
            # Reversed cubic: control points swap, endpoint comes from `cur`.
            out.append(['C', prev[1][2:4] + prev[1][:2] + cur[1][-2:]])
        else:
            out.append([prev[0], cur[1][-2:]])
        prev = cur
    return out
def reversePath(sp):
    """Reverse every 'Z'-terminated component of a parsed simplepath,
    preserving the trailing 'Z' markers."""
    reversed_path = []
    component = []
    for step in sp:
        if step[0] == 'Z':
            reversed_path.extend(reverseComponent(component))
            reversed_path.append(['Z', []])
            component = []
        else:
            component.append(step)
    return reversed_path
def flipLeftRight( sp, width ):
    # Mirror a parsed path horizontally about x = width/2 by reflecting
    # every x-typed parameter (parameter types per simplepath.pathdefs).
    for cmd,params in sp:
        defs = simplepath.pathdefs[cmd]
        for i in range(defs[1]):
            if defs[3][i] == 'x':
                params[i] = width - params[i]
def flipTopBottom( sp, height ):
    # Mirror a parsed path vertically about y = height/2 by reflecting
    # every y-typed parameter (parameter types per simplepath.pathdefs).
    for cmd,params in sp:
        defs = simplepath.pathdefs[cmd]
        for i in range(defs[1]):
            if defs[3][i] == 'y':
                params[i] = height - params[i]
def solveQuadratic(a, b, c):
    """Return both roots of a*x**2 + b*x + c = 0 as a 2-tuple.

    A negative discriminant yields complex roots (via cmath)."""
    disc = b * b - 4.0 * a * c
    root = math.sqrt(disc) if disc >= 0 else cmath.sqrt(disc)
    return (-b + root) / (2 * a), (-b - root) / (2 * a)
def cbrt(x):
    """Real cube root of x, correct for negative inputs (where ** with a
    fractional exponent would go complex)."""
    return x ** (1.0 / 3.0) if x >= 0 else -((-x) ** (1.0 / 3.0))
def findRealRoots(a,b,c,d):
    # Real roots of a*x^3 + b*x^2 + c*x + d = 0 (Cardano's method),
    # degrading to the quadratic / linear cases when leading
    # coefficients are zero.
    # NOTE(review): returns [] for a quadratic with a negative
    # discriminant and for constant equations -- callers rely on that.
    if a != 0:
        a, b, c, d = 1, b/float(a), c/float(a), d/float(a) # Divide through by a
        t = b / 3.0
        p, q = c - 3 * t**2, d - c * t + 2 * t**3
        u, v = solveQuadratic(1, q, -(p/3.0)**3)
        if type(u) == type(0j): # Complex Cubic Root
            r = math.sqrt(u.real**2 + u.imag**2)
            w = math.atan2(u.imag, u.real)
            y1 = 2 * cbrt(r) * math.cos(w / 3.0)
        else: # Complex Real Root
            y1 = cbrt(u) + cbrt(v)
        y2, y3 = solveQuadratic(1, y1, p + y1**2)
        if type(y2) == type(0j): # Are y2 and y3 complex?
            return [y1 - t]
        return [y1 - t, y2 - t, y3 - t]
    elif b != 0:
        det=c*c - 4.0*b*d
        if det >= 0:
            return [(-c + math.sqrt(det))/(2.0*b),(-c - math.sqrt(det))/(2.0*b)]
    elif c != 0:
        return [-d/c]
    return []
def getPathBoundingBox( sp ):
    """Return (xmin, xmax, ymin, ymax) of a parsed simplepath.

    Cubic 'C' segments are bounded exactly by solving for the extrema of
    the bezier; 'Q' and 'A' segments are only bounded by their endpoints
    (marked 'Provisional' below)."""
    box = None
    last = None
    lastctrl = None  # fixed: was the typo 'lostctrl' (never read under that name)
    for cmd,params in sp:
        segmentBox = None
        if cmd == 'M':
            # A move cannot contribute to the bounding box
            last = params[:]
            lastctrl = params[:]
        elif cmd == 'L':
            if last:
                segmentBox = (min(params[0], last[0]), max(params[0], last[0]), min(params[1], last[1]), max(params[1], last[1]))
            last = params[:]
            lastctrl = params[:]
        elif cmd == 'C':
            if last:
                segmentBox = (min(params[4], last[0]), max(params[4], last[0]), min(params[5], last[1]), max(params[5], last[1]))
                bx0, by0 = last[:]
                bx1, by1, bx2, by2, bx3, by3 = params[:]
                # Compute the x limits (roots of the cubic's x-derivative)
                a = (-bx0 + 3*bx1 - 3*bx2 + bx3)*3
                b = (3*bx0 - 6*bx1 + 3*bx2)*2
                c = (-3*bx0 + 3*bx1)
                ts = findRealRoots(0, a, b, c)
                for t in ts:
                    if t >= 0 and t <= 1:
                        x = (-bx0 + 3*bx1 - 3*bx2 + bx3)*(t**3) + \
                            (3*bx0 - 6*bx1 + 3*bx2)*(t**2) + \
                            (-3*bx0 + 3*bx1)*t + \
                            bx0
                        segmentBox = (min(segmentBox[0], x), max(segmentBox[1], x), segmentBox[2], segmentBox[3])
                # Compute the y limits (roots of the cubic's y-derivative)
                a = (-by0 + 3*by1 - 3*by2 + by3)*3
                b = (3*by0 - 6*by1 + 3*by2)*2
                c = (-3*by0 + 3*by1)
                ts = findRealRoots(0, a, b, c)
                for t in ts:
                    if t >= 0 and t <= 1:
                        y = (-by0 + 3*by1 - 3*by2 + by3)*(t**3) + \
                            (3*by0 - 6*by1 + 3*by2)*(t**2) + \
                            (-3*by0 + 3*by1)*t + \
                            by0
                        segmentBox = (segmentBox[0], segmentBox[1], min(segmentBox[2], y), max(segmentBox[3], y))
            last = params[-2:]
            lastctrl = params[2:4]
        elif cmd == 'Q':
            # Provisional: bounded by endpoints only
            if last:
                segmentBox = (min(params[0], last[0]), max(params[0], last[0]), min(params[1], last[1]), max(params[1], last[1]))
            last = params[-2:]
            lastctrl = params[2:4]
        elif cmd == 'A':
            # Provisional: bounded by endpoints only
            if last:
                segmentBox = (min(params[0], last[0]), max(params[0], last[0]), min(params[1], last[1]), max(params[1], last[1]))
            last = params[-2:]
            lastctrl = params[2:4]
        if segmentBox:
            if box:
                box = (min(segmentBox[0],box[0]), max(segmentBox[1],box[1]), min(segmentBox[2],box[2]), max(segmentBox[3],box[3]))
            else:
                box = segmentBox
    return box
def mxfm( image, width, height, stack ): # returns possibly transformed image
    # Consume optional "-" (top-bottom flip) and "|" (left-right flip)
    # markers from the front of `stack`, applying each flip to the path
    # and re-reversing it so winding order is preserved.
    tbimage = image
    if ( stack[0] == "-" ): # top-bottom flip
        flipTopBottom(tbimage, height)
        tbimage = reversePath(tbimage)
        stack.pop( 0 )
    lrimage = tbimage
    if ( stack[0] == "|" ): # left-right flip
        flipLeftRight(tbimage, width)
        lrimage = reversePath(lrimage)
        stack.pop( 0 )
    return lrimage
def comparerule( rule, nodes ): # compare node list to nodes in rule
    """Return 1 when every entry of `nodes` matches the symbol name in the
    corresponding `rule` slot, else 0."""
    for idx, nodename in enumerate(nodes):
        if rule[idx][0] != nodename:
            return 0
    return 1
def findrule( state, nodes ): # find the rule which generated this subtree
    # Scan the grammar rules for `state` and return the first one whose
    # daughter-symbol names match `nodes` exactly (same length and order);
    # returns None when no rule matches.
    ruleset = syntax[state][1]
    nodelen = len(nodes)
    for rule in ruleset:
        rulelen = len(rule)
        if ((rulelen == nodelen) and (comparerule( rule, nodes ))):
            return rule
    return
def generate( state ): # generate a random tree (in stack form)
    # Produce a random derivation of the grammar starting at `state`,
    # serialised depth-first as a flat list:
    #   state [ daughter... ]  with optional "-" / "|" flip markers.
    stack = [ state ]
    if ( len(syntax[state]) == 1 ): # if this is a stop symbol
        return stack
    else:
        stack.append( "[" )
        path = random.randint(0, (len(syntax[state][1])-1)) # choose randomly from next states
        for symbol in syntax[state][1][path]: # recurse down each non-terminal
            if ( symbol != 0 ): # 0 denotes end of list ###
                substack = generate( symbol[0] ) # get subtree
                for elt in substack:
                    stack.append( elt )
                if (symbol[3]):stack.append( "-" ) # top-bottom flip
                if (symbol[4]):stack.append( "|" ) # left-right flip
            #else:
                #inkex.debug("found end of list in generate( state =", state, ")") # this should be deprecated/never happen
        stack.append("]")
        return stack
def draw( stack ): # draw a character based on a tree stack
    # Recursively render the serialised derivation `stack` (see generate())
    # into one combined path.  Consumes `stack` as it goes and returns
    # (path, width, height); returns (None, 0, 0) if a daughter fails.
    state = stack.pop(0)
    #print state,
    image, width, height = loadPath( font+syntax[state][0] ) # load the image
    if (stack[0] != "["): # terminal stack element
        if (len(syntax[state]) == 1): # this state is a terminal node
            return image, width, height
        else:
            substack = generate( state ) # generate random substack
            return draw( substack ) # draw random substack
    else:
        #inkex.debug("[")
        stack.pop(0)
        images = [] # list of daughter images
        nodes = [] # list of daughter names
        while (stack[0] != "]"): # for all nodes in stack
            newstate = stack[0] # the new state
            newimage, width, height = draw( stack ) # draw the daughter state
            if (newimage):
                tfimage = mxfm( newimage, width, height, stack ) # maybe transform daughter state
                images.append( [tfimage, width, height] ) # list of daughter images
                nodes.append( newstate ) # list of daughter nodes
            else:
                #inkex.debug(("recurse on",newstate,"failed")) # this should never happen
                return None, 0, 0
        rule = findrule( state, nodes ) # find the rule for this subtree
        for i in range( 0, len(images)):
            currimg, width, height = images[i]
            if currimg:
                #box = getPathBoundingBox(currimg)
                # Offset each daughter by the rule's (x, y) in grammar units.
                dx = rule[i][1]*units
                dy = rule[i][2]*units
                #newbox = ((box[0]+dx),(box[1]+dy),(box[2]+dx),(box[3]+dy))
                simplepath.translatePath(currimg, dx, dy)
                image = combinePaths( image, currimg )
        stack.pop( 0 )
        return image, width, height
def draw_crop_scale( stack, zoom ): # draw, crop and scale letter image
    # Render the tree `stack`, shift the glyph so its left edge is at x=0,
    # and scale from grammar units to user units.  Returns the cropped
    # (path, width, height).
    image, width, height = draw(stack)
    bbox = getPathBoundingBox(image)
    simplepath.translatePath(image, -bbox[0], 0)
    simplepath.scalePath(image, zoom/units, zoom/units)
    return image, bbox[1] - bbox[0], bbox[3] - bbox[2]
def randomize_input_string(tokens, zoom ): # generate a glyph starting from each token in the input string
    """For each token pick a random pre-defined glyph variant from the
    alphabet table.  Repeated letters re-use the previous image so they
    render identically; " " becomes a spacer marker; unknown tokens are
    skipped with a warning on stderr."""
    imagelist = []
    for i in range(0,len(tokens)):
        char = tokens[i]
        #if ( re.match("[a-zA-Z0-9?]", char)):
        if ( alphabet.has_key(char)):
            if ((i > 0) and (char == tokens[i-1])): # if this letter matches previous letter
                # BUGFIX: duplicate the image appended for the previous
                # (identical) letter.  The old code indexed imagelist with
                # len(stack)-1 -- the token count of an unrelated glyph
                # tree -- which picked the wrong (or a nonexistent) entry.
                imagelist.append(imagelist[-1])  # make them the same image
            else: # generate image for letter
                stack = string.split( alphabet[char][random.randint(0,(len(alphabet[char])-1))] , "." )
                imagelist.append( draw_crop_scale( stack, zoom ))
        elif( char == " "): # add a " " space to the image list
            imagelist.append( " " )
        else: # this character is not in config.alphabet, skip it
            sys.stderr.write('bad character "%s"\n' % char)
    return imagelist
def generate_random_string( tokens, zoom ): # generate a totally random glyph for each glyph in the input string
    # Like randomize_input_string(), but every letter gets a freshly
    # generated random derivation: lowercase from "lc", uppercase from
    # "UC", anything else from "start" (after a warning on stderr).
    imagelist = []
    for char in tokens:
        if ( char == " "): # add a " " space to the image list
            imagelist.append( " " )
        else:
            if ( re.match("[a-z]", char )): # generate lowercase letter
                stack = generate("lc")
            elif ( re.match("[A-Z]", char )): # generate uppercase letter
                stack = generate("UC")
            else: # this character is not in config.alphabet, skip it
                sys.stderr.write('bad character"%s"\n' % char)
                stack = generate("start")
            imagelist.append( draw_crop_scale( stack, zoom ))
    return imagelist
def optikern( image, width, zoom ): # optical kerning algorithm
    # Sample the glyph with `resolution` horizontal scanlines per unit of
    # an 18-unit-tall band.  For each scanline record the distance from
    # the left edge of the glyph cell to the outline (list `left`) and
    # from the outline to the right edge (list `right`); scanlines that
    # miss the outline contribute the full cell width.  Only 'L' and 'C'
    # segments are intersected; 'Q' and 'A' are ignored (see below).
    left = []
    right = []
    resolution = 8
    for i in range( 0, 18 * resolution ):
        y = 1.0/resolution * (i + 0.5) * zoom
        xmin = None
        xmax = None
        for cmd,params in image:
            segmentBox = None
            if cmd == 'M':
                # A move cannot contribute to the bounding box
                last = params[:]
                lastctrl = params[:]
            elif cmd == 'L':
                # Line segment: intersect the scanline with the segment.
                if (y >= last[1] and y <= params[1]) or (y >= params[1] and y <= last[1]):
                    if params[0] == last[0]:
                        x = params[0]
                    else:
                        a = (params[1] - last[1]) / (params[0] - last[0])
                        b = last[1] - a * last[0]
                        if a != 0:
                            x = (y - b) / a
                        else: x = None
                    if x:
                        if xmin == None or x < xmin: xmin = x
                        if xmax == None or x > xmax: xmax = x
                last = params[:]
                lastctrl = params[:]
            elif cmd == 'C':
                # Cubic bezier: solve the cubic in t where the curve
                # crosses the scanline, then evaluate x at each root.
                if last:
                    bx0, by0 = last[:]
                    bx1, by1, bx2, by2, bx3, by3 = params[:]
                    d = by0 - y
                    c = -3*by0 + 3*by1
                    b = 3*by0 - 6*by1 + 3*by2
                    a = -by0 + 3*by1 - 3*by2 + by3
                    ts = findRealRoots(a, b, c, d)
                    for t in ts:
                        if t >= 0 and t <= 1:
                            x = (-bx0 + 3*bx1 - 3*bx2 + bx3)*(t**3) + \
                                (3*bx0 - 6*bx1 + 3*bx2)*(t**2) + \
                                (-3*bx0 + 3*bx1)*t + \
                                bx0
                            if xmin == None or x < xmin: xmin = x
                            if xmax == None or x > xmax: xmax = x
                    last = params[-2:]
                    lastctrl = params[2:4]
            elif cmd == 'Q':
                # Quadratic beziers are ignored
                last = params[-2:]
                lastctrl = params[2:4]
            elif cmd == 'A':
                # Arcs are ignored
                last = params[-2:]
                lastctrl = params[2:4]
        if xmin != None and xmax != None:
            left.append( xmin ) # distance from left edge of region to left edge of bbox
            right.append( width - xmax ) # distance from right edge of region to right edge of bbox
        else:
            left.append( width )
            right.append( width )
    return (left, right)
def layoutstring( imagelist, zoom ): # layout string of letter-images using optical kerning
    # Place the glyph images left to right.  For each adjacent pair, the
    # kerning is the minimum scanline-wise gap between the right profile
    # of the previous glyph and the left profile of this one (optikern()),
    # so letters visually tuck into each other.  " " entries are popped
    # from imagelist and advance the position by the configured space.
    kernlist = []
    length = zoom
    for entry in imagelist:
        if (entry == " "): # leaving room for " " space characters
            length = length + (zoom * render_alphabetsoup_config.space)
        else:
            image, width, height = entry
            length = length + width + zoom # add letter length to overall length
            kernlist.append( optikern(image, width, zoom) ) # append kerning data for this image
    workspace = None
    position = zoom
    for i in range(0, len(kernlist)):
        # Consume any space markers sitting at the current index so that
        # imagelist[i] lines up with kernlist[i].
        while(imagelist[i] == " "):
            position = position + (zoom * render_alphabetsoup_config.space )
            imagelist.pop(i)
        image, width, height = imagelist[i]
        # set the kerning
        if i == 0: kern = 0 # for first image, kerning is zero
        else:
            kerncompare = [] # kerning comparison array
            for j in range( 0, len(kernlist[i][0])):
                kerncompare.append( kernlist[i][0][j]+kernlist[i-1][1][j] )
            kern = min( kerncompare )
        position = position - kern # move position back by kern amount
        thisimage = copy.deepcopy(image)
        simplepath.translatePath(thisimage, position, 0)
        workspace = combinePaths(workspace, thisimage)
        position = position + width + zoom # advance position by letter width
    return workspace
def tokenize(text):
    """Split *text* into tokens, honouring LaTeX-style multi-character
    escapes such as ``\\yogh``.

    A backslash begins an escape whose name runs until a space (which is
    consumed) or another backslash (which is left for the next token);
    an empty escape name produces no token.  Every other character is a
    token by itself."""
    tokens = []
    pos = 0
    size = len(text)
    while pos < size:
        ch = text[pos]
        pos += 1
        if ch != '\\':
            tokens.append(ch)
            continue
        # Gobble up the escape's name.
        name = ''
        while pos < size:
            ch = text[pos]
            if ch == '\\':      # next escape starts: leave it unconsumed
                break
            pos += 1
            if ch == ' ':       # a space terminates (and is swallowed)
                break
            name += ch
        if name:
            tokens.append(name)
    return tokens
class AlphabetSoup(inkex.Effect):
    """Inkscape effect: render the input text as randomly generated
    'alphabet soup' glyphs and append the result as one filled path."""
    def __init__(self):
        inkex.Effect.__init__(self)
        self.OptionParser.add_option("-t", "--text",
                        action="store", type="string",
                        dest="text", default="Inkscape",
                        help="The text for alphabet soup")
        self.OptionParser.add_option("-z", "--zoom",
                        action="store", type="float",
                        dest="zoom", default="8.0",
                        help="The zoom on the output graphics")
        self.OptionParser.add_option("-r", "--randomize",
                        action="store", type="inkbool",
                        dest="randomize", default=False,
                        help="Generate random (unreadable) text")
    def effect(self):
        # Zoom is expressed in pixels and converted to user units.
        zoom = self.unittouu( str(self.options.zoom) + 'px')
        if self.options.randomize:
            imagelist = generate_random_string(self.options.text, zoom)
        else:
            tokens = tokenize(self.options.text)
            imagelist = randomize_input_string(tokens, zoom)
        image = layoutstring( imagelist, zoom )
        if image:
            # Emit a single black, stroke-less path into the current layer.
            s = { 'stroke': 'none', 'fill': '#000000' }
            new = inkex.etree.Element(inkex.addNS('path','svg'))
            new.set('style', simplestyle.formatStyle(s))
            new.set('d', simplepath.formatPath(image))
            self.current_layer.append(new)
if __name__ == '__main__':
    e = AlphabetSoup()
    e.affect()  # inkex.Effect entry point: parses options, runs effect()
| tik0/inkscapeGrid | share/extensions/render_alphabetsoup.py | Python | gpl-2.0 | 16,568 |
# coding: utf-8
"""
flask_wtf.i18n
~~~~~~~~~~~~~~
Internationalization support for Flask WTF.
:copyright: (c) 2013 by Hsiaoming Yang.
"""
from flask import _request_ctx_stack
from wtforms.ext.i18n.utils import messages_path
from flask.ext.babel import get_locale
from speaklater import make_lazy_string
from babel import support
__all__ = ('Translations', 'translations')
def _get_translations():
    """Return the gettext translations for the active request, or None.

    Adapted from flask-babel: the catalog for the ``wtforms`` domain is
    loaded at most once per request context and memoized on it as
    ``wtforms_translations``.
    """
    ctx = _request_ctx_stack.top
    if ctx is None:
        # Outside of a request there is no locale to translate for.
        return None
    # babel should be in extensions for get_locale
    if 'babel' not in ctx.app.extensions:
        return None
    cached = getattr(ctx, 'wtforms_translations', None)
    if cached is not None:
        return cached
    # First lookup in this request: load the catalog for the locale that
    # Flask-Babel selected, then cache it on the request context.
    catalog = support.Translations.load(
        messages_path(), [get_locale()], domain='wtforms'
    )
    ctx.wtforms_translations = catalog
    return catalog
def _gettext(string):
    """Translate *string* via the request's catalog, or return it unchanged."""
    catalog = _get_translations()
    if catalog is None:
        return string
    # Python 2 catalogs expose ugettext; Python 3 only has gettext.
    if hasattr(catalog, 'ugettext'):
        return catalog.ugettext(string)
    return catalog.gettext(string)
def _ngettext(singular, plural, n):
    """Translate a pluralised message, choosing the form for count *n*."""
    catalog = _get_translations()
    if catalog is None:
        # No catalog available: fall back to English pluralisation.
        return singular if n == 1 else plural
    # Python 2 catalogs expose ungettext; Python 3 only has ngettext.
    if hasattr(catalog, 'ungettext'):
        return catalog.ungettext(singular, plural, n)
    return catalog.ngettext(singular, plural, n)
class Translations(object):
    """WTForms translations provider backed by Flask-Babel.

    Both methods return lazy strings so that the locale is resolved at
    render time, inside an active request context.
    """
    def gettext(self, string):
        return make_lazy_string(_gettext, string)
    def ngettext(self, singular, plural, n):
        return make_lazy_string(_ngettext, singular, plural, n)
# Shared module-level instance handed to WTForms forms as their
# translations provider.
translations = Translations()
| xkcd1253/Mimi | flask/lib/python2.7/site-packages/flask_wtf/i18n.py | Python | gpl-2.0 | 1,803 |
"""Microsoft Internet Explorer cookie loading on Windows.
Copyright 2002-2003 Johnny Lee <typo_pl@hotmail.com> (MSIE Perl code)
Copyright 2002-2006 John J Lee <jjl@pobox.com> (The Python port)
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file
COPYING.txt included with the distribution).
"""
# XXX names and comments are not great here
import os, re, time, struct, logging
from _clientcookie import FileCookieJar, CookieJar, Cookie, \
MISSING_FILENAME_TEXT, LoadError
if os.name == "nt":
import _winreg
debug = logging.getLogger("mechanize").debug
def regload(path, leaf):
    """Read registry value *leaf* under HKEY_CURRENT_USER\\*path*.

    Returns None if the value does not exist (QueryValueEx raised
    WindowsError).  Only usable on Windows (_winreg is imported when
    os.name == "nt").
    """
    key = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, path, 0,
                          _winreg.KEY_ALL_ACCESS)
    try:
        value = _winreg.QueryValueEx(key, leaf)[0]
    except WindowsError:
        value = None
    return value
WIN32_EPOCH = 0x019db1ded53e8000L # 1970 Jan 01 00:00:00 in Win32 FILETIME
def epoch_time_offset_from_win32_filetime(filetime):
    """Convert from win32 filetime to seconds-since-epoch value.

    MSIE stores create and expire times as Win32 FILETIME, which is 64
    bits of 100 nanosecond intervals since Jan 01 1601.

    mechanize expects time in 32-bit value expressed in seconds since the
    epoch (Jan 01 1970).

    Raises ValueError if *filetime* predates the Unix epoch.
    """
    if filetime < WIN32_EPOCH:
        raise ValueError("filetime (%d) is before epoch (%d)" %
                         (filetime, WIN32_EPOCH))
    # 10,000,000 hundred-nanosecond intervals per second; floor division.
    return divmod((filetime - WIN32_EPOCH), 10000000L)[0]
def binary_to_char(c):
    """Return the two-digit uppercase hex representation of character *c*."""
    return format(ord(c), "02X")

def binary_to_str(d):
    """Hex-dump the string *d*, e.g. "AB" -> "4142"."""
    return "".join(binary_to_char(ch) for ch in d)
class MSIEBase:
    """Shared machinery for parsing MSIE's index.dat and cookie files.

    Mixed into MSIECookieJar; relies on the CookieJar side of the subclass
    for set_cookie and on FileCookieJar for the delayload attribute.
    """
    magic_re = re.compile(r"Client UrlCache MMF Ver \d\.\d.*")
    # filler-record signature used inside index.dat
    padding = "\x0d\xf0\xad\x0b"
    msie_domain_re = re.compile(r"^([^/]+)(/.*)$")
    cookie_re = re.compile("Cookie\:.+\@([\x21-\xFF]+).*?"
                           "(.+\@[\x21-\xFF]+\.txt)")
    # path under HKEY_CURRENT_USER from which to get location of index.dat
    reg_path = r"software\microsoft\windows" \
               r"\currentversion\explorer\shell folders"
    reg_key = "Cookies"
    def __init__(self):
        # domain -> (cookie_file, ignore_discard, ignore_expires) for
        # domains whose cookie file has not been read yet (delayload mode)
        self._delayload_domains = {}
    def _delayload_domain(self, domain):
        # if necessary, lazily load cookies for this domain
        delayload_info = self._delayload_domains.get(domain)
        if delayload_info is not None:
            cookie_file, ignore_discard, ignore_expires = delayload_info
            try:
                self.load_cookie_data(cookie_file,
                                      ignore_discard, ignore_expires)
            except (LoadError, IOError):
                debug("error reading cookie file, skipping: %s", cookie_file)
            else:
                # loaded successfully; no longer pending
                del self._delayload_domains[domain]
    def _load_cookies_from_file(self, filename):
        """Parse one per-domain cookie file into a list of raw cookie dicts."""
        debug("Loading MSIE cookies file: %s", filename)
        cookies = []
        cookies_fh = open(filename)
        try:
            while 1:
                key = cookies_fh.readline()
                if key == "": break
                # each record is a fixed sequence of one-value-per-line fields
                rl = cookies_fh.readline
                def getlong(rl=rl): return long(rl().rstrip())
                def getstr(rl=rl): return rl().rstrip()
                key = key.rstrip()
                value = getstr()
                domain_path = getstr()
                flags = getlong()  # 0x2000 bit is for secure I think
                lo_expire = getlong()
                hi_expire = getlong()
                lo_create = getlong()
                hi_create = getlong()
                sep = getstr()
                # a well-formed record ends with a lone "*" separator line
                if "" in (key, value, domain_path, flags, hi_expire, lo_expire,
                          hi_create, lo_create, sep) or (sep != "*"):
                    break
                m = self.msie_domain_re.search(domain_path)
                if m:
                    domain = m.group(1)
                    path = m.group(2)
                    cookies.append({"KEY": key, "VALUE": value,
                                    "DOMAIN": domain, "PATH": path,
                                    "FLAGS": flags, "HIXP": hi_expire,
                                    "LOXP": lo_expire, "HICREATE": hi_create,
                                    "LOCREATE": lo_create})
        finally:
            cookies_fh.close()
        return cookies
    def load_cookie_data(self, filename,
                         ignore_discard=False, ignore_expires=False):
        """Load cookies from file containing actual cookie data.

        Old cookies are kept unless overwritten by newly loaded ones.

        You should not call this method if the delayload attribute is set.

        I think each of these files contain all cookies for one user, domain,
        and path.

        filename: file containing cookies -- usually found in a file like
         C:\WINNT\Profiles\joe\Cookies\joe@blah[1].txt
        """
        now = int(time.time())
        cookie_data = self._load_cookies_from_file(filename)
        for cookie in cookie_data:
            flags = cookie["FLAGS"]
            secure = ((flags & 0x2000) != 0)
            # expiry is stored as the two 32-bit halves of a Win32 FILETIME
            filetime = (cookie["HIXP"] << 32) + cookie["LOXP"]
            expires = epoch_time_offset_from_win32_filetime(filetime)
            if expires < now:
                discard = True
            else:
                discard = False
            domain = cookie["DOMAIN"]
            initial_dot = domain.startswith(".")
            if initial_dot:
                domain_specified = True
            else:
                # MSIE 5 does not record whether the domain cookie-attribute
                # was specified.
                # Assuming it wasn't is conservative, because with strict
                # domain matching this will match less frequently; with regular
                # Netscape tail-matching, this will match at exactly the same
                # times that domain_specified = True would.  It also means we
                # don't have to prepend a dot to achieve consistency with our
                # own & Mozilla's domain-munging scheme.
                domain_specified = False
            # assume path_specified is false
            # XXX is there other stuff in here? -- e.g. comment, commentURL?
            c = Cookie(0,
                       cookie["KEY"], cookie["VALUE"],
                       None, False,
                       domain, domain_specified, initial_dot,
                       cookie["PATH"], False,
                       secure,
                       expires,
                       discard,
                       None,
                       None,
                       {"flags": flags})
            if not ignore_discard and c.discard:
                continue
            if not ignore_expires and c.is_expired(now):
                continue
            # bypass subclass set_cookie to avoid re-triggering delayload
            CookieJar.set_cookie(self, c)
    def load_from_registry(self, ignore_discard=False, ignore_expires=False,
                           username=None):
        """Load cookies from the index.dat file located via the registry.

        username: only required on win9x
        """
        cookies_dir = regload(self.reg_path, self.reg_key)
        filename = os.path.normpath(os.path.join(cookies_dir, "INDEX.DAT"))
        self.load(filename, ignore_discard, ignore_expires, username)
    def _really_load(self, index, filename, ignore_discard, ignore_expires,
                     username):
        """Walk index.dat records and load the cookie files they reference."""
        now = int(time.time())
        if username is None:
            username = os.environ['USERNAME'].lower()
        cookie_dir = os.path.dirname(filename)
        data = index.read(256)
        if len(data) != 256:
            raise LoadError("%s file is too short" % filename)
        # Cookies' index.dat file starts with 32 bytes of signature
        # followed by an offset to the first record, stored as a little-
        # endian DWORD.
        sig, size, data = data[:32], data[32:36], data[36:]
        size = struct.unpack("<L", size)[0]
        # check that sig is valid
        if not self.magic_re.match(sig) or size != 0x4000:
            raise LoadError("%s ['%s' %s] does not seem to contain cookies" %
                            (str(filename), sig, size))
        # skip to start of first record
        index.seek(size, 0)
        sector = 128  # size of sector in bytes
        while 1:
            data = ""
            # Cookies are usually in two contiguous sectors, so read in two
            # sectors and adjust if not a Cookie.
            to_read = 2 * sector
            d = index.read(to_read)
            if len(d) != to_read:
                break
            data = data + d
            # Each record starts with a 4-byte signature and a count
            # (little-endian DWORD) of sectors for the record.
            sig, size, data = data[:4], data[4:8], data[8:]
            size = struct.unpack("<L", size)[0]
            to_read = (size - 2) * sector
##            from urllib import quote
##            print "data", quote(data)
##            print "sig", quote(sig)
##            print "size in sectors", size
##            print "size in bytes", size*sector
##            print "size in units of 16 bytes", (size*sector) / 16
##            print "size to read in bytes", to_read
##            print
            if sig != "URL ":
                # non-cookie record: padding, hash table, leaked entry or EOF
                assert sig in ("HASH", "LEAK", \
                               self.padding, "\x00\x00\x00\x00"), \
                       "unrecognized MSIE index.dat record: %s" % \
                       binary_to_str(sig)
                if sig == "\x00\x00\x00\x00":
                    # assume we've got all the cookies, and stop
                    break
                if sig == self.padding:
                    continue
                # skip the rest of this record
                assert to_read >= 0
                if size != 2:
                    assert to_read != 0
                    index.seek(to_read, 1)
                continue
            # read in rest of record if necessary
            if size > 2:
                more_data = index.read(to_read)
                if len(more_data) != to_read: break
                data = data + more_data
            # match only this user's cookie files within the record
            cookie_re = ("Cookie\:%s\@([\x21-\xFF]+).*?" % username +
                         "(%s\@[\x21-\xFF]+\.txt)" % username)
            m = re.search(cookie_re, data, re.I)
            if m:
                cookie_file = os.path.join(cookie_dir, m.group(2))
                if not self.delayload:
                    try:
                        self.load_cookie_data(cookie_file,
                                              ignore_discard, ignore_expires)
                    except (LoadError, IOError):
                        debug("error reading cookie file, skipping: %s",
                              cookie_file)
                else:
                    # defer reading this file until the domain is needed
                    domain = m.group(1)
                    i = domain.find("/")
                    if i != -1:
                        domain = domain[:i]
                    self._delayload_domains[domain] = (
                        cookie_file, ignore_discard, ignore_expires)
class MSIECookieJar(MSIEBase, FileCookieJar):
    """FileCookieJar that reads from the Windows MSIE cookies database.

    MSIECookieJar can read the cookie files of Microsoft Internet Explorer
    (MSIE) for Windows version 5 on Windows NT and version 6 on Windows XP and
    Windows 98.  Other configurations may also work, but are untested.  Saving
    cookies in MSIE format is NOT supported.  If you save cookies, they'll be
    in the usual Set-Cookie3 format, which you can read back in using an
    instance of the plain old CookieJar class.  Don't save using the same
    filename that you loaded cookies from, because you may succeed in
    clobbering your MSIE cookies index file!

    You should be able to have LWP share Internet Explorer's cookies like
    this (note you need to supply a username to load_from_registry if you're on
    Windows 9x or Windows ME):

    cj = MSIECookieJar(delayload=1)
    # find cookies index file in registry and load cookies from it
    cj.load_from_registry()
    opener = mechanize.build_opener(mechanize.HTTPCookieProcessor(cj))
    response = opener.open("http://example.com/")

    Iterating over a delayloaded MSIECookieJar instance will not cause any
    cookies to be read from disk.  To force reading of all cookies from disk,
    call read_all_cookies.  Note that the following methods iterate over self:
    clear_temporary_cookies, clear_expired_cookies, __len__, __repr__, __str__
    and as_string.

    Additional methods:

    load_from_registry(ignore_discard=False, ignore_expires=False,
                       username=None)
    load_cookie_data(filename, ignore_discard=False, ignore_expires=False)
    read_all_cookies()

    """
    def __init__(self, filename=None, delayload=False, policy=None):
        MSIEBase.__init__(self)
        FileCookieJar.__init__(self, filename, delayload, policy)
    def set_cookie(self, cookie):
        # force any pending delayload for this domain first, so the new
        # cookie is not clobbered by a later lazy load
        if self.delayload:
            self._delayload_domain(cookie.domain)
        CookieJar.set_cookie(self, cookie)
    def _cookies_for_request(self, request):
        """Return a list of cookies to be returned to server."""
        # include both loaded and still-delayloaded domains in the search
        domains = self._cookies.copy()
        domains.update(self._delayload_domains)
        domains = domains.keys()
        cookies = []
        for domain in domains:
            cookies.extend(self._cookies_for_domain(domain, request))
        return cookies
    def _cookies_for_domain(self, domain, request):
        """Return the cookies for *domain* applicable to *request*."""
        if not self._policy.domain_return_ok(domain, request):
            return []
        debug("Checking %s for cookies to return", domain)
        if self.delayload:
            # make sure this domain's cookie file has actually been read
            self._delayload_domain(domain)
        return CookieJar._cookies_for_domain(self, domain, request)
    def read_all_cookies(self):
        """Eagerly read in all cookies."""
        if self.delayload:
            for domain in self._delayload_domains.keys():
                self._delayload_domain(domain)
    def load(self, filename, ignore_discard=False, ignore_expires=False,
             username=None):
        """Load cookies from an MSIE 'index.dat' cookies index file.

        filename: full path to cookie index file
        username: only required on win9x

        """
        if filename is None:
            if self.filename is not None: filename = self.filename
            else: raise ValueError(MISSING_FILENAME_TEXT)
        index = open(filename, "rb")
        try:
            self._really_load(index, filename, ignore_discard, ignore_expires,
                              username)
        finally:
            index.close()
| ppizarror/TerminalDictionary | lib/mechanize/_msiecookiejar.py | Python | gpl-2.0 | 14,697 |
"""Test class for ISO downloads UI
:Requirement: Isodownload
:CaseAutomation: Automated
:CaseLevel: Acceptance
:CaseComponent: UI
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
from robottelo.decorators import run_only_on, stubbed, tier1
from robottelo.test import UITestCase
class ISODownloadTestCase(UITestCase):
    """Test class for iso download feature.

    NOTE: every case below is a manual placeholder (@stubbed,
    caseautomation: notautomated) -- the bodies are intentionally empty
    and only the docstring metadata is consumed by the test tooling.
    """
    @stubbed()
    @run_only_on('sat')
    @tier1
    def test_positive_download(self):
        """Downloading ISO from export

        :id: 47f20df7-f6f3-422b-b57b-3a5ef9cf62ad

        :Steps:
            1. find out the location where all iso's are kept
            2. check whether a valid iso can be downloaded

        :expectedresults: iso file is properly downloaded on your satellite
            6 system

        :caseautomation: notautomated

        :CaseImportance: Critical
        """
    @stubbed()
    @run_only_on('sat')
    @tier1
    def test_positive_upload(self):
        """Uploadng the iso successfully to the sat6 system

        :id: daf87a68-7c61-46f1-b4cc-021476080b6b

        :Steps:
            1. download the iso
            2. upload it to sat6 system

        :expectedresults: uploading iso to satellite6 is successful

        :caseautomation: notautomated

        :CaseImportance: Critical
        """
    @stubbed()
    @run_only_on('sat')
    @tier1
    def test_positive_mount(self):
        """Mounting iso to directory accessible to satellite6 works

        :id: 44d3c8fa-c01f-438c-b83e-8f6894befbbf

        :Steps:
            1. download the iso
            2. upload it to sat6 system
            3. mount it a local sat6 directory

        :expectedresults: iso is mounted to sat6 local directory

        :caseautomation: notautomated

        :CaseImportance: Critical
        """
    @stubbed()
    @run_only_on('sat')
    @tier1
    def test_positive_validate_cdn_url(self):
        """Validate that cdn url to file path works

        :id: 00157f61-1557-48a7-b7c9-6dac726eff94

        :Steps:
            1. after mounting the iso locally try to update the cdn url
            2. the path should be validated

        :expectedresults: cdn url path is validated

        :caseautomation: notautomated

        :CaseImportance: Critical
        """
    @stubbed()
    @run_only_on('sat')
    @tier1
    def test_positive_check_message(self):
        """Check if proper message is displayed after successful upload

        :id: 5ed31a26-b902-4352-900f-bb38eac95511

        :Steps:
            1. mount the iso to sat6
            2. update the cdn url with file path
            3. check if proper message is displayed

        :expectedresults: Asserting the message after successful upload

        :caseautomation: notautomated

        :CaseImportance: Critical
        """
    @stubbed()
    @run_only_on('sat')
    @tier1
    def test_positive_enable_repo(self):
        """Enable the repositories

        :id: e33e2796-0554-419f-b5a1-3e2c8e23e950

        :Steps:
            1. mount iso to directory
            2. update cdn url
            3. upload manifest
            4. try to enable redhat repositories

        :expectedresults: Redhat repositories are enabled

        :caseautomation: notautomated

        :CaseImportance: Critical
        """
    @stubbed()
    @run_only_on('sat')
    @tier1
    def test_positive_validate_checkboxes(self):
        """Check if enabling the checkbox works

        :id: 10b19405-f82e-4f95-869d-28d91cac1e6f

        :Steps:
            1. mount iso to directory
            2. update cdn url
            3. upload manifest
            4. Click the checkbox to enable redhat repositories
            5. redhat repository enabled

        :expectedresults: Checkbox functionality works

        :caseautomation: notautomated

        :CaseImportance: Critical
        """
    @stubbed()
    @run_only_on('sat')
    @tier1
    def test_positive_sync_repos(self):
        """Sync repos to local iso's

        :id: 96266438-4a52-4222-b573-96bd7cde1700

        :Steps:
            1. mount iso to directory
            2. update cdn url
            3. upload manifest
            4. try to enable redhat repositories
            5. sync the repos

        :expectedresults: Repos are synced after upload

        :caseautomation: notautomated

        :CaseImportance: Critical
        """
    @stubbed()
    @run_only_on('sat')
    @tier1
    def test_positive_disable_repo(self):
        """Disabling the repo works

        :id: 075700a7-fda0-41db-b9b7-3d6b29f63784

        :Steps:
            1. mount iso to directory
            2. update cdn url
            3. upload manifest
            4. try to enable redhat repositories
            5. sync the contents
            6. try disabling the repository

        :expectedresults: Assert disabling the repo

        :caseautomation: notautomated

        :CaseImportance: Critical
        """
| elyezer/robottelo | tests/foreman/ui/test_isodownload.py | Python | gpl-3.0 | 4,978 |
#*****************************************************************************
# Copyright 2004-2008 Steve Menard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#*****************************************************************************
from jpype import JException, java, JavaException, JProxy, JPackage
import unittest, common
import traceback
def throwIOException() :
    """Raise a Java java.io.IOException from Python via its PYEXC wrapper."""
    raise java.io.IOException.PYEXC("Test throw")
def throwByJavaException() :
    """Ask the Java-side test helper to throw an IOException."""
    JPackage('jpype').exc.ExceptionTest.throwIOException()
def suite() :
    """Build the unittest suite for this module."""
    return unittest.makeSuite(ExceptionTestCase)
class ExceptionTestCase(common.JPypeTestCase) :
    """Tests for Java exception propagation into and out of Python.

    NOTE: Python 2 only (print statements, 'except X, e' syntax).
    """
    def testExceptionThrown(self) :
        # a Java RuntimeException must surface as JavaException in Python
        try :
            self.jpype.exc.ExceptionTest.throwRuntime()
            assert False
        except JavaException, ex :
            print 'Caught a Java exception ...'
            if ex.javaClass() is java.lang.RuntimeException :
                print "Caught the exception", ex.message()
                print ex.stacktrace()
            else:
                assert False
        except Exception, ex:
            # wrong exception type caught: dump diagnostics and fail
            print ex.__class__, isinstance(ex, JavaException)
            print ex.__class__.__bases__[0].__bases__[0].__bases__
            print JavaException
            assert False
        print 'if here, everything is fine'
    def testExceptionByJavaClass(self) :
        # catching via JException(<java class>) must also work
        try :
            self.jpype.exc.ExceptionTest.throwRuntime()
            assert False
        except JException(java.lang.RuntimeException), ex :
            print "Caught the exception", ex.message(), "->", ex.javaClass()
            print ex.stacktrace()
        except Exception, ex:
            print ex
            assert False
#    def testThrowException(self) :
#        d = {"throwIOException" : throwIOException, }
#        p = JProxy(self.jpype.exc.ExceptionThrower, dict=d)
#
#        assert self.jpype.exc.ExceptionTest.delegateThrow(p)
    def testThrowException3(self) :
        # a Python proxy raising a Java exception must propagate to Java
        d = {"throwIOException" : throwByJavaException, }
        p = JProxy(self.jpype.exc.ExceptionThrower, dict=d)
        assert self.jpype.exc.ExceptionTest.delegateThrow(p)
| tomaslaz/KLMC_Analysis | thirdparty/JPype-0.5.4.2/test/jpypetest/exc.py | Python | gpl-3.0 | 2,462 |
#!/usr/bin/python
#coding: utf-8 -*-
# (c) 2013, Benno Joy <benno@ansible.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_subnet
short_description: Add/Remove subnet to an OpenStack network
extends_documentation_fragment: openstack
version_added: "2.0"
description:
- Add or Remove a subnet to an OpenStack network
options:
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
required: false
default: present
network_name:
description:
- Name of the network to which the subnet should be attached
required: true when state is 'present'
name:
description:
- The name of the subnet that should be created. Although Neutron
allows for non-unique subnet names, this module enforces subnet
name uniqueness.
required: true
cidr:
description:
- The CIDR representation of the subnet that should be assigned to
the subnet.
required: true when state is 'present'
default: None
ip_version:
description:
- The IP version of the subnet 4 or 6
required: false
default: 4
enable_dhcp:
description:
- Whether DHCP should be enabled for this subnet.
required: false
default: true
gateway_ip:
description:
- The ip that would be assigned to the gateway for this subnet
required: false
default: None
dns_nameservers:
description:
- List of DNS nameservers for this subnet.
required: false
default: None
allocation_pool_start:
description:
- From the subnet pool the starting address from which the IP should
be allocated.
required: false
default: None
allocation_pool_end:
description:
- From the subnet pool the last IP that should be assigned to the
virtual machines.
required: false
default: None
host_routes:
description:
- A list of host route dictionaries for the subnet.
required: false
default: None
requirements:
- "python >= 2.6"
- "shade"
'''
EXAMPLES = '''
# Create a new (or update an existing) subnet on the specified network
- os_subnet:
state=present
network_name=network1
name=net1subnet
cidr=192.168.0.0/24
dns_nameservers:
- 8.8.8.7
- 8.8.8.8
host_routes:
- destination: 0.0.0.0/0
nexthop: 123.456.78.9
- destination: 192.168.0.0/24
nexthop: 192.168.0.1
# Delete a subnet
- os_subnet:
state=absent
name=net1subnet
'''
def _needs_update(subnet, module):
"""Check for differences in the updatable values."""
enable_dhcp = module.params['enable_dhcp']
subnet_name = module.params['name']
pool_start = module.params['allocation_pool_start']
pool_end = module.params['allocation_pool_end']
gateway_ip = module.params['gateway_ip']
dns = module.params['dns_nameservers']
host_routes = module.params['host_routes']
curr_pool = subnet['allocation_pools'][0]
if subnet['enable_dhcp'] != enable_dhcp:
return True
if subnet_name and subnet['name'] != subnet_name:
return True
if pool_start and curr_pool['start'] != pool_start:
return True
if pool_end and curr_pool['end'] != pool_end:
return True
if gateway_ip and subnet['gateway_ip'] != gateway_ip:
return True
if dns and sorted(subnet['dns_nameservers']) != sorted(dns):
return True
if host_routes:
curr_hr = sorted(subnet['host_routes'], key=lambda t: t.keys())
new_hr = sorted(host_routes, key=lambda t: t.keys())
if sorted(curr_hr) != sorted(new_hr):
return True
return False
def _system_state_change(module, subnet):
state = module.params['state']
if state == 'present':
if not subnet:
return True
return _needs_update(subnet, module)
if state == 'absent' and subnet:
return True
return False
def main():
    """Ansible entry point: create, update or delete an OpenStack subnet.

    Exits via module.exit_json / module.fail_json; never returns normally.
    Supports check mode by reporting what _system_state_change predicts.
    """
    argument_spec = openstack_full_argument_spec(
        name=dict(required=True),
        network_name=dict(default=None),
        cidr=dict(default=None),
        ip_version=dict(default='4', choices=['4', '6']),
        enable_dhcp=dict(default='true', type='bool'),
        gateway_ip=dict(default=None),
        dns_nameservers=dict(default=None, type='list'),
        allocation_pool_start=dict(default=None),
        allocation_pool_end=dict(default=None),
        host_routes=dict(default=None, type='list'),
        state=dict(default='present', choices=['absent', 'present']),
    )
    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec,
                           supports_check_mode=True,
                           **module_kwargs)
    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')
    state = module.params['state']
    network_name = module.params['network_name']
    cidr = module.params['cidr']
    ip_version = module.params['ip_version']
    enable_dhcp = module.params['enable_dhcp']
    subnet_name = module.params['name']
    gateway_ip = module.params['gateway_ip']
    dns = module.params['dns_nameservers']
    pool_start = module.params['allocation_pool_start']
    pool_end = module.params['allocation_pool_end']
    host_routes = module.params['host_routes']
    # Check for required parameters when state == 'present'
    if state == 'present':
        for p in ['network_name', 'cidr']:
            if not module.params[p]:
                module.fail_json(msg='%s required with present state' % p)
    # Allocation pool must be given as a complete start/end pair or not at all.
    if pool_start and pool_end:
        pool = [dict(start=pool_start, end=pool_end)]
    elif pool_start or pool_end:
        module.fail_json(msg='allocation pool requires start and end values')
    else:
        pool = None
    try:
        cloud = shade.openstack_cloud(**module.params)
        subnet = cloud.get_subnet(subnet_name)
        if module.check_mode:
            # Check mode: report whether a change would occur, do nothing.
            module.exit_json(changed=_system_state_change(module, subnet))
        if state == 'present':
            if not subnet:
                subnet = cloud.create_subnet(network_name, cidr,
                                             ip_version=ip_version,
                                             enable_dhcp=enable_dhcp,
                                             subnet_name=subnet_name,
                                             gateway_ip=gateway_ip,
                                             dns_nameservers=dns,
                                             allocation_pools=pool,
                                             host_routes=host_routes)
                module.exit_json(changed=True, result="created")
            else:
                if _needs_update(subnet, module):
                    cloud.update_subnet(subnet['id'],
                                        subnet_name=subnet_name,
                                        enable_dhcp=enable_dhcp,
                                        gateway_ip=gateway_ip,
                                        dns_nameservers=dns,
                                        allocation_pools=pool,
                                        host_routes=host_routes)
                    module.exit_json(changed=True, result="updated")
                else:
                    module.exit_json(changed=False, result="success")
        elif state == 'absent':
            if not subnet:
                module.exit_json(changed=False, result="success")
            else:
                cloud.delete_subnet(subnet_name)
                module.exit_json(changed=True, result="deleted")
    except shade.OpenStackCloudException as e:
        # NOTE(review): e.message is Python 2 only; str(e) would be the
        # Python 3-safe spelling -- confirm target interpreter before changing.
        module.fail_json(msg=e.message)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| az7arul/ansible-modules-core | cloud/openstack/os_subnet.py | Python | gpl-3.0 | 8,648 |
#
# Copyright 2011-2019 Universidad Complutense de Madrid
#
# This file is part of Numina
#
# SPDX-License-Identifier: GPL-3.0+
# License-Filename: LICENSE.txt
#
"""Import objects by name"""
import importlib
import inspect
import warnings
def import_object(path):
    """Import an object given its fully qualified name.

    For a dotted *path*, the parent module is imported first and the last
    component is resolved as an attribute; if that fails, the whole path
    is imported as a submodule instead.
    """
    if '.' not in path:
        # Bare module name: nothing to resolve beyond the import itself.
        return importlib.import_module(path)
    parent, _, leaf = path.rpartition('.')
    module = importlib.import_module(parent)
    try:
        return getattr(module, leaf)
    except AttributeError:
        # The trailing component is not an attribute of the parent module;
        # assume it names a submodule and import it directly.
        return importlib.import_module(path)
def fully_qualified_name(obj, sep='.'):
    """Deprecated shim; delegates to numina.util.fqn.fully_qualified_name."""
    warnings.warn(
        "use numina.util.fqn.fully_qualified_name instead",
        DeprecationWarning, stacklevel=2
    )
    # Import lazily so merely importing this module stays cheap.
    from numina.util import fqn
    return fqn.fully_qualified_name(obj, sep)
| guaix-ucm/numina | numina/util/objimport.py | Python | gpl-3.0 | 1,031 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
try:
from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
from django.contrib.auth.models import User
else:
User = get_user_model()
user_orm_label = '%s.%s' % (User._meta.app_label, User._meta.object_name)
user_model_label = '%s.%s' % (User._meta.app_label, User._meta.module_name)
class Migration(SchemaMigration):
    """South schema migration: creates the initial 'Attachment' and
    'AttachmentRevision' tables for the wiki attachments plugin.

    NOTE(review): ``user_orm_label``, ``user_model_label`` and ``User`` are
    presumably defined in this module's header (swappable custom-user-model
    support) -- they are not visible in this chunk; confirm before editing.
    """

    def forwards(self, orm):
        # Adding model 'Attachment'
        db.create_table('attachments_attachment', (
            ('reusableplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['wiki.ReusablePlugin'], unique=True, primary_key=True)),
            ('current_revision', self.gf('django.db.models.fields.related.OneToOneField')(blank=True, related_name='current_set', unique=True, null=True, to=orm['attachments.AttachmentRevision'])),
            ('original_filename', self.gf('django.db.models.fields.CharField')(max_length=256, null=True, blank=True)),
        ))
        db.send_create_signal('attachments', ['Attachment'])

        # Adding model 'AttachmentRevision'
        db.create_table('attachments_attachmentrevision', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('revision_number', self.gf('django.db.models.fields.IntegerField')()),
            ('user_message', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('automatic_log', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('ip_address', self.gf('django.db.models.fields.IPAddressField')(max_length=15, null=True, blank=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm[user_orm_label], null=True, blank=True)),
            ('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('previous_revision', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['attachments.AttachmentRevision'], null=True, blank=True)),
            ('deleted', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('locked', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('attachment', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['attachments.Attachment'])),
            ('file', self.gf('django.db.models.fields.files.FileField')(max_length=100)),
            ('description', self.gf('django.db.models.fields.TextField')(blank=True)),
        ))
        db.send_create_signal('attachments', ['AttachmentRevision'])

    def backwards(self, orm):
        # Deleting model 'Attachment'
        db.delete_table('attachments_attachment')

        # Deleting model 'AttachmentRevision'
        db.delete_table('attachments_attachmentrevision')

    # Frozen ORM state; South uses this dict to build the `orm` object passed
    # to forwards()/backwards().  Auto-generated -- do not hand-edit fields.
    models = {
        'attachments.attachment': {
            'Meta': {'object_name': 'Attachment', '_ormbases': ['wiki.ReusablePlugin']},
            'current_revision': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'current_set'", 'unique': 'True', 'null': 'True', 'to': "orm['attachments.AttachmentRevision']"}),
            'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
            'reusableplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['wiki.ReusablePlugin']", 'unique': 'True', 'primary_key': 'True'})
        },
        'attachments.attachmentrevision': {
            'Meta': {'ordering': "('created',)", 'object_name': 'AttachmentRevision'},
            'attachment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['attachments.Attachment']"}),
            'automatic_log': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
            'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'previous_revision': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['attachments.AttachmentRevision']", 'null': 'True', 'blank': 'True'}),
            'revision_number': ('django.db.models.fields.IntegerField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'}),
            'user_message': ('django.db.models.fields.TextField', [], {'blank': 'True'})
        },
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        # Key is dynamic so the migration works with a swapped user model.
        user_model_label: {
            'Meta': {'object_name': User.__name__, 'db_table': "'%s'" % User._meta.db_table},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'wiki.article': {
            'Meta': {'object_name': 'Article'},
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'current_revision': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'current_set'", 'unique': 'True', 'null': 'True', 'to': "orm['wiki.ArticleRevision']"}),
            'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
            'group_read': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'group_write': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'other_read': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'other_write': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
        },
        'wiki.articleplugin': {
            'Meta': {'object_name': 'ArticlePlugin'},
            'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.Article']"}),
            'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'wiki.articlerevision': {
            'Meta': {'ordering': "('created',)", 'unique_together': "(('article', 'revision_number'),)", 'object_name': 'ArticleRevision'},
            'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.Article']"}),
            'automatic_log': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
            'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'previous_revision': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.ArticleRevision']", 'null': 'True', 'blank': 'True'}),
            'redirect': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'redirect_set'", 'null': 'True', 'to': "orm['wiki.Article']"}),
            'revision_number': ('django.db.models.fields.IntegerField', [], {}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'}),
            'user_message': ('django.db.models.fields.TextField', [], {'blank': 'True'})
        },
        'wiki.reusableplugin': {
            'Meta': {'object_name': 'ReusablePlugin', '_ormbases': ['wiki.ArticlePlugin']},
            'articleplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['wiki.ArticlePlugin']", 'unique': 'True', 'primary_key': 'True'}),
            'articles': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'shared_plugins_set'", 'symmetrical': 'False', 'to': "orm['wiki.Article']"})
        }
    }
complete_apps = ['attachments'] | habibmasuro/django-wiki | wiki/plugins/attachments/migrations/0001_initial.py | Python | gpl-3.0 | 12,406 |
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import datetime
import os
import re
import sickbeard
import generic
from sickbeard import logger, exceptions, helpers
from sickbeard import encodingKludge as ek
from sickbeard.exceptions import ex
import xml.etree.cElementTree as etree
class MediaBrowserMetadata(generic.GenericMetadata):
"""
Metadata generation class for Media Browser 2.x/3.x - Standard Mode.
The following file structure is used:
show_root/series.xml (show metadata)
show_root/folder.jpg (poster)
show_root/backdrop.jpg (fanart)
show_root/Season ##/folder.jpg (season thumb)
show_root/Season ##/filename.ext (*)
show_root/Season ##/metadata/filename.xml (episode metadata)
show_root/Season ##/metadata/filename.jpg (episode thumb)
"""
def __init__(self,
show_metadata=False,
episode_metadata=False,
fanart=False,
poster=False,
banner=False,
episode_thumbnails=False,
season_posters=False,
season_banners=False,
season_all_poster=False,
season_all_banner=False):
generic.GenericMetadata.__init__(self,
show_metadata,
episode_metadata,
fanart,
poster,
banner,
episode_thumbnails,
season_posters,
season_banners,
season_all_poster,
season_all_banner)
self.name = 'MediaBrowser'
self._ep_nfo_extension = 'xml'
self._show_metadata_filename = 'series.xml'
self.fanart_name = "backdrop.jpg"
self.poster_name = "folder.jpg"
# web-ui metadata template
self.eg_show_metadata = "series.xml"
self.eg_episode_metadata = "Season##\\metadata\\<i>filename</i>.xml"
self.eg_fanart = "backdrop.jpg"
self.eg_poster = "folder.jpg"
self.eg_banner = "banner.jpg"
self.eg_episode_thumbnails = "Season##\\metadata\\<i>filename</i>.jpg"
self.eg_season_posters = "Season##\\folder.jpg"
self.eg_season_banners = "Season##\\banner.jpg"
self.eg_season_all_poster = "<i>not supported</i>"
self.eg_season_all_banner = "<i>not supported</i>"
# Override with empty methods for unsupported features
def retrieveShowMetadata(self, folder):
# while show metadata is generated, it is not supported for our lookup
return (None, None, None)
def create_season_all_poster(self, show_obj):
pass
def create_season_all_banner(self, show_obj):
pass
def get_episode_file_path(self, ep_obj):
"""
Returns a full show dir/metadata/episode.xml path for MediaBrowser
episode metadata files
ep_obj: a TVEpisode object to get the path for
"""
if ek.ek(os.path.isfile, ep_obj.location):
xml_file_name = helpers.replaceExtension(ek.ek(os.path.basename, ep_obj.location), self._ep_nfo_extension)
metadata_dir_name = ek.ek(os.path.join, ek.ek(os.path.dirname, ep_obj.location), 'metadata')
xml_file_path = ek.ek(os.path.join, metadata_dir_name, xml_file_name)
else:
logger.log(u"Episode location doesn't exist: " + str(ep_obj.location), logger.DEBUG)
return ''
return xml_file_path
def get_episode_thumb_path(self, ep_obj):
"""
Returns a full show dir/metadata/episode.jpg path for MediaBrowser
episode thumbs.
ep_obj: a TVEpisode object to get the path from
"""
if ek.ek(os.path.isfile, ep_obj.location):
tbn_file_name = helpers.replaceExtension(ek.ek(os.path.basename, ep_obj.location), 'jpg')
metadata_dir_name = ek.ek(os.path.join, ek.ek(os.path.dirname, ep_obj.location), 'metadata')
tbn_file_path = ek.ek(os.path.join, metadata_dir_name, tbn_file_name)
else:
return None
return tbn_file_path
def get_season_poster_path(self, show_obj, season):
"""
Season thumbs for MediaBrowser go in Show Dir/Season X/folder.jpg
If no season folder exists, None is returned
"""
dir_list = [x for x in ek.ek(os.listdir, show_obj.location) if
ek.ek(os.path.isdir, ek.ek(os.path.join, show_obj.location, x))]
season_dir_regex = '^Season\s+(\d+)$'
season_dir = None
for cur_dir in dir_list:
# MediaBrowser 1.x only supports 'Specials'
# MediaBrowser 2.x looks to only support 'Season 0'
# MediaBrowser 3.x looks to mimic KODI/Plex support
if season == 0 and cur_dir == "Specials":
season_dir = cur_dir
break
match = re.match(season_dir_regex, cur_dir, re.I)
if not match:
continue
cur_season = int(match.group(1))
if cur_season == season:
season_dir = cur_dir
break
if not season_dir:
logger.log(u"Unable to find a season dir for season " + str(season), logger.DEBUG)
return None
logger.log(u"Using " + str(season_dir) + "/folder.jpg as season dir for season " + str(season), logger.DEBUG)
return ek.ek(os.path.join, show_obj.location, season_dir, 'folder.jpg')
def get_season_banner_path(self, show_obj, season):
"""
Season thumbs for MediaBrowser go in Show Dir/Season X/banner.jpg
If no season folder exists, None is returned
"""
dir_list = [x for x in ek.ek(os.listdir, show_obj.location) if
ek.ek(os.path.isdir, ek.ek(os.path.join, show_obj.location, x))]
season_dir_regex = '^Season\s+(\d+)$'
season_dir = None
for cur_dir in dir_list:
# MediaBrowser 1.x only supports 'Specials'
# MediaBrowser 2.x looks to only support 'Season 0'
# MediaBrowser 3.x looks to mimic KODI/Plex support
if season == 0 and cur_dir == "Specials":
season_dir = cur_dir
break
match = re.match(season_dir_regex, cur_dir, re.I)
if not match:
continue
cur_season = int(match.group(1))
if cur_season == season:
season_dir = cur_dir
break
if not season_dir:
logger.log(u"Unable to find a season dir for season " + str(season), logger.DEBUG)
return None
logger.log(u"Using " + str(season_dir) + "/banner.jpg as season dir for season " + str(season), logger.DEBUG)
return ek.ek(os.path.join, show_obj.location, season_dir, 'banner.jpg')
def _show_data(self, show_obj):
"""
Creates an elementTree XML structure for a MediaBrowser-style series.xml
returns the resulting data object.
show_obj: a TVShow instance to create the NFO for
"""
indexer_lang = show_obj.lang
# There's gotta be a better way of doing this but we don't wanna
# change the language value elsewhere
lINDEXER_API_PARMS = sickbeard.indexerApi(show_obj.indexer).api_params.copy()
lINDEXER_API_PARMS['actors'] = True
if indexer_lang and not indexer_lang == 'en':
lINDEXER_API_PARMS['language'] = indexer_lang
if show_obj.dvdorder != 0:
lINDEXER_API_PARMS['dvdorder'] = True
t = sickbeard.indexerApi(show_obj.indexer).indexer(**lINDEXER_API_PARMS)
tv_node = etree.Element("Series")
try:
myShow = t[int(show_obj.indexerid)]
except sickbeard.indexer_shownotfound:
logger.log(u"Unable to find show with id " + str(show_obj.indexerid) + " on " + sickbeard.indexerApi(
show_obj.indexer).name + ", skipping it", logger.ERROR)
raise
except sickbeard.indexer_error:
logger.log(
u"" + sickbeard.indexerApi(show_obj.indexer).name + " is down, can't use its data to make the NFO",
logger.ERROR)
raise
# check for title and id
if getattr(myShow, 'seriesname', None) is None or getattr(myShow, 'id', None) is None:
logger.log(u"Incomplete info for show with id " + str(show_obj.indexerid) + " on " + sickbeard.indexerApi(
show_obj.indexer).name + ", skipping it", logger.ERROR)
return False
indexerid = etree.SubElement(tv_node, "id")
if getattr(myShow, 'id', None) is not None:
indexerid.text = str(myShow['id'])
indexer = etree.SubElement(tv_node, "indexer")
if show_obj.indexer != None:
indexer.text = str(show_obj.indexer)
SeriesName = etree.SubElement(tv_node, "SeriesName")
if getattr(myShow, 'seriesname', None) is not None:
SeriesName.text = myShow['seriesname']
Status = etree.SubElement(tv_node, "Status")
if getattr(myShow, 'status', None) is not None:
Status.text = myShow['status']
Network = etree.SubElement(tv_node, "Network")
if getattr(myShow, 'network', None) is not None:
Network.text = myShow['network']
Airs_Time = etree.SubElement(tv_node, "Airs_Time")
if getattr(myShow, 'airs_time', None) is not None:
Airs_Time.text = myShow['airs_time']
Airs_DayOfWeek = etree.SubElement(tv_node, "Airs_DayOfWeek")
if getattr(myShow, 'airs_dayofweek', None) is not None:
Airs_DayOfWeek.text = myShow['airs_dayofweek']
FirstAired = etree.SubElement(tv_node, "FirstAired")
if getattr(myShow, 'firstaired', None) is not None:
FirstAired.text = myShow['firstaired']
ContentRating = etree.SubElement(tv_node, "ContentRating")
MPAARating = etree.SubElement(tv_node, "MPAARating")
certification = etree.SubElement(tv_node, "certification")
if getattr(myShow, 'contentrating', None) is not None:
ContentRating.text = myShow['contentrating']
MPAARating.text = myShow['contentrating']
certification.text = myShow['contentrating']
MetadataType = etree.SubElement(tv_node, "Type")
MetadataType.text = "Series"
Overview = etree.SubElement(tv_node, "Overview")
if getattr(myShow, 'overview', None) is not None:
Overview.text = myShow['overview']
PremiereDate = etree.SubElement(tv_node, "PremiereDate")
if getattr(myShow, 'firstaired', None) is not None:
PremiereDate.text = myShow['firstaired']
Rating = etree.SubElement(tv_node, "Rating")
if getattr(myShow, 'rating', None) is not None:
Rating.text = myShow['rating']
ProductionYear = etree.SubElement(tv_node, "ProductionYear")
if getattr(myShow, 'firstaired', None) is not None:
try:
year_text = str(datetime.datetime.strptime(myShow['firstaired'], '%Y-%m-%d').year)
if year_text:
ProductionYear.text = year_text
except:
pass
RunningTime = etree.SubElement(tv_node, "RunningTime")
Runtime = etree.SubElement(tv_node, "Runtime")
if getattr(myShow, 'runtime', None) is not None:
RunningTime.text = myShow['runtime']
Runtime.text = myShow['runtime']
IMDB_ID = etree.SubElement(tv_node, "IMDB_ID")
IMDB = etree.SubElement(tv_node, "IMDB")
IMDbId = etree.SubElement(tv_node, "IMDbId")
if getattr(myShow, 'imdb_id', None) is not None:
IMDB_ID.text = myShow['imdb_id']
IMDB.text = myShow['imdb_id']
IMDbId.text = myShow['imdb_id']
Zap2ItId = etree.SubElement(tv_node, "Zap2ItId")
if getattr(myShow, 'zap2it_id', None) is not None:
Zap2ItId.text = myShow['zap2it_id']
Genres = etree.SubElement(tv_node, "Genres")
for genre in myShow['genre'].split('|'):
if genre:
cur_genre = etree.SubElement(Genres, "Genre")
cur_genre.text = genre
Genre = etree.SubElement(tv_node, "Genre")
if getattr(myShow, 'genre', None) is not None:
Genre.text = "|".join([x for x in myShow["genre"].split('|') if x])
Studios = etree.SubElement(tv_node, "Studios")
Studio = etree.SubElement(Studios, "Studio")
if getattr(myShow, 'network', None) is not None:
Studio.text = myShow['network']
Persons = etree.SubElement(tv_node, "Persons")
if getattr(myShow, 'actors', None) is not None:
for actor in myShow['_actors']:
cur_actor = etree.SubElement(Persons, "Person")
cur_actor_name = etree.SubElement(cur_actor, "Name")
if getattr(actor, 'name', None):
cur_actor_name.text = actor['name'].strip()
cur_actor_type = etree.SubElement(cur_actor, "Type")
cur_actor_type.text = "Actor"
cur_actor_role = etree.SubElement(cur_actor, "Role")
if getattr(actor, 'role', None):
cur_actor_role.text = actor['role']
helpers.indentXML(tv_node)
data = etree.ElementTree(tv_node)
return data
def _ep_data(self, ep_obj):
"""
Creates an elementTree XML structure for a MediaBrowser style episode.xml
and returns the resulting data object.
show_obj: a TVShow instance to create the NFO for
"""
eps_to_write = [ep_obj] + ep_obj.relatedEps
persons_dict = {}
persons_dict['Director'] = []
persons_dict['GuestStar'] = []
persons_dict['Writer'] = []
indexer_lang = ep_obj.show.lang
try:
lINDEXER_API_PARMS = sickbeard.indexerApi(ep_obj.show.indexer).api_params.copy()
lINDEXER_API_PARMS['actors'] = True
if indexer_lang and not indexer_lang == 'en':
lINDEXER_API_PARMS['language'] = indexer_lang
if ep_obj.show.dvdorder != 0:
lINDEXER_API_PARMS['dvdorder'] = True
t = sickbeard.indexerApi(ep_obj.show.indexer).indexer(**lINDEXER_API_PARMS)
myShow = t[ep_obj.show.indexerid]
except sickbeard.indexer_shownotfound, e:
raise exceptions.ShowNotFoundException(e.message)
except sickbeard.indexer_error, e:
logger.log(u"Unable to connect to " + sickbeard.indexerApi(
ep_obj.show.indexer).name + " while creating meta files - skipping - " + ex(e), logger.ERROR)
return False
rootNode = etree.Element("Item")
# write an MediaBrowser XML containing info for all matching episodes
for curEpToWrite in eps_to_write:
try:
myEp = myShow[curEpToWrite.season][curEpToWrite.episode]
except (sickbeard.indexer_episodenotfound, sickbeard.indexer_seasonnotfound):
logger.log(u"Unable to find episode " + str(curEpToWrite.season) + "x" + str(
curEpToWrite.episode) + " on " + sickbeard.indexerApi(
ep_obj.show.indexer).name + ".. has it been removed? Should I delete from db?")
return None
if curEpToWrite == ep_obj:
# root (or single) episode
# default to today's date for specials if firstaired is not set
if getattr(myEp, 'firstaired', None) is None and ep_obj.season == 0:
myEp['firstaired'] = str(datetime.date.fromordinal(1))
if getattr(myEp, 'episodename', None) is None or getattr(myEp, 'firstaired', None) is None:
return None
episode = rootNode
EpisodeName = etree.SubElement(episode, "EpisodeName")
if curEpToWrite.name != None:
EpisodeName.text = curEpToWrite.name
else:
EpisodeName.text = ""
EpisodeNumber = etree.SubElement(episode, "EpisodeNumber")
EpisodeNumber.text = str(ep_obj.episode)
if ep_obj.relatedEps:
EpisodeNumberEnd = etree.SubElement(episode, "EpisodeNumberEnd")
EpisodeNumberEnd.text = str(curEpToWrite.episode)
SeasonNumber = etree.SubElement(episode, "SeasonNumber")
SeasonNumber.text = str(curEpToWrite.season)
if not ep_obj.relatedEps:
absolute_number = etree.SubElement(episode, "absolute_number")
if getattr(myEp, 'absolute_number', None) is not None:
absolute_number.text = myEp['absolute_number']
FirstAired = etree.SubElement(episode, "FirstAired")
if curEpToWrite.airdate != datetime.date.fromordinal(1):
FirstAired.text = str(curEpToWrite.airdate)
else:
FirstAired.text = ""
MetadataType = etree.SubElement(episode, "Type")
MetadataType.text = "Episode"
Overview = etree.SubElement(episode, "Overview")
if curEpToWrite.description != None:
Overview.text = curEpToWrite.description
else:
Overview.text = ""
if not ep_obj.relatedEps:
Rating = etree.SubElement(episode, "Rating")
if getattr(myEp, 'rating', None) is not None:
Rating.text = myEp['rating']
IMDB_ID = etree.SubElement(episode, "IMDB_ID")
IMDB = etree.SubElement(episode, "IMDB")
IMDbId = etree.SubElement(episode, "IMDbId")
if getattr(myShow, 'imdb_id', None) is not None:
IMDB_ID.text = myShow['imdb_id']
IMDB.text = myShow['imdb_id']
IMDbId.text = myShow['imdb_id']
indexerid = etree.SubElement(episode, "id")
indexerid.text = str(curEpToWrite.indexerid)
indexer = etree.SubElement(episode, "indexer")
indexer.text = str(curEpToWrite.show.indexer)
Persons = etree.SubElement(episode, "Persons")
Language = etree.SubElement(episode, "Language")
try:
Language.text = myEp['language']
except:
Language.text = 'en' # tvrage api doesn't provide language so we must assume a value here
thumb = etree.SubElement(episode, "filename")
# TODO: See what this is needed for.. if its still needed
# just write this to the NFO regardless of whether it actually exists or not
# note: renaming files after nfo generation will break this, tough luck
thumb_text = self.get_episode_thumb_path(ep_obj)
if thumb_text:
thumb.text = thumb_text
else:
# append data from (if any) related episodes
EpisodeNumberEnd.text = str(curEpToWrite.episode)
if curEpToWrite.name:
if not EpisodeName.text:
EpisodeName.text = curEpToWrite.name
else:
EpisodeName.text = EpisodeName.text + ", " + curEpToWrite.name
if curEpToWrite.description:
if not Overview.text:
Overview.text = curEpToWrite.description
else:
Overview.text = Overview.text + "\r" + curEpToWrite.description
# collect all directors, guest stars and writers
if getattr(myEp, 'director', None) is not None:
persons_dict['Director'] += [x.strip() for x in myEp['director'].split('|') if x]
if getattr(myEp, 'gueststars', None) is not None:
persons_dict['GuestStar'] += [x.strip() for x in myEp['gueststars'].split('|') if x]
if getattr(myEp, 'writer', None) is not None:
persons_dict['Writer'] += [x.strip() for x in myEp['writer'].split('|') if x]
# fill in Persons section with collected directors, guest starts and writers
for person_type, names in persons_dict.iteritems():
# remove doubles
names = list(set(names))
for cur_name in names:
Person = etree.SubElement(Persons, "Person")
cur_person_name = etree.SubElement(Person, "Name")
cur_person_name.text = cur_name
cur_person_type = etree.SubElement(Person, "Type")
cur_person_type.text = person_type
helpers.indentXML(rootNode)
data = etree.ElementTree(rootNode)
return data
# present a standard "interface" from the module
metadata_class = MediaBrowserMetadata
| bckwltn/SickRage | sickbeard/metadata/mediabrowser.py | Python | gpl-3.0 | 22,239 |
# -*- coding: iso-8859-1 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Conector para rapidvideo
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urllib,re
from core import scrapertools
from core import logger
from lib.jsbeautifier.unpackers import packer
def test_video_exists( page_url ):
    """Check whether the video referenced by page_url still exists on RapidVideo.

    Returns (True, "") when available, otherwise (False, <reason>).
    """
    logger.info( "[rapidvideo.py] test_video_exists(page_url='%s')" % page_url )

    video_id = scrapertools.get_match( page_url, 'org/([A-Za-z0-9]+)' )
    embed_url = 'http://www.rapidvideo.org/embed-%s-607x360.html' % video_id
    page = scrapertools.cache_page( embed_url )

    if "The file was removed from RapidVideo" in page:
        return False, "The file not exists or was removed from RapidVideo."

    return True, ""
def get_video_url( page_url, premium = False, user="", password="", video_password="" ):
    """Resolve the direct media url for a rapidvideo.org page.

    Returns a list of [label, url] pairs (a single entry for this server).

    BUGFIX: the original built video_urls twice -- a first, dead pass labeled
    "[fastvideo.me]" and logged as "[fastvideo.py]" (copy-paste leftover from
    the fastvideo connector) whose result was discarded.  Only the
    "[rapidvideo.org]" pass is kept.
    """
    logger.info( "[rapidvideo.py] url=" + page_url )
    video_id = scrapertools.get_match( page_url, 'org/([A-Za-z0-9]+)' )
    url = 'http://www.rapidvideo.org/embed-%s-607x360.html' % video_id
    data = scrapertools.cache_page( url )

    # the media url is hidden inside p,a,c,k,e,d javascript; unpack it first
    packed = scrapertools.get_match( data, "<script type='text/javascript'>eval.function.p,a,c,k,e,.*?</script>" )
    unpacked = packer.unpack( packed )
    media_url = scrapertools.get_match( unpacked, 'file:"([^"]+)"' )

    video_urls = []
    video_urls.append( [ scrapertools.get_filename_from_url( media_url )[-4:] + " [rapidvideo.org]", media_url ] )

    for video_url in video_urls:
        logger.info( "[rapidvideo.py] %s - %s" % ( video_url[0], video_url[1] ) )

    return video_urls
# Find videos from this server in the given text
def find_videos( data ):
    """Scan *data* for rapidvideo.org links and return a list of
    [title, url, server] entries, skipping duplicate urls."""
    #http://www.rapidvideo.org/xr1nb7cfh58a
    pattern = 'rapidvideo.org/([A-Za-z0-9]+)'
    logger.info( "[rapidvideo.py] find_videos #" + pattern + "#" )

    seen = set()
    results = []
    for video_code in re.compile( pattern, re.DOTALL ).findall( data ):
        url = "http://www.rapidvideo.org/" + video_code
        if url in seen:
            logger.info( " url duplicada=" + url )
            continue
        logger.info( " url=" + url )
        seen.add( url )
        results.append( [ "[rapidvideo]", url, 'rapidvideo' ] )

    return results
def test():
    """Smoke test: resolving a known video page must yield at least one url."""
    return len( get_video_url( "http://www.rapidvideo.org/xr1nb7cfh58a" ) ) > 0
| dentaku65/plugin.video.pelisalacarta.it | servers/rapidvideo.py | Python | gpl-3.0 | 2,858 |
"""autogenerated by genpy from rosserial_msgs/TopicInfo.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class TopicInfo(genpy.Message):
  """Auto-generated genpy message class for rosserial_msgs/TopicInfo.
  Do not hand-edit; regenerate from the .msg definition instead.
  """
  # Identity of the message type on the wire
  _md5sum = "0ad51f88fc44892f8c10684077646005"
  _type = "rosserial_msgs/TopicInfo"
  _has_header = False #flag to mark the presence of a Header object
  # Verbatim text of the .msg definition this class was generated from
  _full_text = """# special topic_ids
uint16 ID_PUBLISHER=0
uint16 ID_SUBSCRIBER=1
uint16 ID_SERVICE_SERVER=2
uint16 ID_SERVICE_CLIENT=4
uint16 ID_PARAMETER_REQUEST=6
uint16 ID_LOG=7
uint16 ID_TIME=10
uint16 ID_TX_STOP=11

# The endpoint ID for this topic
uint16 topic_id

string topic_name
string message_type

# MD5 checksum for this message type
string md5sum

# size of the buffer message must fit in
int32 buffer_size

"""
  # Pseudo-constants
  ID_PUBLISHER = 0
  ID_SUBSCRIBER = 1
  ID_SERVICE_SERVER = 2
  ID_SERVICE_CLIENT = 4
  ID_PARAMETER_REQUEST = 6
  ID_LOG = 7
  ID_TIME = 10
  ID_TX_STOP = 11

  # Message fields, in .msg declaration order, with their ROS types
  __slots__ = ['topic_id','topic_name','message_type','md5sum','buffer_size']
  _slot_types = ['uint16','string','string','string','int32']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
topic_id,topic_name,message_type,md5sum,buffer_size
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(TopicInfo, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.topic_id is None:
self.topic_id = 0
if self.topic_name is None:
self.topic_name = ''
if self.message_type is None:
self.message_type = ''
if self.md5sum is None:
self.md5sum = ''
if self.buffer_size is None:
self.buffer_size = 0
else:
self.topic_id = 0
self.topic_name = ''
self.message_type = ''
self.md5sum = ''
self.buffer_size = 0
  def _get_types(self):
    """
    internal API method

    Returns the ROS type string for each slot, in __slots__ order.
    """
    return self._slot_types
  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    # Wire layout (little-endian): uint16 topic_id, then three
    # length-prefixed utf-8 strings, then int32 buffer_size.
    # NOTE(review): _struct_H / _struct_i are presumably precompiled
    # struct.Struct objects defined at the bottom of this module (not
    # visible in this chunk) -- confirm before editing.
    try:
      buff.write(_struct_H.pack(self.topic_id))
      _x = self.topic_name
      length = len(_x)
      # encode unicode to utf-8 bytes and recompute the byte length
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      # uint32 length prefix followed by the raw bytes
      if python3:
        buff.write(struct.pack('<I%sB'%length, length, *_x))
      else:
        buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self.message_type
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      if python3:
        buff.write(struct.pack('<I%sB'%length, length, *_x))
      else:
        buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self.md5sum
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      if python3:
        buff.write(struct.pack('<I%sB'%length, length, *_x))
      else:
        buff.write(struct.pack('<I%ss'%length, length, _x))
      buff.write(_struct_i.pack(self.buffer_size))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``

    Mirrors serialize(): uint16 topic_id, three uint32-length-prefixed
    strings, int32 buffer_size. Raises genpy.DeserializationError when
    the buffer is too short or malformed.
    """
    try:
      end = 0
      # uint16 topic_id
      start = end
      end += 2
      (self.topic_id,) = _struct_H.unpack(str[start:end])
      # length-prefixed topic_name
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.topic_name = str[start:end].decode('utf-8')
      else:
        self.topic_name = str[start:end]
      # length-prefixed message_type
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.message_type = str[start:end].decode('utf-8')
      else:
        self.message_type = str[start:end]
      # length-prefixed md5sum
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.md5sum = str[start:end].decode('utf-8')
      else:
        self.md5sum = str[start:end]
      # int32 buffer_size
      start = end
      end += 4
      (self.buffer_size,) = _struct_i.unpack(str[start:end])
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module

    Identical to serialize(): the numpy argument is accepted for genpy
    API symmetry but unused, since this message has no array fields
    (the body never references it).
    """
    try:
      buff.write(_struct_H.pack(self.topic_id))
      _x = self.topic_name
      length = len(_x)
      # uint32 length prefix + utf-8 bytes for each string field
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      if python3:
        buff.write(struct.pack('<I%sB'%length, length, *_x))
      else:
        buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self.message_type
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      if python3:
        buff.write(struct.pack('<I%sB'%length, length, *_x))
      else:
        buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self.md5sum
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      if python3:
        buff.write(struct.pack('<I%sB'%length, length, *_x))
      else:
        buff.write(struct.pack('<I%ss'%length, length, _x))
      buff.write(_struct_i.pack(self.buffer_size))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module

    Identical to deserialize(): the numpy argument is accepted for genpy
    API symmetry but unused, since this message has no array fields.
    """
    try:
      end = 0
      # uint16 topic_id
      start = end
      end += 2
      (self.topic_id,) = _struct_H.unpack(str[start:end])
      # length-prefixed topic_name
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.topic_name = str[start:end].decode('utf-8')
      else:
        self.topic_name = str[start:end]
      # length-prefixed message_type
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.message_type = str[start:end].decode('utf-8')
      else:
        self.message_type = str[start:end]
      # length-prefixed md5sum
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.md5sum = str[start:end].decode('utf-8')
      else:
        self.md5sum = str[start:end]
      # int32 buffer_size
      start = end
      end += 4
      (self.buffer_size,) = _struct_i.unpack(str[start:end])
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
# Pre-compiled struct packers shared by the (de)serialization methods above.
_struct_I = genpy.struct_I
_struct_i = struct.Struct("<i")  # little-endian int32 (buffer_size)
_struct_H = struct.Struct("<H")  # little-endian uint16 (topic_id)
| blutjens/perc_neuron_ros_ur10 | pn_ros/bjorn_ws/devel/lib/python2.7/dist-packages/rosserial_msgs/msg/_TopicInfo.py | Python | gpl-3.0 | 7,843 |
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from optparse import make_option
import random
import math
from django.contrib.gis.geos import Point
from treemap.models import Plot, Tree, Species
from treemap.management.util import InstanceDataCommand
class Command(InstanceDataCommand):
    # Management command that seeds a treemap instance with randomly
    # placed plots and (optionally) trees — bulk test-data generation.
    option_list = InstanceDataCommand.option_list + (
        make_option('-r', '--radius',
                    action='store',
                    type='int',
                    dest='radius',
                    default=5000,
                    help='Number of meters from the center'),
        make_option('-n', '--number-of-trees',
                    action='store',
                    type='int',
                    dest='n',
                    default=100000,
                    help='Number of trees to create'),
        make_option('-p', '--prob-of-tree',
                    action='store',
                    type='int',
                    dest='ptree',
                    default=50,
                    help=('Probability that a given plot will '
                          'have a tree (0-100)')),
        make_option('-s', '--prob-of-species',
                    action='store',
                    type='int',
                    dest='pspecies',
                    default=50,
                    help=('Probability that a given tree will '
                          'have a species (0-100)')),
        make_option('-D', '--prob-of-diameter',
                    action='store',
                    type='int',
                    dest='pdiameter',
                    default=10,
                    help=('Probability that a given tree will '
                          'have a diameter (0-100)')))

    def handle(self, *args, **options):
        """ Create some seed data """
        instance, user = self.setup_env(*args, **options)

        species_qs = instance.scope_model(Species)

        n = options['n']
        self.stdout.write("Will create %s plots" % n)

        # Clamp a 0-100 integer option into a probability in [0, 1].
        get_prob = lambda option: float(min(100, max(0, option))) / 100.0
        tree_prob = get_prob(options['ptree'])
        species_prob = get_prob(options['pspecies'])
        diameter_prob = get_prob(options['pdiameter'])
        max_radius = options['radius']

        center_x = instance.center.x
        center_y = instance.center.y

        # ct/cp count created trees/plots for the final summary line.
        ct = 0
        cp = 0
        for i in xrange(0, n):
            mktree = random.random() < tree_prob
            # NOTE(review): gauss(0, max_radius) is unbounded, so some
            # plots can land farther than --radius meters from the
            # center (and radius may be negative, mirroring the angle)
            # — confirm this spread is intended.
            radius = random.gauss(0.0, max_radius)

            theta = random.random() * 2.0 * math.pi

            x = math.cos(theta) * radius + center_x
            y = math.sin(theta) * radius + center_y

            plot = Plot(instance=instance,
                        geom=Point(x, y))

            plot.save_with_user(user)
            cp += 1

            if mktree:
                add_species = random.random() < species_prob
                if add_species:
                    species = random.choice(species_qs)
                else:
                    species = None

                add_diameter = random.random() < diameter_prob
                if add_diameter:
                    diameter = 2 + random.random() * 18
                else:
                    diameter = None

                tree = Tree(plot=plot,
                            species=species,
                            diameter=diameter,
                            instance=instance)
                tree.save_with_user(user)

                ct += 1

        self.stdout.write("Created %s trees and %s plots" % (ct, cp))
| johnsonc/OTM2 | opentreemap/treemap/management/commands/random_trees.py | Python | gpl-3.0 | 3,642 |
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: ovh_ip_loadbalancing_backend
short_description: Manage OVH IP LoadBalancing backends
description:
- Manage OVH (French European hosting provider) LoadBalancing IP backends
version_added: "2.2"
author: Pascal HERAUD @pascalheraud
notes:
- Uses the python OVH Api U(https://github.com/ovh/python-ovh).
    You have to create an application (a key and secret) with a consumer
    key as described in U(https://eu.api.ovh.com/g934.first_step_with_api)
requirements:
- ovh > 0.3.5
options:
name:
required: true
description:
- Name of the LoadBalancing internal name (ip-X.X.X.X)
backend:
required: true
description:
- The IP address of the backend to update / modify / delete
state:
required: false
default: present
choices: ['present', 'absent']
description:
- Determines whether the backend is to be created/modified
or deleted
probe:
required: false
default: none
choices: ['none', 'http', 'icmp' , 'oco']
description:
- Determines the type of probe to use for this backend
weight:
required: false
default: 8
description:
- Determines the weight for this backend
endpoint:
required: true
description:
- The endpoint to use ( for instance ovh-eu)
application_key:
required: true
description:
- The applicationKey to use
application_secret:
required: true
description:
- The application secret to use
consumer_key:
required: true
description:
- The consumer key to use
timeout:
required: false
default: 120
description:
- The timeout in seconds used to wait for a task to be
completed.
'''
EXAMPLES = '''
# Adds or modify the backend '212.1.1.1' to a
# loadbalancing 'ip-1.1.1.1'
- ovh_ip_loadbalancing:
name: ip-1.1.1.1
backend: 212.1.1.1
state: present
probe: none
weight: 8
endpoint: ovh-eu
application_key: yourkey
application_secret: yoursecret
consumer_key: yourconsumerkey
# Removes a backend '212.1.1.1' from a loadbalancing 'ip-1.1.1.1'
- ovh_ip_loadbalancing:
name: ip-1.1.1.1
backend: 212.1.1.1
state: absent
endpoint: ovh-eu
application_key: yourkey
application_secret: yoursecret
consumer_key: yourconsumerkey
'''
RETURN = '''
'''
import time
try:
import ovh
import ovh.exceptions
from ovh.exceptions import APIError
HAS_OVH = True
except ImportError:
HAS_OVH = False
def getOvhClient(ansibleModule):
    """Build an ovh.Client from the module's connection parameters
    (endpoint, application key/secret and consumer key)."""
    params = ansibleModule.params
    return ovh.Client(
        endpoint=params.get('endpoint'),
        application_key=params.get('application_key'),
        application_secret=params.get('application_secret'),
        consumer_key=params.get('consumer_key')
    )
def waitForNoTask(client, name, timeout):
    """Poll the OVH API until load balancing *name* has no pending task.

    Returns True once the task queue is empty, False when roughly
    *timeout* seconds elapse first (polling once per second).
    """
    remaining = timeout
    while True:
        pending = client.get('/ip/loadBalancing/{0}/task'.format(name))
        if not pending:
            return True
        time.sleep(1)  # Delay for 1 sec
        remaining -= 1
        if remaining < 0:
            return False
def main():
    """Create, update or delete a backend of an OVH IP LoadBalancing.

    Flow: validate parameters, verify the load balancing exists, wait for
    pending tasks to drain, then apply the requested state. Exits via
    module.exit_json(changed=...) on success or module.fail_json(...) on
    any API error or timeout.
    """
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True),
            backend=dict(required=True),
            weight=dict(default=8, type='int'),
            probe=dict(default='none',
                       choices=['none', 'http', 'icmp', 'oco']),
            state=dict(default='present', choices=['present', 'absent']),
            endpoint=dict(required=True),
            application_key=dict(required=True, no_log=True),
            application_secret=dict(required=True, no_log=True),
            consumer_key=dict(required=True, no_log=True),
            timeout=dict(default=120, type='int')
        )
    )

    if not HAS_OVH:
        # Fix: the two implicitly concatenated literals lacked a separating
        # space, producing "...python moduleis required...".
        module.fail_json(msg='ovh-api python module '
                         'is required to run this module ')

    # Get parameters
    name = module.params.get('name')
    state = module.params.get('state')
    backend = module.params.get('backend')
    # Fix: long() does not exist on Python 3; the option is already coerced
    # by type='int' above, so int() is sufficient and backward-compatible.
    weight = int(module.params.get('weight'))
    probe = module.params.get('probe')
    timeout = module.params.get('timeout')

    # Connect to OVH API
    client = getOvhClient(module)

    # Check that the load balancing exists
    try:
        loadBalancings = client.get('/ip/loadBalancing')
    except APIError as apiError:
        module.fail_json(
            msg='Unable to call OVH api for getting the list of loadBalancing, '
                'check application key, secret, consumerkey and parameters. '
                'Error returned by OVH api was : {0}'.format(apiError))

    if name not in loadBalancings:
        module.fail_json(msg='IP LoadBalancing {0} does not exist'.format(name))

    # Check that no task is pending before going on
    try:
        if not waitForNoTask(client, name, timeout):
            module.fail_json(
                msg='Timeout of {0} seconds while waiting for no pending '
                    'tasks before executing the module '.format(timeout))
    except APIError as apiError:
        module.fail_json(
            msg='Unable to call OVH api for getting the list of pending tasks '
                'of the loadBalancing, check application key, secret, consumerkey '
                'and parameters. Error returned by OVH api was : {0}'
            .format(apiError))

    try:
        backends = client.get('/ip/loadBalancing/{0}/backend'.format(name))
    except APIError as apiError:
        module.fail_json(
            msg='Unable to call OVH api for getting the list of backends '
                'of the loadBalancing, check application key, secret, consumerkey '
                'and parameters. Error returned by OVH api was : {0}'
            .format(apiError))

    backendExists = backend in backends
    moduleChanged = False
    if state == "absent":
        if backendExists:
            # Remove backend
            try:
                client.delete(
                    '/ip/loadBalancing/{0}/backend/{1}'.format(name, backend))
                if not waitForNoTask(client, name, timeout):
                    module.fail_json(
                        msg='Timeout of {0} seconds while waiting for completion '
                            'of removing backend task'.format(timeout))
            except APIError as apiError:
                module.fail_json(
                    msg='Unable to call OVH api for deleting the backend, '
                        'check application key, secret, consumerkey and '
                        'parameters. Error returned by OVH api was : {0}'
                    .format(apiError))
            moduleChanged = True
    else:
        if backendExists:
            # Get properties
            try:
                backendProperties = client.get(
                    '/ip/loadBalancing/{0}/backend/{1}'.format(name, backend))
            except APIError as apiError:
                module.fail_json(
                    msg='Unable to call OVH api for getting the backend properties, '
                        'check application key, secret, consumerkey and '
                        'parameters. Error returned by OVH api was : {0}'
                    .format(apiError))

            if (backendProperties['weight'] != weight):
                # Change weight
                try:
                    client.post(
                        '/ip/loadBalancing/{0}/backend/{1}/setWeight'
                        .format(name, backend), weight=weight)
                    if not waitForNoTask(client, name, timeout):
                        module.fail_json(
                            msg='Timeout of {0} seconds while waiting for completion '
                                'of setWeight to backend task'
                            .format(timeout))
                except APIError as apiError:
                    module.fail_json(
                        msg='Unable to call OVH api for updating the weight of the '
                            'backend, check application key, secret, consumerkey '
                            'and parameters. Error returned by OVH api was : {0}'
                        .format(apiError))
                moduleChanged = True

            if (backendProperties['probe'] != probe):
                # Change probe
                backendProperties['probe'] = probe
                try:
                    client.put(
                        '/ip/loadBalancing/{0}/backend/{1}'
                        .format(name, backend), probe=probe)
                    if not waitForNoTask(client, name, timeout):
                        module.fail_json(
                            msg='Timeout of {0} seconds while waiting for completion of '
                                'setProbe to backend task'
                            .format(timeout))
                except APIError as apiError:
                    # Fix: error message typo 'propbe' -> 'probe'.
                    module.fail_json(
                        msg='Unable to call OVH api for updating the probe of '
                            'the backend, check application key, secret, '
                            'consumerkey and parameters. Error returned by OVH api '
                            'was : {0}'
                        .format(apiError))
                moduleChanged = True
        else:
            # Creates backend
            try:
                try:
                    client.post('/ip/loadBalancing/{0}/backend'.format(name),
                                ipBackend=backend, probe=probe, weight=weight)
                except APIError as apiError:
                    module.fail_json(
                        msg='Unable to call OVH api for creating the backend, check '
                            'application key, secret, consumerkey and parameters. '
                            'Error returned by OVH api was : {0}'
                        .format(apiError))
                if not waitForNoTask(client, name, timeout):
                    module.fail_json(
                        msg='Timeout of {0} seconds while waiting for completion of '
                            'backend creation task'.format(timeout))
            except APIError as apiError:
                module.fail_json(
                    msg='Unable to call OVH api for creating the backend, check '
                        'application key, secret, consumerkey and parameters. '
                        'Error returned by OVH api was : {0}'.format(apiError))
            moduleChanged = True

    module.exit_json(changed=moduleChanged)
# import module snippets
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
main()
| t0mk/ansible | lib/ansible/modules/cloud/ovh/ovh_ip_loadbalancing_backend.py | Python | gpl-3.0 | 11,833 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012 Dag Wieers <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: debug
short_description: Print statements during execution
description:
- This module prints statements during execution and can be useful
for debugging variables or expressions without necessarily halting
the playbook.
- Useful for debugging together with the 'when:' directive.
- This module is also supported for Windows targets.
version_added: '0.8'
options:
msg:
description:
- The customized message that is printed. If omitted, prints a generic message.
type: str
default: 'Hello world!'
var:
description:
- A variable name to debug.
- Mutually exclusive with the C(msg) option.
- Be aware that this option already runs in Jinja2 context and has an implicit C({{ }}) wrapping,
so you should not be using Jinja2 delimiters unless you are looking for double interpolation.
type: str
verbosity:
description:
- A number that controls when the debug is run, if you set to 3 it will only run debug when -vvv or above
type: int
default: 0
version_added: '2.1'
notes:
- This module is also supported for Windows targets.
seealso:
- module: ansible.builtin.assert
- module: ansible.builtin.fail
author:
- Dag Wieers (@dagwieers)
- Michael DeHaan
'''
EXAMPLES = r'''
- name: Print the gateway for each host when defined
ansible.builtin.debug:
msg: System {{ inventory_hostname }} has gateway {{ ansible_default_ipv4.gateway }}
when: ansible_default_ipv4.gateway is defined
- name: Get uptime information
ansible.builtin.shell: /usr/bin/uptime
register: result
- name: Print return information from the previous task
ansible.builtin.debug:
var: result
verbosity: 2
- name: Display all variables/facts known for a host
ansible.builtin.debug:
var: hostvars[inventory_hostname]
verbosity: 4
- name: Prints two lines of messages, but only if there is an environment value set
ansible.builtin.debug:
msg:
- "Provisioning based on YOUR_KEY which is: {{ lookup('env', 'YOUR_KEY') }}"
- "These servers were built using the password of '{{ password_used }}'. Please retain this for later use."
'''
| Fale/ansible | lib/ansible/modules/debug.py | Python | gpl-3.0 | 2,420 |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import flt
def execute(filters=None):
	"""Report entry point: return (columns, rows) for the batch-wise
	stock balance, sorted by item, warehouse, then batch."""
	filters = filters or {}

	columns = get_columns(filters)
	item_map = get_item_details(filters)
	iwb_map = get_item_warehouse_batch_map(filters)

	data = []
	for item_code in sorted(iwb_map):
		item = item_map[item_code]
		for warehouse in sorted(iwb_map[item_code]):
			for batch_no in sorted(iwb_map[item_code][warehouse]):
				qty = iwb_map[item_code][warehouse][batch_no]
				data.append([
					item_code, item["item_name"], item["description"],
					warehouse, batch_no,
					qty.opening_qty, qty.in_qty, qty.out_qty, qty.bal_qty,
				])

	return columns, data
def get_columns(filters):
	"""Return the report column definitions as 'Label:Fieldtype/Options:Width' strings."""
	return [
		_("Item") + ":Link/Item:100",
		_("Item Name") + "::150",
		_("Description") + "::150",
		_("Warehouse") + ":Link/Warehouse:100",
		_("Batch") + ":Link/Batch:100",
		_("Opening Qty") + "::90",
		_("In Qty") + "::80",
		_("Out Qty") + "::80",
		_("Balance Qty") + "::90",
	]
def get_conditions(filters):
	"""Build the SQL WHERE-clause fragment for the upper posting-date bound.

	Both 'from_date' and 'to_date' must be present in *filters*; only
	'to_date' is embedded here ('from_date' is applied later when rows
	are bucketed in get_item_warehouse_batch_map).

	NOTE(review): 'to_date' is interpolated directly into the SQL string;
	if filters can ever carry untrusted input this is injectable —
	consider a parameterized query. TODO confirm callers sanitize.
	"""
	conditions = ""
	if not filters.get("from_date"):
		frappe.throw(_("'From Date' is required"))

	if filters.get("to_date"):
		conditions += " and posting_date <= '%s'" % filters["to_date"]
	else:
		frappe.throw(_("'To Date' is required"))

	return conditions
# Fetch every non-cancelled (docstatus < 2) stock ledger entry up to
# filters['to_date'], ordered by item and warehouse.
# NOTE(review): the fragment from get_conditions() is spliced in via
# %-formatting rather than bound parameters — see the note there.
def get_stock_ledger_entries(filters):
	conditions = get_conditions(filters)
	return frappe.db.sql("""select item_code, batch_no, warehouse,
		posting_date, actual_qty
		from `tabStock Ledger Entry`
		where docstatus < 2 %s order by item_code, warehouse""" %
		conditions, as_dict=1)
def get_item_warehouse_batch_map(filters):
	"""Aggregate stock ledger entries into a nested map:
	{item_code: {warehouse: {batch_no: qty_dict}}} where qty_dict holds
	opening/in/out/balance quantities relative to the filter date window.
	"""
	iwb_map = {}
	for entry in get_stock_ledger_entries(filters):
		batches = iwb_map.setdefault(entry.item_code, {}) \
			.setdefault(entry.warehouse, {})
		if entry.batch_no not in batches:
			batches[entry.batch_no] = frappe._dict({
				"opening_qty": 0.0, "in_qty": 0.0,
				"out_qty": 0.0, "bal_qty": 0.0
			})
		qty = batches[entry.batch_no]

		actual = flt(entry.actual_qty)
		if entry.posting_date < filters["from_date"]:
			# Movement before the window contributes to the opening balance.
			qty.opening_qty += actual
		elif filters["from_date"] <= entry.posting_date <= filters["to_date"]:
			# Movement inside the window is split into in/out columns.
			if actual > 0:
				qty.in_qty += actual
			else:
				qty.out_qty += abs(actual)

		# Balance accumulates every entry regardless of bucket.
		qty.bal_qty += actual

	return iwb_map
def get_item_details(filters):
	"""Map item code -> row (name, item_name, description) for all Items."""
	rows = frappe.db.sql("select name, item_name, description from tabItem",
		as_dict=1)
	# 'name' is the Item primary key, so each key appears exactly once.
	return dict((row.name, row) for row in rows)
| gangadharkadam/tailorerp | erpnext/stock/report/batch_wise_balance_history/batch_wise_balance_history.py | Python | agpl-3.0 | 2,666 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: David Coninckx <david@coninckx.com>
#
# The licence is in the file __openerp__.py
#
##############################################################################
from . import contracts
from . import project_compassion
from . import ir_ui_menu
| emgirardin/compassion-modules | sponsorship_tracking/models/__init__.py | Python | agpl-3.0 | 501 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2020, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import torch
@torch.no_grad()
def l1_regularization_step(params, lr, weight_decay=1e-3):
    """
    Apply an L1 (lasso) penalty update to ``params`` in place.

    Every parameter with ``requires_grad=True`` is shifted by
    ``-lr * weight_decay * sign(p)``, mirroring how `torch.optim.SGD`
    folds L2 weight decay into its update step:
    https://pytorch.org/docs/stable/_modules/torch/optim/sgd.html#SGD.step

    Intended usage is a separate step after the regular optimizer update:

        loss.backward()
        optimizer.step()
        l1_regularization_step(model.parameters(), lr=0.1, weight_decay=1e-3)

    :param params: iterable of parameters to update; entries with
        ``requires_grad=False`` are left untouched
    :param lr: learning rate, analogous to `lr` in `torch.optim.SGD`
    :param weight_decay: L1 penalty coefficient, analogous to the
        (L2) `weight_decay` parameter of `torch.optim.SGD`
    """
    step_size = lr * weight_decay
    for param in params:
        if not param.requires_grad:
            continue
        # Subgradient of |p| is sign(p): move each weight toward zero by
        # a fixed step (sign(0) == 0, so exact zeros stay put).
        param.data.sub_(torch.sign(param.data), alpha=step_size)
| subutai/nupic.research | src/nupic/research/frameworks/pytorch/regularization.py | Python | agpl-3.0 | 2,505 |
from __future__ import unicode_literals
import webnotes
from webnotes.utils import flt, fmt_money, cstr, cint
from selling.doctype.customer.customer import DocType
import datetime
from webnotes import msgprint, _
from selling.doctype.lead.lead import create_contact
from webnotes.model.code import get_obj
from webnotes.model.bean import getlist, copy_doclist
from selling.doctype.patient_encounter_entry.notification_schedular import get_encounters
from webnotes.model.doc import Document, make_autoname
class DocType():
	"""Controller for the 'Patient Encounter Entry' DocType (webnotes/ERPNext)."""
	def __init__(self, d, dl):
		# Standard webnotes controller constructor: keep references to the
		# main Document and its child doclist.
		self.doc, self.doclist = d, dl
	def autoname(self):
		"""Name the record '<default company> <series entry>', where the
		series pattern comes from the DocType's 'autoname' field and the
		company from the Global Defaults singleton."""
		entry = make_autoname(webnotes.conn.get_value('DocType', 'Patient Encounter Entry', 'autoname'))
		company = webnotes.conn.sql(""" select name from tabCompany
                        where name = (select value from tabSingles
                         where doctype = 'Global Defaults' and field = 'default_company') """)[0][0]
		self.doc.name = company + ' ' + entry
def validate(self):pass
# if not webnotes.conn.sql("select patient from `tabPatient Encounter Entry` where name = '%s'"%self.doc.name):
# self.send_notification()
def on_update(self):
patient_id = None
from datetime import datetime
if self.doc.status == 'Canceled':
webnotes.conn.sql("update `tabPatient Encounter Entry` set docstatus = '1' where name = '%s'"%(self.doc.name))
s1=(self.doc.start_time).split(':')
s2=(self.doc.end_time).split(':')
# date_a=cstr(datetime.combine(datetime.strptime(self.doc.encounter_date,'%Y-%m-%d').date(),datetime.strptime(s1[0]+":"+s1[1],'%H:%M').time()))
# date_b=cstr(datetime.combine(datetime.strptime(self.doc.encounter_date,'%Y-%m-%d').date(),datetime.strptime(s2[0]+":"+s2[1],'%H:%M').time()))
#webnotes.errprint(self.doc.entry_in_child)
if self.doc.new_user == 1 and not self.doc.new_patient:
patient_id = self.make_patient()
self.doc.new_patient=patient_id
self.create_new_contact()
self.create_customer(patient_id)
self.create_account_head(patient_id)
self.doc.save()
if self.doc.entry_in_child == 'False':
self.make_child_entry(patient_id)
#self.make_event()
if not self.doc.eventid:
self.create_child()
else:
webnotes.conn.sql("update `tabSlot Child` set slot='"+self.doc.appointment_slot+"', start_time='"+cstr(datetime.strptime(date_a,'%Y-%m-%d %H:%M:%S').strftime('%Y-%m-%d %H:%M'))+"', end_time='"+cstr(datetime.strptime(date_b,'%Y-%m-%d %H:%M:%S').strftime('%Y-%m-%d %H:%M'))+"' where encounter='"+self.doc.name+"'")
# webnotes.errprint(date_a)
webnotes.conn.sql("update `tabEvent` set starts_on='"+cstr(datetime.strptime(date_a,'%Y-%m-%d %H:%M:%S').strftime('%Y-%m-%d %H:%M'))+"', ends_on='"+cstr(datetime.strptime(date_b,'%Y-%m-%d %H:%M:%S').strftime('%Y-%m-%d %H:%M'))+"' where name='"+self.doc.eventid+"'")
if cint(self.doc.checked_in)==1: pass
# check_confirmed=webnotes.conn.sql("select true from `tabSlot Child` where slot='"+self.doc.appointment_slot+"' and modality='"+self.doc.encounter+"' and study='"+self.doc.study+"' and date_format(start_time,'%Y-%m-%d %H:%M')=date_format('"+date_a+"','%Y-%m-%d %H:%M') and date_format(end_time,'%Y-%m-%d %H:%M')=date_format('"+date_b+"','%Y-%m-%d %H:%M') and status='Confirm'",debug=1)
# if not check_confirmed:
# webnotes.conn.sql("update tabEvent set event_type='Confirm' where name='%s'"%self.doc.eventid)
# webnotes.conn.sql("update `tabSlot Child` set status='Confirm' where encounter='%s'"%self.doc.name)
# else:
# webnotes.msgprint("Selected slot is not available",raise_exception=1)
# get_encounters()
def send_notification(self):
mail_list = []
number = []
msg = """Hi %(patient)s, Your appointment has been schedule on %(encounter_date)s at time %(start_time)s
for study %(study)s on modality %(modality)s"""%{'patient': self.doc.patient, 'encounter_date':self.doc.encounter_date,
'start_time':self.doc.start_time, 'study':self.doc.study, 'modality':self.doc.modality}
technologiest_contact = webnotes.conn.sql("select cell_number, personal_email from tabEmployee where name = '%s'"%(self.doc.technologist),as_list=1)
patient_contact = webnotes.conn.sql("select mobile, email from `tabPatient Register` where name = '%s'"%(self.doc.patient),as_list=1)
# webnotes.errprint([technologiest_contact, patient_contact])
if mail_list:
mail_list.append(technologiest_contact[0][1])
mail_list.append(patient_contact[0][1])
if number:
number.append(technologiest_contact[0][0])
number.append(patient_contact[0][0])
self.send_mail(msg, mail_list)
self.send_sms(msg, number)
def send_mail(self, msg, mail_list):
from webnotes.utils.email_lib import sendmail
for id in mail_list:
if id:
sendmail(id, subject='Appoiontment Scheduling', msg = msg)
	def send_sms(self, msg, number):
		"""Send *msg* through the configured SMS gateway to every
		non-empty number in *number*.

		Gateway URL and static query parameters come from the
		'SMS Settings' singleton. NOTE(review): msg_parameter and
		receiver_parameter are fetched but never used, and the first
		`for num in number:pass` loop is dead code.
		"""
		ss = get_obj('SMS Settings', 'SMS Settings', with_children=1)
		# webnotes.errprint(ss)
		for num in number:pass
		# webnotes.errprint(['number',num])
		args = {}
		# Collect the gateway's static query-string parameters.
		for d in getlist(ss.doclist, 'static_parameter_details'):
			args[d.parameter] = d.value
		sms_url=webnotes.conn.get_value('SMS Settings', None, 'sms_gateway_url')
		msg_parameter=webnotes.conn.get_value('SMS Settings', None, 'message_parameter')
		receiver_parameter=webnotes.conn.get_value('SMS Settings', None, 'receiver_parameter')
		for num in number:
			if num:
				# NOTE(review): parameters are not URL-encoded; spaces in
				# msg/args yield a technically malformed URL — confirm the
				# gateway tolerates this.
				url = sms_url +"?user="+ args["user"] +"&senderID="+ args["sender ID"] +"&receipientno="+ num +"\
				&dcs="+ args["dcs"]+ "&msgtxt=" + msg +"&state=" +args["state"]
				# webnotes.errprint(url)
				import requests
				r = requests.get(url)
def create_new_contact(self):
details = {}
details['first_name'] = self.doc.first_name
details['email_id'] = self.doc.email or ''
details['mobile_no'] = self.doc.mobile or ''
details['doc']='Customer'
details['link']=self.doc.name or ''
create_contact(details)
def create_customer(self, patient_id):
from webnotes.model.doc import Document
d = Document('Customer')
d.customer_name = patient_id
d.full_name = self.doc.first_name
d.save()
	def get_company_abbr(self):
		"""Return the abbreviation ('abbr') of this encounter's Company."""
		return webnotes.conn.get_value('Company', self.doc.company, 'abbr')
def get_receivables_group(self):
g = webnotes.conn.sql("select receivables_group from tabCompany where name=%s", self.doc.company)
g = g and g[0][0] or ''
if not g:
msgprint("Update Company master, assign a default group for Receivables")
raise Exception
return g
	def create_account_head(self,patient_id):
		"""Create a ledger Account for the patient under the company's
		receivables group, skipping creation if it already exists.

		NOTE(review): the exists() check uses self.doc.name while the
		account is created with account_name=patient_id — these can
		diverge, allowing duplicates; confirm intended.
		NOTE(review): master_type 'Patient Ecounter Entry' looks like a
		typo for 'Encounter' but is a stored value — verify against the
		database before fixing.
		"""
		if self.doc.company :
			abbr = self.get_company_abbr()
			if not webnotes.conn.exists("Account", (self.doc.name + " - " + abbr)):
				parent_account = self.get_receivables_group()
				# create
				ac_bean = webnotes.bean({
					"doctype": "Account",
					'account_name': patient_id,
					'parent_account': parent_account,
					'group_or_ledger':'Ledger',
					'company':self.doc.company,
					'master_type':'Patient Ecounter Entry',
					'master_name':patient_id,
					"freeze_account": "No"
				})
				ac_bean.ignore_permissions = True
				ac_bean.insert()
				webnotes.msgprint(_("Account Head") + ": " + ac_bean.doc.name + _(" created"))
		else :
			webnotes.msgprint(_("Please Select Company under which you want to create account head"))
def create_child(self):
from datetime import datetime
# date_a=cstr(datetime.combine(datetime.strptime(self.doc.encounter_date,'%Y-%m-%d').date(),datetime.strptime(self.doc.start_time,'%H:%M').time()))
# date_b=cstr(datetime.combine(datetime.strptime(self.doc.encounter_date,'%Y-%m-%d').date(),datetime.strptime(self.doc.end_time,'%H:%M').time()))
if self.doc.appointment_slot:
# webnotes.errprint([self.doc.start_time])
check_confirmed=webnotes.conn.sql("select true from `tabSlot Child` where slot='"+self.doc.appointment_slot+"' and modality='"+self.doc.encounter+"' and study='"+self.doc.study+"' and date_format(start_time,'%Y-%m-%d %H:%M')=date_format('"+date_a+"','%Y-%m-%d %H:%M') and date_format(end_time,'%Y-%m-%d %H:%M')=date_format('"+date_b+"','%Y-%m-%d %H:%M') and status='Confirm'")
# webnotes.errprint(check_confirmed)
if not check_confirmed:
check_status=webnotes.conn.sql("select case when count(*)<2 then true else false end from `tabSlot Child` where slot='"+self.doc.appointment_slot+"' and modality='"+self.doc.encounter+"' and study='"+self.doc.study+"' and date_format(start_time,'%Y-%m-%d %H:%M')=date_format('"+date_a+"','%Y-%m-%d %H:%M') and date_format(end_time,'%Y-%m-%d %H:%M')=date_format('"+date_b+"','%Y-%m-%d %H:%M') and status<>'Cancel'",as_list=1)
# webnotes.errprint(check_status[0][0])
if check_status[0][0]==1:
d=Document("Slot Child")
d.slot=self.doc.appointment_slot
d.modality=self.doc.encounter
d.study=self.doc.study
d.status='Waiting'
d.encounter=self.doc.name
d.start_time=date_a
d.end_time=date_b
d.save()
self.make_event(d.name)
self.doc.slot=d.name
else:
webnotes.msgprint("Selected slot is not available",raise_exception=1)
else:
webnotes.msgprint("Selected slot is not available",raise_exception=1)
def make_patient(self):
d = Document('Patient Register')
d.customer_name = self.doc.first_name + ' ' + self.doc.last_name
d.mobile = self.doc.phone_number
d.company=self.doc.company
d.save()
return d.name
	def child_entry(self,patient_data):
		# Build the billable line items for every not-yet-invoiced Encounter of
		# a patient, resolving per-study referral fees/rules (a physician-level
		# override in `tabPhysician Values` wins over the Study default) and
		# applying discounts.  patient_data is the parent (patient) name used
		# to select encounters.  Returns a list of row dicts, or None (after a
		# "Bill already made" message) when nothing is left to invoice.
		#
		# The union's second branch pulls the recipe items of each selected
		# study; the run of '' literals pads its column list to match the
		# first branch.
		services = webnotes.conn.sql(""" SELECT foo.*, case when exists(select true from `tabPhysician Values` a WHERE a.study_name=foo.study AND a.parent=foo.referrer_name and a.referral_fee <> 0) then (select a.referral_fee from `tabPhysician Values` a WHERE a.study_name=foo.study AND a.parent=foo.referrer_name) else (select ifnull(referral_fee,0) from tabStudy where name=foo.study) end as referral_fee,
		case when exists(select true from `tabPhysician Values` a WHERE a.study_name=foo.study AND a.parent=foo.referrer_name and a.referral_fee <> 0) then (select a.referral_rule from `tabPhysician Values` a WHERE a.study_name=foo.study AND a.parent=foo.referrer_name) else (select referral_rule from tabStudy where name=foo.study) end as referral_rule
		FROM ( SELECT s.study_aim AS study,'' as item, '1' as qty,
		s.study_aim as parent,s.modality, e.encounter,e.referrer_name, e.name, s.discount_type,s.study_detials,s.discounted_value as dis_value FROM `tabEncounter` e, tabStudy s WHERE ifnull(e.is_invoiced,'False')='False' AND
		e.parent ='%(parent)s' and s.name = e.study) AS foo union
		select '',item,qty,parent,'','','','','','','','','' from `tabStudy Recipe Details` where parent in(
		SELECT
		s.study_aim AS study
		FROM
		`tabEncounter` e,
		tabStudy s
		WHERE
		ifnull(e.is_invoiced,'False')='False'
		AND e.parent ='%(parent)s'
		AND s.name = e.study
		)
		order by parent,qty"""%({"parent":patient_data}),as_dict=1)
		patient_data_new=[]
		tot_amt = 0.0
		if services:
			for srv in services:
				# Price: study fee first, falling back to the item's price when
				# the study fee is zero/absent (recipe-item rows have no study).
				export_rate=webnotes.conn.sql("""select study_fees from tabStudy where name = '%s' """%srv['study'],as_list=1)
				srv['export_rate'] = export_rate[0][0] if export_rate else 0
				if cint(srv['export_rate'])==0:
					item_export_rate=webnotes.conn.sql("""select price from tabItem where name = '%s' """%srv['item'],as_list=1)
					srv['export_rate'] = item_export_rate[0][0] if item_export_rate else 0
				# Credit the referring physician's ledger account when one exists.
				if srv['referrer_name']:
					acc_head = webnotes.conn.sql("""select name from `tabAccount` where master_name='%s'"""%(srv['referrer_name']))
					if acc_head and acc_head[0][0]:
						srv['referrer_physician_credit_to'] = acc_head[0][0]
				if srv['discount_type']=='Regular discount':
					# dis_value is a percentage here.
					srv['basic_charges']=cstr(flt(srv['export_rate']-flt(flt(srv['export_rate'])*flt(srv['dis_value'])/100)))
					srv['discount_in_amt']=cstr(flt(flt(srv['export_rate'])*flt(srv['dis_value'])/100))
				else:
					if srv['referral_rule'] == 'Fixed Cost':
						# Flat referral fee deducted from the rate.
						srv['basic_charges']=cstr(flt(srv['export_rate'])-flt(srv['referral_fee']))
						srv['discount_in_amt']=cstr(srv['referral_fee'])
					else:
						# Percentage referral fee; note qty multiplies only the
						# gross side of the expression here.
						srv['basic_charges']=cstr(flt(srv['export_rate'])*flt(srv['qty']) - (flt(srv['export_rate'])*(flt(srv['referral_fee'])/100)))
						srv['dis_value'] = cstr(srv['referral_fee'])
				# NOTE(review): tot_amt is a running total, so srv['amount']
				# carries the cumulative amount up to this row rather than the
				# row's own amount -- confirm this is what the invoice expects.
				tot_amt = flt(srv['basic_charges']) + tot_amt
				srv['amount'] = tot_amt
				patient_data_new.append(srv)
			return patient_data_new
		else:
			webnotes.msgprint("Bill already made")
def make_child_entry(self, patient_id=None):
enct = Document('Encounter')
# webnotes.errprint([enct, self.doc.patient])
enct.encounter = self.doc.encounter
enct.study = self.doc.study
enct.encounter_date = self.doc.encounter_date
enct.radiologist_name = self.doc.radiologist_name
enct.referrer_name = self.doc.referrer_name
enct.problem_description = self.doc.problem_description
enct.metal_in = self.doc.metal_in
enct.pacemaker = self.doc.pacemaker
enct.claustrophobia = self.doc.claustrophobia
enct.pregnancy = self.doc.pregnancy
enct.others = self.doc.others
enct.procedure_alert = self.doc.procedure_alert
enct.parent = self.doc.patient if self.doc.patient else patient_id
enct.id = self.doc.name
enct.save(new=1)
self.doc.entry_in_child = 'True'
self.doc.save()
def make_event(self,name_slot):
evnt = Document('Event')
evnt.slot=name_slot
evnt.event_type = 'Waiting'
evnt.starts_on = self.doc.encounter_date + ' ' +self.doc.start_time
evnt.ends_on = self.doc.encounter_date + ' ' +self.doc.end_time
if cint(self.doc.new_user)==1:
evnt.patient = self.doc.new_patient
evnt.patient_name= self.doc.first_name + ' ' + self.doc.last_name
else:
evnt.patient = self.doc.patient
evnt.patient_name= self.doc.patient_name
evnt.service = self.doc.study
evnt.subject = self.doc.study
evnt.modality=self.doc.encounter
evnt.study=self.doc.study
evnt.save()
self.doc.eventid = evnt.name
self.doc.save()
@webnotes.whitelist()
def get_employee(doctype, txt, searchfield, start, page_len, filters):
	"""Link-field query: every Employee whose designation is Radiologist."""
	query = "select name, employee_name from tabEmployee where designation = 'Radiologist'"
	return webnotes.conn.sql(query)
@webnotes.whitelist()
def get_patient_details(doctype, txt, searchfield, start, page_len, filters):
	# Link-field search over Patient Register by the chosen search field or
	# first_name, ranking exact-ish matches first.
	# NOTE(review): searchfield/txt/start/page_len are %-interpolated straight
	# into the SQL (injection-prone); the framework normally sanitizes link
	# query arguments upstream -- verify before exposing this more widely.
	return webnotes.conn.sql("""select name, first_name from `tabPatient Register`
		where docstatus < 2
		and (%(key)s like "%(txt)s"
			or first_name like "%(txt)s")
		order by
		case when name like "%(txt)s" then 0 else 1 end,
		case when first_name like "%(txt)s" then 0 else 1 end,
		name
		limit %(start)s, %(page_len)s""" % {'key': searchfield, 'txt': "%%%s%%" % txt,
		'start': start, 'page_len': page_len})
@webnotes.whitelist()
def update_event(checked, dname,encounter):
	"""Confirm the calendar event *dname* and the slot booking for *encounter*."""
	if cint(checked) != 1:
		return
	webnotes.conn.sql("update tabEvent set event_type='Confirm' where name='%s'"%dname)
	webnotes.conn.sql("update `tabSlot Child` set status='Confirm' where encounter='%s'"%encounter)
@webnotes.whitelist()
def get_events(start, end, doctype,op,filters=None):
	"""Return Patient Encounter Entry rows overlapping the [start, end] window.

	op, when given, restricts results to one modality (encounter).  filters is
	an optional JSON object of {fieldname: value} equality filters.  Rows are
	returned as dicts with an extra allDay=0 key for the calendar widget.
	"""
	cnd =''
	if op:
		cnd = "and encounter = '%(pros)s'"%{"pros":op}
	# Kept for parity with the original (match-condition support was stubbed
	# out); the import itself is harmless.
	from webnotes.widgets.reportview import build_match_conditions
	conditions = ''
	if filters:
		filters = json.loads(filters)
		for key in filters:
			if filters[key]:
				# Bug fix: the previous replace('"', '\"') was a no-op because
				# '\"' is just '"', so embedded double quotes broke the SQL;
				# escape them for real with a backslash.
				conditions += " and " + key + ' = "' + filters[key].replace('"', '\\"') + '"'
	data = webnotes.conn.sql("""select name, start_time, end_time,
		study, status,encounter from `tabPatient Encounter Entry`
		where ((start_time between '%(start)s' and '%(end)s') \
		or (end_time between '%(start)s' and '%(end)s')) %(cnd)s
		%(conditions)s""" % {
		"start": start,
		"end": end,
		"conditions": conditions,
		"cnd":cnd
		}, as_dict=True, update={"allDay": 0})
	return data
@webnotes.whitelist()
def get_modality():
	"""Return every Modality name as a list of single-column rows."""
	query = "select name from tabModality"
	return webnotes.conn.sql(query, as_list=1)
@webnotes.whitelist()
def get_study(modality):
	"""Return the names of Study records belonging to *modality*."""
	query = "select name from tabStudy where modality = '%s'" % modality
	return webnotes.conn.sql(query, as_list=1)
@webnotes.whitelist()
def set_slot(modality, study, start_time, end_time):
	# Compute a free (start_time, end_time) pair for *study* on *modality*.
	time = get_study_time(study)
	# Long studies (> 30 min) may have to begin after an existing booking
	# ends, so the start is shifted before the end is recomputed.
	if cint(time) > 30:
		start_time = calc_start_time(start_time, modality)
	end_time = calc_end_time(cstr(start_time),time)
	# Walk forward until a slot with remaining capacity is found.
	start_time, end_time = check_availability(modality, start_time, end_time, time)
	return start_time, end_time
def check_availability(modality, start_time, end_time, time):
	# Recursively search forward for a slot on *modality* that still has
	# capacity.  Bookings are weighted so the sum encodes the mix in a slot:
	# Waiting counts 2, Confirmed counts 1, anything else 0.
	count = webnotes.conn.sql("""select sum(case when status = 'Waiting' then 2 when status = 'Confirmed' then 1 else 0 end) as status from `tabPatient Encounter Entry`
		where encounter = '%(encounter)s' and start_time = '%(start_time)s' and end_time = '%(end_time)s'
		"""%{'encounter':modality, 'start_time':start_time, 'end_time':end_time},as_list=1)
	# NOTE(review): sums 1, 3 and 4 are treated as "slot full" (e.g. 4 = two
	# waiting, 3 = waiting + confirmed) while 2 (a single waiting booking)
	# still passes -- confirm this weighting is intentional.
	if count[0][0] in (1, 4, 3):
		# Slot taken: advance by one slot of the same length and retry.
		start_time = end_time
		end_time = calc_end_time(cstr(start_time),time)
		return check_availability(modality, start_time, end_time, time)
	else:
		return start_time, end_time
# def get_modality_time(modality):
# return webnotes.conn.get_value('Modality',modality,'time_required')
def get_study_time(study):
	"""Return the configured duration (study_time, in minutes) of *study*.

	Bug fix: the lookup previously used the undefined global ``modality`` as
	the record name, raising NameError on every call; it now uses the
	``study`` parameter it was given.
	"""
	return webnotes.conn.get_value('Study',study,'study_time')
def calc_end_time(start_time,time):
	"""Return *start_time* ('%Y-%m-%d %H:%M:%S' string) plus *time* minutes as a datetime."""
	import datetime
	start = datetime.datetime.strptime(start_time, '%Y-%m-%d %H:%M:%S')
	return start + datetime.timedelta(minutes=cint(time))
def calc_start_time(start_time, modality):
	"""Return the earliest usable start time for a long study on *modality*.

	If an existing booking on this modality ends within the 30 minutes after
	*start_time*, the start is pushed to that booking's end time; otherwise
	*start_time* is returned unchanged.

	Bug fix: ``datetime`` was referenced without being imported in this
	function's scope (the sibling helper imports it function-locally), so the
	local import is added here to avoid a NameError.
	"""
	import datetime
	end_slot = datetime.datetime.strptime(cstr(start_time), '%Y-%m-%d %H:%M:%S') + datetime.timedelta(minutes=30)
	start_time_list = webnotes.conn.sql("""select end_time from `tabPatient Encounter Entry`
		where encounter='%(encounter)s' and end_time between '%(start_time)s'
		and '%(end_slot)s'"""%{'encounter':modality, 'start_time':start_time, 'end_slot':end_slot})
	if start_time_list:
		start_time = start_time_list[0][0]
	return start_time
@webnotes.whitelist()
def get_patient(patient_id):
	# Delegate to the DB SYNC controller to synchronise this patient record.
	# NOTE(review): 'DB SYNCl' looks like a typo for 'DB SYNC' but may be the
	# real docname -- confirm against the DocType before changing it, since
	# get_obj resolves both strings at runtime.
	get_obj('DB SYNC', 'DB SYNCl').sync_db(patient_id)
@webnotes.whitelist()
def create_patient(first_name,last_name,gender,date_of_birth,mobile_no,email, branch):
	"""Create and save a new Patient Register record; return its name.

	All arguments are stored verbatim on the new document.  (Also repairs the
	final line, which had extraction junk fused onto the return statement.)
	"""
	d = Document('Patient Register')
	d.first_name = first_name
	d.last_name = last_name
	d.birth_date = date_of_birth
	d.gender = gender
	d.mobile = mobile_no
	d.email = email
	d.lab_branch = branch
	d.save()
	return d.name
"""
Tests for coursewarehistoryextended
Many aspects of this app are covered by the courseware tests,
but these are specific to the new storage model with multiple
backend tables.
"""
import json
from unittest import skipUnless
from unittest.mock import patch
from django.conf import settings
from django.db import connections
from django.test import TestCase
from lms.djangoapps.courseware.models import BaseStudentModuleHistory, StudentModule, StudentModuleHistory
from lms.djangoapps.courseware.tests.factories import COURSE_KEY
from lms.djangoapps.courseware.tests.factories import LOCATION
from lms.djangoapps.courseware.tests.factories import StudentModuleFactory
@skipUnless(settings.FEATURES["ENABLE_CSMH_EXTENDED"], "CSMH Extended needs to be enabled")
class TestStudentModuleHistoryBackends(TestCase):
    """Tests of data in CSMH and CSMHE"""
    # Tell Django to clean out all databases, not just default
    databases = set(connections)

    def setUp(self):
        super().setUp()
        for order in (1, 2, 3):
            # Saving a StudentModule writes a CSMHE row via the post_save signal.
            module = StudentModuleFactory.create(
                module_state_key=LOCATION('usage_id'),
                course_id=COURSE_KEY,
                state=json.dumps({'type': 'csmhe', 'order': order}),
            )
            # Write the matching CSMH row by hand so both tables can be compared.
            StudentModuleHistory(
                student_module=module,
                version=None,
                created=module.modified,
                state=json.dumps({'type': 'csmh', 'order': order}),
                grade=module.grade,
                max_grade=module.max_grade,
            ).save()

    def _check_history(self, expected_states):
        """Fetch history for every StudentModule and compare record states in order."""
        history = BaseStudentModuleHistory.get_history(StudentModule.objects.all())
        assert len(history) == len(expected_states)
        for record, state in zip(history, expected_states):
            assert json.loads(record.state) == state

    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_CSMH_EXTENDED": True})
    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_READING_FROM_MULTIPLE_HISTORY_TABLES": True})
    def test_get_history_true_true(self):
        # Both flags on: CSMHE records first (newest first), then CSMH.
        self._check_history(
            [{'type': 'csmhe', 'order': n} for n in (3, 2, 1)]
            + [{'type': 'csmh', 'order': n} for n in (3, 2, 1)]
        )

    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_CSMH_EXTENDED": True})
    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_READING_FROM_MULTIPLE_HISTORY_TABLES": False})
    def test_get_history_true_false(self):
        # Extended table only.
        self._check_history([{'type': 'csmhe', 'order': n} for n in (3, 2, 1)])

    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_CSMH_EXTENDED": False})
    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_READING_FROM_MULTIPLE_HISTORY_TABLES": True})
    def test_get_history_false_true(self):
        # Legacy table only.
        self._check_history([{'type': 'csmh', 'order': n} for n in (3, 2, 1)])

    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_CSMH_EXTENDED": False})
    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_READING_FROM_MULTIPLE_HISTORY_TABLES": False})
    def test_get_history_false_false(self):
        # Neither source enabled: nothing comes back.
        self._check_history([])
| eduNEXT/edx-platform | lms/djangoapps/coursewarehistoryextended/tests.py | Python | agpl-3.0 | 4,269 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
FastaNameFilter
Description: Filter a fasta file based on a string to find in the
sequences headers, or given a file with a list of id
fastaNameFilter.py -i input.fa -o output.fa -s "stringtofind"
fastaNameFilter.py -i input.fa -o output.fa -f sequencesnames.ids
-----------------------------------------------------------------------
Author: This software is written and maintained by Pierre Pericard
(pierre.pericard@ed.univ-lille1.fr)
Created: 2014
Last Modified: 2016-01-13
Licence: GNU GPL 3.0
Copyright 2014-2016 Pierre Pericard
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
import os
import argparse
import string
import re
def read_fasta_file_handle(fasta_file_handle):
    """
    Parse a fasta file handle and yield (header, sequence) tuples.

    The header is the '>' line with the marker stripped; the sequence is the
    concatenation of all following lines until the next header.  The handle
    is closed when iteration finishes.

    Bug fix: an input with no '>' header (e.g. an empty file) previously
    yielded a bogus ('', ...) record; now nothing is yielded in that case.
    """
    # Variables initialization
    header = ''
    seqlines = list()
    sequence_nb = 0
    # Reading input file
    for line in fasta_file_handle:
        if line[0] == '>':
            # Yield the last read header and sequence
            if sequence_nb:
                yield (header, ''.join(seqlines))
                del seqlines[:]
            # Get header
            header = line[1:].rstrip()
            sequence_nb += 1
        else:
            # Concatenate sequence
            seqlines.append(line.strip())
    # Yield the final record only if at least one header was seen
    if sequence_nb:
        yield (header, ''.join(seqlines))
    # Close input file
    fasta_file_handle.close()
def format_seq(seq, linereturn=80):
    """
    Return *seq* wrapped to at most *linereturn* characters per line,
    with no trailing newline.
    """
    chunks = (seq[i:i + linereturn] for i in range(0, len(seq), linereturn))
    return "\n".join(chunks).rstrip()
if __name__ == '__main__':
    # Command-line entry point: keep sequences whose header matches either a
    # regex (-s) or an explicit id list (-f).  Input and output default to
    # stdin/stdout via argparse's '-' convention.
    parser = argparse.ArgumentParser(description='Filter a fasta file based on sequence name.')
    parser.add_argument('-i', '--input_fasta', metavar='input',
                        type=argparse.FileType('r'), default='-',
                        help='input fasta file')
    parser.add_argument('-o', '--output_fasta', metavar='output',
                        type=argparse.FileType('w'), default='-',
                        help='ouput fasta file')
    parser.add_argument('-s', '--stringtofind', metavar='string',
                        type=str, help='String to filter on')
    parser.add_argument('-f', '--fileids', metavar='file',
                        type=argparse.FileType('r'),
                        help='File with ids')
    args = parser.parse_args()
    # At least one of the two selection modes is mandatory.
    if not args.stringtofind and not args.fileids:
        parser.print_help()
        raise Exception('Either a string or an id file has to be supplied')
    # NOTE(review): when both -s and -f are supplied, the id file silently
    # wins and the string is ignored -- confirm this precedence is intended.
    if args.fileids:
        ids_list = list()
        # read ids (one per line) and store them
        for line in args.fileids:
            ids_list.append(line.strip())
        # convert the id list to a frozenset for fast search
        ids_set = frozenset(ids_list)
        # keep a sequence when the first whitespace-separated token of its
        # header is a listed id
        for header, sequence in read_fasta_file_handle(args.input_fasta):
            seq_id = header.split()[0]
            if seq_id in ids_set:
                args.output_fasta.write(">{0}\n{1}\n".format(header, format_seq(sequence)))
    else:
        # case-insensitive regex search anywhere in the header
        tofind = re.compile(args.stringtofind, flags=re.IGNORECASE)
        for header, sequence in read_fasta_file_handle(args.input_fasta):
            if tofind.search(header):
                args.output_fasta.write(">{0}\n{1}\n".format(header, format_seq(sequence)))
| bonsai-team/matam | scripts/fasta_name_filter.py | Python | agpl-3.0 | 4,067 |