text stringlengths 4 1.02M | meta dict |
|---|---|
import unittest
from wiring.dependency import inject, injected
from wiring.providers import (
FactoryProvider,
FunctionProvider,
InstanceProvider,
IProvider
)
from . import ModuleTest
class ProvidersModuleTest(ModuleTest):
    # Runs the shared ModuleTest checks against the 'wiring.providers'
    # module -- see ModuleTest for exactly what is verified.
    module = 'wiring.providers'
class FactoryProviderTest(unittest.TestCase):
    """Tests for FactoryProvider, which calls a factory callable on demand."""

    def test_basic(self):
        # A dependency-free factory exposes no dependencies and the
        # provider simply forwards the factory's return value.
        def make_answer():
            return 42
        provider = FactoryProvider(make_answer)
        IProvider.check_compliance(provider)
        self.assertDictEqual(provider.dependencies, {})
        self.assertEqual(provider(), 42)

    def test_dependencies(self):
        # Dependencies may be declared via @inject arguments and via
        # injected() defaults; explicit call arguments take precedence.
        @inject(injected(12), None, ('foo', 14), foobar=4)
        def function(first, second, third, foo=injected('test'),
                     foobar=None):
            return (first, second, third, foo, foobar)
        provider = FactoryProvider(function)
        expected_dependencies = {
            0: 12,
            2: ('foo', 14),
            'foo': 'test',
            'foobar': 4,
        }
        self.assertDictEqual(provider.dependencies, expected_dependencies)
        self.assertTupleEqual(provider(1, 2, 3, 4, 5), (1, 2, 3, 4, 5))

    def test_class(self):
        # A class used as the factory yields instances; dependency
        # declarations on its __init__ are exposed the same way.
        class Example(object):
            @inject(injected(12), None, ('foo', 14), foobar=4)
            def __init__(self, second, third, foo=injected('test'),
                         foobar=None):
                self.arguments = (second, third, foo)
        provider = FactoryProvider(Example)
        expected_dependencies = {
            0: 12,
            2: ('foo', 14),
            'foo': 'test',
            'foobar': 4,
        }
        self.assertDictEqual(provider.dependencies, expected_dependencies)
        self.assertIsInstance(provider(1, 2, 3), Example)
        self.assertTupleEqual(provider(1, 2, 3).arguments, (1, 2, 3))
class FunctionProviderTest(unittest.TestCase):
    """Tests for FunctionProvider, which returns a partially-bound wrapper."""

    def test(self):
        def foo(bar, foobar=injected('test')):
            return bar + foobar
        provider = FunctionProvider(foo)
        IProvider.check_compliance(provider)
        self.assertEqual(provider.function, foo)
        self.assertDictEqual(provider.dependencies, {'foobar': 'test'})
        # Each provider call produces an independently bound wrapper.
        wrapped = provider(foobar=12)
        for argument, expected in ((5, 17), (6, 18), (-2, 10)):
            self.assertEqual(wrapped(argument), expected)
        wrapped = provider(foobar=1)
        for argument, expected in ((6, 7), (-2, -1)):
            self.assertEqual(wrapped(argument), expected)

    def test_variable_arguments(self):
        """
        Regression test for FunctionProvider with a variable number of
        positional arguments, see issue #4.
        """
        def collect(*args):
            return tuple(args)
        wrapped = FunctionProvider(collect)()
        self.assertSequenceEqual(wrapped(1, 2), (1, 2))
        self.assertSequenceEqual(wrapped(1), (1,))

    def test_kwargs_copy(self):
        # Keyword arguments must not leak between calls to the same wrapper
        # or between wrappers obtained from separate providers.
        def echo(*args, **kwargs):
            return tuple(args), dict(kwargs)
        wrapped = FunctionProvider(echo)()
        self.assertSequenceEqual(wrapped(1, 2, test=1), ((1, 2), {'test': 1}))
        self.assertSequenceEqual(wrapped(1, 2, test=2), ((1, 2), {'test': 2}))
        self.assertSequenceEqual(wrapped(1, 2), ((1, 2), {}))
        wrapped = FunctionProvider(echo)()
        self.assertSequenceEqual(wrapped(1, 2, test=1), ((1, 2), {'test': 1}))
        self.assertSequenceEqual(wrapped(test=2), ((), {'test': 2}))
class InstanceProviderTest(unittest.TestCase):
    """Tests for InstanceProvider, which always yields one fixed object."""

    def test(self):
        sentinel = object()
        provider = InstanceProvider(sentinel)
        IProvider.check_compliance(provider)
        self.assertDictEqual(provider.dependencies, {})
        # The very same object is handed back on every call.
        self.assertEqual(provider(), sentinel)
| {
"content_hash": "528ccfafe13c6affd2c8ca106a136f3d",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 75,
"avg_line_length": 29.074324324324323,
"alnum_prop": 0.542644666511736,
"repo_name": "msiedlarek/wiring",
"id": "52c4b3003c324d32145d4416aa2cca4163c2ef19",
"size": "4303",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/all/providers_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "135"
},
{
"name": "HTML",
"bytes": "485"
},
{
"name": "Python",
"bytes": "139722"
}
],
"symlink_target": ""
} |
from sqlalchemy import Column, Integer, String
from app import Base, engine
class User(Base):
    """SQLAlchemy model for an application user account."""
    __tablename__ = 'users'
    # Surrogate primary key.
    id = Column(Integer, primary_key=True)
    username = Column(String(30), nullable=False)
    first_name = Column(String(30), nullable=True)
    last_name = Column(String(30), nullable=True)
    email = Column(String(75), nullable=False)
    # Nullable -- presumably accounts created via a social-auth provider
    # have no local password; TODO confirm against the auth flow.
    password = Column(String(128), nullable=True)
if __name__ == '__main__':
    # Running this module directly creates all mapped tables on the engine.
    Base.metadata.create_all(engine)
| {
"content_hash": "82d7eb2701029df3ff390b407208f71c",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 50,
"avg_line_length": 28.352941176470587,
"alnum_prop": 0.6763485477178424,
"repo_name": "duoduo369/python-social-auth",
"id": "784f2cf40cef66b36a7b848876e360b1c2bbcd4d",
"size": "482",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "examples/tornado_example/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "54"
},
{
"name": "Python",
"bytes": "562718"
},
{
"name": "Shell",
"bytes": "67"
}
],
"symlink_target": ""
} |
"""
Django accounts management made easy.
"""
default_app_config = 'userena.apps.UserenaConfig'

# Version as a tuple of integer parts (major, minor, patch).
VERSION = (2, 0, 1)

# Full dotted version string.  Sliced with [:3] to match get_version();
# the previous [:4] slice was inconsistent (though equivalent for a
# three-element tuple).
__version__ = '.'.join(str(each) for each in VERSION[:3])


def get_version():
    """
    Returns string with digit parts only as version.
    """
    return '.'.join(str(each) for each in VERSION[:3])
| {
"content_hash": "49f8b077dd4630b6f127af69c7dc71f7",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 59,
"avg_line_length": 19.235294117647058,
"alnum_prop": 0.6238532110091743,
"repo_name": "mortenwh/django-userena",
"id": "2fcbd7dece7093cb54fc002f7a638e777a115c76",
"size": "327",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "userena/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "18379"
},
{
"name": "HTML",
"bytes": "40397"
},
{
"name": "Nginx",
"bytes": "101"
},
{
"name": "Python",
"bytes": "255328"
},
{
"name": "Shell",
"bytes": "389"
}
],
"symlink_target": ""
} |
import argparse
from fabric.api import *
from fabric.contrib.files import exists
from dlab.meta_lib import *
import os
from dlab.fab import *
# Command-line interface: cluster/connection parameters supplied by the
# provisioning orchestrator.  Parsed at import time.
parser = argparse.ArgumentParser()
parser.add_argument('--cluster_name', type=str, default='')
parser.add_argument('--spark_version', type=str, default='')
parser.add_argument('--hadoop_version', type=str, default='')
parser.add_argument('--os_user', type=str, default='')
parser.add_argument('--spark_master', type=str, default='')
parser.add_argument('--keyfile', type=str, default='')
parser.add_argument('--notebook_ip', type=str, default='')
parser.add_argument('--datalake_enabled', type=str, default='false')
args = parser.parse_args()
def configure_notebook(keyfile, hoststring):
    """Stage Spark config and the dataengine-configs script on the notebook VM.

    Args:
        keyfile: path to the SSH private key used to reach the Spark master.
        hoststring: NOTE(review) -- currently unused; the fabric env set up
            by the caller determines the target host.
    """
    scripts_dir = '/root/scripts/'
    templates_dir = '/root/templates/'
    # Per-cluster staging directory for the Spark defaults template.
    run('mkdir -p /tmp/{}/'.format(args.cluster_name))
    put(templates_dir + 'notebook_spark-defaults_local.conf', '/tmp/{}/notebook_spark-defaults_local.conf'.format(args.cluster_name))
    # Extract the bare IP from a URL of the form scheme://host:port.
    spark_master_ip = args.spark_master.split('//')[1].split(':')[0]
    spark_memory = get_spark_memory(True, args.os_user, spark_master_ip, keyfile)
    run('echo "spark.executor.memory {0}m" >> /tmp/{1}/notebook_spark-defaults_local.conf'.format(spark_memory, args.cluster_name))
    # Install the kernel-configuration script once; later runs skip it.
    if not exists('/usr/local/bin/rstudio_dataengine_create_configs.py'):
        put(scripts_dir + 'rstudio_dataengine_create_configs.py', '/usr/local/bin/rstudio_dataengine_create_configs.py', use_sudo=True)
        sudo('chmod 755 /usr/local/bin/rstudio_dataengine_create_configs.py')
    # Copy the dlab helper package onto the VM if it is not there yet.
    if not exists('/usr/lib/python2.7/dlab/'):
        sudo('mkdir -p /usr/lib/python2.7/dlab/')
        put('/usr/lib/python2.7/dlab/*', '/usr/lib/python2.7/dlab/', use_sudo=True)
        sudo('chmod a+x /usr/lib/python2.7/dlab/*')
        if exists('/usr/lib64'):
            sudo('ln -fs /usr/lib/python2.7/dlab /usr/lib64/python2.7/dlab')
if __name__ == "__main__":
    # Point fabric at the notebook VM using the supplied credentials.
    env.hosts = "{}".format(args.notebook_ip)
    env.user = args.os_user
    env.key_filename = "{}".format(args.keyfile)
    env.host_string = env.user + "@" + env.hosts
    try:
        region = os.environ['aws_region']
    except KeyError:
        # Was a bare ``except:`` (which also swallows SystemExit and
        # KeyboardInterrupt); only a missing environment variable is
        # expected here.  Not on AWS -> pass an empty region through.
        region = ''
    configure_notebook(args.keyfile, env.host_string)
    # Generate the kernel/Spark configs on the VM with the collected settings.
    sudo("/usr/bin/python /usr/local/bin/rstudio_dataengine_create_configs.py "
         "--cluster_name {} --spark_version {} --hadoop_version {} --os_user {} --spark_master {} --region {} --datalake_enabled {}".
         format(args.cluster_name, args.spark_version, args.hadoop_version, args.os_user, args.spark_master, region,
                args.datalake_enabled))
| {
"content_hash": "514a694d0e805486ce1bac0f3a0a6cc0",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 135,
"avg_line_length": 50.36538461538461,
"alnum_prop": 0.6655211912943871,
"repo_name": "epam/DLab",
"id": "2e5cfb56127fa68ffd69050c9d7e586b9496a166",
"size": "3388",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "infrastructure-provisioning/src/general/scripts/os/rstudio_install_dataengine_kernels.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "81633"
},
{
"name": "HTML",
"bytes": "110323"
},
{
"name": "Java",
"bytes": "2473499"
},
{
"name": "Jupyter Notebook",
"bytes": "80955"
},
{
"name": "Python",
"bytes": "1861086"
},
{
"name": "R",
"bytes": "4894"
},
{
"name": "Ruby",
"bytes": "62731"
},
{
"name": "Shell",
"bytes": "18826"
},
{
"name": "TypeScript",
"bytes": "363308"
}
],
"symlink_target": ""
} |
from pathlib import Path
from setuptools import find_packages
from setuptools import setup
def read(fname: str) -> str:
    """Return the UTF-8 text of *fname*, resolved relative to this file."""
    return (Path(__file__).parent / fname).read_text(encoding="UTF-8")
# Package metadata for pytest-regressions.  The version is derived from
# SCM tags via setuptools_scm rather than hard-coded here.
setup(
    name="pytest-regressions",
    use_scm_version=True,
    setup_requires=[
        "setuptools_scm; python_version>'3.6'",
        "setuptools_scm <7.0; python_version=='3.6'",
    ],
    author="ESSS",
    author_email="foss@esss.co",
    maintainer="Bruno Oliveira",
    maintainer_email="bruno@esss.co",
    license="MIT",
    url="https://github.com/ESSS/pytest-regressions",
    description="Easy to use fixtures to write regression tests.",
    long_description=read("README.rst"),
    # src-layout: packages live under src/.
    packages=find_packages("src"),
    package_dir={"": "src"},
    python_requires=">=3.6",
    # Ship the PEP 561 marker so type checkers pick up inline annotations.
    package_data={
        "pytest_regressions": ["py.typed"],
    },
    # Optional extras: "num"/"image"/"dataframe" enable the corresponding
    # fixture back-ends; "dev" is the contributor toolchain.
    extras_require={
        "dev": [
            "matplotlib",
            "mypy",
            "numpy",
            "pandas",
            "pillow",
            "pre-commit",
            "restructuredtext-lint",
            "tox",
        ],
        "num": [
            "numpy",
            "pandas",
        ],
        "image": ["pillow", "numpy"],
        "dataframe": ["numpy", "pandas"],
    },
    install_requires=["pytest-datadir>=1.2.0", "pytest>=6.2.0", "pyyaml"],
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Framework :: Pytest",
        "Intended Audience :: Developers",
        "Topic :: Software Development :: Testing",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: Implementation :: CPython",
        "Operating System :: OS Independent",
        "License :: OSI Approved :: MIT License",
    ],
    # Register the plugin so pytest loads it automatically when installed.
    entry_points={"pytest11": ["regressions = pytest_regressions.plugin"]},
)
| {
"content_hash": "8d159e20c3cbd40a2bbcb66a3dcd860b",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 75,
"avg_line_length": 30.88235294117647,
"alnum_prop": 0.5585714285714286,
"repo_name": "ESSS/pytest-regressions",
"id": "b7cf9da38d970403cf5221dc93ecb6e20f24e629",
"size": "2100",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "119581"
}
],
"symlink_target": ""
} |
from distutils.core import setup
# Package metadata for django-wsgi.
setup(
    name='django-wsgi',
    version='0.1alpha1',
    description="A library for better integration between django and the WSGI world.",
    long_description=open('README.txt').read(),
    author='Alex Gaynor',
    author_email='alex.gaynor@gmail.com',
    license='BSD',
    url='http://github.com/alex/django-wsgi',
    py_modules=['django_wsgi'],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        # Fixed: this classifier previously advertised the MIT license,
        # contradicting the ``license='BSD'`` field above.
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Framework :: Django',
        'Topic :: Internet :: WWW/HTTP :: WSGI',
    ],
)
| {
"content_hash": "600afe7008d6b6171ef15b33d58feba9",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 86,
"avg_line_length": 33.17391304347826,
"alnum_prop": 0.6159895150720839,
"repo_name": "alex/django-wsgi",
"id": "da534d6f99b706b333244062ee693d328a97b192",
"size": "763",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "4522"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from .models.job_post import JobPost
from .models.company import Company
# Expose the job-board models in the default Django admin site.
admin.site.register(JobPost)
admin.site.register(Company)
"content_hash": "8fc8e0ef64057e5a05a3a1660ce7ba3d",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 36,
"avg_line_length": 23.571428571428573,
"alnum_prop": 0.8242424242424242,
"repo_name": "abernet2/job-searcherv2",
"id": "72f054e5debadbda6582949111448ec9a0d9eab8",
"size": "165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "job_parser/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3123"
},
{
"name": "JavaScript",
"bytes": "989"
},
{
"name": "Python",
"bytes": "19773"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
    """Build the Aakuan defender (human male) creature template object.

    The *kernel* argument is part of the template-factory interface; it is
    not used by this particular template.
    """
    creature = Creature()
    creature.template = "object/mobile/shared_dressed_aakuan_defender_human_male_01.iff"
    creature.attribute_template_id = 9
    creature.stfName("npc_name", "human_base_male")
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
    return creature
"content_hash": "cb921b38126dd6559509e60bf0e368f4",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 83,
"avg_line_length": 24.384615384615383,
"alnum_prop": 0.7003154574132492,
"repo_name": "obi-two/Rebelion",
"id": "592a11bab3d9b9de993d35e5a1541e0894da7fb3",
"size": "462",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/mobile/shared_dressed_aakuan_defender_human_male_01.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
from citext import CIText
from sqlalchemy import (
Table, Column, CheckConstraint, ForeignKey, Index, UniqueConstraint,
)
from sqlalchemy import Boolean, DateTime, Integer, String, Unicode
from sqlalchemy import sql
from warehouse.application import Warehouse
# Core user-account table.
users = Table(
    "accounts_user",
    Warehouse.metadata,
    Column("id", Integer(), primary_key=True, nullable=False),
    Column("password", String(length=128), nullable=False),
    Column("last_login", DateTime(), nullable=False),
    Column("is_superuser", Boolean(), nullable=False),
    # Case-insensitive (CIText) and unique across all accounts.
    Column("username", CIText(), nullable=False, unique=True),
    Column("name", Unicode(length=100), nullable=False),
    Column("is_staff", Boolean(), nullable=False),
    Column("is_active", Boolean(), nullable=False),
    Column("date_joined", DateTime(), server_default=sql.func.now()),
    # NOTE(review): constraint name says "packages_..." but it guards the
    # username column -- looks copy-pasted.  Renaming it would require a
    # schema migration, so only flagging it here.
    CheckConstraint("length(username) <= 50", name="packages_valid_name"),
    CheckConstraint(
        "username ~* '^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$'",
        name="accounts_user_valid_username",
    ),
)
# E-mail addresses attached to accounts; one user may own several.
emails = Table(
    "accounts_email",
    Warehouse.metadata,
    Column("id", Integer(), primary_key=True, nullable=False),
    # Owning user.  DEFERRABLE INITIALLY DEFERRED: the FK is checked at
    # transaction commit rather than per-statement.
    Column("user_id",
        Integer(),
        ForeignKey(
            "accounts_user.id",
            deferrable=True,
            initially="DEFERRED",
        ),
        nullable=False,
    ),
    Column("email", Unicode(length=254), nullable=False),
    Column("primary", Boolean(), nullable=False),
    Column("verified", Boolean(), nullable=False),
    # An address may belong to at most one account.
    UniqueConstraint("email", name="accounts_email_email_key"),
    Index("accounts_email_email_like", "email"),
    Index("accounts_email_user_id", "user_id"),
)
| {
"content_hash": "80c3e454e63bb66a828c027d3c6a4cb4",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 74,
"avg_line_length": 32.03508771929825,
"alnum_prop": 0.6544359255202629,
"repo_name": "mattrobenolt/warehouse",
"id": "17f0c68d29bf8ce86d81fc062b6508b0260b739a",
"size": "2399",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "warehouse/accounts/tables.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "151221"
},
{
"name": "JavaScript",
"bytes": "58411"
},
{
"name": "Python",
"bytes": "293257"
},
{
"name": "Ruby",
"bytes": "339"
}
],
"symlink_target": ""
} |
"""Wrappers for protocol buffer enum types."""
import enum
# Values mirror the protocol buffer enum (see module docstring); do not
# renumber.
class Likelihood(enum.IntEnum):
    """
    A bucketized representation of likelihood, which is intended to give clients
    highly stable results across model upgrades.

    Attributes:
        UNKNOWN (int): Unknown likelihood.
        VERY_UNLIKELY (int): It is very unlikely that the image belongs to the specified vertical.
        UNLIKELY (int): It is unlikely that the image belongs to the specified vertical.
        POSSIBLE (int): It is possible that the image belongs to the specified vertical.
        LIKELY (int): It is likely that the image belongs to the specified vertical.
        VERY_LIKELY (int): It is very likely that the image belongs to the specified vertical.
    """
    UNKNOWN = 0
    VERY_UNLIKELY = 1
    UNLIKELY = 2
    POSSIBLE = 3
    LIKELY = 4
    VERY_LIKELY = 5
# Namespace class grouping enums nested under the Block proto message.
class Block(object):
    class BlockType(enum.IntEnum):
        """
        Type of a block (text, image etc) as identified by OCR.

        Attributes:
            UNKNOWN (int): Unknown block type.
            TEXT (int): Regular text block.
            TABLE (int): Table block.
            PICTURE (int): Image block.
            RULER (int): Horizontal/vertical line box.
            BARCODE (int): Barcode block.
        """
        UNKNOWN = 0
        TEXT = 1
        TABLE = 2
        PICTURE = 3
        RULER = 4
        BARCODE = 5
# Namespace classes grouping enums nested under the FaceAnnotation proto
# message; values mirror the protocol buffer definition.
class FaceAnnotation(object):
    class Landmark(object):
        class Type(enum.IntEnum):
            """
            Face landmark (feature) type. Left and right are defined from the
            vantage of the viewer of the image without considering mirror
            projections typical of photos. So, ``LEFT_EYE``, typically, is the
            person's right eye.

            Attributes:
                UNKNOWN_LANDMARK (int): Unknown face landmark detected. Should not be filled.
                LEFT_EYE (int): Left eye.
                RIGHT_EYE (int): Right eye.
                LEFT_OF_LEFT_EYEBROW (int): Left of left eyebrow.
                RIGHT_OF_LEFT_EYEBROW (int): Right of left eyebrow.
                LEFT_OF_RIGHT_EYEBROW (int): Left of right eyebrow.
                RIGHT_OF_RIGHT_EYEBROW (int): Right of right eyebrow.
                MIDPOINT_BETWEEN_EYES (int): Midpoint between eyes.
                NOSE_TIP (int): Nose tip.
                UPPER_LIP (int): Upper lip.
                LOWER_LIP (int): Lower lip.
                MOUTH_LEFT (int): Mouth left.
                MOUTH_RIGHT (int): Mouth right.
                MOUTH_CENTER (int): Mouth center.
                NOSE_BOTTOM_RIGHT (int): Nose, bottom right.
                NOSE_BOTTOM_LEFT (int): Nose, bottom left.
                NOSE_BOTTOM_CENTER (int): Nose, bottom center.
                LEFT_EYE_TOP_BOUNDARY (int): Left eye, top boundary.
                LEFT_EYE_RIGHT_CORNER (int): Left eye, right corner.
                LEFT_EYE_BOTTOM_BOUNDARY (int): Left eye, bottom boundary.
                LEFT_EYE_LEFT_CORNER (int): Left eye, left corner.
                RIGHT_EYE_TOP_BOUNDARY (int): Right eye, top boundary.
                RIGHT_EYE_RIGHT_CORNER (int): Right eye, right corner.
                RIGHT_EYE_BOTTOM_BOUNDARY (int): Right eye, bottom boundary.
                RIGHT_EYE_LEFT_CORNER (int): Right eye, left corner.
                LEFT_EYEBROW_UPPER_MIDPOINT (int): Left eyebrow, upper midpoint.
                RIGHT_EYEBROW_UPPER_MIDPOINT (int): Right eyebrow, upper midpoint.
                LEFT_EAR_TRAGION (int): Left ear tragion.
                RIGHT_EAR_TRAGION (int): Right ear tragion.
                LEFT_EYE_PUPIL (int): Left eye pupil.
                RIGHT_EYE_PUPIL (int): Right eye pupil.
                FOREHEAD_GLABELLA (int): Forehead glabella.
                CHIN_GNATHION (int): Chin gnathion.
                CHIN_LEFT_GONION (int): Chin left gonion.
                CHIN_RIGHT_GONION (int): Chin right gonion.
            """
            UNKNOWN_LANDMARK = 0
            LEFT_EYE = 1
            RIGHT_EYE = 2
            LEFT_OF_LEFT_EYEBROW = 3
            RIGHT_OF_LEFT_EYEBROW = 4
            LEFT_OF_RIGHT_EYEBROW = 5
            RIGHT_OF_RIGHT_EYEBROW = 6
            MIDPOINT_BETWEEN_EYES = 7
            NOSE_TIP = 8
            UPPER_LIP = 9
            LOWER_LIP = 10
            MOUTH_LEFT = 11
            MOUTH_RIGHT = 12
            MOUTH_CENTER = 13
            NOSE_BOTTOM_RIGHT = 14
            NOSE_BOTTOM_LEFT = 15
            NOSE_BOTTOM_CENTER = 16
            LEFT_EYE_TOP_BOUNDARY = 17
            LEFT_EYE_RIGHT_CORNER = 18
            LEFT_EYE_BOTTOM_BOUNDARY = 19
            LEFT_EYE_LEFT_CORNER = 20
            RIGHT_EYE_TOP_BOUNDARY = 21
            RIGHT_EYE_RIGHT_CORNER = 22
            RIGHT_EYE_BOTTOM_BOUNDARY = 23
            RIGHT_EYE_LEFT_CORNER = 24
            LEFT_EYEBROW_UPPER_MIDPOINT = 25
            RIGHT_EYEBROW_UPPER_MIDPOINT = 26
            LEFT_EAR_TRAGION = 27
            RIGHT_EAR_TRAGION = 28
            LEFT_EYE_PUPIL = 29
            RIGHT_EYE_PUPIL = 30
            FOREHEAD_GLABELLA = 31
            CHIN_GNATHION = 32
            CHIN_LEFT_GONION = 33
            CHIN_RIGHT_GONION = 34
# Namespace class grouping enums nested under the Feature proto message.
class Feature(object):
    class Type(enum.IntEnum):
        """
        Type of image feature.

        Attributes:
            TYPE_UNSPECIFIED (int): Unspecified feature type.
            FACE_DETECTION (int): Run face detection.
            LANDMARK_DETECTION (int): Run landmark detection.
            LOGO_DETECTION (int): Run logo detection.
            LABEL_DETECTION (int): Run label detection.
            TEXT_DETECTION (int): Run OCR.
            DOCUMENT_TEXT_DETECTION (int): Run dense text document OCR. Takes precedence when both
            DOCUMENT\_TEXT\_DETECTION and TEXT\_DETECTION are present.
            SAFE_SEARCH_DETECTION (int): Run computer vision models to compute image safe-search properties.
            IMAGE_PROPERTIES (int): Compute a set of image properties, such as the image's dominant colors.
            CROP_HINTS (int): Run crop hints.
            WEB_DETECTION (int): Run web detection.
        """
        TYPE_UNSPECIFIED = 0
        FACE_DETECTION = 1
        LANDMARK_DETECTION = 2
        LOGO_DETECTION = 3
        LABEL_DETECTION = 4
        TEXT_DETECTION = 5
        # Values are non-contiguous by design: they match the proto field
        # numbers, so DOCUMENT_TEXT_DETECTION = 11 is not an error.
        DOCUMENT_TEXT_DETECTION = 11
        SAFE_SEARCH_DETECTION = 6
        IMAGE_PROPERTIES = 7
        CROP_HINTS = 9
        WEB_DETECTION = 10
# Namespace classes grouping enums nested under the TextAnnotation proto
# message.
class TextAnnotation(object):
    class DetectedBreak(object):
        class BreakType(enum.IntEnum):
            """
            Enum to denote the type of break found. New line, space etc.

            Attributes:
                UNKNOWN (int): Unknown break label type.
                SPACE (int): Regular space.
                SURE_SPACE (int): Sure space (very wide).
                EOL_SURE_SPACE (int): Line-wrapping break.
                HYPHEN (int): End-line hyphen that is not present in text; does not co-occur with
                ``SPACE``, ``LEADER_SPACE``, or ``LINE_BREAK``.
                LINE_BREAK (int): Line break that ends a paragraph.
            """
            UNKNOWN = 0
            SPACE = 1
            SURE_SPACE = 2
            EOL_SURE_SPACE = 3
            HYPHEN = 4
            LINE_BREAK = 5
| {
"content_hash": "13fac439843e1e56a98125c4385a4597",
"timestamp": "",
"source": "github",
"line_count": 188,
"max_line_length": 106,
"avg_line_length": 38.244680851063826,
"alnum_prop": 0.568567454798331,
"repo_name": "tseaver/google-cloud-python",
"id": "caf4d634339eaa9c68e1747239896d99f196074d",
"size": "7792",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "vision/google/cloud/vision_v1p1beta1/gapic/enums.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1094"
},
{
"name": "Python",
"bytes": "30519057"
},
{
"name": "Shell",
"bytes": "9148"
}
],
"symlink_target": ""
} |
"""Run Inception V3 benchmarks.
Tutorials: https://cloud.google.com/tpu/docs/tutorials/inception
Code: https://github.com/tensorflow/tpu/blob/master/models/experimental/inception/inception_v3.py
This benchmark is equivalent to tensorflow_benchmark with the inception3 model
except that this can target TPU.
"""
# TODO(tohaowu): We only measure image processing speed for now, and we will
# measure the other metrics in the future.
import time
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker.linux_benchmarks import mnist_benchmark
from perfkitbenchmarker.linux_benchmarks import resnet_benchmark
from perfkitbenchmarker.linux_packages import cloud_tpu_models
from perfkitbenchmarker.linux_packages import tensorflow
from six.moves import range
FLAGS = flags.FLAGS
BENCHMARK_NAME = 'inception3'
BENCHMARK_CONFIG = """
inception3:
description: Runs Inception V3 Benchmark.
vm_groups:
default:
vm_spec:
GCP:
machine_type: n1-standard-4
zone: us-east1-d
boot_disk_size: 200
AWS:
machine_type: p2.xlarge
zone: us-east-1
boot_disk_size: 200
Azure:
machine_type: Standard_NC6
zone: eastus
"""
flags.DEFINE_float('inception3_learning_rate', 0.165, 'Learning rate.')
flags.DEFINE_integer('inception3_train_epochs', 200,
'Number of epochs use for training.', lower_bound=1)
flags.DEFINE_enum('inception3_use_data', 'real', ['real', 'fake'],
'Whether to use real or fake data. If real, the data is '
'downloaded from imagenet_data_dir. Otherwise, synthetic '
'data is generated.')
flags.DEFINE_enum('inception3_mode', 'train_and_eval',
['train', 'eval', 'train_and_eval'],
'Mode to run: train, eval, train_and_eval')
flags.DEFINE_integer('inception3_epochs_per_eval', 2,
'Number of training epochs to run between evaluations.')
flags.DEFINE_integer('inception3_save_checkpoints_secs', 0, 'Interval (in '
'seconds) at which the model data should be checkpointed. '
'Set to 0 to disable.')
flags.DEFINE_integer('inception3_train_batch_size', 1024,
'Global (not per-shard) batch size for training')
flags.DEFINE_integer('inception3_eval_batch_size', 1024,
'Global (not per-shard) batch size for evaluation')
def GetConfig(user_config):
  """Load and return benchmark config.

  Args:
    user_config: user supplied configuration (flags and config file)

  Returns:
    loaded benchmark configuration
  """
  loaded_config = configs.LoadConfig(
      BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
  return loaded_config
def _UpdateBenchmarkSpecWithFlags(benchmark_spec):
  """Update the benchmark_spec with supplied command line flags.

  Args:
    benchmark_spec: benchmark specification to update
  """
  benchmark_spec.learning_rate = FLAGS.inception3_learning_rate
  benchmark_spec.use_data = FLAGS.inception3_use_data
  benchmark_spec.mode = FLAGS.inception3_mode
  benchmark_spec.save_checkpoints_secs = FLAGS.inception3_save_checkpoints_secs
  benchmark_spec.train_batch_size = FLAGS.inception3_train_batch_size
  benchmark_spec.eval_batch_size = FLAGS.inception3_eval_batch_size
  benchmark_spec.commit = cloud_tpu_models.GetCommit(benchmark_spec.vms[0])
  benchmark_spec.data_dir = FLAGS.imagenet_data_dir
  benchmark_spec.num_train_images = FLAGS.imagenet_num_train_images
  benchmark_spec.num_eval_images = FLAGS.imagenet_num_eval_images
  # Derived values: epoch counts are converted to global step counts via
  # the training batch size.
  benchmark_spec.num_examples_per_epoch = (
      float(benchmark_spec.num_train_images) / benchmark_spec.train_batch_size)
  benchmark_spec.train_epochs = FLAGS.inception3_train_epochs
  benchmark_spec.train_steps = int(
      benchmark_spec.train_epochs * benchmark_spec.num_examples_per_epoch)
  benchmark_spec.epochs_per_eval = FLAGS.inception3_epochs_per_eval
  benchmark_spec.steps_per_eval = int(
      benchmark_spec.epochs_per_eval *
      benchmark_spec.num_examples_per_epoch)
def Prepare(benchmark_spec):
  """Install and set up Inception V3 on the target vm.

  Reuses the MNIST benchmark's provisioning, then applies this
  benchmark's flag-derived settings to the spec.

  Args:
    benchmark_spec: The benchmark specification
  """
  mnist_benchmark.Prepare(benchmark_spec)
  _UpdateBenchmarkSpecWithFlags(benchmark_spec)
def _CreateMetadataDict(benchmark_spec):
  """Create metadata dict to be used in run results.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.

  Returns:
    metadata dict (the base MNIST metadata extended with this benchmark's
    settings).
  """
  metadata = mnist_benchmark.CreateMetadataDict(benchmark_spec)
  inception_settings = {
      'learning_rate': benchmark_spec.learning_rate,
      'use_data': benchmark_spec.use_data,
      'mode': benchmark_spec.mode,
      'save_checkpoints_secs': benchmark_spec.save_checkpoints_secs,
      'epochs_per_eval': benchmark_spec.epochs_per_eval,
      'steps_per_eval': benchmark_spec.steps_per_eval,
      'precision': benchmark_spec.precision,
      'train_batch_size': benchmark_spec.train_batch_size,
      'eval_batch_size': benchmark_spec.eval_batch_size,
  }
  metadata.update(inception_settings)
  return metadata
def Run(benchmark_spec):
  """Run Inception V3 on the cluster.

  Builds the inception_v3.py command line once, then alternates training
  and evaluation every ``steps_per_eval`` steps up to ``train_steps``,
  collecting samples from both phases.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.

  Returns:
    A list of sample.Sample objects.
  """
  _UpdateBenchmarkSpecWithFlags(benchmark_spec)
  vm = benchmark_spec.vms[0]
  inception3_benchmark_script = (
      'tpu/models/experimental/inception/inception_v3.py')
  # Base command; --train_steps is appended per eval interval below.
  inception3_benchmark_cmd = (
      '{env_cmd} && python {script} '
      '--learning_rate={learning_rate} '
      '--iterations={iterations} '
      '--use_tpu={use_tpu} '
      '--use_data={use_data} '
      '--train_steps_per_eval={steps_per_eval} '
      '--data_dir={data_dir} '
      '--model_dir={model_dir} '
      '--save_checkpoints_secs={save_checkpoints_secs} '
      '--train_batch_size={train_batch_size} '
      '--eval_batch_size={eval_batch_size} '
      '--precision={precision}'.format(
          env_cmd=benchmark_spec.env_cmd,
          script=inception3_benchmark_script,
          learning_rate=benchmark_spec.learning_rate,
          iterations=benchmark_spec.iterations,
          use_tpu=bool(benchmark_spec.tpus),
          use_data=benchmark_spec.use_data,
          steps_per_eval=benchmark_spec.steps_per_eval,
          data_dir=benchmark_spec.data_dir,
          model_dir=benchmark_spec.model_dir,
          save_checkpoints_secs=benchmark_spec.save_checkpoints_secs,
          train_batch_size=benchmark_spec.train_batch_size,
          eval_batch_size=benchmark_spec.eval_batch_size,
          precision=benchmark_spec.precision))
  if FLAGS.tf_device == 'gpu':
    # On GPU targets the TF environment variables must prefix the command.
    inception3_benchmark_cmd = '{env} {cmd}'.format(
        env=tensorflow.GetEnvironmentVars(vm), cmd=inception3_benchmark_cmd)
  samples = []
  metadata = _CreateMetadataDict(benchmark_spec)
  # Tracks cumulative wall-clock training time across intervals; eval time
  # is deliberately excluded.
  elapsed_seconds = 0
  steps_per_eval = benchmark_spec.steps_per_eval
  train_steps = benchmark_spec.train_steps
  for step in range(steps_per_eval, train_steps + steps_per_eval,
                    steps_per_eval):
    # Clamp the final interval so we never train past train_steps.
    step = min(step, train_steps)
    inception3_benchmark_cmd_step = '{cmd} --train_steps={step}'.format(
        cmd=inception3_benchmark_cmd, step=step)
    if benchmark_spec.mode in ('train', 'train_and_eval'):
      if benchmark_spec.tpus:
        tpu = benchmark_spec.tpu_groups['train'].GetName()
        num_shards = '--num_shards={}'.format(
            benchmark_spec.tpu_groups['train'].GetNumShards())
      else:
        tpu = num_shards = ''
      inception3_benchmark_train_cmd = (
          '{cmd} --tpu={tpu} --mode=train {num_shards}'.format(
              cmd=inception3_benchmark_cmd_step,
              tpu=tpu, num_shards=num_shards))
      start = time.time()
      stdout, stderr = vm.RobustRemoteCommand(inception3_benchmark_train_cmd,
                                              should_log=True)
      elapsed_seconds += (time.time() - start)
      samples.extend(mnist_benchmark.MakeSamplesFromTrainOutput(
          metadata, stdout + stderr, elapsed_seconds, step))
    if benchmark_spec.mode in ('train_and_eval', 'eval'):
      # Eval may target a different TPU group than training.
      if benchmark_spec.tpus:
        tpu = benchmark_spec.tpu_groups['eval'].GetName()
        num_shards = '--num_shards={}'.format(
            benchmark_spec.tpu_groups['eval'].GetNumShards())
      else:
        tpu = num_shards = ''
      inception3_benchmark_eval_cmd = (
          '{cmd} --tpu={tpu} --mode=eval {num_shards}'.format(
              cmd=inception3_benchmark_cmd_step,
              tpu=tpu, num_shards=num_shards))
      stdout, stderr = vm.RobustRemoteCommand(inception3_benchmark_eval_cmd,
                                              should_log=True)
      samples.extend(resnet_benchmark.MakeSamplesFromEvalOutput(
          metadata, stdout + stderr, elapsed_seconds))
  return samples
def Cleanup(benchmark_spec):
  """Cleanup Inception V3 on the cluster.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.
  """
  # All provisioned resources are owned by the shared MNIST scaffolding.
  mnist_benchmark.Cleanup(benchmark_spec)
| {
"content_hash": "c791e2cf408e22d40cb8d904c4df83c2",
"timestamp": "",
"source": "github",
"line_count": 232,
"max_line_length": 97,
"avg_line_length": 39.543103448275865,
"alnum_prop": 0.6726618705035972,
"repo_name": "GoogleCloudPlatform/PerfKitBenchmarker",
"id": "124498826d0bf52ed8f9c512859fb5c09f914fd9",
"size": "9785",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "perfkitbenchmarker/linux_benchmarks/inception3_benchmark.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3420"
},
{
"name": "HTML",
"bytes": "113073"
},
{
"name": "Jinja",
"bytes": "62005"
},
{
"name": "Lua",
"bytes": "1547"
},
{
"name": "Python",
"bytes": "6076512"
},
{
"name": "R",
"bytes": "1017"
},
{
"name": "Shell",
"bytes": "76164"
},
{
"name": "Tcl",
"bytes": "14601"
}
],
"symlink_target": ""
} |
"""Genesis Tab
:copyright: Copyright (c) 2015 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
#from pykern.pkdebug import pkdc, pkdp
from radtrack.rt_qt import QtGui
from radtrack import genesis_controller
class GenesisTab(QtGui.QWidget):
    """Qt tab widget embedding the Genesis simulation controller."""
    # Class attributes consumed by the surrounding application shell to
    # title the tab, filter openable file types, and categorize it --
    # TODO confirm against the tab-loading code.
    defaultTitle = 'Genesis'
    acceptsFileTypes = ['in','out','dist']
    task = 'Run a Genesis simulation'
    category = 'simulations'
    def __init__(self, parent):
        super(GenesisTab, self).__init__(parent)
        layout = QtGui.QVBoxLayout(self)
        # The controller builds the actual UI; this tab just embeds it.
        self.control = genesis_controller.Base()
        layout.addWidget(self.control.init_widget(self))
        self.setLayout(layout)
        self.parent=parent
    def exportToFile(self, fileName = None):
        # Delegates writing the simulation file to the controller.
        self.control.write_simulation_file(fileName)
    def importFile(self, fileName = None):
        # Populate the controller from an existing file on disk.
        with open(fileName, 'r') as f:
            self.control.get_in(f)
# Allow launching this tab standalone (without the full application shell)
# for manual testing.
if '__main__' == __name__:
    from radtrack import rt_qt
    rt_qt.run_app(lambda: GenesisTab(None))
| {
"content_hash": "258bd592fa6fc4d28d33e00fa2e6fce2",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 67,
"avg_line_length": 29.25,
"alnum_prop": 0.6552706552706553,
"repo_name": "radiasoft/radtrack",
"id": "fd8d9de6b304c6ac7a66a9c5e141ed2975d44c11",
"size": "1077",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "radtrack/RbGenesisTab.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "681"
},
{
"name": "Erlang",
"bytes": "2396"
},
{
"name": "GLSL",
"bytes": "794"
},
{
"name": "Jupyter Notebook",
"bytes": "39421"
},
{
"name": "Makefile",
"bytes": "1128"
},
{
"name": "Python",
"bytes": "1092344"
},
{
"name": "Shell",
"bytes": "35016"
},
{
"name": "Tcl",
"bytes": "55753"
}
],
"symlink_target": ""
} |
from requests_oauthlib import OAuth1Session
import json
import time
import constants
import identify as i
def _collect_image_links(statuses, links, max_id, min_id):
    """Append media URLs from *statuses* to *links*; track id bounds.

    Prefers 'extended_entities' media (all attached images) and falls back to
    'entities' media. Returns the updated (max_id, min_id) pair.
    """
    for tw in statuses:
        if tw['id'] > max_id:
            max_id = tw['id']
        if tw['id'] < min_id:
            min_id = tw['id']
        if 'extended_entities' in tw:
            for media in tw['extended_entities']['media']:
                links.append(media['media_url'])
        elif 'media' in tw['entities']:
            for media in tw['entities']['media']:
                links.append(media['media_url'])
    return max_id, min_id


def query_twitter(kwd, count, times, since_id):
    """Search Twitter for recent tweets with images matching *kwd*.

    Issues up to *times* paged search requests of *count* results each,
    restricted to tweets newer than *since_id*.

    Returns a (links, max_id) tuple: the collected media URLs and the highest
    tweet id seen (0 when the first page is empty).
    """
    session = OAuth1Session(
        constants.TW_CONSUMER_KEY,
        constants.TW_CONSUMER_SEC,
        constants.TW_ACCESS_TOKEN,
        constants.TW_ACCESS_TOKEN_SEC
    )
    url = 'https://api.twitter.com/1.1/search/tweets.json'
    print('Querying Twitter...')
    params = {
        'q': '%s filter:images' % kwd,
        'count': count,
        'result_type': 'recent',
        'include_entities': 'true',
        'since_id': since_id,
    }
    res = json.loads(session.get(url, params=params).text)
    statuses = res['statuses']
    if not statuses:
        return [], 0
    links = []
    max_id, min_id = _collect_image_links(statuses, links, 0, statuses[0]['id'])
    # Loop variable deliberately not named 'i': that would shadow the
    # module-level 'import identify as i' alias.
    for _ in range(1, times):
        # Twitter's 'max_id' parameter is inclusive; request min_id - 1 so the
        # boundary tweet of the previous page is not fetched (and its image
        # links duplicated) again.
        res = json.loads(session.get(
            url, params=dict(params, max_id=min_id - 1)).text)
        statuses = res['statuses']
        if not statuses:
            break
        max_id, min_id = _collect_image_links(statuses, links, max_id, min_id)
    return links, max_id
def identify(links, person_group):
    """Map every known person in *person_group* to the image links they appear in.

    Returns a dict keyed by person name; people found in no image map to an
    empty list.
    """
    found = {name: [] for name in constants.PEOPLE[person_group].values()}
    for link in links:
        for person in i.identify_person(False, link, person_group):
            found[person].append(link)
    return found
| {
"content_hash": "bdafc028c68fec63efb1eb03184fe64e",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 192,
"avg_line_length": 35.08450704225352,
"alnum_prop": 0.5210758731433159,
"repo_name": "kotori-sonoda/suwameter",
"id": "9c7dd6d7f56c2b13db010771036b9f9c8e2147f2",
"size": "2506",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/facesearch/twittersearch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "294"
},
{
"name": "CSS",
"bytes": "292"
},
{
"name": "HTML",
"bytes": "6750"
},
{
"name": "JavaScript",
"bytes": "14813"
},
{
"name": "Python",
"bytes": "16483"
},
{
"name": "TypeScript",
"bytes": "8612"
}
],
"symlink_target": ""
} |
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
from .._error import (
_validate_not_none,
_validate_type_bytes,
_validate_encryption_required,
_validate_encryption_unsupported,
_ERROR_VALUE_NEGATIVE,
)
from .._common_conversion import (
_int_to_str,
_to_str,
_datetime_to_utc_string,
_get_content_md5,
)
from .._serialization import (
_get_data_bytes_only,
_add_metadata_headers,
)
from .._http import HTTPRequest
from ._error import (
_ERROR_PAGE_BLOB_SIZE_ALIGNMENT,
)
from ._upload_chunking import (
_PageBlobChunkUploader,
_upload_blob_chunks,
)
from .models import (
_BlobTypes,
PageBlobProperties,
)
from .._constants import (
SERVICE_HOST_BASE,
DEFAULT_PROTOCOL,
)
from ._encryption import _generate_blob_encryption_data
from ._serialization import (
_get_path,
_validate_and_format_range_headers,
)
from ._deserialization import (
_convert_xml_to_page_ranges,
_parse_page_properties,
_parse_base_properties,
)
from .baseblobservice import BaseBlobService
from os import path
import sys
if sys.version_info >= (3,):
from io import BytesIO
else:
from cStringIO import StringIO as BytesIO
# Keep this value in sync with _ERROR_PAGE_BLOB_SIZE_ALIGNMENT
_PAGE_ALIGNMENT = 512
class PageBlobService(BaseBlobService):
'''
Page blobs are a collection of 512-byte pages optimized for random read and
write operations. To create a page blob, you initialize the page blob and
specify the maximum size the page blob will grow. To add or update the
contents of a page blob, you write a page or pages by specifying an offset
and a range that align to 512-byte page boundaries. A write to a page blob
can overwrite just one page, some pages, or up to 4 MB of the page blob.
Writes to page blobs happen in-place and are immediately committed to the
blob. The maximum size for a page blob is 1 TB.
:ivar int MAX_PAGE_SIZE:
The size of the pages put by create_blob_from_* methods. Smaller pages
may be put if there is less data provided. The maximum page size the service
supports is 4MB.
'''
MAX_PAGE_SIZE = 4 * 1024 * 1024
def __init__(self, account_name=None, account_key=None, sas_token=None,
is_emulated=False, protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE,
custom_domain=None, request_session=None, connection_string=None):
'''
:param str account_name:
The storage account name. This is used to authenticate requests
signed with an account key and to construct the storage endpoint. It
is required unless a connection string is given, or if a custom
domain is used with anonymous authentication.
:param str account_key:
The storage account key. This is used for shared key authentication.
If neither account key or sas token is specified, anonymous access
will be used.
:param str sas_token:
A shared access signature token to use to authenticate requests
instead of the account key. If account key and sas token are both
specified, account key will be used to sign. If neither are
specified, anonymous access will be used.
:param bool is_emulated:
Whether to use the emulator. Defaults to False. If specified, will
override all other parameters besides connection string and request
session.
:param str protocol:
The protocol to use for requests. Defaults to https.
:param str endpoint_suffix:
The host base component of the url, minus the account name. Defaults
to Azure (core.windows.net). Override this to use the China cloud
(core.chinacloudapi.cn).
:param str custom_domain:
The custom domain to use. This can be set in the Azure Portal. For
example, 'www.mydomain.com'.
:param requests.Session request_session:
The session object to use for http requests.
:param str connection_string:
If specified, this will override all other parameters besides
request session. See
http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/
for the connection string format.
'''
self.blob_type = _BlobTypes.PageBlob
super(PageBlobService, self).__init__(
account_name, account_key, sas_token, is_emulated, protocol, endpoint_suffix,
custom_domain, request_session, connection_string)
def create_blob(
self, container_name, blob_name, content_length, content_settings=None,
sequence_number=None, metadata=None, lease_id=None, if_modified_since=None,
if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None):
'''
Creates a new Page Blob.
See create_blob_from_* for high level functions that handle the
creation and upload of large blobs with automatic chunking and
progress notifications.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of blob to create or update.
:param int content_length:
Required. This header specifies the maximum size
for the page blob, up to 1 TB. The page blob size must be aligned
to a 512-byte boundary.
:param ~azure.storage.blob.models.ContentSettings content_settings:
ContentSettings object used to set properties on the blob.
:param int sequence_number:
The sequence number is a user-controlled value that you can use to
track requests. The value of the sequence number must be between 0
and 2^63 - 1.The default value is 0.
:param metadata:
Name-value pairs associated with the blob as metadata.
:type metadata: a dict mapping str to str
:param str lease_id:
Required if the blob has an active lease.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify this header to perform
the operation only if the resource's ETag matches the value specified.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify this header
to perform the operation only if the resource's ETag does not match
the value specified. Specify the wildcard character (*) to perform
the operation only if the resource does not exist, and fail the
operation if it does exist.
:param int timeout:
The timeout parameter is expressed in seconds.
:return: ETag and last modified properties for the new Page Blob
:rtype: :class:`~azure.storage.blob.models.ResourceProperties`
'''
_validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
return self._create_blob(
container_name,
blob_name,
content_length,
content_settings=content_settings,
sequence_number=sequence_number,
metadata=metadata,
lease_id=lease_id,
if_modified_since=if_modified_since,
if_unmodified_since=if_unmodified_since,
if_match=if_match,
if_none_match=if_none_match,
timeout=timeout
)
def update_page(
self, container_name, blob_name, page, start_range, end_range,
validate_content=False, lease_id=None, if_sequence_number_lte=None,
if_sequence_number_lt=None, if_sequence_number_eq=None,
if_modified_since=None, if_unmodified_since=None,
if_match=None, if_none_match=None, timeout=None):
'''
Updates a range of pages.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of existing blob.
:param bytes page:
Content of the page.
:param int start_range:
Start of byte range to use for writing to a section of the blob.
Pages must be aligned with 512-byte boundaries, the start offset
must be a modulus of 512 and the end offset must be a modulus of
512-1. Examples of valid byte ranges are 0-511, 512-1023, etc.
:param int end_range:
End of byte range to use for writing to a section of the blob.
Pages must be aligned with 512-byte boundaries, the start offset
must be a modulus of 512 and the end offset must be a modulus of
512-1. Examples of valid byte ranges are 0-511, 512-1023, etc.
:param bool validate_content:
If true, calculates an MD5 hash of the page content. The storage
service checks the hash of the content that has arrived
with the hash that was sent. This is primarily valuable for detecting
bitflips on the wire if using http instead of https as https (the default)
will already validate. Note that this MD5 hash is not stored with the
blob.
:param str lease_id:
Required if the blob has an active lease.
:param int if_sequence_number_lte:
If the blob's sequence number is less than or equal to
the specified value, the request proceeds; otherwise it fails.
:param int if_sequence_number_lt:
If the blob's sequence number is less than the specified
value, the request proceeds; otherwise it fails.
:param int if_sequence_number_eq:
If the blob's sequence number is equal to the specified
value, the request proceeds; otherwise it fails.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify an ETag value for this conditional
header to write the page only if the blob's ETag value matches the
value specified. If the values do not match, the Blob service fails.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify an ETag value for this conditional
header to write the page only if the blob's ETag value does not
match the value specified. If the values are identical, the Blob
service fails.
:param int timeout:
The timeout parameter is expressed in seconds.
:return: ETag and last modified properties for the updated Page Blob
:rtype: :class:`~azure.storage.blob.models.ResourceProperties`
'''
_validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
return self._update_page(
container_name,
blob_name,
page,
start_range,
end_range,
validate_content=validate_content,
lease_id=lease_id,
if_sequence_number_lte=if_sequence_number_lte,
if_sequence_number_lt=if_sequence_number_lt,
if_sequence_number_eq=if_sequence_number_eq,
if_modified_since=if_modified_since,
if_unmodified_since=if_unmodified_since,
if_match=if_match,
if_none_match=if_none_match,
timeout=timeout
)
def clear_page(
self, container_name, blob_name, start_range, end_range,
lease_id=None, if_sequence_number_lte=None,
if_sequence_number_lt=None, if_sequence_number_eq=None,
if_modified_since=None, if_unmodified_since=None,
if_match=None, if_none_match=None, timeout=None):
'''
Clears a range of pages.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of existing blob.
:param int start_range:
Start of byte range to use for writing to a section of the blob.
Pages must be aligned with 512-byte boundaries, the start offset
must be a modulus of 512 and the end offset must be a modulus of
512-1. Examples of valid byte ranges are 0-511, 512-1023, etc.
:param int end_range:
End of byte range to use for writing to a section of the blob.
Pages must be aligned with 512-byte boundaries, the start offset
must be a modulus of 512 and the end offset must be a modulus of
512-1. Examples of valid byte ranges are 0-511, 512-1023, etc.
:param str lease_id:
Required if the blob has an active lease.
:param int if_sequence_number_lte:
If the blob's sequence number is less than or equal to
the specified value, the request proceeds; otherwise it fails.
:param int if_sequence_number_lt:
If the blob's sequence number is less than the specified
value, the request proceeds; otherwise it fails.
:param int if_sequence_number_eq:
If the blob's sequence number is equal to the specified
value, the request proceeds; otherwise it fails.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify an ETag value for this conditional
header to write the page only if the blob's ETag value matches the
value specified. If the values do not match, the Blob service fails.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify an ETag value for this conditional
header to write the page only if the blob's ETag value does not
match the value specified. If the values are identical, the Blob
service fails.
:param int timeout:
The timeout parameter is expressed in seconds.
:return: ETag and last modified properties for the updated Page Blob
:rtype: :class:`~azure.storage.blob.models.ResourceProperties`
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
request = HTTPRequest()
request.method = 'PUT'
request.host_locations = self._get_host_locations()
request.path = _get_path(container_name, blob_name)
request.query = {
'comp': 'page',
'timeout': _int_to_str(timeout),
}
request.headers = {
'x-ms-page-write': 'clear',
'x-ms-lease-id': _to_str(lease_id),
'x-ms-if-sequence-number-le': _to_str(if_sequence_number_lte),
'x-ms-if-sequence-number-lt': _to_str(if_sequence_number_lt),
'x-ms-if-sequence-number-eq': _to_str(if_sequence_number_eq),
'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
'If-Match': _to_str(if_match),
'If-None-Match': _to_str(if_none_match)
}
_validate_and_format_range_headers(
request,
start_range,
end_range,
align_to_page=True)
return self._perform_request(request, _parse_page_properties)
def get_page_ranges(
self, container_name, blob_name, snapshot=None, start_range=None,
end_range=None, lease_id=None, if_modified_since=None,
if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None):
'''
Returns the list of valid page ranges for a Page Blob or snapshot
of a page blob.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of existing blob.
:param str snapshot:
The snapshot parameter is an opaque DateTime value that,
when present, specifies the blob snapshot to retrieve information
from.
:param int start_range:
Start of byte range to use for getting valid page ranges.
If no end_range is given, all bytes after the start_range will be searched.
Pages must be aligned with 512-byte boundaries, the start offset
must be a modulus of 512 and the end offset must be a modulus of
512-1. Examples of valid byte ranges are 0-511, 512-, etc.
:param int end_range:
End of byte range to use for getting valid page ranges.
If end_range is given, start_range must be provided.
This range will return valid page ranges for from the offset start up to
offset end.
Pages must be aligned with 512-byte boundaries, the start offset
must be a modulus of 512 and the end offset must be a modulus of
512-1. Examples of valid byte ranges are 0-511, 512-, etc.
:param str lease_id:
Required if the blob has an active lease.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify this header to perform
the operation only if the resource's ETag matches the value specified.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify this header
to perform the operation only if the resource's ETag does not match
the value specified. Specify the wildcard character (*) to perform
the operation only if the resource does not exist, and fail the
operation if it does exist.
:param int timeout:
The timeout parameter is expressed in seconds.
:return: A list of valid Page Ranges for the Page Blob.
:rtype: list of :class:`~azure.storage.blob.models.PageRange`
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
request = HTTPRequest()
request.method = 'GET'
request.host_locations = self._get_host_locations(secondary=True)
request.path = _get_path(container_name, blob_name)
request.query = {
'comp': 'pagelist',
'snapshot': _to_str(snapshot),
'timeout': _int_to_str(timeout),
}
request.headers = {
'x-ms-lease-id': _to_str(lease_id),
'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
'If-Match': _to_str(if_match),
'If-None-Match': _to_str(if_none_match),
}
if start_range is not None:
_validate_and_format_range_headers(
request,
start_range,
end_range,
start_range_required=False,
end_range_required=False,
align_to_page=True)
return self._perform_request(request, _convert_xml_to_page_ranges)
def get_page_ranges_diff(
self, container_name, blob_name, previous_snapshot, snapshot=None,
start_range=None, end_range=None, lease_id=None, if_modified_since=None,
if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None):
'''
The response will include only the pages that are different between either a
recent snapshot or the current blob and a previous snapshot, including pages
that were cleared.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of existing blob.
:param str previous_snapshot:
The snapshot parameter is an opaque DateTime value that
specifies a previous blob snapshot to be compared
against a more recent snapshot or the current blob.
:param str snapshot:
The snapshot parameter is an opaque DateTime value that
specifies a more recent blob snapshot to be compared
against a previous snapshot (previous_snapshot).
:param int start_range:
Start of byte range to use for getting different page ranges.
If no end_range is given, all bytes after the start_range will be searched.
Pages must be aligned with 512-byte boundaries, the start offset
must be a modulus of 512 and the end offset must be a modulus of
512-1. Examples of valid byte ranges are 0-511, 512-, etc.
:param int end_range:
End of byte range to use for getting different page ranges.
If end_range is given, start_range must be provided.
This range will return valid page ranges for from the offset start up to
offset end.
Pages must be aligned with 512-byte boundaries, the start offset
must be a modulus of 512 and the end offset must be a modulus of
512-1. Examples of valid byte ranges are 0-511, 512-, etc.
:param str lease_id:
Required if the blob has an active lease.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify this header to perform
the operation only if the resource's ETag matches the value specified.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify this header
to perform the operation only if the resource's ETag does not match
the value specified. Specify the wildcard character (*) to perform
the operation only if the resource does not exist, and fail the
operation if it does exist.
:param int timeout:
The timeout parameter is expressed in seconds.
:return: A list of different Page Ranges for the Page Blob.
:rtype: list of :class:`~azure.storage.blob.models.PageRange`
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('previous_snapshot', previous_snapshot)
request = HTTPRequest()
request.method = 'GET'
request.host_locations = self._get_host_locations(secondary=True)
request.path = _get_path(container_name, blob_name)
request.query = {
'comp': 'pagelist',
'snapshot': _to_str(snapshot),
'prevsnapshot': _to_str(previous_snapshot),
'timeout': _int_to_str(timeout),
}
request.headers = {
'x-ms-lease-id': _to_str(lease_id),
'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
'If-Match': _to_str(if_match),
'If-None-Match': _to_str(if_none_match),
}
if start_range is not None:
_validate_and_format_range_headers(
request,
start_range,
end_range,
start_range_required=False,
end_range_required=False,
align_to_page=True)
return self._perform_request(request, _convert_xml_to_page_ranges)
def set_sequence_number(
self, container_name, blob_name, sequence_number_action, sequence_number=None,
lease_id=None, if_modified_since=None, if_unmodified_since=None,
if_match=None, if_none_match=None, timeout=None):
'''
Sets the blob sequence number.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of existing blob.
:param str sequence_number_action:
This property indicates how the service should modify the blob's sequence
number. See :class:`.SequenceNumberAction` for more information.
:param str sequence_number:
This property sets the blob's sequence number. The sequence number is a
user-controlled property that you can use to track requests and manage
concurrency issues.
:param str lease_id:
Required if the blob has an active lease.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify this header to perform
the operation only if the resource's ETag matches the value specified.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify this header
to perform the operation only if the resource's ETag does not match
the value specified. Specify the wildcard character (*) to perform
the operation only if the resource does not exist, and fail the
operation if it does exist.
:param int timeout:
The timeout parameter is expressed in seconds.
:return: ETag and last modified properties for the updated Page Blob
:rtype: :class:`~azure.storage.blob.models.ResourceProperties`
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('sequence_number_action', sequence_number_action)
request = HTTPRequest()
request.method = 'PUT'
request.host_locations = self._get_host_locations()
request.path = _get_path(container_name, blob_name)
request.query = {
'comp': 'properties',
'timeout': _int_to_str(timeout),
}
request.headers = {
'x-ms-blob-sequence-number': _to_str(sequence_number),
'x-ms-sequence-number-action': _to_str(sequence_number_action),
'x-ms-lease-id': _to_str(lease_id),
'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
'If-Match': _to_str(if_match),
'If-None-Match': _to_str(if_none_match),
}
return self._perform_request(request, _parse_page_properties)
def resize_blob(
self, container_name, blob_name, content_length,
lease_id=None, if_modified_since=None, if_unmodified_since=None,
if_match=None, if_none_match=None, timeout=None):
'''
Resizes a page blob to the specified size. If the specified value is less
than the current size of the blob, then all pages above the specified value
are cleared.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of existing blob.
:param int content_length:
Size to resize blob to.
:param str lease_id:
Required if the blob has an active lease.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify this header to perform
the operation only if the resource's ETag matches the value specified.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify this header
to perform the operation only if the resource's ETag does not match
the value specified. Specify the wildcard character (*) to perform
the operation only if the resource does not exist, and fail the
operation if it does exist.
:param int timeout:
The timeout parameter is expressed in seconds.
:return: ETag and last modified properties for the updated Page Blob
:rtype: :class:`~azure.storage.blob.models.ResourceProperties`
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('content_length', content_length)
request = HTTPRequest()
request.method = 'PUT'
request.host_locations = self._get_host_locations()
request.path = _get_path(container_name, blob_name)
request.query = {
'comp': 'properties',
'timeout': _int_to_str(timeout),
}
request.headers = {
'x-ms-blob-content-length': _to_str(content_length),
'x-ms-lease-id': _to_str(lease_id),
'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
'If-Match': _to_str(if_match),
'If-None-Match': _to_str(if_none_match),
}
return self._perform_request(request, _parse_page_properties)
#----Convenience APIs-----------------------------------------------------
def create_blob_from_path(
        self, container_name, blob_name, file_path, content_settings=None,
        metadata=None, validate_content=False, progress_callback=None, max_connections=2,
        lease_id=None, if_modified_since=None, if_unmodified_since=None,
        if_match=None, if_none_match=None, timeout=None):
    '''
    Creates a new page blob from a local file, or updates the content of an
    existing blob, with automatic chunking and progress notifications.

    :param str container_name: Name of existing container.
    :param str blob_name: Name of blob to create or update.
    :param str file_path: Path of the file to upload as the blob content.
    :param ~azure.storage.blob.models.ContentSettings content_settings:
        ContentSettings object used to set blob properties.
    :param metadata: Name-value pairs associated with the blob as metadata.
    :type metadata: a dict mapping str to str
    :param bool validate_content:
        If true, an MD5 hash is calculated for each page of the blob and
        checked by the storage service against the content that arrived.
        Primarily valuable for detecting bitflips on the wire when using
        http instead of https (https already validates). The hash is not
        stored with the blob.
    :param progress_callback:
        Callback with signature func(current, total) where current is the
        number of bytes transferred so far and total is the size of the
        blob, or None if the total size is unknown.
    :type progress_callback: callback function in format of func(current, total)
    :param int max_connections: Maximum number of parallel connections to use.
    :param str lease_id: Required if the blob has an active lease.
    :param datetime if_modified_since:
        A DateTime value, expected to be UTC (naive datetimes are assumed
        UTC, aware ones are converted). Perform the operation only if the
        resource has been modified since the specified time.
    :param datetime if_unmodified_since:
        A DateTime value, expected to be UTC (naive datetimes are assumed
        UTC, aware ones are converted). Perform the operation only if the
        resource has not been modified since the specified date/time.
    :param str if_match:
        An ETag value, or the wildcard character (*). Perform the operation
        only if the resource's ETag matches the value specified.
    :param str if_none_match:
        An ETag value, or the wildcard character (*). Perform the operation
        only if the resource's ETag does not match the value specified.
    :param int timeout:
        Timeout in seconds. This method may make multiple calls to the
        Azure service; the timeout applies to each call individually.
    '''
    _validate_not_none('container_name', container_name)
    _validate_not_none('blob_name', blob_name)
    _validate_not_none('file_path', file_path)

    # A page blob cannot be created with an unknown size, so take the byte
    # count from the file system before streaming the contents.
    blob_size = path.getsize(file_path)
    with open(file_path, 'rb') as file_stream:
        self.create_blob_from_stream(
            container_name=container_name,
            blob_name=blob_name,
            stream=file_stream,
            count=blob_size,
            content_settings=content_settings,
            metadata=metadata,
            validate_content=validate_content,
            progress_callback=progress_callback,
            max_connections=max_connections,
            lease_id=lease_id,
            if_modified_since=if_modified_since,
            if_unmodified_since=if_unmodified_since,
            if_match=if_match,
            if_none_match=if_none_match,
            timeout=timeout)
def create_blob_from_stream(
        self, container_name, blob_name, stream, count, content_settings=None,
        metadata=None, validate_content=False, progress_callback=None,
        max_connections=2, lease_id=None, if_modified_since=None,
        if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None):
    '''
    Creates a new page blob from a file/stream, or updates the content of an
    existing blob, with automatic chunking and progress notifications.

    :param str container_name: Name of existing container.
    :param str blob_name: Name of blob to create or update.
    :param io.IOBase stream: Opened file/stream to upload as the blob content.
    :param int count:
        Number of bytes to read from the stream. This is required; a page
        blob cannot be created if the count is unknown.
    :param ~azure.storage.blob.models.ContentSettings content_settings:
        ContentSettings object used to set the blob properties.
    :param metadata: Name-value pairs associated with the blob as metadata.
    :type metadata: a dict mapping str to str
    :param bool validate_content:
        If true, an MD5 hash is calculated for each page of the blob and
        checked by the storage service against the content that arrived.
        Primarily valuable for detecting bitflips on the wire when using
        http instead of https (https already validates). The hash is not
        stored with the blob.
    :param progress_callback:
        Callback with signature func(current, total) where current is the
        number of bytes transferred so far and total is the size of the
        blob, or None if the total size is unknown.
    :type progress_callback: callback function in format of func(current, total)
    :param int max_connections:
        Maximum number of parallel connections to use. Note that parallel
        upload requires the stream to be seekable.
    :param str lease_id: Required if the blob has an active lease.
    :param datetime if_modified_since:
        A DateTime value, expected to be UTC (naive datetimes are assumed
        UTC, aware ones are converted). Perform the operation only if the
        resource has been modified since the specified time.
    :param datetime if_unmodified_since:
        A DateTime value, expected to be UTC (naive datetimes are assumed
        UTC, aware ones are converted). Perform the operation only if the
        resource has not been modified since the specified date/time.
    :param str if_match:
        An ETag value, or the wildcard character (*). Perform the operation
        only if the resource's ETag matches the value specified.
    :param str if_none_match:
        An ETag value, or the wildcard character (*). Perform the operation
        only if the resource's ETag does not match the value specified.
    :param int timeout:
        Timeout in seconds. This method may make multiple calls to the
        Azure service; the timeout applies to each call individually.
    '''
    _validate_not_none('container_name', container_name)
    _validate_not_none('blob_name', blob_name)
    _validate_not_none('stream', stream)
    _validate_not_none('count', count)
    _validate_encryption_required(self.require_encryption, self.key_encryption_key)

    if count < 0:
        raise ValueError(_ERROR_VALUE_NEGATIVE.format('count'))
    if count % _PAGE_ALIGNMENT != 0:
        raise ValueError(_ERROR_PAGE_BLOB_SIZE_ALIGNMENT.format(count))

    # With client-side encryption configured, generate the key material and
    # the serialized encryption metadata before the blob is created.
    content_encryption_key = None
    initialization_vector = None
    encryption_data = None
    if self.key_encryption_key is not None:
        content_encryption_key, initialization_vector, encryption_data = \
            _generate_blob_encryption_data(self.key_encryption_key)

    # Step 1: create the page blob at its final size.
    create_response = self._create_blob(
        container_name=container_name,
        blob_name=blob_name,
        content_length=count,
        content_settings=content_settings,
        metadata=metadata,
        lease_id=lease_id,
        if_modified_since=if_modified_since,
        if_unmodified_since=if_unmodified_since,
        if_match=if_match,
        if_none_match=if_none_match,
        timeout=timeout,
        encryption_data=encryption_data
    )

    # Step 2: fill it chunk by chunk. Conditioning the uploads on the ETag
    # returned by the create call makes them fail if the blob is modified
    # concurrently.
    _upload_blob_chunks(
        blob_service=self,
        container_name=container_name,
        blob_name=blob_name,
        blob_size=count,
        block_size=self.MAX_PAGE_SIZE,
        stream=stream,
        max_connections=max_connections,
        progress_callback=progress_callback,
        validate_content=validate_content,
        lease_id=lease_id,
        uploader_class=_PageBlobChunkUploader,
        if_match=create_response.etag,
        timeout=timeout,
        content_encryption_key=content_encryption_key,
        initialization_vector=initialization_vector
    )
def create_blob_from_bytes(
        self, container_name, blob_name, blob, index=0, count=None,
        content_settings=None, metadata=None, validate_content=False,
        progress_callback=None, max_connections=2, lease_id=None,
        if_modified_since=None, if_unmodified_since=None, if_match=None,
        if_none_match=None, timeout=None):
    '''
    Creates a new blob from an array of bytes, or updates the content
    of an existing blob, with automatic chunking and progress
    notifications.

    :param str container_name: Name of existing container.
    :param str blob_name: Name of blob to create or update.
    :param bytes blob: Content of blob as an array of bytes.
    :param int index: Start index in the byte array.
    :param int count:
        Number of bytes to upload. Set to None or a negative value to
        upload all bytes starting from index.
    :param ~azure.storage.blob.models.ContentSettings content_settings:
        ContentSettings object used to set blob properties.
    :param metadata: Name-value pairs associated with the blob as metadata.
    :type metadata: a dict mapping str to str
    :param bool validate_content:
        If true, an MD5 hash is calculated for each page of the blob and
        checked by the storage service against the content that arrived.
        Primarily valuable for detecting bitflips on the wire when using
        http instead of https (https already validates). The hash is not
        stored with the blob.
    :param progress_callback:
        Callback with signature func(current, total) where current is the
        number of bytes transferred so far and total is the size of the
        blob, or None if the total size is unknown.
    :type progress_callback: callback function in format of func(current, total)
    :param int max_connections: Maximum number of parallel connections to use.
    :param str lease_id: Required if the blob has an active lease.
    :param datetime if_modified_since:
        A DateTime value, expected to be UTC (naive datetimes are assumed
        UTC, aware ones are converted). Perform the operation only if the
        resource has been modified since the specified time.
    :param datetime if_unmodified_since:
        A DateTime value, expected to be UTC (naive datetimes are assumed
        UTC, aware ones are converted). Perform the operation only if the
        resource has not been modified since the specified date/time.
    :param str if_match:
        An ETag value, or the wildcard character (*). Perform the operation
        only if the resource's ETag matches the value specified.
    :param str if_none_match:
        An ETag value, or the wildcard character (*). Perform the operation
        only if the resource's ETag does not match the value specified.
    :param int timeout:
        Timeout in seconds. This method may make multiple calls to the
        Azure service; the timeout applies to each call individually.
    '''
    _validate_not_none('container_name', container_name)
    _validate_not_none('blob_name', blob_name)
    _validate_not_none('blob', blob)
    _validate_type_bytes('blob', blob)

    if index < 0:
        raise IndexError(_ERROR_VALUE_NEGATIVE.format('index'))

    # None (or a negative value) means "everything from index onwards".
    if count is None or count < 0:
        count = len(blob) - index

    # Wrap the byte array in an in-memory stream positioned at the start
    # index, then reuse the stream-based upload path.
    byte_stream = BytesIO(blob)
    byte_stream.seek(index)

    self.create_blob_from_stream(
        container_name=container_name,
        blob_name=blob_name,
        stream=byte_stream,
        count=count,
        content_settings=content_settings,
        metadata=metadata,
        validate_content=validate_content,
        lease_id=lease_id,
        progress_callback=progress_callback,
        max_connections=max_connections,
        if_modified_since=if_modified_since,
        if_unmodified_since=if_unmodified_since,
        if_match=if_match,
        if_none_match=if_none_match,
        timeout=timeout)
#-----Helper methods-----------------------------------------------------
def _create_blob(
        self, container_name, blob_name, content_length, content_settings=None,
        sequence_number=None, metadata=None, lease_id=None, if_modified_since=None,
        if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None,
        encryption_data=None):
    '''
    See create_blob for more details. This helper method
    allows for encryption or other such special behavior because
    it is safely handled by the library. These behaviors are
    prohibited in the public version of this function.

    :param str encryption_data:
        The JSON formatted encryption metadata to upload as a part of the blob.
        This should only be passed internally from other methods and only applied
        when uploading entire blob contents immediately follows creation of the blob.
    '''
    _validate_not_none('container_name', container_name)
    _validate_not_none('blob_name', blob_name)
    _validate_not_none('content_length', content_length)

    # Build the Put Blob request; x-ms-blob-type carries the concrete blob
    # type of this service class, and the If-* headers make the operation
    # conditional.
    request = HTTPRequest()
    request.method = 'PUT'
    request.host_locations = self._get_host_locations()
    request.path = _get_path(container_name, blob_name)
    request.query = {'timeout': _int_to_str(timeout)}
    request.headers = {
        'x-ms-blob-type': _to_str(self.blob_type),
        'x-ms-blob-content-length': _to_str(content_length),
        'x-ms-lease-id': _to_str(lease_id),
        'x-ms-blob-sequence-number': _to_str(sequence_number),
        'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
        'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
        'If-Match': _to_str(if_match),
        'If-None-Match': _to_str(if_none_match)
    }
    _add_metadata_headers(metadata, request)
    if content_settings is not None:
        request.headers.update(content_settings._to_headers())
    # Encryption metadata is stored alongside the blob as a metadata header.
    if encryption_data is not None:
        request.headers['x-ms-meta-encryptiondata'] = encryption_data

    return self._perform_request(request, _parse_base_properties)
def _update_page(
        self, container_name, blob_name, page, start_range, end_range,
        validate_content=False, lease_id=None, if_sequence_number_lte=None,
        if_sequence_number_lt=None, if_sequence_number_eq=None,
        if_modified_since=None, if_unmodified_since=None,
        if_match=None, if_none_match=None, timeout=None):
    '''
    See update_page for more details. This helper method
    allows for encryption or other such special behavior because
    it is safely handled by the library. These behaviors are
    prohibited in the public version of this function.
    '''
    # Build the Put Page request: comp=page with page-write mode 'update'
    # writes the supplied bytes into the given range.
    request = HTTPRequest()
    request.method = 'PUT'
    request.host_locations = self._get_host_locations()
    request.path = _get_path(container_name, blob_name)
    request.query = {
        'comp': 'page',
        'timeout': _int_to_str(timeout),
    }
    request.headers = {
        'x-ms-page-write': 'update',
        'x-ms-lease-id': _to_str(lease_id),
        'x-ms-if-sequence-number-le': _to_str(if_sequence_number_lte),
        'x-ms-if-sequence-number-lt': _to_str(if_sequence_number_lt),
        'x-ms-if-sequence-number-eq': _to_str(if_sequence_number_eq),
        'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
        'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
        'If-Match': _to_str(if_match),
        'If-None-Match': _to_str(if_none_match)
    }
    # Adds the range header and enforces page alignment (align_to_page=True).
    _validate_and_format_range_headers(
        request,
        start_range,
        end_range,
        align_to_page=True)
    request.body = _get_data_bytes_only('page', page)
    # Optional transactional MD5 of the page content for transport integrity.
    if validate_content:
        computed_md5 = _get_content_md5(request.body)
        request.headers['Content-MD5'] = _to_str(computed_md5)

    return self._perform_request(request, _parse_page_properties)
"content_hash": "fd09bd2e6d49193967bdf8d34745e775",
"timestamp": "",
"source": "github",
"line_count": 1094,
"max_line_length": 104,
"avg_line_length": 51.39396709323583,
"alnum_prop": 0.6149933303690529,
"repo_name": "dstrockis/outlook-autocategories",
"id": "eeb0e3e9a14ec4c5e7ecb33d46b93adbf2af391c",
"size": "56227",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/azure/storage/blob/pageblobservice.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "39286"
},
{
"name": "CSS",
"bytes": "6267"
},
{
"name": "HTML",
"bytes": "449"
},
{
"name": "JavaScript",
"bytes": "6187"
},
{
"name": "Jupyter Notebook",
"bytes": "163002"
},
{
"name": "Python",
"bytes": "11957653"
},
{
"name": "Shell",
"bytes": "423"
}
],
"symlink_target": ""
} |
"""
Helper module for celery to run a worker.
.. moduleauthor:: Martijn Vermaat <martijn@vermaat.name>
.. Licensed under the MIT license, see the LICENSE file.
"""
from . import celery, create_app
# Todo: Should we make it possible to use create_reverse_proxied_app here?
create_app().app_context().push()
| {
"content_hash": "5157d233d8a7afdb489382f66424dc2a",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 74,
"avg_line_length": 22.285714285714285,
"alnum_prop": 0.7243589743589743,
"repo_name": "sndrtj/varda",
"id": "5a555d2c7fe608bd6799b35da6a413f07cc8a588",
"size": "312",
"binary": false,
"copies": "1",
"ref": "refs/heads/groups",
"path": "varda/worker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "309231"
}
],
"symlink_target": ""
} |
from django.core.urlresolvers import reverse
from django.http.response import HttpResponseRedirect
def index_redirect(request):
    """Redirect requests for the site root to the events 'up next' page."""
    up_next_url = reverse('events:up_next')
    return HttpResponseRedirect(up_next_url)
| {
"content_hash": "ba20810faae06186788861055e79374c",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 58,
"avg_line_length": 31.5,
"alnum_prop": 0.8201058201058201,
"repo_name": "qsic/qsic3",
"id": "df2bda813381912f20a4ad9995594a48a6f89b88",
"size": "189",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "418827"
},
{
"name": "HTML",
"bytes": "673563"
},
{
"name": "JavaScript",
"bytes": "179962"
},
{
"name": "Python",
"bytes": "102793"
},
{
"name": "Ruby",
"bytes": "851"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import

import os
import sys

from oslo.config import cfg
import pbr.version

from keystone import assignment
from keystone.common import openssl
from keystone.common import sql
from keystone.common.sql import migration_helpers
from keystone.common import utils
from keystone import config
from keystone.i18n import _, _LW
from keystone import identity
from keystone.openstack.common import log
from keystone import token
# Module-level logger and the global configuration object shared by all of
# the command classes below.
LOG = log.getLogger(__name__)
CONF = config.CONF
class BaseApp(object):
    """Base class for keystone-manage sub-commands."""

    # Sub-command name; concrete subclasses must override this.
    name = None

    @classmethod
    def add_argument_parser(cls, subparsers):
        """Register this command's sub-parser and bind the class to it."""
        command_parser = subparsers.add_parser(cls.name, help=cls.__doc__)
        command_parser.set_defaults(cmd_class=cls)
        return command_parser
class DbSync(BaseApp):
    """Sync the database."""

    name = 'db_sync'

    @classmethod
    def add_argument_parser(cls, subparsers):
        """Register db_sync's optional version and extension arguments."""
        parser = super(DbSync, cls).add_argument_parser(subparsers)
        parser.add_argument(
            'version', default=None, nargs='?',
            help=('Migrate the database up to a specified version. '
                  'If not provided, db_sync will migrate the database '
                  'to the latest known version.'))
        parser.add_argument(
            '--extension', default=None,
            help=('Migrate the database for the specified extension. '
                  'If not provided, db_sync will migrate the common '
                  'repository.'))
        return parser

    @staticmethod
    def main():
        """Run the schema migration selected on the command line."""
        migration_helpers.sync_database_to_version(CONF.command.extension,
                                                   CONF.command.version)
class DbVersion(BaseApp):
    """Print the current migration version of the database."""

    name = 'db_version'

    @classmethod
    def add_argument_parser(cls, subparsers):
        """Register db_version's optional --extension argument.

        Returns the parser, for consistency with the base implementation
        and the sibling commands (the original omitted the return, making
        this the only command whose parser could not be inspected by the
        caller).
        """
        parser = super(DbVersion, cls).add_argument_parser(subparsers)
        parser.add_argument('--extension', default=None,
                            help=('Print the migration version of the '
                                  'database for the specified extension. If '
                                  'not provided, print it for the common '
                                  'repository.'))
        return parser

    @staticmethod
    def main():
        """Print the current migration version of the selected repository."""
        extension = CONF.command.extension
        migration_helpers.print_db_version(extension)
class BaseCertificateSetup(BaseApp):
    """Common user/group setup for PKI and SSL generation."""

    @classmethod
    def add_argument_parser(cls, subparsers):
        parser = super(BaseCertificateSetup,
                       cls).add_argument_parser(subparsers)
        # Ownership options are only mandatory when running as root
        # (NOTE(review): presumably so root-generated files get explicit,
        # non-root ownership — confirm).
        running_as_root = (os.geteuid() == 0)
        parser.add_argument('--keystone-user', required=running_as_root)
        parser.add_argument('--keystone-group', required=running_as_root)
        parser.add_argument('--rebuild', default=False, action='store_true',
                            help=('Rebuild certificate files: erase previous '
                                  'files and regenerate them.'))
        return parser

    @staticmethod
    def get_user_group():
        """Resolve --keystone-user/--keystone-group to numeric unix ids.

        Returns a (user_id, group_id) tuple; either element is None when
        the corresponding option was not supplied.

        :raises ValueError: if the named user or group does not exist.
        """
        keystone_user_id = None
        keystone_group_id = None

        try:
            a = CONF.command.keystone_user
            if a:
                keystone_user_id = utils.get_unix_user(a)[0]
        except KeyError:
            # utils.get_unix_user raises KeyError for an unknown name.
            raise ValueError("Unknown user '%s' in --keystone-user" % a)

        try:
            a = CONF.command.keystone_group
            if a:
                keystone_group_id = utils.get_unix_group(a)[0]
        except KeyError:
            raise ValueError("Unknown group '%s' in --keystone-group" % a)

        return keystone_user_id, keystone_group_id
class PKISetup(BaseCertificateSetup):
    """Set up Key pairs and certificates for token signing and verification.

    This is NOT intended for production use, see Keystone Configuration
    documentation for details.
    """

    name = 'pki_setup'

    @classmethod
    def main(cls):
        """Generate the PKI signing material, warning about production use."""
        LOG.warn(_LW('keystone-manage pki_setup is not recommended for '
                     'production use.'))
        user_id, group_id = cls.get_user_group()
        configurer = openssl.ConfigurePKI(user_id, group_id,
                                          rebuild=CONF.command.rebuild)
        configurer.run()
class SSLSetup(BaseCertificateSetup):
    """Create key pairs and certificates for HTTPS connections.

    This is NOT intended for production use, see Keystone Configuration
    documentation for details.
    """

    name = 'ssl_setup'

    @classmethod
    def main(cls):
        """Generate the SSL key material, warning about production use."""
        LOG.warn(_LW('keystone-manage ssl_setup is not recommended for '
                     'production use.'))
        user_id, group_id = cls.get_user_group()
        configurer = openssl.ConfigureSSL(user_id, group_id,
                                          rebuild=CONF.command.rebuild)
        configurer.run()
class TokenFlush(BaseApp):
    """Flush expired tokens from the backend."""

    name = 'token_flush'

    @classmethod
    def main(cls):
        """Ask the token persistence driver to delete expired tokens."""
        token.persistence.PersistenceManager().driver.flush_expired_tokens()
class MappingPurge(BaseApp):
    """Purge the mapping table."""

    name = 'mapping_purge'

    @classmethod
    def add_argument_parser(cls, subparsers):
        # Every filter is optional here; main() enforces that at least one
        # is given and that --all is not combined with the others.
        parser = super(MappingPurge, cls).add_argument_parser(subparsers)
        parser.add_argument('--all', default=False, action='store_true',
                            help=('Purge all mappings.'))
        parser.add_argument('--domain-name', default=None,
                            help=('Purge any mappings for the domain '
                                  'specified.'))
        parser.add_argument('--public-id', default=None,
                            help=('Purge the mapping for the Public ID '
                                  'specified.'))
        parser.add_argument('--local-id', default=None,
                            help=('Purge the mappings for the Local ID '
                                  'specified.'))
        parser.add_argument('--type', default=None, choices=['user', 'group'],
                            help=('Purge any mappings for the type '
                                  'specified.'))
        return parser

    @staticmethod
    def main():
        def validate_options():
            # NOTE(henry-nash): It would be nice to use the argparse automated
            # checking for this validation, but the only way I can see doing
            # that is to make the default (i.e. if no optional parameters
            # are specified) to purge all mappings - and that sounds too
            # dangerous as a default. So we use it in a slightly
            # unconventional way, where all parameters are optional, but you
            # must specify at least one.
            if (CONF.command.all is False and
                    CONF.command.domain_name is None and
                    CONF.command.public_id is None and
                    CONF.command.local_id is None and
                    CONF.command.type is None):
                raise ValueError(_('At least one option must be provided'))

            if (CONF.command.all is True and
                    (CONF.command.domain_name is not None or
                     CONF.command.public_id is not None or
                     CONF.command.local_id is not None or
                     CONF.command.type is not None)):
                raise ValueError(_('--all option cannot be mixed with '
                                   'other options'))

        def get_domain_id(name):
            # Resolve a domain name to its id via the assignment backend.
            # NOTE(review): identity.Manager() appears to be instantiated
            # purely for its side effects — confirm before removing.
            try:
                identity.Manager()
                assignment_manager = assignment.Manager()
                return assignment_manager.driver.get_domain_by_name(name)['id']
            except KeyError:
                raise ValueError(_("Unknown domain '%(name)s' specified by "
                                   "--domain-name") % {'name': name})

        validate_options()
        # Now that we have validated the options, we know that at least one
        # option has been specified, and if it was the --all option then this
        # was the only option specified.
        #
        # The mapping dict is used to filter which mappings are purged, so
        # leaving it empty means purge them all
        mapping = {}
        if CONF.command.domain_name is not None:
            mapping['domain_id'] = get_domain_id(CONF.command.domain_name)
        if CONF.command.public_id is not None:
            mapping['public_id'] = CONF.command.public_id
        if CONF.command.local_id is not None:
            mapping['local_id'] = CONF.command.local_id
        if CONF.command.type is not None:
            mapping['type'] = CONF.command.type

        mapping_manager = identity.MappingManager()
        mapping_manager.driver.purge_mappings(mapping)
class SamlIdentityProviderMetadata(BaseApp):
    """Generate Identity Provider metadata."""

    name = 'saml_idp_metadata'

    @staticmethod
    def main():
        """Generate the IdP metadata document and print it to stdout."""
        # NOTE(marek-denis): Since federation is currently an extension,
        # import the corresponding modules only when they are really going
        # to be used.
        from keystone.contrib.federation import idp
        generator = idp.MetadataGenerator()
        print(generator.generate_metadata().to_string())
# All keystone-manage sub-commands, in registration order.
CMDS = [
    DbSync,
    DbVersion,
    MappingPurge,
    PKISetup,
    SamlIdentityProviderMetadata,
    SSLSetup,
    TokenFlush,
]


def add_command_parsers(subparsers):
    """Register an argument sub-parser for every command in CMDS."""
    for cmd in CMDS:
        cmd.add_argument_parser(subparsers)


# Sub-command option: parsing the command line selects which command class
# to run (exposed afterwards as CONF.command.cmd_class via set_defaults).
command_opt = cfg.SubCommandOpt('command',
                                title='Commands',
                                help='Available commands',
                                handler=add_command_parsers)
def main(argv=None, config_files=None):
    """Entry point for the keystone-manage command line.

    :param argv: full argument vector (``argv[0]`` is the program name and
        is skipped). Defaults to ``sys.argv``; the original code indexed
        ``argv[1:]`` unconditionally, which raised TypeError whenever main()
        was called without arguments despite the ``argv=None`` default.
    :param config_files: optional list of configuration files to load.
    """
    if argv is None:
        argv = sys.argv
    CONF.register_cli_opt(command_opt)
    config.configure()
    sql.initialize()
    config.set_default_for_default_log_levels()

    CONF(args=argv[1:],
         project='keystone',
         version=pbr.version.VersionInfo('keystone').version_string(),
         usage='%(prog)s [' + '|'.join([cmd.name for cmd in CMDS]) + ']',
         default_config_files=config_files)
    config.setup_logging()
    # Dispatch to the command class selected by the sub-command parser.
    CONF.command.cmd_class.main()
| {
"content_hash": "21bb5af3b4b3071735ef115264a1dffd",
"timestamp": "",
"source": "github",
"line_count": 297,
"max_line_length": 79,
"avg_line_length": 35.16161616161616,
"alnum_prop": 0.5881451690127358,
"repo_name": "blueboxgroup/keystone",
"id": "d86349cd1e876cdfa3699d63a1abd64ba5b76c03",
"size": "11029",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "keystone/cli.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "5753"
},
{
"name": "Python",
"bytes": "3437662"
},
{
"name": "Shell",
"bytes": "10877"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add the ``last_t`` field to TwilioSMSDevice."""

    # Runs after the app's initial schema migration.
    dependencies = [
        ('otp_twilio_encrypted', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='twiliosmsdevice',
            name='last_t',
            # Per the help text: stores the time step of the most recently
            # verified token, so the next token must use a higher step.
            field=models.BigIntegerField(default=-1, help_text='The t value of the latest verified token. The next token must be at a higher time step.'),
            preserve_default=True,
        ),
    ]
| {
"content_hash": "3a8a689883fd7d0300e0991e0844716f",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 154,
"avg_line_length": 27.68421052631579,
"alnum_prop": 0.6254752851711026,
"repo_name": "gustavrannestig/otp_twilio_encrypted",
"id": "e42aba264969abf8c190646f86699f7ec0f851b1",
"size": "550",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "otp_twilio_encrypted/migrations/0002_last_t.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "319"
},
{
"name": "Python",
"bytes": "21163"
}
],
"symlink_target": ""
} |
"""Asserts and Boolean Checks."""
import collections
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util.tf_export import tf_export
# Dtypes treated as numeric: floating point, signed/unsigned integer,
# quantized, complex, and bfloat16 types.
NUMERIC_TYPES = frozenset([
    dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int8, dtypes.int16,
    dtypes.int32, dtypes.int64, dtypes.uint8, dtypes.uint16, dtypes.uint32,
    dtypes.uint64, dtypes.qint8, dtypes.qint16, dtypes.qint32, dtypes.quint8,
    dtypes.quint16, dtypes.complex64, dtypes.complex128, dtypes.bfloat16
])

# Names exported as the public API of this module.
__all__ = [
    'assert_negative',
    'assert_positive',
    'assert_proper_iterable',
    'assert_non_negative',
    'assert_non_positive',
    'assert_equal',
    'assert_none_equal',
    'assert_near',
    'assert_integer',
    'assert_less',
    'assert_less_equal',
    'assert_greater',
    'assert_greater_equal',
    'assert_rank',
    'assert_rank_at_least',
    'assert_rank_in',
    'assert_same_float_dtype',
    'assert_scalar',
    'assert_type',
    'assert_shapes',
    'is_non_decreasing',
    'is_numeric_tensor',
    'is_strictly_increasing',
]
def _maybe_constant_value_string(t):
  """Renders `t` as a string of its statically-known value when possible.

  Non-`Tensor` inputs are simply stringified. A `Tensor` whose value can be
  resolved statically is rendered from that constant value; otherwise the
  tensor itself is returned unchanged.
  """
  if isinstance(t, ops.Tensor):
    static_value = tensor_util.constant_value(t)
    return str(static_value) if static_value is not None else t
  return str(t)
def _assert_static(condition, data):
  """Raises an InvalidArgumentError with as much information as possible."""
  if condition:
    return
  rendered = [_maybe_constant_value_string(x) for x in data]
  raise errors.InvalidArgumentError(
      node_def=None, op=None, message='\n'.join(rendered))
def _shape_and_dtype_str(tensor):
"""Returns a string containing tensor's shape and dtype."""
return 'shape=%s dtype=%s' % (tensor.shape, tensor.dtype.name)
def _unary_assert_doc(sym, sym_name):
  """Common docstring for assert_* ops that evaluate a unary predicate over every element of a tensor.

  Args:
    sym: Mathematical symbol for the check performed on each element, i.e. "> 0"
    sym_name: English-language name for the op described by sym

  Returns:
    Decorator that adds the appropriate docstring to the function for symbol
    `sym`.
  """

  def _decorator(func):
    """Generated decorator that adds the appropriate docstring to the function for symbol `sym`.

    Args:
      func: Function for a TensorFlow op

    Returns:
      Version of `func` with documentation attached.
    """
    opname = func.__name__
    cap_sym_name = sym_name.capitalize()
    # Fill the shared template with this op's name and symbol; only the
    # function's __doc__ is replaced, its behavior is untouched.
    func.__doc__ = """
    Assert the condition `x {sym}` holds element-wise.

    When running in graph mode, you should add a dependency on this operation
    to ensure that it runs. Example of adding a dependency to an operation:

    ```python
    with tf.control_dependencies([tf.debugging.{opname}(x, y)]):
      output = tf.reduce_sum(x)
    ```

    {sym_name} means, for every element `x[i]` of `x`, we have `x[i] {sym}`.
    If `x` is empty this is trivially satisfied.

    Args:
      x: Numeric `Tensor`.
      data: The tensors to print out if the condition is False. Defaults to
        error message and first few entries of `x`.
      summarize: Print this many entries of each tensor.
      message: A string to prefix to the default message.
      name: A name for this operation (optional). Defaults to "{opname}".

    Returns:
      Op that raises `InvalidArgumentError` if `x {sym}` is False.

    @compatibility(eager)
    returns None
    @end_compatibility

    Raises:
      InvalidArgumentError: if the check can be performed immediately and
        `x {sym}` is False. The check can be performed immediately during
        eager execution or if `x` is statically known.
    """.format(
        sym=sym, sym_name=cap_sym_name, opname=opname)

    return func

  return _decorator
def _binary_assert_doc(sym, test_var):
  """Common docstring for most of the v1 assert_* ops that compare two tensors element-wise.

  Args:
    sym: Binary operation symbol, i.e. "=="
    test_var: a string that represents the variable in the right-hand side of
      binary operator of the test case

  Returns:
    Decorator that adds the appropriate docstring to the function for
    symbol `sym`.
  """

  def _decorator(func):
    """Generated decorator that adds the appropriate docstring to the function for symbol `sym`.

    Args:
      func: Function for a TensorFlow op

    Returns:
      A version of `func` with documentation attached.
    """
    opname = func.__name__
    # All v1 binary assert_* ops share this template; `test_var` supplies the
    # example right-hand-side value used in the doctest snippets below.
    func.__doc__ = """
Assert the condition `x {sym} y` holds element-wise.
This condition holds if for every pair of (possibly broadcast) elements
`x[i]`, `y[i]`, we have `x[i] {sym} y[i]`.
If both `x` and `y` are empty, this is trivially satisfied.
When running in graph mode, you should add a dependency on this operation
to ensure that it runs. Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.compat.v1.{opname}(x, y)]):
output = tf.reduce_sum(x)
```
Args:
x: Numeric `Tensor`.
y: Numeric `Tensor`, same dtype as and broadcastable to `x`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`, `y`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional). Defaults to "{opname}".
Returns:
Op that raises `InvalidArgumentError` if `x {sym} y` is False.
Raises:
InvalidArgumentError: if the check can be performed immediately and
`x {sym} y` is False. The check can be performed immediately during
eager execution or if `x` and `y` are statically known.
@compatibility(TF2)
`tf.compat.v1.{opname}` is compatible with eager execution and
`tf.function`.
Please use `tf.debugging.{opname}` instead when migrating to TF2. Apart
from `data`, all arguments are supported with the same argument name.
If you want to ensure the assert statements run before the
potentially-invalid computation, please use `tf.control_dependencies`,
as tf.function auto-control dependencies are insufficient for assert
statements.
#### Structural Mapping to Native TF2
Before:
```python
tf.compat.v1.{opname}(
x=x, y=y, data=data, summarize=summarize,
message=message, name=name)
```
After:
```python
tf.debugging.{opname}(
x=x, y=y, message=message,
summarize=summarize, name=name)
```
#### TF1 & TF2 Usage Example
TF1:
>>> g = tf.Graph()
>>> with g.as_default():
...   a = tf.compat.v1.placeholder(tf.float32, [2])
...   b = tf.compat.v1.placeholder(tf.float32, [2])
...   result = tf.compat.v1.{opname}(a, b,
...     message='"a {sym} b" does not hold for the given inputs')
...   with tf.compat.v1.control_dependencies([result]):
...     sum_node = a + b
>>> sess = tf.compat.v1.Session(graph=g)
>>> val = sess.run(sum_node, feed_dict={{a: [1, 2], b:{test_var}}})
TF2:
>>> a = tf.Variable([1, 2], dtype=tf.float32)
>>> b = tf.Variable({test_var}, dtype=tf.float32)
>>> assert_op = tf.debugging.{opname}(a, b, message=
...   '"a {sym} b" does not hold for the given inputs')
>>> # When working with tf.control_dependencies
>>> with tf.control_dependencies([assert_op]):
...   val = a + b
@end_compatibility
""".format(
    sym=sym, opname=opname, test_var=test_var)
    return func

  return _decorator
def _binary_assert_doc_v2(sym, opname, test_var):
  """Common docstring for v2 assert_* ops that compare two tensors element-wise.

  Args:
    sym: Binary operation symbol, i.e. "=="
    opname: Name for the symbol, i.e. "assert_equal"
    test_var: A number used in the docstring example

  Returns:
    Decorator that adds the appropriate docstring to the function for
    symbol `sym`.
  """

  def _decorator(func):
    """Decorator that adds docstring to the function for symbol `sym`.

    Args:
      func: Function for a TensorFlow op

    Returns:
      A version of `func` with documentation attached.
    """
    # Unlike the v1 template, `opname` is passed in explicitly rather than
    # taken from func.__name__ (the v2 functions are named e.g.
    # "assert_equal_v2" but are exported as "assert_equal").
    func.__doc__ = """
Assert the condition `x {sym} y` holds element-wise.
This Op checks that `x[i] {sym} y[i]` holds for every pair of (possibly
broadcast) elements of `x` and `y`. If both `x` and `y` are empty, this is
trivially satisfied.
If `x` {sym} `y` does not hold, `message`, as well as the first `summarize`
entries of `x` and `y` are printed, and `InvalidArgumentError` is raised.
When using inside `tf.function`, this API takes effects during execution.
It's recommended to use this API with `tf.control_dependencies` to
ensure the correct execution order.
In the following example, without `tf.control_dependencies`, errors may
not be raised at all.
Check `tf.control_dependencies` for more details.
>>> def check_size(x):
...   with tf.control_dependencies([
...       tf.debugging.{opname}(tf.size(x), {test_var},
...                             message='Bad tensor size')]):
...     return x
>>> check_size(tf.ones([2, 3], tf.float32))
Traceback (most recent call last):
...
InvalidArgumentError: ...
Args:
x: Numeric `Tensor`.
y: Numeric `Tensor`, same dtype as and broadcastable to `x`.
message: A string to prefix to the default message. (optional)
summarize: Print this many entries of each tensor. (optional)
name: A name for this operation (optional). Defaults to "{opname}".
Returns:
Op that raises `InvalidArgumentError` if `x {sym} y` is False. This can
be used with `tf.control_dependencies` inside of `tf.function`s to
block followup computation until the check has executed.
@compatibility(eager)
returns None
@end_compatibility
Raises:
InvalidArgumentError: if the check can be performed immediately and
`x == y` is False. The check can be performed immediately during eager
execution or if `x` and `y` are statically known.
""".format(
    sym=sym, opname=opname, test_var=test_var)
    return func

  return _decorator
def _make_assert_msg_data(sym, x, y, summarize, test_op):
  """Subroutine of _binary_assert that generates the components of the default error message when running in eager mode.

  Args:
    sym: Mathematical symbol for the test to apply to pairs of tensor elements,
      i.e. "=="
    x: First input to the assertion after applying `convert_to_tensor()`
    y: Second input to the assertion
    summarize: Value of the "summarize" parameter to the original assert_* call;
      tells how many elements of each tensor to print.
    test_op: TensorFlow op that returns a Boolean tensor with True in each
      position where the assertion is satisfied.

  Returns:
    List of tensors and scalars that, when stringified and concatenated,
    will produce the error message string.
  """
  data = ['Condition x %s y did not hold.' % sym]
  if summarize <= 0:
    return data
  if x.shape == y.shape and x.shape.as_list():
    # The shapes match and are non-scalar, so report exactly which elements
    # differed and where. With mismatched shapes this information is more
    # confusing than useful, so it is skipped.
    failure_mask = math_ops.logical_not(test_op)
    failed_indices = array_ops.where(failure_mask).numpy()
    num_vals = min(summarize, failed_indices.shape[0])
    x_bad = array_ops.boolean_mask(x, failure_mask).numpy().reshape((-1,))
    y_bad = array_ops.boolean_mask(y, failure_mask).numpy().reshape((-1,))
    data.extend([
        'Indices of first %d different values:' % num_vals,
        failed_indices[:num_vals],
        'Corresponding x values:',
        x_bad[:num_vals],
        'Corresponding y values:',
        y_bad[:num_vals],
    ])
  # reshape((-1,)) is the fastest way to get a flat array view.
  x_flat = x.numpy().reshape((-1,))
  y_flat = y.numpy().reshape((-1,))
  x_count = min(x_flat.size, summarize)
  y_count = min(y_flat.size, summarize)
  data.extend([
      'First %d elements of x:' % x_count,
      x_flat[:x_count],
      'First %d elements of y:' % y_count,
      y_flat[:y_count],
  ])
  return data
def _pretty_print(data_item, summarize):
  """Format a data item for use in an error message in eager mode.

  Args:
    data_item: One of the items in the "data" argument to an assert_* function.
      Can be a Tensor or a scalar value.
    summarize: How many elements to retain of each tensor-valued entry in data.

  Returns:
    An appropriate string representation of data_item
  """
  if not isinstance(data_item, ops.Tensor):
    return str(data_item)
  arr = data_item.numpy()
  # Tensor.numpy() returns a scalar for zero-dimensional tensors.
  if np.isscalar(arr):
    return str(arr)
  flat = arr.reshape((-1,))
  shown = [str(value) for value in flat[:summarize]]
  if flat.size > len(shown):
    shown.append('...')
  return str(shown)
def _binary_assert(sym, opname, op_func, static_func, x, y, data, summarize,
                   message, name):
  """Generic binary elementwise assertion.

  Implements the behavior described in _binary_assert_doc() above.

  Args:
    sym: Mathematical symbol for the test to apply to pairs of tensor elements,
      i.e. "=="
    opname: Name of the assert op in the public API, i.e. "assert_equal"
    op_func: Function that, if passed the two Tensor inputs to the assertion (x
      and y), will return the test to be passed to reduce_all() i.e.
    static_func: Function that, if passed numpy ndarray versions of the two
      inputs to the assertion, will return a Boolean ndarray with containing
      True in all positions where the assertion PASSES.
      i.e. np.equal for assert_equal()
    x: Numeric `Tensor`.
    y: Numeric `Tensor`, same dtype as and broadcastable to `x`.
    data: The tensors to print out if the condition is False. Defaults to
      error message and first few entries of `x`, `y`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional). Defaults to the value of
      `opname`.

  Returns:
    See docstring template in _binary_assert_doc().
  """
  with ops.name_scope(name, opname, [x, y, data]):
    x = ops.convert_to_tensor(x, name='x')
    y = ops.convert_to_tensor(y, name='y')
    if context.executing_eagerly():
      # Eager path: evaluate the predicate now and raise immediately on
      # failure; on success there is nothing to return.
      test_op = op_func(x, y)
      condition = math_ops.reduce_all(test_op)
      if condition:
        return
      # If we get here, the assertion has failed.
      # Default to printing 3 elements like control_flow_ops.Assert (used
      # by graph mode) does. Also treat negative values as "print
      # everything" for consistency with Tensor::SummarizeValue().
      if summarize is None:
        summarize = 3
      elif summarize < 0:
        summarize = 1e9  # Code below will find exact size of x and y.
      if data is None:
        data = _make_assert_msg_data(sym, x, y, summarize, test_op)
      if message is not None:
        data = [message] + list(data)
      raise errors.InvalidArgumentError(
          node_def=None,
          op=None,
          message=('\n'.join(_pretty_print(d, summarize) for d in data)))
    else:  # not context.executing_eagerly()
      # Graph path: build an Assert op; additionally fail fast at graph
      # construction time when both inputs are statically known.
      if data is None:
        data = [
            'Condition x %s y did not hold element-wise:' % sym,
            'x (%s) = ' % x.name, x,
            'y (%s) = ' % y.name, y
        ]
      if message is not None:
        data = [message] + list(data)
      condition = math_ops.reduce_all(op_func(x, y))
      x_static = tensor_util.constant_value(x)
      y_static = tensor_util.constant_value(y)
      if x_static is not None and y_static is not None:
        condition_static = np.all(static_func(x_static, y_static))
        _assert_static(condition_static, data)
      return control_flow_ops.Assert(condition, data, summarize=summarize)
@tf_export(
    'debugging.assert_proper_iterable',
    v1=['debugging.assert_proper_iterable', 'assert_proper_iterable'])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints('assert_proper_iterable')
def assert_proper_iterable(values):
  """Static assert that values is a "proper" iterable.

  `Ops` that expect iterables of `Tensor` can call this to validate input.
  Useful since `Tensor`, `ndarray`, byte/text type are all iterables themselves.

  Args:
    values: Object to be checked.

  Raises:
    TypeError: If `values` is not iterable or is one of
      `Tensor`, `SparseTensor`, `np.array`, `tf.compat.bytes_or_text_types`.
  """
  # These types iterate element-wise, so passing one where a collection of
  # tensors is expected is almost always a caller bug.
  improper_types = ((ops.Tensor, sparse_tensor.SparseTensor, np.ndarray) +
                    compat.bytes_or_text_types)
  if isinstance(values, improper_types):
    raise TypeError(
        'Expected argument "values" to be a "proper" iterable. Found: %s' %
        type(values))
  if not hasattr(values, '__iter__'):
    raise TypeError(
        'Expected argument "values" to be iterable. Found: %s' % type(values))
@tf_export('debugging.assert_negative', v1=[])
@dispatch.add_dispatch_support
def assert_negative_v2(x, message=None, summarize=None, name=None):
  """Assert the condition `x < 0` holds element-wise.

  This Op checks that `x[i] < 0` holds for every element of `x`. If `x` is
  empty, this is trivially satisfied.

  If `x` is not negative everywhere, `message`, as well as the first `summarize`
  entries of `x` are printed, and `InvalidArgumentError` is raised.

  Args:
    x: Numeric `Tensor`.
    message: A string to prefix to the default message.
    summarize: Print this many entries of each tensor.
    name: A name for this operation (optional). Defaults to "assert_negative".

  Returns:
    Op raising `InvalidArgumentError` unless `x` is all negative. This can be
    used with `tf.control_dependencies` inside of `tf.function`s to block
    followup computation until the check has executed.
    @compatibility(eager)
    returns None
    @end_compatibility

  Raises:
    InvalidArgumentError: if the check can be performed immediately and
      `x[i] < 0` is False. The check can be performed immediately during eager
      execution or if `x` is statically known.
  """
  # Thin v2 wrapper: the v1 implementation handles both eager and graph modes.
  return assert_negative(x=x, message=message, summarize=summarize, name=name)
@tf_export(v1=['debugging.assert_negative', 'assert_negative'])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints('assert_negative')
@_unary_assert_doc('< 0', 'negative')
def assert_negative(x, data=None, summarize=None, message=None, name=None):  # pylint: disable=missing-docstring
  message = _message_prefix(message)
  with ops.name_scope(name, 'assert_negative', [x, data]):
    x = ops.convert_to_tensor(x, name='x')
    if data is None:
      # Eager tensors have no .name; describe them by shape/dtype instead.
      x_label = (
          _shape_and_dtype_str(x) if context.executing_eagerly() else x.name)
      data = [
          message,
          'Condition x < 0 did not hold element-wise:',
          'x (%s) = ' % x_label, x
      ]
    # x < 0 is expressed as x < zero so the comparison stays in x's dtype.
    zero = ops.convert_to_tensor(0, dtype=x.dtype)
    return assert_less(x, zero, data=data, summarize=summarize)
@tf_export('debugging.assert_positive', v1=[])
@dispatch.add_dispatch_support
def assert_positive_v2(x, message=None, summarize=None, name=None):
  """Assert the condition `x > 0` holds element-wise.

  This Op checks that `x[i] > 0` holds for every element of `x`. If `x` is
  empty, this is trivially satisfied.

  If `x` is not positive everywhere, `message`, as well as the first `summarize`
  entries of `x` are printed, and `InvalidArgumentError` is raised.

  Args:
    x: Numeric `Tensor`.
    message: A string to prefix to the default message.
    summarize: Print this many entries of each tensor.
    name: A name for this operation (optional). Defaults to "assert_positive".

  Returns:
    Op raising `InvalidArgumentError` unless `x` is all positive. This can be
    used with `tf.control_dependencies` inside of `tf.function`s to block
    followup computation until the check has executed.
    @compatibility(eager)
    returns None
    @end_compatibility

  Raises:
    InvalidArgumentError: if the check can be performed immediately and
      `x[i] > 0` is False. The check can be performed immediately during eager
      execution or if `x` is statically known.
  """
  # Thin v2 wrapper: the v1 implementation handles both eager and graph modes.
  return assert_positive(x=x, summarize=summarize, message=message, name=name)
@tf_export(v1=['debugging.assert_positive', 'assert_positive'])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints('assert_positive')
@_unary_assert_doc('> 0', 'positive')
def assert_positive(x, data=None, summarize=None, message=None, name=None):  # pylint: disable=missing-docstring
  message = _message_prefix(message)
  with ops.name_scope(name, 'assert_positive', [x, data]):
    x = ops.convert_to_tensor(x, name='x')
    if data is None:
      # Eager tensors have no .name; describe them by shape/dtype instead.
      x_label = (
          _shape_and_dtype_str(x) if context.executing_eagerly() else x.name)
      data = [
          message, 'Condition x > 0 did not hold element-wise:',
          'x (%s) = ' % x_label, x
      ]
    # x > 0 is expressed as zero < x so the comparison stays in x's dtype.
    zero = ops.convert_to_tensor(0, dtype=x.dtype)
    return assert_less(zero, x, data=data, summarize=summarize)
@tf_export('debugging.assert_non_negative', v1=[])
@dispatch.add_dispatch_support
def assert_non_negative_v2(x, message=None, summarize=None, name=None):
  """Assert the condition `x >= 0` holds element-wise.

  This Op checks that `x[i] >= 0` holds for every element of `x`. If `x` is
  empty, this is trivially satisfied.

  If `x` is not >= 0 everywhere, `message`, as well as the first `summarize`
  entries of `x` are printed, and `InvalidArgumentError` is raised.

  Args:
    x: Numeric `Tensor`.
    message: A string to prefix to the default message.
    summarize: Print this many entries of each tensor.
    name: A name for this operation (optional). Defaults to
      "assert_non_negative".

  Returns:
    Op raising `InvalidArgumentError` unless `x` is all non-negative. This can
    be used with `tf.control_dependencies` inside of `tf.function`s to block
    followup computation until the check has executed.
    @compatibility(eager)
    returns None
    @end_compatibility

  Raises:
    InvalidArgumentError: if the check can be performed immediately and
      `x[i] >= 0` is False. The check can be performed immediately during eager
      execution or if `x` is statically known.
  """
  # Thin v2 wrapper: the v1 implementation handles both eager and graph modes.
  return assert_non_negative(x=x, summarize=summarize, message=message,
                             name=name)
@tf_export(v1=['debugging.assert_non_negative', 'assert_non_negative'])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints('assert_non_negative')
@_unary_assert_doc('>= 0', 'non-negative')
def assert_non_negative(x, data=None, summarize=None, message=None, name=None):  # pylint: disable=missing-docstring
  message = _message_prefix(message)
  with ops.name_scope(name, 'assert_non_negative', [x, data]):
    x = ops.convert_to_tensor(x, name='x')
    if data is None:
      # Eager tensors have no .name; describe them by shape/dtype instead.
      x_label = (
          _shape_and_dtype_str(x) if context.executing_eagerly() else x.name)
      data = [
          message,
          'Condition x >= 0 did not hold element-wise:',
          'x (%s) = ' % x_label, x
      ]
    # x >= 0 is expressed as zero <= x so the comparison stays in x's dtype.
    zero = ops.convert_to_tensor(0, dtype=x.dtype)
    return assert_less_equal(zero, x, data=data, summarize=summarize)
@tf_export('debugging.assert_non_positive', v1=[])
@dispatch.add_dispatch_support
def assert_non_positive_v2(x, message=None, summarize=None, name=None):
  """Assert the condition `x <= 0` holds element-wise.

  This Op checks that `x[i] <= 0` holds for every element of `x`. If `x` is
  empty, this is trivially satisfied.

  If `x` is not <= 0 everywhere, `message`, as well as the first `summarize`
  entries of `x` are printed, and `InvalidArgumentError` is raised.

  Args:
    x: Numeric `Tensor`.
    message: A string to prefix to the default message.
    summarize: Print this many entries of each tensor.
    name: A name for this operation (optional). Defaults to
      "assert_non_positive".

  Returns:
    Op raising `InvalidArgumentError` unless `x` is all non-positive. This can
    be used with `tf.control_dependencies` inside of `tf.function`s to block
    followup computation until the check has executed.
    @compatibility(eager)
    returns None
    @end_compatibility

  Raises:
    InvalidArgumentError: if the check can be performed immediately and
      `x[i] <= 0` is False. The check can be performed immediately during eager
      execution or if `x` is statically known.
  """
  # Thin v2 wrapper: the v1 implementation handles both eager and graph modes.
  return assert_non_positive(x=x, summarize=summarize, message=message,
                             name=name)
@tf_export(v1=['debugging.assert_non_positive', 'assert_non_positive'])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints('assert_non_positive')
@_unary_assert_doc('<= 0', 'non-positive')
def assert_non_positive(x, data=None, summarize=None, message=None, name=None):  # pylint: disable=missing-docstring
  message = _message_prefix(message)
  with ops.name_scope(name, 'assert_non_positive', [x, data]):
    x = ops.convert_to_tensor(x, name='x')
    if data is None:
      if context.executing_eagerly():
        # Eager tensors have no .name; describe them by shape/dtype instead.
        name = _shape_and_dtype_str(x)
      else:
        name = x.name
      # BUG FIX: a missing comma previously concatenated the condition string
      # with the 'x (%s) = ' label into one garbled message
      # ('...element-wise:x (...) = '). Keep them as two separate entries,
      # consistent with assert_negative/assert_positive/assert_non_negative.
      data = [
          message,
          'Condition x <= 0 did not hold element-wise:',
          'x (%s) = ' % name, x]
    # x <= 0 is expressed as x <= zero so the comparison stays in x's dtype.
    zero = ops.convert_to_tensor(0, dtype=x.dtype)
    return assert_less_equal(x, zero, data=data, summarize=summarize)
@tf_export('debugging.assert_equal', 'assert_equal', v1=[])
@dispatch.register_binary_elementwise_assert_api
@dispatch.add_dispatch_support
@_binary_assert_doc_v2('==', 'assert_equal', 3)
def assert_equal_v2(x, y, message=None, summarize=None, name=None):
  # Docstring is attached by the _binary_assert_doc_v2 decorator above.
  # Thin v2 wrapper: the v1 implementation handles both eager and graph modes.
  return assert_equal(x=x, y=y, summarize=summarize, message=message, name=name)
@tf_export(v1=['debugging.assert_equal', 'assert_equal'])
@dispatch.register_binary_elementwise_assert_api
@dispatch.add_dispatch_support
@_binary_assert_doc('==', '[1, 2]')
def assert_equal(x, y, data=None, summarize=None, message=None, name=None):  # pylint: disable=missing-docstring
  # Short-circuit when x and y are literally the same tensor object: the
  # assertion is trivially true and no comparison needs to be built.
  with ops.name_scope(name, 'assert_equal', [x, y, data]):
    if x is y:
      if context.executing_eagerly():
        return None
      return control_flow_ops.no_op()
  return _binary_assert('==', 'assert_equal', math_ops.equal, np.equal, x, y,
                        data, summarize, message, name)
@tf_export('debugging.assert_none_equal', v1=[])
@dispatch.register_binary_elementwise_assert_api
@dispatch.add_dispatch_support
@_binary_assert_doc_v2('!=', 'assert_none_equal', 6)
def assert_none_equal_v2(x, y, summarize=None, message=None, name=None):
  # Docstring is attached by the _binary_assert_doc_v2 decorator above.
  # Thin v2 wrapper: the v1 implementation handles both eager and graph modes.
  return assert_none_equal(x=x, y=y, summarize=summarize, message=message,
                           name=name)
@tf_export(v1=['debugging.assert_none_equal', 'assert_none_equal'])
@dispatch.register_binary_elementwise_assert_api
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints('assert_none_equal')
@_binary_assert_doc('!=', '[2, 1]')
def assert_none_equal(
    x, y, data=None, summarize=None, message=None, name=None):
  # Docstring is attached by the _binary_assert_doc decorator above; all real
  # work is delegated to the shared _binary_assert implementation.
  return _binary_assert('!=', 'assert_none_equal', math_ops.not_equal,
                        np.not_equal, x, y, data, summarize, message, name)
@tf_export('debugging.assert_near', v1=[])
@dispatch.register_binary_elementwise_assert_api
@dispatch.add_dispatch_support
def assert_near_v2(x, y, rtol=None, atol=None, message=None, summarize=None,
                   name=None):
  """Assert the condition `x` and `y` are close element-wise.

  This Op checks that `x[i] - y[i] < atol + rtol * tf.abs(y[i])` holds for every
  pair of (possibly broadcast) elements of `x` and `y`. If both `x` and `y` are
  empty, this is trivially satisfied.

  If any elements of `x` and `y` are not close, `message`, as well as the first
  `summarize` entries of `x` and `y` are printed, and `InvalidArgumentError`
  is raised.

  The default `atol` and `rtol` is `10 * eps`, where `eps` is the smallest
  representable positive number such that `1 + eps != 1`. This is about
  `1.2e-6` in `32bit`, `2.22e-15` in `64bit`, and `0.00977` in `16bit`.
  See `numpy.finfo`.

  Args:
    x: Float or complex `Tensor`.
    y: Float or complex `Tensor`, same dtype as and broadcastable to `x`.
    rtol: `Tensor`. Same `dtype` as, and broadcastable to, `x`.
      The relative tolerance. Default is `10 * eps`.
    atol: `Tensor`. Same `dtype` as, and broadcastable to, `x`.
      The absolute tolerance. Default is `10 * eps`.
    message: A string to prefix to the default message.
    summarize: Print this many entries of each tensor.
    name: A name for this operation (optional). Defaults to "assert_near".

  Returns:
    Op that raises `InvalidArgumentError` if `x` and `y` are not close enough.
    This can be used with `tf.control_dependencies` inside of `tf.function`s
    to block followup computation until the check has executed.
    @compatibility(eager)
    returns None
    @end_compatibility

  Raises:
    InvalidArgumentError: if the check can be performed immediately and
      `x != y` is False for any pair of elements in `x` and `y`. The check can
      be performed immediately during eager execution or if `x` and `y` are
      statically known.

  @compatibility(numpy)
  Similar to `numpy.testing.assert_allclose`, except tolerance depends on data
  type. This is due to the fact that `TensorFlow` is often used with `32bit`,
  `64bit`, and even `16bit` data.
  @end_compatibility
  """
  # Thin v2 wrapper: the v1 implementation handles both eager and graph modes.
  return assert_near(x=x, y=y, rtol=rtol, atol=atol, summarize=summarize,
                     message=message, name=name)
@tf_export(v1=['debugging.assert_near', 'assert_near'])
@dispatch.register_binary_elementwise_assert_api
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints('assert_near')
def assert_near(
    x, y, rtol=None, atol=None, data=None, summarize=None, message=None,
    name=None):
  """Assert the condition `x` and `y` are close element-wise.

  Example of adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.compat.v1.assert_near(x, y)]):
    output = tf.reduce_sum(x)
  ```

  This condition holds if for every pair of (possibly broadcast) elements
  `x[i]`, `y[i]`, we have

  ```tf.abs(x[i] - y[i]) <= atol + rtol * tf.abs(y[i])```.

  If both `x` and `y` are empty, this is trivially satisfied.

  The default `atol` and `rtol` is `10 * eps`, where `eps` is the smallest
  representable positive number such that `1 + eps != 1`. This is about
  `1.2e-6` in `32bit`, `2.22e-15` in `64bit`, and `0.00977` in `16bit`.
  See `numpy.finfo`.

  Args:
    x: Float or complex `Tensor`.
    y: Float or complex `Tensor`, same `dtype` as, and broadcastable to, `x`.
    rtol: `Tensor`. Same `dtype` as, and broadcastable to, `x`.
      The relative tolerance. Default is `10 * eps`.
    atol: `Tensor`. Same `dtype` as, and broadcastable to, `x`.
      The absolute tolerance. Default is `10 * eps`.
    data: The tensors to print out if the condition is False. Defaults to
      error message and first few entries of `x`, `y`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional). Defaults to "assert_near".

  Returns:
    Op that raises `InvalidArgumentError` if `x` and `y` are not close enough.

  @compatibility(numpy)
  Similar to `numpy.testing.assert_allclose`, except tolerance depends on data
  type. This is due to the fact that `TensorFlow` is often used with `32bit`,
  `64bit`, and even `16bit` data.
  @end_compatibility
  """
  message = _message_prefix(message)
  with ops.name_scope(name, 'assert_near', [x, y, rtol, atol, data]):
    x = ops.convert_to_tensor(x, name='x')
    y = ops.convert_to_tensor(y, name='y', dtype=x.dtype)
    # For complex inputs the tolerances are computed from (and compared in)
    # the corresponding real dtype.
    dtype = x.dtype
    if dtype.is_complex:
      dtype = dtype.real_dtype
    # Default tolerances scale with the precision of the dtype.
    eps = np.finfo(dtype.as_numpy_dtype).eps
    rtol = 10 * eps if rtol is None else rtol
    atol = 10 * eps if atol is None else atol
    rtol = ops.convert_to_tensor(rtol, name='rtol', dtype=dtype)
    atol = ops.convert_to_tensor(atol, name='atol', dtype=dtype)
    # Eager tensors have no .name; describe them by shape/dtype instead.
    if context.executing_eagerly():
      x_name = _shape_and_dtype_str(x)
      y_name = _shape_and_dtype_str(y)
    else:
      x_name = x.name
      y_name = y.name
    if data is None:
      data = [
          message,
          'x and y not equal to tolerance rtol = %s, atol = %s' % (rtol, atol),
          'x (%s) = ' % x_name, x, 'y (%s) = ' % y_name, y
      ]
    tol = atol + rtol * math_ops.abs(y)
    diff = math_ops.abs(x - y)
    # NOTE(review): uses strict less-than while the docstring describes `<=`;
    # the boundary case diff == tol therefore fails — confirm intended.
    condition = math_ops.reduce_all(math_ops.less(diff, tol))
    return control_flow_ops.Assert(condition, data, summarize=summarize)
@tf_export('debugging.assert_less', 'assert_less', v1=[])
@dispatch.register_binary_elementwise_assert_api
@dispatch.add_dispatch_support
@_binary_assert_doc_v2('<', 'assert_less', 3)
def assert_less_v2(x, y, message=None, summarize=None, name=None):
  # Docstring is attached by the _binary_assert_doc_v2 decorator above.
  # Thin v2 wrapper: the v1 implementation handles both eager and graph modes.
  return assert_less(x=x, y=y, summarize=summarize, message=message, name=name)
@tf_export(v1=['debugging.assert_less', 'assert_less'])
@dispatch.register_binary_elementwise_assert_api
@dispatch.add_dispatch_support
@_binary_assert_doc('<', '[2, 3]')
def assert_less(x, y, data=None, summarize=None, message=None, name=None):
  # Docstring is attached by the _binary_assert_doc decorator above; all real
  # work is delegated to the shared _binary_assert implementation.
  return _binary_assert('<', 'assert_less', math_ops.less, np.less, x, y, data,
                        summarize, message, name)
@tf_export('debugging.assert_less_equal', v1=[])
@dispatch.register_binary_elementwise_assert_api
@dispatch.add_dispatch_support
@_binary_assert_doc_v2('<=', 'assert_less_equal', 3)
def assert_less_equal_v2(x, y, message=None, summarize=None, name=None):
  # Docstring is attached by the _binary_assert_doc_v2 decorator above.
  # Thin v2 wrapper: the v1 implementation handles both eager and graph modes.
  return assert_less_equal(x=x, y=y,
                           summarize=summarize, message=message, name=name)
@tf_export(v1=['debugging.assert_less_equal', 'assert_less_equal'])
@dispatch.register_binary_elementwise_assert_api
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints('assert_less_equal')
@_binary_assert_doc('<=', '[1, 3]')
def assert_less_equal(x, y, data=None, summarize=None, message=None, name=None):
  # Docstring is attached by the _binary_assert_doc decorator above; all real
  # work is delegated to the shared _binary_assert implementation.
  return _binary_assert('<=', 'assert_less_equal', math_ops.less_equal,
                        np.less_equal, x, y, data, summarize, message, name)
@tf_export('debugging.assert_greater', 'assert_greater', v1=[])
@dispatch.register_binary_elementwise_assert_api
@dispatch.add_dispatch_support
@_binary_assert_doc_v2('>', 'assert_greater', 9)
def assert_greater_v2(x, y, message=None, summarize=None, name=None):
  # Docstring is attached by the _binary_assert_doc_v2 decorator above.
  # Thin v2 wrapper: the v1 implementation handles both eager and graph modes.
  return assert_greater(x=x, y=y, summarize=summarize, message=message,
                        name=name)
@tf_export(v1=['debugging.assert_greater', 'assert_greater'])
@dispatch.register_binary_elementwise_assert_api
@dispatch.add_dispatch_support
@_binary_assert_doc('>', '[0, 1]')
def assert_greater(x, y, data=None, summarize=None, message=None, name=None):  # pylint: disable=missing-docstring
  # Docstring is attached by the _binary_assert_doc decorator above; all real
  # work is delegated to the shared _binary_assert implementation.
  return _binary_assert('>', 'assert_greater', math_ops.greater, np.greater, x,
                        y, data, summarize, message, name)
@tf_export('debugging.assert_greater_equal', v1=[])
@dispatch.register_binary_elementwise_assert_api
@dispatch.add_dispatch_support
@_binary_assert_doc_v2('>=', 'assert_greater_equal', 9)
def assert_greater_equal_v2(x, y, message=None, summarize=None, name=None):
  # Docstring is attached by the _binary_assert_doc_v2 decorator above.
  # Thin v2 wrapper: the v1 implementation handles both eager and graph modes.
  return assert_greater_equal(x=x, y=y, summarize=summarize, message=message,
                              name=name)
@tf_export(v1=['debugging.assert_greater_equal', 'assert_greater_equal'])
@dispatch.register_binary_elementwise_assert_api
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints('assert_greater_equal')
@_binary_assert_doc('>=', '[1, 0]')
def assert_greater_equal(x, y, data=None, summarize=None, message=None,
                         name=None):
  # Docstring is attached by the _binary_assert_doc decorator above; all real
  # work is delegated to the shared _binary_assert implementation.
  return _binary_assert('>=', 'assert_greater_equal', math_ops.greater_equal,
                        np.greater_equal, x, y, data, summarize, message, name)
def _assert_rank_condition(
    x, rank, static_condition, dynamic_condition, data, summarize):
  """Assert `x` has a rank that satisfies a given condition.

  Args:
    x: Numeric `Tensor`.
    rank: Scalar `Tensor`.
    static_condition: A python function that takes `[actual_rank, given_rank]`
      and returns `True` if the condition is satisfied, `False` otherwise.
    dynamic_condition: An `op` that takes [actual_rank, given_rank] and return
      `True` if the condition is satisfied, `False` otherwise.
    data: The tensors to print out if the condition is false. Defaults to
      error message and first few entries of `x`.
    summarize: Print this many entries of each tensor.

  Returns:
    Op raising `InvalidArgumentError` if `x` fails dynamic_condition.

  Raises:
    ValueError: If static checks determine `x` fails static_condition.
  """
  assert_type(rank, dtypes.int32)

  # Attempt to determine the rank statically; when both the requested rank and
  # x's rank are known at graph-construction time, the check happens here and
  # no runtime op is needed.
  rank_static = tensor_util.constant_value(rank)
  if rank_static is not None:
    if rank_static.ndim != 0:
      raise ValueError('Rank must be a scalar.')

    x_rank_static = x.get_shape().ndims
    if x_rank_static is not None:
      if not static_condition(x_rank_static, rank_static):
        raise ValueError(
            'Static rank condition failed', x_rank_static, rank_static)
      return control_flow_ops.no_op(name='static_checks_determined_all_ok')

  condition = dynamic_condition(array_ops.rank(x), rank)

  # Add the condition that `rank` must have rank zero. Prevents the bug where
  # someone does assert_rank(x, [n]), rather than assert_rank(x, n).
  if rank_static is None:
    this_data = ['Rank must be a scalar. Received rank: ', rank]
    rank_check = assert_rank(rank, 0, data=this_data)
    condition = control_flow_ops.with_dependencies([rank_check], condition)

  return control_flow_ops.Assert(condition, data, summarize=summarize)
@tf_export('debugging.assert_rank', 'assert_rank', v1=[])
@dispatch.add_dispatch_support
def assert_rank_v2(x, rank, message=None, name=None):
  """Assert that `x` has rank equal to `rank`.

  This Op checks that the rank of `x` is equal to `rank`.

  If `x` has a different rank, `message`, as well as the shape of `x` are
  printed, and `InvalidArgumentError` is raised.

  Args:
    x: `Tensor`.
    rank: Scalar integer `Tensor`.
    message: A string to prefix to the default message.
    name: A name for this operation (optional). Defaults to
      "assert_rank".

  Returns:
    Op raising `InvalidArgumentError` unless `x` has specified rank.
    If static checks determine `x` has correct rank, a `no_op` is returned.
    This can be used with `tf.control_dependencies` inside of `tf.function`s
    to block followup computation until the check has executed.
    @compatibility(eager)
    returns None
    @end_compatibility

  Raises:
    InvalidArgumentError: if the check can be performed immediately and
      `x` does not have rank `rank`. The check can be performed immediately
      during eager execution or if the shape of `x` is statically known.
  """
  # Thin v2 wrapper: the v1 implementation handles both eager and graph modes.
  return assert_rank(x=x, rank=rank, message=message, name=name)
@tf_export(v1=['debugging.assert_rank', 'assert_rank'])
@dispatch.add_dispatch_support
def assert_rank(x, rank, data=None, summarize=None, message=None, name=None):
  """Assert `x` has rank equal to `rank`.

  Example of adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.compat.v1.assert_rank(x, 2)]):
    output = tf.reduce_sum(x)
  ```

  Args:
    x: Numeric `Tensor`.
    rank: Scalar integer `Tensor`.
    data: The tensors to print out if the condition is False. Defaults to
      error message and the shape of `x`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional). Defaults to "assert_rank".

  Returns:
    Op raising `InvalidArgumentError` unless `x` has specified rank.
    If static checks determine `x` has correct rank, a `no_op` is returned.

  Raises:
    ValueError: If static checks determine `x` has wrong rank.
  """
  with ops.name_scope(name, 'assert_rank', (x, rank) + tuple(data or [])):
    # SparseTensors are passed through as-is; only dense inputs are converted.
    if not isinstance(x, sparse_tensor.SparseTensor):
      x = ops.convert_to_tensor(x, name='x')
    rank = ops.convert_to_tensor(rank, name='rank')
    message = _message_prefix(message)

    static_condition = lambda actual_rank, given_rank: actual_rank == given_rank
    dynamic_condition = math_ops.equal

    # Eager tensors and SparseTensors have no graph `.name`; use an empty
    # placeholder in the error text.
    if context.executing_eagerly() or isinstance(x, sparse_tensor.SparseTensor):
      name = ''
    else:
      name = x.name

    if data is None:
      data = [
          message,
          'Tensor %s must have rank' % name, rank, 'Received shape: ',
          array_ops.shape(x)
      ]

    try:
      assert_op = _assert_rank_condition(x, rank, static_condition,
                                         dynamic_condition, data, summarize)
    except ValueError as e:
      # The sentinel 'Static rank condition failed' signals a statically
      # detected mismatch; rewrap it with a user-friendly message.
      if e.args[0] == 'Static rank condition failed':
        raise ValueError(
            '%sTensor %s must have rank %d. Received rank %d, shape %s' %
            (message, name, e.args[2], e.args[1], x.get_shape()))
      else:
        # Re-raise unchanged instead of `raise ValueError(e.args[0])`, which
        # dropped the remaining args and the original traceback. This also
        # matches the handling in assert_rank_at_least and assert_rank_in.
        raise

  return assert_op
@tf_export('debugging.assert_rank_at_least', v1=[])
@dispatch.add_dispatch_support
def assert_rank_at_least_v2(x, rank, message=None, name=None):
  """Assert that `x` has rank of at least `rank`.

  This Op checks that the rank of `x` is greater or equal to `rank`.

  If `x` has a rank lower than `rank`, `message`, as well as the shape of `x`
  are printed, and `InvalidArgumentError` is raised.

  Args:
    x: `Tensor`.
    rank: Scalar integer `Tensor`.
    message: A string to prefix to the default message.
    name: A name for this operation (optional).  Defaults to
      "assert_rank_at_least".

  Returns:
    Op raising `InvalidArgumentError` unless `x` has specified rank or higher.
    If static checks determine `x` has correct rank, a `no_op` is returned.
    This can be used with `tf.control_dependencies` inside of `tf.function`s
    to block followup computation until the check has executed.
    @compatibility(eager)
    returns None
    @end_compatibility

  Raises:
    InvalidArgumentError: `x` does not have rank at least `rank`, but the rank
      cannot be statically determined.
    ValueError: If static checks determine `x` has mismatched rank.
  """
  # Shim over the v1 implementation without the `data`/`summarize` arguments.
  return assert_rank_at_least(x, rank, message=message, name=name)
@tf_export(v1=['debugging.assert_rank_at_least', 'assert_rank_at_least'])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints('assert_rank_at_least')
def assert_rank_at_least(
    x, rank, data=None, summarize=None, message=None, name=None):
  """Assert `x` has rank equal to `rank` or higher.

  Example of adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.compat.v1.assert_rank_at_least(x, 2)]):
    output = tf.reduce_sum(x)
  ```

  Args:
    x: Numeric `Tensor`.
    rank: Scalar `Tensor`.
    data: The tensors to print out if the condition is False. Defaults to
      error message and first few entries of `x`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional).
      Defaults to "assert_rank_at_least".

  Returns:
    Op raising `InvalidArgumentError` unless `x` has specified rank or higher.
    If static checks determine `x` has correct rank, a `no_op` is returned.

  Raises:
    ValueError: If static checks determine `x` has wrong rank.
  """
  with ops.name_scope(
      name, 'assert_rank_at_least', (x, rank) + tuple(data or [])):
    x = ops.convert_to_tensor(x, name='x')
    rank = ops.convert_to_tensor(rank, name='rank')
    message = _message_prefix(message)

    # The static check compares known Python ints; the dynamic check builds a
    # graph op over the runtime rank.
    static_condition = lambda actual_rank, given_rank: actual_rank >= given_rank
    dynamic_condition = math_ops.greater_equal

    # Eager tensors have no graph `.name`; use an empty placeholder in the
    # error text.
    if context.executing_eagerly():
      name = ''
    else:
      name = x.name

    if data is None:
      data = [
          message,
          'Tensor %s must have rank at least' % name, rank,
          'Received shape: ', array_ops.shape(x)
      ]

    try:
      assert_op = _assert_rank_condition(x, rank, static_condition,
                                         dynamic_condition, data, summarize)
    except ValueError as e:
      # The sentinel 'Static rank condition failed' signals a statically
      # detected mismatch; rewrap it with a user-friendly message. Anything
      # else is re-raised unchanged.
      if e.args[0] == 'Static rank condition failed':
        raise ValueError(
            '%sTensor %s must have rank at least %d. Received rank %d, '
            'shape %s' % (message, name, e.args[2], e.args[1], x.get_shape()))
      else:
        raise

  return assert_op
def _static_rank_in(actual_rank, given_ranks):
return actual_rank in given_ranks
def _dynamic_rank_in(actual_rank, given_ranks):
  # Graph-mode membership test: OR together one equality op per allowed rank.
  if not given_ranks:
    return ops.convert_to_tensor(False)
  matches = [math_ops.equal(given, actual_rank) for given in given_ranks]
  combined = matches[0]
  for match in matches[1:]:
    combined = math_ops.logical_or(combined, match)
  return combined
def _assert_ranks_condition(
    x, ranks, static_condition, dynamic_condition, data, summarize):
  """Assert `x` has a rank that satisfies a given condition.

  Args:
    x: Numeric `Tensor`.
    ranks: Iterable of scalar `Tensor` objects.
    static_condition: A python function that takes
      `[actual_rank, given_ranks]` and returns `True` if the condition is
      satisfied, `False` otherwise.
    dynamic_condition: An `op` that takes [actual_rank, given_ranks]
      and return `True` if the condition is satisfied, `False` otherwise.
    data: The tensors to print out if the condition is false. Defaults to
      error message and first few entries of `x`.
    summarize: Print this many entries of each tensor.

  Returns:
    Op raising `InvalidArgumentError` if `x` fails dynamic_condition.

  Raises:
    ValueError: If static checks determine `x` fails static_condition.
  """
  for rank in ranks:
    assert_type(rank, dtypes.int32)

  # Attempt to statically determine every rank. If all are known constants and
  # the rank of `x` is also statically known, the check is resolved here with
  # no graph ops.
  ranks_static = tuple([tensor_util.constant_value(rank) for rank in ranks])
  if not any(r is None for r in ranks_static):
    for rank_static in ranks_static:
      if rank_static.ndim != 0:
        raise ValueError('Rank must be a scalar.')

    x_rank_static = x.get_shape().ndims
    if x_rank_static is not None:
      if not static_condition(x_rank_static, ranks_static):
        # Sentinel message matched by callers (e.g. assert_rank_in) to rewrap
        # with a friendlier error.
        raise ValueError(
            'Static rank condition failed', x_rank_static, ranks_static)
      return control_flow_ops.no_op(name='static_checks_determined_all_ok')

  # Fall back to a runtime check on the dynamic rank.
  condition = dynamic_condition(array_ops.rank(x), ranks)

  # Add the condition that `rank` must have rank zero. Prevents the bug where
  # someone does assert_rank(x, [n]), rather than assert_rank(x, n).
  for rank, rank_static in zip(ranks, ranks_static):
    if rank_static is None:
      this_data = ['Rank must be a scalar. Received rank: ', rank]
      rank_check = assert_rank(rank, 0, data=this_data)
      condition = control_flow_ops.with_dependencies([rank_check], condition)

  return control_flow_ops.Assert(condition, data, summarize=summarize)
@tf_export('debugging.assert_rank_in', v1=[])
@dispatch.add_dispatch_support
def assert_rank_in_v2(x, ranks, message=None, name=None):
  """Assert that `x` has a rank in `ranks`.

  This Op checks that the rank of `x` is in `ranks`.

  If `x` has a different rank, `message`, as well as the shape of `x` are
  printed, and `InvalidArgumentError` is raised.

  Args:
    x: `Tensor`.
    ranks: `Iterable` of scalar `Tensor` objects.
    message: A string to prefix to the default message.
    name: A name for this operation (optional). Defaults to "assert_rank_in".

  Returns:
    Op raising `InvalidArgumentError` unless rank of `x` is in `ranks`.
    If static checks determine `x` has matching rank, a `no_op` is returned.
    This can be used with `tf.control_dependencies` inside of `tf.function`s
    to block followup computation until the check has executed.
    @compatibility(eager)
    returns None
    @end_compatibility

  Raises:
    InvalidArgumentError: `x` does not have rank in `ranks`, but the rank cannot
      be statically determined.
    ValueError: If static checks determine `x` has mismatched rank.
  """
  # Shim over the v1 implementation without the `data`/`summarize` arguments.
  return assert_rank_in(x, ranks, message=message, name=name)
@tf_export(v1=['debugging.assert_rank_in', 'assert_rank_in'])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints('assert_rank_in')
def assert_rank_in(
    x, ranks, data=None, summarize=None, message=None, name=None):
  """Assert `x` has rank in `ranks`.

  Example of adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.compat.v1.assert_rank_in(x, (2, 4))]):
    output = tf.reduce_sum(x)
  ```

  Args:
    x: Numeric `Tensor`.
    ranks: Iterable of scalar `Tensor` objects.
    data: The tensors to print out if the condition is False. Defaults to
      error message and first few entries of `x`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional).
      Defaults to "assert_rank_in".

  Returns:
    Op raising `InvalidArgumentError` unless rank of `x` is in `ranks`.
    If static checks determine `x` has matching rank, a `no_op` is returned.

  Raises:
    ValueError: If static checks determine `x` has mismatched rank.
  """
  with ops.name_scope(
      name, 'assert_rank_in', (x,) + tuple(ranks) + tuple(data or [])):
    # SparseTensors are passed through as-is; only dense inputs are converted.
    if not isinstance(x, sparse_tensor.SparseTensor):
      x = ops.convert_to_tensor(x, name='x')
    ranks = tuple([ops.convert_to_tensor(rank, name='rank') for rank in ranks])
    message = _message_prefix(message)

    # Eager tensors and SparseTensors have no graph `.name`; use an empty
    # placeholder in the error text.
    if context.executing_eagerly() or isinstance(x, sparse_tensor.SparseTensor):
      name = ''
    else:
      name = x.name

    if data is None:
      data = [
          message, 'Tensor %s must have rank in' % name
      ] + list(ranks) + [
          'Received shape: ', array_ops.shape(x)
      ]

    try:
      assert_op = _assert_ranks_condition(x, ranks, _static_rank_in,
                                          _dynamic_rank_in, data, summarize)
    except ValueError as e:
      # Rewrap the sentinel static-failure error with a user-friendly message;
      # re-raise anything else unchanged.
      if e.args[0] == 'Static rank condition failed':
        raise ValueError(
            '%sTensor %s must have rank in %s. Received rank %d, '
            'shape %s' % (message, name, e.args[2], e.args[1], x.get_shape()))
      else:
        raise

  return assert_op
@tf_export('debugging.assert_integer', v1=[])
@dispatch.add_dispatch_support
def assert_integer_v2(x, message=None, name=None):
  """Assert that `x` is of integer dtype.

  If `x` has a non-integer type, `message`, as well as the dtype of `x` are
  printed, and `InvalidArgumentError` is raised.

  This can always be checked statically, so this method returns nothing.

  Args:
    x: A `Tensor`.
    message: A string to prefix to the default message.
    name: A name for this operation (optional). Defaults to "assert_integer".

  Raises:
    TypeError: If `x.dtype` is not a non-quantized integer type.
  """
  # Purely static check; the v1 implementation's return value is discarded.
  assert_integer(x, message=message, name=name)
@tf_export(v1=['debugging.assert_integer', 'assert_integer'])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints('assert_integer')
def assert_integer(x, message=None, name=None):
  """Assert that `x` is of integer dtype.

  Example of adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.compat.v1.assert_integer(x)]):
    output = tf.reduce_sum(x)
  ```

  Args:
    x: `Tensor` whose basetype is integer and is not quantized.
    message: A string to prefix to the default message.
    name: A name for this operation (optional). Defaults to "assert_integer".

  Raises:
    TypeError: If `x.dtype` is anything other than non-quantized integer.

  Returns:
    A `no_op` that does nothing. Type can be determined statically.
  """
  with ops.name_scope(name, 'assert_integer', [x]):
    x = ops.convert_to_tensor(x, name='x')
    # Dtype is always known statically, so this resolves to a no-op or raises.
    if x.dtype.is_integer:
      return control_flow_ops.no_op('statically_determined_was_integer')

    # Eager tensors have no graph `.name`; use a generic placeholder.
    tensor_label = 'tensor' if context.executing_eagerly() else x.name
    raise TypeError(
        '%sExpected "x" to be integer type. Found: %s of dtype %s'
        % (_message_prefix(message), tensor_label, x.dtype))
@tf_export('debugging.assert_type', v1=[])
@dispatch.add_dispatch_support
def assert_type_v2(tensor, tf_type, message=None, name=None):
  """Asserts that the given `Tensor` is of the specified type.

  This can always be checked statically, so this method returns nothing.

  Example:

  >>> a = tf.Variable(1.0)
  >>> tf.debugging.assert_type(a, tf_type= tf.float32)

  >>> b = tf.constant(21)
  >>> tf.debugging.assert_type(b, tf_type=tf.bool)
  Traceback (most recent call last):
  ...
  TypeError: ...

  >>> c = tf.SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2],
  ...  dense_shape=[3, 4])
  >>> tf.debugging.assert_type(c, tf_type= tf.int32)

  Args:
    tensor: A `Tensor`, `SparseTensor` or `tf.Variable` .
    tf_type: A tensorflow type (`dtypes.float32`, `tf.int64`, `dtypes.bool`,
      etc).
    message: A string to prefix to the default message.
    name: A name for this operation. Defaults to "assert_type"

  Raises:
    TypeError: If the tensor's data type doesn't match `tf_type`.
  """
  # Purely static check; the v1 implementation's return value is discarded.
  assert_type(tensor, tf_type, message=message, name=name)
@tf_export(v1=['debugging.assert_type', 'assert_type'])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints('assert_type')
def assert_type(tensor, tf_type, message=None, name=None):
  """Statically asserts that the given `Tensor` is of the specified type.

  Args:
    tensor: A `Tensor` or `SparseTensor`.
    tf_type: A tensorflow type (`dtypes.float32`, `tf.int64`, `dtypes.bool`,
      etc).
    message: A string to prefix to the default message.
    name: A name to give this `Op`. Defaults to "assert_type"

  Raises:
    TypeError: If the tensors data type doesn't match `tf_type`.

  Returns:
    A `no_op` that does nothing. Type can be determined statically.
  """
  # Normalize `tf_type` so callers may pass a DType, a numpy dtype, etc.
  tf_type = dtypes.as_dtype(tf_type)
  with ops.name_scope(name, 'assert_type', [tensor]):
    # SparseTensors are checked as-is; only dense inputs are converted.
    if not isinstance(tensor, sparse_tensor.SparseTensor):
      tensor = ops.convert_to_tensor(tensor, name='tensor')
    if tensor.dtype == tf_type:
      return control_flow_ops.no_op('statically_determined_correct_type')
    raise TypeError(
        f'{_message_prefix(message)}{getattr(tensor, "name", "tensor")}'
        f' must be of type {tf_type!r}; got {tensor.dtype!r}')
def _dimension_sizes(x):
  """Gets the dimension sizes of a tensor `x`.

  If a size can be determined statically it is returned as an integer,
  otherwise as a tensor.

  If `x` is a scalar it is treated as rank 1 size 1.

  Args:
    x: A `Tensor`.

  Returns:
    Dimension sizes.
  """
  dynamic_shape = array_ops.shape(x)
  static_rank = x.get_shape().rank
  if static_rank is not None:
    # Scalars are treated as rank-1 tensors of size one.
    if static_rank == 0:
      return (1,)
    # Mix statically known sizes (as ints) with dynamic slices where unknown.
    sizes = []
    for axis, size in enumerate(x.get_shape().as_list()):
      sizes.append(int(size) if size is not None else dynamic_shape[axis])
    return sizes
  # Rank is unknown statically: decide at runtime whether to report the
  # scalar-as-rank-1 shape or the true dynamic shape.
  has_rank_zero = math_ops.equal(array_ops.rank(x), 0)
  return control_flow_ops.cond(
      has_rank_zero, lambda: array_ops.constant([1]), lambda: dynamic_shape)
def _symbolic_dimension_sizes(symbolic_shape):
# If len(symbolic_shape) == 0 construct a tuple
if not symbolic_shape:
return tuple([1])
return symbolic_shape
def _has_known_value(dimension_size):
not_none = dimension_size is not None
try:
int(dimension_size)
can_be_parsed_as_int = True
except (ValueError, TypeError):
can_be_parsed_as_int = False
return not_none and can_be_parsed_as_int
def _is_symbol_for_any_size(symbol):
return symbol in [None, '.']
# Per-constraint record used by assert_shapes: the tensor `x`, whether its
# specified shape began with `...`/`*` (variable number of outer dims), the
# tensor's actual dimension sizes, and the symbolic sizes to check against.
_TensorDimSizes = collections.namedtuple(
    '_TensorDimSizes',
    ['x', 'unspecified_dim', 'actual_sizes', 'symbolic_sizes'])
@tf_export('debugging.assert_shapes', v1=[])
@dispatch.add_dispatch_support
def assert_shapes_v2(shapes, data=None, summarize=None, message=None,
                     name=None):
  """Assert tensor shapes and dimension size relationships between tensors.

  This Op checks that a collection of tensors shape relationships
  satisfies given constraints.

  Example:

  >>> n = 10
  >>> q = 3
  >>> d = 7
  >>> x = tf.zeros([n,q])
  >>> y = tf.ones([n,d])
  >>> param = tf.Variable([1.0, 2.0, 3.0])
  >>> scalar = 1.0
  >>> tf.debugging.assert_shapes([
  ...  (x, ('N', 'Q')),
  ...  (y, ('N', 'D')),
  ...  (param, ('Q',)),
  ...  (scalar, ()),
  ... ])

  >>> tf.debugging.assert_shapes([
  ...   (x, ('N', 'D')),
  ...   (y, ('N', 'D'))
  ... ])
  Traceback (most recent call last):
  ...
  ValueError: ...

  If `x`, `y`, `param` or `scalar` does not have a shape that satisfies
  all specified constraints, `message`, as well as the first `summarize` entries
  of the first encountered violating tensor are printed, and
  `InvalidArgumentError` is raised.

  Size entries in the specified shapes are checked against other entries by
  their __hash__, except:

  - a size entry is interpreted as an explicit size if it can be parsed as an
    integer primitive.
  - a size entry is interpreted as *any* size if it is None or '.'.

  If the first entry of a shape is `...` (type `Ellipsis`) or '*' that indicates
  a variable number of outer dimensions of unspecified size, i.e. the constraint
  applies to the inner-most dimensions only.

  Scalar tensors and specified shapes of length zero (excluding the 'inner-most'
  prefix) are both treated as having a single dimension of size one.

  Args:
    shapes: dictionary with (`Tensor` to shape) items, or a list of
      (`Tensor`, shape) tuples. A shape must be an iterable.
    data: The tensors to print out if the condition is False.  Defaults to error
      message and first few entries of the violating tensor.
    summarize: Print this many entries of the tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional).  Defaults to "assert_shapes".

  Raises:
    ValueError: If static checks determine any shape constraint is violated.
  """
  # The v2 API shares its full implementation with v1; delegate everything.
  assert_shapes(
      shapes=shapes, data=data, summarize=summarize, message=message,
      name=name)
@tf_export(v1=['debugging.assert_shapes'])
@dispatch.add_dispatch_support
def assert_shapes(shapes, data=None, summarize=None, message=None, name=None):
  """Assert tensor shapes and dimension size relationships between tensors.

  This Op checks that a collection of tensors shape relationships
  satisfies given constraints.

  Example:

  >>> n = 10
  >>> q = 3
  >>> d = 7
  >>> x = tf.zeros([n,q])
  >>> y = tf.ones([n,d])
  >>> param = tf.Variable([1.0, 2.0, 3.0])
  >>> scalar = 1.0
  >>> tf.debugging.assert_shapes([
  ...  (x, ('N', 'Q')),
  ...  (y, ('N', 'D')),
  ...  (param, ('Q',)),
  ...  (scalar, ()),
  ... ])

  >>> tf.debugging.assert_shapes([
  ...   (x, ('N', 'D')),
  ...   (y, ('N', 'D'))
  ... ])
  Traceback (most recent call last):
  ...
  ValueError: ...

  Example of adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.assert_shapes(shapes)]):
    output = tf.matmul(x, y, transpose_a=True)
  ```

  If `x`, `y`, `param` or `scalar` does not have a shape that satisfies
  all specified constraints, `message`, as well as the first `summarize` entries
  of the first encountered violating tensor are printed, and
  `InvalidArgumentError` is raised.

  Size entries in the specified shapes are checked against other entries by
  their __hash__, except:

  - a size entry is interpreted as an explicit size if it can be parsed as an
    integer primitive.
  - a size entry is interpreted as *any* size if it is None or '.'.

  If the first entry of a shape is `...` (type `Ellipsis`) or '*' that indicates
  a variable number of outer dimensions of unspecified size, i.e. the constraint
  applies to the inner-most dimensions only.

  Scalar tensors and specified shapes of length zero (excluding the 'inner-most'
  prefix) are both treated as having a single dimension of size one.

  Args:
    shapes: A list of (`Tensor`, `shape`) tuples, wherein `shape` is the
      expected shape of `Tensor`. See the example code above. The `shape` must
      be an iterable. Each element of the iterable can be either a concrete
      integer value or a string that abstractly represents the dimension.
      For example,

      - `('N', 'Q')` specifies a 2D shape wherein the first and second
        dimensions of shape may or may not be equal.
      - `('N', 'N', 'Q')` specifies a 3D shape wherein the first and second
        dimensions are equal.
      - `(1, 'N')` specifies a 2D shape wherein the first dimension is
        exactly 1 and the second dimension can be any value.

      Note that the abstract dimension letters take effect across different
      tuple elements of the list. For example,
      `tf.debugging.assert_shapes([(x, ('N', 'A')), (y, ('N', 'B'))]` asserts
      that both `x` and `y` are rank-2 tensors and their first dimensions are
      equal (`N`).

      `shape` can also be a `tf.TensorShape`.

    data: The tensors to print out if the condition is False.  Defaults to error
      message and first few entries of the violating tensor.
    summarize: Print this many entries of the tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional).  Defaults to "assert_shapes".

  Returns:
    Op raising `InvalidArgumentError` unless all shape constraints are
    satisfied.
    If static checks determine all constraints are satisfied, a `no_op` is
    returned.

  Raises:
    ValueError: If static checks determine any shape constraint is violated.
  """
  # If the user manages to assemble a dict containing tensors (possible in
  # Graph mode only), make sure we still accept that.
  if isinstance(shapes, dict):
    shapes = shapes.items()

  message_prefix = _message_prefix(message)
  with ops.name_scope(name, 'assert_shapes', [shapes, data]):
    # Shape specified as None implies no constraint
    shape_constraints = [(x if isinstance(x, sparse_tensor.SparseTensor) else
                          ops.convert_to_tensor(x), s)
                         for x, s in shapes if s is not None]

    executing_eagerly = context.executing_eagerly()

    def tensor_name(x):
      # Eager tensors and SparseTensors have no graph `.name`; describe them
      # by shape and dtype instead.
      if executing_eagerly or isinstance(x, sparse_tensor.SparseTensor):
        return _shape_and_dtype_str(x)
      return x.name

    # Pass 1: validate each specified shape and bundle it with the tensor's
    # actual sizes into a _TensorDimSizes record.
    tensor_dim_sizes = []
    for tensor, symbolic_shape in shape_constraints:
      is_iterable = (
          hasattr(symbolic_shape, '__iter__') or
          hasattr(symbolic_shape, '__getitem__')  # For Python 2 compat.
      )
      if not is_iterable:
        raise ValueError(
            '%s'
            'Tensor %s. Specified shape must be an iterable. '
            'An iterable has the attribute `__iter__` or `__getitem__`. '
            'Received specified shape: %s' %
            (message_prefix, tensor_name(tensor), symbolic_shape))

      # We convert this into a tuple to handle strings, lists and numpy arrays
      symbolic_shape_tuple = tuple(symbolic_shape)

      tensors_specified_innermost = False
      for i, symbol in enumerate(symbolic_shape_tuple):
        if symbol not in [Ellipsis, '*']:
          continue

        if i != 0:
          raise ValueError(
              '%s'
              'Tensor %s specified shape index %d. '
              'Symbol `...` or `*` for a variable number of '
              'unspecified dimensions is only allowed as the first entry' %
              (message_prefix, tensor_name(tensor), i))

        tensors_specified_innermost = True

      # Only include the size of the specified dimensions since the 0th symbol
      # is either ellipsis or *
      tensor_dim_sizes.append(
          _TensorDimSizes(
              tensor, tensors_specified_innermost, _dimension_sizes(tensor),
              _symbolic_dimension_sizes(
                  symbolic_shape_tuple[1:]
                  if tensors_specified_innermost else symbolic_shape_tuple)))

    # Pass 2: build rank assertions. These must run before any size slicing
    # below, so they are collected for use as control dependencies.
    rank_assertions = []
    for sizes in tensor_dim_sizes:
      rank = len(sizes.symbolic_sizes)
      rank_zero_or_one = rank in [0, 1]
      if sizes.unspecified_dim:
        if rank_zero_or_one:
          # No assertion of rank needed as `x` only need to have rank at least
          # 0. See elif rank_zero_or_one case comment.
          continue
        assertion = assert_rank_at_least(
            x=sizes.x,
            rank=rank,
            data=data,
            summarize=summarize,
            message=message,
            name=name)
      elif rank_zero_or_one:
        # Rank 0 is treated as rank 1 size 1, i.e. there is
        # no distinction between the two in terms of rank.
        # See _dimension_sizes.
        assertion = assert_rank_in(
            x=sizes.x,
            ranks=[0, 1],
            data=data,
            summarize=summarize,
            message=message,
            name=name)
      else:
        assertion = assert_rank(
            x=sizes.x,
            rank=rank,
            data=data,
            summarize=summarize,
            message=message,
            name=name)
      rank_assertions.append(assertion)

    # Pass 3: check dimension sizes, resolving symbolic sizes against the
    # first tensor that defined them (recorded in size_specifications).
    size_assertions = []
    size_specifications = {}
    for sizes in tensor_dim_sizes:
      for i, size_symbol in enumerate(sizes.symbolic_sizes):

        if _is_symbol_for_any_size(size_symbol):
          # Size specified as any implies no constraint
          continue

        if sizes.unspecified_dim:
          # Count from the end when outer dimensions were unspecified (`...`).
          tensor_dim = i - len(sizes.symbolic_sizes)
        else:
          tensor_dim = i

        if size_symbol in size_specifications or _has_known_value(size_symbol):
          if _has_known_value(size_symbol):
            specified_size = int(size_symbol)
            size_check_message = 'Specified explicitly'
          else:
            specified_size, specified_by_y, specified_at_dim = (
                size_specifications[size_symbol])
            size_check_message = (
                'Specified by tensor %s dimension %d' %
                (tensor_name(specified_by_y), specified_at_dim))

          # This is extremely subtle. If actual_sizes is dynamic, we must
          # make sure a control dependency is inserted here so that this slice
          # can not execute until the rank is asserted to be enough for the
          # slice to not fail.
          with ops.control_dependencies(rank_assertions):
            actual_size = sizes.actual_sizes[tensor_dim]
          if _has_known_value(actual_size) and _has_known_value(specified_size):
            if int(actual_size) != int(specified_size):
              raise ValueError(
                  '%s%s. Tensor %s dimension %s must have size %d. '
                  'Received size %d, shape %s' %
                  (message_prefix, size_check_message, tensor_name(sizes.x),
                   tensor_dim, specified_size, actual_size,
                   sizes.x.get_shape()))
            # No dynamic assertion needed
            continue

          condition = math_ops.equal(
              ops.convert_to_tensor(actual_size),
              ops.convert_to_tensor(specified_size))
          data_ = data
          if data is None:
            data_ = [
                message_prefix, size_check_message,
                'Tensor %s dimension' % tensor_name(sizes.x), tensor_dim,
                'must have size', specified_size, 'Received shape: ',
                array_ops.shape(sizes.x)
            ]
          size_assertions.append(
              control_flow_ops.Assert(condition, data_, summarize=summarize))
        else:
          # Not sure if actual_sizes is a constant, but for safety, guard
          # on rank. See explanation above about actual_sizes need for safety.
          with ops.control_dependencies(rank_assertions):
            size = sizes.actual_sizes[tensor_dim]
          size_specifications[size_symbol] = (size, sizes.x, tensor_dim)

  # Ensure both assertions actually occur.
  with ops.control_dependencies(rank_assertions):
    shapes_assertion = control_flow_ops.group(size_assertions)

  return shapes_assertion
# pylint: disable=line-too-long
def _get_diff_for_monotonic_comparison(x):
  """Gets the difference x[1:] - x[:-1]."""
  x = array_ops.reshape(x, [-1])
  if not is_numeric_tensor(x):
    raise TypeError('Expected x to be numeric, instead found: %s' % x)

  # If x has less than 2 elements, there is nothing to compare.  So return [].
  is_shorter_than_two = math_ops.less(array_ops.size(x), 2)
  short_result = lambda: ops.convert_to_tensor([], dtype=x.dtype)

  # With 2 or more elements, return x[1:] - x[:-1]
  s_len = array_ops.shape(x) - 1

  def _diff():
    return (array_ops.strided_slice(x, [1], [1] + s_len) -
            array_ops.strided_slice(x, [0], s_len))

  return control_flow_ops.cond(is_shorter_than_two, short_result, _diff)
@tf_export(
    'debugging.is_numeric_tensor',
    v1=['debugging.is_numeric_tensor', 'is_numeric_tensor'])
@deprecation.deprecated_endpoints('is_numeric_tensor')
def is_numeric_tensor(tensor):
  """Returns `True` if the elements of `tensor` are numbers.

  Specifically, returns `True` if the dtype of `tensor` is one of the following:

  * `tf.float16`
  * `tf.float32`
  * `tf.float64`
  * `tf.int8`
  * `tf.int16`
  * `tf.int32`
  * `tf.int64`
  * `tf.uint8`
  * `tf.uint16`
  * `tf.uint32`
  * `tf.uint64`
  * `tf.qint8`
  * `tf.qint16`
  * `tf.qint32`
  * `tf.quint8`
  * `tf.quint16`
  * `tf.complex64`
  * `tf.complex128`
  * `tf.bfloat16`

  Returns `False` if `tensor` is of a non-numeric type or if `tensor` is not
  a `tf.Tensor` object.
  """
  # Non-Tensor inputs (Python numbers, numpy arrays, etc.) report False.
  if not isinstance(tensor, ops.Tensor):
    return False
  return tensor.dtype in NUMERIC_TYPES
@tf_export(
    'math.is_non_decreasing',
    v1=[
        'math.is_non_decreasing', 'debugging.is_non_decreasing',
        'is_non_decreasing'
    ])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints('debugging.is_non_decreasing',
                                  'is_non_decreasing')
def is_non_decreasing(x, name=None):
  """Returns `True` if `x` is non-decreasing.

  Elements of `x` are compared in row-major order.  The tensor `[x[0],...]`
  is non-decreasing if for every adjacent pair we have `x[i] <= x[i+1]`.
  If `x` has less than two elements, it is trivially non-decreasing.

  See also:  `is_strictly_increasing`

  >>> x1 = tf.constant([1.0, 1.0, 3.0])
  >>> tf.math.is_non_decreasing(x1)
  <tf.Tensor: shape=(), dtype=bool, numpy=True>
  >>> x2 = tf.constant([3.0, 1.0, 2.0])
  >>> tf.math.is_non_decreasing(x2)
  <tf.Tensor: shape=(), dtype=bool, numpy=False>

  Args:
    x: Numeric `Tensor`.
    name: A name for this operation (optional).  Defaults to "is_non_decreasing"

  Returns:
    Boolean `Tensor`, equal to `True` iff `x` is non-decreasing.

  Raises:
    TypeError: if `x` is not a numeric tensor.
  """
  with ops.name_scope(name, 'is_non_decreasing', [x]):
    differences = _get_diff_for_monotonic_comparison(x)
    # When len(x) = 1, differences = [], and reduce_all([]) = True, so the
    # trivial case falls out naturally.
    zero = ops.convert_to_tensor(0, dtype=differences.dtype)
    pairwise_ok = math_ops.less_equal(zero, differences)
    return math_ops.reduce_all(pairwise_ok)
@tf_export(
    'math.is_strictly_increasing',
    v1=[
        'math.is_strictly_increasing', 'debugging.is_strictly_increasing',
        'is_strictly_increasing'
    ])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints('debugging.is_strictly_increasing',
                                  'is_strictly_increasing')
def is_strictly_increasing(x, name=None):
  """Returns `True` if `x` is strictly increasing.

  Elements of `x` are compared in row-major order.  The tensor `[x[0],...]`
  is strictly increasing if for every adjacent pair we have `x[i] < x[i+1]`.
  If `x` has less than two elements, it is trivially strictly increasing.

  See also:  `is_non_decreasing`

  >>> x1 = tf.constant([1.0, 2.0, 3.0])
  >>> tf.math.is_strictly_increasing(x1)
  <tf.Tensor: shape=(), dtype=bool, numpy=True>
  >>> x2 = tf.constant([3.0, 1.0, 2.0])
  >>> tf.math.is_strictly_increasing(x2)
  <tf.Tensor: shape=(), dtype=bool, numpy=False>

  Args:
    x: Numeric `Tensor`.
    name: A name for this operation (optional).
      Defaults to "is_strictly_increasing"

  Returns:
    Boolean `Tensor`, equal to `True` iff `x` is strictly increasing.

  Raises:
    TypeError: if `x` is not a numeric tensor.
  """
  with ops.name_scope(name, 'is_strictly_increasing', [x]):
    differences = _get_diff_for_monotonic_comparison(x)
    # When len(x) = 1, differences = [], and reduce_all([]) = True, so the
    # trivial case falls out naturally.
    zero = ops.convert_to_tensor(0, dtype=differences.dtype)
    pairwise_ok = math_ops.less(zero, differences)
    return math_ops.reduce_all(pairwise_ok)
def _assert_same_base_type(items, expected_type=None):
r"""Asserts all items are of the same base type.
Args:
items: List of graph items (e.g., `Variable`, `Tensor`, `SparseTensor`,
`Operation`, or `IndexedSlices`). Can include `None` elements, which
will be ignored.
expected_type: Expected type. If not specified, assert all items are
of the same base type.
Returns:
Validated type, or none if neither expected_type nor items provided.
Raises:
ValueError: If any types do not match.
"""
original_expected_type = expected_type
mismatch = False
for item in items:
if item is not None:
item_type = item.dtype.base_dtype
if not expected_type:
expected_type = item_type
elif expected_type != item_type:
mismatch = True
break
if mismatch:
# Loop back through and build up an informative error message (this is very
# slow, so we don't do it unless we found an error above).
expected_type = original_expected_type
original_item_str = None
for item in items:
if item is not None:
item_type = item.dtype.base_dtype
if not expected_type:
expected_type = item_type
original_item_str = item.name if hasattr(item, 'name') else str(item)
elif expected_type != item_type:
raise ValueError('%s, type=%s, must be of the same type (%s)%s.' % (
item.name if hasattr(item, 'name') else str(item),
item_type, expected_type,
(' as %s' % original_item_str) if original_item_str else ''))
return expected_type # Should be unreachable
else:
return expected_type
@tf_export(
    'debugging.assert_same_float_dtype',
    v1=['debugging.assert_same_float_dtype', 'assert_same_float_dtype'])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints('assert_same_float_dtype')
def assert_same_float_dtype(tensors=None, dtype=None):
  """Validate and return float type based on `tensors` and `dtype`.

  For ops such as matrix multiplication, inputs and weights must be of the
  same float type. This function validates that all `tensors` are the same type,
  validates that type is `dtype` (if supplied), and returns the type. Type must
  be a floating point type. If neither `tensors` nor `dtype` is supplied,
  the function will return `dtypes.float32`.

  Args:
    tensors: Tensors of input values. Can include `None` elements, which will be
        ignored.
    dtype: Expected type.

  Returns:
    Validated type.

  Raises:
    ValueError: if neither `tensors` nor `dtype` is supplied, or result is not
        float, or the common type of the inputs is not a floating point type.
  """
  if tensors:
    dtype = _assert_same_base_type(tensors, dtype)
  # Neither tensors nor dtype pinned a type: default to float32.
  if not dtype:
    return dtypes.float32
  if not dtype.is_floating:
    raise ValueError('Expected floating point type, got %s.' % dtype)
  return dtype
@tf_export('debugging.assert_scalar', v1=[])
@dispatch.add_dispatch_support
def assert_scalar_v2(tensor, message=None, name=None):
  """Asserts that the given `tensor` is a scalar.

  This function raises `ValueError` unless it can be certain that the given
  `tensor` is a scalar. `ValueError` is also raised if the shape of `tensor`
  is unknown.

  This is always checked statically, so this method returns nothing.

  Args:
    tensor: A `Tensor`.
    message: A string to prefix to the default message.
    name: A name for this operation. Defaults to "assert_scalar"

  Raises:
    ValueError: If the tensor is not scalar (rank 0), or if its shape is
      unknown.
  """
  # Delegate to the v1 implementation; its return value is deliberately
  # discarded because the v2 API returns nothing.
  assert_scalar(tensor=tensor, name=name, message=message)
@tf_export(v1=['debugging.assert_scalar', 'assert_scalar'])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints('assert_scalar')
def assert_scalar(tensor, name=None, message=None):
  """Asserts that the given `tensor` is a scalar (i.e. zero-dimensional).

  This function raises `ValueError` unless it can be certain that the given
  `tensor` is a scalar. `ValueError` is also raised if the shape of `tensor`
  is unknown.

  Args:
    tensor: A `Tensor`.
    name: A name for this operation. Defaults to "assert_scalar"
    message: A string to prefix to the default message.

  Returns:
    The input tensor (potentially converted to a `Tensor`).

  Raises:
    ValueError: If the tensor is not scalar (rank 0), or if its shape is
      unknown.
  """
  with ops.name_scope(name, 'assert_scalar', [tensor]) as name_scope:
    tensor = ops.convert_to_tensor(tensor, name=name_scope)
    shape = tensor.get_shape()
    if shape.ndims == 0:
      return tensor
    prefix = _message_prefix(message)
    if context.executing_eagerly():
      # Eager tensors have no stable name to report.
      raise ValueError('%sExpected scalar shape, saw shape: %s.'
                       % (prefix, shape,))
    raise ValueError('%sExpected scalar shape for %s, saw shape: %s.'
                     % (prefix, tensor.name, shape))
def _message_prefix(message):
if message:
return '%s. ' % message
return ''
@tf_export('ensure_shape')
@dispatch.add_dispatch_support
def ensure_shape(x, shape, name=None):
  """Updates the shape of a tensor and checks at runtime that the shape holds.

  When executed, this operation asserts that the input tensor `x`'s shape
  is compatible with the `shape` argument.
  See `tf.TensorShape.is_compatible_with` for details.

  >>> x = tf.constant([[1, 2, 3],
  ...                  [4, 5, 6]])
  >>> x = tf.ensure_shape(x, [2, 3])

  Use `None` for unknown dimensions:

  >>> x = tf.ensure_shape(x, [None, 3])
  >>> x = tf.ensure_shape(x, [2, None])

  If the tensor's shape is not compatible with the `shape` argument, an error
  is raised:

  >>> x = tf.ensure_shape(x, [5])
  Traceback (most recent call last):
  ...
  tf.errors.InvalidArgumentError: Shape of tensor dummy_input [3] is not
    compatible with expected shape [5]. [Op:EnsureShape]

  During graph construction (typically tracing a `tf.function`),
  `tf.ensure_shape` updates the static-shape of the **result** tensor by
  merging the two shapes. See `tf.TensorShape.merge_with` for details.

  This is most useful when **you** know a shape that can't be determined
  statically by TensorFlow.

  The following trivial `tf.function` prints the input tensor's
  static-shape before and after `ensure_shape` is applied.

  >>> @tf.function
  ... def f(tensor):
  ...   print("Static-shape before:", tensor.shape)
  ...   tensor = tf.ensure_shape(tensor, [None, 3])
  ...   print("Static-shape after:", tensor.shape)
  ...   return tensor

  This lets you see the effect of `tf.ensure_shape` when the function is
  traced:

  >>> cf = f.get_concrete_function(tf.TensorSpec([None, None]))
  Static-shape before: (None, None)
  Static-shape after: (None, 3)

  >>> cf(tf.zeros([3, 3])) # Passes
  >>> cf(tf.constant([1, 2, 3])) # fails
  Traceback (most recent call last):
  ...
  InvalidArgumentError: Shape of tensor x [3] is not compatible with expected shape [3,3].

  The above example raises `tf.errors.InvalidArgumentError`, because `x`'s
  shape, `(3,)`, is not compatible with the `shape` argument, `(None, 3)`.

  Inside a `tf.function` or `v1.Graph` context it checks both the buildtime
  and runtime shapes. This is stricter than `tf.Tensor.set_shape` which only
  checks the buildtime shape.

  Note: This differs from `tf.Tensor.set_shape` in that it sets the static
  shape of the resulting tensor and enforces it at runtime, raising an error
  if the tensor's runtime shape is incompatible with the specified shape.
  `tf.Tensor.set_shape` sets the static shape of the tensor without enforcing
  it at runtime, which may result in inconsistencies between the
  statically-known shape of tensors and the runtime value of tensors.

  For example, when loading images of a known size:

  >>> @tf.function
  ... def decode_image(png):
  ...   image = tf.image.decode_png(png, channels=3)
  ...   # the `print` executes during tracing.
  ...   print("Initial shape: ", image.shape)
  ...   image = tf.ensure_shape(image, [28, 28, 3])
  ...   print("Final shape: ", image.shape)
  ...   return image

  When tracing a function, no ops are being executed, shapes may be unknown.
  See the [Concrete Functions Guide](https://www.tensorflow.org/guide/concrete_function)
  for details.

  >>> concrete_decode = decode_image.get_concrete_function(
  ...     tf.TensorSpec([], dtype=tf.string))
  Initial shape:  (None, None, 3)
  Final shape:  (28, 28, 3)

  >>> image = tf.random.uniform(maxval=255, shape=[28, 28, 3], dtype=tf.int32)
  >>> image = tf.cast(image, tf.uint8)
  >>> png = tf.image.encode_png(image)
  >>> image2 = concrete_decode(png)
  >>> print(image2.shape)
  (28, 28, 3)

  >>> image = tf.concat([image, image], axis=0)
  >>> print(image.shape)
  (56, 28, 3)

  >>> png = tf.image.encode_png(image)
  >>> image2 = concrete_decode(png)
  Traceback (most recent call last):
  ...
  tf.errors.InvalidArgumentError: Shape of tensor DecodePng [56,28,3] is not
    compatible with expected shape [28,28,3].

  Caution: if you don't use the result of `tf.ensure_shape` the check may not
  run.

  >>> @tf.function
  ... def bad_decode_image(png):
  ...   image = tf.image.decode_png(png, channels=3)
  ...   # the `print` executes during tracing.
  ...   print("Initial shape: ", image.shape)
  ...   # BAD: forgot to use the returned tensor.
  ...   tf.ensure_shape(image, [28, 28, 3])
  ...   print("Final shape: ", image.shape)
  ...   return image

  >>> image = bad_decode_image(png)
  Initial shape:  (None, None, 3)
  Final shape:  (None, None, 3)
  >>> print(image.shape)
  (56, 28, 3)

  Args:
    x: A `Tensor`.
    shape: A `TensorShape` representing the shape of this tensor, a
      `TensorShapeProto`, a list, a tuple, or None.
    name: A name for this operation (optional). Defaults to "EnsureShape".

  Returns:
    A `Tensor`. Has the same type and contents as `x`.

  Raises:
    tf.errors.InvalidArgumentError: If `shape` is incompatible with the shape
      of `x`.
  """
  # Normalize `shape` so the op always receives a TensorShape.
  target_shape = (shape if isinstance(shape, tensor_shape.TensorShape)
                  else tensor_shape.TensorShape(shape))
  return array_ops.ensure_shape(x, target_shape, name=name)
@ops.RegisterGradient('EnsureShape')
def _ensure_shape_grad(op, grad):
  """Pass-through gradient: `EnsureShape` returns its input unchanged."""
  del op  # Unused.
  return grad
| {
"content_hash": "3faf2b1def69f3652655e56c360d0445",
"timestamp": "",
"source": "github",
"line_count": 2347,
"max_line_length": 120,
"avg_line_length": 35.96335747763102,
"alnum_prop": 0.6581641115560505,
"repo_name": "Intel-tensorflow/tensorflow",
"id": "884816b65acfee49b1a2d3f0d968005f22915f7e",
"size": "85143",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/check_ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "36962"
},
{
"name": "C",
"bytes": "1400913"
},
{
"name": "C#",
"bytes": "13584"
},
{
"name": "C++",
"bytes": "126099634"
},
{
"name": "CMake",
"bytes": "182430"
},
{
"name": "Cython",
"bytes": "5003"
},
{
"name": "Dockerfile",
"bytes": "416133"
},
{
"name": "Go",
"bytes": "2129888"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "1074438"
},
{
"name": "Jupyter Notebook",
"bytes": "792906"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "11447433"
},
{
"name": "Makefile",
"bytes": "2760"
},
{
"name": "Objective-C",
"bytes": "172666"
},
{
"name": "Objective-C++",
"bytes": "300213"
},
{
"name": "Pawn",
"bytes": "5552"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "42782002"
},
{
"name": "Roff",
"bytes": "5034"
},
{
"name": "Ruby",
"bytes": "9199"
},
{
"name": "Shell",
"bytes": "621917"
},
{
"name": "Smarty",
"bytes": "89538"
},
{
"name": "SourcePawn",
"bytes": "14625"
},
{
"name": "Starlark",
"bytes": "7738020"
},
{
"name": "Swift",
"bytes": "78435"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
from builtins import str
import csv
from django.http import HttpResponse
# Admin action for a generic "CSV Export"
# Django snippets: http://djangosnippets.org/snippets/2369/
def export_as_csv_action(description="Export selected objects as CSV file",
                         fields=None, exclude=None, header=True):
    """
    This function returns an export csv action

    'fields' and 'exclude' work like in django ModelForm
    'header' is whether or not to output the column names as the first row
    """
    def export_as_csv(modeladmin, request, queryset):
        """
        Generic csv export admin action.
        based on http://djangosnippets.org/snippets/1697/
        """
        opts = modeladmin.model._meta
        # Use a list (not a set) so the column order is deterministic and
        # follows the model's declared field order.
        field_names = [field.name for field in opts.fields]
        if fields:
            field_names = fields
        elif exclude:
            excludeset = set(exclude)
            field_names = [name for name in field_names
                           if name not in excludeset]
        # 'mimetype' was removed in Django 1.7; 'content_type' is the
        # supported equivalent.
        response = HttpResponse(content_type='text/csv')
        response[
            'Content-Disposition'] = 'attachment; filename="%s.csv"' % str(
                opts).replace('.', '_')
        writer = csv.writer(response)
        if header:
            writer.writerow(list(field_names))
        for obj in queryset:
            # Write text rather than UTF-8-encoded bytes: encoding here made
            # Python 3 emit b'...' reprs into the CSV cells.
            writer.writerow(
                [str(getattr(obj, field)) for field in field_names])
        return response
    export_as_csv.short_description = description
    return export_as_csv
| {
"content_hash": "993f368ed6f9a3cc87fcfd321c48ded4",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 75,
"avg_line_length": 33.75555555555555,
"alnum_prop": 0.6122448979591837,
"repo_name": "onepercentclub/bluebottle",
"id": "f56ffc575240ce97737a50f6df08841995ed0d0e",
"size": "1519",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bluebottle/redirects/actions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "41694"
},
{
"name": "HTML",
"bytes": "246695"
},
{
"name": "Handlebars",
"bytes": "63"
},
{
"name": "JavaScript",
"bytes": "139123"
},
{
"name": "PHP",
"bytes": "35"
},
{
"name": "PLpgSQL",
"bytes": "1369882"
},
{
"name": "PostScript",
"bytes": "2927"
},
{
"name": "Python",
"bytes": "4983116"
},
{
"name": "Rich Text Format",
"bytes": "39109"
},
{
"name": "SCSS",
"bytes": "99555"
},
{
"name": "Shell",
"bytes": "3068"
},
{
"name": "Smarty",
"bytes": "3814"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from datetime import datetime
from .. import states
from ..db.models import Task, TaskSet
from ..db.session import ResultSession
from ..exceptions import ImproperlyConfigured
from ..utils.timeutils import maybe_timedelta
from .base import BaseDictBackend
def _sqlalchemy_installed():
    """Verify that SQLAlchemy is importable.

    Returns:
        The imported :mod:`sqlalchemy` module.

    Raises:
        ImproperlyConfigured: if SQLAlchemy is not installed.
    """
    try:
        import sqlalchemy
    except ImportError:
        # Fixed missing space between the two concatenated sentences
        # ("installed.See" -> "installed. See").
        raise ImproperlyConfigured(
            "The database result backend requires SQLAlchemy to be "
            "installed. See http://pypi.python.org/pypi/SQLAlchemy")
    return sqlalchemy
# Fail fast at import time if SQLAlchemy is missing.
_sqlalchemy_installed()
class DatabaseBackend(BaseDictBackend):
    """The database result backend.

    Persists task and taskset results to an SQL database through
    SQLAlchemy sessions.
    """

    # ResultSet.iterate should sleep this much between each pool,
    # to not bombard the database with queries.
    subpolling_interval = 0.5

    def __init__(self, dburi=None, expires=None,
                 engine_options=None, **kwargs):
        super(DatabaseBackend, self).__init__(**kwargs)
        conf = self.app.conf
        self.expires = maybe_timedelta(self.prepare_expires(expires))
        self.dburi = dburi or conf.CELERY_RESULT_DBURI
        # Note: configured CELERY_RESULT_ENGINE_OPTIONS override the
        # `engine_options` argument (dict(a, **b) lets b win).
        self.engine_options = dict(engine_options or {},
                                   **conf.CELERY_RESULT_ENGINE_OPTIONS or {})
        self.short_lived_sessions = kwargs.get("short_lived_sessions",
                                conf.CELERY_RESULT_DB_SHORT_LIVED_SESSIONS)
        if not self.dburi:
            raise ImproperlyConfigured(
                    "Missing connection string! Do you have "
                    "CELERY_RESULT_DBURI set to a real value?")

    def ResultSession(self):
        """Create a new SQLAlchemy result session."""
        return ResultSession(
                    dburi=self.dburi,
                    short_lived_sessions=self.short_lived_sessions,
                    **self.engine_options)

    def _store_result(self, task_id, result, status, traceback=None):
        """Store return value and status of an executed task."""
        session = self.ResultSession()
        try:
            task = session.query(Task).filter(Task.task_id == task_id).first()
            if not task:
                task = Task(task_id)
                session.add(task)
                session.flush()
            task.result = result
            task.status = status
            task.traceback = traceback
            session.commit()
        finally:
            session.close()
        return result

    def _get_task_meta_for(self, task_id):
        """Get task metadata for a task by id."""
        session = self.ResultSession()
        try:
            task = session.query(Task).filter(Task.task_id == task_id).first()
            if task is None:
                # Unknown task: report it as PENDING without persisting.
                task = Task(task_id)
                task.status = states.PENDING
                task.result = None
            return task.to_dict()
        finally:
            session.close()

    def _save_taskset(self, taskset_id, result):
        """Store the result of an executed taskset."""
        session = self.ResultSession()
        try:
            taskset = TaskSet(taskset_id, result)
            session.add(taskset)
            session.flush()
            session.commit()
            return result
        finally:
            session.close()

    def _restore_taskset(self, taskset_id):
        """Get metadata for taskset by id."""
        session = self.ResultSession()
        try:
            taskset = session.query(TaskSet).filter(
                    TaskSet.taskset_id == taskset_id).first()
            if taskset:
                return taskset.to_dict()
        finally:
            session.close()

    def _delete_taskset(self, taskset_id):
        """Delete metadata for taskset by id."""
        session = self.ResultSession()
        try:
            session.query(TaskSet).filter(
                    TaskSet.taskset_id == taskset_id).delete()
            session.flush()
            session.commit()
        finally:
            session.close()

    def _forget(self, task_id):
        """Forget about result."""
        session = self.ResultSession()
        try:
            session.query(Task).filter(Task.task_id == task_id).delete()
            session.commit()
        finally:
            session.close()

    def cleanup(self):
        """Delete expired task and taskset metadata."""
        session = self.ResultSession()
        expires = self.expires
        # Take a single timestamp so both deletes use the same cutoff.
        now = datetime.now()
        try:
            session.query(Task).filter(
                    Task.date_done < (now - expires)).delete()
            session.query(TaskSet).filter(
                    TaskSet.date_done < (now - expires)).delete()
            session.commit()
        finally:
            session.close()

    def __reduce__(self, args=(), kwargs={}):
        # Build a fresh dict instead of calling kwargs.update(): the original
        # mutated the shared mutable default `kwargs` on every call.
        kwargs = dict(kwargs,
                      dburi=self.dburi,
                      expires=self.expires,
                      engine_options=self.engine_options)
        return super(DatabaseBackend, self).__reduce__(args, kwargs)
| {
"content_hash": "239a31584e9565433481c22f91983f89",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 79,
"avg_line_length": 34.51048951048951,
"alnum_prop": 0.5657548125633232,
"repo_name": "KarimAllah/celery",
"id": "c93086774051b00a7c259125d438f41f628fa770",
"size": "4959",
"binary": false,
"copies": "18",
"ref": "refs/heads/master",
"path": "celery/backends/database.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1077395"
},
{
"name": "Shell",
"bytes": "40914"
}
],
"symlink_target": ""
} |
"""Provides utilities to preprocess images for the Inception networks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Standard Imports
from absl import flags
import tensorflow.compat.v1 as tf
# Magnitude of the random Cb/Cr chroma offsets used by distort_color_fast().
flags.DEFINE_float('cb_distortion_range', 0.1, 'Cb distortion range +/-')
flags.DEFINE_float('cr_distortion_range', 0.1, 'Cr distortion range +/-')
# Selects between the cheap chroma-offset distortion (distort_color_fast)
# and the full brightness/saturation/hue/contrast pipeline (distort_color).
flags.DEFINE_boolean(
    'use_fast_color_distort', True,
    'apply fast color/chroma distortion if True, else apply'
    'brightness/saturation/hue/contrast distortion')
FLAGS = flags.FLAGS
def apply_with_random_selector(x, func, num_cases):
  """Computes func(x, sel), with sel sampled from [0...num_cases-1].

  Args:
    x: input Tensor.
    func: Python function to apply.
    num_cases: Python int32, number of cases to sample sel from.

  Returns:
    The result of func(x, sel), where func receives the value of the
    selector as a python integer, but sel is sampled dynamically.
  """
  selector = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)
  # One (predicate, branch) pair per case; the `case=case` default binds the
  # loop variable at definition time so each branch sees its own index.
  branches = [(tf.equal(selector, case), lambda case=case: func(x, case))
              for case in range(num_cases)]
  return tf.case(branches)
def distort_color(image, color_ordering=0, fast_mode=True, scope=None):
  """Distort the color of a Tensor image.

  Each color distortion is non-commutative and thus ordering of the color ops
  matters. Ideally we would randomly permute the ordering of the color ops.
  Rather then adding that level of complication, we select a distinct ordering
  of color ops for each preprocessing thread.

  Args:
    image: 3-D Tensor containing single image in [0, 1].
    color_ordering: Python int, a type of distortion (valid values: 0-3).
    fast_mode: Avoids slower ops (random_hue and random_contrast)
    scope: Optional scope for name_scope.

  Returns:
    3-D Tensor color-distorted image on range [0, 1]

  Raises:
    ValueError: if color_ordering not in [0, 3]
  """
  with tf.name_scope(scope, 'distort_color', [image]):
    def _brightness(img):
      return tf.image.random_brightness(img, max_delta=32. / 255.)

    def _saturation(img):
      return tf.image.random_saturation(img, lower=0.5, upper=1.5)

    def _hue(img):
      return tf.image.random_hue(img, max_delta=0.2)

    def _contrast(img):
      return tf.image.random_contrast(img, lower=0.5, upper=1.5)

    if fast_mode:
      # Fast mode only uses brightness and saturation; any ordering other
      # than 0 applies them in reverse (and never raises).
      if color_ordering == 0:
        sequence = [_brightness, _saturation]
      else:
        sequence = [_saturation, _brightness]
    else:
      orderings = {
          0: [_brightness, _saturation, _hue, _contrast],
          1: [_saturation, _brightness, _contrast, _hue],
          2: [_contrast, _hue, _brightness, _saturation],
          3: [_hue, _saturation, _contrast, _brightness],
      }
      if color_ordering not in orderings:
        raise ValueError('color_ordering must be in [0, 3]')
      sequence = orderings[color_ordering]

    for distort_op in sequence:
      image = distort_op(image)

    # The random_* ops do not necessarily clamp.
    return tf.clip_by_value(image, 0.0, 1.0)
def distort_color_fast(image, scope=None):
  """Distort the color of a Tensor image.

  Distort brightness and chroma values of input image

  Args:
    image: 3-D Tensor containing single image in [0, 1].
    scope: Optional scope for name_scope.

  Returns:
    3-D Tensor color-distorted image on range [0, 1]
  """
  with tf.name_scope(scope, 'distort_color', [image]):
    brightness_delta = tf.random_uniform([], -32. / 255., 32. / 255.,
                                         seed=None)
    cb = tf.random_uniform([],
                           -FLAGS.cb_distortion_range,
                           FLAGS.cb_distortion_range,
                           seed=None)
    cr = tf.random_uniform([],
                           -FLAGS.cr_distortion_range,
                           FLAGS.cr_distortion_range,
                           seed=None)
    # Apply the YCbCr-style offsets per RGB channel
    # (constants from the YCbCr -> RGB conversion).
    red, green, blue = tf.split(axis=2, num_or_size_splits=3, value=image)
    red += 1.402 * cr + brightness_delta
    green += -0.344136 * cb - 0.714136 * cr + brightness_delta
    blue += 1.772 * cb + brightness_delta
    distorted = tf.concat(axis=2, values=[red, green, blue])
    return tf.clip_by_value(distorted, 0., 1.)
def distorted_bounding_box_crop(image,
                                bbox,
                                min_object_covered=0.1,
                                aspect_ratio_range=(3. / 4., 4. / 3.),
                                area_range=(0.05, 1.0),
                                max_attempts=100,
                                scope=None):
  """Generates cropped_image using a one of the bboxes randomly distorted.

  See `tf.image.sample_distorted_bounding_box` for more documentation.

  Args:
    image: 3-D Tensor of image (it will be converted to floats in [0, 1]).
    bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
      where each coordinate is [0, 1) and the coordinates are arranged as
      [ymin, xmin, ymax, xmax]. If num_boxes is 0 then it would use the whole
      image.
    min_object_covered: An optional `float`. Defaults to `0.1`. The cropped
      area of the image must contain at least this fraction of any bounding
      box supplied.
    aspect_ratio_range: An optional list of `floats`. The cropped area of the
      image must have an aspect ratio = width / height within this range.
    area_range: An optional list of `floats`. The cropped area of the image
      must contain a fraction of the supplied image within in this range.
    max_attempts: An optional `int`. Number of attempts at generating a
      cropped region of the image of the specified constraints. After
      `max_attempts` failures, return the entire image.
    scope: Optional scope for name_scope.

  Returns:
    A tuple, a 3-D Tensor cropped_image and the distorted bbox
  """
  with tf.name_scope(scope, 'distorted_bounding_box_crop', [image, bbox]):
    # Sample a new bounding box that is a randomly distorted version of the
    # supplied one(s), subject to the coverage/aspect/area constraints. When
    # no box is supplied the whole image acts as the bounding box.
    begin, size, distort_bbox = tf.image.sample_distorted_bounding_box(
        tf.shape(image),
        bounding_boxes=bbox,
        min_object_covered=min_object_covered,
        aspect_ratio_range=aspect_ratio_range,
        area_range=area_range,
        max_attempts=max_attempts,
        use_image_if_no_bounding_boxes=True)
    # Crop the image to the sampled bounding box.
    return tf.slice(image, begin, size), distort_bbox
def preprocess_for_train(image,
                         height,
                         width,
                         bbox,
                         fast_mode=True,
                         scope=None,
                         add_image_summaries=True):
  """Distort one image for training a network.

  Distorting images provides a useful technique for augmenting the data
  set during training in order to make the network invariant to aspects
  of the image that do not effect the label.

  Additionally it would create image_summaries to display the different
  transformations applied to the image.

  Args:
    image: 3-D Tensor of image. If dtype is tf.float32 then the range should
      be [0, 1], otherwise it would converted to tf.float32 assuming that the
      range is [0, MAX], where MAX is largest positive representable number
      for int(8/16/32) data type (see `tf.image.convert_image_dtype` for
      details).
    height: integer
    width: integer
    bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
      where each coordinate is [0, 1) and the coordinates are arranged as
      [ymin, xmin, ymax, xmax].
    fast_mode: Optional boolean, if True avoids slower transformations (i.e.
      bi-cubic resizing, random_hue or random_contrast).
    scope: Optional scope for name_scope.
    add_image_summaries: Enable image summaries.

  Returns:
    3-D float Tensor of distorted image used for training with range [-1, 1].
  """
  with tf.name_scope(scope, 'distort_image', [image, height, width, bbox]):
    if bbox is None:
      # No annotation supplied: treat the whole image as the bounding box.
      bbox = tf.constant([0.0, 0.0, 1.0, 1.0],
                         dtype=tf.float32,
                         shape=[1, 1, 4])
    if image.dtype != tf.float32:
      image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    if add_image_summaries:
      # Boxes are [1, num_boxes, 4] ordered [ymin, xmin, ymax, xmax].
      tf.summary.image(
          'image_with_bounding_boxes',
          tf.image.draw_bounding_boxes(tf.expand_dims(image, 0), bbox))

    distorted, distorted_bbox = distorted_bounding_box_crop(image, bbox)
    # The dynamic slice loses the static channel dimension; restore it.
    distorted.set_shape([None, None, 3])
    if add_image_summaries:
      tf.summary.image(
          'images_with_distorted_bounding_box',
          tf.image.draw_bounding_boxes(
              tf.expand_dims(image, 0), distorted_bbox))

    # Resize to the target size, choosing one of the resize methods at
    # random per example (bilinear only in fast mode). Aspect ratio is not
    # preserved by this resize.
    num_resize_cases = 1 if fast_mode else 4
    distorted = apply_with_random_selector(
        distorted,
        lambda x, method: tf.image.resize_images(x, [height, width], method),
        num_cases=num_resize_cases)
    if add_image_summaries:
      tf.summary.image('cropped_resized_image',
                       tf.expand_dims(distorted, 0))

    distorted = tf.image.random_flip_left_right(distorted)

    # Color distortion: either the fast chroma-offset variant or one of the
    # 1 (fast) / 4 (full) brightness/saturation/hue/contrast orderings.
    if FLAGS.use_fast_color_distort:
      distorted = distort_color_fast(distorted)
    else:
      num_distort_cases = 1 if fast_mode else 4
      distorted = apply_with_random_selector(
          distorted,
          lambda x, ordering: distort_color(x, ordering, fast_mode),
          num_cases=num_distort_cases)

    if add_image_summaries:
      tf.summary.image('final_distorted_image',
                       tf.expand_dims(distorted, 0))

    # Rescale from [0, 1] to [-1, 1].
    distorted = tf.subtract(distorted, 0.5)
    return tf.multiply(distorted, 2.0)
def preprocess_for_eval(image,
                        height,
                        width,
                        central_fraction=0.875,
                        scope=None):
  """Prepare one image for evaluation.

  If height and width are specified it would output an image with that size by
  applying resize_bilinear.

  If central_fraction is specified it would crop the central fraction of the
  input image.

  Args:
    image: 3-D Tensor of image. If dtype is tf.float32 then the range should
      be [0, 1], otherwise it would converted to tf.float32 assuming that the
      range is [0, MAX], where MAX is largest positive representable number
      for int(8/16/32) data type (see `tf.image.convert_image_dtype` for
      details).
    height: integer
    width: integer
    central_fraction: Optional Float, fraction of the image to crop.
    scope: Optional scope for name_scope.

  Returns:
    3-D float Tensor of prepared image.
  """
  with tf.name_scope(scope, 'eval_image', [image, height, width]):
    if image.dtype != tf.float32:
      image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    if central_fraction:
      # Keep only the central region (87.5% of the area by default).
      image = tf.image.central_crop(image, central_fraction=central_fraction)
    if height and width:
      # Resize (bilinear) to the requested evaluation size.
      batched = tf.expand_dims(image, 0)
      batched = tf.image.resize_bilinear(
          batched, [height, width], align_corners=False)
      image = tf.squeeze(batched, [0])
    # Rescale from [0, 1] to [-1, 1].
    image = tf.subtract(image, 0.5)
    image = tf.multiply(image, 2.0)
    image.set_shape([height, width, 3])
    return image
def preprocess_image(image,
                     output_height,
                     output_width,
                     is_training=False,
                     bbox=None,
                     fast_mode=True,
                     add_image_summaries=False):
  """Pre-process one image for training or evaluation.

  Args:
    image: 3-D Tensor [height, width, channels] with the image. If dtype is
      tf.float32 then the range should be [0, 1], otherwise it would converted
      to tf.float32 assuming that the range is [0, MAX], where MAX is largest
      positive representable number for int(8/16/32) data type (see
      `tf.image.convert_image_dtype` for details).
    output_height: integer, image expected height.
    output_width: integer, image expected width.
    is_training: Boolean. If true it would transform an image for train,
      otherwise it would transform it for evaluation.
    bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
      where each coordinate is [0, 1) and the coordinates are arranged as
      [ymin, xmin, ymax, xmax].
    fast_mode: Optional boolean, if True avoids slower transformations.
    add_image_summaries: Enable image summaries.

  Returns:
    3-D float Tensor containing an appropriately scaled image

  Raises:
    ValueError: if user does not provide bounding box
  """
  if not is_training:
    return preprocess_for_eval(image, output_height, output_width)
  return preprocess_for_train(
      image,
      output_height,
      output_width,
      bbox,
      fast_mode,
      add_image_summaries=add_image_summaries)
| {
"content_hash": "aa97ccc3d17b3f458d6ebd8a4d2d299d",
"timestamp": "",
"source": "github",
"line_count": 376,
"max_line_length": 80,
"avg_line_length": 41.119680851063826,
"alnum_prop": 0.6498286009960546,
"repo_name": "tensorflow/tpu",
"id": "12ff79f755460890c4ddb231c598d3c84ffb4bfe",
"size": "16150",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "models/official/mobilenet/inception_preprocessing.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "754301"
},
{
"name": "Dockerfile",
"bytes": "2734"
},
{
"name": "Go",
"bytes": "226317"
},
{
"name": "Jupyter Notebook",
"bytes": "56231509"
},
{
"name": "Makefile",
"bytes": "2369"
},
{
"name": "Python",
"bytes": "3444271"
},
{
"name": "Shell",
"bytes": "21032"
},
{
"name": "Starlark",
"bytes": "164"
}
],
"symlink_target": ""
} |
"""Fixtures for IntelliFire integration tests."""
from collections.abc import Generator
from unittest.mock import AsyncMock, MagicMock, Mock, patch
from aiohttp.client_reqrep import ConnectionKey
import pytest
@pytest.fixture
def mock_setup_entry() -> Generator[AsyncMock, None, None]:
    """Mock setting up a config entry."""
    target = "homeassistant.components.intellifire.async_setup_entry"
    with patch(target, return_value=True) as setup_entry_mock:
        yield setup_entry_mock
@pytest.fixture()
def mock_fireplace_finder_none() -> Generator[Mock, None, None]:
    """Mock fireplace finder that reports no discovered fireplaces.

    The return annotation is fixed: ``Generator[YieldType, SendType,
    ReturnType]`` — this fixture yields a ``Mock`` and sends/returns nothing.
    """
    mock_found_fireplaces = Mock()
    mock_found_fireplaces.ips = []
    # NOTE(review): the patch below does not wire `mock_found_fireplaces` in
    # as search_fireplace's return value; the yielded mock and the patched
    # method are unconnected — confirm whether callers rely on one or both.
    with patch(
        "homeassistant.components.intellifire.config_flow.AsyncUDPFireplaceFinder.search_fireplace"
    ):
        yield mock_found_fireplaces
@pytest.fixture()
def mock_fireplace_finder_single() -> Generator[Mock, None, None]:
    """Mock fireplace finder that reports a single discovered fireplace.

    The return annotation is fixed: ``Generator[YieldType, SendType,
    ReturnType]`` — this fixture yields a ``Mock`` and sends/returns nothing.
    """
    mock_found_fireplaces = Mock()
    mock_found_fireplaces.ips = ["192.168.1.69"]
    # NOTE(review): the patch below does not wire `mock_found_fireplaces` in
    # as search_fireplace's return value; the yielded mock and the patched
    # method are unconnected — confirm whether callers rely on one or both.
    with patch(
        "homeassistant.components.intellifire.config_flow.AsyncUDPFireplaceFinder.search_fireplace"
    ):
        yield mock_found_fireplaces
@pytest.fixture
def mock_intellifire_config_flow() -> Generator[MagicMock, None, None]:
    """Return a mocked IntelliFire client.

    The return annotation is fixed: ``Generator[YieldType, SendType,
    ReturnType]`` — this fixture yields the autospec'd client mock.
    """
    data_mock = Mock()
    data_mock.serial = "12345"
    with patch(
        "homeassistant.components.intellifire.config_flow.IntellifireAsync",
        autospec=True,
    ) as intellifire_mock:
        intellifire = intellifire_mock.return_value
        # Expose a fake serial via the client's `.data` attribute.
        intellifire.data = data_mock
        yield intellifire
def mock_api_connection_error() -> ConnectionError:
    """Build a fake ConnectionError mimicking a failed iftapi.net connection."""
    error = ConnectionError()
    # aiohttp attaches the ConnectionKey of the failed host as the first arg.
    error.args = [ConnectionKey("iftapi.net", 443, False, None, None, None, None)]
    return error
| {
"content_hash": "ecf63e16ead022fd4acb34af4fea1dce",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 99,
"avg_line_length": 31.89830508474576,
"alnum_prop": 0.6976620616365569,
"repo_name": "toddeye/home-assistant",
"id": "3f73834226ceac8e9d91569047ed3ba2c9eaba56",
"size": "1882",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/components/intellifire/conftest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3005"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "47414832"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
} |
"""
Generator registration and query support.
"""
import pkg_resources
from collections import namedtuple
# An instance of this namedtuple must be registered in textx_gen entry point.
GenDesc = namedtuple('GenDesc', 'name lang desc genconf render validate')
def iter_generators():
    """
    Iterate over registered generators, yielding setuptools EntryPoint
    instances from the ``textx_gen`` entry-point group.
    """
    entry_points = pkg_resources.iter_entry_points(group='textx_gen')
    for entry_point in entry_points:
        yield entry_point
def get_generator_desc(generator_name):
    """Return the loaded GenDesc registered under *generator_name*.

    Returns None when no registered generator matches.
    """
    for entry_point in iter_generators():
        # Each entry point loads to a GenDesc namedtuple (see module docs).
        desc = entry_point.load()
        if desc.name == generator_name:
            return desc
    return None
| {
"content_hash": "dbbb170b6265dfd911bdbf8cede818de",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 77,
"avg_line_length": 25,
"alnum_prop": 0.6938461538461539,
"repo_name": "igordejanovic/textx-tools",
"id": "029efc31d3438a41f32b5b0e4e70a80d62b53a03",
"size": "650",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "txtools/gen.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "257"
},
{
"name": "Python",
"bytes": "37724"
}
],
"symlink_target": ""
} |
import logging
import subprocess
import sys
import time
from desktop.supervisor import DjangoCommandSupervisee
from desktop.conf import KERBEROS as CONF
LOG = logging.getLogger(__name__)
SPEC = DjangoCommandSupervisee("kt_renewer")
NEED_KRB181_WORKAROUND=None
def renew_from_kt():
    """Re-initialize the Kerberos ticket from the configured keytab.

    Exits the process if kinit fails.  On Kerberos >= 1.8.1 caches, also
    performs the renewal workaround (detected once and cached globally).
    """
    cmd = [
        CONF.KINIT_PATH.get(),
        "-k",                          # use keytab (host ticket)
        "-t", CONF.HUE_KEYTAB.get(),   # keytab path
        "-c", CONF.CCACHE_PATH.get(),  # credentials cache
        CONF.HUE_PRINCIPAL.get(),
    ]
    LOG.info("Reinitting kerberos from keytab: " + " ".join(cmd))
    status = subprocess.call(cmd)
    if status != 0:
        LOG.error("Couldn't reinit from keytab!")
        sys.exit(status)

    global NEED_KRB181_WORKAROUND
    if NEED_KRB181_WORKAROUND is None:
        # Probe the ticket cache only once per process.
        NEED_KRB181_WORKAROUND = detect_conf_var()
    if NEED_KRB181_WORKAROUND:
        # HUE-640: Kerberos clocks have second-level granularity; wait past
        # the initial valid time before renewing the ticket.
        time.sleep(1.5)
        perform_krb181_workaround()
def perform_krb181_workaround():
    """Renew (``kinit -R``) the ticket to dodge the Kerberos 1.8.1 cache issue.

    Exits the process with kinit's status if the renewal fails.
    """
    cmd = [CONF.KINIT_PATH.get(), "-R", "-c", CONF.CCACHE_PATH.get()]
    LOG.info("Renewing kerberos ticket to work around kerberos 1.8.1: " +
             " ".join(cmd))
    status = subprocess.call(cmd)
    if status == 0:
        return
    fmt_dict = dict(princ=CONF.HUE_PRINCIPAL.get(),
                    ccache=CONF.CCACHE_PATH.get())
    LOG.error("Couldn't renew kerberos ticket in order to work around "
              "Kerberos 1.8.1 issue. Please check that the ticket for "
              "'%(princ)s' is still renewable:\n"
              "  $ kinit -f -c %(ccache)s\n"
              "If the 'renew until' date is the same as the 'valid starting' "
              "date, the ticket cannot be renewed. Please check your KDC "
              "configuration, and the ticket renewal policy (maxrenewlife) "
              "for the '%(princ)s' and `krbtgt' principals." % fmt_dict)
    sys.exit(status)
def detect_conf_var():
    """Return True if the ticket cache contains "conf" information as is found
    in ticket caches of Kerberos 1.8.1 or later. This is incompatible with the
    Sun Java Krb5LoginModule in Java6, so we need to take an action to work
    around it.
    """
    # The original used the ``file()`` builtin, which was removed in Python 3
    # (NameError there); ``open`` works on both 2 and 3.  A context manager
    # replaces the manual try/finally close.  Read as bytes: the credentials
    # cache is binary data, and the bytes-literal needle keeps the membership
    # test correct on Python 3 as well.
    with open(CONF.CCACHE_PATH.get(), "rb") as ccache:
        return b"X-CACHECONF:" in ccache.read()
def run():
    # Supervisor entry point (see SPEC above): keep the Kerberos ticket fresh
    # by re-kinit'ing from the keytab at the configured interval, forever.
    if CONF.HUE_KEYTAB.get() is None:
        # No keytab configured: nothing to renew; exit successfully so the
        # supervisor does not treat this as a failure.
        LOG.debug("Keytab renewer not starting, no keytab configured")
        sys.exit(0)
    while True:
        renew_from_kt()
        time.sleep(CONF.KEYTAB_REINIT_FREQUENCY.get())
| {
"content_hash": "06382b8cefb4634b7aebc21f5619e995",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 78,
"avg_line_length": 33.75,
"alnum_prop": 0.6421052631578947,
"repo_name": "pwong-mapr/private-hue",
"id": "6e6e41b504b8d4d5d281242fec5574ef6f52d242",
"size": "3357",
"binary": false,
"copies": "3",
"ref": "refs/heads/HUE-1096-abe",
"path": "desktop/core/src/desktop/kt_renewer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "207947"
},
{
"name": "C",
"bytes": "9913791"
},
{
"name": "C++",
"bytes": "200199"
},
{
"name": "CSS",
"bytes": "555666"
},
{
"name": "Emacs Lisp",
"bytes": "3171"
},
{
"name": "Java",
"bytes": "3076559"
},
{
"name": "JavaScript",
"bytes": "1072625"
},
{
"name": "Perl",
"bytes": "138710"
},
{
"name": "Python",
"bytes": "22498404"
},
{
"name": "Shell",
"bytes": "34636"
},
{
"name": "XSLT",
"bytes": "202363"
}
],
"symlink_target": ""
} |
import sys
import types
import warnings
import unittest
# Decorator used in the deprecation tests to reset the warning registry for
# test isolation and reproducibility.
def warningregistry(func):
    """Decorator restoring ``warnings.__warningregistry__`` around *func*.

    Used in the deprecation tests for isolation and reproducibility: after
    *func* exits (normally or by exception) the registry is restored to its
    pre-call state, or removed entirely if it did not exist beforehand.
    """
    def wrapper(*args, **kws):
        missing = object()  # sentinel meaning "attribute was absent"
        saved = getattr(warnings, '__warningregistry__', missing)
        if saved is not missing:
            # Copy only when the registry exists.  The original code called
            # .copy() on the getattr result unconditionally, so when the
            # attribute was absent the sentinel itself was copied, the
            # ``saved is missing`` check below could never be True, and the
            # decorator wrongly *created* the attribute on exit.
            saved = saved.copy()
        try:
            return func(*args, **kws)
        finally:
            if saved is missing:
                try:
                    del warnings.__warningregistry__
                except AttributeError:
                    pass
            else:
                warnings.__warningregistry__ = saved
    return wrapper
class Test_TestLoader(unittest.TestCase):
### Basic object tests
################################################################
def test___init__(self):
loader = unittest.TestLoader()
self.assertEqual([], loader.errors)
### Tests for TestLoader.loadTestsFromTestCase
################################################################
# "Return a suite of all test cases contained in the TestCase-derived
# class testCaseClass"
def test_loadTestsFromTestCase(self):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
tests = unittest.TestSuite([Foo('test_1'), Foo('test_2')])
loader = unittest.TestLoader()
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests)
# "Return a suite of all test cases contained in the TestCase-derived
# class testCaseClass"
#
# Make sure it does the right thing even if no tests were found
def test_loadTestsFromTestCase__no_matches(self):
class Foo(unittest.TestCase):
def foo_bar(self): pass
empty_suite = unittest.TestSuite()
loader = unittest.TestLoader()
self.assertEqual(loader.loadTestsFromTestCase(Foo), empty_suite)
# "Return a suite of all test cases contained in the TestCase-derived
# class testCaseClass"
#
# What happens if loadTestsFromTestCase() is given an object
# that isn't a subclass of TestCase? Specifically, what happens
# if testCaseClass is a subclass of TestSuite?
#
# This is checked for specifically in the code, so we better add a
# test for it.
def test_loadTestsFromTestCase__TestSuite_subclass(self):
class NotATestCase(unittest.TestSuite):
pass
loader = unittest.TestLoader()
try:
loader.loadTestsFromTestCase(NotATestCase)
except TypeError:
pass
else:
self.fail('Should raise TypeError')
# "Return a suite of all test cases contained in the TestCase-derived
# class testCaseClass"
#
# Make sure loadTestsFromTestCase() picks up the default test method
# name (as specified by TestCase), even though the method name does
# not match the default TestLoader.testMethodPrefix string
def test_loadTestsFromTestCase__default_method_name(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
loader = unittest.TestLoader()
# This has to be false for the test to succeed
self.assertFalse('runTest'.startswith(loader.testMethodPrefix))
suite = loader.loadTestsFromTestCase(Foo)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [Foo('runTest')])
################################################################
### /Tests for TestLoader.loadTestsFromTestCase
### Tests for TestLoader.loadTestsFromModule
################################################################
# "This method searches `module` for classes derived from TestCase"
def test_loadTestsFromModule__TestCase_subclass(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, loader.suiteClass)
expected = [loader.suiteClass([MyTestCase('test')])]
self.assertEqual(list(suite), expected)
# "This method searches `module` for classes derived from TestCase"
#
# What happens if no tests are found (no TestCase instances)?
def test_loadTestsFromModule__no_TestCase_instances(self):
m = types.ModuleType('m')
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [])
# "This method searches `module` for classes derived from TestCase"
#
# What happens if no tests are found (TestCases instances, but no tests)?
def test_loadTestsFromModule__no_TestCase_tests(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [loader.suiteClass()])
# "This method searches `module` for classes derived from TestCase"s
#
# What happens if loadTestsFromModule() is given something other
# than a module?
#
# XXX Currently, it succeeds anyway. This flexibility
# should either be documented or loadTestsFromModule() should
# raise a TypeError
#
# XXX Certain people are using this behaviour. We'll add a test for it
def test_loadTestsFromModule__not_a_module(self):
class MyTestCase(unittest.TestCase):
def test(self):
pass
class NotAModule(object):
test_2 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(NotAModule)
reference = [unittest.TestSuite([MyTestCase('test')])]
self.assertEqual(list(suite), reference)
# Check that loadTestsFromModule honors (or not) a module
# with a load_tests function.
@warningregistry
def test_loadTestsFromModule__load_tests(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
load_tests_args = []
def load_tests(loader, tests, pattern):
self.assertIsInstance(tests, unittest.TestSuite)
load_tests_args.extend((loader, tests, pattern))
return tests
m.load_tests = load_tests
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, unittest.TestSuite)
self.assertEqual(load_tests_args, [loader, suite, None])
# With Python 3.5, the undocumented and unofficial use_load_tests is
# ignored (and deprecated).
load_tests_args = []
with warnings.catch_warnings(record=False):
warnings.simplefilter('ignore')
suite = loader.loadTestsFromModule(m, use_load_tests=False)
self.assertEqual(load_tests_args, [loader, suite, None])
@warningregistry
def test_loadTestsFromModule__use_load_tests_deprecated_positional(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
load_tests_args = []
def load_tests(loader, tests, pattern):
self.assertIsInstance(tests, unittest.TestSuite)
load_tests_args.extend((loader, tests, pattern))
return tests
m.load_tests = load_tests
# The method still works.
loader = unittest.TestLoader()
# use_load_tests=True as a positional argument.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
suite = loader.loadTestsFromModule(m, False)
self.assertIsInstance(suite, unittest.TestSuite)
# load_tests was still called because use_load_tests is deprecated
# and ignored.
self.assertEqual(load_tests_args, [loader, suite, None])
# We got a warning.
self.assertIs(w[-1].category, DeprecationWarning)
self.assertEqual(str(w[-1].message),
'use_load_tests is deprecated and ignored')
@warningregistry
def test_loadTestsFromModule__use_load_tests_deprecated_keyword(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
load_tests_args = []
def load_tests(loader, tests, pattern):
self.assertIsInstance(tests, unittest.TestSuite)
load_tests_args.extend((loader, tests, pattern))
return tests
m.load_tests = load_tests
# The method still works.
loader = unittest.TestLoader()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
suite = loader.loadTestsFromModule(m, use_load_tests=False)
self.assertIsInstance(suite, unittest.TestSuite)
# load_tests was still called because use_load_tests is deprecated
# and ignored.
self.assertEqual(load_tests_args, [loader, suite, None])
# We got a warning.
self.assertIs(w[-1].category, DeprecationWarning)
self.assertEqual(str(w[-1].message),
'use_load_tests is deprecated and ignored')
@warningregistry
def test_loadTestsFromModule__too_many_positional_args(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
load_tests_args = []
def load_tests(loader, tests, pattern):
self.assertIsInstance(tests, unittest.TestSuite)
load_tests_args.extend((loader, tests, pattern))
return tests
m.load_tests = load_tests
loader = unittest.TestLoader()
with self.assertRaises(TypeError) as cm, \
warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
loader.loadTestsFromModule(m, False, 'testme.*')
# We still got the deprecation warning.
self.assertIs(w[-1].category, DeprecationWarning)
self.assertEqual(str(w[-1].message),
'use_load_tests is deprecated and ignored')
# We also got a TypeError for too many positional arguments.
self.assertEqual(type(cm.exception), TypeError)
self.assertEqual(
str(cm.exception),
'loadTestsFromModule() takes 1 positional argument but 3 were given')
@warningregistry
def test_loadTestsFromModule__use_load_tests_other_bad_keyword(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
load_tests_args = []
def load_tests(loader, tests, pattern):
self.assertIsInstance(tests, unittest.TestSuite)
load_tests_args.extend((loader, tests, pattern))
return tests
m.load_tests = load_tests
loader = unittest.TestLoader()
with warnings.catch_warnings():
warnings.simplefilter('ignore')
with self.assertRaises(TypeError) as cm:
loader.loadTestsFromModule(
m, use_load_tests=False, very_bad=True, worse=False)
self.assertEqual(type(cm.exception), TypeError)
# The error message names the first bad argument alphabetically,
# however use_load_tests (which sorts first) is ignored.
self.assertEqual(
str(cm.exception),
"loadTestsFromModule() got an unexpected keyword argument 'very_bad'")
def test_loadTestsFromModule__pattern(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
load_tests_args = []
def load_tests(loader, tests, pattern):
self.assertIsInstance(tests, unittest.TestSuite)
load_tests_args.extend((loader, tests, pattern))
return tests
m.load_tests = load_tests
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(m, pattern='testme.*')
self.assertIsInstance(suite, unittest.TestSuite)
self.assertEqual(load_tests_args, [loader, suite, 'testme.*'])
def test_loadTestsFromModule__faulty_load_tests(self):
m = types.ModuleType('m')
def load_tests(loader, tests, pattern):
raise TypeError('some failure')
m.load_tests = load_tests
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, unittest.TestSuite)
self.assertEqual(suite.countTestCases(), 1)
# Errors loading the suite are also captured for introspection.
self.assertNotEqual([], loader.errors)
self.assertEqual(1, len(loader.errors))
error = loader.errors[0]
self.assertTrue(
'Failed to call load_tests:' in error,
'missing error string in %r' % error)
test = list(suite)[0]
self.assertRaisesRegex(TypeError, "some failure", test.m)
################################################################
### /Tests for TestLoader.loadTestsFromModule()
### Tests for TestLoader.loadTestsFromName()
################################################################
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Is ValueError raised in response to an empty name?
def test_loadTestsFromName__empty_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('')
except ValueError as e:
self.assertEqual(str(e), "Empty module name")
else:
self.fail("TestLoader.loadTestsFromName failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when the name contains invalid characters?
def test_loadTestsFromName__malformed_name(self):
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('abc () //')
error, test = self.check_deferred_error(loader, suite)
expected = "Failed to import test module: abc () //"
expected_regex = "Failed to import test module: abc \(\) //"
self.assertIn(
expected, error,
'missing error string in %r' % error)
self.assertRaisesRegex(
ImportError, expected_regex, getattr(test, 'abc () //'))
# "The specifier name is a ``dotted name'' that may resolve ... to a
# module"
#
# What happens when a module by that name can't be found?
def test_loadTestsFromName__unknown_module_name(self):
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('sdasfasfasdf')
expected = "No module named 'sdasfasfasdf'"
error, test = self.check_deferred_error(loader, suite)
self.assertIn(
expected, error,
'missing error string in %r' % error)
self.assertRaisesRegex(ImportError, expected, test.sdasfasfasdf)
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when the module is found, but the attribute isn't?
def test_loadTestsFromName__unknown_attr_name_on_module(self):
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('unittest.loader.sdasfasfasdf')
expected = "module 'unittest.loader' has no attribute 'sdasfasfasdf'"
error, test = self.check_deferred_error(loader, suite)
self.assertIn(
expected, error,
'missing error string in %r' % error)
self.assertRaisesRegex(AttributeError, expected, test.sdasfasfasdf)
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when the module is found, but the attribute isn't?
def test_loadTestsFromName__unknown_attr_name_on_package(self):
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('unittest.sdasfasfasdf')
expected = "No module named 'unittest.sdasfasfasdf'"
error, test = self.check_deferred_error(loader, suite)
self.assertIn(
expected, error,
'missing error string in %r' % error)
self.assertRaisesRegex(ImportError, expected, test.sdasfasfasdf)
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when we provide the module, but the attribute can't be
# found?
def test_loadTestsFromName__relative_unknown_name(self):
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('sdasfasfasdf', unittest)
expected = "module 'unittest' has no attribute 'sdasfasfasdf'"
error, test = self.check_deferred_error(loader, suite)
self.assertIn(
expected, error,
'missing error string in %r' % error)
self.assertRaisesRegex(AttributeError, expected, test.sdasfasfasdf)
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# Does loadTestsFromName raise ValueError when passed an empty
# name relative to a provided module?
#
# XXX Should probably raise a ValueError instead of an AttributeError
def test_loadTestsFromName__relative_empty_name(self):
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('', unittest)
error, test = self.check_deferred_error(loader, suite)
expected = "has no attribute ''"
self.assertIn(
expected, error,
'missing error string in %r' % error)
self.assertRaisesRegex(AttributeError, expected, getattr(test, ''))
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when an impossible name is given, relative to the provided
# `module`?
def test_loadTestsFromName__relative_malformed_name(self):
loader = unittest.TestLoader()
# XXX Should this raise AttributeError or ValueError?
suite = loader.loadTestsFromName('abc () //', unittest)
error, test = self.check_deferred_error(loader, suite)
expected = "module 'unittest' has no attribute 'abc () //'"
expected_regex = "module 'unittest' has no attribute 'abc \(\) //'"
self.assertIn(
expected, error,
'missing error string in %r' % error)
self.assertRaisesRegex(
AttributeError, expected_regex, getattr(test, 'abc () //'))
# "The method optionally resolves name relative to the given module"
#
# Does loadTestsFromName raise TypeError when the `module` argument
# isn't a module object?
#
# XXX Accepts the not-a-module object, ignoring the object's type
# This should raise an exception or the method name should be changed
#
# XXX Some people are relying on this, so keep it for now
def test_loadTestsFromName__relative_not_a_module(self):
class MyTestCase(unittest.TestCase):
def test(self):
pass
class NotAModule(object):
test_2 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('test_2', NotAModule)
reference = [MyTestCase('test')]
self.assertEqual(list(suite), reference)
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Does it raise an exception if the name resolves to an invalid
# object?
def test_loadTestsFromName__relative_bad_object(self):
m = types.ModuleType('m')
m.testcase_1 = object()
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('testcase_1', m)
except TypeError:
pass
else:
self.fail("Should have raised TypeError")
# "The specifier name is a ``dotted name'' that may
# resolve either to ... a test case class"
def test_loadTestsFromName__relative_TestCase_subclass(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('testcase_1', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
def test_loadTestsFromName__relative_TestSuite(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testsuite = unittest.TestSuite([MyTestCase('test')])
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('testsuite', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a test method within a test case class"
def test_loadTestsFromName__relative_testmethod(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('testcase_1.test', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Does loadTestsFromName() raise the proper exception when trying to
# resolve "a test method within a test case class" that doesn't exist
# for the given name (relative to a provided module)?
def test_loadTestsFromName__relative_invalid_testmethod(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('testcase_1.testfoo', m)
expected = "type object 'MyTestCase' has no attribute 'testfoo'"
error, test = self.check_deferred_error(loader, suite)
self.assertIn(
expected, error,
'missing error string in %r' % error)
self.assertRaisesRegex(AttributeError, expected, test.testfoo)
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a ... TestSuite instance"
def test_loadTestsFromName__callable__TestSuite(self):
m = types.ModuleType('m')
testcase_1 = unittest.FunctionTestCase(lambda: None)
testcase_2 = unittest.FunctionTestCase(lambda: None)
def return_TestSuite():
return unittest.TestSuite([testcase_1, testcase_2])
m.return_TestSuite = return_TestSuite
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('return_TestSuite', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [testcase_1, testcase_2])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase ... instance"
def test_loadTestsFromName__callable__TestCase_instance(self):
m = types.ModuleType('m')
testcase_1 = unittest.FunctionTestCase(lambda: None)
def return_TestCase():
return testcase_1
m.return_TestCase = return_TestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('return_TestCase', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [testcase_1])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase ... instance"
#*****************************************************************
#Override the suiteClass attribute to ensure that the suiteClass
#attribute is used
def test_loadTestsFromName__callable__TestCase_instance_ProperSuiteClass(self):
class SubTestSuite(unittest.TestSuite):
pass
m = types.ModuleType('m')
testcase_1 = unittest.FunctionTestCase(lambda: None)
def return_TestCase():
return testcase_1
m.return_TestCase = return_TestCase
loader = unittest.TestLoader()
loader.suiteClass = SubTestSuite
suite = loader.loadTestsFromName('return_TestCase', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [testcase_1])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a test method within a test case class"
#*****************************************************************
#Override the suiteClass attribute to ensure that the suiteClass
#attribute is used
def test_loadTestsFromName__relative_testmethod_ProperSuiteClass(self):
class SubTestSuite(unittest.TestSuite):
pass
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
loader.suiteClass=SubTestSuite
suite = loader.loadTestsFromName('testcase_1.test', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase or TestSuite instance"
#
# What happens if the callable returns something else?
def test_loadTestsFromName__callable__wrong_type(self):
m = types.ModuleType('m')
def return_wrong():
return 6
m.return_wrong = return_wrong
loader = unittest.TestLoader()
try:
suite = loader.loadTestsFromName('return_wrong', m)
except TypeError:
pass
else:
self.fail("TestLoader.loadTestsFromName failed to raise TypeError")
# "The specifier can refer to modules and packages which have not been
# imported; they will be imported as a side-effect"
def test_loadTestsFromName__module_not_loaded(self):
# We're going to try to load this module as a side-effect, so it
# better not be loaded before we try.
#
module_name = 'unittest.test.dummy'
sys.modules.pop(module_name, None)
loader = unittest.TestLoader()
try:
suite = loader.loadTestsFromName(module_name)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [])
# module should now be loaded, thanks to loadTestsFromName()
self.assertIn(module_name, sys.modules)
finally:
if module_name in sys.modules:
del sys.modules[module_name]
################################################################
### Tests for TestLoader.loadTestsFromName()
### Tests for TestLoader.loadTestsFromNames()
################################################################
def check_deferred_error(self, loader, suite):
"""Helper function for checking that errors in loading are reported.
:param loader: A loader with some errors.
:param suite: A suite that should have a late bound error.
:return: The first error message from the loader and the test object
from the suite.
"""
self.assertIsInstance(suite, unittest.TestSuite)
self.assertEqual(suite.countTestCases(), 1)
# Errors loading the suite are also captured for introspection.
self.assertNotEqual([], loader.errors)
self.assertEqual(1, len(loader.errors))
error = loader.errors[0]
test = list(suite)[0]
return error, test
# "Similar to loadTestsFromName(), but takes a sequence of names rather
# than a single name."
#
# What happens if that sequence of names is empty?
def test_loadTestsFromNames__empty_name_list(self):
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames([])
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [])
# "Similar to loadTestsFromName(), but takes a sequence of names rather
# than a single name."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens if that sequence of names is empty?
#
# XXX Should this raise a ValueError or just return an empty TestSuite?
def test_loadTestsFromNames__relative_empty_name_list(self):
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames([], unittest)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [])
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Is ValueError raised in response to an empty name?
def test_loadTestsFromNames__empty_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames([''])
except ValueError as e:
self.assertEqual(str(e), "Empty module name")
else:
self.fail("TestLoader.loadTestsFromNames failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when presented with an impossible module name?
def test_loadTestsFromNames__malformed_name(self):
    """A malformed name loads as a deferred test that raises ImportError."""
    loader = unittest.TestLoader()

    # XXX Should this raise ValueError or ImportError?
    suite = loader.loadTestsFromNames(['abc () //'])
    error, test = self.check_deferred_error(loader, list(suite)[0])
    expected = "Failed to import test module: abc () //"
    # FIX: the regex must be a raw string -- "\(" in a plain string literal
    # is an invalid escape sequence (DeprecationWarning since Python 3.6,
    # SyntaxWarning in later releases).
    expected_regex = r"Failed to import test module: abc \(\) //"
    self.assertIn(
        expected, error,
        'missing error string in %r' % error)
    self.assertRaisesRegex(
        ImportError, expected_regex, getattr(test, 'abc () //'))
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when no module can be found for the given name?
def test_loadTestsFromNames__unknown_module_name(self):
    # The load error is deferred: the suite contains a synthetic test
    # that re-raises the ImportError when run.
    loader = unittest.TestLoader()

    suite = loader.loadTestsFromNames(['sdasfasfasdf'])
    error, test = self.check_deferred_error(loader, list(suite)[0])
    expected = "Failed to import test module: sdasfasfasdf"
    self.assertIn(
        expected, error,
        'missing error string in %r' % error)
    self.assertRaisesRegex(ImportError, expected, test.sdasfasfasdf)

# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when the module can be found, but not the attribute?
def test_loadTestsFromNames__unknown_attr_name(self):
    loader = unittest.TestLoader()

    suite = loader.loadTestsFromNames(
        ['unittest.loader.sdasfasfasdf', 'unittest.test.dummy'])
    # Only the first (bad) name is inspected; the second valid name
    # must not mask the deferred AttributeError.
    error, test = self.check_deferred_error(loader, list(suite)[0])
    expected = "module 'unittest.loader' has no attribute 'sdasfasfasdf'"
    self.assertIn(
        expected, error,
        'missing error string in %r' % error)
    self.assertRaisesRegex(AttributeError, expected, test.sdasfasfasdf)

# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when given an unknown attribute on a specified `module`
# argument?
def test_loadTestsFromNames__unknown_name_relative_1(self):
    loader = unittest.TestLoader()

    suite = loader.loadTestsFromNames(['sdasfasfasdf'], unittest)
    error, test = self.check_deferred_error(loader, list(suite)[0])
    expected = "module 'unittest' has no attribute 'sdasfasfasdf'"
    self.assertIn(
        expected, error,
        'missing error string in %r' % error)
    self.assertRaisesRegex(AttributeError, expected, test.sdasfasfasdf)

# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# Do unknown attributes (relative to a provided module) still raise an
# exception even in the presence of valid attribute names?
def test_loadTestsFromNames__unknown_name_relative_2(self):
    loader = unittest.TestLoader()

    suite = loader.loadTestsFromNames(['TestCase', 'sdasfasfasdf'], unittest)
    # Index 1: the deferred error corresponds to the second, bad name.
    error, test = self.check_deferred_error(loader, list(suite)[1])
    expected = "module 'unittest' has no attribute 'sdasfasfasdf'"
    self.assertIn(
        expected, error,
        'missing error string in %r' % error)
    self.assertRaisesRegex(AttributeError, expected, test.sdasfasfasdf)

# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when faced with the empty string?
#
# XXX This currently raises AttributeError, though ValueError is probably
# more appropriate
def test_loadTestsFromNames__relative_empty_name(self):
    loader = unittest.TestLoader()

    suite = loader.loadTestsFromNames([''], unittest)
    error, test = self.check_deferred_error(loader, list(suite)[0])
    expected = "has no attribute ''"
    self.assertIn(
        expected, error,
        'missing error string in %r' % error)
    self.assertRaisesRegex(AttributeError, expected, getattr(test, ''))
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when presented with an impossible attribute name?
def test_loadTestsFromNames__relative_malformed_name(self):
    """A malformed relative name defers an AttributeError to run time."""
    loader = unittest.TestLoader()

    # XXX Should this raise AttributeError or ValueError?
    suite = loader.loadTestsFromNames(['abc () //'], unittest)
    error, test = self.check_deferred_error(loader, list(suite)[0])
    expected = "module 'unittest' has no attribute 'abc () //'"
    # FIX: raw string -- "\(" in a plain literal is an invalid escape
    # sequence (DeprecationWarning since 3.6, SyntaxWarning later).
    expected_regex = r"module 'unittest' has no attribute 'abc \(\) //'"
    self.assertIn(
        expected, error,
        'missing error string in %r' % error)
    self.assertRaisesRegex(
        AttributeError, expected_regex, getattr(test, 'abc () //'))
# "The method optionally resolves name relative to the given module"
#
# Does loadTestsFromNames() make sure the provided `module` is in fact
# a module?
#
# XXX This validation is currently not done. This flexibility should
# either be documented or a TypeError should be raised.
def test_loadTestsFromNames__relative_not_a_module(self):
    class MyTestCase(unittest.TestCase):
        def test(self):
            pass

    # Any object with the right attributes works, not just a module.
    class NotAModule(object):
        test_2 = MyTestCase

    loader = unittest.TestLoader()
    suite = loader.loadTestsFromNames(['test_2'], NotAModule)

    reference = [unittest.TestSuite([MyTestCase('test')])]
    self.assertEqual(list(suite), reference)

# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Does it raise an exception if the name resolves to an invalid
# object?
def test_loadTestsFromNames__relative_bad_object(self):
    m = types.ModuleType('m')
    m.testcase_1 = object()

    loader = unittest.TestLoader()
    try:
        loader.loadTestsFromNames(['testcase_1'], m)
    except TypeError:
        pass
    else:
        self.fail("Should have raised TypeError")

# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a test case class"
def test_loadTestsFromNames__relative_TestCase_subclass(self):
    m = types.ModuleType('m')
    class MyTestCase(unittest.TestCase):
        def test(self):
            pass
    m.testcase_1 = MyTestCase

    loader = unittest.TestLoader()
    suite = loader.loadTestsFromNames(['testcase_1'], m)
    self.assertIsInstance(suite, loader.suiteClass)

    expected = loader.suiteClass([MyTestCase('test')])
    self.assertEqual(list(suite), [expected])

# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a TestSuite instance"
def test_loadTestsFromNames__relative_TestSuite(self):
    m = types.ModuleType('m')
    class MyTestCase(unittest.TestCase):
        def test(self):
            pass
    m.testsuite = unittest.TestSuite([MyTestCase('test')])

    loader = unittest.TestLoader()
    suite = loader.loadTestsFromNames(['testsuite'], m)
    self.assertIsInstance(suite, loader.suiteClass)

    # The pre-built suite is used as-is (by equality here).
    self.assertEqual(list(suite), [m.testsuite])

# "The specifier name is a ``dotted name'' that may resolve ... to ... a
# test method within a test case class"
def test_loadTestsFromNames__relative_testmethod(self):
    m = types.ModuleType('m')
    class MyTestCase(unittest.TestCase):
        def test(self):
            pass
    m.testcase_1 = MyTestCase

    loader = unittest.TestLoader()
    suite = loader.loadTestsFromNames(['testcase_1.test'], m)
    self.assertIsInstance(suite, loader.suiteClass)

    ref_suite = unittest.TestSuite([MyTestCase('test')])
    self.assertEqual(list(suite), [ref_suite])
# #14971: Make sure the dotted name resolution works even if the actual
# function doesn't have the same name as is used to find it.
def test_loadTestsFromName__function_with_different_name_than_method(self):
    # lambdas have the name '<lambda>'.
    m = types.ModuleType('m')
    class MyTestCase(unittest.TestCase):
        test = lambda: 1
    m.testcase_1 = MyTestCase

    loader = unittest.TestLoader()
    suite = loader.loadTestsFromNames(['testcase_1.test'], m)
    self.assertIsInstance(suite, loader.suiteClass)

    ref_suite = unittest.TestSuite([MyTestCase('test')])
    self.assertEqual(list(suite), [ref_suite])

# "The specifier name is a ``dotted name'' that may resolve ... to ... a
# test method within a test case class"
#
# Does the method gracefully handle names that initially look like they
# resolve to "a test method within a test case class" but don't?
def test_loadTestsFromNames__relative_invalid_testmethod(self):
    m = types.ModuleType('m')
    class MyTestCase(unittest.TestCase):
        def test(self):
            pass
    m.testcase_1 = MyTestCase

    loader = unittest.TestLoader()
    suite = loader.loadTestsFromNames(['testcase_1.testfoo'], m)
    # The missing attribute is reported via a deferred error test.
    error, test = self.check_deferred_error(loader, list(suite)[0])
    expected = "type object 'MyTestCase' has no attribute 'testfoo'"
    self.assertIn(
        expected, error,
        'missing error string in %r' % error)
    self.assertRaisesRegex(AttributeError, expected, test.testfoo)

# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a ... TestSuite instance"
def test_loadTestsFromNames__callable__TestSuite(self):
    m = types.ModuleType('m')
    testcase_1 = unittest.FunctionTestCase(lambda: None)
    testcase_2 = unittest.FunctionTestCase(lambda: None)
    def return_TestSuite():
        return unittest.TestSuite([testcase_1, testcase_2])
    m.return_TestSuite = return_TestSuite

    loader = unittest.TestLoader()
    suite = loader.loadTestsFromNames(['return_TestSuite'], m)
    self.assertIsInstance(suite, loader.suiteClass)

    expected = unittest.TestSuite([testcase_1, testcase_2])
    self.assertEqual(list(suite), [expected])

# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase ... instance"
def test_loadTestsFromNames__callable__TestCase_instance(self):
    m = types.ModuleType('m')
    testcase_1 = unittest.FunctionTestCase(lambda: None)
    def return_TestCase():
        return testcase_1
    m.return_TestCase = return_TestCase

    loader = unittest.TestLoader()
    suite = loader.loadTestsFromNames(['return_TestCase'], m)
    self.assertIsInstance(suite, loader.suiteClass)

    # A bare TestCase gets wrapped in a suite.
    ref_suite = unittest.TestSuite([testcase_1])
    self.assertEqual(list(suite), [ref_suite])

# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase or TestSuite instance"
#
# Are staticmethods handled correctly?
def test_loadTestsFromNames__callable__call_staticmethod(self):
    m = types.ModuleType('m')
    class Test1(unittest.TestCase):
        def test(self):
            pass

    testcase_1 = Test1('test')
    class Foo(unittest.TestCase):
        @staticmethod
        def foo():
            return testcase_1
    m.Foo = Foo

    loader = unittest.TestLoader()
    suite = loader.loadTestsFromNames(['Foo.foo'], m)
    self.assertIsInstance(suite, loader.suiteClass)

    ref_suite = unittest.TestSuite([testcase_1])
    self.assertEqual(list(suite), [ref_suite])

# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase or TestSuite instance"
#
# What happens when the callable returns something else?
def test_loadTestsFromNames__callable__wrong_type(self):
    m = types.ModuleType('m')
    def return_wrong():
        return 6
    m.return_wrong = return_wrong

    loader = unittest.TestLoader()
    try:
        suite = loader.loadTestsFromNames(['return_wrong'], m)
    except TypeError:
        pass
    else:
        self.fail("TestLoader.loadTestsFromNames failed to raise TypeError")

# "The specifier can refer to modules and packages which have not been
# imported; they will be imported as a side-effect"
def test_loadTestsFromNames__module_not_loaded(self):
    # We're going to try to load this module as a side-effect, so it
    # better not be loaded before we try.
    #
    module_name = 'unittest.test.dummy'
    sys.modules.pop(module_name, None)

    loader = unittest.TestLoader()
    try:
        suite = loader.loadTestsFromNames([module_name])

        self.assertIsInstance(suite, loader.suiteClass)
        self.assertEqual(list(suite), [unittest.TestSuite()])

        # module should now be loaded, thanks to loadTestsFromName()
        self.assertIn(module_name, sys.modules)
    finally:
        # Leave sys.modules as we found it.
        if module_name in sys.modules:
            del sys.modules[module_name]
################################################################
### /Tests for TestLoader.loadTestsFromNames()
### Tests for TestLoader.getTestCaseNames()
################################################################
# "Return a sorted sequence of method names found within testCaseClass"
#
# Test.foobar is defined to make sure getTestCaseNames() respects
# loader.testMethodPrefix
def test_getTestCaseNames(self):
class Test(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foobar(self): pass
loader = unittest.TestLoader()
self.assertEqual(loader.getTestCaseNames(Test), ['test_1', 'test_2'])
# "Return a sorted sequence of method names found within testCaseClass"
#
# Does getTestCaseNames() behave appropriately if no tests are found?
def test_getTestCaseNames__no_tests(self):
class Test(unittest.TestCase):
def foobar(self): pass
loader = unittest.TestLoader()
self.assertEqual(loader.getTestCaseNames(Test), [])
# "Return a sorted sequence of method names found within testCaseClass"
#
# Are not-TestCases handled gracefully?
#
# XXX This should raise a TypeError, not return a list
#
# XXX It's too late in the 2.5 release cycle to fix this, but it should
# probably be revisited for 2.6
def test_getTestCaseNames__not_a_TestCase(self):
class BadCase(int):
def test_foo(self):
pass
loader = unittest.TestLoader()
names = loader.getTestCaseNames(BadCase)
self.assertEqual(names, ['test_foo'])
# "Return a sorted sequence of method names found within testCaseClass"
#
# Make sure inherited names are handled.
#
# TestP.foobar is defined to make sure getTestCaseNames() respects
# loader.testMethodPrefix
def test_getTestCaseNames__inheritance(self):
class TestP(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foobar(self): pass
class TestC(TestP):
def test_1(self): pass
def test_3(self): pass
loader = unittest.TestLoader()
names = ['test_1', 'test_2', 'test_3']
self.assertEqual(loader.getTestCaseNames(TestC), names)
################################################################
### /Tests for TestLoader.getTestCaseNames()

### Tests for TestLoader.testMethodPrefix
################################################################

# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromTestCase(self):
    class Foo(unittest.TestCase):
        def test_1(self): pass
        def test_2(self): pass
        def foo_bar(self): pass

    tests_1 = unittest.TestSuite([Foo('foo_bar')])
    tests_2 = unittest.TestSuite([Foo('test_1'), Foo('test_2')])

    # The prefix can be changed on the fly between calls.
    loader = unittest.TestLoader()
    loader.testMethodPrefix = 'foo'
    self.assertEqual(loader.loadTestsFromTestCase(Foo), tests_1)

    loader.testMethodPrefix = 'test'
    self.assertEqual(loader.loadTestsFromTestCase(Foo), tests_2)

# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromModule(self):
    m = types.ModuleType('m')
    class Foo(unittest.TestCase):
        def test_1(self): pass
        def test_2(self): pass
        def foo_bar(self): pass
    m.Foo = Foo

    tests_1 = [unittest.TestSuite([Foo('foo_bar')])]
    tests_2 = [unittest.TestSuite([Foo('test_1'), Foo('test_2')])]

    loader = unittest.TestLoader()
    loader.testMethodPrefix = 'foo'
    self.assertEqual(list(loader.loadTestsFromModule(m)), tests_1)

    loader.testMethodPrefix = 'test'
    self.assertEqual(list(loader.loadTestsFromModule(m)), tests_2)

# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromName(self):
    m = types.ModuleType('m')
    class Foo(unittest.TestCase):
        def test_1(self): pass
        def test_2(self): pass
        def foo_bar(self): pass
    m.Foo = Foo

    tests_1 = unittest.TestSuite([Foo('foo_bar')])
    tests_2 = unittest.TestSuite([Foo('test_1'), Foo('test_2')])

    loader = unittest.TestLoader()
    loader.testMethodPrefix = 'foo'
    self.assertEqual(loader.loadTestsFromName('Foo', m), tests_1)

    loader.testMethodPrefix = 'test'
    self.assertEqual(loader.loadTestsFromName('Foo', m), tests_2)

# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromNames(self):
    m = types.ModuleType('m')
    class Foo(unittest.TestCase):
        def test_1(self): pass
        def test_2(self): pass
        def foo_bar(self): pass
    m.Foo = Foo

    # loadTestsFromNames nests each per-name suite in an outer suite.
    tests_1 = unittest.TestSuite([unittest.TestSuite([Foo('foo_bar')])])
    tests_2 = unittest.TestSuite([Foo('test_1'), Foo('test_2')])
    tests_2 = unittest.TestSuite([tests_2])

    loader = unittest.TestLoader()
    loader.testMethodPrefix = 'foo'
    self.assertEqual(loader.loadTestsFromNames(['Foo'], m), tests_1)

    loader.testMethodPrefix = 'test'
    self.assertEqual(loader.loadTestsFromNames(['Foo'], m), tests_2)

# "The default value is 'test'"
def test_testMethodPrefix__default_value(self):
    loader = unittest.TestLoader()
    self.assertEqual(loader.testMethodPrefix, 'test')
################################################################
### /Tests for TestLoader.testMethodPrefix

### Tests for TestLoader.sortTestMethodsUsing
################################################################

# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromTestCase(self):
    # A cmp()-style comparator that reverses the default ordering.
    def reversed_cmp(x, y):
        return -((x > y) - (x < y))

    class Foo(unittest.TestCase):
        def test_1(self): pass
        def test_2(self): pass

    loader = unittest.TestLoader()
    loader.sortTestMethodsUsing = reversed_cmp

    tests = loader.suiteClass([Foo('test_2'), Foo('test_1')])
    self.assertEqual(loader.loadTestsFromTestCase(Foo), tests)

# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromModule(self):
    def reversed_cmp(x, y):
        return -((x > y) - (x < y))

    m = types.ModuleType('m')
    class Foo(unittest.TestCase):
        def test_1(self): pass
        def test_2(self): pass
    m.Foo = Foo

    loader = unittest.TestLoader()
    loader.sortTestMethodsUsing = reversed_cmp

    tests = [loader.suiteClass([Foo('test_2'), Foo('test_1')])]
    self.assertEqual(list(loader.loadTestsFromModule(m)), tests)

# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromName(self):
    def reversed_cmp(x, y):
        return -((x > y) - (x < y))

    m = types.ModuleType('m')
    class Foo(unittest.TestCase):
        def test_1(self): pass
        def test_2(self): pass
    m.Foo = Foo

    loader = unittest.TestLoader()
    loader.sortTestMethodsUsing = reversed_cmp

    tests = loader.suiteClass([Foo('test_2'), Foo('test_1')])
    self.assertEqual(loader.loadTestsFromName('Foo', m), tests)

# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromNames(self):
    def reversed_cmp(x, y):
        return -((x > y) - (x < y))

    m = types.ModuleType('m')
    class Foo(unittest.TestCase):
        def test_1(self): pass
        def test_2(self): pass
    m.Foo = Foo

    loader = unittest.TestLoader()
    loader.sortTestMethodsUsing = reversed_cmp

    tests = [loader.suiteClass([Foo('test_2'), Foo('test_1')])]
    self.assertEqual(list(loader.loadTestsFromNames(['Foo'], m)), tests)

# "Function to be used to compare method names when sorting them in
# getTestCaseNames()"
#
# Does it actually affect getTestCaseNames()?
def test_sortTestMethodsUsing__getTestCaseNames(self):
    def reversed_cmp(x, y):
        return -((x > y) - (x < y))

    class Foo(unittest.TestCase):
        def test_1(self): pass
        def test_2(self): pass

    loader = unittest.TestLoader()
    loader.sortTestMethodsUsing = reversed_cmp

    test_names = ['test_2', 'test_1']
    self.assertEqual(loader.getTestCaseNames(Foo), test_names)

# "The default value is the built-in cmp() function"
# Since cmp is now defunct, we simply verify that the results
# occur in the same order as they would with the default sort.
def test_sortTestMethodsUsing__default_value(self):
    loader = unittest.TestLoader()

    class Foo(unittest.TestCase):
        def test_2(self): pass
        def test_3(self): pass
        def test_1(self): pass

    test_names = ['test_2', 'test_3', 'test_1']
    self.assertEqual(loader.getTestCaseNames(Foo), sorted(test_names))

# "it can be set to None to disable the sort."
#
# XXX How is this different from reassigning cmp? Are the tests returned
# in a random order or something? This behaviour should die
def test_sortTestMethodsUsing__None(self):
    class Foo(unittest.TestCase):
        def test_1(self): pass
        def test_2(self): pass

    loader = unittest.TestLoader()
    loader.sortTestMethodsUsing = None

    # With sorting disabled the order is unspecified; compare as sets.
    test_names = ['test_2', 'test_1']
    self.assertEqual(set(loader.getTestCaseNames(Foo)), set(test_names))
################################################################
### /Tests for TestLoader.sortTestMethodsUsing
### Tests for TestLoader.suiteClass
################################################################
# "Callable object that constructs a test suite from a list of tests."
def test_suiteClass__loadTestsFromTestCase(self):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
tests = [Foo('test_1'), Foo('test_2')]
loader = unittest.TestLoader()
loader.suiteClass = list
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests)
# It is implicit in the documentation for TestLoader.suiteClass that
# all TestLoader.loadTestsFrom* methods respect it. Let's make sure
def test_suiteClass__loadTestsFromModule(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests = [[Foo('test_1'), Foo('test_2')]]
loader = unittest.TestLoader()
loader.suiteClass = list
self.assertEqual(loader.loadTestsFromModule(m), tests)
# It is implicit in the documentation for TestLoader.suiteClass that
# all TestLoader.loadTestsFrom* methods respect it. Let's make sure
def test_suiteClass__loadTestsFromName(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests = [Foo('test_1'), Foo('test_2')]
loader = unittest.TestLoader()
loader.suiteClass = list
self.assertEqual(loader.loadTestsFromName('Foo', m), tests)
# It is implicit in the documentation for TestLoader.suiteClass that
# all TestLoader.loadTestsFrom* methods respect it. Let's make sure
def test_suiteClass__loadTestsFromNames(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests = [[Foo('test_1'), Foo('test_2')]]
loader = unittest.TestLoader()
loader.suiteClass = list
self.assertEqual(loader.loadTestsFromNames(['Foo'], m), tests)
# "The default value is the TestSuite class"
def test_suiteClass__default_value(self):
loader = unittest.TestLoader()
self.assertIs(loader.suiteClass, unittest.TestSuite)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| {
"content_hash": "34af95ad9141de19feb0664e300226c7",
"timestamp": "",
"source": "github",
"line_count": 1529,
"max_line_length": 83,
"avg_line_length": 39.74166121648136,
"alnum_prop": 0.626248662881593,
"repo_name": "batermj/algorithm-challenger",
"id": "31e2f0fc3d61dfb54f7c99866eca3b4f3cf226ae",
"size": "60765",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "code-analysis/programming_anguage/python/source_codes/Python3.5.9/Python-3.5.9/Lib/unittest/test/test_loader.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "655185"
},
{
"name": "Batchfile",
"bytes": "127416"
},
{
"name": "C",
"bytes": "33127630"
},
{
"name": "C++",
"bytes": "1364796"
},
{
"name": "CSS",
"bytes": "3163"
},
{
"name": "Common Lisp",
"bytes": "48962"
},
{
"name": "DIGITAL Command Language",
"bytes": "26402"
},
{
"name": "DTrace",
"bytes": "2196"
},
{
"name": "Go",
"bytes": "26248"
},
{
"name": "HTML",
"bytes": "385719"
},
{
"name": "Haskell",
"bytes": "33612"
},
{
"name": "Java",
"bytes": "1084"
},
{
"name": "JavaScript",
"bytes": "20754"
},
{
"name": "M4",
"bytes": "403992"
},
{
"name": "Makefile",
"bytes": "238185"
},
{
"name": "Objective-C",
"bytes": "4934684"
},
{
"name": "PHP",
"bytes": "3513"
},
{
"name": "PLSQL",
"bytes": "45772"
},
{
"name": "Perl",
"bytes": "649"
},
{
"name": "PostScript",
"bytes": "27606"
},
{
"name": "PowerShell",
"bytes": "21737"
},
{
"name": "Python",
"bytes": "55270625"
},
{
"name": "R",
"bytes": "29951"
},
{
"name": "Rich Text Format",
"bytes": "14551"
},
{
"name": "Roff",
"bytes": "292490"
},
{
"name": "Ruby",
"bytes": "519"
},
{
"name": "Scala",
"bytes": "846446"
},
{
"name": "Shell",
"bytes": "491113"
},
{
"name": "Swift",
"bytes": "881"
},
{
"name": "TeX",
"bytes": "337654"
},
{
"name": "VBScript",
"bytes": "140"
},
{
"name": "XSLT",
"bytes": "153"
}
],
"symlink_target": ""
} |
import Image, ImageFile
# Module-level handler slot: None until an application installs a real
# FITS codec via register_handler().
_handler = None

##
# Install application-specific FITS image handler.
#
# @param handler Handler object with open()/save() hooks.

def register_handler(handler):
    global _handler
    _handler = handler
# --------------------------------------------------------------------
# Image adapter
def _accept(prefix):
return prefix[:6] == "SIMPLE"
class FITSStubImageFile(ImageFile.StubImageFile):
    """Stub image plugin for FITS files: recognizes the format and
    delegates actual decoding to the application-registered handler."""

    format = "FITS"
    format_description = "FITS"

    def _open(self):
        # Remember the stream position so we can rewind after sniffing
        # the 6-byte magic word.
        offset = self.fp.tell()

        if not _accept(self.fp.read(6)):
            raise SyntaxError("Not a FITS file")

        # FIXME: add more sanity checks here; mandatory header items
        # include SIMPLE, BITPIX, NAXIS, etc.

        self.fp.seek(offset)

        # make something up -- placeholder mode/size until a real
        # handler (if any) fills in the details via loader.open().
        self.mode = "F"
        self.size = 1, 1

        loader = self._load()
        if loader:
            loader.open(self)

    def _load(self):
        # The registered application handler, or None if none installed.
        return _handler
def _save(im, fp, filename):
    """Save hook: delegate to the registered handler, or fail clearly.

    Raises IOError when no usable save handler has been registered.
    """
    # BUG FIX: the original wrote hasattr("_handler", "save") -- testing the
    # *string* "_handler" (which never has a 'save' attribute) instead of the
    # module-level handler object, so even a properly registered handler
    # could never be used to save. (Pillow fixed the same bug upstream.)
    if _handler is None or not hasattr(_handler, "save"):
        raise IOError("FITS save handler not installed")
    _handler.save(im, fp, filename)
# --------------------------------------------------------------------
# Registry

# Register the stub opener/saver and the common FITS filename
# extensions with PIL's global format registry.
Image.register_open(FITSStubImageFile.format, FITSStubImageFile, _accept)
Image.register_save(FITSStubImageFile.format, _save)

Image.register_extension(FITSStubImageFile.format, ".fit")
Image.register_extension(FITSStubImageFile.format, ".fits")
| {
"content_hash": "428ad47b5231a8eacbf1ef0b62f156a2",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 73,
"avg_line_length": 24.285714285714285,
"alnum_prop": 0.5647058823529412,
"repo_name": "robiame/AndroidGeodata",
"id": "ca7b0d46ef411092089c0d0be29c6a4c01df73c3",
"size": "1716",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pil/FitsStubImagePlugin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "615842"
}
],
"symlink_target": ""
} |
from .rocon_servicemanager import *
| {
"content_hash": "ec6fe066c272fc877056d07e5ae9bc1c",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 35,
"avg_line_length": 36,
"alnum_prop": 0.8055555555555556,
"repo_name": "jihoonl/orchestra",
"id": "bea3d6f5797f69041b2b09983efbdf99fdda611c",
"size": "59",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "orchestration_platform/src/orchestration_platform/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "162214"
},
{
"name": "JavaScript",
"bytes": "122374"
},
{
"name": "Python",
"bytes": "16130"
}
],
"symlink_target": ""
} |
from __future__ import (division, print_function,
absolute_import, unicode_literals)
import webbrowser
import shutil
import pathlib
# Use subprocess32 if available (a backport of the Python 3 subprocess
# module); otherwise fall back to the stdlib module.
try:
    import subprocess32 as subprocess
except ImportError:
    # FIX: was a bare `except:`, which would also swallow unrelated errors
    # (SystemExit, KeyboardInterrupt, a broken subprocess32 install).
    # Only a genuinely missing module should trigger the fallback.
    import subprocess
from invoke import task
cwd = pathlib.Path('.')
def check_output(*args, **kwargs):
    """Subprocess check_output, but prints commands and output by default.

    Also allows printing of error message for helpful debugging.
    Use print_all=False to turn off all printing.

    Extra keyword arguments: ``print_all`` overrides both ``print_in``
    (echo the command) and ``print_out`` (echo the output). All other
    arguments pass straight through to subprocess.check_output. Returns
    the decoded output as a list of lines.
    """
    print_all = kwargs.pop('print_all', None)
    if print_all is None:
        print_in = kwargs.pop('print_in', True)
        print_out = kwargs.pop('print_out', True)
    else:
        print_in = print_out = print_all

    if print_in:
        print('')
        print(' '.join(args[0]))

    try:
        raw = subprocess.check_output(*args, **kwargs)
        lines = raw.decode('utf-8').splitlines()
    except subprocess.CalledProcessError as err:
        # Wrap in try/except so that check_output can print
        raise err

    if print_out:
        for line in lines:
            print(line)

    return lines
# Helper functions for unix-like removing folders and files.
def rm_rf(*paths):
    """Recursively delete each given directory, ignoring ones that do
    not exist (like `rm -rf`)."""
    for path in paths:
        try:
            shutil.rmtree(str(path))
        except OSError:
            # Missing (or otherwise unremovable) directory: best effort.
            pass
# NOTE: the task names below ('help', 'open') shadow Python builtins within
# this module -- acceptable here because invoke uses the function name as
# the CLI task name.
@task(default=True)
def help():
    """Print out a helpful message."""
    print("""\
Usage: inv[oke] [--core-opts] task1 [--task1-opts] ... taskN [--taskN-opts]
Tasks:
clean Delete the contents of the _build/ directory
html Create sphinx documentation as stand-alone HTML files
open Open the HTML documentation in a web browser
To see more about a specific task, run invoke --help task""")

@task
def clean():
    """Delete the contents of the _build/ directory."""
    rm_rf(cwd/'_build')

@task
def html():
    """Create sphinx documentation as stand-alone HTML files."""
    # Output goes to _build/html, matching what clean() removes.
    check_output(['sphinx-build', '-b', 'html', '.', str(cwd/'_build/html')])

@task
def open():
    """Open the HTML documentation in a browser"""
    index_path = cwd/'_build/html/index.html'
    # file:// URI so the browser opens the local build directly.
    webbrowser.open(index_path.absolute().as_uri())
| {
"content_hash": "27850b3aff3147633ab02e78baef64ac",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 77,
"avg_line_length": 24.28125,
"alnum_prop": 0.6383526383526383,
"repo_name": "marohngroup/kpfm",
"id": "035bd0aaabf44d2d60b76631b612b18686446f35",
"size": "2599",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "docs/tasks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2277"
},
{
"name": "Python",
"bytes": "118039"
}
],
"symlink_target": ""
} |
import logging
from c7n.actions import Action as BaseAction
from c7n.utils import local_session, chunks, type_schema
from c7n.exceptions import PolicyValidationError
from kubernetes.client import V1DeleteOptions
log = logging.getLogger('custodian.k8s.actions')
class Action(BaseAction):
    # Provider-local root for Kubernetes actions; adds no behavior of its
    # own beyond the generic c7n BaseAction.
    pass
class MethodAction(Action):
    """Base for actions that call one API-client method per resource.

    Subclasses declare ``method_spec`` (at minimum ``{'op': <client method
    name>}``); resources are processed in batches of ``chunk_size``.
    """

    method_spec = ()
    chunk_size = 20

    def validate(self):
        # Fail fast if a subclass forgot to declare its client operation.
        if not self.method_spec:
            raise NotImplementedError("subclass must define method_spec")
        return self

    def process(self, resources):
        # Build a client for the resource's API group/version, then hand
        # the resources over in fixed-size batches.
        session = local_session(self.manager.session_factory)
        model = self.manager.get_model()
        client = session.client(model.group, model.version)
        for batch in chunks(resources, self.chunk_size):
            self.process_resource_set(client, batch)

    def process_resource_set(self, client, resources):
        # Invoke the declared client operation once per resource, by name.
        invoke = getattr(client, self.method_spec['op'])
        for resource in resources:
            invoke(name=resource['metadata']['name'])
class PatchAction(MethodAction):
    """
    Patches a resource

    Requires patch and namespaced attributes on the resource definition
    """

    def validate(self):
        # The resource model must name a client patch method.
        if not self.manager.get_model().patch:
            raise PolicyValidationError('patch attribute not defined for resource')
        return self

    def get_permissions(self):
        # e.g. 'patch_namespaced_pod' -> 'PatchNamespacedPod'
        method_name = self.manager.get_model().patch
        return ''.join(part.capitalize() for part in method_name.split('_'))

    def patch_resources(self, client, resources, **patch_args):
        model = self.manager.get_model()
        apply_patch = getattr(client, model.patch)
        for resource in resources:
            meta = resource['metadata']
            patch_args['name'] = meta['name']
            # Namespaced resources additionally need their namespace.
            if model.namespaced:
                patch_args['namespace'] = meta['namespace']
            apply_patch(**patch_args)
class PatchResource(PatchAction):
    """Generic ``patch`` action, auto-registered on patchable resources."""

    schema = type_schema('patch', options={'type': 'object'})

    def process_resource_set(self, client, resources):
        """Forward the policy's ``options`` mapping as the patch body."""
        body = self.data.get('options', {})
        self.patch_resources(client, resources, body=body)

    @classmethod
    def register_resources(cls, registry, resource_class):
        """Register 'patch' on resource classes that support patching."""
        model = resource_class.resource_type
        if hasattr(model, 'patch') and hasattr(model, 'namespaced'):
            resource_class.action_registry.register('patch', cls)
class DeleteAction(MethodAction):
    """
    Deletes a resource.

    The resource definition must expose ``delete`` and ``namespaced`` attributes.
    """

    def validate(self):
        """Reject policies whose resource model declares no delete operation."""
        if self.manager.get_model().delete:
            return self
        raise PolicyValidationError('delete attribute not defined for resource')

    def get_permissions(self):
        """Derive a CamelCase permission name from the snake_case delete method."""
        parts = self.manager.get_model().delete.split('_')
        return ''.join(word.capitalize() for word in parts)

    def delete_resources(self, client, resources, **delete_args):
        """Delete every resource, injecting its name (and namespace if scoped)."""
        operation = getattr(client, self.manager.get_model().delete)
        is_namespaced = self.manager.get_model().namespaced
        for resource in resources:
            delete_args['name'] = resource['metadata']['name']
            if is_namespaced:
                delete_args['namespace'] = resource['metadata']['namespace']
            operation(**delete_args)
class DeleteResource(DeleteAction):
    """
    Deletes a Resource.

    .. code-block:: yaml

        policies:
          - name: delete-resource
            resource: k8s.pod  # k8s.{resource}
            filters:
              - 'metadata.name': 'test-{resource}'
            actions:
              - delete
    """

    schema = type_schema('delete', grace_period_seconds={'type': 'integer'})

    def process_resource_set(self, client, resources):
        """Delete the batch using the policy's grace period (default 30s)."""
        options = V1DeleteOptions()
        options.grace_period_seconds = self.data.get('grace_period_seconds', 30)
        self.delete_resources(client, resources, body=options)

    @classmethod
    def register_resources(cls, registry, resource_class):
        """Register 'delete' on resource classes that support deletion."""
        model = resource_class.resource_type
        if hasattr(model, 'delete') and hasattr(model, 'namespaced'):
            resource_class.action_registry.register('delete', cls)
| {
"content_hash": "4f542b55847d74fe778d56b2b2b18bf4",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 84,
"avg_line_length": 31.39855072463768,
"alnum_prop": 0.626355873528733,
"repo_name": "ocampocj/cloud-custodian",
"id": "c9a7673c41eea382b25b178186167e167b39f98c",
"size": "4924",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tools/c7n_kube/c7n_kube/actions/core.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "7986"
},
{
"name": "Go",
"bytes": "141794"
},
{
"name": "HTML",
"bytes": "31"
},
{
"name": "Makefile",
"bytes": "9857"
},
{
"name": "PowerShell",
"bytes": "1440"
},
{
"name": "Python",
"bytes": "4854167"
},
{
"name": "Shell",
"bytes": "7227"
}
],
"symlink_target": ""
} |
"""
Este processamento gera uma tabulação de idiomas de publicação de cada artigo
da coleção SciELO.
Formato de saída:
"PID","ISSN","título","área temática","ano de publicação","tipo de documento","idiomas","pt","es","en","other","pt-es","pt-en","en-es","exclusivo nacional","exclusivo estrangeiro","nacional + estrangeiro"
"""
import argparse
import logging
import codecs
import utils
logger = logging.getLogger(__name__)
def _config_logging(logging_level='INFO', logging_file=None):
    """
    Configure the module-level logger and attach a handler.

    :param logging_level: level name (DEBUG/INFO/WARNING/ERROR/CRITICAL);
        unknown names fall back to INFO.
    :param logging_file: append log records to this file when given,
        otherwise log to the console (StreamHandler).
    :return: the configured module-level logger.
    """
    allowed_levels = {
        'DEBUG': logging.DEBUG,
        'INFO': logging.INFO,
        'WARNING': logging.WARNING,
        'ERROR': logging.ERROR,
        'CRITICAL': logging.CRITICAL
    }
    # Resolve the level once; fall back to the numeric INFO constant rather
    # than the string 'INFO' so setLevel() always receives the same type.
    level = allowed_levels.get(logging_level, logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    logger.setLevel(level)
    if logging_file:
        hl = logging.FileHandler(logging_file, mode='a')
    else:
        hl = logging.StreamHandler()
    hl.setFormatter(formatter)
    hl.setLevel(level)
    logger.addHandler(hl)
    return logger
class Dumper(object):
    """
    Dump a per-article tabulation of publication languages as CSV lines.

    A header row is written on construction; each document then yields one
    quoted CSV line, written to ``output_file`` when given, else to stdout.
    """

    def __init__(self, collection, issns=None, output_file=None):
        self._ratchet = utils.ratchet_server()
        self._articlemeta = utils.articlemeta_server()
        self.collection = collection
        self.issns = issns
        # When no output file is given self.output_file stays falsy and
        # write() prints to stdout instead.
        self.output_file = codecs.open(output_file, 'w', encoding='utf-8') if output_file else output_file
        header = [u"PID",u"ISSN",u"título",u"área temática",u"ano de publicação",u"tipo de documento",u"idiomas",u"pt",u"es",u"en",u"other",u"pt-es",u"pt-en",u"en-es",u"exclusivo nacional",u"exclusivo estrangeiro",u"nacional + estrangeiro"]
        self.write(','.join(header))

    def write(self, line):
        """Write one CSV line to the output file, or print it when none was given."""
        if not self.output_file:
            # NOTE(review): encoding targets Python 2 stdout; under Python 3
            # this prints a bytes repr -- confirm the intended runtime.
            print(line.encode('utf-8'))
        else:
            self.output_file.write('%s\r\n' % line)

    def run(self):
        """Write one CSV line per document in the collection."""
        for item in self.items():
            self.write(item)

    def items(self):
        """Yield a formatted CSV line for every document of every selected ISSN."""
        if not self.issns:
            self.issns = [None]  # a None ISSN means "all journals"
        for issn in self.issns:
            for data in self._articlemeta.documents(collection=self.collection, issn=issn):
                logger.debug('Reading document: %s' % data.publisher_id)
                yield self.fmt_csv(data)

    def fmt_csv(self, data):
        """Build the quoted CSV line for one document (see the module docstring)."""
        known_languages = set(['pt', 'es', 'en'])
        languages = set(data.languages())
        line = []
        line.append(data.publisher_id)
        line.append(data.journal.scielo_issn)
        line.append(data.journal.title)
        line.append(','.join(data.journal.subject_areas))
        line.append(data.publication_date[0:4])
        line.append(data.document_type)
        line.append(','.join(languages))
        line.append('1' if 'pt' in languages else '0')  # PT
        line.append('1' if 'es' in languages else '0')  # ES
        line.append('1' if 'en' in languages else '0')  # EN
        line.append('1' if len(languages.difference(known_languages)) > 0 else '0')  # OTHER
        line.append('1' if 'pt' in languages and 'es' in languages and len(languages) == 2 else '0')  # PT-ES
        line.append('1' if 'pt' in languages and 'en' in languages and len(languages) == 2 else '0')  # PT-EN
        line.append('1' if 'es' in languages and 'en' in languages and len(languages) == 2 else '0')  # ES-EN
        line.append('1' if 'pt' in languages and len(languages) == 1 else '0')  # Exclusivo Nacional
        line.append('1' if 'pt' not in languages and len(languages) > 0 else '0')  # Exclusivo Estrangeiro
        line.append('1' if 'pt' in languages and len(languages) > 1 else '0')  # Nacional + Estrangeiro
        joined_line = ','.join(['"%s"' % i.replace('"', '""') for i in line])
        return joined_line
def main():
    """CLI entry point: parse arguments, configure logging and run the Dumper."""
    parser = argparse.ArgumentParser(description='Dump languages distribution by article')
    parser.add_argument('issns', nargs='*', help='ISSN\'s separated by spaces')
    parser.add_argument('--collection', '-c', help='Collection Acronym')
    parser.add_argument('--output_file', '-r', help='File to receive the dumped data')
    parser.add_argument('--logging_file', '-o', help='Full path to the log file')
    parser.add_argument(
        '--logging_level', '-l',
        default='DEBUG',
        choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
        help='Logggin level')
    options = parser.parse_args()

    _config_logging(options.logging_level, options.logging_file)
    logger.info('Dumping data for: %s' % options.collection)

    # Validate given ISSNs; None selects every journal in the collection.
    selected_issns = utils.ckeck_given_issns(options.issns) if len(options.issns) > 0 else None
    Dumper(options.collection, selected_issns, options.output_file).run()
| {
"content_hash": "c6ecae7c8d5ef53ea59895411c0dc0b5",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 240,
"avg_line_length": 33.020270270270274,
"alnum_prop": 0.6020053202373644,
"repo_name": "fabiobatalha/processing",
"id": "5dc3742371c145af4faa909bb21a76676bc6dbbe",
"size": "4920",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "publication/languages.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "168467"
},
{
"name": "Thrift",
"bytes": "5552"
}
],
"symlink_target": ""
} |
import datetime
import os
from google.appengine.ext import ndb
from google.appengine.ext.webapp import template
from helpers.data_fetchers.team_details_data_fetcher import TeamDetailsDataFetcher
from helpers.award_helper import AwardHelper
from helpers.event_helper import EventHelper
from helpers.match_helper import MatchHelper
from helpers.media_helper import MediaHelper
from models.award import Award
from models.event_team import EventTeam
from models.match import Match
from models.media import Media
class TeamRenderer(object):
    """Renders team detail and team history pages via webapp templates (GAE)."""
    @classmethod
    def render_team_details(cls, handler, team, year, is_canonical):
        """
        Render the team details page for a single season.

        :param handler: request handler supplying template_values and cache knobs
        :param team: Team model instance
        :param year: season year being rendered
        :param is_canonical: whether this URL is the team's canonical one
        :return: rendered HTML string, or None if the team has no events that year
        """
        media_key_futures = Media.query(Media.references == team.key, Media.year == year).fetch_async(500, keys_only=True)
        events_sorted, matches_by_event_key, awards_by_event_key, valid_years = TeamDetailsDataFetcher.fetch(team, year, return_valid_years=True)
        if not events_sorted:
            return None
        media_futures = ndb.get_multi_async(media_key_futures.get_result())
        participation = []
        year_wlt_list = []
        year_match_avg_list = []
        current_event = None
        matches_upcoming = None
        short_cache = False
        for event in events_sorted:
            event_matches = matches_by_event_key.get(event.key, [])
            event_awards = AwardHelper.organizeAwards(awards_by_event_key.get(event.key, []))
            matches_organized = MatchHelper.organizeMatches(event_matches)
            if event.now:
                current_event = event
                matches_upcoming = MatchHelper.upcomingMatches(event_matches)
            if event.within_a_day:
                # Event in progress (or just finished): shorten cache lifetime.
                short_cache = True
            if year == 2015:
                # 2015 game had no W/L/T records; average scores are shown instead.
                display_wlt = None
                match_avg = EventHelper.calculateTeamAvgScoreFromMatches(team.key_name, event_matches)
                year_match_avg_list.append(match_avg)
                qual_avg, elim_avg, _, _ = match_avg
            else:
                qual_avg = None
                elim_avg = None
                wlt = EventHelper.calculateTeamWLTFromMatches(team.key_name, event_matches)
                year_wlt_list.append(wlt)
                if wlt["win"] + wlt["loss"] + wlt["tie"] == 0:
                    display_wlt = None
                else:
                    display_wlt = wlt
            team_rank = None
            if event.rankings:
                # Rankings rows look like (rank, team_number, ...) -- find this team's.
                for element in event.rankings:
                    if str(element[1]) == str(team.team_number):
                        team_rank = element[0]
                        break
            participation.append({'event': event,
                                  'matches': matches_organized,
                                  'wlt': display_wlt,
                                  'qual_avg': qual_avg,
                                  'elim_avg': elim_avg,
                                  'rank': team_rank,
                                  'awards': event_awards})
        if year == 2015:
            # Season aggregates for 2015: average scores across all events.
            year_wlt = None
            year_qual_scores = []
            year_elim_scores = []
            for _, _, event_qual_scores, event_elim_scores in year_match_avg_list:
                year_qual_scores += event_qual_scores
                year_elim_scores += event_elim_scores
            year_qual_avg = float(sum(year_qual_scores)) / len(year_qual_scores) if year_qual_scores != [] else None
            year_elim_avg = float(sum(year_elim_scores)) / len(year_elim_scores) if year_elim_scores != [] else None
        else:
            # Season aggregates for other years: combined W/L/T record.
            year_qual_avg = None
            year_elim_avg = None
            year_wlt = {"win": 0, "loss": 0, "tie": 0}
            for wlt in year_wlt_list:
                year_wlt["win"] += wlt["win"]
                year_wlt["loss"] += wlt["loss"]
                year_wlt["tie"] += wlt["tie"]
            if year_wlt["win"] + year_wlt["loss"] + year_wlt["tie"] == 0:
                year_wlt = None
        medias_by_slugname = MediaHelper.group_by_slugname([media_future.get_result() for media_future in media_futures])
        handler.template_values.update({
            "is_canonical": is_canonical,
            "team": team,
            "participation": participation,
            "year": year,
            "years": valid_years,
            "year_wlt": year_wlt,
            "year_qual_avg": year_qual_avg,
            "year_elim_avg": year_elim_avg,
            "current_event": current_event,
            "matches_upcoming": matches_upcoming,
            "medias_by_slugname": medias_by_slugname
        })
        if short_cache:
            handler._cache_expiration = handler.SHORT_CACHE_EXPIRATION
        path = os.path.join(os.path.dirname(__file__), '../templates/team_details.html')
        return template.render(path, handler.template_values)
    @classmethod
    def render_team_history(cls, handler, team, is_canonical):
        """
        Render the all-time history page for a team: every event attended and
        the awards won there, ordered by event start date.

        :return: rendered HTML string
        """
        event_team_keys_future = EventTeam.query(EventTeam.team == team.key).fetch_async(1000, keys_only=True)
        award_keys_future = Award.query(Award.team_list == team.key).fetch_async(1000, keys_only=True)
        event_teams_futures = ndb.get_multi_async(event_team_keys_future.get_result())
        awards_futures = ndb.get_multi_async(award_keys_future.get_result())
        event_keys = [event_team_future.get_result().event for event_team_future in event_teams_futures]
        events_futures = ndb.get_multi_async(event_keys)
        # Group this team's awards by the event at which they were won.
        awards_by_event = {}
        for award_future in awards_futures:
            award = award_future.get_result()
            if award.event.id() not in awards_by_event:
                awards_by_event[award.event.id()] = [award]
            else:
                awards_by_event[award.event.id()].append(award)
        event_awards = []
        current_event = None
        matches_upcoming = None
        short_cache = False
        for event_future in events_futures:
            event = event_future.get_result()
            if event.now:
                current_event = event
                team_matches_future = Match.query(Match.event == event.key, Match.team_key_names == team.key_name)\
                    .fetch_async(500, keys_only=True)
                matches = ndb.get_multi(team_matches_future.get_result())
                matches_upcoming = MatchHelper.upcomingMatches(matches)
            if event.within_a_day:
                short_cache = True
            if event.key_name in awards_by_event:
                sorted_awards = AwardHelper.organizeAwards(awards_by_event[event.key_name])
            else:
                sorted_awards = []
            event_awards.append((event, sorted_awards))
        # Python 2 tuple-parameter lambda; events lacking a start date sort to year end.
        event_awards = sorted(event_awards, key=lambda (e, _): e.start_date if e.start_date else datetime.datetime(e.year, 12, 31))
        years = sorted(set([et.get_result().year for et in event_teams_futures if et.get_result().year is not None]))
        handler.template_values.update({
            'is_canonical': is_canonical,
            'team': team,
            'event_awards': event_awards,
            'years': years,
            'current_event': current_event,
            'matches_upcoming': matches_upcoming
        })
        if short_cache:
            handler._cache_expiration = handler.SHORT_CACHE_EXPIRATION
        path = os.path.join(os.path.dirname(__file__), '../templates/team_history.html')
        return template.render(path, handler.template_values)
| {
"content_hash": "5351685634ec1adb27f82e6274e42f53",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 145,
"avg_line_length": 41.40555555555556,
"alnum_prop": 0.5721186099557225,
"repo_name": "1fish2/the-blue-alliance",
"id": "f236e76372f5bed247bf992d8cb8a150c22ccea5",
"size": "7453",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "renderers/team_renderer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "101"
},
{
"name": "CSS",
"bytes": "372038"
},
{
"name": "HTML",
"bytes": "5350307"
},
{
"name": "JavaScript",
"bytes": "240747"
},
{
"name": "PHP",
"bytes": "10727"
},
{
"name": "Python",
"bytes": "1529862"
},
{
"name": "Ruby",
"bytes": "4677"
},
{
"name": "Shell",
"bytes": "413"
}
],
"symlink_target": ""
} |
from .sparseTensorWrapper import SparseTensor
from .canoTensor import CanoTensor
from .tucker import Tucker
from .tensorTrain import TensorTrain
| {
"content_hash": "b0e463af01f5672f0ee16aacb6ade823",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 45,
"avg_line_length": 36.25,
"alnum_prop": 0.8620689655172413,
"repo_name": "vondrejc/FFTHomPy",
"id": "eb4c7c3ecc6ceb7d0c36f0ecc12b53f36f5480cd",
"size": "145",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ffthompy/tensorsLowRank/objects/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "332845"
}
],
"symlink_target": ""
} |
import datetime
import logging
from decimal import Decimal
from django.core.urlresolvers import reverse, NoReverseMatch
from django.conf import settings
from django.db import models
from django.db.models.query import QuerySet
from django.utils.safestring import mark_safe
from django.utils.html import escape
from django.utils.functional import cached_property
from django.utils.formats import date_format, time_format, number_format
from django.utils.translation import ugettext_lazy as _
from .exceptions import AttrCrudError, SetupCrudError, ReverseCrudError
logger = logging.getLogger('django')
def maybe_call(value_or_func, *args, **kwargs):
    """Call ``value_or_func(*args, **kwargs)`` if it is callable, else return it as-is."""
    if callable(value_or_func):
        return value_or_func(*args, **kwargs)
    return value_or_func
class RichViewMixin:
    """
    Shared behaviour for the rich CRUD views: button processing, URL
    resolution, labelling, and template context assembly.
    """
    #: an instance of django.db.models.Model to be displayed, this is already required for ListView or DetailView
    model = None
    #: button definitions (strings or dicts); see process_button for the accepted shapes
    buttons = []
    #: page title template, formatted with label_ctx; falls back to the plural model name
    title = None
    def __init__(self, *args, **kwargs):
        self._meta = self.model._meta
        super(RichViewMixin, self).__init__(*args, **kwargs)
    def getattr(self, name, alt=AttrCrudError):
        """
        Look up ``name`` on the view, then on ``self.object``.

        ``alt`` is returned when the name is missing, unless it is an
        Exception subclass, in which case it is raised.
        """
        if hasattr(self, name):
            return getattr(self, name)
        elif hasattr(self, 'object') and hasattr(self.object, name):
            return getattr(self.object, name)
        if isinstance(alt, type) and issubclass(alt, Exception):
            raise alt('%s not found on %s instance or self.object' % (name, self.__class__.__name__))
        else:
            return alt
    def check_show_button(self, button):
        """Return True when the button definition should be rendered."""
        # NOTE(review): for a *string* button, `'url' in button` is a substring
        # test and `button['url']` would raise TypeError -- confirm string
        # buttons containing "url" cannot occur here.
        if button is None or ('url' in button and button['url'] is None):
            return False
        if not isinstance(button, dict):
            return True
        if 'show_if' not in button:
            return True
        if button['show_if'] in {True, False}:
            return button['show_if']
        show_if = self.getattr(button['show_if'])
        return bool(maybe_call(show_if))
    def process_buttons(self, button_group):
        """Recursively process a (possibly nested) group of button definitions."""
        if not button_group:
            return []
        if not isinstance(button_group, (dict, str)):
            return [self.process_buttons(button) for button in button_group if self.check_show_button(button)]
        return self.process_button(button_group)
    def get_url(self, value):
        """
        Resolve a URL value: 'func|name' calls a view/object function,
        'rev|view-name' reverses a view, a Model uses get_absolute_url.
        """
        if isinstance(value, str):
            if value.startswith('func|'):
                value = self.getattr(value[5:])()
            elif value.startswith('rev|'):
                value = self.get_rev_url(value[4:], getattr(self, 'object', None))
        elif isinstance(value, models.Model):
            if hasattr(value, 'get_absolute_url'):
                return value.get_absolute_url()
            else:
                raise AttrCrudError('Model instance "{!r}" has no "get_absolute_url" method'.format(value))
        return value
    def get_rev_url(self, view_name, obj=None):
        """
        Reverse ``view_name``, first with no args, then with ``obj.pk``.

        :raise ReverseCrudError: when neither reverse attempt matches.
        """
        rev_tries = [{'viewname': view_name}]
        if isinstance(obj, models.Model):
            rev_tries.append({'viewname': view_name, 'args': (obj.pk,)})
        url = None
        for rev_try in rev_tries:
            try:
                url = reverse(**rev_try)
            except NoReverseMatch:
                pass
            else:
                break
        if url is None:
            raise ReverseCrudError('No reverse found for "{}"'.format(view_name))
        return url
    def process_button(self, button):
        """
        Normalise one button definition into a dict with a resolved 'url'
        or a processed 'dropdown' list.
        """
        if isinstance(button, str):
            # Bare string: use it both as the URL spec and (via
            # short_description lookup) as the button label.
            button = {
                'text': self.get_sub_attr(button) or button,
                'url': button,
            }
        if 'url' in button:
            button['url'] = self.get_url(button['url'])
        elif 'dropdown' in button:
            if isinstance(button['dropdown'], str) and button['dropdown'].startswith('func|'):
                fname = button['dropdown'].split('|', 1)[1]
                button['dropdown'] = self.getattr(fname)()
            button['dropdown'] = list(map(self.process_button, button['dropdown']))
        else:
            raise SetupCrudError('neither "url" nor "dropdown" found in button: {!r}'.format(button))
        return button
    @cached_property
    def label_ctx(self):
        """Context dict used to .format() titles and labels."""
        return dict(
            verbose_name=self._meta.verbose_name,
            verbose_name_plural=self._meta.verbose_name_plural,
            object=getattr(self, 'object', None),
        )
    def get_sub_attr(self, attr_name, obj=None, prop_name='short_description'):
        """
        get a property of an object's attribute by name.
        :param obj: object to look at (defaults to the view / self.object)
        :param attr_name: name to get short_description for
        :param prop_name: name of property to get, typically "short_description"
        :return: property value (formatted with label_ctx) or None
        """
        # Drop any "func|"/"rev|" style prefix before the lookup.
        attr_name = attr_name.split('|', 1)[-1]
        if obj:
            attr = getattr(obj, attr_name, None)
        else:
            attr = self.getattr(attr_name, None)
        if attr:
            v = getattr(attr, prop_name, None)
            if v is not None:
                return v.format(**self.label_ctx)
    def get_buttons(self):
        """Hook: return the raw button definitions for this view."""
        return self.buttons
    def get_context_data(self, **kwargs):
        """Add processed buttons, title and model names to the template context."""
        kwargs.update(
            buttons=self.process_buttons(self.get_buttons()),
            title=self.get_title(),
            model_name=self._meta.verbose_name,
            plural_model_name=self._meta.verbose_name_plural,
        )
        return super(RichViewMixin, self).get_context_data(**kwargs)
    def get_title(self):
        """Format self.title with label_ctx, defaulting to the plural model name."""
        return self.title.format(**self.label_ctx) if self.title else self._meta.verbose_name_plural
# When False, email addresses render as plain text instead of mailto: links.
RENDER_MAILTO = getattr(settings, 'RENDER_MAILTO', True)
# noinspection PyMethodMayBeStatic
class FormatMixin:
    """
    General purpose mixin for converting virtually anything to a string intelligently.
    Reasonable defaults are provided, but most things can be changed.
    """
    def fmt_none_empty(self, value):
        """Render a missing/empty value as an em dash."""
        return mark_safe('—')

    def fmt_field_choices(self, value, field):
        """Render the human-readable label for a choices field."""
        choice_dict = dict(field.choices)
        return choice_dict.get(value, value)

    def fmt_email_field(self, value):
        """Render an email address as a mailto: link (unless RENDER_MAILTO is off)."""
        if RENDER_MAILTO:
            return mark_safe('<a href="mailto:{0}" target="blank">{0}</a>'.format(escape(value)))
        else:
            return value

    def fmt_url_field(self, value):
        """Render a URL as a link opening in a new tab."""
        return mark_safe('<a href="{0}" target="blank">{0}</a>'.format(escape(value)))

    def fmt_bool(self, value):
        """Render a boolean as a tick/cross glyphicon."""
        icon = 'ok' if value else 'remove'
        return mark_safe('<span class="glyphicon glyphicon-{} bool"></span>'.format(icon))

    def fmt_iter(self, value):
        """Render an iterable as a comma-separated list of formatted items."""
        return ', '.join(map(self.format_value, value))

    def fmt_number(self, value):
        """Render a number using the locale's number format."""
        return number_format(value)

    def fmt_datetime(self, value):
        """Render a datetime using the locale's DATETIME_FORMAT."""
        return date_format(value, 'DATETIME_FORMAT')

    def fmt_date(self, value):
        """Render a date using the locale's default date format."""
        return date_format(value)

    def fmt_time(self, value):
        """Render a time using the locale's TIME_FORMAT."""
        return time_format(value, 'TIME_FORMAT')

    def format_value(self, value, field=None):  # noqa, cyclomatic complexity > 10
        """
        Convert ``value`` to a display string: callables are invoked first,
        then dispatch on the field type, then on the Python type of the value.
        Unrecognised values are returned unchanged.
        """
        # Fix: removed a leftover debug print() before the none/empty branch.
        if callable(value):
            value = value()
        elif value in (None, ''):
            return self.fmt_none_empty(value)
        elif field and len(field.choices) > 0:
            return self.fmt_field_choices(value, field)
        elif isinstance(field, models.EmailField):
            return self.fmt_email_field(value)
        elif isinstance(field, models.URLField):
            return self.fmt_url_field(value)
        elif isinstance(value, bool):
            return self.fmt_bool(value)
        elif isinstance(value, (list, tuple, QuerySet)):
            return self.fmt_iter(value)
        elif isinstance(value, (Decimal, float, int)):
            return self.fmt_number(value)
        elif isinstance(value, datetime.datetime):
            return self.fmt_datetime(value)
        elif isinstance(value, datetime.date):
            return self.fmt_date(value)
        elif isinstance(value, datetime.time):
            return self.fmt_time(value)
        return value
class ItemDisplayMixin(FormatMixin, RichViewMixin):
    """
    ItemDisplayMixin works with ListView and DetailView to simplify the process of listing and displaying a model.
    This class should be "mixed in" before ListView or DetailView so it can override their attributes.
    """
    #: list of references to attributes of instances of the model, items maybe
    #: * field names
    #: * references to related fields either using Django's "thing__related_ob" syntax or "thing.related_ob" syntax
    #: * references to functions in the class, they should be identified by "func|name_of_function" the function
    #: should take an instance of the model as it's only argument as in "def name_of_function(self, obj):..."
    #: * pattern for a reverse link to a page in the form at "rev|view-name|field_or_func" field_or_func
    #: may be any of the above options eg. "thing__related_ob", "thing.related_ob" or "func|name_of_function"
    #: * any of the above may be the second value in a tuple where the first value is a verbose name
    #: to use for the field if you don't like it's standard verbose name.
    display_items = []
    #: items prefixed "long|" (and TextFields by default) are considered "long",
    #: displayed full width rather than in columns, and are yielded by
    #: gen_long_props; everything else is yielded by gen_short_props
    #: field(s) to order the model by, if None no ordering is performed here
    order_by = None
    #: number of items to show on each page
    paginate_by = 20
    #: optional per-attribute extras merged into each property dict (keyed by attr name)
    extra_field_info = {}
    def __init__(self, *args, **kwargs):
        super(ItemDisplayMixin, self).__init__(*args, **kwargs)
        # cached list of the model's direct field names, used to walk relations
        self._field_names = [f.name for f in self._meta.fields]
        self._extra_attrs = []
    def get_queryset(self):
        """
        Overrides the standard get_queryset to apply ``order_by`` when set.
        :return: the (possibly ordered) queryset
        """
        qs = super(ItemDisplayMixin, self).get_queryset()
        if self.order_by:
            qs = qs.order_by(*self.order_by)
        return qs
    def get_detail_url(self, obj):
        """
        Only relevant on list view.
        :param obj: instance of model to get url for
        Returns: url of the object's detail page
        """
        if hasattr(obj, 'get_absolute_url'):
            return obj.get_absolute_url()
        else:
            raise AttrCrudError('Model instance "{!r}" has no "get_absolute_url" method'.format(obj))
    def gen_short_props(self, obj):
        """
        Generate short property data for a given object.
        :param obj: the object to to find generate attributes for
        :yield: dict of data about each attribute
        """
        for field_info in self._item_info:
            if not field_info.is_long:
                yield self._display_value(obj, field_info)
    def gen_long_props(self, obj):
        """
        Generate long property data for a given object.
        :param obj: the object to to find generate attributes for
        :yield: dict of data about each attribute
        """
        for field_info in self._item_info:
            if field_info.is_long:
                yield self._display_value(obj, field_info)
    @cached_property
    def _item_info(self):
        """
        Returns a list of FieldInfo instances.
        After the first call the list is cached to improve performance.
        :return: list of FieldInfo for each item in display_items
        """
        return list(map(self._getattr_info, self.get_display_items()))
    def get_display_items(self):
        """
        return display items. Override to conditionally alter display items list.
        :return: list of items to display
        """
        return self.display_items
    def _getattr_info(self, attr_name):
        """
        Finds the values for each item returned by _item_info.
        :param attr_name: value direct from display_items
        :return: FieldInfo instance
        """
        field_info = FieldInfo(attr_name)
        if field_info.is_func:
            field_info.verbose_name = field_info.verbose_name or self.get_sub_attr(field_info.attr_name)
            field_info.verbose_name = field_info.verbose_name or field_info.attr_name
            # NOTE(review): this passes 'help_text' as get_sub_attr's *obj*
            # parameter (not prop_name) -- looks like it always yields None;
            # confirm whether prop_name='help_text' was intended.
            field_info.help_text = field_info.help_text or self.get_sub_attr(field_info.attr_name, 'help_text')
            return field_info
        model, meta, field_names = self.model, self._meta, self._field_names
        attr_name_part = None
        # Walk relation segments ("thing__related" / "thing.related"),
        # following FKs so verbose name/help text come from the final model.
        for attr_name_part in self._split_attr_name(field_info.attr_name):
            if attr_name_part in field_names:
                field_info.field = meta.get_field_by_name(attr_name_part)[0]
                if field_info.field.rel:
                    model = field_info.field.rel.to
                    meta = model._meta
                    field_names = [f.name for f in meta.fields]
        self._find_verbose_name(field_info, model, attr_name_part)
        self._find_help_text(field_info, model, attr_name_part)
        # make TextFields "long"
        if field_info.is_long is None and isinstance(field_info.field, models.TextField):
            field_info.is_long = True
        return field_info
    def _find_verbose_name(self, field_info, model, attr_name_part):
        """Fill field_info.verbose_name from the model/field when not given."""
        # find verbose name if it's None so far
        if field_info.verbose_name is None:
            # priority_short_description has priority over field.verbose_name even when it's on a related model
            field_info.verbose_name = self.get_sub_attr(attr_name_part, model, 'priority_short_description')
        if not field_info.verbose_name:
            if field_info.field:
                field_info.verbose_name = field_info.field.verbose_name
            else:
                field_info.verbose_name = self.get_sub_attr(attr_name_part, model)
        if not field_info.verbose_name:
            field_info.verbose_name = field_info.attr_name
    def _find_help_text(self, field_info, model, attr_name_part):
        """Fill field_info.help_text from the model/field when not given."""
        if field_info.help_text is None:
            field_info.help_text = self.get_sub_attr(attr_name_part, model, 'priority_help_text')
        if not field_info.help_text:
            if field_info.field:
                field_info.help_text = field_info.field.help_text
            else:
                field_info.help_text = self.get_sub_attr(attr_name_part, model, 'help_text')
    @staticmethod
    def _split_attr_name(attr_name):
        """
        split an attribute name either on '__' or '.'
        """
        return attr_name.replace('__', '.').split('.')
    def _display_value(self, obj, field_info):
        """
        Generates a value for an attribute, optionally generate it's url and make it a link and returns it
        together with with it's verbose name.
        If the attribute name refers to a function the value is returned raw (after reversing if rev_view_name,
        otherwise it's processed by format_value.
        :param obj: any instance of the model to get the value from.
        :param field_info: is FieldInfo below
        :return: dict containing name, value, help_text and extra info
        """
        if field_info.is_func:
            value = self.getattr(field_info.attr_name)(obj)
        else:
            value = self._get_object_value(obj, field_info.attr_name)
        url = None
        if field_info.detail_view_link:
            url = self.get_detail_url(obj)
        elif field_info.rev_view_name and hasattr(value, 'pk'):
            url = self.get_rev_url(field_info.rev_view_name, value)
        if not field_info.is_func:
            value = self.format_value(value, field_info.field)
        if url:
            value = mark_safe('<a href="%s">%s</a>' % (url, escape(value)))
        return {
            'name': field_info.verbose_name,
            'value': value,
            'help_text': field_info.help_text or None,
            'extra': self.extra_field_info.get(field_info.attr_name, {})
        }
    def _get_object_value(self, obj, attr_name):
        """
        Chomp through attribute names from display_items to get the attribute or related attribute
        from the object
        :param obj: the object to find the attribute for
        :param attr_name: the attribute name
        :return: the attribute
        """
        for b in self._split_attr_name(attr_name):
            if obj:
                obj = getattr(obj, b)
        return obj
class FieldInfo(object):
    """
    Simple namespace describing one entry from ``display_items``.

    field: the model field for the attribute (None for functions)
    attr_name: the attribute name from display_items
    verbose_name: display label for the field
    help_text: help text for this field, None if not supplied
    rev_view_name: view name to reverse to get item url, None if no reverse link
    detail_view_link: whether the value links to the object's detail page
    is_long: boolean indicating if the field should be considered "long"
    is_func: whether the attribute refers to a function
    """
    field = None
    verbose_name = None
    help_text = None
    rev_view_name = None
    detail_view_link = False
    is_long = None
    is_func = False

    def __init__(self, attr_name):
        self.attr_name = attr_name
        if isinstance(self.attr_name, tuple):
            self._unpack_tuple()
        # "long|" / "short|" force the rendering width (mutually exclusive).
        if self.attr_name.startswith('long|'):
            self.is_long = True
            self.attr_name = self.attr_name[len('long|'):]
        elif self.attr_name.startswith('short|'):
            self.is_long = False
            self.attr_name = self.attr_name[len('short|'):]
        # "link|" wraps the value in a link to the detail view.
        if self.attr_name.startswith('link|'):
            self.detail_view_link = True
            self.attr_name = self.attr_name[len('link|'):]
        # "func|" marks the attribute as a callable.
        if self.attr_name.startswith('func|'):
            self.is_func = True
            self.attr_name = self.attr_name[len('func|'):]
        # "rev|view-name|attr" requests a reversed url for the value.
        if self.attr_name.startswith('rev|'):
            _, self.rev_view_name, self.attr_name = self.attr_name.split('|', 2)

    def _unpack_tuple(self):
        """Split a (verbose_name, attr_name[, help_text]) tuple into fields."""
        if len(self.attr_name) == 2:
            self.verbose_name, self.attr_name = self.attr_name
            self.help_text = False
        elif len(self.attr_name) == 3:
            self.verbose_name, self.attr_name, self.help_text = self.attr_name
        else:
            raise SetupCrudError('display_item tuples must be 2 or 3 in length, not %d' % len(self.attr_name))
class GetAttrMixin:
    """Look up attributes on ``self.ctrl`` first, then defer to the mixin chain."""
    def getattr(self, name, raise_ex=True):
        # NOTE(review): the parent (RichViewMixin.getattr) calls this parameter
        # ``alt`` and treats it as a fallback value / exception class, so
        # raise_ex=True is forwarded as a fallback *value* of True rather than
        # triggering a raise -- confirm this is intended.
        if hasattr(self.ctrl, name):
            return getattr(self.ctrl, name)
        return super(GetAttrMixin, self).getattr(name, raise_ex)
class RichListViewMixin(GetAttrMixin, ItemDisplayMixin):
    """List-view flavour: detail links are built relative to the controller's URL."""
    def get_detail_url(self, obj):
        # Route through the owning ctrl so list rows link to 'details/<pk>'.
        return self.ctrl.relative_url('details/{}'.format(obj.pk))
class RichDetailViewMixin(GetAttrMixin, ItemDisplayMixin):
    """Detail-view flavour; the page title is the object's string representation."""
    title = _('{object}')
class RichCreateViewMixin(RichViewMixin):
    """Create-view flavour with a 'Create <model>' page title."""
    title = _('Create {verbose_name}')
class RichUpdateViewMixin(RichViewMixin):
    """Update-view flavour with an 'Update <model>' page title."""
    title = _('Update {verbose_name}')
class RichDeleteViewMixin(RichViewMixin):
    """Delete-view flavour with a 'Delete <model>' page title."""
    title = _('Delete {verbose_name}')
| {
"content_hash": "de769997af14f1f0a711860f92061a48",
"timestamp": "",
"source": "github",
"line_count": 511,
"max_line_length": 115,
"avg_line_length": 37.56360078277886,
"alnum_prop": 0.6087001823391508,
"repo_name": "samuelcolvin/django-crud",
"id": "5f3c06583b34f5fa00434d97b10a229e707e44cc",
"size": "19195",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_crud/rich_views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1234"
},
{
"name": "HTML",
"bytes": "9515"
},
{
"name": "JavaScript",
"bytes": "76"
},
{
"name": "Python",
"bytes": "62167"
},
{
"name": "Shell",
"bytes": "418"
}
],
"symlink_target": ""
} |
"""
A Python "serializer". Doesn't do much serializing per se -- just converts to
and from basic Python data types (lists, dicts, strings, etc.). Useful as a basis for
other serializers.
"""
from __future__ import unicode_literals
from django.conf import settings
from django.core.serializers import base
from django.db import models, DEFAULT_DB_ALIAS
from django.utils.encoding import smart_unicode, is_protected_type
class Serializer(base.Serializer):
    """
    Serializes a QuerySet to basic Python objects.
    """
    internal_use_only = True
    def start_serialization(self):
        """Reset state: no current object, empty output list."""
        self._current = None
        self.objects = []
    def end_serialization(self):
        """Nothing to finalise; the output accumulates in self.objects."""
        pass
    def start_object(self, obj):
        """Begin collecting field values for a new object."""
        self._current = {}
    def end_object(self, obj):
        """Append the finished object dict and clear the working state."""
        self.objects.append(self.get_dump_object(obj))
        self._current = None
    def get_dump_object(self, obj):
        """Return the pk/model/fields dict for one model instance."""
        return {
            "pk": smart_unicode(obj._get_pk_val(), strings_only=True),
            "model": smart_unicode(obj._meta),
            "fields": self._current
        }
    def handle_field(self, obj, field):
        """Record a plain field's value under its field name."""
        value = field._get_val_from_obj(obj)
        # Protected types (i.e., primitives like None, numbers, dates,
        # and Decimals) are passed through as is. All other values are
        # converted to string first.
        if is_protected_type(value):
            self._current[field.name] = value
        else:
            self._current[field.name] = field.value_to_string(obj)
    def handle_fk_field(self, obj, field):
        """Record a foreign key as a natural key (when enabled) or the raw pk."""
        if self.use_natural_keys and hasattr(field.rel.to, 'natural_key'):
            related = getattr(obj, field.name)
            if related:
                value = related.natural_key()
            else:
                value = None
        else:
            value = getattr(obj, field.get_attname())
        self._current[field.name] = value
    def handle_m2m_field(self, obj, field):
        """Record an m2m field as a list of natural keys or pks (auto-created through only)."""
        if field.rel.through._meta.auto_created:
            if self.use_natural_keys and hasattr(field.rel.to, 'natural_key'):
                m2m_value = lambda value: value.natural_key()
            else:
                m2m_value = lambda value: smart_unicode(value._get_pk_val(), strings_only=True)
            self._current[field.name] = [m2m_value(related)
                               for related in getattr(obj, field.name).iterator()]
    def getvalue(self):
        """Return the accumulated list of serialized objects."""
        return self.objects
def Deserializer(object_list, **options):
    """
    Deserialize simple Python objects back into Django ORM instances.
    It's expected that you pass the Python objects themselves (instead of a
    stream or a string) to the constructor
    """
    db = options.pop('using', DEFAULT_DB_ALIAS)
    # Called for its side effect of populating the app cache before the
    # model lookups below; the return value is intentionally unused.
    models.get_apps()
    for d in object_list:
        # Look up the model and start building a dict of data for it.
        Model = _get_model(d["model"])
        data = {Model._meta.pk.attname : Model._meta.pk.to_python(d["pk"])}
        m2m_data = {}
        # Handle each field
        for (field_name, field_value) in d["fields"].iteritems():
            # Python 2 code path: decode bytestrings to unicode up front.
            if isinstance(field_value, str):
                field_value = smart_unicode(field_value, options.get("encoding", settings.DEFAULT_CHARSET), strings_only=True)
            field = Model._meta.get_field(field_name)
            # Handle M2M relations
            if field.rel and isinstance(field.rel, models.ManyToManyRel):
                if hasattr(field.rel.to._default_manager, 'get_by_natural_key'):
                    def m2m_convert(value):
                        # An iterable value is a natural key; a scalar is a raw pk.
                        if hasattr(value, '__iter__'):
                            return field.rel.to._default_manager.db_manager(db).get_by_natural_key(*value).pk
                        else:
                            return smart_unicode(field.rel.to._meta.pk.to_python(value))
                else:
                    m2m_convert = lambda v: smart_unicode(field.rel.to._meta.pk.to_python(v))
                # M2M values are collected separately; they can only be
                # assigned after the instance has been saved by the caller.
                m2m_data[field.name] = [m2m_convert(pk) for pk in field_value]
            # Handle FK fields
            elif field.rel and isinstance(field.rel, models.ManyToOneRel):
                if field_value is not None:
                    if hasattr(field.rel.to._default_manager, 'get_by_natural_key'):
                        if hasattr(field_value, '__iter__'):
                            obj = field.rel.to._default_manager.db_manager(db).get_by_natural_key(*field_value)
                            value = getattr(obj, field.rel.field_name)
                            # If this is a natural foreign key to an object that
                            # has a FK/O2O as the foreign key, use the FK value
                            if field.rel.to._meta.pk.rel:
                                value = value.pk
                        else:
                            value = field.rel.to._meta.get_field(field.rel.field_name).to_python(field_value)
                        data[field.attname] = value
                    else:
                        data[field.attname] = field.rel.to._meta.get_field(field.rel.field_name).to_python(field_value)
                else:
                    data[field.attname] = None
            # Handle all other fields
            else:
                data[field.name] = field.to_python(field_value)
        # The caller is responsible for saving and for applying m2m_data.
        yield base.DeserializedObject(Model(**data), m2m_data)
def _get_model(model_identifier):
    """
    Helper to look up a model from an "app_label.module_name" string.

    Raises ``base.DeserializationError`` when the identifier is malformed or
    does not name a known model.
    """
    model = None
    try:
        model = models.get_model(*model_identifier.split("."))
    except TypeError:
        # Wrong number of parts in the identifier; treated as "not found".
        pass
    if model is None:
        raise base.DeserializationError("Invalid model identifier: '%s'" % model_identifier)
    return model
| {
"content_hash": "a5e65d41c1b9213705a681acdb8174fe",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 126,
"avg_line_length": 40.2027972027972,
"alnum_prop": 0.572621325447904,
"repo_name": "aleida/django",
"id": "333161c929560b5a1b650056fe23115b7763a10e",
"size": "5749",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "django/core/serializers/python.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "50207"
},
{
"name": "JavaScript",
"bytes": "89078"
},
{
"name": "Python",
"bytes": "8135526"
},
{
"name": "Shell",
"bytes": "11901"
}
],
"symlink_target": ""
} |
from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, overload
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
T = TypeVar("T")
# Signature of the optional ``cls`` callback that post-processes responses.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
# Shared serializer for URL/query/header encoding in the request builders.
_SERIALIZER = Serializer()
# The service is authoritative; skip client-side parameter validation.
_SERIALIZER.client_side_validation = False
def build_create_or_update_request(
    resource_group_name: str, availability_set_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the PUT request that creates or updates an availability set."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))  # type: str
    content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
    accept = _headers.pop("Accept", "application/json")
    # URL: substitute the serialized path arguments into the route template.
    template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}",
    )  # pylint: disable=line-too-long
    _url = _format_url_section(
        template,
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        availabilitySetName=_SERIALIZER.url("availability_set_name", availability_set_name, "str"),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
    )
    # Query parameters and headers.
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    if content_type is not None:
        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
def build_update_request(
    resource_group_name: str, availability_set_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the PATCH request that updates an availability set."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))  # type: str
    content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
    accept = _headers.pop("Accept", "application/json")
    # URL: substitute the serialized path arguments into the route template.
    template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}",
    )  # pylint: disable=line-too-long
    _url = _format_url_section(
        template,
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        availabilitySetName=_SERIALIZER.url("availability_set_name", availability_set_name, "str"),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
    )
    # Query parameters and headers.
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    if content_type is not None:
        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs)
def build_delete_request(
    resource_group_name: str, availability_set_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the DELETE request for an availability set (no body, no headers)."""
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))  # type: str
    # URL: substitute the serialized path arguments into the route template.
    template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}",
    )  # pylint: disable=line-too-long
    _url = _format_url_section(
        template,
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        availabilitySetName=_SERIALIZER.url("availability_set_name", availability_set_name, "str"),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
    )
    # Query parameters.
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    return HttpRequest(method="DELETE", url=_url, params=_params, **kwargs)
def build_get_request(
    resource_group_name: str, availability_set_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the GET request retrieving a single availability set."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))  # type: str
    accept = _headers.pop("Accept", "application/json")
    # URL: substitute the serialized path arguments into the route template.
    template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}",
    )  # pylint: disable=line-too-long
    _url = _format_url_section(
        template,
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        availabilitySetName=_SERIALIZER.url("availability_set_name", availability_set_name, "str"),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
    )
    # Query parameters and headers.
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_by_subscription_request(
    subscription_id: str, *, expand: Optional[str] = None, **kwargs: Any
) -> HttpRequest:
    """Build the GET request listing availability sets across a subscription."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))  # type: str
    accept = _headers.pop("Accept", "application/json")
    # URL: only the subscription id is substituted for this route.
    template = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/availabilitySets")
    _url = _format_url_section(
        template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
    )
    # Query parameters ($expand is optional) and headers.
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    if expand is not None:
        _params["$expand"] = _SERIALIZER.query("expand", expand, "str")
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_request(resource_group_name: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
    """Build the GET request listing availability sets within a resource group."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))  # type: str
    accept = _headers.pop("Accept", "application/json")
    # URL: substitute the serialized path arguments into the route template.
    template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets",
    )  # pylint: disable=line-too-long
    _url = _format_url_section(
        template,
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
    )
    # Query parameters and headers.
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_available_sizes_request(
    resource_group_name: str, availability_set_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the GET request listing VM sizes available for an availability set."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))  # type: str
    accept = _headers.pop("Accept", "application/json")
    # URL: substitute the serialized path arguments into the route template.
    template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}/vmSizes",
    )  # pylint: disable=line-too-long
    _url = _format_url_section(
        template,
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        availabilitySetName=_SERIALIZER.url("availability_set_name", availability_set_name, "str"),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
    )
    # Query parameters and headers.
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class AvailabilitySetsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.compute.v2019_07_01.ComputeManagementClient`'s
:attr:`availability_sets` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
    # Overload: ``parameters`` supplied as a typed model (serialized to JSON).
    @overload
    def create_or_update(
        self,
        resource_group_name: str,
        availability_set_name: str,
        parameters: _models.AvailabilitySet,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.AvailabilitySet:
        """Create or update an availability set.
        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param availability_set_name: The name of the availability set. Required.
        :type availability_set_name: str
        :param parameters: Parameters supplied to the Create Availability Set operation. Required.
        :type parameters: ~azure.mgmt.compute.v2019_07_01.models.AvailabilitySet
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: AvailabilitySet or the result of cls(response)
        :rtype: ~azure.mgmt.compute.v2019_07_01.models.AvailabilitySet
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    # Overload: ``parameters`` supplied as a raw IO stream (sent verbatim).
    @overload
    def create_or_update(
        self,
        resource_group_name: str,
        availability_set_name: str,
        parameters: IO,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.AvailabilitySet:
        """Create or update an availability set.
        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param availability_set_name: The name of the availability set. Required.
        :type availability_set_name: str
        :param parameters: Parameters supplied to the Create Availability Set operation. Required.
        :type parameters: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: AvailabilitySet or the result of cls(response)
        :rtype: ~azure.mgmt.compute.v2019_07_01.models.AvailabilitySet
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    @distributed_trace
    def create_or_update(
        self,
        resource_group_name: str,
        availability_set_name: str,
        parameters: Union[_models.AvailabilitySet, IO],
        **kwargs: Any
    ) -> _models.AvailabilitySet:
        """Create or update an availability set.
        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param availability_set_name: The name of the availability set. Required.
        :type availability_set_name: str
        :param parameters: Parameters supplied to the Create Availability Set operation. Is either a
         model type or a IO type. Required.
        :type parameters: ~azure.mgmt.compute.v2019_07_01.models.AvailabilitySet or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: AvailabilitySet or the result of cls(response)
        :rtype: ~azure.mgmt.compute.v2019_07_01.models.AvailabilitySet
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Status-code -> exception mapping; callers may extend via ``error_map``.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))  # type: str
        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.AvailabilitySet]
        content_type = content_type or "application/json"
        # Model instances are serialized to JSON; IO/bytes bodies are sent verbatim.
        _json = None
        _content = None
        if isinstance(parameters, (IO, bytes)):
            _content = parameters
        else:
            _json = self._serialize.body(parameters, "AvailabilitySet")
        request = build_create_or_update_request(
            resource_group_name=resource_group_name,
            availability_set_name=availability_set_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self.create_or_update.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore
        pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        # 200 is the only documented success status for this operation.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize("AvailabilitySet", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    create_or_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}"}  # type: ignore
    # Overload: ``parameters`` supplied as a typed model (serialized to JSON).
    @overload
    def update(
        self,
        resource_group_name: str,
        availability_set_name: str,
        parameters: _models.AvailabilitySetUpdate,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.AvailabilitySet:
        """Update an availability set.
        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param availability_set_name: The name of the availability set. Required.
        :type availability_set_name: str
        :param parameters: Parameters supplied to the Update Availability Set operation. Required.
        :type parameters: ~azure.mgmt.compute.v2019_07_01.models.AvailabilitySetUpdate
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: AvailabilitySet or the result of cls(response)
        :rtype: ~azure.mgmt.compute.v2019_07_01.models.AvailabilitySet
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    # Overload: ``parameters`` supplied as a raw IO stream (sent verbatim).
    @overload
    def update(
        self,
        resource_group_name: str,
        availability_set_name: str,
        parameters: IO,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.AvailabilitySet:
        """Update an availability set.
        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param availability_set_name: The name of the availability set. Required.
        :type availability_set_name: str
        :param parameters: Parameters supplied to the Update Availability Set operation. Required.
        :type parameters: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: AvailabilitySet or the result of cls(response)
        :rtype: ~azure.mgmt.compute.v2019_07_01.models.AvailabilitySet
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    @distributed_trace
    def update(
        self,
        resource_group_name: str,
        availability_set_name: str,
        parameters: Union[_models.AvailabilitySetUpdate, IO],
        **kwargs: Any
    ) -> _models.AvailabilitySet:
        """Update an availability set.
        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param availability_set_name: The name of the availability set. Required.
        :type availability_set_name: str
        :param parameters: Parameters supplied to the Update Availability Set operation. Is either a
         model type or a IO type. Required.
        :type parameters: ~azure.mgmt.compute.v2019_07_01.models.AvailabilitySetUpdate or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: AvailabilitySet or the result of cls(response)
        :rtype: ~azure.mgmt.compute.v2019_07_01.models.AvailabilitySet
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Status-code -> exception mapping; callers may extend via ``error_map``.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))  # type: str
        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.AvailabilitySet]
        content_type = content_type or "application/json"
        # Model instances are serialized to JSON; IO/bytes bodies are sent verbatim.
        _json = None
        _content = None
        if isinstance(parameters, (IO, bytes)):
            _content = parameters
        else:
            _json = self._serialize.body(parameters, "AvailabilitySetUpdate")
        request = build_update_request(
            resource_group_name=resource_group_name,
            availability_set_name=availability_set_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self.update.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore
        pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        # 200 is the only documented success status for this operation.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize("AvailabilitySet", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}"}  # type: ignore
    @distributed_trace
    def delete(  # pylint: disable=inconsistent-return-statements
        self, resource_group_name: str, availability_set_name: str, **kwargs: Any
    ) -> None:
        """Delete an availability set.
        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param availability_set_name: The name of the availability set. Required.
        :type availability_set_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None or the result of cls(response)
        :rtype: None
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Status-code -> exception mapping; callers may extend via ``error_map``.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))  # type: str
        cls = kwargs.pop("cls", None)  # type: ClsType[None]
        request = build_delete_request(
            resource_group_name=resource_group_name,
            availability_set_name=availability_set_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.delete.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore
        pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        # 200 (deleted) and 204 (did not exist) are both treated as success.
        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    delete.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}"}  # type: ignore
    @distributed_trace
    def get(self, resource_group_name: str, availability_set_name: str, **kwargs: Any) -> _models.AvailabilitySet:
        """Retrieves information about an availability set.
        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param availability_set_name: The name of the availability set. Required.
        :type availability_set_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: AvailabilitySet or the result of cls(response)
        :rtype: ~azure.mgmt.compute.v2019_07_01.models.AvailabilitySet
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Status-code -> exception mapping; callers may extend via ``error_map``.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))  # type: str
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.AvailabilitySet]
        request = build_get_request(
            resource_group_name=resource_group_name,
            availability_set_name=availability_set_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.get.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore
        pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        # 200 is the only documented success status for this operation.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize("AvailabilitySet", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}"}  # type: ignore
    @distributed_trace
    def list_by_subscription(self, expand: Optional[str] = None, **kwargs: Any) -> Iterable["_models.AvailabilitySet"]:
        """Lists all availability sets in a subscription.
        :param expand: The expand expression to apply to the operation. Allowed values are
         'instanceView'. Default value is None.
        :type expand: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either AvailabilitySet or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2019_07_01.models.AvailabilitySet]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))  # type: str
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.AvailabilitySetListResult]
        # Status-code -> exception mapping; callers may extend via ``error_map``.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        def prepare_request(next_link=None):
            # First page: build the canonical request. Later pages: follow the
            # service-supplied next_link, re-applying the client's api-version.
            if not next_link:
                request = build_list_by_subscription_request(
                    subscription_id=self._config.subscription_id,
                    expand=expand,
                    api_version=api_version,
                    template_url=self.list_by_subscription.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
                # NOTE(review): method is already "GET"; this re-assignment is a
                # no-op retained from the code generator.
                request.method = "GET"
            return request
        def extract_data(pipeline_response):
            # Deserialize one page; return (next_link, iterator of page items).
            deserialized = self._deserialize("AvailabilitySetListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetch a single page, raising mapped errors on non-200 responses.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(get_next, extract_data)
    list_by_subscription.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/availabilitySets"}  # type: ignore
@distributed_trace
    def list(self, resource_group_name: str, **kwargs: Any) -> Iterable["_models.AvailabilitySet"]:
        """Lists all availability sets in a resource group.

        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either AvailabilitySet or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2019_07_01.models.AvailabilitySet]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        # The caller may pin a specific api-version; otherwise fall back to the
        # version this operations class was generated against.
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))  # type: str
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.AvailabilitySetListResult]
        # Standard ARM error mapping: translate well-known status codes into
        # typed azure-core exceptions; callers may extend/override via kwargs.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # Build the request for the first page (from the route template),
            # or rebuild it from the service-supplied continuation link.
            if not next_link:
                request = build_list_request(
                    resource_group_name=resource_group_name,
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=self.list.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                # Re-encode the continuation URL's query parameters so they
                # are safe to send again.
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                # Force the client's configured api-version onto the
                # continuation call.
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # Deserialize one page into (continuation token, item iterator).
            deserialized = self._deserialize("AvailabilitySetListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                # Apply the caller-provided transform to the page's items.
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Fetch a single page; anything other than HTTP 200 is surfaced
            # as an azure-core exception via the error map.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response

        # ItemPaged lazily drives get_next/extract_data as the caller iterates.
        return ItemPaged(get_next, extract_data)

    list.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets"}  # type: ignore
@distributed_trace
    def list_available_sizes(
        self, resource_group_name: str, availability_set_name: str, **kwargs: Any
    ) -> Iterable["_models.VirtualMachineSize"]:
        """Lists all available virtual machine sizes that can be used to create a new virtual machine in
        an existing availability set.

        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param availability_set_name: The name of the availability set. Required.
        :type availability_set_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either VirtualMachineSize or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2019_07_01.models.VirtualMachineSize]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        # The caller may pin a specific api-version; otherwise fall back to
        # the version this operations class was generated against.
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2019-07-01"))  # type: str
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.VirtualMachineSizeListResult]
        # Standard ARM error mapping; callers may extend/override via kwargs.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page: build from the route template. Continuation pages:
            # rebuild from the service-supplied link.
            if not next_link:
                request = build_list_available_sizes_request(
                    resource_group_name=resource_group_name,
                    availability_set_name=availability_set_name,
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=self.list_available_sizes.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                # Re-encode the continuation URL's query parameters so they
                # are safe to send again.
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                # Force the client's configured api-version onto the
                # continuation call.
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            deserialized = self._deserialize("VirtualMachineSizeListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                # Apply the caller-provided transform to the page's items.
                list_of_elem = cls(list_of_elem)
            # This operation returns everything in a single response: report
            # no continuation link, so paging stops after the first page.
            return None, iter(list_of_elem)

        def get_next(next_link=None):
            # Fetch a single page; non-200 responses are raised as typed
            # azure-core errors via the error map.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response

        # ItemPaged lazily drives get_next/extract_data as the caller iterates.
        return ItemPaged(get_next, extract_data)

    list_available_sizes.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}/vmSizes"}  # type: ignore
| {
"content_hash": "0c76e5d4276841d59cef3c652a930fd5",
"timestamp": "",
"source": "github",
"line_count": 898,
"max_line_length": 204,
"avg_line_length": 44.770601336302896,
"alnum_prop": 0.6386180479554273,
"repo_name": "Azure/azure-sdk-for-python",
"id": "057babf218d14889164c4a9ab82412e069ed2166",
"size": "40704",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_07_01/operations/_availability_sets_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from room import Room

# Room data for Westbrook house 56.
# NOTE(review): the module-level name ``r`` is kept as-is, since the loader
# of *.room.py files presumably reads these attributes off ``r`` — confirm.
r = Room()
r.roomname = 'house56'
r.roomdesc = """
a house it is orange
"""
# Exit label -> destination room key.
r.exits = {'west brook': 'westbrook'}
# Things the player can "look" at in this room.
r.looktargets = {'door': 'its closed\n\n'}
| {
"content_hash": "ca770e0a9cb88aab9cf6aa8bbf4f2c60",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 43,
"avg_line_length": 20.11111111111111,
"alnum_prop": 0.6353591160220995,
"repo_name": "elstupido/rpg",
"id": "f3fcbcb37e6083871d380262d7046c6499d826ca",
"size": "181",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rooms/first op/westbrookshouse56.room.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "56488"
}
],
"symlink_target": ""
} |
import sys
import warnings
import itertools
import operator
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from .base import center_data
from ..utils import array2d, atleast2d_or_csc
from ..cross_validation import _check_cv as check_cv
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import xrange
from ..utils.extmath import safe_sparse_dot
from . import cd_fast
###############################################################################
# Paths functions
def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
""" Compute the grid of alpha values for elastic net parameter search
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
y : ndarray, shape = (n_samples,)
Target values
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed.
l1_ratio : float
The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``.
For ``l1_ratio = 0`` the penalty is an L2 penalty. ``For
l1_ratio = 1`` it is an L1 penalty. For ``0 < l1_ratio <
1``, the penalty is a combination of L1 and L2.
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : bool
Fit or not an intercept
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
"""
if Xy is None:
X = atleast2d_or_csc(X, copy=(copy_X and fit_intercept and not
sparse.isspmatrix(X)))
if not sparse.isspmatrix(X):
# X can be touched inplace thanks to the above line
X, y, _, _, _ = center_data(X, y, fit_intercept,
normalize, copy=False)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
n_samples = X.shape[0]
else:
n_samples = len(y)
alpha_max = np.abs(Xy).max() / (n_samples * l1_ratio)
alphas = np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
num=n_alphas)[::-1]
return alphas
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
               precompute='auto', Xy=None, fit_intercept=None,
               normalize=None, copy_X=True, coef_init=None,
               verbose=False, return_models=False,
               **params):
    """Compute the Lasso path with coordinate descent.

    The optimization objective for the Lasso is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    This is a thin wrapper around :func:`enet_path` with ``l1_ratio=1``,
    i.e. a pure L1 penalty.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data. Pass directly as Fortran-contiguous data to avoid
        unnecessary memory duplication.
    y : ndarray, shape = (n_samples,)
        Target values.
    eps : float, optional
        Length of the path: ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.
    n_alphas : int, optional
        Number of alphas along the regularization path.
    alphas : ndarray, optional
        List of alphas where to compute the models; computed automatically
        when ``None``.
    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up calculations.
        ``'auto'`` lets the implementation decide. The Gram matrix can also
        be passed as argument.
    Xy : array-like, optional
        ``np.dot(X.T, y)`` precomputed; useful only when the Gram matrix is
        precomputed.
    fit_intercept : bool
        Fit or not an intercept.
        WARNING : will be deprecated in 0.16.
    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.
        WARNING : will be deprecated in 0.16.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    coef_init : array, shape (n_features, ) | None
        The initial values of the coefficients.
    verbose : bool or integer
        Amount of verbosity.
    return_models : boolean, optional, default False
        If ``True``, return a list of fitted models along the path instead
        of ``(alphas, coefs, dual_gaps)``. Returning the model list will be
        removed in version 0.16.
    params : kwargs
        Keyword arguments passed to the coordinate descent solver.

    Returns
    -------
    models : a list of models along the regularization path
        (only returned when ``return_models`` is ``True``).
    alphas : array, shape: [n_alphas + 1]
        The alphas along the path where models are computed
        (only returned, along with ``coefs`` and ``dual_gaps``, when
        ``return_models`` is ``False``).
    coefs : shape (n_features, n_alphas + 1)
        Coefficients along the path (only when ``return_models`` is
        ``False``).
    dual_gaps : shape (n_alphas + 1)
        The dual gaps at the end of the optimization for each alpha
        (only when ``return_models`` is ``False``).

    Notes
    -----
    See examples/linear_model/plot_lasso_coordinate_descent_path.py for an
    example. To avoid unnecessary memory duplication, pass the X argument
    as a Fortran-contiguous numpy array.

    Note that in certain cases the Lars solver may be significantly faster:
    linear interpolation of the :func:`lars_path` output can retrieve model
    coefficients between the alphas it returns.

    Deprecation Notice: Setting ``return_models`` to ``False`` makes this
    function return output in the style used by :func:`lars_path`. This
    will become the norm as of version 0.16; ``return_models=True`` keeps
    the old list-of-models output until then.

    See also
    --------
    lars_path
    Lasso
    LassoLars
    LassoCV
    LassoLarsCV
    sklearn.decomposition.sparse_encode
    """
    # The lasso is an elastic net whose penalty is entirely on the L1 side.
    return enet_path(X, y, l1_ratio=1.0, eps=eps, n_alphas=n_alphas,
                     alphas=alphas, precompute=precompute, Xy=Xy,
                     fit_intercept=fit_intercept, normalize=normalize,
                     copy_X=copy_X, coef_init=coef_init, verbose=verbose,
                     return_models=return_models, **params)
def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
              precompute='auto', Xy=None, fit_intercept=True,
              normalize=False, copy_X=True, coef_init=None,
              verbose=False, return_models=False,
              **params):
    """Compute Elastic-Net path with coordinate descent

    The Elastic Net optimization function is::

        1 / (2 * n_samples) * ||y - Xw||^2_2 +
        + alpha * l1_ratio * ||w||_1
        + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data. Pass directly as Fortran-contiguous data to avoid
        unnecessary memory duplication

    y : ndarray, shape = (n_samples,)
        Target values

    l1_ratio : float, optional
        float between 0 and 1 passed to ElasticNet (scaling between
        l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso

    eps : float
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``

    n_alphas : int, optional
        Number of alphas along the regularization path

    alphas : ndarray, optional
        List of alphas where to compute the models.
        If None alphas are set automatically

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    Xy : array-like, optional
        Xy = np.dot(X.T, y) that can be precomputed. It is useful
        only when the Gram matrix is precomputed.

    fit_intercept : bool
        Fit or not an intercept.
        WARNING : will be deprecated in 0.16

    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.
        WARNING : will be deprecated in 0.16

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    coef_init : array, shape (n_features, ) | None
        The initial values of the coefficients.

    verbose : bool or integer
        Amount of verbosity

    return_models : boolean, optional, default False
        If ``True``, the function will return list of models. Setting it
        to ``False`` will change the function output returning the values
        of the alphas and the coefficients along the path. Returning the
        model list will be removed in version 0.16.

    params : kwargs
        keyword arguments passed to the coordinate descent solver.

    Returns
    -------
    models : a list of models along the regularization path
        (Is returned only if ``return_models`` is set ``True``).

    alphas : array, shape: [n_alphas + 1]
        The alphas along the path where models are computed.
        (Is returned, along with ``coefs``, when ``return_models`` is set
        to ``False``)

    coefs : shape (n_features, n_alphas + 1)
        Coefficients along the path.
        (Is returned, along with ``alphas``, when ``return_models`` is set
        to ``False``).

    dual_gaps : shape (n_alphas + 1)
        The dual gaps at the end of the optimization for each alpha.
        (Is returned, along with ``alphas``, when ``return_models`` is set
        to ``False``).

    Notes
    -----
    See examples/plot_lasso_coordinate_descent_path.py for an example.

    Deprecation Notice: Setting ``return_models`` to ``False`` will make
    the Lasso Path return an output in the style used by :func:`lars_path`.
    This will become the norm as of version 0.16. Leaving ``return_models``
    set to `True` will let the function return a list of models as before.

    See also
    --------
    ElasticNet
    ElasticNetCV
    """
    if return_models:
        warnings.warn("Use enet_path(return_models=False), as it returns the"
                      " coefficients and alphas instead of just a list of"
                      " models as previously `lasso_path`/`enet_path` did."
                      " `return_models` will eventually be removed in 0.16,"
                      " after which, returning alphas and coefs"
                      " will become the norm.",
                      DeprecationWarning, stacklevel=2)
    if normalize is True:
        warnings.warn("normalize param will be removed in 0.16."
                      " Intercept fitting and feature normalization will be"
                      " done in estimators.",
                      DeprecationWarning, stacklevel=2)
    else:
        normalize = False
    if fit_intercept is True or fit_intercept is None:
        warnings.warn("fit_intercept param will be removed in 0.16."
                      " Intercept fitting and feature normalization will be"
                      " done in estimators.",
                      DeprecationWarning, stacklevel=2)
    if fit_intercept is None:
        fit_intercept = True
    # Fortran order: the coordinate descent solver walks X column by column.
    X = atleast2d_or_csc(X, dtype=np.float64, order='F',
                         copy=copy_X and fit_intercept)
    n_samples, n_features = X.shape
    if sparse.isspmatrix(X):
        if 'X_mean' in params:
            # As sparse matrices are not actually centered we need this
            # to be passed to the CD solver.
            X_sparse_scaling = params['X_mean'] / params['X_std']
        else:
            # BUG FIX: without centering information the implicit centering
            # offset must be zero. The previous np.ones(n_features) wrongly
            # shifted every feature by one in the sparse solver.
            X_sparse_scaling = np.zeros(n_features)
    X, y, X_mean, y_mean, X_std, precompute, Xy = \
        _pre_fit(X, y, Xy, precompute, normalize, fit_intercept, copy=False)
    n_samples = X.shape[0]
    if alphas is None:
        # No need to normalize or fit_intercept: it has been done above.
        alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,
                             fit_intercept=False, eps=eps, n_alphas=n_alphas,
                             normalize=False, copy_X=False)
    else:
        alphas = np.sort(alphas)[::-1]  # make sure alphas are properly ordered
        n_alphas = len(alphas)
    if coef_init is None:
        coef_ = np.zeros(n_features, dtype=np.float64)
    else:
        coef_ = coef_init
    models = []
    coefs = np.empty((n_features, n_alphas), dtype=np.float64)
    dual_gaps = np.empty(n_alphas)
    # Solver options forwarded through **params, with the solver defaults.
    tol = params.get('tol', 1e-4)
    positive = params.get('positive', False)
    max_iter = params.get('max_iter', 1000)
    for i, alpha in enumerate(alphas):
        # Scale the penalties the way the cython solver expects them.
        l1_reg = alpha * l1_ratio * n_samples
        l2_reg = alpha * (1.0 - l1_ratio) * n_samples
        if sparse.isspmatrix(X):
            coef_, dual_gap_, eps_ = cd_fast.sparse_enet_coordinate_descent(
                coef_, l1_reg, l2_reg, X.data, X.indices,
                X.indptr, y, X_sparse_scaling,
                max_iter, tol, positive)
        else:
            coef_, dual_gap_, eps_ = cd_fast.enet_coordinate_descent(
                coef_, l1_reg, l2_reg, X, y, max_iter, tol, positive)
        if dual_gap_ > eps_:
            warnings.warn('Objective did not converge.' +
                          ' You might want' +
                          ' to increase the number of iterations')
        coefs[:, i] = coef_
        dual_gaps[i] = dual_gap_
        if return_models:
            model = ElasticNet(
                alpha=alpha, l1_ratio=l1_ratio,
                fit_intercept=fit_intercept if sparse.isspmatrix(X) else False,
                precompute=precompute)
            model.coef_ = coefs[:, i]
            # BUG FIX: use this alpha's dual gap. dual_gaps[-1] is still an
            # uninitialized np.empty slot on every iteration but the last.
            model.dual_gap_ = dual_gaps[i]
            if fit_intercept and not sparse.isspmatrix(X):
                model.fit_intercept = True
                model._set_intercept(X_mean, y_mean, X_std)
            models.append(model)
        if verbose:
            if verbose > 2:
                print(model)
            elif verbose > 1:
                print('Path: %03i out of %03i' % (i, n_alphas))
            else:
                sys.stderr.write('.')
    if return_models:
        return models
    else:
        return alphas, coefs, dual_gaps
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel, RegressorMixin):
    """Linear Model trained with L1 and L2 prior as regularizer

    Minimizes the objective function::

        1 / (2 * n_samples) * ||y - Xw||^2_2 +
        + alpha * l1_ratio * ||w||_1
        + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2

    If you are interested in controlling the L1 and L2 penalty
    separately, keep in mind that this is equivalent to::

        a * L1 + b * L2

    where::

        alpha = a + b and l1_ratio = a / (a + b)

    The parameter l1_ratio corresponds to alpha in the glmnet R package while
    alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
    = 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
    unless you supply your own sequence of alpha.

    Parameters
    ----------
    alpha : float
        Constant that multiplies the penalty terms. Defaults to 1.0
        See the notes for the exact mathematical meaning of this
        parameter.
        ``alpha = 0`` is equivalent to an ordinary least square, solved
        by the :class:`LinearRegression` object. For numerical
        reasons, using ``alpha = 0`` with the Lasso object is not advised
        and you should prefer the LinearRegression object.

    l1_ratio : float
        The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
        ``l1_ratio = 0`` the penalty is an L2 penalty. ``For l1_ratio = 1`` it
        is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
        combination of L1 and L2.

    fit_intercept : bool
        Whether the intercept should be estimated or not. If ``False``, the
        data is assumed to be already centered.

    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument. For sparse input
        this option is always ``True`` to preserve sparsity.

    max_iter : int, optional
        The maximum number of iterations

    copy_X : boolean, optional, default False
        If ``True``, X will be copied; else, it may be overwritten.

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    warm_start : bool, optional
        When set to ``True``, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.

    positive : bool, optional
        When set to ``True``, forces the coefficients to be positive.

    Attributes
    ----------
    ``coef_`` : array, shape = (n_features,) | (n_targets, n_features)
        parameter vector (w in the cost function formula)

    ``sparse_coef_`` : scipy.sparse matrix, shape = (n_features, 1) | \
            (n_targets, n_features)
        ``sparse_coef_`` is a readonly property derived from ``coef_``

    ``intercept_`` : float | array, shape = (n_targets,)
        independent term in decision function.

    Notes
    -----
    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """

    # Static reference to the path solver that fit() drives one alpha at a
    # time; subclasses may rebind it.
    path = staticmethod(enet_path)

    def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
                 normalize=False, precompute='auto', max_iter=1000,
                 copy_X=True, tol=1e-4, warm_start=False, positive=False):
        self.alpha = alpha
        self.l1_ratio = l1_ratio
        # coef_ stays None until fit() runs; fit() also uses this to detect
        # a cold start when warm_start is requested.
        self.coef_ = None
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.max_iter = max_iter
        self.copy_X = copy_X
        self.tol = tol
        self.warm_start = warm_start
        self.positive = positive
        self.intercept_ = 0.0

    def fit(self, X, y):
        """Fit model with coordinate descent.

        Parameters
        -----------
        X : ndarray or scipy.sparse matrix, (n_samples, n_features)
            Data

        y : ndarray, shape = (n_samples,) or (n_samples, n_targets)
            Target

        Notes
        -----
        Coordinate descent is an algorithm that considers each column of
        data at a time hence it will automatically convert the X input
        as a Fortran-contiguous numpy array if necessary.

        To avoid memory re-allocation it is advised to allocate the
        initial data in memory directly using that format.
        """
        if self.alpha == 0:
            warnings.warn("With alpha=0, this algorithm does not converge "
                          "well. You are advised to use the LinearRegression "
                          "estimator", stacklevel=2)
        X = atleast2d_or_csc(X, dtype=np.float64, order='F',
                             copy=self.copy_X and self.fit_intercept)
        # From now on X can be touched inplace
        y = np.asarray(y, dtype=np.float64)
        # Center/normalize the data and (optionally) precompute the Gram
        # matrix and Xy product before running the solver.
        X, y, X_mean, y_mean, X_std, precompute, Xy = \
            _pre_fit(X, y, None, self.precompute, self.normalize,
                     self.fit_intercept, copy=True)
        # Work with 2-d targets internally so single- and multi-target
        # problems share one code path.
        if y.ndim == 1:
            y = y[:, np.newaxis]
        if Xy is not None and Xy.ndim == 1:
            Xy = Xy[:, np.newaxis]
        n_samples, n_features = X.shape
        n_targets = y.shape[1]
        if not self.warm_start or self.coef_ is None:
            coef_ = np.zeros((n_targets, n_features), dtype=np.float64,
                             order='F')
        else:
            # Warm start: resume the solver from the previous solution.
            coef_ = self.coef_
            if coef_.ndim == 1:
                coef_ = coef_[np.newaxis, :]
        dual_gaps_ = np.zeros(n_targets, dtype=np.float64)
        # Solve one single-alpha path per target column.
        for k in xrange(n_targets):
            if Xy is not None:
                this_Xy = Xy[:, k]
            else:
                this_Xy = None
            _, this_coef, this_dual_gap = \
                self.path(X, y[:, k],
                          l1_ratio=self.l1_ratio, eps=None,
                          n_alphas=None, alphas=[self.alpha],
                          precompute=precompute, Xy=this_Xy,
                          fit_intercept=False, normalize=False, copy_X=True,
                          verbose=False, tol=self.tol, positive=self.positive,
                          X_mean=X_mean, X_std=X_std,
                          coef_init=coef_[k], max_iter=self.max_iter)
            coef_[k] = this_coef[:, 0]
            dual_gaps_[k] = this_dual_gap[0]
        # Drop the target axis again for single-target problems.
        self.coef_, self.dual_gap_ = map(np.squeeze, [coef_, dual_gaps_])
        self._set_intercept(X_mean, y_mean, X_std)
        # return self for chaining fit and predict calls
        return self

    @property
    def sparse_coef_(self):
        """ sparse representation of the fitted coef """
        return sparse.csr_matrix(self.coef_)

    def decision_function(self, X):
        """Decision function of the linear model

        Parameters
        ----------
        X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)

        Returns
        -------
        T : array, shape = (n_samples,)
            The predicted decision function
        """
        # For sparse X, compute the linear predictor without densifying X;
        # dense input defers to the base class implementation.
        if sparse.isspmatrix(X):
            return np.ravel(safe_sparse_dot(self.coef_, X.T, dense_output=True)
                            + self.intercept_)
        else:
            return super(ElasticNet, self).decision_function(X)
###############################################################################
# Lasso model
class Lasso(ElasticNet):
    """Linear Model trained with L1 prior as regularizer (aka the Lasso).

    The optimization objective for the Lasso is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    Technically the Lasso model optimizes the same objective function as
    the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty).

    Parameters
    ----------
    alpha : float, optional
        Constant that multiplies the L1 term. Defaults to 1.0.
        ``alpha = 0`` is equivalent to an ordinary least square, solved by
        the :class:`LinearRegression` object. For numerical reasons, using
        ``alpha = 0`` with the Lasso object is not advised and you should
        prefer the LinearRegression object.

    fit_intercept : boolean
        Whether to calculate the intercept for this model. If set to false,
        no intercept will be used in calculations (e.g. data is expected to
        be already centered).

    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up calculations.
        If set to ``'auto'`` let us decide. The Gram matrix can also be
        passed as argument. For sparse input this option is always ``True``
        to preserve sparsity.

    max_iter : int, optional
        The maximum number of iterations.

    tol : float, optional
        The tolerance for the optimization: if the updates are smaller than
        ``tol``, the optimization code checks the dual gap for optimality
        and continues until it is smaller than ``tol``.

    warm_start : bool, optional
        When set to True, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.

    positive : bool, optional
        When set to ``True``, forces the coefficients to be positive.

    Attributes
    ----------
    ``coef_`` : array, shape = (n_features,) | (n_targets, n_features)
        Parameter vector (w in the cost function formula).

    ``sparse_coef_`` : scipy.sparse matrix, shape = (n_features, 1) | \
            (n_targets, n_features)
        Readonly property derived from ``coef_``.

    ``intercept_`` : float | array, shape = (n_targets,)
        Independent term in decision function.

    See also
    --------
    lars_path
    lasso_path
    LassoLars
    LassoCV
    LassoLarsCV
    sklearn.decomposition.sparse_encode

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.

    To avoid unnecessary memory duplication, pass the ``fit`` method's X
    argument as a Fortran-contiguous numpy array.
    """

    # With l1_ratio pinned to 1 below, the elastic-net path is a lasso path.
    path = staticmethod(enet_path)

    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 precompute='auto', copy_X=True, max_iter=1000,
                 tol=1e-4, warm_start=False, positive=False):
        # Delegate everything to ElasticNet, fixing the mixing parameter at
        # pure L1.
        enet_kwargs = dict(
            alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept,
            normalize=normalize, precompute=precompute, copy_X=copy_X,
            max_iter=max_iter, tol=tol, warm_start=warm_start,
            positive=positive)
        super(Lasso, self).__init__(**enet_kwargs)
###############################################################################
# Functions for CV with paths functions
def _path_residuals(X, y, train, test, path, path_params, l1_ratio=1,
                    X_order=None, dtype=None):
    """Return the test-fold MSE for each model computed by ``path``.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data.
    y : array-like, shape (n_samples,) or (n_samples, n_targets)
        Target values.
    train : list of indices
        The indices of the train set.
    test : list of indices
        The indices of the test set.
    path : callable
        Function returning a list of models on the path; see ``enet_path``
        for an example of signature.
    path_params : dictionary
        Parameters passed to the path function.
    l1_ratio : float, optional
        Elastic-net mixing parameter in ``[0, 1]``: 0 is an L2 penalty,
        1 an L1 penalty, intermediate values a combination of both.
    X_order : {'F', 'C', or None}, optional
        Array order expected by the path function, to avoid memory copies.
    dtype : a numpy dtype or None
        Array dtype expected by the path function, to avoid memory copies.

    Returns
    -------
    this_mses : ndarray
        Mean squared error on the test fold for each alpha along the path.
    l1_ratio : float
        The ``l1_ratio`` used, echoed back unchanged.
    """
    X_train, y_train = X[train], y[train]
    X_test, y_test = X[test], y[test]

    fit_intercept = path_params['fit_intercept']
    normalize = path_params['normalize']
    precompute = path_params['precompute']

    X_train, y_train, X_mean, y_mean, X_std, precompute, Xy = \
        _pre_fit(X_train, y_train, None, precompute, normalize,
                 fit_intercept, copy=False)

    # The path function must not re-center/re-normalize: centering was done
    # above and the statistics are forwarded to it explicitly instead.
    path_params = dict(path_params,
                       fit_intercept=False, normalize=False, Xy=Xy,
                       X_mean=X_mean, X_std=X_std, precompute=precompute,
                       copy_X=False)
    if 'l1_ratio' in path_params:
        path_params['l1_ratio'] = l1_ratio

    # Do the ordering and type casting here: if the path did it, X would be
    # copied there while a reference is still kept here.
    X_train = atleast2d_or_csc(X_train, dtype=dtype, order=X_order)
    alphas, coefs, _ = path(X_train, y_train, **path_params)
    del X_train

    if normalize:
        # Undo the feature scaling on the coefficients (skip zero-variance
        # features to avoid division by zero).
        nonzeros = np.flatnonzero(X_std)
        coefs[nonzeros] /= X_std[nonzeros][:, np.newaxis]

    intercepts = y_mean - np.dot(X_mean, coefs)
    residues = safe_sparse_dot(X_test, coefs) - y_test[:, np.newaxis]
    residues += intercepts[np.newaxis, :]
    this_mses = (residues ** 2).mean(axis=0)
    return this_mses, l1_ratio
class LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)):
    """Base class for iterative model fitting along a regularization path.

    Subclasses provide a ``path`` static method (e.g. lasso_path or
    enet_path); fit() cross-validates over a grid of alphas (and, when the
    subclass exposes ``l1_ratio``, over penalty mixes as well) and then
    refits an ElasticNet with the best parameters found.
    """

    @abstractmethod
    def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
                 normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
                 copy_X=True, cv=None, verbose=False):
        # Hyper-parameters are stored verbatim; fit() reads them via
        # get_params() to build the arguments for the path function.
        self.eps = eps
        self.n_alphas = n_alphas
        self.alphas = alphas
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.max_iter = max_iter
        self.tol = tol
        self.copy_X = copy_X
        self.cv = cv
        self.verbose = verbose

    def fit(self, X, y):
        """Fit linear model with coordinate descent

        Fit is on grid of alphas and best alpha estimated by cross-validation.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data. Pass directly as float64, Fortran-contiguous data
            to avoid unnecessary memory duplication

        y : array-like, shape (n_samples,) or (n_samples, n_targets)
            Target values
        """
        # Dealing right with copy_X is important in the following:
        # multiple functions touch X and subsamples of X and can induce a
        # lot of duplication of memory
        copy_X = self.copy_X and self.fit_intercept

        if isinstance(X, np.ndarray) or sparse.isspmatrix(X):
            # Keep a reference to X
            reference_to_old_X = X
            # Let us not impose fortran ordering or float64 so far: it is
            # not useful for the cross-validation loop and will be done
            # by the model fitting itself
            X = atleast2d_or_csc(X, copy=False)
            if sparse.isspmatrix(X):
                if not np.may_share_memory(reference_to_old_X.data, X.data):
                    # X is a sparse matrix and has been copied
                    copy_X = False
            elif not np.may_share_memory(reference_to_old_X, X):
                # X has been copied
                copy_X = False
            del reference_to_old_X
        else:
            # Arbitrary input: coerce once, up front, so no further copy is
            # needed inside the fitting code.
            X = atleast2d_or_csc(X, dtype=np.float64, order='F',
                                 copy=copy_X)
            copy_X = False

        y = np.asarray(y, dtype=np.float64)
        if y.ndim > 1:
            raise ValueError("For multi-task outputs, fit the linear model "
                             "per output/task")
        if X.shape[0] != y.shape[0]:
            raise ValueError("X and y have inconsistent dimensions (%d != %d)"
                             % (X.shape[0], y.shape[0]))

        # All LinearModelCV parameters except 'cv' are acceptable
        path_params = self.get_params()
        if 'l1_ratio' in path_params:
            l1_ratios = np.atleast_1d(path_params['l1_ratio'])
            # For the first path, we need to set l1_ratio
            path_params['l1_ratio'] = l1_ratios[0]
        else:
            # Lasso-like subclasses: a single (pure-L1) penalty mix.
            l1_ratios = [1, ]
        path_params.pop('cv', None)
        path_params.pop('n_jobs', None)

        alphas = self.alphas
        if alphas is None:
            # Build the alpha grid from the data, using the mean l1_ratio
            # when several mixes will be cross-validated.
            mean_l1_ratio = 1.
            if hasattr(self, 'l1_ratio'):
                mean_l1_ratio = np.mean(self.l1_ratio)
            alphas = _alpha_grid(X, y, l1_ratio=mean_l1_ratio,
                                 fit_intercept=self.fit_intercept,
                                 eps=self.eps, n_alphas=self.n_alphas,
                                 normalize=self.normalize,
                                 copy_X=self.copy_X)
        n_alphas = len(alphas)
        path_params.update({'alphas': alphas, 'n_alphas': n_alphas})

        path_params['copy_X'] = copy_X
        # We are not computing in parallel, we can modify X
        # inplace in the folds
        if not (self.n_jobs == 1 or self.n_jobs is None):
            path_params['copy_X'] = False

        # init cross-validation generator
        cv = check_cv(self.cv, X)

        # Compute path for all folds and compute MSE to get the best alpha
        folds = list(cv)
        best_mse = np.inf
        all_mse_paths = list()

        # We do a double for loop folded in one, in order to be able to
        # iterate in parallel on l1_ratio and folds
        # (itertools.groupby requires consecutive equal keys; that holds
        # because the generator below iterates l1_ratios outermost, so the
        # parallel results come back grouped by l1_ratio already).
        for l1_ratio, mse_alphas in itertools.groupby(
                Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
                    delayed(_path_residuals)(
                        X, y, train, test, self.path, path_params,
                        l1_ratio=l1_ratio, X_order='F',
                        dtype=np.float64)
                    for l1_ratio in l1_ratios for train, test in folds
                ), operator.itemgetter(1)):
            # Each group member is (mse_per_alpha, l1_ratio); keep the MSEs.
            mse_alphas = [m[0] for m in mse_alphas]
            mse_alphas = np.array(mse_alphas)
            mse = np.mean(mse_alphas, axis=0)
            i_best_alpha = np.argmin(mse)
            this_best_mse = mse[i_best_alpha]
            all_mse_paths.append(mse_alphas.T)
            # NOTE(review): if every fold MSE is NaN, no branch below ever
            # runs and best_alpha/best_l1_ratio stay unbound.
            if this_best_mse < best_mse:
                best_alpha = alphas[i_best_alpha]
                best_l1_ratio = l1_ratio
                best_mse = this_best_mse

        self.l1_ratio_ = best_l1_ratio
        self.alpha_ = best_alpha
        self.alphas_ = np.asarray(alphas)
        self.mse_path_ = np.squeeze(all_mse_paths)

        # Refit the model with the parameters selected
        model = ElasticNet()
        common_params = dict((name, value)
                             for name, value in self.get_params().items()
                             if name in model.get_params())
        model.set_params(**common_params)
        model.alpha = best_alpha
        model.l1_ratio = best_l1_ratio
        model.copy_X = copy_X
        model.fit(X, y)

        # Expose the refit results as this estimator's fitted state.
        self.coef_ = model.coef_
        self.intercept_ = model.intercept_
        self.dual_gap_ = model.dual_gap_
        return self
class LassoCV(LinearModelCV, RegressorMixin):
    """Lasso linear model with iterative fitting along a regularization path.

    The best model is selected by cross-validation.

    The optimization objective for Lasso is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    Parameters
    ----------
    eps : float, optional
        Length of the path: ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.

    n_alphas : int, optional
        Number of alpha values along the regularization path.

    alphas : numpy array, optional
        Explicit list of alphas at which to compute the models. When
        ``None``, the grid is chosen automatically.

    precompute : True | False | 'auto' | array-like
        Whether a precomputed Gram matrix is used to speed up calculations;
        ``'auto'`` leaves the decision to the estimator. A Gram matrix may
        also be passed in directly.

    max_iter : int, optional
        Maximum number of iterations.

    tol : float, optional
        Optimization tolerance: when the updates fall below ``tol``, the
        dual gap is checked for optimality and optimization continues until
        the gap is also below ``tol``.

    cv : integer or cross-validation generator, optional
        Number of folds (default 3) when an integer is given; otherwise a
        cross-validation object, see the :mod:`sklearn.cross_validation`
        module for the possible objects.

    verbose : bool or integer
        Verbosity level.

    Attributes
    ----------
    ``alpha_`` : float
        Penalization strength selected by cross-validation.

    ``coef_`` : array, shape = (n_features,) | (n_targets, n_features)
        Parameter vector (w in the cost function formula).

    ``intercept_`` : float | array, shape = (n_targets,)
        Independent term in the decision function.

    ``mse_path_`` : array, shape = (n_alphas, n_folds)
        Mean square error on each test fold, for each alpha tried.

    ``alphas_`` : numpy array
        Grid of alphas used for fitting.

    Notes
    -----
    See examples/linear_model/lasso_path_with_crossvalidation.py
    for an example.

    To avoid unnecessary memory duplication the X argument of the fit
    method should be directly passed as a Fortran-contiguous numpy array.

    See also
    --------
    lars_path
    lasso_path
    LassoLars
    Lasso
    LassoLarsCV
    """
    # The regularization path uses the Lasso-specialised routine.
    path = staticmethod(lasso_path)
    # Class-level default: cross-validation folds are processed serially.
    n_jobs = 1

    def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
                 normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
                 copy_X=True, cv=None, verbose=False):
        # Pure pass-through: every option is handed to LinearModelCV.
        super(LassoCV, self).__init__(
            eps=eps, n_alphas=n_alphas, alphas=alphas,
            fit_intercept=fit_intercept, normalize=normalize,
            precompute=precompute, max_iter=max_iter, tol=tol,
            copy_X=copy_X, cv=cv, verbose=verbose)
class ElasticNetCV(LinearModelCV, RegressorMixin):
    """Elastic Net model with iterative fitting along a regularization path.

    The best model is selected by cross-validation.

    Parameters
    ----------
    l1_ratio : float, optional
        Mixing parameter between 0 and 1 passed to ElasticNet (scaling
        between l1 and l2 penalties): ``l1_ratio = 0`` gives an L2 penalty,
        ``l1_ratio = 1`` an L1 penalty, and ``0 < l1_ratio < 1`` a
        combination of L1 and L2. A list of values may also be given, in
        which case cross-validation selects the mix with the best
        prediction score. A good list typically puts more values close to
        1 (i.e. Lasso) and fewer close to 0 (i.e. Ridge), as in
        ``[.1, .5, .7, .9, .95, .99, 1]``.

    eps : float, optional
        Length of the path: ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.

    n_alphas : int, optional
        Number of alphas along the regularization path.

    alphas : numpy array, optional
        Explicit alphas at which to compute the models; when None the grid
        is set automatically.

    precompute : True | False | 'auto' | array-like
        Whether a precomputed Gram matrix is used to speed up calculations;
        ``'auto'`` leaves the decision to the estimator. A Gram matrix may
        also be passed in directly.

    max_iter : int, optional
        Maximum number of iterations.

    tol : float, optional
        Optimization tolerance: when the updates fall below ``tol``, the
        dual gap is checked for optimality and optimization continues until
        the gap is also below ``tol``.

    cv : integer or cross-validation generator, optional
        Number of folds (default 3) when an integer is given; otherwise a
        cross-validation object, see the :mod:`sklearn.cross_validation`
        module for the possible objects.

    verbose : bool or integer
        Verbosity level.

    n_jobs : integer, optional
        Number of CPUs used during cross-validation; ``-1`` uses all CPUs.
        Note that this is used only if multiple values for l1_ratio are
        given.

    Attributes
    ----------
    ``alpha_`` : float
        Penalization strength chosen by cross-validation.

    ``l1_ratio_`` : float
        Compromise between l1 and l2 penalization chosen by
        cross-validation.

    ``coef_`` : array, shape = (n_features,) | (n_targets, n_features)
        Parameter vector (w in the cost function formula).

    ``intercept_`` : float | array, shape = (n_targets, n_features)
        Independent term in the decision function.

    ``mse_path_`` : array, shape = (n_l1_ratio, n_alpha, n_folds)
        Mean square error on each test fold, varying l1_ratio and alpha.

    Notes
    -----
    See examples/linear_model/lasso_path_with_crossvalidation.py
    for an example.

    To avoid unnecessary memory duplication the X argument of the fit
    method should be directly passed as a Fortran-contiguous numpy array.

    The parameter l1_ratio corresponds to alpha in the glmnet R package
    while alpha corresponds to the lambda parameter in glmnet. More
    specifically, the optimization objective is::

        1 / (2 * n_samples) * ||y - Xw||^2_2 +
        + alpha * l1_ratio * ||w||_1
        + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2

    If you are interested in controlling the L1 and L2 penalty separately,
    keep in mind that this is equivalent to::

        a * L1 + b * L2

    for::

        alpha = a + b and l1_ratio = a / (a + b).

    See also
    --------
    enet_path
    ElasticNet
    """
    # The full elastic-net path routine is shared with plain ElasticNet.
    path = staticmethod(enet_path)

    def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
                 fit_intercept=True, normalize=False, precompute='auto',
                 max_iter=1000, tol=1e-4, cv=None, copy_X=True,
                 verbose=0, n_jobs=1):
        # Hyper-parameters are stored verbatim, grouped by role.
        # Path-shape parameters:
        self.l1_ratio = l1_ratio
        self.eps = eps
        self.n_alphas = n_alphas
        self.alphas = alphas
        # Data preprocessing options:
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.copy_X = copy_X
        # Optimizer controls:
        self.max_iter = max_iter
        self.tol = tol
        # Cross-validation setup:
        self.cv = cv
        self.verbose = verbose
        self.n_jobs = n_jobs
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
class MultiTaskElasticNet(Lasso):
    """Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer

    The optimization objective for MultiTaskElasticNet is::

        (1 / (2 * n_samples)) * ||Y - XW||^Fro_2
        + alpha * l1_ratio * ||W||_21
        + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2

    Where::

        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}

    i.e. the sum of the norms of each row.

    Parameters
    ----------
    alpha : float, optional
        Constant that multiplies the L1/L2 term. Defaults to 1.0

    l1_ratio : float
        The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For ``l1_ratio = 1`` the penalty is a pure L1/L2 (row-norm)
        penalty; for ``l1_ratio = 0`` it would be a pure Frobenius (L2)
        penalty. For ``0 < l1_ratio < 1``, the penalty is a combination
        of L1/L2 and L2.

    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    max_iter : int, optional
        The maximum number of iterations

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    warm_start : bool, optional
        When set to ``True``, reuse the solution of the previous call to fit
        as initialization, otherwise, just erase the previous solution.

    Attributes
    ----------
    ``intercept_`` : array, shape = (n_tasks,)
        Independent term in decision function.

    ``coef_`` : array, shape = (n_tasks, n_features)
        Parameter vector (W in the cost function formula). If a 1D y is \
        passed in at fit (non multi-task usage), ``coef_`` is then a 1D array

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
    ... #doctest: +NORMALIZE_WHITESPACE
    MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True,
            l1_ratio=0.5, max_iter=1000, normalize=False, tol=0.0001,
            warm_start=False)
    >>> print(clf.coef_)
    [[ 0.45663524  0.45612256]
     [ 0.45663524  0.45612256]]
    >>> print(clf.intercept_)
    [ 0.0872422  0.0872422]

    See also
    --------
    ElasticNet, MultiTaskLasso

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """
    def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
                 normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
                 warm_start=False):
        # Parameters are stored verbatim; coef_ of None marks "no previous
        # solution" for the warm-start logic in fit().
        self.l1_ratio = l1_ratio
        self.alpha = alpha
        self.coef_ = None
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.max_iter = max_iter
        self.copy_X = copy_X
        self.tol = tol
        self.warm_start = warm_start

    def fit(self, X, y):
        """Fit MultiTaskElasticNet model with coordinate descent

        Parameters
        -----------
        X: ndarray, shape = (n_samples, n_features)
            Data
        y: ndarray, shape = (n_samples, n_tasks)
            Target

        Notes
        -----
        Coordinate descent is an algorithm that considers each column of
        data at a time hence it will automatically convert the X input
        as a Fortran-contiguous numpy array if necessary.

        To avoid memory re-allocation it is advised to allocate the
        initial data in memory directly using that format.
        """
        # X and y must be of type float64
        X = array2d(X, dtype=np.float64, order='F',
                    copy=self.copy_X and self.fit_intercept)
        y = np.asarray(y, dtype=np.float64)

        # Accept a 1D y (single task); remember to squeeze coef_ back at
        # the end so predictions keep the caller's shape.
        squeeze_me = False
        if y.ndim == 1:
            squeeze_me = True
            y = y[:, np.newaxis]

        n_samples, n_features = X.shape
        _, n_tasks = y.shape

        # Center/normalize in place (copy=False); the means/stds are needed
        # later to reconstruct the intercept.
        X, y, X_mean, y_mean, X_std = center_data(
            X, y, self.fit_intercept, self.normalize, copy=False)

        if not self.warm_start or self.coef_ is None:
            self.coef_ = np.zeros((n_tasks, n_features), dtype=np.float64,
                                  order='F')

        # Scale the penalties by n_samples to match the solver's convention.
        l1_reg = self.alpha * self.l1_ratio * n_samples
        l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples

        self.coef_ = np.asfortranarray(self.coef_)  # coef contiguous in memory

        self.coef_, self.dual_gap_, self.eps_ = \
            cd_fast.enet_coordinate_descent_multi_task(
                self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol)

        self._set_intercept(X_mean, y_mean, X_std)

        # Make sure that the coef_ have the same shape as the given 'y',
        # to predict with the same shape
        if squeeze_me:
            self.coef_ = self.coef_.squeeze()

        if self.dual_gap_ > self.eps_:
            warnings.warn('Objective did not converge, you might want'
                          ' to increase the number of iterations')

        # return self for chaining fit and predict calls
        return self
class MultiTaskLasso(MultiTaskElasticNet):
    """Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer.

    The optimization objective for MultiTaskLasso is::

        (1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21

    Where::

        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}

    i.e. the sum of the norms of each row.

    Parameters
    ----------
    alpha : float, optional
        Constant multiplying the L1/L2 term. Defaults to 1.0.

    fit_intercept : boolean
        Whether to estimate an intercept for this model. When set to
        false, no intercept is used in calculations (e.g. the data is
        expected to be centered already).

    normalize : boolean, optional, default False
        When ``True``, the regressors X are normalized before regression.

    copy_X : boolean, optional, default True
        When ``True``, X is copied; otherwise it may be overwritten.

    max_iter : int, optional
        Maximum number of iterations.

    tol : float, optional
        Optimization tolerance: when the updates fall below ``tol``, the
        dual gap is checked for optimality and optimization continues
        until the gap is also below ``tol``.

    warm_start : bool, optional
        When ``True``, the solution of the previous fit() call is reused
        as initialization; otherwise the previous solution is erased.

    Attributes
    ----------
    ``coef_`` : array, shape = (n_tasks, n_features)
        Parameter vector (W in the cost function formula).

    ``intercept_`` : array, shape = (n_tasks,)
        Independent term in the decision function.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.MultiTaskLasso(alpha=0.1)
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
    MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
            normalize=False, tol=0.0001, warm_start=False)
    >>> print(clf.coef_)
    [[ 0.89393398  0.        ]
     [ 0.89393398  0.        ]]
    >>> print(clf.intercept_)
    [ 0.10606602  0.10606602]

    See also
    --------
    Lasso, MultiTaskElasticNet

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.

    To avoid unnecessary memory duplication the X argument of the fit
    method should be directly passed as a Fortran-contiguous numpy array.
    """
    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 copy_X=True, max_iter=1000, tol=1e-4, warm_start=False):
        # Hyper-parameters are stored verbatim.
        self.alpha = alpha
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.copy_X = copy_X
        self.max_iter = max_iter
        self.tol = tol
        self.warm_start = warm_start
        # A multi-task Lasso is the l1_ratio=1.0 special case of
        # MultiTaskElasticNet; coef_ of None means "no previous solution"
        # for the warm-start logic.
        self.l1_ratio = 1.0
        self.coef_ = None
| {
"content_hash": "c6c7a00fa82c3881c31eb67d7317ed2f",
"timestamp": "",
"source": "github",
"line_count": 1471,
"max_line_length": 79,
"avg_line_length": 35.784500339904824,
"alnum_prop": 0.5897908394916317,
"repo_name": "Tong-Chen/scikit-learn",
"id": "9213ff53f0bfb20d0ac7ea077b10ba96073d97fa",
"size": "52883",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sklearn/linear_model/coordinate_descent.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from secret import twitter_instance
from json import dump
import sys

# Block the given account and dump the API response to stdout.
tw = twitter_instance()

# [1] Create the block.
result = tw.blocks.create(screen_name='showa_yojyo')

# [2] Pretty-print the returned user object as JSON.
dump(result, sys.stdout, ensure_ascii=False, indent=4, sort_keys=True)
| {
"content_hash": "5c349df379fcdd09be0289725ed9376c",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 72,
"avg_line_length": 21.454545454545453,
"alnum_prop": 0.7415254237288136,
"repo_name": "showa-yojyo/notebook",
"id": "258a497bc42b1e28cf08431a2d5dce1f971aeb85",
"size": "359",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "doc/source/_sample/ptt/blocks-create.py",
"mode": "33261",
"license": "mit",
"language": [],
"symlink_target": ""
} |
import lx
import lxifc
# Item Events
#
# Integer event codes passed as the ``event`` argument of the user callback
# (see ItemEvents.eventHandler below) to identify what happened.
ITEM_ADD = 0       # an item was added to the scene
ITEM_DELETE = 1    # an item was removed from the scene
ITEM_RENAME = 2    # an item was renamed
VALUE_CHANGED = 3  # a channel value changed
#----------------------------------------------------------------------------------------------------------------------
# SceneItemListener - Helper class
#----------------------------------------------------------------------------------------------------------------------
class ItemEvents (lxifc.SceneItemListener):
    '''
    SceneItemListener adapter that funnels a small subset of scene events
    (add / remove / rename / channel-value change) into a single user
    callback.

    The callback receives this object itself; the most recent event is
    exposed through the ``event``, ``item``, ``action`` and ``index``
    attributes (fields not relevant to an event are None).
    '''
    def __init__ (self, callback):
        # Service object used to (un)register this listener.
        self.listenerService = lx.service.Listener ()
        # NOTE(review): registration is commented out here, yet __del__
        # unconditionally calls RemoveListener -- confirm the caller
        # performs AddListener, otherwise deletion may misbehave.
        #self.listenerService.AddListener (self)
        self.callback = callback
        # Snapshot of the last dispatched event.
        self.event = None
        self.item = None
        self.action = None
        self.index = None

    def __del__ (self):
        # Unregister on garbage collection.
        self.listenerService.RemoveListener (self)

    def eventHandler (self, event, item, action, index):
        # Record the event details, then hand control to the user callback.
        self.event = event
        self.item = item
        self.action = action
        self.index = index
        self.callback (self)

    # The four notifications this helper actually reports:
    def sil_ItemAdd(self, item):
        self.eventHandler (ITEM_ADD, item, None, None)

    def sil_ItemRemove(self, item):
        self.eventHandler (ITEM_DELETE, item, None, None)

    def sil_ItemName(self, item):
        self.eventHandler (ITEM_RENAME, item, None, None)

    def sil_ChannelValue(self, action, item, index):
        self.eventHandler (VALUE_CHANGED, item, action, index)

    # All remaining SceneItemListener notifications are intentionally
    # ignored (the interface requires the methods to exist).
    def sil_SceneCreate(self, scene):
        pass

    def sil_SceneDestroy(self, scene):
        pass

    def sil_SceneFilename(self, scene, fileName):
        pass

    def sil_SceneClear(self, scene):
        pass

    def sil_ItemPreChange(self, scene):
        pass

    def sil_ItemPostDelete(self, scene):
        pass

    def sil_ItemParent(self, item):
        pass

    def sil_ItemChild(self, item):
        pass

    def sil_ItemAddChannel(self, item):
        pass

    def sil_ItemLocal(self, item):
        pass

    def sil_ItemSource(self, item):
        pass

    def sil_ItemPackage(self, item):
        pass

    def sil_LinkAdd(self, graph, itemFrom, itemTo):
        pass

    def sil_LinkRemBefore(self, graph, itemFrom, itemTo):
        pass

    def sil_LinkRemAfter(self, graph, itemFrom, itemTo):
        pass

    def sil_LinkSet(self, graph, itemFrom, itemTo):
        pass

    def sil_ChanLinkAdd(self, graph, itemFrom, chanFrom, itemTo, chanTo):
        pass

    def sil_ChanLinkRemAfter(self, graph, itemFrom, chanFrom, itemTo, chanTo):
        pass

    def sil_ChanLinkSet(self, graph, itemFrom, chanFrom, itemTo, chanTo):
        pass
| {
"content_hash": "62e388c00a6a4d482d993379260a3230",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 119,
"avg_line_length": 22,
"alnum_prop": 0.625,
"repo_name": "tcrowson/Modo_LightBank",
"id": "b207ec9db161fb55180f5b7c0e57b9ce81a35bbd",
"size": "2513",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sil.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46417"
}
],
"symlink_target": ""
} |
"""
Create SQL statements for QuerySets.
The code in here encapsulates all of the SQL construction so that QuerySets
themselves do not have to (and could be backed by things other than SQL
databases). The abstraction barrier only works one way: this module has to know
all about the internals of models in order to get the information it needs.
"""
import copy
from django.utils.datastructures import SortedDict
from django.utils.encoding import force_unicode
from django.utils.tree import Node
from django.db import connections, DEFAULT_DB_ALIAS
from django.db.models import signals
from django.db.models.expressions import ExpressionNode
from django.db.models.fields import FieldDoesNotExist
from django.db.models.query_utils import InvalidQuery
from django.db.models.sql import aggregates as base_aggregates_module
from django.db.models.sql.constants import *
from django.db.models.sql.datastructures import EmptyResultSet, Empty, MultiJoin
from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.sql.where import (WhereNode, Constraint, EverythingNode,
ExtraWhere, AND, OR)
from django.core.exceptions import FieldError
__all__ = ['Query', 'RawQuery']
class RawQuery(object):
    """
    A single raw SQL query.
    """

    def __init__(self, sql, using, params=None):
        self.sql = sql
        self.using = using
        self.params = params or ()
        self.cursor = None

        # Mirror some properties of a normal query so that
        # the compiler can be used to process results.
        self.low_mark = 0
        self.high_mark = None  # Used for offset/limit
        self.extra_select = {}
        self.aggregate_select = {}

    def clone(self, using):
        """Return a copy of this query bound to the given database alias."""
        return RawQuery(self.sql, using, params=self.params)

    def convert_values(self, value, field, connection):
        """Convert the database-returned value into a type that is consistent
        across database backends.

        By default, this defers to the underlying backend operations, but
        it can be overridden by Query classes for specific backends.
        """
        return connection.ops.convert_values(value, field)

    def get_columns(self):
        """Return the (converted) column names of the result set, running
        the query first if necessary."""
        if self.cursor is None:
            self._execute_query()
        converter = connections[self.using].introspection.table_name_converter
        return [converter(meta[0]) for meta in self.cursor.description]

    def __iter__(self):
        # Always execute a new query for a new iterator.
        # This could be optimized with a cache at the expense of RAM.
        self._execute_query()
        cursor = self.cursor
        if connections[self.using].features.can_use_chunked_reads:
            return iter(cursor)
        # The backend cannot stream rows, so materialize the whole result
        # set up front.
        return iter(list(cursor))

    def __repr__(self):
        return "<RawQuery: %r>" % (self.sql % tuple(self.params))

    def _execute_query(self):
        # Open a fresh cursor and run the raw SQL with its parameters.
        self.cursor = connections[self.using].cursor()
        self.cursor.execute(self.sql, self.params)
class Query(object):
"""
A single SQL query.
"""
# SQL join types. These are part of the class because their string forms
# vary from database to database and can be customised by a subclass.
INNER = 'INNER JOIN'
LOUTER = 'LEFT OUTER JOIN'
alias_prefix = 'T'
query_terms = QUERY_TERMS
aggregates_module = base_aggregates_module
compiler = 'SQLCompiler'
    def __init__(self, model, where=WhereNode):
        """
        Initialize an empty query for ``model``.

        ``where`` is the node class used to build both the WHERE and HAVING
        trees; it is stored so clones and subqueries can create compatible
        nodes.
        """
        self.model = model
        self.alias_refcount = SortedDict()
        # alias_map is the most important data structure regarding joins.
        # It's used for recording which joins exist in the query and what
        # type they are. The key is the alias of the joined table (possibly
        # the table name) and the value is JoinInfo from constants.py.
        self.alias_map = {}
        self.table_map = {}     # Maps table names to list of aliases.
        self.join_map = {}
        self.default_cols = True
        self.default_ordering = True
        self.standard_ordering = True
        self.ordering_aliases = []
        self.select_fields = []
        self.related_select_fields = []
        self.dupe_avoidance = {}
        self.used_aliases = set()
        self.filter_is_sticky = False
        self.included_inherited_models = {}

        # SQL-related attributes
        self.select = []
        self.tables = []    # Aliases in the order they are created.
        self.where = where()
        self.where_class = where
        self.group_by = None
        self.having = where()
        self.order_by = []
        self.low_mark, self.high_mark = 0, None  # Used for offset/limit
        self.distinct = False
        self.distinct_fields = []
        self.select_for_update = False
        self.select_for_update_nowait = False
        self.select_related = False
        self.related_select_cols = []

        # SQL aggregate-related attributes
        self.aggregates = SortedDict()  # Maps alias -> SQL aggregate function
        self.aggregate_select_mask = None
        self._aggregate_select_cache = None

        # Arbitrary maximum limit for select_related. Prevents infinite
        # recursion. Can be changed by the depth parameter to select_related().
        self.max_depth = 5

        # These are for extensions. The contents are more or less appended
        # verbatim to the appropriate clause.
        self.extra = SortedDict()  # Maps col_alias -> (col_sql, params).
        self.extra_select_mask = None
        self._extra_select_cache = None

        self.extra_tables = ()
        self.extra_order_by = ()

        # A tuple that is a set of model field names and either True, if these
        # are the fields to defer, or False if these are the only fields to
        # load.
        self.deferred_loading = (set(), True)
def __str__(self):
"""
Returns the query as a string of SQL with the parameter values
substituted in (use sql_with_params() to see the unsubstituted string).
Parameter values won't necessarily be quoted correctly, since that is
done by the database interface at execution time.
"""
sql, params = self.sql_with_params()
return sql % params
def sql_with_params(self):
"""
Returns the query as an SQL string and the parameters that will be
subsituted into the query.
"""
return self.get_compiler(DEFAULT_DB_ALIAS).as_sql()
def __deepcopy__(self, memo):
result = self.clone(memo=memo)
memo[id(self)] = result
return result
def __getstate__(self):
"""
Pickling support.
"""
obj_dict = self.__dict__.copy()
obj_dict['related_select_fields'] = []
obj_dict['related_select_cols'] = []
# Fields can't be pickled, so if a field list has been
# specified, we pickle the list of field names instead.
# None is also a possible value; that can pass as-is
obj_dict['select_fields'] = [
f is not None and f.name or None
for f in obj_dict['select_fields']
]
return obj_dict
def __setstate__(self, obj_dict):
"""
Unpickling support.
"""
# Rebuild list of field instances
opts = obj_dict['model']._meta
obj_dict['select_fields'] = [
name is not None and opts.get_field(name) or None
for name in obj_dict['select_fields']
]
self.__dict__.update(obj_dict)
    def prepare(self):
        # No-op hook: the base query is returned unchanged; presumably
        # subclasses return a modified query here -- not visible in this
        # file chunk.
        return self
def get_compiler(self, using=None, connection=None):
if using is None and connection is None:
raise ValueError("Need either using or connection")
if using:
connection = connections[using]
# Check that the compiler will be able to execute the query
for alias, aggregate in self.aggregate_select.items():
connection.ops.check_aggregate_support(aggregate)
return connection.ops.compiler(self.compiler)(self, connection, using)
    def get_meta(self):
        """
        Returns the Options instance (the model._meta) from which to start
        processing. Normally, this is self.model._meta, but it can be changed
        by subclasses.
        """
        return self.model._meta
    def clone(self, klass=None, memo=None, **kwargs):
        """
        Creates a copy of the current instance. The 'kwargs' parameter can be
        used by clients to update attributes after copying has taken place.

        ``klass`` allows the clone to be re-typed (e.g. to a subquery
        class); ``memo`` is threaded through to copy.deepcopy so shared
        sub-objects are copied once.
        """
        # Bypass __init__: every attribute is populated explicitly below.
        obj = Empty()
        obj.__class__ = klass or self.__class__
        obj.model = self.model
        # Shallow copies are enough for the alias/join bookkeeping: the
        # contained values are replaced wholesale, never mutated in place.
        obj.alias_refcount = self.alias_refcount.copy()
        obj.alias_map = self.alias_map.copy()
        obj.table_map = self.table_map.copy()
        obj.join_map = self.join_map.copy()
        obj.default_cols = self.default_cols
        obj.default_ordering = self.default_ordering
        obj.standard_ordering = self.standard_ordering
        obj.included_inherited_models = self.included_inherited_models.copy()
        obj.ordering_aliases = []
        obj.select_fields = self.select_fields[:]
        obj.related_select_fields = self.related_select_fields[:]
        obj.dupe_avoidance = self.dupe_avoidance.copy()
        obj.select = self.select[:]
        obj.tables = self.tables[:]
        # The where/having trees are mutated by filtering, so they need a
        # real deep copy.
        obj.where = copy.deepcopy(self.where, memo=memo)
        obj.where_class = self.where_class
        if self.group_by is None:
            obj.group_by = None
        else:
            obj.group_by = self.group_by[:]
        obj.having = copy.deepcopy(self.having, memo=memo)
        obj.order_by = self.order_by[:]
        obj.low_mark, obj.high_mark = self.low_mark, self.high_mark
        obj.distinct = self.distinct
        obj.distinct_fields = self.distinct_fields[:]
        obj.select_for_update = self.select_for_update
        obj.select_for_update_nowait = self.select_for_update_nowait
        obj.select_related = self.select_related
        obj.related_select_cols = []
        obj.aggregates = copy.deepcopy(self.aggregates, memo=memo)
        if self.aggregate_select_mask is None:
            obj.aggregate_select_mask = None
        else:
            obj.aggregate_select_mask = self.aggregate_select_mask.copy()
        # _aggregate_select_cache cannot be copied, as doing so breaks the
        # (necessary) state in which both aggregates and
        # _aggregate_select_cache point to the same underlying objects.
        # It will get re-populated in the cloned queryset the next time it's
        # used.
        obj._aggregate_select_cache = None
        obj.max_depth = self.max_depth
        obj.extra = self.extra.copy()
        if self.extra_select_mask is None:
            obj.extra_select_mask = None
        else:
            obj.extra_select_mask = self.extra_select_mask.copy()
        if self._extra_select_cache is None:
            obj._extra_select_cache = None
        else:
            obj._extra_select_cache = self._extra_select_cache.copy()
        obj.extra_tables = self.extra_tables
        obj.extra_order_by = self.extra_order_by
        obj.deferred_loading = copy.deepcopy(self.deferred_loading, memo=memo)
        # used_aliases survives a clone only while sticky filtering is on.
        if self.filter_is_sticky and self.used_aliases:
            obj.used_aliases = self.used_aliases.copy()
        else:
            obj.used_aliases = set()
        obj.filter_is_sticky = False

        # Apply caller-requested attribute overrides last.
        obj.__dict__.update(kwargs)
        if hasattr(obj, '_setup_query'):
            obj._setup_query()
        return obj
def convert_values(self, value, field, connection):
    """
    Coerce a raw database-returned value to a backend-consistent type.

    The default implementation simply delegates to the backend's
    DatabaseOperations; Query subclasses for specific backends may
    override this to apply their own conversions.
    """
    backend_ops = connection.ops
    return backend_ops.convert_values(value, field)
def resolve_aggregate(self, value, aggregate, connection):
    """
    Coerce an aggregate value returned by the database into a sensible,
    backend-independent Python type.

    Some backends are prone to handing back Decimal or long where a
    plain int/float is expected, so ordinal aggregates (e.g. Count) are
    forced to int and computed ones (e.g. Avg) to float.
    """
    if value is None:
        # A missing ordinal aggregate still counts as zero; anything
        # else passes through as None.
        return 0 if aggregate.is_ordinal else value
    if aggregate.is_ordinal:
        return int(value)
    if aggregate.is_computed:
        return float(value)
    # Otherwise the result type depends on the underlying field.
    return self.convert_values(value, aggregate.field, connection)
def get_aggregation(self, using):
    """
    Returns the dictionary with the values of the existing aggregations.

    Executes the aggregate query against the 'using' database alias and
    maps each aggregate alias to its resolved Python value.
    """
    if not self.aggregate_select:
        return {}
    # If there is a group by clause, aggregating does not add useful
    # information but retrieves only the first row. Aggregate
    # over the subquery instead.
    if self.group_by is not None:
        from django.db.models.sql.subqueries import AggregateQuery
        query = AggregateQuery(self.model)
        obj = self.clone()
        # Remove any aggregates marked for reduction from the subquery
        # and move them to the outer AggregateQuery.
        for alias, aggregate in self.aggregate_select.items():
            if aggregate.is_summary:
                query.aggregate_select[alias] = aggregate
                del obj.aggregate_select[alias]
        try:
            query.add_subquery(obj, using)
        except EmptyResultSet:
            # The inner query cannot match any rows, so every aggregate
            # resolves to None.
            return dict(
                (alias, None)
                for alias in query.aggregate_select
            )
    else:
        query = self
        # Aggregating over the whole query: plain select columns and
        # extra(select=...) entries are irrelevant to the result.
        self.select = []
        self.default_cols = False
        self.extra = {}
        self.remove_inherited_models()
    # Ordering, slicing and row locking have no effect on aggregate
    # values; clear them so the generated SQL stays minimal.
    query.clear_ordering(True)
    query.clear_limits()
    query.select_for_update = False
    query.select_related = False
    query.related_select_cols = []
    query.related_select_fields = []
    result = query.get_compiler(using).execute_sql(SINGLE)
    if result is None:
        # No row came back at all; substitute None for each aggregate.
        result = [None for q in query.aggregate_select.items()]
    return dict([
        (alias, self.resolve_aggregate(val, aggregate, connection=connections[using]))
        for (alias, aggregate), val
        in zip(query.aggregate_select.items(), result)
    ])
def get_count(self, using):
    """
    Performs a COUNT() query using the current filter constraints,
    honouring any offset/limit slicing applied to the query.
    """
    counted = self.clone()
    needs_subquery = (
        len(self.select) > 1
        or self.aggregate_select
        or (self.distinct and self.distinct_fields)
    )
    if needs_subquery:
        # The query already selects specific columns (or is distinct /
        # aggregated), so the COUNT must wrap it as a subquery.
        from django.db.models.sql.subqueries import AggregateQuery
        inner = counted
        inner.clear_ordering(True)
        inner.clear_limits()
        counted = AggregateQuery(inner.model)
        try:
            counted.add_subquery(inner, using=using)
        except EmptyResultSet:
            # add_subquery evaluates the query; EmptyResultSet means no
            # rows can possibly match, so the count is trivially zero.
            return 0
    counted.add_count_column()
    number = counted.get_aggregation(using=using)[None]
    # LIMIT/OFFSET in SQL does not change what COUNT() reports, so the
    # slice bounds are applied arithmetically here instead.
    number = max(0, number - self.low_mark)
    if self.high_mark is not None:
        number = min(number, self.high_mark - self.low_mark)
    return number
def has_results(self, using):
    """
    Cheaply check whether this query would return any rows at all.

    Works on a clone stripped down to the equivalent of
    ``SELECT 1 ... LIMIT 1`` so no real column data is fetched.
    """
    probe = self.clone()
    # Select the constant 1 instead of any model columns.
    probe.add_extra({'a': 1}, None, None, None, None, None)
    probe.select = []
    probe.select_fields = []
    probe.default_cols = False
    probe.select_related = False
    probe.set_extra_mask(('a',))
    probe.set_aggregate_mask(())
    # Ordering is irrelevant for existence, and one row is enough.
    probe.clear_ordering(True)
    probe.set_limits(high=1)
    return bool(probe.get_compiler(using=using).execute_sql(SINGLE))
def combine(self, rhs, connector):
    """
    Merge the 'rhs' query into the current one (with any 'rhs' effects
    being applied *after* (that is, "to the right of") anything in the
    current query. 'rhs' is not modified during a call to this function.

    The 'connector' parameter describes how to connect filters from the
    'rhs' query.
    """
    assert self.model == rhs.model, \
            "Cannot combine queries on two different base models."
    assert self.can_filter(), \
            "Cannot combine queries once a slice has been taken."
    assert self.distinct == rhs.distinct, \
            "Cannot combine a unique query with a non-unique query."
    assert self.distinct_fields == rhs.distinct_fields, \
            "Cannot combine queries with different distinct fields."
    self.remove_inherited_models()
    # Work out how to relabel the rhs aliases, if necessary.
    change_map = {}
    used = set()
    conjunction = (connector == AND)
    # Add the joins in the rhs query into the new query.
    first = True
    for alias in rhs.tables:
        if not rhs.alias_refcount[alias]:
            # An unused alias.
            continue
        table, _, join_type, lhs, lhs_col, col, _ = rhs.alias_map[alias]
        promote = join_type == self.LOUTER
        # If the left side of the join was already relabeled, use the
        # updated alias.
        lhs = change_map.get(lhs, lhs)
        new_alias = self.join((lhs, table, lhs_col, col),
                conjunction and not first, used, promote, not conjunction)
        used.add(new_alias)
        change_map[alias] = new_alias
        first = False
    # So that we don't exclude valid results in an "or" query combination,
    # all joins exclusive to either the lhs or the rhs must be converted
    # to an outer join.
    if not conjunction:
        l_tables = set(self.tables)
        r_tables = set(rhs.tables)
        # Update r_tables aliases.
        for alias in change_map:
            if alias in r_tables:
                # r_tables may contain entries that have a refcount of 0
                # if the query has references to a table that can be
                # trimmed because only the foreign key is used.
                # We only need to fix the aliases for the tables that
                # actually have aliases.
                if rhs.alias_refcount[alias]:
                    r_tables.remove(alias)
                    r_tables.add(change_map[alias])
        # Find aliases that are exclusive to rhs or lhs.
        # These are promoted to outer joins.
        outer_tables = (l_tables | r_tables) - (l_tables & r_tables)
        for alias in outer_tables:
            # Again, some of the tables won't have aliases due to
            # the trimming of unnecessary tables.
            if self.alias_refcount.get(alias) or rhs.alias_refcount.get(alias):
                self.promote_alias(alias, True)
    # Now relabel a copy of the rhs where-clause and add it to the current
    # one.
    if rhs.where:
        w = copy.deepcopy(rhs.where)
        w.relabel_aliases(change_map)
        if not self.where:
            # Since 'self' matches everything, add an explicit "include
            # everything" where-constraint so that connections between the
            # where clauses won't exclude valid results.
            self.where.add(EverythingNode(), AND)
    elif self.where:
        # rhs has an empty where clause. Make it match everything (see
        # above) so the connector doesn't drop self's constraints.
        w = self.where_class()
        w.add(EverythingNode(), AND)
    else:
        # Both where clauses are empty.
        w = self.where_class()
    self.where.add(w, connector)
    # Selection columns and extra extensions are those provided by 'rhs'.
    self.select = []
    for col in rhs.select:
        if isinstance(col, (list, tuple)):
            # (alias, column) pair: just remap the alias.
            self.select.append((change_map.get(col[0], col[0]), col[1]))
        else:
            # Column object: deep-copy so rhs stays unmodified.
            item = copy.deepcopy(col)
            item.relabel_aliases(change_map)
            self.select.append(item)
    self.select_fields = rhs.select_fields[:]
    if connector == OR:
        # It would be nice to be able to handle this, but the queries don't
        # really make sense (or return consistent value sets). Not worth
        # the extra complexity when you can write a real query instead.
        if self.extra and rhs.extra:
            raise ValueError("When merging querysets using 'or', you "
                    "cannot have extra(select=...) on both sides.")
    self.extra.update(rhs.extra)
    extra_select_mask = set()
    if self.extra_select_mask is not None:
        extra_select_mask.update(self.extra_select_mask)
    if rhs.extra_select_mask is not None:
        extra_select_mask.update(rhs.extra_select_mask)
    if extra_select_mask:
        self.set_extra_mask(extra_select_mask)
    self.extra_tables += rhs.extra_tables
    # Ordering uses the 'rhs' ordering, unless it has none, in which case
    # the current ordering is used.
    self.order_by = rhs.order_by and rhs.order_by[:] or self.order_by
    self.extra_order_by = rhs.extra_order_by or self.extra_order_by
def deferred_to_data(self, target, callback):
    """
    Converts the self.deferred_loading data structure to an alternate data
    structure, describing the field that *will* be loaded. This is used to
    compute the columns to select from the database and also by the
    QuerySet class to work out which fields are being initialised on each
    model. Models that have all their fields included aren't mentioned in
    the result, only those that have field restrictions in place.

    The "target" parameter is the instance that is populated (in place).
    The "callback" is a function that is called whenever a (model, field)
    pair need to be added to "target". It accepts three parameters:
    "target", and the model and list of fields being added for that model.
    """
    field_names, defer = self.deferred_loading
    if not field_names:
        return
    orig_opts = self.model._meta
    seen = {}
    # The primary key of the root model must always be retrieved.
    must_include = {orig_opts.concrete_model: set([orig_opts.pk])}
    for field_name in field_names:
        parts = field_name.split(LOOKUP_SEP)
        cur_model = self.model
        opts = orig_opts
        # Walk the relation chain (all parts but the final field name).
        for name in parts[:-1]:
            old_model = cur_model
            source = opts.get_field_by_name(name)[0]
            cur_model = source.rel.to
            opts = cur_model._meta
            # Even if we're "just passing through" this model, we must add
            # both the current model's pk and the related reference field
            # to the things we select.
            must_include[old_model].add(source)
            add_to_dict(must_include, cur_model, opts.pk)
        field, model, _, _ = opts.get_field_by_name(parts[-1])
        if model is None:
            # The field lives directly on cur_model (not a parent).
            model = cur_model
        add_to_dict(seen, model, field)
    if defer:
        # We need to load all fields for each model, except those that
        # appear in "seen" (for all models that appear in "seen"). The only
        # slight complexity here is handling fields that exist on parent
        # models.
        workset = {}
        for model, values in seen.iteritems():
            for field, m in model._meta.get_fields_with_model():
                if field in values:
                    continue
                add_to_dict(workset, m or model, field)
        for model, values in must_include.iteritems():
            # If we haven't included a model in workset, we don't add the
            # corresponding must_include fields for that model, since an
            # empty set means "include all fields". That's why there's no
            # "else" branch here.
            if model in workset:
                workset[model].update(values)
        for model, values in workset.iteritems():
            callback(target, model, values)
    else:
        for model, values in must_include.iteritems():
            if model in seen:
                seen[model].update(values)
            else:
                # As we've passed through this model, but not explicitly
                # included any fields, we have to make sure it's mentioned
                # so that only the "must include" fields are pulled in.
                seen[model] = values
        # Now ensure that every model in the inheritance chain is mentioned
        # in the parent list. Again, it must be mentioned to ensure that
        # only "must include" fields are pulled in.
        for model in orig_opts.get_parent_list():
            if model not in seen:
                seen[model] = set()
        for model, values in seen.iteritems():
            callback(target, model, values)
def deferred_to_columns_cb(self, target, model, fields):
    """
    Callback used by deferred_to_columns(): record each field's column
    name under its model's database table. 'target' maps table name to
    a set of column names and is updated in place.
    """
    table = model._meta.db_table
    columns = target.setdefault(table, set())
    for field in fields:
        columns.add(field.column)
def table_alias(self, table_name, create=False):
    """
    Return a (alias, created) pair for 'table_name'.

    Unless 'create' is True, the most recently created alias for the
    table (if any) is reused; otherwise a fresh alias is made up and
    registered in the query's bookkeeping structures.
    """
    existing = self.table_map.get(table_name)
    if existing and not create:
        # Reuse the first alias recorded for this table.
        reused = existing[0]
        self.alias_refcount[reused] += 1
        return reused, False
    # A new alias is required.
    if existing:
        # Later occurrences get generated names (e.g. "T3").
        alias = '%s%d' % (self.alias_prefix, len(self.alias_map) + 1)
        existing.append(alias)
    else:
        # The first occurrence of a table is aliased by its own name.
        alias = table_name
        self.table_map[alias] = [alias]
    self.alias_refcount[alias] = 1
    self.tables.append(alias)
    return alias, True
def ref_alias(self, alias):
    """Record one more use of 'alias' in this query."""
    refcounts = self.alias_refcount
    refcounts[alias] = refcounts[alias] + 1
def unref_alias(self, alias, amount=1):
    """Drop 'amount' recorded uses of 'alias' from its refcount."""
    refcounts = self.alias_refcount
    refcounts[alias] = refcounts[alias] - amount
def promote_alias(self, alias, unconditional=False):
    """
    Turn the join behind 'alias' into a LEFT OUTER join when its left
    side may contain NULLs. By default only nullable joins are changed;
    pass unconditional=True to force promotion.

    Returns True if this call actually changed the join type.
    """
    info = self.alias_map[alias]
    if info.join_type == self.LOUTER:
        # Already an outer join; nothing to do.
        return False
    if not (unconditional or info.nullable):
        # Not eligible for promotion.
        return False
    self.alias_map[alias] = info._replace(join_type=self.LOUTER)
    return True
def promote_alias_chain(self, chain, must_promote=False):
    """
    Promote joins along 'chain': the first nullable join and everything
    after it become outer joins. If 'must_promote' is True, every alias
    in the chain is promoted unconditionally.
    """
    for link in chain:
        promoted = self.promote_alias(link, must_promote)
        if promoted:
            # Everything downstream of a promoted join must follow suit.
            must_promote = True
def reset_refcounts(self, to_counts):
    """
    Lower every alias refcount so that it matches the snapshot given in
    'to_counts'; aliases missing from the snapshot go down to zero.
    """
    # Iterate over a copy: unref_alias mutates alias_refcount.
    snapshot = self.alias_refcount.copy()
    for alias, current in snapshot.items():
        self.unref_alias(alias, current - to_counts.get(alias, 0))
def promote_unused_aliases(self, initial_refcounts, used_aliases):
    """
    Given a "before" snapshot of alias_refcount ('initial_refcounts')
    and a collection of aliases that may have been touched, promote to
    outer joins every alias that was created since the snapshot or whose
    refcount has not changed, plus any of its children in the alias tree.
    """
    # FIXME: There's some (a lot of!) overlap with the similar OR promotion
    # in add_filter(). It's not quite identical, but is very similar. So
    # pulling out the common bits is something for later.
    promotion_state = {}
    for alias in self.tables:
        if alias not in used_aliases:
            continue
        is_new = alias not in initial_refcounts
        unchanged = (not is_new and
                     self.alias_refcount[alias] == initial_refcounts[alias])
        if not (is_new or unchanged):
            continue
        parent = self.alias_map[alias].lhs_alias
        # Children of a promoted alias must be promoted as well.
        forced = promotion_state.get(parent, False)
        did_promote = self.promote_alias(alias, forced)
        promotion_state[alias] = forced or did_promote
def change_aliases(self, change_map):
    """
    Changes the aliases in change_map (which maps old-alias -> new-alias),
    relabelling any references to them in select columns and the where
    clause.
    """
    # Old and new names must not overlap, otherwise a rename could be
    # applied twice (a -> b -> c) as the loop below progresses.
    assert set(change_map.keys()).intersection(set(change_map.values())) == set()
    # 1. Update references in "select" (normal columns plus aliases),
    # "group by", "where" and "having".
    self.where.relabel_aliases(change_map)
    self.having.relabel_aliases(change_map)
    for columns in [self.select, self.group_by or []]:
        for pos, col in enumerate(columns):
            if isinstance(col, (list, tuple)):
                # Plain (alias, column) pair: swap in the new alias.
                old_alias = col[0]
                columns[pos] = (change_map.get(old_alias, old_alias), col[1])
            else:
                # Column object that knows how to relabel itself.
                col.relabel_aliases(change_map)
    for mapping in [self.aggregates]:
        for key, col in mapping.items():
            if isinstance(col, (list, tuple)):
                old_alias = col[0]
                mapping[key] = (change_map.get(old_alias, old_alias), col[1])
            else:
                col.relabel_aliases(change_map)
    # 2. Rename the alias in the internal table/alias datastructures.
    for k, aliases in self.join_map.items():
        aliases = tuple([change_map.get(a, a) for a in aliases])
        self.join_map[k] = aliases
    for old_alias, new_alias in change_map.iteritems():
        alias_data = self.alias_map[old_alias]
        alias_data = alias_data._replace(rhs_alias=new_alias)
        self.alias_refcount[new_alias] = self.alias_refcount[old_alias]
        del self.alias_refcount[old_alias]
        self.alias_map[new_alias] = alias_data
        del self.alias_map[old_alias]
        # Fix the first matching entry in the per-table alias list...
        table_aliases = self.table_map[alias_data.table_name]
        for pos, alias in enumerate(table_aliases):
            if alias == old_alias:
                table_aliases[pos] = new_alias
                break
        # ...and in the ordered table list.
        for pos, alias in enumerate(self.tables):
            if alias == old_alias:
                self.tables[pos] = new_alias
                break
    for key, alias in self.included_inherited_models.items():
        if alias in change_map:
            self.included_inherited_models[key] = change_map[alias]
    # 3. Update any joins that refer to the old alias.
    for alias, data in self.alias_map.iteritems():
        lhs = data.lhs_alias
        if lhs in change_map:
            data = data._replace(lhs_alias=change_map[lhs])
            self.alias_map[alias] = data
def bump_prefix(self, exceptions=()):
    """
    Move the alias prefix on to the next letter of the alphabet and
    relabel all existing aliases to use it. Afterwards, even tables that
    previously had no alias get one (mostly useful for nested queries,
    where the outer query keeps the plain table names).

    Subclasses that create their own prefix should override this method
    to produce a similar result (a new prefix plus relabelled aliases).
    Aliases listed in 'exceptions' are left untouched.
    """
    next_prefix = chr(ord(self.alias_prefix) + 1)
    # Running past 'Z' would mean an absurd number of nested bumps.
    assert next_prefix <= 'Z'
    self.alias_prefix = next_prefix
    change_map = SortedDict()
    for pos, alias in enumerate(self.tables):
        if alias in exceptions:
            continue
        replacement = '%s%d' % (next_prefix, pos)
        change_map[alias] = replacement
        self.tables[pos] = replacement
    self.change_aliases(change_map)
def get_initial_alias(self):
    """
    Return the query's root alias, creating the base-table entry first
    if no tables exist yet, and bump its reference count.
    """
    if not self.tables:
        # Empty query: start with an unjoined entry for the base table.
        return self.join((None, self.model._meta.db_table, None, None))
    root = self.tables[0]
    self.ref_alias(root)
    return root
def count_active_tables(self):
    """
    Returns the number of tables in this query with a non-zero reference
    count. Note that after execution, the reference counts are zeroed, so
    tables added in compiler will not be seen by this method.
    """
    # sum() over a generator avoids materialising a throwaway list, and
    # .values() (unlike the Python 2-only .itervalues()) keeps this
    # method working on both Python 2 and Python 3.
    return sum(1 for count in self.alias_refcount.values() if count)
def join(self, connection, always_create=False, exclusions=(),
        promote=False, outer_if_first=False, nullable=False, reuse=None):
    """
    Returns an alias for the join in 'connection', either reusing an
    existing alias for that join or creating a new one. 'connection' is a
    tuple (lhs, table, lhs_col, col) where 'lhs' is either an existing
    table alias or a table name. The join correspods to the SQL equivalent
    of::

        lhs.lhs_col = table.col

    If 'always_create' is True and 'reuse' is None, a new alias is always
    created, regardless of whether one already exists or not. If
    'always_create' is True and 'reuse' is a set, an alias in 'reuse' that
    matches the connection will be returned, if possible. If
    'always_create' is False, the first existing alias that matches the
    'connection' is returned, if any. Otherwise a new join is created.

    If 'exclusions' is specified, it is something satisfying the container
    protocol ("foo in exclusions" must work) and specifies a list of
    aliases that should not be returned, even if they satisfy the join.

    If 'promote' is True, the join type for the alias will be LOUTER (if
    the alias previously existed, the join type will be promoted from INNER
    to LOUTER, if necessary).

    If 'outer_if_first' is True and a new join is created, it will have the
    LOUTER join type. This is used when joining certain types of querysets
    and Q-objects together.

    If 'nullable' is True, the join can potentially involve NULL values and
    is a candidate for promotion (to "left outer") when combining querysets.
    """
    lhs, table, lhs_col, col = connection
    if lhs in self.alias_map:
        # 'lhs' is itself an alias; resolve it to its real table name.
        lhs_table = self.alias_map[lhs].table_name
    else:
        lhs_table = lhs
    if reuse and always_create and table in self.table_map:
        # Convert the 'reuse' to case to be "exclude everything but the
        # reusable set, minus exclusions, for this table".
        exclusions = set(self.table_map[table]).difference(reuse).union(set(exclusions))
        always_create = False
    t_ident = (lhs_table, table, lhs_col, col)
    if not always_create:
        # Try to reuse an existing alias for an identical join.
        for alias in self.join_map.get(t_ident, ()):
            if alias not in exclusions:
                if lhs_table and not self.alias_refcount[self.alias_map[alias].lhs_alias]:
                    # The LHS of this join tuple is no longer part of the
                    # query, so skip this possibility.
                    continue
                if self.alias_map[alias].lhs_alias != lhs:
                    continue
                self.ref_alias(alias)
                if promote:
                    self.promote_alias(alias)
                return alias
    # No reuse is possible, so we need a new alias.
    alias, _ = self.table_alias(table, True)
    if not lhs:
        # Not all tables need to be joined to anything. No join type
        # means the later columns are ignored.
        join_type = None
    elif promote or outer_if_first:
        join_type = self.LOUTER
    else:
        join_type = self.INNER
    join = JoinInfo(table, alias, join_type, lhs, lhs_col, col, nullable)
    self.alias_map[alias] = join
    # Register the new alias so later identical joins can reuse it.
    if t_ident in self.join_map:
        self.join_map[t_ident] += (alias,)
    else:
        self.join_map[t_ident] = (alias,)
    return alias
def setup_inherited_models(self):
"""
If the model that is the basis for this QuerySet inherits other models,
we need to ensure that those other models have their tables included in
the query.
We do this as a separate step so that subclasses know which
tables are going to be active in the query, without needing to compute
all the select columns (this method is called from pre_sql_setup(),
whereas column determination is a later part, and side-effect, of
as_sql()).
"""
# Skip all proxy models
opts = self.model._meta.concrete_model._meta
root_alias = self.tables[0]
seen = {None: root_alias}
for field, model in opts.get_fields_with_model():
if model not in seen:
link_field = opts.get_ancestor_link(model)
seen[model] = self.join((root_alias, model._meta.db_table,
link_field.column, model._meta.pk.column))
self.included_inherited_models = seen
def remove_inherited_models(self):
    """
    Undo the effects of setup_inherited_models(): release the alias
    refcounts it added and forget the inherited-model mapping. Should be
    called whenever select columns (self.select) are set explicitly.
    """
    for model, alias in self.included_inherited_models.items():
        # The None key marks the root alias, which setup_inherited_models
        # did not ref; skip it.
        if model:
            self.unref_alias(alias)
    self.included_inherited_models = {}
def need_force_having(self, q_object):
    """
    Return True if any leaf of this Q-object tree refers to an aggregate
    annotation, meaning the whole tree must go into the HAVING clause.
    """
    for child in q_object.children:
        if isinstance(child, Node):
            # Nested Q-object: recurse.
            if self.need_force_having(child):
                return True
        elif child[0].split(LOOKUP_SEP)[0] in self.aggregates:
            # A (lookup, value) leaf whose first lookup component names
            # an aggregate annotation.
            return True
    return False
def add_aggregate(self, aggregate, model, alias, is_summary):
    """
    Adds a single aggregate expression to the Query.

    'alias' is the name the result will be exposed under; 'is_summary'
    distinguishes aggregate() calls from annotate() calls.
    """
    opts = model._meta
    field_list = aggregate.lookup.split(LOOKUP_SEP)
    if len(field_list) == 1 and aggregate.lookup in self.aggregates:
        # Aggregate is over an annotation
        field_name = field_list[0]
        col = field_name
        source = self.aggregates[field_name]
        if not is_summary:
            # Annotating over an annotation is not supported.
            raise FieldError("Cannot compute %s('%s'): '%s' is an aggregate" % (
                aggregate.name, field_name, field_name))
    elif ((len(field_list) > 1) or
        (field_list[0] not in [i.name for i in opts.fields]) or
        self.group_by is None or
        not is_summary):
        # If:
        #   - the field descriptor has more than one part (foo__bar), or
        #   - the field descriptor is referencing an m2m/m2o field, or
        #   - this is a reference to a model field (possibly inherited), or
        #   - this is an annotation over a model field
        # then we need to explore the joins that are required.
        field, source, opts, join_list, last, _ = self.setup_joins(
            field_list, opts, self.get_initial_alias(), False)
        # Process the join chain to see if it can be trimmed
        col, _, join_list = self.trim_joins(source, join_list, last, False)
        # If the aggregate references a model or field that requires a join,
        # those joins must be LEFT OUTER - empty join rows must be returned
        # in order for zeros to be returned for those aggregates.
        for column_alias in join_list:
            self.promote_alias(column_alias, unconditional=True)
        col = (join_list[-1], col)
    else:
        # The simplest cases. No joins required -
        # just reference the provided column alias.
        field_name = field_list[0]
        source = opts.get_field(field_name)
        col = field_name
    # Add the aggregate to the query
    aggregate.add_to_query(self, alias, col=col, source=source, is_summary=is_summary)
def add_filter(self, filter_expr, connector=AND, negate=False, trim=False,
        can_reuse=None, process_extras=True, force_having=False):
    """
    Add a single filter to the query. The 'filter_expr' is a pair:
    (filter_string, value). E.g. ('name__contains', 'fred')

    If 'negate' is True, this is an exclude() filter. It's important to
    note that this method does not negate anything in the where-clause
    object when inserting the filter constraints. This is because negated
    filters often require multiple calls to add_filter() and the negation
    should only happen once. So the caller is responsible for this (the
    caller will normally be add_q(), so that as an example).

    If 'trim' is True, we automatically trim the final join group (used
    internally when constructing nested queries).

    If 'can_reuse' is a set, we are processing a component of a
    multi-component filter (e.g. filter(Q1, Q2)). In this case, 'can_reuse'
    will be a set of table aliases that can be reused in this filter, even
    if we would otherwise force the creation of new aliases for a join
    (needed for nested Q-filters). The set is updated by this method.

    If 'process_extras' is set, any extra filters returned from the table
    joining process will be processed. This parameter is set to False
    during the processing of extra filters to avoid infinite recursion.
    """
    arg, value = filter_expr
    parts = arg.split(LOOKUP_SEP)
    if not parts:
        raise FieldError("Cannot parse keyword query %r" % arg)
    # Work out the lookup type and remove it from the end of 'parts',
    # if necessary.
    lookup_type = 'exact' # Default lookup type
    num_parts = len(parts)
    if (len(parts) > 1 and parts[-1] in self.query_terms
        and arg not in self.aggregates):
        # Traverse the lookup query to distinguish related fields from
        # lookup types.
        lookup_model = self.model
        for counter, field_name in enumerate(parts):
            try:
                lookup_field = lookup_model._meta.get_field(field_name)
            except FieldDoesNotExist:
                # Not a field. Bail out.
                lookup_type = parts.pop()
                break
            # Unless we're at the end of the list of lookups, let's attempt
            # to continue traversing relations.
            if (counter + 1) < num_parts:
                try:
                    lookup_model = lookup_field.rel.to
                except AttributeError:
                    # Not a related field. Bail out.
                    lookup_type = parts.pop()
                    break
    # By default, this is a WHERE clause. If an aggregate is referenced
    # in the value, the filter will be promoted to a HAVING
    having_clause = False
    # Interpret '__exact=None' as the sql 'is NULL'; otherwise, reject all
    # uses of None as a query value.
    if value is None:
        if lookup_type != 'exact':
            raise ValueError("Cannot use None as a query value")
        lookup_type = 'isnull'
        value = True
    elif callable(value):
        # Lazily-evaluated values (e.g. callables) are resolved now.
        value = value()
    elif isinstance(value, ExpressionNode):
        # If value is a query expression, evaluate it
        value = SQLEvaluator(value, self)
        having_clause = value.contains_aggregate
    # A filter directly against an aggregate annotation always goes
    # into HAVING; no joins are needed.
    for alias, aggregate in self.aggregates.items():
        if alias in (parts[0], LOOKUP_SEP.join(parts)):
            entry = self.where_class()
            entry.add((aggregate, lookup_type, value), AND)
            if negate:
                entry.negate()
            self.having.add(entry, connector)
            return
    opts = self.get_meta()
    alias = self.get_initial_alias()
    allow_many = trim or not negate
    try:
        field, target, opts, join_list, last, extra_filters = self.setup_joins(
            parts, opts, alias, True, allow_many, allow_explicit_fk=True,
            can_reuse=can_reuse, negate=negate,
            process_extras=process_extras)
    except MultiJoin as e:
        # A disallowed multi-valued relation in an exclude(): handle it
        # by rewriting the filter as a subquery exclusion.
        self.split_exclude(filter_expr, LOOKUP_SEP.join(parts[:e.level]),
                can_reuse)
        return
    table_promote = False
    join_promote = False
    if (lookup_type == 'isnull' and value is True and not negate and
            len(join_list) > 1):
        # If the comparison is against NULL, we may need to use some left
        # outer joins when creating the join chain. This is only done when
        # needed, as it's less efficient at the database level.
        self.promote_alias_chain(join_list)
        join_promote = True
    # Process the join list to see if we can remove any inner joins from
    # the far end (fewer tables in a query is better).
    nonnull_comparison = (lookup_type == 'isnull' and value is False)
    col, alias, join_list = self.trim_joins(target, join_list, last, trim,
            nonnull_comparison)
    if connector == OR:
        # Some joins may need to be promoted when adding a new filter to a
        # disjunction. We walk the list of new joins and where it diverges
        # from any previous joins (ref count is 1 in the table list), we
        # make the new additions (and any existing ones not used in the new
        # join list) an outer join.
        join_it = iter(join_list)
        table_it = iter(self.tables)
        next(join_it), next(table_it)
        unconditional = False
        for join in join_it:
            table = next(table_it)
            # Once we hit an outer join, all subsequent joins must
            # also be promoted, regardless of whether they have been
            # promoted as a result of this pass through the tables.
            unconditional = (unconditional or
                self.alias_map[join].join_type == self.LOUTER)
            if join == table and self.alias_refcount[join] > 1:
                # We have more than one reference to this join table.
                # This means that we are dealing with two different query
                # subtrees, so we don't need to do any join promotion.
                continue
            join_promote = join_promote or self.promote_alias(join, unconditional)
            if table != join:
                table_promote = self.promote_alias(table)
                # We only get here if we have found a table that exists
                # in the join list, but isn't on the original tables list.
                # This means we've reached the point where we only have
                # new tables, so we can break out of this promotion loop.
                break
        self.promote_alias_chain(join_it, join_promote)
        self.promote_alias_chain(table_it, table_promote or join_promote)
    if having_clause or force_having:
        # HAVING constraints also force the compared column into GROUP BY.
        if (alias, col) not in self.group_by:
            self.group_by.append((alias, col))
        self.having.add((Constraint(alias, col, field), lookup_type, value),
            connector)
    else:
        self.where.add((Constraint(alias, col, field), lookup_type, value),
            connector)
    if negate:
        self.promote_alias_chain(join_list)
        if lookup_type != 'isnull':
            if len(join_list) > 1:
                for alias in join_list:
                    if self.alias_map[alias].join_type == self.LOUTER:
                        j_col = self.alias_map[alias].rhs_join_col
                        entry = self.where_class()
                        entry.add(
                            (Constraint(alias, j_col, None), 'isnull', True),
                            AND
                        )
                        entry.negate()
                        self.where.add(entry, AND)
                        break
            if self.is_nullable(field):
                # In SQL NULL = anyvalue returns unknown, and NOT unknown
                # is still unknown. However, in Python None = anyvalue is False
                # (and not False is True...), and we want to return this Python's
                # view of None handling. So we need to specifically exclude the
                # NULL values, and because we are inside NOT branch they will
                # be included in the final resultset. We are essentially creating
                # SQL like this here: NOT (col IS NOT NULL), where the first NOT
                # is added in upper layers of the code.
                self.where.add((Constraint(alias, col, None), 'isnull', False), AND)
    if can_reuse is not None:
        can_reuse.update(join_list)
    if process_extras:
        for filter in extra_filters:
            self.add_filter(filter, negate=negate, can_reuse=can_reuse,
                    process_extras=False)
def add_q(self, q_object, used_aliases=None, force_having=False):
    """
    Adds a Q-object to the current filter.

    Can also be used to add anything that has an 'add_to_query()' method.
    """
    if used_aliases is None:
        used_aliases = self.used_aliases
    if hasattr(q_object, 'add_to_query'):
        # Complex custom objects are responsible for adding themselves.
        q_object.add_to_query(self, used_aliases)
    else:
        # Wrap a multi-child OR tree in an AND subtree so it combines
        # correctly with the existing where clause.
        if self.where and q_object.connector != AND and len(q_object) > 1:
            self.where.start_subtree(AND)
            subtree = True
        else:
            subtree = False
        connector = AND
        if q_object.connector == OR and not force_having:
            # Any aggregate reference in the tree forces HAVING for all.
            force_having = self.need_force_having(q_object)
        for child in q_object.children:
            if connector == OR:
                # Snapshot refcounts so newly-added aliases can be
                # detected and promoted after this child is processed.
                refcounts_before = self.alias_refcount.copy()
            if force_having:
                self.having.start_subtree(connector)
            else:
                self.where.start_subtree(connector)
            if isinstance(child, Node):
                self.add_q(child, used_aliases, force_having=force_having)
            else:
                self.add_filter(child, connector, q_object.negated,
                        can_reuse=used_aliases, force_having=force_having)
            if force_having:
                self.having.end_subtree()
            else:
                self.where.end_subtree()
            if connector == OR:
                # Aliases that were newly added or not used at all need to
                # be promoted to outer joins if they are nullable relations.
                # (they shouldn't turn the whole conditional into the empty
                # set just because they don't match anything).
                self.promote_unused_aliases(refcounts_before, used_aliases)
            connector = q_object.connector
        if q_object.negated:
            self.where.negate()
        if subtree:
            self.where.end_subtree()
    if self.filter_is_sticky:
        self.used_aliases = used_aliases
def setup_joins(self, names, opts, alias, dupe_multis, allow_many=True,
        allow_explicit_fk=False, can_reuse=None, negate=False,
        process_extras=True):
    """
    Compute the necessary table joins for the passage through the fields
    given in 'names'. 'opts' is the Options class for the current model
    (which gives the table we are joining to), 'alias' is the alias for the
    table we are joining to. If dupe_multis is True, any many-to-many or
    many-to-one joins will always create a new alias (necessary for
    disjunctive filters). If can_reuse is not None, it's a list of aliases
    that can be reused in these joins (nothing else can be reused in this
    case). Finally, 'negate' is used in the same sense as for add_filter()
    -- it indicates an exclude() filter, or something similar. It is only
    passed in here so that it can be passed to a field's extra_filter() for
    customized behavior.

    Returns the final field involved in the join, the target database
    column (used for any 'where' constraint), the final 'opts' value and the
    list of tables joined.
    """
    joins = [alias]
    # 'last' records the offset in 'joins' at which each name component
    # started; trim_joins() uses it to pop whole components at a time.
    last = [0]
    dupe_set = set()
    # Aliases that must not be reused for subsequent joins in this chain.
    exclusions = set()
    extra_filters = []
    int_alias = None
    for pos, name in enumerate(names):
        if int_alias is not None:
            exclusions.add(int_alias)
        exclusions.add(alias)
        last.append(len(joins))
        if name == 'pk':
            name = opts.pk.name
        try:
            field, model, direct, m2m = opts.get_field_by_name(name)
        except FieldDoesNotExist:
            for f in opts.fields:
                if allow_explicit_fk and name == f.attname:
                    # XXX: A hack to allow foo_id to work in values() for
                    # backwards compatibility purposes. If we dropped that
                    # feature, this could be removed.
                    field, model, direct, m2m = opts.get_field_by_name(f.name)
                    break
            else:
                names = opts.get_all_field_names() + self.aggregate_select.keys()
                raise FieldError("Cannot resolve keyword %r into field. "
                        "Choices are: %s" % (name, ", ".join(names)))
        if not allow_many and (m2m or not direct):
            # Caller forbade multi-valued joins: undo our refcounts and bail.
            for alias in joins:
                self.unref_alias(alias)
            raise MultiJoin(pos + 1)
        if model:
            # The field lives on a base class of the current model.
            # Skip the chain of proxy to the concrete proxied model
            proxied_model = opts.concrete_model

            for int_model in opts.get_base_chain(model):
                if int_model is proxied_model:
                    opts = int_model._meta
                else:
                    lhs_col = opts.parents[int_model].column
                    dedupe = lhs_col in opts.duplicate_targets
                    if dedupe:
                        exclusions.update(self.dupe_avoidance.get(
                                (id(opts), lhs_col), ()))
                        dupe_set.add((opts, lhs_col))
                    opts = int_model._meta
                    alias = self.join((alias, opts.db_table, lhs_col,
                            opts.pk.column), exclusions=exclusions)
                    joins.append(alias)
                    exclusions.add(alias)
                    for (dupe_opts, dupe_col) in dupe_set:
                        self.update_dupe_avoidance(dupe_opts, dupe_col,
                                alias)
        cached_data = opts._join_cache.get(name)
        orig_opts = opts
        dupe_col = direct and field.column or field.field.column
        dedupe = dupe_col in opts.duplicate_targets
        if dupe_set or dedupe:
            if dedupe:
                dupe_set.add((opts, dupe_col))
            exclusions.update(self.dupe_avoidance.get((id(opts), dupe_col),
                    ()))
        if process_extras and hasattr(field, 'extra_filters'):
            extra_filters.extend(field.extra_filters(names, pos, negate))
        if direct:
            if m2m:
                # Many-to-many field defined on the current model.
                if cached_data:
                    (table1, from_col1, to_col1, table2, from_col2,
                            to_col2, opts, target) = cached_data
                else:
                    table1 = field.m2m_db_table()
                    from_col1 = opts.get_field_by_name(
                            field.m2m_target_field_name())[0].column
                    to_col1 = field.m2m_column_name()
                    opts = field.rel.to._meta
                    table2 = opts.db_table
                    from_col2 = field.m2m_reverse_name()
                    to_col2 = opts.get_field_by_name(
                            field.m2m_reverse_target_field_name())[0].column
                    target = opts.pk
                    orig_opts._join_cache[name] = (table1, from_col1,
                            to_col1, table2, from_col2, to_col2, opts,
                            target)

                int_alias = self.join((alias, table1, from_col1, to_col1),
                        dupe_multis, exclusions, nullable=True,
                        reuse=can_reuse)
                if int_alias == table2 and from_col2 == to_col2:
                    # Self-referential m2m through table: one join suffices.
                    joins.append(int_alias)
                    alias = int_alias
                else:
                    alias = self.join(
                            (int_alias, table2, from_col2, to_col2),
                            dupe_multis, exclusions, nullable=True,
                            reuse=can_reuse)
                    joins.extend([int_alias, alias])
            elif field.rel:
                # One-to-one or many-to-one field
                if cached_data:
                    (table, from_col, to_col, opts, target) = cached_data
                else:
                    opts = field.rel.to._meta
                    target = field.rel.get_related_field()
                    table = opts.db_table
                    from_col = field.column
                    to_col = target.column
                    orig_opts._join_cache[name] = (table, from_col, to_col,
                            opts, target)

                alias = self.join((alias, table, from_col, to_col),
                        exclusions=exclusions,
                        nullable=self.is_nullable(field))
                joins.append(alias)
            else:
                # Non-relation fields.
                target = field
                break
        else:
            # Reverse relation: 'field' here is the RelatedObject wrapper.
            orig_field = field
            field = field.field
            if m2m:
                # Many-to-many field defined on the target model.
                if cached_data:
                    (table1, from_col1, to_col1, table2, from_col2,
                            to_col2, opts, target) = cached_data
                else:
                    table1 = field.m2m_db_table()
                    from_col1 = opts.get_field_by_name(
                            field.m2m_reverse_target_field_name())[0].column
                    to_col1 = field.m2m_reverse_name()
                    opts = orig_field.opts
                    table2 = opts.db_table
                    from_col2 = field.m2m_column_name()
                    to_col2 = opts.get_field_by_name(
                            field.m2m_target_field_name())[0].column
                    target = opts.pk
                    orig_opts._join_cache[name] = (table1, from_col1,
                            to_col1, table2, from_col2, to_col2, opts,
                            target)

                int_alias = self.join((alias, table1, from_col1, to_col1),
                        dupe_multis, exclusions, nullable=True,
                        reuse=can_reuse)
                alias = self.join((int_alias, table2, from_col2, to_col2),
                        dupe_multis, exclusions, nullable=True,
                        reuse=can_reuse)
                joins.extend([int_alias, alias])
            else:
                # One-to-many field (ForeignKey defined on the target model)
                if cached_data:
                    (table, from_col, to_col, opts, target) = cached_data
                else:
                    local_field = opts.get_field_by_name(
                            field.rel.field_name)[0]
                    opts = orig_field.opts
                    table = opts.db_table
                    from_col = local_field.column
                    to_col = field.column
                    # In case of a recursive FK, use the to_field for
                    # reverse lookups as well
                    if orig_field.model is local_field.model:
                        target = opts.get_field_by_name(
                                field.rel.field_name)[0]
                    else:
                        target = opts.pk
                    orig_opts._join_cache[name] = (table, from_col, to_col,
                            opts, target)

                alias = self.join((alias, table, from_col, to_col),
                        dupe_multis, exclusions, nullable=True,
                        reuse=can_reuse)
                joins.append(alias)
        for (dupe_opts, dupe_col) in dupe_set:
            if int_alias is None:
                to_avoid = alias
            else:
                to_avoid = int_alias
            self.update_dupe_avoidance(dupe_opts, dupe_col, to_avoid)
    if pos != len(names) - 1:
        # We broke out of the loop on a non-relation field before
        # consuming all names: the remainder cannot be joined through.
        if pos == len(names) - 2:
            raise FieldError("Join on field %r not permitted. Did you misspell %r for the lookup type?" % (name, names[pos + 1]))
        else:
            raise FieldError("Join on field %r not permitted." % name)

    return field, target, opts, joins, last, extra_filters
def trim_joins(self, target, join_list, last, trim, nonnull_check=False):
    """
    Sometimes joins at the end of a multi-table sequence can be trimmed. If
    the final join is against the same column as we are comparing against,
    and is an inner join, we can go back one step in a join chain and
    compare against the LHS of the join instead (and then repeat the
    optimization). The result, potentially, involves fewer table joins.

    The 'target' parameter is the final field being joined to, 'join_list'
    is the full list of join aliases.

    The 'last' list contains offsets into 'join_list', corresponding to
    each component of the filter. Many-to-many relations, for example, add
    two tables to the join list and we want to deal with both tables the
    same way, so 'last' has an entry for the first of the two tables and
    then the table immediately after the second table, in that case.

    The 'trim' parameter forces the final piece of the join list to be
    trimmed before anything. See the documentation of add_filter() for
    details about this.

    The 'nonnull_check' parameter is True when we are using inner joins
    between tables explicitly to exclude NULL entries. In that case, the
    tables shouldn't be trimmed, because the very action of joining to them
    alters the result set.

    Returns the final active column and table alias and the new active
    join_list.
    """
    final = len(join_list)
    penultimate = last.pop()
    if penultimate == final:
        penultimate = last.pop()
    if trim and final > 1:
        # Drop the whole last filter component up front; compare against
        # the LHS column of its first join instead.
        extra = join_list[penultimate:]
        join_list = join_list[:penultimate]
        final = penultimate
        penultimate = last.pop()
        col = self.alias_map[extra[0]].lhs_join_col
        for alias in extra:
            self.unref_alias(alias)
    else:
        col = target.column
    alias = join_list[-1]
    while final > 1:
        join = self.alias_map[alias]
        # Stop once the join no longer targets the active column, isn't an
        # INNER join, or the caller relies on the join for NULL exclusion.
        if (col != join.rhs_join_col or join.join_type != self.INNER or
                nonnull_check):
            break
        self.unref_alias(alias)
        alias = join.lhs_alias
        col = join.lhs_join_col
        join_list.pop()
        final -= 1
        if final == penultimate:
            penultimate = last.pop()
    return col, alias, join_list
def update_dupe_avoidance(self, opts, col, alias):
    """
    For a column that is one of multiple pointing to the same table, record
    that 'alias' must not be reused when joining via those other columns.

    Keys in self.dupe_avoidance are (id(opts), column-name) tuples; the
    values are sets of forbidden aliases.
    """
    ident = id(opts)
    for other_col in opts.duplicate_targets[col]:
        self.dupe_avoidance.setdefault((ident, other_col), set()).add(alias)
def split_exclude(self, filter_expr, prefix, can_reuse):
    """
    When doing an exclude against any kind of N-to-many relation, we need
    to use a subquery. This method constructs the nested query, given the
    original exclude filter (filter_expr) and the portion up to the first
    N-to-many relation field.
    """
    query = Query(self.model)
    query.add_filter(filter_expr, can_reuse=can_reuse)
    query.bump_prefix()
    query.clear_ordering(True)
    query.set_start(prefix)
    # Adding extra check to make sure the selected field will not be null
    # since we are adding a IN <subquery> clause. This prevents the
    # database from tripping over IN (...,NULL,...) selects and returning
    # nothing
    alias, col = query.select[0]
    query.where.add((Constraint(alias, col, None), 'isnull', False), AND)

    self.add_filter(('%s__in' % prefix, query), negate=True, trim=True,
            can_reuse=can_reuse)

    # If there's more than one join in the inner query (before any initial
    # bits were trimmed -- which means the last active table is more than
    # two places into the alias list), we need to also handle the
    # possibility that the earlier joins don't match anything by adding a
    # comparison to NULL (e.g. in
    # Tag.objects.exclude(parent__parent__name='t1'), a tag with no parent
    # would otherwise be overlooked).
    # NOTE(review): this relies on the iteration order of alias_refcount
    # matching alias-creation order -- confirm before porting to Python 3.
    active_positions = [pos for (pos, count) in
            enumerate(query.alias_refcount.itervalues()) if count]
    if active_positions[-1] > 1:
        self.add_filter(('%s__isnull' % prefix, False), negate=True,
                trim=True, can_reuse=can_reuse)
def set_limits(self, low=None, high=None):
    """
    Adjusts the limits on the rows retrieved. We use low/high to set these,
    as it makes it more Pythonic to read and write. When the SQL query is
    created, they are converted to the appropriate offset and limit values.

    Any limits passed in here are applied relative to the existing
    constraints. So low is added to the current low value and both will be
    clamped to any existing high value.
    """
    if high is not None:
        candidate = self.low_mark + high
        if self.high_mark is not None:
            self.high_mark = min(self.high_mark, candidate)
        else:
            self.high_mark = candidate
    if low is not None:
        candidate = self.low_mark + low
        if self.high_mark is not None:
            self.low_mark = min(self.high_mark, candidate)
        else:
            self.low_mark = candidate
def clear_limits(self):
    """Reset the slicing bounds to their defaults (no OFFSET, no LIMIT)."""
    self.low_mark = 0
    self.high_mark = None
def can_filter(self):
    """
    Returns True if adding filters to this instance is still possible.

    Typically, this means no limits or offsets have been put on the results.
    """
    if self.low_mark:
        return False
    return self.high_mark is None
def clear_select_fields(self):
    """
    Clears the list of fields to select (but not extra_select columns).

    Some queryset types completely replace any existing list of select
    columns.
    """
    self.select, self.select_fields = [], []
def add_distinct_fields(self, *field_names):
    """
    Adds and resolves the given fields to the query's "distinct on" clause.
    """
    self.distinct = True
    self.distinct_fields = field_names
def add_fields(self, field_names, allow_m2m=True):
    """
    Adds the given (model) fields to the select set. The field names are
    added in the order specified.

    Raises FieldError for unknown names or disallowed multi-valued joins.
    """
    alias = self.get_initial_alias()
    opts = self.get_meta()

    try:
        for name in field_names:
            field, target, u2, joins, u3, u4 = self.setup_joins(
                    name.split(LOOKUP_SEP), opts, alias, False, allow_m2m,
                    True)
            final_alias = joins[-1]
            col = target.column
            if len(joins) > 1:
                # If the last join targets the comparison column, step back
                # one join and drop the now-unneeded alias.
                join = self.alias_map[final_alias]
                if col == join.rhs_join_col:
                    self.unref_alias(final_alias)
                    final_alias = join.lhs_alias
                    col = join.lhs_join_col
                    joins = joins[:-1]
            self.promote_alias_chain(joins[1:])
            self.select.append((final_alias, col))
            self.select_fields.append(field)
    # The except clauses rely on 'name' still holding the failing value
    # from the loop above.
    except MultiJoin:
        raise FieldError("Invalid field name: '%s'" % name)
    except FieldError:
        names = opts.get_all_field_names() + self.extra.keys() + self.aggregate_select.keys()
        names.sort()
        raise FieldError("Cannot resolve keyword %r into field. "
                "Choices are: %s" % (name, ", ".join(names)))
    self.remove_inherited_models()
def add_ordering(self, *ordering):
    """
    Adds items from the 'ordering' sequence to the query's "order by"
    clause. These items are either field names (not column names) --
    possibly with a direction prefix ('-' or '?') -- or ordinals,
    corresponding to column positions in the 'select' list.

    If 'ordering' is empty, all ordering is cleared from the query.
    """
    errors = [item for item in ordering if not ORDER_PATTERN.match(item)]
    if errors:
        raise FieldError('Invalid order_by arguments: %s' % errors)
    if ordering:
        self.order_by.extend(ordering)
    else:
        self.default_ordering = False
def clear_ordering(self, force_empty=False):
    """
    Removes any ordering settings. If 'force_empty' is True, there will be
    no ordering in the resulting query (not even the model's default).
    """
    self.order_by, self.extra_order_by = [], ()
    if force_empty:
        self.default_ordering = False
def set_group_by(self):
    """
    Expands the GROUP BY clause required by the query.

    This will usually be the set of all non-aggregate fields in the
    return data. If the database backend supports grouping by the
    primary key, and the query would be equivalent, the optimization
    will be made automatically.
    """
    # A shallow copy of the select columns, kept independent of
    # self.select so later select changes don't alter the grouping.
    self.group_by = list(self.select)
def add_count_column(self):
    """
    Converts the query to do count(...) or count(distinct(pk)) in order to
    get its size.
    """
    if not self.distinct:
        if not self.select:
            count = self.aggregates_module.Count('*', is_summary=True)
        else:
            assert len(self.select) == 1, \
                    "Cannot add count col with multiple cols in 'select': %r" % self.select
            count = self.aggregates_module.Count(self.select[0])
    else:
        opts = self.model._meta
        if not self.select:
            # No explicit select: count distinct primary keys of the base
            # table (joining it in if necessary).
            count = self.aggregates_module.Count((self.join((None, opts.db_table, None, None)), opts.pk.column),
                    is_summary=True, distinct=True)
        else:
            # Because of SQL portability issues, multi-column, distinct
            # counts need a sub-query -- see get_count() for details.
            assert len(self.select) == 1, \
                    "Cannot add count col with multiple cols in 'select'."
            count = self.aggregates_module.Count(self.select[0], distinct=True)
        # Distinct handling is done in Count(), so don't do it at this
        # level.
        self.distinct = False

    # Set only aggregate to be the count column.
    # Clear out the select cache to reflect the new unmasked aggregates.
    self.aggregates = {None: count}
    self.set_aggregate_mask(None)
    self.group_by = None
def add_select_related(self, fields):
    """
    Sets up the select_related data structure so that we only select
    certain related models (as opposed to all models, when
    self.select_related=True).

    Each field path ("foo__bar") becomes a nested-dict branch in
    self.select_related.
    """
    tree = {}
    for field in fields:
        branch = tree
        for part in field.split(LOOKUP_SEP):
            branch = branch.setdefault(part, {})
    self.select_related = tree
    self.related_select_cols = []
    self.related_select_fields = []
def add_extra(self, select, select_params, where, params, tables, order_by):
    """
    Adds data to the various extra_* attributes for user-created additions
    to the query.
    """
    if select:
        # We need to pair any placeholder markers in the 'select'
        # dictionary with their parameters in 'select_params' so that
        # subsequent updates to the select dictionary also adjust the
        # parameters appropriately.
        select_pairs = SortedDict()
        if select_params:
            param_iter = iter(select_params)
        else:
            param_iter = iter([])
        for name, entry in select.items():
            entry = force_unicode(entry)
            entry_params = []
            # Consume one parameter for every "%s" placeholder in the
            # entry, scanning left to right.
            pos = entry.find("%s")
            while pos != -1:
                entry_params.append(next(param_iter))
                pos = entry.find("%s", pos + 2)
            select_pairs[name] = (entry, entry_params)
        # This is order preserving, since self.extra_select is a SortedDict.
        self.extra.update(select_pairs)
    if where or params:
        self.where.add(ExtraWhere(where, params), AND)
    if tables:
        self.extra_tables += tuple(tables)
    if order_by:
        self.extra_order_by = order_by
def clear_deferred_loading(self):
    """Reset deferred loading to its default "defer nothing" state."""
    self.deferred_loading = (set(), True)
def add_deferred_loading(self, field_names):
    """
    Add the given list of model field names to the set of fields to
    exclude from loading from the database when automatic column selection
    is done. The new field names are added to any existing field names that
    are deferred (or removed from any existing field names that are marked
    as the only ones for immediate loading).

    Related-model fields use the literal double-underscore ("foo__bar")
    format so a flat set can be used; the splitting happens later when the
    SQL column names are computed.
    """
    existing, defer = self.deferred_loading
    if defer:
        # Deferral mode: grow the deferred set.
        self.deferred_loading = (existing.union(field_names), True)
    else:
        # "Only" mode: shrink the immediate-load set instead.
        self.deferred_loading = (existing.difference(field_names), False)
def add_immediate_loading(self, field_names):
    """
    Add the given list of model field names to the set of fields to
    retrieve when the SQL is executed ("immediate loading" fields). The
    field names replace any existing immediate loading field names. If
    there are field names already specified for deferred loading, those
    names are removed from the new field_names before storing the new names
    for immediate loading. (That is, immediate loading overrides any
    existing immediate values, but respects existing deferrals.)
    """
    names = set(field_names)
    if 'pk' in names:
        # Translate the 'pk' shorthand into the real primary-key name.
        names.discard('pk')
        names.add(self.model._meta.pk.name)

    existing, defer = self.deferred_loading
    if defer:
        # Remove any existing deferred names from the current set before
        # setting the new names.
        names = names.difference(existing)
    # Either way the query is now in "immediate load only" mode.
    self.deferred_loading = names, False
def get_loaded_field_names(self):
    """
    If any fields are marked to be deferred, returns a dictionary mapping
    models to a set of names in those fields that will be loaded. If a
    model is not in the returned dictionary, none of its fields are
    deferred.

    If no fields are marked for deferral, returns an empty dictionary.
    """
    loaded = {}
    self.deferred_to_data(loaded, self.get_loaded_field_names_cb)
    return loaded
def get_loaded_field_names_cb(self, target, model, fields):
    """
    Callback used by get_deferred_field_names(): stores the set of loaded
    field names for 'model' into the 'target' dictionary.
    """
    target[model] = set(f.name for f in fields)
def set_aggregate_mask(self, names):
    """Set the mask of aggregates that will actually be returned by the
    SELECT; None removes the mask entirely."""
    self.aggregate_select_mask = None if names is None else set(names)
    # Invalidate the cached masked view either way.
    self._aggregate_select_cache = None
def set_extra_mask(self, names):
    """
    Set the mask of extra select items that will be returned by SELECT;
    None removes the mask. The items are not removed from the Query since
    they might be used later.
    """
    self.extra_select_mask = None if names is None else set(names)
    # Invalidate the cached masked view either way.
    self._extra_select_cache = None
def _aggregate_select(self):
    """The SortedDict of aggregate columns that are not masked, and should
    be used in the SELECT clause.

    This result is cached for optimization purposes.
    """
    if self._aggregate_select_cache is not None:
        return self._aggregate_select_cache
    if self.aggregate_select_mask is None:
        # No mask: expose every aggregate directly (nothing to cache).
        return self.aggregates
    self._aggregate_select_cache = SortedDict([
        (name, aggregate)
        for name, aggregate in self.aggregates.items()
        if name in self.aggregate_select_mask
    ])
    return self._aggregate_select_cache
aggregate_select = property(_aggregate_select)
def _extra_select(self):
    """The SortedDict of extra-select entries that are not masked (cached)."""
    if self._extra_select_cache is not None:
        return self._extra_select_cache
    if self.extra_select_mask is None:
        # No mask: expose every extra entry directly (nothing to cache).
        return self.extra
    self._extra_select_cache = SortedDict([
        (name, entry)
        for name, entry in self.extra.items()
        if name in self.extra_select_mask
    ])
    return self._extra_select_cache
extra_select = property(_extra_select)
def set_start(self, start):
    """
    Sets the table from which to start joining. The start position is
    specified by the related attribute from the base model. This will
    automatically set to the select column to be the column linked from the
    previous table.

    This method is primarily for internal use and the error checking isn't
    as friendly as add_filter(). Mostly useful for querying directly
    against the join table of many-to-many relation in a subquery.
    """
    opts = self.model._meta
    alias = self.get_initial_alias()
    field, col, opts, joins, last, extra = self.setup_joins(
            start.split(LOOKUP_SEP), opts, alias, False)
    select_col = self.alias_map[joins[1]].lhs_join_col
    select_alias = alias

    # The call to setup_joins added an extra reference to everything in
    # joins. Reverse that.
    for alias in joins:
        self.unref_alias(alias)

    # We might be able to trim some joins from the front of this query,
    # providing that we only traverse "always equal" connections (i.e. rhs
    # is *always* the same value as lhs).
    for alias in joins[1:]:
        join_info = self.alias_map[alias]
        if (join_info.lhs_join_col != select_col
                or join_info.join_type != self.INNER):
            break
        self.unref_alias(select_alias)
        select_alias = join_info.rhs_alias
        select_col = join_info.rhs_join_col
    self.select = [(select_alias, select_col)]
    self.remove_inherited_models()
def is_nullable(self, field):
    """
    A helper to check if the given field should be treated as nullable.

    Some backends treat '' as null and Django treats such fields as
    nullable for those backends. In such situations field.null can be
    False even if we should treat the field as nullable.
    """
    # We need to use DEFAULT_DB_ALIAS here, as QuerySet does not have
    # (nor should it have) knowledge of which connection is going to be
    # used. The proper fix would be to defer all decisions where
    # is_nullable() is needed to the compiler stage, but that is not easy
    # to do currently.
    features = connections[DEFAULT_DB_ALIAS].features
    if field.empty_strings_allowed and features.interprets_empty_strings_as_nulls:
        return True
    return field.null
def get_order_dir(field, default='ASC'):
    """
    Returns the field name and direction for an order specification. For
    example, '-foo' is returned as ('foo', 'DESC').

    The 'default' param is used to indicate which way no prefix (or a '+'
    prefix) should sort. The '-' prefix always sorts the opposite way.
    """
    directions = ORDER_DIR[default]
    if field[0] == '-':
        # Strip the prefix and flip the direction.
        return field[1:], directions[1]
    return field, directions[0]
def setup_join_cache(sender, **kwargs):
    """
    Initialise the (empty) per-model join cache when a model class is
    prepared. The information needed to join between model fields is
    invariant over the life of the model, so it is cached on the model's
    Options class rather than recomputed all the time.
    """
    sender._meta._join_cache = {}

signals.class_prepared.connect(setup_join_cache)
def add_to_dict(data, key, value):
    """
    A helper function to add "value" to the set of values for "key", whether
    or not "key" already exists.
    """
    data.setdefault(key, set()).add(value)
| {
"content_hash": "0a0c9ba3c829df498272024bc4b1a27c",
"timestamp": "",
"source": "github",
"line_count": 1999,
"max_line_length": 133,
"avg_line_length": 43.42421210605303,
"alnum_prop": 0.5706122919186682,
"repo_name": "akaihola/django",
"id": "7f331bfe7f837eab1596ff85d69b73dc3a6e1c71",
"size": "86805",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "django/db/models/sql/query.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "89027"
},
{
"name": "Python",
"bytes": "8042550"
},
{
"name": "Shell",
"bytes": "6521"
}
],
"symlink_target": ""
} |
import widget
import curses
import traceback
# Module-level UI state shared by the functions below.
_stop = False  # set True by end() to terminate the main loop
_WindowWidget = None  # root widget; assigned by mainloop()
_cursorPos = (0,0)  # (x, y) terminal cursor position used by _redraw()
_cursorEnabled = False  # whether _redraw() should show the cursor

# Curses color-pair ids registered in _initColors().
COLORPAIR_RED = 1
COLORPAIR_GREEN = 2
def _initColors():
    """Register the color pairs used by the UI (requires an initialised
    curses screen)."""
    for pair, foreground in ((COLORPAIR_RED, curses.COLOR_RED),
                             (COLORPAIR_GREEN, curses.COLOR_GREEN)):
        curses.init_pair(pair, foreground, curses.COLOR_BLACK)
def setCursor(x, y):
    """Show the terminal cursor and place it at (x, y) on the next redraw."""
    global _cursorEnabled, _cursorPos
    _cursorPos = (x, y)
    _cursorEnabled = True
    curses.curs_set(1)
def unsetCursor():
    """Hide the terminal cursor and reset its stored position."""
    global _cursorEnabled, _cursorPos
    _cursorPos = (0, 0)
    _cursorEnabled = False
    curses.curs_set(0)
def stepFocus():
    """Advance input focus to the next focusable widget.

    If the window widget reports no further widget to focus
    (changeFocus() returns a falsy value), focus wraps back to the
    window itself via onFocus().
    """
    # Removed leftover debug code that appended a stack trace to a file
    # named "stack" in the current directory on every focus change.
    if not _WindowWidget.changeFocus():
        _WindowWidget.onFocus()
def end():
    """Request that the main loop terminate after the current iteration."""
    global _stop
    _stop = True
def _redraw(stdscr):
    """Redraw the whole screen: the main widget area, the bottom row, and
    (optionally) the terminal cursor."""
    global _WindowWidget
    global _cursorPos, _cursorEnabled
    (height, width) = stdscr.getmaxyx()
    # Draw the main area (everything above the last row), then the last
    # row separately with one column held back.
    # NOTE(review): the second call drawing the bottom row with width-1
    # columns presumably avoids curses' error when writing the
    # bottom-right cell -- confirm against widget.draw()'s signature.
    _WindowWidget.draw(stdscr, 0, 0, 0, 0, width, height-1)
    _WindowWidget.draw(stdscr, 0, 0, 0, height-1, width-1, height)
    if _cursorEnabled:
        # curses addressing is (row, col), hence the (y, x) swap.
        stdscr.move(_cursorPos[1], _cursorPos[0])
    stdscr.refresh()
def _mainloop(stdscr):
    """Event loop: redraw, read one key, and dispatch it until end() is
    called. Runs inside curses.wrapper() via mainloop()."""
    global _stop
    global _WindowWidget
    _initColors()
    _stop = False
    (height, width) = stdscr.getmaxyx()
    curses.cbreak()
    curses.curs_set(0)
    _WindowWidget.resize(width, height)
    _WindowWidget.onFocus()
    while not _stop:
        stdscr.clear()
        _redraw(stdscr)
        key = stdscr.getch()
        if key == curses.KEY_RESIZE:
            # Terminal size changed: re-query and propagate to the widgets.
            (height, width) = stdscr.getmaxyx()
            _WindowWidget.resize(width, height)
        elif not _WindowWidget.keyEvent(key):
            # Key not consumed by any widget: handle global shortcuts.
            if key == ord('\t'):
                stepFocus()
            if key == curses.KEY_END:
                end()
def mainloop(WindowWidget):
    """Run the application with WindowWidget as the root widget.

    curses.wrapper() takes care of terminal setup and teardown around
    _mainloop().
    """
    global _WindowWidget
    _WindowWidget = WindowWidget
    curses.wrapper(_mainloop)
| {
"content_hash": "84cc57fa5a78683dc73a748d0845eea9",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 77,
"avg_line_length": 24.91025641025641,
"alnum_prop": 0.6258363355635616,
"repo_name": "thijsmie/madmin",
"id": "39cfaa008f77889c6ca7b0b8a98dcc5ef0ac7438",
"size": "1943",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "gui_lib/core.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "184405"
},
{
"name": "Shell",
"bytes": "740"
}
],
"symlink_target": ""
} |
import os
import sys
def main():
    """Run Django administrative tasks for the byte_orbit project."""
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "byte_orbit.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django  # noqa: F401 -- probing for availability only
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)


if __name__ == "__main__":
    main()
| {
"content_hash": "9bbb1a893c594267da47c201b0537c27",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 77,
"avg_line_length": 37.42857142857143,
"alnum_prop": 0.6208651399491094,
"repo_name": "roansong/weatherapp",
"id": "751b1762b0da5284393c267fc6630c731f0c8b19",
"size": "808",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "byte_orbit/manage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2216"
},
{
"name": "HTML",
"bytes": "3463"
},
{
"name": "Python",
"bytes": "19046"
}
],
"symlink_target": ""
} |
import json
import re
import os.path
# BEGIN OPENSOURCE
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
# END OPENSOURCE
import tmp.benchmarks_pb2 as benchmarks_pb2
# Cache: absolute dataset path -> (total_bytes, average_bytes_per_payload).
__file_size_map = {}

def __get_data_size(filename):
    """Return (total payload bytes, average payload bytes) for a benchmark
    dataset file, memoising the result in __file_size_map.

    Relative paths are resolved against the repository root (the parent of
    this script's directory).
    """
    if filename[0] != '/':
        filename = os.path.dirname(os.path.abspath(__file__)) + "/../" + filename
    if filename in __file_size_map:
        return __file_size_map[filename]
    benchmark_dataset = benchmarks_pb2.BenchmarkDataset()
    # Fix: use a context manager so the dataset file handle is closed
    # instead of being leaked by open(...).read().
    with open(filename, "rb") as f:
        benchmark_dataset.ParseFromString(f.read())
    size = 0
    count = 0
    for payload in benchmark_dataset.payload:
        size += len(payload)
        count += 1
    __file_size_map[filename] = (size, 1.0 * size / count)
    return size, 1.0 * size / count
def __extract_file_name(file_name):
    """Return the last path/extension component of *file_name* that starts
    with "google_message" (the dataset's short name), or "" if none does."""
    short_file_name = ""
    for token in re.split(r"[/\.]", file_name):
        if token.startswith("google_message"):
            short_file_name = token
    return short_file_name
# Accumulates one entry per (language, dataset, behavior) across all the
# parser functions below; each entry carries a throughput figure.
__results = []

# CPP results example:
# [
#   "benchmarks": [
#     {
#       "bytes_per_second": int,
#       "cpu_time_ns": double,
#       "iterations": int,
#       "name: string,
#       "real_time_ns: double,
#       ...
#     },
#     ...
#   ],
#   ...
# ]
def __parse_cpp_result(filename):
    """Parse a C++ benchmark JSON file and append its rows to __results.

    Throughput is converted from bytes/second to MiB/second.
    """
    if filename == "":
        return
    if filename[0] != '/':
        filename = os.path.dirname(os.path.abspath(__file__)) + '/' + filename
    with open(filename, "rb") as f:
        raw = f.read()
    for benchmark in json.loads(raw)["benchmarks"]:
        full_name = benchmark["name"]
        # The dataset name is everything before the _parse_/_serialize
        # marker; the behavior is the remainder after the separator.
        data_filename = "".join(
            re.split("(_parse_|_serialize)", full_name)[0])
        behavior = full_name[len(data_filename) + 1:]
        if data_filename.startswith("BM"):
            data_filename = data_filename[3:]
        __results.append({
            "language": "cpp",
            "dataFilename": data_filename,
            "behavior": behavior,
            "throughput": benchmark["bytes_per_second"] / 2.0 ** 20
        })
# Synthetic benchmark results example:
# [
#   "benchmarks": [
#     {
#       "cpu_time_ns": double,
#       "iterations": int,
#       "name: string,
#       "real_time_ns: double,
#       ...
#     },
#     ...
#   ],
#   ...
# ]
def __parse_synthetic_result(filename):
    """Parse synthetic C++ benchmark JSON; throughput here is operations
    per second derived from the measured cpu_time_ns."""
    if filename == "":
        return
    if filename[0] != "/":
        filename = os.path.dirname(os.path.abspath(__file__)) + "/" + filename
    with open(filename, "rb") as f:
        parsed = json.loads(f.read())
    for benchmark in parsed["benchmarks"]:
        __results.append({
            "language": "cpp",
            "dataFilename": "",
            "behavior": "synthetic",
            "throughput": 10.0**9 / benchmark["cpu_time_ns"]
        })
# Python results example:
# [
#   [
#     {
#       "filename": string,
#       "benchmarks": {
#         behavior: results,
#         ...
#       },
#     },
#     ...
#   ], #pure-python
#   ...
# ]
def __parse_python_result(filename):
    """Parse a Python benchmark JSON file (a list of result lists, one per
    implementation) and append its rows to __results."""
    if filename == "":
        return
    if filename[0] != '/':
        filename = os.path.dirname(os.path.abspath(__file__)) + '/' + filename
    with open(filename, "rb") as f:
        results_list = json.loads(f.read())
    for results in results_list:
        for result in results:
            # Kept for its validation/caching side effect even though the
            # average size is not used here.
            _, avg_size = __get_data_size(result["filename"])
            dataset = __extract_file_name(result["filename"])
            for behavior, throughput in result["benchmarks"].items():
                __results.append({
                    "language": "python",
                    "dataFilename": dataset,
                    "behavior": behavior,
                    "throughput": throughput
                })
# Java results example:
# [
#   {
#     "id": string,
#     "instrumentSpec": {...},
#     "measurements": [
#       {
#         "weight": float,
#         "value": {
#           "magnitude": float,
#           "unit": string
#         },
#         ...
#       },
#       ...
#     ],
#     "run": {...},
#     "scenario": {
#       "benchmarkSpec": {
#         "methodName": string,
#         "parameters": {
#            defined parameters in the benchmark: parameters value
#         },
#         ...
#       },
#       ...
#     }
#
#   },
#   ...
# ]
def __parse_java_result(filename):
    # Parse a Caliper (Java) JSON result file and append rows to __results.
    # Throughput is dataset bytes divided by the weighted-average time per
    # operation, converted to MiB/second.
    if filename == "":
        return
    if filename[0] != '/':
        filename = os.path.dirname(os.path.abspath(__file__)) + '/' + filename
    with open(filename, "rb") as f:
        results = json.loads(f.read())
        for result in results:
            # Weighted average of the measured magnitudes (time per op).
            total_weight = 0
            total_value = 0
            for measurement in result["measurements"]:
                total_weight += measurement["weight"]
                total_value += measurement["value"]["magnitude"]
            avg_time = total_value * 1.0 / total_weight
            total_size, _ = __get_data_size(
                result["scenario"]["benchmarkSpec"]["parameters"]["dataFile"])
            __results.append({
                "language": "java",
                "throughput": total_size / avg_time * 1e9 / 2 ** 20,
                "behavior": result["scenario"]["benchmarkSpec"]["methodName"],
                "dataFilename": __extract_file_name(
                    result["scenario"]["benchmarkSpec"]["parameters"]["dataFile"])
            })
# Go benchmark results:
#
# goos: linux
# goarch: amd64
# Benchmark/.././datasets/google_message2/dataset.google_message2.pb/Unmarshal-12 3000 705784 ns/op
# Benchmark/.././datasets/google_message2/dataset.google_message2.pb/Marshal-12 2000 634648 ns/op
# Benchmark/.././datasets/google_message2/dataset.google_message2.pb/Size-12 5000 244174 ns/op
# Benchmark/.././datasets/google_message2/dataset.google_message2.pb/Clone-12 300 4120954 ns/op
# Benchmark/.././datasets/google_message2/dataset.google_message2.pb/Merge-12 300 4108632 ns/op
# PASS
# ok _/usr/local/google/home/yilunchong/mygit/protobuf/benchmarks 124.173s
def __parse_go_result(filename):
    """Parse `go test -bench` output and append one throughput entry per
    benchmark line to the module-level __results.

    Benchmark lines look like:
        Benchmark/<data file path>/<Behavior>-<GOMAXPROCS>  <runs>  <ns/op> ns/op

    :param filename: path to the captured benchmark output; relative paths
        are resolved against this script's directory. Empty string is a no-op.
    """
    if filename == "":
        return
    if filename[0] != '/':
        filename = os.path.dirname(os.path.abspath(__file__)) + '/' + filename
    # Open in text mode: the regex and the "Benchmark" prefix comparison below
    # operate on str, so reading bytes would make the comparison silently fail
    # on every line under Python 3 and produce no results at all.
    with open(filename, "r") as f:
        for line in f:
            result_list = re.split(r"[\ \t]+", line)
            if result_list[0][:9] != "Benchmark":
                continue
            # The name field is Benchmark/<data file path>/<Behavior>-<cores>.
            first_slash_index = result_list[0].find('/')
            last_slash_index = result_list[0].rfind('/')
            full_filename = result_list[0][first_slash_index+1:last_slash_index]
            total_bytes, _ = __get_data_size(full_filename)
            behavior_with_suffix = result_list[0][last_slash_index+1:]
            # Strip the trailing "-<GOMAXPROCS>" suffix Go appends, if present.
            last_dash = behavior_with_suffix.rfind("-")
            if last_dash == -1:
                behavior = behavior_with_suffix
            else:
                behavior = behavior_with_suffix[:last_dash]
            __results.append({
                "dataFilename": __extract_file_name(full_filename),
                # Column 3 is ns/op; convert to MB/s (1e9 ns, 2**20 bytes).
                "throughput": total_bytes / float(result_list[2]) * 1e9 / 2 ** 20,
                "behavior": behavior,
                "language": "go"
            })
# Self built json results example:
#
# [
# {
# "filename": string,
# "benchmarks": {
# behavior: results,
# ...
# },
# },
# ...
# ]
def __parse_custom_result(filename, language):
    """Parse a self-built JSON result file (a list of objects with "filename"
    and a "benchmarks" mapping of behavior -> throughput) and append one entry
    per behavior to the module-level __results.

    :param filename: path to the JSON file; relative paths are resolved
        against this script's directory. Empty string is a no-op.
    :param language: language label stored with each result entry.
    """
    if filename == "":
        return
    if filename[0] != '/':
        # Resolve relative paths against the directory containing this script.
        filename = os.path.dirname(os.path.abspath(__file__)) + '/' + filename
    with open(filename, "rb") as result_file:
        parsed_entries = json.loads(result_file.read())
    for entry in parsed_entries:
        _, avg_size = __get_data_size(entry["filename"])
        data_name = __extract_file_name(entry["filename"])
        for behavior, throughput in entry["benchmarks"].items():
            __results.append({
                "language": language,
                "dataFilename": data_name,
                "behavior": behavior,
                "throughput": throughput
            })
def __parse_js_result(filename, language):
    # Node.js benchmarks emit the self-built JSON format; delegate.
    return __parse_custom_result(filename, language)
def __parse_php_result(filename, language):
    # PHP benchmarks emit the self-built JSON format; delegate.
    return __parse_custom_result(filename, language)
def get_result_from_file(cpp_file="",
                         java_file="",
                         python_file="",
                         go_file="",
                         synthetic_file="",
                         node_file="",
                         php_c_file="",
                         php_file=""):
    """Parse every provided benchmark result file and return the combined
    result list.

    Each argument is a path to one language's benchmark output; an empty
    string skips that language. All parsers accumulate into the shared
    module-level __results list, which is returned.

    Note: php_c_file results are intentionally labeled "php" as well.

    :return: list of dicts with keys language/dataFilename/behavior/throughput
    """
    # (Removed an unused local `results = {}`: the parsers append to the
    # module-level __results list, which is what this function returns.)
    if cpp_file != "":
        __parse_cpp_result(cpp_file)
    if java_file != "":
        __parse_java_result(java_file)
    if python_file != "":
        __parse_python_result(python_file)
    if go_file != "":
        __parse_go_result(go_file)
    if synthetic_file != "":
        __parse_synthetic_result(synthetic_file)
    if node_file != "":
        __parse_js_result(node_file, "node")
    if php_file != "":
        __parse_php_result(php_file, "php")
    if php_c_file != "":
        __parse_php_result(php_c_file, "php")
    return __results
| {
"content_hash": "2cae16d990651cc3edc973fc2cdf32b9",
"timestamp": "",
"source": "github",
"line_count": 298,
"max_line_length": 118,
"avg_line_length": 28.778523489932887,
"alnum_prop": 0.5502565298507462,
"repo_name": "endlessm/chromium-browser",
"id": "b09f387a6b07abad782f56c617b359afa26b5c49",
"size": "8710",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "third_party/protobuf/benchmarks/util/result_parser.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import os
import time
from platformio import __accounts_api__, app
from platformio.clients.http import HTTPClient, HTTPClientError
from platformio.exception import PlatformioException
class AccountError(PlatformioException):
    # Base class for PlatformIO Account errors; the message is the
    # stringified first constructor argument.
    MESSAGE = "{0}"
class AccountNotAuthorized(AccountError):
    # Raised when no valid local session/token is available.
    MESSAGE = "You are not authorized! Please log in to PlatformIO Account."
class AccountAlreadyAuthorized(AccountError):
    # Raised when login/registration is attempted while a valid session
    # already exists; formatted with the account e-mail.
    MESSAGE = "You are already authorized with {0} account."
class AccountClient(HTTPClient): # pylint:disable=too-many-public-methods
    """HTTP client for the PlatformIO Account API: authentication, profile,
    organization and team management. Session state is persisted via the
    app state under the "account" key."""

    # Cached /v1/summary data is considered fresh for one week (seconds).
    SUMMARY_CACHE_TTL = 60 * 60 * 24 * 7

    def __init__(self):
        super().__init__(__accounts_api__)

    @staticmethod
    def get_refresh_token():
        """Return the refresh token from the locally stored session.

        :raises AccountNotAuthorized: if no session/token is stored locally.
        """
        try:
            return app.get_state_item("account").get("auth").get("refresh_token")
        except: # pylint:disable=bare-except
            raise AccountNotAuthorized()

    @staticmethod
    def delete_local_session():
        """Drop the whole locally stored account session."""
        app.delete_state_item("account")

    @staticmethod
    def delete_local_state(key):
        """Remove a single key from the local account state (no-op if absent)."""
        account = app.get_state_item("account")
        if not account or key not in account:
            return
        del account[key]
        app.set_state_item("account", account)

    def fetch_json_data(self, *args, **kwargs):
        """Proxy to HTTPClient.fetch_json_data, re-raising transport
        failures as AccountError (with the original as __cause__)."""
        try:
            return super().fetch_json_data(*args, **kwargs)
        except HTTPClientError as exc:
            raise AccountError(exc) from exc

    def fetch_authentication_token(self):
        """Return a valid access token.

        Resolution order: the PLATFORMIO_AUTH_TOKEN environment variable,
        then a non-expired cached access token, then a refresh via the stored
        refresh token (the local session is dropped if the refresh fails).

        :raises AccountNotAuthorized: when no token can be obtained.
        """
        if os.environ.get("PLATFORMIO_AUTH_TOKEN"):
            return os.environ.get("PLATFORMIO_AUTH_TOKEN")
        auth = app.get_state_item("account", {}).get("auth", {})
        if auth.get("access_token") and auth.get("access_token_expire"):
            if auth.get("access_token_expire") > time.time():
                return auth.get("access_token")
            if auth.get("refresh_token"):
                try:
                    data = self.fetch_json_data(
                        "post",
                        "/v1/login",
                        headers={
                            "Authorization": "Bearer %s" % auth.get("refresh_token")
                        },
                    )
                    app.set_state_item("account", data)
                    return data.get("auth").get("access_token")
                except AccountError:
                    # Refresh token rejected: wipe the stale session.
                    self.delete_local_session()
        raise AccountNotAuthorized()

    def login(self, username, password):
        """Log in with username/password and persist the session locally.

        :raises AccountAlreadyAuthorized: if a valid session already exists.
        """
        try:
            self.fetch_authentication_token()
        except: # pylint:disable=bare-except
            pass
        else:
            raise AccountAlreadyAuthorized(
                app.get_state_item("account", {}).get("email", "")
            )
        data = self.fetch_json_data(
            "post",
            "/v1/login",
            data={"username": username, "password": password},
        )
        app.set_state_item("account", data)
        return data

    def login_with_code(self, client_id, code, redirect_uri):
        """Log in via an OAuth-style authorization code and persist the
        session locally.

        :raises AccountAlreadyAuthorized: if a valid session already exists.
        """
        try:
            self.fetch_authentication_token()
        except: # pylint:disable=bare-except
            pass
        else:
            raise AccountAlreadyAuthorized(
                app.get_state_item("account", {}).get("email", "")
            )
        result = self.fetch_json_data(
            "post",
            "/v1/login/code",
            data={"client_id": client_id, "code": code, "redirect_uri": redirect_uri},
        )
        app.set_state_item("account", result)
        return result

    def logout(self):
        """Invalidate the server-side session and always drop the local one.

        :return: True, even if the server-side logout request fails.
        """
        refresh_token = self.get_refresh_token()
        self.delete_local_session()
        try:
            self.fetch_json_data(
                "post",
                "/v1/logout",
                data={"refresh_token": refresh_token},
            )
        except AccountError:
            # Best-effort: local session is already gone.
            pass
        return True

    def change_password(self, old_password, new_password):
        """Change the account password (requires authorization)."""
        return self.fetch_json_data(
            "post",
            "/v1/password",
            data={"old_password": old_password, "new_password": new_password},
            x_with_authorization=True,
        )

    def registration(
        self, username, email, password, firstname, lastname
    ): # pylint:disable=too-many-arguments
        """Register a new account.

        :raises AccountAlreadyAuthorized: if a valid session already exists.
        """
        try:
            self.fetch_authentication_token()
        except: # pylint:disable=bare-except
            pass
        else:
            raise AccountAlreadyAuthorized(
                app.get_state_item("account", {}).get("email", "")
            )
        return self.fetch_json_data(
            "post",
            "/v1/registration",
            data={
                "username": username,
                "email": email,
                "password": password,
                "firstname": firstname,
                "lastname": lastname,
            },
        )

    def auth_token(self, password, regenerate):
        """Fetch (or regenerate) the personal authentication token."""
        return self.fetch_json_data(
            "post",
            "/v1/token",
            data={"password": password, "regenerate": 1 if regenerate else 0},
            x_with_authorization=True,
        ).get("auth_token")

    def forgot_password(self, username):
        """Trigger the password-reset flow for the given username/e-mail."""
        return self.fetch_json_data(
            "post",
            "/v1/forgot",
            data={"username": username},
        )

    def get_profile(self):
        """Fetch the account profile (requires authorization)."""
        return self.fetch_json_data(
            "get",
            "/v1/profile",
            x_with_authorization=True,
        )

    def update_profile(self, profile, current_password):
        """Update the account profile; invalidates the cached summary."""
        profile["current_password"] = current_password
        self.delete_local_state("summary")
        response = self.fetch_json_data(
            "put",
            "/v1/profile",
            data=profile,
            x_with_authorization=True,
        )
        return response

    def get_account_info(self, offline=False):
        """Return account summary info, preferring a fresh local cache.

        :param offline: if True and no fresh cache exists, fall back to a
            minimal profile built from local state instead of hitting the API.
        """
        account = app.get_state_item("account") or {}
        if (
            account.get("summary")
            and account["summary"].get("expire_at", 0) > time.time()
        ):
            return account["summary"]
        if offline and account.get("email"):
            return {
                "profile": {
                    "email": account.get("email"),
                    "username": account.get("username"),
                }
            }
        result = self.fetch_json_data(
            "get",
            "/v1/summary",
            x_with_authorization=True,
        )
        # Cache the summary with an expiration timestamp.
        account["summary"] = dict(
            profile=result.get("profile"),
            packages=result.get("packages"),
            subscriptions=result.get("subscriptions"),
            user_id=result.get("user_id"),
            expire_at=int(time.time()) + self.SUMMARY_CACHE_TTL,
        )
        app.set_state_item("account", account)
        return result

    def get_logged_username(self):
        """Return the username of the logged-in account (offline-capable)."""
        return self.get_account_info(offline=True).get("profile").get("username")

    def destroy_account(self):
        """Permanently delete the account on the server."""
        return self.fetch_json_data(
            "delete",
            "/v1/account",
            x_with_authorization=True,
        )

    def create_org(self, orgname, email, displayname):
        """Create a new organization."""
        return self.fetch_json_data(
            "post",
            "/v1/orgs",
            data={"orgname": orgname, "email": email, "displayname": displayname},
            x_with_authorization=True,
        )

    def get_org(self, orgname):
        """Fetch a single organization by name."""
        return self.fetch_json_data(
            "get",
            "/v1/orgs/%s" % orgname,
            x_with_authorization=True,
        )

    def list_orgs(self):
        """List organizations the account belongs to."""
        return self.fetch_json_data(
            "get",
            "/v1/orgs",
            x_with_authorization=True,
        )

    def update_org(self, orgname, data):
        """Update an organization; falsy values in `data` are dropped."""
        return self.fetch_json_data(
            "put",
            "/v1/orgs/%s" % orgname,
            data={k: v for k, v in data.items() if v},
            x_with_authorization=True,
        )

    def destroy_org(self, orgname):
        """Delete an organization."""
        return self.fetch_json_data(
            "delete",
            "/v1/orgs/%s" % orgname,
            x_with_authorization=True,
        )

    def add_org_owner(self, orgname, username):
        """Add a user as an owner of the organization."""
        return self.fetch_json_data(
            "post",
            "/v1/orgs/%s/owners" % orgname,
            data={"username": username},
            x_with_authorization=True,
        )

    def list_org_owners(self, orgname):
        """List the owners of an organization."""
        return self.fetch_json_data(
            "get",
            "/v1/orgs/%s/owners" % orgname,
            x_with_authorization=True,
        )

    def remove_org_owner(self, orgname, username):
        """Remove a user from the organization's owners."""
        return self.fetch_json_data(
            "delete",
            "/v1/orgs/%s/owners" % orgname,
            data={"username": username},
            x_with_authorization=True,
        )

    def create_team(self, orgname, teamname, description):
        """Create a team inside an organization."""
        return self.fetch_json_data(
            "post",
            "/v1/orgs/%s/teams" % orgname,
            data={"name": teamname, "description": description},
            x_with_authorization=True,
        )

    def destroy_team(self, orgname, teamname):
        """Delete a team from an organization."""
        return self.fetch_json_data(
            "delete",
            "/v1/orgs/%s/teams/%s" % (orgname, teamname),
            x_with_authorization=True,
        )

    def get_team(self, orgname, teamname):
        """Fetch a single team."""
        return self.fetch_json_data(
            "get",
            "/v1/orgs/%s/teams/%s" % (orgname, teamname),
            x_with_authorization=True,
        )

    def list_teams(self, orgname):
        """List the teams of an organization."""
        return self.fetch_json_data(
            "get",
            "/v1/orgs/%s/teams" % orgname,
            x_with_authorization=True,
        )

    def update_team(self, orgname, teamname, data):
        """Update a team; falsy values in `data` are dropped."""
        return self.fetch_json_data(
            "put",
            "/v1/orgs/%s/teams/%s" % (orgname, teamname),
            data={k: v for k, v in data.items() if v},
            x_with_authorization=True,
        )

    def add_team_member(self, orgname, teamname, username):
        """Add a user to a team."""
        return self.fetch_json_data(
            "post",
            "/v1/orgs/%s/teams/%s/members" % (orgname, teamname),
            data={"username": username},
            x_with_authorization=True,
        )

    def remove_team_member(self, orgname, teamname, username):
        """Remove a user from a team."""
        return self.fetch_json_data(
            "delete",
            "/v1/orgs/%s/teams/%s/members" % (orgname, teamname),
            data={"username": username},
            x_with_authorization=True,
        )
| {
"content_hash": "74550ea236cca4ce82248bac5663ed52",
"timestamp": "",
"source": "github",
"line_count": 342,
"max_line_length": 86,
"avg_line_length": 30.68421052631579,
"alnum_prop": 0.5225843339050886,
"repo_name": "platformio/platformio",
"id": "2afe6fbe3da474a4d9ac80041b862549300f51df",
"size": "11105",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "platformio/clients/account.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Arduino",
"bytes": "1826"
},
{
"name": "Makefile",
"bytes": "356"
},
{
"name": "Processing",
"bytes": "101"
},
{
"name": "Python",
"bytes": "333618"
},
{
"name": "Smarty",
"bytes": "45408"
}
],
"symlink_target": ""
} |
from blackbook.lib import collection_plus_json
__author__ = 'ievans3024'
class APIError(collection_plus_json.Error, BaseException):
    """Base wrapper class for API errors.

    Instances can be raised as Python exceptions:

        raise APIError()

    or embedded in a collection_plus_json.Collection:

        collection_plus_json.Collection(href="/foo/", error=APIError())

    Convenience subclasses exist for the common HTTP errors (400, 401, 403,
    404, 405, 406, 409, 410, 415, 419, 429, 500, 501, 503) so that specific
    error types can be caught individually and constructed without repeating
    code/title/message every time, e.g.:

        collection_plus_json.Collection(href="/foo/", error=APINotFoundError())
    """

    def __init__(self,
                 code="500",
                 title="Internal Server Error",
                 message=("The server encountered an unexpected condition "
                          "which prevented it from fulfilling the request."),
                 **kwargs):
        """Construct an APIError.

        :param code: HTTP status code, as a string
        :param title: short human-readable error title
        :param message: detailed error description
        :param kwargs: other nonstandard error information
        """
        super(APIError, self).__init__(code=code, message=message, title=title, **kwargs)
class APIBadRequestError(APIError):
    """Convenience class for HTTP 400 errors"""

    def __init__(self, code="400", title="Bad Request",
                 message="The request could not be understood by the server due to malformed syntax.",
                 **kwargs):
        """Construct the error; parameters have the same meaning as in APIError."""
        super(APIBadRequestError, self).__init__(code=code, title=title, message=message, **kwargs)
class APIUnauthorizedError(APIError):
    """Convenience class for HTTP 401 errors"""

    def __init__(self, code="401", title="Unauthorized",
                 message="The request requires user authentication.",
                 **kwargs):
        """Construct the error; parameters have the same meaning as in APIError."""
        super(APIUnauthorizedError, self).__init__(code=code, title=title, message=message, **kwargs)
class APIForbiddenError(APIError):
    """Convenience class for HTTP 403 errors"""

    def __init__(self, code="403", title="Forbidden",
                 message="The server understood the request, but is refusing to fulfill it.",
                 **kwargs):
        """Construct the error; parameters have the same meaning as in APIError."""
        super(APIForbiddenError, self).__init__(code=code, title=title, message=message, **kwargs)
class APINotFoundError(APIError):
    """Convenience class for HTTP 404 errors"""

    def __init__(self, code="404", title="Not Found",
                 message="The server could not find the requested resource.",
                 **kwargs):
        """Construct the error; parameters have the same meaning as in APIError."""
        super(APINotFoundError, self).__init__(code=code, title=title, message=message, **kwargs)
class APIMethodNotAllowedError(APIError):
    """Convenience class for HTTP 405 errors"""

    def __init__(self, code="405", title="Method Not Allowed",
                 message="The HTTP method specified in the request is not allowed for the requested resource.",
                 **kwargs):
        """Construct the error; parameters have the same meaning as in APIError."""
        super(APIMethodNotAllowedError, self).__init__(code=code, title=title, message=message, **kwargs)
class APINotAcceptableError(APIError):
    """Convenience class for HTTP 406 errors"""

    def __init__(self, code="406", title="Not Acceptable",
                 message="The requested resource cannot generate content deemed acceptable by the request.",
                 **kwargs):
        """Construct the error; parameters have the same meaning as in APIError."""
        super(APINotAcceptableError, self).__init__(code=code, title=title, message=message, **kwargs)
class APIConflictError(APIError):
    """Convenience class for HTTP 409 errors"""

    def __init__(self, code="409", title="Conflict",
                 message="The request could not be completed due to a conflict with the current state of the resource.",
                 **kwargs):
        """Construct the error; parameters have the same meaning as in APIError."""
        super(APIConflictError, self).__init__(code=code, title=title, message=message, **kwargs)
class APIGoneError(APIError):
    """Convenience class for HTTP 410 errors"""

    def __init__(self, code="410", title="Gone",
                 message="The requested resource is no longer available and no forwarding address is known.",
                 **kwargs):
        """Construct the error; parameters have the same meaning as in APIError."""
        super(APIGoneError, self).__init__(code=code, title=title, message=message, **kwargs)
class APIUnsupportableMediaTypeError(APIError):
    """Convenience class for HTTP 415 errors"""

    def __init__(self, code="415", title="Unsupportable Media Type",
                 message="The content supplied in the request is not a type supported by the requested resource.",
                 **kwargs):
        """Construct the error; parameters have the same meaning as in APIError."""
        super(APIUnsupportableMediaTypeError, self).__init__(code=code, title=title, message=message, **kwargs)
class APIAuthenticationTimeoutError(APIError):
    """Convenience class for HTTP 419 errors"""

    def __init__(self, code="419", title="Authentication Timeout",
                 message="Previously valid authentication has expired. Please re-authenticate and try again.",
                 **kwargs):
        """Construct the error; parameters have the same meaning as in APIError."""
        super(APIAuthenticationTimeoutError, self).__init__(code=code, title=title, message=message, **kwargs)
class APITooManyRequestsError(APIError):
    """Convenience class for HTTP 429 errors"""

    def __init__(self, code="429", title="Too Many Requests",
                 message=("The server is temporarily refusing to service requests made by the client "
                          "due to too many requests being made by the client too frequently."),
                 **kwargs):
        """Construct the error; parameters have the same meaning as in APIError."""
        super(APITooManyRequestsError, self).__init__(code=code, title=title, message=message, **kwargs)
class APIInternalServerError(APIError):
    """Convenience class for HTTP 500 errors"""

    def __init__(self, code="500", title="Internal Server Error",
                 message=("The server encountered an unexpected condition "
                          "which prevented it from fulfilling the request."),
                 **kwargs):
        """Construct the error; parameters have the same meaning as in APIError."""
        super(APIInternalServerError, self).__init__(code=code, title=title, message=message, **kwargs)
class APINotImplementedError(APIError):
    """Convenience class for HTTP 501 errors"""

    def __init__(self, code="501", title="Not Implemented",
                 message="The server does not support the functionality required to fulfill the request.",
                 **kwargs):
        """Construct the error; parameters have the same meaning as in APIError."""
        super(APINotImplementedError, self).__init__(code=code, title=title, message=message, **kwargs)
class APIServiceUnavailableError(APIError):
    """Convenience class for HTTP 503 errors"""

    def __init__(self, code="503", title="Service Unavailable",
                 message=("The server is currently unable to handle the request due to a temporary "
                          "overloading or maintenance of the server."),
                 **kwargs):
        """Construct the error; parameters have the same meaning as in APIError."""
        super(APIServiceUnavailableError, self).__init__(code=code, title=title, message=message, **kwargs)
"content_hash": "453b73c89a061c6fb26e12ea8e923f4b",
"timestamp": "",
"source": "github",
"line_count": 342,
"max_line_length": 120,
"avg_line_length": 36.99707602339181,
"alnum_prop": 0.6005690350114598,
"repo_name": "ievans3024/BlackBook",
"id": "5247625dc193d492e5ee491119c60319436eb694",
"size": "12653",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blackbook/api/errors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2051"
},
{
"name": "HTML",
"bytes": "12625"
},
{
"name": "JavaScript",
"bytes": "19743"
},
{
"name": "Python",
"bytes": "122590"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2016 Alexandr Menkin
Use of this source code is governed by an MIT-style license that can be
found in the LICENSE file at https://github.com/sanchousic/diploma17-test-data-generation/blob/master/LICENSE
This file contains a set of functions for work with blender's nodes.
"""
import bpy
"""
How does z_depth count value using parameters (offset, size, use_min, min, use_max, max):
distance = distance to nearest not empty point on scene
result = size * distance + offset
if use_max and result > max then result = max
if use_min and result < min then result = min
"""
def init_for_z_depth(parameters):
    """Build a compositor node graph that routes the z-depth channel,
    scaled/clamped by a Map Value node and inverted, into a Viewer node.

    Per the module notes: result = size * distance + offset, then clamped
    to max/min when use_max/use_min are set.

    :param parameters: (offset, size, use_min, min, use_max, max)
        offset: float
        size: float
        use_min: bool
        min: float
        use_max: bool
        max: float
    """
    offset, size, use_min, _min, use_max, _max = parameters
    bpy.context.scene.use_nodes = True
    tree = bpy.context.scene.node_tree
    links = tree.links
    # Iterate over a snapshot: removing nodes from the collection while
    # iterating it directly can skip entries and leave stale nodes behind.
    for n in list(tree.nodes):
        tree.nodes.remove(n)
    # Render Layers -> Composite keeps the regular render output intact.
    r_layers_node = tree.nodes.new('CompositorNodeRLayers')
    r_layers_node.location = 0, 200
    r_layers_node.scene = bpy.data.scenes["Scene"]
    composite_node = tree.nodes.new('CompositorNodeComposite')
    composite_node.location = 200, 200
    links.new(r_layers_node.outputs['Image'], composite_node.inputs['Image'])
    # Map Value node applies offset/size and the optional min/max clamps
    # to the raw Z channel.
    map_value_node = tree.nodes.new('CompositorNodeMapValue')
    map_value_node.offset[0] = offset
    map_value_node.size[0] = size
    map_value_node.use_min = use_min
    map_value_node.min[0] = _min
    map_value_node.use_max = use_max
    map_value_node.max[0] = _max
    map_value_node.location = 200, 0
    links.new(r_layers_node.outputs['Z'], map_value_node.inputs['Value'])
    # Invert the mapped depth before exposing it.
    invert_node = tree.nodes.new('CompositorNodeInvert')
    invert_node.location = 400, 0
    links.new(map_value_node.outputs['Value'], invert_node.inputs['Color'])
    # Viewer node makes the result available for save_render().
    viewer_node = tree.nodes.new('CompositorNodeViewer')
    viewer_node.location = 600, 0
    links.new(invert_node.outputs['Color'], viewer_node.inputs['Image'])
def init_for_z_depth_with_noise(parameters):
    """Build a compositor node graph for the z-depth channel with a blurred
    texture overlaid as noise, routed into a Viewer node.

    Same depth mapping as init_for_z_depth, but instead of inverting, the
    mapped depth is OVERLAY-mixed (factor 0.1) with a blurred texture.
    NOTE: requires a texture named 'Texture' to exist in bpy.data.textures.

    :param parameters: (offset, size, use_min, min, use_max, max)
        offset: float
        size: float
        use_min: bool
        min: float
        use_max: bool
        max: float
    """
    offset, size, use_min, _min, use_max, _max = parameters
    bpy.context.scene.use_nodes = True
    tree = bpy.context.scene.node_tree
    links = tree.links
    # Iterate over a snapshot: removing nodes from the collection while
    # iterating it directly can skip entries and leave stale nodes behind.
    for n in list(tree.nodes):
        tree.nodes.remove(n)
    # Render Layers -> Composite keeps the regular render output intact.
    r_layers_node = tree.nodes.new('CompositorNodeRLayers')
    r_layers_node.location = 0, 200
    r_layers_node.scene = bpy.data.scenes["Scene"]
    composite_node = tree.nodes.new('CompositorNodeComposite')
    composite_node.location = 200, 200
    links.new(r_layers_node.outputs['Image'], composite_node.inputs['Image'])
    # Map Value node applies offset/size and the optional min/max clamps.
    map_value_node = tree.nodes.new('CompositorNodeMapValue')
    map_value_node.offset[0] = offset
    map_value_node.size[0] = size
    map_value_node.use_min = use_min
    map_value_node.min[0] = _min
    map_value_node.use_max = use_max
    map_value_node.max[0] = _max
    map_value_node.location = 200, 0
    links.new(r_layers_node.outputs['Z'], map_value_node.inputs['Value'])
    # Noise source: the 'Texture' datablock, blurred before mixing.
    texture_node = tree.nodes.new('CompositorNodeTexture')
    texture_node.texture = bpy.data.textures['Texture']
    texture_node.location = 0, -300
    texture_node.inputs[0].default_value = (10, 2, 5)
    blur_node = tree.nodes.new('CompositorNodeBlur')
    blur_node.inputs[1].default_value = 0.5
    blur_node.location = 200, -300
    links.new(texture_node.outputs['Color'], blur_node.inputs['Image'])
    # Overlay the blurred texture onto the mapped depth at low strength.
    mix_node = tree.nodes.new('CompositorNodeMixRGB')
    mix_node.blend_type = 'OVERLAY'
    mix_node.inputs[0].default_value = 0.1
    mix_node.location = 600, 0
    links.new(map_value_node.outputs['Value'], mix_node.inputs[1])
    links.new(blur_node.outputs['Image'], mix_node.inputs[2])
    # Viewer node makes the result available for save_render().
    viewer_node = tree.nodes.new('CompositorNodeViewer')
    viewer_node.location = 800, 0
    links.new(mix_node.outputs['Image'], viewer_node.inputs['Image'])
def render_and_save_image_from_viewer_node(dir_path, prefix_name, index, extension):
    """Render the scene and save the Viewer Node image.

    The output path is 'dir_path + prefix_name + NNNN + . + extension',
    where NNNN is *index* zero-padded to four digits (0000-9999).

    :param dir_path: string: with slash in the end
    :param prefix_name: the first part of the file's name
    :param index: int: number, the last part of the file's name, 0..9999
    :param extension: extension without '.', for example: 'png'
    """
    numbered_name = str(prefix_name) + str(index).zfill(4) + '.' + extension
    file_path = dir_path + numbered_name
    bpy.ops.render.render()
    bpy.data.images['Viewer Node'].save_render(filepath=file_path)
return | {
"content_hash": "78d0823eda0273193fa0bd7fdcb0315e",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 109,
"avg_line_length": 36.277372262773724,
"alnum_prop": 0.6704225352112676,
"repo_name": "sanchousic/diploma17-test-data-generation",
"id": "8046338059c8e4e23f688797cccb90d7af72a399",
"size": "4974",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tasks/blender_nodes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "41666"
}
],
"symlink_target": ""
} |
from twisted.internet import reactor
import goldenrod, config
def execute(parser, bot, user, args):
    """Owner-only, whisper-only command: reply to the user with the list of
    channels the bot is currently in.

    :param parser: command parser used for the permission check
    :param bot: the bot instance handling this request
    :param user: the requesting user
    :param args: command arguments (unused)
    """
    if not parser.checkPerms(bot, user, "owner") or not bot.isWhisperRequest():
        return
    # Imported here to read the live channel list at call time.
    from goldenrod import channelInstances
    # Build the reply in one pass instead of incremental concatenation,
    # and pass the string directly (the "%s" % s round-trip was redundant).
    bot.addressUser(user, "I am in: " + ", ".join(channel for channel in channelInstances))
def requiredPerm():
    """Return the permission level required to run this command."""
    return "owner"
def canUseByWhisper():
    """Return whether this command may be invoked via whisper."""
    return True
| {
"content_hash": "be930d6f1c60bc5dcc5dbdc7bcc0995c",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 79,
"avg_line_length": 29.235294117647058,
"alnum_prop": 0.6720321931589537,
"repo_name": "Dabomstew/goldenrod",
"id": "e5c63d36124c014407ca92a268ffb96f9321d474",
"size": "497",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "commands/listchannels.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "157738"
}
],
"symlink_target": ""
} |
'''
Make a signal mask and moment maps for the CO(2-1) data cube.
'''
from cube_analysis import run_pipeline
from paths import iram_co21_data_path
# Find a signal mask and derive moment arrays
# Build the signal mask and derive the moment maps for the CO(2-1) cube.
# Masking parameters: presumably 3-sigma seeds grown to 5-sigma with small
# (<27 pixel) regions rejected — confirm against cube_analysis docs.
masking_kwargs = {"method": "ppv_dilation",
                  "save_cube": True,
                  "noise_map": iram_co21_data_path("m33.rms.fits"),
                  "min_sig": 3,
                  "max_sig": 5,
                  "min_pix": 27}
moment_kwargs = {"num_cores": 6,
                 "verbose": True}
run_pipeline(iram_co21_data_path("m33.co21_iram.fits"),
             iram_co21_data_path("", no_check=True),
             masking_kwargs=masking_kwargs,
             moment_kwargs=moment_kwargs)
| {
"content_hash": "1b2649e95724ab87143e6af66b600d5b",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 78,
"avg_line_length": 35.23809523809524,
"alnum_prop": 0.46621621621621623,
"repo_name": "e-koch/VLA_Lband",
"id": "bd383e5d7cba3d6bbdac9854d74b4975e6b05cc1",
"size": "741",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ancillary_data/IRAM30m_CO21/14B-088/make_cube_outputs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2740022"
},
{
"name": "Shell",
"bytes": "98570"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import six
from sentry.tagstore.base import TagKeyStatus
class TagType(object):
    """Base class for lightweight tag objects.

    Subclasses declare ``__slots__``; repr, hashing, equality and pickle
    state are all derived from that attribute list.
    """

    def __repr__(self):
        field_list = ', '.join('%s=%r' % (name, getattr(self, name)) for name in self.__slots__)
        return '<%s: %s>' % (type(self).__name__, field_list)

    def __hash__(self):
        return hash(tuple(getattr(self, name) for name in self.__slots__))

    def __eq__(self, other):
        if type(self) != type(other):
            return False
        return all(getattr(self, name) == getattr(other, name) for name in self.__slots__)

    def __getstate__(self):
        return {name: getattr(self, name) for name in self.__slots__}

    def __setstate__(self, state):
        for name, value in six.iteritems(state):
            setattr(self, name, value)
class TagKey(TagType):
    # A tag key aggregated across events. Only key/values_seen/status take
    # part in TagType's repr/eq/hash/pickling; count and top_values are set
    # in __init__ but are intentionally absent from __slots__.
    __slots__ = ['key', 'values_seen', 'status']
    def __init__(self, key, values_seen, status=TagKeyStatus.VISIBLE, count=None, top_values=None):
        self.key = key
        self.values_seen = values_seen
        self.status = status
        self.count = count
        self.top_values = top_values
class TagValue(TagType):
    # A single observed value of a tag key, with usage statistics. All five
    # fields participate in TagType's repr/eq/hash/pickling.
    __slots__ = ['key', 'value', 'times_seen', 'first_seen', 'last_seen']
    def __init__(self, key, value, times_seen, first_seen, last_seen):
        self.key = key
        self.value = value
        self.times_seen = times_seen
        self.first_seen = first_seen
        self.last_seen = last_seen
class GroupTagKey(TagType):
    # A tag key scoped to a single group. Only group_id/key/values_seen take
    # part in TagType's repr/eq/hash/pickling; count and top_values are set
    # in __init__ but are intentionally absent from __slots__.
    __slots__ = ['group_id', 'key', 'values_seen']
    def __init__(self, group_id, key, values_seen, count=None, top_values=None):
        self.group_id = group_id
        self.key = key
        self.values_seen = values_seen
        self.count = count
        self.top_values = top_values
class GroupTagValue(TagType):
    # A tag value scoped to a single group, with usage statistics. All six
    # fields participate in TagType's repr/eq/hash/pickling.
    __slots__ = ['group_id', 'key', 'value', 'times_seen', 'first_seen', 'last_seen']
    def __init__(self, group_id, key, value, times_seen, first_seen, last_seen):
        self.group_id = group_id
        self.key = key
        self.value = value
        self.times_seen = times_seen
        self.first_seen = first_seen
        self.last_seen = last_seen
from sentry.api.serializers import Serializer, register
@register(TagKey)
class TagKeySerializer(Serializer):
    """Serializes a TagKey into its REST API representation."""

    def serialize(self, obj, attrs, user):
        from sentry import tagstore
        standardized_key = tagstore.get_standardized_key(obj.key)
        label = tagstore.get_tag_key_label(obj.key)
        return {
            'key': standardized_key,
            'name': label,
            'uniqueValues': obj.values_seen,
        }
@register(TagValue)
class TagValueSerializer(Serializer):
    """Serializes a TagValue into its REST API representation."""

    def serialize(self, obj, attrs, user):
        from sentry import tagstore
        serialized = {
            'key': tagstore.get_standardized_key(obj.key),
            'name': tagstore.get_tag_value_label(obj.key, obj.value),
            'value': obj.value,
            'count': obj.times_seen,
            'lastSeen': obj.last_seen,
            'firstSeen': obj.first_seen,
        }
        return serialized
@register(GroupTagKey)
class GroupTagKeySerializer(Serializer):
    """Serializes a GroupTagKey into its REST API representation."""

    def serialize(self, obj, attrs, user):
        from sentry import tagstore
        tag_label = tagstore.get_tag_key_label(obj.key)
        return {
            'name': tag_label,
            'key': tagstore.get_standardized_key(obj.key),
            'uniqueValues': obj.values_seen,
        }
@register(GroupTagValue)
class GroupTagValueSerializer(Serializer):
    """Serializes a GroupTagValue into its REST API representation."""

    def serialize(self, obj, attrs, user):
        from sentry import tagstore
        serialized = {
            'name': tagstore.get_tag_value_label(obj.key, obj.value),
            'key': tagstore.get_standardized_key(obj.key),
            'value': obj.value,
            'count': obj.times_seen,
            'lastSeen': obj.last_seen,
            'firstSeen': obj.first_seen,
        }
        return serialized
| {
"content_hash": "f2d9a7eeb96a0d6c2ead1d42c07c2480",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 99,
"avg_line_length": 29.387596899224807,
"alnum_prop": 0.5871801635452387,
"repo_name": "ifduyue/sentry",
"id": "a4d66887af35fec8a5c3b7d0840e480a5fd9219f",
"size": "3791",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sentry/tagstore/types.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "301292"
},
{
"name": "HTML",
"bytes": "241298"
},
{
"name": "JavaScript",
"bytes": "3295572"
},
{
"name": "Lua",
"bytes": "65795"
},
{
"name": "Makefile",
"bytes": "6892"
},
{
"name": "Python",
"bytes": "36910084"
},
{
"name": "Ruby",
"bytes": "217"
},
{
"name": "Shell",
"bytes": "5701"
}
],
"symlink_target": ""
} |
import datetime

from django.contrib.syndication.views import Feed
from django.core.urlresolvers import reverse, reverse_lazy
from django.utils.translation import ugettext_lazy as _, string_concat

from .models import Ad
class LatestAdFeed(Feed):
    """RSS feed of the ten most recently modified, validated classified ads."""

    title = _('Latest AGEPoly\'s classified ads')
    # reverse_lazy instead of reverse: reverse() runs at class-definition
    # (import) time, before the URLconf may be fully loaded, which can raise
    # NoReverseMatch/ImproperlyConfigured at startup.  reverse_lazy yields
    # the same URL but defers resolution until first use.
    link = reverse_lazy('polyclassifiedads.views.home')
    description = _('Latest AGEPoly\'s classified ads')

    def items(self):
        """Return the 10 most recently modified ads visible today."""
        now = datetime.date.today()
        liste = Ad.objects.filter(is_validated=True, is_deleted=False, online_date__lte=now, offline_date__gte=now).order_by('-last_modification_date')
        return liste[:10]

    def item_title(self, ad):
        """Feed entry title: the ad's own title."""
        return ad.title

    def item_description(self, ad):
        """Formatted ad content, with the price appended when one is set."""
        text = ad.content_formated()
        if ad.price:
            text = string_concat(text, "<br /><b>", _('Price'), "</b>: ", ad.price,)
        return text

    def item_link(self, ad):
        """Absolute URL of the ad's detail page."""
        return reverse('polyclassifiedads.views.show', args=(ad.pk,))

    def item_author_name(self, ad):
        """Author's full name, or a localized 'Anonymous' when unset."""
        return ad.author.get_full_name() if ad.author else _('Anonymous')

    def item_author_email(self, ad):
        """Contact address recorded on the ad itself."""
        return ad.contact_email
| {
"content_hash": "76564690c66d2215c1e49b37ebb4914c",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 151,
"avg_line_length": 30.68421052631579,
"alnum_prop": 0.6552315608919382,
"repo_name": "PolyLAN/polyclassifiedads",
"id": "4c681ee145ff5ce45572fda54ec213d461103e2b",
"size": "1166",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "polyclassifiedads/feeds.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "36613"
},
{
"name": "HTML",
"bytes": "117213"
},
{
"name": "JavaScript",
"bytes": "277864"
},
{
"name": "Python",
"bytes": "99804"
},
{
"name": "Shell",
"bytes": "1490"
}
],
"symlink_target": ""
} |
import gc
import os
import sys
import signal
import weakref
from cStringIO import StringIO
import unittest
@unittest.skipUnless(hasattr(os, 'kill'), "Test requires os.kill")
@unittest.skipIf(sys.platform =="win32", "Test cannot run on Windows")
@unittest.skipIf(sys.platform == 'freebsd6', "Test kills regrtest on freebsd6 "
                 "if threads have been used")
class TestBreak(unittest.TestCase):
    """Exercise unittest's SIGINT (Ctrl-C) catching machinery.
    Subclasses rerun every test with a different pre-installed SIGINT
    handler; see int_handler below.
    """
    # SIGINT handler to install in setUp; None means keep the current one.
    int_handler = None
    def setUp(self):
        # Remember the current handler so tearDown can restore it.
        self._default_handler = signal.getsignal(signal.SIGINT)
        if self.int_handler is not None:
            signal.signal(signal.SIGINT, self.int_handler)
    def tearDown(self):
        # Restore the handler and wipe unittest.signals' module-level state
        # so no registered results or installed handler leak between tests.
        signal.signal(signal.SIGINT, self._default_handler)
        unittest.signals._results = weakref.WeakKeyDictionary()
        unittest.signals._interrupt_handler = None
    def testInstallHandler(self):
        """installHandler() replaces SIGINT handling and absorbs one SIGINT."""
        default_handler = signal.getsignal(signal.SIGINT)
        unittest.installHandler()
        self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
        try:
            pid = os.getpid()
            os.kill(pid, signal.SIGINT)
        except KeyboardInterrupt:
            self.fail("KeyboardInterrupt not handled")
        self.assertTrue(unittest.signals._interrupt_handler.called)
    def testRegisterResult(self):
        """registerResult() stores the result in the weak registry."""
        result = unittest.TestResult()
        unittest.registerResult(result)
        for ref in unittest.signals._results:
            if ref is result:
                break
            elif ref is not result:
                self.fail("odd object in result set")
        else:
            self.fail("result not found")
    def testInterruptCaught(self):
        """A first SIGINT sets shouldStop instead of raising."""
        default_handler = signal.getsignal(signal.SIGINT)
        result = unittest.TestResult()
        unittest.installHandler()
        unittest.registerResult(result)
        self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
        def test(result):
            pid = os.getpid()
            os.kill(pid, signal.SIGINT)
            result.breakCaught = True
            self.assertTrue(result.shouldStop)
        try:
            test(result)
        except KeyboardInterrupt:
            self.fail("KeyboardInterrupt not handled")
        self.assertTrue(result.breakCaught)
    def testSecondInterrupt(self):
        """A second SIGINT must raise KeyboardInterrupt immediately."""
        # Can't use skipIf decorator because the signal handler may have
        # been changed after defining this method.
        if signal.getsignal(signal.SIGINT) == signal.SIG_IGN:
            self.skipTest("test requires SIGINT to not be ignored")
        result = unittest.TestResult()
        unittest.installHandler()
        unittest.registerResult(result)
        def test(result):
            pid = os.getpid()
            os.kill(pid, signal.SIGINT)
            result.breakCaught = True
            self.assertTrue(result.shouldStop)
            os.kill(pid, signal.SIGINT)
            self.fail("Second KeyboardInterrupt not raised")
        try:
            test(result)
        except KeyboardInterrupt:
            pass
        else:
            self.fail("Second KeyboardInterrupt not raised")
        self.assertTrue(result.breakCaught)
    def testTwoResults(self):
        """One SIGINT stops every registered result, and only those."""
        unittest.installHandler()
        result = unittest.TestResult()
        unittest.registerResult(result)
        new_handler = signal.getsignal(signal.SIGINT)
        result2 = unittest.TestResult()
        unittest.registerResult(result2)
        self.assertEqual(signal.getsignal(signal.SIGINT), new_handler)
        result3 = unittest.TestResult()
        def test(result):
            pid = os.getpid()
            os.kill(pid, signal.SIGINT)
        try:
            test(result)
        except KeyboardInterrupt:
            self.fail("KeyboardInterrupt not handled")
        self.assertTrue(result.shouldStop)
        self.assertTrue(result2.shouldStop)
        self.assertFalse(result3.shouldStop)
    def testHandlerReplacedButCalled(self):
        """A replaced-but-delegating handler must re-raise the interrupt."""
        # Can't use skipIf decorator because the signal handler may have
        # been changed after defining this method.
        if signal.getsignal(signal.SIGINT) == signal.SIG_IGN:
            self.skipTest("test requires SIGINT to not be ignored")
        # If our handler has been replaced (is no longer installed) but is
        # called by the *new* handler, then it isn't safe to delay the
        # SIGINT and we should immediately delegate to the default handler
        unittest.installHandler()
        handler = signal.getsignal(signal.SIGINT)
        def new_handler(frame, signum):
            handler(frame, signum)
        signal.signal(signal.SIGINT, new_handler)
        try:
            pid = os.getpid()
            os.kill(pid, signal.SIGINT)
        except KeyboardInterrupt:
            pass
        else:
            self.fail("replaced but delegated handler doesn't raise interrupt")
    def testRunner(self):
        # Creating a TextTestRunner with the appropriate argument should
        # register the TextTestResult it creates
        runner = unittest.TextTestRunner(stream=StringIO())
        result = runner.run(unittest.TestSuite())
        self.assertIn(result, unittest.signals._results)
    def testWeakReferences(self):
        # Calling registerResult on a result should not keep it alive
        result = unittest.TestResult()
        unittest.registerResult(result)
        ref = weakref.ref(result)
        del result
        # For non-reference counting implementations
        gc.collect();gc.collect()
        self.assertIsNone(ref())
    def testRemoveResult(self):
        """removeResult() unregisters, so SIGINT no longer stops the result."""
        result = unittest.TestResult()
        unittest.registerResult(result)
        unittest.installHandler()
        self.assertTrue(unittest.removeResult(result))
        # Should this raise an error instead?
        self.assertFalse(unittest.removeResult(unittest.TestResult()))
        try:
            pid = os.getpid()
            os.kill(pid, signal.SIGINT)
        except KeyboardInterrupt:
            pass
        self.assertFalse(result.shouldStop)
    def testMainInstallsHandler(self):
        """TestProgram installs the SIGINT handler only when catchbreak is set."""
        failfast = object()
        test = object()
        verbosity = object()
        result = object()
        default_handler = signal.getsignal(signal.SIGINT)
        class FakeRunner(object):
            # Records constructor/run arguments for later inspection.
            initArgs = []
            runArgs = []
            def __init__(self, *args, **kwargs):
                self.initArgs.append((args, kwargs))
            def run(self, test):
                self.runArgs.append(test)
                return result
        class Program(unittest.TestProgram):
            # Bypass TestProgram.__init__ entirely; only set what runTests uses.
            def __init__(self, catchbreak):
                self.exit = False
                self.verbosity = verbosity
                self.failfast = failfast
                self.catchbreak = catchbreak
                self.testRunner = FakeRunner
                self.test = test
                self.result = None
        p = Program(False)
        p.runTests()
        self.assertEqual(FakeRunner.initArgs, [((), {'buffer': None,
                                                     'verbosity': verbosity,
                                                     'failfast': failfast})])
        self.assertEqual(FakeRunner.runArgs, [test])
        self.assertEqual(p.result, result)
        self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
        FakeRunner.initArgs = []
        FakeRunner.runArgs = []
        p = Program(True)
        p.runTests()
        self.assertEqual(FakeRunner.initArgs, [((), {'buffer': None,
                                                     'verbosity': verbosity,
                                                     'failfast': failfast})])
        self.assertEqual(FakeRunner.runArgs, [test])
        self.assertEqual(p.result, result)
        self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
    def testRemoveHandler(self):
        """removeHandler() restores the default handler; it is idempotent."""
        default_handler = signal.getsignal(signal.SIGINT)
        unittest.installHandler()
        unittest.removeHandler()
        self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
        # check that calling removeHandler multiple times has no ill-effect
        unittest.removeHandler()
        self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
    def testRemoveHandlerAsDecorator(self):
        """As a decorator, removeHandler restores during the call only."""
        default_handler = signal.getsignal(signal.SIGINT)
        unittest.installHandler()
        @unittest.removeHandler
        def test():
            self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
        test()
        self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
@unittest.skipUnless(hasattr(os, 'kill'), "Test requires os.kill")
@unittest.skipIf(sys.platform == "win32" or sys.platform == "cli", "Test cannot run on Windows")
@unittest.skipIf(sys.platform == 'freebsd6', "Test kills regrtest on freebsd6 "
                 "if threads have been used")
class TestBreakDefaultIntHandler(TestBreak):
    # Rerun TestBreak with Python's default SIGINT handler pre-installed.
    int_handler = signal.default_int_handler
@unittest.skipUnless(hasattr(os, 'kill'), "Test requires os.kill")
@unittest.skipIf(sys.platform == "win32" or sys.platform == "cli", "Test cannot run on Windows")
@unittest.skipIf(sys.platform == 'freebsd6', "Test kills regrtest on freebsd6 "
                 "if threads have been used")
class TestBreakSignalIgnored(TestBreak):
    # Rerun TestBreak with SIGINT ignored before installHandler() runs.
    int_handler = signal.SIG_IGN
@unittest.skipUnless(hasattr(os, 'kill'), "Test requires os.kill")
@unittest.skipIf(sys.platform == "win32" or sys.platform == "cli", "Test cannot run on Windows")
@unittest.skipIf(sys.platform == 'freebsd6', "Test kills regrtest on freebsd6 "
                 "if threads have been used")
class TestBreakSignalDefault(TestBreak):
    # Rerun TestBreak with the OS-level default SIGINT disposition.
    int_handler = signal.SIG_DFL
| {
"content_hash": "2834f9cc72c89521be6000d05f96e5ec",
"timestamp": "",
"source": "github",
"line_count": 284,
"max_line_length": 96,
"avg_line_length": 34.221830985915496,
"alnum_prop": 0.6258874369791131,
"repo_name": "slozier/ironpython2",
"id": "850324a3a044c973dd04052522d5fc2c74c4998d",
"size": "9719",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Src/StdLib/Lib/unittest/test/test_break.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "2117"
},
{
"name": "Batchfile",
"bytes": "4080"
},
{
"name": "C",
"bytes": "20290"
},
{
"name": "C#",
"bytes": "12157325"
},
{
"name": "C++",
"bytes": "69156"
},
{
"name": "HTML",
"bytes": "13181412"
},
{
"name": "JavaScript",
"bytes": "1656"
},
{
"name": "Makefile",
"bytes": "332"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "PowerShell",
"bytes": "67035"
},
{
"name": "Python",
"bytes": "26565467"
},
{
"name": "Roff",
"bytes": "21"
},
{
"name": "Shell",
"bytes": "193"
},
{
"name": "Smalltalk",
"bytes": "3"
},
{
"name": "VBScript",
"bytes": "974"
},
{
"name": "XSLT",
"bytes": "2058"
}
],
"symlink_target": ""
} |
'''Common parsing rules relevant for all grammars. See accompanying docs for
more information.'''
from camkes.ast import Import, Include
from .. import Exceptions
def p_import_statement(t):
    '''import_statement : relative_import_statement
                        | builtin_import_statement'''
    # Pass the already-constructed Import object through unchanged.
    # (The docstring above is the PLY grammar rule; edit with care.)
    t[0] = t[1]
def p_relative_import_statement(t):
    '''relative_import_statement : import STRING SEMI'''
    # 'import "file";' -- the quoted path is resolved relative to the importer.
    t[0] = Import(t[2], relative=True, filename=t.lexer.filename, \
        lineno=t.lexer.lineno)
def p_builtin_import_statement(t):
    '''builtin_import_statement : import ANGLE_STRING SEMI'''
    # 'import <file>;' -- resolved against the built-in search path.
    t[0] = Import(t[2], relative=False, filename=t.lexer.filename, \
        lineno=t.lexer.lineno)
def p_include_statement(t):
    '''include_statement : relative_include_statement
                         | builtin_include_statement'''
    # Pass the already-constructed Include object through unchanged.
    t[0] = t[1]
def p_relative_include_statement(t):
    '''relative_include_statement : include STRING SEMI'''
    # 'include "file";' -- resolved relative to the including file.
    t[0] = Include(t[2], relative=True, filename=t.lexer.filename, \
        lineno=t.lexer.lineno)
def p_builtin_include_statement(t):
    '''builtin_include_statement : include ANGLE_STRING SEMI'''
    # 'include <file>;' -- resolved against the built-in search path.
    t[0] = Include(t[2], relative=False, filename=t.lexer.filename, \
        lineno=t.lexer.lineno)
def p_list(t):
    '''list : LSQUARE list_contents RSQUARE'''
    # Discard the brackets; list_contents already built the Python list.
    t[0] = t[2]
def p_list_contents(t):
    '''list_contents :
                     | container_element
                     | container_element COMMA list_contents'''
    # Fold the right-recursive grammar into a flat Python list.  The token
    # count tells us which production matched: 1 = empty, 2 = single
    # element, 4 = element COMMA tail.
    size = len(t)
    if size == 1:
        t[0] = []
    else:
        assert size in (2, 4)
        t[0] = [t[1]] if size == 2 else [t[1]] + t[3]
def p_container_element(t):
    '''container_element : NUMBER
                         | DECIMAL
                         | STRING
                         | list
                         | dict'''
    # Elements are already native Python values; pass through unchanged.
    t[0] = t[1]
def p_dict(t):
    '''dict : LBRACE dict_contents RBRACE'''
    # Discard the braces; dict_contents already built the Python dict.
    t[0] = t[2]
def p_dict_contents(t):
    '''dict_contents :
                     | dict_key COLON container_element
                     | dict_key COLON container_element COMMA dict_contents'''
    # Fold the right-recursive key/value productions into a Python dict.
    if len(t) == 1:
        t[0] = {}
    elif len(t) == 4:
        t[0] = {t[1]:t[3]}
    else:
        assert len(t) == 6
        # list(...) around items() keeps this working on Python 3, where
        # dict.items() returns a view that cannot be concatenated to a list.
        t[0] = dict([(t[1], t[3])] + list(t[5].items()))
def p_dict_key(t):
    '''dict_key : NUMBER
                | DECIMAL
                | STRING'''
    # Keys are restricted to hashable literal types; pass through.
    t[0] = t[1]
def p_error(t):
    # PLY error hook: surface any parse failure as a CAmkES-specific error.
    raise Exceptions.CAmkESSyntaxError(t)
| {
"content_hash": "9f5e0000e2ca9384abb1ce109c434f3a",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 78,
"avg_line_length": 29.321428571428573,
"alnum_prop": 0.5477060495330898,
"repo_name": "smaccm/camkes-tool",
"id": "febde111e5756d436e502f8ecefbfb32832cc8ef",
"size": "2689",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "camkes/parser/input/GenericRules.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "3407"
},
{
"name": "C",
"bytes": "383162"
},
{
"name": "C++",
"bytes": "740"
},
{
"name": "Isabelle",
"bytes": "242975"
},
{
"name": "Makefile",
"bytes": "38834"
},
{
"name": "Python",
"bytes": "229476"
},
{
"name": "Shell",
"bytes": "3298"
},
{
"name": "VimL",
"bytes": "3143"
}
],
"symlink_target": ""
} |
source = '''# line 1
'A module docstring.'
import sys, inspect
# line 5
# line 7
def spam(a, b, c, d=3, (e, (f,))=(4, (5,)), *g, **h):
eggs(b + d, c + f)
# line 11
def eggs(x, y):
"A docstring."
global fr, st
fr = inspect.currentframe()
st = inspect.stack()
p = x
q = y / 0
# line 20
class StupidGit:
"""A longer,
indented
docstring."""
# line 27
def abuse(self, a, b, c):
"""Another
\tdocstring
containing
\ttabs
\t
"""
self.argue(a, b, c)
# line 40
def argue(self, a, b, c):
try:
spam(a, b, c)
except:
self.ex = sys.exc_info()
self.tr = inspect.trace()
# line 48
class MalodorousPervert(StupidGit):
pass
class ParrotDroppings:
pass
class FesteringGob(MalodorousPervert, ParrotDroppings):
pass
'''
# Functions tested in this suite:
# ismodule, isclass, ismethod, isfunction, istraceback, isframe, iscode,
# isbuiltin, isroutine, getmembers, getdoc, getfile, getmodule,
# getsourcefile, getcomments, getsource, getclasstree, getargspec,
# getargvalues, formatargspec, formatargvalues, currentframe, stack, trace
# isdatadescriptor
from test.test_support import TestFailed, TESTFN
import sys, imp, os, string
def test(assertion, message, *args):
    """Fail with *message* %-formatted with *args* unless *assertion* holds."""
    if not assertion:
        # Call-style raise is valid on both Python 2 and 3; the old
        # 'raise TestFailed, msg' comma form is Python-2-only syntax.
        raise TestFailed(message % args)
import inspect
# Materialize the sample module on disk and import it so inspect can find
# real source lines for it.
file = open(TESTFN, 'w')
file.write(source)
file.close()
# Note that load_source creates file TESTFN+'c' or TESTFN+'o'.
mod = imp.load_source('testmod', TESTFN)
files_to_clean_up = [TESTFN, TESTFN + 'c', TESTFN + 'o']
def istest(func, exp):
    # Evaluate the expression in this module's namespace, then check that
    # exactly one inspect.is* predicate -- func -- accepts the result and
    # all of its siblings reject it.
    obj = eval(exp)
    test(func(obj), '%s(%s)' % (func.__name__, exp))
    predicates = (inspect.isbuiltin, inspect.isclass, inspect.iscode,
                  inspect.isframe, inspect.isfunction, inspect.ismethod,
                  inspect.ismodule, inspect.istraceback)
    for predicate in predicates:
        if predicate is not func:
            test(not predicate(obj), 'not %s(%s)' % (predicate.__name__, exp))
# Exercise each inspect.is* predicate plus getmembers/getclasstree and the
# doc/comment/file accessors against the freshly imported sample module.
git = mod.StupidGit()
try:
    1/0
except:
    tb = sys.exc_traceback
istest(inspect.isbuiltin, 'sys.exit')
istest(inspect.isbuiltin, '[].append')
istest(inspect.isclass, 'mod.StupidGit')
istest(inspect.iscode, 'mod.spam.func_code')
istest(inspect.isframe, 'tb.tb_frame')
istest(inspect.isfunction, 'mod.spam')
istest(inspect.ismethod, 'mod.StupidGit.abuse')
istest(inspect.ismethod, 'git.argue')
istest(inspect.ismodule, 'mod')
istest(inspect.istraceback, 'tb')
import __builtin__
istest(inspect.isdatadescriptor, '__builtin__.file.closed')
istest(inspect.isdatadescriptor, '__builtin__.file.softspace')
test(inspect.isroutine(mod.spam), 'isroutine(mod.spam)')
test(inspect.isroutine([].count), 'isroutine([].count)')
classes = inspect.getmembers(mod, inspect.isclass)
test(classes ==
     [('FesteringGob', mod.FesteringGob),
      ('MalodorousPervert', mod.MalodorousPervert),
      ('ParrotDroppings', mod.ParrotDroppings),
      ('StupidGit', mod.StupidGit)], 'class list')
tree = inspect.getclasstree(map(lambda x: x[1], classes), 1)
test(tree ==
     [(mod.ParrotDroppings, ()),
      (mod.StupidGit, ()),
      [(mod.MalodorousPervert, (mod.StupidGit,)),
       [(mod.FesteringGob, (mod.MalodorousPervert, mod.ParrotDroppings))
       ]
      ]
     ], 'class tree')
functions = inspect.getmembers(mod, inspect.isfunction)
test(functions == [('eggs', mod.eggs), ('spam', mod.spam)], 'function list')
test(inspect.getdoc(mod) == 'A module docstring.', 'getdoc(mod)')
test(inspect.getcomments(mod) == '# line 1\n', 'getcomments(mod)')
test(inspect.getmodule(mod.StupidGit) == mod, 'getmodule(mod.StupidGit)')
test(inspect.getfile(mod.StupidGit) == TESTFN, 'getfile(mod.StupidGit)')
test(inspect.getsourcefile(mod.spam) == TESTFN, 'getsourcefile(mod.spam)')
test(inspect.getsourcefile(git.abuse) == TESTFN, 'getsourcefile(git.abuse)')
def sourcerange(top, bottom):
    """Return lines top..bottom (1-based, inclusive) of the global *source*
    string, newline-terminated."""
    # str methods replace the deprecated string-module functions and behave
    # identically on Python 2 and 3.
    lines = source.split('\n')
    return '\n'.join(lines[top-1:bottom]) + '\n'
# Verify getsource/getdoc line extraction, then trigger the ZeroDivisionError
# inside the sample module to populate trace()/stack()/frame expectations.
test(inspect.getsource(git.abuse) == sourcerange(29, 39),
     'getsource(git.abuse)')
test(inspect.getsource(mod.StupidGit) == sourcerange(21, 46),
     'getsource(mod.StupidGit)')
test(inspect.getdoc(mod.StupidGit) ==
     'A longer,\n\nindented\n\ndocstring.', 'getdoc(mod.StupidGit)')
test(inspect.getdoc(git.abuse) ==
     'Another\n\ndocstring\n\ncontaining\n\ntabs', 'getdoc(git.abuse)')
test(inspect.getcomments(mod.StupidGit) == '# line 20\n',
     'getcomments(mod.StupidGit)')
git.abuse(7, 8, 9)
istest(inspect.istraceback, 'git.ex[2]')
istest(inspect.isframe, 'mod.fr')
test(len(git.tr) == 3, 'trace() length')
test(git.tr[0][1:] == (TESTFN, 43, 'argue',
                       [' spam(a, b, c)\n'], 0),
     'trace() row 2')
test(git.tr[1][1:] == (TESTFN, 9, 'spam', [' eggs(b + d, c + f)\n'], 0),
     'trace() row 2')
test(git.tr[2][1:] == (TESTFN, 18, 'eggs', [' q = y / 0\n'], 0),
     'trace() row 3')
test(len(mod.st) >= 5, 'stack() length')
test(mod.st[0][1:] ==
     (TESTFN, 16, 'eggs', [' st = inspect.stack()\n'], 0),
     'stack() row 1')
test(mod.st[1][1:] ==
     (TESTFN, 9, 'spam', [' eggs(b + d, c + f)\n'], 0),
     'stack() row 2')
test(mod.st[2][1:] ==
     (TESTFN, 43, 'argue', [' spam(a, b, c)\n'], 0),
     'stack() row 3')
test(mod.st[3][1:] ==
     (TESTFN, 39, 'abuse', [' self.argue(a, b, c)\n'], 0),
     'stack() row 4')
args, varargs, varkw, locals = inspect.getargvalues(mod.fr)
test(args == ['x', 'y'], 'mod.fr args')
test(varargs == None, 'mod.fr varargs')
test(varkw == None, 'mod.fr varkw')
test(locals == {'x': 11, 'p': 11, 'y': 14}, 'mod.fr locals')
test(inspect.formatargvalues(args, varargs, varkw, locals) ==
     '(x=11, y=14)', 'mod.fr formatted argvalues')
args, varargs, varkw, locals = inspect.getargvalues(mod.fr.f_back)
test(args == ['a', 'b', 'c', 'd', ['e', ['f']]], 'mod.fr.f_back args')
test(varargs == 'g', 'mod.fr.f_back varargs')
test(varkw == 'h', 'mod.fr.f_back varkw')
test(inspect.formatargvalues(args, varargs, varkw, locals) ==
     '(a=7, b=8, c=9, d=3, (e=4, (f=5,)), *g=(), **h={})',
     'mod.fr.f_back formatted argvalues')
# Best-effort cleanup of the generated module files.
for fname in files_to_clean_up:
    try:
        os.unlink(fname)
    except:
        pass
# Test for decorators as well.
# getsource() should report the decorated lines for 'wrapped' but follow the
# replacement function's own location for 'gone'.
source = r"""
def wrap(foo=None):
    def wrapper(func):
        return func
    return wrapper
def replace(func):
    def insteadfunc():
        print 'hello'
    return insteadfunc
# two decorators, one with argument
@wrap()
@wrap(wrap)
def wrapped():
    pass
@replace
def gone():
    pass"""
file = open(TESTFN + "2", "w")
file.write(source)
file.close()
files_to_clean_up = [TESTFN + "2", TESTFN + '2c', TESTFN + '2o']
mod2 = imp.load_source("testmod3", TESTFN + "2")
test(inspect.getsource(mod2.wrapped) == sourcerange(13, 16),
     "inspect.getsource(mod.wrapped)")
test(inspect.getsource(mod2.gone) == sourcerange(8, 9),
     "inspect.getsource(mod.gone)")
# Best-effort cleanup of the second generated module's files.
for fname in files_to_clean_up:
    try:
        os.unlink(fname)
    except:
        pass
# Test classic-class method resolution order.
# Classic classes use depth-first left-to-right resolution: D, B, A, C.
class A: pass
class B(A): pass
class C(A): pass
class D(B, C): pass
expected = (D, B, A, C)
got = inspect.getmro(D)
test(expected == got, "expected %r mro, got %r", expected, got)
# The same w/ new-class MRO.
# New-style classes use C3 linearization: D, B, C, A, object.
class A(object): pass
class B(A): pass
class C(A): pass
class D(B, C): pass
expected = (D, B, C, A, object)
got = inspect.getmro(D)
test(expected == got, "expected %r mro, got %r", expected, got)
# Test classify_class_attrs.
def attrs_wo_objs(cls):
    # Keep (name, kind, defining_class) and drop the trailing attribute
    # object from each classify_class_attrs() entry.
    return [attr[:3] for attr in inspect.classify_class_attrs(cls)]
# Fixture hierarchy for classify_class_attrs with classic classes: A defines
# one attribute of every kind; B/C/D override selected ones.
class A:
    def s(): pass
    s = staticmethod(s)
    def c(cls): pass
    c = classmethod(c)
    def getp(self): pass
    p = property(getp)
    def m(self): pass
    def m1(self): pass
    datablob = '1'
attrs = attrs_wo_objs(A)
test(('s', 'static method', A) in attrs, 'missing static method')
test(('c', 'class method', A) in attrs, 'missing class method')
test(('p', 'property', A) in attrs, 'missing property')
test(('m', 'method', A) in attrs, 'missing plain method')
test(('m1', 'method', A) in attrs, 'missing plain method')
test(('datablob', 'data', A) in attrs, 'missing data')
# B overrides m only; everything else should still be attributed to A.
class B(A):
    def m(self): pass
attrs = attrs_wo_objs(B)
test(('s', 'static method', A) in attrs, 'missing static method')
test(('c', 'class method', A) in attrs, 'missing class method')
test(('p', 'property', A) in attrs, 'missing property')
test(('m', 'method', B) in attrs, 'missing plain method')
test(('m1', 'method', A) in attrs, 'missing plain method')
test(('datablob', 'data', A) in attrs, 'missing data')
# C shadows the classmethod c with a plain method.
class C(A):
    def m(self): pass
    def c(self): pass
attrs = attrs_wo_objs(C)
test(('s', 'static method', A) in attrs, 'missing static method')
test(('c', 'method', C) in attrs, 'missing plain method')
test(('p', 'property', A) in attrs, 'missing property')
test(('m', 'method', C) in attrs, 'missing plain method')
test(('m1', 'method', A) in attrs, 'missing plain method')
test(('datablob', 'data', A) in attrs, 'missing data')
# Diamond: classic MRO resolves m to B (depth-first) and c back to A.
class D(B, C):
    def m1(self): pass
attrs = attrs_wo_objs(D)
test(('s', 'static method', A) in attrs, 'missing static method')
test(('c', 'class method', A) in attrs, 'missing class method')
test(('p', 'property', A) in attrs, 'missing property')
test(('m', 'method', B) in attrs, 'missing plain method')
test(('m1', 'method', D) in attrs, 'missing plain method')
test(('datablob', 'data', A) in attrs, 'missing data')
# Repeat all that, but w/ new-style classes.
class A(object):
    def s(): pass
    s = staticmethod(s)
    def c(cls): pass
    c = classmethod(c)
    def getp(self): pass
    p = property(getp)
    def m(self): pass
    def m1(self): pass
    datablob = '1'
attrs = attrs_wo_objs(A)
test(('s', 'static method', A) in attrs, 'missing static method')
test(('c', 'class method', A) in attrs, 'missing class method')
test(('p', 'property', A) in attrs, 'missing property')
test(('m', 'method', A) in attrs, 'missing plain method')
test(('m1', 'method', A) in attrs, 'missing plain method')
test(('datablob', 'data', A) in attrs, 'missing data')
class B(A):
    def m(self): pass
attrs = attrs_wo_objs(B)
test(('s', 'static method', A) in attrs, 'missing static method')
test(('c', 'class method', A) in attrs, 'missing class method')
test(('p', 'property', A) in attrs, 'missing property')
test(('m', 'method', B) in attrs, 'missing plain method')
test(('m1', 'method', A) in attrs, 'missing plain method')
test(('datablob', 'data', A) in attrs, 'missing data')
class C(A):
    def m(self): pass
    def c(self): pass
attrs = attrs_wo_objs(C)
test(('s', 'static method', A) in attrs, 'missing static method')
test(('c', 'method', C) in attrs, 'missing plain method')
test(('p', 'property', A) in attrs, 'missing property')
test(('m', 'method', C) in attrs, 'missing plain method')
test(('m1', 'method', A) in attrs, 'missing plain method')
test(('datablob', 'data', A) in attrs, 'missing data')
# Key difference from the classic case: C3 linearization puts C before A,
# so c resolves to C's plain method rather than A's classmethod.
class D(B, C):
    def m1(self): pass
attrs = attrs_wo_objs(D)
test(('s', 'static method', A) in attrs, 'missing static method')
test(('c', 'method', C) in attrs, 'missing plain method')
test(('p', 'property', A) in attrs, 'missing property')
test(('m', 'method', B) in attrs, 'missing plain method')
test(('m1', 'method', D) in attrs, 'missing plain method')
test(('datablob', 'data', A) in attrs, 'missing data')
# getargspec/formatargspec against the sample module's functions, including
# Python 2 nested (sublist) parameters in spam.
args, varargs, varkw, defaults = inspect.getargspec(mod.eggs)
test(args == ['x', 'y'], 'mod.eggs args')
test(varargs == None, 'mod.eggs varargs')
test(varkw == None, 'mod.eggs varkw')
test(defaults == None, 'mod.eggs defaults')
test(inspect.formatargspec(args, varargs, varkw, defaults) ==
     '(x, y)', 'mod.eggs formatted argspec')
args, varargs, varkw, defaults = inspect.getargspec(mod.spam)
test(args == ['a', 'b', 'c', 'd', ['e', ['f']]], 'mod.spam args')
test(varargs == 'g', 'mod.spam varargs')
test(varkw == 'h', 'mod.spam varkw')
test(defaults == (3, (4, (5,))), 'mod.spam defaults')
test(inspect.formatargspec(args, varargs, varkw, defaults) ==
     '(a, b, c, d=3, (e, (f,))=(4, (5,)), *g, **h)',
     'mod.spam formatted argspec')
args, varargs, varkw, defaults = inspect.getargspec(A.m)
test(args == ['self'], 'A.m args')
test(varargs is None, 'A.m varargs')
test(varkw is None, 'A.m varkw')
test(defaults is None, 'A.m defaults')
# Doc/lib/libinspect.tex claims there are 11 such functions
count = len(filter(lambda x:x.startswith('is'), dir(inspect)))
test(count == 11, "There are %d (not 11) is* functions", count)
def sublistOfOne((foo)): return 1
args, varargs, varkw, defaults = inspect.getargspec(sublistOfOne)
test(args == [['foo']], 'sublistOfOne args')
test(varargs is None, 'sublistOfOne varargs')
test(varkw is None, 'sublistOfOne varkw')
test(defaults is None, 'sublistOfOn defaults')
| {
"content_hash": "a8163d2c6bb6b7ec82a2325793f6d329",
"timestamp": "",
"source": "github",
"line_count": 425,
"max_line_length": 76,
"avg_line_length": 30.094117647058823,
"alnum_prop": 0.6308053166536357,
"repo_name": "MalloyPower/parsing-python",
"id": "e3bf73a85c20b9c90a609c87d38308b879a6f4fd",
"size": "12790",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-2.4/Lib/test/test_inspect.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
} |
import os
from unittest import mock
from tacker.sol_refactored.common import exceptions as sol_ex
from tacker.sol_refactored.common import vnfd_utils
from tacker.sol_refactored.infra_drivers.kubernetes import helm
from tacker.sol_refactored import objects
from tacker.tests.unit import base
# VNFD identifier used by the sample CSAR package loaded in setUp below.
CNF_SAMPLE_VNFD_ID = "b1bb0ce7-ebca-4fa7-95ed-4840d70a1177"
class TestHelm(base.TestCase):
    """Unit tests for the Helm Kubernetes infra driver."""

    def setUp(self):
        super(TestHelm, self).setUp()
        objects.register_all()
        self.driver = helm.Helm()
        tests_dir = os.path.dirname(__file__)
        # NOTE: borrow a k8s sample for now since it is enough for the
        # current tests.
        samples_dir = os.path.join(tests_dir, "../..", "samples")
        self.vnfd_1 = vnfd_utils.Vnfd(CNF_SAMPLE_VNFD_ID)
        self.vnfd_1.init_from_csar_dir(os.path.join(samples_dir, "sample2"))

    def test_scale_invalid_parameter(self):
        """_scale must raise when the scaled VDU has no helm replica mapping."""
        scale_req = objects.ScaleVnfRequest(
            type='SCALE_OUT',
            aspectId='vdu1_aspect',
            numberOfSteps=1
        )
        # Only VDU2 is mapped below, so scaling VDU1 has no helm parameter.
        metadata = {
            'namespace': 'default',
            'release_name': 'test-release',
            'helm_chart_path': 'Files/kubernetes/test-chart.tgz',
            'helm_value_names': {'VDU2': {'replica': 'values.replica'}}
        }
        vnf_info = objects.VnfInstanceV2_InstantiatedVnfInfo(
            vnfcResourceInfo=[objects.VnfcResourceInfoV2(vduId='VDU1')],
            metadata=metadata
        )
        instance = objects.VnfInstanceV2(instantiatedVnfInfo=vnf_info)
        grant_request = objects.GrantRequestV1(
            addResources=[
                objects.ResourceDefinitionV1(
                    type='COMPUTE',
                    resourceTemplateId='VDU1'
                )
            ]
        )
        expected = sol_ex.HelmParameterNotFound(vdu_name='VDU1')
        actual = self.assertRaises(
            sol_ex.HelmParameterNotFound,
            self.driver._scale, scale_req, instance, grant_request,
            mock.Mock(), self.vnfd_1, mock.Mock(), mock.Mock())
        self.assertEqual(expected.detail, actual.detail)
| {
"content_hash": "467c3f2862cf5bd7ccbbbc06691a5e4d",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 75,
"avg_line_length": 35.644067796610166,
"alnum_prop": 0.6019971469329529,
"repo_name": "openstack/tacker",
"id": "53c291e0c46184662164e901abbaefde215a644b",
"size": "2765",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tacker/tests/unit/sol_refactored/infra_drivers/kubernetes/test_helm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "10809"
},
{
"name": "Mako",
"bytes": "1046"
},
{
"name": "Python",
"bytes": "7648075"
},
{
"name": "Ruby",
"bytes": "2841"
},
{
"name": "Shell",
"bytes": "61750"
},
{
"name": "Smarty",
"bytes": "3624"
}
],
"symlink_target": ""
} |
from psi.app.views.components import ReadonlyStringField
from flask_admin.model import InlineFormAdmin
from flask_babelex import lazy_gettext
from psi.app.views.base import ModelViewWithAccess
class PaymentMethodLineInlineAdmin(InlineFormAdmin):
    """Inline admin for payment-method lines with localized field labels."""

    form_args = {
        'account_name': {'label': lazy_gettext('Account Name')},
        'account_number': {'label': lazy_gettext('Account Number')},
        'bank_name': {'label': lazy_gettext('Bank Name')},
        'bank_branch': {'label': lazy_gettext('Bank Branch')},
        'remark': {'label': lazy_gettext('Remark')},
    }
class SupplierAdmin(ModelViewWithAccess):
    """Admin view for suppliers; payment-method lines are edited inline."""
    from psi.app.models import PaymentMethod
    form_excluded_columns = ('purchaseOrders', 'products', 'organization')
    inline_models = (PaymentMethodLineInlineAdmin(PaymentMethod),)
    # 'mnemonic' is a search aid only; keep it out of detail and list views.
    column_details_exclude_list = ('organization','mnemonic')
    column_exclude_list = ('organization', 'mnemonic', 'create_date')
    column_editable_list = ['name', 'qq', 'phone', 'contact', 'email', 'website',
                            'whole_sale_req', 'can_mixed_whole_sale', 'remark']
    column_details_list = ['id', 'external_id', 'name', 'qq', 'phone', 'contact',
                           'email', 'website', 'whole_sale_req',
                           'can_mixed_whole_sale', 'remark',
                           'paymentMethods']
    # NOTE(review): 'name' appears twice below -- likely a copy/paste slip;
    # harmless but could be cleaned up.
    column_searchable_list = ('name', 'external_id', 'name', 'qq', 'phone', 'mnemonic',
                              'contact', 'email', 'website', 'whole_sale_req', 'remark')
    column_filters = ('can_mixed_whole_sale',)
    # external_id comes from an external system and must not be edited here.
    form_overrides = dict(external_id=ReadonlyStringField)
    form_columns = ('name', 'external_id', 'qq', 'phone', 'contact', 'email',
                    'website', 'whole_sale_req', 'can_mixed_whole_sale', 'remark',
                    'paymentMethods')
    form_create_rules = form_columns
    form_edit_rules = form_columns
    # column_filters = column_searchable_list
    column_labels = {
        'id': lazy_gettext('id'),
        'name': lazy_gettext('Name'),
        'qq': lazy_gettext('QQ'),
        'phone': lazy_gettext('Phone'),
        'contact': lazy_gettext('Contact'),
        'email': lazy_gettext('Email'),
        'website': lazy_gettext('Website'),
        'whole_sale_req': lazy_gettext('Whole Sale Req'),
        'can_mixed_whole_sale': lazy_gettext('Can Mixed Whole Sale'),
        'remark': lazy_gettext('Remark'),
        'paymentMethods': lazy_gettext('Payment Methods'),
        'external_id': lazy_gettext('External Id'),
    }
| {
"content_hash": "c64cd4514e78df0f7e01a3a6b97808f1",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 88,
"avg_line_length": 39.8125,
"alnum_prop": 0.6114599686028257,
"repo_name": "betterlife/psi",
"id": "2907e5acab72056d12757c920353ae6c6898d510",
"size": "2563",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "psi/app/views/supplier.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14410"
},
{
"name": "HTML",
"bytes": "52928"
},
{
"name": "JavaScript",
"bytes": "493605"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "528554"
}
],
"symlink_target": ""
} |
import itertools
import numpy as np
from ConfigSpace.forbidden import ForbiddenAndConjunction, ForbiddenEqualsClause
from autosklearn.pipeline.constants import (
DENSE,
INPUT,
PREDICTIONS,
SIGNED_DATA,
SPARSE,
UNSIGNED_DATA,
)
def get_match_array(pipeline, dataset_properties, include=None, exclude=None):
    """Build an array marking which pipeline instantiations are valid.

    Returns an int ndarray with one axis per pipeline step (axis length =
    number of available choices for that step, or 1 for a fixed step). An
    entry is 1 when that combination of components can pass data through
    every step given the dataset's sparse/signed properties, else 0.
    """
    sparse = dataset_properties.get("sparse")
    signed = dataset_properties.get("signed")
    # Duck typing, not sure if it's good...
    node_i_is_choice = []
    node_i_choices = []
    node_i_choices_names = []
    all_nodes = []
    # Collect, per step, whether it is a choice node and which concrete
    # components it could resolve to (a single-element list for fixed steps).
    for node_name, node in pipeline:
        all_nodes.append(node)
        is_choice = hasattr(node, "get_available_components")
        node_i_is_choice.append(is_choice)
        node_include = include.get(node_name) if include is not None else None
        node_exclude = exclude.get(node_name) if exclude is not None else None
        if is_choice:
            node_i_choices_names.append(
                list(
                    node.get_available_components(
                        dataset_properties, include=node_include, exclude=node_exclude
                    ).keys()
                )
            )
            node_i_choices.append(
                list(
                    node.get_available_components(
                        dataset_properties, include=node_include, exclude=node_exclude
                    ).values()
                )
            )
        else:
            node_i_choices.append([node])
    matches_dimensions = [len(choices) for choices in node_i_choices]
    # Start by allowing every combination of nodes. Go through all
    # combinations/pipelines and erase the illegal ones
    matches = np.ones(matches_dimensions, dtype=int)
    pipeline_idxs = [range(dim) for dim in matches_dimensions]
    for pipeline_instantiation_idxs in itertools.product(*pipeline_idxs):
        pipeline_instantiation = [
            node_i_choices[i][idx] for i, idx in enumerate(pipeline_instantiation_idxs)
        ]
        # Track how sparsity/signedness of the data changes as it flows
        # through this candidate pipeline.
        data_is_sparse = sparse
        dataset_is_signed = signed
        for node in pipeline_instantiation:
            node_input = node.get_properties()["input"]
            node_output = node.get_properties()["output"]
            # First check if these two instantiations of this node can work
            # together. Do this in multiple if statements to maintain
            # readability
            if (
                (data_is_sparse and SPARSE not in node_input)
                or not data_is_sparse
                and DENSE not in node_input
            ):
                matches[pipeline_instantiation_idxs] = 0
                break
            # No need to check if the node can handle SIGNED_DATA; this is
            # always assumed to be true
            elif not dataset_is_signed and UNSIGNED_DATA not in node_input:
                matches[pipeline_instantiation_idxs] = 0
                break
            # Propagate the sparsity flag through this node's output.
            if (
                (
                    INPUT in node_output
                    and DENSE not in node_output
                    and SPARSE not in node_output
                )
                or PREDICTIONS in node_output
                or (not data_is_sparse and DENSE in node_input and DENSE in node_output)
                or (data_is_sparse and SPARSE in node_input and SPARSE in node_output)
            ):
                # Don't change the data_is_sparse flag
                pass
            elif data_is_sparse and DENSE in node_output:
                data_is_sparse = False
            elif not data_is_sparse and SPARSE in node_output:
                data_is_sparse = True
            else:
                print(node)
                print("Data is sparse", data_is_sparse)
                print(node_input, node_output)
                raise ValueError("This combination is not allowed!")
            # Propagate the signedness flag through this node's output.
            if PREDICTIONS in node_output:
                pass
            elif (
                INPUT in node_output
                and SIGNED_DATA not in node_output
                and UNSIGNED_DATA not in node_output
            ):
                pass
            elif SIGNED_DATA in node_output:
                dataset_is_signed = True
            elif UNSIGNED_DATA in node_output:
                dataset_is_signed = False
            else:
                print(node)
                print("Data is signed", dataset_is_signed)
                print(node_input, node_output)
                raise ValueError("This combination is not allowed!")
    return matches
def find_active_choices(
    matches, node, node_idx, dataset_properties, include=None, exclude=None
):
    """Return the component names of ``node`` that are part of at least one
    valid pipeline instantiation according to ``matches``."""
    if not hasattr(node, "get_available_components"):
        raise ValueError()
    components = node.get_available_components(
        dataset_properties, include=include, exclude=exclude
    )
    assert matches.shape[node_idx] == len(components), (
        matches.shape[node_idx],
        len(components),
    )
    active = []
    for position, name in enumerate(components):
        # Select the hyperplane of `matches` where this node takes `name`.
        selector = tuple(
            slice(position, position + 1) if axis == node_idx else slice(None)
            for axis in range(matches.ndim)
        )
        if np.sum(matches[selector]) > 0:
            active.append(name)
    return active
def add_forbidden(conf_space, pipeline, matches, dataset_properties, include, exclude):
    """Add ForbiddenAndConjunction clauses to ``conf_space`` for combinations
    of choice components that ``matches`` marks as invalid.

    Only consecutive runs ("chains") of choice nodes can produce multi-node
    forbidden constraints; a fixed node breaks the chain. Returns the
    mutated ``conf_space``.
    """
    # Not sure if this works for 3D
    node_i_is_choice = []
    node_i_choices_names = []
    node_i_choices = []
    all_nodes = []
    # Per step: is it a choice node, its component names, and its components.
    for node_name, node in pipeline:
        all_nodes.append(node)
        is_choice = hasattr(node, "get_available_components")
        node_i_is_choice.append(is_choice)
        node_include = include.get(node_name) if include is not None else None
        node_exclude = exclude.get(node_name) if exclude is not None else None
        if is_choice:
            node_i_choices_names.append(
                node.get_available_components(
                    dataset_properties, include=node_include, exclude=node_exclude
                ).keys()
            )
            node_i_choices.append(
                node.get_available_components(
                    dataset_properties, include=node_include, exclude=node_exclude
                ).values()
            )
        else:
            node_i_choices_names.append([node_name])
            node_i_choices.append([node])
    # Find out all chains of choices. Only in such a chain its possible to
    # have several forbidden constraints
    choices_chains = []
    idx = 0
    while idx < len(pipeline):
        if node_i_is_choice[idx]:
            chain_start = idx
            idx += 1
            while idx < len(pipeline) and node_i_is_choice[idx]:
                idx += 1
            chain_stop = idx
            choices_chains.append((chain_start, chain_stop))
        idx += 1
    for choices_chain in choices_chains:
        constraints = set()
        chain_start = choices_chain[0]
        chain_stop = choices_chain[1]
        chain_length = chain_stop - chain_start
        # Examine every contiguous sub-chain of length >= 2 inside the chain.
        # Add one to have also have chain_length in the range
        for sub_chain_length in range(2, chain_length + 1):
            for start_idx in range(chain_start, chain_stop - sub_chain_length + 1):
                indices = range(start_idx, start_idx + sub_chain_length)
                node_names = [pipeline[idx][0] for idx in indices]
                num_node_choices = []
                node_choice_names = []
                skip_array_shape = []
                for idx in indices:
                    node = all_nodes[idx]
                    available_components = node.get_available_components(
                        dataset_properties, include=node_i_choices_names[idx]
                    )
                    assert len(available_components) > 0, len(available_components)
                    skip_array_shape.append(len(available_components))
                    num_node_choices.append(range(len(available_components)))
                    node_choice_names.append([name for name in available_components])
                # Figure out which choices were already abandoned
                skip_array = np.zeros(skip_array_shape)
                for product in itertools.product(*num_node_choices):
                    for node_idx, choice_idx in enumerate(product):
                        node_idx += start_idx
                        slices_ = tuple(
                            slice(None)
                            if idx != node_idx
                            else slice(choice_idx, choice_idx + 1)
                            for idx in range(len(matches.shape))
                        )
                        # A component with no valid pipeline at all is handled
                        # elsewhere, so skip it here.
                        if np.sum(matches[slices_]) == 0:
                            skip_array[product] = 1
                for product in itertools.product(*num_node_choices):
                    if skip_array[product]:
                        continue
                    slices = tuple(
                        slice(None)
                        if idx not in indices
                        else slice(
                            product[idx - start_idx], product[idx - start_idx] + 1
                        )
                        for idx in range(len(matches.shape))
                    )
                    # This prints the affected nodes
                    # print [node_choice_names[i][product[i]]
                    #     for i in range(len(product))], \
                    #     np.sum(matches[slices])
                    if np.sum(matches[slices]) == 0:
                        constraint = tuple(
                            [
                                (node_names[i], node_choice_names[i][product[i]])
                                for i in range(len(product))
                            ]
                        )
                        # Check if a more general constraint/forbidden clause
                        # was already added
                        continue_ = False
                        for constraint_length in range(2, len(constraint)):
                            constr_starts = len(constraint) - constraint_length + 1
                            for constraint_start_idx in range(constr_starts):
                                constraint_end_idx = (
                                    constraint_start_idx + constraint_length
                                )
                                sub_constraint = constraint[
                                    constraint_start_idx:constraint_end_idx
                                ]
                                if sub_constraint in constraints:
                                    continue_ = True
                                    break
                            if continue_:
                                break
                        if continue_:
                            continue
                        constraints.add(constraint)
                        # Forbid this exact combination of __choice__ values.
                        forbiddens = []
                        for i in range(len(product)):
                            forbiddens.append(
                                ForbiddenEqualsClause(
                                    conf_space.get_hyperparameter(
                                        node_names[i] + ":__choice__"
                                    ),
                                    node_choice_names[i][product[i]],
                                )
                            )
                        forbidden = ForbiddenAndConjunction(*forbiddens)
                        conf_space.add_forbidden_clause(forbidden)
    return conf_space
| {
"content_hash": "fbc6d7cfa33c94d58b62886dfc30c79b",
"timestamp": "",
"source": "github",
"line_count": 300,
"max_line_length": 88,
"avg_line_length": 38.88,
"alnum_prop": 0.5041152263374485,
"repo_name": "automl/auto-sklearn",
"id": "dff69acc6ee528dacde194a06da1e6ba0a9b07fa",
"size": "11664",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "autosklearn/pipeline/create_searchspace_util.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "950"
},
{
"name": "Makefile",
"bytes": "3513"
},
{
"name": "Python",
"bytes": "2008151"
},
{
"name": "Shell",
"bytes": "4744"
}
],
"symlink_target": ""
} |
from setuptools import setup
PACKAGE = 'timingandestimationplugin'

# Distribution metadata collected in one mapping, then handed to setup().
_metadata = dict(
    name='EduTracTimingAndEstimation',
    description='Plugin to make EduTrac support time estimation and tracking',
    keywords='trac plugin estimation timetracking',
    version='1.2.7b',
    url='http://www.trac-hacks.org/wiki/TimingAndEstimationPlugin',
    license='MIT',
    author='Russ Tyndall at Acceleration.net',
    author_email='russ@acceleration.net',
    long_description="""
This Trac 0.12 plugin provides support for Time estimation and tracking,
and permissions to view and set those fields
See http://trac-hacks.org/wiki/TimingAndEstimationPlugin for details.
""",
    packages=[PACKAGE],
    package_data={PACKAGE: ['templates/*.html', 'htdocs/js/*', 'htdocs/*.css',
                            'htdocs/*.js']},
    entry_points={'trac.plugins': '%s = %s' % (PACKAGE, PACKAGE)},
)

setup(**_metadata)
#### FINANCIAL CONTRIBUTORS ####
#
# Obsidian Software: http://www.obsidiansoft.com/
# Enterprise Solutions for Functional Processor
# Design Verification
#
################################
#### AUTHORS ####
## Primary Author:
## Russell Tyndall
## Acceleration.net
## russ@acceleration.net
## trac-hacks user: bobbysmith007
##
## Alessio Massaro
## trac-hacks user: masariello
## Helped Get Reports working in postgres
## and started moving toward generic work
## rather than hours
## kkurzweil@lulu.com
## helped PostgreSQL db backend compatibility
## jonas
## made it so that base_url was unnecessary
## Colin Guthrie
## trac-hacks user: coling
## Refactored the custom reports code to make it
## easy for other plugins to provide reports to
## compliment those provided by default
## Added Javascript that improves Ticket UI
## Dave Abrahams <dave@boost-consulting.com>
##
## Genshi filters to remove T&E reports from the
## standard reports page, where they display errors
## Greg Troxel
##
## Updated the post commit hooks to be inline with upstream trac
## Tay Ray Chuan
##
## Added a stopwatch to the ticket pages
## Josh Godsiff, for www.oxideinteractive.com.au
## added props table client reformatting to remove extra whitespace
| {
"content_hash": "dbb0d65ed0e784b4ceb3376af1e2020e",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 98,
"avg_line_length": 29.054054054054053,
"alnum_prop": 0.7037209302325581,
"repo_name": "lexqt/EduTracTimingAndEstimation",
"id": "27e2a877972da867dd42f11d86003946c22a6db0",
"size": "2172",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "12335"
},
{
"name": "Python",
"bytes": "66180"
},
{
"name": "Shell",
"bytes": "697"
}
],
"symlink_target": ""
} |
# Save the HKLM\SYSTEM hive to a file with RegSaveKey, attaching a custom
# security descriptor (readable by the current user, owned by Power Users).
import win32api, win32con, win32security, ntsecuritycon, pywintypes, os

output_path = 'h:\\tmp.reg'

## regsave will not overwrite a file
if os.path.isfile(output_path):
    os.remove(output_path)

# Enable the privileges required to read security info and back up the hive.
_privilege_names = (
    ntsecuritycon.SE_SECURITY_NAME,
    ntsecuritycon.SE_TCB_NAME,
    ntsecuritycon.SE_BACKUP_NAME,
    ntsecuritycon.SE_RESTORE_NAME,
)
new_privileges = tuple(
    (win32security.LookupPrivilegeValue('', name), win32con.SE_PRIVILEGE_ENABLED)
    for name in _privilege_names)

process_handle = win32api.GetCurrentProcess()
token_handle = win32security.OpenProcessToken(
    process_handle,
    win32security.TOKEN_ALL_ACCESS | win32con.TOKEN_ADJUST_PRIVILEGES)
win32security.AdjustTokenPrivileges(token_handle, 0, new_privileges)
caller_sid = win32security.GetTokenInformation(
    token_handle, ntsecuritycon.TokenUser)[0]

machine_hive = win32api.RegOpenKey(
    win32con.HKEY_LOCAL_MACHINE, None, 0, win32con.KEY_ALL_ACCESS)
system_key = win32api.RegOpenKey(
    machine_hive, 'SYSTEM', 0, win32con.KEY_ALL_ACCESS)

# Build the security descriptor that will be applied to the saved file.
security_attributes = pywintypes.SECURITY_ATTRIBUTES()
security_descriptor = pywintypes.SECURITY_DESCRIPTOR()
security_attributes.SECURITY_DESCRIPTOR = security_descriptor
dacl = pywintypes.ACL()
power_users_sid = win32security.LookupAccountName('', 'Power Users')[0]
dacl.AddAccessAllowedAce(
    win32con.ACL_REVISION,
    win32con.GENERIC_READ | win32con.ACCESS_SYSTEM_SECURITY,
    caller_sid)
security_descriptor.SetSecurityDescriptorDacl(1, dacl, 0)
security_descriptor.SetSecurityDescriptorOwner(power_users_sid, 0)
security_attributes.bInheritHandle = 1
assert security_attributes.SECURITY_DESCRIPTOR is security_descriptor

win32api.RegSaveKey(system_key, output_path, security_attributes)
| {
"content_hash": "32ac5fde2ed7c8f6b1e62fddc8b34f1a",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 115,
"avg_line_length": 42.32432432432432,
"alnum_prop": 0.768837803320562,
"repo_name": "ntuecon/server",
"id": "100451441381363f56b48350592e7be2819e8f4a",
"size": "1566",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "pyenv/Lib/site-packages/win32/Demos/security/regsave_sa.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "2209"
},
{
"name": "Batchfile",
"bytes": "1509"
},
{
"name": "C",
"bytes": "504013"
},
{
"name": "C++",
"bytes": "96440"
},
{
"name": "CSS",
"bytes": "133288"
},
{
"name": "GAP",
"bytes": "18122"
},
{
"name": "HTML",
"bytes": "150026"
},
{
"name": "JavaScript",
"bytes": "243314"
},
{
"name": "Objective-C",
"bytes": "1292"
},
{
"name": "PowerShell",
"bytes": "8325"
},
{
"name": "Python",
"bytes": "27048260"
},
{
"name": "Shell",
"bytes": "47820"
},
{
"name": "Tcl",
"bytes": "1237796"
},
{
"name": "Visual Basic",
"bytes": "949"
},
{
"name": "XSLT",
"bytes": "2113"
}
],
"symlink_target": ""
} |
from collections import namedtuple
import configparser
import hashlib
import os
import subprocess
import sys
# (returncode, output) pair returned by every git() invocation.
Result = namedtuple("Result", ["returncode", "output"])


def git(*args, git_dir=None, capture_output=False, checked=True):
    """Run a git command, optionally against an explicit --git-dir.

    Returns a Result; output is None unless capture_output is True. When
    checked (the default), a nonzero exit status terminates the process.
    """
    # Avoid forgetting this arg.
    assert git_dir is None or os.path.isdir(git_dir)
    command = ['git']
    if git_dir:
        command.append('--git-dir={0}'.format(git_dir))
    command.extend(args)
    # Always let stderr print to the caller.
    completed = subprocess.run(
        command,
        stdin=subprocess.DEVNULL,
        stdout=subprocess.PIPE if capture_output else None,
        universal_newlines=True)
    if checked and completed.returncode != 0:
        sys.exit(1)
    return Result(completed.returncode, completed.stdout)
def has_clone(url):
    """Return True when a cached mirror clone of ``url`` already exists."""
    cache_dir = repo_cache_path(url)
    return os.path.exists(cache_dir)
def clone_if_needed(url):
    """Mirror-clone ``url`` into the plugin cache unless already present.

    Returns the cache directory of the clone either way.
    """
    repo_path = repo_cache_path(url)
    if has_clone(url):
        return repo_path
    # We look for this print in test, to count the number of clones we did.
    print('git clone ' + url)
    git('clone', '--mirror', '--progress', url, repo_path)
    return repo_path
def repo_cache_path(url):
    """Map a repo ``url`` to its directory inside this plugin's cache.

    peru gives the plugin one cache dir per cacheable field (the url), so we
    could clone straight into it — but submodule handling also caches repos,
    so we still namespace by repo URL. The URL is hashed rather than escaped
    into the path to stay clear of Windows' 260-character path limit.
    """
    cache_root = os.environ['PERU_PLUGIN_CACHE']
    digest = hashlib.sha1(url.encode()).hexdigest()
    return os.path.join(cache_root, digest)
def git_fetch(url, repo_path):
    """Update the cached mirror at ``repo_path`` from its remote ``url``."""
    print('git fetch ' + url)
    # --prune drops refs that no longer exist on the remote.
    git('fetch', '--prune', git_dir=repo_path)
def already_has_rev(repo, rev):
    """Return True when ``rev`` is an absolute hash already present in ``repo``.

    Branch and tag names never count as present, so callers always re-fetch
    them; only a full commit hash is treated as immutable.
    """
    # The object must exist in the repo at all.
    exists = git('cat-file', '-e', rev, git_dir=repo, checked=False)
    if exists.returncode != 0:
        return False
    # Resolve rev to its full hash.
    resolved = git(
        'rev-parse', rev, git_dir=repo, checked=False, capture_output=True)
    if resolved.returncode != 0:
        return False
    # Only return True for revs that are absolute hashes.
    # We could consider treating tags the way, but...
    # 1) Tags actually can change.
    # 2) It's not clear at a glance if something is a branch or a tag.
    # Keep it simple.
    return resolved.output.strip() == rev
def checkout_tree(url, rev, dest):
    """Materialize ``rev`` of the repo at ``url`` into directory ``dest``,
    including any configured submodules."""
    repo_path = clone_if_needed(url)
    if not already_has_rev(repo_path, rev):
        git_fetch(url, repo_path)
    # If we just use `git checkout rev -- .` here, we get an error when rev is
    # an empty commit.
    git('--work-tree=' + dest, 'read-tree', rev, git_dir=repo_path)
    git('--work-tree=' + dest, 'checkout-index', '--all', git_dir=repo_path)
    checkout_submodules(url, repo_path, rev, dest)
def checkout_submodules(parent_url, repo_path, rev, work_tree):
    """Recursively check out the submodules configured at ``rev``.

    Reads .gitmodules from the already checked-out ``work_tree`` and checks
    each submodule's pinned commit out into its configured path. A no-op
    when submodule support is disabled or no .gitmodules file exists.
    """
    if os.environ['PERU_MODULE_SUBMODULES'] == 'false':
        return
    gitmodules = os.path.join(work_tree, '.gitmodules')
    if not os.path.exists(gitmodules):
        return
    # .gitmodules uses INI syntax, so configparser can read it directly.
    parser = configparser.ConfigParser()
    parser.read(gitmodules)
    for section in parser.sections():
        sub_relative_path = parser[section]['path']
        sub_full_path = os.path.join(work_tree, sub_relative_path)
        raw_sub_url = parser[section]['url']
        # Submodules can begin with ./ or ../, in which case they're relative
        # to the parent's URL. Handle this case.
        sub_url = expand_relative_submodule_url(raw_sub_url, parent_url)
        # `git ls-tree <rev> <path>` reveals which commit the submodule is
        # pinned to at this rev.
        ls_tree = git(
            'ls-tree',
            rev,
            sub_relative_path,
            git_dir=repo_path,
            capture_output=True).output
        # Normally when you run `git submodule add ...`, git puts two things in
        # your repo: an entry in .gitmodules, and a commit object at the
        # appropriate path inside your repo. However, it's possible for those
        # two to get out of sync, especially if you use mv/rm on a directory
        # followed by `git add`, instead of the smarter `git mv`/`git rm`. If
        # we run into one of these missing submodules, just skip it.
        if len(ls_tree.strip()) == 0:
            print('WARNING: submodule ' + sub_relative_path +
                  ' is configured in .gitmodules, but missing in the repo')
            continue
        # The third whitespace-separated column of ls-tree output is the hash.
        sub_rev = ls_tree.split()[2]
        checkout_tree(sub_url, sub_rev, sub_full_path)
# According to comments in its own source code, git's implementation of
# relative submodule URLs is full of unintended corner cases. See:
# https://github.com/git/git/blob/v2.20.1/builtin/submodule--helper.c#L135
#
# We absolutely give up on trying to replicate their logic -- which probably
# isn't stable in any case -- and instead we just leave the dots in and let the
# host make sense of it. A quick sanity check on GitHub confirmed that that
# seems to work for now.
def expand_relative_submodule_url(raw_sub_url, parent_url):
    """Join a relative submodule URL (starting with ./ or ../) onto its
    parent repo's URL; absolute URLs are returned unchanged.

    The dot segments are deliberately left in place for the remote host to
    resolve, since git's own relative-URL handling is full of corner cases.
    """
    # Idiom: startswith accepts a tuple of prefixes, replacing the
    # hand-written boolean chain.
    if not raw_sub_url.startswith(("./", "../")):
        return raw_sub_url
    new_path = parent_url
    if not new_path.endswith("/"):
        new_path += "/"
    new_path += raw_sub_url
    return new_path
def plugin_sync(url, rev):
    """Entry point for `peru sync`: check ``rev`` out into the sync dest."""
    checkout_tree(url, rev, os.environ['PERU_SYNC_DEST'])
def plugin_reup(url, reup):
    """Entry point for `peru reup`: write the hash ``reup`` resolves to.

    The resolved commit hash is written in `rev: <hash>` form to the file
    peru names in PERU_REUP_OUTPUT.
    """
    output_path = os.environ['PERU_REUP_OUTPUT']
    repo_path = clone_if_needed(url)
    git_fetch(url, repo_path)
    resolved = git(
        'rev-parse', reup, git_dir=repo_path, capture_output=True).output
    with open(output_path, 'w') as out_file:
        print('rev:', resolved.strip(), file=out_file)
def git_default_branch(url) -> str:
    """Guess the default branch of the repository at ``url``.

    Only 'master' is actually probed for; any other layout is assumed to
    use 'main'. Repos with a different default branch need the 'rev'
    option set explicitly.

    Args:
        url (str): url from the target repository to be checked.

    Returns:
        str: returns a possible match for the git default branch.
    """
    repo_path = clone_if_needed(url)
    probe = git('show-ref', '--verify', '--quiet', 'refs/heads/master',
                git_dir=repo_path, checked=False, capture_output=True)
    return 'master' if probe.returncode == 0 else 'main'
def main():
    """Dispatch to sync or reup based on peru's environment variables."""
    url = os.environ['PERU_MODULE_URL']
    rev = os.environ['PERU_MODULE_REV']
    reup = os.environ['PERU_MODULE_REUP']
    # Fix: only probe for the default branch when it is actually needed.
    # git_default_branch() clones the repo, so calling it unconditionally
    # (as the original code did) forced a network clone even when both
    # rev and reup were already pinned by the module config.
    if not rev or not reup:
        default_branch = git_default_branch(url)
        rev = rev or default_branch
        reup = reup or default_branch
    command = os.environ['PERU_PLUGIN_COMMAND']
    if command == 'sync':
        plugin_sync(url, rev)
    elif command == 'reup':
        plugin_reup(url, reup)
    else:
        raise RuntimeError('Unknown command: ' + repr(command))
# Script entry point: peru runs this module as a standalone plugin process.
if __name__ == "__main__":
    main()
| {
"content_hash": "7d5c1566f4213108ae8d6a3cd04cb148",
"timestamp": "",
"source": "github",
"line_count": 200,
"max_line_length": 79,
"avg_line_length": 35.01,
"alnum_prop": 0.6428163381890888,
"repo_name": "buildinspace/peru",
"id": "4f1b9b6b7d74877d0ff67c49a4142193e569f71c",
"size": "7027",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "peru/resources/plugins/git/git_plugin.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "325"
},
{
"name": "Makefile",
"bytes": "929"
},
{
"name": "Python",
"bytes": "289631"
},
{
"name": "Shell",
"bytes": "4200"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
from builtins import object
import threading
class TaskManager(object):
    """ Multi threading manager.

        Starts callables on background threads and lets the caller block
        until all of them have finished.
    """

    def __init__(self):
        """ Initializes a TaskManager with no tracked threads.
        """
        self.threads = list()

    def wait_until_exit(self):
        """ Wait until all the threads are finished, then forget them.
        """
        # Plain loop instead of a side-effect list comprehension.
        for thread in self.threads:
            thread.join()
        self.threads = list()

    def start_task(self, method, *args, **kwargs):
        """ Start a task in a separate thread

            Args:
                method: the method to start in a separate thread
                args: Accept args/kwargs arguments
        """
        thread = threading.Thread(target=method, args=args, kwargs=kwargs)
        # Bug fix: the original set `thread.is_daemon`, which is not a real
        # Thread attribute and therefore had no effect. The correct flag is
        # `daemon`; it must be set before start().
        thread.daemon = False
        thread.start()
        self.threads.append(thread)
| {
"content_hash": "4e9e9206aba93eb428984272d647e411",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 74,
"avg_line_length": 25.27777777777778,
"alnum_prop": 0.6032967032967033,
"repo_name": "nuagenetworks/monolithe",
"id": "852977666a0cd0be2b59ad7390397347c1355e8e",
"size": "2508",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "monolithe/lib/taskmanager.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "16165"
},
{
"name": "HTML",
"bytes": "983593"
},
{
"name": "JavaScript",
"bytes": "93413"
},
{
"name": "Python",
"bytes": "415189"
},
{
"name": "Smarty",
"bytes": "184108"
}
],
"symlink_target": ""
} |
"""Integration tests for the "client download" utility"""
from __future__ import absolute_import
import dtf.testutils as testutils
import dtf.core.utils as utils
class ClientDownloadTests(testutils.BasicIntegrationDeviceTest):
    """Integration tests for the `client download` command."""

    def test_download(self):
        """A plain download of a device file should succeed."""
        result = self.run_cmd("client download /system/etc/hosts")
        utils.delete_file("hosts")
        assert result.return_code == 0

    def test_download_local_exists(self):
        """Downloading over an existing local file should fail."""
        utils.touch("hosts")
        result = self.run_cmd("client download /system/etc/hosts")
        utils.delete_file("hosts")
        assert result.return_code == 255

    def test_download_path(self):
        """Downloading to an explicit --path destination should succeed."""
        result = self.run_cmd("client download --path ./hosts /system/etc/hosts")
        utils.delete_file("hosts")
        assert result.return_code == 0

    def test_download_not_installed(self):
        """Downloading without the client APK installed should fail."""
        result = self.run_cmd("client remove")
        assert result.return_code == 0

        result = self.run_cmd("client download /system/etc/hosts")
        assert result.return_code == 255

        # Reinstall so later tests start from a clean state.
        result = self.run_cmd("client install")
        assert result.return_code == 0
| {
"content_hash": "729eb87b5b1deb771fc1c5741fa37135",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 78,
"avg_line_length": 25.22641509433962,
"alnum_prop": 0.6260284218399401,
"repo_name": "android-dtf/dtf",
"id": "95a8ab61c1497abbbb3035490026f68567336377",
"size": "1977",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python-dtf/tests/integration-device/client/test_client_download.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "26207"
},
{
"name": "Makefile",
"bytes": "1269"
},
{
"name": "Python",
"bytes": "280892"
},
{
"name": "Shell",
"bytes": "27870"
}
],
"symlink_target": ""
} |
""" prune.py
Remove people and organizations we definitely don't want to add to the
graph.
Takes in a CSV (input.csv)
Creates a modified CSV (input_pruned.csv)
Creates a log of what it rejected (pruned.csv)
"""
import unicodecsv
import re
import os.path
import sys
def prune(filename, type):
    """
    Prunes the file at `filename` according to the fixed rules present
    in either prunePeople or pruneOrganizations
    """
    # Column layouts for the two supported input formats.
    people_field_names = ["Id", "full_name", "salutation", "first_name",
                          "middle_name", "last_name", "organization",
                          "address", "address_delivery_point", "address_city",
                          "address_postal", "address_admin_area",
                          "address_country", "email", "phone", "document",
                          "type", "source", "format"]
    organization_field_names = ["Id", "name", "address",
                                "address_delivery_point", "address_city",
                                "address_postal", "address_admin_area",
                                "address_country", "email", "phone",
                                "document", "type", "source", "format"]
    # One output file for retained rows, one logging the rejected rows.
    input_file = open(filename, 'rb')
    retained_file = open("%s_pruned.csv" % type, 'wb')
    rejected_file = open("%s_pruned_rejected.csv" % type, 'wb')
    # Dispatch to the pruning routine matching the file type.
    if type == "people":
        pruner, field_names = prunePeople, people_field_names
    else:
        pruner, field_names = pruneOrganizations, organization_field_names
    pruner(input_file, retained_file, rejected_file, field_names)
    # Close out all files
    input_file.close()
    retained_file.close()
    rejected_file.close()
def prunePeople(input_file, retained_file, rejected_file, field_names):
    """
    Prunes the people in `input_file`, writing kept rows to `retained_file`
    and rejected rows to `rejected_file` (both CSV with `field_names`).
    """
    input_reader = unicodecsv.DictReader(input_file,
                                         field_names)
    retained_writer = unicodecsv.DictWriter(retained_file,
                                            field_names)
    rejected_writer = unicodecsv.DictWriter(rejected_file,
                                            field_names)
    # Write headers
    retained_writer.writerow(dict(zip(field_names,
                                      field_names)))
    rejected_writer.writerow(dict(zip(field_names,
                                      field_names)))
    # Skip header line
    input_reader.next()
    # Set up rules
    # TODO: Set up a regex for this?
    junk_names = ['test', 'read', 'test1', 'test 2', 'asdf', 'adfs',
                  'test test', 'tret trert', 'GAA', 'BBB', 'tetqe', 'GGGG',
                  'You and your mentor']
    patterns = {'wg': re.compile("NCEAS:?\s*\d+"),
                'justnumbers': re.compile("^\d*$"),
                'junknames': re.compile("^[a-z]{3,4}\s*\d*$"),
                'noletters': re.compile("^[^a-zA-Z\u0000-\u007F]+$"),
                'journal article': re.compile("\d+:\d+-\d+")}
    # TODO add pruning for
    # Journal of the Fisheries Research Board of Canada 33:2489-2499
    # Journal of Fish Biology 13:203-213
    # Prune
    for row in input_reader:
        # Should we prune the entire record?
        should_prune = False
        # Rule: Empty name
        if len(row['full_name']) <= 0:
            should_prune = True
        # Rule: Junk name
        if row['full_name'] in junk_names:
            should_prune = True
        # Don't prune unicode names
        # NOTE(review): Python-2 trick — bytes() is str(), so encoding a
        # unicode value with non-ASCII characters raises, marking it as a
        # real (keepable) name.
        try:
            bytes(row['full_name'])
        except UnicodeEncodeError:
            """
            Throwing this unicode error checks whether the string is unicode or
            not. I'm not sure how robust this method is but it works AFAIK
            """
            should_prune = False
        # Should we prune the address field only?
        # "Select state or territory here."
        if re.compile("Select state or territory here").search(row['address']):
            row['address'] = ''
        # Should we prune the organization field only?
        prune_organization = False
        for pattern in patterns:
            if patterns[pattern].search(row['organization']):
                # Prune organization unless it's Unicode
                prune_organization = True
                try:
                    bytes(row['organization'])
                except UnicodeEncodeError:
                    prune_organization = False
        if prune_organization is True:
            row['organization'] = ''
        if should_prune is True:
            rejected_writer.writerow(row)
        else:
            retained_writer.writerow(row)
def pruneOrganizations(input_file, retained_file, rejected_file, field_names):
    """
    Prunes the organizations in `input_file`, writing kept rows to
    `retained_file` and rejected rows to `rejected_file`.
    """
    input_reader = unicodecsv.DictReader(input_file,
                                         field_names)
    retained_writer = unicodecsv.DictWriter(retained_file,
                                            field_names)
    rejected_writer = unicodecsv.DictWriter(rejected_file,
                                            field_names)
    # Write headers
    retained_writer.writerow(dict(zip(field_names,
                                      field_names)))
    rejected_writer.writerow(dict(zip(field_names,
                                      field_names)))
    # Skip header line
    input_reader.next()
    # Set up rules
    junk_orgs = ["Select state or territory here.", "null", "test"]
    patterns = {'wg': re.compile("NCEAS:?\s*\d+"),
                'junk': re.compile("^[a-z]{3,4}\s*\d*$"),
                'justnumbers': re.compile("^\d+$"),
                'noletters': re.compile("^[^a-zA-Z]+$"),
                'journal article': re.compile("\d+:\d+-\d+")}
    # Prune
    for row in input_reader:
        should_prune = False
        # Rule: Empty name
        if len(row['name']) <= 0:
            should_prune = True
        # Rule: name matches a known junk value exactly
        for junk_org_name in junk_orgs:
            if junk_org_name == row['name']:
                should_prune = True
        # Rule: name matches any junk pattern
        for pattern in patterns:
            if patterns[pattern].search(row['name']):
                should_prune = True
        # Don't prune unicode names
        # NOTE(review): Python-2 trick — bytes() is str(), so encoding a
        # unicode value with non-ASCII characters raises, marking it as a
        # real (keepable) name.
        try:
            bytes(row['name'])
        except UnicodeEncodeError:
            """
            Throwing this unicode error checks whether the string is unicode or
            not. I'm not sure how robust this method is but it works AFAIK
            """
            should_prune = False
        if should_prune is True:
            rejected_writer.writerow(row)
        else:
            retained_writer.writerow(row)
if __name__ == "__main__":
if len(sys.argv) != 3:
print "Incorrect number of arguments. Please specifiy filename and " \
"filetype."
sys.exit()
filename = sys.argv[1]
filetype = sys.argv[2]
if not os.path.isfile(filename):
print "File at %s was not found. Exiting." % filename
sys.exit()
if filetype not in ["people", "organizations"]:
print "Filetype was not one of [people, organizations]. Exiting."
sys.exit()
prune(filename, filetype)
| {
"content_hash": "ed0bfda35536a39434c360270f127f21",
"timestamp": "",
"source": "github",
"line_count": 256,
"max_line_length": 79,
"avg_line_length": 31.92578125,
"alnum_prop": 0.4826868958766671,
"repo_name": "ec-geolink/d1lod",
"id": "d230a12d305b4c836ab9c27d99d99f97b35fd1f3",
"size": "8173",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "d1lod/d1lod/people/graph/prune.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2289"
},
{
"name": "Go",
"bytes": "363"
},
{
"name": "HTML",
"bytes": "863"
},
{
"name": "Makefile",
"bytes": "1137"
},
{
"name": "Python",
"bytes": "223828"
},
{
"name": "Shell",
"bytes": "377"
}
],
"symlink_target": ""
} |
'''
Camera
======
Core class for acquiring the camera and converting its input into a
:class:`~kivy.graphics.texture.Texture`.
'''
__all__ = ('CameraBase', 'Camera')
import sys
from kivy.event import EventDispatcher
from kivy.logger import Logger
from kivy.core import core_select_lib
class CameraBase(EventDispatcher):
    '''Abstract Camera Widget class.

    Concrete camera classes must implement initialization and
    frame capturing to a buffer that can be uploaded to the gpu.

    :Parameters:
        `index`: int
            Source index of the camera.
        `size` : tuple (int, int)
            Size at which the image is drawn. If no size is specified,
            it defaults to the resolution of the camera image.
        `resolution` : tuple (int, int)
            Resolution to try to request from the camera.
            Used in the gstreamer pipeline by forcing the appsink caps
            to this resolution. If the camera doesnt support the resolution,
            a negotiation error might be thrown.

    :Events:
        `on_load`
            Fired when the camera is loaded and the texture has become
            available.
        `on_texture`
            Fired each time the camera texture is updated.
    '''
    # Doc fix: the second event really dispatched is `on_texture`
    # (see _copy_to_gpu), not `on_frame` as the old docstring claimed.

    __events__ = ('on_load', 'on_texture')

    def __init__(self, **kwargs):
        kwargs.setdefault('stopped', False)
        kwargs.setdefault('resolution', (640, 480))
        kwargs.setdefault('index', 0)

        self.stopped = kwargs.get('stopped')
        self._resolution = kwargs.get('resolution')
        self._index = kwargs.get('index')
        self._buffer = None
        self._format = 'rgb'
        self._texture = None
        self.capture_device = None
        # If no explicit size is given, draw at the capture resolution.
        kwargs.setdefault('size', self._resolution)

        super(CameraBase, self).__init__()

        self.init_camera()

        if not self.stopped:
            self.start()

    def _set_resolution(self, res):
        self._resolution = res
        # Re-initialise so the new resolution takes effect.
        self.init_camera()

    def _get_resolution(self):
        return self._resolution

    resolution = property(lambda self: self._get_resolution(),
                          lambda self, x: self._set_resolution(x),
                          doc='Resolution of camera capture (width, height)')

    def _set_index(self, x):
        if x == self._index:
            return
        self._index = x
        self.init_camera()

    def _get_index(self):
        # Bug fix: this previously returned `self._x`, an attribute that is
        # never set, so reading the `index` property raised AttributeError.
        return self._index

    index = property(lambda self: self._get_index(),
                     lambda self, x: self._set_index(x),
                     doc='Source index of the camera')

    def _get_texture(self):
        return self._texture

    texture = property(lambda self: self._get_texture(),
                       doc='Return the camera texture with the latest capture')

    def init_camera(self):
        '''Initialise the camera (internal)'''
        pass

    def start(self):
        '''Start the camera acquire'''
        self.stopped = False

    def stop(self):
        '''Release the camera'''
        self.stopped = True

    def _update(self, dt):
        '''Update the camera (internal)'''
        pass

    def _copy_to_gpu(self):
        '''Copy the buffer into the texture'''
        if self._texture is None:
            Logger.debug('Camera: copy_to_gpu() failed, _texture is None !')
            return
        self._texture.blit_buffer(self._buffer, colorfmt=self._format)
        # The buffer is consumed once uploaded; drop the reference.
        self._buffer = None
        self.dispatch('on_texture')

    def on_texture(self):
        pass

    def on_load(self):
        pass
# Build the platform-specific list of candidate camera providers, then let
# core_select_lib pick the first one that can actually be imported.
_camera_candidates = []
if sys.platform == 'win32':
    _camera_candidates.append(
        ('videocapture', 'camera_videocapture', 'CameraVideoCapture'))
if sys.platform != 'darwin':
    _camera_candidates.append(
        ('gstreamer', 'camera_gstreamer', 'CameraGStreamer'))
_camera_candidates.append(('opencv', 'camera_opencv', 'CameraOpenCV'))
providers = tuple(_camera_candidates)
Camera = core_select_lib('camera', providers)
| {
"content_hash": "0b5cbc122e5cf9ff6a01f670b5758961",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 76,
"avg_line_length": 28.194244604316548,
"alnum_prop": 0.5958152589946415,
"repo_name": "5y/kivy",
"id": "6e061e831859090c61a17b67afb1b349c4ccbedb",
"size": "3919",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "kivy/core/camera/__init__.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
from django.contrib.auth.models import AnonymousUser
from django.contrib.sessions.middleware import SessionMiddleware
from django.core.exceptions import ImproperlyConfigured
from django.test import RequestFactory, TestCase
class DummyStorage:
    """In-memory stand-in for a django messages storage backend.

    Records the text of every added message in order, and supports
    ``len()`` and indexing so tests can inspect what was stored.
    """

    def __init__(self):
        self.store = []

    def add(self, level, message, extra_tags=''):
        # Signature mirrors the django messages API; only the message
        # text is retained (level and tags are discarded).
        self.store.append(message)

    def __len__(self):
        return len(self.store)

    def __getitem__(self, index):
        return self.store[index]
class BaseRequestTestCase(TestCase):
    """
    Extend django.test.TestCase with a create_request method.

    BaseRequestTestCase must be subclassed with a user_factory attribute to
    create a default user for the request.

    A class- or function-based view can be attached to the test class as the
    'view' attribute. get_view() returns a callable version of that
    view, abstracting over whether it's class- or function-based.
    """
    request_factory = RequestFactory

    @classmethod
    def get_view(cls):
        """
        Return the view attached directly to this class, as a callable.

        Looks the view up on the class's own __dict__ (not inherited) and
        raises ImproperlyConfigured when it is absent. Class-based views are
        converted with as_view(); function-based views are returned as-is.
        """
        try:
            attached_view = cls.__dict__['view']
        except KeyError:
            raise ImproperlyConfigured("This test must have a 'view' attribute.")
        try:
            return attached_view.as_view()
        except AttributeError:
            # No as_view(): it is already a plain callable view function.
            return attached_view

    @classmethod
    def view_instance(cls, request=None, *args, **kwargs):
        """
        Return an instance of the class's attached view.

        Unlike get_view(), this resolves 'view' through normal attribute
        lookup (inherited attributes count). Raises ImproperlyConfigured
        when no view is attached; otherwise instantiates it with request,
        args and kwargs set.
        """
        if not hasattr(cls, 'view'):
            raise ImproperlyConfigured("This test must have a 'view' attribute.")
        return cls.view(request=request, args=args, kwargs=kwargs)

    @staticmethod
    def add_session_to_request(request):
        """Annotate a request object with a saved session."""
        middleware = SessionMiddleware()
        middleware.process_request(request)
        request.session.save()

    def create_request(
            self,
            method='get',
            url='/',
            user=None,
            auth=True,
            add_session=False,
            **kwargs
    ):
        """
        Build a request via the request factory, attaching a user,
        dummy message storage and (optionally) a session.
        """
        if user is None:
            user = self.create_user(auth=auth)
        factory_method = getattr(self.request_factory(), method)
        request = factory_method(url, **kwargs)
        request.user = user
        if 'data' in kwargs:
            # Mirror the payload on request.DATA for API-style assertions.
            request.DATA = kwargs['data']
        request._messages = DummyStorage()
        if add_session:
            self.add_session_to_request(request)
        return request

    def create_user(self, auth=True, **kwargs):
        """Create a user via user_factory, or AnonymousUser when auth=False."""
        if not auth:
            return AnonymousUser()
        return self.user_factory.create(**kwargs)
| {
"content_hash": "932f547f89dd66a270b759561ec42c0d",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 79,
"avg_line_length": 29.367924528301888,
"alnum_prop": 0.6209444265981369,
"repo_name": "incuna/incuna-test-utils",
"id": "bfd54d3ee6e2f3378fe0f8a01247afb276eb698e",
"size": "3113",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "incuna_test_utils/testcases/request.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "37"
},
{
"name": "Makefile",
"bytes": "255"
},
{
"name": "Python",
"bytes": "55986"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import abc
import typing as tp
from PIL import Image
from applitools.utils import ABC, image_utils, argument_guard
if tp.TYPE_CHECKING:
from applitools.utils.custom_types import AnyWebElement, Num
from applitools.selenium.frames import FrameChain
from applitools.selenium.webelement import EyesWebElement
from .geometry import Point, Region
# Type variable bound to EyesScreenshot so factory-style methods can be
# annotated as returning the subclass's own concrete screenshot type.
T = tp.TypeVar('T', bound='EyesScreenshot')
# Public API of this module.
__all__ = ('EyesScreenshot',)
class EyesScreenshot(ABC):
    """
    Base class for handling screenshots.

    Wraps a PIL image and defines the abstract geometry/serialization
    interface concrete screenshot implementations must provide.
    """
    def __init__(self, image):
        # type: (Image.Image) -> None
        # Reject anything that is not a PIL Image up front.
        argument_guard.is_a(image, Image.Image)
        self._screenshot = image

    @staticmethod
    @abc.abstractmethod
    def calc_frame_location_in_screenshot(frame_chain, is_viewport_screenshot):
        # type: (FrameChain, tp.Optional[bool]) -> Point
        """
        :param frame_chain: List of the frames.
        :param is_viewport_screenshot: Whether the viewport is a screenshot or not.
        :return: The frame location as it would be on the screenshot. Notice that this value
            might actually be OUTSIDE the screenshot (e.g, if this is a viewport screenshot and
            the frame is located outside the viewport). This is not an error. The value can also
            be negative.
        """

    @abc.abstractmethod
    def get_base64(self):
        # type: () -> tp.Text
        """
        Returns a base64 screenshot.

        :return: The base64 representation of the png.
        """

    @abc.abstractmethod
    def get_intersected_region(self, region):
        # type: (Region) -> Region
        """
        Gets the intersection of the region with the screenshot image.

        :param region: The region in the frame.
        :return: The part of the region which intersects with
            the screenshot image.
        """

    @abc.abstractmethod
    def get_location_relative_to_frame_viewport(self, location):
        # type: (tp.Dict[tp.Text, Num]) -> tp.Dict[tp.Text, Num]
        """
        Gets the relative location from a given location to the viewport.

        :param location: A dict with 'x' and 'y' keys representing the location we want
            to adjust.
        :return: A location (keys are 'x' and 'y') adjusted to the current frame/viewport.
        """

    @abc.abstractmethod
    def get_element_region_in_frame_viewport(self, element):
        # type: (AnyWebElement) -> Region
        """
        Gets the element region in the frame.

        :param element: The element to get the region in the frame.
        :return: The element's region in the frame with scroll considered if necessary
        """

    @abc.abstractmethod
    def get_viewport_screenshot(self):
        # type: () -> T
        """
        Always return viewport size screenshot
        """

    @abc.abstractmethod
    def get_sub_screenshot_by_region(self, region):
        # type: (Region) -> T
        """
        Gets the region part of the screenshot image.

        :param region: The region in the frame.
        :return: A screenshot object representing the given region part of the image.
        """

    @property
    @abc.abstractmethod
    def frame_chain(self):
        # type: () -> FrameChain
        """
        Returns a copy of the frame chain.

        :return: A copy of the frame chain, as received by the driver when the screenshot was
            created.
        """

    def get_bytes(self):
        # type: () -> bytes
        """
        Returns the bytes of the screenshot.

        :return: The bytes representation of the png.
        """
        return image_utils.get_bytes(self._screenshot)

    def get_intersected_region_by_element(self, element):
        # type: (EyesWebElement) -> Region
        """
        Gets the intersection of the element's region with the screenshot image.

        :param element: The element in the frame.
        :return: The part of the element's region which intersects with
            the screenshot image.
        """
        element_region = self.get_element_region_in_frame_viewport(element)
        return self.get_intersected_region(element_region)

    def get_sub_screenshot_by_element(self, element):
        # type: (EyesWebElement) -> T
        """
        Gets the element's region part of the screenshot image.

        :param element: The element in the frame.
        :return: A screenshot object representing the element's region part of the
            image.
        """
        element_region = self.get_element_region_in_frame_viewport(element)
        return self.get_sub_screenshot_by_region(element_region)
| {
"content_hash": "d9b3558f404d927749eb740db39d4596",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 96,
"avg_line_length": 32.53472222222222,
"alnum_prop": 0.6271077908217716,
"repo_name": "applitools/eyes.selenium.python",
"id": "3d516c1d53dd307921112641b3831c1ba12a3ed6",
"size": "4685",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "applitools/core/capture.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Gherkin",
"bytes": "107"
},
{
"name": "Python",
"bytes": "263819"
}
],
"symlink_target": ""
} |
from gql import gql, Client
from gql.transport.aiohttp import AIOHTTPTransport
QUERY = gql("""
query ($username: String!, $after: String) {
user(login: $username) {
starredRepositories(first: 100, after: $after, orderBy: {direction: DESC, field: STARRED_AT}) {
totalCount
nodes {
name
nameWithOwner
description
url
stargazerCount
forkCount
isPrivate
pushedAt
updatedAt
languages(first: 1, orderBy: {field: SIZE, direction: DESC}) {
edges {
node {
id
name
}
}
}
repositoryTopics(first: 100) {
nodes {
topic {
name
stargazerCount
}
}
}
}
pageInfo {
endCursor
hasNextPage
}
}
}
}
"""
)
class Repository:
    """Plain value object describing one starred GitHub repository."""

    def __init__(self, name, description, language, url, stargazer_count, is_private, topics):
        # Independent attribute assignments; grouped by identity / stats.
        self.name = name
        self.url = url
        self.description = description
        self.language = language
        self.topics = topics
        self.stargazer_count = stargazer_count
        self.is_private = is_private
class GitHubGQL:
    """Thin GraphQL client for reading a GitHub user's starred repositories."""

    API_URL = "https://api.github.com/graphql"

    def __init__(self, token):
        self.token = token
        auth_headers = {"Authorization": f"Bearer {token}"}
        self.transport = AIOHTTPTransport(url=self.API_URL, headers=auth_headers)
        self.client = Client(transport=self.transport, fetch_schema_from_transport=True)

    def get_user_starred_by_username(self, username: str, after: str = '', topic_stargazer_count_limit: int = 0):
        """
        Return Repository objects for every star of *username*, recursing
        through GraphQL cursor pagination until no pages remain. Topics with
        stargazerCount at or below the limit are filtered out.
        """
        result = self.client.execute(
            QUERY, variable_values={"username": username, "after": after})
        starred = result['user']['starredRepositories']
        page_info = starred['pageInfo']
        # total_count = starred['totalCount']
        items = []
        for node in starred['nodes']:
            language_edges = node['languages']['edges']
            topics = [
                tag['topic']['name']
                for tag in node['repositoryTopics']['nodes']
                if tag['topic']['stargazerCount'] > topic_stargazer_count_limit
            ]
            items.append(Repository(
                node['nameWithOwner'],
                node['description'] if node['description'] else '',
                language_edges[0]['node']['name'] if language_edges else '',
                node['url'],
                node['stargazerCount'],
                node['isPrivate'],
                topics,
            ))
        if page_info['hasNextPage']:
            items.extend(self.get_user_starred_by_username(
                username, page_info['endCursor'], topic_stargazer_count_limit))
        return items
| {
"content_hash": "3b2da0436772362477f930e7e33b2010",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 152,
"avg_line_length": 35.616279069767444,
"alnum_prop": 0.5527260855370552,
"repo_name": "maguowei/starred",
"id": "b301917a7a89be422e298dc6a44b79cab6460c91",
"size": "3063",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "starred/githubgql.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9327"
}
],
"symlink_target": ""
} |
import datetime
import os
import shutil
import socket
import sys
import threading
try:
import docker
from docker.errors import DockerException, APIError, InvalidVersion
from girder_worker.docker import nvidia
from requests.exceptions import ReadTimeout
except ImportError:
# These imports will not be available on the girder side.
pass
from girder_worker.app import app, Task
from girder_worker import logger
from girder_worker.docker import utils
from girder_worker.docker.stream_adapter import DockerStreamPushAdapter
from girder_worker.docker.io import (
FileDescriptorReader,
FDWriteStreamConnector,
FDReadStreamConnector,
FDStreamConnector,
StdStreamWriter
)
from girder_worker.docker.transforms import (
ContainerStdErr,
ContainerStdOut,
_TemporaryVolumeBase,
TemporaryVolume
)
from girder_worker_utils import _walk_obj
# docker run() kwargs callers may not override: _docker_run below fixes
# tty=False and detach=True, which the stream select loop relies on.
BLACKLISTED_DOCKER_RUN_ARGS = ['tty', 'detach']
def _pull_image(image):
    """
    Pull the specified Docker image onto this worker.

    Logs and re-raises any DockerException from the pull.
    """
    docker_client = docker.from_env(version='auto')
    try:
        docker_client.images.pull(image)
    except DockerException:
        logger.exception('Error pulling Docker image %s:' % image)
        raise
def _get_docker_network():
    """
    Return a ``container:<id>`` network reference when this process's host IP
    matches a running container's IP, otherwise None.

    Any failure is logged and swallowed (the function then returns None).
    """
    try:
        own_ip = socket.gethostbyname(socket.gethostname())
        # Honor the same client-timeout override used elsewhere in this module.
        timeout_env = os.environ.get('DOCKER_CLIENT_TIMEOUT')
        if timeout_env is not None:
            client = docker.from_env(version='auto', timeout=int(timeout_env))
        else:
            client = docker.from_env(version='auto')
        running = client.containers.list(all=True, filters={'status': 'running'})
        for container in running:
            networks = container.attrs['NetworkSettings']['Networks'].values()
            if any(nw['IPAddress'] == own_ip for nw in networks):
                return 'container:%s' % container.id
    except Exception:
        logger.exception('Failed to get docker network')
def _remove_stopped_container(client, name):
    """
    Best-effort removal of any container matching *name*.

    A ``name`` of None is a no-op; removal errors are deliberately ignored
    (the container may already be gone).
    """
    if name is None:
        return
    matching = client.containers.list(all=True, filters={'name': name})
    for container in matching:
        try:
            logger.info('Removing container %s ' % (name))
            container.remove()
        except Exception:
            # Best-effort cleanup only; never propagate removal failures.
            pass
def _run_container(image, container_args, **kwargs):
    """
    Create and start a docker container, returning the docker-py container
    object.

    Attempts GPU-enabled invocations first for nvidia images (device
    requests, then the legacy 'nvidia' runtime) and falls back to a plain
    run when the daemon rejects the auto-selected runtime.
    """
    # TODO we could allow configuration of non default socket
    if 'DOCKER_CLIENT_TIMEOUT' in os.environ:
        timeout = int(os.environ['DOCKER_CLIENT_TIMEOUT'])
        client = docker.from_env(version='auto', timeout=timeout)
    else:
        client = docker.from_env(version='auto')

    runtime = kwargs.pop('runtime', None)
    # Remember whether the caller explicitly requested a runtime, so a
    # failed auto-selected 'nvidia' runtime can fall back to a plain run.
    origRuntime = runtime
    if runtime is None and nvidia.is_nvidia_image(client.api, image):
        runtime = 'nvidia'

    # docker-py expects string arguments.
    container_args = [str(arg) for arg in container_args]

    if 'network' not in kwargs and 'network_mode' not in kwargs:
        # Join the calling container's network when we are running inside one.
        docker_network = _get_docker_network()
        if docker_network:
            kwargs = kwargs.copy()
            kwargs['network'] = docker_network

    logger.info('Running container: image: %s args: %s runtime: %s kwargs: %s'
                % (image, container_args, runtime, kwargs))
    try:
        name = None
        try:
            if runtime == 'nvidia' and kwargs.get('device_requests') is None:
                # Docker < 19.03 required the runtime='nvidia' argument.
                # Newer versions require a device request for some number of
                # GPUs. This should handle either version of the docker
                # daemon.
                try:
                    device_requests_kwargs = kwargs.copy()
                    device_requests_kwargs['device_requests'] = [
                        docker.types.DeviceRequest(count=-1, capabilities=[['gpu']])]
                    name = device_requests_kwargs.setdefault(
                        'name',
                        'girder_worker_' + datetime.datetime.now().strftime('%Y%m%d_%H%M%S_%f'))
                    return client.containers.run(
                        image, container_args, **device_requests_kwargs)
                except (APIError, InvalidVersion):
                    # Daemon rejected device requests: clean up and fall
                    # through to the runtime-based invocation below.
                    _remove_stopped_container(client, name)
                    pass
            kwargs = kwargs.copy()
            name = kwargs.setdefault(
                'name',
                'girder_worker_' + datetime.datetime.now().strftime('%Y%m%d_%H%M%S_%f'))
            return client.containers.run(
                image, container_args, runtime=runtime, **kwargs)
        except APIError:
            _remove_stopped_container(client, name)
            if origRuntime is None and runtime is not None:
                # The auto-selected nvidia runtime failed; retry without it.
                kwargs = kwargs.copy()
                name = kwargs.setdefault(
                    'name',
                    'girder_worker_' + datetime.datetime.now().strftime('%Y%m%d_%H%M%S_%f'))
                return client.containers.run(image, container_args, **kwargs)
            else:
                raise
    except DockerException:
        logger.exception('Exception when running docker container')
        raise
class _SocketReader(FileDescriptorReader):
    """
    Adapter providing a uniform read/fileno/close interface over the object
    returned by docker-py's attach_socket(): a socket-like object (with
    ``recv``) on python 2, a SocketIO instance (with ``read``) on python 3.
    """
    def __init__(self, socket):
        self._socket = socket

    def read(self, n):
        recv = getattr(self._socket, 'recv', None)
        if recv is not None:
            # Raw socket (python 2 docker-py).
            return recv(n)
        # SocketIO (python 3 docker-py).
        return self._socket.read(n)

    def fileno(self):
        return self._socket.fileno()

    def close(self):
        self._socket.close()
def _run_select_loop(  # noqa: C901
        task, container, read_stream_connectors, write_stream_connectors):
    """
    Attach to the container's stdout/stderr and drive the stream select loop
    until the container exits (or the task is canceled), then validate the
    exit code.

    :param task: The running task (checked for cancellation).
    :param container: The docker-py container to monitor.
    :param read_stream_connectors: Connectors reading from the container.
    :param write_stream_connectors: Connectors writing to the container.
    :raises DockerException: On a non-zero container exit code (when not
        canceled) or when stopping the canceled container fails.
    """
    stdout = None
    stderr = None
    try:
        # Attach to the container's standard streams.
        stdout = container.attach_socket(params={
            'stdout': True,
            'logs': True,
            'stream': True
        })

        stderr = container.attach_socket(params={
            'stderr': True,
            'logs': True,
            'stream': True
        })

        def exit_condition():
            container.reload()
            return container.status in {'exited', 'dead'} or task.canceled

        # Look for ContainerStdOut and ContainerStdErr instances that need
        # to be replaced with the real container streams.
        stdout_connected = False
        for read_stream_connector in read_stream_connectors:
            if isinstance(read_stream_connector.input, ContainerStdOut):
                stdout_reader = _SocketReader(stdout)
                read_stream_connector.output = DockerStreamPushAdapter(read_stream_connector.output)
                read_stream_connector.input = stdout_reader
                stdout_connected = True
                break

        stderr_connected = False
        for read_stream_connector in read_stream_connectors:
            if isinstance(read_stream_connector.input, ContainerStdErr):
                stderr_reader = _SocketReader(stderr)
                read_stream_connector.output = DockerStreamPushAdapter(read_stream_connector.output)
                read_stream_connector.input = stderr_reader
                stderr_connected = True
                break

        # If no stdout or stderr connection has been provided just use
        # sys.stdXXX
        if not stdout_connected:
            stdout_reader = _SocketReader(stdout)
            connector = FDReadStreamConnector(
                stdout_reader,
                DockerStreamPushAdapter(StdStreamWriter(sys.stdout)))
            read_stream_connectors.append(connector)

        if not stderr_connected:
            stderr_reader = _SocketReader(stderr)
            connector = FDReadStreamConnector(
                stderr_reader,
                DockerStreamPushAdapter(StdStreamWriter(sys.stderr)))
            read_stream_connectors.append(connector)

        # Run select loop
        utils.select_loop(exit_condition=exit_condition,
                          readers=read_stream_connectors,
                          writers=write_stream_connectors)

        if task.canceled:
            try:
                container.stop()
            # Catch the ReadTimeout from requests and wait for container to
            # exit. See https://github.com/docker/docker-py/issues/1374 for
            # more details.
            except ReadTimeout:
                tries = 10
                while tries > 0:
                    container.reload()
                    if container.status == 'exited':
                        break
                    # BUG FIX: `tries` was never decremented, so a container
                    # that never reached 'exited' caused an infinite
                    # busy-loop here. Decrement so the retry is bounded.
                    tries -= 1

                if container.status != 'exited':
                    msg = 'Unable to stop container: %s' % container.id
                    logger.error(msg)
            except DockerException as dex:
                logger.error(dex)
                raise

        container.reload()
        exit_code = container.attrs['State']['ExitCode']
        if not task.canceled and exit_code != 0:
            raise DockerException('Non-zero exit code from docker container (%d).' % exit_code)
    finally:
        # Close our stdout and stderr sockets
        if stdout:
            stdout.close()
        if stderr:
            stderr.close()
def _handle_streaming_args(args):
    """
    Split container arguments into plain values plus read/write stream
    connectors.

    Returns ``(processed_args, read_streams, write_streams)`` where each
    connector argument is replaced by the path of its underlying stream
    (or '' when the stream exposes no path).
    """
    processed_args = []
    read_streams = []
    write_streams = []

    def _path_or_empty(stream):
        if hasattr(stream, 'path'):
            return stream.path()
        # Stream has no backing path; pass nothing through to the container.
        return ''

    for arg in args:
        if isinstance(arg, FDStreamConnector):
            if isinstance(arg, FDWriteStreamConnector):
                write_streams.append(arg)
                arg = _path_or_empty(arg.output)
            elif isinstance(arg, FDReadStreamConnector):
                read_streams.append(arg)
                arg = _path_or_empty(arg.input)
        processed_args.append(arg)

    return (processed_args, read_streams, write_streams)
class _RequestDefaultTemporaryVolume(_TemporaryVolumeBase):
    # Per-request default temporary volume: paths are created eagerly at
    # construction time, and transform() merely records that the volume was
    # actually used (DockerTask._cleanup_temp_volumes checks _transformed).
    def __init__(self):
        super(_RequestDefaultTemporaryVolume, self).__init__(None, None)
        self._make_paths()

    def transform(self, **kwargs):
        self._transformed = True
class DockerTask(Task):
    """
    Celery task base class for docker tasks: resolves temporary-volume
    placeholders in arguments/results around execution and cleans up
    temporary volumes afterwards on a background thread.
    """

    def _maybe_transform_argument(self, arg):
        # Pass the task and the per-request default temp volume down so
        # TemporaryVolume.default placeholders can be resolved.
        return super(DockerTask, self)._maybe_transform_argument(
            arg, task=self, _default_temp_volume=self.request._default_temp_volume)

    def _maybe_transform_result(self, idx, result):
        return super(DockerTask, self)._maybe_transform_result(
            idx, result, _default_temp_volume=self.request._default_temp_volume)

    def __call__(self, *args, **kwargs):
        # One default temporary volume per request; stored on the request so
        # the transform hooks above can reach it.
        default_temp_volume = _RequestDefaultTemporaryVolume()
        self.request._default_temp_volume = default_temp_volume

        volumes = kwargs.setdefault('volumes', {})
        # If we have a list of volumes, the user provide a list of Volume objects,
        # we need to transform them.
        temp_volumes = []
        if isinstance(volumes, list):
            # See if we have been passed any TemporaryVolume instances.
            for v in volumes:
                if isinstance(v, TemporaryVolume):
                    temp_volumes.append(v)

            # First call the transform method, this we replace default temp volumes
            # with the instance associated with this task create above. That is any
            # reference to TemporaryVolume.default
            _walk_obj(volumes, self._maybe_transform_argument)

            # Now convert them to JSON
            def _json(volume):
                return volume._repr_json_()

            volumes = _walk_obj(volumes, _json)
            # We then need to merge them into a single dict and it will be ready
            # for docker-py.
            volumes = {k: v for volume in volumes for k, v in volume.items()}
            kwargs['volumes'] = volumes

        volumes.update(default_temp_volume._repr_json_())

        super(DockerTask, self).__call__(*args, **kwargs)

        # Cleanup happens off-thread so task completion is not delayed.
        threading.Thread(
            target=self._cleanup_temp_volumes,
            args=(temp_volumes, default_temp_volume),
            daemon=True).start()

    def _cleanup_temp_volumes(self, temp_volumes, default_temp_volume):
        # Set the permission to allow cleanup of temp directories
        temp_volumes = [v for v in temp_volumes if os.path.exists(v.host_path)]
        to_chmod = temp_volumes[:]
        # If our default_temp_volume instance has been transformed then we
        # know it has been used and we have to clean it up.
        if default_temp_volume._transformed:
            to_chmod.append(default_temp_volume)
            temp_volumes.append(default_temp_volume)

        if len(to_chmod) > 0:
            utils.chmod_writable([v.host_path for v in to_chmod])

        for v in temp_volumes:
            shutil.rmtree(v.host_path)
def _docker_run(task, image, pull_image=True, entrypoint=None, container_args=None,
                volumes=None, remove_container=True, stream_connectors=None, **kwargs):
    """
    Shared implementation behind the docker_run task: optionally pull the
    image, assemble run kwargs, start the container and drive the stream
    select loop until the container finishes, then remove it.

    Extra **kwargs are forwarded to the container run (minus blacklisted
    keys). Returns a sequence of None placeholders matching
    girder_result_hooks, or an empty list when no hooks are present.
    """
    volumes = volumes or {}
    stream_connectors = stream_connectors or []
    container_args = container_args or []

    if pull_image:
        logger.info('Pulling Docker image: %s', image)
        _pull_image(image)

    if entrypoint is not None and not isinstance(entrypoint, (list, tuple)):
        # docker-py expects a list-style entrypoint.
        entrypoint = [entrypoint]

    run_kwargs = {
        'tty': False,
        'volumes': volumes,
        'detach': True
    }

    # Allow run args to be overridden; filter out any we don't want to override.
    extra_run_kwargs = {k: v for k, v in kwargs.items() if k not in BLACKLISTED_DOCKER_RUN_ARGS}
    run_kwargs.update(extra_run_kwargs)

    if entrypoint is not None:
        run_kwargs['entrypoint'] = entrypoint

    container_args, read_streams, write_streams = _handle_streaming_args(container_args)

    for connector in stream_connectors:
        if isinstance(connector, FDReadStreamConnector):
            read_streams.append(connector)
        elif isinstance(connector, FDWriteStreamConnector):
            write_streams.append(connector)
        else:
            raise TypeError(
                "Expected 'FDReadStreamConnector' or 'FDWriterStreamConnector', received '%s'"
                % type(connector))

    # We need to open any read streams before starting the container, so the
    # underlying named pipes are opened for read.
    for stream in read_streams:
        stream.open()

    container = _run_container(image, container_args, **run_kwargs)
    try:
        _run_select_loop(task, container, read_streams, write_streams)
    finally:
        if container and remove_container:
            container.reload()
            # If the container is still running issue a warning
            if container.status == 'running':
                logger.warning('Container is still running, unable to remove.')
            else:
                container.remove()

    # return an array of None's equal to number of entries in the girder_result_hooks
    # header, in order to trigger processing of the container outputs.
    results = []
    if hasattr(task.request, 'girder_result_hooks'):
        results = (None,) * len(task.request.girder_result_hooks)
    return results
@app.task(base=DockerTask, bind=True)
def docker_run(task, image, pull_image=True, entrypoint=None, container_args=None,
               volumes=None, remove_container=True, **kwargs):
    """
    This task runs a docker container. For details on how to use this task, see the
    :ref:`docker-run` guide.

    :param task: The bound task reference.
    :type task: :py:class:`girder_worker.task.Task`
    :param image: The docker image identifier.
    :type image: str
    :param pull_image: Whether to explicitly pull the image prior to running the container.
    :type pull_image: bool
    :param entrypoint: Alternative entrypoint to use when running the container.
    :type entrypoint: str
    :param container_args: Arguments to pass to the container.
    :type container_args: list
    :param volumes: Volumes to expose to the container.
    :type volumes: dict
    :param remove_container: Whether to delete the container after the task is done.
    :type remove_container: bool
    :return: Fulfilled result hooks.
    :rtype: list
    """
    # Any additional kwargs are forwarded to _docker_run and, from there,
    # to the docker container run invocation.
    return _docker_run(
        task, image, pull_image, entrypoint, container_args, volumes,
        remove_container, **kwargs)
| {
"content_hash": "bab8972e8d4b0dd2ad5149b7824710f4",
"timestamp": "",
"source": "github",
"line_count": 450,
"max_line_length": 100,
"avg_line_length": 36.67333333333333,
"alnum_prop": 0.6105556565472944,
"repo_name": "girder/girder_worker",
"id": "b22a5b068b01c9782149db8179b5f8f94c92ec8d",
"size": "16503",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "girder_worker/docker/tasks/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CMake",
"bytes": "2021"
},
{
"name": "Dockerfile",
"bytes": "1322"
},
{
"name": "JavaScript",
"bytes": "8248"
},
{
"name": "Makefile",
"bytes": "1519"
},
{
"name": "Pug",
"bytes": "4115"
},
{
"name": "Python",
"bytes": "268215"
},
{
"name": "Shell",
"bytes": "8215"
},
{
"name": "Stylus",
"bytes": "1454"
}
],
"symlink_target": ""
} |
"""
CPIncomp - Jorrit's collection of routines for fitting incompressible liquids
=====
readme.md - General instructions and copyright information / credits.
"""
from __future__ import division, absolute_import, print_function
import inspect
from . import DataObjects,ExampleObjects,PureFluids,CoefficientFluids,DigitalFluids,MelinderFluids,SolutionFluids
from .SecCoolFluids import SecCoolSolutionData
def getBaseClassNames():
    """
    Return the names of the abstract base classes in DataObjects that
    should not be instantiated; useful for building an ignore list.
    """
    return [name for name, member in inspect.getmembers(DataObjects)
            if inspect.isclass(member)]
def getExampleNames(obj=False):
    """
    Return the example fluid classes from ExampleObjects that should not be
    treated like normal fluids; useful for building an ignore list.

    With ``obj=True``, instantiated objects are returned instead of names.
    """
    base_names = getBaseClassNames()
    collected = []
    for name, member in inspect.getmembers(ExampleObjects):
        if not inspect.isclass(member) or name in base_names:
            continue
        collected.append(member() if obj else name)
    return collected
def getIgnoreNames():
    """
    Return the names of all classes (abstract bases plus examples) that
    should not be treated like normal fluids.
    """
    return getBaseClassNames() + getExampleNames()
def getCoefficientFluids():
    """
    Return instantiated CoefficientData objects that already contain all
    coefficients; they can be written to JSON without further processing.
    """
    skip = getIgnoreNames()
    return [member() for name, member in inspect.getmembers(CoefficientFluids)
            if inspect.isclass(member) and name not in skip]
def getDigitalFluids():
    """
    Return instantiated DigitalData objects holding data for fitting; the
    fitting routines still have to be called afterwards.

    These fluids exist because: (a) their source equations cannot be
    converted to the forms CoolProp supports, (b) no equations exist but the
    data is accessible from python via another library, or (c) experimental
    data files must be loaded before the fit can be done.
    """
    skip = getIgnoreNames()
    return [member() for name, member in inspect.getmembers(DigitalFluids)
            if inspect.isclass(member) and name not in skip]
def getMelinderFluids():
    """
    Return instantiated CoefficientData objects that already contain all
    coefficients, ready to be written to JSON without further processing.

    All coefficients come from the same reference: "Properties of Secondary
    Working Fluids for Indirect Systems" by Aake Melinder (IIR, 2010).
    """
    skip = getIgnoreNames()
    return [member() for name, member in inspect.getmembers(MelinderFluids)
            if inspect.isclass(member) and name not in skip]
def getPureFluids():
    """
    Return instantiated SolutionData objects holding data for fitting pure
    fluids; the fitting routines still have to be called afterwards.
    """
    skip = getIgnoreNames()
    return [member() for name, member in inspect.getmembers(PureFluids)
            if inspect.isclass(member) and name not in skip]
def getSolutionFluids():
    """
    Return instantiated SolutionData objects holding data for fitting
    solutions; the fitting routines still have to be called afterwards.
    """
    skip = getIgnoreNames()
    return [member() for name, member in inspect.getmembers(SolutionFluids)
            if inspect.isclass(member) and name not in skip]
def getSecCoolFluids():
    """
    Returns a list of DigitalData objects, which
    contain data for the fits. All objects here
    implement the fitFluid() function, which can
    be called to set the coefficients before writing
    the JSON files.
    """
    # Construction is delegated entirely to the SecCool factory helper.
    return SecCoolSolutionData.factory()
def get_version():
    """Return the version number of this incompressible-fluids package."""
    return 0.5
# Running the package module directly prints a quick version check.
if __name__ == "__main__":
    print('You are using version %s of the Python package for incompressible liquids in CoolProp.'%(get_version()))
    print()
| {
"content_hash": "f07b0141f499499ea4fa9ca6c4483b74",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 115,
"avg_line_length": 30.227544910179642,
"alnum_prop": 0.6614500792393027,
"repo_name": "JonWel/CoolProp",
"id": "38438150f656035313dd7f45fc5c965e1bc72787",
"size": "5048",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "dev/incompressible_liquids/CPIncomp/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3633"
},
{
"name": "C",
"bytes": "266838"
},
{
"name": "C#",
"bytes": "56794"
},
{
"name": "C++",
"bytes": "2493384"
},
{
"name": "CMake",
"bytes": "105060"
},
{
"name": "CSS",
"bytes": "8226"
},
{
"name": "FORTRAN",
"bytes": "6439"
},
{
"name": "HTML",
"bytes": "10898"
},
{
"name": "Julia",
"bytes": "14714"
},
{
"name": "Jupyter Notebook",
"bytes": "112217"
},
{
"name": "Lua",
"bytes": "9624"
},
{
"name": "M",
"bytes": "120"
},
{
"name": "Makefile",
"bytes": "27039"
},
{
"name": "Mathematica",
"bytes": "4264"
},
{
"name": "Matlab",
"bytes": "7828"
},
{
"name": "Objective-C",
"bytes": "856"
},
{
"name": "Pascal",
"bytes": "41142"
},
{
"name": "Python",
"bytes": "1441196"
},
{
"name": "Scilab",
"bytes": "1684"
},
{
"name": "Shell",
"bytes": "21582"
},
{
"name": "TeX",
"bytes": "124794"
},
{
"name": "Visual Basic",
"bytes": "19189"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make Order.email optional (nullable and blankable)."""

    dependencies = [
        ('ticketing', '0021_auto_20151222_1744'),
    ]

    operations = [
        migrations.AlterField(
            model_name='order',
            name='email',
            # max_length=254 is Django's EmailField default; null/blank let
            # orders be stored and validated without an email address.
            field=models.EmailField(max_length=254, null=True, blank=True),
        ),
    ]
| {
"content_hash": "9d349025921455b5cc66a22b2c85636d",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 75,
"avg_line_length": 22.38888888888889,
"alnum_prop": 0.6004962779156328,
"repo_name": "tfiers/arenberg-online",
"id": "57ff47f6d179fb0ddec6763f32049ea087333786",
"size": "427",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "ticketing/migrations/0022_auto_20160102_1349.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "62"
},
{
"name": "CSS",
"bytes": "31305"
},
{
"name": "HTML",
"bytes": "230598"
},
{
"name": "JavaScript",
"bytes": "96170"
},
{
"name": "Python",
"bytes": "178246"
}
],
"symlink_target": ""
} |
def topological_sort_as_sets(dependency_graph):
    """
    Variation of Kahn's algorithm (1962) that yields sets.

    *dependency_graph* maps each node to the set of nodes it depends on.
    Each yielded set contains every node whose dependencies were all in
    previously yielded sets; the first set holds the nodes with no
    dependencies at all.  Raises ValueError on a cyclic graph.
    """
    remaining = dependency_graph.copy()
    while remaining:
        ready = {node for node, deps in remaining.items() if not deps}
        if not ready:
            # Nothing is free of dependencies but nodes remain: a cycle.
            raise ValueError('Cyclic dependency in graph: {}'.format(
                ', '.join(repr(x) for x in remaining.items())))
        yield ready
        # Drop the emitted nodes and strip them out of the dependency
        # sets of everything still pending.
        remaining = {
            node: deps - ready
            for node, deps in remaining.items()
            if node not in ready
        }
def stable_topological_sort(l, dependency_graph):
    """Topologically sort *l*, preserving its original order inside each
    layer produced by topological_sort_as_sets()."""
    ordered = []
    for layer in topological_sort_as_sets(dependency_graph):
        ordered.extend(node for node in l if node in layer)
    return ordered
| {
"content_hash": "99de23df8f70c012ed7dac8f43243b4f",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 75,
"avg_line_length": 35.46875,
"alnum_prop": 0.6431718061674009,
"repo_name": "ebar0n/django",
"id": "e0a22c92362f738caec241cc9cf7808fc59c6544",
"size": "1135",
"binary": false,
"copies": "30",
"ref": "refs/heads/master",
"path": "django/db/migrations/topological_sort.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "84168"
},
{
"name": "HTML",
"bytes": "219501"
},
{
"name": "JavaScript",
"bytes": "255420"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "12239901"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List
import datahub.emitter.mce_builder as builder
from datahub.ingestion.api.workunit import MetadataWorkUnit
from datahub.ingestion.source.aws.sagemaker_processors.common import (
SagemakerSourceReport,
)
from datahub.metadata.com.linkedin.pegasus2avro.common import MLFeatureDataType
from datahub.metadata.com.linkedin.pegasus2avro.metadata.snapshot import (
MLFeatureSnapshot,
MLFeatureTableSnapshot,
MLPrimaryKeySnapshot,
)
from datahub.metadata.com.linkedin.pegasus2avro.mxe import MetadataChangeEvent
from datahub.metadata.schema_classes import (
BrowsePathsClass,
MLFeaturePropertiesClass,
MLFeatureTablePropertiesClass,
MLPrimaryKeyPropertiesClass,
)
if TYPE_CHECKING:
from mypy_boto3_sagemaker import SageMakerClient
from mypy_boto3_sagemaker.type_defs import (
DescribeFeatureGroupResponseTypeDef,
FeatureDefinitionTypeDef,
FeatureGroupSummaryTypeDef,
)
@dataclass
class FeatureGroupProcessor:
    """Ingests SageMaker feature groups into DataHub metadata workunits.

    Emits one MLFeatureTable workunit per feature group, plus one
    MLFeature workunit per feature (or an MLPrimaryKey workunit for the
    feature that is the group's record identifier).
    """

    sagemaker_client: "SageMakerClient"  # boto3 SageMaker client
    env: str  # environment tag baked into generated dataset URNs
    report: SagemakerSourceReport  # sink for warnings and scan counters

    def get_all_feature_groups(self) -> List["FeatureGroupSummaryTypeDef"]:
        """
        List all feature groups in SageMaker.
        """
        feature_groups = []
        # see https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sagemaker.html#SageMaker.Client.list_feature_groups
        paginator = self.sagemaker_client.get_paginator("list_feature_groups")
        for page in paginator.paginate():
            feature_groups += page["FeatureGroupSummaries"]
        return feature_groups

    def get_feature_group_details(
        self, feature_group_name: str
    ) -> "DescribeFeatureGroupResponseTypeDef":
        """
        Get details of a feature group (including list of component features).
        """
        # see https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sagemaker.html#SageMaker.Client.describe_feature_group
        feature_group = self.sagemaker_client.describe_feature_group(
            FeatureGroupName=feature_group_name
        )
        # use falsy fallback since AWS stubs require this to be a string in tests
        next_token = feature_group.get("NextToken", "")
        # paginate over feature group features, accumulating all
        # FeatureDefinitions into the first response
        while next_token:
            next_features = self.sagemaker_client.describe_feature_group(
                FeatureGroupName=feature_group_name, NextToken=next_token
            )
            feature_group["FeatureDefinitions"] += next_features["FeatureDefinitions"]
            next_token = feature_group.get("NextToken", "")
        return feature_group

    def get_feature_group_wu(
        self, feature_group_details: "DescribeFeatureGroupResponseTypeDef"
    ) -> MetadataWorkUnit:
        """
        Generate an MLFeatureTable workunit for a SageMaker feature group.

        Parameters
        ----------
        feature_group_details:
            ingested SageMaker feature group from get_feature_group_details()
        """
        feature_group_name = feature_group_details["FeatureGroupName"]
        # snapshot starts with just the browse path aspect
        feature_group_snapshot = MLFeatureTableSnapshot(
            urn=builder.make_ml_feature_table_urn("sagemaker", feature_group_name),
            aspects=[
                BrowsePathsClass(paths=[f"/sagemaker/{feature_group_name}"]),
            ],
        )
        feature_group_snapshot.aspects.append(
            MLFeatureTablePropertiesClass(
                description=feature_group_details.get("Description"),
                # non-primary key features
                mlFeatures=[
                    builder.make_ml_feature_urn(
                        feature_group_name,
                        feature["FeatureName"],
                    )
                    for feature in feature_group_details["FeatureDefinitions"]
                    if feature["FeatureName"]
                    != feature_group_details["RecordIdentifierFeatureName"]
                ],
                mlPrimaryKeys=[
                    builder.make_ml_primary_key_urn(
                        feature_group_name,
                        feature_group_details["RecordIdentifierFeatureName"],
                    )
                ],
                # additional metadata
                customProperties={
                    "arn": feature_group_details["FeatureGroupArn"],
                    "creation_time": str(feature_group_details["CreationTime"]),
                    "status": feature_group_details["FeatureGroupStatus"],
                },
            )
        )
        # make the MCE and workunit
        mce = MetadataChangeEvent(proposedSnapshot=feature_group_snapshot)
        return MetadataWorkUnit(id=feature_group_name, mce=mce)

    # SageMaker feature types -> DataHub MLFeatureDataType
    field_type_mappings = {
        "String": MLFeatureDataType.TEXT,
        "Integral": MLFeatureDataType.ORDINAL,
        "Fractional": MLFeatureDataType.CONTINUOUS,
    }

    def get_feature_type(self, aws_type: str, feature_name: str) -> str:
        """Map a SageMaker feature type to a DataHub MLFeatureDataType,
        reporting a warning and falling back to UNKNOWN for unmapped types."""
        mapped_type = self.field_type_mappings.get(aws_type)
        if mapped_type is None:
            self.report.report_warning(
                feature_name, f"unable to map type {aws_type} to metadata schema"
            )
            mapped_type = MLFeatureDataType.UNKNOWN
        return mapped_type

    def get_feature_wu(
        self,
        feature_group_details: "DescribeFeatureGroupResponseTypeDef",
        feature: "FeatureDefinitionTypeDef",
    ) -> MetadataWorkUnit:
        """
        Generate an MLFeature workunit for a SageMaker feature.

        Parameters
        ----------
        feature_group_details:
            ingested SageMaker feature group from get_feature_group_details()
        feature:
            ingested SageMaker feature
        """
        # if the feature acts as the record identifier, then we ingest it as an MLPrimaryKey
        # the RecordIdentifierFeatureName is guaranteed to exist as it's required on creation
        is_record_identifier = (
            feature_group_details["RecordIdentifierFeatureName"]
            == feature["FeatureName"]
        )
        feature_sources = []
        if "OfflineStoreConfig" in feature_group_details:
            # remove S3 prefix (s3://)
            s3_name = feature_group_details["OfflineStoreConfig"]["S3StorageConfig"][
                "S3Uri"
            ][5:]
            if s3_name.endswith("/"):
                s3_name = s3_name[:-1]
            feature_sources.append(
                builder.make_dataset_urn(
                    "s3",
                    s3_name,
                    self.env,
                )
            )
            if "DataCatalogConfig" in feature_group_details["OfflineStoreConfig"]:
                # if Glue catalog associated with offline store
                glue_database = feature_group_details["OfflineStoreConfig"][
                    "DataCatalogConfig"
                ]["Database"]
                glue_table = feature_group_details["OfflineStoreConfig"][
                    "DataCatalogConfig"
                ]["TableName"]
                full_table_name = f"{glue_database}.{glue_table}"
                self.report.report_warning(
                    full_table_name,
                    f"""Note: table {full_table_name} is an AWS Glue object.
                    To view full table metadata, run Glue ingestion
                    (see https://datahubproject.io/docs/metadata-ingestion/#aws-glue-glue)""",
                )
                feature_sources.append(
                    f"urn:li:dataset:(urn:li:dataPlatform:glue,{full_table_name},{self.env})"
                )
        # note that there's also an OnlineStoreConfig field, but this
        # lacks enough metadata to create a dataset
        # (only specifies the security config and whether it's enabled at all)
        # append feature name and type
        if is_record_identifier:
            primary_key_snapshot: MLPrimaryKeySnapshot = MLPrimaryKeySnapshot(
                urn=builder.make_ml_primary_key_urn(
                    feature_group_details["FeatureGroupName"],
                    feature["FeatureName"],
                ),
                aspects=[
                    MLPrimaryKeyPropertiesClass(
                        dataType=self.get_feature_type(
                            feature["FeatureType"], feature["FeatureName"]
                        ),
                        sources=feature_sources,
                    ),
                ],
            )
            # make the MCE and workunit
            mce = MetadataChangeEvent(proposedSnapshot=primary_key_snapshot)
        else:
            # create snapshot instance for the feature
            feature_snapshot: MLFeatureSnapshot = MLFeatureSnapshot(
                urn=builder.make_ml_feature_urn(
                    feature_group_details["FeatureGroupName"],
                    feature["FeatureName"],
                ),
                aspects=[
                    MLFeaturePropertiesClass(
                        dataType=self.get_feature_type(
                            feature["FeatureType"], feature["FeatureName"]
                        ),
                        sources=feature_sources,
                    )
                ],
            )
            # make the MCE and workunit
            mce = MetadataChangeEvent(proposedSnapshot=feature_snapshot)
        return MetadataWorkUnit(
            id=f'{feature_group_details["FeatureGroupName"]}-{feature["FeatureName"]}',
            mce=mce,
        )

    def get_workunits(self) -> Iterable[MetadataWorkUnit]:
        """Yield workunits for every feature group and every feature in
        SageMaker, updating the source report as each item is scanned."""
        feature_groups = self.get_all_feature_groups()
        for feature_group in feature_groups:
            feature_group_details = self.get_feature_group_details(
                feature_group["FeatureGroupName"]
            )
            # features first, then the group itself
            for feature in feature_group_details["FeatureDefinitions"]:
                self.report.report_feature_scanned()
                wu = self.get_feature_wu(feature_group_details, feature)
                self.report.report_workunit(wu)
                yield wu
            self.report.report_feature_group_scanned()
            wu = self.get_feature_group_wu(feature_group_details)
            self.report.report_workunit(wu)
            yield wu
| {
"content_hash": "504cbddf253194a2e9928b1b2c2a7597",
"timestamp": "",
"source": "github",
"line_count": 281,
"max_line_length": 143,
"avg_line_length": 37.04982206405694,
"alnum_prop": 0.5839976947459418,
"repo_name": "linkedin/WhereHows",
"id": "381ab4ef88af8a5fb2dab37a67cbfc905230cf62",
"size": "10411",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "metadata-ingestion/src/datahub/ingestion/source/aws/sagemaker_processors/feature_groups.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "110129"
},
{
"name": "Dockerfile",
"bytes": "2521"
},
{
"name": "HTML",
"bytes": "131513"
},
{
"name": "Java",
"bytes": "1307442"
},
{
"name": "JavaScript",
"bytes": "148450"
},
{
"name": "Nearley",
"bytes": "2837"
},
{
"name": "Python",
"bytes": "1419332"
},
{
"name": "Shell",
"bytes": "2564"
},
{
"name": "TSQL",
"bytes": "42644"
},
{
"name": "TypeScript",
"bytes": "641014"
}
],
"symlink_target": ""
} |
import copy
import mock
import six
from rally.plugins.openstack.context.ceilometer import samples
from rally.plugins.openstack.scenarios.ceilometer import utils as ceilo_utils
from tests.unit import test
CTX = "rally.plugins.openstack.context.ceilometer"
class CeilometerSampleGeneratorTestCase(test.TestCase):
    """Tests for the Ceilometer samples context (samples.CeilometerSampleGenerator)."""

    def _gen_tenants(self, count):
        """Return *count* fake tenants keyed by stringified index."""
        tenants = {}
        for id_ in range(count):
            tenants[str(id_)] = {"name": str(id_)}
        return tenants

    def _gen_context(self, tenants_count, users_per_tenant,
                     resources_per_tenant, samples_per_resource):
        """Build (tenants, context) with a full ceilometer config section
        and mocked user endpoints for the given sizing parameters."""
        tenants = self._gen_tenants(tenants_count)
        users = []
        for id_ in tenants.keys():
            for i in range(users_per_tenant):
                users.append({"id": i, "tenant_id": id_,
                              "endpoint": mock.MagicMock()})
        context = test.get_test_context()
        context.update({
            "config": {
                "users": {
                    "tenants": tenants_count,
                    "users_per_tenant": users_per_tenant,
                    "concurrent": 10,
                },
                "ceilometer": {
                    "counter_name": "fake-counter-name",
                    "counter_type": "fake-counter-type",
                    "counter_unit": "fake-counter-unit",
                    "counter_volume": 100,
                    "resources_per_tenant": resources_per_tenant,
                    "samples_per_resource": samples_per_resource,
                    "timestamp_interval": 60,
                    "metadata_list": [
                        {"status": "active", "name": "fake_resource",
                         "deleted": "False",
                         "created_at": "2015-09-04T12:34:19.000000"},
                        {"status": "not_active", "name": "fake_resource_1",
                         "deleted": "False",
                         "created_at": "2015-09-10T06:55:12.000000"}
                    ]
                }
            },
            "admin": {
                "endpoint": mock.MagicMock()
            },
            "users": users,
            "tenants": tenants
        })
        return tenants, context

    def test_init(self):
        """The generator should adopt the 'ceilometer' config section as its config."""
        context = {}
        context["task"] = mock.MagicMock()
        context["config"] = {
            "ceilometer": {
                "counter_name": "cpu_util",
                "counter_type": "gauge",
                "counter_unit": "instance",
                "counter_volume": 1.0,
                "resources_per_tenant": 5,
                "samples_per_resource": 5,
                "timestamp_intervals": 60,
                "metadata_list": [
                    {"status": "active", "name": "fake_resource",
                     "deleted": "False",
                     "created_at": "2015-09-04T12:34:19.000000"},
                    {"status": "not_active", "name": "fake_resource_1",
                     "deleted": "False",
                     "created_at": "2015-09-10T06:55:12.000000"}
                ]
            }
        }
        inst = samples.CeilometerSampleGenerator(context)
        self.assertEqual(inst.config, context["config"]["ceilometer"])

    def test_setup(self):
        """setup() should populate each tenant with the expected samples and
        resource ids, given a mocked _create_samples."""
        tenants_count = 2
        users_per_tenant = 2
        resources_per_tenant = 2
        samples_per_resource = 2
        tenants, real_context = self._gen_context(
            tenants_count, users_per_tenant,
            resources_per_tenant, samples_per_resource)
        scenario = ceilo_utils.CeilometerScenario(real_context)
        sample = {
            "counter_name": "fake-counter-name",
            "counter_type": "fake-counter-type",
            "counter_unit": "fake-counter-unit",
            "counter_volume": 100,
            "resource_id": "fake-resource-id",
            "metadata_list": [
                {"status": "active", "name": "fake_resource",
                 "deleted": "False",
                 "created_at": "2015-09-04T12:34:19.000000"},
                {"status": "not_active", "name": "fake_resource_1",
                 "deleted": "False",
                 "created_at": "2015-09-10T06:55:12.000000"}
            ]
        }
        scenario.generate_random_name = mock.Mock(
            return_value="fake_resource-id")
        # build the expected sample payloads the same way the context does
        kwargs = copy.deepcopy(sample)
        kwargs.pop("resource_id")
        samples_to_create = scenario._make_samples(count=samples_per_resource,
                                                   interval=60, **kwargs)
        # construct the context we expect setup() to produce
        new_context = copy.deepcopy(real_context)
        for id_ in tenants.keys():
            new_context["tenants"][id_].setdefault("samples", [])
            new_context["tenants"][id_].setdefault("resources", [])
            for i in six.moves.xrange(resources_per_tenant):
                for sample in samples_to_create:
                    new_context["tenants"][id_]["samples"].append(sample)
                new_context["tenants"][id_]["resources"].append(
                    sample["resource_id"])
        with mock.patch("%s.samples.ceilo_utils.CeilometerScenario"
                        "._create_samples" % CTX) as mock_create_samples:
            mock_create_samples.return_value = []
            for i, sample in enumerate(samples_to_create):
                sample_object = mock.MagicMock(resource_id="fake_resource-id")
                sample_object.to_dict.return_value = sample
                mock_create_samples.return_value.append(sample_object)
            ceilometer_ctx = samples.CeilometerSampleGenerator(real_context)
            ceilometer_ctx.setup()
            self.assertEqual(new_context, ceilometer_ctx.context)

    def test_cleanup(self):
        """cleanup() is a no-op for this context; it must not raise."""
        tenants, context = self._gen_context(2, 5, 3, 3)
        ceilometer_ctx = samples.CeilometerSampleGenerator(context)
        ceilometer_ctx.cleanup()
| {
"content_hash": "b61ffcd7232d387c94da8c25f9db6625",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 78,
"avg_line_length": 41.18881118881119,
"alnum_prop": 0.5084889643463497,
"repo_name": "group-policy/rally",
"id": "93e5447f879351ff39be02983795488f56e7b20a",
"size": "6456",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/plugins/openstack/context/ceilometer/test_samples.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "35771"
},
{
"name": "Mako",
"bytes": "17389"
},
{
"name": "Python",
"bytes": "2926625"
},
{
"name": "Shell",
"bytes": "40843"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
    """Build the Intangible object for this draft schematic template."""
    result = Intangible()
    result.template = "object/draft_schematic/chemistry/shared_medpack_enhance_action_a.iff"
    result.attribute_template_id = -1
    result.stfName("string_id_table","")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return result
"content_hash": "12b011f83269773120eb8c4c03dcc8f0",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 89,
"avg_line_length": 24.46153846153846,
"alnum_prop": 0.7012578616352201,
"repo_name": "anhstudios/swganh",
"id": "dea92fd2a692ff0d858b0863d4bf41a7d6805aad",
"size": "463",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/draft_schematic/chemistry/shared_medpack_enhance_action_a.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
import sys
import gc
from pathlib import Path
from datetime import datetime, timedelta
import logging
import numpy as np
import ephem
from astropy.table import Table, Column
from astropy import stats
from astroquery.vizier import Vizier
##-------------------------------------------------------------------------
## Function: get_memory_size
##-------------------------------------------------------------------------
def get_memory_size(input_obj):
    """Estimate the total memory footprint of *input_obj* in MB.

    Walks the reachable object graph breadth-first via gc.get_referents,
    counting each object at most once (tracked by id), and sums
    sys.getsizeof over everything found.
    """
    seen_ids = set()
    total_bytes = 0
    frontier = [input_obj]
    while frontier:
        next_frontier = []
        for obj in frontier:
            if id(obj) in seen_ids:
                continue
            seen_ids.add(id(obj))
            total_bytes += sys.getsizeof(obj)
            next_frontier.append(obj)
        frontier = gc.get_referents(*next_frontier)
    return total_bytes / 1024 / 1024
##-------------------------------------------------------------------------
## Function: analyze_object_memory
##-------------------------------------------------------------------------
def analyze_object_memory(input_obj):
    """Return a multi-line report of the memory size (MB) of every
    attribute of *input_obj*, as measured by get_memory_size()."""
    lines = ["\n"]
    for attribute in dir(input_obj):
        size_mb = get_memory_size(getattr(input_obj, attribute))
        lines.append(f"Memory size of {attribute} = {size_mb:.1f} MB\n")
    return "".join(lines)
##-------------------------------------------------------------------------
## Function: get_orientation_angles
##-------------------------------------------------------------------------
def get_orientation_angles(w):
    """Return (north, east) rotation angles in degrees from a WCS object.

    Both angles are derived from the WCS PC matrix by rotating the unit
    pixel axes and taking the arctangent of the resulting components.

    NOTE(review): np.arctan(dx/dy) only resolves angles to within +/-90
    degrees and raises a divide warning when dy == 0; np.arctan2(dx, dy)
    would be more robust -- confirm callers only expect small rotations
    before changing.
    """
    dx, dy = np.dot(w.wcs.get_pc(), [0,1])
    north = np.arctan(dx/dy)*180/np.pi
    dx, dy = np.dot(w.wcs.get_pc(), [1,0])
    east = np.arctan(dx/dy)*180/np.pi
    return north, east
##-----------------------------------------------------------------------------
## Function: get_destination_dir
##-----------------------------------------------------------------------------
def get_destination_dir(cfg, date=None):
    """Expand the [FileHandling] destination_dir config template.

    Replaces the YYYY, MM, and DD placeholders with the corresponding
    zero-padded fields of *date* (UTC "now" when date is None).
    """
    template = cfg['FileHandling'].get('destination_dir')
    if date is None:
        date = datetime.utcnow()
    substitutions = {'YYYY': f'{date.year:4d}',
                     'MM': f'{date.month:02d}',
                     'DD': f'{date.day:02d}'}
    result = template
    for placeholder, value in substitutions.items():
        result = result.replace(placeholder, value)
    return result
##-----------------------------------------------------------------------------
## Function: get_destination_dir
##-----------------------------------------------------------------------------
def get_jpeg_dir(cfg, obstime):
    """Expand the [jpeg] directory config template for *obstime*.

    Replaces the YYYY, MM, and DD placeholders with the zero-padded
    year, month, and day of *obstime*.
    """
    template = cfg['jpeg'].get('directory')
    substitutions = {'YYYY': f'{obstime.year:4d}',
                     'MM': f'{obstime.month:02d}',
                     'DD': f'{obstime.day:02d}'}
    result = template
    for placeholder, value in substitutions.items():
        result = result.replace(placeholder, value)
    return result
##-----------------------------------------------------------------------------
## Function: get_sunrise_sunset
##-----------------------------------------------------------------------------
def get_sunrise_sunset(start):
    """Compute sunrise, sunset, and twilight times for the night of *start*.

    Uses pyephem with a hard-coded observer site in Hawaii
    (lat +19:32:09.66, lon -155:34:33.9, 3400 m) and a reference time of
    10:00 UT on *start*'s date.  Returns a dict of naive datetimes keyed
    'sunset', 'sunrise', and '{evening,morning}_{civil,nautical,
    astronomical}_twilight'.
    """
    obs = ephem.Observer()
    obs.lon = "-155:34:33.9"
    obs.lat = "+19:32:09.66"
    obs.elevation = 3400.0
    obs.temp = 10.0
    obs.pressure = 680.0
    obs.date = start.strftime('%Y/%m/%d 10:00:00')

    obs.horizon = '0.0'
    result = {'sunset': obs.previous_setting(ephem.Sun()).datetime(),
              'sunrise': obs.next_rising(ephem.Sun()).datetime(),
              }
    # Twilight definitions: civil (-6 deg), nautical (-12), astronomical (-18).
    for depression, label in (('-6.0', 'civil'),
                              ('-12.0', 'nautical'),
                              ('-18.0', 'astronomical')):
        obs.horizon = depression
        result[f'evening_{label}_twilight'] = obs.previous_setting(
            ephem.Sun(), use_center=True).datetime()
        result[f'morning_{label}_twilight'] = obs.next_rising(
            ephem.Sun(), use_center=True).datetime()
    return result
##-----------------------------------------------------------------------------
## Function: Evaluate pre and post conditions
##-----------------------------------------------------------------------------
def pre_condition(primitive, name, condition,
                  fail_level=logging.DEBUG,
                  success_level=logging.DEBUG):
    """Log whether the named precondition of *primitive* holds.

    Logs at *success_level* when condition is True, else at *fail_level*,
    and returns *condition* unchanged.
    """
    satisfied = condition is True
    level = success_level if satisfied else fail_level
    outcome = 'satisfied' if satisfied else 'failed'
    primitive.log.log(level,
        f'Precondition for {primitive.__class__.__name__} "{name}" {outcome}')
    return condition
def post_condition(primitive, name, condition,
                   fail_level=logging.WARNING,
                   success_level=logging.DEBUG):
    """Log whether the named postcondition of *primitive* holds.

    Logs at *success_level* when condition is True, else at *fail_level*
    (WARNING by default, unlike pre_condition), and returns *condition*.
    """
    satisfied = condition is True
    level = success_level if satisfied else fail_level
    outcome = 'satisfied' if satisfied else 'failed'
    primitive.log.log(level,
        f'Postcondition for {primitive.__class__.__name__} "{name}" {outcome}')
    return condition
##-----------------------------------------------------------------------------
## Function: download_vizier
##-----------------------------------------------------------------------------
def download_vizier(pointing, radius, catalog='UCAC4', band='i', maglimit=None):
    """Query Vizier for stars around *pointing* within *radius* degrees.

    Supported catalogs are 'UCAC4' and 'Gaia'; supported bands 'r'/'i'.
    Adds uniform RA/DEC/mag columns to the returned astropy Table.
    Returns None if the query fails or yields no tables.

    Raises NotImplementedError for an unsupported catalog or band.
    """
    # `c` (astropy.coordinates) was referenced below but never imported at
    # module level, so c.Angle always raised NameError (silently swallowed
    # by the old bare `except:`). Import it here to fix the lookup.
    from astropy import coordinates as c

    catalogs = {'UCAC4': 'I/322A', 'Gaia': 'I/345/gaia2'}
    if catalog not in catalogs.keys():
        print(f'{catalog} not in {catalogs.keys()}')
        raise NotImplementedError
    if band not in ['r', 'i']:
        print(f'Band {band} not supported')
        raise NotImplementedError
    # Per-catalog column names; only the magnitude column depends on band.
    columns = {'UCAC4': ['_RAJ2000', '_DEJ2000', 'rmag', 'imag'],
               'Gaia': ['RA_ICRS', 'DE_ICRS', 'Gmag', 'RPmag']}
    ra_colname = {'UCAC4': '_RAJ2000',
                  'Gaia': 'RA_ICRS'}
    dec_colname = {'UCAC4': '_DEJ2000',
                   'Gaia': 'DE_ICRS'}
    mag_colname = {'UCAC4': f'{band}mag',
                   'Gaia': 'RPmag'}
    # No maglimit -> keep everything with a valid (positive) magnitude.
    filter_string = '>0' if maglimit is None else f"<{maglimit}"
    column_filter = {mag_colname[catalog]: filter_string}
    v = Vizier(columns=columns[catalog],
               column_filters=column_filter)
    v.ROW_LIMIT = 2e4
    try:
        stars = Table(v.query_region(pointing, catalog=catalogs[catalog],
                                     radius=c.Angle(radius, "deg"))[0])
        stars.add_column(Column(data=stars[ra_colname[catalog]], name='RA'))
        stars.add_column(Column(data=stars[dec_colname[catalog]], name='DEC'))
        stars.add_column(Column(data=stars[mag_colname[catalog]], name='mag'))
    except Exception:
        # Query failed or returned no tables (IndexError on [0]); callers
        # treat None as "no catalog available". Narrowed from a bare
        # `except:` so SystemExit/KeyboardInterrupt propagate.
        stars = None
    return stars
##-----------------------------------------------------------------------------
## Function: get_panstarrs
##-----------------------------------------------------------------------------
def get_panstarrs(cfg, field_name, pointing, filter, radius=0.40,
                  maglimit=None, log=None):
    """Return a PanSTARRS DR2 catalog for *field_name* around *pointing*.

    Reads a previously cached CSV from the configured local_catalog_path
    when one exists for this field/band/maglimit combination; otherwise
    downloads the catalog from MAST and caches it.  The result is then
    filtered to rows at or below *maglimit* in the requested band.
    """
    # NOTE(review): `catalogname` is read from config but never used below.
    catalogname = cfg['Photometry'].get('calibration_catalog')
    band = {'PSi': 'i', 'PSr': 'r'}[filter]
    if maglimit is None: maglimit = 25
    ## First check if we have a pre-downloaded catalog for this field
    local_catalog_path = Path(cfg['Photometry'].get('local_catalog_path', '.'))
    # Cache filename encodes band and 10x the magnitude limit (e.g. i250).
    local_catalog_file = local_catalog_path.joinpath(f'{field_name}_{band}{maglimit*10:03.0f}.cat')
    if local_catalog_file.exists() is True:
        ## Read local file
        if log: log.debug(f'  Reading {local_catalog_file}')
        pscat = Table.read(local_catalog_file, format='ascii.csv')
    else:
        ## Download
        if log: log.debug(f'  Downloading from Mast')
        from astroquery.mast import Catalogs
#         cols = ['objName', 'objID', 'objInfoFlag', 'qualityFlag', 'raMean',
#                 'decMean', 'raMeanErr', 'decMeanErr', 'epochMean', 'nDetections',
#                 'ng', 'nr', 'ni', 'gMeanApMag', 'gMeanApMagErr', 'gMeanApMagStd',
#                 'gMeanApMagNpt', 'gFlags', 'rMeanApMag', 'rMeanApMagErr',
#                 'rMeanApMagStd', 'rMeanApMagNpt', 'rFlags', 'iMeanApMag',
#                 'iMeanApMagErr', 'iMeanApMagStd', 'iMeanApMagNpt', 'iFlags']
        cols = ['objName', 'objID', 'raMean', 'decMean', 'raMeanErr', 'decMeanErr',
                'gMeanApMag', 'gMeanApMagErr', 'gMeanApMagStd',
                'gMeanApMagNpt', 'gFlags', 'rMeanApMag', 'rMeanApMagErr',
                'rMeanApMagStd', 'rMeanApMagNpt', 'rFlags', 'iMeanApMag',
                'iMeanApMagErr', 'iMeanApMagStd', 'iMeanApMagNpt', 'iFlags']
        # NOTE(review): the magnitude pre-filter below is always on
        # iMeanApMag regardless of *band* -- presumably intentional only
        # for band == 'i'; verify for 'r'/'g'.
        if band in ['i', 'r', 'g']:
            pscat = Catalogs.query_region(pointing, radius=radius,
                           catalog="Panstarrs", table="mean", data_release="dr2",
                           sort_by=[("desc", f"{band}MeanApMag")], columns=cols,
                           iMeanApMag=[("gte", 0), ("lte", maglimit)],
                           )
        else:
            pscat = Catalogs.query_region(pointing, radius=radius,
                           catalog="Panstarrs", table="mean", data_release="dr2",
                           columns=cols,
                           )
#         if band == 'i':
#             pscat = Catalogs.query_region(pointing, radius=radius,
#                            catalog="Panstarrs", table="mean", data_release="dr2",
#                            sort_by=[("desc", f"{band}MeanApMag")], columns=cols,
#                            iMeanApMag=[("gte", 0), ("lte", maglimit)],
#                            )
#         elif band == 'r':
#             pscat = Catalogs.query_region(pointing, radius=radius,
#                            catalog="Panstarrs", table="mean", data_release="dr2",
#                            sort_by=[("desc", f"{band}MeanApMag")], columns=cols,
#                            rMeanApMag=[("gte", 0), ("lte", maglimit)],
#                            )
#         elif band == 'g':
#             pscat = Catalogs.query_region(pointing, radius=radius,
#                            catalog="Panstarrs", table="mean", data_release="dr2",
#                            sort_by=[("desc", f"{band}MeanApMag")], columns=cols,
#                            gMeanApMag=[("gte", 0), ("lte", maglimit)],
#                            )
#         else:
#             pscat = Catalogs.query_region(pointing, radius=radius,
#                            catalog="Panstarrs", table="mean", data_release="dr2",
#                            columns=cols,
#                            )
        # NOTE(review): both debug lines report len(pscat); the second
        # claims a band-specific count but measures the same thing.
        if log: log.debug(f'  Got {len(pscat)} entries total')
        if log: log.debug(f'  Got {len(pscat)} entries with {band}-band magnitudes')
        if log: log.debug(f'  Writing {local_catalog_file}')
        pscat.write(local_catalog_file, format='ascii.csv')

    # Filter based on magnitude
    if maglimit is not None:
        pscat = pscat[pscat[f'{band}MeanApMag'] <= maglimit]

    return pscat
##-----------------------------------------------------------------------------
## Function: sigma_clipping_line_fit
##-----------------------------------------------------------------------------
def sigma_clipping_line_fit(xdata, ydata, nsigma=3, maxiter=7, maxcleanfrac=0.3,
                            intercept_fixed=False, intercept0=0, slope0=1,
                            log=None):
    """Fit a line to (xdata, ydata) with iterative sigma clipping.

    Parameters
    ----------
    xdata, ydata : array-like
        Points to fit.
    nsigma : float
        Clip threshold in units of the sigma-clipped stddev of residuals.
    maxiter : int
        Maximum number of refit iterations.
    maxcleanfrac : float
        Stop (returning the previous fit) if more than this fraction of
        points is excluded.
    intercept_fixed : bool
        Hold the intercept fixed at *intercept0*.
    intercept0, slope0 : float
        Initial parameter guesses.
    log : logging.Logger or None
        Optional debug logger.

    Returns
    -------
    The fitted astropy Linear1D model.
    """
    # Fix: `fitting` and `models` (astropy.modeling) were referenced but
    # never imported anywhere in this module, so this function always
    # raised NameError.  Import locally to keep the module import light.
    from astropy.modeling import fitting, models

    if log: log.debug('  Running sigma_clipping_line_fit')
    npoints = len(xdata)
    if log: log.debug(f'  npoints = {npoints}')
    fit = fitting.LinearLSQFitter()
    line_init = models.Linear1D(slope=slope0, intercept=intercept0)
    line_init.intercept.fixed = intercept_fixed
    fitted_line = fit(line_init, xdata, ydata)
    deltas = ydata - fitted_line(xdata)
    mean, median, std = stats.sigma_clipped_stats(deltas)
    cleaned = np.array(abs(deltas) < nsigma*std)
    if log: log.debug(f'  fitted slope = {fitted_line.slope.value:3g}')
    if log: log.debug(f'  std = {std:4g}')
    if log: log.debug(f'  n_cleaned = {np.sum(cleaned)}')
    for iteration in range(1, maxiter+1):
        last_std = std
        new_fit = fit(line_init, xdata[cleaned], ydata[cleaned])
        deltas = ydata - new_fit(xdata)
        mean, median, std = stats.sigma_clipped_stats(deltas)
        # NOTE(review): the union here means a point, once accepted, is
        # never rejected in later iterations; re-clipping usually
        # recomputes the mask from scratch -- confirm this is intended.
        cleaned = cleaned | np.array(abs(deltas) < nsigma*std)
        if np.sum(~cleaned)/npoints > maxcleanfrac:
            # Too many points excluded: keep the previous accepted fit.
            if log: log.debug(f'  Exceeded maxcleanfrac of {maxcleanfrac}')
            return fitted_line
        if std > last_std:
            # Fit stopped improving: keep the previous accepted fit.
            if log: log.debug(f'  StdDev increased')
            return fitted_line
        else:
            fitted_line = new_fit
        if log: log.debug(f'  {iteration} fitted slope = {fitted_line.slope.value:3g}')
        if log: log.debug(f'  {iteration} std = {std:4g}')
        if log: log.debug(f'  {iteration} n_cleaned = {np.sum(cleaned)}')
    return fitted_line
##-----------------------------------------------------------------------------
## Function: estimate_f0
##-----------------------------------------------------------------------------
def estimate_f0(A, band='i'):
    '''Estimate the zero-magnitude photon flux (photons/sec) for a
    telescope of collecting area *A* (m^2) in the given *band*.

    1 Jy = 1.51e7 photons sec^-1 m^-2 (dlambda/lambda)^-1

    https://archive.is/20121204144725/http://www.astro.utoronto.ca/~patton/astro/mags.html#selection-587.2-587.19

    band cent    dl/l    Flux0   Reference
    U    0.36    0.15    1810    Bessel (1979)
    B    0.44    0.22    4260    Bessel (1979)
    V    0.55    0.16    3640    Bessel (1979)
    R    0.64    0.23    3080    Bessel (1979)
    I    0.79    0.19    2550    Bessel (1979)
    J    1.26    0.16    1600    Campins, Reike, & Lebovsky (1985)
    H    1.60    0.23    1080    Campins, Reike, & Lebovsky (1985)
    K    2.22    0.23    670     Campins, Reike, & Lebovsky (1985)
    g    0.52    0.14    3730    Schneider, Gunn, & Hoessel (1983)
    r    0.67    0.14    4490    Schneider, Gunn, & Hoessel (1983)
    i    0.79    0.16    4760    Schneider, Gunn, & Hoessel (1983)
    z    0.91    0.13    4810    Schneider, Gunn, & Hoessel (1983)
    '''
    tabledata = {'band': ['U', 'B', 'V', 'R', 'I', 'J', 'H', 'K', 'g', 'r', 'i', 'z'],
                 'cent': [0.36, 0.44, 0.55, 0.64, 0.79, 1.26, 1.60, 2.22, 0.52, 0.67, 0.79, 0.91],
                 'dl/l': [0.15, 0.22, 0.16, 0.23, 0.19, 0.16, 0.23, 0.23, 0.14, 0.14, 0.16, 0.13],
                 'Flux0': [1810, 4260, 3640, 3080, 2550, 1600, 1080, 670 , 3730, 4490, 4760, 4810],
                }
    t = Table(tabledata)
    # Select the single matching row (unused local `dl = 0.16` removed --
    # the band-specific 'dl/l' column value is what is actually used).
    row = t[t['band'] == band]
    # NOTE(review): an unknown *band* yields an empty selection and an
    # IndexError below rather than a clear error -- confirm callers only
    # pass bands from the table.
    f0 = row['Flux0'] * 1.51e7 * A * row['dl/l'] # photons / sec
    return f0[0]
##-----------------------------------------------------------------------------
## Function: mode
##-----------------------------------------------------------------------------
def mode(data):
'''
Return mode of image. Assumes int values (ADU), so uses binsize of one.
'''
bmin = np.floor(min(data.ravel())) - 1./2.
bmax = np.ceil(max(data.ravel())) + 1./2.
bins = np.arange(bmin,bmax,1)
hist, bins = np.histogram(data.ravel(), bins=bins)
centers = (bins[:-1] + bins[1:]) / 2
w = np.argmax(hist)
mode = int(centers[w])
return mode
##-----------------------------------------------------------------------------
## Function: find_master
##-----------------------------------------------------------------------------
def build_master_file_name(meta, master_type, date_string):
    """Construct the canonical filename for a master calibration frame.

    BIAS names carry only the date; DARK names add the (zero-padded)
    exposure time from *meta*; FLAT names add the filter from *meta*.
    Returns None for an unrecognized *master_type*.
    """
    if master_type in ['BIAS']:
        return f"MasterBIAS_{date_string}.fits"
    if master_type in ['DARK']:
        exptime = int(meta.get('exptime'))
        return f"MasterDARK_{exptime:03d}s_{date_string}.fits"
    if master_type in ['FLAT']:
        return f"MasterFLAT_{meta.get('filter')}_{date_string}.fits"
    return None
def find_master(master_directory, master_type, meta):
    """Locate a master calibration file near the date recorded in *meta*.

    Checks *master_directory* for the exact date first, then scans
    outward day by day (earlier date tried before later at each step).
    Returns the Path of the first existing file, or None.

    NOTE: preserves the original search window, which actually extends
    to +/-11 days despite the "within 10 days" comment it replaced.
    """
    if master_directory is None:
        return None
    master_directory = Path(master_directory)
    if not master_directory.exists():
        return None

    def candidate(offset_days):
        # Build the expected filename for the observation date shifted
        # by offset_days.
        date_string = (meta.get('date') + timedelta(offset_days)).strftime('%Y%m%dUT')
        name = build_master_file_name(meta, master_type, date_string)
        return master_directory.joinpath(name)

    exact = candidate(0)
    if exact.exists():
        return exact
    for count in range(1, 12):
        for offset in (-count, count):
            found = candidate(offset)
            if found.exists():
                return found
    return None
| {
"content_hash": "639c0f69d3d19dbfc3c8b462776ba42c",
"timestamp": "",
"source": "github",
"line_count": 405,
"max_line_length": 113,
"avg_line_length": 44.03703703703704,
"alnum_prop": 0.49184188393608075,
"repo_name": "joshwalawender/IQMon",
"id": "d07db0148215b8a374eaaf6ab035cc46b0fc1799",
"size": "17835",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "iqmon/primitives/utils.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "15878"
},
{
"name": "Python",
"bytes": "219609"
}
],
"symlink_target": ""
} |
import unittest
from biicode.common.model.bii_type import CPP
from biicode.common.model.cells import SimpleCell
from biicode.common.edition.processors.deps_configuration import DependenciesConfigurationProcessor
from biicode.common.model.brl.block_cell_name import BlockCellName
from biicode.common.model.blob import Blob
from biicode.common.model.content import Content
from biicode.common.test.edition.processors.helpers import process_holder
from biicode.common.edition.processors.processor_changes import ProcessorChanges
from biicode.common.edition.block_holder import BlockHolder, BIICODE_FILE
from biicode.common.model.resource import Resource
from biicode.common.model.brl.block_name import BlockName
from biicode.common.output_stream import OutputStream
class DependencyConfigurationProcessorTest(unittest.TestCase):
    """Tests for DependenciesConfigurationProcessor.

    Each test builds an in-memory block 'user/block' holding three source
    cells (r1.h, r2.cpp, r3.cpp) plus the biicode config file, writes a
    "[dependencies]" section into that config file, runs the processor and
    checks the implicit dependencies it derives.  '+' adds, '-' removes and
    '=' replaces dependencies; the bare "a b" forms are the legacy syntax.
    """

    def _prepare_context(self, my_conf):
        # Wrap the raw rule text in the [dependencies] section the
        # processor expects (empty conf stays empty).
        if my_conf:
            my_conf = "[dependencies]\n " + my_conf
        self.processor = DependenciesConfigurationProcessor()
        self.r1 = BlockCellName('user/block/r1.h')
        self.r2 = BlockCellName('user/block/r2.cpp')
        self.r3 = BlockCellName('user/block/r3.cpp')
        r1 = SimpleCell(self.r1, CPP)
        r2 = SimpleCell(self.r2, CPP)
        r3 = SimpleCell('user/block/r3.cpp', CPP)
        # The biicode config cell carries the rule text as its content.
        r4 = SimpleCell('user/block/' + BIICODE_FILE)
        res = {r1.name: Resource(r1, None),
               r2.name: Resource(r2, None),
               r3.name: Resource(r3, None),
               r4.name: Resource(r4, Content(None, Blob(my_conf)))
               }
        return BlockHolder(BlockName('user/block'), res)

    def test_basic_add(self):
        # '+' appends r2.cpp to r1.h's implicit dependencies.
        my_conf = 'r1.h + r2.cpp'
        block_holder = self._prepare_context(my_conf)
        changes, _ = process_holder(block_holder, self.processor)
        self.assertEqual(0, len(changes.upserted))
        r1 = block_holder[self.r1.cell_name].cell
        self.assertEqual({self.r2}, r1.dependencies.implicit)
        self.assertEqual(set(), r1.dependencies.exclude_from_build)

    def test_old_format_add_implicit(self):
        # Legacy syntax without operator behaves like '+'.
        my_conf = 'r1.h r2.cpp'
        block_holder = self._prepare_context(my_conf)
        changes, _ = process_holder(block_holder, self.processor)
        self.assertEqual(0, len(changes.upserted))
        r1 = block_holder[self.r1.cell_name].cell
        self.assertEqual({self.r2}, r1.dependencies.implicit)
        self.assertEqual(set(), r1.dependencies.exclude_from_build)

    def test_old_format_add_explicit(self):
        # Legacy prefixed '+' form.
        my_conf = '+r1.h r2.cpp'
        block_holder = self._prepare_context(my_conf)
        changes, _ = process_holder(block_holder, self.processor)
        self.assertEqual(0, len(changes.upserted))
        r1 = block_holder[self.r1.cell_name].cell
        self.assertEqual({self.r2}, r1.dependencies.implicit)
        self.assertEqual(set(), r1.dependencies.exclude_from_build)

    def test_basic_remove(self,):
        # '-' deletes a previously present implicit dependency.
        my_conf = 'r1.h - r2.cpp'
        block_holder = self._prepare_context(my_conf)
        block_holder[self.r1.cell_name].cell.dependencies.implicit.add(self.r2)
        changes, _ = process_holder(block_holder, self.processor)
        self.assertEqual(0, len(changes.upserted))
        r1 = block_holder[self.r1.cell_name].cell
        self.assertEqual(set(), r1.dependencies.implicit)
        self.assertEqual(set(), r1.dependencies.exclude_from_build)

    def test_old_format_remove(self,):
        # Legacy prefixed '-' form.
        my_conf = '-r1.h r2.cpp'
        block_holder = self._prepare_context(my_conf)
        block_holder[self.r1.cell_name].cell.dependencies.implicit.add(self.r2)
        changes, _ = process_holder(block_holder, self.processor)
        self.assertEqual(0, len(changes.upserted))
        r1 = block_holder[self.r1.cell_name].cell
        self.assertEqual(set(), r1.dependencies.implicit)
        self.assertEqual(set(), r1.dependencies.exclude_from_build)

    def test_basic_assign(self,):
        # '=' replaces existing implicit deps (r3) with the new set (r2).
        my_conf = 'r1.h = r2.cpp'
        block_holder = self._prepare_context(my_conf)
        block_holder[self.r1.cell_name].cell.dependencies.implicit.add(self.r3)
        changes, _ = process_holder(block_holder, self.processor)
        self.assertEqual(0, len(changes.upserted))
        r1 = block_holder[self.r1.cell_name].cell
        self.assertEqual({self.r2}, r1.dependencies.implicit)
        self.assertEqual(set(), r1.dependencies.exclude_from_build)

    def test_old_format_assign(self,):
        # Legacy prefixed '=' form.
        my_conf = '=r1.h r2.cpp'
        block_holder = self._prepare_context(my_conf)
        block_holder[self.r1.cell_name].cell.dependencies.implicit.add(self.r3)
        changes, _ = process_holder(block_holder, self.processor)
        self.assertEqual(0, len(changes.upserted))
        r1 = block_holder[self.r1.cell_name].cell
        self.assertEqual({self.r2}, r1.dependencies.implicit)
        self.assertEqual(set(), r1.dependencies.exclude_from_build)

    def test_empty_process_config_file(self):
        # An empty config must be processed without changes or errors.
        my_conf = ''
        block_holder = self._prepare_context(my_conf)
        changes, _ = process_holder(block_holder, self.processor)
        self.assertEqual(0, len(changes.upserted))

    def test_process_config_file_with_unknown_target(self):
        # Left-hand pattern matching no cell produces a warning, no changes.
        my_conf = '''r8.h + r2.cpp r3.cpp'''
        block_holder = self._prepare_context(my_conf)
        changes, biiout = process_holder(block_holder, self.processor)
        self.assertIn('There are no files matching pattern r8.h', str(biiout))
        self.assertEqual(0, len(changes.upserted))

    def test_process_config_file_with_unknown_deps(self):
        # Unknown right-hand dep warns and leaves existing deps untouched.
        my_conf = '''r1.h + r9.cpp'''
        block_holder = self._prepare_context(my_conf)
        block_holder[self.r1.cell_name].cell.dependencies.implicit.add(self.r2)
        changes = ProcessorChanges()
        biiout = OutputStream()
        self.processor.do_process(block_holder, changes, biiout)
        self.assertIn('There are no files matching pattern r9.cpp', str(biiout))
        self.assertEqual(0, len(changes.upserted))
        r1 = block_holder[self.r1.cell_name].cell
        self.assertEqual({self.r2}, r1.dependencies.implicit)

    def test_process_config_file_with_other_block_dep(self):
        # Deps outside this block are rejected with a warning.
        my_conf = '''r1.h + r23.cpp'''
        block_holder = self._prepare_context(my_conf)
        changes, biiout = process_holder(block_holder, self.processor)
        self.assertIn('There are no files matching pattern r23.cpp', str(biiout))
        self.assertEqual(0, len(changes.upserted))
        r1 = block_holder[self.r1.cell_name].cell
        self.assertEqual(set(), r1.dependencies.implicit)

    def test_process_config_file_with_wildcard(self):
        # '*' applies the rule to every cell in the block.
        my_conf = '''* + r2.cpp'''
        block_holder = self._prepare_context(my_conf)
        changes, _ = process_holder(block_holder, self.processor)
        self.assertEqual(0, len(changes.upserted))
        r1 = block_holder[self.r1.cell_name].cell
        self.assertEqual({self.r2}, r1.dependencies.implicit)
        r3 = block_holder[self.r3.cell_name].cell
        self.assertEqual({self.r2}, r3.dependencies.implicit)

    def test_process_config_file_with_partial_wildcard(self):
        # Partial glob on the target side ('*1.h' matches r1.h only).
        my_conf = '''*1.h + r2.cpp r3.cpp'''
        block_holder = self._prepare_context(my_conf)
        changes, _ = process_holder(block_holder, self.processor)
        self.assertEqual(0, len(changes.upserted))
        r1 = block_holder[self.r1.cell_name].cell
        self.assertEqual({self.r2, self.r3}, r1.dependencies.implicit)

    def test_process_config_file_with_partial_wildcard_and_cell_patterns(self):
        # Globs on the dep side expand to matching cells; non-matching
        # literal deps still warn.
        my_conf = '''*1.h + *.cpp r23.cpp'''
        block_holder = self._prepare_context(my_conf)
        changes, biiout = process_holder(block_holder, self.processor)
        self.assertIn('There are no files matching pattern r23.cpp', str(biiout))
        self.assertEqual(0, len(changes.upserted))
        r1 = block_holder[self.r1.cell_name].cell
        self.assertEqual({self.r2, self.r3}, r1.dependencies.implicit)
| {
"content_hash": "faa142f22cbeea3f3ee1a7f8cd7bfad4",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 99,
"avg_line_length": 46.8235294117647,
"alnum_prop": 0.6670854271356784,
"repo_name": "drodri/common",
"id": "7aae2b443851703ad313d47d9558b3a4f16895ac",
"size": "7960",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "test/edition/processors/dependency_configuration_processor_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3157300"
},
{
"name": "C++",
"bytes": "4667113"
},
{
"name": "CMake",
"bytes": "25379"
},
{
"name": "FORTRAN",
"bytes": "3691"
},
{
"name": "Java",
"bytes": "4201"
},
{
"name": "JavaScript",
"bytes": "172849"
},
{
"name": "Makefile",
"bytes": "6333"
},
{
"name": "Objective-C",
"bytes": "826"
},
{
"name": "Python",
"bytes": "714678"
},
{
"name": "Shell",
"bytes": "645"
}
],
"symlink_target": ""
} |
import typing as tp

# Suffix -> multiplier in seconds.  Order matters: the first suffix found
# in the input string wins.
TIME_MODIFIERS = [
    ('s', 1),
    ('m', 60),
    ('h', 60 * 60),
    ('d', 24 * 60 * 60),
    ('w', 7 * 24 * 60 * 60)
]


def parse_time_string(s: tp.Union[int, float, str]) -> float:
    """
    Convert a human-readable time string into seconds.

    Numbers pass through unchanged.  Strings may carry one of the unit
    suffixes 's' (seconds), 'm' (minutes), 'h' (hours), 'd' (days) or
    'w' (weeks); everything before the first recognised suffix is taken
    as the amount, so '30m' and '30 min' both give 1800.  A plain
    numeric string is interpreted as seconds.

    .. warning:: This does not handle fractions of a second!

    :param s: time string, or a numeric value already in seconds
    :return: value in seconds
    """
    if isinstance(s, (int, float)):
        return s
    for suffix, seconds_per_unit in TIME_MODIFIERS:
        position = s.find(suffix)
        if position != -1:
            return float(s[:position]) * seconds_per_unit
    return float(s)
| {
"content_hash": "437ef6fcffcc380d4faa5b6050333438",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 85,
"avg_line_length": 21.444444444444443,
"alnum_prop": 0.5595854922279793,
"repo_name": "piotrmaslanka/satella",
"id": "329ac8636abaace5a8b519ed4abf8dad7a2397c9",
"size": "772",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "satella/time/parse.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "513"
},
{
"name": "Python",
"bytes": "849315"
},
{
"name": "Shell",
"bytes": "46"
}
],
"symlink_target": ""
} |
import random
from tests.ggrc import TestCase
from freezegun import freeze_time
import os
from mock import patch
from ggrc import notification
from ggrc.models import Person
from tests.ggrc_workflows.generator import WorkflowsGenerator
from tests.ggrc.api_helper import Api
from tests.ggrc.generator import GgrcGenerator
from ggrc_workflows import views
# On CI, fix the RNG seed so failing runs can be reproduced.
# NOTE(review): environment values are strings, so any non-empty value of
# TRAVIS (even "false") enables this branch — confirm that is intended.
if os.environ.get('TRAVIS', False):
    random.seed(1)  # so we can reproduce the tests if needed
class TestRecurringCycleNotifications(TestCase):
    """Tests for upcoming-cycle email notifications on recurring workflows.

    Uses freezegun to move the clock around a quarterly workflow whose
    first cycle starts 2015-02-05 (relative day 5 of month 2): the
    assignee should appear in the notification data only within the
    notice window before the cycle start.
    """

    def setUp(self):
        TestCase.setUp(self)
        self.api = Api()
        self.generator = WorkflowsGenerator()
        self.ggrc_generator = GgrcGenerator()
        # A person with admin role who owns the workflow and its tasks.
        _, self.assignee = self.ggrc_generator.generate_person(
            user_role="gGRC Admin")
        self.create_test_cases()

    def tearDown(self):
        pass

    def test_cycle_starts_in_less_than_X_days(self):
        # Activate the workflow, then check that notifications appear
        # close to the cycle start but not a month in advance.
        with freeze_time("2015-02-01"):
            _, wf = self.generator.generate_workflow(self.quarterly_wf_1)
            response, wf = self.generator.activate_workflow(wf)
            self.assert200(response)
            assignee = Person.query.get(self.assignee.id)
        with freeze_time("2015-01-01"):
            # Too early: no notification yet.
            _, notif_data = notification.get_todays_notifications()
            self.assertNotIn(assignee.email, notif_data)
        with freeze_time("2015-01-29"):
            _, notif_data = notification.get_todays_notifications()
            self.assertIn(assignee.email, notif_data)
        with freeze_time("2015-02-01"):
            _, notif_data = notification.get_todays_notifications()
            self.assertIn(assignee.email, notif_data)

    # TODO: this should mock google email api.
    @patch("ggrc.notification.email.send_email")
    def test_marking_sent_notifications(self, mail_mock):
        # Once the digest has been sent, the same notifications must not
        # be returned again on later days.
        mail_mock.return_value = True
        with freeze_time("2015-02-01"):
            _, wf = self.generator.generate_workflow(self.quarterly_wf_1)
            response, wf = self.generator.activate_workflow(wf)
            self.assert200(response)
            assignee = Person.query.get(self.assignee.id)
        with freeze_time("2015-01-01"):
            _, notif_data = notification.get_todays_notifications()
            self.assertNotIn(assignee.email, notif_data)
        with freeze_time("2015-01-29"):
            # Sending the digest marks the notifications as delivered.
            views.send_todays_digest_notifications()
            _, notif_data = notification.get_todays_notifications()
            self.assertNotIn(assignee.email, notif_data)
        with freeze_time("2015-02-01"):
            _, notif_data = notification.get_todays_notifications()
            self.assertNotIn(assignee.email, notif_data)

    def create_test_cases(self):
        # Fixture payloads shared by the tests above.
        def person_dict(person_id):
            # Minimal Person stub in the API's reference format.
            return {
                "href": "/api/people/%d" % person_id,
                "id": person_id,
                "type": "Person"
            }

        self.quarterly_wf_1 = {
            "title": "quarterly wf 1",
            "description": "",
            "owners": [person_dict(self.assignee.id)],
            "frequency": "quarterly",
            "notify_on_change": True,
            "task_groups": [{
                "title": "tg_1",
                "contact": person_dict(self.assignee.id),
                "task_group_tasks": [{
                    "contact": person_dict(self.assignee.id),
                    "description": self.generator.random_str(100),
                    "relative_start_day": 5,
                    "relative_start_month": 2,
                    "relative_end_day": 25,
                    "relative_end_month": 2,
                },
                ],
            },
            ]
        }

        self.all_workflows = [
            self.quarterly_wf_1,
        ]
| {
"content_hash": "682aba1bc215410fe86b70104dc75c54",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 67,
"avg_line_length": 29.57758620689655,
"alnum_prop": 0.6377149519090645,
"repo_name": "uskudnik/ggrc-core",
"id": "c7d39781cff0cadf6adaaa3f8cabb1897662ec1e",
"size": "3671",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/tests/ggrc_workflows/notifications/test_recurring_cycles.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "232153"
},
{
"name": "Cucumber",
"bytes": "140526"
},
{
"name": "HTML",
"bytes": "6048248"
},
{
"name": "JavaScript",
"bytes": "1878527"
},
{
"name": "Makefile",
"bytes": "5524"
},
{
"name": "Mako",
"bytes": "1720"
},
{
"name": "Python",
"bytes": "1532862"
},
{
"name": "Ruby",
"bytes": "1496"
},
{
"name": "Shell",
"bytes": "11509"
}
],
"symlink_target": ""
} |
import pytest
# Every test in this module is an asyncio coroutine and needs network access.
pytestmark = [pytest.mark.asyncio, pytest.mark.needs_net]
async def test_pagure(get_version):
    """The newest tag of the nvchecker-test project on pagure.io is 0.2."""
    conf = {
        "source": "pagure",
        "pagure": "nvchecker-test",
    }
    assert await get_version("example", conf) == "0.2"
async def test_pagure_with_ignored(get_version):
    """Ignoring version 0.2 makes the checker fall back to 0.1."""
    conf = {
        "source": "pagure",
        "pagure": "nvchecker-test",
        "ignored": "0.2",
    }
    assert await get_version("example", conf) == "0.1"
async def test_pagure_with_alternative_host(get_version):
    """A custom host plus an include_regex selects a Fedora dist-git tag."""
    conf = {
        "source": "pagure",
        "pagure": "rpms/glibc",
        "host": "src.fedoraproject.org",
        "include_regex": r"F-\d+-start",
    }
    assert await get_version("example", conf) == "F-13-start"
| {
"content_hash": "45970f54f724bcf3fa85de3bfab94964",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 57,
"avg_line_length": 28.346153846153847,
"alnum_prop": 0.5780189959294437,
"repo_name": "lilydjwg/nvchecker",
"id": "daeb966b8ffa547abbf5db973dc7b3394bed7c9a",
"size": "820",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_pagure.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "146836"
}
],
"symlink_target": ""
} |
import sys
from setuptools import setup
import versioneer
# Runtime requirements; Python 2 additionally needs the lru_cache backport.
requirements = ['click >= 6.7', 'future >= 0.12.0', 'jinja2 >= 2.9', 'pyyaml >= 3.12', 'six']
if sys.version_info.major == 2:
    requirements.append('backports.functools_lru_cache >= 1.4')

# Read the long description via a context manager so the file handle is
# closed deterministically (the previous open(...).read() leaked it).
with open('README.md') as readme_file:
    long_description = readme_file.read()

setup(
    name="conda-verify",
    version=versioneer.get_version(),
    cmdclass=versioneer.get_cmdclass(),
    author="Anaconda, Inc.",
    author_email="conda@anaconda.com",
    url="https://github.com/conda/conda-verify",
    license="BSD",
    description="A tool for validating conda recipes and conda packages",
    long_description=long_description,
    packages=['conda_verify'],
    install_requires=requirements,
    entry_points='''
[console_scripts]
conda-verify=conda_verify.cli:cli
''',
)
| {
"content_hash": "5f6cd06a3c67e70be094b5c5328aa7f9",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 93,
"avg_line_length": 28.214285714285715,
"alnum_prop": 0.6544303797468355,
"repo_name": "mandeep/conda-verify",
"id": "6ebdcea568fcd1aaf122fe231fb27438b417a154",
"size": "871",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "174458"
}
],
"symlink_target": ""
} |
from conans import ConanFile
class CatchConan(ConanFile):
    """Conan recipe for Catch, a header-only C++ test framework."""

    name = "Catch"
    version = "1.12.2"
    description = "A modern, C++-native, header-only, framework for unit-tests, TDD and BDD"
    author = "philsquared"
    generators = "cmake"
    # Only the single amalgamated header is needed to build the package.
    exports_sources = "single_include/*"
    url = "https://github.com/philsquared/Catch"
    license = "Boost Software License - Version 1.0. http://www.boost.org/LICENSE_1_0.txt"

    def package(self):
        # Ship the single header under include/ in the package layout.
        self.copy(pattern="catch.hpp", src="single_include", dst="include")

    def package_id(self):
        # Header-only: one package id regardless of settings/options.
        self.info.header_only()
| {
"content_hash": "a4820f01cbc06e58795a3d3526d26bbc",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 92,
"avg_line_length": 32.833333333333336,
"alnum_prop": 0.6531302876480541,
"repo_name": "ric2b/Vivaldi-browser",
"id": "d5072afc00e4b0a247935a0ab74cc365a1afec26",
"size": "613",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "update_notifier/thirdparty/wxWidgets/3rdparty/catch/conanfile.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""NApp responsible to update links detail and create a network topology."""
| {
"content_hash": "7d0148874bac646db8e7f376e7aa8427",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 76,
"avg_line_length": 77,
"alnum_prop": 0.7662337662337663,
"repo_name": "kytos/kyco-core-napps",
"id": "f6b6db092affedc49b0506b0e6a7299771835866",
"size": "77",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "napps/kytos/of_topology/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "73608"
}
],
"symlink_target": ""
} |
from .character_span_instance import CharacterSpanInstance, IndexedCharacterSpanInstance
from .mc_question_passage_instance import McQuestionPassageInstance, IndexedMcQuestionPassageInstance
from .question_passage_instance import QuestionPassageInstance, IndexedQuestionPassageInstance
| {
"content_hash": "7badbf6f66752aa67522d6ccfb36f183",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 101,
"avg_line_length": 95.33333333333333,
"alnum_prop": 0.9020979020979021,
"repo_name": "allenai/deep_qa",
"id": "140406d8a33d4aff1e62a485fe7fe9d73b64e9a1",
"size": "286",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "deep_qa/data/instances/reading_comprehension/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "792559"
},
{
"name": "Shell",
"bytes": "4730"
}
],
"symlink_target": ""
} |
import os


def _getenv(name, fallback):
    """Fetch *name* from the environment, falling back to *fallback*."""
    return os.environ.get(name, fallback)


# --- bind address: explicit GUNICORN_BIND wins over host:port pieces ---
_host = _getenv("GUNICORN_HOST", "0.0.0.0")
_port = _getenv("GUNICORN_PORT", "8080")
bind = _getenv("GUNICORN_BIND", '{}:{}'.format(_host, _port))

# --- worker processes and threads --------------------------------------
workers = int(_getenv("GUNICORN_WORKERS", "2"))
threads = int(_getenv("GUNICORN_THREADS", "4"))
worker_class = _getenv("GUNICORN_WORKER_CLASS", "gthread")

# --- logging ------------------------------------------------------------
loglevel = _getenv("GUNICORN_LOGLEVEL", 'info')
errorlog = "-"

# Access logging is on by default; recognised "on" values only.
_truthy = {'1', 'yes', 'on', 'true'}
accesslog = '-' if _getenv('GUNICORN_ACCESSLOG', '1') in _truthy else None

# Log the X-Forwarded-For header instead of the proxy's address.
access_log_format = '%({x-forwarded-for}i)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"'
| {
"content_hash": "80a67074ca26ea9df2c4998f36007852",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 98,
"avg_line_length": 37.18181818181818,
"alnum_prop": 0.6540342298288508,
"repo_name": "ESGF/esgf-docker",
"id": "28bd525b7cc343d3707b95758bc8084d3695e5f5",
"size": "907",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "images/django/gunicorn.conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "19225"
},
{
"name": "HTML",
"bytes": "2922"
},
{
"name": "Jinja",
"bytes": "6965"
},
{
"name": "Python",
"bytes": "8520"
},
{
"name": "Shell",
"bytes": "3557"
},
{
"name": "Smarty",
"bytes": "4021"
}
],
"symlink_target": ""
} |
import time
import logging
from dockerevent import DockerEvent, InvalidDockerEventError
from notifyable import Notifyable
logger = logging.getLogger(__name__)
class EventBroadcaster:
    """Receives raw docker events, records the interesting ones per
    container and propagates derived lifecycle events to registered
    Notifyable listeners.

    A 'die' is only reported as a crash when no 'stop'/'kill' was seen
    shortly before it — a deliberate stop produces an expected 'die'.
    """

    def __init__(self):
        # Event types that are persisted per container; others are ignored.
        self.events_to_watch = ['die', 'stop', 'kill', 'start', 'health_status: healthy', 'health_status: unhealthy']
        self.listeners = []
        # container name -> list of DockerEvent, in arrival order
        self.captured_events = {}

    @staticmethod
    def event_type_matches(ev, event_type):
        # Return an explicit bool (previously returned True or None).
        return ev.type == event_type

    @staticmethod
    def event_type_matches_one_of(ev, event_types):
        return any(ev.type == ev_type for ev_type in event_types)

    @staticmethod
    def event_max_age_in_seconds(ev, max_age_in_seconds, now):
        # True when the event happened at most max_age_in_seconds before *now*.
        return (now - ev.time) <= max_age_in_seconds

    @staticmethod
    def log_propagate_event(event_type, container_name):
        logger.debug('Propagating %s event for container: %s' % (event_type, container_name))

    def register(self, listener):
        """Add a listener; it must implement the Notifyable interface."""
        if not isinstance(listener, Notifyable):
            raise TypeError("listener must be of type Notifyable")
        self.listeners.append(listener)

    def save_docker_event(self, event):
        """Append *event* to the per-container history."""
        container_name = event.container_name
        self.captured_events.setdefault(container_name, []).append(event)
        logger.debug('Saved docker event %s for container %s' % (event, container_name))

    def notify_container_started(self, docker_event):
        self.log_propagate_event('container-started', docker_event.container_name)
        for listener in self.listeners:
            listener.container_started(docker_event)

    def notify_container_became_healthy(self, docker_event):
        self.log_propagate_event('container-became-healthy', docker_event.container_name)
        for listener in self.listeners:
            listener.container_became_healthy(docker_event)

    def notify_container_stopped_by_hand(self, docker_event):
        # NOTE(review): not called from broadcast_event below — confirm
        # whether manual stops are meant to be propagated somewhere.
        self.log_propagate_event('container-stopped-by-hand', docker_event.container_name)
        for listener in self.listeners:
            listener.container_stopped_by_hand(docker_event)

    def notify_container_dead(self, docker_event):
        self.log_propagate_event('container-container-dead', docker_event.container_name)
        for listener in self.listeners:
            listener.container_dead(docker_event)

    def notify_container_became_unhealthy(self, docker_event):
        self.log_propagate_event('container-became-unhealthy', docker_event.container_name)
        for listener in self.listeners:
            listener.container_became_unhealthy(docker_event)

    def broadcast_event(self, event_details):
        """Parse a raw docker event dict and dispatch the matching notify_*."""
        if "status" in event_details:
            try:
                docker_event = DockerEvent.from_dict(event_details)
            except InvalidDockerEventError:
                logger.error('Invalid docker event received %s' % event_details)
            else:
                container_name = docker_event.container_name
                if docker_event.type in self.events_to_watch:
                    self.save_docker_event(docker_event)
                    if docker_event.type == 'start':
                        self.notify_container_started(docker_event)
                    elif docker_event.type == 'health_status: healthy':
                        self.notify_container_became_healthy(docker_event)
                    elif docker_event.type == 'health_status: unhealthy':
                        if self.check_notify_required(container_name):
                            self.notify_container_became_unhealthy(docker_event)
                    # die / stop / kill: only a 'die' without a preceding
                    # stop/kill counts as an unexpected death.
                    elif self.check_notify_required(container_name):
                        self.notify_container_dead(docker_event)

    def check_notify_required(self, container_name):
        """Return True when the container died unexpectedly (recent 'die'
        with no recent 'stop'/'kill')."""
        docker_events = self.captured_events[container_name]
        if not docker_events:
            logger.debug('Skipped event propagation, container %s does not have saved events' % container_name)
            return False
        die_events = self.get_die_events_from_last_period(container_name)
        if not die_events:
            logger.debug(
                'Skipped event propagation, container %s does not have \'die\' events from the last period' % container_name)
            return False
        stop_or_kill_events = self.get_stop_or_kill_events_from_last_period(container_name)
        if stop_or_kill_events:
            # Fixed log text: this branch fires when stop/kill events WERE
            # found (the old message claimed the opposite).
            logger.debug(
                'Skipped event propagation, container %s has \'stop\' / \'kill\' events from the last period' % container_name)
            return False
        else:
            return True

    def get_die_events_from_last_period(self, container_name):
        """Return 'die' events for *container_name* at most 5 seconds old."""
        docker_events = self.captured_events[container_name]
        now = time.time()
        # Materialise as a list: under Python 3 `filter` returns a lazy
        # iterator that is always truthy, which silently broke the
        # emptiness checks in check_notify_required.
        return [e for e in docker_events
                if EventBroadcaster.event_type_matches(e, 'die')
                and EventBroadcaster.event_max_age_in_seconds(e, 5, now)]

    def get_stop_or_kill_events_from_last_period(self, container_name):
        """Return 'stop'/'kill' events for *container_name* at most 12 seconds old."""
        docker_events = self.captured_events[container_name]
        now = time.time()
        return [e for e in docker_events
                if EventBroadcaster.event_type_matches_one_of(e, ['stop', 'kill'])
                and EventBroadcaster.event_max_age_in_seconds(e, 12, now)]
| {
"content_hash": "6e4a9a55c0e097115edd1b2e5f1c7469",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 137,
"avg_line_length": 43.68939393939394,
"alnum_prop": 0.6330847927865442,
"repo_name": "szyszy/dockermon",
"id": "7cae83dc99d9fad062635093ad473eda48baeccb",
"size": "5767",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "eventbroadcaster.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "170"
},
{
"name": "Python",
"bytes": "34901"
},
{
"name": "Shell",
"bytes": "384"
}
],
"symlink_target": ""
} |
"""
code from https://github.com/TyMaszWeb/django-template-finder
"""
import fnmatch
import logging
import os
import re
from django.conf import settings
from django.utils.text import capfirst
# Compatibility shims.
# importlib.import_module is stdlib on modern Pythons; fall back to
# Django's bundled copy on very old versions.
try:
    from importlib import import_module
except ImportError:
    from django.utils.importlib import import_module
# String types differ between Python 2 and 3; prefer Django's vendored six.
try:
    from django.utils.six import string_types
except ImportError:
    string_types = (str,)
# Django >= 1.8 has template Engines; on older versions provide a stub
# whose get_default() returns None so loaders can be built the old way.
try:
    from django.template import Engine
except ImportError:
    class Engine(object):
        @staticmethod
        def get_default():
            return None

# Public API of this module.
__all__ = ('find_all_templates', 'flatten_template_loaders', 'template_choices')

LOGGER = logging.getLogger('templatefinder')
def find_all_templates(pattern='*.html', ignore_private=True):
    """
    Finds all Django templates matching given glob in all TEMPLATE_LOADERS

    :param str pattern: `glob <http://docs.python.org/2/library/glob.html>`_
                        to match
    :param bool ignore_private: skip template files whose basename starts
                                with an underscore.
    :return: sorted list of unique template paths relative to their
             template directory.

    .. important:: At the moment egg loader is not supported.
    """
    templates = []
    template_loaders = flatten_template_loaders(settings.TEMPLATE_LOADERS)
    for loader_name in template_loaders:
        module, klass = loader_name.rsplit('.', 1)
        if loader_name in (
            'django.template.loaders.app_directories.Loader',
            'django.template.loaders.filesystem.Loader',
        ):
            loader_class = getattr(import_module(module), klass)
            # Django >= 1.8 loaders take the Engine in their constructor.
            if getattr(loader_class, '_accepts_engine_in_init', False):
                loader = loader_class(Engine.get_default())
            else:
                loader = loader_class()
            # Renamed from `dir`, which shadowed the builtin.
            for template_dir in loader.get_template_sources(''):
                for root, dirnames, filenames in os.walk(template_dir):
                    for basename in filenames:
                        if ignore_private and basename.startswith("_"):
                            continue
                        filename = os.path.join(root, basename)
                        # Path relative to the template dir (+1 drops the
                        # path separator).
                        rel_filename = filename[len(template_dir) + 1:]
                        if fnmatch.fnmatch(filename, pattern) or \
                                fnmatch.fnmatch(basename, pattern) or \
                                fnmatch.fnmatch(rel_filename, pattern):
                            templates.append(rel_filename)
        else:
            LOGGER.debug('%s is not supported' % loader_name)
    return sorted(set(templates))
def flatten_template_loaders(templates):
    """
    Recursively unwrap a possibly-nested collection of template loaders
    into one flat stream of dotted-path strings.

    :param templates: template loaders to unwrap
    :return: template loaders as an iterable of strings.
    :rtype: generator
    """
    for entry in templates:
        if isinstance(entry, string_types):
            yield entry
        else:
            # Nested collection: recurse and re-emit its leaves.
            for nested in flatten_template_loaders(entry):
                yield nested
def template_choices(templates, display_names=None, suffix=False):
    """
    Pair each template path with a human-friendly display title.

    Titles come from *display_names* when given, else from the project's
    ``TEMPLATEFINDER_DISPLAY_NAMES`` setting, else they are derived from
    the file name (extension stripped unless *suffix* is True, most
    punctuation turned into spaces, first letter capitalised).

    .. note:: The result is a lazy generator; turn it into a ``list``,
              ``tuple`` or ``set`` if it must be consumed more than once.

    :param list templates: iterable of template paths, as returned by
                           `find_all_templates`
    :param display_names: optional mapping of template path -> title
    :type display_names: dictionary or None
    :param bool suffix: keep the file extension in derived titles
    :return: iterable of (value, display text) two-tuples
    :rtype: generator expression
    """
    # Allow for global template names, as well as usage-local ones.
    if display_names is None:
        display_names = getattr(settings, 'TEMPLATEFINDER_DISPLAY_NAMES', {})

    to_space_re = re.compile(r'[^a-zA-Z0-9\-]+')

    def fix_display_title(template_path):
        if template_path in display_names:
            return display_names[template_path]
        # Final path component; rpartition works even without any '/'.
        final_part = template_path.rpartition('/')[-1]
        if suffix:
            return capfirst(final_part)
        # Drop the extension, then map punctuation (except '-') to spaces.
        stem = final_part.rpartition('.')[0]
        return capfirst(to_space_re.sub(' ', stem))

    return ((tpl, fix_display_title(tpl)) for tpl in templates)
"content_hash": "ed2f7ca0a6f3b44b094fad3064581a6c",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 80,
"avg_line_length": 36.5735294117647,
"alnum_prop": 0.625653397667873,
"repo_name": "amboycharlie/Child-Friendly-LCMS",
"id": "b5005fb026d285f1257e979604eef82409f58522",
"size": "4974",
"binary": false,
"copies": "2",
"ref": "refs/heads/Child-Friendly-LCMS-0.5",
"path": "leonardo/utils/templates.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "119867"
},
{
"name": "HTML",
"bytes": "229025"
},
{
"name": "JavaScript",
"bytes": "184465"
},
{
"name": "Python",
"bytes": "585907"
},
{
"name": "Shell",
"bytes": "4253"
}
],
"symlink_target": ""
} |
import warnings
from sqlalchemy.test.testing import eq_, assert_raises, assert_raises_message
from sqlalchemy import *
from sqlalchemy import exc as sa_exc, util
from sqlalchemy.orm import *
from sqlalchemy.orm import exc as orm_exc
from sqlalchemy.test import testing, engines
from sqlalchemy.util import function_named
from test.orm import _base, _fixtures
from sqlalchemy.test.schema import Table, Column
class O2MTest(_base.MappedTest):
    """deals with inheritance and one-to-many relationships"""

    @classmethod
    def define_tables(cls, metadata):
        # Three-level joined-table inheritance foo <- bar <- blub, plus
        # blub.foo_id giving a separate many-to-one back to the base table.
        global foo, bar, blub
        foo = Table('foo', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('data', String(20)))

        bar = Table('bar', metadata,
            Column('id', Integer, ForeignKey('foo.id'), primary_key=True),
            Column('data', String(20)))

        blub = Table('blub', metadata,
            Column('id', Integer, ForeignKey('bar.id'), primary_key=True),
            Column('foo_id', Integer, ForeignKey('foo.id'), nullable=False),
            Column('data', String(20)))

    def test_basic(self):
        # Map the hierarchy with a relationship() from the deepest
        # subclass back to the base class, flush, reload, and compare
        # reprs of the persisted vs. reloaded objects.
        class Foo(object):
            def __init__(self, data=None):
                self.data = data
            def __repr__(self):
                return "Foo id %d, data %s" % (self.id, self.data)
        mapper(Foo, foo)

        class Bar(Foo):
            def __repr__(self):
                return "Bar id %d, data %s" % (self.id, self.data)

        mapper(Bar, bar, inherits=Foo)

        class Blub(Bar):
            def __repr__(self):
                return "Blub id %d, data %s" % (self.id, self.data)

        mapper(Blub, blub, inherits=Bar, properties={
            'parent_foo':relationship(Foo)
        })

        sess = create_session()
        b1 = Blub("blub #1")
        b2 = Blub("blub #2")
        f = Foo("foo #1")
        sess.add(b1)
        sess.add(b2)
        sess.add(f)
        b1.parent_foo = f
        b2.parent_foo = f
        sess.flush()
        compare = ','.join([repr(b1), repr(b2), repr(b1.parent_foo), repr(b2.parent_foo)])
        sess.expunge_all()
        l = sess.query(Blub).all()
        result = ','.join([repr(l[0]), repr(l[1]), repr(l[0].parent_foo), repr(l[1].parent_foo)])
        print compare
        print result
        self.assert_(compare == result)
        self.assert_(l[0].parent_foo.data == 'foo #1' and l[1].parent_foo.data == 'foo #1')
class FalseDiscriminatorTest(_base.MappedTest):
    """Boolean False must work as a polymorphic identity, both on the
    base mapper and on the subclass (it must not be confused with an
    unset/None discriminator)."""

    @classmethod
    def define_tables(cls, metadata):
        global t1
        t1 = Table('t1', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('type', Boolean, nullable=False))

    def test_false_on_sub(self):
        # Subclass carries identity False; a reloaded row typed False
        # must come back as the subclass.
        class Foo(object):pass
        class Bar(Foo):pass
        mapper(Foo, t1, polymorphic_on=t1.c.type, polymorphic_identity=True)
        mapper(Bar, inherits=Foo, polymorphic_identity=False)
        sess = create_session()
        b1 = Bar()
        sess.add(b1)
        sess.flush()
        assert b1.type is False
        sess.expunge_all()
        assert isinstance(sess.query(Foo).one(), Bar)

    def test_false_on_base(self):
        # Base class carries identity False; the discriminator must be
        # persisted as False, not left NULL.
        class Ding(object):pass
        class Bat(Ding):pass
        mapper(Ding, t1, polymorphic_on=t1.c.type, polymorphic_identity=False)
        mapper(Bat, inherits=Ding, polymorphic_identity=True)
        sess = create_session()
        d1 = Ding()
        sess.add(d1)
        sess.flush()
        assert d1.type is False
        sess.expunge_all()
        assert sess.query(Ding).one() is not None
class PolymorphicSynonymTest(_base.MappedTest):
    """A synonym declared with ``map_column=True`` on the base mapper
    should be queryable and readable through the subclass as well."""
    @classmethod
    def define_tables(cls, metadata):
        global t1, t2
        t1 = Table('t1', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('type', String(10), nullable=False),
            Column('info', String(255)))
        t2 = Table('t2', metadata,
            Column('id', Integer, ForeignKey('t1.id'), primary_key=True),
            Column('data', String(10), nullable=False))
    def test_polymorphic_synonym(self):
        class T1(_fixtures.Base):
            def info(self):
                return "THE INFO IS:" + self._info
            def _set_info(self, x):
                self._info = x
            info = property(info, _set_info)
        class T2(T1):pass
        # 'info' column is remapped to '_info'; the 'info' attribute is the
        # user-level property wrapped by the synonym
        mapper(T1, t1, polymorphic_on=t1.c.type, polymorphic_identity='t1', properties={
            'info':synonym('_info', map_column=True)
        })
        mapper(T2, t2, inherits=T1, polymorphic_identity='t2')
        sess = create_session()
        at1 = T1(info='at1')
        at2 = T2(info='at2', data='t2 data')
        sess.add(at1)
        sess.add(at2)
        sess.flush()
        sess.expunge_all()
        # filtering on the synonym via the subclass hits the mapped column;
        # attribute access goes through the property
        eq_(sess.query(T2).filter(T2.info=='at2').one(), at2)
        eq_(at2.info, "THE INFO IS:at2")
class CascadeTest(_base.MappedTest):
    """Test that cascades on polymorphic relationships continue
    cascading along the path of the instance's mapper, not
    the base mapper."""
    @classmethod
    def define_tables(cls, metadata):
        global t1, t2, t3, t4
        t1= Table('t1', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('data', String(30))
            )
        t2 = Table('t2', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('t1id', Integer, ForeignKey('t1.id')),
            Column('type', String(30)),
            Column('data', String(30))
            )
        t3 = Table('t3', metadata,
            Column('id', Integer, ForeignKey('t2.id'), primary_key=True),
            Column('moredata', String(30)))
        t4 = Table('t4', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('t3id', Integer, ForeignKey('t3.id')),
            Column('data', String(30)))
    def test_cascade(self):
        class T1(_fixtures.Base):
            pass
        class T2(_fixtures.Base):
            pass
        class T3(T2):
            pass
        class T4(_fixtures.Base):
            pass
        mapper(T1, t1, properties={
            't2s':relationship(T2, cascade="all")
        })
        mapper(T2, t2, polymorphic_on=t2.c.type, polymorphic_identity='t2')
        # the subclass T3 carries its own cascading relationship to T4
        mapper(T3, t3, inherits=T2, polymorphic_identity='t3', properties={
            't4s':relationship(T4, cascade="all")
        })
        mapper(T4, t4)
        sess = create_session()
        t1_1 = T1(data='t1')
        t3_1 = T3(data ='t3', moredata='t3')
        t2_1 = T2(data='t2')
        t1_1.t2s.append(t2_1)
        t1_1.t2s.append(t3_1)
        t4_1 = T4(data='t4')
        t3_1.t4s.append(t4_1)
        sess.add(t1_1)
        # save-update cascade must reach t4_1 via T3's mapper, even though
        # the relationship on T1 targets the base class T2
        assert t4_1 in sess.new
        sess.flush()
        sess.delete(t1_1)
        # delete cascade likewise must follow T3's 't4s' relationship
        assert t4_1 in sess.deleted
        sess.flush()
class M2OUseGetTest(_base.MappedTest):
    """A many-to-one to a joined-table subclass should still use the
    identity-map "get" optimization when the lazy-load join condition
    matches the base table's primary key (see [ticket:1186])."""
    @classmethod
    def define_tables(cls, metadata):
        Table('base', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('type', String(30))
        )
        Table('sub', metadata,
            Column('id', Integer, ForeignKey('base.id'), primary_key=True),
        )
        Table('related', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('sub_id', Integer, ForeignKey('sub.id')),
        )
    @testing.resolve_artifact_names
    def test_use_get(self):
        # test [ticket:1186]
        class Base(_base.BasicEntity):
            pass
        class Sub(Base):
            pass
        class Related(Base):
            pass
        mapper(Base, base, polymorphic_on=base.c.type, polymorphic_identity='b')
        mapper(Sub, sub, inherits=Base, polymorphic_identity='s')
        mapper(Related, related, properties={
            # previously, this was needed for the comparison to occur:
            # the 'primaryjoin' looks just like "Sub"'s "get" clause (based on the Base id),
            # and foreign_keys since that join condition doesn't actually have any fks in it
            #'sub':relationship(Sub, primaryjoin=base.c.id==related.c.sub_id, foreign_keys=related.c.sub_id)
            # now we can use this:
            'sub':relationship(Sub)
        })
        # the lazy loader must have recognized the condition as a "get"
        assert class_mapper(Related).get_property('sub').strategy.use_get
        sess = create_session()
        s1 = Sub()
        r1 = Related(sub=s1)
        sess.add(r1)
        sess.flush()
        sess.expunge_all()
        r1 = sess.query(Related).first()
        s1 = sess.query(Sub).first()
        # accessing r1.sub should be satisfied from the identity map with
        # zero SQL emitted
        def go():
            assert r1.sub
        self.assert_sql_count(testing.db, go, 0)
class GetTest(_base.MappedTest):
    """Test Query.get() against a three-level joined-table hierarchy,
    both with and without polymorphic loading configured, including
    identity-map hits and requested-class mismatches."""
    @classmethod
    def define_tables(cls, metadata):
        global foo, bar, blub
        foo = Table('foo', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('type', String(30)),
            Column('data', String(20)))
        bar = Table('bar', metadata,
            Column('id', Integer, ForeignKey('foo.id'), primary_key=True),
            Column('data', String(20)))
        blub = Table('blub', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('foo_id', Integer, ForeignKey('foo.id')),
            Column('bar_id', Integer, ForeignKey('bar.id')),
            Column('data', String(20)))
    @classmethod
    def setup_classes(cls):
        class Foo(_base.BasicEntity):
            pass
        class Bar(Foo):
            pass
        class Blub(Bar):
            pass
    def test_get_polymorphic(self):
        self._do_get_test(True)
    def test_get_nonpolymorphic(self):
        self._do_get_test(False)
    @testing.resolve_artifact_names
    def _do_get_test(self, polymorphic):
        if polymorphic:
            mapper(Foo, foo, polymorphic_on=foo.c.type, polymorphic_identity='foo')
            mapper(Bar, bar, inherits=Foo, polymorphic_identity='bar')
            mapper(Blub, blub, inherits=Bar, polymorphic_identity='blub')
        else:
            mapper(Foo, foo)
            mapper(Bar, bar, inherits=Foo)
            mapper(Blub, blub, inherits=Bar)
        sess = create_session()
        f = Foo()
        b = Bar()
        bl = Blub()
        sess.add(f)
        sess.add(b)
        sess.add(bl)
        sess.flush()
        if polymorphic:
            # every lookup should be satisfied from the identity map with
            # zero SQL emitted
            def go():
                assert sess.query(Foo).get(f.id) is f
                assert sess.query(Foo).get(b.id) is b
                assert sess.query(Foo).get(bl.id) is bl
                assert sess.query(Bar).get(b.id) is b
                assert sess.query(Bar).get(bl.id) is bl
                assert sess.query(Blub).get(bl.id) is bl
                # test class mismatches - item is present
                # in the identity map but we requested a subclass
                assert sess.query(Blub).get(f.id) is None
                assert sess.query(Blub).get(b.id) is None
                assert sess.query(Bar).get(f.id) is None
            self.assert_sql_count(testing.db, go, 0)
        else:
            # this is testing the 'wrong' behavior of using get()
            # polymorphically with mappers that are not configured to be
            # polymorphic.  the important part being that get() always
            # returns an instance of the query's type.
            def go():
                assert sess.query(Foo).get(f.id) is f
                bb = sess.query(Foo).get(b.id)
                # bugfix: assert against the freshly loaded ``bb``;
                # ``isinstance(b, Foo)`` was trivially true (Bar subclasses
                # Foo) and tested nothing about the loaded object
                assert isinstance(bb, Foo) and bb.id==b.id
                bll = sess.query(Foo).get(bl.id)
                assert isinstance(bll, Foo) and bll.id==bl.id
                assert sess.query(Bar).get(b.id) is b
                bll = sess.query(Bar).get(bl.id)
                assert isinstance(bll, Bar) and bll.id == bl.id
                assert sess.query(Blub).get(bl.id) is bl
            self.assert_sql_count(testing.db, go, 3)
class EagerLazyTest(_base.MappedTest):
    """tests eager load/lazy load of child items off inheritance mappers, tests that
    LazyLoader constructs the right query condition."""
    @classmethod
    def define_tables(cls, metadata):
        global foo, bar, bar_foo
        foo = Table('foo', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('data', String(30)))
        bar = Table('bar', metadata,
            Column('id', Integer, ForeignKey('foo.id'), primary_key=True),
            Column('data', String(30)))
        bar_foo = Table('bar_foo', metadata,
            Column('bar_id', Integer, ForeignKey('bar.id')),
            Column('foo_id', Integer, ForeignKey('foo.id'))
            )
    @testing.fails_on('maxdb', 'FIXME: unknown')
    def test_basic(self):
        class Foo(object): pass
        class Bar(Foo): pass
        foos = mapper(Foo, foo)
        bars = mapper(Bar, bar, inherits=foos)
        # same secondary-table relationship mapped twice: once lazily
        # loaded, once joined-eager loaded
        bars.add_property('lazy', relationship(foos, bar_foo, lazy='select'))
        bars.add_property('eager', relationship(foos, bar_foo, lazy='joined'))
        foo.insert().execute(data='foo1')
        bar.insert().execute(id=1, data='bar1')
        foo.insert().execute(data='foo2')
        bar.insert().execute(id=2, data='bar2')
        foo.insert().execute(data='foo3') #3
        foo.insert().execute(data='foo4') #4
        bar_foo.insert().execute(bar_id=1, foo_id=3)
        bar_foo.insert().execute(bar_id=2, foo_id=4)
        sess = create_session()
        q = sess.query(Bar)
        # each loading style must see exactly the one association row for
        # the first Bar, no spill-over from the inheritance join
        self.assert_(len(q.first().lazy) == 1)
        self.assert_(len(q.first().eager) == 1)
class EagerTargetingTest(_base.MappedTest):
    """test a scenario where joined table inheritance might be
    confused as an eagerly loaded joined table."""
    @classmethod
    def define_tables(cls, metadata):
        Table('a_table', metadata,
            Column('id', Integer, primary_key=True),
            Column('name', String(50)),
            Column('type', String(30), nullable=False),
            Column('parent_id', Integer, ForeignKey('a_table.id'))
        )
        Table('b_table', metadata,
            Column('id', Integer, ForeignKey('a_table.id'), primary_key=True),
            Column('b_data', String(50)),
        )
    @testing.resolve_artifact_names
    def test_adapt_stringency(self):
        class A(_base.ComparableEntity):
            pass
        class B(A):
            pass
        # self-referential 'children' plus a column_property on the
        # subclass exercises column adaptation during eager loads
        mapper(A, a_table, polymorphic_on=a_table.c.type, polymorphic_identity='A',
                properties={
                'children': relationship(A, order_by=a_table.c.name)
            })
        mapper(B, b_table, inherits=A, polymorphic_identity='B', properties={
                'b_derived':column_property(b_table.c.b_data + "DATA")
                })
        sess=create_session()
        b1=B(id=1, name='b1',b_data='i')
        sess.add(b1)
        sess.flush()
        b2=B(id=2, name='b2', b_data='l', parent_id=1)
        sess.add(b2)
        sess.flush()
        bid=b1.id
        sess.expunge_all()
        # lazy-loaded children
        node = sess.query(B).filter(B.id==bid).all()[0]
        eq_(node, B(id=1, name='b1',b_data='i'))
        eq_(node.children[0], B(id=2, name='b2',b_data='l'))
        sess.expunge_all()
        # eager-loaded children: the joinedload join must not be confused
        # with the b_table inheritance join
        node = sess.query(B).options(joinedload(B.children)).filter(B.id==bid).all()[0]
        eq_(node, B(id=1, name='b1',b_data='i'))
        eq_(node.children[0], B(id=2, name='b2',b_data='l'))
class FlushTest(_base.MappedTest):
    """test dependency sorting among inheriting mappers"""
    @classmethod
    def define_tables(cls, metadata):
        Table('users', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('email', String(128)),
            Column('password', String(16)),
        )
        Table('roles', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('description', String(32))
        )
        Table('user_roles', metadata,
            Column('user_id', Integer, ForeignKey('users.id'), primary_key=True),
            Column('role_id', Integer, ForeignKey('roles.id'), primary_key=True)
        )
        Table('admins', metadata,
            Column('admin_id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('user_id', Integer, ForeignKey('users.id'))
        )
    @testing.resolve_artifact_names
    def test_one(self):
        class User(object):pass
        class Role(object):pass
        class Admin(User):pass
        role_mapper = mapper(Role, roles)
        user_mapper = mapper(User, users, properties = {
                'roles' : relationship(Role, secondary=user_roles, lazy='joined')
            }
        )
        admin_mapper = mapper(Admin, admins, inherits=user_mapper)
        sess = create_session()
        adminrole = Role()
        sess.add(adminrole)
        sess.flush()
        # create an Admin, and append a Role.  the dependency processors
        # corresponding to the "roles" attribute for the Admin mapper and the User mapper
        # have to ensure that two dependency processors dont fire off and insert the
        # many to many row twice.
        a = Admin()
        a.roles.append(adminrole)
        a.password = 'admin'
        sess.add(a)
        sess.flush()
        assert user_roles.count().scalar() == 1
    @testing.resolve_artifact_names
    def test_two(self):
        class User(object):
            def __init__(self, email=None, password=None):
                self.email = email
                self.password = password
        class Role(object):
            def __init__(self, description=None):
                self.description = description
        class Admin(User):pass
        role_mapper = mapper(Role, roles)
        user_mapper = mapper(User, users, properties = {
                'roles' : relationship(Role, secondary=user_roles, lazy='joined')
            }
        )
        admin_mapper = mapper(Admin, admins, inherits=user_mapper)
        # create roles
        adminrole = Role('admin')
        sess = create_session()
        sess.add(adminrole)
        sess.flush()
        # create admin user
        a = Admin(email='tim', password='admin')
        a.roles.append(adminrole)
        sess.add(a)
        sess.flush()
        # an UPDATE to the base-table column must not re-insert the
        # many-to-many row
        a.password = 'sadmin'
        sess.flush()
        assert user_roles.count().scalar() == 1
class VersioningTest(_base.MappedTest):
    """Test version_id_col behavior across a joined-table inheritance
    hierarchy: stale UPDATEs and DELETEs issued from a second session
    must raise ConcurrentModificationError."""
    @classmethod
    def define_tables(cls, metadata):
        Table('base', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('version_id', Integer, nullable=False),
            Column('value', String(40)),
            Column('discriminator', Integer, nullable=False)
        )
        Table('subtable', metadata,
            Column('id', None, ForeignKey('base.id'), primary_key=True),
            Column('subdata', String(50))
        )
        Table('stuff', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('parent', Integer, ForeignKey('base.id'))
        )
    @testing.emits_warning(r".*updated rowcount")
    @engines.close_open_connections
    @testing.resolve_artifact_names
    def test_save_update(self):
        class Base(_fixtures.Base):
            pass
        class Sub(Base):
            pass
        class Stuff(Base):
            pass
        mapper(Stuff, stuff)
        mapper(Base, base,
                    polymorphic_on=base.c.discriminator,
                    version_id_col=base.c.version_id,
                    polymorphic_identity=1, properties={
            'stuff':relationship(Stuff)
        })
        mapper(Sub, subtable, inherits=Base, polymorphic_identity=2)
        sess = create_session()
        b1 = Base(value='b1')
        s1 = Sub(value='sub1', subdata='some subdata')
        sess.add(b1)
        sess.add(s1)
        sess.flush()
        # load the same row in a second session; sess's flush below bumps
        # the version, making sess2's copy stale
        sess2 = create_session()
        s2 = sess2.query(Base).get(s1.id)
        s2.subdata = 'sess2 subdata'
        s1.subdata = 'sess1 subdata'
        sess.flush()
        # stale read with lockmode must fail
        assert_raises(orm_exc.ConcurrentModificationError,
                        sess2.query(Base).with_lockmode('read').get,
                        s1.id)
        if not testing.db.dialect.supports_sane_rowcount:
            sess2.flush()
        else:
            # stale UPDATE must fail where rowcounts are trustworthy
            assert_raises(orm_exc.ConcurrentModificationError, sess2.flush)
        # refresh to pick up the current version, then updating succeeds
        sess2.refresh(s2)
        if testing.db.dialect.supports_sane_rowcount:
            assert s2.subdata == 'sess1 subdata'
        s2.subdata = 'sess2 subdata'
        sess2.flush()
    @testing.emits_warning(r".*updated rowcount")
    @testing.resolve_artifact_names
    def test_delete(self):
        class Base(_fixtures.Base):
            pass
        class Sub(Base):
            pass
        mapper(Base, base,
                    polymorphic_on=base.c.discriminator,
                    version_id_col=base.c.version_id, polymorphic_identity=1)
        mapper(Sub, subtable, inherits=Base, polymorphic_identity=2)
        sess = create_session()
        b1 = Base(value='b1')
        s1 = Sub(value='sub1', subdata='some subdata')
        s2 = Sub(value='sub2', subdata='some other subdata')
        sess.add(b1)
        sess.add(s1)
        sess.add(s2)
        sess.flush()
        # delete s1 out from under the first session
        sess2 = create_session()
        s3 = sess2.query(Base).get(s1.id)
        sess2.delete(s3)
        sess2.flush()
        # updating an unaffected row still works
        s2.subdata = 'some new subdata'
        sess.flush()
        # updating the deleted row must raise (unless the dialect cannot
        # report accurate rowcounts)
        try:
            s1.subdata = 'some new subdata'
            sess.flush()
            assert not testing.db.dialect.supports_sane_rowcount
        except orm_exc.ConcurrentModificationError, e:
            assert True
class DistinctPKTest(_base.MappedTest):
    """test the construction of mapper.primary_key when an inheriting relationship
    joins on a column other than primary key column."""
    run_inserts = 'once'
    run_deletes = None
    @classmethod
    def define_tables(cls, metadata):
        global person_table, employee_table, Person, Employee
        person_table = Table("persons", metadata,
                Column("id", Integer, primary_key=True, test_needs_autoincrement=True),
                Column("name", String(80)),
                )
        employee_table = Table("employees", metadata,
                Column("id", Integer, primary_key=True, test_needs_autoincrement=True),
                Column("salary", Integer),
                Column("person_id", Integer, ForeignKey("persons.id")),
                )
        class Person(object):
            def __init__(self, name):
                self.name = name
        class Employee(Person): pass
    @classmethod
    def insert_data(cls):
        # employee ids deliberately do NOT line up with person ids, so the
        # tests can tell which column the mapper's primary key resolved to
        person_insert = person_table.insert()
        person_insert.execute(id=1, name='alice')
        person_insert.execute(id=2, name='bob')
        employee_insert = employee_table.insert()
        employee_insert.execute(id=2, salary=250, person_id=1) # alice
        employee_insert.execute(id=3, salary=200, person_id=2) # bob
    def test_implicit(self):
        # default: the base table's pk becomes the sole mapper primary key
        person_mapper = mapper(Person, person_table)
        mapper(Employee, employee_table, inherits=person_mapper)
        assert list(class_mapper(Employee).primary_key) == [person_table.c.id]
    def test_explicit_props(self):
        # distinct attribute names for each id column avoid the warning
        person_mapper = mapper(Person, person_table)
        mapper(Employee, employee_table, inherits=person_mapper,
                        properties={'pid':person_table.c.id,
                                    'eid':employee_table.c.id})
        self._do_test(True)
    def test_explicit_composite_pk(self):
        # a composite pk combining both id columns under one attribute
        # should emit a warning about the conflation
        person_mapper = mapper(Person, person_table)
        mapper(Employee, employee_table,
                    inherits=person_mapper,
                    primary_key=[person_table.c.id, employee_table.c.id])
        assert_raises_message(sa_exc.SAWarning,
                                    r"On mapper Mapper\|Employee\|employees, "
                                    "primary key column 'employees.id' is being "
                                    "combined with distinct primary key column 'persons.id' "
                                    "in attribute 'id'. Use explicit properties to give "
                                    "each column its own mapped attribute name.",
            self._do_test, True
        )
    def test_explicit_pk(self):
        person_mapper = mapper(Person, person_table)
        mapper(Employee, employee_table, inherits=person_mapper, primary_key=[person_table.c.id])
        self._do_test(False)
    def _do_test(self, composite):
        session = create_session()
        query = session.query(Employee)
        if composite:
            alice1 = query.get([1,2])
            bob = query.get([2,3])
            alice2 = query.get([1,2])
        else:
            alice1 = query.get(1)
            bob = query.get(2)
            alice2 = query.get(1)
        assert alice1.name == alice2.name == 'alice'
        assert bob.name == 'bob'
class SyncCompileTest(_base.MappedTest):
    """test that syncrules compile properly on custom inherit conds"""
    @classmethod
    def define_tables(cls, metadata):
        global _a_table, _b_table, _c_table
        _a_table = Table('a', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('data1', String(128))
        )
        _b_table = Table('b', metadata,
            Column('a_id', Integer, ForeignKey('a.id'), primary_key=True),
            Column('data2', String(128))
        )
        _c_table = Table('c', metadata,
        #        Column('a_id', Integer, ForeignKey('b.a_id'), primary_key=True), #works
            Column('b_a_id', Integer, ForeignKey('b.a_id'), primary_key=True),
            Column('data3', String(128))
        )
    def test_joins(self):
        # exercise every combination of default / explicit inherit
        # conditions, in both operand orders
        for j1 in (None, _b_table.c.a_id==_a_table.c.id, _a_table.c.id==_b_table.c.a_id):
            for j2 in (None, _b_table.c.a_id==_c_table.c.b_a_id,
                                _c_table.c.b_a_id==_b_table.c.a_id):
                self._do_test(j1, j2)
                # wipe data between combinations, children first to honor FKs
                for t in reversed(_a_table.metadata.sorted_tables):
                    t.delete().execute().close()
    def _do_test(self, j1, j2):
        class A(object):
            def __init__(self, **kwargs):
                for key, value in kwargs.items():
                    setattr(self, key, value)
        class B(A):
            pass
        class C(B):
            pass
        mapper(A, _a_table)
        mapper(B, _b_table, inherits=A,
                inherit_condition=j1
                )
        mapper(C, _c_table, inherits=B,
                inherit_condition=j2
                )
        session = create_session()
        a = A(data1='a1')
        session.add(a)
        b = B(data1='b1', data2='b2')
        session.add(b)
        c = C(data1='c1', data2='c2', data3='c3')
        session.add(c)
        session.flush()
        session.expunge_all()
        # each query sees its own class plus subclasses
        assert len(session.query(A).all()) == 3
        assert len(session.query(B).all()) == 2
        assert len(session.query(C).all()) == 1
class OverrideColKeyTest(_base.MappedTest):
    """test overriding of column attributes."""
    @classmethod
    def define_tables(cls, metadata):
        global base, subtable
        base = Table('base', metadata,
            Column('base_id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('data', String(255)),
            Column('sqlite_fixer', String(10))
        )
        subtable = Table('subtable', metadata,
            Column('base_id', Integer, ForeignKey('base.base_id'), primary_key=True),
            Column('subdata', String(255))
        )
    def test_plain(self):
        # control case
        class Base(object):
            pass
        class Sub(Base):
            pass
        mapper(Base, base)
        mapper(Sub, subtable, inherits=Base)
        # Sub gets a "base_id" property using the "base_id"
        # column of both tables.
        eq_(
            class_mapper(Sub).get_property('base_id').columns,
            [base.c.base_id, subtable.c.base_id]
        )
    def test_override_explicit(self):
        # this pattern is what you see when using declarative
        # in particular, here we do a "manual" version of
        # what we'd like the mapper to do.
        class Base(object):
            pass
        class Sub(Base):
            pass
        mapper(Base, base, properties={
            'id':base.c.base_id
        })
        mapper(Sub, subtable, inherits=Base, properties={
            # this is the manual way to do it, is not really
            # possible in declarative
            'id':[base.c.base_id, subtable.c.base_id]
        })
        eq_(
            class_mapper(Sub).get_property('id').columns,
            [base.c.base_id, subtable.c.base_id]
        )
        s1 = Sub()
        s1.id = 10
        sess = create_session()
        sess.add(s1)
        sess.flush()
        assert sess.query(Sub).get(10) is s1
    def test_override_onlyinparent(self):
        # base renames the pk column to 'id'; Sub does not, so Sub ends up
        # with both an inherited 'id' and its own 'base_id' attribute
        class Base(object):
            pass
        class Sub(Base):
            pass
        mapper(Base, base, properties={
            'id':base.c.base_id
        })
        mapper(Sub, subtable, inherits=Base)
        eq_(
            class_mapper(Sub).get_property('id').columns,
            [base.c.base_id]
        )
        eq_(
            class_mapper(Sub).get_property('base_id').columns,
            [subtable.c.base_id]
        )
        s1 = Sub()
        s1.id = 10
        s2 = Sub()
        s2.base_id = 15
        sess = create_session()
        sess.add_all([s1, s2])
        sess.flush()
        # s1 gets '10'
        assert sess.query(Sub).get(10) is s1
        # s2 gets a new id, base_id is overwritten by the ultimate
        # PK col
        assert s2.id == s2.base_id != 15
    def test_override_implicit(self):
        # this is how the pattern looks intuitively when
        # using declarative.
        # fixed as part of [ticket:1111]
        class Base(object):
            pass
        class Sub(Base):
            pass
        mapper(Base, base, properties={
            'id':base.c.base_id
        })
        mapper(Sub, subtable, inherits=Base, properties={
            'id':subtable.c.base_id
        })
        # Sub mapper compilation needs to detect that "base.c.base_id"
        # is renamed in the inherited mapper as "id", even though
        # it has its own "id" property.  Sub's "id" property
        # gets joined normally with the extra column.
        eq_(
            set(class_mapper(Sub).get_property('id').columns),
            set([base.c.base_id, subtable.c.base_id])
        )
        s1 = Sub()
        s1.id = 10
        sess = create_session()
        sess.add(s1)
        sess.flush()
        assert sess.query(Sub).get(10) is s1
    def test_plain_descriptor(self):
        """test that descriptors prevent inheritance from propagating properties to subclasses."""
        class Base(object):
            pass
        class Sub(Base):
            @property
            def data(self):
                return "im the data"
        mapper(Base, base)
        mapper(Sub, subtable, inherits=Base)
        s1 = Sub()
        sess = create_session()
        sess.add(s1)
        sess.flush()
        assert sess.query(Sub).one().data == "im the data"
    def test_custom_descriptor(self):
        """test that descriptors prevent inheritance from propagating properties to subclasses."""
        class MyDesc(object):
            def __get__(self, instance, owner):
                if instance is None:
                    return self
                return "im the data"
        class Base(object):
            pass
        class Sub(Base):
            data = MyDesc()
        mapper(Base, base)
        mapper(Sub, subtable, inherits=Base)
        s1 = Sub()
        sess = create_session()
        sess.add(s1)
        sess.flush()
        assert sess.query(Sub).one().data == "im the data"
    def test_sub_columns_over_base_descriptors(self):
        # a mapped column on the subclass wins over a plain descriptor
        # defined on the base class
        class Base(object):
            @property
            def subdata(self):
                return "this is base"
        class Sub(Base):
            pass
        mapper(Base, base)
        mapper(Sub, subtable, inherits=Base)
        sess = create_session()
        b1 = Base()
        assert b1.subdata == "this is base"
        s1 = Sub()
        s1.subdata = "this is sub"
        assert s1.subdata == "this is sub"
        sess.add_all([s1, b1])
        sess.flush()
        sess.expunge_all()
        assert sess.query(Base).get(b1.base_id).subdata == "this is base"
        assert sess.query(Sub).get(s1.base_id).subdata == "this is sub"
    def test_base_descriptors_over_base_cols(self):
        # a descriptor on the base class wins over the base class's own
        # mapped column of the same name, for base and subclass alike
        class Base(object):
            @property
            def data(self):
                return "this is base"
        class Sub(Base):
            pass
        mapper(Base, base)
        mapper(Sub, subtable, inherits=Base)
        sess = create_session()
        b1 = Base()
        assert b1.data == "this is base"
        s1 = Sub()
        assert s1.data == "this is base"
        sess.add_all([s1, b1])
        sess.flush()
        sess.expunge_all()
        assert sess.query(Base).get(b1.base_id).data == "this is base"
        assert sess.query(Sub).get(s1.base_id).data == "this is base"
class OptimizedLoadTest(_base.MappedTest):
    """tests for the "optimized load" routine."""
    @classmethod
    def define_tables(cls, metadata):
        global base, sub, with_comp
        base = Table('base', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('data', String(50)),
            Column('type', String(50))
        )
        sub = Table('sub', metadata,
            Column('id', Integer, ForeignKey('base.id'), primary_key=True),
            Column('sub', String(50))
        )
        with_comp = Table('with_comp', metadata,
            Column('id', Integer, ForeignKey('base.id'), primary_key=True),
            Column('a', String(10)),
            Column('b', String(10))
        )
    def test_optimized_passes(self):
        """test that the 'optimized load' routine doesn't crash when
        a column in the join condition is not available."""
        class Base(_base.BasicEntity):
            pass
        class Sub(Base):
            pass
        mapper(Base, base, polymorphic_on=base.c.type, polymorphic_identity='base')
        # redefine Sub's "id" to favor the "id" col in the subtable.
        # "id" is also part of the primary join condition
        mapper(Sub, sub, inherits=Base, polymorphic_identity='sub', properties={'id':sub.c.id})
        sess = sessionmaker()()
        s1 = Sub(data='s1data', sub='s1sub')
        sess.add(s1)
        sess.commit()
        sess.expunge_all()
        # load s1 via Base.  s1.id won't populate since it's relative to
        # the "sub" table.  The optimized load kicks in and tries to
        # generate on the primary join, but cannot since "id" is itself unloaded.
        # the optimized load needs to return "None" so regular full-row loading proceeds
        s1 = sess.query(Base).first()
        assert s1.sub == 's1sub'
    def test_column_expression(self):
        """column_property expressions on the subclass must load through
        the optimized-load path."""
        class Base(_base.BasicEntity):
            pass
        class Sub(Base):
            pass
        mapper(Base, base, polymorphic_on=base.c.type, polymorphic_identity='base')
        mapper(Sub, sub, inherits=Base, polymorphic_identity='sub', properties={
            'concat':column_property(sub.c.sub + "|" + sub.c.sub)
        })
        sess = sessionmaker()()
        s1 = Sub(data='s1data', sub='s1sub')
        sess.add(s1)
        sess.commit()
        sess.expunge_all()
        s1 = sess.query(Base).first()
        assert s1.concat == 's1sub|s1sub'
    def test_column_expression_joined(self):
        """a column_property expression that spans base and sub tables
        must bring the base table into the join criterion."""
        class Base(_base.ComparableEntity):
            pass
        class Sub(Base):
            pass
        mapper(Base, base, polymorphic_on=base.c.type, polymorphic_identity='base')
        mapper(Sub, sub, inherits=Base, polymorphic_identity='sub', properties={
            'concat':column_property(base.c.data + "|" + sub.c.sub)
        })
        sess = sessionmaker()()
        s1 = Sub(data='s1data', sub='s1sub')
        s2 = Sub(data='s2data', sub='s2sub')
        s3 = Sub(data='s3data', sub='s3sub')
        sess.add_all([s1, s2, s3])
        sess.commit()
        sess.expunge_all()
        # query a bunch of rows to ensure there's no cartesian
        # product against "base" occurring, it is in fact
        # detecting that "base" needs to be in the join
        # criterion
        eq_(
            sess.query(Base).order_by(Base.id).all(),
            [
                Sub(data='s1data', sub='s1sub', concat='s1data|s1sub'),
                Sub(data='s2data', sub='s2sub', concat='s2data|s2sub'),
                Sub(data='s3data', sub='s3sub', concat='s3data|s3sub')
            ]
        )
    def test_composite_column_joined(self):
        """composite() attributes on the subclass must load through the
        optimized-load path."""
        class Base(_base.ComparableEntity):
            pass
        class WithComp(Base):
            pass
        class Comp(object):
            def __init__(self, a, b):
                self.a = a
                self.b = b
            def __composite_values__(self):
                return self.a, self.b
            def __eq__(self, other):
                return (self.a == other.a) and (self.b == other.b)
        mapper(Base, base, polymorphic_on=base.c.type, polymorphic_identity='base')
        mapper(WithComp, with_comp, inherits=Base, polymorphic_identity='wc', properties={
            'comp': composite(Comp, with_comp.c.a, with_comp.c.b)
        })
        sess = sessionmaker()()
        s1 = WithComp(data='s1data', comp=Comp('ham', 'cheese'))
        s2 = WithComp(data='s2data', comp=Comp('bacon', 'eggs'))
        sess.add_all([s1, s2])
        sess.commit()
        sess.expunge_all()
        s1test, s2test = sess.query(Base).order_by(Base.id).all()
        assert s1test.comp
        assert s2test.comp
        eq_(s1test.comp, Comp('ham', 'cheese'))
        eq_(s2test.comp, Comp('bacon', 'eggs'))
class PKDiscriminatorTest(_base.MappedTest):
    """A column that is part of the primary key may double as the
    polymorphic discriminator."""
    @classmethod
    def define_tables(cls, metadata):
        parents = Table('parents', metadata,
                            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
                            Column('name', String(60)))
        children = Table('children', metadata,
                        Column('id', Integer, ForeignKey('parents.id'), primary_key=True),
                        Column('type', Integer,primary_key=True),
                        Column('name', String(60)))
    @testing.resolve_artifact_names
    def test_pk_as_discriminator(self):
        class Parent(object):
                def __init__(self, name=None):
                    self.name = name
        class Child(object):
            def __init__(self, name=None):
                self.name = name
        class A(Child):
            pass
        mapper(Parent, parents, properties={
            'children': relationship(Child, backref='parent'),
        })
        # 'type' is both a pk column and the discriminator
        mapper(Child, children, polymorphic_on=children.c.type,
            polymorphic_identity=1)
        mapper(A, inherits=Child, polymorphic_identity=2)
        s = create_session()
        p = Parent('p1')
        a = A('a1')
        p.children.append(a)
        s.add(p)
        s.flush()
        assert a.id
        assert a.type == 2
        # updates keyed on the composite (id, type) pk must round trip
        p.name='p1new'
        a.name='a1new'
        s.flush()
        s.expire_all()
        assert a.name=='a1new'
        assert p.name=='p1new'
class DeleteOrphanTest(_base.MappedTest):
    """delete-orphan failure messages should name the parent class as
    configured for the instance's actual (single-table inherited)
    class."""
    @classmethod
    def define_tables(cls, metadata):
        global single, parent
        single = Table('single', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('type', String(50), nullable=False),
            Column('data', String(50)),
            Column('parent_id', Integer, ForeignKey('parent.id'), nullable=False),
            )
        parent = Table('parent', metadata,
                Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
                Column('data', String(50))
            )
    def test_orphan_message(self):
        class Base(_fixtures.Base):
            pass
        class SubClass(Base):
            pass
        class Parent(_fixtures.Base):
            pass
        mapper(Base, single, polymorphic_on=single.c.type, polymorphic_identity='base')
        mapper(SubClass, inherits=Base, polymorphic_identity='sub')
        mapper(Parent, parent, properties={
            'related':relationship(Base, cascade="all, delete-orphan")
        })
        sess = create_session()
        s1 = SubClass(data='s1')
        # flushing an orphaned SubClass must raise with a message that
        # references the 'related' attribute on Parent
        sess.add(s1)
        assert_raises_message(orm_exc.FlushError,
            r"is not attached to any parent 'Parent' instance via "
            "that classes' 'related' attribute", sess.flush)
| {
"content_hash": "2be8db3313c73626d97c6193708e171b",
"timestamp": "",
"source": "github",
"line_count": 1262,
"max_line_length": 108,
"avg_line_length": 33.401743264659274,
"alnum_prop": 0.542262709652931,
"repo_name": "simplegeo/sqlalchemy",
"id": "2f9295e17e8170ef7de497d4475707d4e29ef404",
"size": "42153",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/orm/inheritance/test_basic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "30110"
},
{
"name": "JavaScript",
"bytes": "26336"
},
{
"name": "Python",
"bytes": "5012225"
}
],
"symlink_target": ""
} |
import os, sys, time
folders = sys.argv[1:]
nameWidth = max([len(f) for f in folders])
currSize = dict((x, 0) for x in folders)
totalSize = dict((x, 0) for x in folders)
maxSize = dict((x, 0) for x in folders)
fmts = "%*s %13s%s %13s%s %13s%s"
n = 0
while True:
print fmts % (nameWidth, "directory", "curr size", " ", "avg size", " ", "max size", " ")
n += 1
for folder in folders:
try:
bytes = sum( os.path.getsize(os.path.join(dirpath, filename))
for dirpath, dirnames, filenames in os.walk( folder )
for filename in filenames )
oldSize = currSize[folder]
oldAvg = 1 if n == 1 else totalSize[folder]/(n-1)
oldMax = maxSize[folder]
currSize[folder] = bytes
totalSize[folder] += bytes
maxSize[folder] = max(maxSize[folder], bytes)
avg = totalSize[folder] / n
print fmts % (nameWidth, folder,
"{:,}".format(bytes), "+" if bytes > oldSize else "-" if bytes < oldSize else " ",
"{:,}".format(avg), "+" if avg > oldAvg else "-" if avg < oldAvg else " ",
"{:,}".format(maxSize[folder]), "+" if maxSize[folder] > oldMax else " ")
except:
pass
print ""
time.sleep(2)
| {
"content_hash": "ddd676c1e1261ff4b77570cb59a49469",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 110,
"avg_line_length": 38.94285714285714,
"alnum_prop": 0.5077035950110051,
"repo_name": "jskora/scratch-nifi",
"id": "711514136e2189c7eab08f47b3c4152e26583164",
"size": "1386",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dirMonitor.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "1604"
},
{
"name": "Java",
"bytes": "269087"
},
{
"name": "Python",
"bytes": "11170"
},
{
"name": "Shell",
"bytes": "62225"
}
],
"symlink_target": ""
} |
"""
Tests the h5py.File object.
"""
from __future__ import absolute_import
import threading
import h5py
from ..common import ut, TestCase
class TestErrorPrinting(TestCase):

    """
    Verify the error printing is squashed in all threads.
    """

    def test_printing(self):
        """ No console messages should be shown from membership tests """
        # Unfortunately we can't have this test assert anything, as
        # HDF5 writes directly to stderr.  But it will show up in the
        # console output.
        # (redundant function-level ``import threading`` removed; the
        # module already imports it at the top)

        def test():
            # membership failure inside a worker thread must not print
            with h5py.File(self.mktemp(), 'w') as newfile:
                try:
                    doesnt_exist = newfile['doesnt_exist'].value
                except KeyError:
                    pass

        th = threading.Thread(target=test)
        th.start()
        th.join()

    def test_attr_printing(self):
        """ No console messages should be shown for non-existing attributes """

        def test():
            # missing-attribute lookup inside a worker thread must not print
            with h5py.File(self.mktemp(), 'w') as newfile:
                newfile['newdata'] = [1,2,3]
                try:
                    nonexistent_attr = newfile['newdata'].attrs['nonexistent_attr']
                except KeyError:
                    pass

        th = threading.Thread(target=test)
        th.start()
        th.join()
| {
"content_hash": "1363dfcfae62b7dd3e2228fc8e066334",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 83,
"avg_line_length": 26.326923076923077,
"alnum_prop": 0.5427319211102994,
"repo_name": "ryfeus/lambda-packs",
"id": "49e76d31803a6705063f87de0ff68aaf959eb784",
"size": "1637",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "HDF4_H5_NETCDF/source2.7/h5py/tests/hl/test_threads.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9768343"
},
{
"name": "C++",
"bytes": "76566960"
},
{
"name": "CMake",
"bytes": "191097"
},
{
"name": "CSS",
"bytes": "153538"
},
{
"name": "Cuda",
"bytes": "61768"
},
{
"name": "Cython",
"bytes": "3110222"
},
{
"name": "Fortran",
"bytes": "110284"
},
{
"name": "HTML",
"bytes": "248658"
},
{
"name": "JavaScript",
"bytes": "62920"
},
{
"name": "MATLAB",
"bytes": "17384"
},
{
"name": "Makefile",
"bytes": "152150"
},
{
"name": "Python",
"bytes": "549307737"
},
{
"name": "Roff",
"bytes": "26398"
},
{
"name": "SWIG",
"bytes": "142"
},
{
"name": "Shell",
"bytes": "7790"
},
{
"name": "Smarty",
"bytes": "4090"
},
{
"name": "TeX",
"bytes": "152062"
},
{
"name": "XSLT",
"bytes": "305540"
}
],
"symlink_target": ""
} |
import gen_utils
from module_base import ModuleBase
from module_mixins import NoConfigModuleMixin
import module_utils
import vtk
import vtkdevide
class greyReconstruct(NoConfigModuleMixin, ModuleBase):
    """DeVIDE module wrapping vtkdevide.vtkImageGreyscaleReconstruct3D.

    Performs greyscale morphological reconstruction of a marker image J
    under a mask image I.
    """

    def __init__(self, module_manager):
        # Initialise our base class first.
        ModuleBase.__init__(self, module_manager)

        self._greyReconstruct = vtkdevide.vtkImageGreyscaleReconstruct3D()

        # Register introspectable objects with the no-config mixin.
        NoConfigModuleMixin.__init__(
            self,
            {'Module (self)': self,
             'vtkImageGreyscaleReconstruct3D': self._greyReconstruct})

        module_utils.setup_vtk_object_progress(
            self, self._greyReconstruct,
            'Performing greyscale reconstruction')

        self.sync_module_logic_with_config()

    def close(self):
        # Play it safe: the graph_editor/module_manager should already
        # have disconnected us, but drop all inputs regardless.
        for idx, _ in enumerate(self.get_input_descriptions()):
            self.set_input(idx, None)

        # The mixin close takes care of all display thingies.
        NoConfigModuleMixin.close(self)
        ModuleBase.close(self)

        # Drop our reference to the wrapped VTK filter.
        del self._greyReconstruct

    def get_input_descriptions(self):
        return ('Mask image I (VTK)', 'Marker image J (VTK)')

    def set_input(self, idx, inputStream):
        # Input 0 is the mask (I), anything else the marker (J).
        setter = (self._greyReconstruct.SetInput1 if idx == 0
                  else self._greyReconstruct.SetInput2)
        setter(inputStream)

    def get_output_descriptions(self):
        return ('Reconstructed image (VTK)', )

    def get_output(self, idx):
        # Single output; idx is ignored.
        return self._greyReconstruct.GetOutput()

    def logic_to_config(self):
        pass

    def config_to_logic(self):
        pass

    def view_to_config(self):
        pass

    def config_to_view(self):
        pass

    def execute_module(self):
        self._greyReconstruct.Update()
| {
"content_hash": "c22d14d1cdc4711958a935bdbbe0738d",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 74,
"avg_line_length": 27.464788732394368,
"alnum_prop": 0.6241025641025642,
"repo_name": "chrisidefix/devide",
"id": "8842acbc574af328ef1ebc4b7b4106d81c2281e1",
"size": "1950",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "modules/filters/greyReconstruct.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Diff",
"bytes": "1373"
},
{
"name": "NSIS",
"bytes": "2786"
},
{
"name": "Python",
"bytes": "3104368"
},
{
"name": "Shell",
"bytes": "7369"
}
],
"symlink_target": ""
} |
from .. import utilities
def test_header_population_returns_list_of_dicts():
    """header_population turns a header row into one dict per column.

    Fix: use isinstance() instead of the ``type(x) == T`` anti-pattern.
    """
    fake_headers = ['one', 'two', 'three']
    populated = utilities.header_population(fake_headers)
    assert isinstance(populated, list)
    assert isinstance(populated[0], dict)
    assert len(populated) == 3
def test_data_population_returns_list_of_dicts():
    """data_population maps each data row onto the given headers.

    Fix: use isinstance() instead of the ``type(x) == T`` anti-pattern.
    """
    fake_headers = ['one', 'two', 'three']
    fake_data = [['a', 'b', 'c'], ['1', '2', '3']]
    populated = utilities.data_population(fake_data, fake_headers)
    assert isinstance(populated, list)
    assert isinstance(populated[0], dict)
    assert len(populated) == 2
def test_data_population_without_headers():
    """Without explicit headers every row is kept as a dict.

    Fix: use isinstance() instead of the ``type(x) == T`` anti-pattern.
    """
    fake_data = [['one', 'two', 'three'], ['a', 'b', 'c'], ['1', '2', '3']]
    populated = utilities.data_population(fake_data)
    assert isinstance(populated, list)
    assert isinstance(populated[0], dict)
    assert len(populated) == 3
| {
"content_hash": "0a9aa6c0afb743266abc896c98d54907",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 75,
"avg_line_length": 33.84615384615385,
"alnum_prop": 0.6261363636363636,
"repo_name": "chrisseto/modular-file-renderer",
"id": "021a3b1564dae833388f9ed662d2a2d02e717af4",
"size": "880",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "mfr/ext/tabular/tests/test_utilities.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "115212"
},
{
"name": "JavaScript",
"bytes": "2815665"
},
{
"name": "Makefile",
"bytes": "6777"
},
{
"name": "Python",
"bytes": "110271"
},
{
"name": "Shell",
"bytes": "6466"
}
],
"symlink_target": ""
} |
import mock
from oslo_utils import uuidutils
from neutron.agent.l3 import ha_router
from neutron.tests import base
_uuid = uuidutils.generate_uuid
class TestBasicRouterOperations(base.BaseTestCase):
    """Unit tests for basic ha_router.HaRouter behavior."""

    def setUp(self):
        super(TestBasicRouterOperations, self).setUp()

    def _create_router(self, router=None, **kwargs):
        """Build an HaRouter wired to mocked collaborators."""
        if not router:
            router = mock.MagicMock()
        self.agent_conf = mock.Mock()
        self.router_id = _uuid()
        return ha_router.HaRouter(mock.sentinel.enqueue_state,
                                  self.router_id,
                                  router,
                                  self.agent_conf,
                                  mock.sentinel.driver,
                                  **kwargs)

    def test_get_router_cidrs_returns_ha_cidrs(self):
        router = self._create_router()
        dev = mock.MagicMock()
        dev.name.return_value = 'eth2'
        cidrs = ['15.1.2.2/24', '15.1.2.3/32']
        router._get_cidrs_from_keepalived = mock.MagicMock(
            return_value=cidrs)
        self.assertEqual(set(cidrs), router.get_router_cidrs(dev))

    def test__add_default_gw_virtual_route(self):
        router = self._create_router()
        keepalived = mock.Mock()
        keepalived.virtual_routes.gateway_routes = []
        router._get_keepalived_instance = mock.Mock(
            return_value=keepalived)
        subnets = [{'id': _uuid(),
                    'cidr': '20.0.0.0/24',
                    'gateway_ip': None}]
        ex_gw_port = {'fixed_ips': [],
                      'subnets': subnets,
                      'extra_subnets': [],
                      'id': _uuid(),
                      'network_id': _uuid(),
                      'mac_address': 'ca:fe:de:ad:be:ef'}

        # No subnet has a gateway_ip, so no virtual route is added.
        router._add_default_gw_virtual_route(ex_gw_port, 'qg-abc')
        self.assertEqual(0, len(keepalived.virtual_routes.gateway_routes))

        # One subnet with a gateway_ip yields exactly one route.
        subnets.append({'id': _uuid(),
                        'cidr': '30.0.0.0/24',
                        'gateway_ip': '30.0.0.1'})
        router._add_default_gw_virtual_route(ex_gw_port, 'qg-abc')
        self.assertEqual(1, len(keepalived.virtual_routes.gateway_routes))

        # Clearing the gateway_ip removes the route again.
        subnets[1]['gateway_ip'] = None
        router._add_default_gw_virtual_route(ex_gw_port, 'qg-abc')
        self.assertEqual(0, len(keepalived.virtual_routes.gateway_routes))
| {
"content_hash": "b46cd2fef35a02cf7d8768a84334ca95",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 78,
"avg_line_length": 40.28333333333333,
"alnum_prop": 0.5436491518411254,
"repo_name": "sebrandon1/neutron",
"id": "dc67327ce18c773e9b50353c8d606f5b53da5285",
"size": "3034",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/agent/l3/test_ha_router.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "9903006"
},
{
"name": "Shell",
"bytes": "14339"
}
],
"symlink_target": ""
} |
from phycas.utilities.PhycasCommand import *
from phycas.utilities.CommonFunctions import CommonFunctions
from phycas.commands.CPOImpl import CPOImpl
from phycas import mcmc
class CPO(PhycasCommand):
    """Phycas 'cpo' command: runs mcmc while saving per-site sampled
    log-likelihoods so a Conditional Predictive Ordinate analysis can
    assess the model's fit to individual sites/characters."""

    def __init__(self):
        # patterns_only controls the row layout of the sitelike file.
        args = (("patterns_only", False, "If True, each row of the sitelike output file will contain sampled log-likelihoods for each pattern, with the first row of the file holding the counts for each pattern. If False, the rows of the sitelike file will contain the log-likelihoods for each site in the order in which the sites occur in the data file (generally produces a larger file).", BoolArgValidate),)

        # Specify output options
        o = PhycasCommandOutputOptions()
        o.__dict__["_help_order"] = ["sitelike"]
        p = TextOutputSpec(prefix='sitelike', suffix=".txt", help_str="The text file in which all sampled site log-likelihood values are saved.")
        o.__dict__["sitelike"] = p
        PhycasCommand.__init__(self, args, "cpo", "Performs a Conditional Predictive Ordinate (CPO) analysis to determine the relative fit of the model to individual sites/characters.", o)

        # The data members added below are hidden from the user because they are set when the mcmc command runs
        #self.__dict__["sampled_likes"] = None
        #self.__dict__["sampled_betas"] = None
        # File handle for site log-likelihoods; populated at run time.
        self.__dict__["sitelikef"] = None

    def hidden():
        """
        Overrides the PhycasCommand.hidden method to keep CPO's name from being displayed
        in the list of classes displayed when users type help. Delete this function, or
        change its return value to False, when it is ready to be advertised.
        """
        # NOTE(review): already returns False, so the command IS
        # advertised; the docstring text above is stale.
        return False
    hidden = staticmethod(hidden)

    def checkSanity(self):
        """
        Place asserts in this function that should be checked before anything substantive
        is done during a call of a CPO object.
        """
        #cf = CommonFunctions(self)
        #cf.phycassert(mcmc.ncycles > 0, 'mcmc.ncycles cannot be less than 1 for path sampling')

    def __call__(self, **kwargs):
        # Apply keyword settings, sanity-check, then run mcmc with
        # site-likelihood saving enabled around the run.
        self.set(**kwargs)
        self.checkSanity()
        # NOTE(review): 'copy' is not imported explicitly in this file's
        # visible imports -- presumably provided by the star import of
        # PhycasCommand above; confirm.
        c = copy.deepcopy(self)
        cpo_impl = CPOImpl(c)
        cpo_impl.siteLikeFileOpen()
        if cpo_impl.sitelikef is not None:
            mcmc.sitelikef = cpo_impl.sitelikef
            mcmc.saving_sitelikes = True
            mcmc()
            mcmc.saving_sitelikes = False
            cpo_impl.siteLikeFileClose()
        else:
            print 'Could not run the cpo command because the sitelike file could not be opened'
| {
"content_hash": "7383e091ebba49c7b4906714aaf07286",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 409,
"avg_line_length": 48.77358490566038,
"alnum_prop": 0.662669245647969,
"repo_name": "plewis/phycas",
"id": "5a212992c29927ffabda90a64b659a6843d0af5c",
"size": "2585",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/commands/CPO.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7082"
},
{
"name": "C",
"bytes": "48372"
},
{
"name": "C++",
"bytes": "2149872"
},
{
"name": "HTML",
"bytes": "937"
},
{
"name": "OpenEdge ABL",
"bytes": "2018215"
},
{
"name": "Perl",
"bytes": "1704571"
},
{
"name": "Perl6",
"bytes": "4326"
},
{
"name": "Python",
"bytes": "1213646"
},
{
"name": "Shell",
"bytes": "29270"
},
{
"name": "Terra",
"bytes": "1375348"
}
],
"symlink_target": ""
} |
import base64
import string
import urllib2
from lxml import etree
from oslo_config import cfg
from oslo_log import log as logging
from cinder import context
from cinder import exception
from cinder.i18n import _LE, _LI, _LW
from cinder.openstack.common import loopingcall
from cinder.volume import driver
from cinder.volume.drivers.san import san
from cinder.volume import qos_specs
from cinder.volume import volume_types
from cinder.zonemanager import utils as fczm_utils
# Driver-specific configuration options, registered on the global
# cinder CONF object below.
XIO_OPTS = [
    cfg.IntOpt('ise_storage_pool', default=1,
               help='Default storage pool for volumes.'),
    cfg.IntOpt('ise_raid', default=1,
               help='Raid level for ISE volumes.'),
    cfg.IntOpt('ise_connection_retries', default=5,
               help='Number of retries (per port) when establishing '
                    'connection to ISE management port.'),
    cfg.IntOpt('ise_retry_interval', default=1,
               help='Interval (secs) between retries.'),
    cfg.IntOpt('ise_completion_retries', default=30,
               help='Number on retries to get completion status after '
                    'issuing a command to ISE.'),
]

CONF = cfg.CONF
CONF.register_opts(XIO_OPTS)

LOG = logging.getLogger(__name__)

# Substrings matched against (upper-cased) ISE volume status strings.
OPERATIONAL_STATUS = 'OPERATIONAL'
PREPARED_STATUS = 'PREPARED'
INVALID_STATUS = 'VALID'
# Raise exception for X-IO driver
def RaiseXIODriverException():
    """Raise the cinder XIODriverException (never returns)."""
    raise exception.XIODriverException()
class XIOISEDriver(object):
    """Shared REST logic for the X-IO ISE storage drivers."""

    VERSION = '1.1.2'

    # Version Changes
    # 1.0.0   Base driver
    # 1.1.0   QoS, affinity, retype and thin support
    # 1.1.1   Fix retry loop (Bug 1429283)
    # 1.1.2   Fix host object deletion (Bug 1433450).
    def __init__(self, *args, **kwargs):
        """Initialise driver state.

        The cinder driver configuration object is expected under the
        'configuration' keyword argument.
        """
        super(XIOISEDriver, self).__init__()
        LOG.debug("XIOISEDriver __init__ called.")
        self.configuration = kwargs.get('configuration', None)
        # Controller management IPs, discovered from the array QUERY.
        self.ise_primary_ip = ''
        self.ise_secondary_ip = ''
        # When non-zero, the next connection re-issues the array QUERY.
        self.newquery = 1
        self.ise_globalid = None
        self._vol_stats = {}
    def do_setup(self, context):
        """Driver setup hook: prime the cached ISE global id."""
        LOG.debug("XIOISEDriver do_setup called.")
        self._get_ise_globalid()
def check_for_setup_error(self):
LOG.debug("XIOISEDriver check_for_setup_error called.")
# The san_ip must always be set
if self.configuration.san_ip == "":
msg = _LE("san ip must be configured!")
LOG.error(msg)
RaiseXIODriverException()
# The san_login must always be set
if self.configuration.san_login == "":
msg = _LE("san_login must be configured!")
LOG.error(msg)
RaiseXIODriverException()
# The san_password must always be set
if self.configuration.san_password == "":
msg = _LE("san_password must be configured!")
LOG.error(msg)
RaiseXIODriverException()
return
    def _get_version(self):
        """Return driver version (the class-level VERSION string)."""
        return self.VERSION
    def _send_query(self):
        """Do initial query to populate ISE global id.

        Also records controller IP addresses (primary/secondary chosen
        by controller rank) and returns a dict describing feature
        support derived from the array's capability list.
        """
        body = ''
        url = '/query'
        resp = self._connect('GET', url, body)
        status = resp['status']
        if status != 200:
            # unsuccessful - this is fatal as we need the global id
            # to build REST requests.
            msg = _LE("Array query failed - No response (%d)!") % status
            LOG.error(msg)
            RaiseXIODriverException()
        # Successfully fetched QUERY info. Parse out globalid along with
        # ipaddress for Controller 1 and Controller 2. We assign primary
        # ipaddress to use based on controller rank
        xml_tree = etree.fromstring(resp['content'])
        # first check that the ISE is running a supported FW version
        support = {}
        support['thin'] = False
        support['clones'] = False
        support['thin-clones'] = False
        self.configuration.ise_affinity = False
        self.configuration.ise_qos = False
        capabilities = xml_tree.find('capabilities')
        if capabilities is None:
            msg = _LE("Array query failed. No capabilities in response!")
            LOG.error(msg)
            RaiseXIODriverException()
        # Capability id values below are defined by the ISE firmware.
        for node in capabilities:
            if node.tag != 'capability':
                continue
            capability = node
            if capability.attrib['value'] == '49003':
                self.configuration.ise_affinity = True
            elif capability.attrib['value'] == '49004':
                self.configuration.ise_qos = True
            elif capability.attrib['value'] == '49005':
                support['thin'] = True
            elif capability.attrib['value'] == '49006':
                support['clones'] = True
            elif capability.attrib['value'] == '49007':
                support['thin-clones'] = True
        # Make sure ISE support necessary features
        if not support['clones']:
            msg = _LE("ISE FW version is not compatible with Openstack!")
            LOG.error(msg)
            RaiseXIODriverException()
        # set up thin provisioning support
        self.configuration.san_thin_provision = support['thin-clones']
        # Fill in global id, primary and secondary ip addresses
        globalid = xml_tree.find('globalid')
        if globalid is None:
            msg = _LE("Array query failed. No global id in XML response!")
            LOG.error(msg)
            RaiseXIODriverException()
        self.ise_globalid = globalid.text
        controllers = xml_tree.find('controllers')
        if controllers is None:
            msg = _LE("Array query failed. No controllers in response!")
            LOG.error(msg)
            RaiseXIODriverException()
        for node in controllers:
            if node.tag != 'controller':
                continue
            # found a controller node
            controller = node
            ipaddress = controller.find('ipaddress')
            ranktag = controller.find('rank')
            if ipaddress is None:
                continue
            # found an ipaddress tag
            # make sure rank tag is present
            if ranktag is None:
                continue
            rank = ranktag.attrib['value']
            # make sure rank value is present
            if rank is None:
                continue
            if rank == '1':
                # rank 1 means primary (xo)
                self.ise_primary_ip = ipaddress.text
                LOG.debug('Setting primary IP to: %s.',
                          self.ise_primary_ip)
            elif rank == '0':
                # rank 0 means secondary (nxo)
                self.ise_secondary_ip = ipaddress.text
                LOG.debug('Setting secondary IP to: %s.',
                          self.ise_secondary_ip)
        # clear out new query request flag on successful fetch of QUERY info.
        self.newquery = 0
        return support
def _get_ise_globalid(self):
"""Return ISE globalid."""
if self.ise_globalid is None or self.newquery == 1:
# this call will populate globalid
self._send_query()
if self.ise_globalid is None:
msg = _LE("ISE globalid not set!")
LOG.error(msg)
RaiseXIODriverException()
return self.ise_globalid
def _get_ise_primary_ip(self):
"""Return Primary IP address to REST API."""
if self.ise_primary_ip == '':
# Primary IP is set to ISE IP passed in from cinder.conf
self.ise_primary_ip = self.configuration.san_ip
if self.ise_primary_ip == '':
# No IP - fatal.
msg = _LE("Primary IP must be set!")
LOG.error(msg)
RaiseXIODriverException()
return self.ise_primary_ip
def _get_ise_secondary_ip(self):
"""Return Secondary IP address to REST API."""
if self.ise_secondary_ip != '':
return self.ise_secondary_ip
def _get_uri_prefix(self):
"""Returns prefix in form of http(s)://1.2.3.4"""
prefix = ''
# figure out if http or https should be used
if self.configuration.driver_use_ssl:
prefix = 'https://'
else:
prefix = 'http://'
# add the IP address
prefix += self._get_ise_primary_ip()
return prefix
    def _opener(self, method, url, body, header):
        """Issue one HTTP request via urllib2 and normalise the result.

        Returns a dict with 'status' (0 signals a connection failure),
        'content' (response or error body) and 'location' (the
        Content-Location header when present).
        """
        response = {}
        response['status'] = 0
        response['content'] = ''
        response['location'] = ''
        # send the request
        req = urllib2.Request(url, body, header)
        # Override method to allow GET, PUT, POST, DELETE
        req.get_method = lambda: method
        try:
            resp = urllib2.urlopen(req)
        except urllib2.HTTPError as err:
            # HTTP error. Return HTTP status and content and let caller
            # handle retries.
            response['status'] = err.code
            response['content'] = err.read()
        except urllib2.URLError as err:
            # Connection failure. Return a status of 0 to indicate error.
            response['status'] = 0
        else:
            # Successful. Return status code, content,
            # and location header, if present.
            response['status'] = resp.getcode()
            response['content'] = resp.read()
            response['location'] = \
                resp.info().getheader('Content-Location', '')
        return response
    def _help_call_method(self, args, retry_count):
        """Helper function used for prepare clone and delete REST calls."""
        # This function calls request method and URL and checks the response.
        # Certain cases allows for retries, while success and fatal status
        # will fall out and tell parent to break out of loop.
        # initialize remaining to one less than retries
        remaining = retry_count
        resp = self._send_cmd(args['method'], args['url'], args['arglist'])
        status = resp['status']
        if (status == 400):
            reason = ''
            if 'content' in resp:
                # NOTE(review): the root element's .text may be None,
                # which would make string.upper raise -- presumably the
                # 400 body always carries text here; confirm.
                reason = etree.fromstring(resp['content'])
                if reason is not None:
                    reason = string.upper(reason.text)
            if INVALID_STATUS in reason:
                # Request failed with an invalid state. This can be because
                # source volume is in a temporary unavailable state.
                LOG.debug('REST call failed with invalid state: '
                          '%(method)s - %(status)d - %(reason)s',
                          {'method': args['method'],
                           'status': status, 'reason': reason})
                # Let parent check retry eligibility based on remaining retries
                remaining -= 1
            else:
                # Fatal error. Set remaining to 0 to make caller exit loop.
                remaining = 0
        else:
            # set remaining to 0 to make caller exit loop
            # original waiter will handle the difference between success and
            # fatal error based on resp['status'].
            remaining = 0
        return (remaining, resp)
def _help_call_opener(self, args, retry_count):
"""Helper function to call _opener."""
# This function calls _opener func and checks the response.
# If response is 0 it will decrement the remaining retry count.
# On successful connection it will set remaining to 0 to signal
# parent to break out of loop.
remaining = retry_count
response = self._opener(args['method'], args['url'],
args['body'], args['header'])
if response['status'] != 0:
# We are done
remaining = 0
else:
# Let parent check retry eligibility based on remaining retries.
remaining -= 1
# Return remaining and response
return (remaining, response)
def _help_wait_for_status(self, args, retry_count):
"""Helper function to wait for specified volume status"""
# This function calls _get_volume_info and checks the response.
# If the status strings do not match the specified status it will
# return the remaining retry count decremented by one.
# On successful match it will set remaining to 0 to signal
# parent to break out of loop.
remaining = retry_count
info = self._get_volume_info(args['name'])
status = args['status_string']
if (status in info['string'] or status in info['details']):
remaining = 0
else:
# Let parent check retry eligibility based on remaining retries.
remaining -= 1
# return remaining and volume info
return (remaining, info)
    def _wait_for_completion(self, help_func, args, retry_count):
        """Helper function to wait for completion of passed function.

        Repeatedly invokes help_func(args, remaining) on a fixed
        interval (ise_retry_interval seconds) until the helper reports
        zero remaining retries, then returns the helper's last result.
        """
        # Helper call loop function.
        def _call_loop(loop_args):
            remaining = loop_args['retries']
            args = loop_args['args']
            LOG.debug("In call loop (%d) %s", remaining, args)
            (remaining, response) = loop_args['func'](args, remaining)
            if remaining == 0:
                # We are done - let our caller handle response
                raise loopingcall.LoopingCallDone(response)
            loop_args['retries'] = remaining
        # Setup retries, interval and call wait function.
        loop_args = {}
        loop_args['retries'] = retry_count
        loop_args['func'] = help_func
        loop_args['args'] = args
        interval = self.configuration.ise_retry_interval
        timer = loopingcall.FixedIntervalLoopingCall(_call_loop, loop_args)
        return timer.start(interval).wait()
def _connect(self, method, uri, body=''):
"""Set up URL and HTML and call _opener to make request"""
url = ''
# see if we need to add prefix
# this call will force primary ip to be filled in as well
prefix = self._get_uri_prefix()
if prefix not in uri:
url = prefix
url += uri
# set up headers for XML and Auth
header = {'Content-Type': 'application/xml; charset=utf-8'}
auth_key =\
base64.encodestring('%s:%s' %
(self.configuration.san_login,
self.configuration.san_password))[:-1]
header['Authorization'] = 'Basic %s' % auth_key
# We allow 5 retries on each IP address. If connection to primary
# fails, secondary will be tried. If connection to secondary is
# successful, the request flag for a new QUERY will be set. The QUERY
# will be sent on next connection attempt to figure out which
# controller is primary in case it has changed.
LOG.debug("Connect: %s %s %s", method, url, body)
using_secondary = 0
response = {}
response['status'] = 0
response['location'] = ''
response['content'] = ''
primary_ip = self._get_ise_primary_ip()
secondary_ip = self._get_ise_secondary_ip()
# This will first try connecting to primary IP and then secondary IP.
args = {}
args['method'] = method
args['url'] = url
args['body'] = body
args['header'] = header
retries = self.configuration.ise_connection_retries
while True:
response = self._wait_for_completion(self._help_call_opener,
args, retries)
if response['status'] != 0:
# Connection succeeded. Request new query on next connection
# attempt if we used secondary ip to sort out who should be
# primary going forward
self.newquery = using_secondary
return response
# connection failed - check if we have any retries left
if using_secondary == 0:
# connection on primary ip failed
# try secondary ip
if secondary_ip is '':
# if secondary is not setup yet, then assert
# connection on primary and secondary ip failed
msg = (_LE("Connection to %s failed and no secondary!") %
primary_ip)
LOG.error(msg)
RaiseXIODriverException()
# swap primary for secondary ip in URL
url = string.replace(url, primary_ip, secondary_ip)
LOG.debug('Trying secondary IP URL: %s', url)
using_secondary = 1
continue
# connection failed on both IPs - break out of the loop
break
# connection on primary and secondary ip failed
msg = (_LE("Could not connect to %(primary)s or %(secondary)s!") %
{'primary': primary_ip, 'secondary': secondary_ip})
LOG.error(msg)
RaiseXIODriverException()
def _param_string(self, params):
"""Turn (name, value) pairs into single param string"""
param_str = []
for name, value in params.items():
if value != '':
param_str.append("%s=%s" % (name, value))
return '&'.join(param_str)
def _send_cmd(self, method, url, params):
"""Prepare HTTP request and call _connect"""
# Add params to appropriate field based on method
body = ''
if method == 'GET':
if params != {}:
url += '?' + self._param_string(params)
body = ''
elif method == 'POST':
body = self._param_string(params)
elif method == 'DELETE':
body = ''
elif method == 'PUT':
if params != {}:
url += '?' + self._param_string(params)
# ISE REST API is mostly synchronous but has some asynchronous
# streaks. Add retries to work around design of ISE REST API that
# does not allow certain operations to be in process concurrently.
# This is only an issue if lots of CREATE/DELETE/SNAPSHOT/CLONE ops
# are issued in short order.
return self._connect(method, url, body)
    def find_target_chap(self):
        """Return target CHAP settings.

        Returns a dict with 'chap_user' and 'chap_passwd'; both remain
        '' when CHAP is disabled or the ionetworks query fails (the
        failure is logged but deliberately non-fatal).
        """
        chap = {}
        chap['chap_user'] = ''
        chap['chap_passwd'] = ''
        url = '/storage/arrays/%s/ionetworks' % (self._get_ise_globalid())
        resp = self._send_cmd('GET', url, {})
        status = resp['status']
        if status != 200:
            msg = _LW("IOnetworks GET failed (%d)") % status
            LOG.warning(msg)
            return chap
        # Got a good response. Parse out CHAP info. First check if CHAP is
        # enabled and if so parse out username and password.
        root = etree.fromstring(resp['content'])
        for element in root.iter():
            if element.tag != 'chap':
                continue
            chapin = element.find('chapin')
            if chapin is None:
                continue
            if chapin.attrib['value'] != '1':
                continue
            # CHAP is enabled. Store username / pw
            chap_user = chapin.find('username')
            if chap_user is not None:
                chap['chap_user'] = chap_user.text
            chap_passwd = chapin.find('password')
            if chap_passwd is not None:
                chap['chap_passwd'] = chap_passwd.text
            break
        return chap
    def find_target_iqn(self, iscsi_ip):
        """Find Target IQN string.

        Scans controller ioports for one whose ipaddresses contain
        iscsi_ip and returns that endpoint's globalid (the IQN).
        Raises the driver exception when no match is found.
        """
        url = '/storage/arrays/%s/controllers' % (self._get_ise_globalid())
        resp = self._send_cmd('GET', url, {})
        status = resp['status']
        if status != 200:
            # Not good. Throw an exception.
            msg = _LE("Controller GET failed (%d)") % status
            LOG.error(msg)
            RaiseXIODriverException()
        # Good response. Parse out IQN that matches iscsi_ip_address
        # passed in from cinder.conf. IQN is 'hidden' in globalid field.
        root = etree.fromstring(resp['content'])
        for element in root.iter():
            if element.tag != 'ioport':
                continue
            ipaddrs = element.find('ipaddresses')
            if ipaddrs is None:
                continue
            for ipaddr in ipaddrs.iter():
                # Look for match with iscsi_ip_address
                if ipaddr is None or ipaddr.text != iscsi_ip:
                    continue
                endpoint = element.find('endpoint')
                if endpoint is None:
                    continue
                global_id = endpoint.find('globalid')
                if global_id is None:
                    continue
                target_iqn = global_id.text
                if target_iqn != '':
                    return target_iqn
        # Did not find a matching IQN. Upsetting.
        msg = _LE("Failed to get IQN!")
        LOG.error(msg)
        RaiseXIODriverException()
def find_target_wwns(self):
"""Return target WWN"""
# Let's look for WWNs
target_wwns = []
target = ''
url = '/storage/arrays/%s/controllers' % (self._get_ise_globalid())
resp = self._send_cmd('GET', url, {})
status = resp['status']
if status != 200:
# Not good. Throw an exception.
msg = _LE("Controller GET failed (%d)") % status
LOG.error(msg)
RaiseXIODriverException()
# Good response. Parse out globalid (WWN) of endpoint that matches
# protocol and type (array).
controllers = etree.fromstring(resp['content'])
for controller in controllers.iter():
if controller.tag != 'controller':
continue
fcports = controller.find('fcports')
if fcports is None:
continue
for fcport in fcports:
if fcport.tag != 'fcport':
continue
wwn_tag = fcport.find('wwn')
if wwn_tag is None:
continue
target = wwn_tag.text
target_wwns.append(target)
return target_wwns
def _find_target_lun(self, location):
"""Return LUN for allocation specified in location string"""
resp = self._send_cmd('GET', location, {})
status = resp['status']
if status != 200:
# Not good. Throw an exception.
msg = _LE("Failed to get allocation information (%d)!") % status
LOG.error(msg)
RaiseXIODriverException()
# Good response. Parse out LUN.
xml_tree = etree.fromstring(resp['content'])
allocation = xml_tree.find('allocation')
if allocation is not None:
luntag = allocation.find('lun')
if luntag is not None:
return luntag.text
# Did not find LUN. Throw an exception.
msg = _LE("Failed to get LUN information!")
LOG.error(msg)
RaiseXIODriverException()
    def _get_volume_info(self, vol_name):
        """Return status of ISE volume.

        Result dict keys: 'value'/'string' (status attribute and
        upper-cased status text), 'details' (upper-cased first detail
        entry), 'location' (REST self-URL of the volume) and 'size'.
        Missing pieces are left as '' rather than raising.
        """
        vol_info = {}
        vol_info['value'] = ''
        vol_info['string'] = ''
        vol_info['details'] = ''
        vol_info['location'] = ''
        vol_info['size'] = ''
        # Attempt to collect status value, string and details. Also pick up
        # location string from response. Location is used in REST calls
        # DELETE/SNAPSHOT/CLONE.
        # We ask for specific volume, so response should only contain one
        # volume entry.
        url = '/storage/arrays/%s/volumes' % (self._get_ise_globalid())
        resp = self._send_cmd('GET', url, {'name': vol_name})
        if resp['status'] != 200:
            msg = (_LW("Could not get status for %(name)s (%(status)d).") %
                   {'name': vol_name, 'status': resp['status']})
            LOG.warning(msg)
            return vol_info
        # Good response. Parse down to Volume tag in list of one.
        root = etree.fromstring(resp['content'])
        volume_node = root.find('volume')
        if volume_node is None:
            msg = _LW("No volume node in XML content.")
            LOG.warning(msg)
            return vol_info
        # Location can be found as an attribute in the volume node tag.
        vol_info['location'] = volume_node.attrib['self']
        # Find status tag
        status = volume_node.find('status')
        if status is None:
            msg = _LW("No status payload for volume %s.") % vol_name
            LOG.warning(msg)
            return vol_info
        # Fill in value and string from status tag attributes.
        vol_info['value'] = status.attrib['value']
        vol_info['string'] = string.upper(status.attrib['string'])
        # Detailed status has it's own list of tags.
        details = status.find('details')
        if details is not None:
            detail = details.find('detail')
            if detail is not None:
                vol_info['details'] = string.upper(detail.text)
        # Get volume size
        size_tag = volume_node.find('size')
        if size_tag is not None:
            vol_info['size'] = size_tag.text
        # Return value, string, details and location.
        return vol_info
    def _alloc_location(self, volume, hostname, delete=0):
        """Find location string for allocation. Also delete alloc per reqst.

        Looks up the allocation (presentation) of volume to hostname
        and returns its REST self-URL, or '' when not found or when
        delete=1 removed the matching allocation.
        """
        location = ''
        url = '/storage/arrays/%s/allocations' % (self._get_ise_globalid())
        resp = self._send_cmd('GET', url, {'name': volume['name'],
                                           'hostname': hostname})
        if resp['status'] != 200:
            msg = (_LE("Could not GET allocation information (%d)!") %
                   resp['status'])
            LOG.error(msg)
            RaiseXIODriverException()
        # Good response. Find the allocation based on volume name.
        allocation_tree = etree.fromstring(resp['content'])
        for allocation in allocation_tree.iter():
            if allocation.tag != 'allocation':
                continue
            # verify volume name match
            volume_tag = allocation.find('volume')
            if volume_tag is None:
                continue
            volumename_tag = volume_tag.find('volumename')
            if volumename_tag is None:
                continue
            volumename = volumename_tag.text
            if volumename != volume['name']:
                continue
            # verified volume name match
            # find endpoints list
            endpoints = allocation.find('endpoints')
            if endpoints is None:
                continue
            # Found endpoints list. Found matching host if hostname specified,
            # otherwise any host is a go. This is used by the caller to
            # delete all allocations (presentations) to a volume.
            for endpoint in endpoints.iter():
                if hostname != '':
                    hname_tag = endpoint.find('hostname')
                    if hname_tag is None:
                        continue
                    if string.upper(hname_tag.text) != string.upper(hostname):
                        continue
                    # Found hostname match. Location string is an attribute in
                    # allocation tag.
                    location = allocation.attrib['self']
                    # Delete allocation if requested.
                    if delete == 1:
                        self._send_cmd('DELETE', location, {})
                        location = ''
                    break
                else:
                    # NOTE(review): with hostname == '' this returns on the
                    # very first endpoint, before any match or delete has
                    # happened (location is still '' here) -- confirm this
                    # is the intended "any host" behavior.
                    return location
        return location
def _present_volume(self, volume, hostname, lun):
    """Present (map) a volume to a host, optionally at a specific LUN.

    Issues a POST to the ISE allocations endpoint; 201 means presented,
    409 means an allocation already exists (treated as a warning), any
    other status raises.  Returns the LUN the ISE actually assigned,
    fetched from the resulting allocation.
    """
    # Set up params with volume name, host name and target lun, if
    # specified.
    target_lun = lun
    params = {}
    params = {'volumename': volume['name'],
              'hostname': hostname}
    # Fill in LUN if specified.
    if target_lun != '':
        params['lun'] = target_lun
    # Issue POST call to allocation.
    url = '/storage/arrays/%s/allocations' % (self._get_ise_globalid())
    resp = self._send_cmd('POST', url, params)
    status = resp['status']
    if status == 201:
        LOG.info(_LI("Volume %s presented."), volume['name'])
    elif status == 409:
        # Already presented - not fatal.
        msg = (_LW("Volume %(name)s already presented (%(status)d)!") %
               {'name': volume['name'], 'status': status})
        LOG.warning(msg)
    else:
        msg = (_LE("Failed to present volume %(name)s (%(status)d)!") %
               {'name': volume['name'], 'status': status})
        LOG.error(msg)
        RaiseXIODriverException()
    # Fetch LUN. In theory the LUN should be what caller requested.
    # We try to use shortcut as location comes back in Location header.
    # Make sure shortcut of using location header worked, if not ask
    # for it explicitly.
    location = resp['location']
    if location == '':
        location = self._alloc_location(volume, hostname)
    # Find target LUN
    if location != '':
        target_lun = self._find_target_lun(location)
    # Success. Return target LUN.
    LOG.debug("Volume %s presented: %s %s",
              volume['name'], hostname, target_lun)
    return target_lun
def find_allocations(self, hostname):
    """Return the number of volume allocations for the specified host."""
    url = '/storage/arrays/%s/allocations' % (self._get_ise_globalid())
    resp = self._send_cmd('GET', url, {'hostname': hostname})
    status = resp['status']
    if status != 200:
        msg = (_LE("Failed to get allocation information: "
                   "%(host)s (%(status)d)!") %
               {'host': hostname, 'status': status})
        LOG.error(msg)
        RaiseXIODriverException()
    # Good response: count the <allocation> elements in the reply body.
    allocation_tree = etree.fromstring(resp['content'])
    return sum(1 for node in allocation_tree.iter()
               if node.tag == 'allocation')
def _find_host(self, endpoints):
    """Check if host entry exists on ISE based on endpoint (IQN, WWNs).

    Returns a dict with 'name', 'type' and 'locator' keys; all three
    are '' when no host matched.  Only the first endpoint of a list is
    used for the search (FC hosts can have several, iSCSI has one).
    """
    # FC host might have more than one endpoint. ISCSI has only one.
    # Check if endpoints is a list, if so use first entry in list for
    # host search.
    if type(endpoints) is list:
        for endpoint in endpoints:
            ep = endpoint
            break
    else:
        ep = endpoints
    # Got single end point. Now make REST API call to fetch all hosts
    LOG.debug("find_host: Looking for host %s.", ep)
    host = {}
    host['name'] = ''
    host['type'] = ''
    host['locator'] = ''
    params = {}
    url = '/storage/arrays/%s/hosts' % (self._get_ise_globalid())
    resp = self._send_cmd('GET', url, params)
    status = resp['status']
    if resp['status'] != 200:
        msg = _LE("Could not find any hosts (%s)") % status
        LOG.error(msg)
        RaiseXIODriverException()
    # Good response. Try to match up a host based on end point string.
    host_tree = etree.fromstring(resp['content'])
    for host_node in host_tree.iter():
        if host_node.tag != 'host':
            continue
        # Found a host tag. Check if end point matches.
        endpoints_node = host_node.find('endpoints')
        if endpoints_node is None:
            continue
        for endpoint_node in endpoints_node.iter():
            if endpoint_node.tag != 'endpoint':
                continue
            gid = endpoint_node.find('globalid')
            if gid is None:
                continue
            # Endpoint comparison is case-insensitive.
            if string.upper(gid.text) != string.upper(ep):
                continue
            # We have a match. Fill in host name, type and locator
            host['locator'] = host_node.attrib['self']
            type_tag = host_node.find('type')
            if type_tag is not None:
                host['type'] = type_tag.text
            name_tag = host_node.find('name')
            if name_tag is not None:
                host['name'] = name_tag.text
            # Stop scanning this host's endpoints once matched.
            break
    # This will be filled in or '' based on findings above.
    return host
def _create_host(self, hostname, endpoints):
    """Create host entry on ISE for connector.

    Builds the endpoint string for the REST call: the first endpoint is
    the value of the 'endpoint' parameter itself, each additional one is
    appended as an 'endpoint=<ep>' pair joined with '&'.
    """
    endpoint_str = ''
    if type(endpoints) is list:
        pieces = []
        for index, endpoint in enumerate(endpoints):
            if index == 0:
                pieces.append("%s" % (endpoint))
            else:
                pieces.append("endpoint=%s" % (endpoint))
        endpoint_str = '&'.join(pieces)
    else:
        endpoint_str = endpoints
    # Log host creation.
    LOG.debug("Create host %s; %s", hostname, endpoint_str)
    # Issue REST call to create host entry of Openstack type.
    params = {'name': hostname, 'endpoint': endpoint_str,
              'os': 'openstack'}
    url = '/storage/arrays/%s/hosts' % (self._get_ise_globalid())
    resp = self._send_cmd('POST', url, params)
    status = resp['status']
    # 201 == created, 409 == already exists; both are acceptable.
    if status != 201 and status != 409:
        msg = _LE("POST for host create failed (%s)!") % status
        LOG.error(msg)
        RaiseXIODriverException()
    # Successfully created host entry. Return host name.
    return hostname
def _create_clone(self, volume, clone, clone_type):
    """Create clone worker function for snapshots and full clones.

    ``clone_type`` is 'snapshot' or 'clone' and tells the ISE which
    operation to perform.  Creating either is a two step process on
    current ISE FW: the snapshot/clone is first prepared, then the
    create is committed.  Each step completes asynchronously, so the
    intermediate statuses are polled via _wait_for_completion.
    Raises via RaiseXIODriverException() on any failure.
    """
    volume_name = ''
    if clone_type == 'snapshot':
        volume_name = volume['volume_name']
    elif clone_type == 'clone':
        volume_name = volume['name']
    args = {}
    # Make sure source volume is ready. This is another case where
    # we have to work around asynchronous behavior in ISE REST API.
    args['name'] = volume_name
    args['status_string'] = OPERATIONAL_STATUS
    retries = self.configuration.ise_completion_retries
    vol_info = self._wait_for_completion(self._help_wait_for_status,
                                         args, retries)
    if vol_info['value'] == '0':
        LOG.debug('Source volume %s ready.', volume_name)
    else:
        msg = _LE("Source volume %s not ready!") % volume_name
        LOG.error(msg)
        RaiseXIODriverException()
    # Prepare snapshot.
    # Get extra_specs and qos specs from source volume; these helpers
    # fill in default values for the entries used below.
    ctxt = context.get_admin_context()
    type_id = volume['volume_type_id']
    extra_specs = self._get_extra_specs(ctxt, type_id)
    LOG.debug("Volume %s extra_specs %s", volume['name'], extra_specs)
    qos = self._get_qos_specs(ctxt, type_id)
    # Wait until snapshot/clone is prepared.
    args['method'] = 'POST'
    args['url'] = vol_info['location']
    args['status'] = 202
    args['arglist'] = {'name': clone['name'],
                       'type': clone_type,
                       'affinity': extra_specs['affinity'],
                       'IOPSmin': qos['minIOPS'],
                       'IOPSmax': qos['maxIOPS'],
                       'IOPSburst': qos['burstIOPS']}
    retries = self.configuration.ise_completion_retries
    resp = self._wait_for_completion(self._help_call_method,
                                     args, retries)
    if resp['status'] != 202:
        # Clone prepare failed.
        msg = _LE("Prepare clone failed for %s.") % clone['name']
        LOG.error(msg)
        RaiseXIODriverException()
    # Clone prepare request accepted; make sure not to continue until
    # the clone reaches prepared state.
    args['name'] = clone['name']
    args['status_string'] = PREPARED_STATUS
    retries = self.configuration.ise_completion_retries
    clone_info = self._wait_for_completion(self._help_wait_for_status,
                                           args, retries)
    if PREPARED_STATUS in clone_info['details']:
        LOG.debug('Clone %s prepared.', clone['name'])
    else:
        msg = (_LE("Clone %s not in prepared state!") % clone['name'])
        LOG.error(msg)
        RaiseXIODriverException()
    # Clone prepared, now commit the create.
    resp = self._send_cmd('PUT', clone_info['location'],
                          {clone_type: 'true'})
    if resp['status'] != 201:
        msg = (_LE("Commit clone failed: %(name)s (%(status)d)!") %
               {'name': clone['name'], 'status': resp['status']})
        LOG.error(msg)
        RaiseXIODriverException()
    # Clone create request accepted. Make sure not to return until clone
    # operational.
    args['name'] = clone['name']
    args['status_string'] = OPERATIONAL_STATUS
    retries = self.configuration.ise_completion_retries
    clone_info = self._wait_for_completion(self._help_wait_for_status,
                                           args, retries)
    if OPERATIONAL_STATUS in clone_info['string']:
        # BUG FIX: the old code built a (msg, arg) tuple and passed the
        # tuple to LOG.info; pass the format argument lazily instead.
        LOG.info(_LI("Clone %s created."), clone['name'])
    else:
        msg = _LE("Commit failed for %s!") % clone['name']
        LOG.error(msg)
        RaiseXIODriverException()
    return
def _fill_in_available_capacity(self, node, pool):
"""Fill in free capacity info for pool."""
available = node.find('available')
if available is None:
pool['free_capacity_gb'] = 0
return pool
pool['free_capacity_gb'] = int(available.get('total'))
# Fill in separate RAID level cap
byred = available.find('byredundancy')
if byred is None:
return pool
raid = byred.find('raid-0')
if raid is not None:
pool['free_capacity_gb_raid_0'] = int(raid.text)
raid = byred.find('raid-1')
if raid is not None:
pool['free_capacity_gb_raid_1'] = int(raid.text)
raid = byred.find('raid-5')
if raid is not None:
pool['free_capacity_gb_raid_5'] = int(raid.text)
raid = byred.find('raid-6')
if raid is not None:
pool['free_capacity_gb_raid_6'] = int(raid.text)
return pool
def _fill_in_used_capacity(self, node, pool):
"""Fill in used capacity info for pool."""
used = node.find('used')
if used is None:
pool['allocated_capacity_gb'] = 0
return pool
pool['allocated_capacity_gb'] = int(used.get('total'))
# Fill in separate RAID level cap
byred = used.find('byredundancy')
if byred is None:
return pool
raid = byred.find('raid-0')
if raid is not None:
pool['allocated_capacity_gb_raid_0'] = int(raid.text)
raid = byred.find('raid-1')
if raid is not None:
pool['allocated_capacity_gb_raid_1'] = int(raid.text)
raid = byred.find('raid-5')
if raid is not None:
pool['allocated_capacity_gb_raid_5'] = int(raid.text)
raid = byred.find('raid-6')
if raid is not None:
pool['allocated_capacity_gb_raid_6'] = int(raid.text)
return pool
def _get_pools(self):
    """Return (pools, volume_count) for all pools on the ISE.

    Each entry in ``pools`` is a dict with name/id/status, free and
    allocated capacity (total and per RAID level), media info and QoS
    capacity limits parsed from the /storage/pools response.  On a
    failed request an empty list and zero count are returned.
    """
    pools = []
    vol_cnt = 0
    url = '/storage/pools'
    resp = self._send_cmd('GET', url, {})
    status = resp['status']
    if status != 200:
        # Request failed. Return what we have, which isn't much.
        msg = _LW("Could not get pool information (%s)!") % status
        LOG.warning(msg)
        return (pools, vol_cnt)
    # Parse out available (free) and used. Add them up to get total.
    xml_tree = etree.fromstring(resp['content'])
    for child in xml_tree:
        if child.tag != 'pool':
            continue
        # BUG FIX: allocate a fresh dict for every pool. Previously a
        # single dict was created once outside the loop and appended
        # repeatedly, so all entries in 'pools' aliased the same object
        # (every pool reported the last pool's data, and conditionally
        # set keys leaked between pools).
        pool = {}
        # Fill in ise pool name
        tag = child.find('name')
        if tag is not None:
            pool['pool_ise_name'] = tag.text
        # Fill in globalid
        tag = child.find('globalid')
        if tag is not None:
            pool['globalid'] = tag.text
        # Fill in pool name
        tag = child.find('id')
        if tag is not None:
            pool['pool_name'] = tag.text
        # Fill in pool status
        tag = child.find('status')
        if tag is not None:
            pool['status'] = tag.attrib['string']
            details = tag.find('details')
            if details is not None:
                detail = details.find('detail')
                if detail is not None:
                    pool['status_details'] = detail.text
        # Fill in available capacity
        pool = self._fill_in_available_capacity(child, pool)
        # Fill in allocated capacity
        pool = self._fill_in_used_capacity(child, pool)
        # Fill in media health and type
        media = child.find('media')
        if media is not None:
            medium = media.find('medium')
            if medium is not None:
                health = medium.find('health')
                if health is not None:
                    pool['health'] = int(health.text)
                tier = medium.find('tier')
                if tier is not None:
                    pool['media'] = tier.attrib['string']
        cap = child.find('IOPSmincap')
        if cap is not None:
            pool['minIOPS_capacity'] = cap.text
        cap = child.find('IOPSmaxcap')
        if cap is not None:
            pool['maxIOPS_capacity'] = cap.text
        cap = child.find('IOPSburstcap')
        if cap is not None:
            pool['burstIOPS_capacity'] = cap.text
        pool['total_capacity_gb'] = (int(pool['free_capacity_gb'] +
                                         pool['allocated_capacity_gb']))
        pool['QoS_support'] = self.configuration.ise_qos
        pool['reserved_percentage'] = 0
        pools.append(pool)
        # Count volumes in this pool.
        volumes = child.find('volumes')
        if volumes is not None:
            vol_cnt += len(volumes)
    return (pools, vol_cnt)
def _update_volume_stats(self):
    """Query the backend and build the volume stats dictionary."""
    self._send_query()
    # Backend name falls back to the class name when not configured.
    backend_name = (self.configuration.volume_backend_name or
                    self.__class__.__name__)
    # Get per-pool capacity and the number of active volumes.
    (pools, vol_cnt) = self._get_pools()
    total_cap = 0
    free_cap = 0
    for pool in pools:
        total_cap += int(pool['total_capacity_gb'])
        free_cap += int(pool['free_capacity_gb'])
    data = {
        "vendor_name": 'X-IO',
        "driver_version": self._get_version(),
        "volume_backend_name": backend_name,
        'reserved_percentage': 0,
        'total_capacity_gb': int(total_cap),
        'free_capacity_gb': int(free_cap),
        # QoS
        'QoS_support': self.configuration.ise_qos,
        # Volume affinity
        'affinity': self.configuration.ise_affinity,
        # Thin provisioning
        'thin': self.configuration.san_thin_provision,
        'pools': pools,
        'active_volumes': int(vol_cnt),
    }
    return data
def get_volume_stats(self, refresh=False):
    """Return cached volume stats, refreshing them first when requested."""
    if refresh:
        self._vol_stats = self._update_volume_stats()
    stats = self._vol_stats
    LOG.debug("ISE get_volume_stats (total, free): %s, %s",
              stats['total_capacity_gb'],
              stats['free_capacity_gb'])
    return stats
def _get_extra_specs(self, ctxt, type_id):
"""Get extra specs from volume type."""
specs = {}
specs['affinity'] = ''
specs['alloctype'] = ''
specs['pool'] = self.configuration.ise_storage_pool
specs['raid'] = self.configuration.ise_raid
if type_id is not None:
volume_type = volume_types.get_volume_type(ctxt, type_id)
extra_specs = volume_type.get('extra_specs')
# Parse out RAID, pool and affinity values
for key, value in extra_specs.iteritems():
subkey = ''
if ':' in key:
fields = key.split(':')
key = fields[0]
subkey = fields[1]
if string.upper(key) == string.upper('Feature'):
if string.upper(subkey) == string.upper('Raid'):
specs['raid'] = value
elif string.upper(subkey) == string.upper('Pool'):
specs['pool'] = value
elif string.upper(key) == string.upper('Affinity'):
# Only fill this in if ISE FW supports volume affinity
if self.configuration.ise_affinity:
if string.upper(subkey) == string.upper('Type'):
specs['affinity'] = value
elif string.upper(key) == string.upper('Alloc'):
# Only fill this in if ISE FW supports thin provisioning
if self.configuration.san_thin_provision:
if string.upper(subkey) == string.upper('Type'):
specs['alloctype'] = value
return specs
def _get_qos_specs(self, ctxt, type_id):
"""Get QoS specs from volume type."""
specs = {}
specs['minIOPS'] = ''
specs['maxIOPS'] = ''
specs['burstIOPS'] = ''
if type_id is not None:
volume_type = volume_types.get_volume_type(ctxt, type_id)
qos_specs_id = volume_type.get('qos_specs_id')
if qos_specs_id is not None:
kvs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs']
else:
kvs = volume_type.get('extra_specs')
# Parse out min, max and burst values
for key, value in kvs.iteritems():
if ':' in key:
fields = key.split(':')
key = fields[1]
if string.upper(key) == string.upper('minIOPS'):
specs['minIOPS'] = value
elif string.upper(key) == string.upper('maxIOPS'):
specs['maxIOPS'] = value
elif string.upper(key) == string.upper('burstIOPS'):
specs['burstIOPS'] = value
return specs
def create_volume(self, volume):
    """Create requested volume on the ISE.

    Issues the create REST call with pool/RAID/affinity/QoS settings
    derived from the volume type, then polls until the new volume
    reaches operational status (creation completes asynchronously).
    Raises via RaiseXIODriverException() on failure.
    """
    LOG.debug("X-IO create_volume called.")
    # get extra_specs and qos based on volume type
    # these functions fill in default values for entries used below
    ctxt = context.get_admin_context()
    type_id = volume['volume_type_id']
    extra_specs = self._get_extra_specs(ctxt, type_id)
    LOG.debug("Volume %s extra_specs %s", volume['name'], extra_specs)
    qos = self._get_qos_specs(ctxt, type_id)
    # Make create call
    url = '/storage/arrays/%s/volumes' % (self._get_ise_globalid())
    resp = self._send_cmd('POST', url,
                          {'name': volume['name'],
                           'size': volume['size'],
                           'pool': extra_specs['pool'],
                           'redundancy': extra_specs['raid'],
                           'affinity': extra_specs['affinity'],
                           'alloctype': extra_specs['alloctype'],
                           'IOPSmin': qos['minIOPS'],
                           'IOPSmax': qos['maxIOPS'],
                           'IOPSburst': qos['burstIOPS']})
    if resp['status'] != 201:
        msg = (_LE("Failed to create volume: %(name)s (%(status)s)") %
               {'name': volume['name'], 'status': resp['status']})
        LOG.error(msg)
        RaiseXIODriverException()
    # Good response. Make sure volume is in operational state before
    # returning. Volume creation completes asynchronously.
    args = {}
    args['name'] = volume['name']
    args['status_string'] = OPERATIONAL_STATUS
    retries = self.configuration.ise_completion_retries
    vol_info = self._wait_for_completion(self._help_wait_for_status,
                                         args, retries)
    if OPERATIONAL_STATUS in vol_info['string']:
        # BUG FIX: the old code built a (msg, arg) tuple and logged the
        # tuple object; pass the format argument to LOG.info instead.
        LOG.info(_LI("Volume %s created"), volume['name'])
    else:
        msg = _LE("Failed to create volume %s.") % volume['name']
        LOG.error(msg)
        RaiseXIODriverException()
    return
def create_cloned_volume(self, volume, src_vref):
    """Create a full clone of an existing volume."""
    LOG.debug("X-IO create_cloned_volume called.")
    self._create_clone(src_vref, volume, 'clone')

def create_snapshot(self, snapshot):
    """Create snapshot"""
    LOG.debug("X-IO create_snapshot called.")
    # Creating a snapshot uses same interface as clone operation on
    # ISE. Clone type ('snapshot' or 'clone') tells the ISE what kind
    # of operation is requested.
    self._create_clone(snapshot, snapshot, 'snapshot')

def create_volume_from_snapshot(self, volume, snapshot):
    """Create volume from snapshot"""
    LOG.debug("X-IO create_volume_from_snapshot called.")
    # ISE snapshots are just like a volume so this is a clone operation.
    self._create_clone(snapshot, volume, 'clone')
def _delete_volume(self, volume):
    """Delete the specified volume (or snapshot) from the ISE.

    Unpresents the volume from every host first, then issues a DELETE
    against the volume's location URL.  A missing volume is logged as
    a warning and treated as success (idempotent delete).
    """
    LOG.debug("X-IO delete_volume called.")
    # First unpresent volume from all hosts.
    self._alloc_location(volume, '', 1)
    # Get volume status. Location string for volume comes back
    # in response. Used for DELETE call below.
    vol_info = self._get_volume_info(volume['name'])
    if vol_info['location'] == '':
        msg = _LW("Delete volume: %s not found!") % volume['name']
        LOG.warning(msg)
        return
    # Make DELETE call, retrying until the ISE accepts it.
    args = {}
    args['method'] = 'DELETE'
    args['url'] = vol_info['location']
    args['arglist'] = {}
    args['status'] = 204
    retries = self.configuration.ise_completion_retries
    resp = self._wait_for_completion(self._help_call_method, args, retries)
    if resp['status'] == 204:
        # BUG FIX: the old code assigned a (msg, arg) tuple and logged
        # the tuple object; pass the format argument to LOG.info.
        LOG.info(_LI("Volume %s deleted."), volume['name'])
    return
def delete_volume(self, volume):
    """Delete specified volume"""
    LOG.debug("X-IO delete_volume called.")
    self._delete_volume(volume)

def delete_snapshot(self, snapshot):
    """Delete snapshot"""
    LOG.debug("X-IO delete_snapshot called.")
    # Delete snapshot and delete volume is identical to ISE.
    self._delete_volume(snapshot)
def _modify_volume(self, volume, new_attributes):
    """Apply new_attributes to an existing volume via a PUT call.

    Returns True on success; raises via RaiseXIODriverException() when
    the volume does not exist or the PUT is rejected.
    """
    # Look the volume up first; its location URL identifies it in the
    # PUT request below.
    vol_info = self._get_volume_info(volume['name'])
    if vol_info['location'] == '':
        msg = _LE("modify volume: %s does not exist!") % volume['name']
        LOG.error(msg)
        RaiseXIODriverException()
    # Issue the modify call against the volume's location URL.
    resp = self._send_cmd('PUT', vol_info['location'], new_attributes)
    status = resp['status']
    if status == 201:
        LOG.debug("Volume %s modified.", volume['name'])
        return True
    msg = (_LE("Modify volume PUT failed: %(name)s (%(status)d).") %
           {'name': volume['name'], 'status': status})
    LOG.error(msg)
    RaiseXIODriverException()
def extend_volume(self, volume, new_size):
    """Extend volume to new size (GB)."""
    LOG.debug("extend_volume called")
    ret = self._modify_volume(volume, {'size': new_size})
    if ret is True:
        # BUG FIX: the old code assigned a (msg, args) tuple and logged
        # the tuple object; pass the format dict to LOG.info instead.
        LOG.info(_LI("volume %(name)s extended to %(size)d."),
                 {'name': volume['name'], 'size': new_size})
    return
def retype(self, ctxt, volume, new_type, diff, host):
    """Convert the volume to be of the new type.

    Only the QoS (IOPS) settings of the new type are applied; other
    differences are ignored.  Returns True on success.
    """
    LOG.debug("X-IO retype called")
    qos = self._get_qos_specs(ctxt, new_type['id'])
    ret = self._modify_volume(volume, {'IOPSmin': qos['minIOPS'],
                                       'IOPSmax': qos['maxIOPS'],
                                       'IOPSburst': qos['burstIOPS']})
    if ret is True:
        # BUG FIX: the old code assigned a (msg, arg) tuple and logged
        # the tuple object; pass the format argument to LOG.info.
        LOG.info(_LI("Volume %s retyped."), volume['name'])
    return True
def manage_existing(self, volume, ise_volume_ref):
    """Convert an existing ISE volume to a Cinder volume.

    Renames the ISE volume (identified by ref['source-name']) to the
    Cinder volume name and applies the Cinder type's QoS settings.
    Returns True on success.
    """
    LOG.debug("X-IO manage_existing called")
    if 'source-name' not in ise_volume_ref:
        msg = _LE("manage_existing: No source-name in ref!")
        LOG.error(msg)
        RaiseXIODriverException()
    # copy the source-name to 'name' for modify volume use
    ise_volume_ref['name'] = ise_volume_ref['source-name']
    ctxt = context.get_admin_context()
    qos = self._get_qos_specs(ctxt, volume['volume_type_id'])
    ret = self._modify_volume(ise_volume_ref,
                              {'name': volume['name'],
                               'IOPSmin': qos['minIOPS'],
                               'IOPSmax': qos['maxIOPS'],
                               'IOPSburst': qos['burstIOPS']})
    if ret is True:
        # BUG FIX: the old code assigned a (msg, arg) tuple and logged
        # the tuple object; pass the format argument to LOG.info.
        LOG.info(_LI("Volume %s converted."), ise_volume_ref['name'])
    return ret
def manage_existing_get_size(self, volume, ise_volume_ref):
    """Return the size (GB) of an existing ISE volume to be managed."""
    LOG.debug("X-IO manage_existing_get_size called")
    if 'source-name' not in ise_volume_ref:
        msg = _LE("manage_existing_get_size: No source-name in ref!")
        LOG.error(msg)
        RaiseXIODriverException()
    ref_name = ise_volume_ref['source-name']
    # Query volume status; the reply includes the size.
    vol_info = self._get_volume_info(ref_name)
    if vol_info['location'] == '':
        msg = (_LE("manage_existing_get_size: %s does not exist!") %
               ref_name)
        LOG.error(msg)
        RaiseXIODriverException()
    return int(vol_info['size'])
def unmanage(self, volume):
    """Remove Cinder management from an ISE volume.

    Only validates that the volume exists; the ISE stores no Cinder
    specific state, so nothing else needs to be done.
    """
    LOG.debug("X-IO unmanage called")
    vol_info = self._get_volume_info(volume['name'])
    if vol_info['location'] == '':
        msg = _LE("unmanage: Volume %s does not exist!") % volume['name']
        LOG.error(msg)
        RaiseXIODriverException()
def ise_present(self, volume, hostname_in, endpoints):
    """Set up presentation for volume and specified connector.

    Ensures a host object exists on the ISE for the connector's
    endpoints (creating one if needed and marking it as an OpenStack
    host), presents the volume to it, and returns a dict with
    'target_lun', 'volume_id' and 'access_mode'.
    """
    LOG.debug("X-IO ise_present called.")
    # Create host entry on ISE if necessary.
    # Check to see if host entry already exists.
    # Create if not found
    host = self._find_host(endpoints)
    if host['name'] == '':
        # host not found, so create new host entry
        # Use host name if filled in. If blank, ISE will make up a name.
        self._create_host(hostname_in, endpoints)
        host = self._find_host(endpoints)
        if host['name'] == '':
            # host still not found, this is fatal.
            msg = _LE("Host could not be found!")
            LOG.error(msg)
            RaiseXIODriverException()
    elif string.upper(host['type']) != 'OPENSTACK':
        # Make sure host type is marked as Openstack host
        params = {'os': 'openstack'}
        resp = self._send_cmd('PUT', host['locator'], params)
        status = resp['status']
        # 201 = updated, 409 = already set; both acceptable.
        if status != 201 and status != 409:
            msg = _LE("Host PUT failed (%s).") % status
            LOG.error(msg)
            RaiseXIODriverException()
    # We have a host object.
    # Empty target_lun lets the ISE pick the LUN.
    target_lun = ''
    # Present volume to host.
    target_lun = self._present_volume(volume, host['name'], target_lun)
    # Fill in target information.
    data = {}
    data['target_lun'] = target_lun
    data['volume_id'] = volume['id']
    data['access_mode'] = 'rw'
    return data
def ise_unpresent(self, volume, endpoints):
    """Delete presentation between volume and connector.

    Returns the ISE host name that was unpresented, or '' when no
    matching host was found.
    """
    LOG.debug("X-IO ise_unpresent called.")
    # Delete allocation uses host name. Go find it based on endpoints.
    host = self._find_host(endpoints)
    hostname = host['name']
    if hostname != '':
        # Delete allocation based on hostname and volume.
        self._alloc_location(volume, hostname, 1)
    return hostname
def create_export(self, context, volume):
    """No-op: the ISE needs no explicit export step (returns None)."""
    LOG.debug("X-IO create_export called.")

def ensure_export(self, context, volume):
    """No-op: exports need no re-establishment on service restart."""
    LOG.debug("X-IO ensure_export called.")

def remove_export(self, context, volume):
    """No-op: presentations are torn down via ise_unpresent instead."""
    LOG.debug("X-IO remove_export called.")

def local_path(self, volume):
    """Not supported: ISE volumes have no local path (returns None)."""
    LOG.debug("X-IO local_path called.")
def delete_host(self, endpoints):
    """Delete the ISE host object matching the given endpoints, if any."""
    host = self._find_host(endpoints)
    locator = host['locator']
    if locator != '':
        # Host found; delete it via its locator URL.
        self._send_cmd('DELETE', locator, {})
        LOG.debug("X-IO: host %s deleted", host['name'])
# Protocol-specific classes for entry. They are wrappers around the base class
# above, and every external API call results in a call to a common function in
# the base class.
class XIOISEISCSIDriver(driver.ISCSIDriver):
    """iSCSI front-end wrapper around the common XIOISEDriver.

    Requires ISE Running FW version 3.1.0 or higher
    """

    def __init__(self, *args, **kwargs):
        super(XIOISEISCSIDriver, self).__init__(*args, **kwargs)
        self.configuration.append_config_values(XIO_OPTS)
        self.configuration.append_config_values(san.san_opts)
        # The iscsi_ip_address must always be set.
        if self.configuration.iscsi_ip_address == '':
            err_msg = _LE("iscsi_ip_address must be set!")
            LOG.error(err_msg)
            RaiseXIODriverException()
        # Setup common driver
        self.driver = XIOISEDriver(configuration=self.configuration)

    def do_setup(self, context):
        """Delegate to the common driver."""
        return self.driver.do_setup(context)

    def check_for_setup_error(self):
        """Delegate to the common driver."""
        return self.driver.check_for_setup_error()

    def local_path(self, volume):
        """Delegate to the common driver."""
        return self.driver.local_path(volume)

    def get_volume_stats(self, refresh=False):
        """Return common stats tagged with the iSCSI protocol."""
        data = self.driver.get_volume_stats(refresh)
        data["storage_protocol"] = 'iSCSI'
        return data

    def create_volume(self, volume):
        """Create volume, then report CHAP credentials in model update."""
        self.driver.create_volume(volume)
        # Volume created successfully. Fill in CHAP information.
        model_update = {}
        chap = {}
        chap = self.driver.find_target_chap()
        if chap['chap_user'] != '':
            model_update['provider_auth'] = 'CHAP %s %s' % \
                (chap['chap_user'], chap['chap_passwd'])
        else:
            model_update['provider_auth'] = ''
        return model_update

    def create_cloned_volume(self, volume, src_vref):
        """Delegate to the common driver."""
        return self.driver.create_cloned_volume(volume, src_vref)

    def create_volume_from_snapshot(self, volume, snapshot):
        """Delegate to the common driver."""
        return self.driver.create_volume_from_snapshot(volume, snapshot)

    def delete_volume(self, volume):
        """Delegate to the common driver."""
        return self.driver.delete_volume(volume)

    def extend_volume(self, volume, new_size):
        """Delegate to the common driver."""
        return self.driver.extend_volume(volume, new_size)

    def retype(self, ctxt, volume, new_type, diff, host):
        """Delegate to the common driver."""
        return self.driver.retype(ctxt, volume, new_type, diff, host)

    def manage_existing(self, volume, ise_volume_ref):
        """Manage an existing ISE volume, reporting CHAP credentials."""
        ret = self.driver.manage_existing(volume, ise_volume_ref)
        if ret is True:
            # Volume converted successfully. Fill in CHAP information.
            model_update = {}
            chap = {}
            chap = self.driver.find_target_chap()
            if chap['chap_user'] != '':
                model_update['provider_auth'] = 'CHAP %s %s' % \
                    (chap['chap_user'], chap['chap_passwd'])
            else:
                model_update['provider_auth'] = ''
            return model_update

    def manage_existing_get_size(self, volume, ise_volume_ref):
        """Delegate to the common driver."""
        return self.driver.manage_existing_get_size(volume, ise_volume_ref)

    def unmanage(self, volume):
        """Delegate to the common driver."""
        return self.driver.unmanage(volume)

    def initialize_connection(self, volume, connector):
        """Present the volume to the initiator and build connection info."""
        hostname = ''
        if 'host' in connector:
            hostname = connector['host']
        data = self.driver.ise_present(volume, hostname,
                                       connector['initiator'])
        # find IP for target
        data['target_portal'] = \
            '%s:3260' % (self.configuration.iscsi_ip_address)
        # set IQN for target
        data['target_discovered'] = False
        data['target_iqn'] = \
            self.driver.find_target_iqn(self.configuration.iscsi_ip_address)
        # Fill in authentication method (CHAP)
        if 'provider_auth' in volume:
            auth = volume['provider_auth']
            if auth:
                # provider_auth is "CHAP <user> <password>".
                (auth_method, auth_username, auth_secret) = auth.split()
                data['auth_method'] = auth_method
                data['auth_username'] = auth_username
                data['auth_password'] = auth_secret
        return {'driver_volume_type': 'iscsi',
                'data': data}

    def terminate_connection(self, volume, connector, **kwargs):
        """Unpresent the volume; drop the host once it has no allocations."""
        hostname = self.driver.ise_unpresent(volume, connector['initiator'])
        alloc_cnt = 0
        if hostname != '':
            alloc_cnt = self.driver.find_allocations(hostname)
            if alloc_cnt == 0:
                # delete host object
                self.driver.delete_host(connector['initiator'])

    def create_snapshot(self, snapshot):
        """Delegate to the common driver."""
        return self.driver.create_snapshot(snapshot)

    def delete_snapshot(self, snapshot):
        """Delegate to the common driver."""
        return self.driver.delete_snapshot(snapshot)

    def create_export(self, context, volume):
        """Delegate to the common driver."""
        return self.driver.create_export(context, volume)

    def ensure_export(self, context, volume):
        """Delegate to the common driver."""
        return self.driver.ensure_export(context, volume)

    def remove_export(self, context, volume):
        """Delegate to the common driver."""
        return self.driver.remove_export(context, volume)
class XIOISEFCDriver(driver.FibreChannelDriver):
    """Fibre Channel front-end wrapper around the common XIOISEDriver.

    Requires ISE Running FW version 2.8.0 or higher
    """

    def __init__(self, *args, **kwargs):
        super(XIOISEFCDriver, self).__init__(*args, **kwargs)
        self.configuration.append_config_values(XIO_OPTS)
        self.configuration.append_config_values(san.san_opts)
        # Setup common driver
        self.driver = XIOISEDriver(configuration=self.configuration)

    def do_setup(self, context):
        """Delegate to the common driver."""
        return self.driver.do_setup(context)

    def check_for_setup_error(self):
        """Delegate to the common driver."""
        return self.driver.check_for_setup_error()

    def local_path(self, volume):
        """Delegate to the common driver."""
        return self.driver.local_path(volume)

    def get_volume_stats(self, refresh=False):
        """Return common stats tagged with the FC protocol."""
        data = self.driver.get_volume_stats(refresh)
        data["storage_protocol"] = 'fibre_channel'
        return data

    def create_volume(self, volume):
        """Delegate to the common driver."""
        return self.driver.create_volume(volume)

    def create_cloned_volume(self, volume, src_vref):
        """Delegate to the common driver."""
        return self.driver.create_cloned_volume(volume, src_vref)

    def create_volume_from_snapshot(self, volume, snapshot):
        """Delegate to the common driver."""
        return self.driver.create_volume_from_snapshot(volume, snapshot)

    def delete_volume(self, volume):
        """Delegate to the common driver."""
        return self.driver.delete_volume(volume)

    def extend_volume(self, volume, new_size):
        """Delegate to the common driver."""
        return self.driver.extend_volume(volume, new_size)

    def retype(self, ctxt, volume, new_type, diff, host):
        """Delegate to the common driver."""
        return self.driver.retype(ctxt, volume, new_type, diff, host)

    def manage_existing(self, volume, ise_volume_ref):
        """Delegate to the common driver."""
        return self.driver.manage_existing(volume, ise_volume_ref)

    def manage_existing_get_size(self, volume, ise_volume_ref):
        """Delegate to the common driver."""
        return self.driver.manage_existing_get_size(volume, ise_volume_ref)

    def unmanage(self, volume):
        """Delegate to the common driver."""
        return self.driver.unmanage(volume)

    @fczm_utils.AddFCZone
    def initialize_connection(self, volume, connector):
        """Present the volume over FC and build the initiator/target map."""
        hostname = ''
        if 'host' in connector:
            hostname = connector['host']
        data = self.driver.ise_present(volume, hostname, connector['wwpns'])
        data['target_discovered'] = True
        # set wwns for target
        target_wwns = self.driver.find_target_wwns()
        data['target_wwn'] = target_wwns
        # build target initiator map
        target_map = {}
        for initiator in connector['wwpns']:
            target_map[initiator] = target_wwns
        data['initiator_target_map'] = target_map
        return {'driver_volume_type': 'fibre_channel',
                'data': data}

    @fczm_utils.RemoveFCZone
    def terminate_connection(self, volume, connector, **kwargs):
        """Unpresent the volume; report zoning info when host is gone."""
        # now we are ready to tell ISE to delete presentations
        hostname = self.driver.ise_unpresent(volume, connector['wwpns'])
        # set target_wwn and initiator_target_map only if host
        # has no more presentations
        data = {}
        alloc_cnt = 0
        if hostname != '':
            alloc_cnt = self.driver.find_allocations(hostname)
            if alloc_cnt == 0:
                target_wwns = self.driver.find_target_wwns()
                data['target_wwn'] = target_wwns
                # build target initiator map
                target_map = {}
                for initiator in connector['wwpns']:
                    target_map[initiator] = target_wwns
                data['initiator_target_map'] = target_map
                # delete host object
                self.driver.delete_host(connector['wwpns'])
        return {'driver_volume_type': 'fibre_channel',
                'data': data}

    def create_snapshot(self, snapshot):
        """Delegate to the common driver."""
        return self.driver.create_snapshot(snapshot)

    def delete_snapshot(self, snapshot):
        """Delegate to the common driver."""
        return self.driver.delete_snapshot(snapshot)

    def create_export(self, context, volume):
        """Delegate to the common driver."""
        return self.driver.create_export(context, volume)

    def ensure_export(self, context, volume):
        """Delegate to the common driver."""
        return self.driver.ensure_export(context, volume)

    def remove_export(self, context, volume):
        """Delegate to the common driver."""
        return self.driver.remove_export(context, volume)
| {
"content_hash": "977980e2a76422329037f56790959592",
"timestamp": "",
"source": "github",
"line_count": 1633,
"max_line_length": 79,
"avg_line_length": 41.78567054500918,
"alnum_prop": 0.5513951579811244,
"repo_name": "tmenjo/cinder-2015.1.0",
"id": "c44f0ba4865df214913365d6cd517e4d20dc33a0",
"size": "68861",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "cinder/volume/drivers/xio.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PLpgSQL",
"bytes": "2511"
},
{
"name": "Python",
"bytes": "10650346"
},
{
"name": "Shell",
"bytes": "8111"
}
],
"symlink_target": ""
} |
"""
$description Turkish live TV channels from Dogus Group, including Euro Star, Star and NTV.
$url eurostartv.com.tr
$url kralmuzik.com.tr
$url ntv.com.tr
$url startv.com.tr
$type live
"""
import re
from streamlink.plugin import Plugin, pluginmatcher
from streamlink.plugin.api import validate
from streamlink.stream.hls import HLSStream
@pluginmatcher(re.compile(r"https?://(?:www\.)?eurostartv\.com\.tr/canli-izle"))
@pluginmatcher(re.compile(r"https?://(?:www\.)?kralmuzik\.com\.tr/tv/.+"))
@pluginmatcher(re.compile(r"https?://(?:www\.)?ntv\.com\.tr/canli-yayin/ntv"))
@pluginmatcher(re.compile(r"https?://(?:www\.)?startv\.com\.tr/canli-yayin"))
class Dogus(Plugin):
    # Direct HLS live URL embedded in a page script.
    _re_live_hls = re.compile(r"'(https?://[^']+/live/hls/[^']+)'")
    # Video ID passed to youtube.init('<id>').
    _re_yt_script = re.compile(r"youtube\.init\('([\w-]{11})'")

    def _get_streams(self):
        """Resolve the live stream from an embedded YouTube/Dailymotion
        iframe, a youtube.init() script, or a direct HLS URL."""
        page = self.session.http.get(self.url, schema=validate.Schema(validate.parse_html()))
        # https://www.ntv.com.tr/canli-yayin/ntv?youtube=true
        yt_embed = page.xpath("string(.//iframe[contains(@src,'youtube.com')][1]/@src)")
        # https://www.startv.com.tr/canli-yayin
        dm_embed = page.xpath("string(.//iframe[contains(@src,'dailymotion.com')][1]/@src)")
        # https://www.kralmuzik.com.tr/tv/kral-tv
        # https://www.kralmuzik.com.tr/tv/kral-pop-tv
        init_script = page.xpath("string(.//script[contains(text(), 'youtube.init')][1]/text())")
        if init_script:
            found = self._re_yt_script.search(init_script)
            if found:
                yt_embed = f"https://www.youtube.com/watch?v={found.group(1)}"
        embed = yt_embed or dm_embed
        if embed:
            return self.session.streams(embed)
        # http://eurostartv.com.tr/canli-izle
        hls_script = page.xpath("string(.//script[contains(text(), '/live/hls/')][1]/text())")
        if hls_script:
            found = self._re_live_hls.search(hls_script)
            if found:
                return HLSStream.parse_variant_playlist(self.session, found.group(1))


__plugin__ = Dogus
| {
"content_hash": "c702f0d1ac8173c37c5301ae5b78d1ab",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 95,
"avg_line_length": 39.23076923076923,
"alnum_prop": 0.6225490196078431,
"repo_name": "chhe/streamlink",
"id": "3664a1ecc546e829d20a60c7a3d51f959fd18995",
"size": "2040",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "src/streamlink/plugins/dogus.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "1513527"
},
{
"name": "Shell",
"bytes": "6427"
}
],
"symlink_target": ""
} |
import requests
def generate_html(top_lessons):
    """Render *top_lessons* as a standalone HTML page with one table row per lesson.

    Each lesson is a mapping providing ``id``, ``title``, ``viewed_by``,
    ``passed_by`` and ``vote_delta`` keys.
    """
    row_template = """<tr>
<td><a href="https://stepik.org/lesson/{id}">{title}</a></td>
<td>{viewed_by}</td>
<td>{passed_by}</td>
<td>{vote_delta}</td>
</tr>"""
    # Build all rows in one pass; join avoids repeated string concatenation.
    rows = "".join(row_template.format(**lesson) for lesson in top_lessons)
    return f"""
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset=utf-8 />
<title>Top lessons sorted by number of views</title>
</head>
<body>
<table border="1" align="center">
<tr>
<th>Course title</th>
<th>Viewed by</th>
<th>Passed by</th>
<th>Vote delta</th>
</tr>
{rows}
</table>
</body>
</html>
"""
def main():
    """Fetch top lessons from the Stepik API, sort them by view count,
    and write an HTML report to ``top_lessons.html``.
    """
    # To sort by votes we use the "order" query parameter; the API does the
    # vote_delta ordering server-side, view ordering is done locally below.
    response = requests.get(
        "https://stepik.org:443/api/lessons?order=-vote_delta",
        # Fail fast instead of hanging forever on a stalled connection.
        timeout=30,
    )
    # Surface HTTP errors explicitly rather than failing later on bad JSON.
    response.raise_for_status()
    top_lessons = response.json()["lessons"]
    # Sort by number of views, most viewed first.
    lessons = sorted(top_lessons, key=lambda lesson: lesson["viewed_by"], reverse=True)
    # "w" (not "w+") — the file is only written. utf-8 matches the
    # <meta charset=utf-8> declared in the generated document.
    with open("top_lessons.html", "w", encoding="utf-8") as f:
        f.write(generate_html(lessons))


if __name__ == "__main__":
    main()
| {
"content_hash": "d30fad30af01d0914556f5bdf0eed0b4",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 104,
"avg_line_length": 25.23076923076923,
"alnum_prop": 0.49390243902439024,
"repo_name": "StepicOrg/Stepic-API",
"id": "041a7908e05b22ed9d2adc23b52ede3ccd87d933",
"size": "1312",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/top_lessons_to_html.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "753"
}
],
"symlink_target": ""
} |
"""Keras implementation of StableDiffusion.
Credits:
- Original implementation: https://github.com/CompVis/stable-diffusion
- Initial TF/Keras port: https://github.com/divamgupta/stable-diffusion-tensorflow
The current implementation is a rewrite of the initial TF/Keras port by Divam Gupta.
"""
import math
import numpy as np
import tensorflow as tf
from tensorflow import keras
from keras_cv.models.stable_diffusion.clip_tokenizer import SimpleTokenizer
from keras_cv.models.stable_diffusion.constants import _ALPHAS_CUMPROD
from keras_cv.models.stable_diffusion.constants import _UNCONDITIONAL_TOKENS
from keras_cv.models.stable_diffusion.decoder import Decoder
from keras_cv.models.stable_diffusion.diffusion_model import DiffusionModel
from keras_cv.models.stable_diffusion.image_encoder import ImageEncoder
from keras_cv.models.stable_diffusion.text_encoder import TextEncoder
# All prompts are padded/truncated to this fixed CLIP token length.
MAX_PROMPT_LENGTH = 77


class StableDiffusion:
    """Keras implementation of Stable Diffusion.

    Note that the StableDiffusion API, as well as the APIs of the sub-components
    of StableDiffusion (e.g. ImageEncoder, DiffusionModel) should be considered
    unstable at this point. We do not guarantee backwards compatibility for
    future changes to these APIs.

    Stable Diffusion is a powerful image generation model that can be used,
    among other things, to generate pictures according to a short text description
    (called a "prompt").

    Arguments:
        img_height: Height of the images to generate, in pixel. Note that only
            multiples of 128 are supported; the value provided will be rounded
            to the nearest valid value. Default: 512.
        img_width: Width of the images to generate, in pixel. Note that only
            multiples of 128 are supported; the value provided will be rounded
            to the nearest valid value. Default: 512.
        jit_compile: Whether to compile the underlying models to XLA.
            This can lead to a significant speedup on some systems. Default: False.

    Example:

    ```python
    from keras_cv.models import StableDiffusion
    from PIL import Image

    model = StableDiffusion(img_height=512, img_width=512, jit_compile=True)
    img = model.text_to_image(
        prompt="A beautiful horse running through a field",
        batch_size=1,  # How many images to generate at once
        num_steps=25,  # Number of iterations (controls image quality)
        seed=123,  # Set this to always get the same image from the same prompt
    )
    Image.fromarray(img[0]).save("horse.png")
    print("saved at horse.png")
    ```

    References:
    - [About Stable Diffusion](https://stability.ai/blog/stable-diffusion-announcement)
    - [Original implementation](https://github.com/CompVis/stable-diffusion)
    """

    def __init__(
        self,
        img_height=512,
        img_width=512,
        jit_compile=False,
    ):
        # UNet requires multiples of 2**7 = 128
        img_height = round(img_height / 128) * 128
        img_width = round(img_width / 128) * 128
        self.img_height = img_height
        self.img_width = img_width

        # lazy initialize the component models and the tokenizer
        self._image_encoder = None
        self._text_encoder = None
        self._diffusion_model = None
        self._decoder = None
        self._tokenizer = None

        self.jit_compile = jit_compile

        # License notice is printed once per model construction.
        print(
            "By using this model checkpoint, you acknowledge that its usage is "
            "subject to the terms of the CreativeML Open RAIL-M license at "
            "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/LICENSE"
        )

    def text_to_image(
        self,
        prompt,
        batch_size=1,
        num_steps=25,
        unconditional_guidance_scale=7.5,
        seed=None,
    ):
        """Convenience wrapper: encode `prompt` and run `generate_image` on it."""
        encoded_text = self.encode_text(prompt)

        return self.generate_image(
            encoded_text,
            batch_size=batch_size,
            num_steps=num_steps,
            unconditional_guidance_scale=unconditional_guidance_scale,
            seed=seed,
        )

    def encode_text(self, prompt):
        """Encodes a prompt into a latent text encoding.

        The encoding produced by this method should be used as the
        `encoded_text` parameter of `StableDiffusion.generate_image`. Encoding
        text separately from generating an image can be used to arbitrarily
        modify the text encoding prior to image generation, e.g. for walking
        between two prompts.

        Args:
            prompt: a string to encode, must be 77 tokens or shorter.

        Example:

        ```python
        from keras_cv.models import StableDiffusion

        model = StableDiffusion(img_height=512, img_width=512, jit_compile=True)
        encoded_text = model.encode_text("Tacos at dawn")
        img = model.generate_image(encoded_text)
        ```
        """
        # Tokenize prompt (i.e. starting context)
        inputs = self.tokenizer.encode(prompt)
        if len(inputs) > MAX_PROMPT_LENGTH:
            raise ValueError(
                f"Prompt is too long (should be <= {MAX_PROMPT_LENGTH} tokens)"
            )
        # Pad out to the fixed length with token id 49407
        # (presumably CLIP's end-of-text token — verify against the tokenizer).
        phrase = inputs + [49407] * (MAX_PROMPT_LENGTH - len(inputs))
        phrase = tf.convert_to_tensor([phrase], dtype=tf.int32)

        context = self.text_encoder.predict_on_batch([phrase, self._get_pos_ids()])

        return context

    def generate_image(
        self,
        encoded_text,
        batch_size=1,
        num_steps=25,
        unconditional_guidance_scale=7.5,
        diffusion_noise=None,
        seed=None,
    ):
        """Generates an image based on encoded text.

        The encoding passed to this method should be derived from
        `StableDiffusion.encode_text`.

        Args:
            encoded_text: Tensor of shape (`batch_size`, 77, 768), or a Tensor
                of shape (77, 768). When the batch axis is omitted, the same encoded
                text will be used to produce every generated image.
            batch_size: number of images to generate. Default: 1.
            num_steps: number of diffusion steps (controls image quality).
                Default: 25.
            unconditional_guidance_scale: float controlling how closely the image
                should adhere to the prompt. Larger values result in more
                closely adhering to the prompt, but will make the image noisier.
                Default: 7.5.
            diffusion_noise: Tensor of shape (`batch_size`, img_height // 8,
                img_width // 8, 4), or a Tensor of shape (img_height // 8,
                img_width // 8, 4). Optional custom noise to seed the diffusion
                process. When the batch axis is omitted, the same noise will be
                used to seed diffusion for every generated image.
            seed: integer which is used to seed the random generation of
                diffusion noise, only to be specified if `diffusion_noise` is
                None.

        Example:

        ```python
        from keras_cv.models import StableDiffusion

        batch_size = 8
        model = StableDiffusion(img_height=512, img_width=512, jit_compile=True)
        e_tacos = model.encode_text("Tacos at dawn")
        e_watermelons = model.encode_text("Watermelons at dusk")

        e_interpolated = tf.linspace(e_tacos, e_watermelons, batch_size)
        images = model.generate_image(e_interpolated, batch_size=batch_size)
        ```
        """
        if diffusion_noise is not None and seed is not None:
            raise ValueError(
                "`diffusion_noise` and `seed` should not both be passed to "
                "`generate_image`. `seed` is only used to generate diffusion "
                "noise when it's not already user-specified."
            )

        # Broadcast a single (77, 768) encoding across the whole batch.
        encoded_text = tf.squeeze(encoded_text)
        if encoded_text.shape.rank == 2:
            encoded_text = tf.repeat(
                tf.expand_dims(encoded_text, axis=0), batch_size, axis=0
            )

        context = encoded_text
        # The "empty prompt" encoding used as the guidance baseline.
        unconditional_context = tf.repeat(
            self._get_unconditional_context(), batch_size, axis=0
        )

        if diffusion_noise is not None:
            diffusion_noise = tf.squeeze(diffusion_noise)
            if diffusion_noise.shape.rank == 3:
                diffusion_noise = tf.repeat(
                    tf.expand_dims(diffusion_noise, axis=0), batch_size, axis=0
                )
            latent = diffusion_noise
        else:
            latent = self._get_initial_diffusion_noise(batch_size, seed)

        # Iterative reverse diffusion stage
        timesteps = tf.range(1, 1000, 1000 // num_steps)
        alphas, alphas_prev = self._get_initial_alphas(timesteps)
        progbar = keras.utils.Progbar(len(timesteps))
        iteration = 0
        # Walk the timesteps from most noisy to least noisy.
        for index, timestep in list(enumerate(timesteps))[::-1]:
            latent_prev = latent  # Set aside the previous latent vector
            t_emb = self._get_timestep_embedding(timestep, batch_size)
            # Classifier-free guidance: blend the conditional and
            # unconditional noise predictions.
            unconditional_latent = self.diffusion_model.predict_on_batch(
                [latent, t_emb, unconditional_context]
            )
            latent = self.diffusion_model.predict_on_batch([latent, t_emb, context])
            latent = unconditional_latent + unconditional_guidance_scale * (
                latent - unconditional_latent
            )
            a_t, a_prev = alphas[index], alphas_prev[index]
            # Estimate the fully denoised image (x0) from the current noise
            # prediction, then re-noise it to the previous timestep's level.
            pred_x0 = (latent_prev - math.sqrt(1 - a_t) * latent) / math.sqrt(a_t)
            latent = latent * math.sqrt(1.0 - a_prev) + math.sqrt(a_prev) * pred_x0
            iteration += 1
            progbar.update(iteration)

        # Decoding stage
        decoded = self.decoder.predict_on_batch(latent)
        # Map decoder output from [-1, 1] to [0, 255] uint8 pixels.
        decoded = ((decoded + 1) / 2) * 255
        return np.clip(decoded, 0, 255).astype("uint8")

    def inpaint(
        self,
        prompt,
        image,
        mask,
        num_resamples=1,
        batch_size=1,
        num_steps=25,
        unconditional_guidance_scale=7.5,
        diffusion_noise=None,
        seed=None,
        verbose=True,
    ):
        """Inpaints a masked section of the provided image based on the provided prompt.

        Args:
            prompt: A string representing the prompt for generation.
            image: Tensor of shape (`batch_size`, `image_height`, `image_width`,
                3) with RGB values in [0, 255]. When the batch is omitted, the same
                image will be used as the starting image.
            mask: Tensor of shape (`batch_size`, `image_height`, `image_width`)
                with binary values 0 or 1. When the batch is omitted, the same mask
                will be used on all images.
            num_resamples: number of times to resample the generated mask region.
                Increasing the number of resamples improves the semantic fit of the
                generated mask region w.r.t the rest of the image. Default: 1.
            batch_size: number of images to generate. Default: 1.
            num_steps: number of diffusion steps (controls image quality).
                Default: 25.
            unconditional_guidance_scale: float controlling how closely the image
                should adhere to the prompt. Larger values result in more
                closely adhering to the prompt, but will make the image noisier.
                Default: 7.5.
            diffusion_noise: (Optional) Tensor of shape (`batch_size`,
                img_height // 8, img_width // 8, 4), or a Tensor of shape
                (img_height // 8, img_width // 8, 4). Optional custom noise to
                seed the diffusion process. When the batch axis is omitted, the
                same noise will be used to seed diffusion for every generated image.
            seed: (Optional) integer which is used to seed the random generation of
                diffusion noise, only to be specified if `diffusion_noise` is None.
            verbose: whether to print progress bar. Default: True.
        """
        if diffusion_noise is not None and seed is not None:
            raise ValueError(
                "Please pass either diffusion_noise or seed to inpaint(), seed "
                "is only used to generate diffusion noise when it is not provided. "
                "Received both diffusion_noise and seed."
            )

        encoded_text = self.encode_text(prompt)
        encoded_text = tf.squeeze(encoded_text)
        if encoded_text.shape.rank == 2:
            encoded_text = tf.repeat(
                tf.expand_dims(encoded_text, axis=0), batch_size, axis=0
            )

        # Map pixels from [0, 255] to [-1, 1] and encode to the latent space.
        image = tf.squeeze(image)
        image = tf.cast(image, dtype=tf.float32) / 255.0 * 2.0 - 1.0
        image = tf.expand_dims(image, axis=0)
        known_x0 = self.image_encoder(image)
        # NOTE(review): after the expand_dims above `image` has rank >= 4, so
        # this rank-3 branch looks unreachable — verify intended batching.
        if image.shape.rank == 3:
            known_x0 = tf.repeat(known_x0, batch_size, axis=0)

        # Downsample the mask by 8x to match the latent resolution
        # (latents are img_height // 8 by img_width // 8).
        mask = tf.expand_dims(mask, axis=-1)
        mask = tf.cast(
            tf.nn.max_pool2d(mask, ksize=8, strides=8, padding="SAME"),
            dtype=tf.float32,
        )
        mask = tf.squeeze(mask)
        if mask.shape.rank == 2:
            mask = tf.repeat(tf.expand_dims(mask, axis=0), batch_size, axis=0)
        mask = tf.expand_dims(mask, axis=-1)

        context = encoded_text
        unconditional_context = tf.repeat(
            self._get_unconditional_context(), batch_size, axis=0
        )

        if diffusion_noise is not None:
            diffusion_noise = tf.squeeze(diffusion_noise)
            if diffusion_noise.shape.rank == 3:
                diffusion_noise = tf.repeat(
                    tf.expand_dims(diffusion_noise, axis=0), batch_size, axis=0
                )
            latent = diffusion_noise
        else:
            latent = self._get_initial_diffusion_noise(batch_size, seed)

        # Iterative reverse diffusion stage
        timesteps = tf.range(1, 1000, 1000 // num_steps)
        alphas, alphas_prev = self._get_initial_alphas(timesteps)
        if verbose:
            progbar = keras.utils.Progbar(len(timesteps))
            iteration = 0

        for index, timestep in list(enumerate(timesteps))[::-1]:
            a_t, a_prev = alphas[index], alphas_prev[index]
            latent_prev = latent  # Set aside the previous latent vector
            t_emb = self._get_timestep_embedding(timestep, batch_size)

            for resample_index in range(num_resamples):
                # Classifier-free guidance step, identical to generate_image.
                unconditional_latent = self.diffusion_model.predict_on_batch(
                    [latent, t_emb, unconditional_context]
                )
                latent = self.diffusion_model.predict_on_batch([latent, t_emb, context])
                latent = unconditional_latent + unconditional_guidance_scale * (
                    latent - unconditional_latent
                )
                pred_x0 = (latent_prev - math.sqrt(1 - a_t) * latent) / math.sqrt(a_t)
                latent = latent * math.sqrt(1.0 - a_prev) + math.sqrt(a_prev) * pred_x0

                # Use known image (x0) to compute latent
                if timestep > 1:
                    # NOTE(review): tf.random.normal is called with the same
                    # `seed` every iteration here — confirm that is intended.
                    noise = tf.random.normal(tf.shape(known_x0), seed=seed)
                else:
                    noise = 0.0
                known_latent = (
                    math.sqrt(a_prev) * known_x0 + math.sqrt(1 - a_prev) * noise
                )
                # Use known latent in unmasked regions
                latent = mask * known_latent + (1 - mask) * latent

                # Resample latent
                if resample_index < num_resamples - 1 and timestep > 1:
                    beta_prev = 1 - (a_t / a_prev)
                    latent_prev = tf.random.normal(
                        tf.shape(latent),
                        mean=latent * math.sqrt(1 - beta_prev),
                        stddev=math.sqrt(beta_prev),
                        seed=seed,
                    )

            if verbose:
                iteration += 1
                progbar.update(iteration)

        # Decoding stage
        decoded = self.decoder.predict_on_batch(latent)
        decoded = ((decoded + 1) / 2) * 255
        return np.clip(decoded, 0, 255).astype("uint8")

    def _get_unconditional_context(self):
        # Encode the fixed "empty prompt" token sequence once per call; used
        # as the guidance baseline in generate_image/inpaint.
        unconditional_tokens = tf.convert_to_tensor(
            [_UNCONDITIONAL_TOKENS], dtype=tf.int32
        )
        unconditional_context = self.text_encoder.predict_on_batch(
            [unconditional_tokens, self._get_pos_ids()]
        )

        return unconditional_context

    @property
    def image_encoder(self):
        """image_encoder returns the VAE Encoder with pretrained weights.

        Usage:
        ```python
        sd = keras_cv.models.StableDiffusion()
        my_image = np.ones((512, 512, 3))
        latent_representation = sd.image_encoder.predict(my_image)
        ```
        """
        if self._image_encoder is None:
            self._image_encoder = ImageEncoder(self.img_height, self.img_width)
            if self.jit_compile:
                self._image_encoder.compile(jit_compile=True)
        return self._image_encoder

    @property
    def text_encoder(self):
        """text_encoder returns the text encoder with pretrained weights.

        Can be overridden for tasks like textual inversion where the text encoder
        needs to be modified.
        """
        if self._text_encoder is None:
            self._text_encoder = TextEncoder(MAX_PROMPT_LENGTH)
            if self.jit_compile:
                self._text_encoder.compile(jit_compile=True)
        return self._text_encoder

    @property
    def diffusion_model(self):
        """diffusion_model returns the diffusion model with pretrained weights.

        Can be overridden for tasks where the diffusion model needs to be modified.
        """
        if self._diffusion_model is None:
            self._diffusion_model = DiffusionModel(
                self.img_height, self.img_width, MAX_PROMPT_LENGTH
            )
            if self.jit_compile:
                self._diffusion_model.compile(jit_compile=True)
        return self._diffusion_model

    @property
    def decoder(self):
        """decoder returns the diffusion image decoder model with pretrained weights.

        Can be overridden for tasks where the decoder needs to be modified.
        """
        if self._decoder is None:
            self._decoder = Decoder(self.img_height, self.img_width)
            if self.jit_compile:
                self._decoder.compile(jit_compile=True)
        return self._decoder

    @property
    def tokenizer(self):
        """tokenizer returns the tokenizer used for text inputs.

        Can be overridden for tasks like textual inversion where the tokenizer needs
        to be modified.
        """
        if self._tokenizer is None:
            self._tokenizer = SimpleTokenizer()
        return self._tokenizer

    def _get_timestep_embedding(self, timestep, batch_size, dim=320, max_period=10000):
        # Sinusoidal embedding of the scalar timestep: cos/sin pairs over
        # log-spaced frequencies, repeated across the batch.
        half = dim // 2
        freqs = tf.math.exp(
            -math.log(max_period) * tf.range(0, half, dtype=tf.float32) / half
        )
        args = tf.convert_to_tensor([timestep], dtype=tf.float32) * freqs
        embedding = tf.concat([tf.math.cos(args), tf.math.sin(args)], 0)
        embedding = tf.reshape(embedding, [1, -1])
        return tf.repeat(embedding, batch_size, axis=0)

    def _get_initial_alphas(self, timesteps):
        # Look up the cumulative-product alphas for the sampled timesteps;
        # alphas_prev is the same sequence shifted by one (1.0 at the start).
        alphas = [_ALPHAS_CUMPROD[t] for t in timesteps]
        alphas_prev = [1.0] + alphas[:-1]

        return alphas, alphas_prev

    def _get_initial_diffusion_noise(self, batch_size, seed):
        # Stateless sampling when a seed is given, for reproducible outputs.
        if seed is not None:
            return tf.random.stateless_normal(
                (batch_size, self.img_height // 8, self.img_width // 8, 4),
                seed=[seed, seed],
            )
        else:
            return tf.random.normal(
                (batch_size, self.img_height // 8, self.img_width // 8, 4)
            )

    @staticmethod
    def _get_pos_ids():
        # Position ids 0..76 for the text encoder, batch dimension of 1.
        return tf.convert_to_tensor([list(range(MAX_PROMPT_LENGTH))], dtype=tf.int32)
| {
"content_hash": "627f9fa34510e85131b7ca2fa1e9c11b",
"timestamp": "",
"source": "github",
"line_count": 496,
"max_line_length": 99,
"avg_line_length": 40.62096774193548,
"alnum_prop": 0.5986202104427238,
"repo_name": "keras-team/keras-cv",
"id": "73f5055051d3bb2d26950163c666ab0f991a45b4",
"size": "20732",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keras_cv/models/stable_diffusion/stable_diffusion.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "20378"
},
{
"name": "Dockerfile",
"bytes": "420"
},
{
"name": "Jsonnet",
"bytes": "967"
},
{
"name": "Jupyter Notebook",
"bytes": "24377"
},
{
"name": "Python",
"bytes": "1606495"
},
{
"name": "Shell",
"bytes": "4249"
},
{
"name": "Smarty",
"bytes": "535"
},
{
"name": "Starlark",
"bytes": "10259"
}
],
"symlink_target": ""
} |
"""
Defines classes that can be used to build up a set of symbols similar to those of C#
(namespace, type, method, property, etc.) that may be manipulated and subsequently
emitted as C# source code.
"""
import sys
import re
import copy
import itertools
from odict import odict
#==========================================================================================================
def classinit(cls, realcls = None):
    """
    Automatically initialize a class by calling __clsinit__ if it exists on cls and its base types.
    Currently used to implement class static initialization, and in particular the Flags functionality.
    """
    # Initialize bases first (depth-first) so derived initializers can rely on
    # inherited state. Note: each recursion passes the *immediate* child as
    # realcls, matching the original call protocol.
    for base_cls in cls.__bases__:
        classinit(base_cls, cls)
    initializer = getattr(cls, '__clsinit__', None)
    if initializer:
        # Call the underlying function directly so the target class (realcls,
        # when supplied) is passed rather than the class the method was found on.
        initializer.__func__(realcls or cls)
    return cls
#==========================================================================================================
class Flags(int):
    """
    Base class for easily defining sets of flags. Values should be added within a __clsinit__ method.
    """
    # NOTE: flag state lives in name-mangled class attributes (__values,
    # __masks, __next_value); __clsinit__ re-creates them on each class that
    # classinit() visits. This file is Python 2 — the tuple-parameter lambdas
    # in __str__ are not valid Python 3 syntax.

    def __apply_op(self, op, other):
        # Combine two flag values, keeping the more derived of the two types.
        if isinstance(other, type(self)):
            return type(other)(op(int(self), int(other)))
        elif isinstance(self, type(other)):
            return type(self)(op(int(self), int(other)))
        else:
            raise Exception('Incompatible flag types.')

    def __init__(self, value = 0):
        """ Used to construct an instance object with value 'value' """
        super(Flags, self).__init__(int(value))

    def __or__(self, other):
        return self.__apply_op(int.__or__, other)

    def __xor__(self, other):
        return self.__apply_op(int.__xor__, other)

    def __and__(self, other):
        return self.__apply_op(int.__and__, other)

    def __invert__(self):
        return type(self)(~(int(self)))

    @classmethod
    def __clsinit__(cls):
        """ Creates internal structures to track flag values. """
        cls.__values = list()
        cls.__masks = list()
        # The next bit to hand out; each AddFlag doubles it.
        cls.__next_value = 0x1

    @classmethod
    def AddFlag(cls, name):
        """ Add a flag value. Adds attribute '<name>' instance method 'Is<name>'. """
        value = cls.__next_value
        cls.__next_value = cls.__next_value << 1
        cls.__values.append((name, value))
        setattr(cls, name, cls(value))
        def isFn(self):
            return (self & getattr(cls, name)) != 0
        setattr(cls, 'Is' + name, isFn)

    @classmethod
    def AddMask(cls, name, *args):
        """ Create a mask from a set of flag names. Adds attribute '<name>Mask' and function 'Is<name>'. """
        value = 0
        for arg in args:
            value = value | getattr(cls, arg)
        cls.__masks.append((name + 'Mask', value))
        setattr(cls, name + 'Mask', cls(value))
        def isFn(self):
            return (self & getattr(cls, name + 'Mask')) != 0
        setattr(cls, 'Is' + name, isFn)

    def __str__(self):
        """ Returns a textual representation of the set flag values. """
        # Python 2 tuple-parameter lambdas: select the names of the set bits.
        return '(' + '|'.join(map(lambda (n,f): n, filter(lambda (n,f): (self & f), self.__values))) + ')'

    @classmethod
    def iterflags(cls):
        # Iterate (name, value) pairs in registration order.
        return cls.__values.__iter__()

    @classmethod
    def itermasks(cls):
        return cls.__masks.__iter__()
#==========================================================================================================
class StrictlyType(object):
    """Returns a strictly typed function"""
    # Decorator factory: StrictlyType(T1, T2, ...) wraps a function so the
    # positional arguments are isinstance-checked at call time; a declared
    # type of None skips checking that position. (Python 2 syntax.)
    def __init__(self,*args):
        # The expected types, one per checked positional argument.
        self.args = args
    def __call__(self, f):
        def func(*args, **kv):
            # When there is one extra leading argument, it is presumably
            # `self` of a method and is skipped; otherwise only the trailing
            # arguments are checked — verify against call sites.
            for a in zip(args[1:min(len(args), len(self.args))] if len(args) == len(self.args) + 1 else args[min(len(args), len(self.args)):], self.args):
                if a[1] != None and not isinstance(a[0], a[1]):
                    raise TypeError, 'Expected %s, got %s (%s)' % (a[1], type(a[0]), a[0])
            v = f(*args, **kv)
            return v
        # Python 2 spelling of __name__; preserves the wrapped function's name.
        func.func_name = f.func_name
        return func
#==========================================================================================================
class IPrint:
    # Indenting printer: writes lines to an output stream at a tracked indent
    # level (4 spaces per level). Child IPrints share the parent's stream and
    # add one indent level; blank-line separation between sections is handled
    # by the addWhitespace/pendingWhitespace state machine. (Python 2: the
    # `file` builtin below does not exist in Python 3.)
    def __init__(self, *args):
        # Accepts any mix of: an int (extra indent), a parent IPrint, a file.
        self.output = None
        self.cur_indent = 0
        self.parent = None
        self.addWhitespace = False
        self.pendingWhitespace = False
        for arg in args:
            if isinstance(arg, int):
                self.cur_indent += arg
            elif isinstance(arg, IPrint):
                # Inherit the parent's stream and indent one level deeper.
                self.parent = arg
                self.output = arg.output
                self.cur_indent += arg.cur_indent + 1
                self.addWhitespace = False
                self.pendingWhitespace = False
            elif isinstance(arg, file):
                self.output = arg
        if not self.output:
            self.output = sys.stdout

    def __write(self, *args, **kv):
        # Core formatter: accepts (text) or (extra_indent, text); indents each
        # line, strips trailing whitespace, and prepends a blank line when one
        # is pending. kv['indentFirstLine']=False suppresses the first indent.
        indent = 0
        if len(args) == 0: return ''
        elif len(args) == 1: (arg,) = args
        elif len(args) == 2: (indent, arg) = args
        elif len(args) > 2: raise Exception('Invalid arguments')
        indent += self.cur_indent
        isFirst = True
        # Emit the pending blank line only once whitespace was both requested
        # and allowed (i.e. something was written before).
        outstr = '\n' if self.pendingWhitespace and self.addWhitespace else ''
        self.pendingWhitespace = False
        for line in map(lambda a: a.rstrip(), arg.split('\n')):
            if not isFirst: outstr += '\n'
            if (not isFirst) or ('indentFirstLine' not in kv) or (kv['indentFirstLine']):
                outstr += ' ' * (indent * 4)
            outstr += line.rstrip()
            isFirst = False
        outstr = outstr.rstrip()
        # Preserve a single trailing newline when the input ended with one.
        if arg.endswith('\n'):
            outstr += '\n'
        self.addWhitespace = True
        return outstr

    def append(self, *args, **kv):
        # Continue on the current line (no indent before the first line).
        self.output.write(self.__write(indentFirstLine = False, *args, **kv))

    def write(self, *args, **kv):
        self.output.write(self.__write(*args, **kv))

    def writeline(self, *args, **kv):
        self.output.write(self.__write(*args, **kv))
        self.output.write('\n')

    def __call__(self, *args):
        # Calling the printer directly is shorthand for writeline.
        return self.writeline(*args)

    def indent(self):
        # Return a child printer one level deeper.
        return IPrint(self)

    def outdent(self):
        return self.parent

    def AddWhitespace(self):
        # Request a separating blank line before the next write, and allow it
        # on all ancestors.
        self.pendingWhitespace = True
        p = self.parent
        while p:
            p.addWhitespace = True
            p = p.parent
#==========================================================================================================
# @classinit
class AccessFlags(Flags):
    """Visibility flags shared by every kind of C# symbol."""
    def __init__(self, value):
        super(AccessFlags, self).__init__(value)

    @classmethod
    def __clsinit__(cls):
        # Registration order fixes each flag's bit value — keep it stable.
        for flag_name in ('Public', 'Internal', 'Protected', 'Private'):
            cls.AddFlag(flag_name)
        cls.AddMask('Visibility', 'Public', 'Internal', 'Protected', 'Private')
        cls.All = cls.VisibilityMask
        cls.AddFlag('CsExclude')

classinit(AccessFlags)
#==========================================================================================================
# @classinit
class MemberFlags(AccessFlags):
    """Flags describing a type member: its kind plus C# modifiers."""
    def __init__(self, value):
        super(MemberFlags, self).__init__(value)

    @classmethod
    def __clsinit__(cls):
        # Registration order fixes each flag's bit value — keep it stable.
        cls.AddFlag('Static')
        cls.AddFlag('Method')
        cls.AddFlag('Property')
        cls.AddMask('Type', 'Method', 'Property')
        for flag_name in ('Override', 'Serialize', 'Explicit', 'Implicit',
                          'Operator', 'Abstract', 'Partial', 'Virtual',
                          'Extension', 'New', 'DebugOnly', 'Sealed'):
            cls.AddFlag(flag_name)

classinit(MemberFlags)
#==========================================================================================================
# @classinit
class TypeFlags(AccessFlags):
    """Flags describing a C# type: its kind plus type-level modifiers."""
    def __init__(self, value):
        super(TypeFlags, self).__init__(value)

    @classmethod
    def __clsinit__(cls):
        # Registration order fixes each flag's bit value — keep it stable.
        for flag_name in ('Class', 'Struct', 'Interface', 'Enum'):
            cls.AddFlag(flag_name)
        cls.AddMask('Type', 'Class', 'Struct', 'Interface', 'Enum')
        for flag_name in ('Abstract', 'Partial', 'Static', 'Array'):
            cls.AddFlag(flag_name)

classinit(TypeFlags)
#==========================================================================================================
# @classinit
class EnumFlags(TypeFlags):
    """Type flags specific to C# enum definitions."""
    def __init__(self, value):
        super(EnumFlags, self).__init__(value)

    @classmethod
    def __clsinit__(cls):
        # Marks enums whose values are combinable bit flags.
        cls.AddFlag('HasFlagValues')

classinit(EnumFlags)
#==========================================================================================================
class CsScopeBlock(object):
    """Context manager that emits a brace-delimited C# scope via *iprint*."""
    def __init__(self, iprint):
        self.iprint = iprint

    def __enter__(self):
        # Open the scope and indent everything written inside it.
        self.iprint('{')
        self.iprint.cur_indent += 1

    def __exit__(self, exc_type, exc_value, traceback, comment = None):
        self.iprint.cur_indent -= 1
        # '}}' is an escaped literal brace under str.format.
        closer = '}} // {0}'.format(comment) if comment else '}'
        self.iprint(closer)
#==========================================================================================================
class CsNamespace(CsScopeBlock):
    """Scope block that opens ``namespace <name>`` and closes it with a trailing comment."""
    def __init__(self, iprint, name):
        super(CsNamespace, self).__init__(iprint)
        self.name = name

    def __enter__(self):
        # Leading newline separates the namespace from preceding output.
        self.iprint('\nnamespace ' + str(self))
        super(CsNamespace, self).__enter__()

    def __exit__(self, exc_type, exc_value, traceback):
        super(CsNamespace, self).__exit__(exc_type, exc_value, traceback, 'namespace ' + str(self))

    def __str__(self):
        return self.name
#==========================================================================================================
def CsMakePrivateName(name):
    """Derive a private field name: ``Foo`` -> ``_foo``; anything else -> ``_<name>``."""
    text = str(name)
    # Lower-case the first letter only for PascalCase-looking identifiers
    # (capital followed by a lowercase letter, digit or underscore).
    if re.match(r'[A-Z][a-z0-9_]', text):
        return '_{0}{1}'.format(text[0].lower(), text[1:])
    return '_' + text
#==========================================================================================================
def CsMakeArgumentName(name):
    """Derive an argument name: ``IFoo`` -> ``foo``; ``Bar`` -> ``bar``."""
    text = str(name)
    # Drop the interface 'I' prefix before camel-casing.
    if re.match(r'I[A-Z][a-z0-9_]', text):
        return text[1].lower() + text[2:]
    return text[0].lower() + text[1:]
#==========================================================================================================
def CsAccessKeyword(flags):
    """Return the C# access keyword for the highest-priority visibility flag set."""
    # Precedence mirrors the original if/elif chain: public wins over
    # internal, which wins over protected, then private.
    for flag, keyword in ((AccessFlags.Public, 'public'),
                          (AccessFlags.Internal, 'internal'),
                          (AccessFlags.Protected, 'protected'),
                          (AccessFlags.Private, 'private')):
        if flags & flag:
            return keyword
    return ''
#==========================================================================================================
#==========================================================================================================
def CsMemberDeclaration(m):
    """Build the space-separated C# modifier string for member *m*."""
    # A partial member carries no other modifiers.
    if m.flags & MemberFlags.Partial:
        return 'partial'
    keywords = []
    # Explicit interface implementations (dotted names) and interface members
    # carry no visibility keyword.
    if '.' not in str(m) and not m.parentScope.IsInterface():
        keywords.append(m.CsVisibility())
    # Emission order matches C# convention and the original implementation.
    for flag, keyword in ((MemberFlags.Static, 'static'),
                          (MemberFlags.Explicit, 'explicit'),
                          (MemberFlags.Implicit, 'implicit'),
                          (MemberFlags.Virtual, 'virtual'),
                          (MemberFlags.Operator, 'operator'),
                          (MemberFlags.New, 'new')):
        if m.flags & flag:
            keywords.append(keyword)
    if not m.parentScope.IsInterface():
        for flag, keyword in ((MemberFlags.Override, 'override'),
                              (MemberFlags.Abstract, 'abstract'),
                              (MemberFlags.Sealed, 'sealed')):
            if m.flags & flag:
                keywords.append(keyword)
    return ' '.join(keywords)
#==========================================================================================================
def Singular(x):
    """Best-effort singular form of an identifier (metadata-table flavored)."""
    # 'MethodSemantics' is already singular despite its trailing 's'.
    if x == 'MethodSemantics':
        return x
    if x.endswith('ies'):
        return x[:-3] + 'y'
    if x.endswith('ses'):
        return x[:-3]
    if x.endswith('s'):
        return x[:-1]
    return x
#==========================================================================================================
def Plural(x):
    """Best-effort plural form of an identifier; normalizes via Singular first."""
    word = Singular(x)
    # 'MethodSemantics' keeps its spelling in both numbers.
    if word == 'MethodSemantics':
        return word
    if word.endswith('y'):
        return word[:-1] + 'ies'
    if word.endswith('s'):
        return word + 'ses'
    return word + 's'
#==========================================================================================================
def CsEmitFileHeader(iprint):
    """Write the standard license / generated-file banner at the top of a C# file."""
    header = '''// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
// NOTE: This is a generated file - do not manually edit!
'''
    iprint(header)
#==========================================================================================================
class MemberSet(object):
    """An ordered, name-keyed collection of members belonging to a parent scope.

    Several members may share one name (e.g. method overloads); each name maps
    to the list of members added under it, in insertion order.
    """
    def __init__(self, parentScope, *args, **kv):
        # odict preserves insertion order of member names.
        self.__dict = odict()
        self.parentScope = parentScope

    def add(self, item):
        """Add *item* (ignored when None), claiming it for this set's scope."""
        if item is None:  # was `item == None`
            return
        item.parentScope = self.parentScope
        self.__dict.setdefault(str(item), list()).append(item)
        return item

    def clear(self):
        self.__dict.clear()

    def __iadd__(self, items):  # renamed from `iter`, which shadowed the builtin
        for item in items:
            self.add(item)
        return self

    def __iter__(self):
        # Flatten the per-name lists in insertion order.
        return itertools.chain(*self.__dict.values())

    def __getitem__(self, name):
        """Return the sole member named *name*, the list of overloads, or None.

        Previously a missing name fell through to ``len(None)`` and raised
        TypeError; now it returns None explicitly. The old ``type(val) != str``
        guard was dead code (values are always lists) and has been dropped.
        """
        val = self.__dict.get(name)
        if val is None:
            return None
        if len(val) == 1:
            return val[0]
        return val

    def __len__(self):
        # Number of distinct member names, not total members.
        return len(self.__dict)
#==========================================================================================================
class TypeDefOrRef(object):
    """Common base for type definitions and type references.

    Arbitrary extra attributes (e.g. ``underlyingType``, ``elementType``) may
    be attached via keyword arguments.
    """
    def __init__(self, name, comment = None, typeParams = None, **kv):
        # Kept as an exact-type check (not isinstance) to preserve the
        # original acceptance behavior for str subclasses/unicode.
        if type(name) != str:
            raise Exception('Must provide name for type.')
        self.name = name
        self.comment = comment
        self.typeParams = typeParams or []
        # items() works on both Python 2 and 3; iteritems() was Python-2-only.
        for attr_name, attr_value in kv.items():
            setattr(self, attr_name, attr_value)

    def UnderlyingType(self):
        # A type is its own underlying type unless one was attached via kwargs.
        return getattr(self, 'underlyingType', self)

    def ElementType(self):
        return getattr(self, 'elementType', self)

    def IsGeneric(self):
        return len(self.typeParams) != 0

    def Instantiate(self, *args):
        """Create a TypeInst of this generic type with the given type arguments."""
        if not self.IsGeneric():
            raise Exception('Attempt to instantiate non-generic type.')
        return TypeInst(self, *args)

    def __str__(self):
        return self.name
#==========================================================================================================
class TypeRef(TypeDefOrRef):
    """A reference to a type defined elsewhere; it emits no C# of its own."""
    def __init__(self, *args, **kv):
        super(TypeRef, self).__init__(*args, **kv)
    def CsDefine(self, iprint):
        # The referenced type is declared elsewhere — nothing to emit.
        pass
    def CsComment(self, iprint):
        # No doc comment is emitted for a bare reference.
        pass
#==========================================================================================================
class TypeDef(TypeDefOrRef):
    """A fully specified C# type (class/struct/interface/enum) that can emit
    its own definition through an indenting printer ('iprint')."""
    def __init__(self, name, baseType = None, flags = AccessFlags.Public,
                 typeParams = None, interfaces = None, members = None,
                 underlyingType = None, comment = None, **kv):
        super(TypeDef, self).__init__(name, typeParams = typeParams or [], **kv)
        # Default to public when no visibility bits were supplied.
        if not (flags & AccessFlags.VisibilityMask):
            flags = flags | AccessFlags.Public
        self.flags = TypeFlags(flags)
        # A bare string becomes a reference to an externally defined type.
        if type(baseType) == str:
            baseType = TypeRef(baseType)
        self.interfaces = interfaces or list()
        # Interfaces cannot have a base class; fold a supplied one into the
        # interface list instead.
        if self.IsInterface() and baseType:
            self.interfaces.append(baseType)
            baseType = None
        self.baseType = baseType
        self.members = MemberSet(self)
        if members:
            self.members += members
        self.parentScope = None
        if type(underlyingType) == str:
            underlyingType = TypeRef(underlyingType)
        # Enum storage type; every other kind is its own underlying type.
        self.underlyingType = underlyingType or self
        self.comment = comment
    def CsDefine(self, iprint):
        """Emit the complete C# definition of this type."""
        # If this is a struct and there is a base type specified, fold the base
        # type members into the child
        if self.flags.IsStruct() and self.baseType != None:
            self.members += getattr(self.baseType, 'members', [])
            self.baseType = None
        # self.CsComment(iprint)
        self.CsDefineHeader(iprint)
        self.CsDefineBegin(iprint)
        self.CsDefineMembers(iprint.indent())
        self.CsDefineEnd(iprint)
    def CsComment(self, iprint):
        if self.comment:
            iprint('\n/// ' + self.comment)
    def CsTypeName(self):
        return str(self)
    def CsDefineHeader(self, iprint):
        # XML doc summary: the type name plus its optional comment.
        iprint(
            '''/// <summary>
/// {}{}
/// </summary>'''.format(self, ' : ' + self.comment if self.comment else ''))
    def __CsTypeDeclaration(self):
        # Build 'visibility [modifiers] kind Name'.
        out = [CsAccessKeyword(self.flags)]
        if not self.IsInterface():
            if self.flags & TypeFlags.Abstract:
                out += ['abstract']
            if self.flags & TypeFlags.Static:
                out += ['static']
            if self.flags & TypeFlags.Partial:
                out += ['partial']
        if (self.flags & TypeFlags.TypeMask) == TypeFlags.Class:
            out += ['class']
        elif (self.flags & TypeFlags.TypeMask) == TypeFlags.Struct:
            out += ['struct']
        elif (self.flags & TypeFlags.TypeMask) == TypeFlags.Interface:
            out += ['interface']
        elif (self.flags & TypeFlags.TypeMask) == TypeFlags.Enum:
            out += ['enum']
        out += [self.CsTypeName()]
        return ' '.join(out)
    def CsDefineBegin(self, iprint):
        """Emit the declaration line and the opening brace."""
        iprint.write(self.__CsTypeDeclaration())
        if self.flags.IsArray():
            iprint.append('[]')
        if self.typeParams:
            iprint.append('<{0}>'.format(', '.join(self.typeParams)))
        # Base type first, then the implemented interfaces.
        if self.baseType or self.interfaces:
            lst = map(lambda t: str(t), ([self.baseType] if self.baseType else []) + (self.interfaces or []))
            iprint.append(' : {0}'.format(', '.join(lst)))
            del(lst)
        iprint()
        iprint('{')
    def CsDefineMembers(self, iprint):
        members = list()
        # Structs copy their (pseudo) base type's members, minus ctors.
        if self.IsStruct() and self.baseType:
            members += filter(lambda t: not isinstance(t, CtorDef), getattr(self.baseType, 'members', []))
        members += self.members
        # Interfaces carry no fields.
        if self.IsInterface():
            members = filter(lambda m: not isinstance(m, FieldDef), members)
        for m in members:
            m.CsDefine(iprint)
            # Separate multi-line members with a blank line.
            if not (isinstance(m, FieldDef) or (m.flags & MemberFlags.Abstract)):
                iprint.AddWhitespace()
    def CsDefineEnd(self, iprint):
        iprint('}} // {0}'.format(self))
    def FullName(self):
        if self.parentScope:
            return str(self.parentScope) + '.' + str(self)
        else:
            return str(self)
    def IsInterface(self):
        return self.flags & TypeFlags.Interface
    def IsStruct(self):
        return self.flags & TypeFlags.Struct
    def IsClass(self):
        return self.flags & TypeFlags.Class
    def IsEnum(self):
        return self.flags & TypeFlags.Enum
    def CsVisibility(self):
        return CsAccessKeyword(self.flags)
    def __str__(self):
        name = super(TypeDef, self).__str__()
        # Interface names get a conventional 'I' prefix when missing.
        # NOTE(review): name == 'I' would raise IndexError on name[1] —
        # presumably never occurs in practice.
        if self.IsInterface():
            if not name.startswith('I') or (name[1] != name[1].upper()):
                return 'I' + name
        return name
#==========================================================================================================
def ClassDef(*args, **kv):
    """Construct a TypeDef whose kind bits are forced to 'class'."""
    td = TypeDef(*args, **kv)
    td.flags = TypeFlags((td.flags & ~TypeFlags.TypeMask) | TypeFlags.Class)
    return td
#==========================================================================================================
def StructDef(*args, **kv):
    """Construct a TypeDef whose kind bits are forced to 'struct'."""
    td = TypeDef(*args, **kv)
    td.flags = TypeFlags((td.flags & ~TypeFlags.TypeMask) | TypeFlags.Struct)
    return td
#==========================================================================================================
def InterfaceDef(*args, **kv):
    """Construct a TypeDef whose kind bits are forced to 'interface'."""
    td = TypeDef(*args, **kv)
    td.flags = TypeFlags((td.flags & ~TypeFlags.TypeMask) | TypeFlags.Interface)
    return td
#==========================================================================================================
class TypeInst(TypeDefOrRef):
    """An instantiation of a generic TypeDef with concrete type arguments."""
    def __init__(self, genericType, *typeArgs):
        if not typeArgs:
            raise Exception('Invalid type instantiation.')
        # Flatten nested instantiations: accumulate the inner arguments in
        # front of ours, then descend to the underlying generic definition.
        while isinstance(genericType, TypeInst):
            typeArgs = genericType.typeArgs + typeArgs
            genericType = genericType.genericType
        if not isinstance(genericType, TypeDef):
            raise Exception('Invalid argument: genericType')
        self.genericType = genericType
        self.typeArgs = typeArgs
    def __str__(self):
        # Arity must match before rendering 'Name<arg, ...>'.
        if len(self.genericType.typeParams) != len(self.typeArgs):
            raise Exception('Incomplete type instantiation: {0}<{1}> / {0}<{2}>'.format(self.genericType, self.genericType.typeParams, map(lambda a: str(a), self.typeArgs)))
        return str(self.genericType) + '<{0}>'.format(', '.join(map(lambda a: str(a), self.typeArgs)))
#==========================================================================================================
class MemberDefBase(object):
    """Common base for all emitted type members (fields, properties, methods)."""
    @StrictlyType(str, None)
    def __init__(self, name, flags, comment = None):
        self.name = name
        self.flags = flags
        self.comment = comment
    def __str__(self):
        return self.name
    def CsDefineHeader(self, iprint):
        # Default XML doc header: just the member name as the summary.
        iprint(
            '''/// <summary>
/// {}
/// </summary>'''.format(self))
    def CsDefine(self, iprint):
        """Emit this member: its comment followed by its declaration."""
        self.CsComment(iprint)
        self.CsDefineMember(iprint)
    def CsComment(self, iprint):
        if self.comment:
            iprint('\n/// ' + self.comment)
    def CsDefineMember(self, iprint):
        # Subclasses emit their actual declaration here.
        pass
    def CsVisibility(self):
        # Explicit interface implementations (dotted names) carry no
        # access keyword.
        if '.' in str(self):
            return ''
        else:
            return CsAccessKeyword(self.flags)
#==========================================================================================================
class FieldDef(MemberDefBase):
    """A C# field member.

    *fieldType* may be a TypeDefOrRef or a bare type-name string. With
    autoInitialize=True the field is declared with 'new T()'.
    """
    def __init__(self, name, fieldType, flags = AccessFlags.Public, autoInitialize = False, *args, **kv):
        # BUG FIX: coerce a bare string to a type first. Previously this
        # coercion sat below the isinstance check, which rejected strings
        # before the coercion could ever run (dead code).
        if type(fieldType) == str:
            fieldType = TypeDef(fieldType)
        if not isinstance(fieldType, TypeDefOrRef):
            raise Exception('Invalid field type: {0}'.format(type(fieldType)))
        super(FieldDef, self).__init__(name, flags, *args, **kv)
        self.fieldType = fieldType
        self.autoInitialize = autoInitialize
    def CsDefineMember(self, iprint):
        """Emit e.g. 'public Foo Bar;' or 'public Foo Bar = new Foo();'."""
        decl = ' '.join([self.CsVisibility(), str(self.fieldType), str(self)])
        if self.autoInitialize:
            decl += ' = new {0}()'.format(self.fieldType)
        decl += ';'
        iprint(decl)
#==========================================================================================================
class EmptyArrayDef(MemberDefBase):
    """A static field pre-initialized to a zero-length array."""
    def __init__(self, name, fieldType, flags = AccessFlags.Public, *args, **kv):
        super(EmptyArrayDef, self).__init__(name, flags, *args, **kv)
        self.fieldType = fieldType
    def CsDefineMember(self, iprint):
        # e.g. "public static Foo[] Empty = new Foo[0];"
        iprint('{0} static {1}[] {2} = new {1}[0];'.format(
            self.CsVisibility(), self.fieldType, self))
#==========================================================================================================
def ShouldEmitMethodBody(m):
    """Return True when member *m* should be emitted with a body.

    Abstract members, partial members, and members of interfaces are
    declaration-only.
    """
    return not (m.flags & MemberFlags.Abstract
                or m.flags & MemberFlags.Partial
                or m.parentScope.flags & TypeFlags.Interface)
#==========================================================================================================
class PropertyDef(MemberDefBase):
    """A C# property member with optional get/set accessors.

    When *field* is given, trivial accessor bodies reading/writing that
    backing field are generated automatically.
    """
    def __init__(self, name, propertyType, flags = AccessFlags.Public, getter = None, setter = None, field = None, *args, **kv):
        super(PropertyDef, self).__init__(name, flags, *args, **kv)
        self.propertyType = propertyType
        # With neither accessor supplied, default to a get/set pair.
        if not getter and not setter:
            getter = PropertyGetter()
            setter = PropertySetter()
        self.getter = getter
        self.setter = setter
        self.field = field
        if self.field:
            if self.getter:
                self.getter.body = 'return {0};'.format(field)
            if self.setter:
                self.setter.body = '{0} = value;'.format(field)
    def CsDefineMember(self, iprint):
        """Emit the property declaration and its accessor block."""
        iprint(' '.join([CsMemberDeclaration(self), str(self.propertyType), str(self)]).strip())
        iprint('{')
        for op in filter(lambda op: op != None, [self.getter, self.setter]):
            op.parentScope = self
            # Only show the accessor's own visibility when it differs from
            # the property's.
            iprint.write(1, ' '.join([op.CsVisibility() if self.flags != op.flags else '', str(op)]).strip())
            if op.body and ShouldEmitMethodBody(self):
                iprint.append('\n')
                iprint(1, '{')
                iprint(2, op.body.strip())
                iprint(1, '}')
            else:
                iprint.append(';\n')
        iprint('}} // {}'.format(self))
#==========================================================================================================
class PropertyOp(object):
    """One accessor ('get' or 'set') of a PropertyDef."""
    def __init__(self, name, flags = AccessFlags.Public, body = None):
        self.name = name
        self.flags = flags
        self.body = body
    def __str__(self):
        return self.name
    def CsVisibility(self):
        # Emit an access keyword only when this accessor narrows the
        # visibility of a public property.
        parentFlags = self.parentScope.flags
        if (parentFlags & AccessFlags.Public) == 0:
            return ''
        if (self.flags & AccessFlags.All) != (parentFlags & AccessFlags.All):
            return CsAccessKeyword(self.flags)
        return ''
#==========================================================================================================
class PropertyGetter(PropertyOp):
    """The 'get' accessor of a property."""
    def __init__(self, flags = AccessFlags.Public, body = None):
        super(PropertyGetter, self).__init__('get', flags, body)
#==========================================================================================================
class PropertySetter(PropertyOp):
    """The 'set' accessor of a property."""
    def __init__(self, flags = AccessFlags.Public, body = None):
        super(PropertySetter, self).__init__('set', flags, body)
#==========================================================================================================
class MethodDef(MemberDefBase):
    """A C# method member.

    *sig* is [returnType, [(paramType, paramName), ...]]; *body* is raw C#
    statement text (None, or a non-emitting context, yields a declaration
    only — see ShouldEmitMethodBody).
    """
    def __init__(self, name, flags = AccessFlags.Public, sig = None,
                 body = 'throw new NotImplementedException();', typeParams = None,
                 constraints = None):
        super(MethodDef, self).__init__(name, flags)
        # FIX: default signature is built per-instance; the old shared
        # mutable default (['void', []]) could leak edits across methods.
        self.sig = sig if sig is not None else ['void', []]
        self.body = body
        self.typeParams = typeParams
        self.constraints = constraints
        # Extension methods are necessarily static.
        if flags & MemberFlags.Extension:
            self.flags = MemberFlags(flags | MemberFlags.Static)
    def __str__(self):
        if self.typeParams:
            # BUG FIX: this previously joined the bare name 'typeParams'
            # (NameError at runtime); use the instance attribute.
            return self.name + '<' + ', '.join(map(lambda x: str(x), self.typeParams)) + '>'
        else:
            return self.name
    def IsGeneric(self):
        return self.typeParams != None
    def ReturnType(self):
        return str(self.sig[0] or '')
    def Parameters(self):
        """Render the parenthesized parameter list."""
        params = '('
        if self.flags & MemberFlags.Extension:
            # Extension methods mark their first parameter with 'this'.
            params += 'this '
        params += ', '.join([' '.join(map(lambda a: str(a), a)) for a in self.sig[1]])
        params += ')'
        return params
    def TypeParameters(self):
        return '<' + ', '.join([str(x) for x in self.typeParams]) + '>'
    def CsDefineBody(self, iprint):
        if self.body != None and ShouldEmitMethodBody(self):
            iprint.append('\n')
            iprint('{')
            iprint(1, self.body.strip())
            iprint('}} // {}'.format(self))
        else:
            iprint.append(';\n')
    def CsDefineMember(self, iprint):
        if self.flags & MemberFlags.DebugOnly:
            iprint('[System.Diagnostics.Conditional("DEBUG")]')
        super(MethodDef, self).CsDefineMember(iprint)
        # NOTE(review): for generic methods str(self) now already carries
        # '<T, ...>', so appending TypeParameters() below renders the list
        # twice — generic MethodDefs appear unused here; confirm intent.
        decl = [CsMemberDeclaration(self), self.ReturnType(), self]
        if self.IsGeneric():
            decl += [self.TypeParameters()]
        if self.constraints:
            for constraint in self.constraints:
                decl.append('where {0} : {1}'.format(*constraint))
        iprint.write(' '.join(map(lambda i: str(i).strip(), decl)).strip() + self.Parameters())
        self.CsDefineBody(iprint)
    def CsDefineHeader(self, iprint):
        # XML doc header with one <param> entry per signature parameter.
        iprint('''
/// <summary>
/// {}
/// </summary>'''.format(self))
        for pType,pName in self.sig[1]:
            iprint('/// <param name="{}"></param>'.format(pName))
        if self.sig[0] and str(self.sig[0]) != 'void':
            iprint('/// <returns></returns>')
#==========================================================================================================
class CtorDef(MethodDef):
    """A C# instance constructor; its emitted name comes from the enclosing
    type (see __str__)."""
    def __init__(self, flags = AccessFlags.Public, sig = None,
                 body = 'throw new NotImplementedException();', typeParams = None,
                 ctorDelegation = None):
        # FIX: build the default parameter list per-instance instead of a
        # shared mutable default ([]).
        # NOTE(review): typeParams is accepted but not forwarded — C#
        # constructors cannot be generic; preserved as-is.
        super(CtorDef, self).__init__('.ctor', flags, [None, sig if sig is not None else []], body)
        # The constructor renders under the enclosing type's name.
        self.name = None
        self.ctorDelegation = ctorDelegation
    def __str__(self):
        return str(self.parentScope)
    def CsDefineMember(self, iprint):
        """Emit the constructor declaration, delegation and body."""
        # Public ctors with no body are left implicit; emit nothing.
        if (self.flags & AccessFlags.Public) and not self.body:
            return
        decl = [CsMemberDeclaration(self), self]
        if self.IsGeneric():
            decl += [self.TypeParameters()]
        ctorDel = ''
        if self.ctorDelegation:
            # e.g. " : this(arg1, arg2)"
            ctorDel = ' : this(' + ', '.join(self.ctorDelegation) + ')'
        iprint.write(' '.join(map(lambda i: str(i).strip(), decl)) + self.Parameters() + ctorDel)
        if ShouldEmitMethodBody(self):
            iprint.append('\n')
            iprint('{')
            iprint(1, self.body.strip())
            iprint('}')
        else:
            iprint.append(';\n')
#==========================================================================================================
class MethodInst(object):
    """An instantiation of a generic MethodDef with concrete type arguments."""
    def __init__(self, methodDef, typeArgs):
        # Accept a single argument or a list of arguments.
        if typeArgs and type(typeArgs) != list:
            typeArgs = [typeArgs]
        if (not typeArgs) or (type(typeArgs) != list):
            raise Exception('Invalid type instantiation.')
        # Flatten nested instantiations, mirroring TypeInst.
        # BUG FIX: accumulate the inner instantiation's arguments BEFORE
        # descending; the old code reassigned methodDef first and then read
        # .typeArgs from the underlying MethodDef, which has no such
        # attribute (AttributeError).
        while isinstance(methodDef, MethodInst):
            typeArgs = methodDef.typeArgs + typeArgs
            methodDef = methodDef.methodDef
        self.methodDef = methodDef
        self.typeArgs = typeArgs
    def __str__(self):
        # Arity must match before rendering 'Name<arg, ...>'.
        if len(self.methodDef.typeParams) != len(self.typeArgs):
            raise Exception('Incomplete type instantiation.')
        return str(self.methodDef) + '<{0}>'.format(', '.join(self.typeArgs))
#==========================================================================================================
class NamespaceDef(object):
    """A (possibly dotted) C# namespace containing types and sub-namespaces."""
    def __init__(self, name):
        self.parentScope = None
        self.members = MemberSet(self)
        # A dotted name builds the parent namespace and registers this
        # namespace as one of its members (which also re-parents us).
        if '.' in name:
            pname, name = '.'.join(name.split('.')[:-1]), name.split('.')[-1]
            self.name = name
            NamespaceDef(pname).members.add(self)
        else:
            self.name = name
    def __str__(self):
        if self.parentScope:
            return str(self.parentScope) + '.' + self.name
        else:
            return self.name
    def CsDefineBegin(self, iprint):
        iprint('namespace {0}\n{{'.format(str(self)))
    def CsDefineEnd(self, iprint):
        iprint('}} // {0}'.format(str(self)))
    def CsDefine(self, iprint):
        """Emit this namespace's types, then its nested namespaces."""
        # Sort the member types in a predictable way.
        def typesortkey(t):
            name = str(t)
            # Remove 'I' prefix from interfaces for sorting.
            if t.IsInterface() and re.match(r'I[A-Z]', name):
                name = name[1:]
            # Enums sort ahead of all other type kinds.
            if isinstance(t, EnumDef):
                return (1, name)
            else:
                return (2, name)
        # Sort members for consistent source generation, which enables easier diffing.
        # (Python 2: sorted() takes a cmp function; cmp() is a builtin.)
        typeMembers = sorted(filter(lambda m: not isinstance(m, NamespaceDef), self.members),lambda x,y: cmp(typesortkey(x), typesortkey(y)))
        if len(typeMembers) > 0:
            self.CsDefineBegin(iprint)
            iprint = iprint.indent()
            for m in typeMembers:
                m.CsDefine(iprint)
                iprint.AddWhitespace()
            iprint = iprint.outdent()
            self.CsDefineEnd(iprint)
        # Nested namespaces are emitted after (outside) this one's braces.
        nsMembers = filter(lambda m: isinstance(m, NamespaceDef), self.members)
        for m in nsMembers:
            m.CsDefine(iprint)
            iprint.AddWhitespace()
#==========================================================================================================
class CodeBlockDef(object):
    """An opaque chunk of preformatted C# source emitted verbatim."""
    def __init__(self, codeBlock):
        self.codeBlock = codeBlock
    def CsDefine(self, iprint):
        # Pass the raw text straight through to the printer.
        iprint(self.codeBlock)
#==========================================================================================================
class EnumDef(TypeDef):
    """A C# enum definition; its members are EnumValue instances.

    Missing member values are auto-assigned: sequentially from 0 for plain
    enums, as powers of two from 1 for [Flags] enums.
    """
    def __init__(self, *args, **kv):
        super(EnumDef, self).__init__(*args, **kv)
        self.flags = EnumFlags((self.flags & ~TypeFlags.TypeMask) | TypeFlags.Enum)
    def CsDefineHeader(self, iprint):
        super(EnumDef, self).CsDefineHeader(iprint)
        if self.flags.IsHasFlagValues():
            iprint('[Flags]')
    def CsDefineMembers(self, iprint):
        """Validate, auto-assign missing values, then emit every member."""
        for m in self.members:
            if not isinstance(m, EnumValue):
                raise Exception('Expected enum value')
        # FIX: reuse CalculateValues instead of duplicating its loop here.
        self.CalculateValues()
        for m in self.members:
            m.CsDefine(iprint)
    def CalculateValues(self):
        """Assign automatic values to members created without one."""
        for m in self.members:
            if m.value == None:
                m.value = self.__FindValue()
    def __FindValue(self):
        # First unused value: next power of two for flag enums, else the
        # next integer. (Replaced a py2-only reduce() membership scan with
        # the equivalent any().)
        value = 1 if self.flags.IsHasFlagValues() else 0
        while any(m.value == value for m in self.members):
            if self.flags.IsHasFlagValues():
                value <<= 1
            else:
                value += 1
        return value
#==========================================================================================================
class EnumValue(object):
    """A single name/value pair inside an EnumDef."""
    def __init__(self, name, value = None, comment = None):
        self.name = name
        self.value = value  # None means "assign automatically" (EnumDef)
        self.flags = 0
        self.comment = comment
    def CsDefineHeader(self, iprint):
        if self.comment:
            iprint.AddWhitespace()
            iprint('/// {}'.format(self.comment))
    def CsDefine(self, iprint):
        """Emit 'Name = 0xVALUE,' (or just 'Name,' when no value is set)."""
        self.CsDefineHeader(iprint)
        if self.value != None:
            # Values are always rendered in hex. FIX: the original branched
            # on EnumFlags.HasFlagValues but both branches were identical;
            # NOTE(review): the non-flags branch may have intended decimal —
            # hex is preserved to keep generated output unchanged.
            iprint('{0} = {1},'.format(self.name, hex(self.value)))
        else:
            iprint('{0},'.format(self.name))
    def __str__(self):
        # parentScope is attached when added to a MemberSet.
        if self.parentScope:
            return str(self.parentScope) + '.' + self.name
        else:
            return self.name
| {
"content_hash": "952eca0c761e322e58b96681eae42eca",
"timestamp": "",
"source": "github",
"line_count": 1004,
"max_line_length": 173,
"avg_line_length": 35.91434262948207,
"alnum_prop": 0.49262299628376505,
"repo_name": "mjp41/corert",
"id": "991b469422eb12071eb9140456fd265063f27da2",
"size": "36259",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/Common/src/Internal/Metadata/NativeFormat/Script/CsCommonGen2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "469084"
},
{
"name": "Batchfile",
"bytes": "25302"
},
{
"name": "C",
"bytes": "534789"
},
{
"name": "C#",
"bytes": "11468499"
},
{
"name": "C++",
"bytes": "3494918"
},
{
"name": "CMake",
"bytes": "48690"
},
{
"name": "Groovy",
"bytes": "3732"
},
{
"name": "Objective-C",
"bytes": "2243"
},
{
"name": "PowerShell",
"bytes": "4600"
},
{
"name": "Python",
"bytes": "164821"
},
{
"name": "Shell",
"bytes": "30166"
}
],
"symlink_target": ""
} |
import itertools
from collections import defaultdict
import logging
from operator import mul
import networkx as nx
import numpy as np
import pandas as pd
from pgmpy.base import DirectedGraph
from pgmpy.factors import TabularCPD, JointProbabilityDistribution, Factor
from pgmpy.independencies import Independencies
from pgmpy.extern import six
from pgmpy.extern.six.moves import range, reduce
class BayesianModel(DirectedGraph):
"""
Base class for bayesian model.
    A model stores nodes and edges with conditional probability
    distributions (cpds) and other attributes.
    Models hold directed edges. Self loops are not allowed, nor are
    multiple (parallel) edges.
Nodes should be strings.
Edges are represented as links between nodes.
Parameters
----------
data : input graph
Data to initialize graph. If data=None (default) an empty
graph is created. The data can be an edge list, or any
NetworkX graph object.
Examples
--------
Create an empty bayesian model with no nodes and no edges.
>>> from pgmpy.models import BayesianModel
>>> G = BayesianModel()
G can be grown in several ways.
**Nodes:**
Add one node at a time:
>>> G.add_node('a')
Add the nodes from any container (a list, set or tuple or the nodes
from another graph).
>>> G.add_nodes_from(['a', 'b'])
**Edges:**
G can also be grown by adding edges.
Add one edge,
>>> G.add_edge('a', 'b')
a list of edges,
>>> G.add_edges_from([('a', 'b'), ('b', 'c')])
If some edges connect nodes not yet in the model, the nodes
are added automatically. There are no errors when adding
nodes or edges that already exist.
**Shortcuts:**
    Many common graph features allow python syntax for speedy reporting.
>>> 'a' in G # check if node in graph
True
>>> len(G) # number of nodes in graph
3
"""
def __init__(self, ebunch=None):
super(BayesianModel, self).__init__()
if ebunch:
self.add_edges_from(ebunch)
self.cpds = []
self.cardinalities = defaultdict(int)
def add_edge(self, u, v, **kwargs):
"""
Add an edge between u and v.
The nodes u and v will be automatically added if they are
not already in the graph
Parameters
----------
u,v : nodes
Nodes can be any hashable python object.
EXAMPLE
-------
>>> from pgmpy.models import BayesianModel/home/abinash/software_packages/numpy-1.7.1
>>> G = BayesianModel()
>>> G.add_nodes_from(['grade', 'intel'])
>>> G.add_edge('grade', 'intel')
"""
if u == v:
raise ValueError('Self loops are not allowed.')
if u in self.nodes() and v in self.nodes() and nx.has_path(self, v, u):
raise ValueError(
'Loops are not allowed. Adding the edge from (%s->%s) forms a loop.' % (u, v))
else:
super(BayesianModel, self).add_edge(u, v, **kwargs)
def add_cpds(self, *cpds):
"""
Add CPD (Conditional Probability Distribution) to the Bayesian Model.
Parameters
----------
cpds : list, set, tuple (array-like)
List of CPDs which will be associated with the model
EXAMPLE
-------
>>> from pgmpy.models import BayesianModel
>>> from pgmpy.factors.CPD import TabularCPD
>>> student = BayesianModel([('diff', 'grades'), ('intel', 'grades')])
>>> grades_cpd = TabularCPD('grades', 3, [[0.1,0.1,0.1,0.1,0.1,0.1],
... [0.1,0.1,0.1,0.1,0.1,0.1],
... [0.8,0.8,0.8,0.8,0.8,0.8]],
... evidence=['diff', 'intel'], evidence_card=[2, 3])
>>> student.add_cpds(grades_cpd)
+------+-----------------------+---------------------+
|diff: | easy | hard |
+------+------+------+---------+------+------+-------+
|intel:| dumb | avg | smart | dumb | avg | smart |
+------+------+------+---------+------+------+-------+
|gradeA| 0.1 | 0.1 | 0.1 | 0.1 | 0.1 | 0.1 |
+------+------+------+---------+------+------+-------+
|gradeB| 0.1 | 0.1 | 0.1 | 0.1 | 0.1 | 0.1 |
+------+------+------+---------+------+------+-------+
|gradeC| 0.8 | 0.8 | 0.8 | 0.8 | 0.8 | 0.8 |
+------+------+------+---------+------+------+-------+
"""
for cpd in cpds:
if not isinstance(cpd, TabularCPD):
raise ValueError('Only TabularCPD can be added.')
if set(cpd.variables) - set(cpd.variables).intersection(
set(self.nodes())):
raise ValueError('CPD defined on variable not in the model', cpd)
for prev_cpd_index in range(len(self.cpds)):
if self.cpds[prev_cpd_index].variable == cpd.variable:
logging.warning("Replacing existing CPD for {var}".format(var=cpd.variable))
self.cpds[prev_cpd_index] = cpd
break
else:
self.cpds.append(cpd)
def get_cpds(self, node=None):
"""
Returns the cpds that have been added till now to the graph
Parameter
---------
node: any hashable python object (optional)
The node whose CPD we want. If node not specified returns all the
CPDs added to the model.
Examples
--------
>>> from pgmpy.models import BayesianModel
>>> from pgmpy.factors import TabularCPD
>>> student = BayesianModel([('diff', 'grade'), ('intel', 'grade')])
>>> cpd = TabularCPD('grade', 2, [[0.1, 0.9, 0.2, 0.7],
... [0.9, 0.1, 0.8, 0.3]],
... ['intel', 'diff'], [2, 2])
>>> student.add_cpds(cpd)
>>> student.get_cpds()
"""
if node:
if node not in self.nodes():
raise ValueError('Node not present in the Directed Graph')
for cpd in self.cpds:
if cpd.variable == node:
return cpd
else:
return self.cpds
def remove_cpds(self, *cpds):
"""
Removes the cpds that are provided in the argument.
Parameters
----------
*cpds: TabularCPD, TreeCPD, RuleCPD object
A CPD object on any subset of the variables of the model which
is to be associated with the model.
Examples
--------
>>> from pgmpy.models import BayesianModel
>>> from pgmpy.factors import TabularCPD
>>> student = BayesianModel([('diff', 'grade'), ('intel', 'grade')])
>>> cpd = TabularCPD('grade', 2, [[0.1, 0.9, 0.2, 0.7],
... [0.9, 0.1, 0.8, 0.3]],
... ['intel', 'diff'], [2, 2])
>>> student.add_cpds(cpd)
>>> student.remove_cpds(cpd)
"""
for cpd in cpds:
if isinstance(cpd, six.string_types):
cpd = self.get_cpds(cpd)
self.cpds.remove(cpd)
    def check_model(self):
        """
        Check the model for various errors. This method checks for the following
        errors.
        * Checks if the sum of the probabilities for each state is equal to 1 (tol=0.01).
        * Checks if the CPDs associated with nodes are consistent with their parents.
        Returns
        -------
        check: boolean
            True if all the checks are passed
        """
        # NOTE(review): nodes without an associated CPD (get_cpds -> None)
        # are silently skipped by the isinstance guard below.
        for node in self.nodes():
            cpd = self.get_cpds(node=node)
            if isinstance(cpd, TabularCPD):
                evidence = cpd.evidence
                parents = self.get_parents(node)
                # The CPD's conditioning variables must be exactly the
                # node's parents in the graph (order-insensitive).
                if set(evidence if evidence else []) != set(parents if parents else []):
                    raise ValueError("CPD associated with %s doesn't have "
                                     "proper parents associated with it." % node)
                # Marginalizing the node out must leave all-ones (one per
                # evidence configuration), i.e. each column sums to 1
                # within atol=0.01.
                # NOTE(review): np.product is a deprecated alias of np.prod
                # in modern NumPy.
                if not np.allclose(cpd.to_factor().marginalize([node], inplace=False).values.flatten('C'),
                                   np.ones(np.product(cpd.evidence_card)),
                                   atol=0.01):
                    raise ValueError('Sum of probabilites of states for node %s'
                                     ' is not equal to 1.' % node)
        return True
def _get_ancestors_of(self, obs_nodes_list):
"""
Returns a list of all ancestors of all the observed nodes including the
node itself.
Parameters
----------
obs_nodes_list: string, list-type
name of all the observed nodes
Examples
--------
>>> from pgmpy.models import BayesianModel
>>> model = BayesianModel([('D', 'G'), ('I', 'G'), ('G', 'L'),
... ('I', 'L')])
>>> model._get_ancestors_of('G')
{'D', 'G', 'I'}
>>> model._get_ancestors_of(['G', 'I'])
{'D', 'G', 'I'}
"""
if not isinstance(obs_nodes_list, (list, tuple)):
obs_nodes_list = [obs_nodes_list]
ancestors_list = set()
nodes_list = set(obs_nodes_list)
while nodes_list:
node = nodes_list.pop()
if node not in ancestors_list:
nodes_list.update(self.predecessors(node))
ancestors_list.add(node)
return ancestors_list
    def active_trail_nodes(self, start, observed=None):
        """
        Returns all the nodes reachable from start via an active trail.

        Parameters
        ----------
        start: Graph node
        observed : List of nodes (optional)
            If given the active trail would be computed assuming these nodes to be observed.

        Examples
        --------
        >>> from pgmpy.models import BayesianModel
        >>> student = BayesianModel()
        >>> student.add_nodes_from(['diff', 'intel', 'grades'])
        >>> student.add_edges_from([('diff', 'grades'), ('intel', 'grades')])
        >>> student.active_trail_nodes('diff')
        {'diff', 'grades'}
        >>> student.active_trail_nodes('diff', observed='grades')
        {'diff', 'intel'}
        References
        ----------
        Details of the algorithm can be found in 'Probabilistic Graphical Model
        Principles and Techniques' - Koller and Friedman
        Page 75 Algorithm 3.1
        """
        if observed:
            observed_list = [observed] if isinstance(observed, str) else observed
        else:
            observed_list = []
        # Observed nodes and their ancestors determine which colliders
        # (v-structures) are unblocked.
        ancestors_list = self._get_ancestors_of(observed_list)
        # Direction of flow of information
        # up -> from parent to child
        # down -> from child to parent
        visit_list = set()
        visit_list.add((start, 'up'))
        traversed_list = set()
        active_nodes = set()
        while visit_list:
            node, direction = visit_list.pop()
            if (node, direction) not in traversed_list:
                # Any reachable unobserved node lies on an active trail.
                if node not in observed_list:
                    active_nodes.add(node)
                traversed_list.add((node, direction))
                if direction == 'up' and node not in observed_list:
                    # Reached from a child: both parents and children stay
                    # reachable while the node is unobserved.
                    for parent in self.predecessors(node):
                        visit_list.add((parent, 'up'))
                    for child in self.successors(node):
                        visit_list.add((child, 'down'))
                elif direction == 'down':
                    # Reached from a parent: children continue the trail
                    # only when this node is unobserved ...
                    if node not in observed_list:
                        for child in self.successors(node):
                            visit_list.add((child, 'down'))
                    # ... and parents become reachable through a collider
                    # whose descendant (this node) is observed or leads to
                    # an observed node.
                    if node in ancestors_list:
                        for parent in self.predecessors(node):
                            visit_list.add((parent, 'up'))
        return active_nodes
def local_independencies(self, variables):
"""
Returns a independencies object containing the local independencies
of each of the variables.
Parameters
----------
variables: str or array like
variables whose local independencies are to found.
Examples
--------
>>> from pgmpy.models import BayesianModel
>>> student = BayesianModel()
>>> student.add_edges_from([('diff', 'grade'), ('intel', 'grade'),
>>> ('grade', 'letter'), ('intel', 'SAT')])
>>> ind = student.local_independencies('grade')
>>> ind.event1
{'grade'}
>>> ind.event2
{'SAT'}
>>> ind.event3
{'diff', 'intel'}
"""
def dfs(node):
"""
Returns the descendents of node.
Since there can't be any cycles in the Bayesian Network. This is a
very simple dfs which doen't remember which nodes it has visited.
"""
descendents = []
visit = [node]
while visit:
n = visit.pop()
neighbors = self.neighbors(n)
visit.extend(neighbors)
descendents.extend(neighbors)
return descendents
from pgmpy.independencies import Independencies
independencies = Independencies()
for variable in [variables] if isinstance(variables, str) else variables:
independencies.add_assertions([variable, set(self.nodes()) - set(dfs(variable)) -
set(self.get_parents(variable)) - {variable},
set(self.get_parents(variable))])
return independencies
def is_active_trail(self, start, end, observed=None):
"""
Returns True if there is any active trail between start and end node
Parameters
----------
start : Graph Node
end : Graph Node
observed : List of nodes (optional)
If given the active trail would be computed assuming these nodes to be observed.
additional_observed : List of nodes (optional)
If given the active trail would be computed assuming these nodes to be observed along with
the nodes marked as observed in the model.
Examples
--------
>>> from pgmpy.models import BayesianModel
>>> student = BayesianModel()
>>> student.add_nodes_from(['diff', 'intel', 'grades', 'letter', 'sat'])
>>> student.add_edges_from([('diff', 'grades'), ('intel', 'grades'), ('grades', 'letter'),
... ('intel', 'sat')])
>>> student.is_active_trail('diff', 'intel')
False
>>> student.is_active_trail('grades', 'sat')
True
"""
if end in self.active_trail_nodes(start, observed):
return True
else:
return False
def get_independencies(self, latex=False):
"""
Compute independencies in Bayesian Network.
Parameters
----------
latex: boolean
If latex=True then latex string of the independence assertion
would be created.
Examples
--------
>>> from pgmpy.models import BayesianModel
>>> student = BayesianModel()
>>> student.add_nodes_from(['diff', 'intel', 'grades', 'letter', 'sat'])
>>> student.add_edges_from([('diff', 'grades'), ('intel', 'grades'), ('grades', 'letter'),
... ('intel', 'sat')])
>>> student.get_independencies()
"""
independencies = Independencies()
for start in (self.nodes()):
for r in (1, len(self.nodes())):
for observed in itertools.combinations(self.nodes(), r):
independent_variables = self.active_trail_nodes(start, observed=observed)
independent_variables = set(independent_variables) - {start}
if independent_variables:
independencies.add_assertions([start, independent_variables,
observed])
independencies.reduce()
if not latex:
return independencies
else:
return independencies.latex_string()
def to_markov_model(self):
"""
Converts bayesian model to markov model. The markov model created would
be the moral graph of the bayesian model.
Examples
--------
>>> from pgmpy.models import BayesianModel
>>> G = BayesianModel([('diff', 'grade'), ('intel', 'grade'),
... ('intel', 'SAT'), ('grade', 'letter')])
>>> mm = G.to_markov_model()
>>> mm.nodes()
['diff', 'grade', 'intel', 'SAT', 'letter']
>>> mm.edges()
[('diff', 'intel'), ('diff', 'grade'), ('intel', 'grade'),
('intel', 'SAT'), ('grade', 'letter')]
"""
from pgmpy.models import MarkovModel
moral_graph = self.moralize()
mm = MarkovModel(moral_graph.edges())
mm.add_factors(*[cpd.to_factor() for cpd in self.cpds])
return mm
def to_junction_tree(self):
    """
    Creates a junction tree (or clique tree) for this bayesian model.

    The model is first converted to a markov model; the junction tree is
    then built from that. For a given markov model (H) a junction tree
    (G) is a graph where
    1. each node in G corresponds to a maximal clique in H, and
    2. each sepset in G separates the variables strictly on one side of
       the edge from the other.

    Examples
    --------
    >>> from pgmpy.models import BayesianModel
    >>> from pgmpy.factors import TabularCPD
    >>> G = BayesianModel([('diff', 'grade'), ('intel', 'grade'),
    ...                    ('intel', 'SAT'), ('grade', 'letter')])
    >>> diff_cpd = TabularCPD('diff', 2, [[0.2], [0.8]])
    >>> intel_cpd = TabularCPD('intel', 3, [[0.5], [0.3], [0.2]])
    >>> grade_cpd = TabularCPD('grade', 3,
    ...                        [[0.1,0.1,0.1,0.1,0.1,0.1],
    ...                         [0.1,0.1,0.1,0.1,0.1,0.1],
    ...                         [0.8,0.8,0.8,0.8,0.8,0.8]],
    ...                        evidence=['diff', 'intel'],
    ...                        evidence_card=[2, 3])
    >>> sat_cpd = TabularCPD('SAT', 2,
    ...                      [[0.1, 0.2, 0.7],
    ...                       [0.9, 0.8, 0.3]],
    ...                      evidence=['intel'], evidence_card=[3])
    >>> letter_cpd = TabularCPD('letter', 2,
    ...                         [[0.1, 0.4, 0.8],
    ...                          [0.9, 0.6, 0.2]],
    ...                         evidence=['grade'], evidence_card=[3])
    >>> G.add_cpds(diff_cpd, intel_cpd, grade_cpd, sat_cpd, letter_cpd)
    >>> jt = G.to_junction_tree()
    """
    # Delegate: moralize to a markov model, then triangulate there.
    return self.to_markov_model().to_junction_tree()
def fit(self, data, estimator_type=None):
    """
    Computes the CPD for each node from a given data in the form of a
    pandas dataframe, and attaches the resulting CPDs to the model.

    Parameters
    ----------
    data : pandas DataFrame object
        A DataFrame object with column names same as the variable names
        of the network.
    estimator_type : Estimator class (not an instance)
        Any pgmpy estimator class. If nothing is specified, the default
        Maximum Likelihood estimator is used.

    Raises
    ------
    TypeError
        If estimator_type is given but is not a pgmpy estimator class.

    Examples
    --------
    >>> import numpy as np
    >>> import pandas as pd
    >>> from pgmpy.models import BayesianModel
    >>> values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)),
    ...                       columns=['A', 'B', 'C', 'D', 'E'])
    >>> model = BayesianModel([('A', 'B'), ('C', 'B'), ('C', 'D'), ('B', 'E')])
    >>> model.fit(values)
    >>> model.get_cpds()
    [<pgmpy.factors.CPD.TabularCPD at 0x7fd173b2e588>,
    <pgmpy.factors.CPD.TabularCPD at 0x7fd173cb5e10>,
    <pgmpy.factors.CPD.TabularCPD at 0x7fd173b2e470>,
    <pgmpy.factors.CPD.TabularCPD at 0x7fd173b2e198>,
    <pgmpy.factors.CPD.TabularCPD at 0x7fd173b2e2e8>]
    """
    from pgmpy.estimators import MaximumLikelihoodEstimator, BaseEstimator
    if estimator_type is None:
        estimator_type = MaximumLikelihoodEstimator
    else:
        # BUG FIX: `estimator_type` is a *class* (it is instantiated just
        # below), so the original `isinstance(estimator_type, BaseEstimator)`
        # was always False and rejected every valid estimator class.
        # Check subclassing instead.
        if not (isinstance(estimator_type, type)
                and issubclass(estimator_type, BaseEstimator)):
            raise TypeError("Estimator object should be a valid pgmpy estimator.")
    estimator = estimator_type(self, data)
    cpds_list = estimator.get_parameters()
    self.add_cpds(*cpds_list)
def predict(self, data):
    """
    Predicts states of all the missing variables.

    Runs a MAP query per row of `data`, treating the row's known columns
    as evidence, and returns the predicted states of every model variable
    absent from `data`.

    Parameters
    ----------
    data : pandas DataFrame object
        A DataFrame object with column names same as the variables in the model.

    Raises
    ------
    ValueError
        If `data` already covers every model variable, or contains
        columns unknown to the model.

    Examples
    --------
    >>> import numpy as np
    >>> import pandas as pd
    >>> from pgmpy.models import BayesianModel
    >>> values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)),
    ...                       columns=['A', 'B', 'C', 'D', 'E'])
    >>> train_data = values[:800]
    >>> predict_data = values[800:]
    >>> model = BayesianModel([('A', 'B'), ('C', 'B'), ('C', 'D'), ('B', 'E')])
    >>> model.fit(values)
    >>> predict_data = predict_data.copy()
    >>> predict_data.drop('E', axis=1, inplace=True)
    >>> y_pred = model.predict(predict_data)
    """
    from pgmpy.inference import VariableElimination
    model_nodes = set(self.nodes())
    data_columns = set(data.columns)
    if data_columns == model_nodes:
        raise ValueError("No variable missing in data. Nothing to predict")
    elif data_columns - model_nodes:
        raise ValueError("data has variables which are not in the model")
    missing_variables = model_nodes - data_columns
    inference = VariableElimination(self)
    predictions = defaultdict(list)
    for _, row in data.iterrows():
        # MAP assignment of the missing variables given this row as evidence.
        assignment = inference.map_query(variables=missing_variables,
                                         evidence=row.to_dict())
        for variable, state in assignment.items():
            predictions[variable].append(state)
    return pd.DataFrame(predictions, index=data.index)
def get_factorized_product(self, latex=False):
    """Intentionally unimplemented stub; always returns None regardless of `latex`."""
    # TODO: refer to IMap class for explanation why this is not implemented.
    pass
def get_immoralities(self):
    """
    Finds all the immoralities in the model.

    A v-structure X -> Z <- Y is an immorality if there is no direct
    edge between X and Y (in either direction).

    Returns
    -------
    set: A set of all the immoralities in the model, each as a sorted
         pair of parent names.

    Examples
    ---------
    >>> from pgmpy.models import BayesianModel
    >>> student = BayesianModel()
    >>> student.add_edges_from([('diff', 'grade'), ('intel', 'grade'),
    ...                         ('intel', 'SAT'), ('grade', 'letter')])
    >>> student.get_immoralities()
    {('diff','intel')}
    """
    found = set()
    for child in self.nodes():
        for u, v in itertools.combinations(self.predecessors(child), 2):
            # Co-parents with no connecting edge in either direction
            # form a v-structure that is an immorality.
            if not (self.has_edge(u, v) or self.has_edge(v, u)):
                found.add(tuple(sorted((u, v))))
    return found
def is_iequivalent(self, model):
    """
    Checks whether the given model is I-equivalent to this one.

    Two graphs G1 and G2 are I-equivalent when they have the same
    skeleton and the same set of immoralities.

    Note: the skeleton comparison works up to isomorphism (node names
    may differ), but the immorality comparison requires identical node
    names.

    Parameters
    ----------
    model : A Bayesian model object, for which you want to check I-equivalence

    Returns
    --------
    boolean : True if both are I-equivalent, False otherwise

    Examples
    --------
    >>> from pgmpy.models import BayesianModel
    >>> G = BayesianModel()
    >>> G.add_edges_from([('V', 'W'), ('W', 'X'),
    ...                   ('X', 'Y'), ('Z', 'Y')])
    >>> G1 = BayesianModel()
    >>> G1.add_edges_from([('W', 'V'), ('X', 'W'),
    ...                    ('X', 'Y'), ('Z', 'Y')])
    >>> G.is_iequivalent(G1)
    True
    """
    if not isinstance(model, BayesianModel):
        raise TypeError('model must be an instance of Bayesian Model')
    matcher = nx.algorithms.isomorphism.GraphMatcher(
        self.to_undirected(), model.to_undirected())
    # Same skeleton (up to isomorphism) first; then compare immoralities.
    if not matcher.is_isomorphic():
        return False
    return self.get_immoralities() == model.get_immoralities()
def is_imap(self, JPD):
    """
    Checks whether the bayesian model is an I-map of the given
    JointProbabilityDistribution.

    Parameters
    -----------
    JPD : An instance of JointProbabilityDistribution Class, for which you want to
        check the Imap

    Returns
    --------
    boolean : True if bayesian model is Imap for given Joint Probability Distribution
        False otherwise

    Examples
    --------
    >>> from pgmpy.models import BayesianModel
    >>> from pgmpy.factors import TabularCPD
    >>> from pgmpy.factors import JointProbabilityDistribution
    >>> G = BayesianModel([('diff', 'grade'), ('intel', 'grade')])
    >>> diff_cpd = TabularCPD('diff', 2, [[0.2], [0.8]])
    >>> intel_cpd = TabularCPD('intel', 3, [[0.5], [0.3], [0.2]])
    >>> grade_cpd = TabularCPD('grade', 3,
    ...                        [[0.1,0.1,0.1,0.1,0.1,0.1],
    ...                         [0.1,0.1,0.1,0.1,0.1,0.1],
    ...                         [0.8,0.8,0.8,0.8,0.8,0.8]],
    ...                        evidence=['diff', 'intel'],
    ...                        evidence_card=[2, 3])
    >>> G.add_cpds(diff_cpd, intel_cpd, grade_cpd)
    >>> val = [0.01, 0.01, 0.08, 0.006, 0.006, 0.048, 0.004, 0.004, 0.032,
               0.04, 0.04, 0.32, 0.024, 0.024, 0.192, 0.016, 0.016, 0.128]
    >>> JPD = JointProbabilityDistribution(['diff', 'intel', 'grade'], [2, 3, 3], val)
    >>> G.is_imap(JPD)
    True
    """
    if not isinstance(JPD, JointProbabilityDistribution):
        raise TypeError("JPD must be an instance of JointProbabilityDistribution")
    # The product of all CPDs (as factors) is the joint the network encodes.
    network_product = reduce(mul, [cpd.to_factor() for cpd in self.get_cpds()])
    given_jpd = Factor(JPD.variables, JPD.cardinality, JPD.values)
    if given_jpd == network_product:
        return True
    return False
| {
"content_hash": "c82e197329372ca1894d57b7d1cf9ada",
"timestamp": "",
"source": "github",
"line_count": 729,
"max_line_length": 111,
"avg_line_length": 37.79835390946502,
"alnum_prop": 0.5126111413536564,
"repo_name": "liquidmetal/pgmpy",
"id": "95d8a5d0ba11816c68a5895af9f08b321f7c7beb",
"size": "27579",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "pgmpy/models/BayesianModel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "897788"
},
{
"name": "Shell",
"bytes": "1022"
}
],
"symlink_target": ""
} |
"""Test configs for fill."""
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_scalar_data
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_fill_tests(options):
  """Make a set of tests to do fill."""

  test_parameters = [{
      "dims_dtype": [tf.int32, tf.int64],
      "dims_shape": [[], [1], [3], [3, 3]],
      "value_dtype": [tf.int32, tf.int64, tf.float32, tf.bool, tf.string],
  }]

  def build_graph(parameters):
    """Build the fill op testing graph."""
    dims_input = tf.compat.v1.placeholder(
        dtype=parameters["dims_dtype"],
        name="dims",
        shape=parameters["dims_shape"])
    value_input = tf.compat.v1.placeholder(
        dtype=parameters["value_dtype"], name="value", shape=[])
    filled = tf.fill(dims_input, value_input)
    return [dims_input, value_input], [filled]

  def build_inputs(parameters, sess, inputs, outputs):
    """Generate concrete dims/value inputs and run the graph on them."""
    dims_value = create_tensor_data(parameters["dims_dtype"],
                                    parameters["dims_shape"], 1)
    fill_value = create_scalar_data(parameters["value_dtype"])
    feed = dict(zip(inputs, [dims_value, fill_value]))
    return [dims_value, fill_value], sess.run(outputs, feed_dict=feed)

  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=20)
| {
"content_hash": "8d9b6681983a222a2d34edcfcb632045",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 78,
"avg_line_length": 35.69047619047619,
"alnum_prop": 0.6537691794529686,
"repo_name": "Intel-Corporation/tensorflow",
"id": "47fe75adafcaaf5c3a8b98aa9d3003ca1a811972",
"size": "2188",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tensorflow/lite/testing/op_tests/fill.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7481"
},
{
"name": "C",
"bytes": "183416"
},
{
"name": "C++",
"bytes": "24549804"
},
{
"name": "CMake",
"bytes": "160888"
},
{
"name": "Go",
"bytes": "849081"
},
{
"name": "HTML",
"bytes": "681293"
},
{
"name": "Java",
"bytes": "307123"
},
{
"name": "Jupyter Notebook",
"bytes": "1833659"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37393"
},
{
"name": "Objective-C",
"bytes": "7037"
},
{
"name": "Objective-C++",
"bytes": "64142"
},
{
"name": "Protocol Buffer",
"bytes": "218430"
},
{
"name": "Python",
"bytes": "21875003"
},
{
"name": "Shell",
"bytes": "337846"
},
{
"name": "TypeScript",
"bytes": "849555"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.