repo_name | ref | path | copies | content |
|---|---|---|---|---|
fanscribed/django-react | refs/heads/fs | example/manage.py | 12 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djangosite.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
rspavel/spack | refs/heads/develop | var/spack/repos/builtin/packages/libaio/package.py | 5 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *


class Libaio(MakefilePackage):
    """Linux native Asynchronous I/O interface library.

    AIO enables even a single application thread to overlap I/O operations
    with other processing, by providing an interface for submitting one or
    more I/O requests in one system call (io_submit()) without waiting for
    completion, and a separate interface (io_getevents()) to reap completed
    I/O operations associated with a given completion group.
    """

    homepage = "http://lse.sourceforge.net/io/aio.html"
    url = "https://debian.inf.tu-dresden.de/debian/pool/main/liba/libaio/libaio_0.3.110.orig.tar.gz"

    version('0.3.110', sha256='e019028e631725729376250e32b473012f7cb68e1f7275bfc1bbcdd0f8745f7e')

    conflicts('platform=darwin', msg="libaio is a linux specific library")

    @property
    def install_targets(self):
        return ['prefix={0}'.format(self.spec.prefix), 'install']
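
    # Illustrative only: with an install prefix of /opt/spack/libaio-0.3.110,
    # install_targets evaluates to ['prefix=/opt/spack/libaio-0.3.110', 'install'],
    # so the `make install` stage writes straight into the Spack prefix.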
|
jobiols/knowledge | refs/heads/8.0 | document_rtf_index/tests/__init__.py | 5 | # -*- coding: utf-8 -*-
# © 2016 Therp BV <http://therp.nl>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from . import test_rtf_index
|
yuruofeifei/mxnet | refs/heads/master | python/mxnet/gluon/model_zoo/vision/inception.py | 10 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable= arguments-differ
"""Inception, implemented in Gluon."""
__all__ = ['Inception3', 'inception_v3']
from ....context import cpu
from ...block import HybridBlock
from ... import nn
from ..custom_layers import HybridConcurrent
# Helpers
def _make_basic_conv(**kwargs):
    out = nn.HybridSequential(prefix='')
    out.add(nn.Conv2D(use_bias=False, **kwargs))
    out.add(nn.BatchNorm(epsilon=0.001))
    out.add(nn.Activation('relu'))
    return out


def _make_branch(use_pool, *conv_settings):
    out = nn.HybridSequential(prefix='')
    if use_pool == 'avg':
        out.add(nn.AvgPool2D(pool_size=3, strides=1, padding=1))
    elif use_pool == 'max':
        out.add(nn.MaxPool2D(pool_size=3, strides=2))
    setting_names = ['channels', 'kernel_size', 'strides', 'padding']
    for setting in conv_settings:
        kwargs = {}
        for i, value in enumerate(setting):
            if value is not None:
                kwargs[setting_names[i]] = value
        out.add(_make_basic_conv(**kwargs))
    return out
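
# For example, _make_branch(None, (64, 1, None, None)) builds a single 1x1
# convolution with 64 channels (stride and padding left at the layer
# defaults), while _make_branch('avg', (pool_features, 1, None, None))
# prepends a 3x3 average pool before the same kind of convolution.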
def _make_A(pool_features, prefix):
    out = HybridConcurrent(concat_dim=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None,
                             (64, 1, None, None)))
        out.add(_make_branch(None,
                             (48, 1, None, None),
                             (64, 5, None, 2)))
        out.add(_make_branch(None,
                             (64, 1, None, None),
                             (96, 3, None, 1),
                             (96, 3, None, 1)))
        out.add(_make_branch('avg',
                             (pool_features, 1, None, None)))
    return out


def _make_B(prefix):
    out = HybridConcurrent(concat_dim=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None,
                             (384, 3, 2, None)))
        out.add(_make_branch(None,
                             (64, 1, None, None),
                             (96, 3, None, 1),
                             (96, 3, 2, None)))
        out.add(_make_branch('max'))
    return out


def _make_C(channels_7x7, prefix):
    out = HybridConcurrent(concat_dim=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None,
                             (192, 1, None, None)))
        out.add(_make_branch(None,
                             (channels_7x7, 1, None, None),
                             (channels_7x7, (1, 7), None, (0, 3)),
                             (192, (7, 1), None, (3, 0))))
        out.add(_make_branch(None,
                             (channels_7x7, 1, None, None),
                             (channels_7x7, (7, 1), None, (3, 0)),
                             (channels_7x7, (1, 7), None, (0, 3)),
                             (channels_7x7, (7, 1), None, (3, 0)),
                             (192, (1, 7), None, (0, 3))))
        out.add(_make_branch('avg',
                             (192, 1, None, None)))
    return out


def _make_D(prefix):
    out = HybridConcurrent(concat_dim=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None,
                             (192, 1, None, None),
                             (320, 3, 2, None)))
        out.add(_make_branch(None,
                             (192, 1, None, None),
                             (192, (1, 7), None, (0, 3)),
                             (192, (7, 1), None, (3, 0)),
                             (192, 3, 2, None)))
        out.add(_make_branch('max'))
    return out


def _make_E(prefix):
    out = HybridConcurrent(concat_dim=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None,
                             (320, 1, None, None)))

        branch_3x3 = nn.HybridSequential(prefix='')
        out.add(branch_3x3)
        branch_3x3.add(_make_branch(None,
                                    (384, 1, None, None)))
        branch_3x3_split = HybridConcurrent(concat_dim=1, prefix='')
        branch_3x3_split.add(_make_branch(None,
                                          (384, (1, 3), None, (0, 1))))
        branch_3x3_split.add(_make_branch(None,
                                          (384, (3, 1), None, (1, 0))))
        branch_3x3.add(branch_3x3_split)

        branch_3x3dbl = nn.HybridSequential(prefix='')
        out.add(branch_3x3dbl)
        branch_3x3dbl.add(_make_branch(None,
                                       (448, 1, None, None),
                                       (384, 3, None, 1)))
        branch_3x3dbl_split = HybridConcurrent(concat_dim=1, prefix='')
        branch_3x3dbl.add(branch_3x3dbl_split)
        branch_3x3dbl_split.add(_make_branch(None,
                                             (384, (1, 3), None, (0, 1))))
        branch_3x3dbl_split.add(_make_branch(None,
                                             (384, (3, 1), None, (1, 0))))

        out.add(_make_branch('avg',
                             (192, 1, None, None)))
    return out


def make_aux(classes):
    out = nn.HybridSequential(prefix='')
    out.add(nn.AvgPool2D(pool_size=5, strides=3))
    out.add(_make_basic_conv(channels=128, kernel_size=1))
    out.add(_make_basic_conv(channels=768, kernel_size=5))
    out.add(nn.Flatten())
    out.add(nn.Dense(classes))
    return out
# Net
class Inception3(HybridBlock):
    r"""Inception v3 model from
    `"Rethinking the Inception Architecture for Computer Vision"
    <http://arxiv.org/abs/1512.00567>`_ paper.

    Parameters
    ----------
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self, classes=1000, **kwargs):
        super(Inception3, self).__init__(**kwargs)
        # self.use_aux_logits = use_aux_logits
        with self.name_scope():
            self.features = nn.HybridSequential(prefix='')
            self.features.add(_make_basic_conv(channels=32, kernel_size=3, strides=2))
            self.features.add(_make_basic_conv(channels=32, kernel_size=3))
            self.features.add(_make_basic_conv(channels=64, kernel_size=3, padding=1))
            self.features.add(nn.MaxPool2D(pool_size=3, strides=2))
            self.features.add(_make_basic_conv(channels=80, kernel_size=1))
            self.features.add(_make_basic_conv(channels=192, kernel_size=3))
            self.features.add(nn.MaxPool2D(pool_size=3, strides=2))
            self.features.add(_make_A(32, 'A1_'))
            self.features.add(_make_A(64, 'A2_'))
            self.features.add(_make_A(64, 'A3_'))
            self.features.add(_make_B('B_'))
            self.features.add(_make_C(128, 'C1_'))
            self.features.add(_make_C(160, 'C2_'))
            self.features.add(_make_C(160, 'C3_'))
            self.features.add(_make_C(192, 'C4_'))
            self.features.add(_make_D('D_'))
            self.features.add(_make_E('E1_'))
            self.features.add(_make_E('E2_'))
            self.features.add(nn.AvgPool2D(pool_size=8))
            self.features.add(nn.Dropout(0.5))

            self.output = nn.Dense(classes)

    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
# Constructor
def inception_v3(pretrained=False, ctx=cpu(), root='~/.mxnet/models', **kwargs):
    r"""Inception v3 model from
    `"Rethinking the Inception Architecture for Computer Vision"
    <http://arxiv.org/abs/1512.00567>`_ paper.

    Parameters
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = Inception3(**kwargs)
    if pretrained:
        from ..model_store import get_model_file
        net.load_params(get_model_file('inceptionv3', root=root), ctx=ctx)
    return net
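
# A minimal usage sketch (illustrative; the exact random-sampling helper
# name varies slightly across MXNet versions):
#   import mxnet as mx
#   net = inception_v3(pretrained=False)
#   net.initialize()
#   x = mx.nd.random_uniform(shape=(1, 3, 299, 299))  # Inception v3 input size
#   y = net(x)  # (1, 1000) class scores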
|
fran918/repuestos365 | refs/heads/master | client/node_modules/node-gyp/gyp/gyp_main.py | 1452 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
# Make sure we're using the version of pylib in this repo, not one installed
# elsewhere on the system.
sys.path.insert(0, os.path.join(os.path.dirname(sys.argv[0]), 'pylib'))
import gyp
if __name__ == '__main__':
    sys.exit(gyp.script_main())
|
nhippenmeyer/django | refs/heads/master | tests/fixtures_regress/models.py | 281 | from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.db import models
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Animal(models.Model):
    name = models.CharField(max_length=150)
    latin_name = models.CharField(max_length=150)
    count = models.IntegerField()
    weight = models.FloatField()

    # use a non-default name for the default manager
    specimens = models.Manager()

    def __str__(self):
        return self.name


class Plant(models.Model):
    name = models.CharField(max_length=150)

    class Meta:
        # For testing when upper case letter in app name; regression for #4057
        db_table = "Fixtures_regress_plant"


@python_2_unicode_compatible
class Stuff(models.Model):
    name = models.CharField(max_length=20, null=True)
    owner = models.ForeignKey(User, models.SET_NULL, null=True)

    def __str__(self):
        return six.text_type(self.name) + ' is owned by ' + six.text_type(self.owner)


class Absolute(models.Model):
    name = models.CharField(max_length=40)


class Parent(models.Model):
    name = models.CharField(max_length=10)

    class Meta:
        ordering = ('id',)


class Child(Parent):
    data = models.CharField(max_length=10)


# Models to regression test #7572, #20820
class Channel(models.Model):
    name = models.CharField(max_length=255)


class Article(models.Model):
    title = models.CharField(max_length=255)
    channels = models.ManyToManyField(Channel)

    class Meta:
        ordering = ('id',)


# Subclass of a model with a ManyToManyField for test_ticket_20820
class SpecialArticle(Article):
    pass


# Models to regression test #22421
class CommonFeature(Article):
    class Meta:
        abstract = True


class Feature(CommonFeature):
    pass


# Models to regression test #11428
@python_2_unicode_compatible
class Widget(models.Model):
    name = models.CharField(max_length=255)

    class Meta:
        ordering = ('name',)

    def __str__(self):
        return self.name


class WidgetProxy(Widget):
    class Meta:
        proxy = True


# Check for forward references in FKs and M2Ms with natural keys
class TestManager(models.Manager):
    def get_by_natural_key(self, key):
        return self.get(name=key)


@python_2_unicode_compatible
class Store(models.Model):
    objects = TestManager()
    name = models.CharField(max_length=255)

    class Meta:
        ordering = ('name',)

    def __str__(self):
        return self.name

    def natural_key(self):
        return (self.name,)


@python_2_unicode_compatible
class Person(models.Model):
    objects = TestManager()
    name = models.CharField(max_length=255)

    class Meta:
        ordering = ('name',)

    def __str__(self):
        return self.name

    # Person doesn't actually have a dependency on store, but we need to define
    # one to test the behavior of the dependency resolution algorithm.
    def natural_key(self):
        return (self.name,)
    natural_key.dependencies = ['fixtures_regress.store']


@python_2_unicode_compatible
class Book(models.Model):
    name = models.CharField(max_length=255)
    author = models.ForeignKey(Person, models.CASCADE)
    stores = models.ManyToManyField(Store)

    class Meta:
        ordering = ('name',)

    def __str__(self):
        return '%s by %s (available at %s)' % (
            self.name,
            self.author.name,
            ', '.join(s.name for s in self.stores.all())
        )


class NKManager(models.Manager):
    def get_by_natural_key(self, data):
        return self.get(data=data)


@python_2_unicode_compatible
class NKChild(Parent):
    data = models.CharField(max_length=10, unique=True)
    objects = NKManager()

    def natural_key(self):
        return (self.data,)

    def __str__(self):
        return 'NKChild %s:%s' % (self.name, self.data)


@python_2_unicode_compatible
class RefToNKChild(models.Model):
    text = models.CharField(max_length=10)
    nk_fk = models.ForeignKey(NKChild, models.CASCADE, related_name='ref_fks')
    nk_m2m = models.ManyToManyField(NKChild, related_name='ref_m2ms')

    def __str__(self):
        return '%s: Reference to %s [%s]' % (
            self.text,
            self.nk_fk,
            ', '.join(str(o) for o in self.nk_m2m.all())
        )
# Some models with pathological circular dependencies
class Circle1(models.Model):
    name = models.CharField(max_length=255)

    def natural_key(self):
        return (self.name,)
    natural_key.dependencies = ['fixtures_regress.circle2']


class Circle2(models.Model):
    name = models.CharField(max_length=255)

    def natural_key(self):
        return (self.name,)
    natural_key.dependencies = ['fixtures_regress.circle1']


class Circle3(models.Model):
    name = models.CharField(max_length=255)

    def natural_key(self):
        return (self.name,)
    natural_key.dependencies = ['fixtures_regress.circle3']


class Circle4(models.Model):
    name = models.CharField(max_length=255)

    def natural_key(self):
        return (self.name,)
    natural_key.dependencies = ['fixtures_regress.circle5']


class Circle5(models.Model):
    name = models.CharField(max_length=255)

    def natural_key(self):
        return (self.name,)
    natural_key.dependencies = ['fixtures_regress.circle6']


class Circle6(models.Model):
    name = models.CharField(max_length=255)

    def natural_key(self):
        return (self.name,)
    natural_key.dependencies = ['fixtures_regress.circle4']


class ExternalDependency(models.Model):
    name = models.CharField(max_length=255)

    def natural_key(self):
        return (self.name,)
    natural_key.dependencies = ['fixtures_regress.book']


# Model for regression test of #11101
class Thingy(models.Model):
    name = models.CharField(max_length=255)


class M2MToSelf(models.Model):
    parent = models.ManyToManyField("self", blank=True)


@python_2_unicode_compatible
class BaseNKModel(models.Model):
    """
    Base model with a natural_key and a manager with `get_by_natural_key`
    """
    data = models.CharField(max_length=20, unique=True)
    objects = NKManager()

    class Meta:
        abstract = True

    def __str__(self):
        return self.data

    def natural_key(self):
        return (self.data,)


class M2MSimpleA(BaseNKModel):
    b_set = models.ManyToManyField("M2MSimpleB")


class M2MSimpleB(BaseNKModel):
    pass


class M2MSimpleCircularA(BaseNKModel):
    b_set = models.ManyToManyField("M2MSimpleCircularB")


class M2MSimpleCircularB(BaseNKModel):
    a_set = models.ManyToManyField("M2MSimpleCircularA")


class M2MComplexA(BaseNKModel):
    b_set = models.ManyToManyField("M2MComplexB", through="M2MThroughAB")


class M2MComplexB(BaseNKModel):
    pass


class M2MThroughAB(BaseNKModel):
    a = models.ForeignKey(M2MComplexA, models.CASCADE)
    b = models.ForeignKey(M2MComplexB, models.CASCADE)


class M2MComplexCircular1A(BaseNKModel):
    b_set = models.ManyToManyField("M2MComplexCircular1B",
                                   through="M2MCircular1ThroughAB")


class M2MComplexCircular1B(BaseNKModel):
    c_set = models.ManyToManyField("M2MComplexCircular1C",
                                   through="M2MCircular1ThroughBC")


class M2MComplexCircular1C(BaseNKModel):
    a_set = models.ManyToManyField("M2MComplexCircular1A",
                                   through="M2MCircular1ThroughCA")


class M2MCircular1ThroughAB(BaseNKModel):
    a = models.ForeignKey(M2MComplexCircular1A, models.CASCADE)
    b = models.ForeignKey(M2MComplexCircular1B, models.CASCADE)


class M2MCircular1ThroughBC(BaseNKModel):
    b = models.ForeignKey(M2MComplexCircular1B, models.CASCADE)
    c = models.ForeignKey(M2MComplexCircular1C, models.CASCADE)


class M2MCircular1ThroughCA(BaseNKModel):
    c = models.ForeignKey(M2MComplexCircular1C, models.CASCADE)
    a = models.ForeignKey(M2MComplexCircular1A, models.CASCADE)


class M2MComplexCircular2A(BaseNKModel):
    b_set = models.ManyToManyField("M2MComplexCircular2B",
                                   through="M2MCircular2ThroughAB")


class M2MComplexCircular2B(BaseNKModel):
    def natural_key(self):
        return (self.data,)
    # Fake the dependency for a circularity
    natural_key.dependencies = ["fixtures_regress.M2MComplexCircular2A"]


class M2MCircular2ThroughAB(BaseNKModel):
    a = models.ForeignKey(M2MComplexCircular2A, models.CASCADE)
    b = models.ForeignKey(M2MComplexCircular2B, models.CASCADE)
|
vrthra/pygram | refs/heads/master | libs/accesslog.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os.path
class LogAnalyzer():
    """ Parses and summarizes nginx logfiles """

    def __init__(self, content, topcount=5):
        """ Initializing """
        self.summary = {
            "requests": {},
            "ips": {},
            "useragents": {}
        }
        self.topcount = topcount
        self.content = content

    def analyze(self):
        """ Reads and splits the access-log into our dictionary """
        lines = self.content.split("\n")
        loglist = []
        for s in lines:
            line = s.strip()
            if len(line) == 0: continue
            tmp = line.split(' ')
            ip = tmp[0]
            # not the finest way... get the indices of all double quotes
            doublequotes = LogAnalyzer.find_chars(line, '"')
            # get the starting/ending indices of request & useragent by their quotes
            request_start = doublequotes[0]+1
            request_end = doublequotes[1]
            useragent_start = doublequotes[4]+1
            useragent_end = doublequotes[5]
            request = line[request_start:request_end]
            useragent = line[useragent_start:useragent_end]
            # collect one dictionary per line into a list
            loglist.append({
                "ip": ip,
                "request": request,
                "useragent": useragent
            })
        self.summarize(loglist)

    def summarize(self, cols):
        """ count occurrences """
        for col in cols:
            if not col['request'] in self.summary['requests']:
                self.summary['requests'][col['request']] = 0
            self.summary['requests'][col['request']] += 1
            if not col['ip'] in self.summary['ips']:
                self.summary['ips'][col['ip']] = 0
            self.summary['ips'][col['ip']] += 1
            if not col['useragent'] in self.summary['useragents']:
                self.summary['useragents'][col['useragent']] = 0
            self.summary['useragents'][col['useragent']] += 1

    @staticmethod
    def find_chars(string, char):
        """ returns a list of all indices of char inside string """
        return [i for i, ltr in enumerate(string) if ltr == char]
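
# A minimal usage sketch (the log line below is illustrative, in the
# combined log format this parser expects: six double quotes per line):
if __name__ == '__main__':
    sample = ('1.2.3.4 - - [10/Oct/2016:13:55:36 +0000] '
              '"GET /index.html HTTP/1.1" 200 2326 "-" "Mozilla/5.0"')
    analyzer = LogAnalyzer(sample)
    analyzer.analyze()
    print(analyzer.summary['ips'])       # {'1.2.3.4': 1}
    print(analyzer.summary['requests'])  # {'GET /index.html HTTP/1.1': 1}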
|
blacklin/kbengine | refs/heads/master | kbe/src/lib/python/Lib/encodings/zlib_codec.py | 202 | """Python 'zlib_codec' Codec - zlib compression encoding.
This codec de/encodes from bytes to bytes.
Written by Marc-Andre Lemburg (mal@lemburg.com).
"""
import codecs
import zlib # this codec needs the optional zlib module !
### Codec APIs
def zlib_encode(input, errors='strict'):
    assert errors == 'strict'
    return (zlib.compress(input), len(input))

def zlib_decode(input, errors='strict'):
    assert errors == 'strict'
    return (zlib.decompress(input), len(input))

class Codec(codecs.Codec):
    def encode(self, input, errors='strict'):
        return zlib_encode(input, errors)
    def decode(self, input, errors='strict'):
        return zlib_decode(input, errors)

class IncrementalEncoder(codecs.IncrementalEncoder):
    def __init__(self, errors='strict'):
        assert errors == 'strict'
        self.errors = errors
        self.compressobj = zlib.compressobj()

    def encode(self, input, final=False):
        if final:
            c = self.compressobj.compress(input)
            return c + self.compressobj.flush()
        else:
            return self.compressobj.compress(input)

    def reset(self):
        self.compressobj = zlib.compressobj()

class IncrementalDecoder(codecs.IncrementalDecoder):
    def __init__(self, errors='strict'):
        assert errors == 'strict'
        self.errors = errors
        self.decompressobj = zlib.decompressobj()

    def decode(self, input, final=False):
        if final:
            c = self.decompressobj.decompress(input)
            return c + self.decompressobj.flush()
        else:
            return self.decompressobj.decompress(input)

    def reset(self):
        self.decompressobj = zlib.decompressobj()

class StreamWriter(Codec, codecs.StreamWriter):
    charbuffertype = bytes

class StreamReader(Codec, codecs.StreamReader):
    charbuffertype = bytes

### encodings module API

def getregentry():
    return codecs.CodecInfo(
        name='zlib',
        encode=zlib_encode,
        decode=zlib_decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
        _is_text_encoding=False,
    )
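
# Usage sketch: because this is a bytes-to-bytes codec, it is reached
# through codecs.encode/codecs.decode rather than str.encode:
#   >>> import codecs
#   >>> codecs.decode(codecs.encode(b'payload', 'zlib'), 'zlib')
#   b'payload'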
|
acsone/hr | refs/heads/8.0 | hr_family/models/hr_employee_marital_status.py | 1 | # -*- coding: utf-8 -*-
# © 2017 Therp BV <http://therp.nl>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import api, fields, models
class HrEmployeeMaritalStatus(models.Model):
    _name = 'hr.employee.marital.status'
    _description = 'Marital status'

    name = fields.Char(required=True, translate=True)
    code = fields.Char()

    @api.multi
    def name_get(self):
        return [
            (
                this.id,
                '[%s] %s' % (this.code, this.name) if this.code
                else this.name
            )
            for this in self
        ]
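
# For example, a status with code 'M' and name 'Married' displays as
# '[M] Married'; a status without a code falls back to the bare name.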
|
imageboards/Orphereus | refs/heads/master | Orphereus/model/Picture.py | 1 | ################################################################################
# Copyright (C) 2009 Johan Liebert, Mantycore, Hedger, Rusanon #
# < anoma.team@gmail.com ; http://orphereus.anoma.ch > #
# #
# This file is part of Orphereus, an imageboard engine. #
# #
# This program is free software; you can redistribute it and/or #
# modify it under the terms of the GNU General Public License #
# as published by the Free Software Foundation; either version 2 #
# of the License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the Free Software #
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. #
################################################################################
import sqlalchemy as sa
from sqlalchemy import orm
from Orphereus.model import meta
import os
import Image
import logging
log = logging.getLogger(__name__)
t_piclist = sa.Table("picture", meta.metadata,
    sa.Column("id" , sa.types.Integer, primary_key = True),
    sa.Column("path" , sa.types.String(255), nullable = False),
    sa.Column("thumpath" , sa.types.String(255), nullable = False),
    sa.Column("width" , sa.types.Integer, nullable = True),
    sa.Column("height" , sa.types.Integer, nullable = True),
    sa.Column("thwidth" , sa.types.Integer, nullable = False),
    sa.Column("thheight" , sa.types.Integer, nullable = False),
    sa.Column("size" , sa.types.Integer, nullable = False),
    sa.Column("md5" , sa.types.String(32), nullable = False),
    sa.Column("extid" , sa.types.Integer, sa.ForeignKey('extension.id')),
    sa.Column("pictureInfo" , sa.types.UnicodeText, nullable = True),
    #sa.Column("animpath" , sa.types.String(255), nullable = True), #TODO: XXX: dirty solution
    )

t_filesToPostsMap = sa.Table("filesToPostsMap", meta.metadata,
    sa.Column("id" , sa.types.Integer, primary_key = True),
    sa.Column('postId', sa.types.Integer, sa.ForeignKey('post.id'), primary_key = True),
    sa.Column('fileId', sa.types.Integer, sa.ForeignKey('picture.id'), primary_key = True),
    sa.Column("spoiler", sa.types.Boolean, nullable = True),
    sa.Column("relationInfo" , sa.types.UnicodeText, nullable = True),
    sa.Column("animpath" , sa.types.String(255), nullable = True),
    )

class PictureAssociation(object):
    def __init__(self, spoiler, relationInfo, animPath):
        self.spoiler = spoiler
        self.relationInfo = relationInfo
        self.animpath = animPath

class Picture(object):
    def __init__(self, relativeFilePath, thumbFilePath, fileSize, picSizes, extId, md5, pictureInfo):
        self.path = relativeFilePath
        self.thumpath = thumbFilePath
        self.width = picSizes[0]
        self.height = picSizes[1]
        self.thwidth = picSizes[2]
        self.thheight = picSizes[3]
        self.extid = extId
        self.size = fileSize
        self.md5 = md5
        self.pictureInfo = pictureInfo

    @staticmethod
    def create(relativeFilePath, thumbFilePath, fileSize, picSizes, extId, md5, pictureInfo, commit = False):
        pic = Picture(relativeFilePath, thumbFilePath, fileSize, picSizes, extId, md5, pictureInfo)
        if commit:
            meta.Session.add(pic)
            meta.Session.commit()
        return pic

    @staticmethod
    def getPicture(id):
        return Picture.query.filter(Picture.id == id).first()

    @staticmethod
    def getByMd5(md5):
        q = Picture.query.filter(Picture.md5 == md5)
        assert q.count() < 2
        return q.first()

    @staticmethod
    def makeThumbnail(source, dest, maxSize):
        sourceImage = Image.open(source)
        size = sourceImage.size
        if sourceImage:
            sourceImage.thumbnail(maxSize, Image.ANTIALIAS)
            sourceImage.save(dest)
            return size + sourceImage.size
        else:
            return []
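
    # Illustrative numbers: thumbnailing a 1024x768 source with
    # maxSize = (200, 200) keeps the aspect ratio, so the return value is
    # (1024, 768, 200, 150): the original size followed by the thumbnail size.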
    def pictureRefCount(self):
        from Orphereus.model.Post import Post
        return Post.query.filter(Post.attachments.any(PictureAssociation.attachedFile.has(Picture.id == self.id))).count()

    def deletePicture(self, commit = True):
        if self.id > 0 and self.pictureRefCount() == 0:
            filePath = os.path.join(meta.globj.OPT.uploadPath, self.path)
            thumPath = os.path.join(meta.globj.OPT.uploadPath, self.thumpath)
            if os.path.isfile(filePath):
                os.unlink(filePath)
            ext = self.extension
            if not ext.path:
                if os.path.isfile(thumPath):
                    os.unlink(thumPath)
            meta.Session.delete(self)
            if commit:
                meta.Session.commit()
|
mlufei/depot_tools | refs/heads/master | third_party/boto/s3/__init__.py | 59 | # Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.regioninfo import RegionInfo
class S3RegionInfo(RegionInfo):

    def connect(self, **kw_params):
        """
        Connect to this Region's endpoint. Returns a connection
        object pointing to the endpoint associated with this region.

        You may pass any of the arguments accepted by the connection
        class's constructor as keyword arguments and they will be
        passed along to the connection object.

        :rtype: Connection object
        :return: The connection to this region's endpoint
        """
        if self.connection_cls:
            return self.connection_cls(host=self.endpoint, **kw_params)


def regions():
    """
    Get all available regions for the Amazon S3 service.

    :rtype: list
    :return: A list of :class:`boto.regioninfo.RegionInfo`
    """
    from .connection import S3Connection
    return [S3RegionInfo(name='us-east-1',
                         endpoint='s3.amazonaws.com',
                         connection_cls=S3Connection),
            S3RegionInfo(name='us-west-1',
                         endpoint='s3-us-west-1.amazonaws.com',
                         connection_cls=S3Connection),
            S3RegionInfo(name='us-west-2',
                         endpoint='s3-us-west-2.amazonaws.com',
                         connection_cls=S3Connection),
            S3RegionInfo(name='ap-northeast-1',
                         endpoint='s3-ap-northeast-1.amazonaws.com',
                         connection_cls=S3Connection),
            S3RegionInfo(name='ap-southeast-1',
                         endpoint='s3-ap-southeast-1.amazonaws.com',
                         connection_cls=S3Connection),
            S3RegionInfo(name='ap-southeast-2',
                         endpoint='s3-ap-southeast-2.amazonaws.com',
                         connection_cls=S3Connection),
            S3RegionInfo(name='eu-west-1',
                         endpoint='s3-eu-west-1.amazonaws.com',
                         connection_cls=S3Connection),
            S3RegionInfo(name='sa-east-1',
                         endpoint='s3-sa-east-1.amazonaws.com',
                         connection_cls=S3Connection),
            ]


def connect_to_region(region_name, **kw_params):
    for region in regions():
        if region.name == region_name:
            return region.connect(**kw_params)
    return None
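
# A minimal usage sketch (credentials are assumed to come from the
# environment or a boto config file; the bucket name is a placeholder):
#   from boto.s3 import connect_to_region
#   conn = connect_to_region('us-west-2')
#   bucket = conn.get_bucket('my-bucket')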
|
Gogistics/prjGogistics | refs/heads/master | prjGogistics/src/dispatchers/dispatchers_index.py | 1 | # -*- coding: utf-8 -*-
'''
Created on May 21, 2014
@author: Alan Tai
@description:
This module is part of the company website project; it is the view controller that dispatches URL paths.
'''
__author__ = 'Alan Tai<gogistics@gogistics-tw.com>'
import json
import logging
from handlers_general.handler_languages_versions import MandarinHandler,\
EnglishHandler
import webapp2
import jinja2
import os
from dictionaries.dict_keys_values import KeysVaulesGeneral, KeysValuesMandarin,\
KeysValuesEnglish
from dictionaries.dict_html_pages import HtmlPagesReference
from google.appengine.api import taskqueue
#append templates' path directory
jinja_environment = jinja2.Environment(loader=jinja2.FileSystemLoader('static/templates')) # append templates' path
CLIENT_SECRETS = os.path.join(os.path.dirname(__file__), 'client_secrets.json')
#dictionaries instances
keys_values_general = KeysVaulesGeneral()
keys_values_mandarin = KeysValuesMandarin()
keys_values_english = KeysValuesEnglish()
#html reference
html_pages_ref = HtmlPagesReference()
language_handler_madarin = MandarinHandler()
language_handler_english = EnglishHandler()
#index handler
class IndexPageDispatcher(webapp2.RequestHandler):
    def get(self):
        ''' index dispatcher '''
        # for gcm demo use; for testing only
        # taskqueue.add(url = '/gcm_send_message')

        # index context
        template_values = {}
        template_values.update(language_handler_madarin.handle_index_page_info())
        template = jinja_environment.get_template(html_pages_ref.html_index)
        self.response.out.write(template.render(template_values))

class IndexContactMessageDispatcher(webapp2.RequestHandler):
    def post(self):
        pass

class IndexLanguageVersionDispatcher(webapp2.RequestHandler):
    def post(self):
        assert self.request.get('fmt'), 'data format is not available'
        assert self.request.get('json_language_version_request'), 'request content is not available'

        json_obj = {}
        if self.request.get('fmt') == 'json':
            json_obj = json.loads(self.request.get('json_language_version_request'))
        token_html_page = json_obj['token_html_page']
        requested_language = json_obj['language']

        ajax_response = {}
        if requested_language == 'mandarin' and token_html_page == keys_values_general.token_index_page:
            ajax_response.update(language_handler_madarin.handle_index_page_info())
            ajax_response['request_status'] = 'success'
        elif requested_language == 'english' and token_html_page == keys_values_general.token_index_page:
            ajax_response.update(language_handler_english.handle_index_page_info())
            ajax_response['request_status'] = 'success'
        else:
            ajax_response.update(language_handler_madarin.handle_index_page_info())
            ajax_response['request_status'] = 'success'

        self.response.out.headers['Content-Type'] = 'text/json'
        self.response.out.write(json.dumps(ajax_response))

#url dispatcher
app = webapp2.WSGIApplication([('/', IndexPageDispatcher),
                               ('/index_language_version_handler', IndexLanguageVersionDispatcher)], debug=True)
#log
logging.getLogger().setLevel(logging.DEBUG)
|
FATruden/boto | refs/heads/master | boto/beanstalk/layer1.py | 13 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
import boto.jsonresponse
from boto.compat import json
from boto.regioninfo import RegionInfo
from boto.connection import AWSQueryConnection
class Layer1(AWSQueryConnection):

    APIVersion = '2010-12-01'
    DefaultRegionName = 'us-east-1'
    DefaultRegionEndpoint = 'elasticbeanstalk.us-east-1.amazonaws.com'

    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
                 is_secure=True, port=None,
                 proxy=None, proxy_port=None,
                 proxy_user=None, proxy_pass=None, debug=0,
                 https_connection_factory=None, region=None, path='/',
                 api_version=None, security_token=None):
        if not region:
            region = RegionInfo(self, self.DefaultRegionName,
                                self.DefaultRegionEndpoint)
        self.region = region
        AWSQueryConnection.__init__(self, aws_access_key_id,
                                    aws_secret_access_key,
                                    is_secure, port, proxy, proxy_port,
                                    proxy_user, proxy_pass,
                                    self.region.endpoint, debug,
                                    https_connection_factory, path,
                                    security_token)

    def _required_auth_capability(self):
        return ['hmac-v4']

    def _encode_bool(self, v):
        v = bool(v)
        return {True: "true", False: "false"}[v]

    def _get_response(self, action, params, path='/', verb='GET'):
        params['ContentType'] = 'JSON'
        response = self.make_request(action, params, path, verb)
        body = response.read()
        boto.log.debug(body)
        if response.status == 200:
            return json.loads(body)
        else:
            raise self.ResponseError(response.status, response.reason, body)

    def check_dns_availability(self, cname_prefix):
        """Checks if the specified CNAME is available.

        :type cname_prefix: string
        :param cname_prefix: The prefix used when this CNAME is
            reserved.
        """
        params = {'CNAMEPrefix': cname_prefix}
        return self._get_response('CheckDNSAvailability', params)

    def create_application(self, application_name, description=None):
        """
        Creates an application that has one configuration template
        named default and no application versions.

        :type application_name: string
        :param application_name: The name of the application.
            Constraint: This name must be unique within your account. If the
            specified name already exists, the action returns an
            InvalidParameterValue error.

        :type description: string
        :param description: Describes the application.

        :raises: TooManyApplicationsException
        """
        params = {'ApplicationName': application_name}
        if description:
            params['Description'] = description
        return self._get_response('CreateApplication', params)

    def create_application_version(self, application_name, version_label,
                                   description=None, s3_bucket=None,
                                   s3_key=None, auto_create_application=None):
        """Creates an application version for the specified application.

        :type application_name: string
        :param application_name: The name of the application. If no
            application is found with this name, and AutoCreateApplication is
            false, returns an InvalidParameterValue error.

        :type version_label: string
        :param version_label: A label identifying this version. Constraint:
            Must be unique per application. If an application version already
            exists with this label for the specified application, AWS Elastic
            Beanstalk returns an InvalidParameterValue error.

        :type description: string
        :param description: Describes this version.

        :type s3_bucket: string
        :param s3_bucket: The Amazon S3 bucket where the data is located.

        :type s3_key: string
        :param s3_key: The Amazon S3 key where the data is located. Both
            s3_bucket and s3_key must be specified in order to use a specific
            source bundle. If both of these values are not specified the
            sample application will be used.

        :type auto_create_application: boolean
        :param auto_create_application: Determines how the system behaves if
            the specified application for this version does not already exist:
            true: Automatically creates the specified application for this
            version if it does not already exist. false: Returns an
            InvalidParameterValue if the specified application for this version
            does not already exist. Default: false Valid Values: true | false

        :raises: TooManyApplicationsException,
                 TooManyApplicationVersionsException,
                 InsufficientPrivilegesException,
                 S3LocationNotInServiceRegionException
        """
        params = {'ApplicationName': application_name,
                  'VersionLabel': version_label}
        if description:
            params['Description'] = description
        if s3_bucket and s3_key:
            params['SourceBundle.S3Bucket'] = s3_bucket
            params['SourceBundle.S3Key'] = s3_key
        if auto_create_application:
            params['AutoCreateApplication'] = self._encode_bool(
                auto_create_application)
        return self._get_response('CreateApplicationVersion', params)
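
    # A minimal usage sketch (bucket, key, and names below are placeholders,
    # not values from this library):
    #   layer1 = Layer1()
    #   layer1.create_application_version('myapp', 'v1',
    #                                     s3_bucket='my-deploy-bucket',
    #                                     s3_key='myapp-v1.zip',
    #                                     auto_create_application=True)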
    def create_configuration_template(self, application_name, template_name,
                                      solution_stack_name=None,
                                      source_configuration_application_name=None,
                                      source_configuration_template_name=None,
                                      environment_id=None, description=None,
                                      option_settings=None):
        """Creates a configuration template.

        Templates are associated with a specific application and are used to
        deploy different versions of the application with the same
        configuration settings.

        :type application_name: string
        :param application_name: The name of the application to associate with
            this configuration template. If no application is found with this
            name, AWS Elastic Beanstalk returns an InvalidParameterValue error.

        :type template_name: string
        :param template_name: The name of the configuration template.
            Constraint: This name must be unique per application. Default: If
            a configuration template already exists with this name, AWS Elastic
            Beanstalk returns an InvalidParameterValue error.

        :type solution_stack_name: string
        :param solution_stack_name: The name of the solution stack used by this
            configuration. The solution stack specifies the operating system,
            architecture, and application server for a configuration template.
            It determines the set of configuration options as well as the
            possible and default values. Use ListAvailableSolutionStacks to
            obtain a list of available solution stacks. Default: If the
            SolutionStackName is not specified and the source configuration
            parameter is blank, AWS Elastic Beanstalk uses the default solution
            stack. If not specified and the source configuration parameter is
            specified, AWS Elastic Beanstalk uses the same solution stack as
            the source configuration template.

        :type source_configuration_application_name: string
        :param source_configuration_application_name: The name of the
            application associated with the configuration.

        :type source_configuration_template_name: string
        :param source_configuration_template_name: The name of the
            configuration template.

        :type environment_id: string
        :param environment_id: The ID of the environment used with this
            configuration template.

        :type description: string
        :param description: Describes this configuration.

        :type option_settings: list
        :param option_settings: If specified, AWS Elastic Beanstalk sets the
            specified configuration option to the requested value. The new
            value overrides the value obtained from the solution stack or the
            source configuration template.

        :raises: InsufficientPrivilegesException,
                 TooManyConfigurationTemplatesException
        """
        params = {'ApplicationName': application_name,
                  'TemplateName': template_name}
        if solution_stack_name:
            params['SolutionStackName'] = solution_stack_name
        if source_configuration_application_name:
            params['SourceConfiguration.ApplicationName'] = source_configuration_application_name
        if source_configuration_template_name:
            params['SourceConfiguration.TemplateName'] = source_configuration_template_name
        if environment_id:
            params['EnvironmentId'] = environment_id
        if description:
            params['Description'] = description
        if option_settings:
            self._build_list_params(params, option_settings,
                                    'OptionSettings.member',
                                    ('Namespace', 'OptionName', 'Value'))
        return self._get_response('CreateConfigurationTemplate', params)

    def create_environment(self, application_name, environment_name,
                           version_label=None, template_name=None,
                           solution_stack_name=None, cname_prefix=None,
                           description=None, option_settings=None,
                           options_to_remove=None):
        """Launches an environment for the application using a configuration.

        :type application_name: string
        :param application_name: The name of the application that contains the
            version to be deployed. If no application is found with this name,
            CreateEnvironment returns an InvalidParameterValue error.

        :type version_label: string
        :param version_label: The name of the application version to deploy. If
            the specified application has no associated application versions,
            AWS Elastic Beanstalk UpdateEnvironment returns an
            InvalidParameterValue error. Default: If not specified, AWS
            Elastic Beanstalk attempts to launch the most recently created
            application version.

        :type environment_name: string
        :param environment_name: A unique name for the deployment environment.
            Used in the application URL. Constraint: Must be from 4 to 23
            characters in length. The name can contain only letters, numbers,
            and hyphens. It cannot start or end with a hyphen. This name must
            be unique in your account. If the specified name already exists,
            AWS Elastic Beanstalk returns an InvalidParameterValue error.
            Default: If the CNAME parameter is not specified, the environment
            name becomes part of the CNAME, and therefore part of the visible
            URL for your application.

        :type template_name: string
        :param template_name: The name of the configuration template to
            use in deployment. If no configuration template is found with this
            name, AWS Elastic Beanstalk returns an InvalidParameterValue error.
            Condition: You must specify either this parameter or a
            SolutionStackName, but not both. If you specify both, AWS Elastic
            Beanstalk returns an InvalidParameterCombination error. If you do
            not specify either, AWS Elastic Beanstalk returns a
            MissingRequiredParameter error.

        :type solution_stack_name: string
        :param solution_stack_name: This is an alternative to specifying a
            configuration name. If specified, AWS Elastic Beanstalk sets the
            configuration values to the default values associated with the
            specified solution stack. Condition: You must specify either this
            or a TemplateName, but not both. If you specify both, AWS Elastic
            Beanstalk returns an InvalidParameterCombination error. If you do
            not specify either, AWS Elastic Beanstalk returns a
            MissingRequiredParameter error.

        :type cname_prefix: string
        :param cname_prefix: If specified, the environment attempts to use this
            value as the prefix for the CNAME. If not specified, the
            environment uses the environment name.

        :type description: string
        :param description: Describes this environment.

        :type option_settings: list
        :param option_settings: If specified, AWS Elastic Beanstalk sets the
            specified configuration options to the requested value in the
            configuration set for the new environment. These override the
            values obtained from the solution stack or the configuration
            template. Each element in the list is a tuple of (Namespace,
            OptionName, Value), for example::

                [('aws:autoscaling:launchconfiguration',
                  'Ec2KeyName', 'mykeypair')]

        :type options_to_remove: list
        :param options_to_remove: A list of custom user-defined configuration
            options to remove from the configuration set for this new
            environment.

        :raises: TooManyEnvironmentsException, InsufficientPrivilegesException
        """
        params = {'ApplicationName': application_name,
                  'EnvironmentName': environment_name}
        if version_label:
            params['VersionLabel'] = version_label
        if template_name:
            params['TemplateName'] = template_name
        if solution_stack_name:
            params['SolutionStackName'] = solution_stack_name
        if cname_prefix:
            params['CNAMEPrefix'] = cname_prefix
        if description:
            params['Description'] = description
        if option_settings:
            self._build_list_params(params, option_settings,
                                    'OptionSettings.member',
                                    ('Namespace', 'OptionName', 'Value'))
        if options_to_remove:
            self.build_list_params(params, options_to_remove,
                                   'OptionsToRemove.member')
        return self._get_response('CreateEnvironment', params)
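
    # A minimal usage sketch, reusing the option_settings tuple documented
    # above (application, environment, stack, and key names are placeholders):
    #   layer1.create_environment('myapp', 'myapp-env',
    #                             version_label='v1',
    #                             solution_stack_name='64bit Amazon Linux running Python',
    #                             option_settings=[('aws:autoscaling:launchconfiguration',
    #                                               'Ec2KeyName', 'mykeypair')])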
    def create_storage_location(self):
        """
        Creates the Amazon S3 storage location for the account. This
        location is used to store user log files.

        :raises: TooManyBucketsException,
                 S3SubscriptionRequiredException,
                 InsufficientPrivilegesException
        """
        return self._get_response('CreateStorageLocation', params={})

    def delete_application(self, application_name,
                           terminate_env_by_force=None):
        """
        Deletes the specified application along with all associated
        versions and configurations. The application versions will not
        be deleted from your Amazon S3 bucket.

        :type application_name: string
        :param application_name: The name of the application to delete.

        :type terminate_env_by_force: boolean
        :param terminate_env_by_force: When set to true, running
            environments will be terminated before deleting the application.

        :raises: OperationInProgressException
        """
        params = {'ApplicationName': application_name}
        if terminate_env_by_force:
            params['TerminateEnvByForce'] = self._encode_bool(
                terminate_env_by_force)
        return self._get_response('DeleteApplication', params)

    def delete_application_version(self, application_name, version_label,
                                   delete_source_bundle=None):
        """Deletes the specified version from the specified application.

        :type application_name: string
        :param application_name: The name of the application to delete
            releases from.

        :type version_label: string
        :param version_label: The label of the version to delete.

        :type delete_source_bundle: boolean
        :param delete_source_bundle: Indicates whether to delete the
            associated source bundle from Amazon S3. Valid Values: true |
            false

        :raises: SourceBundleDeletionException,
                 InsufficientPrivilegesException,
                 OperationInProgressException,
                 S3LocationNotInServiceRegionException
        """
        params = {'ApplicationName': application_name,
                  'VersionLabel': version_label}
        if delete_source_bundle:
            params['DeleteSourceBundle'] = self._encode_bool(
                delete_source_bundle)
        return self._get_response('DeleteApplicationVersion', params)

    def delete_configuration_template(self, application_name, template_name):
        """Deletes the specified configuration template.

        :type application_name: string
        :param application_name: The name of the application to delete
            the configuration template from.

        :type template_name: string
        :param template_name: The name of the configuration template to
            delete.

        :raises: OperationInProgressException
        """
        params = {'ApplicationName': application_name,
                  'TemplateName': template_name}
        return self._get_response('DeleteConfigurationTemplate', params)

    def delete_environment_configuration(self, application_name,
                                         environment_name):
        """
        Deletes the draft configuration associated with the running
        environment. Updating a running environment with any
        configuration changes creates a draft configuration set. You can
        get the draft configuration using DescribeConfigurationSettings
        while the update is in progress or if the update fails. The
        DeploymentStatus for the draft configuration indicates whether
        the deployment is in process or has failed. The draft
        configuration remains in existence until it is deleted with this
        action.

        :type application_name: string
        :param application_name: The name of the application the
            environment is associated with.

        :type environment_name: string
        :param environment_name: The name of the environment to delete
            the draft configuration from.
        """
        params = {'ApplicationName': application_name,
                  'EnvironmentName': environment_name}
        return self._get_response('DeleteEnvironmentConfiguration', params)

    def describe_application_versions(self, application_name=None,
                                      version_labels=None):
        """Returns descriptions for existing application versions.

        :type application_name: string
        :param application_name: If specified, AWS Elastic Beanstalk restricts
            the returned descriptions to only include ones that are associated
            with the specified application.

        :type version_labels: list
        :param version_labels: If specified, restricts the returned
            descriptions to only include ones that have the specified version
            labels.
        """
        params = {}
        if application_name:
            params['ApplicationName'] = application_name
        if version_labels:
            self.build_list_params(params, version_labels,
                                   'VersionLabels.member')
        return self._get_response('DescribeApplicationVersions', params)

    def describe_applications(self, application_names=None):
        """Returns the descriptions of existing applications.

        :type application_names: list
        :param application_names: If specified, AWS Elastic Beanstalk restricts
            the returned descriptions to only include those with the specified
            names.
        """
        params = {}
        if application_names:
            self.build_list_params(params, application_names,
                                   'ApplicationNames.member')
        return self._get_response('DescribeApplications', params)

    def describe_configuration_options(self, application_name=None,
                                       template_name=None,
                                       environment_name=None,
                                       solution_stack_name=None, options=None):
        """Describes configuration options used in a template or environment.

        Describes the configuration options that are used in a
        particular configuration template or environment, or that a
        specified solution stack defines. The description includes the
        values of the options, their default values, and an indication of
        the required action on a running environment if an option value
        is changed.

        :type application_name: string
        :param application_name: The name of the application associated with
            the configuration template or environment. Only needed if you want
            to describe the configuration options associated with either the
            configuration template or environment.

        :type template_name: string
        :param template_name: The name of the configuration template whose
            configuration options you want to describe.

        :type environment_name: string
        :param environment_name: The name of the environment whose
            configuration options you want to describe.

        :type solution_stack_name: string
        :param solution_stack_name: The name of the solution stack whose
            configuration options you want to describe.

        :type options: list
        :param options: If specified, restricts the descriptions to only
            the specified options.
        """
        params = {}
        if application_name:
            params['ApplicationName'] = application_name
        if template_name:
            params['TemplateName'] = template_name
        if environment_name:
            params['EnvironmentName'] = environment_name
        if solution_stack_name:
            params['SolutionStackName'] = solution_stack_name
        if options:
            self.build_list_params(params, options, 'Options.member')
        return self._get_response('DescribeConfigurationOptions', params)

    def describe_configuration_settings(self, application_name,
                                        template_name=None,
                                        environment_name=None):
        """
        Returns a description of the settings for the specified
        configuration set, that is, either a configuration template or
        the configuration set associated with a running environment.
        When describing the settings for the configuration set
        associated with a running environment, it is possible to receive
        two sets of setting descriptions. One is the deployed
        configuration set, and the other is a draft configuration of an
        environment that is either in the process of deployment or that
        failed to deploy.

        :type application_name: string
        :param application_name: The application for the environment or
            configuration template.

        :type template_name: string
        :param template_name: The name of the configuration template to
            describe. Conditional: You must specify either this parameter or
            an EnvironmentName, but not both. If you specify both, AWS Elastic
            Beanstalk returns an InvalidParameterCombination error. If you do
            not specify either, AWS Elastic Beanstalk returns a
            MissingRequiredParameter error.

        :type environment_name: string
        :param environment_name: The name of the environment to describe.
            Condition: You must specify either this or a TemplateName, but not
            both. If you specify both, AWS Elastic Beanstalk returns an
            InvalidParameterCombination error. If you do not specify either,
            AWS Elastic Beanstalk returns MissingRequiredParameter error.
        """
        params = {'ApplicationName': application_name}
        if template_name:
            params['TemplateName'] = template_name
        if environment_name:
            params['EnvironmentName'] = environment_name
        return self._get_response('DescribeConfigurationSettings', params)

    def describe_environment_resources(self, environment_id=None,
                                       environment_name=None):
        """Returns AWS resources for this environment.

        :type environment_id: string
        :param environment_id: The ID of the environment to retrieve AWS
            resource usage data. Condition: You must specify either this or an
            EnvironmentName, or both. If you do not specify either, AWS Elastic
            Beanstalk returns MissingRequiredParameter error.

        :type environment_name: string
        :param environment_name: The name of the environment to retrieve
            AWS resource usage data. Condition: You must specify either this
            or an EnvironmentId, or both. If you do not specify either, AWS
            Elastic Beanstalk returns MissingRequiredParameter error.

        :raises: InsufficientPrivilegesException
        """
        params = {}
        if environment_id:
            params['EnvironmentId'] = environment_id
        if environment_name:
            params['EnvironmentName'] = environment_name
        return self._get_response('DescribeEnvironmentResources', params)

    def describe_environments(self, application_name=None, version_label=None,
                              environment_ids=None, environment_names=None,
                              include_deleted=None,
                              included_deleted_back_to=None):
        """Returns descriptions for existing environments.

        :type application_name: string
        :param application_name: If specified, AWS Elastic Beanstalk restricts
            the returned descriptions to include only those that are associated
            with this application.

        :type version_label: string
        :param version_label: If specified, AWS Elastic Beanstalk restricts the
            returned descriptions to include only those that are associated
            with this application version.

        :type environment_ids: list
        :param environment_ids: If specified, AWS Elastic Beanstalk restricts
            the returned descriptions to include only those that have the
            specified IDs.

        :type environment_names: list
        :param environment_names: If specified, AWS Elastic Beanstalk restricts
            the returned descriptions to include only those that have the
            specified names.

        :type include_deleted: boolean
        :param include_deleted: Indicates whether to include deleted
            environments: true: Environments that have been deleted after
            IncludedDeletedBackTo are displayed. false: Do not include deleted
            environments.

        :type included_deleted_back_to: timestamp
        :param included_deleted_back_to: If specified when IncludeDeleted is
            set to true, then environments deleted after this date are
            displayed.
        """
        params = {}
        if application_name:
            params['ApplicationName'] = application_name
        if version_label:
            params['VersionLabel'] = version_label
        if environment_ids:
            self.build_list_params(params, environment_ids,
                                   'EnvironmentIds.member')
        if environment_names:
            self.build_list_params(params, environment_names,
                                   'EnvironmentNames.member')
        if include_deleted:
            params['IncludeDeleted'] = self._encode_bool(include_deleted)
        if included_deleted_back_to:
            params['IncludedDeletedBackTo'] = included_deleted_back_to
        return self._get_response('DescribeEnvironments', params)

    def describe_events(self, application_name=None, version_label=None,
                        template_name=None, environment_id=None,
                        environment_name=None, request_id=None, severity=None,
                        start_time=None, end_time=None, max_records=None,
                        next_token=None):
        """Returns event descriptions matching criteria up to the last 6 weeks.

        :type application_name: string
        :param application_name: If specified, AWS Elastic Beanstalk restricts
            the returned descriptions to include only those associated with
            this application.

        :type version_label: string
        :param version_label: If specified, AWS Elastic Beanstalk restricts the
            returned descriptions to those associated with this application
            version.

        :type template_name: string
        :param template_name: If specified, AWS Elastic Beanstalk restricts the
            returned descriptions to those that are associated with this
            environment configuration.

        :type environment_id: string
        :param environment_id: If specified, AWS Elastic Beanstalk restricts
            the returned descriptions to those associated with this
            environment.

        :type environment_name: string
        :param environment_name: If specified, AWS Elastic Beanstalk restricts
            the returned descriptions to those associated with this
            environment.

        :type request_id: string
        :param request_id: If specified, AWS Elastic Beanstalk restricts the
            described events to include only those associated with this request
            ID.

        :type severity: string
        :param severity: If specified, limits the events returned from this
            call to include only those with the specified severity or higher.

        :type start_time: timestamp
        :param start_time: If specified, AWS Elastic Beanstalk restricts the
            returned descriptions to those that occur on or after this time.

        :type end_time: timestamp
        :param end_time: If specified, AWS Elastic Beanstalk restricts the
            returned descriptions to those that occur up to, but not including,
            the EndTime.

        :type max_records: integer
        :param max_records: Specifies the maximum number of events that can be
            returned, beginning with the most recent event.

        :type next_token: string
        :param next_token: Pagination token. If specified, the events return
            the next batch of results.
        """
        params = {}
        if application_name:
            params['ApplicationName'] = application_name
        if version_label:
            params['VersionLabel'] = version_label
        if template_name:
            params['TemplateName'] = template_name
        if environment_id:
            params['EnvironmentId'] = environment_id
        if environment_name:
            params['EnvironmentName'] = environment_name
        if request_id:
            params['RequestId'] = request_id
        if severity:
            params['Severity'] = severity
        if start_time:
            params['StartTime'] = start_time
        if end_time:
            params['EndTime'] = end_time
        if max_records:
            params['MaxRecords'] = max_records
        if next_token:
            params['NextToken'] = next_token
        return self._get_response('DescribeEvents', params)

    def list_available_solution_stacks(self):
        """Returns a list of the available solution stack names."""
        return self._get_response('ListAvailableSolutionStacks', params={})

    def rebuild_environment(self, environment_id=None, environment_name=None):
        """
        Deletes and recreates all of the AWS resources (for example:
        the Auto Scaling group, load balancer, etc.) for a specified
        environment and forces a restart.

        :type environment_id: string
        :param environment_id: The ID of the environment to rebuild.
            Condition: You must specify either this or an EnvironmentName, or
            both. If you do not specify either, AWS Elastic Beanstalk returns
            MissingRequiredParameter error.

        :type environment_name: string
        :param environment_name: The name of the environment to rebuild.
            Condition: You must specify either this or an EnvironmentId, or
            both. If you do not specify either, AWS Elastic Beanstalk returns
            MissingRequiredParameter error.

        :raises: InsufficientPrivilegesException
        """
        params = {}
        if environment_id:
            params['EnvironmentId'] = environment_id
        if environment_name:
            params['EnvironmentName'] = environment_name
        return self._get_response('RebuildEnvironment', params)

    def request_environment_info(self, info_type='tail', environment_id=None,
                                 environment_name=None):
        """
        Initiates a request to compile the specified type of
        information of the deployed environment. Setting the InfoType
        to tail compiles the last lines from the application server log
        files of every Amazon EC2 instance in your environment. Use
        RetrieveEnvironmentInfo to access the compiled information.

        :type info_type: string
        :param info_type: The type of information to request.

        :type environment_id: string
        :param environment_id: The ID of the environment of the
            requested data. If no such environment is found,
            RequestEnvironmentInfo returns an InvalidParameterValue error.
            Condition: You must specify either this or an EnvironmentName, or
            both. If you do not specify either, AWS Elastic Beanstalk returns
            MissingRequiredParameter error.

        :type environment_name: string
        :param environment_name: The name of the environment of the
            requested data. If no such environment is found,
            RequestEnvironmentInfo returns an InvalidParameterValue error.
            Condition: You must specify either this or an EnvironmentId, or
both. If you do not specify either, AWS Elastic Beanstalk returns
MissingRequiredParameter error.
"""
params = {'InfoType': info_type}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('RequestEnvironmentInfo', params)
def restart_app_server(self, environment_id=None, environment_name=None):
"""
Causes the environment to restart the application container
server running on each Amazon EC2 instance.
:type environment_id: string
:param environment_id: The ID of the environment to restart the server
for. Condition: You must specify either this or an
EnvironmentName, or both. If you do not specify either, AWS Elastic
Beanstalk returns MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment to restart the
server for. Condition: You must specify either this or an
EnvironmentId, or both. If you do not specify either, AWS Elastic
Beanstalk returns MissingRequiredParameter error.
"""
params = {}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('RestartAppServer', params)
def retrieve_environment_info(self, info_type='tail', environment_id=None,
environment_name=None):
"""
Retrieves the compiled information from a RequestEnvironmentInfo
request.
:type info_type: string
:param info_type: The type of information to retrieve.
:type environment_id: string
:param environment_id: The ID of the data's environment. If no such
environment is found, returns an InvalidParameterValue error.
Condition: You must specify either this or an EnvironmentName, or
both. If you do not specify either, AWS Elastic Beanstalk returns
MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the data's environment. If no such
environment is found, returns an InvalidParameterValue error.
Condition: You must specify either this or an EnvironmentId, or
both. If you do not specify either, AWS Elastic Beanstalk returns
MissingRequiredParameter error.
"""
params = {'InfoType': info_type}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('RetrieveEnvironmentInfo', params)
def swap_environment_cnames(self, source_environment_id=None,
source_environment_name=None,
destination_environment_id=None,
destination_environment_name=None):
"""Swaps the CNAMEs of two environments.
:type source_environment_id: string
:param source_environment_id: The ID of the source environment.
Condition: You must specify at least the SourceEnvironmentID or the
SourceEnvironmentName. You may also specify both. If you specify
the SourceEnvironmentId, you must specify the
DestinationEnvironmentId.
:type source_environment_name: string
:param source_environment_name: The name of the source environment.
Condition: You must specify at least the SourceEnvironmentID or the
SourceEnvironmentName. You may also specify both. If you specify
the SourceEnvironmentName, you must specify the
DestinationEnvironmentName.
:type destination_environment_id: string
:param destination_environment_id: The ID of the destination
environment. Condition: You must specify at least the
DestinationEnvironmentID or the DestinationEnvironmentName. You may
also specify both. You must specify the SourceEnvironmentId with
the DestinationEnvironmentId.
:type destination_environment_name: string
:param destination_environment_name: The name of the destination
environment. Condition: You must specify at least the
DestinationEnvironmentID or the DestinationEnvironmentName. You may
also specify both. You must specify the SourceEnvironmentName with
the DestinationEnvironmentName.
"""
params = {}
if source_environment_id:
params['SourceEnvironmentId'] = source_environment_id
if source_environment_name:
params['SourceEnvironmentName'] = source_environment_name
if destination_environment_id:
params['DestinationEnvironmentId'] = destination_environment_id
if destination_environment_name:
params['DestinationEnvironmentName'] = destination_environment_name
return self._get_response('SwapEnvironmentCNAMEs', params)
def terminate_environment(self, environment_id=None, environment_name=None,
terminate_resources=None):
"""Terminates the specified environment.
:type environment_id: string
:param environment_id: The ID of the environment to terminate.
Condition: You must specify either this or an EnvironmentName, or
both. If you do not specify either, AWS Elastic Beanstalk returns
MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment to terminate.
Condition: You must specify either this or an EnvironmentId, or
both. If you do not specify either, AWS Elastic Beanstalk returns
MissingRequiredParameter error.
:type terminate_resources: boolean
:param terminate_resources: Indicates whether the associated AWS
resources should shut down when the environment is terminated:
true: (default) The user AWS resources (for example, the Auto
Scaling group, LoadBalancer, etc.) are terminated along with the
environment. false: The environment is removed from the AWS
Elastic Beanstalk but the AWS resources continue to operate. For
more information, see the AWS Elastic Beanstalk User Guide.
Default: true Valid Values: true | false
:raises: InsufficientPrivilegesException
"""
params = {}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
if terminate_resources:
params['TerminateResources'] = self._encode_bool(
terminate_resources)
return self._get_response('TerminateEnvironment', params)
def update_application(self, application_name, description=None):
"""
Updates the specified application to have the specified
properties.
:type application_name: string
:param application_name: The name of the application to update.
If no such application is found, UpdateApplication returns an
InvalidParameterValue error.
:type description: string
:param description: A new description for the application. Default: If
not specified, AWS Elastic Beanstalk does not update the
description.
"""
params = {'ApplicationName': application_name}
if description:
params['Description'] = description
return self._get_response('UpdateApplication', params)
def update_application_version(self, application_name, version_label,
description=None):
"""Updates the application version to have the properties.
:type application_name: string
:param application_name: The name of the application associated with
this version. If no application is found with this name,
UpdateApplication returns an InvalidParameterValue error.
:type version_label: string
:param version_label: The name of the version to update. If no
application version is found with this label, UpdateApplication
returns an InvalidParameterValue error.
:type description: string
:param description: A new description for this release.
"""
params = {'ApplicationName': application_name,
'VersionLabel': version_label}
if description:
params['Description'] = description
return self._get_response('UpdateApplicationVersion', params)
def update_configuration_template(self, application_name, template_name,
description=None, option_settings=None,
options_to_remove=None):
"""
Updates the specified configuration template to have the
specified properties or configuration option values.
:type application_name: string
:param application_name: The name of the application associated with
the configuration template to update. If no application is found
with this name, UpdateConfigurationTemplate returns an
InvalidParameterValue error.
:type template_name: string
:param template_name: The name of the configuration template to update.
If no configuration template is found with this name,
UpdateConfigurationTemplate returns an InvalidParameterValue error.
:type description: string
:param description: A new description for the configuration.
:type option_settings: list
:param option_settings: A list of configuration option settings to
update with the new specified option value.
:type options_to_remove: list
:param options_to_remove: A list of configuration options to remove
from the configuration set. Constraint: You can remove only
UserDefined configuration options.
:raises: InsufficientPrivilegesException
"""
params = {'ApplicationName': application_name,
'TemplateName': template_name}
if description:
params['Description'] = description
if option_settings:
self._build_list_params(params, option_settings,
'OptionSettings.member',
('Namespace', 'OptionName', 'Value'))
if options_to_remove:
self.build_list_params(params, options_to_remove,
'OptionsToRemove.member')
return self._get_response('UpdateConfigurationTemplate', params)
def update_environment(self, environment_id=None, environment_name=None,
version_label=None, template_name=None,
description=None, option_settings=None,
options_to_remove=None):
"""
Updates the environment description, deploys a new application
version, updates the configuration settings to an entirely new
configuration template, or updates select configuration option
values in the running environment. Attempting to update both
the release and configuration is not allowed and AWS Elastic
Beanstalk returns an InvalidParameterCombination error. When
updating the configuration settings to a new template or
individual settings, a draft configuration is created and
DescribeConfigurationSettings for this environment returns two
setting descriptions with different DeploymentStatus values.
:type environment_id: string
:param environment_id: The ID of the environment to update. If no
environment with this ID exists, AWS Elastic Beanstalk returns an
InvalidParameterValue error. Condition: You must specify either
this or an EnvironmentName, or both. If you do not specify either,
AWS Elastic Beanstalk returns MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment to update. If no
environment with this name exists, AWS Elastic Beanstalk returns an
InvalidParameterValue error. Condition: You must specify either
this or an EnvironmentId, or both. If you do not specify either,
AWS Elastic Beanstalk returns MissingRequiredParameter error.
:type version_label: string
:param version_label: If this parameter is specified, AWS Elastic
Beanstalk deploys the named application version to the environment.
If no such application version is found, returns an
InvalidParameterValue error.
:type template_name: string
:param template_name: If this parameter is specified, AWS Elastic
Beanstalk deploys this configuration template to the environment.
If no such configuration template is found, AWS Elastic Beanstalk
returns an InvalidParameterValue error.
:type description: string
:param description: If this parameter is specified, AWS Elastic
Beanstalk updates the description of this environment.
:type option_settings: list
:param option_settings: If specified, AWS Elastic Beanstalk updates the
configuration set associated with the running environment and sets
the specified configuration options to the requested value.
:type options_to_remove: list
:param options_to_remove: A list of custom user-defined configuration
options to remove from the configuration set for this environment.
:raises: InsufficientPrivilegesException
"""
params = {}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
if version_label:
params['VersionLabel'] = version_label
if template_name:
params['TemplateName'] = template_name
if description:
params['Description'] = description
if option_settings:
self._build_list_params(params, option_settings,
'OptionSettings.member',
('Namespace', 'OptionName', 'Value'))
if options_to_remove:
self.build_list_params(params, options_to_remove,
'OptionsToRemove.member')
return self._get_response('UpdateEnvironment', params)
def validate_configuration_settings(self, application_name,
option_settings, template_name=None,
environment_name=None):
"""
Takes a set of configuration settings and either a
configuration template or environment, and determines whether
those values are valid. This action returns a list of messages
indicating any errors or warnings associated with the selection
of option values.
:type application_name: string
:param application_name: The name of the application that the
configuration template or environment belongs to.
:type template_name: string
:param template_name: The name of the configuration template to
validate the settings against. Condition: You cannot specify both
this and an environment name.
:type environment_name: string
:param environment_name: The name of the environment to validate the
settings against. Condition: You cannot specify both this and a
configuration template name.
:type option_settings: list
:param option_settings: A list of the options and desired values to
evaluate.
:raises: InsufficientPrivilegesException
"""
params = {'ApplicationName': application_name}
self._build_list_params(params, option_settings,
'OptionSettings.member',
('Namespace', 'OptionName', 'Value'))
if template_name:
params['TemplateName'] = template_name
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('ValidateConfigurationSettings', params)
def _build_list_params(self, params, user_values, prefix, tuple_names):
# For params such as the ConfigurationOptionSettings,
# they can specify a list of tuples where each tuple maps to a specific
# arg. For example:
        # user_values = [('foo', 'bar', 'baz')]
# prefix=MyOption.member
# tuple_names=('One', 'Two', 'Three')
# would result in:
# MyOption.member.1.One = foo
# MyOption.member.1.Two = bar
# MyOption.member.1.Three = baz
for i, user_value in enumerate(user_values, 1):
current_prefix = '%s.%s' % (prefix, i)
for key, value in zip(tuple_names, user_value):
full_key = '%s.%s' % (current_prefix, key)
params[full_key] = value
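# Usage sketch, not part of this module: assuming a Layer1 connection obtained
# through boto.beanstalk (the region and values below are illustrative):
#
#   import boto.beanstalk
#   conn = boto.beanstalk.connect_to_region('us-east-1')
#   conn.update_environment(
#       environment_name='my-env',
#       option_settings=[('aws:autoscaling:asg', 'MaxSize', '4')])
#
# _build_list_params would expand that option_settings tuple into:
#   OptionSettings.member.1.Namespace = aws:autoscaling:asg
#   OptionSettings.member.1.OptionName = MaxSize
#   OptionSettings.member.1.Value = 4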
|
Xarthisius/girder | refs/heads/master | girder/utility/_cache.py | 4 | import cherrypy
from dogpile.cache import make_region, register_backend
from dogpile.cache.backends.memory import MemoryBackend
class CherrypyRequestBackend(MemoryBackend):
"""
A memory backed cache for individual CherryPy requests.
This provides a cache backend for dogpile.cache which is designed
to work in a thread-safe manner using cherrypy.request, a thread local
storage that only lasts for the duration of a request.
"""
def __init__(self, arguments):
pass
@property
def _cache(self):
if not hasattr(cherrypy.request, '_girderCache'):
setattr(cherrypy.request, '_girderCache', {})
return cherrypy.request._girderCache
register_backend('cherrypy_request', 'girder.utility._cache', 'CherrypyRequestBackend')
# These caches must be configured with the null backend upon creation due to the fact
# that user-based configuration of the regions doesn't happen until server start, which
# doesn't occur when using Girder as a library.
cache = make_region(name='girder.cache').configure(backend='dogpile.cache.null')
requestCache = make_region(name='girder.request').configure(backend='dogpile.cache.null')
# This cache is not configurable by the user, and will always be configured when the server is.
# It holds data for rate limiting, which is ephemeral, but must be persisted (i.e. it's not optional
# or best-effort).
rateLimitBuffer = make_region(name='girder.rate_limit')
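# Configuration sketch, not executed here: once the server (or an embedding
# application) configures the request region against the backend registered
# above, values are memoized once per CherryPy request:
#
#   requestCache.configure(backend='cherrypy_request',
#                          replace_existing_backend=True)
#
#   @requestCache.cache_on_arguments()
#   def expensiveLookup(key):
#       ...  # computed at most once per request for a given key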
|
ondrokrc/gramps | refs/heads/master | gramps/gen/plug/menu/_note.py | 2 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2008 Gary Burton
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Option class representing a string.
"""
#-------------------------------------------------------------------------
#
# gramps modules
#
#-------------------------------------------------------------------------
from . import StringOption
#-------------------------------------------------------------------------
#
# NoteOption class
#
#-------------------------------------------------------------------------
class NoteOption(StringOption):
"""
This class describes an option that allows a note from the
database to be selected.
"""
def __init__(self, label):
"""
:param label: A friendly label to be applied to this option.
Example: "Title Note"
:type label: string
:param value: A Gramps ID of a note for this option.
Example: "n11"
:type value: string
:return: nothing
"""
StringOption.__init__(self, label, "")
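# Usage sketch (hypothetical report options; the option name and category
# label are illustrative):
#
#   title_note = NoteOption(_("Title Note"))
#   title_note.set_help(_("Note to be used as the report title"))
#   menu.add_option(_("Report Options"), "title_note", title_note)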
|
aquatix/imagine-gallery | refs/heads/master | imaginegallery/imagine/managers.py | 1 | # encoding: utf-8
from django.db import models
class CollectionManager(models.Manager):
def public_collections(self):
return self.exclude(is_public=False)
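# Usage sketch, assuming the Collection model declares
# `objects = CollectionManager()`:
#
#   visible = Collection.objects.public_collections()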
|
ProstoMaxim/incubator-airflow | refs/heads/master | airflow/contrib/operators/sqoop_operator.py | 20 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module contains a sqoop 1 operator
"""
from airflow.contrib.hooks.sqoop_hook import SqoopHook
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class SqoopOperator(BaseOperator):
"""
execute sqoop job
"""
@apply_defaults
def __init__(self,
conn_id='sqoop_default',
cmd_type='import',
table=None,
query=None,
target_dir=None,
append=None,
file_type=None,
columns=None,
num_mappers=None,
split_by=None,
where=None,
export_dir=None,
input_null_string=None,
input_null_non_string=None,
staging_table=None,
clear_staging_table=False,
enclosed_by=None,
escaped_by=None,
input_fields_terminated_by=None,
input_lines_terminated_by=None,
input_optionally_enclosed_by=None,
batch=False,
direct=False,
driver=None,
verbose=False,
relaxed_isolation=False,
properties=None,
hcatalog_database=None,
hcatalog_table=None,
*args,
**kwargs):
"""
:param conn_id: str
:param cmd_type: str specify command to execute "export" or "import"
:param table: Table to read
:param target_dir: HDFS destination directory where the data
from the rdbms will be written
:param append: Append data to an existing dataset in HDFS
:param file_type: "avro", "sequence", "text" Imports data to
into the specified format. Defaults to text.
:param columns: <col,col,col> Columns to import from table
:param num_mappers: Use n mapper tasks to import/export in parallel
:param split_by: Column of the table used to split work units
:param where: WHERE clause to use during import
:param export_dir: HDFS Hive database directory to export to the rdbms
:param input_null_string: The string to be interpreted as null
for string columns
:param input_null_non_string: The string to be interpreted as null
for non-string columns
:param staging_table: The table in which data will be staged before
being inserted into the destination table
:param clear_staging_table: Indicate that any data present in the
staging table can be deleted
:param enclosed_by: Sets a required field enclosing character
:param escaped_by: Sets the escape character
:param input_fields_terminated_by: Sets the input field separator
:param input_lines_terminated_by: Sets the input end-of-line character
:param input_optionally_enclosed_by: Sets a field enclosing character
:param batch: Use batch mode for underlying statement execution
:param direct: Use direct export fast path
:param driver: Manually specify JDBC driver class to use
:param verbose: Switch to more verbose logging for debug purposes
:param relaxed_isolation: use read uncommitted isolation level
:param hcatalog_database: Specifies the database name for the HCatalog table
:param hcatalog_table: The argument value for this option is the HCatalog table
:param properties: additional JVM properties passed to sqoop
"""
super(SqoopOperator, self).__init__(*args, **kwargs)
self.conn_id = conn_id
self.cmd_type = cmd_type
self.table = table
self.query = query
self.target_dir = target_dir
self.append = append
self.file_type = file_type
self.columns = columns
self.num_mappers = num_mappers
self.split_by = split_by
self.where = where
self.export_dir = export_dir
self.input_null_string = input_null_string
self.input_null_non_string = input_null_non_string
self.staging_table = staging_table
self.clear_staging_table = clear_staging_table
self.enclosed_by = enclosed_by
self.escaped_by = escaped_by
self.input_fields_terminated_by = input_fields_terminated_by
self.input_lines_terminated_by = input_lines_terminated_by
self.input_optionally_enclosed_by = input_optionally_enclosed_by
self.batch = batch
self.direct = direct
self.driver = driver
self.verbose = verbose
self.relaxed_isolation = relaxed_isolation
self.hcatalog_database = hcatalog_database
self.hcatalog_table = hcatalog_table
# No mutable types in the default parameters
if properties is None:
properties = {}
self.properties = properties
def execute(self, context):
"""
Execute sqoop job
"""
hook = SqoopHook(conn_id=self.conn_id,
verbose=self.verbose,
num_mappers=self.num_mappers,
hcatalog_database=self.hcatalog_database,
hcatalog_table=self.hcatalog_table,
properties=self.properties)
if self.cmd_type == 'export':
hook.export_table(
table=self.table,
export_dir=self.export_dir,
input_null_string=self.input_null_string,
input_null_non_string=self.input_null_non_string,
staging_table=self.staging_table,
clear_staging_table=self.clear_staging_table,
enclosed_by=self.enclosed_by,
escaped_by=self.escaped_by,
input_fields_terminated_by=self.input_fields_terminated_by,
input_lines_terminated_by=self.input_lines_terminated_by,
input_optionally_enclosed_by=self.input_optionally_enclosed_by,
batch=self.batch,
relaxed_isolation=self.relaxed_isolation)
elif self.cmd_type == 'import':
            if self.table:
hook.import_table(
table=self.table,
target_dir=self.target_dir,
append=self.append,
file_type=self.file_type,
columns=self.columns,
split_by=self.split_by,
where=self.where,
direct=self.direct,
driver=self.driver)
            elif self.query:
                hook.import_query(
                    query=self.query,
target_dir=self.target_dir,
append=self.append,
file_type=self.file_type,
split_by=self.split_by,
direct=self.direct,
driver=self.driver)
else:
raise AirflowException(
"Provide query or table parameter to import using Sqoop"
)
else:
raise AirflowException("cmd_type should be 'import' or 'export'")
|
npiganeau/odoo | refs/heads/master | addons/sale/report/invoice_report.py | 336 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
class account_invoice_report(osv.osv):
_inherit = 'account.invoice.report'
_columns = {
'section_id': fields.many2one('crm.case.section', 'Sales Team'),
}
_depends = {
'account.invoice': ['section_id'],
}
def _select(self):
return super(account_invoice_report, self)._select() + ", sub.section_id as section_id"
def _sub_select(self):
return super(account_invoice_report, self)._sub_select() + ", ai.section_id as section_id"
def _group_by(self):
return super(account_invoice_report, self)._group_by() + ", ai.section_id"
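# Composition note: each override appends a fragment to the SQL assembled by
# account.invoice.report, so the materialized view's SELECT gains
# ", sub.section_id as section_id" and its GROUP BY gains ", ai.section_id".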
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
fsbr/se3-path-planner | refs/heads/master | modularPlanner/cuspAnalyze.py | 2 | #2 okay lets do this
import sys
sys.path.append('../.')
import pandas as pd
import numpy as np
import itertools as it
import path_planner as plan
pathName = '../../data-se3-path-planner/cherylData/'
pathName1 = '../../data-se3-path-planner/yearData/batch2019/'
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
inclinations = ['55', '60', '65', '70', '75', '80', '85', '90']
inclinations = inclinations[::-1] # reverses the inclinations
filesList =[[pathName+month+inclination+'.txt' for month in months ] for inclination in inclinations]
# print( filesList)
# filesList = ['55deg.txt', '60deg.txt','65deg.txt', '70deg.txt', '75deg.txt','80deg.txt','85deg.txt']
# filesList = [pathName+files for files in filesList]
# filesList = ['80deg.txt']
# anglesList = [55, 60, 65, 70, 75, 80, 85]
cma = []
def createCma(files):
# print("FILES", files)
df = pd.read_csv(files)
# print(df.tail())
Xgse = df['DefaultSC.gse.X']
Ygse = df['DefaultSC.gse.Y']
Zgse = df['DefaultSC.gse.Z']
# i am DEFINITELY going to end up with a dimension problem here
t = df['DefaultSC.A1ModJulian']
# print(t.tail())
t = t.tolist()
t = [t_index + 29999.5 for t_index in t]
# print("hopefully something close to the real time", t)
# print("type of t", type(t.tolist()))
# refer to tsyganenko for these coordinate systems
# the output here is in radians
# angle = np.arctan2(Xgse,Zgse)
# theta = np.arctan2(Ygse,Xgse)
angle = np.arctan2(Zgse,Xgse)
theta = np.arctan2(Xgse,Ygse)
print("type angle,", type(angle))
# but sometimes i print it in degrees
# print("angle is" , angle * 180 / np.pi)
# print("theta is", theta)
# make it into an array for iteration (probably a faster way)
# print(angle)
# print(len(angle))
count = 0
region = []
# for x,x1 in zip(angle, angle[1:]):
# eventually this has to be modified so that the unh professors script
# will alter the dipole angle
# These numbers are from the tsyganenko script that I wrote a while back
# if x<0.2151 or x> 0.2849:
# if 0.2151<=x1<=0.2849:
# count+=1
# angle = angle[:20]
# lets get this boundary crossing thing right
# Okay I think I did it
lowBound,highBound,lowLateralBound,highLateralBound = plan.GridGraph().getGoalRegion(t)
# print("type of llb", lowLateralBound)
# print("type of lb", lowBound)
# print("type of hb", highBound)
# print("type of llb", lowLateralBound)
# print("type of hlb", highLateralBound)
# lowBound = np.asarray(lowBound).tolist()
# highBound = np.asarray(highBound).tolist()
# lowLateralBound = np.asarray(lowLateralBound).tolist()
# highLateralBound = np.asarray(highLateralBound).tolist()
# print("lowbound hadhasdf", len(lowBound))
# print("lowbound", lowBound)
# print("highBound", highBound)
# print("lowLateralBound", lowLateralBound)
# print("highLateralbound", highLateralBound)
# lowBound = 0.2151/2
# highBound = 0.2849/2
# lateralBound = 5.0/2
# implement the tsyganenko function and dipole tilt for dynamic changing
# of the cusp location
# for x,y,lb,ub,llb,hlb in zip(angle, theta, lowBound, highBound, lowLateralBound, highLateralBound):
for x,y,lb,hb,llb,hlb in zip(angle,theta,lowBound,highBound,lowLateralBound,highLateralBound):
# the biggest thing is a modification of these thresholds
# if lowBound<=x<=highBound and lowLateralBound<=y<=highLateralBound:
if lb<=x<=hb and llb<=y<=hlb:
# if lowLateralBound<=y<=highLateralBound:
region.append(1)
else:
region.append(0)
for x,x1 in zip(region, region[1:]):
if x==0 and x1 == 1:
count+=1
else:
pass
# print("x", angle)
# print("x1", angle[1:])
# print("count",count)
# the main problem is with the dimensions of the cma variable
# so how do i get cma to have the same dimensions as filesList?
cma.append([count])
# print("cma", cma)
# print("region",region)
# print("region", region[:100])
return count
cma2 =[]
# the fact that you can call a function in a list comprehension is the number one reason
# why i'm going to stick with python
cma2 =[[createCma(pathName1+month+inclination+'.txt') for month in months ] for inclination in inclinations]
# cma2 = [[createCma(pathName1+'test4.txt')]]
print("cma2", cma2)
if __name__ == "__main__":
a = 3
from pylab import *
cdict = {'red': ((0.0, 0.0, 0.0),
(0.5, 1.0, 0.7),
(1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.0),
(0.5, 1.0, 0.0),
(1.0, 1.0, 1.0)),
'blue': ((0.0, 0.0, 0.0),
(0.5, 1.0, 0.0),
(1.0, 0.5, 1.0))}
my_cmap = matplotlib.colors.LinearSegmentedColormap('my_colormap',cdict,256)
pcolor(cma2,cmap=my_cmap)
colorbar()
plt.title('Cusp Crossings')
plt.xlabel('Start Month')
plt.ylabel('55+5y deg inclination')
plt.show()
|
Sutto/cloud-custodian | refs/heads/master | tools/c7n_gcp/c7n_gcp/entry.py | 1 | # Copyright 2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
# This is an alphabetically sorted import list
import c7n_gcp.actions
import c7n_gcp.filters
import c7n_gcp.output
import c7n_gcp.policy
import c7n_gcp.resources.appengine
import c7n_gcp.resources.bigquery
import c7n_gcp.resources.build
import c7n_gcp.resources.cloudbilling
import c7n_gcp.resources.compute
import c7n_gcp.resources.dataflow
import c7n_gcp.resources.deploymentmanager
import c7n_gcp.resources.dns
import c7n_gcp.resources.function
import c7n_gcp.resources.gke
import c7n_gcp.resources.iam
import c7n_gcp.resources.kms
import c7n_gcp.resources.loadbalancer
import c7n_gcp.resources.logging
import c7n_gcp.resources.mlengine
import c7n_gcp.resources.network
import c7n_gcp.resources.pubsub
import c7n_gcp.resources.resourcemanager
import c7n_gcp.resources.service
import c7n_gcp.resources.source
import c7n_gcp.resources.spanner
import c7n_gcp.resources.storage
import c7n_gcp.resources.sql # noqa: F401
from c7n_gcp.provider import resources as gcp_resources
logging.getLogger('googleapiclient.discovery').setLevel(logging.WARNING)
# Let resource registry subscribers have a chance to look at full set of resources.
gcp_resources.notify(gcp_resources.EVENT_FINAL)
def initialize_gcp():
pass
|
abhijo89/Django-facebook | refs/heads/master | docs/docs_env/Lib/encodings/mac_croatian.py | 593 | """ Python Character Mapping Codec mac_croatian generated from 'MAPPINGS/VENDORS/APPLE/CROATIAN.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mac-croatian',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> CONTROL CHARACTER
u'\x01' # 0x01 -> CONTROL CHARACTER
u'\x02' # 0x02 -> CONTROL CHARACTER
u'\x03' # 0x03 -> CONTROL CHARACTER
u'\x04' # 0x04 -> CONTROL CHARACTER
u'\x05' # 0x05 -> CONTROL CHARACTER
u'\x06' # 0x06 -> CONTROL CHARACTER
u'\x07' # 0x07 -> CONTROL CHARACTER
u'\x08' # 0x08 -> CONTROL CHARACTER
u'\t' # 0x09 -> CONTROL CHARACTER
u'\n' # 0x0A -> CONTROL CHARACTER
u'\x0b' # 0x0B -> CONTROL CHARACTER
u'\x0c' # 0x0C -> CONTROL CHARACTER
u'\r' # 0x0D -> CONTROL CHARACTER
u'\x0e' # 0x0E -> CONTROL CHARACTER
u'\x0f' # 0x0F -> CONTROL CHARACTER
u'\x10' # 0x10 -> CONTROL CHARACTER
u'\x11' # 0x11 -> CONTROL CHARACTER
u'\x12' # 0x12 -> CONTROL CHARACTER
u'\x13' # 0x13 -> CONTROL CHARACTER
u'\x14' # 0x14 -> CONTROL CHARACTER
u'\x15' # 0x15 -> CONTROL CHARACTER
u'\x16' # 0x16 -> CONTROL CHARACTER
u'\x17' # 0x17 -> CONTROL CHARACTER
u'\x18' # 0x18 -> CONTROL CHARACTER
u'\x19' # 0x19 -> CONTROL CHARACTER
u'\x1a' # 0x1A -> CONTROL CHARACTER
u'\x1b' # 0x1B -> CONTROL CHARACTER
u'\x1c' # 0x1C -> CONTROL CHARACTER
u'\x1d' # 0x1D -> CONTROL CHARACTER
u'\x1e' # 0x1E -> CONTROL CHARACTER
u'\x1f' # 0x1F -> CONTROL CHARACTER
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> CONTROL CHARACTER
u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0x81 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe3' # 0x8B -> LATIN SMALL LETTER A WITH TILDE
u'\xe5' # 0x8C -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
u'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
u'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
u'\xec' # 0x93 -> LATIN SMALL LETTER I WITH GRAVE
u'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE
u'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf2' # 0x98 -> LATIN SMALL LETTER O WITH GRAVE
u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE
u'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
u'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
u'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u2020' # 0xA0 -> DAGGER
u'\xb0' # 0xA1 -> DEGREE SIGN
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa7' # 0xA4 -> SECTION SIGN
u'\u2022' # 0xA5 -> BULLET
u'\xb6' # 0xA6 -> PILCROW SIGN
u'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
u'\xae' # 0xA8 -> REGISTERED SIGN
u'\u0160' # 0xA9 -> LATIN CAPITAL LETTER S WITH CARON
u'\u2122' # 0xAA -> TRADE MARK SIGN
u'\xb4' # 0xAB -> ACUTE ACCENT
u'\xa8' # 0xAC -> DIAERESIS
u'\u2260' # 0xAD -> NOT EQUAL TO
u'\u017d' # 0xAE -> LATIN CAPITAL LETTER Z WITH CARON
u'\xd8' # 0xAF -> LATIN CAPITAL LETTER O WITH STROKE
u'\u221e' # 0xB0 -> INFINITY
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
u'\u2206' # 0xB4 -> INCREMENT
u'\xb5' # 0xB5 -> MICRO SIGN
u'\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL
u'\u2211' # 0xB7 -> N-ARY SUMMATION
u'\u220f' # 0xB8 -> N-ARY PRODUCT
u'\u0161' # 0xB9 -> LATIN SMALL LETTER S WITH CARON
u'\u222b' # 0xBA -> INTEGRAL
u'\xaa' # 0xBB -> FEMININE ORDINAL INDICATOR
u'\xba' # 0xBC -> MASCULINE ORDINAL INDICATOR
u'\u03a9' # 0xBD -> GREEK CAPITAL LETTER OMEGA
u'\u017e' # 0xBE -> LATIN SMALL LETTER Z WITH CARON
u'\xf8' # 0xBF -> LATIN SMALL LETTER O WITH STROKE
u'\xbf' # 0xC0 -> INVERTED QUESTION MARK
u'\xa1' # 0xC1 -> INVERTED EXCLAMATION MARK
u'\xac' # 0xC2 -> NOT SIGN
u'\u221a' # 0xC3 -> SQUARE ROOT
u'\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK
u'\u2248' # 0xC5 -> ALMOST EQUAL TO
u'\u0106' # 0xC6 -> LATIN CAPITAL LETTER C WITH ACUTE
u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
u'\xa0' # 0xCA -> NO-BREAK SPACE
u'\xc0' # 0xCB -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc3' # 0xCC -> LATIN CAPITAL LETTER A WITH TILDE
u'\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE
u'\u0152' # 0xCE -> LATIN CAPITAL LIGATURE OE
u'\u0153' # 0xCF -> LATIN SMALL LIGATURE OE
u'\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE
u'\u2014' # 0xD1 -> EM DASH
u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
u'\xf7' # 0xD6 -> DIVISION SIGN
u'\u25ca' # 0xD7 -> LOZENGE
u'\uf8ff' # 0xD8 -> Apple logo
u'\xa9' # 0xD9 -> COPYRIGHT SIGN
u'\u2044' # 0xDA -> FRACTION SLASH
u'\u20ac' # 0xDB -> EURO SIGN
u'\u2039' # 0xDC -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\u203a' # 0xDD -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\xc6' # 0xDE -> LATIN CAPITAL LETTER AE
u'\xbb' # 0xDF -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2013' # 0xE0 -> EN DASH
u'\xb7' # 0xE1 -> MIDDLE DOT
u'\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK
u'\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2030' # 0xE4 -> PER MILLE SIGN
u'\xc2' # 0xE5 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\u0107' # 0xE6 -> LATIN SMALL LETTER C WITH ACUTE
u'\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
u'\xc8' # 0xE9 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xEB -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xEC -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xcc' # 0xED -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE
u'\xd2' # 0xF1 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xF3 -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xd9' # 0xF4 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\u0131' # 0xF5 -> LATIN SMALL LETTER DOTLESS I
u'\u02c6' # 0xF6 -> MODIFIER LETTER CIRCUMFLEX ACCENT
u'\u02dc' # 0xF7 -> SMALL TILDE
u'\xaf' # 0xF8 -> MACRON
u'\u03c0' # 0xF9 -> GREEK SMALL LETTER PI
u'\xcb' # 0xFA -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\u02da' # 0xFB -> RING ABOVE
u'\xb8' # 0xFC -> CEDILLA
u'\xca' # 0xFD -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xe6' # 0xFE -> LATIN SMALL LETTER AE
u'\u02c7' # 0xFF -> CARON
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
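# Round-trip sketch using the stateless codec API (the codec name is the one
# registered by getregentry above; 0xA9 maps to U+0160 per the tables):
#
#   import codecs
#   info = codecs.lookup('mac-croatian')
#   raw = info.encode(u'\u0160ibenik')[0]    # -> '\xa9ibenik'
#   assert info.decode(raw)[0] == u'\u0160ibenik'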
|
sahana/Turkey | refs/heads/master | modules/templates/VM/controllers.py | 15 | # -*- coding: utf-8 -*-
from gluon import *
from s3 import S3CustomController
THEME = "VM"
# =============================================================================
class index(S3CustomController):
""" Custom Home Page """
def __call__(self):
output = {}
# Allow editing of page content from browser using CMS module
if current.deployment_settings.has_module("cms"):
system_roles = current.auth.get_system_roles()
ADMIN = system_roles.ADMIN in current.session.s3.roles
s3db = current.s3db
table = s3db.cms_post
ltable = s3db.cms_post_module
module = "default"
resource = "index"
query = (ltable.module == module) & \
((ltable.resource == None) | \
(ltable.resource == resource)) & \
(ltable.post_id == table.id) & \
(table.deleted != True)
item = current.db(query).select(table.body,
table.id,
limitby=(0, 1)).first()
if item:
if ADMIN:
item = DIV(XML(item.body),
BR(),
A(current.T("Edit"),
_href=URL(c="cms", f="post",
args=[item.id, "update"]),
_class="action-btn"))
else:
item = DIV(XML(item.body))
elif ADMIN:
if current.response.s3.crud.formstyle == "bootstrap":
_class = "btn"
else:
_class = "action-btn"
item = A(current.T("Edit"),
_href=URL(c="cms", f="post", args="create",
vars={"module": module,
"resource": resource
}),
_class="%s cms-edit" % _class)
else:
item = ""
else:
item = ""
output["item"] = item
self._view(THEME, "index.html")
return output
# END =========================================================================
|
CSC-ORG/Dynamic-Dashboard-2015 | refs/heads/master | engine/lib/python2.7/site-packages/django/contrib/gis/gdal/tests/test_geom.py | 94 | import json
from binascii import b2a_hex
try:
from django.utils.six.moves import cPickle as pickle
except ImportError:
import pickle
import unittest
from unittest import skipUnless
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.geometry.test_data import TestDataMixin
from django.utils.six.moves import xrange
if HAS_GDAL:
from django.contrib.gis.gdal import (OGRGeometry, OGRGeomType,
OGRException, OGRIndexError, SpatialReference, CoordTransform,
GDAL_VERSION)
@skipUnless(HAS_GDAL, "GDAL is required")
class OGRGeomTest(unittest.TestCase, TestDataMixin):
"This tests the OGR Geometry."
def test00a_geomtype(self):
"Testing OGRGeomType object."
# OGRGeomType should initialize on all these inputs.
OGRGeomType(1)
OGRGeomType(7)
OGRGeomType('point')
OGRGeomType('GeometrycollectioN')
OGRGeomType('LINearrING')
OGRGeomType('Unknown')
        # Should raise OGRException on these inputs
self.assertRaises(OGRException, OGRGeomType, 23)
self.assertRaises(OGRException, OGRGeomType, 'fooD')
self.assertRaises(OGRException, OGRGeomType, 9)
# Equivalence can take strings, ints, and other OGRGeomTypes
self.assertEqual(OGRGeomType(1), OGRGeomType(1))
self.assertEqual(OGRGeomType(7), 'GeometryCollection')
self.assertEqual(OGRGeomType('point'), 'POINT')
self.assertNotEqual(OGRGeomType('point'), 2)
self.assertEqual(OGRGeomType('unknown'), 0)
self.assertEqual(OGRGeomType(6), 'MULtiPolyGON')
self.assertEqual(OGRGeomType(1), OGRGeomType('point'))
self.assertNotEqual(OGRGeomType('POINT'), OGRGeomType(6))
# Testing the Django field name equivalent property.
self.assertEqual('PointField', OGRGeomType('Point').django)
self.assertEqual('GeometryField', OGRGeomType('Geometry').django)
self.assertEqual('GeometryField', OGRGeomType('Unknown').django)
self.assertIsNone(OGRGeomType('none').django)
# 'Geometry' initialization implies an unknown geometry type.
gt = OGRGeomType('Geometry')
self.assertEqual(0, gt.num)
self.assertEqual('Unknown', gt.name)
def test00b_geomtype_25d(self):
"Testing OGRGeomType object with 25D types."
wkb25bit = OGRGeomType.wkb25bit
self.assertEqual(OGRGeomType(wkb25bit + 1), 'Point25D')
self.assertEqual(OGRGeomType('MultiLineString25D'), (5 + wkb25bit))
self.assertEqual('GeometryCollectionField', OGRGeomType('GeometryCollection25D').django)
def test01a_wkt(self):
"Testing WKT output."
for g in self.geometries.wkt_out:
geom = OGRGeometry(g.wkt)
self.assertEqual(g.wkt, geom.wkt)
def test01a_ewkt(self):
"Testing EWKT input/output."
for ewkt_val in ('POINT (1 2 3)', 'LINEARRING (0 0,1 1,2 1,0 0)'):
# First with ewkt output when no SRID in EWKT
self.assertEqual(ewkt_val, OGRGeometry(ewkt_val).ewkt)
            # Now test consumption with an SRID specified.
ewkt_val = 'SRID=4326;%s' % ewkt_val
geom = OGRGeometry(ewkt_val)
self.assertEqual(ewkt_val, geom.ewkt)
self.assertEqual(4326, geom.srs.srid)
def test01b_gml(self):
"Testing GML output."
for g in self.geometries.wkt_out:
geom = OGRGeometry(g.wkt)
exp_gml = g.gml
if GDAL_VERSION >= (1, 8):
# In GDAL 1.8, the non-conformant GML tag <gml:GeometryCollection> was
# replaced with <gml:MultiGeometry>.
exp_gml = exp_gml.replace('GeometryCollection', 'MultiGeometry')
self.assertEqual(exp_gml, geom.gml)
def test01c_hex(self):
"Testing HEX input/output."
for g in self.geometries.hex_wkt:
geom1 = OGRGeometry(g.wkt)
self.assertEqual(g.hex.encode(), geom1.hex)
# Constructing w/HEX
geom2 = OGRGeometry(g.hex)
self.assertEqual(geom1, geom2)
def test01d_wkb(self):
"Testing WKB input/output."
for g in self.geometries.hex_wkt:
geom1 = OGRGeometry(g.wkt)
wkb = geom1.wkb
self.assertEqual(b2a_hex(wkb).upper(), g.hex.encode())
# Constructing w/WKB.
geom2 = OGRGeometry(wkb)
self.assertEqual(geom1, geom2)
def test01e_json(self):
"Testing GeoJSON input/output."
for g in self.geometries.json_geoms:
geom = OGRGeometry(g.wkt)
if not hasattr(g, 'not_equal'):
# Loading jsons to prevent decimal differences
self.assertEqual(json.loads(g.json), json.loads(geom.json))
self.assertEqual(json.loads(g.json), json.loads(geom.geojson))
self.assertEqual(OGRGeometry(g.wkt), OGRGeometry(geom.json))
# Test input with some garbage content (but valid json) (#15529)
geom = OGRGeometry('{"type": "Point", "coordinates": [ 100.0, 0.0 ], "other": "<test>"}')
self.assertIsInstance(geom, OGRGeometry)
def test02_points(self):
"Testing Point objects."
OGRGeometry('POINT(0 0)')
for p in self.geometries.points:
if not hasattr(p, 'z'): # No 3D
pnt = OGRGeometry(p.wkt)
self.assertEqual(1, pnt.geom_type)
self.assertEqual('POINT', pnt.geom_name)
self.assertEqual(p.x, pnt.x)
self.assertEqual(p.y, pnt.y)
self.assertEqual((p.x, p.y), pnt.tuple)
def test03_multipoints(self):
"Testing MultiPoint objects."
for mp in self.geometries.multipoints:
mgeom1 = OGRGeometry(mp.wkt) # First one from WKT
self.assertEqual(4, mgeom1.geom_type)
self.assertEqual('MULTIPOINT', mgeom1.geom_name)
mgeom2 = OGRGeometry('MULTIPOINT') # Creating empty multipoint
mgeom3 = OGRGeometry('MULTIPOINT')
for g in mgeom1:
mgeom2.add(g) # adding each point from the multipoints
mgeom3.add(g.wkt) # should take WKT as well
self.assertEqual(mgeom1, mgeom2) # they should equal
self.assertEqual(mgeom1, mgeom3)
self.assertEqual(mp.coords, mgeom2.coords)
self.assertEqual(mp.n_p, mgeom2.point_count)
def test04_linestring(self):
"Testing LineString objects."
prev = OGRGeometry('POINT(0 0)')
for ls in self.geometries.linestrings:
linestr = OGRGeometry(ls.wkt)
self.assertEqual(2, linestr.geom_type)
self.assertEqual('LINESTRING', linestr.geom_name)
self.assertEqual(ls.n_p, linestr.point_count)
self.assertEqual(ls.coords, linestr.tuple)
self.assertEqual(linestr, OGRGeometry(ls.wkt))
self.assertNotEqual(linestr, prev)
self.assertRaises(OGRIndexError, linestr.__getitem__, len(linestr))
prev = linestr
# Testing the x, y properties.
x = [tmpx for tmpx, tmpy in ls.coords]
y = [tmpy for tmpx, tmpy in ls.coords]
self.assertEqual(x, linestr.x)
self.assertEqual(y, linestr.y)
def test05_multilinestring(self):
"Testing MultiLineString objects."
prev = OGRGeometry('POINT(0 0)')
for mls in self.geometries.multilinestrings:
mlinestr = OGRGeometry(mls.wkt)
self.assertEqual(5, mlinestr.geom_type)
self.assertEqual('MULTILINESTRING', mlinestr.geom_name)
self.assertEqual(mls.n_p, mlinestr.point_count)
self.assertEqual(mls.coords, mlinestr.tuple)
self.assertEqual(mlinestr, OGRGeometry(mls.wkt))
self.assertNotEqual(mlinestr, prev)
prev = mlinestr
for ls in mlinestr:
self.assertEqual(2, ls.geom_type)
self.assertEqual('LINESTRING', ls.geom_name)
self.assertRaises(OGRIndexError, mlinestr.__getitem__, len(mlinestr))
def test06_linearring(self):
"Testing LinearRing objects."
prev = OGRGeometry('POINT(0 0)')
for rr in self.geometries.linearrings:
lr = OGRGeometry(rr.wkt)
#self.assertEqual(101, lr.geom_type.num)
self.assertEqual('LINEARRING', lr.geom_name)
self.assertEqual(rr.n_p, len(lr))
self.assertEqual(lr, OGRGeometry(rr.wkt))
self.assertNotEqual(lr, prev)
prev = lr
def test07a_polygons(self):
"Testing Polygon objects."
# Testing `from_bbox` class method
bbox = (-180, -90, 180, 90)
p = OGRGeometry.from_bbox(bbox)
self.assertEqual(bbox, p.extent)
prev = OGRGeometry('POINT(0 0)')
for p in self.geometries.polygons:
poly = OGRGeometry(p.wkt)
self.assertEqual(3, poly.geom_type)
self.assertEqual('POLYGON', poly.geom_name)
self.assertEqual(p.n_p, poly.point_count)
self.assertEqual(p.n_i + 1, len(poly))
# Testing area & centroid.
self.assertAlmostEqual(p.area, poly.area, 9)
x, y = poly.centroid.tuple
self.assertAlmostEqual(p.centroid[0], x, 9)
self.assertAlmostEqual(p.centroid[1], y, 9)
# Testing equivalence
self.assertEqual(poly, OGRGeometry(p.wkt))
self.assertNotEqual(poly, prev)
if p.ext_ring_cs:
ring = poly[0]
self.assertEqual(p.ext_ring_cs, ring.tuple)
self.assertEqual(p.ext_ring_cs, poly[0].tuple)
self.assertEqual(len(p.ext_ring_cs), ring.point_count)
for r in poly:
self.assertEqual('LINEARRING', r.geom_name)
def test07b_closepolygons(self):
"Testing closing Polygon objects."
# Both rings in this geometry are not closed.
poly = OGRGeometry('POLYGON((0 0, 5 0, 5 5, 0 5), (1 1, 2 1, 2 2, 2 1))')
self.assertEqual(8, poly.point_count)
with self.assertRaises(OGRException):
poly.centroid
poly.close_rings()
self.assertEqual(10, poly.point_count) # Two closing points should've been added
self.assertEqual(OGRGeometry('POINT(2.5 2.5)'), poly.centroid)
def test08_multipolygons(self):
"Testing MultiPolygon objects."
OGRGeometry('POINT(0 0)')
for mp in self.geometries.multipolygons:
mpoly = OGRGeometry(mp.wkt)
self.assertEqual(6, mpoly.geom_type)
self.assertEqual('MULTIPOLYGON', mpoly.geom_name)
if mp.valid:
self.assertEqual(mp.n_p, mpoly.point_count)
self.assertEqual(mp.num_geom, len(mpoly))
self.assertRaises(OGRIndexError, mpoly.__getitem__, len(mpoly))
for p in mpoly:
self.assertEqual('POLYGON', p.geom_name)
self.assertEqual(3, p.geom_type)
self.assertEqual(mpoly.wkt, OGRGeometry(mp.wkt).wkt)
def test09a_srs(self):
"Testing OGR Geometries with Spatial Reference objects."
for mp in self.geometries.multipolygons:
# Creating a geometry w/spatial reference
sr = SpatialReference('WGS84')
mpoly = OGRGeometry(mp.wkt, sr)
self.assertEqual(sr.wkt, mpoly.srs.wkt)
# Ensuring that SRS is propagated to clones.
klone = mpoly.clone()
self.assertEqual(sr.wkt, klone.srs.wkt)
# Ensuring all children geometries (polygons and their rings) all
# return the assigned spatial reference as well.
for poly in mpoly:
self.assertEqual(sr.wkt, poly.srs.wkt)
for ring in poly:
self.assertEqual(sr.wkt, ring.srs.wkt)
# Ensuring SRS propagate in topological ops.
a = OGRGeometry(self.geometries.topology_geoms[0].wkt_a, sr)
b = OGRGeometry(self.geometries.topology_geoms[0].wkt_b, sr)
diff = a.difference(b)
union = a.union(b)
self.assertEqual(sr.wkt, diff.srs.wkt)
self.assertEqual(sr.srid, union.srs.srid)
# Instantiating w/an integer SRID
mpoly = OGRGeometry(mp.wkt, 4326)
self.assertEqual(4326, mpoly.srid)
mpoly.srs = SpatialReference(4269)
self.assertEqual(4269, mpoly.srid)
self.assertEqual('NAD83', mpoly.srs.name)
# Incrementing through the multipolygon after the spatial reference
# has been re-assigned.
for poly in mpoly:
self.assertEqual(mpoly.srs.wkt, poly.srs.wkt)
poly.srs = 32140
for ring in poly:
# Changing each ring in the polygon
self.assertEqual(32140, ring.srs.srid)
self.assertEqual('NAD83 / Texas South Central', ring.srs.name)
ring.srs = str(SpatialReference(4326)) # back to WGS84
self.assertEqual(4326, ring.srs.srid)
# Using the `srid` property.
ring.srid = 4322
self.assertEqual('WGS 72', ring.srs.name)
self.assertEqual(4322, ring.srid)
def test09b_srs_transform(self):
"Testing transform()."
orig = OGRGeometry('POINT (-104.609 38.255)', 4326)
trans = OGRGeometry('POINT (992385.4472045 481455.4944650)', 2774)
# Using an srid, a SpatialReference object, and a CoordTransform object
        # for transformations.
t1, t2, t3 = orig.clone(), orig.clone(), orig.clone()
t1.transform(trans.srid)
t2.transform(SpatialReference('EPSG:2774'))
ct = CoordTransform(SpatialReference('WGS84'), SpatialReference(2774))
t3.transform(ct)
# Testing use of the `clone` keyword.
k1 = orig.clone()
k2 = k1.transform(trans.srid, clone=True)
self.assertEqual(k1, orig)
self.assertNotEqual(k1, k2)
prec = 3
for p in (t1, t2, t3, k2):
self.assertAlmostEqual(trans.x, p.x, prec)
self.assertAlmostEqual(trans.y, p.y, prec)
def test09c_transform_dim(self):
"Testing coordinate dimension is the same on transformed geometries."
ls_orig = OGRGeometry('LINESTRING(-104.609 38.255)', 4326)
ls_trans = OGRGeometry('LINESTRING(992385.4472045 481455.4944650)', 2774)
prec = 3
ls_orig.transform(ls_trans.srs)
# Making sure the coordinate dimension is still 2D.
self.assertEqual(2, ls_orig.coord_dim)
self.assertAlmostEqual(ls_trans.x[0], ls_orig.x[0], prec)
self.assertAlmostEqual(ls_trans.y[0], ls_orig.y[0], prec)
def test10_difference(self):
"Testing difference()."
for i in xrange(len(self.geometries.topology_geoms)):
a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
d1 = OGRGeometry(self.geometries.diff_geoms[i].wkt)
d2 = a.difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a - b) # __sub__ is difference operator
a -= b # testing __isub__
self.assertEqual(d1, a)
def test11_intersection(self):
"Testing intersects() and intersection()."
for i in xrange(len(self.geometries.topology_geoms)):
a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
i1 = OGRGeometry(self.geometries.intersect_geoms[i].wkt)
self.assertTrue(a.intersects(b))
i2 = a.intersection(b)
self.assertEqual(i1, i2)
self.assertEqual(i1, a & b) # __and__ is intersection operator
a &= b # testing __iand__
self.assertEqual(i1, a)
def test12_symdifference(self):
"Testing sym_difference()."
for i in xrange(len(self.geometries.topology_geoms)):
a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
d1 = OGRGeometry(self.geometries.sdiff_geoms[i].wkt)
d2 = a.sym_difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a ^ b) # __xor__ is symmetric difference operator
a ^= b # testing __ixor__
self.assertEqual(d1, a)
def test13_union(self):
"Testing union()."
for i in xrange(len(self.geometries.topology_geoms)):
a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
u1 = OGRGeometry(self.geometries.union_geoms[i].wkt)
u2 = a.union(b)
self.assertEqual(u1, u2)
self.assertEqual(u1, a | b) # __or__ is union operator
a |= b # testing __ior__
self.assertEqual(u1, a)
def test14_add(self):
"Testing GeometryCollection.add()."
# Can't insert a Point into a MultiPolygon.
mp = OGRGeometry('MultiPolygon')
pnt = OGRGeometry('POINT(5 23)')
self.assertRaises(OGRException, mp.add, pnt)
        # GeometryCollection.add may take an OGRGeometry (if it is another
        # collection of the same type, all child geometries will be added
        # individually) or WKT.
for mp in self.geometries.multipolygons:
mpoly = OGRGeometry(mp.wkt)
mp1 = OGRGeometry('MultiPolygon')
mp2 = OGRGeometry('MultiPolygon')
mp3 = OGRGeometry('MultiPolygon')
for poly in mpoly:
mp1.add(poly) # Adding a geometry at a time
mp2.add(poly.wkt) # Adding WKT
mp3.add(mpoly) # Adding a MultiPolygon's entire contents at once.
for tmp in (mp1, mp2, mp3):
self.assertEqual(mpoly, tmp)
def test15_extent(self):
"Testing `extent` property."
# The xmin, ymin, xmax, ymax of the MultiPoint should be returned.
mp = OGRGeometry('MULTIPOINT(5 23, 0 0, 10 50)')
self.assertEqual((0.0, 0.0, 10.0, 50.0), mp.extent)
# Testing on the 'real world' Polygon.
poly = OGRGeometry(self.geometries.polygons[3].wkt)
ring = poly.shell
x, y = ring.x, ring.y
xmin, ymin = min(x), min(y)
xmax, ymax = max(x), max(y)
self.assertEqual((xmin, ymin, xmax, ymax), poly.extent)
def test16_25D(self):
"Testing 2.5D geometries."
pnt_25d = OGRGeometry('POINT(1 2 3)')
self.assertEqual('Point25D', pnt_25d.geom_type.name)
self.assertEqual(3.0, pnt_25d.z)
self.assertEqual(3, pnt_25d.coord_dim)
ls_25d = OGRGeometry('LINESTRING(1 1 1,2 2 2,3 3 3)')
self.assertEqual('LineString25D', ls_25d.geom_type.name)
self.assertEqual([1.0, 2.0, 3.0], ls_25d.z)
self.assertEqual(3, ls_25d.coord_dim)
def test17_pickle(self):
"Testing pickle support."
g1 = OGRGeometry('LINESTRING(1 1 1,2 2 2,3 3 3)', 'WGS84')
g2 = pickle.loads(pickle.dumps(g1))
self.assertEqual(g1, g2)
self.assertEqual(4326, g2.srs.srid)
self.assertEqual(g1.srs.wkt, g2.srs.wkt)
def test18_ogrgeometry_transform_workaround(self):
"Testing coordinate dimensions on geometries after transformation."
# A bug in GDAL versions prior to 1.7 changes the coordinate
# dimension of a geometry after it has been transformed.
# This test ensures that the bug workarounds employed within
# `OGRGeometry.transform` indeed work.
wkt_2d = "MULTILINESTRING ((0 0,1 1,2 2))"
wkt_3d = "MULTILINESTRING ((0 0 0,1 1 1,2 2 2))"
srid = 4326
# For both the 2D and 3D MultiLineString, ensure _both_ the dimension
# of the collection and the component LineString have the expected
# coordinate dimension after transform.
geom = OGRGeometry(wkt_2d, srid)
geom.transform(srid)
self.assertEqual(2, geom.coord_dim)
self.assertEqual(2, geom[0].coord_dim)
self.assertEqual(wkt_2d, geom.wkt)
geom = OGRGeometry(wkt_3d, srid)
geom.transform(srid)
self.assertEqual(3, geom.coord_dim)
self.assertEqual(3, geom[0].coord_dim)
self.assertEqual(wkt_3d, geom.wkt)
def test19_equivalence_regression(self):
"Testing equivalence methods with non-OGRGeometry instances."
self.assertIsNotNone(OGRGeometry('POINT(0 0)'))
self.assertNotEqual(OGRGeometry('LINESTRING(0 0, 1 1)'), 3)
|
ktnyt/chainer | refs/heads/master | chainer/functions/array/split_axis.py | 1 | import numpy
import six
import chainer
from chainer import backend
from chainer.backends import intel64
from chainer import function_node
from chainer.utils import collections_abc
from chainer.utils import type_check
import chainerx
_numpy_split_ok = numpy.lib.NumpyVersion(numpy.__version__) >= '1.11.0'
def _fix_numpy_split(ys, x, indices_or_sections, axis):
"""Make the output of np.split compatible with numpy >= 1.11"""
if all(y.ndim == x.ndim for y in ys):
return ys
tmp = [len(t) for t in numpy.split(
numpy.empty(x.shape[axis], dtype=numpy.int8), indices_or_sections, 0)]
shape = list(x.shape)
for i, t in enumerate(tmp):
y = ys[i]
if y.ndim != x.ndim:
assert y.size == 0
shape[axis] = t
ys[i] = y.reshape(shape)
return ys
def _get_indices_or_sections(indices_or_sections):
"""Checks and convert ``indices_or_sections`` argument
Converted value is one of: 1-D numpy.ndarray, list, int, and
NumPy int scalar.
Returns:
A binary tuple in which the 1st element is indices (sequence) and
the 2nd element is sections (scalar).
Only one of the two is not ``None`` and the other is ``None``.
"""
ios = indices_or_sections
is_seq = False
if isinstance(ios, numpy.ndarray):
# numpy.ndarray
if ios.dtype.kind != 'i' and ios.size > 0:
# Note: numpy.array([]) (dtype is float64) should be accepted.
raise TypeError('indices_or_sections must be integers')
if ios.ndim >= 2:
raise TypeError('indices_or_sections must be 1-D sequence')
is_seq = ios.ndim != 0
elif isinstance(ios, collections_abc.Sequence):
# Any sequence except numpy.ndarray
ios = list(ios)
is_seq = True
elif isinstance(indices_or_sections, six.integer_types):
# int
pass
else:
raise TypeError(
'indices_or_sections must be integer or 1-D array.\n'
'Actual: {}'.format(type(indices_or_sections)))
if is_seq and chainer.is_debug():
for p, n in six.moves.zip(ios, ios[1:]):
if p > n:
raise ValueError('indices_or_sections must be sorted')
if is_seq:
return ios, None
else:
return None, ios
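# Editor's illustrative sketch (not part of the original module): the helper
# above returns exactly one non-None element.
def _indices_or_sections_example():
    # A sorted sequence comes back as indices, a plain int as sections.
    assert _get_indices_or_sections([1, 3]) == ([1, 3], None)
    assert _get_indices_or_sections(2) == (None, 2)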
class SplitAxis(function_node.FunctionNode):
"""Function that splits multiple arrays along the specified axis."""
def __init__(self, indices_or_sections, axis):
indices, sections = _get_indices_or_sections(indices_or_sections)
assert (indices is None) != (sections is None)
self.indices = indices
self.sections = sections
self.axis = axis
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
type_check.expect(in_types[0].ndim > self.axis)
if self.indices is not None:
indices = self.indices
if len(indices) > 0:
max_index = type_check.make_variable(indices[-1], 'max_index')
type_check.expect(in_types[0].shape[self.axis] >= max_index)
else:
assert self.sections is not None
sections = type_check.make_variable(self.sections, 'sections')
type_check.expect(in_types[0].shape[self.axis] % sections == 0)
@property
def indices_or_sections(self):
return self.indices if self.indices is not None else self.sections
def forward_chainerx(self, inputs):
x, = inputs
return tuple(chainerx.split(x, self.indices_or_sections, self.axis))
def forward(self, inputs):
# Currently iDeep only supports 4 dims
if (intel64.should_use_ideep('>=auto')
and intel64.inputs_all_ready(inputs, (4,))
and self._ideep_is_supported(inputs)):
return self._forward_ideep(inputs)
x, = inputs
self._xp = backend.get_array_module(x)
indices_or_sections = self.indices_or_sections
ret = self._xp.split(x, indices_or_sections, self.axis)
if self._xp == numpy and not _numpy_split_ok:
ret = _fix_numpy_split(ret, x, indices_or_sections, self.axis)
self._shapes = [r.shape for r in ret]
return tuple(ret)
def _ideep_is_supported(self, inputs):
        # Returns True if iDeep supports the current configuration of inputs
        # and arguments. This is a workaround for a limitation in the iDeep
        # internal implementation.
if self.indices is not None:
indices = self.indices
if len(indices) == 0:
return False # Empty sequence
if indices[0] == 0:
return False # Sequence starting with 0
for i in six.moves.range(1, len(indices)):
if indices[i-1] == indices[i]:
return False # Sequence with duplicate index
else:
if self.sections == 1:
return False # 1
# Workaround for iDeep segfault issue
# See:
# https://github.com/chainer/chainer/pull/4281#issuecomment-365830630
# TODO(niboshi): Remove this after iDeep is fixed.
# Note: inputs[0].ndim is always 4.
if (self.axis == 1 or self.axis == -3) and inputs[0].shape[1] == 8:
return False
return True
def _forward_ideep(self, inputs):
x, = inputs
offsets = intel64.ideep.intVector()
# TODO(iDeep)
        # bypass a python3 issue when transferring an array to std::vector<>
# https://github.com/SimpleITK/SimpleITK/issues/106
axis = self.axis % x.ndim
if self.indices is not None:
for i in self.indices:
offsets.push_back(int(i))
else:
d = x.shape[self.axis]
step = d // self.sections
for i in six.moves.range(step, d, step):
offsets.push_back(i)
ret = intel64.ideep.concat.Backward(
intel64.ideep.array(x), offsets, axis)
self._shapes = [r.shape for r in ret]
return ret
def backward(self, indexes, grad_outputs):
dtype = self.inputs[0].dtype
grads = [
self._xp.zeros(shape, dtype=dtype) if gy is None else gy
for gy, shape in six.moves.zip(grad_outputs, self._shapes)]
return chainer.functions.concat(grads, self.axis),
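    # Editor's note (hedged): grad_outputs may contain None for outputs that
    # were never used downstream; backward() above substitutes zero arrays of
    # the recorded shapes so that concat() can rebuild the full input gradient.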
def split_axis(x, indices_or_sections, axis, force_tuple=True):
"""Splits given variables along an axis.
Args:
x (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
:class:`cupy.ndarray`):
A variable to be split.
indices_or_sections (int or 1-D array): If this argument is an integer,
N, the array will be divided into N equal arrays along axis.
If it is a 1-D array of sorted integers, it
indicates the positions where the array is split.
axis (int): Axis that the input array is split along.
force_tuple (bool): If ``True`` (the default) this method returns a
tuple even when the number of outputs is one. Otherwise, if
``False`` a Variable will be returned when the number of outputs
is one.
Returns:
tuple or Variable: Tuple of :class:`~chainer.Variable` objects
if the number of outputs is more than 1 or
:class:`~chainer.Variable` otherwise.
When ``force_tuple`` is ``True``, returned value is always a tuple
regardless of the number of outputs.
"""
res = SplitAxis(indices_or_sections, axis).apply((x,))
if force_tuple or len(res) != 1:
return res
return res[0]
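# A minimal usage sketch (editor's addition; assumes a working Chainer
# installation and is not part of the original module):
def _split_axis_example():
    x = numpy.arange(12, dtype=numpy.float32).reshape(3, 4)
    a, b = split_axis(x, [1], axis=1)  # shapes (3, 1) and (3, 3)
    c, d = split_axis(x, 2, axis=1)    # two equal (3, 2) sections
    return a, b, c, d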
|
J4LP/eve-wspace | refs/heads/develop | evewspace/Map/admin_pages.py | 23 | from core.admin_page_registry import registry
registry.register('Map Admin', 'map_admin.html', 'Map.map_admin')
|
kivatu/kivy_old | refs/heads/master | examples/widgets/scrollview.py | 17 | import kivy
kivy.require('1.0.8')
from kivy.app import App
from kivy.uix.button import Button
from kivy.uix.scrollview import ScrollView
from kivy.uix.gridlayout import GridLayout
class ScrollViewApp(App):
def build(self):
# create a default grid layout with custom width/height
layout = GridLayout(cols=1, padding=10, spacing=10,
size_hint=(None, None), width=500)
        # when we add children to the grid layout, its size doesn't change at
        # all. We need to ensure that the height is the minimum required to
        # contain all the children (otherwise, children would end up outside
        # the bounding box of the layout).
layout.bind(minimum_height=layout.setter('height'))
# add button into that grid
for i in range(30):
btn = Button(text=str(i), size=(480, 40),
size_hint=(None, None))
layout.add_widget(btn)
# create a scroll view, with a size < size of the grid
root = ScrollView(size_hint=(None, None), size=(500, 320),
pos_hint={'center_x': .5, 'center_y': .5}
, do_scroll_x=False)
root.add_widget(layout)
return root
if __name__ == '__main__':
ScrollViewApp().run()
|
klusark/android_external_chromium_org | refs/heads/cm-11.0 | chrome/common/extensions/docs/server2/github_file_system_test.py | 23 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import sys
import unittest
from appengine_blobstore import AppEngineBlobstore
from appengine_url_fetcher import AppEngineUrlFetcher
from appengine_wrappers import files
from fake_fetchers import ConfigureFakeFetchers
from github_file_system import GithubFileSystem
from object_store_creator import ObjectStoreCreator
import url_constants
class GithubFileSystemTest(unittest.TestCase):
def setUp(self):
ConfigureFakeFetchers()
self._base_path = os.path.join(sys.path[0],
'test_data',
'github_file_system')
self._file_system = GithubFileSystem.Create(ObjectStoreCreator.ForTest())
def _ReadLocalFile(self, filename):
with open(os.path.join(self._base_path, filename), 'r') as f:
return f.read()
def testList(self):
self.assertEqual(json.loads(self._ReadLocalFile('expected_list.json')),
self._file_system.Read(['/']).Get())
def testRead(self):
self.assertEqual(self._ReadLocalFile('expected_read.txt'),
self._file_system.ReadSingle('/analytics/launch.js'))
def testStat(self):
self.assertEqual(0, self._file_system.Stat('zipball').version)
def testKeyGeneration(self):
self.assertEqual(0, len(files.GetBlobKeys()))
self._file_system.ReadSingle('/analytics/launch.js')
self.assertEqual(1, len(files.GetBlobKeys()))
self._file_system.ReadSingle('/analytics/main.css')
self.assertEqual(1, len(files.GetBlobKeys()))
if __name__ == '__main__':
unittest.main()
|
CatsAndDogsbvba/odoo | refs/heads/8.0 | addons/hw_proxy/__openerp__.py | 313 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Hardware Proxy',
'version': '1.0',
'category': 'Point Of Sale',
'sequence': 6,
'summary': 'Connect the Web Client to Hardware Peripherals',
'website': 'https://www.odoo.com/page/point-of-sale',
'description': """
Hardware Proxy
==============
This module allows you to remotely use peripherals connected to this server.
This module only contains the enabling framework. The actual device drivers
are found in other modules that must be installed separately.
""",
'author': 'OpenERP SA',
'depends': [],
'test': [
],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
daltonmaag/robofab | refs/heads/master | Docs/Examples/objects/RInfo_00.py | 7 | # robofab manual
# Info object
# usage examples
from robofab.world import CurrentFont
f = CurrentFont()
print f.info.postscriptFullName
print f.info.openTypeNameDesigner
f.info.openTypeNameDesigner = "Jan van Krimpen"
print f.info.openTypeNameDesigner
print f.info.openTypeOS2VendorID
print f.info.unitsPerEm
print f.info.xHeight
print f.info.openTypeNameLicenseURL
# but you can set the values as well
f.info.postscriptUniqueID = 4309359
f.info.openTypeNameDesigner = "Eric Gill"
|
mcepl/youtube-dl | refs/heads/master | youtube_dl/extractor/niconico.py | 17 | # encoding: utf-8
from __future__ import unicode_literals
import re
import json
import datetime
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse_urlencode,
compat_urlparse,
)
from ..utils import (
ExtractorError,
int_or_none,
parse_duration,
parse_iso8601,
sanitized_Request,
xpath_text,
determine_ext,
urlencode_postdata,
)
class NiconicoIE(InfoExtractor):
IE_NAME = 'niconico'
IE_DESC = 'ニコニコ動画'
_TESTS = [{
'url': 'http://www.nicovideo.jp/watch/sm22312215',
'md5': 'd1a75c0823e2f629128c43e1212760f9',
'info_dict': {
'id': 'sm22312215',
'ext': 'mp4',
'title': 'Big Buck Bunny',
'uploader': 'takuya0301',
'uploader_id': '2698420',
'upload_date': '20131123',
'timestamp': 1385182762,
'description': '(c) copyright 2008, Blender Foundation / www.bigbuckbunny.org',
'duration': 33,
},
}, {
        # Files downloaded with and without credentials are different, so omit
# the md5 field
'url': 'http://www.nicovideo.jp/watch/nm14296458',
'info_dict': {
'id': 'nm14296458',
'ext': 'swf',
'title': '【鏡音リン】Dance on media【オリジナル】take2!',
'description': 'md5:689f066d74610b3b22e0f1739add0f58',
'uploader': 'りょうた',
'uploader_id': '18822557',
'upload_date': '20110429',
'timestamp': 1304065916,
'duration': 209,
},
}, {
        # Video exists but is marked as "deleted".
# md5 is unstable
'url': 'http://www.nicovideo.jp/watch/sm10000',
'info_dict': {
'id': 'sm10000',
'ext': 'unknown_video',
'description': 'deleted',
'title': 'ドラえもんエターナル第3話「決戦第3新東京市」<前編>',
'upload_date': '20071224',
'timestamp': 1198527840, # timestamp field has different value if logged in
'duration': 304,
},
}, {
'url': 'http://www.nicovideo.jp/watch/so22543406',
'info_dict': {
'id': '1388129933',
'ext': 'mp4',
'title': '【第1回】RADIOアニメロミックス ラブライブ!~のぞえりRadio Garden~',
'description': 'md5:b27d224bb0ff53d3c8269e9f8b561cf1',
'timestamp': 1388851200,
'upload_date': '20140104',
'uploader': 'アニメロチャンネル',
'uploader_id': '312',
}
}]
_VALID_URL = r'https?://(?:www\.|secure\.)?nicovideo\.jp/watch/(?P<id>(?:[a-z]{2})?[0-9]+)'
_NETRC_MACHINE = 'niconico'
# Determine whether the downloader used authentication to download video
_AUTHENTICATED = False
def _real_initialize(self):
self._login()
def _login(self):
(username, password) = self._get_login_info()
# No authentication to be performed
if not username:
return True
# Log in
login_form_strs = {
'mail': username,
'password': password,
}
login_data = urlencode_postdata(login_form_strs)
request = sanitized_Request(
'https://secure.nicovideo.jp/secure/login', login_data)
login_results = self._download_webpage(
request, None, note='Logging in', errnote='Unable to log in')
if re.search(r'(?i)<h1 class="mb8p4">Log in error</h1>', login_results) is not None:
self._downloader.report_warning('unable to log in: bad username or password')
return False
# Successful login
self._AUTHENTICATED = True
return True
def _real_extract(self, url):
video_id = self._match_id(url)
# Get video webpage. We are not actually interested in it for normal
# cases, but need the cookies in order to be able to download the
# info webpage
webpage, handle = self._download_webpage_handle(
'http://www.nicovideo.jp/watch/' + video_id, video_id)
if video_id.startswith('so'):
video_id = self._match_id(handle.geturl())
video_info = self._download_xml(
'http://ext.nicovideo.jp/api/getthumbinfo/' + video_id, video_id,
note='Downloading video info page')
if self._AUTHENTICATED:
# Get flv info
flv_info_webpage = self._download_webpage(
'http://flapi.nicovideo.jp/api/getflv/' + video_id + '?as3=1',
video_id, 'Downloading flv info')
else:
# Get external player info
ext_player_info = self._download_webpage(
'http://ext.nicovideo.jp/thumb_watch/' + video_id, video_id)
thumb_play_key = self._search_regex(
r'\'thumbPlayKey\'\s*:\s*\'(.*?)\'', ext_player_info, 'thumbPlayKey')
# Get flv info
flv_info_data = compat_urllib_parse_urlencode({
'k': thumb_play_key,
'v': video_id
})
flv_info_request = sanitized_Request(
'http://ext.nicovideo.jp/thumb_watch', flv_info_data,
{'Content-Type': 'application/x-www-form-urlencoded'})
flv_info_webpage = self._download_webpage(
flv_info_request, video_id,
note='Downloading flv info', errnote='Unable to download flv info')
flv_info = compat_urlparse.parse_qs(flv_info_webpage)
if 'url' not in flv_info:
if 'deleted' in flv_info:
raise ExtractorError('The video has been deleted.',
expected=True)
else:
raise ExtractorError('Unable to find video URL')
video_real_url = flv_info['url'][0]
# Start extracting information
title = xpath_text(video_info, './/title')
if not title:
title = self._og_search_title(webpage, default=None)
if not title:
title = self._html_search_regex(
r'<span[^>]+class="videoHeaderTitle"[^>]*>([^<]+)</span>',
webpage, 'video title')
watch_api_data_string = self._html_search_regex(
r'<div[^>]+id="watchAPIDataContainer"[^>]+>([^<]+)</div>',
webpage, 'watch api data', default=None)
watch_api_data = self._parse_json(watch_api_data_string, video_id) if watch_api_data_string else {}
video_detail = watch_api_data.get('videoDetail', {})
extension = xpath_text(video_info, './/movie_type')
if not extension:
extension = determine_ext(video_real_url)
thumbnail = (
xpath_text(video_info, './/thumbnail_url') or
self._html_search_meta('image', webpage, 'thumbnail', default=None) or
video_detail.get('thumbnail'))
description = xpath_text(video_info, './/description')
timestamp = parse_iso8601(xpath_text(video_info, './/first_retrieve'))
if not timestamp:
match = self._html_search_meta('datePublished', webpage, 'date published', default=None)
if match:
timestamp = parse_iso8601(match.replace('+', ':00+'))
if not timestamp and video_detail.get('postedAt'):
timestamp = parse_iso8601(
video_detail['postedAt'].replace('/', '-'),
delimiter=' ', timezone=datetime.timedelta(hours=9))
view_count = int_or_none(xpath_text(video_info, './/view_counter'))
if not view_count:
match = self._html_search_regex(
r'>Views: <strong[^>]*>([^<]+)</strong>',
webpage, 'view count', default=None)
if match:
view_count = int_or_none(match.replace(',', ''))
view_count = view_count or video_detail.get('viewCount')
comment_count = int_or_none(xpath_text(video_info, './/comment_num'))
if not comment_count:
match = self._html_search_regex(
r'>Comments: <strong[^>]*>([^<]+)</strong>',
webpage, 'comment count', default=None)
if match:
comment_count = int_or_none(match.replace(',', ''))
comment_count = comment_count or video_detail.get('commentCount')
duration = (parse_duration(
xpath_text(video_info, './/length') or
self._html_search_meta(
'video:duration', webpage, 'video duration', default=None)) or
video_detail.get('length'))
webpage_url = xpath_text(video_info, './/watch_url') or url
if video_info.find('.//ch_id') is not None:
uploader_id = video_info.find('.//ch_id').text
uploader = video_info.find('.//ch_name').text
elif video_info.find('.//user_id') is not None:
uploader_id = video_info.find('.//user_id').text
uploader = video_info.find('.//user_nickname').text
else:
uploader_id = uploader = None
return {
'id': video_id,
'url': video_real_url,
'title': title,
'ext': extension,
'format_id': 'economy' if video_real_url.endswith('low') else 'normal',
'thumbnail': thumbnail,
'description': description,
'uploader': uploader,
'timestamp': timestamp,
'uploader_id': uploader_id,
'view_count': view_count,
'comment_count': comment_count,
'duration': duration,
'webpage_url': webpage_url,
}
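# A hedged usage sketch (editor's addition, not part of the extractor):
# extractors are normally driven through YoutubeDL rather than instantiated
# directly.
def _niconico_usage_example():
    import youtube_dl
    opts = {'quiet': True, 'skip_download': True}
    with youtube_dl.YoutubeDL(opts) as ydl:
        return ydl.extract_info(
            'http://www.nicovideo.jp/watch/sm22312215', download=False)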
class NiconicoPlaylistIE(InfoExtractor):
_VALID_URL = r'https?://www\.nicovideo\.jp/mylist/(?P<id>\d+)'
_TEST = {
'url': 'http://www.nicovideo.jp/mylist/27411728',
'info_dict': {
'id': '27411728',
'title': 'AKB48のオールナイトニッポン',
},
'playlist_mincount': 225,
}
def _real_extract(self, url):
list_id = self._match_id(url)
webpage = self._download_webpage(url, list_id)
entries_json = self._search_regex(r'Mylist\.preload\(\d+, (\[.*\])\);',
webpage, 'entries')
entries = json.loads(entries_json)
entries = [{
'_type': 'url',
'ie_key': NiconicoIE.ie_key(),
'url': ('http://www.nicovideo.jp/watch/%s' %
entry['item_data']['video_id']),
} for entry in entries]
return {
'_type': 'playlist',
'title': self._search_regex(r'\s+name: "(.*?)"', webpage, 'title'),
'id': list_id,
'entries': entries,
}
|
michalkurka/h2o-3 | refs/heads/master | h2o-py/tests/testdir_sklearn/pyunit_sklearn_classification_search_pipeline.py | 2 | from __future__ import print_function
import importlib, inspect, os, sys
import numpy as np
from sklearn.datasets import make_classification
from sklearn.decomposition import TruncatedSVD
from sklearn.metrics import accuracy_score, make_scorer
from sklearn.model_selection import train_test_split, RandomizedSearchCV
from sklearn.pipeline import Pipeline
import h2o
from h2o.cross_validation import H2OKFold
from h2o.sklearn import h2o_connection, H2OGradientBoostingEstimator, H2OGradientBoostingClassifier, H2OSVD
from h2o.sklearn.wrapper import H2OConnectionMonitorMixin
sys.path.insert(1, os.path.join("..",".."))
from tests import pyunit_utils, Namespace as ns
"""
This test suite creates sklearn pipelines using either a mix of sklearn+H2O components,
or only H2O components.
Then, it feeds them with H2O frames (more efficient, and ensures compatibility with the old API)
or with numpy arrays to provide the simplest approach for users wanting to use H2O like any sklearn estimator.
"""
seed = 2019
init_connection_args = dict(strict_version_check=False, show_progress=True)
scores = {}
def _get_data(format='numpy', n_classes=2):
X, y = make_classification(n_samples=1000, n_features=10, n_informative=5, n_classes=n_classes, random_state=seed)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=seed)
data = ns(X_train=X_train, X_test=X_test, y_train=y_train, y_test=y_test)
if format == 'h2o':
for k, v in data.__dict__.items():
setattr(data, k, h2o.H2OFrame(v))
return data
def _h2o_accuracy(y_true, preds):
return accuracy_score(y_true.as_data_frame().values, preds.as_data_frame().values)
def test_h2o_only_pipeline_with_h2o_frames():
pipeline = Pipeline([
('svd', H2OSVD(seed=seed)),
('estimator', H2OGradientBoostingClassifier(seed=seed))
])
params = dict(
svd__nv=[2, 3],
svd__transform=['DESCALE', 'DEMEAN', 'NONE'],
estimator__ntrees=[5, 10],
estimator__max_depth=[1, 2, 3],
estimator__learn_rate=[0.1, 0.2],
)
search = RandomizedSearchCV(pipeline,
params,
n_iter=5,
random_state=seed,
n_jobs=1, # fails with parallel jobs
)
data = _get_data(format='h2o', n_classes=3)
assert isinstance(data.X_train, h2o.H2OFrame)
search.set_params(
scoring=make_scorer(_h2o_accuracy),
cv=H2OKFold(data.X_train, n_folds=3, seed=seed),
)
search.fit(data.X_train, data.y_train)
preds = search.predict(data.X_test)
assert isinstance(preds, h2o.H2OFrame)
assert preds.dim == [len(data.X_test), 1]
probs = search.predict_proba(data.X_test)
assert probs.dim == [len(data.X_test), 3]
assert np.allclose(np.sum(probs.as_data_frame().values, axis=1), 1.), "`predict_proba` didn't return probabilities"
score = search.score(data.X_test, data.y_test)
assert isinstance(score, float)
skl_score = accuracy_score(data.y_test.as_data_frame().values, preds.as_data_frame().values)
assert abs(score - skl_score) < 1e-6, "score={}, skl_score={}".format(score, skl_score)
scores['h2o_only_pipeline_with_h2o_frame'] = score
def test_h2o_only_pipeline_with_numpy_arrays():
    # Note that in normal situations (release build), init_connection_args can be omitted;
    # otherwise, it should be set to the first H2O element in the pipeline.
    # Also note that in this specific case, mixing numpy inputs with a fully H2O pipeline,
# the last estimator requires the `data_conversion=True` param in order to return numpy arrays in predictions.
    # Also, random search never fits the original estimators but clones them;
    # therefore, if we need to ensure a connection, we can:
# - use `h2o_connection` context manager: with h2o_connection(): ...
# - use the H2O estimator itself as a context manager: with H2OEstimator() as est: ...
# - init h2o manually
with h2o_connection(**init_connection_args):
pipeline = Pipeline([
('svd', H2OSVD(seed=seed, init_connection_args=init_connection_args)),
('estimator', H2OGradientBoostingClassifier(seed=seed, data_conversion=True)),
])
params = dict(
svd__nv=[2, 3],
svd__transform=['DESCALE', 'DEMEAN', 'NONE'],
estimator__ntrees=[5, 10],
estimator__max_depth=[1, 2, 3],
estimator__learn_rate=[0.1, 0.2],
)
search = RandomizedSearchCV(pipeline,
params,
n_iter=5,
scoring='accuracy',
cv=3,
random_state=seed,
n_jobs=1,
)
data = _get_data(format='numpy', n_classes=3)
assert isinstance(data.X_train, np.ndarray)
search.fit(data.X_train, data.y_train)
preds = search.predict(data.X_test)
assert isinstance(preds, np.ndarray)
assert preds.shape == (len(data.X_test),)
probs = search.predict_proba(data.X_test)
assert probs.shape == (len(data.X_test), 3)
assert np.allclose(np.sum(probs, axis=1), 1.), "`predict_proba` didn't return probabilities"
score = search.score(data.X_test, data.y_test)
assert isinstance(score, float)
skl_score = accuracy_score(data.y_test, preds)
assert abs(score - skl_score) < 1e-6, "score={}, skl_score={}".format(score, skl_score)
scores['h2o_only_pipeline_with_numpy_arrays'] = score
def test_mixed_pipeline_with_numpy_arrays():
    # Note that in normal situations (release build), init_connection_args can be omitted;
    # otherwise, it should be set to the first H2O element in the pipeline
with h2o_connection(**init_connection_args):
pipeline = Pipeline([
('svd', TruncatedSVD(random_state=seed)),
('estimator', H2OGradientBoostingClassifier(seed=seed))
])
params = dict(
svd__n_components=[2, 3],
estimator__ntrees=[5, 10],
estimator__max_depth=[1, 2, 3],
estimator__learn_rate=[0.1, 0.2],
)
search = RandomizedSearchCV(pipeline,
params,
n_iter=5,
scoring='accuracy',
cv=3,
random_state=seed,
n_jobs=1,
)
data = _get_data(format='numpy', n_classes=3)
assert isinstance(data.X_train, np.ndarray)
search.fit(data.X_train, data.y_train)
preds = search.predict(data.X_test)
assert isinstance(preds, np.ndarray)
assert preds.shape == (len(data.X_test),)
probs = search.predict_proba(data.X_test)
assert probs.shape == (len(data.X_test), 3)
assert np.allclose(np.sum(probs, axis=1), 1.), "`predict_proba` didn't return probabilities"
score = search.score(data.X_test, data.y_test)
assert isinstance(score, float)
skl_score = accuracy_score(data.y_test, preds)
assert abs(score - skl_score) < 1e-6, "score={}, skl_score={}".format(score, skl_score)
scores['mixed_pipeline_with_numpy_arrays'] = score
def _assert_test_scores_equivalent(lk, rk):
if lk in scores and rk in scores:
        assert abs(scores[lk] - scores[rk]) < 1e-6, \
"expected equivalent scores but got {lk}={lscore} and {rk}={rscore}" \
.format(lk=lk, rk=rk, lscore=scores[lk], rscore=scores[rk])
elif lk not in scores:
print("no scores for {}".format(lk))
else:
print("no scores for {}".format(rk))
def test_scores_are_equivalent():
_assert_test_scores_equivalent('h2o_only_pipeline_with_h2o_frame', 'h2o_only_pipeline_with_numpy_arrays')
pyunit_utils.run_tests([
test_h2o_only_pipeline_with_h2o_frames,
test_h2o_only_pipeline_with_numpy_arrays,
test_mixed_pipeline_with_numpy_arrays,
test_scores_are_equivalent,
])
|
MichaelNedzelsky/intellij-community | refs/heads/master | python/helpers/pydev/tests_mainloop/gui-gtk.py | 100 | #!/usr/bin/env python
"""Simple GTK example to manually test event loop integration.
To run this:
1) Enable the PyDev GUI event loop integration for gtk
2) do an execfile on this script
3) ensure you have a working GUI simultaneously with an
interactive console
"""
if __name__ == '__main__':
import pygtk
pygtk.require('2.0')
import gtk
    def hello_world(widget, data=None):
print("Hello World")
def delete_event(widget, event, data=None):
return False
def destroy(widget, data=None):
gtk.main_quit()
window = gtk.Window(gtk.WINDOW_TOPLEVEL)
window.connect("delete_event", delete_event)
window.connect("destroy", destroy)
button = gtk.Button("Hello World")
button.connect("clicked", hello_world, None)
window.add(button)
button.show()
window.show()
|
paplorinc/intellij-community | refs/heads/master | python/testData/quickFixes/PyAddImportQuickFixTest/osPathFunctions/main_after.py | 27 | from os.path import join
join |
EvanK/ansible | refs/heads/devel | lib/ansible/modules/cloud/vmware/vmware_category.py | 47 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_category
short_description: Manage VMware categories
description:
- This module can be used to create / delete / update VMware categories.
  - The tag feature was introduced in vSphere 6, so this module is not supported by earlier versions of vSphere.
- All variables and VMware object names are case sensitive.
version_added: '2.7'
author:
- Abhijeet Kasurde (@Akasurde)
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
- vSphere Automation SDK
- vCloud Suite SDK
options:
category_name:
description:
- The name of category to manage.
required: True
category_description:
description:
- The category description.
- This is required only if C(state) is set to C(present).
- This parameter is ignored, when C(state) is set to C(absent).
default: ''
category_cardinality:
description:
- The category cardinality.
- This parameter is ignored, when updating existing category.
choices: ['multiple', 'single']
default: 'multiple'
new_category_name:
description:
- The new name for an existing category.
- This value is used while updating an existing category.
state:
description:
- The state of category.
    - If set to C(present) and the category does not exist, then the category is created.
    - If set to C(present) and the category exists, then the category is updated.
    - If set to C(absent) and the category exists, then the category is deleted.
    - If set to C(absent) and the category does not exist, no action is taken.
    - The update process only allows changing the category name and description.
default: 'present'
choices: [ 'present', 'absent' ]
extends_documentation_fragment: vmware_rest_client.documentation
'''
EXAMPLES = r'''
- name: Create a category
vmware_category:
hostname: "{{ vcenter_server }}"
username: "{{ vcenter_user }}"
password: "{{ vcenter_pass }}"
category_name: Sample_Cat_0001
category_description: Sample Description
category_cardinality: 'multiple'
state: present
- name: Rename category
vmware_category:
hostname: "{{ vcenter_server }}"
username: "{{ vcenter_user }}"
password: "{{ vcenter_pass }}"
category_name: Sample_Category_0001
new_category_name: Sample_Category_0002
state: present
- name: Update category description
vmware_category:
hostname: "{{ vcenter_server }}"
username: "{{ vcenter_user }}"
password: "{{ vcenter_pass }}"
category_name: Sample_Category_0001
category_description: Some fancy description
state: present
- name: Delete category
vmware_category:
hostname: "{{ vcenter_server }}"
username: "{{ vcenter_user }}"
password: "{{ vcenter_pass }}"
category_name: Sample_Category_0002
state: absent
'''
RETURN = r'''
category_results:
description: dictionary of category metadata
returned: on success
type: dict
sample: {
"category_id": "urn:vmomi:InventoryServiceCategory:d7120bda-9fa5-4f92-9d71-aa1acff2e5a8:GLOBAL",
"msg": "Category NewCat_0001 updated."
}
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware_rest_client import VmwareRestClient
try:
from com.vmware.cis.tagging_client import Category, CategoryModel
except ImportError:
pass
class VmwareCategory(VmwareRestClient):
def __init__(self, module):
super(VmwareCategory, self).__init__(module)
self.category_service = Category(self.connect)
self.global_categories = dict()
self.category_name = self.params.get('category_name')
self.get_all_categories()
def ensure_state(self):
"""Manage internal states of categories. """
desired_state = self.params.get('state')
states = {
'present': {
'present': self.state_update_category,
'absent': self.state_create_category,
},
'absent': {
'present': self.state_delete_category,
'absent': self.state_unchanged,
}
}
states[desired_state][self.check_category_status()]()
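        # Editor's note (hedged): the nested dict above dispatches on
        # (desired state, current state); e.g. state=present with an existing
        # category calls state_update_category, while state=absent with no
        # such category falls through to state_unchanged.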
def state_create_category(self):
"""Create category."""
category_spec = self.category_service.CreateSpec()
category_spec.name = self.category_name
category_spec.description = self.params.get('category_description')
if self.params.get('category_cardinality') == 'single':
category_spec.cardinality = CategoryModel.Cardinality.SINGLE
else:
category_spec.cardinality = CategoryModel.Cardinality.MULTIPLE
category_spec.associable_types = set()
category_id = self.category_service.create(category_spec)
if category_id:
self.module.exit_json(changed=True,
category_results=dict(msg="Category '%s' created." % category_spec.name,
category_id=category_id))
self.module.exit_json(changed=False,
category_results=dict(msg="No category created", category_id=''))
def state_unchanged(self):
"""Return unchanged state."""
self.module.exit_json(changed=False)
def state_update_category(self):
"""Update category."""
category_id = self.global_categories[self.category_name]['category_id']
changed = False
results = dict(msg="Category %s is unchanged." % self.category_name,
category_id=category_id)
category_update_spec = self.category_service.UpdateSpec()
change_list = []
old_cat_desc = self.global_categories[self.category_name]['category_description']
new_cat_desc = self.params.get('category_description')
if new_cat_desc and new_cat_desc != old_cat_desc:
category_update_spec.description = new_cat_desc
results['msg'] = 'Category %s updated.' % self.category_name
change_list.append(True)
new_cat_name = self.params.get('new_category_name')
if new_cat_name in self.global_categories:
self.module.fail_json(msg="Unable to rename %s as %s already"
" exists in configuration." % (self.category_name, new_cat_name))
old_cat_name = self.global_categories[self.category_name]['category_name']
if new_cat_name and new_cat_name != old_cat_name:
category_update_spec.name = new_cat_name
results['msg'] = 'Category %s updated.' % self.category_name
change_list.append(True)
if any(change_list):
self.category_service.update(category_id, category_update_spec)
changed = True
self.module.exit_json(changed=changed,
category_results=results)
def state_delete_category(self):
"""Delete category."""
category_id = self.global_categories[self.category_name]['category_id']
self.category_service.delete(category_id=category_id)
self.module.exit_json(changed=True,
category_results=dict(msg="Category '%s' deleted." % self.category_name,
category_id=category_id))
def check_category_status(self):
"""
Check if category exists or not
Returns: 'present' if category found, else 'absent'
"""
if self.category_name in self.global_categories:
return 'present'
else:
return 'absent'
def get_all_categories(self):
"""Retrieve all category information."""
for category in self.category_service.list():
category_obj = self.category_service.get(category)
self.global_categories[category_obj.name] = dict(
category_description=category_obj.description,
category_used_by=category_obj.used_by,
category_cardinality=str(category_obj.cardinality),
category_associable_types=category_obj.associable_types,
category_id=category_obj.id,
category_name=category_obj.name,
)
def main():
argument_spec = VmwareRestClient.vmware_client_argument_spec()
argument_spec.update(
category_name=dict(type='str', required=True),
category_description=dict(type='str', default='', required=False),
category_cardinality=dict(type='str', choices=["multiple", "single"], default="multiple"),
new_category_name=dict(type='str'),
state=dict(type='str', choices=['present', 'absent'], default='present'),
)
module = AnsibleModule(argument_spec=argument_spec)
vmware_category = VmwareCategory(module)
vmware_category.ensure_state()
if __name__ == '__main__':
main()
|
kosermedve/pokeapi | refs/heads/master | pokemon/models.py | 12 | from __future__ import unicode_literals
from django.db import models
from imagekit.models.fields import ProcessedImageField
from imagekit.processors import ResizeToFill
from .utils import unique_filename
class DateTimeModel(models.Model):
class Meta:
abstract = True
modified = models.DateTimeField(auto_now=True)
created = models.DateTimeField(auto_now_add=True)
class Ability(DateTimeModel):
def __unicode__(self):
return self.name
name = models.CharField(max_length=50)
description = models.TextField(max_length=200)
class Type(DateTimeModel):
def __unicode__(self):
return self.name
name = models.CharField(max_length=50)
def _build_dict(self, items):
lst = []
for i in items:
lst.append(dict(
name=i.to.name,
resource_uri='/api/v1/type/' + str(i.to.id) + '/'
))
return lst
def weakness_list(self):
items = TypeChart.objects.filter(
frm__name=self.name,
ttype='weak')
if items.exists():
return self._build_dict(items)
return []
weaknesses = property(fget=weakness_list)
def resistances_list(self):
items = TypeChart.objects.filter(
frm__name=self.name,
ttype='resist')
if items.exists():
return self._build_dict(items)
return []
resistances = property(fget=resistances_list)
def super_list(self):
items = TypeChart.objects.filter(
frm__name=self.name,
ttype='super effective')
if items.exists():
return self._build_dict(items)
return []
supers = property(fget=super_list)
def ineffective_list(self):
items = TypeChart.objects.filter(
frm__name=self.name,
ttype='ineffective')
if items.exists():
return self._build_dict(items)
return []
ineffectives = property(fget=ineffective_list)
def no_list(self):
items = TypeChart.objects.filter(
frm__name=self.name,
ttype='noeffect')
if items.exists():
return self._build_dict(items)
return []
no_effects = property(fget=no_list)
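    # Editor's note (hedged sketch): each property above surfaces related
    # TypeChart rows as plain dicts, e.g. a type's `weaknesses` might yield
    # [{'name': 'water', 'resource_uri': '/api/v1/type/3/'}] (illustrative
    # values only).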
class TypeChart(DateTimeModel):
def __unicode__(self):
return ' '.join([self.frm.name, self.ttype, 'against', self.to.name])
frm = models.ForeignKey(
Type, blank=True, null=True, related_name='type_frm')
to = models.ForeignKey(
Type, blank=True, null=True, related_name='type_to')
TYPES = (
('weak', 'weak'),
('super effective', 'super effective'),
('resistant', 'resistant'),
('ineffective', 'ineffective'),
('noeffect', 'noeffect'),
('resist', 'resist'),
)
ttype = models.CharField(
max_length=15, choices=TYPES, blank=True, null=True)
class EggGroup(DateTimeModel):
def __unicode__(self):
return self.name
name = models.CharField(max_length=50)
def get_pokes(self):
pokes = Pokemon.objects.filter(
egg_group=self
)
lst = []
if pokes.exists():
for p in pokes:
lst.append(dict(
name=p.name.capitalize(),
resource_uri='/api/v1/pokemon/' + str(p.pkdx_id) + '/'
))
return lst
pokemon = property(fget=get_pokes)
class Game(DateTimeModel):
def __unicode__(self):
return self.name
name = models.CharField(max_length=50)
generation = models.IntegerField(max_length=4)
release_year = models.IntegerField(max_length=6)
class Description(DateTimeModel):
def __unicode__(self):
return self.name
name = models.CharField(max_length=50)
description = models.TextField(max_length=200)
game = models.ManyToManyField(Game, blank=True, null=True)
def get_game_details(self):
lst = []
for g in self.game.all():
lst.append(dict(
name=g.name,
resource_uri='/api/v1/game/' + str(g.id) + '/')
)
return lst
n_game = property(fget=get_game_details)
def get_pokemon(self):
nm = self.name.split('_')[0]
pokes = Pokemon.objects.filter(
name=nm.lower()
)
if pokes.exists():
return dict(
name=pokes[0].name,
resource_uri='/api/v1/pokemon/' + str(pokes[0].pkdx_id) + '/')
return []
pokemon = property(fget=get_pokemon)
class Move(DateTimeModel):
def __unicode__(self):
return self.name
name = models.CharField(max_length=50)
description = models.TextField(max_length=200)
etype = models.ManyToManyField(Type, null=True)
pp = models.IntegerField(max_length=5)
CATEGORY = (
('physical', 'physical'),
('special', 'special'),
('status', 'status'),
)
category = models.CharField(choices=CATEGORY, max_length=10)
power = models.IntegerField(max_length=6)
accuracy = models.IntegerField(max_length=6)
class Sprite(DateTimeModel):
def __unicode__(self):
return self.name
name = models.CharField(max_length=50)
image = ProcessedImageField(
[ResizeToFill(96, 96)],
upload_to=unique_filename,
format='PNG',
options={'quality': 80})
def get_pokemon(self):
nm = self.name.split('_')[0]
pokes = Pokemon.objects.filter(
name=nm.lower()
)
if pokes.exists():
return dict(
name=pokes[0].name,
resource_uri='/api/v1/pokemon/' + str(pokes[0].pkdx_id) + '/')
return []
pokemon = property(fget=get_pokemon)
class Pokemon(DateTimeModel):
def __unicode__(self):
return ' - '.join([str(self.pkdx_id), self.name])
name = models.CharField(max_length=50)
pkdx_id = models.IntegerField(max_length=4, blank=True)
species = models.CharField(max_length=30)
height = models.CharField(max_length=10)
weight = models.CharField(max_length=10)
ev_yield = models.CharField(max_length=20)
catch_rate = models.IntegerField(max_length=4)
happiness = models.IntegerField(max_length=4)
exp = models.IntegerField(max_length=5)
GROWTHS = (
('slow', 'slow'),
('medium slow', 'medium slow'),
('medium', 'medium'),
('medium fast', 'medium fast'),
('fast', 'fast'),
)
growth_rate = models.CharField(choices=GROWTHS, max_length=15)
male_female_ratio = models.CharField(max_length=10)
hp = models.IntegerField(max_length=4)
attack = models.IntegerField(max_length=4)
defense = models.IntegerField(max_length=4)
sp_atk = models.IntegerField(max_length=4)
sp_def = models.IntegerField(max_length=4)
speed = models.IntegerField(max_length=4)
total = models.IntegerField(max_length=6)
egg_cycles = models.IntegerField(max_length=6)
abilities = models.ManyToManyField(
Ability, blank=True, null=True)
def ability_names(self):
lst = []
for a in self.abilities.all():
lst.append(dict(
resource_uri='/api/v1/ability/' + str(a.id) + '/',
name=a.name.lower())
)
return lst
ability_list = property(fget=ability_names)
def get_evolution_details(self):
evols = Evolution.objects.filter(
frm=self
)
if evols.exists():
lst = []
for e in evols:
d = dict(
to=e.to.name.capitalize(),
resource_uri='/api/v1/pokemon/' + str(e.to.pkdx_id) + '/',
method=e.method,
)
if e.level > 0:
d['level'] = e.level
if e.detail:
d['detail'] = e.detail
lst.append(d)
return lst
return []
evolutions = property(fget=get_evolution_details)
types = models.ManyToManyField(
Type, blank=True, null=True)
def type_list(self):
lst = []
for t in self.types.all():
lst.append(dict(
resource_uri='/api/v1/type/' + str(t.id) + '/',
name=t.name.lower())
)
return lst
type_list = property(fget=type_list)
egg_group = models.ManyToManyField(
EggGroup, blank=True, null=True)
def get_eggs(self):
lst = []
for e in self.egg_group.all():
lst.append(dict(
name=e.name.capitalize(),
resource_uri='/api/v1/egg/' + str(e.id) + '/'
))
return lst
eggs = property(fget=get_eggs)
descriptions = models.ManyToManyField(
Description, blank=True, null=True)
def get_sprites(self):
lst = []
for s in self.sprites.all():
lst.append(dict(
name=self.name,
resource_uri='/api/v1/sprite/' + str(s.id) + '/')
)
return lst
my_sprites = property(fget=get_sprites)
sprites = models.ManyToManyField(
Sprite, blank=True, null=True)
def get_moves(self):
moves = MovePokemon.objects.filter(
pokemon=self
)
lst = []
if moves.exists():
for m in moves:
d = dict(
name=m.move.name.capitalize(),
resource_uri='/api/v1/move/' + str(m.move.id) + '/',
learn_type=m.learn_type
)
if m.level > 0:
d['level'] = m.level
lst.append(d)
return lst
moves = property(fget=get_moves)
class Evolution(DateTimeModel):
def __unicode__(self):
return self.frm.name + ' to ' + self.to.name
frm = models.ForeignKey(
Pokemon, null=True, blank=True,
related_name='frm_evol_pokemon')
to = models.ForeignKey(
Pokemon, null=True, blank=True,
related_name='to_evol_pokemon')
EVOLV_METHODS = (
('level up', 'level_up'),
('stone', 'stone'),
('trade', 'trade'),
('other', 'other'),
)
level = models.IntegerField(max_length=3, default=0)
method = models.CharField(
choices=EVOLV_METHODS, max_length=10, default=0)
detail = models.CharField(max_length=10, null=True, blank=True)
class MovePokemon(DateTimeModel):
def __unicode__(self):
return self.pokemon.name + ' - ' + self.move.name
pokemon = models.ForeignKey(
Pokemon, related_name='move', null=True, blank=True)
move = models.ForeignKey(
Move, related_name='pokemon', null=True, blank=True)
LEARN = (
('level up', 'level up'),
('machine', 'machine'),
('egg move', 'egg move'),
('tutor', 'tutor'),
('other', 'other'),
)
learn_type = models.CharField(
choices=LEARN, max_length=15, default='level up')
level = models.IntegerField(
max_length=6, default=0, null=True, blank=True)
class Pokedex(DateTimeModel):
def __unicode__(self):
return self.name
name = models.CharField(max_length=60)
def _all_pokes(self):
lst = []
for p in Pokemon.objects.all():
lst.append(dict(
name=p.name,
resource_uri='api/v1/pokemon/' + str(p.pkdx_id) + '/'
))
return lst
pokemon = property(fget=_all_pokes)
|
isabellaleehs/isabellaleehs.github.io | refs/heads/gh-pages | projects/webapp1/flask/lib/python2.7/site-packages/pip/_vendor/requests/cookies.py | 355 | # -*- coding: utf-8 -*-
"""
requests.cookies
~~~~~~~~~~~~~~~~
Compatibility code to be able to use `cookielib.CookieJar` with requests.
requests.utils imports from here, so be careful with imports.
"""
import copy
import time
import calendar
import collections
from .compat import cookielib, urlparse, urlunparse, Morsel
try:
import threading
# grr, pyflakes: this fixes "redefinition of unused 'threading'"
threading
except ImportError:
import dummy_threading as threading
class MockRequest(object):
"""Wraps a `requests.Request` to mimic a `urllib2.Request`.
The code in `cookielib.CookieJar` expects this interface in order to correctly
manage cookie policies, i.e., determine whether a cookie can be set, given the
domains of the request and the cookie.
The original request object is read-only. The client is responsible for collecting
the new headers via `get_new_headers()` and interpreting them appropriately. You
probably want `get_cookie_header`, defined below.
"""
def __init__(self, request):
self._r = request
self._new_headers = {}
self.type = urlparse(self._r.url).scheme
def get_type(self):
return self.type
def get_host(self):
return urlparse(self._r.url).netloc
def get_origin_req_host(self):
return self.get_host()
def get_full_url(self):
# Only return the response's URL if the user hadn't set the Host
# header
if not self._r.headers.get('Host'):
return self._r.url
# If they did set it, retrieve it and reconstruct the expected domain
host = self._r.headers['Host']
parsed = urlparse(self._r.url)
# Reconstruct the URL as we expect it
return urlunparse([
parsed.scheme, host, parsed.path, parsed.params, parsed.query,
parsed.fragment
])
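    # Editor's note (hedged): rebuilding the URL with the caller-supplied
    # Host header keeps cookie domain matching aligned with what the server
    # actually sees, e.g. a request to an IP address with "Host: example.com"
    # is treated as example.com for cookie purposes.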
def is_unverifiable(self):
return True
def has_header(self, name):
return name in self._r.headers or name in self._new_headers
def get_header(self, name, default=None):
return self._r.headers.get(name, self._new_headers.get(name, default))
def add_header(self, key, val):
"""cookielib has no legitimate use for this method; add it back if you find one."""
raise NotImplementedError("Cookie headers should be added with add_unredirected_header()")
def add_unredirected_header(self, name, value):
self._new_headers[name] = value
def get_new_headers(self):
return self._new_headers
@property
def unverifiable(self):
return self.is_unverifiable()
@property
def origin_req_host(self):
return self.get_origin_req_host()
@property
def host(self):
return self.get_host()
class MockResponse(object):
"""Wraps a `httplib.HTTPMessage` to mimic a `urllib.addinfourl`.
...what? Basically, expose the parsed HTTP headers from the server response
the way `cookielib` expects to see them.
"""
def __init__(self, headers):
"""Make a MockResponse for `cookielib` to read.
:param headers: a httplib.HTTPMessage or analogous carrying the headers
"""
self._headers = headers
def info(self):
return self._headers
    def getheaders(self, name):
        return self._headers.getheaders(name)
def extract_cookies_to_jar(jar, request, response):
"""Extract the cookies from the response into a CookieJar.
:param jar: cookielib.CookieJar (not necessarily a RequestsCookieJar)
:param request: our own requests.Request object
:param response: urllib3.HTTPResponse object
"""
if not (hasattr(response, '_original_response') and
response._original_response):
return
# the _original_response field is the wrapped httplib.HTTPResponse object,
req = MockRequest(request)
# pull out the HTTPMessage with the headers and put it in the mock:
res = MockResponse(response._original_response.msg)
jar.extract_cookies(res, req)
def get_cookie_header(jar, request):
"""
Produce an appropriate Cookie header string to be sent with `request`, or None.
:rtype: str
"""
r = MockRequest(request)
jar.add_cookie_header(r)
return r.get_new_headers().get('Cookie')
def remove_cookie_by_name(cookiejar, name, domain=None, path=None):
"""Unsets a cookie by name, by default over all domains and paths.
Wraps CookieJar.clear(), is O(n).
"""
clearables = []
for cookie in cookiejar:
if cookie.name != name:
continue
if domain is not None and domain != cookie.domain:
continue
if path is not None and path != cookie.path:
continue
clearables.append((cookie.domain, cookie.path, cookie.name))
for domain, path, name in clearables:
cookiejar.clear(domain, path, name)
class CookieConflictError(RuntimeError):
"""There are two cookies that meet the criteria specified in the cookie jar.
Use .get and .set and include domain and path args in order to be more specific.
"""
class RequestsCookieJar(cookielib.CookieJar, collections.MutableMapping):
"""Compatibility class; is a cookielib.CookieJar, but exposes a dict
interface.
This is the CookieJar we create by default for requests and sessions that
don't specify one, since some clients may expect response.cookies and
session.cookies to support dict operations.
Requests does not use the dict interface internally; it's just for
compatibility with external client code. All requests code should work
out of the box with externally provided instances of ``CookieJar``, e.g.
``LWPCookieJar`` and ``FileCookieJar``.
Unlike a regular CookieJar, this class is pickleable.
.. warning:: dictionary operations that are normally O(1) may be O(n).
"""
def get(self, name, default=None, domain=None, path=None):
"""Dict-like get() that also supports optional domain and path args in
order to resolve naming collisions from using one cookie jar over
multiple domains.
.. warning:: operation is O(n), not O(1).
"""
try:
return self._find_no_duplicates(name, domain, path)
except KeyError:
return default
def set(self, name, value, **kwargs):
"""Dict-like set() that also supports optional domain and path args in
order to resolve naming collisions from using one cookie jar over
multiple domains.
"""
# support client code that unsets cookies by assignment of a None value:
if value is None:
remove_cookie_by_name(self, name, domain=kwargs.get('domain'), path=kwargs.get('path'))
return
if isinstance(value, Morsel):
c = morsel_to_cookie(value)
else:
c = create_cookie(name, value, **kwargs)
self.set_cookie(c)
return c
def iterkeys(self):
"""Dict-like iterkeys() that returns an iterator of names of cookies
from the jar.
.. seealso:: itervalues() and iteritems().
"""
for cookie in iter(self):
yield cookie.name
def keys(self):
"""Dict-like keys() that returns a list of names of cookies from the
jar.
.. seealso:: values() and items().
"""
return list(self.iterkeys())
def itervalues(self):
"""Dict-like itervalues() that returns an iterator of values of cookies
from the jar.
.. seealso:: iterkeys() and iteritems().
"""
for cookie in iter(self):
yield cookie.value
def values(self):
"""Dict-like values() that returns a list of values of cookies from the
jar.
.. seealso:: keys() and items().
"""
return list(self.itervalues())
def iteritems(self):
"""Dict-like iteritems() that returns an iterator of name-value tuples
from the jar.
.. seealso:: iterkeys() and itervalues().
"""
for cookie in iter(self):
yield cookie.name, cookie.value
def items(self):
"""Dict-like items() that returns a list of name-value tuples from the
jar. Allows client-code to call ``dict(RequestsCookieJar)`` and get a
vanilla python dict of key value pairs.
.. seealso:: keys() and values().
"""
return list(self.iteritems())
def list_domains(self):
"""Utility method to list all the domains in the jar."""
domains = []
for cookie in iter(self):
if cookie.domain not in domains:
domains.append(cookie.domain)
return domains
def list_paths(self):
"""Utility method to list all the paths in the jar."""
paths = []
for cookie in iter(self):
if cookie.path not in paths:
paths.append(cookie.path)
return paths
def multiple_domains(self):
"""Returns True if there are multiple domains in the jar.
Returns False otherwise.
:rtype: bool
"""
domains = []
for cookie in iter(self):
if cookie.domain is not None and cookie.domain in domains:
return True
domains.append(cookie.domain)
return False # there is only one domain in jar
def get_dict(self, domain=None, path=None):
"""Takes as an argument an optional domain and path and returns a plain
old Python dict of name-value pairs of cookies that meet the
requirements.
:rtype: dict
"""
dictionary = {}
for cookie in iter(self):
if (domain is None or cookie.domain == domain) and (path is None
or cookie.path == path):
dictionary[cookie.name] = cookie.value
return dictionary
def __contains__(self, name):
try:
return super(RequestsCookieJar, self).__contains__(name)
except CookieConflictError:
return True
def __getitem__(self, name):
"""Dict-like __getitem__() for compatibility with client code. Throws
exception if there are more than one cookie with name. In that case,
use the more explicit get() method instead.
.. warning:: operation is O(n), not O(1).
"""
return self._find_no_duplicates(name)
def __setitem__(self, name, value):
"""Dict-like __setitem__ for compatibility with client code. Throws
exception if there is already a cookie of that name in the jar. In that
case, use the more explicit set() method instead.
"""
self.set(name, value)
def __delitem__(self, name):
"""Deletes a cookie given a name. Wraps ``cookielib.CookieJar``'s
``remove_cookie_by_name()``.
"""
remove_cookie_by_name(self, name)
def set_cookie(self, cookie, *args, **kwargs):
if hasattr(cookie.value, 'startswith') and cookie.value.startswith('"') and cookie.value.endswith('"'):
cookie.value = cookie.value.replace('\\"', '')
return super(RequestsCookieJar, self).set_cookie(cookie, *args, **kwargs)
def update(self, other):
"""Updates this jar with cookies from another CookieJar or dict-like"""
if isinstance(other, cookielib.CookieJar):
for cookie in other:
self.set_cookie(copy.copy(cookie))
else:
super(RequestsCookieJar, self).update(other)
def _find(self, name, domain=None, path=None):
"""Requests uses this method internally to get cookie values.
If there are conflicting cookies, _find arbitrarily chooses one.
See _find_no_duplicates if you want an exception thrown if there are
conflicting cookies.
:param name: a string containing name of cookie
:param domain: (optional) string containing domain of cookie
:param path: (optional) string containing path of cookie
:return: cookie.value
"""
for cookie in iter(self):
if cookie.name == name:
if domain is None or cookie.domain == domain:
if path is None or cookie.path == path:
return cookie.value
raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path))
def _find_no_duplicates(self, name, domain=None, path=None):
"""Both ``__get_item__`` and ``get`` call this function: it's never
used elsewhere in Requests.
:param name: a string containing name of cookie
:param domain: (optional) string containing domain of cookie
:param path: (optional) string containing path of cookie
:raises KeyError: if cookie is not found
:raises CookieConflictError: if there are multiple cookies
that match name and optionally domain and path
:return: cookie.value
"""
toReturn = None
for cookie in iter(self):
if cookie.name == name:
if domain is None or cookie.domain == domain:
if path is None or cookie.path == path:
if toReturn is not None: # if there are multiple cookies that meet passed in criteria
raise CookieConflictError('There are multiple cookies with name, %r' % (name))
toReturn = cookie.value # we will eventually return this as long as no cookie conflict
if toReturn:
return toReturn
raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path))
def __getstate__(self):
"""Unlike a normal CookieJar, this class is pickleable."""
state = self.__dict__.copy()
# remove the unpickleable RLock object
state.pop('_cookies_lock')
return state
def __setstate__(self, state):
"""Unlike a normal CookieJar, this class is pickleable."""
self.__dict__.update(state)
if '_cookies_lock' not in self.__dict__:
self._cookies_lock = threading.RLock()
def copy(self):
"""Return a copy of this RequestsCookieJar."""
new_cj = RequestsCookieJar()
new_cj.update(self)
return new_cj
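# Illustrative sketch (not from the original source): the dict-style
# surface of RequestsCookieJar in one place; the cookie name, value,
# domain and path are made up for the example.
def _example_dict_interface():
    jar = RequestsCookieJar()
    jar.set('token', 'abc', domain='example.com', path='/')
    assert jar['token'] == 'abc'                       # __getitem__ is O(n)
    assert jar.get_dict(domain='example.com') == {'token': 'abc'}
    del jar['token']                                   # wraps remove_cookie_by_name()
    return jar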
def _copy_cookie_jar(jar):
if jar is None:
return None
if hasattr(jar, 'copy'):
# We're dealing with an instance of RequestsCookieJar
return jar.copy()
# We're dealing with a generic CookieJar instance
new_jar = copy.copy(jar)
new_jar.clear()
for cookie in jar:
new_jar.set_cookie(copy.copy(cookie))
return new_jar
def create_cookie(name, value, **kwargs):
"""Make a cookie from underspecified parameters.
By default, the pair of `name` and `value` will be set for the domain ''
and sent on every request (this is sometimes called a "supercookie").
"""
result = dict(
version=0,
name=name,
value=value,
port=None,
domain='',
path='/',
secure=False,
expires=None,
discard=True,
comment=None,
comment_url=None,
rest={'HttpOnly': None},
rfc2109=False,)
badargs = set(kwargs) - set(result)
if badargs:
err = 'create_cookie() got unexpected keyword arguments: %s'
raise TypeError(err % list(badargs))
result.update(kwargs)
result['port_specified'] = bool(result['port'])
result['domain_specified'] = bool(result['domain'])
result['domain_initial_dot'] = result['domain'].startswith('.')
result['path_specified'] = bool(result['path'])
return cookielib.Cookie(**result)
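# Hedged sketch: create_cookie() derives cookielib.Cookie's many required
# attributes from a bare name/value pair; the domain and path below are
# example values, not defaults.
def _example_create_cookie():
    c = create_cookie('sessionid', 'xyz', domain='example.com', path='/app')
    assert c.domain_specified and not c.domain_initial_dot
    assert c.path == '/app' and not c.port_specified
    return c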
def morsel_to_cookie(morsel):
"""Convert a Morsel object into a Cookie containing the one k/v pair."""
expires = None
if morsel['max-age']:
try:
expires = int(time.time() + int(morsel['max-age']))
except ValueError:
raise TypeError('max-age: %s must be integer' % morsel['max-age'])
elif morsel['expires']:
time_template = '%a, %d-%b-%Y %H:%M:%S GMT'
expires = calendar.timegm(
time.strptime(morsel['expires'], time_template)
)
return create_cookie(
comment=morsel['comment'],
comment_url=bool(morsel['comment']),
discard=False,
domain=morsel['domain'],
expires=expires,
name=morsel.key,
path=morsel['path'],
port=None,
rest={'HttpOnly': morsel['httponly']},
rfc2109=False,
secure=bool(morsel['secure']),
value=morsel.value,
version=morsel['version'] or 0,
)
def cookiejar_from_dict(cookie_dict, cookiejar=None, overwrite=True):
"""Returns a CookieJar from a key/value dictionary.
:param cookie_dict: Dict of key/values to insert into CookieJar.
:param cookiejar: (optional) A cookiejar to add the cookies to.
:param overwrite: (optional) If False, will not replace cookies
already in the jar with new ones.
"""
if cookiejar is None:
cookiejar = RequestsCookieJar()
if cookie_dict is not None:
names_from_jar = [cookie.name for cookie in cookiejar]
for name in cookie_dict:
if overwrite or (name not in names_from_jar):
cookiejar.set_cookie(create_cookie(name, cookie_dict[name]))
return cookiejar
def merge_cookies(cookiejar, cookies):
"""Add cookies to cookiejar and returns a merged CookieJar.
:param cookiejar: CookieJar object to add the cookies to.
:param cookies: Dictionary or CookieJar object to be added.
"""
if not isinstance(cookiejar, cookielib.CookieJar):
raise ValueError('You can only merge into CookieJar')
if isinstance(cookies, dict):
cookiejar = cookiejar_from_dict(
cookies, cookiejar=cookiejar, overwrite=False)
elif isinstance(cookies, cookielib.CookieJar):
try:
cookiejar.update(cookies)
except AttributeError:
for cookie_in_jar in cookies:
cookiejar.set_cookie(cookie_in_jar)
return cookiejar
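# Hedged sketch combining cookiejar_from_dict() and merge_cookies(): the
# merge path passes overwrite=False, so cookies already in the jar win.
# Names and values are illustrative only.
def _example_merge():
    jar = cookiejar_from_dict({'a': '1'})
    jar = merge_cookies(jar, {'a': 'ignored', 'b': '2'})
    assert jar.get_dict() == {'a': '1', 'b': '2'}
    return jar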
|
2014c2g3/0623exam | refs/heads/master | static/Brython3.1.1-20150328-091302/Lib/site-packages/pygame/base_1.py | 603 | #!/usr/bin/env python
## https://bitbucket.org/pygame/pygame/raw/2383b8ab0e2273bc83c545ab9c18fee1f3459c64/pygame/base.py
'''Pygame core routines
Contains the core routines that are used by the rest of the
pygame modules. Its routines are merged directly into the pygame
namespace. This mainly includes the auto-initialization `init` and
`quit` routines.
There is a small module named `locals` that also gets merged into
this namespace. This contains all the constants needed by pygame.
Object constructors also get placed into this namespace; you can
call functions like `Rect` and `Surface` to create objects of
that type. As a convenience, you can import the members of
pygame.locals directly into your module's namespace with::
from pygame.locals import *
Most of the pygame examples do this if you'd like to take a look.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import atexit
import sys
#import SDL
_quitfunctions = []
class error(RuntimeError):
pass
def init():
'''Autoinitialize all imported pygame modules.
Initialize all imported pygame modules. Includes pygame modules
that are not part of the base modules (like font and image).
It does not raise exceptions, but instead silently counts which
    modules have failed to init. The return value contains a count
of the number of modules initialized, and the number of modules
that failed to initialize.
You can always initialize the modules you want by hand. The
modules that need it have an `init` and `quit` routine built in,
which you can call directly. They also have a `get_init` routine
    which you can use to double-check the initialization. Note that
the manual `init` routines will raise an exception on error. Be
aware that most platforms require the display module to be
initialized before others. This `init` will handle that for you,
but if you initialize by hand, be aware of this constraint.
    As with the manual `init` routines, it is safe to call this
`init` as often as you like.
:rtype: int, int
:return: (count_passed, count_failed)
'''
success = 0
fail = 0
#SDL.SDL_Init(SDL.SDL_INIT_EVENTTHREAD | SDL.SDL_INIT_TIMER)
if _video_autoinit():
success += 1
else:
fail += 1
for mod in sys.modules.values():
if hasattr(mod, '__PYGAMEinit__') and callable(mod.__PYGAMEinit__):
try:
mod.__PYGAMEinit__()
success += 1
except:
fail += 1
return success, fail
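# Illustrative call pattern (a sketch, not part of the original file):
# the tuple return makes partial failures visible without raising.
def _example_init():
    passed, failed = init()
    if failed:
        print('%d pygame modules failed to initialize' % failed)
    return passed, failed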
def register_quit(func):
'''Routine to call when pygame quits.
The given callback routine will be called when pygame is
quitting. Quit callbacks are served on a 'last in, first out'
basis.
'''
_quitfunctions.append(func)
def _video_autoquit():
    # SDL is stubbed out in this port (its import above is commented), so
    # the shutdown calls are disabled too; leaving them live would raise a
    # NameError from the atexit handler.
    #if SDL.SDL_WasInit(SDL.SDL_INIT_VIDEO):
    #    SDL.SDL_QuitSubSystem(SDL.SDL_INIT_VIDEO)
    pass
def _video_autoinit():
return 1
#if not SDL.SDL_WasInit(SDL.SDL_INIT_VIDEO):
# SDL.SDL_InitSubSystem(SDL.SDL_INIT_VIDEO)
# SDL.SDL_EnableUNICODE(1)
#return 1
def _atexit_quit():
while _quitfunctions:
func = _quitfunctions.pop()
func()
_video_autoquit()
#SDL.SDL_Quit()
def get_sdl_version():
'''Get the version of the linked SDL runtime.
:rtype: int, int, int
:return: major, minor, patch
'''
#v = SDL.SDL_Linked_Version()
#return v.major, v.minor, v.patch
return None, None, None
def quit():
'''Uninitialize all pygame modules.
Uninitialize all pygame modules that have been initialized. Even
if you initialized the module by hand, this `quit` will
uninitialize it for you.
    All the pygame modules are uninitialized automatically when your
    program exits, so you will usually not need this routine. If your
    program plans to keep running after it is done with pygame, this
    would be a good time to make this call.
'''
_atexit_quit()
def get_error():
'''Get current error message.
SDL maintains an internal current error message. This message is
usually given to you when an SDL related exception occurs, but
sometimes you may want to call this directly yourself.
:rtype: str
'''
#return SDL.SDL_GetError()
return ''
def _rgba_from_obj(obj):
if not type(obj) in (tuple, list):
return None
if len(obj) == 1:
return _rgba_from_obj(obj[0])
elif len(obj) == 3:
return (int(obj[0]), int(obj[1]), int(obj[2]), 255)
elif len(obj) == 4:
return obj
else:
return None
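# Hedged sanity sketch of _rgba_from_obj's normalization rules, derived
# from the code above: 3-sequences gain an opaque alpha, 4-sequences pass
# through unchanged, and anything else maps to None.
def _example_rgba_from_obj():
    assert _rgba_from_obj((255, 0, 0)) == (255, 0, 0, 255)
    assert _rgba_from_obj([10, 20, 30, 40]) == [10, 20, 30, 40]
    assert _rgba_from_obj('red') is None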
atexit.register(_atexit_quit)
|
KevinFuU/flasky | refs/heads/master | migrations/versions/190163627111_account_confirmation.py | 144 | """account confirmation
Revision ID: 190163627111
Revises: 456a945560f6
Create Date: 2013-12-29 02:58:45.577428
"""
# revision identifiers, used by Alembic.
revision = '190163627111'
down_revision = '456a945560f6'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('confirmed', sa.Boolean(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'confirmed')
### end Alembic commands ###
|
ikaee/bfr-attendant | refs/heads/master | facerecognitionlibrary/jni-build/jni/include/tensorflow/contrib/distributions/__init__.py | 2 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes representing statistical distributions and ops for working with them.
See the @{$python/contrib.distributions} guide.
## Distribution Object
@@ReparameterizationType
@@Distribution
## Individual Distributions
@@Binomial
@@Bernoulli
@@BernoulliWithSigmoidProbs
@@Beta
@@BetaWithSoftplusConcentration
@@Categorical
@@Chi2
@@Chi2WithAbsDf
@@Deterministic
@@VectorDeterministic
@@Exponential
@@ExponentialWithSoftplusRate
@@Gamma
@@GammaWithSoftplusConcentrationRate
@@Geometric
@@InverseGamma
@@InverseGammaWithSoftplusConcentrationRate
@@Laplace
@@LaplaceWithSoftplusScale
@@Logistic
@@NegativeBinomial
@@Normal
@@NormalWithSoftplusScale
@@Poisson
@@StudentT
@@StudentTWithAbsDfSoftplusScale
@@Uniform
@@MultivariateNormalDiag
@@MultivariateNormalTriL
@@MultivariateNormalDiagPlusLowRank
@@MultivariateNormalDiagWithSoftplusScale
@@Dirichlet
@@DirichletMultinomial
@@Multinomial
@@WishartCholesky
@@WishartFull
@@TransformedDistribution
@@QuantizedDistribution
@@Mixture
@@ExpRelaxedOneHotCategorical
@@OneHotCategorical
@@RelaxedBernoulli
@@RelaxedOneHotCategorical
## Kullback-Leibler Divergence
@@kl
@@RegisterKL
## Helper Functions
@@matrix_diag_transform
@@normal_conjugates_known_scale_posterior
@@normal_conjugates_known_scale_predictive
@@softplus_inverse
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import,line-too-long,g-importing-member
from tensorflow.contrib.distributions.python.ops.bernoulli import *
from tensorflow.contrib.distributions.python.ops.beta import *
from tensorflow.contrib.distributions.python.ops.bijectors import *
from tensorflow.contrib.distributions.python.ops.binomial import *
from tensorflow.contrib.distributions.python.ops.categorical import *
from tensorflow.contrib.distributions.python.ops.chi2 import *
from tensorflow.contrib.distributions.python.ops.conditional_distribution import *
from tensorflow.contrib.distributions.python.ops.conditional_transformed_distribution import *
from tensorflow.contrib.distributions.python.ops.deterministic import *
from tensorflow.contrib.distributions.python.ops.dirichlet import *
from tensorflow.contrib.distributions.python.ops.dirichlet_multinomial import *
from tensorflow.contrib.distributions.python.ops.distribution import *
from tensorflow.contrib.distributions.python.ops.distribution_util import matrix_diag_transform
from tensorflow.contrib.distributions.python.ops.distribution_util import softplus_inverse
from tensorflow.contrib.distributions.python.ops.exponential import *
from tensorflow.contrib.distributions.python.ops.gamma import *
from tensorflow.contrib.distributions.python.ops.geometric import *
from tensorflow.contrib.distributions.python.ops.inverse_gamma import *
from tensorflow.contrib.distributions.python.ops.kullback_leibler import *
from tensorflow.contrib.distributions.python.ops.laplace import *
from tensorflow.contrib.distributions.python.ops.logistic import *
from tensorflow.contrib.distributions.python.ops.mixture import *
from tensorflow.contrib.distributions.python.ops.multinomial import *
from tensorflow.contrib.distributions.python.ops.mvn_diag import *
from tensorflow.contrib.distributions.python.ops.mvn_diag_plus_low_rank import *
from tensorflow.contrib.distributions.python.ops.mvn_tril import *
from tensorflow.contrib.distributions.python.ops.negative_binomial import *
from tensorflow.contrib.distributions.python.ops.normal import *
from tensorflow.contrib.distributions.python.ops.normal_conjugate_posteriors import *
from tensorflow.contrib.distributions.python.ops.onehot_categorical import *
from tensorflow.contrib.distributions.python.ops.poisson import *
from tensorflow.contrib.distributions.python.ops.quantized_distribution import *
from tensorflow.contrib.distributions.python.ops.relaxed_bernoulli import *
from tensorflow.contrib.distributions.python.ops.relaxed_onehot_categorical import *
from tensorflow.contrib.distributions.python.ops.student_t import *
from tensorflow.contrib.distributions.python.ops.transformed_distribution import *
from tensorflow.contrib.distributions.python.ops.uniform import *
from tensorflow.contrib.distributions.python.ops.wishart import *
# pylint: enable=unused-import,wildcard-import,line-too-long,g-importing-member
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ['ConditionalDistribution',
'ConditionalTransformedDistribution',
'FULLY_REPARAMETERIZED', 'NOT_REPARAMETERIZED']
remove_undocumented(__name__, _allowed_symbols)
|
jkadlec/knot-dns-zoneapi | refs/heads/master | tests-extra/tests/basic/flags/test.py | 2 | #!/usr/bin/env python3
'''Test for header flags in response'''
from dnstest.test import Test
t = Test()
knot = t.server("knot")
bind = t.server("bind")
zone = t.zone("flags.")
# Disable ANY over UDP
knot.disable_any = True
t.link(zone, knot)
t.link(zone, bind)
t.start()
# RD flag preservation.
resp = knot.dig("flags", "NS", flags="RD")
resp.check(flags="QR AA RD", noflags="TC RA AD CD")
resp.cmp(bind)
# CD flag preservation.
resp = knot.dig("flags", "NS", flags="CD")
resp.check(flags="QR AA CD", noflags="TC RA AD RD")
resp.cmp(bind)
# TC flag must be cleared
resp = knot.dig("flags", "NS", flags="TC")
resp.check(flags="QR AA", noflags="TC RA AD CD RD")
resp.cmp(bind)
# AD flag must be cleared
resp = knot.dig("flags", "NS", flags="AD")
resp.check(flags="QR AA", noflags="TC RA AD CD RD")
resp.cmp(bind)
# AA flag must be cleared
resp = knot.dig("sub.flags", "NS", flags="AA")
resp.check(flags="QR", noflags="AA TC RD RA AD CD")
resp.cmp(bind)
# RA flag must be cleared
resp = knot.dig("flags", "NS", flags="RA")
resp.check(flags="QR AA", noflags="TC RA AD CD RD")
resp.cmp(bind)
# NS record for delegated subdomain (not authoritative).
resp = knot.dig("sub.flags", "NS")
resp.check(flags="QR", noflags="AA TC RD RA AD CD")
resp.cmp(bind)
# Glue record for delegated subdomain (not authoritative).
resp = knot.dig("ns.sub.flags", "A")
resp.check(flags="QR", noflags="AA TC RD RA AD CD")
resp.cmp(bind)
# Check maximal UDP payload which fits into a response message.
resp = knot.dig("512resp.flags", "TXT", udp=True)
resp.check(flags="QR AA", noflags="TC RD RA AD CD")
resp.cmp(bind, flags=False) # Bind returns TC compared to Knot!
# TC bit - UDP.
resp = knot.dig("513resp.flags", "TXT", udp=True)
resp.check(flags="QR AA TC", noflags="RD RA AD CD")
resp.cmp(bind, authority=False) # Knot puts SOA compared to Bind!
# No TC bit - TCP.
resp = knot.dig("513resp.flags", "TXT", udp=False)
resp.check(flags="QR AA", noflags="TC RD RA AD CD")
resp.cmp(bind)
# Check ANY over UDP (expects TC=1)
resp = knot.dig("flags", "ANY", udp=True)
resp.check(flags="QR AA TC", noflags="RD RA AD CD")
# Check ANY over TCP(expects TC=0)
resp = knot.dig("flags", "ANY", udp=False)
resp.check(flags="QR AA", noflags="TC RD RA AD CD")
t.end()
|
HyperGroups/pelican | refs/heads/master | pelican/tests/test_importer.py | 15 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import os
import re
import locale
from codecs import open
from pelican.tools.pelican_import import (
    wp2fields, fields2pelican, decode_wp_content, build_header,
    build_markdown_header, get_attachments, download_attachments)
from pelican.tests.support import (unittest, temporary_folder, mute,
skipIfNoExecutable)
from pelican.utils import slugify, path_to_file_url
CUR_DIR = os.path.abspath(os.path.dirname(__file__))
WORDPRESS_XML_SAMPLE = os.path.join(CUR_DIR, 'content', 'wordpressexport.xml')
WORDPRESS_ENCODED_CONTENT_SAMPLE = os.path.join(CUR_DIR,
'content',
'wordpress_content_encoded')
WORDPRESS_DECODED_CONTENT_SAMPLE = os.path.join(CUR_DIR,
'content',
'wordpress_content_decoded')
try:
from bs4 import BeautifulSoup
except ImportError:
BeautifulSoup = False # NOQA
try:
import bs4.builder._lxml as LXML
except ImportError:
LXML = False
@skipIfNoExecutable(['pandoc', '--version'])
@unittest.skipUnless(BeautifulSoup, 'Needs BeautifulSoup module')
class TestWordpressXmlImporter(unittest.TestCase):
def setUp(self):
self.old_locale = locale.setlocale(locale.LC_ALL)
locale.setlocale(locale.LC_ALL, str('C'))
self.posts = list(wp2fields(WORDPRESS_XML_SAMPLE))
self.custposts = list(wp2fields(WORDPRESS_XML_SAMPLE, True))
def tearDown(self):
locale.setlocale(locale.LC_ALL, self.old_locale)
def test_ignore_empty_posts(self):
self.assertTrue(self.posts)
for title, content, fname, date, author, categ, tags, status, kind, format in self.posts:
self.assertTrue(title.strip())
def test_recognise_page_kind(self):
""" Check that we recognise pages in wordpress, as opposed to posts """
self.assertTrue(self.posts)
# Collect (title, filename, kind) of non-empty posts recognised as page
pages_data = []
for title, content, fname, date, author, categ, tags, status, kind, format in self.posts:
if kind == 'page':
pages_data.append((title, fname))
self.assertEqual(2, len(pages_data))
self.assertEqual(('Page', 'contact'), pages_data[0])
self.assertEqual(('Empty Page', 'empty'), pages_data[1])
def test_dirpage_directive_for_page_kind(self):
silent_f2p = mute(True)(fields2pelican)
test_post = filter(lambda p: p[0].startswith("Empty Page"), self.posts)
with temporary_folder() as temp:
fname = list(silent_f2p(test_post, 'markdown', temp, dirpage=True))[0]
self.assertTrue(fname.endswith('pages%sempty.md' % os.path.sep))
def test_dircat(self):
silent_f2p = mute(True)(fields2pelican)
test_posts = []
for post in self.posts:
# check post kind
if len(post[5]) > 0: # Has a category
test_posts.append(post)
with temporary_folder() as temp:
fnames = list(silent_f2p(test_posts, 'markdown', temp, dircat=True))
index = 0
for post in test_posts:
name = post[2]
category = slugify(post[5][0])
name += '.md'
filename = os.path.join(category, name)
out_name = fnames[index]
self.assertTrue(out_name.endswith(filename))
index += 1
def test_unless_custom_post_all_items_should_be_pages_or_posts(self):
self.assertTrue(self.posts)
pages_data = []
for title, content, fname, date, author, categ, tags, status, kind, format in self.posts:
if kind == 'page' or kind == 'article':
pass
else:
pages_data.append((title, fname))
self.assertEqual(0, len(pages_data))
def test_recognise_custom_post_type(self):
self.assertTrue(self.custposts)
cust_data = []
for title, content, fname, date, author, categ, tags, status, kind, format in self.custposts:
if kind == 'article' or kind == 'page':
pass
else:
cust_data.append((title, kind))
self.assertEqual(3, len(cust_data))
self.assertEqual(('A custom post in category 4', 'custom1'), cust_data[0])
self.assertEqual(('A custom post in category 5', 'custom1'), cust_data[1])
self.assertEqual(('A 2nd custom post type also in category 5', 'custom2'), cust_data[2])
def test_custom_posts_put_in_own_dir(self):
silent_f2p = mute(True)(fields2pelican)
test_posts = []
for post in self.custposts:
# check post kind
if post[8] == 'article' or post[8] == 'page':
pass
else:
test_posts.append(post)
with temporary_folder() as temp:
            fnames = list(silent_f2p(test_posts, 'markdown', temp, wp_custpost=True))
index = 0
for post in test_posts:
name = post[2]
kind = post[8]
name += '.md'
filename = os.path.join(kind, name)
out_name = fnames[index]
self.assertTrue(out_name.endswith(filename))
index += 1
def test_custom_posts_put_in_own_dir_and_catagory_sub_dir(self):
silent_f2p = mute(True)(fields2pelican)
test_posts = []
for post in self.custposts:
# check post kind
if post[8] == 'article' or post[8] == 'page':
pass
else:
test_posts.append(post)
with temporary_folder() as temp:
fnames = list(silent_f2p(test_posts, 'markdown', temp,
wp_custpost=True, dircat=True))
index = 0
for post in test_posts:
name = post[2]
kind = post[8]
category = slugify(post[5][0])
name += '.md'
filename = os.path.join(kind, category, name)
out_name = fnames[index]
self.assertTrue(out_name.endswith(filename))
index += 1
def test_wp_custpost_true_dirpage_false(self):
        # pages should only be put in their own directory when dirpage=True
silent_f2p = mute(True)(fields2pelican)
test_posts = []
for post in self.custposts:
# check post kind
if post[8] == 'page':
test_posts.append(post)
with temporary_folder() as temp:
fnames = list(silent_f2p(test_posts, 'markdown', temp,
wp_custpost=True, dirpage=False))
index = 0
for post in test_posts:
name = post[2]
name += '.md'
filename = os.path.join('pages', name)
out_name = fnames[index]
self.assertFalse(out_name.endswith(filename))
def test_can_toggle_raw_html_code_parsing(self):
def r(f):
with open(f, encoding='utf-8') as infile:
return infile.read()
silent_f2p = mute(True)(fields2pelican)
with temporary_folder() as temp:
rst_files = (r(f) for f in silent_f2p(self.posts, 'markdown', temp))
self.assertTrue(any('<iframe' in rst for rst in rst_files))
rst_files = (r(f) for f in silent_f2p(self.posts, 'markdown', temp,
strip_raw=True))
self.assertFalse(any('<iframe' in rst for rst in rst_files))
# no effect in rst
rst_files = (r(f) for f in silent_f2p(self.posts, 'rst', temp))
self.assertFalse(any('<iframe' in rst for rst in rst_files))
rst_files = (r(f) for f in silent_f2p(self.posts, 'rst', temp,
strip_raw=True))
self.assertFalse(any('<iframe' in rst for rst in rst_files))
def test_decode_html_entities_in_titles(self):
test_posts = [post for post in self.posts if post[2] == 'html-entity-test']
self.assertEqual(len(test_posts), 1)
post = test_posts[0]
title = post[0]
        self.assertEqual(title, "A normal post with some <html> entities in the"
                                " title. You can't miss them.")
self.assertNotIn('&', title)
def test_decode_wp_content_returns_empty(self):
""" Check that given an empty string we return an empty string."""
self.assertEqual(decode_wp_content(""), "")
def test_decode_wp_content(self):
""" Check that we can decode a wordpress content string."""
with open(WORDPRESS_ENCODED_CONTENT_SAMPLE, 'r') as encoded_file:
encoded_content = encoded_file.read()
with open(WORDPRESS_DECODED_CONTENT_SAMPLE, 'r') as decoded_file:
decoded_content = decoded_file.read()
self.assertEqual(decode_wp_content(encoded_content, br=False), decoded_content)
def test_preserve_verbatim_formatting(self):
def r(f):
with open(f, encoding='utf-8') as infile:
return infile.read()
silent_f2p = mute(True)(fields2pelican)
test_post = filter(lambda p: p[0].startswith("Code in List"), self.posts)
with temporary_folder() as temp:
md = [r(f) for f in silent_f2p(test_post, 'markdown', temp)][0]
self.assertTrue(re.search(r'\s+a = \[1, 2, 3\]', md))
self.assertTrue(re.search(r'\s+b = \[4, 5, 6\]', md))
for_line = re.search(r'\s+for i in zip\(a, b\):', md).group(0)
print_line = re.search(r'\s+print i', md).group(0)
self.assertTrue(for_line.rindex('for') < print_line.rindex('print'))
def test_code_in_list(self):
def r(f):
with open(f, encoding='utf-8') as infile:
return infile.read()
silent_f2p = mute(True)(fields2pelican)
test_post = filter(lambda p: p[0].startswith("Code in List"), self.posts)
with temporary_folder() as temp:
md = [r(f) for f in silent_f2p(test_post, 'markdown', temp)][0]
sample_line = re.search(r'- This is a code sample', md).group(0)
code_line = re.search(r'\s+a = \[1, 2, 3\]', md).group(0)
self.assertTrue(sample_line.rindex('This') < code_line.rindex('a'))
class TestBuildHeader(unittest.TestCase):
def test_build_header(self):
header = build_header('test', None, None, None, None, None)
self.assertEqual(header, 'test\n####\n\n')
def test_build_header_with_fields(self):
header_data = [
'Test Post',
'2014-11-04',
'Alexis Métaireau',
['Programming'],
['Pelican', 'Python'],
'test-post',
]
expected_docutils = '\n'.join([
'Test Post',
'#########',
':date: 2014-11-04',
':author: Alexis Métaireau',
':category: Programming',
':tags: Pelican, Python',
':slug: test-post',
'\n',
])
expected_md = '\n'.join([
'Title: Test Post',
'Date: 2014-11-04',
'Author: Alexis Métaireau',
'Category: Programming',
'Tags: Pelican, Python',
'Slug: test-post',
'\n',
])
self.assertEqual(build_header(*header_data), expected_docutils)
self.assertEqual(build_markdown_header(*header_data), expected_md)
def test_build_header_with_east_asian_characters(self):
header = build_header('これは広い幅の文字だけで構成されたタイトルです',
None, None, None, None, None)
self.assertEqual(header,
'これは広い幅の文字だけで構成されたタイトルです\n' +
'##############################################\n\n')
def test_galleries_added_to_header(self):
header = build_header('test', None, None, None, None,
None, attachments=['output/test1', 'output/test2'])
self.assertEqual(header, 'test\n####\n' + ':attachments: output/test1, '
+ 'output/test2\n\n')
def test_galleries_added_to_markdown_header(self):
header = build_markdown_header('test', None, None, None, None, None,
attachments=['output/test1', 'output/test2'])
self.assertEqual(header, 'Title: test\n' + 'Attachments: output/test1, '
+ 'output/test2\n\n')
@unittest.skipUnless(BeautifulSoup, 'Needs BeautifulSoup module')
@unittest.skipUnless(LXML, 'Needs lxml module')
class TestWordpressXMLAttachements(unittest.TestCase):
def setUp(self):
self.old_locale = locale.setlocale(locale.LC_ALL)
locale.setlocale(locale.LC_ALL, str('C'))
self.attachments = get_attachments(WORDPRESS_XML_SAMPLE)
def tearDown(self):
locale.setlocale(locale.LC_ALL, self.old_locale)
def test_recognise_attachments(self):
self.assertTrue(self.attachments)
        self.assertEqual(len(self.attachments), 3)
def test_attachments_associated_with_correct_post(self):
self.assertTrue(self.attachments)
        for post in self.attachments.keys():
            if post is None:
                self.assertEqual(self.attachments[post][0], 'https://upload.wikimedia.org/wikipedia/commons/thumb/2/2c/Pelican_lakes_entrance02.jpg/240px-Pelican_lakes_entrance02.jpg')
            elif post == 'with-excerpt':
                self.assertEqual(self.attachments[post][0], 'http://thisurlisinvalid.notarealdomain/not_an_image.jpg')
                self.assertEqual(self.attachments[post][1], 'http://en.wikipedia.org/wiki/File:Pelikan_Walvis_Bay.jpg')
            elif post == 'with-tags':
                self.assertEqual(self.attachments[post][0], 'http://thisurlisinvalid.notarealdomain')
            else:
                self.fail('all attachments should match to a filename or None, {}'.format(post))
def test_download_attachments(self):
real_file = os.path.join(CUR_DIR, 'content/article.rst')
good_url = path_to_file_url(real_file)
bad_url = 'http://localhost:1/not_a_file.txt'
silent_da = mute()(download_attachments)
with temporary_folder() as temp:
locations = list(silent_da(temp, [good_url, bad_url]))
self.assertEqual(1, len(locations))
directory = locations[0]
self.assertTrue(directory.endswith(os.path.join('content', 'article.rst')), directory)
|
blaskovic/systemd-rhel | refs/heads/master | src/python-systemd/journal.py | 39 | # -*- Mode: python; coding:utf-8; indent-tabs-mode: nil -*- */
#
# This file is part of systemd.
#
# Copyright 2012 David Strauss <david@davidstrauss.net>
# Copyright 2012 Zbigniew Jędrzejewski-Szmek <zbyszek@in.waw.pl>
# Copyright 2012 Marti Raudsepp <marti@juffo.org>
#
# systemd is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# systemd is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with systemd; If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
import sys as _sys
import datetime as _datetime
import uuid as _uuid
import traceback as _traceback
import os as _os
import logging as _logging
if _sys.version_info >= (3,3):
from collections import ChainMap as _ChainMap
from syslog import (LOG_EMERG, LOG_ALERT, LOG_CRIT, LOG_ERR,
LOG_WARNING, LOG_NOTICE, LOG_INFO, LOG_DEBUG)
from ._journal import __version__, sendv, stream_fd
from ._reader import (_Reader, NOP, APPEND, INVALIDATE,
LOCAL_ONLY, RUNTIME_ONLY,
SYSTEM, SYSTEM_ONLY, CURRENT_USER,
_get_catalog)
from . import id128 as _id128
if _sys.version_info >= (3,):
from ._reader import Monotonic
else:
Monotonic = tuple
def _convert_monotonic(m):
return Monotonic((_datetime.timedelta(microseconds=m[0]),
_uuid.UUID(bytes=m[1])))
def _convert_source_monotonic(s):
return _datetime.timedelta(microseconds=int(s))
def _convert_realtime(t):
return _datetime.datetime.fromtimestamp(t / 1000000)
def _convert_timestamp(s):
return _datetime.datetime.fromtimestamp(int(s) / 1000000)
def _convert_trivial(x):
return x
if _sys.version_info >= (3,):
def _convert_uuid(s):
return _uuid.UUID(s.decode())
else:
_convert_uuid = _uuid.UUID
DEFAULT_CONVERTERS = {
'MESSAGE_ID': _convert_uuid,
'_MACHINE_ID': _convert_uuid,
'_BOOT_ID': _convert_uuid,
'PRIORITY': int,
'LEADER': int,
'SESSION_ID': int,
'USERSPACE_USEC': int,
'INITRD_USEC': int,
'KERNEL_USEC': int,
'_UID': int,
'_GID': int,
'_PID': int,
'SYSLOG_FACILITY': int,
'SYSLOG_PID': int,
'_AUDIT_SESSION': int,
'_AUDIT_LOGINUID': int,
'_SYSTEMD_SESSION': int,
'_SYSTEMD_OWNER_UID': int,
'CODE_LINE': int,
'ERRNO': int,
'EXIT_STATUS': int,
'_SOURCE_REALTIME_TIMESTAMP': _convert_timestamp,
'__REALTIME_TIMESTAMP': _convert_realtime,
'_SOURCE_MONOTONIC_TIMESTAMP': _convert_source_monotonic,
'__MONOTONIC_TIMESTAMP': _convert_monotonic,
'__CURSOR': _convert_trivial,
'COREDUMP': bytes,
'COREDUMP_PID': int,
'COREDUMP_UID': int,
'COREDUMP_GID': int,
'COREDUMP_SESSION': int,
'COREDUMP_SIGNAL': int,
'COREDUMP_TIMESTAMP': _convert_timestamp,
}
_IDENT_LETTER = set('ABCDEFGHIJKLMNOPQRTSUVWXYZ_')
def _valid_field_name(s):
return not (set(s) - _IDENT_LETTER)
class Reader(_Reader):
"""Reader allows the access and filtering of systemd journal
entries. Note that in order to access the system journal, a
non-root user must be in the `systemd-journal` group.
Example usage to print out all informational or higher level
messages for systemd-udevd for this boot:
>>> j = journal.Reader()
>>> j.this_boot()
>>> j.log_level(journal.LOG_INFO)
>>> j.add_match(_SYSTEMD_UNIT="systemd-udevd.service")
>>> for entry in j:
... print(entry['MESSAGE'])
See systemd.journal-fields(7) for more info on typical fields
found in the journal.
"""
def __init__(self, flags=0, path=None, files=None, converters=None):
"""Create an instance of Reader, which allows filtering and
return of journal entries.
Argument `flags` sets open flags of the journal, which can be one
of, or ORed combination of constants: LOCAL_ONLY (default) opens
journal on local machine only; RUNTIME_ONLY opens only
volatile journal files; and SYSTEM_ONLY opens only
journal files of system services and the kernel.
Argument `path` is the directory of journal files. Note that
`flags` and `path` are exclusive.
Argument `converters` is a dictionary which updates the
DEFAULT_CONVERTERS to convert journal field values. Field
names are used as keys into this dictionary. The values must
be single argument functions, which take a `bytes` object and
return a converted value. When there's no entry for a field
        name, then the default UTF-8 decoding will be attempted. If
        the conversion fails with a ValueError, the unconverted bytes
        object will be returned. (Note that ValueError is a superclass
        of UnicodeDecodeError.)
Reader implements the context manager protocol: the journal
will be closed when exiting the block.
"""
super(Reader, self).__init__(flags, path, files)
if _sys.version_info >= (3,3):
self.converters = _ChainMap()
if converters is not None:
self.converters.maps.append(converters)
self.converters.maps.append(DEFAULT_CONVERTERS)
else:
self.converters = DEFAULT_CONVERTERS.copy()
if converters is not None:
self.converters.update(converters)
def _convert_field(self, key, value):
"""Convert value using self.converters[key]
If `key` is not present in self.converters, a standard unicode
decoding will be attempted. If the conversion (either
key-specific or the default one) fails with a ValueError, the
original bytes object will be returned.
"""
convert = self.converters.get(key, bytes.decode)
try:
return convert(value)
except ValueError:
# Leave in default bytes
return value
def _convert_entry(self, entry):
"""Convert entire journal entry utilising _covert_field"""
result = {}
for key, value in entry.items():
if isinstance(value, list):
result[key] = [self._convert_field(key, val) for val in value]
else:
result[key] = self._convert_field(key, value)
return result
def __iter__(self):
"""Part of iterator protocol.
Returns self.
"""
return self
def __next__(self):
"""Part of iterator protocol.
Returns self.get_next() or raises StopIteration.
"""
ans = self.get_next()
if ans:
return ans
else:
raise StopIteration()
if _sys.version_info < (3,):
next = __next__
def add_match(self, *args, **kwargs):
"""Add one or more matches to the filter journal log entries.
All matches of different field are combined in a logical AND,
and matches of the same field are automatically combined in a
logical OR.
Matches can be passed as strings of form "FIELD=value", or
keyword arguments FIELD="value".
"""
args = list(args)
args.extend(_make_line(key, val) for key, val in kwargs.items())
for arg in args:
super(Reader, self).add_match(arg)
def get_next(self, skip=1):
"""Return the next log entry as a mapping type, currently
a standard dictionary of fields.
Optional skip value will return the `skip`\-th log entry.
Entries will be processed with converters specified during
Reader creation.
"""
if super(Reader, self)._next(skip):
entry = super(Reader, self)._get_all()
if entry:
entry['__REALTIME_TIMESTAMP'] = self._get_realtime()
entry['__MONOTONIC_TIMESTAMP'] = self._get_monotonic()
entry['__CURSOR'] = self._get_cursor()
return self._convert_entry(entry)
return dict()
def get_previous(self, skip=1):
"""Return the previous log entry as a mapping type,
currently a standard dictionary of fields.
Optional skip value will return the -`skip`\-th log entry.
Entries will be processed with converters specified during
Reader creation.
Equivalent to get_next(-skip).
"""
return self.get_next(-skip)
def query_unique(self, field):
"""Return unique values appearing in the journal for given `field`.
Note this does not respect any journal matches.
Entries will be processed with converters specified during
Reader creation.
"""
return set(self._convert_field(field, value)
for value in super(Reader, self).query_unique(field))
def wait(self, timeout=None):
"""Wait for a change in the journal. `timeout` is the maximum
time in seconds to wait, or None, to wait forever.
Returns one of NOP (no change), APPEND (new entries have been
added to the end of the journal), or INVALIDATE (journal files
have been added or removed).
"""
us = -1 if timeout is None else int(timeout * 1000000)
return super(Reader, self).wait(us)
def seek_realtime(self, realtime):
"""Seek to a matching journal entry nearest to `realtime` time.
Argument `realtime` must be either an integer unix timestamp
or datetime.datetime instance.
"""
if isinstance(realtime, _datetime.datetime):
realtime = float(realtime.strftime("%s.%f")) * 1000000
return super(Reader, self).seek_realtime(int(realtime))
def seek_monotonic(self, monotonic, bootid=None):
"""Seek to a matching journal entry nearest to `monotonic` time.
Argument `monotonic` is a timestamp from boot in either
seconds or a datetime.timedelta instance. Argument `bootid`
is a string or UUID representing which boot the monotonic time
is reference to. Defaults to current bootid.
"""
if isinstance(monotonic, _datetime.timedelta):
            monotonic = monotonic.total_seconds()
monotonic = int(monotonic * 1000000)
if isinstance(bootid, _uuid.UUID):
bootid = bootid.hex
return super(Reader, self).seek_monotonic(monotonic, bootid)
def log_level(self, level):
"""Set maximum log `level` by setting matches for PRIORITY.
"""
if 0 <= level <= 7:
for i in range(level+1):
self.add_match(PRIORITY="%d" % i)
else:
raise ValueError("Log level must be 0 <= level <= 7")
def messageid_match(self, messageid):
"""Add match for log entries with specified `messageid`.
        `messageid` can be a string of hexadecimal digits or a UUID
instance. Standard message IDs can be found in systemd.id128.
Equivalent to add_match(MESSAGE_ID=`messageid`).
"""
if isinstance(messageid, _uuid.UUID):
messageid = messageid.hex
self.add_match(MESSAGE_ID=messageid)
def this_boot(self, bootid=None):
"""Add match for _BOOT_ID equal to current boot ID or the specified boot ID.
If specified, bootid should be either a UUID or a 32 digit hex number.
Equivalent to add_match(_BOOT_ID='bootid').
"""
if bootid is None:
bootid = _id128.get_boot().hex
else:
bootid = getattr(bootid, 'hex', bootid)
self.add_match(_BOOT_ID=bootid)
def this_machine(self, machineid=None):
"""Add match for _MACHINE_ID equal to the ID of this machine.
If specified, machineid should be either a UUID or a 32 digit hex number.
Equivalent to add_match(_MACHINE_ID='machineid').
"""
if machineid is None:
machineid = _id128.get_machine().hex
else:
machineid = getattr(machineid, 'hex', machineid)
self.add_match(_MACHINE_ID=machineid)
def get_catalog(mid):
if isinstance(mid, _uuid.UUID):
mid = mid.hex
return _get_catalog(mid)
def _make_line(field, value):
if isinstance(value, bytes):
return field.encode('utf-8') + b'=' + value
elif isinstance(value, int):
return field + '=' + str(value)
else:
return field + '=' + value
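# Hedged sanity sketch (not in the original module): _make_line() keeps
# bytes payloads as bytes and renders str/int payloads as text.
def _example_make_line():
    assert _make_line('FIELD', b'\xde\xad') == b'FIELD=\xde\xad'
    assert _make_line('ERRNO', 7) == 'ERRNO=7'
    assert _make_line('MESSAGE', 'hi') == 'MESSAGE=hi'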
def send(MESSAGE, MESSAGE_ID=None,
CODE_FILE=None, CODE_LINE=None, CODE_FUNC=None,
**kwargs):
r"""Send a message to the journal.
>>> journal.send('Hello world')
>>> journal.send('Hello, again, world', FIELD2='Greetings!')
>>> journal.send('Binary message', BINARY=b'\xde\xad\xbe\xef')
Value of the MESSAGE argument will be used for the MESSAGE=
field. MESSAGE must be a string and will be sent as UTF-8 to
the journal.
MESSAGE_ID can be given to uniquely identify the type of
message. It must be a string or a uuid.UUID object.
CODE_LINE, CODE_FILE, and CODE_FUNC can be specified to
    identify the caller. Unless at least one of the three is given,
values are extracted from the stack frame of the caller of
send(). CODE_FILE and CODE_FUNC must be strings, CODE_LINE
must be an integer.
Additional fields for the journal entry can only be specified
as keyword arguments. The payload can be either a string or
bytes. A string will be sent as UTF-8, and bytes will be sent
as-is to the journal.
Other useful fields include PRIORITY, SYSLOG_FACILITY,
SYSLOG_IDENTIFIER, SYSLOG_PID.
"""
args = ['MESSAGE=' + MESSAGE]
if MESSAGE_ID is not None:
id = getattr(MESSAGE_ID, 'hex', MESSAGE_ID)
args.append('MESSAGE_ID=' + id)
    if CODE_LINE is None and CODE_FILE is None and CODE_FUNC is None:
CODE_FILE, CODE_LINE, CODE_FUNC = \
_traceback.extract_stack(limit=2)[0][:3]
if CODE_FILE is not None:
args.append('CODE_FILE=' + CODE_FILE)
if CODE_LINE is not None:
args.append('CODE_LINE={:d}'.format(CODE_LINE))
if CODE_FUNC is not None:
args.append('CODE_FUNC=' + CODE_FUNC)
args.extend(_make_line(key, val) for key, val in kwargs.items())
return sendv(*args)
def stream(identifier, priority=LOG_DEBUG, level_prefix=False):
r"""Return a file object wrapping a stream to journal.
    Log messages written to this file as simple newline separated
text strings are written to the journal.
The file will be line buffered, so messages are actually sent
after a newline character is written.
>>> stream = journal.stream('myapp')
>>> stream
<open file '<fdopen>', mode 'w' at 0x...>
>>> stream.write('message...\n')
will produce the following message in the journal::
PRIORITY=7
SYSLOG_IDENTIFIER=myapp
MESSAGE=message...
    Using the interface with print might be more convenient:
>>> from __future__ import print_function
>>> print('message...', file=stream)
priority is the syslog priority, one of `LOG_EMERG`,
`LOG_ALERT`, `LOG_CRIT`, `LOG_ERR`, `LOG_WARNING`,
`LOG_NOTICE`, `LOG_INFO`, `LOG_DEBUG`.
level_prefix is a boolean. If true, kernel-style log priority
level prefixes (such as '<1>') are interpreted. See
sd-daemon(3) for more information.
"""
fd = stream_fd(identifier, priority, level_prefix)
return _os.fdopen(fd, 'w', 1)
class JournalHandler(_logging.Handler):
"""Journal handler class for the Python logging framework.
Please see the Python logging module documentation for an
overview: http://docs.python.org/library/logging.html.
To create a custom logger whose messages go only to journal:
>>> log = logging.getLogger('custom_logger_name')
>>> log.propagate = False
>>> log.addHandler(journal.JournalHandler())
>>> log.warn("Some message: %s", detail)
Note that by default, message levels `INFO` and `DEBUG` are
ignored by the logging framework. To enable those log levels:
>>> log.setLevel(logging.DEBUG)
To redirect all logging messages to journal regardless of where
they come from, attach it to the root logger:
>>> logging.root.addHandler(journal.JournalHandler())
For more complex configurations when using `dictConfig` or
`fileConfig`, specify `systemd.journal.JournalHandler` as the
handler class. Only standard handler configuration options
are supported: `level`, `formatter`, `filters`.
To attach journal MESSAGE_ID, an extra field is supported:
>>> import uuid
>>> mid = uuid.UUID('0123456789ABCDEF0123456789ABCDEF')
>>> log.warn("Message with ID", extra={'MESSAGE_ID': mid})
Fields to be attached to all messages sent through this
handler can be specified as keyword arguments. This probably
makes sense only for SYSLOG_IDENTIFIER and similar fields
which are constant for the whole program:
>>> journal.JournalHandler(SYSLOG_IDENTIFIER='my-cool-app')
The following journal fields will be sent:
`MESSAGE`, `PRIORITY`, `THREAD_NAME`, `CODE_FILE`, `CODE_LINE`,
`CODE_FUNC`, `LOGGER` (name as supplied to getLogger call),
`MESSAGE_ID` (optional, see above), `SYSLOG_IDENTIFIER` (defaults
to sys.argv[0]).
"""
def __init__(self, level=_logging.NOTSET, **kwargs):
super(JournalHandler, self).__init__(level)
for name in kwargs:
if not _valid_field_name(name):
raise ValueError('Invalid field name: ' + name)
if 'SYSLOG_IDENTIFIER' not in kwargs:
kwargs['SYSLOG_IDENTIFIER'] = _sys.argv[0]
self._extra = kwargs
def emit(self, record):
"""Write record as journal event.
MESSAGE is taken from the message provided by the
user, and PRIORITY, LOGGER, THREAD_NAME,
CODE_{FILE,LINE,FUNC} fields are appended
automatically. In addition, record.MESSAGE_ID will be
used if present.
"""
try:
msg = self.format(record)
pri = self.mapPriority(record.levelno)
mid = getattr(record, 'MESSAGE_ID', None)
send(msg,
MESSAGE_ID=mid,
PRIORITY=format(pri),
LOGGER=record.name,
THREAD_NAME=record.threadName,
CODE_FILE=record.pathname,
CODE_LINE=record.lineno,
CODE_FUNC=record.funcName,
**self._extra)
except Exception:
self.handleError(record)
@staticmethod
def mapPriority(levelno):
"""Map logging levels to journald priorities.
Since Python log level numbers are "sparse", we have
to map numbers in between the standard levels too.
"""
if levelno <= _logging.DEBUG:
return LOG_DEBUG
elif levelno <= _logging.INFO:
return LOG_INFO
elif levelno <= _logging.WARNING:
return LOG_WARNING
elif levelno <= _logging.ERROR:
return LOG_ERR
elif levelno <= _logging.CRITICAL:
return LOG_CRIT
else:
return LOG_ALERT
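# Hedged sanity sketch (not in the original module): mapPriority() rounds
# "in-between" numeric levels up to the next standard syslog priority.
def _example_map_priority():
    assert JournalHandler.mapPriority(_logging.DEBUG) == LOG_DEBUG
    assert JournalHandler.mapPriority(25) == LOG_WARNING  # between INFO (20) and WARNING (30)
    assert JournalHandler.mapPriority(60) == LOG_ALERT    # above CRITICAL (50)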
|
talhaburak/Arduino | refs/heads/master | arduino-core/src/processing/app/i18n/python/requests/packages/charade/langhebrewmodel.py | 168 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Simon Montagu
# Portions created by the Initial Developer are Copyright (C) 2005
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Shoshannah Forbes - original C code (?)
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
# 255: Control characters that usually do not exist in any text
# 254: Carriage/Return
# 253: symbols (punctuation) that do not belong to a word
# 252: 0 - 9
# Windows-1255 language model
# Character Mapping Table:
win1255_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 69, 91, 79, 80, 92, 89, 97, 90, 68,111,112, 82, 73, 95, 85, # 40
78,121, 86, 71, 67,102,107, 84,114,103,115,253,253,253,253,253, # 50
253, 50, 74, 60, 61, 42, 76, 70, 64, 53,105, 93, 56, 65, 54, 49, # 60
66,110, 51, 43, 44, 63, 81, 77, 98, 75,108,253,253,253,253,253, # 70
124,202,203,204,205, 40, 58,206,207,208,209,210,211,212,213,214,
215, 83, 52, 47, 46, 72, 32, 94,216,113,217,109,218,219,220,221,
34,116,222,118,100,223,224,117,119,104,125,225,226, 87, 99,227,
106,122,123,228, 55,229,230,101,231,232,120,233, 48, 39, 57,234,
30, 59, 41, 88, 33, 37, 36, 31, 29, 35,235, 62, 28,236,126,237,
238, 38, 45,239,240,241,242,243,127,244,245,246,247,248,249,250,
9, 8, 20, 16, 3, 2, 24, 14, 22, 1, 25, 15, 4, 11, 6, 23,
12, 19, 13, 26, 18, 27, 21, 17, 7, 10, 5,251,252,128, 96,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 98.4004%
# first 1024 sequences: 1.5981%
# rest sequences: 0.087%
# negative sequences: 0.0015%
HebrewLangModel = (
0,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,3,2,1,2,0,1,0,0,
3,0,3,1,0,0,1,3,2,0,1,1,2,0,2,2,2,1,1,1,1,2,1,1,1,2,0,0,2,2,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,
1,2,1,2,1,2,0,0,2,0,0,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,
1,2,1,3,1,1,0,0,2,0,0,0,1,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,0,1,2,2,1,3,
1,2,1,1,2,2,0,0,2,2,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,1,0,1,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,2,2,2,2,3,2,
1,2,1,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,2,3,2,2,3,2,2,2,1,2,2,2,2,
1,2,1,1,2,2,0,1,2,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,0,2,2,2,2,2,
0,2,0,2,2,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,0,2,2,2,
0,2,1,2,2,2,0,0,2,1,0,0,0,0,1,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,2,1,2,3,2,2,2,
1,2,1,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,0,2,0,2,
0,2,1,2,2,2,0,0,1,2,0,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,2,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,2,2,3,2,1,2,1,1,1,
0,1,1,1,1,1,3,0,1,0,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,1,1,0,0,1,0,0,1,0,0,0,0,
0,0,1,0,0,0,0,0,2,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,
0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,2,1,2,3,3,2,3,3,3,3,2,3,2,1,2,0,2,1,2,
0,2,0,2,2,2,0,0,1,2,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,1,2,2,3,3,2,3,2,3,2,2,3,1,2,2,0,2,2,2,
0,2,1,2,2,2,0,0,1,2,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,1,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,2,2,2,3,3,3,3,1,3,2,2,2,
0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,3,3,3,2,3,2,2,2,1,2,2,0,2,2,2,2,
0,2,0,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,1,3,2,3,3,2,3,3,2,2,1,2,2,2,2,2,2,
0,2,1,2,1,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,2,3,3,2,3,3,3,3,2,3,2,3,3,3,3,3,2,2,2,2,2,2,2,1,
0,2,0,1,2,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,2,1,2,3,3,3,3,3,3,3,2,3,2,3,2,1,2,3,0,2,1,2,2,
0,2,1,1,2,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,2,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,3,2,1,3,1,2,2,2,1,2,3,3,1,2,1,2,2,2,2,
0,1,1,1,1,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,0,2,3,3,3,1,3,3,3,1,2,2,2,2,1,1,2,2,2,2,2,2,
0,2,0,1,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,2,2,3,3,3,2,1,2,3,2,3,2,2,2,2,1,2,1,1,1,2,2,
0,2,1,1,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,1,0,0,0,0,0,
1,0,1,0,0,0,0,0,2,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,3,2,3,1,2,2,2,2,3,2,3,1,1,2,2,1,2,2,1,1,0,2,2,2,2,
0,1,0,1,2,2,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,0,0,1,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,2,0,
0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,0,1,0,1,1,0,1,1,0,0,0,1,1,0,1,1,1,0,0,0,0,0,0,1,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,1,1,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
3,2,2,1,2,2,2,2,2,2,2,1,2,2,1,2,2,1,1,1,1,1,1,1,1,2,1,1,0,3,3,3,
0,3,0,2,2,2,2,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
2,2,2,3,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,2,1,2,2,2,1,1,1,2,0,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,2,2,2,2,2,2,2,0,2,2,0,0,0,0,0,0,
0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,1,0,2,1,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,1,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
0,3,1,1,2,2,2,2,2,1,2,2,2,1,1,2,2,2,2,2,2,2,1,2,2,1,0,1,1,1,1,0,
0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,1,1,1,1,2,1,1,2,1,0,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,0,0,0,0,
0,0,2,0,0,0,0,0,0,0,0,1,1,0,0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,1,0,0,
2,1,1,2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,2,1,2,1,2,1,1,1,1,0,0,0,0,
0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,1,2,2,2,2,2,2,2,2,2,2,1,2,1,2,1,1,2,1,1,1,2,1,2,1,2,0,1,0,1,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,1,2,2,2,1,2,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,2,1,2,1,1,0,1,0,1,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,2,
0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,1,1,1,1,1,1,0,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,1,1,1,0,1,0,0,0,1,1,0,1,1,0,0,0,0,0,1,1,0,0,
0,1,1,1,2,1,2,2,2,0,2,0,2,0,1,1,2,1,1,1,1,2,1,0,1,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,1,0,0,0,0,0,1,0,1,2,2,0,1,0,0,1,1,2,2,1,2,0,2,0,0,0,1,2,0,1,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,2,1,2,0,2,0,0,1,1,1,1,1,1,0,1,0,0,0,1,0,0,1,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,0,1,0,2,1,1,0,1,0,0,1,1,1,2,2,0,0,1,0,0,0,1,0,0,1,
1,1,2,1,0,1,1,1,0,1,0,1,1,1,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,2,1,
0,2,0,1,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,0,0,1,0,1,1,1,1,0,0,0,0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,1,1,1,1,1,1,1,2,1,0,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,1,1,1,0,1,1,0,1,0,0,0,1,1,0,1,
2,0,1,0,1,0,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,1,0,1,0,0,1,1,2,1,1,2,0,1,0,0,0,1,1,0,1,
1,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,0,0,2,1,1,2,0,2,0,0,0,1,1,0,1,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,2,1,1,0,1,0,0,2,2,1,2,1,1,0,1,0,0,0,1,1,0,1,
2,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,2,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,1,0,1,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,2,0,0,0,0,2,1,1,1,0,2,1,1,0,0,0,2,1,0,1,
1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,1,1,0,2,1,1,0,1,0,0,0,1,1,0,1,
2,2,1,1,1,0,1,1,0,1,1,0,1,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,2,1,1,0,1,0,0,1,1,0,1,2,1,0,2,0,0,0,1,1,0,1,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,
0,1,0,0,2,0,2,1,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,0,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,1,1,1,0,1,0,0,1,0,0,0,1,0,0,1,
1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,1,0,1,1,0,0,1,0,0,2,1,1,1,1,1,0,1,0,0,0,0,1,0,1,
0,1,1,1,2,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,1,0,0,0,0,0,1,1,1,1,1,0,1,0,0,0,1,1,0,0,
)
Win1255HebrewModel = {
'charToOrderMap': win1255_CharToOrderMap,
'precedenceMatrix': HebrewLangModel,
'mTypicalPositiveRatio': 0.984004,
'keepEnglishLetter': False,
'charsetName': "windows-1255"
}
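# Illustrative sketch (not part of the original module): a single-byte
# charset prober maps each input byte to a frequency order through
# win1255_CharToOrderMap, then scores consecutive order pairs against the
# language model. The helper name and the 64-symbol sample size below are
# assumptions for illustration only.
def _example_pair_likelihood(prev_byte, cur_byte, sample_size=64):
    prev_order = win1255_CharToOrderMap[prev_byte]
    cur_order = win1255_CharToOrderMap[cur_byte]
    if prev_order < sample_size and cur_order < sample_size:
        # 0-3 likelihood class for this two-character sequence
        return HebrewLangModel[prev_order * sample_size + cur_order]
    return None  # at least one byte is not a frequently-used letter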
# flake8: noqa
|
BenSimner/speccer | refs/heads/master | tests/test_misc.py | 1 | from speccer.misc import *
def with_return(g, r):
yield from g
return r
def from_gen(g):
xs = []
try:
while True:
xs.append(next(g))
except StopIteration as e:
return xs, e.value
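# Note: a generator's `return value` surfaces as StopIteration.value
# (PEP 380); from_gen captures it alongside the yielded items, which is
# what the assertions below rely on.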
def test_with_and_from():
assert from_gen(with_return(range(3), 2)) == ([0, 1, 2], 2)
def test_intersperse_multi_ranges():
'''Tests that intersperse collects and returns the underlying generators' return values
'''
r_range1 = with_return(range(3), 1)
r_range2 = with_return(range(5), 2)
insp = intersperse([r_range1, r_range2])
xs, v = from_gen(insp)
assert xs == [0, 0, 1, 1, 2, 2, 3, 4,]
assert v == (1, 2,)
def test_intersperse_multi_ranges_invert():
'''Test that the order of return values follows the order of the input generators, regardless of when each generator is exhausted
'''
r_range1 = with_return(range(3), 1)
r_range2 = with_return(range(7), 2)
r_range3 = with_return(range(2), 3)
r_range4 = with_return(range(1), 4)
insp = intersperse([r_range1, r_range2, r_range3, r_range4])
xs, v = from_gen(insp)
assert xs == [0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 4, 5, 6,]
assert v == (1, 2, 3, 4,)
def test_intersperse_order():
a = ['a', 'b', 'c']
b = [0, 1, 2, 3]
c = [True, False]
insp = intersperse([a, b, c])
xs, v = from_gen(insp)
assert xs == ['a', 0, True, 'b', 1, False, 'c', 2, 3]
assert v == (None, None, None)
|
proversity-org/edx-platform | refs/heads/master | cms/djangoapps/contentstore/views/entrance_exam.py | 12 | """
Entrance Exams view module -- handles all requests related to entrance exam management via Studio
Intended to be utilized as an AJAX callback handler, versus a proper view/screen
"""
import logging
from functools import wraps
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, HttpResponseBadRequest
from django.utils.translation import ugettext as _
from django.views.decorators.csrf import ensure_csrf_cookie
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey, UsageKey
from contentstore.views.helpers import create_xblock, remove_entrance_exam_graders
from contentstore.views.item import delete_item
from models.settings.course_metadata import CourseMetadata
from openedx.core.djangolib.js_utils import dump_js_escaped_json
from student.auth import has_course_author_access
from util import milestones_helpers
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
__all__ = ['entrance_exam', ]
log = logging.getLogger(__name__)
# pylint: disable=invalid-name
def _get_default_entrance_exam_minimum_pct():
"""
Helper method to return the default value from configuration
Converts integer values to decimals, since that's what we use internally
"""
entrance_exam_minimum_score_pct = float(settings.ENTRANCE_EXAM_MIN_SCORE_PCT)
if entrance_exam_minimum_score_pct.is_integer():
entrance_exam_minimum_score_pct = entrance_exam_minimum_score_pct / 100
return entrance_exam_minimum_score_pct
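# e.g. ENTRANCE_EXAM_MIN_SCORE_PCT = 50 yields 0.5 here, while an already
# fractional setting such as 0.65 is returned unchanged.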
# pylint: disable=missing-docstring
def check_feature_enabled(feature_name):
"""
Ensure the specified feature is turned on. Return an HTTP 400 code if not.
"""
def _check_feature_enabled(view_func):
def _decorator(request, *args, **kwargs):
# Deny access if the entrance exam feature is disabled
if not settings.FEATURES.get(feature_name, False):
return HttpResponseBadRequest()
return view_func(request, *args, **kwargs)
return wraps(view_func)(_decorator)
return _check_feature_enabled
@login_required
@ensure_csrf_cookie
@check_feature_enabled(feature_name='ENTRANCE_EXAMS')
def entrance_exam(request, course_key_string):
"""
The RESTful handler for entrance exams.
It allows retrieval of the entrance exam module for a course, as well as
creating a new entrance exam and deleting an existing one.
GET
Retrieves the entrance exam module (metadata) for the specified course
POST
Adds an entrance exam module to the specified course.
DELETE
Removes the entrance exam from the course
"""
course_key = CourseKey.from_string(course_key_string)
# Deny access if the user is valid, but they lack the proper object access privileges
if not has_course_author_access(request.user, course_key):
return HttpResponse(status=403)
# Retrieve the entrance exam module for the specified course (returns 404 if none found)
if request.method == 'GET':
return _get_entrance_exam(request, course_key)
# Create a new entrance exam for the specified course (returns 201 if created)
elif request.method == 'POST':
response_format = request.POST.get('format', 'html')
# Django exposes the Accept header as META['HTTP_ACCEPT']; default to ''
# so the membership test below cannot fail on None.
http_accept = request.META.get('HTTP_ACCEPT', '')
if response_format == 'json' or 'application/json' in http_accept:
ee_min_score = request.POST.get('entrance_exam_minimum_score_pct', None)
# If the request contains an empty value or None, fall back to the default.
entrance_exam_minimum_score_pct = _get_default_entrance_exam_minimum_pct()
if ee_min_score != '' and ee_min_score is not None:
entrance_exam_minimum_score_pct = float(ee_min_score)
return create_entrance_exam(request, course_key, entrance_exam_minimum_score_pct)
return HttpResponse(status=400)
# Remove the entrance exam module for the specified course (returns 204 regardless of existence)
elif request.method == 'DELETE':
return delete_entrance_exam(request, course_key)
# No other HTTP verbs/methods are supported at this time
else:
return HttpResponse(status=405)
@check_feature_enabled(feature_name='ENTRANCE_EXAMS')
def create_entrance_exam(request, course_key, entrance_exam_minimum_score_pct):
"""
API method to create an entrance exam.
First clean out any old entrance exams.
"""
_delete_entrance_exam(request, course_key)
return _create_entrance_exam(
request=request,
course_key=course_key,
entrance_exam_minimum_score_pct=entrance_exam_minimum_score_pct
)
def _create_entrance_exam(request, course_key, entrance_exam_minimum_score_pct=None):
"""
Internal workflow operation to create an entrance exam
"""
# Provide a default value for the minimum score percent if nothing specified
if entrance_exam_minimum_score_pct is None:
entrance_exam_minimum_score_pct = _get_default_entrance_exam_minimum_pct()
# Confirm the course exists
course = modulestore().get_course(course_key)
if course is None:
return HttpResponse(status=400)
# Create the entrance exam item (currently it's just a chapter)
parent_locator = unicode(course.location)
created_block = create_xblock(
parent_locator=parent_locator,
user=request.user,
category='chapter',
display_name=_('Entrance Exam'),
is_entrance_exam=True
)
# Set the entrance exam metadata flags for this course
# Reload the course so we don't overwrite the new child reference
course = modulestore().get_course(course_key)
metadata = {
'entrance_exam_enabled': True,
'entrance_exam_minimum_score_pct': entrance_exam_minimum_score_pct,
'entrance_exam_id': unicode(created_block.location),
}
CourseMetadata.update_from_dict(metadata, course, request.user)
# Create the entrance exam section item.
create_xblock(
parent_locator=unicode(created_block.location),
user=request.user,
category='sequential',
display_name=_('Entrance Exam - Subsection')
)
add_entrance_exam_milestone(course.id, created_block)
return HttpResponse(status=201)
def _get_entrance_exam(request, course_key): # pylint: disable=W0613
"""
Internal workflow operation to retrieve an entrance exam
"""
course = modulestore().get_course(course_key)
if course is None:
return HttpResponse(status=400)
if not course.entrance_exam_id:
return HttpResponse(status=404)
try:
exam_key = UsageKey.from_string(course.entrance_exam_id)
except InvalidKeyError:
return HttpResponse(status=404)
try:
exam_descriptor = modulestore().get_item(exam_key)
return HttpResponse(
dump_js_escaped_json({'locator': unicode(exam_descriptor.location)}),
status=200, content_type='application/json')
except ItemNotFoundError:
return HttpResponse(status=404)
@check_feature_enabled(feature_name='ENTRANCE_EXAMS')
def update_entrance_exam(request, course_key, exam_data):
"""
Operation to update course fields pertaining to entrance exams
The update operation is not currently exposed directly via the API
Because the operation is not exposed directly, we do not return a 200 response
But we do return a 400 in the error case because the workflow is executed in a request context
"""
course = modulestore().get_course(course_key)
if course:
metadata = exam_data
CourseMetadata.update_from_dict(metadata, course, request.user)
@check_feature_enabled(feature_name='ENTRANCE_EXAMS')
def delete_entrance_exam(request, course_key):
"""
API method to delete an entrance exam
"""
return _delete_entrance_exam(request=request, course_key=course_key)
def _delete_entrance_exam(request, course_key):
"""
Internal workflow operation to remove an entrance exam
"""
store = modulestore()
course = store.get_course(course_key)
if course is None:
return HttpResponse(status=400)
remove_entrance_exam_milestone_reference(request, course_key)
# Reset the entrance exam flags on the course
# Reload the course so we have the latest state
course = store.get_course(course_key)
if course.entrance_exam_id:
metadata = {
'entrance_exam_enabled': False,
'entrance_exam_minimum_score_pct': None,
'entrance_exam_id': None,
}
CourseMetadata.update_from_dict(metadata, course, request.user)
# Clean up any pre-existing entrance exam graders
remove_entrance_exam_graders(course_key, request.user)
return HttpResponse(status=204)
def add_entrance_exam_milestone(course_id, x_block):
# Add an entrance exam milestone if one does not already exist for the given xblock.
# Since this is a standalone entrance exam helper, first verify that the given
# xblock actually is an entrance exam.
if x_block.is_entrance_exam:
namespace_choices = milestones_helpers.get_namespace_choices()
milestone_namespace = milestones_helpers.generate_milestone_namespace(
namespace_choices.get('ENTRANCE_EXAM'),
course_id
)
milestones = milestones_helpers.get_milestones(milestone_namespace)
if milestones:
milestone = milestones[0]
else:
description = 'Autogenerated during {} entrance exam creation.'.format(unicode(course_id))
milestone = milestones_helpers.add_milestone({
'name': _('Completed Course Entrance Exam'),
'namespace': milestone_namespace,
'description': description
})
relationship_types = milestones_helpers.get_milestone_relationship_types()
milestones_helpers.add_course_milestone(
unicode(course_id),
relationship_types['REQUIRES'],
milestone
)
milestones_helpers.add_course_content_milestone(
unicode(course_id),
unicode(x_block.location),
relationship_types['FULFILLS'],
milestone
)
def remove_entrance_exam_milestone_reference(request, course_key):
"""
Remove content reference for entrance exam.
"""
course_children = modulestore().get_items(
course_key,
qualifiers={'category': 'chapter'}
)
for course_child in course_children:
if course_child.is_entrance_exam:
delete_item(request, course_child.scope_ids.usage_id)
milestones_helpers.remove_content_references(unicode(course_child.scope_ids.usage_id))
|
indictranstech/vestasi-erpnext | refs/heads/develop | erpnext/hr/doctype/appraisal_template_goal/appraisal_template_goal.py | 41 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class AppraisalTemplateGoal(Document):
pass |
beiko-lab/gengis | refs/heads/master | bin/Lib/site-packages/wx-2.8-msw-unicode/wx/lib/pubsub/core/__init__.py | 7 | '''
Core package of pubsub, holding the publisher, listener, and topic
object modules. Functions defined here are used internally by
pubsub so that the right modules can be found later, based on the
selected messaging protocol.
Indeed, some of the API depends on the messaging
protocol used. For instance sendMessage(), defined in publisher.py,
has a different signature (and hence implementation) for the kwargs
protocol than for the arg1 protocol.
The most convenient way to
support this is to put the parts of the package that differ based
on protocol in separate folders, and add one of those folders to
the package's __path__ variable (defined automatically by the Python
interpreter when __init__.py is executed). For instance, code
specific to the kwargs protocol goes in the kwargs folder, and code
specific to the arg1 protocol in the arg1 folder. Then when doing
"from pubsub.core import listener", the correct listener.py will be
found for the specified protocol. The default protocol is kwargs.
Only one protocol can be used in an application. The default protocol,
if none is chosen by user, is kwargs, as selected by the call to
_prependModulePath() at end of this file.
:copyright: Copyright 2006-2009 by Oliver Schoenborn, all rights reserved.
:license: BSD, see LICENSE.txt for details.
'''
def setMsgProtocol(protocol):
import policies
policies.msgDataProtocol = protocol
# add appropriate subdir for protocol-specific implementation
if protocol == 'kwargs':
_replaceModulePath0('kwargs')
else:
_replaceModulePath0('arg1')
def setMsgDataArgName(stage, listenerArgName, senderArgNameAny=False):
import policies
policies.senderKwargNameAny = senderArgNameAny
policies.msgDataArgName = listenerArgName
policies.msgProtocolTransStage = stage
#print `policies.msgProtocolTransStage`, `policies.msgDataProtocol`, \
# `policies.senderKwargNameAny`, `policies.msgDataArgName`
#print 'override "arg1" protocol arg name:', argName
def _replaceModulePath0(dirname):
'''Replace the first package-path item (in __path__) with dirname.
The dirname will be prepended with the package's path, assumed to
be the last item in __path__.'''
corepath = __path__
assert len(corepath) > 1
initpyLoc = corepath[-1]
import os
corepath[0] = os.path.join(initpyLoc, dirname)
def _prependModulePath(extra):
'''Insert extra at beginning of package's path list. Should only be
called once, at package load time, to set the folder used for
implementation specific to the default message protocol.'''
corepath = __path__
initpyLoc = corepath[-1]
import os
corepath.insert(0, os.path.join(initpyLoc, extra))
# default protocol:
_prependModulePath('kwargs')
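# After the call above, __path__ is roughly [<pkg>/kwargs, <pkg>], so
# "from pubsub.core import publisher" finds the kwargs-specific module
# first and falls back to this folder for protocol-neutral modules.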
|
EthanHeilman/bitcoin | refs/heads/master | test/functional/feature_notifications.py | 4 | #!/usr/bin/env python3
# Copyright (c) 2014-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the -alertnotify, -blocknotify and -walletnotify options."""
import os
from test_framework.address import ADDRESS_BCRT1_UNSPENDABLE, keyhash_to_p2pkh
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
hex_str_to_bytes,
)
# Linux allows all characters other than \x00
# Windows disallows control characters (0-31) and /\?%*:|"<>
FILE_CHAR_START = 32 if os.name == 'nt' else 1
FILE_CHAR_END = 128
FILE_CHARS_DISALLOWED = '/\\?%*:|"<>' if os.name == 'nt' else '/'
def notify_outputname(walletname, txid):
return txid if os.name == 'nt' else '{}_{}'.format(walletname, txid)
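# e.g. notify_outputname('w1', 'deadbeef') returns 'w1_deadbeef' on POSIX;
# on Windows only the txid is used, so the expected filename is 'deadbeef'.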
class NotificationsTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
def setup_network(self):
self.wallet = ''.join(chr(i) for i in range(FILE_CHAR_START, FILE_CHAR_END) if chr(i) not in FILE_CHARS_DISALLOWED)
self.alertnotify_dir = os.path.join(self.options.tmpdir, "alertnotify")
self.blocknotify_dir = os.path.join(self.options.tmpdir, "blocknotify")
self.walletnotify_dir = os.path.join(self.options.tmpdir, "walletnotify")
os.mkdir(self.alertnotify_dir)
os.mkdir(self.blocknotify_dir)
os.mkdir(self.walletnotify_dir)
# -alertnotify and -blocknotify on node0, walletnotify on node1
self.extra_args = [[
"-alertnotify=echo > {}".format(os.path.join(self.alertnotify_dir, '%s')),
"-blocknotify=echo > {}".format(os.path.join(self.blocknotify_dir, '%s')),
], [
"-rescan",
"-walletnotify=echo > {}".format(os.path.join(self.walletnotify_dir, notify_outputname('%w', '%s'))),
]]
self.wallet_names = [self.default_wallet_name, self.wallet]
super().setup_network()
def run_test(self):
self.log.info("test -blocknotify")
block_count = 10
blocks = self.nodes[1].generatetoaddress(block_count, self.nodes[1].getnewaddress() if self.is_wallet_compiled() else ADDRESS_BCRT1_UNSPENDABLE)
# wait at most 10 seconds for expected number of files before reading the content
self.wait_until(lambda: len(os.listdir(self.blocknotify_dir)) == block_count, timeout=10)
# directory content should equal the generated blocks hashes
assert_equal(sorted(blocks), sorted(os.listdir(self.blocknotify_dir)))
if self.is_wallet_compiled():
self.log.info("test -walletnotify")
# wait at most 10 seconds for expected number of files before reading the content
self.wait_until(lambda: len(os.listdir(self.walletnotify_dir)) == block_count, timeout=10)
# directory content should equal the generated transaction hashes
txids_rpc = list(map(lambda t: notify_outputname(self.wallet, t['txid']), self.nodes[1].listtransactions("*", block_count)))
assert_equal(sorted(txids_rpc), sorted(os.listdir(self.walletnotify_dir)))
self.stop_node(1)
for tx_file in os.listdir(self.walletnotify_dir):
os.remove(os.path.join(self.walletnotify_dir, tx_file))
self.log.info("test -walletnotify after rescan")
# restart node to rescan to force wallet notifications
self.start_node(1)
self.connect_nodes(0, 1)
self.wait_until(lambda: len(os.listdir(self.walletnotify_dir)) == block_count, timeout=10)
# directory content should equal the generated transaction hashes
txids_rpc = list(map(lambda t: notify_outputname(self.wallet, t['txid']), self.nodes[1].listtransactions("*", block_count)))
assert_equal(sorted(txids_rpc), sorted(os.listdir(self.walletnotify_dir)))
for tx_file in os.listdir(self.walletnotify_dir):
os.remove(os.path.join(self.walletnotify_dir, tx_file))
# Conflicting transactions tests. Give node 0 same wallet seed as
# node 1, generate spends from node 0, and check notifications
# triggered by node 1
self.log.info("test -walletnotify with conflicting transactions")
self.nodes[0].sethdseed(seed=self.nodes[1].dumpprivkey(keyhash_to_p2pkh(hex_str_to_bytes(self.nodes[1].getwalletinfo()['hdseedid'])[::-1])))
self.nodes[0].rescanblockchain()
self.nodes[0].generatetoaddress(100, ADDRESS_BCRT1_UNSPENDABLE)
self.sync_blocks()
# Generate transaction on node 0, sync mempools, and check for
# notification on node 1.
tx1 = self.nodes[0].sendtoaddress(address=ADDRESS_BCRT1_UNSPENDABLE, amount=1, replaceable=True)
assert_equal(tx1 in self.nodes[0].getrawmempool(), True)
self.sync_mempools()
self.expect_wallet_notify([tx1])
# Generate bump transaction, sync mempools, and check for bump1
# notification. In the future, per
# https://github.com/bitcoin/bitcoin/pull/9371, it might be better
# to have notifications for both tx1 and bump1.
bump1 = self.nodes[0].bumpfee(tx1)["txid"]
assert_equal(bump1 in self.nodes[0].getrawmempool(), True)
self.sync_mempools()
self.expect_wallet_notify([bump1])
# Add bump1 transaction to new block, checking for a notification
# and the correct number of confirmations.
self.nodes[0].generatetoaddress(1, ADDRESS_BCRT1_UNSPENDABLE)
self.sync_blocks()
self.expect_wallet_notify([bump1])
assert_equal(self.nodes[1].gettransaction(bump1)["confirmations"], 1)
# Generate a second transaction to be bumped.
tx2 = self.nodes[0].sendtoaddress(address=ADDRESS_BCRT1_UNSPENDABLE, amount=1, replaceable=True)
assert_equal(tx2 in self.nodes[0].getrawmempool(), True)
self.sync_mempools()
self.expect_wallet_notify([tx2])
# Bump tx2 as bump2 and generate a block on node 0 while
# disconnected, then reconnect and check for notifications on node 1
# about newly confirmed bump2 and newly conflicted tx2.
self.disconnect_nodes(0, 1)
bump2 = self.nodes[0].bumpfee(tx2)["txid"]
self.nodes[0].generatetoaddress(1, ADDRESS_BCRT1_UNSPENDABLE)
assert_equal(self.nodes[0].gettransaction(bump2)["confirmations"], 1)
assert_equal(tx2 in self.nodes[1].getrawmempool(), True)
self.connect_nodes(0, 1)
self.sync_blocks()
self.expect_wallet_notify([bump2, tx2])
assert_equal(self.nodes[1].gettransaction(bump2)["confirmations"], 1)
# TODO: add test for `-alertnotify` large fork notifications
def expect_wallet_notify(self, tx_ids):
self.wait_until(lambda: len(os.listdir(self.walletnotify_dir)) >= len(tx_ids), timeout=10)
assert_equal(sorted(notify_outputname(self.wallet, tx_id) for tx_id in tx_ids), sorted(os.listdir(self.walletnotify_dir)))
for tx_file in os.listdir(self.walletnotify_dir):
os.remove(os.path.join(self.walletnotify_dir, tx_file))
if __name__ == '__main__':
NotificationsTest().main()
|
Tiduszk/CS-100 | refs/heads/master | Chapter 3/Book Exercises/C3-2.py | 2 | #Made by Zachary C. on 10/2/16
#Algorithm
#1. Start
#2. Greet user
#3. Ask user for the length of rectangle 1
#4. Save length of rectangle 1 (length_1)
#5. Ask user for the width of rectangle 1
#6. Save width of rectangle 1 (width_1)
#7. Ask user for the length of rectangle 2
#8. Save the length of rectangle 2 (length_2)
#9. Ask user for the width of rectangle 2
#10. Save the width of rectangle 2 (width_2)
#11. Calculate the area of rectangle 1 (length_1 * width_1)
#12. Save the area of rectangle 1 (area_1)
#13. Calculate the area of rectangle 2 (length_2 * width_2)
#14. Save the area of rectangle 2 (area_2)
#15. Are rectangle 1 and rectangle 2 unequal?
#15a1. Is rectangle 1 larger than rectangle 2?
#15a1a1. Tell the user rectangle 1 is larger
#15a1a2. Go to step 16
#15a1b1. Tell the user rectangle 2 is larger
#15a1b2. Go to step 16
#15b1. Tell the user the rectangles are the same size
#15b2. Go to step 16
#16. Sign off
#17. End
#1. Start
#2. Greet user
print('This program calculates the area of two rectangles and reports which is larger.')
#3. Ask user for the length of rectangle 1
#4. Save length of rectangle 1 (length_1)
#5. Ask user for the width of rectangle 1
#6. Save width of rectangle 1 (width_1)
#7. Ask user for the length of rectangle 2
#8. Save the length of rectangle 2 (length_2)
#9. Ask user for the width of rectangle 2
#10. Save the width of rectangle 2 (width_2)
length_1 = float(input('Please enter the length of the first rectangle: '))
width_1 = float(input('Please enter the width of the first rectangle: '))
length_2 = float(input('Please enter the length of the second rectangle: '))
width_2 = float(input('Please enter the width of the second rectangle: '))
#11. Calculate the area of rectangle 1 (length_1 * width_1)
#12. Save the area of rectangle 1 (area_1)
#13. Calculate the area of rectangle 2 (length_2 * width_2)
#14. Save the area of rectangle 2 (area_2)
area_1 = length_1 * width_1
area_2 = length_2 * width_2
#15. Are rectangle 1 and rectangle 2 unequal?
#15a1. Is rectangle 1 larger than rectangle 2?
#15a1a1. Tell the user rectangle 1 is larger
#15a1a2. Go to step 16
#15a1b1. Tell the user rectangle 2 is larger
#15a1b2. Go to step 16
#15b1. Tell the user the rectangles are the same size
#15b2. Go to step 16
if area_1 != area_2:
if area_1 > area_2:
print('The first rectangle is larger than the second rectangle.')
else:
print('The second rectangle is larger than the first rectangle.')
else:
print('The rectangles are the same size.')
#16. Sign off
print('Thanks for using my program.')
#17. End |
poppogbr/genropy | refs/heads/master | packages/glbl/model/localita.py | 1 | #!/usr/bin/env python
# encoding: utf-8
class Table(object):
def config_db(self, pkg):
tbl = pkg.table('localita', pkey='id', name_long='Localita', rowcaption='nome,@provincia.sigla:%s (%s)',broadcast=True)
tbl.column('id', size='22', group='_', readOnly=True, name_long='!!Id')
tbl.column('nome', size=':52', name_long='Nome', indexed=True)
tbl.column('provincia', size='2', name_long='Provincia').relation('glbl.provincia.sigla', mode='foreignkey',
onUpdate_sql='cascade', onDelete='raise')
tbl.column('codice_istat', size='6', name_long='Codice Istat')
tbl.column('codice_comune', size='4', name_long='Codice Comune')
tbl.column('prefisso_tel', size='4', name_long='Prefisso Tel')
tbl.column('cap', size='5', name_long='CAP', indexed=True)
def baseView_min(self):
return "nome:80%,prefisso_tel:20%" |
NavyaJayaram/MyRepository | refs/heads/master | SoundCloudUsingAJS/lib/python2.7/site-packages/pip/_vendor/pkg_resources.py | 160 | """
Package resource API
--------------------
A resource is a logical file contained within a package, or a logical
subdirectory thereof. The package resource API expects resource names
to have their path parts separated with ``/``, *not* whatever the local
path separator is. Do not use os.path operations to manipulate resource
names being passed into the API.
The package resource API is designed to work with normal filesystem packages,
.egg files, and unpacked .egg files. It can also work in a limited way with
.zip files and with custom PEP 302 loaders that support the ``get_data()``
method.
"""
import sys
import os
import time
import re
import imp
import zipfile
import zipimport
import warnings
import stat
import functools
import pkgutil
import token
import symbol
import operator
import platform
from pkgutil import get_importer
try:
from urlparse import urlparse, urlunparse
except ImportError:
from urllib.parse import urlparse, urlunparse
try:
frozenset
except NameError:
from sets import ImmutableSet as frozenset
try:
basestring
next = lambda o: o.next()
from cStringIO import StringIO as BytesIO
except NameError:
basestring = str
from io import BytesIO
def execfile(fn, globs=None, locs=None):
if globs is None:
globs = globals()
if locs is None:
locs = globs
exec(compile(open(fn).read(), fn, 'exec'), globs, locs)
# capture these to bypass sandboxing
from os import utime
try:
from os import mkdir, rename, unlink
WRITE_SUPPORT = True
except ImportError:
# no write support, probably under GAE
WRITE_SUPPORT = False
from os import open as os_open
from os.path import isdir, split
# Avoid try/except due to potential problems with delayed import mechanisms.
if sys.version_info >= (3, 3) and sys.implementation.name == "cpython":
import importlib._bootstrap as importlib_bootstrap
else:
importlib_bootstrap = None
try:
import parser
except ImportError:
pass
def _bypass_ensure_directory(name, mode=0x1FF): # 0777
# Sandbox-bypassing version of ensure_directory()
if not WRITE_SUPPORT:
raise IOError('"os.mkdir" not supported on this platform.')
dirname, filename = split(name)
if dirname and filename and not isdir(dirname):
_bypass_ensure_directory(dirname)
mkdir(dirname, mode)
_state_vars = {}
def _declare_state(vartype, **kw):
g = globals()
for name, val in kw.items():
g[name] = val
_state_vars[name] = vartype
def __getstate__():
state = {}
g = globals()
for k, v in _state_vars.items():
state[k] = g['_sget_'+v](g[k])
return state
def __setstate__(state):
g = globals()
for k, v in state.items():
g['_sset_'+_state_vars[k]](k, g[k], v)
return state
def _sget_dict(val):
return val.copy()
def _sset_dict(key, ob, state):
ob.clear()
ob.update(state)
def _sget_object(val):
return val.__getstate__()
def _sset_object(key, ob, state):
ob.__setstate__(state)
_sget_none = _sset_none = lambda *args: None
def get_supported_platform():
"""Return this platform's maximum compatible version.
distutils.util.get_platform() normally reports the minimum version
of Mac OS X that would be required to *use* extensions produced by
distutils. But what we want when checking compatibility is to know the
version of Mac OS X that we are *running*. To allow usage of packages that
explicitly require a newer version of Mac OS X, we must also know the
current version of the OS.
If this condition occurs for any other platform with a version in its
platform strings, this function should be extended accordingly.
"""
plat = get_build_platform()
m = macosVersionString.match(plat)
if m is not None and sys.platform == "darwin":
try:
plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3))
except ValueError:
pass # not Mac OS X
return plat
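# e.g. on a Mac this can report something like 'macosx-10.9-intel' (the
# version actually running) where distutils would report the minimum
# version the build supports.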
__all__ = [
# Basic resource access and distribution/entry point discovery
'require', 'run_script', 'get_provider', 'get_distribution',
'load_entry_point', 'get_entry_map', 'get_entry_info', 'iter_entry_points',
'resource_string', 'resource_stream', 'resource_filename',
'resource_listdir', 'resource_exists', 'resource_isdir',
# Environmental control
'declare_namespace', 'working_set', 'add_activation_listener',
'find_distributions', 'set_extraction_path', 'cleanup_resources',
'get_default_cache',
# Primary implementation classes
'Environment', 'WorkingSet', 'ResourceManager',
'Distribution', 'Requirement', 'EntryPoint',
# Exceptions
'ResolutionError','VersionConflict','DistributionNotFound','UnknownExtra',
'ExtractionError',
# Parsing functions and string utilities
'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',
# filesystem utilities
'ensure_directory', 'normalize_path',
# Distribution "precedence" constants
'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
# "Provider" interfaces, implementations, and registration/lookup APIs
'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
'register_finder', 'register_namespace_handler', 'register_loader_type',
'fixup_namespace_packages', 'get_importer',
# Deprecated/backward compatibility only
'run_main', 'AvailableDistributions',
]
class ResolutionError(Exception):
"""Abstract base for dependency resolution errors"""
def __repr__(self):
return self.__class__.__name__+repr(self.args)
class VersionConflict(ResolutionError):
"""An already-installed version conflicts with the requested version"""
class DistributionNotFound(ResolutionError):
"""A requested distribution was not found"""
class UnknownExtra(ResolutionError):
"""Distribution doesn't have an "extra feature" of the given name"""
_provider_factories = {}
PY_MAJOR = sys.version[:3]
EGG_DIST = 3
BINARY_DIST = 2
SOURCE_DIST = 1
CHECKOUT_DIST = 0
DEVELOP_DIST = -1
def register_loader_type(loader_type, provider_factory):
"""Register `provider_factory` to make providers for `loader_type`
`loader_type` is the type or class of a PEP 302 ``module.__loader__``,
and `provider_factory` is a function that, passed a *module* object,
returns an ``IResourceProvider`` for that module.
"""
_provider_factories[loader_type] = provider_factory
def get_provider(moduleOrReq):
"""Return an IResourceProvider for the named module or requirement"""
if isinstance(moduleOrReq,Requirement):
return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
try:
module = sys.modules[moduleOrReq]
except KeyError:
__import__(moduleOrReq)
module = sys.modules[moduleOrReq]
loader = getattr(module, '__loader__', None)
return _find_adapter(_provider_factories, loader)(module)
def _macosx_vers(_cache=[]):
if not _cache:
import platform
version = platform.mac_ver()[0]
# fallback for MacPorts
if version == '':
import plistlib
plist = '/System/Library/CoreServices/SystemVersion.plist'
if os.path.exists(plist):
if hasattr(plistlib, 'readPlist'):
plist_content = plistlib.readPlist(plist)
if 'ProductVersion' in plist_content:
version = plist_content['ProductVersion']
_cache.append(version.split('.'))
return _cache[0]
def _macosx_arch(machine):
return {'PowerPC':'ppc', 'Power_Macintosh':'ppc'}.get(machine,machine)
def get_build_platform():
"""Return this platform's string for platform-specific distributions
XXX Currently this is the same as ``distutils.util.get_platform()``, but it
needs some hacks for Linux and Mac OS X.
"""
try:
# Python 2.7 or >=3.2
from sysconfig import get_platform
except ImportError:
from distutils.util import get_platform
plat = get_platform()
if sys.platform == "darwin" and not plat.startswith('macosx-'):
try:
version = _macosx_vers()
machine = os.uname()[4].replace(" ", "_")
return "macosx-%d.%d-%s" % (int(version[0]), int(version[1]),
_macosx_arch(machine))
except ValueError:
# if someone is running a non-Mac darwin system, this will fall
# through to the default implementation
pass
return plat
macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
get_platform = get_build_platform # XXX backward compat
def compatible_platforms(provided,required):
"""Can code for the `provided` platform run on the `required` platform?
Returns true if either platform is ``None``, or the platforms are equal.
XXX Needs compatibility checks for Linux and other unixy OSes.
"""
if provided is None or required is None or provided==required:
return True # easy case
# Mac OS X special cases
reqMac = macosVersionString.match(required)
if reqMac:
provMac = macosVersionString.match(provided)
# is this a Mac package?
if not provMac:
# this is backwards compatibility for packages built before
# setuptools 0.6. All packages built after this point will
# use the new macosx designation.
provDarwin = darwinVersionString.match(provided)
if provDarwin:
dversion = int(provDarwin.group(1))
macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
if dversion == 7 and macosversion >= "10.3" or \
dversion == 8 and macosversion >= "10.4":
#import warnings
#warnings.warn("Mac eggs should be rebuilt to "
# "use the macosx designation instead of darwin.",
# category=DeprecationWarning)
return True
return False # egg isn't macosx or legacy darwin
# are they the same major version and machine type?
if provMac.group(1) != reqMac.group(1) or \
provMac.group(3) != reqMac.group(3):
return False
# is the required OS major update >= the provided one?
if int(provMac.group(2)) > int(reqMac.group(2)):
return False
return True
# XXX Linux and other platforms' special cases should go here
return False
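# Illustrative results, following the rules above:
#   compatible_platforms(None, 'linux-x86_64') -> True (no restriction)
#   compatible_platforms('macosx-10.3-ppc', 'macosx-10.4-ppc') -> True
#   compatible_platforms('macosx-10.5-ppc', 'macosx-10.4-ppc') -> False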
def run_script(dist_spec, script_name):
"""Locate distribution `dist_spec` and run its `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
require(dist_spec)[0].run_script(script_name, ns)
run_main = run_script # backward compatibility
def get_distribution(dist):
"""Return a current distribution object for a Requirement or string"""
if isinstance(dist,basestring): dist = Requirement.parse(dist)
if isinstance(dist,Requirement): dist = get_provider(dist)
if not isinstance(dist,Distribution):
raise TypeError("Expected string, Requirement, or Distribution", dist)
return dist
def load_entry_point(dist, group, name):
"""Return `name` entry point of `group` for `dist` or raise ImportError"""
return get_distribution(dist).load_entry_point(group, name)
def get_entry_map(dist, group=None):
"""Return the entry point map for `group`, or the full entry map"""
return get_distribution(dist).get_entry_map(group)
def get_entry_info(dist, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return get_distribution(dist).get_entry_info(group, name)
class IMetadataProvider:
def has_metadata(name):
"""Does the package's distribution contain the named metadata?"""
def get_metadata(name):
"""The named metadata resource as a string"""
def get_metadata_lines(name):
"""Yield named metadata resource as list of non-blank non-comment lines
Leading and trailing whitespace is stripped from each line, and lines
with ``#`` as the first non-blank character are omitted."""
def metadata_isdir(name):
"""Is the named metadata a directory? (like ``os.path.isdir()``)"""
def metadata_listdir(name):
"""List of metadata names in the directory (like ``os.listdir()``)"""
def run_script(script_name, namespace):
"""Execute the named script in the supplied namespace dictionary"""
class IResourceProvider(IMetadataProvider):
"""An object that provides access to package resources"""
def get_resource_filename(manager, resource_name):
"""Return a true filesystem path for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_stream(manager, resource_name):
"""Return a readable file-like object for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_string(manager, resource_name):
"""Return a string containing the contents of `resource_name`
`manager` must be an ``IResourceManager``"""
def has_resource(resource_name):
"""Does the package contain the named resource?"""
def resource_isdir(resource_name):
"""Is the named resource a directory? (like ``os.path.isdir()``)"""
def resource_listdir(resource_name):
"""List of resource names in the directory (like ``os.listdir()``)"""
class WorkingSet(object):
"""A collection of active distributions on sys.path (or a similar list)"""
def __init__(self, entries=None):
"""Create working set from list of path entries (default=sys.path)"""
self.entries = []
self.entry_keys = {}
self.by_key = {}
self.callbacks = []
if entries is None:
entries = sys.path
for entry in entries:
self.add_entry(entry)
def add_entry(self, entry):
"""Add a path item to ``.entries``, finding any distributions on it
``find_distributions(entry, True)`` is used to find distributions
corresponding to the path entry, and they are added. `entry` is
always appended to ``.entries``, even if it is already present.
(This is because ``sys.path`` can contain the same value more than
once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
equal ``sys.path``.)
"""
self.entry_keys.setdefault(entry, [])
self.entries.append(entry)
for dist in find_distributions(entry, True):
self.add(dist, entry, False)
def __contains__(self,dist):
"""True if `dist` is the active distribution for its project"""
return self.by_key.get(dist.key) == dist
def find(self, req):
"""Find a distribution matching requirement `req`
If there is an active distribution for the requested project, this
returns it as long as it meets the version requirement specified by
`req`. But, if there is an active distribution for the project and it
does *not* meet the `req` requirement, ``VersionConflict`` is raised.
If there is no active distribution for the requested project, ``None``
is returned.
"""
dist = self.by_key.get(req.key)
if dist is not None and dist not in req:
raise VersionConflict(dist,req) # XXX add more info
else:
return dist
def iter_entry_points(self, group, name=None):
"""Yield entry point objects from `group` matching `name`
If `name` is None, yields all entry points in `group` from all
distributions in the working set, otherwise only ones matching
both `group` and `name` are yielded (in distribution order).
"""
for dist in self:
entries = dist.get_entry_map(group)
if name is None:
for ep in entries.values():
yield ep
elif name in entries:
yield entries[name]
def run_script(self, requires, script_name):
"""Locate distribution for `requires` and run `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
self.require(requires)[0].run_script(script_name, ns)
def __iter__(self):
"""Yield distributions for non-duplicate projects in the working set
The yield order is the order in which the items' path entries were
added to the working set.
"""
seen = {}
for item in self.entries:
if item not in self.entry_keys:
# workaround a cache issue
continue
for key in self.entry_keys[item]:
if key not in seen:
seen[key]=1
yield self.by_key[key]
def add(self, dist, entry=None, insert=True):
"""Add `dist` to working set, associated with `entry`
If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
On exit from this routine, `entry` is added to the end of the working
set's ``.entries`` (if it wasn't already present).
`dist` is only added to the working set if it's for a project that
doesn't already have a distribution in the set. If it's added, any
callbacks registered with the ``subscribe()`` method will be called.
"""
if insert:
dist.insert_on(self.entries, entry)
if entry is None:
entry = dist.location
keys = self.entry_keys.setdefault(entry,[])
keys2 = self.entry_keys.setdefault(dist.location,[])
if dist.key in self.by_key:
return # ignore hidden distros
self.by_key[dist.key] = dist
if dist.key not in keys:
keys.append(dist.key)
if dist.key not in keys2:
keys2.append(dist.key)
self._added_new(dist)
def resolve(self, requirements, env=None, installer=None):
"""List all distributions needed to (recursively) meet `requirements`
`requirements` must be a sequence of ``Requirement`` objects. `env`,
if supplied, should be an ``Environment`` instance. If
not supplied, it defaults to all distributions available within any
entry or distribution in the working set. `installer`, if supplied,
will be invoked with each requirement that cannot be met by an
already-installed distribution; it should return a ``Distribution`` or
``None``.
"""
requirements = list(requirements)[::-1] # set up the stack
processed = {} # set of processed requirements
best = {} # key -> dist
to_activate = []
while requirements:
req = requirements.pop(0) # process dependencies breadth-first
if req in processed:
# Ignore cyclic or redundant dependencies
continue
dist = best.get(req.key)
if dist is None:
# Find the best distribution and add it to the map
dist = self.by_key.get(req.key)
if dist is None:
if env is None:
env = Environment(self.entries)
dist = best[req.key] = env.best_match(req, self, installer)
if dist is None:
#msg = ("The '%s' distribution was not found on this "
# "system, and is required by this application.")
#raise DistributionNotFound(msg % req)
# unfortunately, zc.buildout uses a str(err)
# to get the name of the distribution here..
raise DistributionNotFound(req)
to_activate.append(dist)
if dist not in req:
# Oops, the "best" so far conflicts with a dependency
raise VersionConflict(dist,req) # XXX put more info here
requirements.extend(dist.requires(req.extras)[::-1])
processed[req] = True
return to_activate # return list of distros to activate
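# Sketch of typical use: working_set.resolve(parse_requirements("Foo>=1.0"))
# returns the Distribution objects to activate, or raises
# DistributionNotFound / VersionConflict as described in the docstring.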
def find_plugins(self, plugin_env, full_env=None, installer=None,
fallback=True):
"""Find all activatable distributions in `plugin_env`
Example usage::
distributions, errors = working_set.find_plugins(
Environment(plugin_dirlist)
)
map(working_set.add, distributions) # add plugins+libs to sys.path
print 'Could not load', errors # display errors
The `plugin_env` should be an ``Environment`` instance that contains
only distributions that are in the project's "plugin directory" or
directories. The `full_env`, if supplied, should be an ``Environment``
contains all currently-available distributions. If `full_env` is not
supplied, one is created automatically from the ``WorkingSet`` this
method is called on, which will typically mean that every directory on
``sys.path`` will be scanned for distributions.
`installer` is a standard installer callback as used by the
``resolve()`` method. The `fallback` flag indicates whether we should
attempt to resolve older versions of a plugin if the newest version
cannot be resolved.
This method returns a 2-tuple: (`distributions`, `error_info`), where
`distributions` is a list of the distributions found in `plugin_env`
that were loadable, along with any other distributions that are needed
to resolve their dependencies. `error_info` is a dictionary mapping
unloadable plugin distributions to an exception instance describing the
error that occurred. Usually this will be a ``DistributionNotFound`` or
``VersionConflict`` instance.
"""
plugin_projects = list(plugin_env)
plugin_projects.sort() # scan project names in alphabetic order
error_info = {}
distributions = {}
if full_env is None:
env = Environment(self.entries)
env += plugin_env
else:
env = full_env + plugin_env
shadow_set = self.__class__([])
list(map(shadow_set.add, self)) # put all our entries in shadow_set
for project_name in plugin_projects:
for dist in plugin_env[project_name]:
req = [dist.as_requirement()]
try:
resolvees = shadow_set.resolve(req, env, installer)
except ResolutionError:
v = sys.exc_info()[1]
error_info[dist] = v # save error info
if fallback:
continue # try the next older version of project
else:
break # give up on this project, keep going
else:
list(map(shadow_set.add, resolvees))
distributions.update(dict.fromkeys(resolvees))
# success, no need to try any more versions of this project
break
distributions = list(distributions)
distributions.sort()
return distributions, error_info
def require(self, *requirements):
"""Ensure that distributions matching `requirements` are activated
`requirements` must be a string or a (possibly-nested) sequence
thereof, specifying the distributions and versions required. The
return value is a sequence of the distributions that needed to be
activated to fulfill the requirements; all relevant distributions are
included, even if they were already activated in this working set.
"""
needed = self.resolve(parse_requirements(requirements))
for dist in needed:
self.add(dist)
return needed
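# e.g. working_set.require("FooBar>=1.2") activates a suitable FooBar
# distribution (and anything it needs) on this working set and returns
# the distributions involved.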
def subscribe(self, callback):
"""Invoke `callback` for all distributions (including existing ones)"""
if callback in self.callbacks:
return
self.callbacks.append(callback)
for dist in self:
callback(dist)
def _added_new(self, dist):
for callback in self.callbacks:
callback(dist)
def __getstate__(self):
return (
self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
self.callbacks[:]
)
def __setstate__(self, e_k_b_c):
entries, keys, by_key, callbacks = e_k_b_c
self.entries = entries[:]
self.entry_keys = keys.copy()
self.by_key = by_key.copy()
self.callbacks = callbacks[:]
class Environment(object):
"""Searchable snapshot of distributions on a search path"""
def __init__(self, search_path=None, platform=get_supported_platform(), python=PY_MAJOR):
"""Snapshot distributions available on a search path
Any distributions found on `search_path` are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used.
`platform` is an optional string specifying the name of the platform
that platform-specific distributions must be compatible with. If
unspecified, it defaults to the current platform. `python` is an
optional string naming the desired version of Python (e.g. ``'3.3'``);
it defaults to the current version.
You may explicitly set `platform` (and/or `python`) to ``None`` if you
wish to map *all* distributions, not just those compatible with the
running platform or Python version.
"""
self._distmap = {}
self._cache = {}
self.platform = platform
self.python = python
self.scan(search_path)
def can_add(self, dist):
"""Is distribution `dist` acceptable for this environment?
The distribution must match the platform and python version
requirements specified when this environment was created, or False
is returned.
"""
return (self.python is None or dist.py_version is None
or dist.py_version==self.python) \
and compatible_platforms(dist.platform,self.platform)
def remove(self, dist):
"""Remove `dist` from the environment"""
self._distmap[dist.key].remove(dist)
def scan(self, search_path=None):
"""Scan `search_path` for distributions usable in this environment
Any distributions found are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used. Only distributions conforming to
the platform/python version defined at initialization are added.
"""
if search_path is None:
search_path = sys.path
for item in search_path:
for dist in find_distributions(item):
self.add(dist)
def __getitem__(self,project_name):
"""Return a newest-to-oldest list of distributions for `project_name`
"""
try:
return self._cache[project_name]
except KeyError:
project_name = project_name.lower()
if project_name not in self._distmap:
return []
if project_name not in self._cache:
dists = self._cache[project_name] = self._distmap[project_name]
_sort_dists(dists)
return self._cache[project_name]
def add(self,dist):
"""Add `dist` if we ``can_add()`` it and it isn't already added"""
if self.can_add(dist) and dist.has_version():
dists = self._distmap.setdefault(dist.key,[])
if dist not in dists:
dists.append(dist)
if dist.key in self._cache:
_sort_dists(self._cache[dist.key])
def best_match(self, req, working_set, installer=None):
"""Find distribution best matching `req` and usable on `working_set`
This calls the ``find(req)`` method of the `working_set` to see if a
suitable distribution is already active. (This may raise
``VersionConflict`` if an unsuitable version of the project is already
active in the specified `working_set`.) If a suitable distribution
isn't active, this method returns the newest distribution in the
environment that meets the ``Requirement`` in `req`. If no suitable
distribution is found, and `installer` is supplied, then the result of
calling the environment's ``obtain(req, installer)`` method will be
returned.
"""
dist = working_set.find(req)
if dist is not None:
return dist
for dist in self[req.key]:
if dist in req:
return dist
return self.obtain(req, installer) # try and download/install
def obtain(self, requirement, installer=None):
"""Obtain a distribution matching `requirement` (e.g. via download)
Obtain a distro that matches requirement (e.g. via download). In the
base ``Environment`` class, this routine just returns
``installer(requirement)``, unless `installer` is None, in which case
None is returned instead. This method is a hook that allows subclasses
to attempt other ways of obtaining a distribution before falling back
to the `installer` argument."""
if installer is not None:
return installer(requirement)
def __iter__(self):
"""Yield the unique project names of the available distributions"""
for key in self._distmap.keys():
if self[key]: yield key
def __iadd__(self, other):
"""In-place addition of a distribution or environment"""
if isinstance(other,Distribution):
self.add(other)
elif isinstance(other,Environment):
for project in other:
for dist in other[project]:
self.add(dist)
else:
raise TypeError("Can't add %r to environment" % (other,))
return self
def __add__(self, other):
"""Add an environment or distribution to an environment"""
new = self.__class__([], platform=None, python=None)
for env in self, other:
new += env
return new
AvailableDistributions = Environment # XXX backward compatibility
class ExtractionError(RuntimeError):
"""An error occurred extracting a resource
The following attributes are available from instances of this exception:
manager
The resource manager that raised this exception
cache_path
The base directory for resource extraction
original_error
The exception instance that caused extraction to fail
"""
class ResourceManager:
"""Manage resource extraction and packages"""
extraction_path = None
def __init__(self):
self.cached_files = {}
def resource_exists(self, package_or_requirement, resource_name):
"""Does the named resource exist?"""
return get_provider(package_or_requirement).has_resource(resource_name)
def resource_isdir(self, package_or_requirement, resource_name):
"""Is the named resource an existing directory?"""
return get_provider(package_or_requirement).resource_isdir(
resource_name
)
def resource_filename(self, package_or_requirement, resource_name):
"""Return a true filesystem path for specified resource"""
return get_provider(package_or_requirement).get_resource_filename(
self, resource_name
)
def resource_stream(self, package_or_requirement, resource_name):
"""Return a readable file-like object for specified resource"""
return get_provider(package_or_requirement).get_resource_stream(
self, resource_name
)
def resource_string(self, package_or_requirement, resource_name):
"""Return specified resource as a string"""
return get_provider(package_or_requirement).get_resource_string(
self, resource_name
)
def resource_listdir(self, package_or_requirement, resource_name):
"""List the contents of the named resource directory"""
return get_provider(package_or_requirement).resource_listdir(
resource_name
)
def extraction_error(self):
"""Give an error message for problems extracting file(s)"""
old_exc = sys.exc_info()[1]
cache_path = self.extraction_path or get_default_cache()
err = ExtractionError("""Can't extract file(s) to egg cache
The following error occurred while trying to extract file(s) to the Python egg
cache:
%s
The Python egg cache directory is currently set to:
%s
Perhaps your account does not have write access to this directory? You can
change the cache directory by setting the PYTHON_EGG_CACHE environment
variable to point to an accessible directory.
""" % (old_exc, cache_path)
)
err.manager = self
err.cache_path = cache_path
err.original_error = old_exc
raise err
def get_cache_path(self, archive_name, names=()):
"""Return absolute location in cache for `archive_name` and `names`
The parent directory of the resulting path will be created if it does
not already exist. `archive_name` should be the base filename of the
enclosing egg (which may not be the name of the enclosing zipfile!),
including its ".egg" extension. `names`, if provided, should be a
sequence of path name parts "under" the egg's extraction location.
This method should only be called by resource providers that need to
obtain an extraction location, and only for names they intend to
extract, as it tracks the generated names for possible cleanup later.
"""
extract_path = self.extraction_path or get_default_cache()
target_path = os.path.join(extract_path, archive_name+'-tmp', *names)
try:
_bypass_ensure_directory(target_path)
except:
self.extraction_error()
self._warn_unsafe_extraction_path(extract_path)
self.cached_files[target_path] = 1
return target_path
@staticmethod
def _warn_unsafe_extraction_path(path):
"""
If the default extraction path is overridden and set to an insecure
location, such as /tmp, it opens up an opportunity for an attacker to
replace an extracted file with an unauthorized payload. Warn the user
if a known insecure location is used.
See Distribute #375 for more details.
"""
if os.name == 'nt' and not path.startswith(os.environ['windir']):
# On Windows, permissions are generally restrictive by default
# and temp directories are not writable by other users, so
# bypass the warning.
return
mode = os.stat(path).st_mode
if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
msg = ("%s is writable by group/others and vulnerable to attack "
"when "
"used with get_resource_filename. Consider a more secure "
"location (set with .set_extraction_path or the "
"PYTHON_EGG_CACHE environment variable)." % path)
warnings.warn(msg, UserWarning)
def postprocess(self, tempname, filename):
"""Perform any platform-specific postprocessing of `tempname`
This is where Mac header rewrites should be done; other platforms don't
have anything special they should do.
Resource providers should call this method ONLY after successfully
extracting a compressed resource. They must NOT call it on resources
that are already in the filesystem.
`tempname` is the current (temporary) name of the file, and `filename`
is the name it will be renamed to by the caller after this routine
returns.
"""
if os.name == 'posix':
# Make the resource executable
            mode = ((os.stat(tempname).st_mode) | 0x16D) & 0xFFF  # add 0o555 (r-xr-xr-x), mask to 0o7777
os.chmod(tempname, mode)
def set_extraction_path(self, path):
"""Set the base path where resources will be extracted to, if needed.
If you do not call this routine before any extractions take place, the
path defaults to the return value of ``get_default_cache()``. (Which
is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
platform-specific fallbacks. See that routine's documentation for more
details.)
Resources are extracted to subdirectories of this path based upon
information given by the ``IResourceProvider``. You may set this to a
temporary directory, but then you must call ``cleanup_resources()`` to
delete the extracted files when done. There is no guarantee that
``cleanup_resources()`` will be able to remove all extracted files.
(Note: you may not change the extraction path for a given resource
manager once resources have been extracted, unless you first call
``cleanup_resources()``.)
"""
if self.cached_files:
raise ValueError(
"Can't change extraction path, files already extracted"
)
self.extraction_path = path
def cleanup_resources(self, force=False):
"""
Delete all extracted resource files and directories, returning a list
of the file and directory names that could not be successfully removed.
This function does not have any concurrency protection, so it should
generally only be called when the extraction path is a temporary
directory exclusive to a single process. This method is not
automatically called; you must call it explicitly or register it as an
``atexit`` function if you wish to ensure cleanup of a temporary
directory used for extractions.
"""
# XXX
def get_default_cache():
"""Determine the default cache location
This returns the ``PYTHON_EGG_CACHE`` environment variable, if set.
Otherwise, on Windows, it returns a "Python-Eggs" subdirectory of the
"Application Data" directory. On all other systems, it's "~/.python-eggs".
"""
try:
return os.environ['PYTHON_EGG_CACHE']
except KeyError:
pass
if os.name!='nt':
return os.path.expanduser('~/.python-eggs')
app_data = 'Application Data' # XXX this may be locale-specific!
app_homes = [
(('APPDATA',), None), # best option, should be locale-safe
(('USERPROFILE',), app_data),
(('HOMEDRIVE','HOMEPATH'), app_data),
(('HOMEPATH',), app_data),
(('HOME',), None),
(('WINDIR',), app_data), # 95/98/ME
]
for keys, subdir in app_homes:
dirname = ''
for key in keys:
if key in os.environ:
dirname = os.path.join(dirname, os.environ[key])
else:
break
else:
if subdir:
dirname = os.path.join(dirname,subdir)
return os.path.join(dirname, 'Python-Eggs')
else:
        raise RuntimeError(
            "Please set the PYTHON_EGG_CACHE environment variable"
        )
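# Illustrative behavior (values are hypothetical and environment-dependent):
#   os.environ['PYTHON_EGG_CACHE'] = '/var/cache/python-eggs'
#   get_default_cache()   # -> '/var/cache/python-eggs'
#   # with the variable unset on POSIX, this falls back to '~/.python-eggs'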
def safe_name(name):
"""Convert an arbitrary string to a standard distribution name
Any runs of non-alphanumeric/. characters are replaced with a single '-'.
"""
return re.sub('[^A-Za-z0-9.]+', '-', name)
def safe_version(version):
"""Convert an arbitrary string to a standard version string
Spaces become dots, and all other non-alphanumeric characters become
dashes, with runs of multiple dashes condensed to a single dash.
"""
version = version.replace(' ','.')
return re.sub('[^A-Za-z0-9.]+', '-', version)
def safe_extra(extra):
"""Convert an arbitrary string to a standard 'extra' name
Any runs of non-alphanumeric characters are replaced with a single '_',
and the result is always lowercased.
"""
return re.sub('[^A-Za-z0-9.]+', '_', extra).lower()
def to_filename(name):
"""Convert a project or version name to its filename-escaped form
Any '-' characters are currently replaced with '_'.
"""
return name.replace('-','_')
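# Illustrative transformations of the four helpers above (doctest-style):
#   >>> safe_name('hello world'), safe_version('1.0 beta 2')
#   ('hello-world', '1.0.beta.2')
#   >>> safe_extra('Extra Feature'), to_filename('my-project')
#   ('extra_feature', 'my_project')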
class MarkerEvaluation(object):
values = {
'os_name': lambda: os.name,
'sys_platform': lambda: sys.platform,
'python_full_version': lambda: sys.version.split()[0],
'python_version': lambda:'%s.%s' % (sys.version_info[0], sys.version_info[1]),
'platform_version': platform.version,
'platform_machine': platform.machine,
'python_implementation': platform.python_implementation,
}
@classmethod
def is_invalid_marker(cls, text):
"""
Validate text as a PEP 426 environment marker; return an exception
if invalid or False otherwise.
"""
try:
cls.evaluate_marker(text)
except SyntaxError:
return cls.normalize_exception(sys.exc_info()[1])
return False
@staticmethod
def normalize_exception(exc):
"""
Given a SyntaxError from a marker evaluation, normalize the error message:
- Remove indications of filename and line number.
- Replace platform-specific error messages with standard error messages.
"""
subs = {
'unexpected EOF while parsing': 'invalid syntax',
'parenthesis is never closed': 'invalid syntax',
}
exc.filename = None
exc.lineno = None
exc.msg = subs.get(exc.msg, exc.msg)
return exc
@classmethod
def and_test(cls, nodelist):
# MUST NOT short-circuit evaluation, or invalid syntax can be skipped!
return functools.reduce(operator.and_, [cls.interpret(nodelist[i]) for i in range(1,len(nodelist),2)])
@classmethod
def test(cls, nodelist):
# MUST NOT short-circuit evaluation, or invalid syntax can be skipped!
return functools.reduce(operator.or_, [cls.interpret(nodelist[i]) for i in range(1,len(nodelist),2)])
@classmethod
def atom(cls, nodelist):
t = nodelist[1][0]
if t == token.LPAR:
if nodelist[2][0] == token.RPAR:
raise SyntaxError("Empty parentheses")
return cls.interpret(nodelist[2])
raise SyntaxError("Language feature not supported in environment markers")
@classmethod
def comparison(cls, nodelist):
if len(nodelist)>4:
raise SyntaxError("Chained comparison not allowed in environment markers")
comp = nodelist[2][1]
cop = comp[1]
if comp[0] == token.NAME:
if len(nodelist[2]) == 3:
if cop == 'not':
cop = 'not in'
else:
cop = 'is not'
try:
cop = cls.get_op(cop)
except KeyError:
raise SyntaxError(repr(cop)+" operator not allowed in environment markers")
return cop(cls.evaluate(nodelist[1]), cls.evaluate(nodelist[3]))
@classmethod
def get_op(cls, op):
ops = {
symbol.test: cls.test,
symbol.and_test: cls.and_test,
symbol.atom: cls.atom,
symbol.comparison: cls.comparison,
'not in': lambda x, y: x not in y,
'in': lambda x, y: x in y,
'==': operator.eq,
'!=': operator.ne,
}
if hasattr(symbol, 'or_test'):
ops[symbol.or_test] = cls.test
return ops[op]
@classmethod
def evaluate_marker(cls, text, extra=None):
"""
Evaluate a PEP 426 environment marker on CPython 2.4+.
Return a boolean indicating the marker result in this environment.
Raise SyntaxError if marker is invalid.
This implementation uses the 'parser' module, which is not implemented on
Jython and has been superseded by the 'ast' module in Python 2.6 and
later.
"""
return cls.interpret(parser.expr(text).totuple(1)[1])
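    # Illustrative marker (the result depends on the running interpreter):
    #   MarkerEvaluation.evaluate_marker("os_name == 'posix'")  # True on POSIX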
@classmethod
def _markerlib_evaluate(cls, text):
"""
Evaluate a PEP 426 environment marker using markerlib.
Return a boolean indicating the marker result in this environment.
Raise SyntaxError if marker is invalid.
"""
from pip._vendor import _markerlib
# markerlib implements Metadata 1.2 (PEP 345) environment markers.
# Translate the variables to Metadata 2.0 (PEP 426).
env = _markerlib.default_environment()
for key in env.keys():
new_key = key.replace('.', '_')
env[new_key] = env.pop(key)
try:
result = _markerlib.interpret(text, env)
except NameError:
e = sys.exc_info()[1]
raise SyntaxError(e.args[0])
return result
if 'parser' not in globals():
# Fall back to less-complete _markerlib implementation if 'parser' module
# is not available.
evaluate_marker = _markerlib_evaluate
@classmethod
def interpret(cls, nodelist):
while len(nodelist)==2: nodelist = nodelist[1]
try:
op = cls.get_op(nodelist[0])
except KeyError:
raise SyntaxError("Comparison or logical expression expected")
return op(nodelist)
@classmethod
def evaluate(cls, nodelist):
while len(nodelist)==2: nodelist = nodelist[1]
kind = nodelist[0]
name = nodelist[1]
if kind==token.NAME:
try:
op = cls.values[name]
except KeyError:
raise SyntaxError("Unknown name %r" % name)
return op()
if kind==token.STRING:
s = nodelist[1]
if s[:1] not in "'\"" or s.startswith('"""') or s.startswith("'''") \
or '\\' in s:
raise SyntaxError(
"Only plain strings allowed in environment markers")
return s[1:-1]
raise SyntaxError("Language feature not supported in environment markers")
invalid_marker = MarkerEvaluation.is_invalid_marker
evaluate_marker = MarkerEvaluation.evaluate_marker
class NullProvider:
"""Try to implement resources and metadata for arbitrary PEP 302 loaders"""
egg_name = None
egg_info = None
loader = None
def __init__(self, module):
self.loader = getattr(module, '__loader__', None)
self.module_path = os.path.dirname(getattr(module, '__file__', ''))
def get_resource_filename(self, manager, resource_name):
return self._fn(self.module_path, resource_name)
def get_resource_stream(self, manager, resource_name):
return BytesIO(self.get_resource_string(manager, resource_name))
def get_resource_string(self, manager, resource_name):
return self._get(self._fn(self.module_path, resource_name))
def has_resource(self, resource_name):
return self._has(self._fn(self.module_path, resource_name))
def has_metadata(self, name):
return self.egg_info and self._has(self._fn(self.egg_info,name))
if sys.version_info <= (3,):
def get_metadata(self, name):
if not self.egg_info:
return ""
return self._get(self._fn(self.egg_info,name))
else:
def get_metadata(self, name):
if not self.egg_info:
return ""
return self._get(self._fn(self.egg_info,name)).decode("utf-8")
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
def resource_isdir(self,resource_name):
return self._isdir(self._fn(self.module_path, resource_name))
def metadata_isdir(self,name):
return self.egg_info and self._isdir(self._fn(self.egg_info,name))
def resource_listdir(self,resource_name):
return self._listdir(self._fn(self.module_path,resource_name))
def metadata_listdir(self,name):
if self.egg_info:
return self._listdir(self._fn(self.egg_info,name))
return []
def run_script(self,script_name,namespace):
script = 'scripts/'+script_name
if not self.has_metadata(script):
raise ResolutionError("No script named %r" % script_name)
script_text = self.get_metadata(script).replace('\r\n','\n')
script_text = script_text.replace('\r','\n')
script_filename = self._fn(self.egg_info,script)
namespace['__file__'] = script_filename
if os.path.exists(script_filename):
execfile(script_filename, namespace, namespace)
else:
from linecache import cache
cache[script_filename] = (
len(script_text), 0, script_text.split('\n'), script_filename
)
script_code = compile(script_text,script_filename,'exec')
exec(script_code, namespace, namespace)
def _has(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _isdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _listdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _fn(self, base, resource_name):
if resource_name:
return os.path.join(base, *resource_name.split('/'))
return base
def _get(self, path):
if hasattr(self.loader, 'get_data'):
return self.loader.get_data(path)
raise NotImplementedError(
"Can't perform this operation for loaders without 'get_data()'"
)
register_loader_type(object, NullProvider)
class EggProvider(NullProvider):
"""Provider based on a virtual filesystem"""
def __init__(self,module):
NullProvider.__init__(self,module)
self._setup_prefix()
def _setup_prefix(self):
# we assume here that our metadata may be nested inside a "basket"
# of multiple eggs; that's why we use module_path instead of .archive
path = self.module_path
old = None
while path!=old:
if path.lower().endswith('.egg'):
self.egg_name = os.path.basename(path)
self.egg_info = os.path.join(path, 'EGG-INFO')
self.egg_root = path
break
old = path
path, base = os.path.split(path)
class DefaultProvider(EggProvider):
"""Provides access to package resources in the filesystem"""
def _has(self, path):
return os.path.exists(path)
def _isdir(self,path):
return os.path.isdir(path)
def _listdir(self,path):
return os.listdir(path)
def get_resource_stream(self, manager, resource_name):
return open(self._fn(self.module_path, resource_name), 'rb')
def _get(self, path):
stream = open(path, 'rb')
try:
return stream.read()
finally:
stream.close()
register_loader_type(type(None), DefaultProvider)
if importlib_bootstrap is not None:
register_loader_type(importlib_bootstrap.SourceFileLoader, DefaultProvider)
class EmptyProvider(NullProvider):
"""Provider that returns nothing for all requests"""
_isdir = _has = lambda self,path: False
_get = lambda self,path: ''
_listdir = lambda self,path: []
module_path = None
def __init__(self):
pass
empty_provider = EmptyProvider()
def build_zipmanifest(path):
"""
This builds a similar dictionary to the zipimport directory
caches. However instead of tuples, ZipInfo objects are stored.
The translation of the tuple is as follows:
    * [0] - zipinfo.filename; on stock Pythons this needs "/" --> os.sep,
            while on PyPy it is already os.sep (one reason why distribute
            did work in some cases on PyPy and win32).
* [1] - zipinfo.compress_type
* [2] - zipinfo.compress_size
* [3] - zipinfo.file_size
* [4] - len(utf-8 encoding of filename) if zipinfo & 0x800
len(ascii encoding of filename) otherwise
* [5] - (zipinfo.date_time[0] - 1980) << 9 |
zipinfo.date_time[1] << 5 | zipinfo.date_time[2]
* [6] - (zipinfo.date_time[3] - 1980) << 11 |
zipinfo.date_time[4] << 5 | (zipinfo.date_time[5] // 2)
* [7] - zipinfo.CRC
"""
zipinfo = dict()
zfile = zipfile.ZipFile(path)
    # Note: ZipFile has no __exit__ on Python 3.1, so use try/finally rather than a with-statement
try:
for zitem in zfile.namelist():
zpath = zitem.replace('/', os.sep)
zipinfo[zpath] = zfile.getinfo(zitem)
assert zipinfo[zpath] is not None
finally:
zfile.close()
return zipinfo
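# Illustrative usage (the path is hypothetical):
#   manifest = build_zipmanifest('/path/to/Example-1.0.egg')
#   info = manifest['EGG-INFO' + os.sep + 'PKG-INFO']  # a zipfile.ZipInfo
#   info.file_size, info.date_time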
class ZipProvider(EggProvider):
"""Resource support for zips and eggs"""
eagers = None
def __init__(self, module):
EggProvider.__init__(self,module)
self.zipinfo = build_zipmanifest(self.loader.archive)
self.zip_pre = self.loader.archive+os.sep
def _zipinfo_name(self, fspath):
# Convert a virtual filename (full path to file) into a zipfile subpath
# usable with the zipimport directory cache for our target archive
if fspath.startswith(self.zip_pre):
return fspath[len(self.zip_pre):]
raise AssertionError(
"%s is not a subpath of %s" % (fspath,self.zip_pre)
)
def _parts(self,zip_path):
# Convert a zipfile subpath into an egg-relative path part list
fspath = self.zip_pre+zip_path # pseudo-fs path
if fspath.startswith(self.egg_root+os.sep):
return fspath[len(self.egg_root)+1:].split(os.sep)
raise AssertionError(
"%s is not a subpath of %s" % (fspath,self.egg_root)
)
def get_resource_filename(self, manager, resource_name):
if not self.egg_name:
raise NotImplementedError(
"resource_filename() only supported for .egg, not .zip"
)
# no need to lock for extraction, since we use temp names
zip_path = self._resource_to_zip(resource_name)
eagers = self._get_eager_resources()
if '/'.join(self._parts(zip_path)) in eagers:
for name in eagers:
self._extract_resource(manager, self._eager_to_zip(name))
return self._extract_resource(manager, zip_path)
@staticmethod
def _get_date_and_size(zip_stat):
size = zip_stat.file_size
date_time = zip_stat.date_time + (0, 0, -1) # ymdhms+wday, yday, dst
        # the 1980 zip-epoch offset is already applied in date_time
timestamp = time.mktime(date_time)
return timestamp, size
def _extract_resource(self, manager, zip_path):
if zip_path in self._index():
for name in self._index()[zip_path]:
last = self._extract_resource(
manager, os.path.join(zip_path, name)
)
return os.path.dirname(last) # return the extracted directory name
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not WRITE_SUPPORT:
raise IOError('"os.rename" and "os.unlink" are not supported '
'on this platform')
try:
real_path = manager.get_cache_path(
self.egg_name, self._parts(zip_path)
)
if self._is_current(real_path, zip_path):
return real_path
outf, tmpnam = _mkstemp(".$extract", dir=os.path.dirname(real_path))
os.write(outf, self.loader.get_data(zip_path))
os.close(outf)
utime(tmpnam, (timestamp,timestamp))
manager.postprocess(tmpnam, real_path)
try:
rename(tmpnam, real_path)
except os.error:
if os.path.isfile(real_path):
if self._is_current(real_path, zip_path):
# the file became current since it was checked above,
# so proceed.
return real_path
elif os.name=='nt': # Windows, del old file and retry
unlink(real_path)
rename(tmpnam, real_path)
return real_path
raise
except os.error:
manager.extraction_error() # report a user-friendly error
return real_path
def _is_current(self, file_path, zip_path):
"""
Return True if the file_path is current for this zip_path
"""
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not os.path.isfile(file_path):
return False
stat = os.stat(file_path)
if stat.st_size!=size or stat.st_mtime!=timestamp:
return False
# check that the contents match
zip_contents = self.loader.get_data(zip_path)
f = open(file_path, 'rb')
file_contents = f.read()
f.close()
return zip_contents == file_contents
def _get_eager_resources(self):
if self.eagers is None:
eagers = []
for name in ('native_libs.txt', 'eager_resources.txt'):
if self.has_metadata(name):
eagers.extend(self.get_metadata_lines(name))
self.eagers = eagers
return self.eagers
def _index(self):
try:
return self._dirindex
except AttributeError:
ind = {}
for path in self.zipinfo:
parts = path.split(os.sep)
while parts:
parent = os.sep.join(parts[:-1])
if parent in ind:
ind[parent].append(parts[-1])
break
else:
ind[parent] = [parts.pop()]
self._dirindex = ind
return ind
def _has(self, fspath):
zip_path = self._zipinfo_name(fspath)
return zip_path in self.zipinfo or zip_path in self._index()
def _isdir(self,fspath):
return self._zipinfo_name(fspath) in self._index()
def _listdir(self,fspath):
return list(self._index().get(self._zipinfo_name(fspath), ()))
def _eager_to_zip(self,resource_name):
return self._zipinfo_name(self._fn(self.egg_root,resource_name))
def _resource_to_zip(self,resource_name):
return self._zipinfo_name(self._fn(self.module_path,resource_name))
register_loader_type(zipimport.zipimporter, ZipProvider)
class FileMetadata(EmptyProvider):
"""Metadata handler for standalone PKG-INFO files
Usage::
metadata = FileMetadata("/path/to/PKG-INFO")
This provider rejects all data and metadata requests except for PKG-INFO,
which is treated as existing, and will be the contents of the file at
the provided location.
"""
def __init__(self,path):
self.path = path
def has_metadata(self,name):
return name=='PKG-INFO'
def get_metadata(self,name):
if name=='PKG-INFO':
f = open(self.path,'rU')
metadata = f.read()
f.close()
return metadata
raise KeyError("No metadata except PKG-INFO is available")
def get_metadata_lines(self,name):
return yield_lines(self.get_metadata(name))
class PathMetadata(DefaultProvider):
"""Metadata provider for egg directories
Usage::
# Development eggs:
egg_info = "/path/to/PackageName.egg-info"
base_dir = os.path.dirname(egg_info)
metadata = PathMetadata(base_dir, egg_info)
dist_name = os.path.splitext(os.path.basename(egg_info))[0]
dist = Distribution(basedir,project_name=dist_name,metadata=metadata)
# Unpacked egg directories:
egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
dist = Distribution.from_filename(egg_path, metadata=metadata)
"""
def __init__(self, path, egg_info):
self.module_path = path
self.egg_info = egg_info
class EggMetadata(ZipProvider):
"""Metadata provider for .egg files"""
def __init__(self, importer):
"""Create a metadata provider from a zipimporter"""
self.zipinfo = build_zipmanifest(importer.archive)
self.zip_pre = importer.archive+os.sep
self.loader = importer
if importer.prefix:
self.module_path = os.path.join(importer.archive, importer.prefix)
else:
self.module_path = importer.archive
self._setup_prefix()
_declare_state('dict', _distribution_finders = {})
def register_finder(importer_type, distribution_finder):
"""Register `distribution_finder` to find distributions in sys.path items
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `distribution_finder` is a callable that, passed a path
item and the importer instance, yields ``Distribution`` instances found on
that path item. See ``pkg_resources.find_on_path`` for an example."""
_distribution_finders[importer_type] = distribution_finder
def find_distributions(path_item, only=False):
"""Yield distributions accessible via `path_item`"""
importer = get_importer(path_item)
finder = _find_adapter(_distribution_finders, importer)
return finder(importer, path_item, only)
def find_eggs_in_zip(importer, path_item, only=False):
"""
Find eggs in zip files; possibly multiple nested eggs.
"""
if importer.archive.endswith('.whl'):
# wheels are not supported with this finder
# they don't have PKG-INFO metadata, and won't ever contain eggs
return
metadata = EggMetadata(importer)
if metadata.has_metadata('PKG-INFO'):
yield Distribution.from_filename(path_item, metadata=metadata)
if only:
return # don't yield nested distros
for subitem in metadata.resource_listdir('/'):
if subitem.endswith('.egg'):
subpath = os.path.join(path_item, subitem)
for dist in find_eggs_in_zip(zipimport.zipimporter(subpath), subpath):
yield dist
register_finder(zipimport.zipimporter, find_eggs_in_zip)
def find_nothing(importer, path_item, only=False):
return ()
register_finder(object,find_nothing)
def find_on_path(importer, path_item, only=False):
"""Yield distributions accessible on a sys.path directory"""
path_item = _normalize_cached(path_item)
if os.path.isdir(path_item) and os.access(path_item, os.R_OK):
if path_item.lower().endswith('.egg'):
# unpacked egg
yield Distribution.from_filename(
path_item, metadata=PathMetadata(
path_item, os.path.join(path_item,'EGG-INFO')
)
)
else:
# scan for .egg and .egg-info in directory
for entry in os.listdir(path_item):
lower = entry.lower()
if lower.endswith('.egg-info') or lower.endswith('.dist-info'):
fullpath = os.path.join(path_item, entry)
if os.path.isdir(fullpath):
# egg-info directory, allow getting metadata
metadata = PathMetadata(path_item, fullpath)
else:
metadata = FileMetadata(fullpath)
yield Distribution.from_location(
path_item,entry,metadata,precedence=DEVELOP_DIST
)
elif not only and lower.endswith('.egg'):
for dist in find_distributions(os.path.join(path_item, entry)):
yield dist
elif not only and lower.endswith('.egg-link'):
entry_file = open(os.path.join(path_item, entry))
try:
entry_lines = entry_file.readlines()
finally:
entry_file.close()
for line in entry_lines:
if not line.strip(): continue
for item in find_distributions(os.path.join(path_item,line.rstrip())):
yield item
break
register_finder(pkgutil.ImpImporter,find_on_path)
if importlib_bootstrap is not None:
register_finder(importlib_bootstrap.FileFinder, find_on_path)
_declare_state('dict', _namespace_handlers={})
_declare_state('dict', _namespace_packages={})
def register_namespace_handler(importer_type, namespace_handler):
"""Register `namespace_handler` to declare namespace packages
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `namespace_handler` is a callable like this::
def namespace_handler(importer,path_entry,moduleName,module):
# return a path_entry to use for child packages
Namespace handlers are only called if the importer object has already
agreed that it can handle the relevant path item, and they should only
return a subpath if the module __path__ does not already contain an
equivalent subpath. For an example namespace handler, see
``pkg_resources.file_ns_handler``.
"""
_namespace_handlers[importer_type] = namespace_handler
def _handle_ns(packageName, path_item):
"""Ensure that named package includes a subpath of path_item (if needed)"""
importer = get_importer(path_item)
if importer is None:
return None
loader = importer.find_module(packageName)
if loader is None:
return None
module = sys.modules.get(packageName)
if module is None:
module = sys.modules[packageName] = imp.new_module(packageName)
module.__path__ = []
_set_parent_ns(packageName)
elif not hasattr(module,'__path__'):
raise TypeError("Not a package:", packageName)
handler = _find_adapter(_namespace_handlers, importer)
subpath = handler(importer,path_item,packageName,module)
if subpath is not None:
path = module.__path__
path.append(subpath)
loader.load_module(packageName)
module.__path__ = path
return subpath
def declare_namespace(packageName):
"""Declare that package 'packageName' is a namespace package"""
imp.acquire_lock()
try:
if packageName in _namespace_packages:
return
path, parent = sys.path, None
if '.' in packageName:
parent = '.'.join(packageName.split('.')[:-1])
declare_namespace(parent)
if parent not in _namespace_packages:
__import__(parent)
try:
path = sys.modules[parent].__path__
except AttributeError:
raise TypeError("Not a package:", parent)
# Track what packages are namespaces, so when new path items are added,
# they can be updated
_namespace_packages.setdefault(parent,[]).append(packageName)
_namespace_packages.setdefault(packageName,[])
for path_item in path:
# Ensure all the parent's path items are reflected in the child,
# if they apply
_handle_ns(packageName, path_item)
finally:
imp.release_lock()
def fixup_namespace_packages(path_item, parent=None):
"""Ensure that previously-declared namespace packages include path_item"""
imp.acquire_lock()
try:
for package in _namespace_packages.get(parent,()):
subpath = _handle_ns(package, path_item)
if subpath: fixup_namespace_packages(subpath,package)
finally:
imp.release_lock()
def file_ns_handler(importer, path_item, packageName, module):
"""Compute an ns-package subpath for a filesystem or zipfile importer"""
subpath = os.path.join(path_item, packageName.split('.')[-1])
normalized = _normalize_cached(subpath)
for item in module.__path__:
if _normalize_cached(item)==normalized:
break
else:
# Only return the path if it's not already there
return subpath
register_namespace_handler(pkgutil.ImpImporter,file_ns_handler)
register_namespace_handler(zipimport.zipimporter,file_ns_handler)
if importlib_bootstrap is not None:
register_namespace_handler(importlib_bootstrap.FileFinder, file_ns_handler)
def null_ns_handler(importer, path_item, packageName, module):
return None
register_namespace_handler(object,null_ns_handler)
def normalize_path(filename):
"""Normalize a file/dir name for comparison purposes"""
return os.path.normcase(os.path.realpath(filename))
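# e.g. normalize_path('/usr/local/../lib') -> '/usr/lib' on a typical POSIX
# layout; realpath also resolves symlinks, and normcase lowercases on Windows.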
def _normalize_cached(filename,_cache={}):
try:
return _cache[filename]
except KeyError:
_cache[filename] = result = normalize_path(filename)
return result
def _set_parent_ns(packageName):
parts = packageName.split('.')
name = parts.pop()
if parts:
parent = '.'.join(parts)
setattr(sys.modules[parent], name, sys.modules[packageName])
def yield_lines(strs):
"""Yield non-empty/non-comment lines of a ``basestring`` or sequence"""
if isinstance(strs,basestring):
for s in strs.splitlines():
s = s.strip()
if s and not s.startswith('#'): # skip blank lines/comments
yield s
else:
for ss in strs:
for s in yield_lines(ss):
yield s
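# Illustrative usage (doctest-style):
#   >>> list(yield_lines("one\n# a comment\n\ntwo "))
#   ['one', 'two']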
LINE_END = re.compile(r"\s*(#.*)?$").match # whitespace and comment
CONTINUE = re.compile(r"\s*\\\s*(#.*)?$").match # line continuation
DISTRO = re.compile(r"\s*((\w|[-.])+)").match # Distribution or extra
VERSION = re.compile(r"\s*(<=?|>=?|==|!=)\s*((\w|[-.])+)").match # ver. info
COMMA = re.compile(r"\s*,").match # comma between items
OBRACKET = re.compile(r"\s*\[").match
CBRACKET = re.compile(r"\s*\]").match
MODULE = re.compile(r"\w+(\.\w+)*$").match
EGG_NAME = re.compile(
r"(?P<name>[^-]+)"
r"( -(?P<ver>[^-]+) (-py(?P<pyver>[^-]+) (-(?P<plat>.+))? )? )?",
re.VERBOSE | re.IGNORECASE
).match
component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE)
replace = {'pre':'c', 'preview':'c','-':'final-','rc':'c','dev':'@'}.get
def _parse_version_parts(s):
for part in component_re.split(s):
part = replace(part,part)
if not part or part=='.':
continue
if part[:1] in '0123456789':
yield part.zfill(8) # pad for numeric comparison
else:
yield '*'+part
yield '*final' # ensure that alpha/beta/candidate are before final
def parse_version(s):
"""Convert a version string to a chronologically-sortable key
This is a rough cross between distutils' StrictVersion and LooseVersion;
if you give it versions that would work with StrictVersion, then it behaves
the same; otherwise it acts like a slightly-smarter LooseVersion. It is
*possible* to create pathological version coding schemes that will fool
this parser, but they should be very rare in practice.
The returned value will be a tuple of strings. Numeric portions of the
version are padded to 8 digits so they will compare numerically, but
without relying on how numbers compare relative to strings. Dots are
dropped, but dashes are retained. Trailing zeros between alpha segments
or dashes are suppressed, so that e.g. "2.4.0" is considered the same as
"2.4". Alphanumeric parts are lower-cased.
    The algorithm assumes that strings like "-" and any alpha string that
    alphabetically follows "final" represent a "patch level". So, "2.4-1"
is assumed to be a branch or patch of "2.4", and therefore "2.4.1" is
considered newer than "2.4-1", which in turn is newer than "2.4".
Strings like "a", "b", "c", "alpha", "beta", "candidate" and so on (that
come before "final" alphabetically) are assumed to be pre-release versions,
so that the version "2.4" is considered newer than "2.4a1".
Finally, to handle miscellaneous cases, the strings "pre", "preview", and
"rc" are treated as if they were "c", i.e. as though they were release
candidates, and therefore are not as new as a version string that does not
    contain them, and "dev" is replaced with an '@' so that it sorts lower
    than any other pre-release tag.
"""
parts = []
for part in _parse_version_parts(s.lower()):
if part.startswith('*'):
if part<'*final': # remove '-' before a prerelease tag
while parts and parts[-1]=='*final-': parts.pop()
# remove trailing zeros from each series of numeric parts
while parts and parts[-1]=='00000000':
parts.pop()
parts.append(part)
return tuple(parts)
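# Illustrative ordering (doctest-style):
#   >>> parse_version('2.4.0') == parse_version('2.4')
#   True
#   >>> parse_version('2.4') > parse_version('2.4a1') > parse_version('2.4.dev1')
#   True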
class EntryPoint(object):
"""Object representing an advertised importable object"""
def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
if not MODULE(module_name):
raise ValueError("Invalid module name", module_name)
self.name = name
self.module_name = module_name
self.attrs = tuple(attrs)
self.extras = Requirement.parse(("x[%s]" % ','.join(extras))).extras
self.dist = dist
def __str__(self):
s = "%s = %s" % (self.name, self.module_name)
if self.attrs:
s += ':' + '.'.join(self.attrs)
if self.extras:
s += ' [%s]' % ','.join(self.extras)
return s
def __repr__(self):
return "EntryPoint.parse(%r)" % str(self)
def load(self, require=True, env=None, installer=None):
if require: self.require(env, installer)
entry = __import__(self.module_name, globals(),globals(), ['__name__'])
for attr in self.attrs:
try:
entry = getattr(entry,attr)
except AttributeError:
raise ImportError("%r has no %r attribute" % (entry,attr))
return entry
def require(self, env=None, installer=None):
if self.extras and not self.dist:
raise UnknownExtra("Can't require() without a distribution", self)
list(map(working_set.add,
working_set.resolve(self.dist.requires(self.extras),env,installer)))
@classmethod
def parse(cls, src, dist=None):
"""Parse a single entry point from string `src`
Entry point syntax follows the form::
name = some.module:some.attr [extra1,extra2]
The entry name and module name are required, but the ``:attrs`` and
``[extras]`` parts are optional
"""
try:
attrs = extras = ()
name,value = src.split('=',1)
if '[' in value:
value,extras = value.split('[',1)
req = Requirement.parse("x["+extras)
if req.specs: raise ValueError
extras = req.extras
if ':' in value:
value,attrs = value.split(':',1)
if not MODULE(attrs.rstrip()):
raise ValueError
attrs = attrs.rstrip().split('.')
except ValueError:
raise ValueError(
"EntryPoint must be in 'name=module:attrs [extras]' format",
src
)
else:
return cls(name.strip(), value.strip(), attrs, extras, dist)
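    # Illustrative parse (the names are hypothetical):
    #   >>> ep = EntryPoint.parse("mytool = mypkg.cli:main [extra1]")
    #   >>> ep.name, ep.module_name, ep.attrs, ep.extras
    #   ('mytool', 'mypkg.cli', ('main',), ('extra1',))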
@classmethod
def parse_group(cls, group, lines, dist=None):
"""Parse an entry point group"""
if not MODULE(group):
raise ValueError("Invalid group name", group)
this = {}
for line in yield_lines(lines):
ep = cls.parse(line, dist)
if ep.name in this:
raise ValueError("Duplicate entry point", group, ep.name)
this[ep.name]=ep
return this
@classmethod
def parse_map(cls, data, dist=None):
"""Parse a map of entry point groups"""
if isinstance(data,dict):
data = data.items()
else:
data = split_sections(data)
maps = {}
for group, lines in data:
if group is None:
if not lines:
continue
raise ValueError("Entry points must be listed in groups")
group = group.strip()
if group in maps:
raise ValueError("Duplicate group name", group)
maps[group] = cls.parse_group(group, lines, dist)
return maps
def _remove_md5_fragment(location):
if not location:
return ''
parsed = urlparse(location)
if parsed[-1].startswith('md5='):
return urlunparse(parsed[:-1] + ('',))
return location
class Distribution(object):
"""Wrap an actual or potential sys.path entry w/metadata"""
PKG_INFO = 'PKG-INFO'
def __init__(self, location=None, metadata=None, project_name=None,
version=None, py_version=PY_MAJOR, platform=None,
precedence=EGG_DIST):
self.project_name = safe_name(project_name or 'Unknown')
if version is not None:
self._version = safe_version(version)
self.py_version = py_version
self.platform = platform
self.location = location
self.precedence = precedence
self._provider = metadata or empty_provider
@classmethod
def from_location(cls,location,basename,metadata=None,**kw):
project_name, version, py_version, platform = [None]*4
basename, ext = os.path.splitext(basename)
if ext.lower() in _distributionImpl:
# .dist-info gets much metadata differently
match = EGG_NAME(basename)
if match:
project_name, version, py_version, platform = match.group(
'name','ver','pyver','plat'
)
cls = _distributionImpl[ext.lower()]
return cls(
location, metadata, project_name=project_name, version=version,
py_version=py_version, platform=platform, **kw
)
hashcmp = property(
lambda self: (
getattr(self,'parsed_version',()),
self.precedence,
self.key,
_remove_md5_fragment(self.location),
self.py_version,
self.platform
)
)
def __hash__(self): return hash(self.hashcmp)
def __lt__(self, other):
return self.hashcmp < other.hashcmp
def __le__(self, other):
return self.hashcmp <= other.hashcmp
def __gt__(self, other):
return self.hashcmp > other.hashcmp
def __ge__(self, other):
return self.hashcmp >= other.hashcmp
def __eq__(self, other):
if not isinstance(other, self.__class__):
# It's not a Distribution, so they are not equal
return False
return self.hashcmp == other.hashcmp
def __ne__(self, other):
return not self == other
# These properties have to be lazy so that we don't have to load any
# metadata until/unless it's actually needed. (i.e., some distributions
# may not know their name or version without loading PKG-INFO)
@property
def key(self):
try:
return self._key
except AttributeError:
self._key = key = self.project_name.lower()
return key
@property
def parsed_version(self):
try:
return self._parsed_version
except AttributeError:
self._parsed_version = pv = parse_version(self.version)
return pv
@property
def version(self):
try:
return self._version
except AttributeError:
for line in self._get_metadata(self.PKG_INFO):
if line.lower().startswith('version:'):
self._version = safe_version(line.split(':',1)[1].strip())
return self._version
else:
raise ValueError(
"Missing 'Version:' header and/or %s file" % self.PKG_INFO, self
)
@property
def _dep_map(self):
try:
return self.__dep_map
except AttributeError:
dm = self.__dep_map = {None: []}
for name in 'requires.txt', 'depends.txt':
for extra,reqs in split_sections(self._get_metadata(name)):
if extra:
if ':' in extra:
extra, marker = extra.split(':',1)
if invalid_marker(marker):
reqs=[] # XXX warn
elif not evaluate_marker(marker):
reqs=[]
extra = safe_extra(extra) or None
dm.setdefault(extra,[]).extend(parse_requirements(reqs))
return dm
def requires(self,extras=()):
"""List of Requirements needed for this distro if `extras` are used"""
dm = self._dep_map
deps = []
deps.extend(dm.get(None,()))
for ext in extras:
try:
deps.extend(dm[safe_extra(ext)])
except KeyError:
raise UnknownExtra(
"%s has no such extra feature %r" % (self, ext)
)
return deps
def _get_metadata(self,name):
if self.has_metadata(name):
for line in self.get_metadata_lines(name):
yield line
def activate(self,path=None):
"""Ensure distribution is importable on `path` (default=sys.path)"""
if path is None: path = sys.path
self.insert_on(path)
if path is sys.path:
fixup_namespace_packages(self.location)
list(map(declare_namespace, self._get_metadata('namespace_packages.txt')))
def egg_name(self):
"""Return what this distribution's standard .egg filename should be"""
filename = "%s-%s-py%s" % (
to_filename(self.project_name), to_filename(self.version),
self.py_version or PY_MAJOR
)
if self.platform:
filename += '-'+self.platform
return filename
def __repr__(self):
if self.location:
return "%s (%s)" % (self,self.location)
else:
return str(self)
def __str__(self):
try: version = getattr(self,'version',None)
except ValueError: version = None
version = version or "[unknown version]"
return "%s %s" % (self.project_name,version)
def __getattr__(self,attr):
"""Delegate all unrecognized public attributes to .metadata provider"""
if attr.startswith('_'):
raise AttributeError(attr)
return getattr(self._provider, attr)
@classmethod
def from_filename(cls,filename,metadata=None, **kw):
return cls.from_location(
_normalize_cached(filename), os.path.basename(filename), metadata,
**kw
)
def as_requirement(self):
"""Return a ``Requirement`` that matches this distribution exactly"""
return Requirement.parse('%s==%s' % (self.project_name, self.version))
def load_entry_point(self, group, name):
"""Return the `name` entry point of `group` or raise ImportError"""
ep = self.get_entry_info(group,name)
if ep is None:
raise ImportError("Entry point %r not found" % ((group,name),))
return ep.load()
def get_entry_map(self, group=None):
"""Return the entry point map for `group`, or the full entry map"""
try:
ep_map = self._ep_map
except AttributeError:
ep_map = self._ep_map = EntryPoint.parse_map(
self._get_metadata('entry_points.txt'), self
)
if group is not None:
return ep_map.get(group,{})
return ep_map
def get_entry_info(self, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return self.get_entry_map(group).get(name)
def insert_on(self, path, loc = None):
"""Insert self.location in path before its nearest parent directory"""
loc = loc or self.location
if not loc:
return
nloc = _normalize_cached(loc)
bdir = os.path.dirname(nloc)
npath= [(p and _normalize_cached(p) or p) for p in path]
for p, item in enumerate(npath):
if item==nloc:
break
elif item==bdir and self.precedence==EGG_DIST:
# if it's an .egg, give it precedence over its directory
if path is sys.path:
self.check_version_conflict()
path.insert(p, loc)
npath.insert(p, nloc)
break
else:
if path is sys.path:
self.check_version_conflict()
path.append(loc)
return
# p is the spot where we found or inserted loc; now remove duplicates
while 1:
try:
np = npath.index(nloc, p+1)
except ValueError:
break
else:
del npath[np], path[np]
p = np # ha!
return
def check_version_conflict(self):
if self.key=='setuptools':
return # ignore the inevitable setuptools self-conflicts :(
nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
loc = normalize_path(self.location)
for modname in self._get_metadata('top_level.txt'):
if (modname not in sys.modules or modname in nsp
or modname in _namespace_packages):
continue
if modname in ('pkg_resources', 'setuptools', 'site'):
continue
fn = getattr(sys.modules[modname], '__file__', None)
if fn and (normalize_path(fn).startswith(loc) or
fn.startswith(self.location)):
continue
issue_warning(
"Module %s was already imported from %s, but %s is being added"
" to sys.path" % (modname, fn, self.location),
)
def has_version(self):
try:
self.version
except ValueError:
issue_warning("Unbuilt egg for "+repr(self))
return False
return True
def clone(self,**kw):
"""Copy this distribution, substituting in any changed keyword args"""
for attr in (
'project_name', 'version', 'py_version', 'platform', 'location',
'precedence'
):
kw.setdefault(attr, getattr(self,attr,None))
kw.setdefault('metadata', self._provider)
return self.__class__(**kw)
@property
def extras(self):
return [dep for dep in self._dep_map if dep]
class DistInfoDistribution(Distribution):
"""Wrap an actual or potential sys.path entry w/metadata, .dist-info style"""
PKG_INFO = 'METADATA'
EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
@property
def _parsed_pkg_info(self):
"""Parse and cache metadata"""
try:
return self._pkg_info
except AttributeError:
from email.parser import Parser
self._pkg_info = Parser().parsestr(self.get_metadata(self.PKG_INFO))
return self._pkg_info
@property
def _dep_map(self):
try:
return self.__dep_map
except AttributeError:
self.__dep_map = self._compute_dependencies()
return self.__dep_map
def _preparse_requirement(self, requires_dist):
"""Convert 'Foobar (1); baz' to ('Foobar ==1', 'baz')
Split environment marker, add == prefix to version specifiers as
necessary, and remove parenthesis.
"""
parts = requires_dist.split(';', 1) + ['']
distvers = parts[0].strip()
mark = parts[1].strip()
distvers = re.sub(self.EQEQ, r"\1==\2\3", distvers)
distvers = distvers.replace('(', '').replace(')', '')
return (distvers, mark)
def _compute_dependencies(self):
"""Recompute this distribution's dependencies."""
from pip._vendor._markerlib import compile as compile_marker
dm = self.__dep_map = {None: []}
reqs = []
# Including any condition expressions
for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
distvers, mark = self._preparse_requirement(req)
parsed = next(parse_requirements(distvers))
parsed.marker_fn = compile_marker(mark)
reqs.append(parsed)
def reqs_for_extra(extra):
for req in reqs:
if req.marker_fn(override={'extra':extra}):
yield req
common = frozenset(reqs_for_extra(None))
dm[None].extend(common)
for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
extra = safe_extra(extra.strip())
dm[extra] = list(frozenset(reqs_for_extra(extra)) - common)
return dm
_distributionImpl = {
'.egg': Distribution,
'.egg-info': Distribution,
'.dist-info': DistInfoDistribution,
}
def issue_warning(*args,**kw):
level = 1
g = globals()
try:
# find the first stack frame that is *not* code in
# the pkg_resources module, to use for the warning
while sys._getframe(level).f_globals is g:
level += 1
except ValueError:
pass
from warnings import warn
warn(stacklevel = level+1, *args, **kw)
def parse_requirements(strs):
"""Yield ``Requirement`` objects for each specification in `strs`
`strs` must be an instance of ``basestring``, or a (possibly-nested)
iterable thereof.
"""
# create a steppable iterator, so we can handle \-continuations
lines = iter(yield_lines(strs))
def scan_list(ITEM,TERMINATOR,line,p,groups,item_name):
items = []
while not TERMINATOR(line,p):
if CONTINUE(line,p):
try:
line = next(lines)
p = 0
except StopIteration:
raise ValueError(
"\\ must not appear on the last nonblank line"
)
match = ITEM(line,p)
if not match:
raise ValueError("Expected "+item_name+" in",line,"at",line[p:])
items.append(match.group(*groups))
p = match.end()
match = COMMA(line,p)
if match:
p = match.end() # skip the comma
elif not TERMINATOR(line,p):
raise ValueError(
"Expected ',' or end-of-list in",line,"at",line[p:]
)
match = TERMINATOR(line,p)
if match: p = match.end() # skip the terminator, if any
return line, p, items
for line in lines:
match = DISTRO(line)
if not match:
raise ValueError("Missing distribution spec", line)
project_name = match.group(1)
p = match.end()
extras = []
match = OBRACKET(line,p)
if match:
p = match.end()
line, p, extras = scan_list(
DISTRO, CBRACKET, line, p, (1,), "'extra' name"
)
line, p, specs = scan_list(VERSION,LINE_END,line,p,(1,2),"version spec")
specs = [(op,safe_version(val)) for op,val in specs]
yield Requirement(project_name, specs, extras)
def _sort_dists(dists):
tmp = [(dist.hashcmp,dist) for dist in dists]
tmp.sort()
dists[::-1] = [d for hc,d in tmp]
class Requirement:
def __init__(self, project_name, specs, extras):
"""DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
self.unsafe_name, project_name = project_name, safe_name(project_name)
self.project_name, self.key = project_name, project_name.lower()
index = [(parse_version(v),state_machine[op],op,v) for op,v in specs]
index.sort()
self.specs = [(op,ver) for parsed,trans,op,ver in index]
self.index, self.extras = index, tuple(map(safe_extra,extras))
self.hashCmp = (
self.key, tuple([(op,parsed) for parsed,trans,op,ver in index]),
frozenset(self.extras)
)
self.__hash = hash(self.hashCmp)
def __str__(self):
specs = ','.join([''.join(s) for s in self.specs])
extras = ','.join(self.extras)
if extras: extras = '[%s]' % extras
return '%s%s%s' % (self.project_name, extras, specs)
def __eq__(self,other):
return isinstance(other,Requirement) and self.hashCmp==other.hashCmp
def __contains__(self,item):
if isinstance(item,Distribution):
if item.key != self.key: return False
if self.index: item = item.parsed_version # only get if we need it
elif isinstance(item,basestring):
item = parse_version(item)
last = None
compare = lambda a, b: (a > b) - (a < b) # -1, 0, 1
for parsed,trans,op,ver in self.index:
action = trans[compare(item,parsed)] # Indexing: 0, 1, -1
if action=='F':
return False
elif action=='T':
return True
elif action=='+':
last = True
elif action=='-' or last is None: last = False
if last is None: last = True # no rules encountered
return last
def __hash__(self):
return self.__hash
def __repr__(self): return "Requirement.parse(%r)" % str(self)
@staticmethod
def parse(s):
reqs = list(parse_requirements(s))
if reqs:
if len(reqs)==1:
return reqs[0]
raise ValueError("Expected only one requirement", s)
raise ValueError("No requirements found", s)
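# Illustrative usage (doctest-style):
#   >>> req = Requirement.parse("FooBar>=1.2,<2.0")
#   >>> '1.3' in req, '2.1' in req
#   (True, False)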
state_machine = {
# =><
'<': '--T',
'<=': 'T-T',
'>': 'F+F',
'>=': 'T+F',
'==': 'T..',
'!=': 'F++',
}
def _get_mro(cls):
"""Get an mro for a type or classic class"""
if not isinstance(cls,type):
class cls(cls,object): pass
return cls.__mro__[1:]
return cls.__mro__
def _find_adapter(registry, ob):
"""Return an adapter factory for `ob` from `registry`"""
for t in _get_mro(getattr(ob, '__class__', type(ob))):
if t in registry:
return registry[t]
def ensure_directory(path):
"""Ensure that the parent directory of `path` exists"""
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
def split_sections(s):
"""Split a string or iterable thereof into (section,content) pairs
Each ``section`` is a stripped version of the section header ("[section]")
and each ``content`` is a list of stripped lines excluding blank lines and
comment-only lines. If there are any such lines before the first section
header, they're returned in a first ``section`` of ``None``.
"""
section = None
content = []
for line in yield_lines(s):
if line.startswith("["):
if line.endswith("]"):
if section or content:
yield section, content
section = line[1:-1].strip()
content = []
else:
raise ValueError("Invalid section heading", line)
else:
content.append(line)
# wrap up last segment
yield section, content
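# Illustrative usage (doctest-style):
#   >>> list(split_sections("a\n[deps]\nb\nc\n"))
#   [(None, ['a']), ('deps', ['b', 'c'])]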
def _mkstemp(*args,**kw):
from tempfile import mkstemp
old_open = os.open
try:
os.open = os_open # temporarily bypass sandboxing
return mkstemp(*args,**kw)
finally:
os.open = old_open # and then put it back
# Set up global resource manager (deliberately not state-saved)
_manager = ResourceManager()
def _initialize(g):
for name in dir(_manager):
if not name.startswith('_'):
g[name] = getattr(_manager, name)
_initialize(globals())
# Prepare the master working set and make the ``require()`` API available
_declare_state('object', working_set = WorkingSet())
try:
# Does the main program list any requirements?
from __main__ import __requires__
except ImportError:
pass # No: just use the default working set based on sys.path
else:
# Yes: ensure the requirements are met, by prefixing sys.path if necessary
try:
working_set.require(__requires__)
except VersionConflict: # try it without defaults already on sys.path
working_set = WorkingSet([]) # by starting with an empty path
for dist in working_set.resolve(
parse_requirements(__requires__), Environment()
):
working_set.add(dist)
for entry in sys.path: # add any missing entries from sys.path
if entry not in working_set.entries:
working_set.add_entry(entry)
sys.path[:] = working_set.entries # then copy back to sys.path
require = working_set.require
iter_entry_points = working_set.iter_entry_points
add_activation_listener = working_set.subscribe
run_script = working_set.run_script
run_main = run_script # backward compatibility
# Activate all distributions already on sys.path, and ensure that
# all distributions added to the working set in the future (e.g. by
# calling ``require()``) will get activated as well.
add_activation_listener(lambda dist: dist.activate())
working_set.entries=[]
list(map(working_set.add_entry,sys.path)) # match order
|
rupran/ansible | refs/heads/devel | lib/ansible/modules/monitoring/logentries.py | 77 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Ivan Vanderbyl <ivan@app.io>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: logentries
author: "Ivan Vanderbyl (@ivanvanderbyl)"
short_description: Module for tracking logs via logentries.com
description:
- Sends logs to LogEntries in realtime
version_added: "1.6"
options:
path:
description:
- path to a log file
required: true
state:
description:
- following state of the log
choices: [ 'present', 'absent' ]
required: false
default: present
name:
description:
- name of the log
required: false
logtype:
description:
- type of the log
required: false
notes:
- Requires the LogEntries agent which can be installed following the instructions at logentries.com
'''
EXAMPLES = '''
# Track nginx logs
- logentries:
path: /var/log/nginx/access.log
state: present
name: nginx-access-log
# Stop tracking nginx logs
- logentries:
path: /var/log/nginx/error.log
state: absent
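# Track a log under a custom name and type (illustrative values)
- logentries:
    path: /var/log/app/app.log
    state: present
    name: app-log
    logtype: app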
'''
def query_log_status(module, le_path, path, state="present"):
""" Returns whether a log is followed or not. """
if state == "present":
rc, out, err = module.run_command("%s followed %s" % (le_path, path))
if rc == 0:
return True
return False
def follow_log(module, le_path, logs, name=None, logtype=None):
""" Follows one or more logs if not already followed. """
followed_count = 0
for log in logs:
if query_log_status(module, le_path, log):
continue
if module.check_mode:
module.exit_json(changed=True)
cmd = [le_path, 'follow', log]
if name:
cmd.extend(['--name',name])
if logtype:
cmd.extend(['--type',logtype])
rc, out, err = module.run_command(' '.join(cmd))
if not query_log_status(module, le_path, log):
module.fail_json(msg="failed to follow '%s': %s" % (log, err.strip()))
followed_count += 1
if followed_count > 0:
module.exit_json(changed=True, msg="followed %d log(s)" % (followed_count,))
    module.exit_json(changed=False, msg="log(s) already followed")
def unfollow_log(module, le_path, logs):
""" Unfollows one or more logs if followed. """
removed_count = 0
    # Using a for loop so that, in case of error, we can report the log that failed
for log in logs:
# Query the log first, to see if we even need to remove.
if not query_log_status(module, le_path, log):
continue
if module.check_mode:
module.exit_json(changed=True)
rc, out, err = module.run_command([le_path, 'rm', log])
if query_log_status(module, le_path, log):
module.fail_json(msg="failed to remove '%s': %s" % (log, err.strip()))
removed_count += 1
if removed_count > 0:
        module.exit_json(changed=True, msg="unfollowed %d log(s)" % removed_count)
module.exit_json(changed=False, msg="logs(s) already unfollowed")
def main():
module = AnsibleModule(
argument_spec = dict(
path = dict(required=True),
state = dict(default="present", choices=["present", "followed", "absent", "unfollowed"]),
name = dict(required=False, default=None, type='str'),
logtype = dict(required=False, default=None, type='str', aliases=['type'])
),
supports_check_mode=True
)
le_path = module.get_bin_path('le', True, ['/usr/local/bin'])
p = module.params
# Handle multiple log files
logs = p["path"].split(",")
logs = filter(None, logs)
if p["state"] in ["present", "followed"]:
follow_log(module, le_path, logs, name=p['name'], logtype=p['logtype'])
elif p["state"] in ["absent", "unfollowed"]:
unfollow_log(module, le_path, logs)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
cloudant/spark-cloudant | refs/heads/master | test/test-scripts/schema/schema_sample_size_004.py | 3 | #*******************************************************************************
# Copyright (c) 2015 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#******************************************************************************/
from pyspark.sql import SparkSession
from os.path import dirname as dirname
import sys
import requests
# add /test to pythonpath so utils can be imported when running from spark
sys.path.append(dirname(dirname(dirname(__file__))))
import helpers.utils as utils
conf = utils.createSparkConf()
spark = SparkSession\
.builder\
.appName("Cloudant Spark SQL Example in Python using dataframes")\
.config(conf=conf)\
.getOrCreate()
def verify():
customerData = spark.sql("SELECT miles_ytd, total_miles FROM customerTable")
customerData.printSchema()
customerData.show(5)
print(customerData.count())
assert customerData.count() == doc_count
# query the index using Cloudant API to get expected count
test_properties = utils.get_test_properties()
url = "https://{}/{}".format(
test_properties["cloudanthost"], 'n_customer')
print(url)
response = requests.get(url, auth=(test_properties["cloudantusername"], test_properties["cloudantpassword"]))
assert response.status_code == 200
doc_count = response.json().get("doc_count")
print('About to test com.cloudant.spark for n_customer with schemaSampleSize set to -1')
spark.sql("CREATE TEMPORARY TABLE customerTable USING com.cloudant.spark OPTIONS (schemaSampleSize '-1', database 'n_customer')")
verify()
|
asm666/sympy | refs/heads/master | sympy/printing/pretty/stringpict.py | 46 | """Prettyprinter by Jurjen Bos.
(I hate spammers: mail me at pietjepuk314 at the reverse of ku.oc.oohay).
All objects have a method that create a "stringPict",
that can be used in the str method for pretty printing.
Updates by Jason Gedge (email <my last name> at cs mun ca)
- terminal_string() method
- minor fixes and changes (mostly to prettyForm)
TODO:
- Allow left/center/right alignment options for above/below and
top/center/bottom alignment options for left/right
"""
from __future__ import print_function, division
from .pretty_symbology import hobj, vobj, xsym, xobj, pretty_use_unicode
from sympy.core.compatibility import u, string_types, range
class stringPict(object):
"""An ASCII picture.
The pictures are represented as a list of equal length strings.
"""
#special value for stringPict.below
LINE = 'line'
def __init__(self, s, baseline=0):
"""Initialize from string.
Multiline strings are centered.
"""
        #keep the original string around for __len__
        self.s = s
        #picture is a string that just can be printed
self.picture = stringPict.equalLengths(s.splitlines())
#baseline is the line number of the "base line"
self.baseline = baseline
self.binding = None
@staticmethod
def equalLengths(lines):
# empty lines
if not lines:
return ['']
width = max(len(line) for line in lines)
return [line.center(width) for line in lines]
def height(self):
"""The height of the picture in characters."""
return len(self.picture)
def width(self):
"""The width of the picture in characters."""
return len(self.picture[0])
@staticmethod
def next(*args):
"""Put a string of stringPicts next to each other.
Returns string, baseline arguments for stringPict.
"""
#convert everything to stringPicts
objects = []
for arg in args:
if isinstance(arg, string_types):
arg = stringPict(arg)
objects.append(arg)
#make a list of pictures, with equal height and baseline
newBaseline = max(obj.baseline for obj in objects)
newHeightBelowBaseline = max(
obj.height() - obj.baseline
for obj in objects)
newHeight = newBaseline + newHeightBelowBaseline
pictures = []
for obj in objects:
oneEmptyLine = [' '*obj.width()]
basePadding = newBaseline - obj.baseline
totalPadding = newHeight - obj.height()
pictures.append(
oneEmptyLine * basePadding +
obj.picture +
oneEmptyLine * (totalPadding - basePadding))
result = [''.join(lines) for lines in zip(*pictures)]
return '\n'.join(result), newBaseline
def right(self, *args):
r"""Put pictures next to this one.
Returns string, baseline arguments for stringPict.
(Multiline) strings are allowed, and are given a baseline of 0.
Examples
========
>>> from sympy.printing.pretty.stringpict import stringPict
>>> print(stringPict("10").right(" + ",stringPict("1\r-\r2",1))[0])
1
10 + -
2
"""
return stringPict.next(self, *args)
def left(self, *args):
"""Put pictures (left to right) at left.
Returns string, baseline arguments for stringPict.
"""
return stringPict.next(*(args + (self,)))
@staticmethod
def stack(*args):
"""Put pictures on top of each other,
from top to bottom.
Returns string, baseline arguments for stringPict.
        The baseline is the baseline of the second picture.
        Everything is centered.
Strings are allowed.
The special value stringPict.LINE is a row of '-' extended to the width.
"""
#convert everything to stringPicts; keep LINE
objects = []
for arg in args:
if arg is not stringPict.LINE and isinstance(arg, string_types):
arg = stringPict(arg)
objects.append(arg)
#compute new width
newWidth = max(
obj.width()
for obj in objects
if obj is not stringPict.LINE)
lineObj = stringPict(hobj('-', newWidth))
#replace LINE with proper lines
for i, obj in enumerate(objects):
if obj is stringPict.LINE:
objects[i] = lineObj
#stack the pictures, and center the result
newPicture = []
for obj in objects:
newPicture.extend(obj.picture)
newPicture = [line.center(newWidth) for line in newPicture]
newBaseline = objects[0].height() + objects[1].baseline
return '\n'.join(newPicture), newBaseline
def below(self, *args):
"""Put pictures under this picture.
Returns string, baseline arguments for stringPict.
Baseline is baseline of top picture
Examples
========
>>> from sympy.printing.pretty.stringpict import stringPict
>>> print(stringPict("x+3").below(
... stringPict.LINE, '3')[0]) #doctest: +NORMALIZE_WHITESPACE
x+3
---
3
"""
s, baseline = stringPict.stack(self, *args)
return s, self.baseline
def above(self, *args):
"""Put pictures above this picture.
Returns string, baseline arguments for stringPict.
Baseline is baseline of bottom picture.
"""
string, baseline = stringPict.stack(*(args + (self,)))
baseline = len(string.splitlines()) - self.height() + self.baseline
return string, baseline
def parens(self, left='(', right=')', ifascii_nougly=False):
"""Put parentheses around self.
Returns string, baseline arguments for stringPict.
left or right can be None or empty string which means 'no paren from
that side'
"""
h = self.height()
b = self.baseline
# XXX this is a hack -- ascii parens are ugly!
if ifascii_nougly and not pretty_use_unicode():
h = 1
b = 0
res = self
if left:
lparen = stringPict(vobj(left, h), baseline=b)
res = stringPict(*lparen.right(self))
if right:
rparen = stringPict(vobj(right, h), baseline=b)
res = stringPict(*res.right(rparen))
return ('\n'.join(res.picture), res.baseline)
def leftslash(self):
"""Precede object by a slash of the proper size.
"""
# XXX not used anywhere ?
height = max(
self.baseline,
self.height() - 1 - self.baseline)*2 + 1
slash = '\n'.join(
' '*(height - i - 1) + xobj('/', 1) + ' '*i
for i in range(height)
)
return self.left(stringPict(slash, height//2))
def root(self, n=None):
"""Produce a nice root symbol.
Produces ugly results for big n inserts.
"""
# XXX not used anywhere
# XXX duplicate of root drawing in pretty.py
#put line over expression
result = self.above('_'*self.width())
#construct right half of root symbol
height = self.height()
slash = '\n'.join(
' ' * (height - i - 1) + '/' + ' ' * i
for i in range(height)
)
slash = stringPict(slash, height - 1)
#left half of root symbol
if height > 2:
downline = stringPict('\\ \n \\', 1)
else:
downline = stringPict('\\')
#put n on top, as low as possible
if n is not None and n.width() > downline.width():
downline = downline.left(' '*(n.width() - downline.width()))
downline = downline.above(n)
#build root symbol
root = downline.right(slash)
#glue it on at the proper height
        #normally, the root symbol is as high as self
#which is one less than result
#this moves the root symbol one down
#if the root became higher, the baseline has to grow too
root.baseline = result.baseline - result.height() + root.height()
return result.left(root)
def render(self, * args, **kwargs):
"""Return the string form of self.
Unless the argument line_break is set to False, it will
break the expression in a form that can be printed
on the terminal without being broken up.
"""
if kwargs["wrap_line"] is False:
return "\n".join(self.picture)
if kwargs["num_columns"] is not None:
# Read the argument num_columns if it is not None
ncols = kwargs["num_columns"]
else:
# Attempt to get a terminal width
ncols = self.terminal_width()
ncols -= 2
if ncols <= 0:
ncols = 78
# If smaller than the terminal width, no need to correct
if self.width() <= ncols:
return type(self.picture[0])(self)
# for one-line pictures we don't need v-spacers. on the other hand, for
# multiline-pictures, we need v-spacers between blocks, compare:
#
# 2 2 3 | a*c*e + a*c*f + a*d | a*c*e + a*c*f + a*d | 3.14159265358979323
# 6*x *y + 4*x*y + | | *e + a*d*f + b*c*e | 84626433832795
# | *e + a*d*f + b*c*e | + b*c*f + b*d*e + b |
# 3 4 4 | | *d*f |
# 4*y*x + x + y | + b*c*f + b*d*e + b | |
# | | |
# | *d*f
i = 0
svals = []
do_vspacers = (self.height() > 1)
while i < self.width():
svals.extend([ sval[i:i + ncols] for sval in self.picture ])
if do_vspacers:
svals.append("") # a vertical spacer
i += ncols
if svals[-1] == '':
del svals[-1] # Get rid of the last spacer
return "\n".join(svals)
def terminal_width(self):
"""Return the terminal width if possible, otherwise return 0.
"""
ncols = 0
try:
import curses
import io
try:
curses.setupterm()
ncols = curses.tigetnum('cols')
except AttributeError:
# windows curses doesn't implement setupterm or tigetnum
# code below from
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/440694
from ctypes import windll, create_string_buffer
# stdin handle is -10
# stdout handle is -11
# stderr handle is -12
h = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
if res:
import struct
(bufx, bufy, curx, cury, wattr,
left, top, right, bottom, maxx, maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw)
ncols = right - left + 1
except curses.error:
pass
except io.UnsupportedOperation:
pass
except (ImportError, TypeError):
pass
return ncols
def __eq__(self, o):
if isinstance(o, str):
return '\n'.join(self.picture) == o
elif isinstance(o, stringPict):
return o.picture == self.picture
return False
def __hash__(self):
return super(stringPict, self).__hash__()
def __str__(self):
return str.join('\n', self.picture)
def __unicode__(self):
return unicode.join(u('\n'), self.picture)
def __repr__(self):
return "stringPict(%r,%d)" % ('\n'.join(self.picture), self.baseline)
def __getitem__(self, index):
return self.picture[index]
def __len__(self):
return len(self.s)
class prettyForm(stringPict):
"""
Extension of the stringPict class that knows about basic math applications,
optimizing double minus signs.
"Binding" is interpreted as follows::
ATOM this is an atom: never needs to be parenthesized
FUNC this is a function application: parenthesize if added (?)
DIV this is a division: make wider division if divided
POW this is a power: only parenthesize if exponent
MUL this is a multiplication: parenthesize if powered
ADD this is an addition: parenthesize if multiplied or powered
NEG this is a negative number: optimize if added, parenthesize if
multiplied or powered
OPEN this is an open object: parenthesize if added, multiplied, or
powered (example: Piecewise)
"""
ATOM, FUNC, DIV, POW, MUL, ADD, NEG, OPEN = range(8)
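    # For instance, an ADD-bound form gets parenthesized when multiplied
    # (a sketch; output shown for the ASCII multiplication symbol):
    #   prettyForm("a + b", binding=prettyForm.ADD) * prettyForm("c")
    #   renders as "(a + b)*c"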
def __init__(self, s, baseline=0, binding=0, unicode=None):
"""Initialize from stringPict and binding power."""
stringPict.__init__(self, s, baseline)
self.binding = binding
self.unicode = unicode or s
# Note: code to handle subtraction is in _print_Add
def __add__(self, *others):
"""Make a pretty addition.
Addition of negative numbers is simplified.
"""
arg = self
if arg.binding > prettyForm.NEG:
arg = stringPict(*arg.parens())
result = [arg]
for arg in others:
#add parentheses for weak binders
if arg.binding > prettyForm.NEG:
arg = stringPict(*arg.parens())
#use existing minus sign if available
if arg.binding != prettyForm.NEG:
result.append(' + ')
result.append(arg)
return prettyForm(binding=prettyForm.ADD, *stringPict.next(*result))
def __div__(self, den, slashed=False):
"""Make a pretty division; stacked or slashed.
"""
if slashed:
raise NotImplementedError("Can't do slashed fraction yet")
num = self
if num.binding == prettyForm.DIV:
num = stringPict(*num.parens())
if den.binding == prettyForm.DIV:
den = stringPict(*den.parens())
if num.binding==prettyForm.NEG:
num = num.right(" ")[0]
return prettyForm(binding=prettyForm.DIV, *stringPict.stack(
num,
stringPict.LINE,
den))
def __truediv__(self, o):
return self.__div__(o)
def __mul__(self, *others):
"""Make a pretty multiplication.
Parentheses are needed around +, - and neg.
"""
if len(others) == 0:
return self # We aren't actually multiplying... So nothing to do here.
        arg = self
        if arg.binding > prettyForm.MUL:
            arg = stringPict(*arg.parens())
        result = [arg]
for arg in others:
result.append(xsym('*'))
#add parentheses for weak binders
if arg.binding > prettyForm.MUL:
arg = stringPict(*arg.parens())
result.append(arg)
len_res = len(result)
for i in range(len_res):
if i < len_res - 1 and result[i] == '-1' and result[i + 1] == xsym('*'):
# substitute -1 by -, like in -1*x -> -x
result.pop(i)
result.pop(i)
result.insert(i, '-')
if result[0][0] == '-':
# if there is a - sign in front of all
# This test was failing to catch a prettyForm.__mul__(prettyForm("-1", 0, 6)) being negative
bin = prettyForm.NEG
if result[0] == '-':
right = result[1]
if right.picture[right.baseline][0] == '-':
result[0] = '- '
else:
bin = prettyForm.MUL
return prettyForm(binding=bin, *stringPict.next(*result))
def __repr__(self):
return "prettyForm(%r,%d,%d)" % (
'\n'.join(self.picture),
self.baseline,
self.binding)
def __pow__(self, b):
"""Make a pretty power.
"""
a = self
use_inline_func_form = False
if b.binding == prettyForm.POW:
b = stringPict(*b.parens())
if a.binding > prettyForm.FUNC:
a = stringPict(*a.parens())
elif a.binding == prettyForm.FUNC:
# heuristic for when to use inline power
if b.height() > 1:
a = stringPict(*a.parens())
else:
use_inline_func_form = True
if use_inline_func_form:
            #         2
            #  sin  +  (x)
b.baseline = a.prettyFunc.baseline + b.height()
func = stringPict(*a.prettyFunc.right(b))
return prettyForm(*func.right(a.prettyArgs))
else:
            #      2    <-- top
            # (x+y)     <-- bot
top = stringPict(*b.left(' '*a.width()))
bot = stringPict(*a.right(' '*b.width()))
return prettyForm(binding=prettyForm.POW, *bot.above(top))
simpleFunctions = ["sin", "cos", "tan"]
@staticmethod
def apply(function, *args):
"""Functions of one or more variables.
"""
if function in prettyForm.simpleFunctions:
#simple function: use only space if possible
assert len(
args) == 1, "Simple function %s must have 1 argument" % function
arg = args[0].__pretty__()
if arg.binding <= prettyForm.DIV:
#optimization: no parentheses necessary
return prettyForm(binding=prettyForm.FUNC, *arg.left(function + ' '))
argumentList = []
for arg in args:
argumentList.append(',')
argumentList.append(arg.__pretty__())
argumentList = stringPict(*stringPict.next(*argumentList[1:]))
argumentList = stringPict(*argumentList.parens())
return prettyForm(binding=prettyForm.ATOM, *argumentList.left(function))
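# A minimal usage sketch (illustrative; not part of sympy's public API):
# build a stacked fraction from two prettyForm atoms. Only this module's
# own definitions above are assumed.
if __name__ == '__main__':
    num = prettyForm("x + 1")
    den = prettyForm("2")
    # __truediv__ stacks the numerator, a horizontal LINE, and the
    # denominator, producing:
    #   x + 1
    #   -----
    #     2
    print(num / den)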
|
salguarnieri/intellij-community | refs/heads/master | python/testData/codeInsight/controlflow/assignment2.py | 83 | sibling = []
if sibling:
sibling[0].text = 123 |
RetailMeNotSandbox/dartclient | refs/heads/master | docs/source/conf.py | 1 | # -*- coding: utf-8 -*-
#
# dartclient documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 20 11:10:38 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'dartclient'
copyright = u'2016, RetailMeNot, Inc.'
author = u'RetailMeNot, Inc.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.1'
# The full version, including alpha/beta/rc tags.
release = u'1.1.dev1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
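# For example, with the alabaster theme selected above, one might set
# (option names are alabaster's; the values here are illustrative):
#
# html_theme_options = {
#     'description': 'Client library for Dart',
#     'fixed_sidebar': True,
# }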
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'dartclient v1.1.dev1'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'dartclientdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'dartclient.tex', u'dartclient Documentation',
u'RetailMeNot, Inc.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'dartclient', u'dartclient Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'dartclient', u'dartclient Documentation',
author, 'dartclient', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
|
CourseTalk/edx-platform | refs/heads/master | lms/djangoapps/django_comment_client/forum/tests.py | 5 | import json
import logging
import ddt
from django.core.urlresolvers import reverse
from django.http import Http404
from django.test.client import Client, RequestFactory
from django.test.utils import override_settings
from lms.lib.comment_client.utils import CommentClientPaginatedResult
from edxmako.tests import mako_middleware_process_request
from django_comment_common.utils import ThreadContext
from django_comment_client.forum import views
from django_comment_client.permissions import get_team
from django_comment_client.tests.group_id import (
CohortedTopicGroupIdTestMixin,
NonCohortedTopicGroupIdTestMixin
)
from django_comment_client.tests.unicode import UnicodeTestMixin
from django_comment_client.tests.utils import CohortedTestCase
from django_comment_client.utils import strip_none
from student.tests.factories import UserFactory, CourseEnrollmentFactory
from util.testing import UrlResetMixin
from openedx.core.djangoapps.util.testing import ContentGroupTestCase
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import (
ModuleStoreTestCase,
SharedModuleStoreTestCase,
TEST_DATA_MONGO_MODULESTORE,
)
from xmodule.modulestore.tests.factories import check_mongo_calls, CourseFactory, ItemFactory
from courseware.courses import UserNotEnrolled
from nose.tools import assert_true
from mock import patch, Mock, ANY, call
from openedx.core.djangoapps.course_groups.models import CourseUserGroup
from lms.djangoapps.teams.tests.factories import CourseTeamFactory
log = logging.getLogger(__name__)
# pylint: disable=missing-docstring
class ViewsExceptionTestCase(UrlResetMixin, ModuleStoreTestCase):
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
# Patching the ENABLE_DISCUSSION_SERVICE value affects the contents of urls.py,
# so we need to call super.setUp() which reloads urls.py (because
# of the UrlResetMixin)
super(ViewsExceptionTestCase, self).setUp()
# create a course
self.course = CourseFactory.create(org='MITx', course='999',
display_name='Robot Super Course')
# Patch the comment client user save method so it does not try
# to create a new cc user when creating a django user
with patch('student.models.cc.User.save'):
uname = 'student'
email = 'student@edx.org'
password = 'test'
# Create the student
self.student = UserFactory(username=uname, password=password, email=email)
# Enroll the student in the course
CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
# Log the student in
self.client = Client()
assert_true(self.client.login(username=uname, password=password))
@patch('student.models.cc.User.from_django_user')
@patch('student.models.cc.User.active_threads')
def test_user_profile_exception(self, mock_threads, mock_from_django_user):
# Mock the code that makes the HTTP requests to the cs_comment_service app
# for the profiled user's active threads
mock_threads.return_value = [], 1, 1
# Mock the code that makes the HTTP request to the cs_comment_service app
# that gets the current user's info
mock_from_django_user.return_value = Mock()
url = reverse('django_comment_client.forum.views.user_profile',
kwargs={'course_id': self.course.id.to_deprecated_string(), 'user_id': '12345'}) # There is no user 12345
self.response = self.client.get(url)
self.assertEqual(self.response.status_code, 404)
@patch('student.models.cc.User.from_django_user')
@patch('student.models.cc.User.subscribed_threads')
def test_user_followed_threads_exception(self, mock_threads, mock_from_django_user):
# Mock the code that makes the HTTP requests to the cs_comment_service app
# for the profiled user's active threads
mock_threads.return_value = CommentClientPaginatedResult(collection=[], page=1, num_pages=1)
# Mock the code that makes the HTTP request to the cs_comment_service app
# that gets the current user's info
mock_from_django_user.return_value = Mock()
url = reverse('django_comment_client.forum.views.followed_threads',
kwargs={'course_id': self.course.id.to_deprecated_string(), 'user_id': '12345'}) # There is no user 12345
self.response = self.client.get(url)
self.assertEqual(self.response.status_code, 404)
def make_mock_thread_data(
course,
text,
thread_id,
num_children,
group_id=None,
group_name=None,
commentable_id=None,
):
data_commentable_id = (
commentable_id or course.discussion_topics.get('General', {}).get('id') or "dummy_commentable_id"
)
thread_data = {
"id": thread_id,
"type": "thread",
"title": text,
"body": text,
"commentable_id": data_commentable_id,
"resp_total": 42,
"resp_skip": 25,
"resp_limit": 5,
"group_id": group_id,
"context": (
ThreadContext.COURSE if get_team(data_commentable_id) is None else ThreadContext.STANDALONE
)
}
if group_id is not None:
thread_data['group_name'] = group_name
if num_children is not None:
thread_data["children"] = [{
"id": "dummy_comment_id_{}".format(i),
"type": "comment",
"body": text,
} for i in range(num_children)]
return thread_data
def make_mock_request_impl(
course,
text,
thread_id="dummy_thread_id",
group_id=None,
commentable_id=None,
num_thread_responses=1,
):
def mock_request_impl(*args, **kwargs):
url = args[1]
data = None
if url.endswith("threads") or url.endswith("user_profile"):
data = {
"collection": [
make_mock_thread_data(
course=course,
text=text,
thread_id=thread_id,
num_children=None,
group_id=group_id,
commentable_id=commentable_id,
)
]
}
elif thread_id and url.endswith(thread_id):
data = make_mock_thread_data(
course=course,
text=text,
thread_id=thread_id,
num_children=num_thread_responses,
group_id=group_id,
commentable_id=commentable_id
)
elif "/users/" in url:
data = {
"default_sort_key": "date",
"upvoted_ids": [],
"downvoted_ids": [],
"subscribed_thread_ids": [],
}
# comments service adds these attributes when course_id param is present
if kwargs.get('params', {}).get('course_id'):
data.update({
"threads_count": 1,
"comments_count": 2
})
if data:
return Mock(status_code=200, text=json.dumps(data), json=Mock(return_value=data))
return Mock(status_code=404)
return mock_request_impl
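# Typical wiring in the tests below (a sketch): patch requests.request and
# point its side_effect at this factory's return value, e.g.
#   mock_request.side_effect = make_mock_request_impl(course=course, text="dummy")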
class StringEndsWithMatcher(object):
def __init__(self, suffix):
self.suffix = suffix
def __eq__(self, other):
return other.endswith(self.suffix)
class PartialDictMatcher(object):
def __init__(self, expected_values):
self.expected_values = expected_values
def __eq__(self, other):
return all([
key in other and other[key] == value
for key, value in self.expected_values.iteritems()
])
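# These matchers let assert_called_with check only the parts of a call we
# care about, e.g. (mirroring the assertions in the tests below):
#   mock_request.assert_called_with(
#       "get", StringEndsWithMatcher(thread_id),
#       data=None, params=PartialDictMatcher({"user_id": 1}),
#       headers=ANY, timeout=ANY)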
@patch('requests.request', autospec=True)
class SingleThreadTestCase(ModuleStoreTestCase):
def setUp(self):
super(SingleThreadTestCase, self).setUp(create_user=False)
self.course = CourseFactory.create(discussion_topics={'dummy discussion': {'id': 'dummy_discussion_id'}})
self.student = UserFactory.create()
CourseEnrollmentFactory.create(user=self.student, course_id=self.course.id)
def test_ajax(self, mock_request):
text = "dummy content"
thread_id = "test_thread_id"
mock_request.side_effect = make_mock_request_impl(course=self.course, text=text, thread_id=thread_id)
request = RequestFactory().get(
"dummy_url",
HTTP_X_REQUESTED_WITH="XMLHttpRequest"
)
request.user = self.student
response = views.single_thread(
request,
self.course.id.to_deprecated_string(),
"dummy_discussion_id",
"test_thread_id"
)
self.assertEquals(response.status_code, 200)
response_data = json.loads(response.content)
# strip_none is being used to perform the same transform that the
# django view performs prior to writing thread data to the response
self.assertEquals(
response_data["content"],
strip_none(make_mock_thread_data(course=self.course, text=text, thread_id=thread_id, num_children=1))
)
mock_request.assert_called_with(
"get",
StringEndsWithMatcher(thread_id), # url
data=None,
params=PartialDictMatcher({"mark_as_read": True, "user_id": 1, "recursive": True}),
headers=ANY,
timeout=ANY
)
def test_skip_limit(self, mock_request):
text = "dummy content"
thread_id = "test_thread_id"
response_skip = "45"
response_limit = "15"
mock_request.side_effect = make_mock_request_impl(course=self.course, text=text, thread_id=thread_id)
request = RequestFactory().get(
"dummy_url",
{"resp_skip": response_skip, "resp_limit": response_limit},
HTTP_X_REQUESTED_WITH="XMLHttpRequest"
)
request.user = self.student
response = views.single_thread(
request,
self.course.id.to_deprecated_string(),
"dummy_discussion_id",
"test_thread_id"
)
self.assertEquals(response.status_code, 200)
response_data = json.loads(response.content)
# strip_none is being used to perform the same transform that the
# django view performs prior to writing thread data to the response
self.assertEquals(
response_data["content"],
strip_none(make_mock_thread_data(course=self.course, text=text, thread_id=thread_id, num_children=1))
)
mock_request.assert_called_with(
"get",
StringEndsWithMatcher(thread_id), # url
data=None,
params=PartialDictMatcher({
"mark_as_read": True,
"user_id": 1,
"recursive": True,
"resp_skip": response_skip,
"resp_limit": response_limit,
}),
headers=ANY,
timeout=ANY
)
def test_post(self, mock_request):
request = RequestFactory().post("dummy_url")
response = views.single_thread(
request,
self.course.id.to_deprecated_string(),
"dummy_discussion_id",
"dummy_thread_id"
)
self.assertEquals(response.status_code, 405)
def test_not_found(self, mock_request):
request = RequestFactory().get("dummy_url")
request.user = self.student
# Mock request to return 404 for thread request
mock_request.side_effect = make_mock_request_impl(course=self.course, text="dummy", thread_id=None)
self.assertRaises(
Http404,
views.single_thread,
request,
self.course.id.to_deprecated_string(),
"test_discussion_id",
"test_thread_id"
)
@ddt.ddt
@patch('requests.request', autospec=True)
class SingleThreadQueryCountTestCase(ModuleStoreTestCase):
"""
Ensures the number of modulestore queries and number of sql queries are
independent of the number of responses retrieved for a given discussion thread.
"""
MODULESTORE = TEST_DATA_MONGO_MODULESTORE
@ddt.data(
# old mongo with cache
(ModuleStoreEnum.Type.mongo, 1, 6, 4, 16, 8),
(ModuleStoreEnum.Type.mongo, 50, 6, 4, 16, 8),
# split mongo: 3 queries, regardless of thread response size.
(ModuleStoreEnum.Type.split, 1, 3, 3, 16, 8),
(ModuleStoreEnum.Type.split, 50, 3, 3, 16, 8),
)
@ddt.unpack
def test_number_of_mongo_queries(
self,
default_store,
num_thread_responses,
num_uncached_mongo_calls,
num_cached_mongo_calls,
num_uncached_sql_queries,
num_cached_sql_queries,
mock_request
):
with modulestore().default_store(default_store):
course = CourseFactory.create(discussion_topics={'dummy discussion': {'id': 'dummy_discussion_id'}})
student = UserFactory.create()
CourseEnrollmentFactory.create(user=student, course_id=course.id)
test_thread_id = "test_thread_id"
mock_request.side_effect = make_mock_request_impl(
course=course, text="dummy content", thread_id=test_thread_id, num_thread_responses=num_thread_responses
)
request = RequestFactory().get(
"dummy_url",
HTTP_X_REQUESTED_WITH="XMLHttpRequest"
)
request.user = student
def call_single_thread():
"""
Call single_thread and assert that it returns what we expect.
"""
response = views.single_thread(
request,
course.id.to_deprecated_string(),
"dummy_discussion_id",
test_thread_id
)
self.assertEquals(response.status_code, 200)
self.assertEquals(len(json.loads(response.content)["content"]["children"]), num_thread_responses)
# Test uncached first, then cached now that the cache is warm.
cached_calls = [
[num_uncached_mongo_calls, num_uncached_sql_queries],
[num_cached_mongo_calls, num_cached_sql_queries],
]
for expected_mongo_calls, expected_sql_queries in cached_calls:
with self.assertNumQueries(expected_sql_queries):
with check_mongo_calls(expected_mongo_calls):
call_single_thread()
@patch('requests.request', autospec=True)
class SingleCohortedThreadTestCase(CohortedTestCase):
def _create_mock_cohorted_thread(self, mock_request):
self.mock_text = "dummy content"
self.mock_thread_id = "test_thread_id"
mock_request.side_effect = make_mock_request_impl(
course=self.course, text=self.mock_text, thread_id=self.mock_thread_id, group_id=self.student_cohort.id
)
def test_ajax(self, mock_request):
self._create_mock_cohorted_thread(mock_request)
request = RequestFactory().get(
"dummy_url",
HTTP_X_REQUESTED_WITH="XMLHttpRequest"
)
request.user = self.student
response = views.single_thread(
request,
self.course.id.to_deprecated_string(),
"cohorted_topic",
self.mock_thread_id
)
self.assertEquals(response.status_code, 200)
response_data = json.loads(response.content)
self.assertEquals(
response_data["content"],
make_mock_thread_data(
course=self.course,
text=self.mock_text,
thread_id=self.mock_thread_id,
num_children=1,
group_id=self.student_cohort.id,
group_name=self.student_cohort.name
)
)
def test_html(self, mock_request):
self._create_mock_cohorted_thread(mock_request)
request = RequestFactory().get("dummy_url")
request.user = self.student
mako_middleware_process_request(request)
response = views.single_thread(
request,
self.course.id.to_deprecated_string(),
"cohorted_topic",
self.mock_thread_id
)
self.assertEquals(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8')
html = response.content
# Verify that the group name is correctly included in the HTML
self.assertRegexpMatches(html, r'"group_name": "student_cohort"')
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
class SingleThreadAccessTestCase(CohortedTestCase):
def call_view(self, mock_request, commentable_id, user, group_id, thread_group_id=None, pass_group_id=True):
thread_id = "test_thread_id"
mock_request.side_effect = make_mock_request_impl(
course=self.course, text="dummy context", thread_id=thread_id, group_id=thread_group_id
)
request_data = {}
if pass_group_id:
request_data["group_id"] = group_id
request = RequestFactory().get(
"dummy_url",
data=request_data,
HTTP_X_REQUESTED_WITH="XMLHttpRequest"
)
request.user = user
return views.single_thread(
request,
self.course.id.to_deprecated_string(),
commentable_id,
thread_id
)
def test_student_non_cohorted(self, mock_request):
resp = self.call_view(mock_request, "non_cohorted_topic", self.student, self.student_cohort.id)
self.assertEqual(resp.status_code, 200)
def test_student_same_cohort(self, mock_request):
resp = self.call_view(
mock_request,
"cohorted_topic",
self.student,
self.student_cohort.id,
thread_group_id=self.student_cohort.id
)
self.assertEqual(resp.status_code, 200)
# this test ensures that a thread response from the cs with group_id: null
# behaves the same as a thread response without a group_id (see: TNL-444)
def test_student_global_thread_in_cohorted_topic(self, mock_request):
resp = self.call_view(
mock_request,
"cohorted_topic",
self.student,
self.student_cohort.id,
thread_group_id=None
)
self.assertEqual(resp.status_code, 200)
def test_student_different_cohort(self, mock_request):
self.assertRaises(
Http404,
lambda: self.call_view(
mock_request,
"cohorted_topic",
self.student,
self.student_cohort.id,
thread_group_id=self.moderator_cohort.id
)
)
def test_moderator_non_cohorted(self, mock_request):
resp = self.call_view(mock_request, "non_cohorted_topic", self.moderator, self.moderator_cohort.id)
self.assertEqual(resp.status_code, 200)
def test_moderator_same_cohort(self, mock_request):
resp = self.call_view(
mock_request,
"cohorted_topic",
self.moderator,
self.moderator_cohort.id,
thread_group_id=self.moderator_cohort.id
)
self.assertEqual(resp.status_code, 200)
def test_moderator_different_cohort(self, mock_request):
resp = self.call_view(
mock_request,
"cohorted_topic",
self.moderator,
self.moderator_cohort.id,
thread_group_id=self.student_cohort.id
)
self.assertEqual(resp.status_code, 200)
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
class SingleThreadGroupIdTestCase(CohortedTestCase, CohortedTopicGroupIdTestMixin):
cs_endpoint = "/threads"
def call_view(self, mock_request, commentable_id, user, group_id, pass_group_id=True, is_ajax=False):
mock_request.side_effect = make_mock_request_impl(
course=self.course, text="dummy context", group_id=self.student_cohort.id
)
request_data = {}
if pass_group_id:
request_data["group_id"] = group_id
headers = {}
if is_ajax:
headers['HTTP_X_REQUESTED_WITH'] = "XMLHttpRequest"
request = RequestFactory().get(
"dummy_url",
data=request_data,
**headers
)
request.user = user
mako_middleware_process_request(request)
return views.single_thread(
request,
self.course.id.to_deprecated_string(),
commentable_id,
"dummy_thread_id"
)
def test_group_info_in_html_response(self, mock_request):
response = self.call_view(
mock_request,
"cohorted_topic",
self.student,
self.student_cohort.id,
is_ajax=False
)
self._assert_html_response_contains_group_info(response)
def test_group_info_in_ajax_response(self, mock_request):
response = self.call_view(
mock_request,
"cohorted_topic",
self.student,
self.student_cohort.id,
is_ajax=True
)
self._assert_json_response_contains_group_info(
response, lambda d: d['content']
)
@patch('requests.request', autospec=True)
class SingleThreadContentGroupTestCase(ContentGroupTestCase):
def assert_can_access(self, user, discussion_id, thread_id, should_have_access):
"""
Verify that a user has access to a thread within a given
discussion_id when should_have_access is True, otherwise
verify that the user does not have access to that thread.
"""
request = RequestFactory().get("dummy_url")
request.user = user
mako_middleware_process_request(request)
def call_single_thread():
return views.single_thread(
request,
unicode(self.course.id),
discussion_id,
thread_id
)
if should_have_access:
self.assertEqual(call_single_thread().status_code, 200)
else:
with self.assertRaises(Http404):
call_single_thread()
def test_staff_user(self, mock_request):
"""
Verify that the staff user can access threads in the alpha,
beta, and global discussion modules.
"""
thread_id = "test_thread_id"
mock_request.side_effect = make_mock_request_impl(course=self.course, text="dummy content", thread_id=thread_id)
for discussion_module in [self.alpha_module, self.beta_module, self.global_module]:
self.assert_can_access(self.staff_user, discussion_module.discussion_id, thread_id, True)
def test_alpha_user(self, mock_request):
"""
Verify that the alpha user can access threads in the alpha and
global discussion modules.
"""
thread_id = "test_thread_id"
mock_request.side_effect = make_mock_request_impl(course=self.course, text="dummy content", thread_id=thread_id)
for discussion_module in [self.alpha_module, self.global_module]:
self.assert_can_access(self.alpha_user, discussion_module.discussion_id, thread_id, True)
self.assert_can_access(self.alpha_user, self.beta_module.discussion_id, thread_id, False)
def test_beta_user(self, mock_request):
"""
Verify that the beta user can access threads in the beta and
global discussion modules.
"""
thread_id = "test_thread_id"
mock_request.side_effect = make_mock_request_impl(course=self.course, text="dummy content", thread_id=thread_id)
for discussion_module in [self.beta_module, self.global_module]:
self.assert_can_access(self.beta_user, discussion_module.discussion_id, thread_id, True)
self.assert_can_access(self.beta_user, self.alpha_module.discussion_id, thread_id, False)
def test_non_cohorted_user(self, mock_request):
"""
Verify that the non-cohorted user can access threads in just the
global discussion module.
"""
thread_id = "test_thread_id"
mock_request.side_effect = make_mock_request_impl(course=self.course, text="dummy content", thread_id=thread_id)
self.assert_can_access(self.non_cohorted_user, self.global_module.discussion_id, thread_id, True)
self.assert_can_access(self.non_cohorted_user, self.alpha_module.discussion_id, thread_id, False)
self.assert_can_access(self.non_cohorted_user, self.beta_module.discussion_id, thread_id, False)
def test_course_context_respected(self, mock_request):
"""
Verify that course threads go through discussion_category_id_access method.
"""
thread_id = "test_thread_id"
mock_request.side_effect = make_mock_request_impl(
course=self.course, text="dummy content", thread_id=thread_id
)
# Beta user does not have access to alpha_module.
self.assert_can_access(self.beta_user, self.alpha_module.discussion_id, thread_id, False)
def test_standalone_context_respected(self, mock_request):
"""
Verify that standalone threads don't go through discussion_category_id_access method.
"""
# For this rather pathological test, we are assigning the alpha module discussion_id (commentable_id)
# to a team so that we can verify that standalone threads don't go through discussion_category_id_access.
thread_id = "test_thread_id"
CourseTeamFactory(
name="A team",
course_id=self.course.id,
topic_id='topic_id',
discussion_topic_id=self.alpha_module.discussion_id
)
mock_request.side_effect = make_mock_request_impl(
course=self.course, text="dummy content", thread_id=thread_id,
commentable_id=self.alpha_module.discussion_id
)
# If a thread returns context other than "course", the access check is not done, and the beta user
# can see the alpha discussion module.
self.assert_can_access(self.beta_user, self.alpha_module.discussion_id, thread_id, True)
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
class InlineDiscussionContextTestCase(ModuleStoreTestCase):
def setUp(self):
super(InlineDiscussionContextTestCase, self).setUp()
self.course = CourseFactory.create()
CourseEnrollmentFactory(user=self.user, course_id=self.course.id)
self.discussion_topic_id = "dummy_topic"
self.team = CourseTeamFactory(
name="A team",
course_id=self.course.id,
topic_id='topic_id',
discussion_topic_id=self.discussion_topic_id
)
self.team.add_user(self.user) # pylint: disable=no-member
def test_context_can_be_standalone(self, mock_request):
mock_request.side_effect = make_mock_request_impl(
course=self.course,
text="dummy text",
commentable_id=self.discussion_topic_id
)
request = RequestFactory().get("dummy_url")
request.user = self.user
response = views.inline_discussion(
request,
unicode(self.course.id),
self.discussion_topic_id,
)
json_response = json.loads(response.content)
self.assertEqual(json_response['discussion_data'][0]['context'], ThreadContext.STANDALONE)
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
class InlineDiscussionGroupIdTestCase(
CohortedTestCase,
CohortedTopicGroupIdTestMixin,
NonCohortedTopicGroupIdTestMixin
):
cs_endpoint = "/threads"
def setUp(self):
super(InlineDiscussionGroupIdTestCase, self).setUp()
self.cohorted_commentable_id = 'cohorted_topic'
def call_view(self, mock_request, commentable_id, user, group_id, pass_group_id=True):
kwargs = {'commentable_id': self.cohorted_commentable_id}
if group_id:
# avoid causing a server error when the LMS chokes attempting
# to find a group name for the group_id, when we're testing with
# an invalid one.
try:
CourseUserGroup.objects.get(id=group_id)
kwargs['group_id'] = group_id
except CourseUserGroup.DoesNotExist:
pass
mock_request.side_effect = make_mock_request_impl(self.course, "dummy content", **kwargs)
request_data = {}
if pass_group_id:
request_data["group_id"] = group_id
request = RequestFactory().get(
"dummy_url",
data=request_data
)
request.user = user
return views.inline_discussion(
request,
self.course.id.to_deprecated_string(),
commentable_id
)
def test_group_info_in_ajax_response(self, mock_request):
response = self.call_view(
mock_request,
self.cohorted_commentable_id,
self.student,
self.student_cohort.id
)
self._assert_json_response_contains_group_info(
response, lambda d: d['discussion_data'][0]
)
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
class ForumFormDiscussionGroupIdTestCase(CohortedTestCase, CohortedTopicGroupIdTestMixin):
cs_endpoint = "/threads"
def call_view(self, mock_request, commentable_id, user, group_id, pass_group_id=True, is_ajax=False):
kwargs = {}
if group_id:
kwargs['group_id'] = group_id
mock_request.side_effect = make_mock_request_impl(self.course, "dummy content", **kwargs)
request_data = {}
if pass_group_id:
request_data["group_id"] = group_id
headers = {}
if is_ajax:
headers['HTTP_X_REQUESTED_WITH'] = "XMLHttpRequest"
request = RequestFactory().get(
"dummy_url",
data=request_data,
**headers
)
request.user = user
mako_middleware_process_request(request)
return views.forum_form_discussion(
request,
self.course.id.to_deprecated_string()
)
def test_group_info_in_html_response(self, mock_request):
response = self.call_view(
mock_request,
"cohorted_topic",
self.student,
self.student_cohort.id
)
self._assert_html_response_contains_group_info(response)
def test_group_info_in_ajax_response(self, mock_request):
response = self.call_view(
mock_request,
"cohorted_topic",
self.student,
self.student_cohort.id,
is_ajax=True
)
self._assert_json_response_contains_group_info(
response, lambda d: d['discussion_data'][0]
)
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
class UserProfileDiscussionGroupIdTestCase(CohortedTestCase, CohortedTopicGroupIdTestMixin):
cs_endpoint = "/active_threads"
def call_view_for_profiled_user(
self, mock_request, requesting_user, profiled_user, group_id, pass_group_id, is_ajax=False
):
"""
Calls "user_profile" view method on behalf of "requesting_user" to get information about
the user "profiled_user".
"""
kwargs = {}
if group_id:
kwargs['group_id'] = group_id
mock_request.side_effect = make_mock_request_impl(self.course, "dummy content", **kwargs)
request_data = {}
if pass_group_id:
request_data["group_id"] = group_id
headers = {}
if is_ajax:
headers['HTTP_X_REQUESTED_WITH'] = "XMLHttpRequest"
request = RequestFactory().get(
"dummy_url",
data=request_data,
**headers
)
request.user = requesting_user
mako_middleware_process_request(request)
return views.user_profile(
request,
self.course.id.to_deprecated_string(),
profiled_user.id
)
def call_view(self, mock_request, _commentable_id, user, group_id, pass_group_id=True, is_ajax=False):
return self.call_view_for_profiled_user(
mock_request, user, user, group_id, pass_group_id=pass_group_id, is_ajax=is_ajax
)
def test_group_info_in_html_response(self, mock_request):
response = self.call_view(
mock_request,
"cohorted_topic",
self.student,
self.student_cohort.id,
is_ajax=False
)
self._assert_html_response_contains_group_info(response)
def test_group_info_in_ajax_response(self, mock_request):
response = self.call_view(
mock_request,
"cohorted_topic",
self.student,
self.student_cohort.id,
is_ajax=True
)
self._assert_json_response_contains_group_info(
response, lambda d: d['discussion_data'][0]
)
def _test_group_id_passed_to_user_profile(
self, mock_request, expect_group_id_in_request, requesting_user, profiled_user, group_id, pass_group_id
):
"""
Helper method for testing whether or not group_id was passed to the user_profile request.
"""
def get_params_from_user_info_call(for_specific_course):
"""
Returns the request parameters for the user info call with either course_id specified or not,
depending on value of 'for_specific_course'.
"""
# There will be 3 calls from user_profile. One has the cs_endpoint "active_threads", and it is already
# tested. The other 2 calls are for user info; one of those calls is for general information about the user,
# and it does not specify a course_id. The other call does specify a course_id, and if the caller did not
# have discussion moderator privileges, it should also contain a group_id.
for r_call in mock_request.call_args_list:
if not r_call[0][1].endswith(self.cs_endpoint):
params = r_call[1]["params"]
has_course_id = "course_id" in params
if (for_specific_course and has_course_id) or (not for_specific_course and not has_course_id):
return params
            self.fail(
                "Did not find appropriate user_profile call for "
                "'for_specific_course'={}".format(for_specific_course)
            )
mock_request.reset_mock()
self.call_view_for_profiled_user(
mock_request,
requesting_user,
profiled_user,
group_id,
pass_group_id=pass_group_id,
is_ajax=False
)
# Should never have a group_id if course_id was not included in the request.
params_without_course_id = get_params_from_user_info_call(False)
self.assertNotIn("group_id", params_without_course_id)
params_with_course_id = get_params_from_user_info_call(True)
if expect_group_id_in_request:
self.assertIn("group_id", params_with_course_id)
self.assertEqual(group_id, params_with_course_id["group_id"])
else:
self.assertNotIn("group_id", params_with_course_id)
def test_group_id_passed_to_user_profile_student(self, mock_request):
"""
Test that the group id is always included when requesting user profile information for a particular
course if the requester does not have discussion moderation privileges.
"""
def verify_group_id_always_present(profiled_user, pass_group_id):
"""
Helper method to verify that group_id is always present for student in course
(non-privileged user).
"""
self._test_group_id_passed_to_user_profile(
mock_request, True, self.student, profiled_user, self.student_cohort.id, pass_group_id
)
# In all these test cases, the requesting_user is the student (non-privileged user).
# The profile returned on behalf of the student is for the profiled_user.
verify_group_id_always_present(profiled_user=self.student, pass_group_id=True)
verify_group_id_always_present(profiled_user=self.student, pass_group_id=False)
verify_group_id_always_present(profiled_user=self.moderator, pass_group_id=True)
verify_group_id_always_present(profiled_user=self.moderator, pass_group_id=False)
def test_group_id_user_profile_moderator(self, mock_request):
"""
Test that the group id is only included when a privileged user requests user profile information for a
particular course and user if the group_id is explicitly passed in.
"""
def verify_group_id_present(profiled_user, pass_group_id, requested_cohort=self.moderator_cohort):
"""
Helper method to verify that group_id is present.
"""
self._test_group_id_passed_to_user_profile(
mock_request, True, self.moderator, profiled_user, requested_cohort.id, pass_group_id
)
def verify_group_id_not_present(profiled_user, pass_group_id, requested_cohort=self.moderator_cohort):
"""
Helper method to verify that group_id is not present.
"""
self._test_group_id_passed_to_user_profile(
mock_request, False, self.moderator, profiled_user, requested_cohort.id, pass_group_id
)
# In all these test cases, the requesting_user is the moderator (privileged user).
# If the group_id is explicitly passed, it will be present in the request.
verify_group_id_present(profiled_user=self.student, pass_group_id=True)
verify_group_id_present(profiled_user=self.moderator, pass_group_id=True)
verify_group_id_present(
profiled_user=self.student, pass_group_id=True, requested_cohort=self.student_cohort
)
# If the group_id is not explicitly passed, it will not be present because the requesting_user
# has discussion moderator privileges.
verify_group_id_not_present(profiled_user=self.student, pass_group_id=False)
verify_group_id_not_present(profiled_user=self.moderator, pass_group_id=False)
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
class FollowedThreadsDiscussionGroupIdTestCase(CohortedTestCase, CohortedTopicGroupIdTestMixin):
cs_endpoint = "/subscribed_threads"
def call_view(self, mock_request, commentable_id, user, group_id, pass_group_id=True):
kwargs = {}
if group_id:
kwargs['group_id'] = group_id
mock_request.side_effect = make_mock_request_impl(self.course, "dummy content", **kwargs)
request_data = {}
if pass_group_id:
request_data["group_id"] = group_id
request = RequestFactory().get(
"dummy_url",
data=request_data,
HTTP_X_REQUESTED_WITH="XMLHttpRequest"
)
request.user = user
return views.followed_threads(
request,
self.course.id.to_deprecated_string(),
user.id
)
def test_group_info_in_ajax_response(self, mock_request):
response = self.call_view(
mock_request,
"cohorted_topic",
self.student,
self.student_cohort.id
)
self._assert_json_response_contains_group_info(
response, lambda d: d['discussion_data'][0]
)
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
class InlineDiscussionTestCase(ModuleStoreTestCase):
def setUp(self):
super(InlineDiscussionTestCase, self).setUp()
self.course = CourseFactory.create(org="TestX", number="101", display_name="Test Course")
self.student = UserFactory.create()
CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
self.discussion1 = ItemFactory.create(
parent_location=self.course.location,
category="discussion",
discussion_id="discussion1",
display_name='Discussion1',
discussion_category="Chapter",
discussion_target="Discussion1"
)
def send_request(self, mock_request, params=None):
"""
Creates and returns a request with params set, and configures
mock_request to return appropriate values.
"""
request = RequestFactory().get("dummy_url", params if params else {})
request.user = self.student
mock_request.side_effect = make_mock_request_impl(
course=self.course, text="dummy content", commentable_id=self.discussion1.discussion_id
)
return views.inline_discussion(
request, self.course.id.to_deprecated_string(), self.discussion1.discussion_id
)
def verify_response(self, response):
"""Verifies that the response contains the appropriate courseware_url and courseware_title"""
self.assertEqual(response.status_code, 200)
response_data = json.loads(response.content)
expected_courseware_url = '/courses/TestX/101/Test_Course/jump_to/i4x://TestX/101/discussion/Discussion1'
expected_courseware_title = 'Chapter / Discussion1'
self.assertEqual(response_data['discussion_data'][0]['courseware_url'], expected_courseware_url)
self.assertEqual(response_data["discussion_data"][0]["courseware_title"], expected_courseware_title)
def test_courseware_data(self, mock_request):
self.verify_response(self.send_request(mock_request))
def test_context(self, mock_request):
team = CourseTeamFactory(
name='Team Name',
topic_id='A topic',
course_id=self.course.id,
discussion_topic_id=self.discussion1.discussion_id
)
team.add_user(self.student) # pylint: disable=no-member
response = self.send_request(mock_request)
self.assertEqual(mock_request.call_args[1]['params']['context'], ThreadContext.STANDALONE)
self.verify_response(response)
@patch('requests.request', autospec=True)
class UserProfileTestCase(ModuleStoreTestCase):
TEST_THREAD_TEXT = 'userprofile-test-text'
TEST_THREAD_ID = 'userprofile-test-thread-id'
def setUp(self):
super(UserProfileTestCase, self).setUp()
self.course = CourseFactory.create()
self.student = UserFactory.create()
self.profiled_user = UserFactory.create()
CourseEnrollmentFactory.create(user=self.student, course_id=self.course.id)
def get_response(self, mock_request, params, **headers):
mock_request.side_effect = make_mock_request_impl(
course=self.course, text=self.TEST_THREAD_TEXT, thread_id=self.TEST_THREAD_ID
)
request = RequestFactory().get("dummy_url", data=params, **headers)
request.user = self.student
mako_middleware_process_request(request)
response = views.user_profile(
request,
self.course.id.to_deprecated_string(),
self.profiled_user.id
)
mock_request.assert_any_call(
"get",
StringEndsWithMatcher('/users/{}/active_threads'.format(self.profiled_user.id)),
data=None,
params=PartialDictMatcher({
"course_id": self.course.id.to_deprecated_string(),
"page": params.get("page", 1),
"per_page": views.THREADS_PER_PAGE
}),
headers=ANY,
timeout=ANY
)
return response
def check_html(self, mock_request, **params):
response = self.get_response(mock_request, params)
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8')
html = response.content
self.assertRegexpMatches(html, r'data-page="1"')
self.assertRegexpMatches(html, r'data-num-pages="1"')
self.assertRegexpMatches(html, r'<span>1</span> discussion started')
self.assertRegexpMatches(html, r'<span>2</span> comments')
self.assertRegexpMatches(html, r'"id": "{}"'.format(self.TEST_THREAD_ID))
self.assertRegexpMatches(html, r'"title": "{}"'.format(self.TEST_THREAD_TEXT))
self.assertRegexpMatches(html, r'"body": "{}"'.format(self.TEST_THREAD_TEXT))
self.assertRegexpMatches(html, r'"username": "{}"'.format(self.student.username))
def check_ajax(self, mock_request, **params):
response = self.get_response(mock_request, params, HTTP_X_REQUESTED_WITH="XMLHttpRequest")
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json; charset=utf-8')
response_data = json.loads(response.content)
self.assertEqual(
sorted(response_data.keys()),
["annotated_content_info", "discussion_data", "num_pages", "page"]
)
self.assertEqual(len(response_data['discussion_data']), 1)
self.assertEqual(response_data["page"], 1)
self.assertEqual(response_data["num_pages"], 1)
self.assertEqual(response_data['discussion_data'][0]['id'], self.TEST_THREAD_ID)
self.assertEqual(response_data['discussion_data'][0]['title'], self.TEST_THREAD_TEXT)
self.assertEqual(response_data['discussion_data'][0]['body'], self.TEST_THREAD_TEXT)
def test_html(self, mock_request):
self.check_html(mock_request)
def test_html_p2(self, mock_request):
self.check_html(mock_request, page="2")
def test_ajax(self, mock_request):
self.check_ajax(mock_request)
def test_ajax_p2(self, mock_request):
self.check_ajax(mock_request, page="2")
def test_404_profiled_user(self, mock_request):
request = RequestFactory().get("dummy_url")
request.user = self.student
with self.assertRaises(Http404):
views.user_profile(
request,
self.course.id.to_deprecated_string(),
-999
)
def test_404_course(self, mock_request):
request = RequestFactory().get("dummy_url")
request.user = self.student
with self.assertRaises(Http404):
views.user_profile(
request,
"non/existent/course",
self.profiled_user.id
)
def test_post(self, mock_request):
mock_request.side_effect = make_mock_request_impl(
course=self.course, text=self.TEST_THREAD_TEXT, thread_id=self.TEST_THREAD_ID
)
request = RequestFactory().post("dummy_url")
request.user = self.student
response = views.user_profile(
request,
self.course.id.to_deprecated_string(),
self.profiled_user.id
)
self.assertEqual(response.status_code, 405)
@patch('requests.request', autospec=True)
class CommentsServiceRequestHeadersTestCase(UrlResetMixin, ModuleStoreTestCase):
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
username = "foo"
password = "bar"
# Invoke UrlResetMixin; the test user is created explicitly below.
super(CommentsServiceRequestHeadersTestCase, self).setUp(create_user=False)
self.course = CourseFactory.create(discussion_topics={'dummy discussion': {'id': 'dummy_discussion_id'}})
self.student = UserFactory.create(username=username, password=password)
CourseEnrollmentFactory.create(user=self.student, course_id=self.course.id)
self.assertTrue(
self.client.login(username=username, password=password)
)
def assert_all_calls_have_header(self, mock_request, key, value):
expected = call(
ANY, # method
ANY, # url
data=ANY,
params=ANY,
headers=PartialDictMatcher({key: value}),
timeout=ANY
)
for actual in mock_request.call_args_list:
self.assertEqual(expected, actual)
def test_accept_language(self, mock_request):
lang = "eo"
text = "dummy content"
thread_id = "test_thread_id"
mock_request.side_effect = make_mock_request_impl(course=self.course, text=text, thread_id=thread_id)
self.client.get(
reverse(
"django_comment_client.forum.views.single_thread",
kwargs={
"course_id": self.course.id.to_deprecated_string(),
"discussion_id": "dummy_discussion_id",
"thread_id": thread_id,
}
),
HTTP_ACCEPT_LANGUAGE=lang,
)
self.assert_all_calls_have_header(mock_request, "Accept-Language", lang)
@override_settings(COMMENTS_SERVICE_KEY="test_api_key")
def test_api_key(self, mock_request):
mock_request.side_effect = make_mock_request_impl(course=self.course, text="dummy", thread_id="dummy")
self.client.get(
reverse(
"django_comment_client.forum.views.forum_form_discussion",
kwargs={"course_id": self.course.id.to_deprecated_string()}
),
)
self.assert_all_calls_have_header(mock_request, "X-Edx-Api-Key", "test_api_key")
class InlineDiscussionUnicodeTestCase(SharedModuleStoreTestCase, UnicodeTestMixin):
@classmethod
def setUpClass(cls):
# pylint: disable=super-method-not-called
with super(InlineDiscussionUnicodeTestCase, cls).setUpClassAndTestData():
cls.course = CourseFactory.create()
@classmethod
def setUpTestData(cls):
super(InlineDiscussionUnicodeTestCase, cls).setUpTestData()
cls.student = UserFactory.create()
CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
def _test_unicode_data(self, text, mock_request):
mock_request.side_effect = make_mock_request_impl(course=self.course, text=text)
request = RequestFactory().get("dummy_url")
request.user = self.student
response = views.inline_discussion(
request, self.course.id.to_deprecated_string(), self.course.discussion_topics['General']['id']
)
self.assertEqual(response.status_code, 200)
response_data = json.loads(response.content)
self.assertEqual(response_data["discussion_data"][0]["title"], text)
self.assertEqual(response_data["discussion_data"][0]["body"], text)
class ForumFormDiscussionUnicodeTestCase(SharedModuleStoreTestCase, UnicodeTestMixin):
@classmethod
def setUpClass(cls):
# pylint: disable=super-method-not-called
with super(ForumFormDiscussionUnicodeTestCase, cls).setUpClassAndTestData():
cls.course = CourseFactory.create()
@classmethod
def setUpTestData(cls):
super(ForumFormDiscussionUnicodeTestCase, cls).setUpTestData()
cls.student = UserFactory.create()
CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
def _test_unicode_data(self, text, mock_request):
mock_request.side_effect = make_mock_request_impl(course=self.course, text=text)
request = RequestFactory().get("dummy_url")
request.user = self.student
request.META["HTTP_X_REQUESTED_WITH"] = "XMLHttpRequest" # so request.is_ajax() == True
response = views.forum_form_discussion(request, self.course.id.to_deprecated_string())
self.assertEqual(response.status_code, 200)
response_data = json.loads(response.content)
self.assertEqual(response_data["discussion_data"][0]["title"], text)
self.assertEqual(response_data["discussion_data"][0]["body"], text)
@ddt.ddt
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
class ForumDiscussionXSSTestCase(UrlResetMixin, ModuleStoreTestCase):
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(ForumDiscussionXSSTestCase, self).setUp()
username = "foo"
password = "bar"
self.course = CourseFactory.create()
self.student = UserFactory.create(username=username, password=password)
CourseEnrollmentFactory.create(user=self.student, course_id=self.course.id)
self.assertTrue(self.client.login(username=username, password=password))
@ddt.data('"><script>alert(1)</script>', '<script>alert(1)</script>', '</script><script>alert(1)</script>')
@patch('student.models.cc.User.from_django_user')
def test_forum_discussion_xss_prevent(self, malicious_code, mock_user, mock_req): # pylint: disable=unused-argument
"""
Test that XSS attack is prevented
"""
reverse_url = "%s%s" % (reverse(
"django_comment_client.forum.views.forum_form_discussion",
kwargs={"course_id": unicode(self.course.id)}), '/forum_form_discussion')
# Test that malicious code does not appear in html
url = "%s?%s=%s" % (reverse_url, 'sort_key', malicious_code)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertNotIn(malicious_code, resp.content)
@ddt.data('"><script>alert(1)</script>', '<script>alert(1)</script>', '</script><script>alert(1)</script>')
@patch('student.models.cc.User.from_django_user')
@patch('student.models.cc.User.active_threads')
def test_forum_user_profile_xss_prevent(self, malicious_code, mock_threads, mock_from_django_user, mock_request):
"""
Test that XSS attack is prevented
"""
mock_threads.return_value = [], 1, 1
mock_from_django_user.return_value = Mock()
mock_request.side_effect = make_mock_request_impl(course=self.course, text='dummy')
url = reverse('django_comment_client.forum.views.user_profile',
kwargs={'course_id': unicode(self.course.id), 'user_id': str(self.student.id)})
# Test that malicious code does not appear in html
url_string = "%s?%s=%s" % (url, 'page', malicious_code)
resp = self.client.get(url_string)
self.assertEqual(resp.status_code, 200)
self.assertNotIn(malicious_code, resp.content)
class ForumDiscussionSearchUnicodeTestCase(SharedModuleStoreTestCase, UnicodeTestMixin):
@classmethod
def setUpClass(cls):
# pylint: disable=super-method-not-called
with super(ForumDiscussionSearchUnicodeTestCase, cls).setUpClassAndTestData():
cls.course = CourseFactory.create()
@classmethod
def setUpTestData(cls):
super(ForumDiscussionSearchUnicodeTestCase, cls).setUpTestData()
cls.student = UserFactory.create()
CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
def _test_unicode_data(self, text, mock_request):
mock_request.side_effect = make_mock_request_impl(course=self.course, text=text)
data = {
"ajax": 1,
"text": text,
}
request = RequestFactory().get("dummy_url", data)
request.user = self.student
request.META["HTTP_X_REQUESTED_WITH"] = "XMLHttpRequest" # so request.is_ajax() == True
response = views.forum_form_discussion(request, self.course.id.to_deprecated_string())
self.assertEqual(response.status_code, 200)
response_data = json.loads(response.content)
self.assertEqual(response_data["discussion_data"][0]["title"], text)
self.assertEqual(response_data["discussion_data"][0]["body"], text)
class SingleThreadUnicodeTestCase(SharedModuleStoreTestCase, UnicodeTestMixin):
@classmethod
def setUpClass(cls):
# pylint: disable=super-method-not-called
with super(SingleThreadUnicodeTestCase, cls).setUpClassAndTestData():
cls.course = CourseFactory.create(discussion_topics={'dummy_discussion_id': {'id': 'dummy_discussion_id'}})
@classmethod
def setUpTestData(cls):
super(SingleThreadUnicodeTestCase, cls).setUpTestData()
cls.student = UserFactory.create()
CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
def _test_unicode_data(self, text, mock_request):
thread_id = "test_thread_id"
mock_request.side_effect = make_mock_request_impl(course=self.course, text=text, thread_id=thread_id)
request = RequestFactory().get("dummy_url")
request.user = self.student
request.META["HTTP_X_REQUESTED_WITH"] = "XMLHttpRequest" # so request.is_ajax() == True
response = views.single_thread(request, self.course.id.to_deprecated_string(), "dummy_discussion_id", thread_id)
self.assertEqual(response.status_code, 200)
response_data = json.loads(response.content)
self.assertEqual(response_data["content"]["title"], text)
self.assertEqual(response_data["content"]["body"], text)
class UserProfileUnicodeTestCase(SharedModuleStoreTestCase, UnicodeTestMixin):
@classmethod
def setUpClass(cls):
# pylint: disable=super-method-not-called
with super(UserProfileUnicodeTestCase, cls).setUpClassAndTestData():
cls.course = CourseFactory.create()
@classmethod
def setUpTestData(cls):
super(UserProfileUnicodeTestCase, cls).setUpTestData()
cls.student = UserFactory.create()
CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
def _test_unicode_data(self, text, mock_request):
mock_request.side_effect = make_mock_request_impl(course=self.course, text=text)
request = RequestFactory().get("dummy_url")
request.user = self.student
request.META["HTTP_X_REQUESTED_WITH"] = "XMLHttpRequest" # so request.is_ajax() == True
response = views.user_profile(request, self.course.id.to_deprecated_string(), str(self.student.id))
self.assertEqual(response.status_code, 200)
response_data = json.loads(response.content)
self.assertEqual(response_data["discussion_data"][0]["title"], text)
self.assertEqual(response_data["discussion_data"][0]["body"], text)
class FollowedThreadsUnicodeTestCase(SharedModuleStoreTestCase, UnicodeTestMixin):
@classmethod
def setUpClass(cls):
# pylint: disable=super-method-not-called
with super(FollowedThreadsUnicodeTestCase, cls).setUpClassAndTestData():
cls.course = CourseFactory.create()
@classmethod
def setUpTestData(cls):
super(FollowedThreadsUnicodeTestCase, cls).setUpTestData()
cls.student = UserFactory.create()
CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
def _test_unicode_data(self, text, mock_request):
mock_request.side_effect = make_mock_request_impl(course=self.course, text=text)
request = RequestFactory().get("dummy_url")
request.user = self.student
request.META["HTTP_X_REQUESTED_WITH"] = "XMLHttpRequest" # so request.is_ajax() == True
response = views.followed_threads(request, self.course.id.to_deprecated_string(), str(self.student.id))
self.assertEqual(response.status_code, 200)
response_data = json.loads(response.content)
self.assertEqual(response_data["discussion_data"][0]["title"], text)
self.assertEqual(response_data["discussion_data"][0]["body"], text)
class EnrollmentTestCase(ModuleStoreTestCase):
"""
Tests for the behavior of views depending on whether the student is
enrolled in the course.
"""
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(EnrollmentTestCase, self).setUp()
self.course = CourseFactory.create()
self.student = UserFactory.create()
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
def test_unenrolled(self, mock_request):
mock_request.side_effect = make_mock_request_impl(course=self.course, text='dummy')
request = RequestFactory().get('dummy_url')
request.user = self.student
with self.assertRaises(UserNotEnrolled):
views.forum_form_discussion(request, course_id=self.course.id.to_deprecated_string())
|
eoghan2t9/primou-kernel-IRONMAN | refs/heads/master | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 12527 | # Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000
def avg(total, n):
return total / n
def nsecs(secs, nsecs):
return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
return "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs))
def add_stats(dict, key, value):
# Track (min, max, running mean, count) per key.
if key not in dict:
dict[key] = (value, value, value, 1)
else:
min, max, avg, count = dict[key]
if value < min:
min = value
if value > max:
max = value
avg = (avg * count + value) / (count + 1)
dict[key] = (min, max, avg, count + 1)
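# Minimal usage sketch (hypothetical key and values, not part of this file):
# accumulate three latencies for one event, then read the summary tuple back.
#
#   latencies = {}
#   add_stats(latencies, "read", 120)
#   add_stats(latencies, "read", 80)
#   add_stats(latencies, "read", 400)
#   min_v, max_v, avg_v, count = latencies["read"]   # -> (80, 400, 200, 3)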
def clear_term():
print("\x1b[H\x1b[2J")
audit_package_warned = False
try:
import audit
machine_to_id = {
'x86_64': audit.MACH_86_64,
'alpha' : audit.MACH_ALPHA,
'ia64' : audit.MACH_IA64,
'ppc' : audit.MACH_PPC,
'ppc64' : audit.MACH_PPC64,
's390' : audit.MACH_S390,
's390x' : audit.MACH_S390X,
'i386' : audit.MACH_X86,
'i586' : audit.MACH_X86,
'i686' : audit.MACH_X86,
}
try:
machine_to_id['armeb'] = audit.MACH_ARMEB
except:
pass
machine_id = machine_to_id[os.uname()[4]]
except:
if not audit_package_warned:
audit_package_warned = True
print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
try:
return audit.audit_syscall_to_name(id, machine_id)
except:
return str(id)
def strerror(nr):
try:
return errno.errorcode[abs(nr)]
except:
return "Unknown %d errno" % nr
|
amanikamail/flexx | refs/heads/master | flexx/util/screenshot.py | 21 | """ Make screenshots of windows on Windows and Linux.
We need this to do visual tests.
"""
import sys
if sys.platform.startswith('win'):
import ctypes
from ctypes import windll
from ctypes.wintypes import (BOOL, DOUBLE, DWORD, HBITMAP, HDC, HGDIOBJ, HWND,
INT, LPARAM, LONG, RECT, UINT, WORD)
SRCCOPY = 13369376
DIB_RGB_COLORS = BI_RGB = 0
class RECT(ctypes.Structure):
_fields_ = [('left', ctypes.c_long),
('top', ctypes.c_long),
('right', ctypes.c_long),
('bottom', ctypes.c_long)]
class BITMAPINFOHEADER(ctypes.Structure):
_fields_ = [('biSize', DWORD), ('biWidth', LONG), ('biHeight', LONG),
('biPlanes', WORD), ('biBitCount', WORD),
('biCompression', DWORD), ('biSizeImage', DWORD),
('biXPelsPerMeter', LONG), ('biYPelsPerMeter', LONG),
('biClrUsed', DWORD), ('biClrImportant', DWORD)]
class BITMAPINFO(ctypes.Structure):
_fields_ = [('bmiHeader', BITMAPINFOHEADER), ('bmiColors', DWORD * 3)]
# Function shorthands
GetClientRect = windll.user32.GetClientRect
GetWindowRect = windll.user32.GetWindowRect
PrintWindow = windll.user32.PrintWindow
GetWindowThreadProcessId = windll.user32.GetWindowThreadProcessId
IsWindowVisible = windll.user32.IsWindowVisible
EnumWindows = windll.user32.EnumWindows
EnumWindowsProc = ctypes.WINFUNCTYPE(ctypes.c_bool, ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int))
GetWindowDC = windll.user32.GetWindowDC
CreateCompatibleDC = windll.gdi32.CreateCompatibleDC
CreateCompatibleBitmap = windll.gdi32.CreateCompatibleBitmap
SelectObject = windll.gdi32.SelectObject
BitBlt = windll.gdi32.BitBlt
DeleteObject = windll.gdi32.DeleteObject
GetDIBits = windll.gdi32.GetDIBits
# Arg types
windll.user32.GetWindowDC.argtypes = [HWND]
windll.gdi32.CreateCompatibleDC.argtypes = [HDC]
windll.gdi32.CreateCompatibleBitmap.argtypes = [HDC, INT, INT]
windll.gdi32.SelectObject.argtypes = [HDC, HGDIOBJ]
windll.gdi32.BitBlt.argtypes = [HDC, INT, INT, INT, INT, HDC, INT, INT, DWORD]
windll.gdi32.DeleteObject.argtypes = [HGDIOBJ]
windll.gdi32.GetDIBits.argtypes = [HDC, HBITMAP, UINT, UINT, ctypes.c_void_p,
ctypes.POINTER(BITMAPINFO), UINT]
# Return types
windll.user32.GetWindowDC.restype = HDC
windll.gdi32.CreateCompatibleDC.restype = HDC
windll.gdi32.CreateCompatibleBitmap.restype = HBITMAP
windll.gdi32.SelectObject.restype = HGDIOBJ
windll.gdi32.BitBlt.restype = BOOL
windll.gdi32.GetDIBits.restype = INT
windll.gdi32.DeleteObject.restype = BOOL
def win_for_pid(pid):
""" Get the windows-handle for the first visible window of the
process with the given id.
"""
handles = []
def called_for_each_win(hwnd, lParam):
if not IsWindowVisible(hwnd):
return True
# get the process id from the window handle
p_id = ctypes.c_int()
t_id = GetWindowThreadProcessId(hwnd, ctypes.byref(p_id))
if p_id.value == pid:
handles.append(hwnd)
return False
return True
EnumWindows(EnumWindowsProc(called_for_each_win), 0)
if handles:
return handles[0]
else:
return None
def screenshot(pid, client=True):
""" Grab a screenshot of the first visible window of the process
with the given id. If client is True, no window decoration is included.
This code is derived from https://github.com/BoboTiG/python-mss
"""
# Get handle
hwnd = win_for_pid(pid)
# Get window dimensions
rect = RECT()
if client:
GetClientRect(hwnd, ctypes.byref(rect))
else:
GetWindowRect(hwnd, ctypes.byref(rect))
left, right, top, bottom = rect.left, rect.right, rect.top, rect.bottom
w, h = right - left, bottom - top
hwndDC = saveDC = bmp = None
try:
# Get device contexts
hwndDC = GetWindowDC(hwnd)
saveDC = CreateCompatibleDC(hwndDC)
# Get bitmap
bmp = CreateCompatibleBitmap(hwndDC, w, h)
SelectObject(saveDC, bmp)
if client:
result = PrintWindow(hwnd, saveDC, 1)
else:
result = PrintWindow(hwnd, saveDC, 0)
# Init bitmap info
# We grab the image in RGBX mode, so that each word is 32bit and
# we have no striding, then we transform to RGB
buffer_len = h * w * 4
bmi = BITMAPINFO()
bmi.bmiHeader.biSize = ctypes.sizeof(BITMAPINFOHEADER)
bmi.bmiHeader.biWidth = w
bmi.bmiHeader.biHeight = -h # Negative height gives a top-down DIB (origin at the upper-left)
bmi.bmiHeader.biPlanes = 1 # Always 1
bmi.bmiHeader.biBitCount = 32
bmi.bmiHeader.biCompression = BI_RGB
# Blit
image = ctypes.create_string_buffer(buffer_len)
bits = windll.gdi32.GetDIBits(saveDC, bmp, 0, h, image, bmi, DIB_RGB_COLORS)
assert bits == h
# Replace pixels values: BGRX to RGB
image2 = ctypes.create_string_buffer(h*w*3)
image2[0::3] = image[2::4]
image2[1::3] = image[1::4]
image2[2::3] = image[0::4]
return bytes(image2), (w, h, 3)
finally:
# Clean up: the bitmap via DeleteObject, the memory DC via
# DeleteDC, and the window DC via ReleaseDC.
if bmp:
DeleteObject(bmp)
if saveDC:
windll.gdi32.DeleteDC(saveDC)
if hwndDC:
windll.user32.ReleaseDC(hwnd, hwndDC)
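# The BGRX -> RGB remap above relies on strided slice assignment; a tiny
# self-contained illustration with two hypothetical pixels:
#
#   src = bytearray(b'BGRxbgrx')   # two BGRX pixels
#   dst = bytearray(6)
#   dst[0::3] = src[2::4]          # R channel
#   dst[1::3] = src[1::4]          # G channel
#   dst[2::3] = src[0::4]          # B channel
#   assert dst == bytearray(b'RGBrgb')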
if __name__ == '__main__':
im, shape = screenshot(5144, True)
from flexx.util import icon
png = icon.write_png(im, shape)
open('C:\\Users\\Almar\\test.png', 'wb').write(png)
|
DazWorrall/ansible | refs/heads/devel | test/units/module_utils/test_postgresql.py | 63 | import json
import sys
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves import builtins
from ansible.module_utils._text import to_native
from units.mock.procenv import swap_stdin_and_argv
import pprint
realimport = builtins.__import__
class TestPostgres(unittest.TestCase):
def clear_modules(self, mods):
for mod in mods:
if mod in sys.modules:
del sys.modules[mod]
@patch.object(builtins, '__import__')
def test_postgres_pg2_missing_ensure_libs(self, mock_import):
def _mock_import(name, *args, **kwargs):
if name == 'psycopg2':
raise ImportError
return realimport(name, *args, **kwargs)
self.clear_modules(['psycopg2', 'ansible.module_utils.postgres'])
mock_import.side_effect = _mock_import
mod = builtins.__import__('ansible.module_utils.postgres')
self.assertFalse(mod.module_utils.postgres.HAS_PSYCOPG2)
with self.assertRaises(mod.module_utils.postgres.LibraryError) as context:
mod.module_utils.postgres.ensure_libs(sslrootcert=None)
self.assertIn('psycopg2 is not installed', to_native(context.exception))
@patch.object(builtins, '__import__')
def test_postgres_pg2_found_ensure_libs(self, mock_import):
def _mock_import(name, *args, **kwargs):
if 'psycopg2' in name:
return MagicMock()
return realimport(name, *args, **kwargs)
self.clear_modules(['psycopg2', 'ansible.module_utils.postgres'])
mock_import.side_effect = _mock_import
mod = builtins.__import__('ansible.module_utils.postgres')
self.assertTrue(mod.module_utils.postgres.HAS_PSYCOPG2)
ensure_ret = mod.module_utils.postgres.ensure_libs(sslrootcert=None)
self.assertFalse(ensure_ret)
pprint.pprint(ensure_ret)
@patch.object(builtins, '__import__')
def test_postgres_pg2_found_ensure_libs_old_version(self, mock_import):
def _mock_import(name, *args, **kwargs):
if 'psycopg2' in name:
m = MagicMock()
m.__version__ = '2.4.1'
return m
return realimport(name, *args, **kwargs)
self.clear_modules(['psycopg2', 'ansible.module_utils.postgres'])
mock_import.side_effect = _mock_import
mod = builtins.__import__('ansible.module_utils.postgres')
self.assertTrue(mod.module_utils.postgres.HAS_PSYCOPG2)
with self.assertRaises(mod.module_utils.postgres.LibraryError) as context:
mod.module_utils.postgres.ensure_libs(sslrootcert='yes')
self.assertIn('psycopg2 must be at least 2.4.3 in order to use', to_native(context.exception))
|
DEVSENSE/PTVS | refs/heads/master | Python/Tests/TestData/WFastCgi/DjangoApp/DjangoApp/wsgi.py | 7 | """
WSGI config for the DjangoApp project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "DjangoApp.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
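# A minimal sketch of such a wrapper (hypothetical; HelloWorldApplication
# above is only an example name, not part of this project):
#
# def HelloWorldApplication(app):
#     def middleware(environ, start_response):
#         # Inspect or modify environ here, then delegate to Django.
#         return app(environ, start_response)
#     return middleware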
|
stewgeo/fmepedia-data-upload-validation-display | refs/heads/master | pythonLibraries/setuptools-0.6c11/build/lib/setuptools/command/rotate.py | 167 | import distutils, os
from setuptools import Command
from distutils.util import convert_path
from distutils import log
from distutils.errors import *
class rotate(Command):
"""Delete older distributions"""
description = "delete older distributions, keeping N newest files"
user_options = [
('match=', 'm', "patterns to match (required)"),
('dist-dir=', 'd', "directory where the distributions are"),
('keep=', 'k', "number of matching distributions to keep"),
]
boolean_options = []
def initialize_options(self):
self.match = None
self.dist_dir = None
self.keep = None
def finalize_options(self):
if self.match is None:
raise DistutilsOptionError(
"Must specify one or more (comma-separated) match patterns "
"(e.g. '.zip' or '.egg')"
)
if self.keep is None:
raise DistutilsOptionError("Must specify number of files to keep")
try:
self.keep = int(self.keep)
except ValueError:
raise DistutilsOptionError("--keep must be an integer")
if isinstance(self.match, basestring):
self.match = [
convert_path(p.strip()) for p in self.match.split(',')
]
self.set_undefined_options('bdist',('dist_dir', 'dist_dir'))
def run(self):
self.run_command("egg_info")
from glob import glob
for pattern in self.match:
pattern = self.distribution.get_name()+'*'+pattern
files = glob(os.path.join(self.dist_dir,pattern))
files = [(os.path.getmtime(f),f) for f in files]
files.sort()
files.reverse()
log.info("%d file(s) matching %s", len(files), pattern)
files = files[self.keep:]
for (t,f) in files:
log.info("Deleting %s", f)
if not self.dry_run:
os.unlink(f)
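# Typical invocation (a sketch, using the options defined above; dist_dir
# defaults to the bdist directory, typically dist/):
#
#   python setup.py rotate --match=.egg --keep=2
#
# This deletes all but the two newest .egg distributions of this project.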
|
cjdelisle/cjdns | refs/heads/master | node_build/dependencies/libuv/build/gyp/test/win/gyptest-long-command-line.py | 344 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure long command lines work.
"""
import TestGyp
import subprocess
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['ninja', 'msvs'])
CHDIR = 'long-command-line'
test.run_gyp('long-command-line.gyp', chdir=CHDIR)
test.build('long-command-line.gyp', test.ALL, chdir=CHDIR)
test.pass_test()
|
maxdeliso/elevatorSim | refs/heads/master | Lib/encodings/tis_620.py | 272 | """ Python Character Mapping Codec tis_620 generated from 'python-mappings/TIS-620.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='tis-620',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
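# Round-trip sketch (assumes this module lives in Python's standard
# ``encodings`` package, where the codec machinery finds it by name):
#
#   data = '\u0e01\u0e02'.encode('tis-620')    # -> b'\xa1\xa2'
#   assert data.decode('tis-620') == '\u0e01\u0e02'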
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\x80' # 0x80 -> <control>
'\x81' # 0x81 -> <control>
'\x82' # 0x82 -> <control>
'\x83' # 0x83 -> <control>
'\x84' # 0x84 -> <control>
'\x85' # 0x85 -> <control>
'\x86' # 0x86 -> <control>
'\x87' # 0x87 -> <control>
'\x88' # 0x88 -> <control>
'\x89' # 0x89 -> <control>
'\x8a' # 0x8A -> <control>
'\x8b' # 0x8B -> <control>
'\x8c' # 0x8C -> <control>
'\x8d' # 0x8D -> <control>
'\x8e' # 0x8E -> <control>
'\x8f' # 0x8F -> <control>
'\x90' # 0x90 -> <control>
'\x91' # 0x91 -> <control>
'\x92' # 0x92 -> <control>
'\x93' # 0x93 -> <control>
'\x94' # 0x94 -> <control>
'\x95' # 0x95 -> <control>
'\x96' # 0x96 -> <control>
'\x97' # 0x97 -> <control>
'\x98' # 0x98 -> <control>
'\x99' # 0x99 -> <control>
'\x9a' # 0x9A -> <control>
'\x9b' # 0x9B -> <control>
'\x9c' # 0x9C -> <control>
'\x9d' # 0x9D -> <control>
'\x9e' # 0x9E -> <control>
'\x9f' # 0x9F -> <control>
'\ufffe'
'\u0e01' # 0xA1 -> THAI CHARACTER KO KAI
'\u0e02' # 0xA2 -> THAI CHARACTER KHO KHAI
'\u0e03' # 0xA3 -> THAI CHARACTER KHO KHUAT
'\u0e04' # 0xA4 -> THAI CHARACTER KHO KHWAI
'\u0e05' # 0xA5 -> THAI CHARACTER KHO KHON
'\u0e06' # 0xA6 -> THAI CHARACTER KHO RAKHANG
'\u0e07' # 0xA7 -> THAI CHARACTER NGO NGU
'\u0e08' # 0xA8 -> THAI CHARACTER CHO CHAN
'\u0e09' # 0xA9 -> THAI CHARACTER CHO CHING
'\u0e0a' # 0xAA -> THAI CHARACTER CHO CHANG
'\u0e0b' # 0xAB -> THAI CHARACTER SO SO
'\u0e0c' # 0xAC -> THAI CHARACTER CHO CHOE
'\u0e0d' # 0xAD -> THAI CHARACTER YO YING
'\u0e0e' # 0xAE -> THAI CHARACTER DO CHADA
'\u0e0f' # 0xAF -> THAI CHARACTER TO PATAK
'\u0e10' # 0xB0 -> THAI CHARACTER THO THAN
'\u0e11' # 0xB1 -> THAI CHARACTER THO NANGMONTHO
'\u0e12' # 0xB2 -> THAI CHARACTER THO PHUTHAO
'\u0e13' # 0xB3 -> THAI CHARACTER NO NEN
'\u0e14' # 0xB4 -> THAI CHARACTER DO DEK
'\u0e15' # 0xB5 -> THAI CHARACTER TO TAO
'\u0e16' # 0xB6 -> THAI CHARACTER THO THUNG
'\u0e17' # 0xB7 -> THAI CHARACTER THO THAHAN
'\u0e18' # 0xB8 -> THAI CHARACTER THO THONG
'\u0e19' # 0xB9 -> THAI CHARACTER NO NU
'\u0e1a' # 0xBA -> THAI CHARACTER BO BAIMAI
'\u0e1b' # 0xBB -> THAI CHARACTER PO PLA
'\u0e1c' # 0xBC -> THAI CHARACTER PHO PHUNG
'\u0e1d' # 0xBD -> THAI CHARACTER FO FA
'\u0e1e' # 0xBE -> THAI CHARACTER PHO PHAN
'\u0e1f' # 0xBF -> THAI CHARACTER FO FAN
'\u0e20' # 0xC0 -> THAI CHARACTER PHO SAMPHAO
'\u0e21' # 0xC1 -> THAI CHARACTER MO MA
'\u0e22' # 0xC2 -> THAI CHARACTER YO YAK
'\u0e23' # 0xC3 -> THAI CHARACTER RO RUA
'\u0e24' # 0xC4 -> THAI CHARACTER RU
'\u0e25' # 0xC5 -> THAI CHARACTER LO LING
'\u0e26' # 0xC6 -> THAI CHARACTER LU
'\u0e27' # 0xC7 -> THAI CHARACTER WO WAEN
'\u0e28' # 0xC8 -> THAI CHARACTER SO SALA
'\u0e29' # 0xC9 -> THAI CHARACTER SO RUSI
'\u0e2a' # 0xCA -> THAI CHARACTER SO SUA
'\u0e2b' # 0xCB -> THAI CHARACTER HO HIP
'\u0e2c' # 0xCC -> THAI CHARACTER LO CHULA
'\u0e2d' # 0xCD -> THAI CHARACTER O ANG
'\u0e2e' # 0xCE -> THAI CHARACTER HO NOKHUK
'\u0e2f' # 0xCF -> THAI CHARACTER PAIYANNOI
'\u0e30' # 0xD0 -> THAI CHARACTER SARA A
'\u0e31' # 0xD1 -> THAI CHARACTER MAI HAN-AKAT
'\u0e32' # 0xD2 -> THAI CHARACTER SARA AA
'\u0e33' # 0xD3 -> THAI CHARACTER SARA AM
'\u0e34' # 0xD4 -> THAI CHARACTER SARA I
'\u0e35' # 0xD5 -> THAI CHARACTER SARA II
'\u0e36' # 0xD6 -> THAI CHARACTER SARA UE
'\u0e37' # 0xD7 -> THAI CHARACTER SARA UEE
'\u0e38' # 0xD8 -> THAI CHARACTER SARA U
'\u0e39' # 0xD9 -> THAI CHARACTER SARA UU
'\u0e3a' # 0xDA -> THAI CHARACTER PHINTHU
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\u0e3f' # 0xDF -> THAI CURRENCY SYMBOL BAHT
'\u0e40' # 0xE0 -> THAI CHARACTER SARA E
'\u0e41' # 0xE1 -> THAI CHARACTER SARA AE
'\u0e42' # 0xE2 -> THAI CHARACTER SARA O
'\u0e43' # 0xE3 -> THAI CHARACTER SARA AI MAIMUAN
'\u0e44' # 0xE4 -> THAI CHARACTER SARA AI MAIMALAI
'\u0e45' # 0xE5 -> THAI CHARACTER LAKKHANGYAO
'\u0e46' # 0xE6 -> THAI CHARACTER MAIYAMOK
'\u0e47' # 0xE7 -> THAI CHARACTER MAITAIKHU
'\u0e48' # 0xE8 -> THAI CHARACTER MAI EK
'\u0e49' # 0xE9 -> THAI CHARACTER MAI THO
'\u0e4a' # 0xEA -> THAI CHARACTER MAI TRI
'\u0e4b' # 0xEB -> THAI CHARACTER MAI CHATTAWA
'\u0e4c' # 0xEC -> THAI CHARACTER THANTHAKHAT
'\u0e4d' # 0xED -> THAI CHARACTER NIKHAHIT
'\u0e4e' # 0xEE -> THAI CHARACTER YAMAKKAN
'\u0e4f' # 0xEF -> THAI CHARACTER FONGMAN
'\u0e50' # 0xF0 -> THAI DIGIT ZERO
'\u0e51' # 0xF1 -> THAI DIGIT ONE
'\u0e52' # 0xF2 -> THAI DIGIT TWO
'\u0e53' # 0xF3 -> THAI DIGIT THREE
'\u0e54' # 0xF4 -> THAI DIGIT FOUR
'\u0e55' # 0xF5 -> THAI DIGIT FIVE
'\u0e56' # 0xF6 -> THAI DIGIT SIX
'\u0e57' # 0xF7 -> THAI DIGIT SEVEN
'\u0e58' # 0xF8 -> THAI DIGIT EIGHT
'\u0e59' # 0xF9 -> THAI DIGIT NINE
'\u0e5a' # 0xFA -> THAI CHARACTER ANGKHANKHU
'\u0e5b' # 0xFB -> THAI CHARACTER KHOMUT
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
|
underbluewaters/marinemap | refs/heads/master | lingcod/bioregions/migrations/0001_initial.py | 3 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from django.conf import settings
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Bioregion'
db.create_table('bioregions_bioregion', (
('geometry', self.gf('django.contrib.gis.db.models.fields.PolygonField')(srid=settings.GEOMETRY_DB_SRID, null=True, blank=True)),
('creation_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('modification_date', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
))
db.send_create_signal('bioregions', ['Bioregion'])
def backwards(self, orm):
# Deleting model 'Bioregion'
db.delete_table('bioregions_bioregion')
models = {
'bioregions.bioregion': {
'Meta': {'object_name': 'Bioregion'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'geometry': ('django.contrib.gis.db.models.fields.PolygonField', [], {'srid': str(settings.GEOMETRY_DB_SRID), 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modification_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['bioregions']
|
suttond/MODOI | refs/heads/master | ase/gui/calculator.py | 2 | # encoding: utf-8
"""calculator.py - module for choosing a calculator."""
import gtk
from gettext import gettext as _
import os
import numpy as np
from copy import copy
from ase.gui.setupwindow import SetupWindow
from ase.gui.progress import DefaultProgressIndicator, GpawProgressIndicator
from ase.gui.widgets import pack, oops, cancel_apply_ok
from ase import Atoms
from ase.data import chemical_symbols
import ase
# Asap and GPAW may be imported if selected.
introtext = _("""\
To perform most calculations on the atoms, a Calculator object must first
be associated with them. ASE supports a number of calculators, each
covering different elements and implementing different physical models
for the interatomic interactions.\
""")
# Informational text about the calculators
lj_info_txt = _("""\
The Lennard-Jones pair potential is one of the simplest
possible models for interatomic interactions, mostly
suitable for noble gases and model systems.
Interactions are described by an interaction length and an
interaction strength.\
""")
emt_info_txt = _("""\
The EMT potential is a many-body potential, giving a
good description of the late transition metals crystallizing
in the FCC crystal structure. The elements described by the
main set of EMT parameters are Al, Ni, Cu, Pd, Ag, Pt, and
Au; the Al potential is, however, not suitable for materials
science applications, as the stacking fault energy is wrong.
A number of parameter sets are provided.
<b>Default parameters:</b>
The default EMT parameters, as published in K. W. Jacobsen,
P. Stoltze and J. K. Nørskov, <i>Surf. Sci.</i> <b>366</b>, 394 (1996).
<b>Alternative Cu, Ag and Au:</b>
An alternative set of parameters for Cu, Ag and Au,
reoptimized to experimental data including the stacking
fault energies by Torben Rasmussen (partly unpublished).
<b>Ruthenium:</b>
Parameters for Ruthenium, as published in J. Gavnholt and
J. Schiøtz, <i>Phys. Rev. B</i> <b>77</b>, 035404 (2008).
<b>Metallic glasses:</b>
Parameters for MgCu and CuZr metallic glasses. MgCu
parameters are in N. P. Bailey, J. Schiøtz and
K. W. Jacobsen, <i>Phys. Rev. B</i> <b>69</b>, 144205 (2004).
CuZr in A. Paduraru, A. Kenoufi, N. P. Bailey and
J. Schiøtz, <i>Adv. Eng. Mater.</i> <b>9</b>, 505 (2007).
""")
aseemt_info_txt = _("""\
The EMT potential is a many-body potential, giving a
good description of the late transition metals crystallizing
in the FCC crystal structure. The elements described by the
main set of EMT parameters are Al, Ni, Cu, Pd, Ag, Pt, and
Au. In addition, this implementation allows for the use of
H, N, O and C adatoms, although the description of these is
most likely not very good.
<b>This is the ASE implementation of EMT.</b> For large
simulations the ASAP implementation is more suitable; this
implementation is mainly to make EMT available when ASAP is
not installed.
""")
eam_info_txt = _("""\
The EAM/ADP potential is a many-body potential
implementing the Embedded Atom Method (EAM)
plus the Angular Dependent Potential (ADP),
which is an extension of the EAM to include
directional bonds. EAM is suited for FCC metallic
bonding, while the ADP is suited for metallic bonds
with some degree of directionality.
For EAM see M.S. Daw and M.I. Baskes,
Phys. Rev. Letters 50 (1983) 1285.
For ADP see Y. Mishin, M.J. Mehl, and
D.A. Papaconstantopoulos, Acta Materialia 53 2005
4029--4041.
Data for the potential is contained in a file in either LAMMPS Alloy
or ADP format, which needs to be loaded before use. The Interatomic
Potentials Repository Project at http://www.ctcms.nist.gov/potentials/
contains many suitable potential files.
For large simulations the LAMMPS calculator is more
suitable; this implementation is mainly to make EAM
available when LAMMPS is not installed or to develop
new EAM/ADP potentials by matching results using ab
initio.
""")
brenner_info_txt = _("""\
The Brenner potential is a reactive bond-order potential for
carbon and hydrocarbons. As a bond-order potential, it takes
into account that carbon orbitals can hybridize in different
ways, and that carbon can form single, double and triple
bonds. That the potential is reactive means that it can
handle gradual changes in the bond order as chemical bonds
are formed or broken.
The Brenner potential is implemented in Asap, based on a
C implementation published at http://www.rahul.net/pcm/brenner/ .
The potential is documented here:
Donald W Brenner, Olga A Shenderova, Judith A Harrison,
Steven J Stuart, Boris Ni and Susan B Sinnott:
"A second-generation reactive empirical bond order (REBO)
potential energy expression for hydrocarbons",
J. Phys.: Condens. Matter 14 (2002) 783-802.
doi: 10.1088/0953-8984/14/4/312
""")
gpaw_info_txt = _("""\
GPAW implements Density Functional Theory using a
<b>G</b>rid-based real-space representation of the wave
functions, and the <b>P</b>rojector <b>A</b>ugmented <b>W</b>ave
method for handling the core regions.
""")
aims_info_txt = _("""\
FHI-aims is an external package implementing density
functional theory and quantum chemical methods using
all-electron methods and a numeric local orbital basis set.
For full details, see http://www.fhi-berlin.mpg.de/aims/
or Comp. Phys. Comm. v180 2175 (2009). The ASE
documentation contains information on the keywords and
functionalities available within this interface.
""")
aims_pbc_warning_text = _("""\
WARNING:
Your system seems to have more than zero but less than
three periodic dimensions. Please check that this is
really what you want to compute. Assuming full
3D periodicity for this calculator.""")
vasp_info_txt = _("""\
VASP is an external package implementing density
functional theory using pseudopotentials
or the projector-augmented wave method together
with a plane wave basis set. For full details, see
http://cms.mpi.univie.ac.at/vasp/vasp/
""")
emt_parameters = (
(_("Default (Al, Ni, Cu, Pd, Ag, Pt, Au)"), None),
(_("Alternative Cu, Ag and Au"), "EMTRasmussenParameters"),
(_("Ruthenium"), "EMThcpParameters"),
(_("CuMg and CuZr metallic glass"), "EMTMetalGlassParameters")
)
class SetCalculator(SetupWindow):
"Window for selecting a calculator."
# List the names of the radio button attributes
radios = ("none", "lj", "emt", "aseemt", "eam", "brenner",
"gpaw", "aims", "vasp")
# List the names of the parameter dictionaries
paramdicts = ("lj_parameters", "eam_parameters", "gpaw_parameters",
"aims_parameters",)
# The name used to store parameters on the gui object
classname = "SetCalculator"
def __init__(self, gui):
SetupWindow.__init__(self)
self.set_title(_("Select calculator"))
vbox = gtk.VBox()
# Introductory text
self.packtext(vbox, introtext)
pack(vbox, [gtk.Label(_("Calculator:"))])
# No calculator (the default)
self.none_radio = gtk.RadioButton(None, _("None"))
pack(vbox, [self.none_radio])
# Lennard-Jones
self.lj_radio = gtk.RadioButton(self.none_radio,
_("Lennard-Jones (ASAP)"))
self.lj_setup = gtk.Button(_("Setup"))
self.lj_info = InfoButton(lj_info_txt)
self.lj_setup.connect("clicked", self.lj_setup_window)
self.pack_line(vbox, self.lj_radio, self.lj_setup, self.lj_info)
# EMT
self.emt_radio = gtk.RadioButton(
self.none_radio, _("EMT - Effective Medium Theory (ASAP)"))
self.emt_setup = gtk.combo_box_new_text()
self.emt_param_info = {}
for p in emt_parameters:
self.emt_setup.append_text(p[0])
self.emt_param_info[p[0]] = p[1]
self.emt_setup.set_active(0)
self.emt_info = InfoButton(emt_info_txt)
self.pack_line(vbox, self.emt_radio, self.emt_setup, self.emt_info)
# EMT (ASE implementation)
self.aseemt_radio = gtk.RadioButton(
self.none_radio, _("EMT - Effective Medium Theory (ASE)"))
self.aseemt_info = InfoButton(aseemt_info_txt)
self.pack_line(vbox, self.aseemt_radio, None, self.aseemt_info)
# EAM
self.eam_radio = gtk.RadioButton(
self.none_radio,
_("EAM - Embedded Atom Method/Angular Dependent Potential (ASE)"))
self.eam_setup = gtk.Button(_("Setup"))
self.eam_setup.connect("clicked", self.eam_setup_window)
self.eam_info = InfoButton(eam_info_txt)
self.pack_line(vbox, self.eam_radio, self.eam_setup, self.eam_info)
# Brenner potential
self.brenner_radio = gtk.RadioButton(
self.none_radio, _("Brenner Potential (ASAP)"))
self.brenner_info = InfoButton(brenner_info_txt)
self.pack_line(vbox, self.brenner_radio, None, self.brenner_info)
# GPAW
self.gpaw_radio = gtk.RadioButton(self.none_radio,
_("Density Functional Theory (GPAW)")
)
self.gpaw_setup = gtk.Button(_("Setup"))
self.gpaw_info = InfoButton(gpaw_info_txt)
self.gpaw_setup.connect("clicked", self.gpaw_setup_window)
self.pack_line(vbox, self.gpaw_radio, self.gpaw_setup, self.gpaw_info)
# FHI-aims
self.aims_radio = gtk.RadioButton(self.none_radio,
_("Density Functional Theory "
"(FHI-aims)"))
self.aims_setup = gtk.Button(_("Setup"))
self.aims_info = InfoButton(aims_info_txt)
self.aims_setup.connect("clicked", self.aims_setup_window)
self.pack_line(vbox, self.aims_radio, self.aims_setup, self.aims_info)
# VASP
self.vasp_radio = gtk.RadioButton(self.none_radio,
_("Density Functional Theory "
"(VASP)"))
self.vasp_setup = gtk.Button(_("Setup"))
self.vasp_info = InfoButton(vasp_info_txt)
self.vasp_setup.connect("clicked", self.vasp_setup_window)
self.pack_line(vbox, self.vasp_radio, self.vasp_setup, self.vasp_info)
# Buttons etc.
pack(vbox, gtk.Label(""))
buts = cancel_apply_ok(cancel=lambda widget: self.destroy(),
apply=self.apply,
ok=self.ok)
pack(vbox, [buts], end=True, bottom=True)
self.check = gtk.CheckButton(_("Check that the calculator is "
"reasonable."))
self.check.set_active(True)
fr = gtk.Frame()
fr.add(self.check)
fr.show_all()
pack(vbox, [fr], end=True, bottom=True)
# Finalize setup
self.add(vbox)
vbox.show()
self.show()
self.gui = gui
self.load_state()
def pack_line(self, box, radio, setup, info):
hbox = gtk.HBox()
hbox.pack_start(radio, 0, 0)
hbox.pack_start(gtk.Label(" "), 0, 0)
hbox.pack_end(info, 0, 0)
if setup is not None:
radio.connect("toggled", self.radio_toggled, setup)
setup.set_sensitive(False)
hbox.pack_end(setup, 0, 0)
hbox.show_all()
box.pack_start(hbox, 0, 0)
def radio_toggled(self, radio, button):
button.set_sensitive(radio.get_active())
def lj_setup_window(self, widget):
if not self.get_atoms():
return
lj_param = getattr(self, "lj_parameters", None)
LJ_Window(self, lj_param, "lj_parameters")
# When control is returned, self.lj_parameters has been set.
def eam_setup_window(self, widget):
if not self.get_atoms():
return
eam_param = getattr(self, "eam_parameters", None)
EAM_Window(self, eam_param, "eam_parameters")
# When control is returned, self.eam_parameters has been set.
def gpaw_setup_window(self, widget):
if not self.get_atoms():
return
gpaw_param = getattr(self, "gpaw_parameters", None)
GPAW_Window(self, gpaw_param, "gpaw_parameters")
# When control is returned, self.gpaw_parameters has been set.
def aims_setup_window(self, widget):
if not self.get_atoms():
return
aims_param = getattr(self, "aims_parameters", None)
AIMS_Window(self, aims_param, "aims_parameters")
# When control is returned, self.aims_parameters has been set.
def vasp_setup_window(self, widget):
if not self.get_atoms():
return
vasp_param = getattr(self, "vasp_parameters", None)
VASP_Window(self, vasp_param, "vasp_parameters")
# When control is returned, self.vasp_parameters has been set.
def get_atoms(self):
"Make an atoms object from the active frame"
images = self.gui.images
frame = self.gui.frame
if images.natoms < 1:
oops(_("No atoms present"))
return False
self.atoms = Atoms(positions=images.P[frame],
symbols=images.Z,
cell=images.A[frame],
pbc=images.pbc,
magmoms=images.M[frame])
if not images.dynamic.all():
from ase.constraints import FixAtoms
self.atoms.set_constraint(FixAtoms(mask=1 - images.dynamic))
return True
def apply(self, *widget):
if self.do_apply():
self.save_state()
return True
else:
return False
def do_apply(self):
nochk = not self.check.get_active()
self.gui.simulation["progress"] = DefaultProgressIndicator()
if self.none_radio.get_active():
self.gui.simulation['calc'] = None
return True
elif self.lj_radio.get_active():
if nochk or self.lj_check():
self.choose_lj()
return True
elif self.emt_radio.get_active():
if nochk or self.emt_check():
self.choose_emt()
return True
elif self.aseemt_radio.get_active():
if nochk or self.aseemt_check():
self.choose_aseemt()
return True
elif self.eam_radio.get_active():
if nochk or self.eam_check():
self.choose_eam()
return True
elif self.brenner_radio.get_active():
if nochk or self.brenner_check():
self.choose_brenner()
return True
elif self.gpaw_radio.get_active():
if nochk or self.gpaw_check():
self.choose_gpaw()
return True
elif self.aims_radio.get_active():
if nochk or self.aims_check():
self.choose_aims()
return True
elif self.vasp_radio.get_active():
if nochk or self.vasp_check():
self.choose_vasp()
return True
return False
def ok(self, *widget):
if self.apply():
self.destroy()
def save_state(self):
state = {}
for r in self.radios:
radiobutton = getattr(self, r + "_radio")
if radiobutton.get_active():
state["radio"] = r
state["emtsetup"] = self.emt_setup.get_active()
state["check"] = self.check.get_active()
for p in self.paramdicts:
if hasattr(self, p):
state[p] = getattr(self, p)
self.gui.module_state[self.classname] = state
def load_state(self):
try:
state = self.gui.module_state[self.classname]
except KeyError:
return
r = state["radio"]
radiobutton = getattr(self, r + "_radio")
radiobutton.set_active(True)
self.emt_setup.set_active(state["emtsetup"])
self.check.set_active(state["check"])
for p in self.paramdicts:
if p in state:
setattr(self, p, state[p])
def lj_check(self):
try:
import asap3
except ImportError:
oops(_("ASAP is not installed. (Failed to import asap3)"))
return False
if not hasattr(self, "lj_parameters"):
oops(_("You must set up the Lennard-Jones parameters"))
return False
try:
self.atoms.set_calculator(asap3.LennardJones(**self.lj_parameters))
except (asap3.AsapError, TypeError, ValueError) as e:
oops(_("Could not create useful Lennard-Jones calculator."),
str(e))
return False
return True
def choose_lj(self):
# Define a function on the fly!
import asap3
def lj_factory(p=self.lj_parameters, lj=asap3.LennardJones):
return lj(**p)
self.gui.simulation["calc"] = lj_factory
def emt_get(self):
import asap3
provider_name = self.emt_setup.get_active_text()
provider = self.emt_param_info[provider_name]
if provider is not None:
provider = getattr(asap3, provider)
return (asap3.EMT, provider, asap3)
def emt_check(self):
if not self.get_atoms():
return False
try:
emt, provider, asap3 = self.emt_get()
except ImportError:
oops(_("ASAP is not installed. (Failed to import asap3)"))
return False
try:
if provider is not None:
self.atoms.set_calculator(emt(provider()))
else:
self.atoms.set_calculator(emt())
except (asap3.AsapError, TypeError, ValueError) as e:
oops(_("Could not attach EMT calculator to the atoms."),
str(e))
return False
return True
def choose_emt(self):
emt, provider, asap3 = self.emt_get()
if provider is None:
emt_factory = emt
else:
def emt_factory(emt=emt, prov=provider):
return emt(prov())
self.gui.simulation["calc"] = emt_factory
def aseemt_check(self):
return self.element_check("ASE EMT", ['H', 'Al', 'Cu', 'Ag', 'Au',
'Ni', 'Pd', 'Pt', 'C', 'N', 'O'])
def eam_check(self):
from ase.calculators.eam import EAM
if not hasattr(self, "eam_parameters"):
oops(_("You must set up the EAM parameters"))
return False
self.atoms.set_calculator(EAM(**self.eam_parameters))
return self.element_check("EAM", self.atoms.get_calculator().elements)
def choose_eam(self):
from ase.calculators.eam import EAM
def eam_factory(p=self.eam_parameters):
calc = EAM(**p)
return calc
self.gui.simulation["calc"] = eam_factory
def brenner_check(self):
try:
import asap3
except ImportError:
oops(_("ASAP is not installed. (Failed to import asap3)"))
return False
return self.element_check("Brenner potential", ['H', 'C', 'Si'])
def choose_brenner(self):
import asap3
self.gui.simulation["calc"] = asap3.BrennerPotential
def choose_aseemt(self):
import ase.calculators.emt
self.gui.simulation["calc"] = ase.calculators.emt.EMT
# In case Asap has been imported
ase.calculators.emt.EMT.disabled = False
def gpaw_check(self):
try:
import gpaw
except ImportError:
oops(_("GPAW is not installed. (Failed to import gpaw)"))
return False
if not hasattr(self, "gpaw_parameters"):
oops(_("You must set up the GPAW parameters"))
return False
return True
def choose_gpaw(self):
# This reuses the same GPAW object.
try:
import gpaw
except ImportError:
oops(_("GPAW is not installed. (Failed to import gpaw)"))
return False
p = self.gpaw_parameters
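# Assemble the GPAW keyword dict, keeping only the keys relevant to the
# chosen grid specification (h vs. gpts) and mode (LCAO also needs a basis).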
use = ["xc", "kpts", "mode"]
if p["use_h"]:
use.append("h")
else:
use.append("gpts")
if p["mode"] == "lcao":
use.append("basis")
gpaw_param = {}
for s in use:
gpaw_param[s] = p[s]
if p["use mixer"]:
mx = getattr(gpaw, p["mixer"])
mx_args = {}
mx_arg_n = ["beta", "nmaxold", "weight"]
if p["mixer"] == "MixerDiff":
mx_arg_n.extend(["beta_m", "nmaxold_m", "weight_m"])
for s in mx_arg_n:
mx_args[s] = p[s]
gpaw_param["mixer"] = mx(**mx_args)
progress = GpawProgressIndicator()
self.gui.simulation["progress"] = progress
gpaw_param["txt"] = progress.get_gpaw_stream()
gpaw_calc = gpaw.GPAW(**gpaw_param)
def gpaw_factory(calc=gpaw_calc):
return calc
self.gui.simulation["calc"] = gpaw_factory
def aims_check(self):
if not hasattr(self, "aims_parameters"):
oops(_("You must set up the FHI-aims parameters"))
return False
return True
def choose_aims(self):
param = self.aims_parameters
from ase.calculators.aims import Aims
calc_aims = Aims(**param)
def aims_factory(calc=calc_aims):
return calc
self.gui.simulation["calc"] = aims_factory
def vasp_check(self):
if not hasattr(self, "vasp_parameters"):
oops(_("You must set up the VASP parameters"))
return False
return True
def choose_vasp(self):
param = self.vasp_parameters
from ase.calculators.vasp import Vasp
calc_vasp = Vasp(**param)
def vasp_factory(calc=calc_vasp):
return calc
self.gui.simulation["calc"] = vasp_factory
def element_check(self, name, elements):
"Check that all atoms are allowed"
elements = [ase.data.atomic_numbers[s] for s in elements]
elements_dict = {}
for e in elements:
elements_dict[e] = True
if not self.get_atoms():
return False
try:
for e in self.atoms.get_atomic_numbers():
elements_dict[e]
except KeyError:
oops(_("Element %(sym)s not allowed by the '%(name)s' calculator")
% dict(sym=ase.data.chemical_symbols[e], name=name))
return False
return True
class InfoButton(gtk.Button):
def __init__(self, txt):
gtk.Button.__init__(self, _("Info"))
self.txt = txt
self.connect('clicked', self.run)
def run(self, widget):
dialog = gtk.MessageDialog(flags=gtk.DIALOG_MODAL,
type=gtk.MESSAGE_INFO,
buttons=gtk.BUTTONS_CLOSE)
dialog.set_markup(self.txt)
dialog.connect('response', lambda x, y: dialog.destroy())
dialog.show()
class LJ_Window(gtk.Window):
def __init__(self, owner, param, attrname):
gtk.Window.__init__(self)
self.set_title(_("Lennard-Jones parameters"))
self.owner = owner
self.attrname = attrname
atoms = owner.atoms
atnos = atoms.get_atomic_numbers()
found = {}
for z in atnos:
found[z] = True
self.present = found.keys()
self.present.sort() # Sorted list of atomic numbers
nelem = len(self.present)
vbox = gtk.VBox()
label = gtk.Label(_("Specify the Lennard-Jones parameters here"))
pack(vbox, [label])
pack(vbox, gtk.Label(""))
pack(vbox, [gtk.Label(_("Epsilon (eV):"))])
tbl, self.epsilon_adj = self.makematrix(self.present)
pack(vbox, [tbl])
pack(vbox, gtk.Label(""))
pack(vbox, [gtk.Label(_(u"Sigma (Å):"))])
tbl, self.sigma_adj = self.makematrix(self.present)
pack(vbox, [tbl])
# TRANSLATORS: Shift roughly means adjust (about a potential)
self.modif = gtk.CheckButton(_("Shift to make smooth at cutoff"))
self.modif.set_active(True)
pack(vbox, gtk.Label(""))
pack(vbox, self.modif)
pack(vbox, gtk.Label(""))
butbox = gtk.HButtonBox()
cancel_but = gtk.Button(stock=gtk.STOCK_CANCEL)
cancel_but.connect('clicked', lambda widget: self.destroy())
ok_but = gtk.Button(stock=gtk.STOCK_OK)
ok_but.connect('clicked', self.ok)
butbox.pack_start(cancel_but, 0, 0)
butbox.pack_start(ok_but, 0, 0)
butbox.show_all()
pack(vbox, [butbox], end=True, bottom=True)
vbox.show()
self.add(vbox)
# Now, set the parameters
if param and param['elements'] == self.present:
self.set_param(self.epsilon_adj, param["epsilon"], nelem)
self.set_param(self.sigma_adj, param["sigma"], nelem)
self.modif.set_active(param["modified"])
self.show()
self.grab_add() # Lock all other windows
def makematrix(self, present):
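# Build a triangular table of spin buttons, one per element pair; returns
# the gtk table plus a dict mapping (i, j) index pairs to their adjustments.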
nelem = len(present)
adjdict = {}
tbl = gtk.Table(2 + nelem, 2 + nelem)
for i in range(nelem):
s = chemical_symbols[present[i]]
tbl.attach(gtk.Label(" " + str(present[i])), 0, 1, i, i + 1)
tbl.attach(gtk.Label(" " + s + " "), 1, 2, i, i + 1)
tbl.attach(gtk.Label(str(present[i])),
i + 2, i + 3, 1 + nelem, 2 + nelem)
tbl.attach(gtk.Label(s), i + 2, i + 3, nelem, 1 + nelem)
for j in range(i + 1):
adj = gtk.Adjustment(1.0, 0.0, 100.0, 0.1)
spin = gtk.SpinButton(adj, 0.1, 3)
tbl.attach(spin, 2 + j, 3 + j, i, i + 1)
adjdict[(i, j)] = adj
tbl.show_all()
return tbl, adjdict
def set_param(self, adj, params, n):
for i in range(n):
for j in range(n):
if j <= i:
adj[(i, j)].value = params[i, j]
def get_param(self, adj, params, n):
for i in range(n):
for j in range(n):
if j <= i:
params[i, j] = params[j, i] = adj[(i, j)].value
def destroy(self):
self.grab_remove()
gtk.Window.destroy(self)
def ok(self, *args):
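# Assemble symmetric epsilon and sigma matrices from the triangular
# input and store them on the owner window.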
params = {}
params["elements"] = copy(self.present)
n = len(self.present)
eps = np.zeros((n, n))
self.get_param(self.epsilon_adj, eps, n)
sigma = np.zeros((n, n))
self.get_param(self.sigma_adj, sigma, n)
params["epsilon"] = eps
params["sigma"] = sigma
params["modified"] = self.modif.get_active()
setattr(self.owner, self.attrname, params)
self.destroy()
class EAM_Window(gtk.Window):
def __init__(self, owner, param, attrname):
gtk.Window.__init__(self)
self.set_title(_("EAM parameters"))
self.owner = owner
self.attrname = attrname
atoms = owner.atoms
self.natoms = len(atoms)
vbox = gtk.VBox()
vbox.show()
self.add(vbox)
pack(vbox, gtk.Label(""))
butbox = gtk.HButtonBox()
import_potential_but = gtk.Button(_("Import Potential"))
import_potential_but.connect("clicked", self.import_potential)
cancel_but = gtk.Button(stock=gtk.STOCK_CANCEL)
cancel_but.connect('clicked', lambda widget: self.destroy())
ok_but = gtk.Button(stock=gtk.STOCK_OK)
ok_but.connect('clicked', self.ok)
butbox.pack_start(import_potential_but, 0, 0)
butbox.pack_start(cancel_but, 0, 0)
butbox.pack_start(ok_but, 0, 0)
butbox.show_all()
pack(vbox, [butbox], end=True, bottom=True)
vbox.show()
# Now, set the parameters
if param:
self.eam_file = param['potential']
self.show()
self.grab_add() # Lock all other windows
def ok(self, *args):
if not hasattr(self.owner, "eam_parameters"):
oops(_("You need to import the potential file"))
self.destroy()
def import_potential(self, *args):
dirname = "."
filename = "Al99.eam.alloy"
chooser = gtk.FileChooserDialog(
_('Import .alloy or .adp potential file ... '),
None, gtk.FILE_CHOOSER_ACTION_OPEN,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OPEN, gtk.RESPONSE_OK))
chooser.set_filename(os.path.join(dirname, filename))
openr = chooser.run()
if openr == gtk.RESPONSE_OK:
param = {}
filename = chooser.get_filename()
param['potential'] = filename
setattr(self.owner, self.attrname, param)
chooser.destroy()
class GPAW_Window(gtk.Window):
gpaw_xc_list = ['LDA', 'PBE', 'RPBE', 'revPBE']
gpaw_xc_default = 'PBE'
def __init__(self, owner, param, attrname):
gtk.Window.__init__(self)
self.set_title(_("GPAW parameters"))
self.owner = owner
self.attrname = attrname
atoms = owner.atoms
self.ucell = atoms.get_cell()
self.size = tuple([self.ucell[i, i] for i in range(3)])
self.pbc = atoms.get_pbc()
self.orthogonal = self.isorthogonal(self.ucell)
self.natoms = len(atoms)
vbox = gtk.VBox()
#label = gtk.Label("Specify the GPAW parameters here")
#pack(vbox, [label])
# Print some info
txt = _("%i atoms.\n") % (self.natoms,)
if self.orthogonal:
txt += _(u"Orthogonal unit cell: %.2f x %.2f x %.2f Å.") % self.size
else:
txt += _("Non-orthogonal unit cell:\n")
txt += str(self.ucell)
pack(vbox, [gtk.Label(txt)])
# XC potential
self.xc = gtk.combo_box_new_text()
for i, x in enumerate(self.gpaw_xc_list):
self.xc.append_text(x)
if x == self.gpaw_xc_default:
self.xc.set_active(i)
pack(vbox, [gtk.Label(_("Exchange-correlation functional: ")),
self.xc])
# Grid spacing
self.radio_h = gtk.RadioButton(None, _("Grid spacing"))
self.h = gtk.Adjustment(0.18, 0.0, 1.0, 0.01)
self.h_spin = gtk.SpinButton(self.h, 0, 2)
pack(vbox, [self.radio_h, gtk.Label(" h = "), self.h_spin,
gtk.Label(_(u"Å"))])
self.radio_gpts = gtk.RadioButton(self.radio_h, _("Grid points"))
self.gpts = []
self.gpts_spin = []
for i in range(3):
g = gtk.Adjustment(4, 4, 1000, 4)
s = gtk.SpinButton(g, 0, 0)
self.gpts.append(g)
self.gpts_spin.append(s)
self.gpts_hlabel = gtk.Label("")
self.gpts_hlabel_format = _(u"h<sub>eff</sub> = (%.3f, %.3f, %.3f) Å")
pack(vbox, [self.radio_gpts, gtk.Label(" gpts = ("), self.gpts_spin[0],
gtk.Label(", "), self.gpts_spin[1], gtk.Label(", "),
self.gpts_spin[2], gtk.Label(") "), self.gpts_hlabel])
self.radio_h.connect("toggled", self.radio_grid_toggled)
self.radio_gpts.connect("toggled", self.radio_grid_toggled)
self.radio_grid_toggled(None)
for g in self.gpts:
g.connect("value-changed", self.gpts_changed)
self.h.connect("value-changed", self.h_changed)
# K-points
self.kpts = []
self.kpts_spin = []
for i in range(3):
if self.pbc[i] and self.orthogonal:
default = np.ceil(20.0 / self.size[i])
else:
default = 1
g = gtk.Adjustment(default, 1, 100, 1)
s = gtk.SpinButton(g, 0, 0)
self.kpts.append(g)
self.kpts_spin.append(s)
if not self.pbc[i]:
s.set_sensitive(False)
g.connect("value-changed", self.k_changed)
pack(vbox, [gtk.Label(_("k-points k = (")), self.kpts_spin[0],
gtk.Label(", "), self.kpts_spin[1], gtk.Label(", "),
self.kpts_spin[2], gtk.Label(")")])
self.kpts_label = gtk.Label("")
self.kpts_label_format = _(u"k-points x size: (%.1f, %.1f, %.1f) Å")
pack(vbox, [self.kpts_label])
self.k_changed()
# Spin polarized
self.spinpol = gtk.CheckButton(_("Spin polarized"))
pack(vbox, [self.spinpol])
pack(vbox, gtk.Label(""))
# Mode and basis functions
self.mode = gtk.combo_box_new_text()
self.mode.append_text(_("FD - Finite Difference (grid) mode"))
self.mode.append_text(_("LCAO - Linear Combination of Atomic "
"Orbitals"))
self.mode.set_active(0)
pack(vbox, [gtk.Label(_("Mode: ")), self.mode])
self.basis = gtk.combo_box_new_text()
self.basis.append_text(_("sz - Single Zeta"))
self.basis.append_text(_("szp - Single Zeta polarized"))
self.basis.append_text(_("dzp - Double Zeta polarized"))
self.basis.set_active(2) # dzp
pack(vbox, [gtk.Label(_("Basis functions: ")), self.basis])
pack(vbox, gtk.Label(""))
self.mode.connect("changed", self.mode_changed)
self.mode_changed()
# Mixer
self.use_mixer = gtk.CheckButton(_("Non-standard mixer parameters"))
pack(vbox, [self.use_mixer])
self.radio_mixer = gtk.RadioButton(None, "Mixer ")
self.radio_mixersum = gtk.RadioButton(self.radio_mixer, "MixerSum ")
self.radio_mixerdiff = gtk.RadioButton(self.radio_mixer, "MixerDiff")
pack(vbox, [self.radio_mixer, self.radio_mixersum,
self.radio_mixerdiff])
self.beta_adj = gtk.Adjustment(0.25, 0.0, 1.0, 0.05)
self.beta_spin = gtk.SpinButton(self.beta_adj, 0, 2)
self.nmaxold_adj = gtk.Adjustment(3, 1, 10, 1)
self.nmaxold_spin = gtk.SpinButton(self.nmaxold_adj, 0, 0)
self.weight_adj = gtk.Adjustment(50, 1, 500, 1)
self.weight_spin = gtk.SpinButton(self.weight_adj, 0, 0)
pack(vbox, [gtk.Label("beta = "), self.beta_spin,
gtk.Label(" nmaxold = "), self.nmaxold_spin,
gtk.Label(" weight = "), self.weight_spin])
self.beta_m_adj = gtk.Adjustment(0.70, 0.0, 1.0, 0.05)
self.beta_m_spin = gtk.SpinButton(self.beta_m_adj, 0, 2)
self.nmaxold_m_adj = gtk.Adjustment(2, 1, 10, 1)
self.nmaxold_m_spin = gtk.SpinButton(self.nmaxold_m_adj, 0, 0)
self.weight_m_adj = gtk.Adjustment(10, 1, 500, 1)
self.weight_m_spin = gtk.SpinButton(self.weight_m_adj, 0, 0)
pack(vbox, [gtk.Label("beta_m = "), self.beta_m_spin,
gtk.Label(" nmaxold_m = "), self.nmaxold_m_spin,
gtk.Label(" weight_m = "), self.weight_m_spin])
for but in (self.spinpol, self.use_mixer, self.radio_mixer,
self.radio_mixersum, self.radio_mixerdiff):
but.connect("clicked", self.mixer_changed)
self.mixer_changed()
# Eigensolver
# Poisson-solver
vbox.show()
self.add(vbox)
# Buttons at the bottom
pack(vbox, gtk.Label(""))
butbox = gtk.HButtonBox()
cancel_but = gtk.Button(stock=gtk.STOCK_CANCEL)
cancel_but.connect('clicked', lambda widget: self.destroy())
ok_but = gtk.Button(stock=gtk.STOCK_OK)
ok_but.connect('clicked', self.ok)
butbox.pack_start(cancel_but, 0, 0)
butbox.pack_start(ok_but, 0, 0)
butbox.show_all()
pack(vbox, [butbox], end=True, bottom=True)
# Set stored parameters
if param:
self.xc.set_active(param["xc#"])
if param["use_h"]:
self.radio_h.set_active(True)
else:
self.radio_gpts.set_active(True)
for i in range(3):
self.gpts[i].value = param["gpts"][i]
self.kpts[i].value = param["kpts"][i]
self.spinpol.set_active(param["spinpol"])
self.mode.set_active(param["mode#"])
self.basis.set_active(param["basis#"])
self.use_mixer.set_active(param["use mixer"])
getattr(self, "radio_" + param["mixer"].lower()).set_active(True)
for t in ("beta", "nmaxold", "weight", "beta_m", "nmaxold_m",
"weight_m"):
getattr(self, t + "_adj").value = param[t]
self.show()
self.grab_add() # Lock all other windows
def radio_grid_toggled(self, widget):
hmode = self.radio_h.get_active()
self.h_spin.set_sensitive(hmode)
for s in self.gpts_spin:
s.set_sensitive(not hmode)
self.gpts_changed()
def gpts_changed(self, *args):
if self.radio_gpts.get_active():
g = np.array([int(g.value) for g in self.gpts])
size = np.array([self.ucell[i, i] for i in range(3)])
txt = self.gpts_hlabel_format % tuple(size / g)
self.gpts_hlabel.set_markup(txt)
else:
self.gpts_hlabel.set_markup("")
def h_changed(self, *args):
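# Convert the requested grid spacing into per-axis grid point counts,
# rounded to multiples of 4 as GPAW expects.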
h = self.h.value
for i in range(3):
g = 4 * round(self.ucell[i, i] / (4 * h))
self.gpts[i].value = g
def k_changed(self, *args):
size = [self.kpts[i].value * np.sqrt(np.vdot(self.ucell[i],
self.ucell[i]))
for i in range(3)]
self.kpts_label.set_text(self.kpts_label_format % tuple(size))
def mode_changed(self, *args):
self.basis.set_sensitive(self.mode.get_active() == 1)
def mixer_changed(self, *args):
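# Spin-polarized runs require MixerSum or MixerDiff; unpolarized runs use
# the plain Mixer. Sensitize only the widgets that apply.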
radios = (self.radio_mixer, self.radio_mixersum, self.radio_mixerdiff)
spin1 = (self.beta_spin, self.nmaxold_spin, self.weight_spin)
spin2 = (self.beta_m_spin, self.nmaxold_m_spin, self.weight_m_spin)
if self.use_mixer.get_active():
# Mixer parameters can be specified.
if self.spinpol.get_active():
self.radio_mixer.set_sensitive(False)
self.radio_mixersum.set_sensitive(True)
self.radio_mixerdiff.set_sensitive(True)
if self.radio_mixer.get_active():
self.radio_mixersum.set_active(True)
else:
self.radio_mixer.set_sensitive(True)
self.radio_mixersum.set_sensitive(False)
self.radio_mixerdiff.set_sensitive(False)
self.radio_mixer.set_active(True)
if self.radio_mixerdiff.get_active():
active = spin1 + spin2
passive = ()
else:
active = spin1
passive = spin2
for widget in active:
widget.set_sensitive(True)
for widget in passive:
widget.set_sensitive(False)
else:
# No mixer parameters
for widget in radios + spin1 + spin2:
widget.set_sensitive(False)
def isorthogonal(self, matrix):
ortho = True
for i in range(3):
for j in range(3):
if i != j and matrix[i][j] != 0.0:
ortho = False
return ortho
def ok(self, *args):
param = {}
param["xc"] = self.xc.get_active_text()
param["xc#"] = self.xc.get_active()
param["use_h"] = self.radio_h.get_active()
param["h"] = self.h.value
param["gpts"] = [int(g.value) for g in self.gpts]
param["kpts"] = [int(k.value) for k in self.kpts]
param["spinpol"] = self.spinpol.get_active()
param["mode"] = self.mode.get_active_text().split()[0].lower()
param["mode#"] = self.mode.get_active()
param["basis"] = self.basis.get_active_text().split()[0].lower()
param["basis#"] = self.basis.get_active()
param["use mixer"] = self.use_mixer.get_active()
if self.radio_mixer.get_active():
m = "Mixer"
elif self.radio_mixersum.get_active():
m = "MixerSum"
else:
assert self.radio_mixerdiff.get_active()
m = "MixerDiff"
param["mixer"] = m
for t in ("beta", "nmaxold", "weight", "beta_m", "nmaxold_m",
"weight_m"):
param[t] = getattr(self, t + "_adj").value
setattr(self.owner, self.attrname, param)
self.destroy()
class AIMS_Window(gtk.Window):
aims_xc_cluster = ['pw-lda','pz-lda','pbe','pbesol','rpbe','revpbe',
'blyp','am05','b3lyp','hse03','hse06','pbe0','pbesol0',
'hf','mp2']
aims_xc_periodic = ['pw-lda','pz-lda','pbe','pbesol','rpbe','revpbe',
'blyp','am05']
aims_xc_default = 'pbe'
aims_relativity_list = ['none','atomic_zora','zora']
aims_keyword_gui_list = ['xc','vdw_correction_hirshfeld','k_grid','spin','charge','relativistic',
'sc_accuracy_etot','sc_accuracy_eev','sc_accuracy_rho','sc_accuracy_forces',
'compute_forces','run_command','species_dir','default_initial_moment']
def __init__(self, owner, param, attrname):
self.owner = owner
self.attrname = attrname
atoms = owner.atoms
self.periodic = atoms.get_pbc().all()
if not self.periodic and atoms.get_pbc().any():
aims_periodic_warning = True
self.periodic = True
else:
aims_periodic_warning = False
from ase.calculators.aims import float_keys,exp_keys,string_keys,int_keys,bool_keys,list_keys,input_keys
self.aims_keyword_list =float_keys+exp_keys+string_keys+int_keys+bool_keys+list_keys+input_keys
self.expert_keywords = []
natoms = len(atoms)
gtk.Window.__init__(self)
self.set_title(_("FHI-aims parameters"))
vbox = gtk.VBox()
vbox.set_border_width(5)
# Print some info
txt = _("%i atoms.\n") % (natoms)
if self.periodic:
self.ucell = atoms.get_cell()
txt += _("Periodic geometry, unit cell is:\n")
for i in range(3):
txt += "(%8.3f %8.3f %8.3f)\n" % (self.ucell[i][0], self.ucell[i][1], self.ucell[i][2])
self.xc_list = self.aims_xc_periodic
else:
txt += _("Non-periodic geometry.\n")
self.xc_list = self.aims_xc_cluster
pack(vbox, [gtk.Label(txt)])
# XC functional & dispersion correction
self.xc = gtk.combo_box_new_text()
self.xc_setup = False
self.TS = gtk.CheckButton(_("Hirshfeld-based dispersion correction"))
pack(vbox, [gtk.Label(_("Exchange-correlation functional: ")),self.xc])
pack(vbox, [self.TS])
pack(vbox, [gtk.Label("")])
# k-grid?
if self.periodic:
self.kpts = []
self.kpts_spin = []
for i in range(3):
default = np.ceil(20.0 / np.sqrt(np.vdot(self.ucell[i],self.ucell[i])))
g = gtk.Adjustment(default, 1, 100, 1)
s = gtk.SpinButton(g, 0, 0)
self.kpts.append(g)
self.kpts_spin.append(s)
g.connect("value-changed", self.k_changed)
pack(vbox, [gtk.Label(_("k-points k = (")), self.kpts_spin[0],
gtk.Label(", "), self.kpts_spin[1], gtk.Label(", "),
self.kpts_spin[2], gtk.Label(")")])
self.kpts_label = gtk.Label("")
self.kpts_label_format = _(u"k-points x size: (%.1f, %.1f, %.1f) Å")
pack(vbox, [self.kpts_label])
self.k_changed()
pack(vbox, gtk.Label(""))
# Spin polarized, charge, relativity
self.spinpol = gtk.CheckButton(_("Spin / initial moment "))
self.spinpol.connect('toggled',self.spinpol_changed)
self.moment = gtk.Adjustment(0,-100,100,0.1)
self.moment_spin = gtk.SpinButton(self.moment, 0, 0)
self.moment_spin.set_digits(2)
self.moment_spin.set_sensitive(False)
self.charge = gtk.Adjustment(0,-100,100,0.1)
self.charge_spin = gtk.SpinButton(self.charge, 0, 0)
self.charge_spin.set_digits(2)
self.relativity_type = gtk.combo_box_new_text()
for i, x in enumerate(self.aims_relativity_list):
self.relativity_type.append_text(x)
self.relativity_type.connect('changed',self.relativity_changed)
self.relativity_threshold = gtk.Entry(max=8)
self.relativity_threshold.set_text('1.00e-12')
self.relativity_threshold.set_sensitive(False)
pack(vbox, [self.spinpol,
self.moment_spin,
gtk.Label(_(" Charge")),
self.charge_spin,
gtk.Label(_(" Relativity")),
self.relativity_type,
gtk.Label(_(" Threshold")),
self.relativity_threshold])
pack(vbox, gtk.Label(""))
# self-consistency criteria
pack(vbox,[gtk.Label(_("Self-consistency convergence:"))])
self.sc_tot_energy = gtk.Adjustment(1e-6, 1e-6, 1e0, 1e-6)
self.sc_tot_energy_spin = gtk.SpinButton(self.sc_tot_energy, 0, 0)
self.sc_tot_energy_spin.set_digits(6)
self.sc_tot_energy_spin.set_numeric(True)
self.sc_sum_eigenvalue = gtk.Adjustment(1e-3, 1e-6, 1e0, 1e-6)
self.sc_sum_eigenvalue_spin = gtk.SpinButton(self.sc_sum_eigenvalue, 0, 0)
self.sc_sum_eigenvalue_spin.set_digits(6)
self.sc_sum_eigenvalue_spin.set_numeric(True)
self.sc_density = gtk.Adjustment(1e-4, 1e-6, 1e0, 1e-6)
self.sc_density_spin = gtk.SpinButton(self.sc_density, 0, 0)
self.sc_density_spin.set_digits(6)
self.sc_density_spin.set_numeric(True)
self.compute_forces = gtk.CheckButton(_("Compute forces"))
self.compute_forces.set_active(True)
self.compute_forces.connect("toggled", self.compute_forces_toggled,"")
self.sc_forces = gtk.Adjustment(1e-4, 1e-6, 1e0, 1e-6)
self.sc_forces_spin = gtk.SpinButton(self.sc_forces, 0, 0)
self.sc_forces_spin.set_numeric(True)
self.sc_forces_spin.set_digits(6)
# XXX: use gtk table for layout. Spaces will not work well otherwise
# (depend on fonts, widget style, ...)
# TRANSLATORS: Don't care too much about these, just get approximately
# the same string lengths
pack(vbox, [gtk.Label(_("Energy: ")),
self.sc_tot_energy_spin,
gtk.Label(_(" eV Sum of eigenvalues: ")),
self.sc_sum_eigenvalue_spin,
gtk.Label(_(" eV"))])
pack(vbox, [gtk.Label(_("Electron density: ")),
self.sc_density_spin,
gtk.Label(_(" Force convergence: ")),
self.sc_forces_spin,
gtk.Label(_(" eV/Ang "))])
pack(vbox, [self.compute_forces])
pack(vbox, gtk.Label(""))
swin = gtk.ScrolledWindow()
swin.set_border_width(0)
swin.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
self.expert_keyword_set = gtk.Entry(max=55)
self.expert_keyword_add = gtk.Button(stock=gtk.STOCK_ADD)
self.expert_keyword_add.connect("clicked", self.expert_keyword_import)
self.expert_keyword_set.connect("activate", self.expert_keyword_import)
pack(vbox,[gtk.Label(_("Additional keywords: ")),
self.expert_keyword_set,
self.expert_keyword_add])
self.expert_vbox = gtk.VBox()
vbox.pack_start(swin, True, True, 0)
swin.add_with_viewport(self.expert_vbox)
self.expert_vbox.get_parent().set_shadow_type(gtk.SHADOW_NONE)
self.expert_vbox.get_parent().set_size_request(-1, 100)
swin.show()
self.expert_vbox.show()
pack(vbox, gtk.Label(""))
# run command and species defaults:
pack(vbox, gtk.Label(_('FHI-aims execution command: ')))
self.run_command = pack(vbox, gtk.Entry(max=0))
pack(vbox, gtk.Label(_('Directory for species defaults: ')))
self.species_defaults = pack(vbox, gtk.Entry(max=0))
# set defaults from previous instance of the calculator, if applicable:
if param is not None:
self.set_param(param)
else:
self.set_defaults()
# Buttons at the bottom
pack(vbox, gtk.Label(""))
butbox = gtk.HButtonBox()
default_but = gtk.Button(_("Set Defaults"))
default_but.connect("clicked",self.set_defaults)
import_control_but = gtk.Button(_("Import control.in"))
import_control_but.connect("clicked",self.import_control)
export_control_but = gtk.Button(_("Export control.in"))
export_control_but.connect("clicked", self.export_control)
cancel_but = gtk.Button(stock=gtk.STOCK_CANCEL)
cancel_but.connect('clicked', lambda widget: self.destroy())
ok_but = gtk.Button(stock=gtk.STOCK_OK)
ok_but.connect('clicked', self.ok)
butbox.pack_start(default_but, 0, 0)
butbox.pack_start(import_control_but, 0, 0)
butbox.pack_start(export_control_but, 0, 0)
butbox.pack_start(cancel_but, 0, 0)
butbox.pack_start(ok_but, 0, 0)
butbox.show_all()
pack(vbox, [butbox], end=True, bottom=True)
self.expert_vbox.show()
vbox.show()
self.add(vbox)
self.show()
self.grab_add()
if aims_periodic_warning:
oops(aims_pbc_warning_text)
def set_defaults(self, *args):
atoms = self.owner.atoms.copy()
if not self.xc_setup:
self.xc_setup = True
for i, x in enumerate(self.xc_list):
self.xc.append_text(x)
for i, x in enumerate(self.xc_list):
if x == self.aims_xc_default:
self.xc.set_active(i)
self.TS.set_active(False)
if self.periodic:
self.ucell = atoms.get_cell()
for i in range(3):
default = np.ceil(20.0 / np.sqrt(np.vdot(self.ucell[i],self.ucell[i])))
self.kpts_spin[i].set_value(default)
self.spinpol.set_active(False)
self.moment.set_value(0)
self.moment_spin.set_sensitive(False)
self.charge.set_value(0)
aims_relativity_default = 'none'
for a in atoms:
if a.number > 20:
aims_relativity_default = 'atomic_zora'
for i, x in enumerate(self.aims_relativity_list):
if x == aims_relativity_default:
self.relativity_type.set_active(i)
self.sc_tot_energy.set_value(1e-6)
self.sc_sum_eigenvalue.set_value(1e-3)
self.sc_density.set_value(1e-4)
self.sc_forces.set_value(1e-4)
for key in self.expert_keywords:
key[0].destroy()
key[1].destroy()
key[2].destroy()
key[3] = False
for child in self.expert_vbox.get_children():
self.expert_vbox.remove(child)
if 'AIMS_COMMAND' in os.environ:
text = os.environ['AIMS_COMMAND']
else:
text = ""
self.run_command.set_text(text)
if 'AIMS_SPECIES_DIR' in os.environ:
text = os.environ['AIMS_SPECIES_DIR']
else:
text = ""
self.species_defaults.set_text(text)
def set_attributes(self, *args):
param = {}
param["xc"] = self.xc.get_active_text()
if self.periodic:
param["k_grid"] = (int(self.kpts[0].value),
int(self.kpts[1].value),
int(self.kpts[2].value))
if self.spinpol.get_active():
param["spin"] = "collinear"
param["default_initial_moment"] = self.moment.get_value()
else:
param["spin"] = "none"
param["default_initial_moment"] = None
param["vdw_correction_hirshfeld"] = self.TS.get_active()
param["charge"] = self.charge.value
param["relativistic"] = self.relativity_type.get_active_text()
if param["relativistic"] == 'atomic_zora':
param["relativistic"] += " scalar "
if param["relativistic"] == 'zora':
param["relativistic"] += " scalar "+self.relativity_threshold.get_text()
param["sc_accuracy_etot"] = self.sc_tot_energy.value
param["sc_accuracy_eev"] = self.sc_sum_eigenvalue.value
param["sc_accuracy_rho"] = self.sc_density.value
param["compute_forces"] = self.compute_forces.get_active()
param["sc_accuracy_forces"] = self.sc_forces.value
param["run_command"] = self.run_command.get_text()
param["species_dir"] = self.species_defaults.get_text()
from ase.calculators.aims import float_keys,exp_keys,string_keys,int_keys,bool_keys,list_keys,input_keys
for option in self.expert_keywords:
if option[3]: # set type of parameter according to which list it is in
key = option[0].get_text().strip()
val = option[1].get_text().strip()
if key == 'output':
if 'output' in param:
param[key] += [val]
else:
param[key] = [val]
elif key in float_keys or key in exp_keys:
param[key] = float(val)
elif key in list_keys or key in string_keys or key in input_keys:
param[key] = val
elif key in int_keys:
param[key] = int(val)
elif key in bool_keys:
# bool(val) is True for any non-empty string, so map common
# textual spellings of False explicitly.
param[key] = val.strip().lower() not in ('false', '.false.', '0', 'no', '')
setattr(self.owner, self.attrname, param)
def set_param(self, param):
if param["xc"] is not None:
for i, x in enumerate(self.xc_list):
if x == param["xc"]:
self.xc.set_active(i)
if isinstance(param["vdw_correction_hirshfeld"],bool):
self.TS.set_active(param["vdw_correction_hirshfeld"])
if self.periodic and param["k_grid"] is not None:
self.kpts[0].value = int(param["k_grid"][0])
self.kpts[1].value = int(param["k_grid"][1])
self.kpts[2].value = int(param["k_grid"][2])
if param["spin"] is not None:
self.spinpol.set_active(param["spin"] == "collinear")
self.moment_spin.set_sensitive(param["spin"] == "collinear")
if param["default_initial_moment"] is not None:
self.moment.value = param["default_initial_moment"]
if param["charge"] is not None:
self.charge.value = param["charge"]
if param["relativistic"] is not None:
if isinstance(param["relativistic"],(tuple,list)):
rel = param["relativistic"]
else:
rel = param["relativistic"].split()
for i, x in enumerate(self.aims_relativity_list):
if x == rel[0]:
self.relativity_type.set_active(i)
if x == 'zora':
self.relativity_threshold.set_text(rel[2])
self.relativity_threshold.set_sensitive(True)
if param["sc_accuracy_etot"] is not None:
self.sc_tot_energy.value = param["sc_accuracy_etot"]
if param["sc_accuracy_eev"] is not None:
self.sc_sum_eigenvalue.value = param["sc_accuracy_eev"]
if param["sc_accuracy_rho"] is not None:
self.sc_density.value = param["sc_accuracy_rho"]
if param["compute_forces"] is not None:
if param["compute_forces"]:
if param["sc_accuracy_forces"] is not None:
self.sc_forces.value = param["sc_accuracy_forces"]
self.compute_forces.set_active(param["compute_forces"])
else:
self.compute_forces.set_active(False)
if param["run_command"] is not None:
self.run_command.set_text(param["run_command"])
if param["species_dir"] is not None:
self.species_defaults.set_text(param["species_dir"])
for (key,val) in param.items():
if key in self.aims_keyword_list and key not in self.aims_keyword_gui_list:
if val is not None: # = existing "expert keyword"
if key == 'output': # 'output' can be used more than once
options = val
if isinstance(options,str):
options = [options]
for arg in options:
self.expert_keyword_create([key]+[arg])
else:
if isinstance(val,str):
arg = [key]+val.split()
elif isinstance(val,(tuple,list)):
arg = [key]+[str(a) for a in val]
else:
arg = [key]+[str(val)]
self.expert_keyword_create(arg)
def ok(self, *args):
self.set_attributes(*args)
self.destroy()
def export_control(self, *args):
filename = "control.in"
chooser = gtk.FileChooserDialog(
_('Export parameters ... '), None, gtk.FILE_CHOOSER_ACTION_SAVE,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_SAVE, gtk.RESPONSE_OK))
chooser.set_filename(filename)
save = chooser.run()
if save == gtk.RESPONSE_OK or save == gtk.RESPONSE_SAVE:
filename = chooser.get_filename()
self.set_attributes(*args)
param = getattr(self.owner, "aims_parameters")
from ase.calculators.aims import Aims
calc_temp = Aims(**param)
atoms_temp = self.owner.atoms.copy()
atoms_temp.set_calculator(calc_temp)
atoms_temp.calc.write_control(file=filename)
atoms_temp.calc.write_species(file=filename)
chooser.destroy()
def import_control(self, *args):
filename = "control.in"
chooser = gtk.FileChooserDialog(
_('Import control.in file ... '), None,
gtk.FILE_CHOOSER_ACTION_OPEN,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OPEN, gtk.RESPONSE_OK))
chooser.set_filename(filename)
save = chooser.run()
if save == gtk.RESPONSE_OK:
self.set_defaults()
filename = chooser.get_filename()
control = open(filename, 'r')
found_aims_calculator = False
while True:
line = control.readline()
if not line:
break
if "List of parameters used to initialize the calculator:" in line:
control.readline()
from ase.io.aims import read_aims_calculator
calc = read_aims_calculator(control)
found_aims_calculator = True
control.close()
if found_aims_calculator:
param = calc.float_params
for key in calc.exp_params:
param[key] = calc.exp_params[key]
for key in calc.string_params:
param[key] = calc.string_params[key]
for key in calc.int_params:
param[key] = calc.int_params[key]
for key in calc.bool_params:
param[key] = calc.bool_params[key]
for key in calc.list_params:
param[key] = calc.list_params[key]
for key in calc.input_parameters:
param[key] = calc.input_parameters[key]
self.set_defaults()
self.set_param(param)
chooser.destroy()
def k_changed(self, *args):
size = [self.kpts[i].value * np.sqrt(np.vdot(self.ucell[i],self.ucell[i])) for i in range(3)]
self.kpts_label.set_text(self.kpts_label_format % tuple(size))
def compute_forces_toggled(self, *args):
self.sc_forces_spin.set_sensitive(self.compute_forces.get_active())
def relativity_changed(self, *args):
self.relativity_threshold.set_sensitive(self.relativity_type.get_active() == 2)
def spinpol_changed(self, *args):
self.moment_spin.set_sensitive(self.spinpol.get_active())
def expert_keyword_import(self, *args):
command = self.expert_keyword_set.get_text().split()
if not command:
return
if command[0] in self.aims_keyword_list and command[0] not in self.aims_keyword_gui_list:
self.expert_keyword_create(command)
elif command[0] in self.aims_keyword_gui_list:
oops(_("Please use the facilities provided in this window to "
"manipulate the keyword: %s!") % command[0])
else:
oops(_("Don't know this keyword: %s\n"
"\nPlease check!\n\n"
"If you really think it should be available, "
"please add it to the top of ase/calculators/aims.py.")
% command[0])
self.expert_keyword_set.set_text("")
def expert_keyword_create(self, command):
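# Append a keyword row (label, entry, delete button) to the expert
# options table, growing the table by one row if it already exists.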
key = command[0]
argument = command[1]
if len(command) > 2:
for a in command[2:]:
argument += ' '+a
index = len(self.expert_keywords)
self.expert_keywords += [[gtk.Label(" " +key+" "),
gtk.Entry(max=45),
ExpertDeleteButton(index),
True]]
self.expert_keywords[index][1].set_text(argument)
self.expert_keywords[index][2].connect('clicked',self.expert_keyword_delete)
if not self.expert_vbox.get_children():
table = gtk.Table(1, 3)
table.attach(self.expert_keywords[index][0], 0, 1, 0, 1, 0)
table.attach(self.expert_keywords[index][1], 1, 2, 0, 1, 0)
table.attach(self.expert_keywords[index][2], 2, 3, 0, 1, 0)
table.show_all()
pack(self.expert_vbox, table)
else:
table = self.expert_vbox.get_children()[0]
nrows = table.get_property('n-rows')
table.resize(nrows + 1, 3)
table.attach(self.expert_keywords[index][0], 0, 1, nrows, nrows + 1, 0)
table.attach(self.expert_keywords[index][1], 1, 2, nrows, nrows + 1, 0)
table.attach(self.expert_keywords[index][2], 2, 3, nrows, nrows + 1, 0)
table.show_all()
def expert_keyword_delete(self, button, *args):
index = button.index # which one to kill
for i in [0,1,2]:
self.expert_keywords[index][i].destroy()
table = self.expert_vbox.get_children()[0]
nrows = table.get_property('n-rows')
table.resize(nrows-1, 3)
self.expert_keywords[index][3] = False
class ExpertDeleteButton(gtk.Button):
def __init__(self, index):
gtk.Button.__init__(self, stock=gtk.STOCK_DELETE)
alignment = self.get_children()[0]
hbox = alignment.get_children()[0]
#self.set_size_request(1, 3)
image, label = hbox.get_children()
if image is not None:
label.set_text('Del')
self.index = index
class VASP_Window(gtk.Window):
vasp_xc_list = ['PW91', 'PBE', 'LDA']
vasp_xc_default = 'PBE'
vasp_prec_default = 'Normal'
def __init__(self, owner, param, attrname):
self.owner = owner
self.attrname = attrname
atoms = owner.atoms
self.periodic = atoms.get_pbc().all()
self.vasp_keyword_gui_list = ['ediff','encut', 'ismear', 'ispin', 'prec', 'sigma']
from ase.calculators.vasp import float_keys,exp_keys,string_keys,int_keys,bool_keys,list_keys,special_keys
self.vasp_keyword_list = float_keys+exp_keys+string_keys+int_keys+bool_keys+list_keys+special_keys
self.expert_keywords = []
natoms = len(atoms)
gtk.Window.__init__(self)
self.set_title(_("VASP parameters"))
vbox = gtk.VBox()
vbox.set_border_width(5)
# Print some info
txt = _("%i atoms.\n") % natoms
self.ucell = atoms.get_cell()
txt += _("Periodic geometry, unit cell is: \n")
for i in range(3):
txt += "(%8.3f %8.3f %8.3f)\n" % (self.ucell[i][0], self.ucell[i][1], self.ucell[i][2])
pack(vbox, [gtk.Label(txt)])
# XC functional
self.xc = gtk.combo_box_new_text()
for i, x in enumerate(self.vasp_xc_list):
self.xc.append_text(x)
# Spin polarized
self.spinpol = gtk.CheckButton(_("Spin polarized"))
pack(vbox, [gtk.Label(_("Exchange-correlation functional: ")),
self.xc,
gtk.Label(" "),
self.spinpol])
pack(vbox, gtk.Label(""))
# k-grid
self.kpts = []
self.kpts_spin = []
for i in range(3):
default = np.ceil(20.0 / np.sqrt(np.vdot(self.ucell[i],self.ucell[i])))
g = gtk.Adjustment(default, 1, 100, 1)
s = gtk.SpinButton(g, 0, 0)
self.kpts.append(g)
self.kpts_spin.append(s)
g.connect("value-changed", self.k_changed)
# Precision of calculation
self.prec = gtk.combo_box_new_text()
for i, x in enumerate(['Low', 'Normal', 'Accurate']):
self.prec.append_text(x)
if x == self.vasp_prec_default:
self.prec.set_active(i)
# cutoff energy
if 'VASP_PP_PATH' in os.environ:
self.encut_min_default, self.encut_max_default = self.get_min_max_cutoff()
else:
self.encut_max_default = 400.0
self.encut_min_default = 100.0
self.encut = gtk.Adjustment(self.encut_max_default, 0, 9999, 10)
self.encut_spin = gtk.SpinButton(self.encut, 0, 0)
self.encut_spin.set_digits(2)
self.encut_spin.connect("value-changed",self.check_encut_warning)
self.encut_warning = gtk.Label("")
pack(vbox, [gtk.Label(_("k-points k = (")), self.kpts_spin[0],
gtk.Label(", "), self.kpts_spin[1], gtk.Label(", "),
self.kpts_spin[2],
gtk.Label(_(") Cutoff: ")),self.encut_spin,
gtk.Label(_(" Precision: ")),self.prec])
self.kpts_label = gtk.Label("")
self.kpts_label_format = _(u"k-points x size: (%.1f, %.1f, %.1f) Å ")
pack(vbox, [self.kpts_label, self.encut_warning])
self.k_changed()
pack(vbox, gtk.Label(""))
self.ismear = gtk.combo_box_new_text()
for x in ['Fermi', 'Gauss', 'Methfessel-Paxton']:
self.ismear.append_text(x)
self.ismear.set_active(2)
self.smearing_order = gtk.Adjustment(2,0,9,1)
self.smearing_order_spin = gtk.SpinButton(self.smearing_order,0,0)
self.smearing_order_spin.set_digits(0)
self.ismear.connect("changed", self.check_ismear_changed)
self.sigma = gtk.Adjustment(0.1, 0.001, 9.0, 0.1)
self.sigma_spin = gtk.SpinButton(self.sigma,0,0)
self.sigma_spin.set_digits(3)
pack(vbox, [gtk.Label(_("Smearing: ")),
self.ismear,
gtk.Label(_(" order: ")),
self.smearing_order_spin,
gtk.Label(_(" width: ")),
self.sigma_spin])
pack(vbox, gtk.Label(""))
self.ediff = gtk.Adjustment(1e-4, 1e-6, 1e0, 1e-4)
self.ediff_spin = gtk.SpinButton(self.ediff, 0, 0)
self.ediff_spin.set_digits(6)
pack(vbox,[gtk.Label(_("Self-consistency convergence: ")),
self.ediff_spin,
gtk.Label(_(" eV"))])
pack(vbox,gtk.Label(""))
swin = gtk.ScrolledWindow()
swin.set_border_width(0)
swin.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
self.expert_keyword_set = gtk.Entry(max=55)
self.expert_keyword_add = gtk.Button(stock=gtk.STOCK_ADD)
self.expert_keyword_add.connect("clicked", self.expert_keyword_import)
self.expert_keyword_set.connect("activate", self.expert_keyword_import)
pack(vbox,[gtk.Label(_("Additional keywords: ")),
self.expert_keyword_set,
self.expert_keyword_add])
self.expert_vbox = gtk.VBox()
vbox.pack_start(swin, True, True, 0)
swin.add_with_viewport(self.expert_vbox)
self.expert_vbox.get_parent().set_shadow_type(gtk.SHADOW_NONE)
self.expert_vbox.get_parent().set_size_request(-1, 100)
swin.show()
self.expert_vbox.show()
pack(vbox, gtk.Label(""))
# run command and location of POTCAR files:
pack(vbox, gtk.Label(_('VASP execution command: ')))
self.run_command = pack(vbox, gtk.Entry(max=0))
if 'VASP_COMMAND' in os.environ:
self.run_command.set_text(os.environ['VASP_COMMAND'])
pack(vbox, gtk.Label(_('Directory for species defaults: ')))
self.pp_path = pack(vbox, gtk.Entry(max=0))
if 'VASP_PP_PATH' in os.environ:
self.pp_path.set_text(os.environ['VASP_PP_PATH'])
# Buttons at the bottom
pack(vbox, gtk.Label(""))
butbox = gtk.HButtonBox()
set_default_but = gtk.Button(_("Set Defaults"))
set_default_but.connect("clicked", self.set_defaults)
import_vasp_but = gtk.Button(_("Import VASP files"))
import_vasp_but.connect("clicked", self.import_vasp_files)
export_vasp_but = gtk.Button(_("Export VASP files"))
export_vasp_but.connect("clicked", self.export_vasp_files)
cancel_but = gtk.Button(stock=gtk.STOCK_CANCEL)
cancel_but.connect('clicked', lambda widget: self.destroy())
ok_but = gtk.Button(stock=gtk.STOCK_OK)
ok_but.connect('clicked', self.ok)
butbox.pack_start(set_default_but, 0, 0)
butbox.pack_start(import_vasp_but, 0, 0)
butbox.pack_start(export_vasp_but, 0, 0)
butbox.pack_start(cancel_but, 0, 0)
butbox.pack_start(ok_but, 0, 0)
butbox.show_all()
pack(vbox, [butbox], end=True, bottom=True)
vbox.show()
self.add(vbox)
self.show()
self.grab_add() # Lock all other windows
self.load_attributes()
def load_attributes(self, directory="."):
"""Sets values of fields of the window according to the values
set inside the INCAR, KPOINTS and POTCAR file in 'directory'."""
from os import chdir
chdir(directory)
# Try and load INCAR, in the current directory
from ase.calculators.vasp import Vasp
calc_temp = Vasp()
try:
calc_temp.read_incar("INCAR")
except IOError:
pass
else:
if calc_temp.spinpol:
self.spinpol.set_active(True)
else:
self.spinpol.set_active(False)
if calc_temp.float_params['encut']:
self.encut.set_value(calc_temp.float_params['encut'])
if calc_temp.int_params['ismear'] == -1: # Fermi
vasp_ismear_default = 'Fermi'
elif calc_temp.int_params['ismear'] == 0: # Gauss
vasp_ismear_default = 'Gauss'
elif calc_temp.int_params['ismear'] > 0: # Methfessel-Paxton
vasp_ismear_default = 'Methfessel-Paxton'
else:
vasp_ismear_default = None
for i, x in enumerate(['Fermi', 'Gauss', 'Methfessel-Paxton']):
if vasp_ismear_default == x:
self.ismear.set_active(i)
if calc_temp.exp_params['ediff']:
self.ediff.set_value(calc_temp.exp_params['ediff'])
for i, x in enumerate(['Low', 'Normal', 'Accurate']):
if x == calc_temp.string_params['prec']:
self.prec.set_active(i)
if calc_temp.float_params['sigma']:
self.sigma.set_value(calc_temp.float_params['sigma'])
import copy
all_params = copy.deepcopy(calc_temp.float_params)
all_params.update(calc_temp.exp_params)
all_params.update(calc_temp.string_params)
all_params.update(calc_temp.int_params)
all_params.update(calc_temp.bool_params)
all_params.update(calc_temp.special_params)
for (key, value) in all_params.items():
if key in self.vasp_keyword_list \
and key not in self.vasp_keyword_gui_list \
and value is not None:
command = key + " " + str(value)
self.expert_keyword_create(command.split())
for (key, value) in calc_temp.list_params.items():
if key == "magmom" and value is not None:
command = key + " "
rep = 1
previous = value[0]
for v in value[1:]:
if v == previous:
rep += 1
else:
if rep > 1:
command += "%d*%f " % (rep, previous)
else:
command += "%f " % previous
rep = 1
previous = v
if rep > 1:
command += "%d*%f " % (rep, previous)
else:
command += "%f" % previous
self.expert_keyword_create(command.split())
elif value is not None:
command = key + " "
for v in value:
command += str(v) + " "
self.expert_keyword_create(command.split())
# Try and load POTCAR, in the current directory
try:
calc_temp.read_potcar()
except IOError:
pass
else:
#Set xc read from POTCAR
for i, x in enumerate(self.vasp_xc_list):
if x == calc_temp.input_params['xc']:
self.xc.set_active(i)
# Try and load KPOINTS, in the current directory
try:
calc_temp.read_kpoints("KPOINTS")
except IOError:
pass
else:
# Set KPOINTS grid dimensions
for i in range(3):
self.kpts_spin[i].set_value(calc_temp.input_params['kpts'][i])
def set_attributes(self, *args):
self.param = {}
self.param["xc"] = self.xc.get_active_text()
self.param["prec"] = self.prec.get_active_text()
self.param["kpts"] = (int(self.kpts[0].value),
int(self.kpts[1].value),
int(self.kpts[2].value))
self.param["encut"] = self.encut.value
self.param["ediff"] = self.ediff.value
self.param["ismear"] = self.get_ismear()
self.param["sigma"] = self.sigma.value
if self.spinpol.get_active():
self.param["ispin"] = 2
else:
self.param["ispin"] = 1
from ase.calculators.vasp import float_keys,exp_keys,string_keys,int_keys,bool_keys,list_keys,special_keys
for option in self.expert_keywords:
if option[3]: # set type of parameter according to which list it is in
key = option[0].get_text().split()[0].strip()
val = option[1].get_text().strip()
if key in float_keys or key in exp_keys:
self.param[key] = float(val)
elif key == "magmom":
val = val.replace("*", " * ")
c = val.split()
val = []
i = 0
while i < len(c):
if c[i] == "*":
b = val.pop()
i += 1
for j in range(int(b)):
val.append(float(c[i]))
else:
val.append(float(c[i]))
i += 1
self.param[key] = val
elif key in list_keys:
c = val.split()
val = []
for i in c:
val.append(float(i))
self.param[key] = val
elif key in string_keys or key in special_keys:
self.param[key] = val
elif key in int_keys:
self.param[key] = int(val)
elif key in bool_keys:
# bool(val) is True for any non-empty string, so map common
# textual spellings of False explicitly.
self.param[key] = val.strip().lower() not in ('false', '.false.', '0', 'no', '')
setattr(self.owner, self.attrname, self.param)
os.environ['VASP_COMMAND'] = self.run_command.get_text()
os.environ['VASP_PP_PATH'] = self.pp_path.get_text()
def ok(self, *args):
self.set_attributes(*args)
self.destroy()
def get_min_max_cutoff(self, *args):
# determine the recommended energy cutoff limits
from ase.calculators.vasp import Vasp
calc_temp = Vasp()
atoms_temp = self.owner.atoms.copy()
calc_temp.initialize(atoms_temp)
calc_temp.write_potcar(suffix='.check_energy_cutoff')
enmin = -1e6
enmax = -1e6
for line in open("POTCAR.check_energy_cutoff",'r').readlines():
if "ENMIN" in line:
enmax = max(enmax,float(line.split()[2].split(';')[0]))
enmin = max(enmin,float(line.split()[5]))
os.remove("POTCAR.check_energy_cutoff")
return enmin, enmax
def k_changed(self, *args):
size = [self.kpts[i].value * np.sqrt(np.vdot(self.ucell[i],self.ucell[i])) for i in range(3)]
self.kpts_label.set_text(self.kpts_label_format % tuple(size))
def check_encut_warning(self,*args):
if self.encut.value < self.encut_min_default:
self.encut_warning.set_markup(_("<b>WARNING:</b> cutoff energy is lower than recommended minimum!"))
else:
self.encut_warning.set_markup("")
def check_ismear_changed(self,*args):
if self.ismear.get_active_text() == 'Methfessel-Paxton':
self.smearing_order_spin.set_sensitive(True)
else:
self.smearing_order_spin.set_sensitive(False)
def get_ismear(self, *args):
smearing = self.ismear.get_active_text()
if smearing == 'Methfessel-Paxton':
ismear_value = self.smearing_order.value
elif smearing == 'Fermi':
ismear_value = -1
else:
ismear_value = 0
return ismear_value
def destroy(self):
self.grab_remove()
gtk.Window.destroy(self)
def set_defaults(self, *args):
# Reset fields to what they were
self.spinpol.set_active(False)
for i, x in enumerate(['Low', 'Normal', 'Accurate']):
if x == self.vasp_prec_default:
self.prec.set_active(i)
self.encut_spin.set_value(self.encut_max_default)
self.ismear.set_active(2)
self.smearing_order.set_value(2)
self.ediff.set_value(1e-4)
for child in self.expert_vbox.get_children():
self.expert_vbox.remove(child)
for i, x in enumerate(self.vasp_xc_list):
if x == self.vasp_xc_default:
self.xc.set_active(i)
for i in range(3):
default = np.ceil(20.0 / np.sqrt(np.vdot(self.ucell[i], self.ucell[i])))
self.kpts_spin[i].set_value(default)
def import_vasp_files(self, *args):
dirname = ""
chooser = gtk.FileChooserDialog(
_('Import VASP input files: choose directory ... '),
None, gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OPEN, gtk.RESPONSE_OK))
chooser.set_filename(dirname)
openr = chooser.run()
if openr == gtk.RESPONSE_OK or openr == gtk.RESPONSE_SAVE:
dirname = chooser.get_filename()
self.load_attributes(dirname)
chooser.destroy()
def export_vasp_files(self, *args):
filename = ""
chooser = gtk.FileChooserDialog(
_('Export VASP input files: choose directory ... '),
None, gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_SAVE, gtk.RESPONSE_OK))
chooser.set_filename(filename)
save = chooser.run()
if save == gtk.RESPONSE_OK or save == gtk.RESPONSE_SAVE:
filename = chooser.get_filename()
from os import chdir
chdir(filename)
self.set_attributes(*args)
param = getattr(self.owner, "vasp_parameters")
from ase.calculators.vasp import Vasp
calc_temp = Vasp(**param)
atoms_temp = self.owner.atoms.copy()
atoms_temp.set_calculator(calc_temp)
calc_temp.initialize(atoms_temp)
calc_temp.write_incar(atoms_temp)
calc_temp.write_potcar()
calc_temp.write_kpoints()
calc_temp.write_sort_file()
from ase.io.vasp import write_vasp
write_vasp('POSCAR', calc_temp.atoms_sorted, symbol_count=calc_temp.symbol_count)
chooser.destroy()
def expert_keyword_import(self, *args):
command = self.expert_keyword_set.get_text().split()
if not command:
return
if command[0] in self.vasp_keyword_list and command[0] not in self.vasp_keyword_gui_list:
self.expert_keyword_create(command)
elif command[0] in self.vasp_keyword_gui_list:
oops(_("Please use the facilities provided in this window to "
"manipulate the keyword: %s!") % command[0])
else:
oops(_("Don't know this keyword: %s"
"\nPlease check!\n\n"
"If you really think it should be available, "
"please add it to the top of ase/calculators/vasp.py.")
% command[0])
self.expert_keyword_set.set_text("")
def expert_keyword_create(self, command):
key = command[0]
if command[1] == "=":
command.remove("=")
argument = command[1]
if len(command) > 2:
for a in command[2:]:
argument += ' '+a
index = len(self.expert_keywords)
self.expert_keywords += [[gtk.Label(" " +key+" = "),
gtk.Entry(max=55),
ExpertDeleteButton(index),
True]]
self.expert_keywords[index][1].set_text(argument)
self.expert_keywords[index][2].connect('clicked',self.expert_keyword_delete)
if not self.expert_vbox.get_children():
table = gtk.Table(1, 3)
table.attach(self.expert_keywords[index][0], 0, 1, 0, 1, 0)
table.attach(self.expert_keywords[index][1], 1, 2, 0, 1, 0)
table.attach(self.expert_keywords[index][2], 2, 3, 0, 1, 0)
table.show_all()
pack(self.expert_vbox, table)
else:
table = self.expert_vbox.get_children()[0]
nrows = table.get_property('n-rows')
table.resize(nrows + 1, 3)
table.attach(self.expert_keywords[index][0], 0, 1, nrows, nrows + 1, 0)
table.attach(self.expert_keywords[index][1], 1, 2, nrows, nrows + 1, 0)
table.attach(self.expert_keywords[index][2], 2, 3, nrows, nrows + 1, 0)
table.show_all()
def expert_keyword_delete(self, button, *args):
index = button.index # which one to kill
for i in [0,1,2]:
self.expert_keywords[index][i].destroy()
table = self.expert_vbox.get_children()[0]
nrows = table.get_property('n-rows')
table.resize(nrows-1, 3)
self.expert_keywords[index][3] = False
|
ericfc/django | refs/heads/master | tests/migrations/test_migrations_run_before/0003_third.py | 427 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
"""
This is a wee bit crazy, but it's just to show that run_before works.
"""
dependencies = [
("migrations", "0001_initial"),
]
run_before = [
("migrations", "0002_second"),
]
operations = [
migrations.CreateModel(
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=255)),
("slug", models.SlugField(null=True)),
("age", models.IntegerField(default=0)),
],
)
]
|
m0ose/pydc1394 | refs/heads/master | examples/display_qt.py | 2 | #!/usr/bin/env python
# encoding: utf-8
#
# This file is part of pydc1394.
#
# pydc1394 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# pydc1394 is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with pydc1394. If not, see
# <http://www.gnu.org/licenses/>.
#
# Copyright (C) 2009, 2010 by Holger Rapp <HolgerRapp@gmx.net>
# and the pydc1394 contributors (see README File)
import sys
import optparse
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from pydc1394 import DC1394Library, Camera
from pydc1394.ui.qt import LiveCameraWin
from pydc1394.cmdline import add_common_options, handle_common_options
def main():
p = optparse.OptionParser(usage="Usage: %prog [ options ]\n"
"This program displays a live image of your camera")
add_common_options(p)
options, args = p.parse_args()
l = DC1394Library()
cam = handle_common_options(options,l)
if cam:
app = QApplication(args)
w1 = LiveCameraWin(cam); w1.show(); w1.raise_()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
|
dparlevliet/zelenka-report-storage | refs/heads/master | server-local/twisted/protocols/htb.py | 51 | # -*- test-case-name: twisted.test.test_htb -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Hierarchical Token Bucket traffic shaping.
Patterned after U{Martin Devera's Hierarchical Token Bucket traffic
shaper for the Linux kernel<http://luxik.cdi.cz/~devik/qos/htb/>}.
@seealso: U{HTB Linux queuing discipline manual - user guide
<http://luxik.cdi.cz/~devik/qos/htb/manual/userg.htm>}
@seealso: U{Token Bucket Filter in Linux Advanced Routing & Traffic Control
HOWTO<http://lartc.org/howto/lartc.qdisc.classless.html#AEN682>}
"""
# TODO: Investigate whether we should be using os.times()[-1] instead of
# time.time. time.time, it has been pointed out, can go backwards. Is
# the same true of os.times?
from time import time
from zope.interface import implements, Interface
from twisted.protocols import pcp
class Bucket:
"""
Implementation of a Token bucket.
A bucket can hold a certain number of tokens and it drains over time.
@cvar maxburst: The maximum number of tokens that the bucket can
hold at any given time. If this is C{None}, the bucket has
an infinite size.
@type maxburst: C{int}
@cvar rate: The rate at which the bucket drains, in number
of tokens per second. If the rate is C{None}, the bucket
drains instantaneously.
@type rate: C{int}
"""
maxburst = None
rate = None
_refcount = 0
def __init__(self, parentBucket=None):
"""
Create a L{Bucket} that may have a parent L{Bucket}.
@param parentBucket: If a parent Bucket is specified,
all L{add} and L{drip} operations on this L{Bucket}
will be applied on the parent L{Bucket} as well.
@type parentBucket: L{Bucket}
"""
self.content = 0
self.parentBucket = parentBucket
self.lastDrip = time()
def add(self, amount):
"""
Adds tokens to the L{Bucket} and its C{parentBucket}.
This will add as many of the C{amount} tokens as will fit into both
this L{Bucket} and its C{parentBucket}.
@param amount: The number of tokens to try to add.
@type amount: C{int}
@returns: The number of tokens that actually fit.
@returntype: C{int}
"""
self.drip()
if self.maxburst is None:
allowable = amount
else:
allowable = min(amount, self.maxburst - self.content)
if self.parentBucket is not None:
allowable = self.parentBucket.add(allowable)
self.content += allowable
return allowable
def drip(self):
"""
Let some of the bucket drain.
The L{Bucket} drains at the rate specified by the class
variable C{rate}.
@returns: C{True} if the bucket is empty after this drip.
@returntype: C{bool}
"""
if self.parentBucket is not None:
self.parentBucket.drip()
if self.rate is None:
self.content = 0
else:
now = time()
deltaTime = now - self.lastDrip
deltaTokens = deltaTime * self.rate
self.content = max(0, self.content - deltaTokens)
self.lastDrip = now
return self.content == 0
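# Illustrative sketch (added for clarity; not part of the original module).
# With maxburst=100 and rate=10, a fresh Bucket accepts at most 100 tokens at
# once and frees roughly 10 tokens of room per second via drip():
#
#     b = Bucket()
#     b.maxburst = 100
#     b.rate = 10
#     b.add(150)   # -> 100: only maxburst tokens fit into an empty bucket
#     b.add(50)    # -> 0: full until drip() has drained some content
#     # ~1 second later, drip() lowers content by rate * elapsed seconds,
#     # so another add(50) would accept roughly 10 tokens.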
class IBucketFilter(Interface):
def getBucketFor(*somethings, **some_kw):
"""
Return a L{Bucket} corresponding to the provided parameters.
@returntype: L{Bucket}
"""
class HierarchicalBucketFilter:
"""
Filter things into buckets that can be nested.
@cvar bucketFactory: Class of buckets to make.
@type bucketFactory: L{Bucket}
@cvar sweepInterval: Seconds between sweeping out the bucket cache.
@type sweepInterval: C{int}
"""
implements(IBucketFilter)
bucketFactory = Bucket
sweepInterval = None
def __init__(self, parentFilter=None):
self.buckets = {}
self.parentFilter = parentFilter
self.lastSweep = time()
def getBucketFor(self, *a, **kw):
"""
Find or create a L{Bucket} corresponding to the provided parameters.
Any parameters are passed on to L{getBucketKey}, from them it
decides which bucket you get.
@returntype: L{Bucket}
"""
if ((self.sweepInterval is not None)
and ((time() - self.lastSweep) > self.sweepInterval)):
self.sweep()
if self.parentFilter:
parentBucket = self.parentFilter.getBucketFor(self, *a, **kw)
else:
parentBucket = None
key = self.getBucketKey(*a, **kw)
bucket = self.buckets.get(key)
if bucket is None:
bucket = self.bucketFactory(parentBucket)
self.buckets[key] = bucket
return bucket
def getBucketKey(self, *a, **kw):
"""
Construct a key based on the input parameters to choose a L{Bucket}.
The default implementation returns the same key for all
arguments. Override this method to provide L{Bucket} selection.
@returns: Something to be used as a key in the bucket cache.
"""
return None
def sweep(self):
"""
Remove empty buckets.
"""
for key, bucket in self.buckets.items():
bucket_is_empty = bucket.drip()
if (bucket._refcount == 0) and bucket_is_empty:
del self.buckets[key]
self.lastSweep = time()
class FilterByHost(HierarchicalBucketFilter):
"""
A Hierarchical Bucket filter with a L{Bucket} for each host.
"""
sweepInterval = 60 * 20
def getBucketKey(self, transport):
return transport.getPeer()[1]
class FilterByServer(HierarchicalBucketFilter):
"""
A Hierarchical Bucket filter with a L{Bucket} for each service.
"""
sweepInterval = None
def getBucketKey(self, transport):
return transport.getHost()[2]
class ShapedConsumer(pcp.ProducerConsumerProxy):
"""
Wraps a C{Consumer} and shapes the rate at which it receives data.
"""
# Providing a Pull interface means I don't have to try to schedule
# traffic with callLaters.
iAmStreaming = False
def __init__(self, consumer, bucket):
pcp.ProducerConsumerProxy.__init__(self, consumer)
self.bucket = bucket
self.bucket._refcount += 1
def _writeSomeData(self, data):
# In practice, this actually results in obscene amounts of
# overhead, as a result of generating lots and lots of packets
# with twelve-byte payloads. We may need to do a version of
# this with scheduled writes after all.
amount = self.bucket.add(len(data))
return pcp.ProducerConsumerProxy._writeSomeData(self, data[:amount])
def stopProducing(self):
pcp.ProducerConsumerProxy.stopProducing(self)
self.bucket._refcount -= 1
class ShapedTransport(ShapedConsumer):
"""
Wraps a C{Transport} and shapes the rate at which it receives data.
This is a L{ShapedConsumer} with a little bit of magic to provide for
the case where the consumer it wraps is also a C{Transport} and people
will be attempting to access attributes this does not proxy as a
C{Consumer} (e.g. C{loseConnection}).
"""
# Ugh. We only wanted to filter IConsumer, not ITransport.
iAmStreaming = False
def __getattr__(self, name):
# Because people will be doing things like .getPeer and
# .loseConnection on me.
return getattr(self.consumer, name)
class ShapedProtocolFactory:
"""
Dispense C{Protocols} with traffic shaping on their transports.
Usage::
myserver = SomeFactory()
myserver.protocol = ShapedProtocolFactory(myserver.protocol,
bucketFilter)
    Where C{SomeFactory} is a L{twisted.internet.protocol.Factory}, and
C{bucketFilter} is an instance of L{HierarchicalBucketFilter}.
"""
def __init__(self, protoClass, bucketFilter):
"""
Tell me what to wrap and where to get buckets.
@param protoClass: The class of C{Protocol} this will generate
wrapped instances of.
@type protoClass: L{Protocol<twisted.internet.interfaces.IProtocol>}
class
@param bucketFilter: The filter which will determine how
traffic is shaped.
@type bucketFilter: L{HierarchicalBucketFilter}.
"""
# More precisely, protoClass can be any callable that will return
# instances of something that implements IProtocol.
self.protocol = protoClass
self.bucketFilter = bucketFilter
def __call__(self, *a, **kw):
"""
Make a C{Protocol} instance with a shaped transport.
Any parameters will be passed on to the protocol's initializer.
@returns: A C{Protocol} instance with a L{ShapedTransport}.
"""
proto = self.protocol(*a, **kw)
origMakeConnection = proto.makeConnection
def makeConnection(transport):
bucket = self.bucketFilter.getBucketFor(transport)
shapedTransport = ShapedTransport(transport, bucket)
return origMakeConnection(shapedTransport)
proto.makeConnection = makeConnection
return proto
|
jlcarmic/producthunt_simulator | refs/heads/master | venv/lib/python2.7/site-packages/scipy/signal/tests/test_wavelets.py | 108 | from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import TestCase, run_module_suite, assert_equal, \
assert_array_equal, assert_array_almost_equal, assert_array_less, assert_
from scipy._lib.six import xrange
from scipy.signal import wavelets
class TestWavelets(TestCase):
def test_qmf(self):
assert_array_equal(wavelets.qmf([1, 1]), [1, -1])
def test_daub(self):
for i in xrange(1, 15):
assert_equal(len(wavelets.daub(i)), i * 2)
def test_cascade(self):
for J in xrange(1, 7):
for i in xrange(1, 5):
lpcoef = wavelets.daub(i)
k = len(lpcoef)
x, phi, psi = wavelets.cascade(lpcoef, J)
assert_(len(x) == len(phi) == len(psi))
assert_equal(len(x), (k - 1) * 2 ** J)
def test_morlet(self):
x = wavelets.morlet(50, 4.1, complete=True)
y = wavelets.morlet(50, 4.1, complete=False)
# Test if complete and incomplete wavelet have same lengths:
assert_equal(len(x), len(y))
# Test if complete wavelet is less than incomplete wavelet:
assert_array_less(x, y)
x = wavelets.morlet(10, 50, complete=False)
y = wavelets.morlet(10, 50, complete=True)
# For large widths complete and incomplete wavelets should be
# identical within numerical precision:
assert_equal(x, y)
# miscellaneous tests:
x = np.array([1.73752399e-09 + 9.84327394e-25j,
6.49471756e-01 + 0.00000000e+00j,
1.73752399e-09 - 9.84327394e-25j])
y = wavelets.morlet(3, w=2, complete=True)
assert_array_almost_equal(x, y)
x = np.array([2.00947715e-09 + 9.84327394e-25j,
7.51125544e-01 + 0.00000000e+00j,
2.00947715e-09 - 9.84327394e-25j])
y = wavelets.morlet(3, w=2, complete=False)
assert_array_almost_equal(x, y, decimal=2)
x = wavelets.morlet(10000, s=4, complete=True)
y = wavelets.morlet(20000, s=8, complete=True)[5000:15000]
assert_array_almost_equal(x, y, decimal=2)
x = wavelets.morlet(10000, s=4, complete=False)
assert_array_almost_equal(y, x, decimal=2)
y = wavelets.morlet(20000, s=8, complete=False)[5000:15000]
assert_array_almost_equal(x, y, decimal=2)
x = wavelets.morlet(10000, w=3, s=5, complete=True)
y = wavelets.morlet(20000, w=3, s=10, complete=True)[5000:15000]
assert_array_almost_equal(x, y, decimal=2)
x = wavelets.morlet(10000, w=3, s=5, complete=False)
assert_array_almost_equal(y, x, decimal=2)
y = wavelets.morlet(20000, w=3, s=10, complete=False)[5000:15000]
assert_array_almost_equal(x, y, decimal=2)
x = wavelets.morlet(10000, w=7, s=10, complete=True)
y = wavelets.morlet(20000, w=7, s=20, complete=True)[5000:15000]
assert_array_almost_equal(x, y, decimal=2)
x = wavelets.morlet(10000, w=7, s=10, complete=False)
assert_array_almost_equal(x, y, decimal=2)
y = wavelets.morlet(20000, w=7, s=20, complete=False)[5000:15000]
assert_array_almost_equal(x, y, decimal=2)
def test_ricker(self):
w = wavelets.ricker(1.0, 1)
expected = 2 / (np.sqrt(3 * 1.0) * (np.pi ** 0.25))
assert_array_equal(w, expected)
lengths = [5, 11, 15, 51, 101]
for length in lengths:
w = wavelets.ricker(length, 1.0)
assert_(len(w) == length)
max_loc = np.argmax(w)
assert_(max_loc == (length // 2))
points = 100
w = wavelets.ricker(points, 2.0)
half_vec = np.arange(0, points // 2)
#Wavelet should be symmetric
assert_array_almost_equal(w[half_vec], w[-(half_vec + 1)])
#Check zeros
aas = [5, 10, 15, 20, 30]
points = 99
for a in aas:
w = wavelets.ricker(points, a)
vec = np.arange(0, points) - (points - 1.0) / 2
exp_zero1 = np.argmin(np.abs(vec - a))
exp_zero2 = np.argmin(np.abs(vec + a))
assert_array_almost_equal(w[exp_zero1], 0)
assert_array_almost_equal(w[exp_zero2], 0)
def test_cwt(self):
widths = [1.0]
delta_wavelet = lambda s, t: np.array([1])
len_data = 100
test_data = np.sin(np.pi * np.arange(0, len_data) / 10.0)
#Test delta function input gives same data as output
cwt_dat = wavelets.cwt(test_data, delta_wavelet, widths)
assert_(cwt_dat.shape == (len(widths), len_data))
assert_array_almost_equal(test_data, cwt_dat.flatten())
#Check proper shape on output
widths = [1, 3, 4, 5, 10]
cwt_dat = wavelets.cwt(test_data, wavelets.ricker, widths)
assert_(cwt_dat.shape == (len(widths), len_data))
widths = [len_data * 10]
#Note: this wavelet isn't defined quite right, but is fine for this test
flat_wavelet = lambda l, w: np.ones(w) / w
cwt_dat = wavelets.cwt(test_data, flat_wavelet, widths)
assert_array_almost_equal(cwt_dat, np.mean(test_data))
if __name__ == "__main__":
run_module_suite()
|
emilkjer/xhtml2pdf | refs/heads/master | demo/tgpisa/setup.py | 168 | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from turbogears.finddata import find_package_data
import os
execfile(os.path.join("tgpisa", "release.py"))
packages=find_packages()
package_data = find_package_data(where='tgpisa',
package='tgpisa')
if os.path.isdir('locales'):
packages.append('locales')
package_data.update(find_package_data(where='locales',
exclude=('*.po',), only_in_packages=False))
setup(
name="tgpisa",
version=version,
# uncomment the following lines if you fill them out in release.py
#description=description,
#author=author,
#author_email=email,
#url=url,
#download_url=download_url,
#license=license,
install_requires=[
"TurboGears >= 1.0.4.3",
"SQLObject>=0.8,<=0.10.0"
],
zip_safe=False,
packages=packages,
package_data=package_data,
keywords=[
# Use keywords if you'll be adding your package to the
# Python Cheeseshop
# if this has widgets, uncomment the next line
# 'turbogears.widgets',
# if this has a tg-admin command, uncomment the next line
# 'turbogears.command',
# if this has identity providers, uncomment the next line
# 'turbogears.identity.provider',
# If this is a template plugin, uncomment the next line
# 'python.templating.engines',
# If this is a full application, uncomment the next line
# 'turbogears.app',
],
classifiers=[
'Development Status :: 3 - Alpha',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Framework :: TurboGears',
# if this is an application that you'll distribute through
# the Cheeseshop, uncomment the next line
# 'Framework :: TurboGears :: Applications',
# if this is a package that includes widgets that you'll distribute
# through the Cheeseshop, uncomment the next line
# 'Framework :: TurboGears :: Widgets',
],
test_suite='nose.collector',
entry_points = {
'console_scripts': [
'start-tgpisa = tgpisa.commands:start',
],
},
# Uncomment next line and create a default.cfg file in your project dir
# if you want to package a default configuration in your egg.
#data_files = [('config', ['default.cfg'])],
)
|
asimshankar/tensorflow | refs/heads/master | tensorflow/python/keras/applications/vgg16.py | 12 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""VGG16 model for Keras.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras_applications import vgg16
from tensorflow.python.keras.applications import keras_modules_injection
from tensorflow.python.util.tf_export import tf_export
@tf_export('keras.applications.vgg16.VGG16',
'keras.applications.VGG16')
@keras_modules_injection
def VGG16(*args, **kwargs):
return vgg16.VGG16(*args, **kwargs)
@tf_export('keras.applications.vgg16.decode_predictions')
@keras_modules_injection
def decode_predictions(*args, **kwargs):
return vgg16.decode_predictions(*args, **kwargs)
@tf_export('keras.applications.vgg16.preprocess_input')
@keras_modules_injection
def preprocess_input(*args, **kwargs):
return vgg16.preprocess_input(*args, **kwargs)
|
dannyboi104/SickRage | refs/heads/master | lib/rtorrent/rpc/__init__.py | 158 | # Copyright (c) 2013 Chris Lucas, <chris@chrisjlucas.com>
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import inspect
import rtorrent
import re
from rtorrent.common import bool_to_int, convert_version_tuple_to_str,\
safe_repr
from rtorrent.err import MethodError
from rtorrent.compat import xmlrpclib
def get_varname(rpc_call):
"""Transform rpc method into variable name.
@newfield example: Example
@example: if the name of the rpc method is 'p.get_down_rate', the variable
name will be 'down_rate'
"""
# extract variable name from xmlrpc func name
r = re.search(
"([ptdf]\.|system\.|get\_|is\_|set\_)+([^=]*)", rpc_call, re.I)
if r:
return(r.groups()[-1])
else:
return(None)
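# Illustrative examples (added; these follow from the regex above):
#   get_varname('p.get_down_rate') -> 'down_rate'
#   get_varname('d.set_priority')  -> 'priority'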
def _handle_unavailable_rpc_method(method, rt_obj):
msg = "Method isn't available."
if rt_obj._get_client_version_tuple() < method.min_version:
msg = "This method is only available in " \
"RTorrent version v{0} or later".format(
convert_version_tuple_to_str(method.min_version))
raise MethodError(msg)
class DummyClass:
def __init__(self):
pass
class Method:
"""Represents an individual RPC method"""
def __init__(self, _class, method_name,
rpc_call, docstring=None, varname=None, **kwargs):
self._class = _class # : Class this method is associated with
self.class_name = _class.__name__
self.method_name = method_name # : name of public-facing method
self.rpc_call = rpc_call # : name of rpc method
self.docstring = docstring # : docstring for rpc method (optional)
        self.varname = varname  # : name of the variable the method's result is stored in (derived from rpc_call if not given)
self.min_version = kwargs.get("min_version", (
0, 0, 0)) # : Minimum version of rTorrent required
self.boolean = kwargs.get("boolean", False) # : returns boolean value?
self.post_process_func = kwargs.get(
"post_process_func", None) # : custom post process function
self.aliases = kwargs.get(
"aliases", []) # : aliases for method (optional)
self.required_args = []
#: Arguments required when calling the method (not utilized)
self.method_type = self._get_method_type()
if self.varname is None:
self.varname = get_varname(self.rpc_call)
assert self.varname is not None, "Couldn't get variable name."
def __repr__(self):
return safe_repr("Method(method_name='{0}', rpc_call='{1}')",
self.method_name, self.rpc_call)
def _get_method_type(self):
"""Determine whether method is a modifier or a retriever"""
        if self.method_name[:4] == "set_":
            return('m')  # modifier
        else:
            return('r')  # retriever
def is_modifier(self):
if self.method_type == 'm':
return(True)
else:
return(False)
def is_retriever(self):
if self.method_type == 'r':
return(True)
else:
return(False)
def is_available(self, rt_obj):
if rt_obj._get_client_version_tuple() < self.min_version or \
self.rpc_call not in rt_obj._get_rpc_methods():
return(False)
else:
return(True)
class Multicall:
def __init__(self, class_obj, **kwargs):
self.class_obj = class_obj
if class_obj.__class__.__name__ == "RTorrent":
self.rt_obj = class_obj
else:
self.rt_obj = class_obj._rt_obj
self.calls = []
def add(self, method, *args):
"""Add call to multicall
@param method: L{Method} instance or name of raw RPC method
@type method: Method or str
@param args: call arguments
"""
# if a raw rpc method was given instead of a Method instance,
# try and find the instance for it. And if all else fails, create a
# dummy Method instance
if isinstance(method, str):
result = find_method(method)
# if result not found
if result == -1:
method = Method(DummyClass, method, method)
else:
method = result
# ensure method is available before adding
if not method.is_available(self.rt_obj):
_handle_unavailable_rpc_method(method, self.rt_obj)
self.calls.append((method, args))
def list_calls(self):
for c in self.calls:
print(c)
def call(self):
"""Execute added multicall calls
@return: the results (post-processed), in the order they were added
@rtype: tuple
"""
m = xmlrpclib.MultiCall(self.rt_obj._get_conn())
for call in self.calls:
method, args = call
rpc_call = getattr(method, "rpc_call")
getattr(m, rpc_call)(*args)
results = m()
results = tuple(results)
results_processed = []
for r, c in zip(results, self.calls):
method = c[0] # Method instance
result = process_result(method, r)
results_processed.append(result)
# assign result to class_obj
exists = hasattr(self.class_obj, method.varname)
if not exists or not inspect.ismethod(getattr(self.class_obj, method.varname)):
setattr(self.class_obj, method.varname, result)
return(tuple(results_processed))
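# Hedged usage sketch (added; not part of the original module): queue several
# raw RPC calls and fire them in one round trip. ``torrent`` is an assumed
# rtorrent.torrent.Torrent instance with an ``info_hash`` attribute.
#
#     mc = Multicall(torrent)
#     mc.add('d.get_down_rate', torrent.info_hash)
#     mc.add('d.get_up_rate', torrent.info_hash)
#     down_rate, up_rate = mc.call()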
def call_method(class_obj, method, *args):
"""Handles single RPC calls
@param class_obj: Peer/File/Torrent/Tracker/RTorrent instance
@type class_obj: object
@param method: L{Method} instance or name of raw RPC method
@type method: Method or str
"""
if method.is_retriever():
args = args[:-1]
else:
assert args[-1] is not None, "No argument given."
if class_obj.__class__.__name__ == "RTorrent":
rt_obj = class_obj
else:
rt_obj = class_obj._rt_obj
# check if rpc method is even available
if not method.is_available(rt_obj):
_handle_unavailable_rpc_method(method, rt_obj)
m = Multicall(class_obj)
m.add(method, *args)
# only added one method, only getting one result back
ret_value = m.call()[0]
####### OBSOLETE ##########################################################
# if method.is_retriever():
# #value = process_result(method, ret_value)
# value = ret_value #MultiCall already processed the result
# else:
# # we're setting the user's input to method.varname
# # but we'll return the value that xmlrpc gives us
# value = process_result(method, args[-1])
##########################################################################
return(ret_value)
def find_method(rpc_call):
"""Return L{Method} instance associated with given RPC call"""
method_lists = [
rtorrent.methods,
rtorrent.file.methods,
rtorrent.tracker.methods,
rtorrent.peer.methods,
rtorrent.torrent.methods,
]
for l in method_lists:
for m in l:
if m.rpc_call.lower() == rpc_call.lower():
return(m)
return(-1)
def process_result(method, result):
"""Process given C{B{result}} based on flags set in C{B{method}}
@param method: L{Method} instance
@type method: Method
@param result: result to be processed (the result of given L{Method} instance)
@note: Supported Processing:
- boolean - convert ones and zeros returned by rTorrent and
convert to python boolean values
"""
# handle custom post processing function
if method.post_process_func is not None:
result = method.post_process_func(result)
# is boolean?
if method.boolean:
if result in [1, '1']:
result = True
elif result in [0, '0']:
result = False
return(result)
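# Illustrative example (added): for a Method constructed with boolean=True,
# rTorrent's 0/1-style replies are normalized to Python booleans:
#   process_result(method, '1') -> True
#   process_result(method, 0)   -> False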
def _build_rpc_methods(class_, method_list):
"""Build glorified aliases to raw RPC methods"""
instance = None
if not inspect.isclass(class_):
instance = class_
class_ = instance.__class__
for m in method_list:
class_name = m.class_name
if class_name != class_.__name__:
continue
if class_name == "RTorrent":
caller = lambda self, arg = None, method = m:\
call_method(self, method, bool_to_int(arg))
elif class_name == "Torrent":
caller = lambda self, arg = None, method = m:\
call_method(self, method, self.rpc_id,
bool_to_int(arg))
elif class_name in ["Tracker", "File"]:
caller = lambda self, arg = None, method = m:\
call_method(self, method, self.rpc_id,
bool_to_int(arg))
elif class_name == "Peer":
caller = lambda self, arg = None, method = m:\
call_method(self, method, self.rpc_id,
bool_to_int(arg))
elif class_name == "Group":
caller = lambda arg = None, method = m: \
call_method(instance, method, bool_to_int(arg))
if m.docstring is None:
m.docstring = ""
# print(m)
docstring = """{0}
@note: Variable where the result for this method is stored: {1}.{2}""".format(
m.docstring,
class_name,
m.varname)
caller.__doc__ = docstring
for method_name in [m.method_name] + list(m.aliases):
if instance is None:
setattr(class_, method_name, caller)
else:
setattr(instance, method_name, caller)
|
gitsimon/tq_website | refs/heads/master | cms_plugins/migrations/0003_delete_thumbnailpluginmodel.py | 2 | # Generated by Django 2.2.12 on 2020-04-29 11:08
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('cms', '0022_auto_20180620_1551'),
('cms_plugins', '0002_auto_20200215_1336'),
]
operations = [
migrations.DeleteModel(
name='ThumbnailPluginModel',
),
]
|
zakandrewking/cobrapy | refs/heads/master | cobra/test/data/update_pickles.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from collections import OrderedDict
from json import dump as json_dump
import cobra
from cobra.io import (
load_matlab_model, read_sbml_model, save_json_model, save_matlab_model,
write_sbml_model)
from cobra.io.sbml3 import write_sbml2
# This script regenerates pickles of cobra Models. Should be
# performed after updating core classes to prevent subtle bugs.
try:
from cPickle import load, dump
except ImportError:
from pickle import load, dump
# ecoli
ecoli_model = read_sbml_model("iJO1366.xml")
with open("iJO1366.pickle", "wb") as outfile:
dump(ecoli_model, outfile, protocol=2)
# salmonella
salmonella = read_sbml_model("salmonella.xml")
with open("salmonella.genes", "rb") as infile:
gene_names = load(infile)
for gene in salmonella.genes:
gene.name = gene_names[gene.id]
with open("salmonella.media", "rb") as infile:
salmonella.media_compositions = load(infile)
with open("salmonella.pickle", "wb") as outfile:
dump(salmonella, outfile, protocol=2)
# create mini model from textbook
textbook = read_sbml_model("textbook.xml.gz")
mini = cobra.Model("mini_textbook")
mini.compartments = textbook.compartments
for r in textbook.reactions:
if r.id in ("GLCpts", "PGI", "PFK", "FBA", "TPI", "GAPD", "PGK", "PGM",
"ENO", "PYK", "EX_glc__D_e", "EX_h_e", "H2Ot", "ATPM",
"PIt2r"):
mini.add_reaction(r.copy())
mini.reactions.ATPM.upper_bound = mini.reactions.PGI.upper_bound
mini.objective = ["PFK", "ATPM"] # No biomass, 2 reactions
# add in some information from iJO1366
mini.add_reaction(ecoli_model.reactions.LDH_D.copy())
mini.add_reaction(ecoli_model.reactions.EX_lac__D_e.copy())
r = cobra.Reaction("D_LACt2")
mini.add_reaction(r)
r.gene_reaction_rule = ecoli_model.reactions.D__LACt2pp.gene_reaction_rule
r.reaction = ecoli_model.reactions.D__LACt2pp.reaction.replace("_p", "_e")
mini.reactions.GLCpts.gene_reaction_rule = \
ecoli_model.reactions.GLCptspp.gene_reaction_rule
# adjust bounds
for i in ["ATPM", "D_LACt2", "EX_lac__D_e", "LDH_D"]:
mini.reactions.get_by_id(i).upper_bound = mini.reactions.PGI.upper_bound
for i in ["D_LACt2", "LDH_D"]:
mini.reactions.get_by_id(i).lower_bound = mini.reactions.PGI.lower_bound
# set names and annotation
for g in mini.genes:
try:
tg = textbook.genes.get_by_id(g.id)
except KeyError:
continue
g.name = tg.name
g.annotation = tg.annotation
mini.reactions.sort()
mini.genes.sort()
mini.metabolites.sort()
# output to various formats
with open("mini.pickle", "wb") as outfile:
dump(mini, outfile, protocol=2)
save_matlab_model(mini, "mini.mat")
save_json_model(mini, "mini.json", pretty=True)
write_sbml_model(mini, "mini_fbc2.xml")
write_sbml_model(mini, "mini_fbc2.xml.bz2")
write_sbml_model(mini, "mini_fbc2.xml.gz")
write_sbml2(mini, "mini_fbc1.xml", use_fbc_package=True)
write_sbml_model(mini, "mini_cobra.xml", use_fbc_package=False)
raven = load_matlab_model("raven.mat")
with open("raven.pickle", "wb") as outfile:
dump(raven, outfile, protocol=2)
# TODO: these need reference solutions rather than circular solution checking!
# fva results
fva_result = cobra.flux_analysis.flux_variability_analysis(textbook)
clean_result = OrderedDict()
for key in sorted(fva_result):
clean_result[key] = {k: round(v, 5) for k, v in fva_result[key].items()}
with open("textbook_fva.json", "w") as outfile:
json_dump(clean_result, outfile)
# fva with pfba constraint
fva_result = cobra.flux_analysis.flux_variability_analysis(textbook,
pfba_factor=1.1)
clean_result = OrderedDict()
for key in sorted(fva_result):
clean_result[key] = {k: round(v, 5) for k, v in fva_result[key].items()}
with open("textbook_pfba_fva.json", "w") as outfile:
json_dump(clean_result, outfile)
# textbook solution
solution = cobra.flux_analysis.parsimonious.pfba(textbook)
with open('textbook_solution.pickle', 'wb') as f:
dump(solution, f, protocol=2)
|
andremissaglia/ekaaty_liveusbcreator | refs/heads/master | liveusb/setup.py | 2 | from distutils.core import setup
def main():
setup(
name='gettext_windows',
version='1.0',
py_modules=['gettext_windows'],
author='Alexander Belchenko',
author_email='bialix@ukr.net',
url='https://launchpad.net/gettext-py-windows',
description='Helper for standard gettext.py on Windows',
long_description="""Helper for standard gettext.py on Windows.
Module obtains user language code on Windows to use with standard
Python gettext.py library.""",
keywords='gettext Windows',
license='MIT',
)
if __name__ == '__main__':
main()
|
xaviercobain88/framework-python | refs/heads/master | openerp/tools/func.py | 63 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
__all__ = ['synchronized']
from functools import wraps
from inspect import getsourcefile
def synchronized(lock_attr='_lock'):
def decorator(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
lock = getattr(self, lock_attr)
try:
lock.acquire()
return func(self, *args, **kwargs)
finally:
lock.release()
return wrapper
return decorator
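# Minimal usage sketch (added for illustration; the names below are examples
# only): the decorated method serializes access through the instance's
# ``_lock`` attribute.
#
#     import threading
#
#     class Counter(object):
#         def __init__(self):
#             self._lock = threading.Lock()
#             self.value = 0
#
#         @synchronized()
#         def increment(self):
#             self.value += 1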
def frame_codeinfo(fframe, back=0):
""" Return a (filename, line) pair for a previous frame .
@return (filename, lineno) where lineno is either int or string==''
"""
try:
if not fframe:
return "<unknown>", ''
for i in range(back):
fframe = fframe.f_back
try:
fname = getsourcefile(fframe)
except TypeError:
fname = '<builtin>'
lineno = fframe.f_lineno or ''
return fname, lineno
except Exception:
return "<unknown>", ''
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
doismellburning/edx-platform | refs/heads/master | lms/djangoapps/mobile_api/users/urls.py | 130 | """
URLs for user API
"""
from django.conf.urls import patterns, url
from django.conf import settings
from .views import UserDetail, UserCourseEnrollmentsList, UserCourseStatus
USERNAME_PATTERN = r'(?P<username>[\w.+-]+)'
urlpatterns = patterns(
'mobile_api.users.views',
url('^' + USERNAME_PATTERN + '$', UserDetail.as_view(), name='user-detail'),
url(
'^' + USERNAME_PATTERN + '/course_enrollments/$',
UserCourseEnrollmentsList.as_view(),
name='courseenrollment-detail'
),
url('^{}/course_status_info/{}'.format(USERNAME_PATTERN, settings.COURSE_ID_PATTERN),
UserCourseStatus.as_view(),
name='user-course-status')
)
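# Illustrative matches (added), relative to wherever this urlconf is included
# and assuming a course id of the usual org/course/run form:
#   some_user                                    -> user-detail
#   some_user/course_enrollments/                -> courseenrollment-detail
#   some_user/course_status_info/org/course/run  -> user-course-status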
|
orangeduck/PyAutoC | refs/heads/master | Python27/Lib/plat-irix6/FL.py | 132 | # Constants used by the FORMS library (module fl).
# This corresponds to "forms.h".
# Recommended use: import FL; ... FL.NORMAL_BOX ... etc.
# Alternate use: from FL import *; ... NORMAL_BOX ... etc.
from warnings import warnpy3k
warnpy3k("the FL module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
_v20 = 1
_v21 = 1
##import fl
##try:
## _v20 = (fl.get_rgbmode is not None)
##except:
## _v20 = 0
##del fl
NULL = 0
FALSE = 0
TRUE = 1
EVENT = -1
LABEL_SIZE = 64
if _v20:
SHORTCUT_SIZE = 32
PLACE_FREE = 0
PLACE_SIZE = 1
PLACE_ASPECT = 2
PLACE_MOUSE = 3
PLACE_CENTER = 4
PLACE_POSITION = 5
FL_PLACE_FULLSCREEN = 6
FIND_INPUT = 0
FIND_AUTOMATIC = 1
FIND_MOUSE = 2
BEGIN_GROUP = 10000
END_GROUP = 20000
ALIGN_TOP = 0
ALIGN_BOTTOM = 1
ALIGN_LEFT = 2
ALIGN_RIGHT = 3
ALIGN_CENTER = 4
NO_BOX = 0
UP_BOX = 1
DOWN_BOX = 2
FLAT_BOX = 3
BORDER_BOX = 4
SHADOW_BOX = 5
FRAME_BOX = 6
ROUNDED_BOX = 7
RFLAT_BOX = 8
RSHADOW_BOX = 9
TOP_BOUND_COL = 51
LEFT_BOUND_COL = 55
BOT_BOUND_COL = 40
RIGHT_BOUND_COL = 35
COL1 = 47
MCOL = 49
LCOL = 0
BOUND_WIDTH = 3.0
DRAW = 0
PUSH = 1
RELEASE = 2
ENTER = 3
LEAVE = 4
MOUSE = 5
FOCUS = 6
UNFOCUS = 7
KEYBOARD = 8
STEP = 9
MOVE = 10
FONT_NAME = 'Helvetica'
FONT_BOLDNAME = 'Helvetica-Bold'
FONT_ITALICNAME = 'Helvetica-Oblique'
FONT_FIXEDNAME = 'Courier'
FONT_ICONNAME = 'Icon'
SMALL_FONT = 8.0
NORMAL_FONT = 11.0
LARGE_FONT = 20.0
NORMAL_STYLE = 0
BOLD_STYLE = 1
ITALIC_STYLE = 2
FIXED_STYLE = 3
ENGRAVED_STYLE = 4
ICON_STYLE = 5
BITMAP = 3
NORMAL_BITMAP = 0
BITMAP_BOXTYPE = NO_BOX
BITMAP_COL1 = 0
BITMAP_COL2 = COL1
BITMAP_LCOL = LCOL
BITMAP_ALIGN = ALIGN_BOTTOM
BITMAP_MAXSIZE = 128*128
BITMAP_BW = BOUND_WIDTH
BOX = 1
BOX_BOXTYPE = UP_BOX
BOX_COL1 = COL1
BOX_LCOL = LCOL
BOX_ALIGN = ALIGN_CENTER
BOX_BW = BOUND_WIDTH
BROWSER = 71
NORMAL_BROWSER = 0
SELECT_BROWSER = 1
HOLD_BROWSER = 2
MULTI_BROWSER = 3
BROWSER_BOXTYPE = DOWN_BOX
BROWSER_COL1 = COL1
BROWSER_COL2 = 3
BROWSER_LCOL = LCOL
BROWSER_ALIGN = ALIGN_BOTTOM
BROWSER_SLCOL = COL1
BROWSER_BW = BOUND_WIDTH
BROWSER_LINELENGTH = 128
BROWSER_MAXLINE = 512
BUTTON = 11
NORMAL_BUTTON = 0
PUSH_BUTTON = 1
RADIO_BUTTON = 2
HIDDEN_BUTTON = 3
TOUCH_BUTTON = 4
INOUT_BUTTON = 5
RETURN_BUTTON = 6
if _v20:
HIDDEN_RET_BUTTON = 7
BUTTON_BOXTYPE = UP_BOX
BUTTON_COL1 = COL1
BUTTON_COL2 = COL1
BUTTON_LCOL = LCOL
BUTTON_ALIGN = ALIGN_CENTER
BUTTON_MCOL1 = MCOL
BUTTON_MCOL2 = MCOL
BUTTON_BW = BOUND_WIDTH
if _v20:
CHART = 4
BAR_CHART = 0
HORBAR_CHART = 1
LINE_CHART = 2
FILLED_CHART = 3
SPIKE_CHART = 4
PIE_CHART = 5
SPECIALPIE_CHART = 6
CHART_BOXTYPE = BORDER_BOX
CHART_COL1 = COL1
CHART_LCOL = LCOL
CHART_ALIGN = ALIGN_BOTTOM
CHART_BW = BOUND_WIDTH
CHART_MAX = 128
CHOICE = 42
NORMAL_CHOICE = 0
CHOICE_BOXTYPE = DOWN_BOX
CHOICE_COL1 = COL1
CHOICE_COL2 = LCOL
CHOICE_LCOL = LCOL
CHOICE_ALIGN = ALIGN_LEFT
CHOICE_BW = BOUND_WIDTH
CHOICE_MCOL = MCOL
CHOICE_MAXITEMS = 128
CHOICE_MAXSTR = 64
CLOCK = 61
SQUARE_CLOCK = 0
ROUND_CLOCK = 1
CLOCK_BOXTYPE = UP_BOX
CLOCK_COL1 = 37
CLOCK_COL2 = 42
CLOCK_LCOL = LCOL
CLOCK_ALIGN = ALIGN_BOTTOM
CLOCK_TOPCOL = COL1
CLOCK_BW = BOUND_WIDTH
COUNTER = 25
NORMAL_COUNTER = 0
SIMPLE_COUNTER = 1
COUNTER_BOXTYPE = UP_BOX
COUNTER_COL1 = COL1
COUNTER_COL2 = 4
COUNTER_LCOL = LCOL
COUNTER_ALIGN = ALIGN_BOTTOM
if _v20:
COUNTER_BW = BOUND_WIDTH
else:
DEFAULT = 51
RETURN_DEFAULT = 0
ALWAYS_DEFAULT = 1
DIAL = 22
NORMAL_DIAL = 0
LINE_DIAL = 1
DIAL_BOXTYPE = NO_BOX
DIAL_COL1 = COL1
DIAL_COL2 = 37
DIAL_LCOL = LCOL
DIAL_ALIGN = ALIGN_BOTTOM
DIAL_TOPCOL = COL1
DIAL_BW = BOUND_WIDTH
FREE = 101
NORMAL_FREE = 1
SLEEPING_FREE = 2
INPUT_FREE = 3
CONTINUOUS_FREE = 4
ALL_FREE = 5
INPUT = 31
NORMAL_INPUT = 0
if _v20:
FLOAT_INPUT = 1
INT_INPUT = 2
HIDDEN_INPUT = 3
if _v21:
MULTILINE_INPUT = 4
SECRET_INPUT = 5
else:
ALWAYS_INPUT = 1
INPUT_BOXTYPE = DOWN_BOX
INPUT_COL1 = 13
INPUT_COL2 = 5
INPUT_LCOL = LCOL
INPUT_ALIGN = ALIGN_LEFT
INPUT_TCOL = LCOL
INPUT_CCOL = 4
INPUT_BW = BOUND_WIDTH
INPUT_MAX = 128
LIGHTBUTTON = 12
LIGHTBUTTON_BOXTYPE = UP_BOX
LIGHTBUTTON_COL1 = 39
LIGHTBUTTON_COL2 = 3
LIGHTBUTTON_LCOL = LCOL
LIGHTBUTTON_ALIGN = ALIGN_CENTER
LIGHTBUTTON_TOPCOL = COL1
LIGHTBUTTON_MCOL = MCOL
LIGHTBUTTON_BW1 = BOUND_WIDTH
LIGHTBUTTON_BW2 = BOUND_WIDTH/2.0
LIGHTBUTTON_MINSIZE = 12.0
MENU = 41
TOUCH_MENU = 0
PUSH_MENU = 1
MENU_BOXTYPE = BORDER_BOX
MENU_COL1 = 55
MENU_COL2 = 37
MENU_LCOL = LCOL
MENU_ALIGN = ALIGN_CENTER
MENU_BW = BOUND_WIDTH
MENU_MAX = 300
POSITIONER = 23
NORMAL_POSITIONER = 0
POSITIONER_BOXTYPE = DOWN_BOX
POSITIONER_COL1 = COL1
POSITIONER_COL2 = 1
POSITIONER_LCOL = LCOL
POSITIONER_ALIGN = ALIGN_BOTTOM
POSITIONER_BW = BOUND_WIDTH
ROUNDBUTTON = 13
ROUNDBUTTON_BOXTYPE = NO_BOX
ROUNDBUTTON_COL1 = 7
ROUNDBUTTON_COL2 = 3
ROUNDBUTTON_LCOL = LCOL
ROUNDBUTTON_ALIGN = ALIGN_CENTER
ROUNDBUTTON_TOPCOL = COL1
ROUNDBUTTON_MCOL = MCOL
ROUNDBUTTON_BW = BOUND_WIDTH
SLIDER = 21
VALSLIDER = 24
VERT_SLIDER = 0
HOR_SLIDER = 1
VERT_FILL_SLIDER = 2
HOR_FILL_SLIDER = 3
VERT_NICE_SLIDER = 4
HOR_NICE_SLIDER = 5
SLIDER_BOXTYPE = DOWN_BOX
SLIDER_COL1 = COL1
SLIDER_COL2 = COL1
SLIDER_LCOL = LCOL
SLIDER_ALIGN = ALIGN_BOTTOM
SLIDER_BW1 = BOUND_WIDTH
SLIDER_BW2 = BOUND_WIDTH*0.75
SLIDER_FINE = 0.05
SLIDER_WIDTH = 0.08
TEXT = 2
NORMAL_TEXT = 0
TEXT_BOXTYPE = NO_BOX
TEXT_COL1 = COL1
TEXT_LCOL = LCOL
TEXT_ALIGN = ALIGN_LEFT
TEXT_BW = BOUND_WIDTH
TIMER = 62
NORMAL_TIMER = 0
VALUE_TIMER = 1
HIDDEN_TIMER = 2
TIMER_BOXTYPE = DOWN_BOX
TIMER_COL1 = COL1
TIMER_COL2 = 1
TIMER_LCOL = LCOL
TIMER_ALIGN = ALIGN_CENTER
TIMER_BW = BOUND_WIDTH
TIMER_BLINKRATE = 0.2
|
gpapakyriakopoulos/PyFlask | refs/heads/master | pyflask/lib/python2.7/site-packages/jinja2/testsuite/__init__.py | 404 | # -*- coding: utf-8 -*-
"""
jinja2.testsuite
~~~~~~~~~~~~~~~~
    All the unittests of Jinja2. These tests can be executed by
    running run-tests.py, optionally using multiple Python
    versions at the same time.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import os
import re
import sys
import unittest
from traceback import format_exception
from jinja2 import loaders
from jinja2._compat import PY2
here = os.path.dirname(os.path.abspath(__file__))
dict_loader = loaders.DictLoader({
'justdict.html': 'FOO'
})
package_loader = loaders.PackageLoader('jinja2.testsuite.res', 'templates')
filesystem_loader = loaders.FileSystemLoader(here + '/res/templates')
function_loader = loaders.FunctionLoader({'justfunction.html': 'FOO'}.get)
choice_loader = loaders.ChoiceLoader([dict_loader, package_loader])
prefix_loader = loaders.PrefixLoader({
'a': filesystem_loader,
'b': dict_loader
})
class JinjaTestCase(unittest.TestCase):
### use only these methods for testing. If you need standard
### unittest method, wrap them!
def setup(self):
pass
def teardown(self):
pass
def setUp(self):
self.setup()
def tearDown(self):
self.teardown()
def assert_equal(self, a, b):
return self.assertEqual(a, b)
def assert_raises(self, *args, **kwargs):
return self.assertRaises(*args, **kwargs)
def assert_traceback_matches(self, callback, expected_tb):
try:
callback()
except Exception as e:
tb = format_exception(*sys.exc_info())
if re.search(expected_tb.strip(), ''.join(tb)) is None:
                self.fail('Traceback did not match:\n\n%s\nexpected:\n%s'
                          % (''.join(tb), expected_tb))
else:
self.fail('Expected exception')
def find_all_tests(suite):
"""Yields all the tests and their names from a given suite."""
suites = [suite]
while suites:
s = suites.pop()
try:
suites.extend(s)
except TypeError:
yield s, '%s.%s.%s' % (
s.__class__.__module__,
s.__class__.__name__,
s._testMethodName
)
class BetterLoader(unittest.TestLoader):
"""A nicer loader that solves two problems. First of all we are setting
up tests from different sources and we're doing this programmatically
which breaks the default loading logic so this is required anyways.
    Secondly this loader has nicer matching for test names than the
default one so you can just do ``run-tests.py ViewTestCase`` and it
will work.
"""
def getRootSuite(self):
return suite()
def loadTestsFromName(self, name, module=None):
root = self.getRootSuite()
if name == 'suite':
return root
all_tests = []
for testcase, testname in find_all_tests(root):
if testname == name or \
testname.endswith('.' + name) or \
('.' + name + '.') in testname or \
testname.startswith(name + '.'):
all_tests.append(testcase)
if not all_tests:
raise LookupError('could not find test case for "%s"' % name)
if len(all_tests) == 1:
return all_tests[0]
rv = unittest.TestSuite()
for test in all_tests:
rv.addTest(test)
return rv
def suite():
from jinja2.testsuite import ext, filters, tests, core_tags, \
loader, inheritance, imports, lexnparse, security, api, \
regression, debug, utils, bytecode_cache, doctests
suite = unittest.TestSuite()
suite.addTest(ext.suite())
suite.addTest(filters.suite())
suite.addTest(tests.suite())
suite.addTest(core_tags.suite())
suite.addTest(loader.suite())
suite.addTest(inheritance.suite())
suite.addTest(imports.suite())
suite.addTest(lexnparse.suite())
suite.addTest(security.suite())
suite.addTest(api.suite())
suite.addTest(regression.suite())
suite.addTest(debug.suite())
suite.addTest(utils.suite())
suite.addTest(bytecode_cache.suite())
# doctests will not run on python 3 currently. Too many issues
# with that, do not test that on that platform.
if PY2:
suite.addTest(doctests.suite())
return suite
def main():
"""Runs the testsuite as command line application."""
try:
unittest.main(testLoader=BetterLoader(), defaultTest='suite')
except Exception as e:
print('Error: %s' % e)
|
kernc/networkx | refs/heads/master | networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality.py | 89 | #!/usr/bin/env python
from nose.tools import *
from nose import SkipTest
import networkx
from nose.plugins.attrib import attr
from networkx import edge_current_flow_betweenness_centrality \
as edge_current_flow
from networkx import approximate_current_flow_betweenness_centrality \
as approximate_cfbc
class TestFlowBetweennessCentrality(object):
numpy=1 # nosetests attribute, use nosetests -a 'not numpy' to skip test
@classmethod
def setupClass(cls):
global np
try:
import numpy as np
import scipy
except ImportError:
raise SkipTest('NumPy not available.')
def test_K4_normalized(self):
"""Betweenness centrality: K4"""
G=networkx.complete_graph(4)
b=networkx.current_flow_betweenness_centrality(G,normalized=True)
b_answer={0: 0.25, 1: 0.25, 2: 0.25, 3: 0.25}
for n in sorted(G):
assert_almost_equal(b[n],b_answer[n])
G.add_edge(0,1,{'weight':0.5,'other':0.3})
b=networkx.current_flow_betweenness_centrality(G,normalized=True,weight=None)
for n in sorted(G):
assert_almost_equal(b[n],b_answer[n])
wb_answer={0: 0.2222222, 1: 0.2222222, 2: 0.30555555, 3: 0.30555555}
b=networkx.current_flow_betweenness_centrality(G,normalized=True)
for n in sorted(G):
assert_almost_equal(b[n],wb_answer[n])
wb_answer={0: 0.2051282, 1: 0.2051282, 2: 0.33974358, 3: 0.33974358}
b=networkx.current_flow_betweenness_centrality(G,normalized=True,weight='other')
for n in sorted(G):
assert_almost_equal(b[n],wb_answer[n])
def test_K4(self):
"""Betweenness centrality: K4"""
G=networkx.complete_graph(4)
for solver in ['full','lu','cg']:
b=networkx.current_flow_betweenness_centrality(G, normalized=False,
solver=solver)
b_answer={0: 0.75, 1: 0.75, 2: 0.75, 3: 0.75}
for n in sorted(G):
assert_almost_equal(b[n],b_answer[n])
def test_P4_normalized(self):
"""Betweenness centrality: P4 normalized"""
G=networkx.path_graph(4)
b=networkx.current_flow_betweenness_centrality(G,normalized=True)
b_answer={0: 0, 1: 2./3, 2: 2./3, 3:0}
for n in sorted(G):
assert_almost_equal(b[n],b_answer[n])
def test_P4(self):
"""Betweenness centrality: P4"""
G=networkx.path_graph(4)
b=networkx.current_flow_betweenness_centrality(G,normalized=False)
b_answer={0: 0, 1: 2, 2: 2, 3: 0}
for n in sorted(G):
assert_almost_equal(b[n],b_answer[n])
def test_star(self):
"""Betweenness centrality: star """
G=networkx.Graph()
G.add_star(['a','b','c','d'])
b=networkx.current_flow_betweenness_centrality(G,normalized=True)
b_answer={'a': 1.0, 'b': 0.0, 'c': 0.0, 'd':0.0}
for n in sorted(G):
assert_almost_equal(b[n],b_answer[n])
    def test_solvers(self):
"""Betweenness centrality: alternate solvers"""
G=networkx.complete_graph(4)
for solver in ['full','lu','cg']:
b=networkx.current_flow_betweenness_centrality(G,normalized=False,
solver=solver)
b_answer={0: 0.75, 1: 0.75, 2: 0.75, 3: 0.75}
for n in sorted(G):
assert_almost_equal(b[n],b_answer[n])
class TestApproximateFlowBetweennessCentrality(object):
numpy=1 # nosetests attribute, use nosetests -a 'not numpy' to skip test
@classmethod
def setupClass(cls):
global np
global assert_allclose
try:
import numpy as np
import scipy
from numpy.testing import assert_allclose
except ImportError:
raise SkipTest('NumPy not available.')
def test_K4_normalized(self):
"Approximate current-flow betweenness centrality: K4 normalized"
G=networkx.complete_graph(4)
b=networkx.current_flow_betweenness_centrality(G,normalized=True)
epsilon=0.1
ba = approximate_cfbc(G,normalized=True, epsilon=0.5*epsilon)
for n in sorted(G):
assert_allclose(b[n],ba[n],atol=epsilon)
def test_K4(self):
"Approximate current-flow betweenness centrality: K4"
G=networkx.complete_graph(4)
b=networkx.current_flow_betweenness_centrality(G,normalized=False)
epsilon=0.1
ba = approximate_cfbc(G,normalized=False, epsilon=0.5*epsilon)
for n in sorted(G):
assert_allclose(b[n],ba[n],atol=epsilon*len(G)**2)
def test_star(self):
"Approximate current-flow betweenness centrality: star"
G=networkx.Graph()
G.add_star(['a','b','c','d'])
b=networkx.current_flow_betweenness_centrality(G,normalized=True)
epsilon=0.1
ba = approximate_cfbc(G,normalized=True, epsilon=0.5*epsilon)
for n in sorted(G):
assert_allclose(b[n],ba[n],atol=epsilon)
def test_grid(self):
"Approximate current-flow betweenness centrality: 2d grid"
G=networkx.grid_2d_graph(4,4)
b=networkx.current_flow_betweenness_centrality(G,normalized=True)
epsilon=0.1
ba = approximate_cfbc(G,normalized=True, epsilon=0.5*epsilon)
for n in sorted(G):
assert_allclose(b[n],ba[n],atol=epsilon)
def test_solvers(self):
"Approximate current-flow betweenness centrality: solvers"
G=networkx.complete_graph(4)
epsilon=0.1
for solver in ['full','lu','cg']:
b=approximate_cfbc(G,normalized=False,solver=solver,
epsilon=0.5*epsilon)
b_answer={0: 0.75, 1: 0.75, 2: 0.75, 3: 0.75}
for n in sorted(G):
assert_allclose(b[n],b_answer[n],atol=epsilon)
class TestWeightedFlowBetweennessCentrality(object):
pass
class TestEdgeFlowBetweennessCentrality(object):
numpy=1 # nosetests attribute, use nosetests -a 'not numpy' to skip test
@classmethod
def setupClass(cls):
global np
try:
import numpy as np
import scipy
except ImportError:
raise SkipTest('NumPy not available.')
def test_K4(self):
"""Edge flow betweenness centrality: K4"""
G=networkx.complete_graph(4)
b=edge_current_flow(G,normalized=True)
b_answer=dict.fromkeys(G.edges(),0.25)
for (s,t),v1 in b_answer.items():
v2=b.get((s,t),b.get((t,s)))
assert_almost_equal(v1,v2)
def test_K4_normalized(self):
"""Edge flow betweenness centrality: K4"""
G=networkx.complete_graph(4)
b=edge_current_flow(G,normalized=False)
b_answer=dict.fromkeys(G.edges(),0.75)
for (s,t),v1 in b_answer.items():
v2=b.get((s,t),b.get((t,s)))
assert_almost_equal(v1,v2)
def test_C4(self):
"""Edge flow betweenness centrality: C4"""
G=networkx.cycle_graph(4)
b=edge_current_flow(G,normalized=False)
b_answer={(0, 1):1.25,(0, 3):1.25, (1, 2):1.25, (2, 3): 1.25}
for (s,t),v1 in b_answer.items():
v2=b.get((s,t),b.get((t,s)))
assert_almost_equal(v1,v2)
def test_P4(self):
"""Edge betweenness centrality: P4"""
G=networkx.path_graph(4)
b=edge_current_flow(G,normalized=False)
b_answer={(0, 1):1.5,(1, 2):2.0, (2, 3):1.5}
for (s,t),v1 in b_answer.items():
v2=b.get((s,t),b.get((t,s)))
assert_almost_equal(v1,v2)
|
fatiando/fatiando | refs/heads/master | cookbook/gravmag_tensor_prism_noisy.py | 9 | """
GravMag: Generate noise-corrupted gravity gradient tensor data
"""
from fatiando import mesher, gridder, utils
from fatiando.gravmag import prism
from fatiando.vis import mpl
model = [mesher.Prism(-1000, 1000, -1000, 1000, 0, 2000, {'density': 1000})]
shape = (100, 100)
xp, yp, zp = gridder.regular((-5000, 5000, -5000, 5000), shape, z=-200)
components = [prism.gxx, prism.gxy, prism.gxz,
prism.gyy, prism.gyz, prism.gzz]
print "Calculate the tensor components and contaminate with 5 Eotvos noise"
ftg = [utils.contaminate(comp(xp, yp, zp, model), 5.0) for comp in components]
print "Plotting..."
mpl.figure(figsize=(14, 6))
mpl.suptitle("Contaminated FTG data")
names = ['gxx', 'gxy', 'gxz', 'gyy', 'gyz', 'gzz']
for i, data in enumerate(ftg):
mpl.subplot(2, 3, i + 1)
mpl.title(names[i])
mpl.axis('scaled')
levels = mpl.contourf(xp * 0.001, yp * 0.001, data, (100, 100), 12)
mpl.colorbar()
mpl.contour(xp * 0.001, yp * 0.001, data, shape, levels, clabel=False)
mpl.show()
|
griffinfoster/pulsar-polarization-sims | refs/heads/master | scripts/smoothProfile.py | 1 | #!/usr/bin/env python
"""
Apply a low pass filter to a pulsar profile
"""
#broaden filter
import pyfits as pf
import numpy as n
import pylab as p
import os
import sys
import shutil
import time
from scipy import signal
def smooth(x,window_len=11,window='hanning'):
"""smooth the data using a window with requested size.
This method is based on the convolution of a scaled window with the signal.
The signal is prepared by introducing reflected copies of the signal
(with the window size) in both ends so that transient parts are minimized
in the begining and end part of the output signal.
input:
x: the input signal
window_len: the dimension of the smoothing window; should be an odd integer
window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'
flat window will produce a moving average smoothing.
output: the smoothed signal
example:
    t=arange(-2,2,0.1)
x=sin(t)+randn(len(t))*0.1
y=smooth(x)
see also:
numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve
scipy.signal.lfilter
TODO: the window parameter could be the window itself if an array instead of a string
NOTE: length(output) != length(input), to correct this: return y[(window_len/2-1):-(window_len/2)] instead of just y.
"""
if x.ndim != 1:
raise ValueError, "smooth only accepts 1 dimension arrays."
if x.size < window_len:
raise ValueError, "Input vector needs to be bigger than window size."
if window_len<3:
return x
if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
raise ValueError, "Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'"
s=n.r_[x[window_len-1:0:-1],x,x[-1:-window_len:-1]]
#print(len(s))
if window == 'flat': #moving average
w=n.ones(window_len,'d')
else:
w=eval('n.'+window+'(window_len)')
y=n.convolve(w/w.sum(),s,mode='valid')
#return y
return y[(window_len/2-1):-(window_len/2+1)]
if __name__ == "__main__":
from optparse import OptionParser
o = OptionParser()
o.set_usage('%prog [options] [FITS file]')
o.set_description(__doc__)
o.add_option('-r', '--rot', dest='rot', action='store_true',
help='Rotate the profile by 0.5 of the phase')
o.add_option('-w','--win_len',dest='win_len',default=11,type='int',
help='Window smoothing size, should be odd, default:11')
o.add_option('-s','--save',dest='save',action='store_true',
help='Save the smoothed profile to a new fits file')
o.add_option('-S','--shift',dest='shift',default=0, type='int',
help='Shift the smoothed profile to the left N values default:0')
opts, args = o.parse_args(sys.argv[1:])
hdulist=pf.open(args[0])
#print hdulist.info()
primary=hdulist['PRIMARY'].header
print primary['FITSTYPE']
    #see www.atnf.csiro.au/research/pulsar/psrfits/fitsdef.html section: Subintegration data
d=hdulist[3].data
#print d
offsets=d[0][-3]
sclFactor=d[0][-2]
data=d[0][-1]
#print sclFactor
#print offsets
#print data.shape
if len(data.shape)==1:
data.shape=(4,1,data.shape[-1]/4)
#print data.shape
dout=n.zeros_like(data, dtype=n.float32)
for sid,stokes in enumerate(sclFactor): dout[sid,0,:]=data[sid,0,:].astype(n.float32)*sclFactor[sid]+offsets[sid]
xvals=n.arange(dout.shape[2],dtype=n.float32)
hdulist.close()
if opts.rot: dout=n.roll(dout, dout.shape[2]/2, axis=2)
##LOW PASS FILTER
#ntaps=dout.shape[2]
#cutoff=opts.cutoff
#fir=signal.firwin(ntaps,cutoff)
#ifilter=n.convolve(dout[0,0,:],fir)[int(ntaps/2)-1:-1*int(ntaps/2)]
#qfilter=n.convolve(dout[1,0,:],fir)[int(ntaps/2)-1:-1*int(ntaps/2)]
#ufilter=n.convolve(dout[2,0,:],fir)[int(ntaps/2)-1:-1*int(ntaps/2)]
#vfilter=n.convolve(dout[3,0,:],fir)[int(ntaps/2)-1:-1*int(ntaps/2)]
#SMOOTHING
ifilter=smooth(dout[0,0,:],window_len=opts.win_len)
qfilter=smooth(dout[1,0,:],window_len=opts.win_len)
ufilter=smooth(dout[2,0,:],window_len=opts.win_len)
vfilter=smooth(dout[3,0,:],window_len=opts.win_len)
#SHIFTING
if not (opts.shift==0):
shift=-1*opts.shift
print 'Applying a shift of %i units'%shift
ifilter=n.roll(ifilter,shift)
qfilter=n.roll(qfilter,shift)
ufilter=n.roll(ufilter,shift)
vfilter=n.roll(vfilter,shift)
if opts.save:
dirname,basename=os.path.split(os.path.abspath(args[0]))
outputname=basename.split('.fits')[0]+'.smooth.fits'
outputname=dirname+'/'+outputname
shutil.copy(os.path.abspath(args[0]),outputname)
time.sleep(.1)
hdulist=pf.open(outputname,mode='update')
dwrite=n.zeros_like(dout)
dwrite[0,0,:]=(ifilter-offsets[0])/sclFactor[0]
dwrite[1,0,:]=(qfilter-offsets[1])/sclFactor[1]
dwrite[2,0,:]=(ufilter-offsets[2])/sclFactor[2]
dwrite[3,0,:]=(vfilter-offsets[3])/sclFactor[3]
if opts.rot: dwrite=n.roll(dwrite, -dwrite.shape[2]/2, axis=2)
#dwrite=dwrite.flatten()
dDict=hdulist[3].data
print dwrite.shape
dDict[0][-1]=dwrite
hdulist[3].data=dDict
hdulist.flush()
hdulist.close()
#p.subplot(221)
#p.plot((ifilter-offsets[0])/sclFactor[0])
#p.plot((dout[0,0,:]-offsets[0])/sclFactor[0])
#
#p.subplot(222)
#p.plot((qfilter-offsets[1])/sclFactor[1])
#p.plot((dout[1,0,:]-offsets[1])/sclFactor[1])
#
#p.subplot(223)
#p.plot((ufilter-offsets[2])/sclFactor[2])
#p.plot((dout[2,0,:]-offsets[2])/sclFactor[2])
#p.subplot(224)
#p.plot((vfilter-offsets[3])/sclFactor[3])
#p.plot((dout[3,0,:]-offsets[3])/sclFactor[3])
#p.show()
|
munyirik/python | refs/heads/develop | cpython/PC/testpy.py | 89 | import sys
# This is a test module for Python. It looks in the standard
# places for various *.py files. If these are moved, you must
# change this module too.
try:
import os
except:
print("""Could not import the standard "os" module.
Please check your PYTHONPATH environment variable.""")
sys.exit(1)
try:
import symbol
except:
print("""Could not import the standard "symbol" module. If this is
a PC, you should add the dos_8x3 directory to your PYTHONPATH.""")
sys.exit(1)
import os
for dir in sys.path:
file = os.path.join(dir, "os.py")
if os.path.isfile(file):
test = os.path.join(dir, "test")
if os.path.isdir(test):
# Add the "test" directory to PYTHONPATH.
sys.path = sys.path + [test]
import regrtest # Standard Python tester.
regrtest.main()
|
mistercrunch/airflow | refs/heads/master | airflow/operators/latest_only.py | 8 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains an operator to run downstream tasks only for the
latest scheduled DagRun
"""
from typing import Dict, Iterable, Union
import pendulum
from airflow.operators.branch import BaseBranchOperator
class LatestOnlyOperator(BaseBranchOperator):
"""
Allows a workflow to skip tasks that are not running during the most
recent schedule interval.
    If the task is run outside of the latest schedule interval, all
    directly downstream tasks will be skipped.
Note that downstream tasks are never skipped if the given DAG_Run is
marked as externally triggered.
"""
ui_color = '#e9ffdb' # nyanza
def choose_branch(self, context: Dict) -> Union[str, Iterable[str]]:
# If the DAG Run is externally triggered, then return without
# skipping downstream tasks
if context['dag_run'] and context['dag_run'].external_trigger:
self.log.info("Externally triggered DAG_Run: allowing execution to proceed.")
return list(context['task'].get_direct_relative_ids(upstream=False))
now = pendulum.now('UTC')
left_window = context['dag'].following_schedule(context['execution_date'])
right_window = context['dag'].following_schedule(left_window)
self.log.info(
'Checking latest only with left_window: %s right_window: %s now: %s',
left_window,
right_window,
now,
)
if not left_window < now <= right_window:
self.log.info('Not latest execution, skipping downstream.')
# we return an empty list, thus the parent BaseBranchOperator
# won't exclude any downstream tasks from skipping.
return []
else:
self.log.info('Latest, allowing execution to proceed.')
return list(context['task'].get_direct_relative_ids(upstream=False))
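# Hedged usage sketch (added; not part of this module): place the operator in
# front of tasks that should only run for the newest schedule interval.
# ``dag`` and ``cleanup_task`` are assumed to exist.
#
#     from airflow.operators.latest_only import LatestOnlyOperator
#
#     latest_only = LatestOnlyOperator(task_id='latest_only', dag=dag)
#     latest_only >> cleanup_task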
|
laiqiqi886/kbengine | refs/heads/master | kbe/src/lib/python/Lib/test/test_tracemalloc.py | 60 | import contextlib
import os
import sys
import tracemalloc
import unittest
from unittest.mock import patch
from test.script_helper import assert_python_ok, assert_python_failure
from test import support
try:
import threading
except ImportError:
threading = None
EMPTY_STRING_SIZE = sys.getsizeof(b'')
def get_frames(nframe, lineno_delta):
frames = []
frame = sys._getframe(1)
for index in range(nframe):
code = frame.f_code
lineno = frame.f_lineno + lineno_delta
frames.append((code.co_filename, lineno))
lineno_delta = 0
frame = frame.f_back
if frame is None:
break
return tuple(frames)
def allocate_bytes(size):
nframe = tracemalloc.get_traceback_limit()
bytes_len = (size - EMPTY_STRING_SIZE)
frames = get_frames(nframe, 1)
data = b'x' * bytes_len
return data, tracemalloc.Traceback(frames)
def create_snapshots():
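    # Build two hand-crafted snapshots from raw traces. In this Python
    # version each raw trace is a (size_in_bytes, traceback_frames) tuple,
    # matching the internal format returned by tracemalloc._get_traces().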
traceback_limit = 2
raw_traces = [
(10, (('a.py', 2), ('b.py', 4))),
(10, (('a.py', 2), ('b.py', 4))),
(10, (('a.py', 2), ('b.py', 4))),
(2, (('a.py', 5), ('b.py', 4))),
(66, (('b.py', 1),)),
(7, (('<unknown>', 0),)),
]
snapshot = tracemalloc.Snapshot(raw_traces, traceback_limit)
raw_traces2 = [
(10, (('a.py', 2), ('b.py', 4))),
(10, (('a.py', 2), ('b.py', 4))),
(10, (('a.py', 2), ('b.py', 4))),
(2, (('a.py', 5), ('b.py', 4))),
(5000, (('a.py', 5), ('b.py', 4))),
(400, (('c.py', 578),)),
]
snapshot2 = tracemalloc.Snapshot(raw_traces2, traceback_limit)
return (snapshot, snapshot2)
def frame(filename, lineno):
return tracemalloc._Frame((filename, lineno))
def traceback(*frames):
return tracemalloc.Traceback(frames)
def traceback_lineno(filename, lineno):
return traceback((filename, lineno))
def traceback_filename(filename):
return traceback_lineno(filename, 0)
class TestTracemallocEnabled(unittest.TestCase):
def setUp(self):
if tracemalloc.is_tracing():
self.skipTest("tracemalloc must be stopped before the test")
tracemalloc.start(1)
def tearDown(self):
tracemalloc.stop()
def test_get_tracemalloc_memory(self):
data = [allocate_bytes(123) for count in range(1000)]
size = tracemalloc.get_tracemalloc_memory()
self.assertGreaterEqual(size, 0)
tracemalloc.clear_traces()
size2 = tracemalloc.get_tracemalloc_memory()
self.assertGreaterEqual(size2, 0)
self.assertLessEqual(size2, size)
def test_get_object_traceback(self):
tracemalloc.clear_traces()
obj_size = 12345
obj, obj_traceback = allocate_bytes(obj_size)
traceback = tracemalloc.get_object_traceback(obj)
self.assertEqual(traceback, obj_traceback)
def test_set_traceback_limit(self):
obj_size = 10
tracemalloc.stop()
self.assertRaises(ValueError, tracemalloc.start, -1)
tracemalloc.stop()
tracemalloc.start(10)
obj2, obj2_traceback = allocate_bytes(obj_size)
traceback = tracemalloc.get_object_traceback(obj2)
self.assertEqual(len(traceback), 10)
self.assertEqual(traceback, obj2_traceback)
tracemalloc.stop()
tracemalloc.start(1)
obj, obj_traceback = allocate_bytes(obj_size)
traceback = tracemalloc.get_object_traceback(obj)
self.assertEqual(len(traceback), 1)
self.assertEqual(traceback, obj_traceback)
def find_trace(self, traces, traceback):
for trace in traces:
if trace[1] == traceback._frames:
return trace
self.fail("trace not found")
def test_get_traces(self):
tracemalloc.clear_traces()
obj_size = 12345
obj, obj_traceback = allocate_bytes(obj_size)
traces = tracemalloc._get_traces()
trace = self.find_trace(traces, obj_traceback)
self.assertIsInstance(trace, tuple)
size, traceback = trace
self.assertEqual(size, obj_size)
self.assertEqual(traceback, obj_traceback._frames)
tracemalloc.stop()
self.assertEqual(tracemalloc._get_traces(), [])
def test_get_traces_intern_traceback(self):
# dummy wrappers to get more useful and identical frames in the traceback
def allocate_bytes2(size):
return allocate_bytes(size)
def allocate_bytes3(size):
return allocate_bytes2(size)
def allocate_bytes4(size):
return allocate_bytes3(size)
# Ensure that two identical tracebacks are not duplicated
tracemalloc.stop()
tracemalloc.start(4)
obj_size = 123
obj1, obj1_traceback = allocate_bytes4(obj_size)
obj2, obj2_traceback = allocate_bytes4(obj_size)
traces = tracemalloc._get_traces()
trace1 = self.find_trace(traces, obj1_traceback)
trace2 = self.find_trace(traces, obj2_traceback)
size1, traceback1 = trace1
size2, traceback2 = trace2
self.assertEqual(traceback2, traceback1)
self.assertIs(traceback2, traceback1)
def test_get_traced_memory(self):
        # Python allocates some internal objects, so the test must tolerate
# a small difference between the expected size and the real usage
max_error = 2048
# allocate one object
obj_size = 1024 * 1024
tracemalloc.clear_traces()
obj, obj_traceback = allocate_bytes(obj_size)
size, peak_size = tracemalloc.get_traced_memory()
self.assertGreaterEqual(size, obj_size)
self.assertGreaterEqual(peak_size, size)
self.assertLessEqual(size - obj_size, max_error)
self.assertLessEqual(peak_size - size, max_error)
# destroy the object
obj = None
size2, peak_size2 = tracemalloc.get_traced_memory()
self.assertLess(size2, size)
self.assertGreaterEqual(size - size2, obj_size - max_error)
self.assertGreaterEqual(peak_size2, peak_size)
# clear_traces() must reset traced memory counters
tracemalloc.clear_traces()
self.assertEqual(tracemalloc.get_traced_memory(), (0, 0))
# allocate another object
obj, obj_traceback = allocate_bytes(obj_size)
size, peak_size = tracemalloc.get_traced_memory()
self.assertGreaterEqual(size, obj_size)
# stop() also resets traced memory counters
tracemalloc.stop()
self.assertEqual(tracemalloc.get_traced_memory(), (0, 0))
def test_clear_traces(self):
obj, obj_traceback = allocate_bytes(123)
traceback = tracemalloc.get_object_traceback(obj)
self.assertIsNotNone(traceback)
tracemalloc.clear_traces()
traceback2 = tracemalloc.get_object_traceback(obj)
self.assertIsNone(traceback2)
def test_is_tracing(self):
tracemalloc.stop()
self.assertFalse(tracemalloc.is_tracing())
tracemalloc.start()
self.assertTrue(tracemalloc.is_tracing())
def test_snapshot(self):
obj, source = allocate_bytes(123)
# take a snapshot
snapshot = tracemalloc.take_snapshot()
# write on disk
snapshot.dump(support.TESTFN)
self.addCleanup(support.unlink, support.TESTFN)
# load from disk
snapshot2 = tracemalloc.Snapshot.load(support.TESTFN)
self.assertEqual(snapshot2.traces, snapshot.traces)
# tracemalloc must be tracing memory allocations to take a snapshot
tracemalloc.stop()
with self.assertRaises(RuntimeError) as cm:
tracemalloc.take_snapshot()
self.assertEqual(str(cm.exception),
"the tracemalloc module must be tracing memory "
"allocations to take a snapshot")
def test_snapshot_save_attr(self):
# take a snapshot with a new attribute
snapshot = tracemalloc.take_snapshot()
snapshot.test_attr = "new"
snapshot.dump(support.TESTFN)
self.addCleanup(support.unlink, support.TESTFN)
        # load() should recreate the attribute
snapshot2 = tracemalloc.Snapshot.load(support.TESTFN)
self.assertEqual(snapshot2.test_attr, "new")
def fork_child(self):
if not tracemalloc.is_tracing():
return 2
obj_size = 12345
obj, obj_traceback = allocate_bytes(obj_size)
traceback = tracemalloc.get_object_traceback(obj)
if traceback is None:
return 3
# everything is fine
return 0
@unittest.skipUnless(hasattr(os, 'fork'), 'need os.fork()')
def test_fork(self):
# check that tracemalloc is still working after fork
pid = os.fork()
if not pid:
# child
exitcode = 1
try:
exitcode = self.fork_child()
finally:
os._exit(exitcode)
else:
pid2, status = os.waitpid(pid, 0)
self.assertTrue(os.WIFEXITED(status))
exitcode = os.WEXITSTATUS(status)
self.assertEqual(exitcode, 0)
class TestSnapshot(unittest.TestCase):
maxDiff = 4000
def test_create_snapshot(self):
raw_traces = [(5, (('a.py', 2),))]
with contextlib.ExitStack() as stack:
stack.enter_context(patch.object(tracemalloc, 'is_tracing',
return_value=True))
stack.enter_context(patch.object(tracemalloc, 'get_traceback_limit',
return_value=5))
stack.enter_context(patch.object(tracemalloc, '_get_traces',
return_value=raw_traces))
snapshot = tracemalloc.take_snapshot()
self.assertEqual(snapshot.traceback_limit, 5)
self.assertEqual(len(snapshot.traces), 1)
trace = snapshot.traces[0]
self.assertEqual(trace.size, 5)
self.assertEqual(len(trace.traceback), 1)
self.assertEqual(trace.traceback[0].filename, 'a.py')
self.assertEqual(trace.traceback[0].lineno, 2)
def test_filter_traces(self):
snapshot, snapshot2 = create_snapshots()
filter1 = tracemalloc.Filter(False, "b.py")
filter2 = tracemalloc.Filter(True, "a.py", 2)
filter3 = tracemalloc.Filter(True, "a.py", 5)
original_traces = list(snapshot.traces._traces)
# exclude b.py
snapshot3 = snapshot.filter_traces((filter1,))
self.assertEqual(snapshot3.traces._traces, [
(10, (('a.py', 2), ('b.py', 4))),
(10, (('a.py', 2), ('b.py', 4))),
(10, (('a.py', 2), ('b.py', 4))),
(2, (('a.py', 5), ('b.py', 4))),
(7, (('<unknown>', 0),)),
])
# filter_traces() must not touch the original snapshot
self.assertEqual(snapshot.traces._traces, original_traces)
# only include two lines of a.py
snapshot4 = snapshot3.filter_traces((filter2, filter3))
self.assertEqual(snapshot4.traces._traces, [
(10, (('a.py', 2), ('b.py', 4))),
(10, (('a.py', 2), ('b.py', 4))),
(10, (('a.py', 2), ('b.py', 4))),
(2, (('a.py', 5), ('b.py', 4))),
])
# No filter: just duplicate the snapshot
snapshot5 = snapshot.filter_traces(())
self.assertIsNot(snapshot5, snapshot)
self.assertIsNot(snapshot5.traces, snapshot.traces)
self.assertEqual(snapshot5.traces, snapshot.traces)
self.assertRaises(TypeError, snapshot.filter_traces, filter1)
def test_snapshot_group_by_line(self):
snapshot, snapshot2 = create_snapshots()
tb_0 = traceback_lineno('<unknown>', 0)
tb_a_2 = traceback_lineno('a.py', 2)
tb_a_5 = traceback_lineno('a.py', 5)
tb_b_1 = traceback_lineno('b.py', 1)
tb_c_578 = traceback_lineno('c.py', 578)
# stats per file and line
stats1 = snapshot.statistics('lineno')
self.assertEqual(stats1, [
tracemalloc.Statistic(tb_b_1, 66, 1),
tracemalloc.Statistic(tb_a_2, 30, 3),
tracemalloc.Statistic(tb_0, 7, 1),
tracemalloc.Statistic(tb_a_5, 2, 1),
])
# stats per file and line (2)
stats2 = snapshot2.statistics('lineno')
self.assertEqual(stats2, [
tracemalloc.Statistic(tb_a_5, 5002, 2),
tracemalloc.Statistic(tb_c_578, 400, 1),
tracemalloc.Statistic(tb_a_2, 30, 3),
])
# stats diff per file and line
statistics = snapshot2.compare_to(snapshot, 'lineno')
self.assertEqual(statistics, [
tracemalloc.StatisticDiff(tb_a_5, 5002, 5000, 2, 1),
tracemalloc.StatisticDiff(tb_c_578, 400, 400, 1, 1),
tracemalloc.StatisticDiff(tb_b_1, 0, -66, 0, -1),
tracemalloc.StatisticDiff(tb_0, 0, -7, 0, -1),
tracemalloc.StatisticDiff(tb_a_2, 30, 0, 3, 0),
])
def test_snapshot_group_by_file(self):
snapshot, snapshot2 = create_snapshots()
tb_0 = traceback_filename('<unknown>')
tb_a = traceback_filename('a.py')
tb_b = traceback_filename('b.py')
tb_c = traceback_filename('c.py')
# stats per file
stats1 = snapshot.statistics('filename')
self.assertEqual(stats1, [
tracemalloc.Statistic(tb_b, 66, 1),
tracemalloc.Statistic(tb_a, 32, 4),
tracemalloc.Statistic(tb_0, 7, 1),
])
# stats per file (2)
stats2 = snapshot2.statistics('filename')
self.assertEqual(stats2, [
tracemalloc.Statistic(tb_a, 5032, 5),
tracemalloc.Statistic(tb_c, 400, 1),
])
# stats diff per file
diff = snapshot2.compare_to(snapshot, 'filename')
self.assertEqual(diff, [
tracemalloc.StatisticDiff(tb_a, 5032, 5000, 5, 1),
tracemalloc.StatisticDiff(tb_c, 400, 400, 1, 1),
tracemalloc.StatisticDiff(tb_b, 0, -66, 0, -1),
tracemalloc.StatisticDiff(tb_0, 0, -7, 0, -1),
])
def test_snapshot_group_by_traceback(self):
snapshot, snapshot2 = create_snapshots()
        # stats per traceback
tb1 = traceback(('a.py', 2), ('b.py', 4))
tb2 = traceback(('a.py', 5), ('b.py', 4))
tb3 = traceback(('b.py', 1))
tb4 = traceback(('<unknown>', 0))
stats1 = snapshot.statistics('traceback')
self.assertEqual(stats1, [
tracemalloc.Statistic(tb3, 66, 1),
tracemalloc.Statistic(tb1, 30, 3),
tracemalloc.Statistic(tb4, 7, 1),
tracemalloc.Statistic(tb2, 2, 1),
])
        # stats per traceback (2)
tb5 = traceback(('c.py', 578))
stats2 = snapshot2.statistics('traceback')
self.assertEqual(stats2, [
tracemalloc.Statistic(tb2, 5002, 2),
tracemalloc.Statistic(tb5, 400, 1),
tracemalloc.Statistic(tb1, 30, 3),
])
        # stats diff per traceback
diff = snapshot2.compare_to(snapshot, 'traceback')
self.assertEqual(diff, [
tracemalloc.StatisticDiff(tb2, 5002, 5000, 2, 1),
tracemalloc.StatisticDiff(tb5, 400, 400, 1, 1),
tracemalloc.StatisticDiff(tb3, 0, -66, 0, -1),
tracemalloc.StatisticDiff(tb4, 0, -7, 0, -1),
tracemalloc.StatisticDiff(tb1, 30, 0, 3, 0),
])
self.assertRaises(ValueError,
snapshot.statistics, 'traceback', cumulative=True)
def test_snapshot_group_by_cumulative(self):
snapshot, snapshot2 = create_snapshots()
tb_0 = traceback_filename('<unknown>')
tb_a = traceback_filename('a.py')
tb_b = traceback_filename('b.py')
tb_a_2 = traceback_lineno('a.py', 2)
tb_a_5 = traceback_lineno('a.py', 5)
tb_b_1 = traceback_lineno('b.py', 1)
tb_b_4 = traceback_lineno('b.py', 4)
# per file
stats = snapshot.statistics('filename', True)
self.assertEqual(stats, [
tracemalloc.Statistic(tb_b, 98, 5),
tracemalloc.Statistic(tb_a, 32, 4),
tracemalloc.Statistic(tb_0, 7, 1),
])
# per line
stats = snapshot.statistics('lineno', True)
self.assertEqual(stats, [
tracemalloc.Statistic(tb_b_1, 66, 1),
tracemalloc.Statistic(tb_b_4, 32, 4),
tracemalloc.Statistic(tb_a_2, 30, 3),
tracemalloc.Statistic(tb_0, 7, 1),
tracemalloc.Statistic(tb_a_5, 2, 1),
])
def test_trace_format(self):
snapshot, snapshot2 = create_snapshots()
trace = snapshot.traces[0]
self.assertEqual(str(trace), 'a.py:2: 10 B')
traceback = trace.traceback
self.assertEqual(str(traceback), 'a.py:2')
frame = traceback[0]
self.assertEqual(str(frame), 'a.py:2')
def test_statistic_format(self):
snapshot, snapshot2 = create_snapshots()
stats = snapshot.statistics('lineno')
stat = stats[0]
self.assertEqual(str(stat),
'b.py:1: size=66 B, count=1, average=66 B')
def test_statistic_diff_format(self):
snapshot, snapshot2 = create_snapshots()
stats = snapshot2.compare_to(snapshot, 'lineno')
stat = stats[0]
self.assertEqual(str(stat),
'a.py:5: size=5002 B (+5000 B), count=2 (+1), average=2501 B')
def test_slices(self):
snapshot, snapshot2 = create_snapshots()
self.assertEqual(snapshot.traces[:2],
(snapshot.traces[0], snapshot.traces[1]))
traceback = snapshot.traces[0].traceback
self.assertEqual(traceback[:2],
(traceback[0], traceback[1]))
def test_format_traceback(self):
snapshot, snapshot2 = create_snapshots()
def getline(filename, lineno):
return ' <%s, %s>' % (filename, lineno)
with unittest.mock.patch('tracemalloc.linecache.getline',
side_effect=getline):
tb = snapshot.traces[0].traceback
self.assertEqual(tb.format(),
[' File "a.py", line 2',
' <a.py, 2>',
' File "b.py", line 4',
' <b.py, 4>'])
self.assertEqual(tb.format(limit=1),
[' File "a.py", line 2',
' <a.py, 2>'])
self.assertEqual(tb.format(limit=-1),
[])
class TestFilters(unittest.TestCase):
maxDiff = 2048
def test_filter_attributes(self):
# test default values
f = tracemalloc.Filter(True, "abc")
self.assertEqual(f.inclusive, True)
self.assertEqual(f.filename_pattern, "abc")
self.assertIsNone(f.lineno)
self.assertEqual(f.all_frames, False)
# test custom values
f = tracemalloc.Filter(False, "test.py", 123, True)
self.assertEqual(f.inclusive, False)
self.assertEqual(f.filename_pattern, "test.py")
self.assertEqual(f.lineno, 123)
self.assertEqual(f.all_frames, True)
# parameters passed by keyword
f = tracemalloc.Filter(inclusive=False, filename_pattern="test.py", lineno=123, all_frames=True)
self.assertEqual(f.inclusive, False)
self.assertEqual(f.filename_pattern, "test.py")
self.assertEqual(f.lineno, 123)
self.assertEqual(f.all_frames, True)
# read-only attribute
self.assertRaises(AttributeError, setattr, f, "filename_pattern", "abc")
def test_filter_match(self):
# filter without line number
f = tracemalloc.Filter(True, "abc")
self.assertTrue(f._match_frame("abc", 0))
self.assertTrue(f._match_frame("abc", 5))
self.assertTrue(f._match_frame("abc", 10))
self.assertFalse(f._match_frame("12356", 0))
self.assertFalse(f._match_frame("12356", 5))
self.assertFalse(f._match_frame("12356", 10))
f = tracemalloc.Filter(False, "abc")
self.assertFalse(f._match_frame("abc", 0))
self.assertFalse(f._match_frame("abc", 5))
self.assertFalse(f._match_frame("abc", 10))
self.assertTrue(f._match_frame("12356", 0))
self.assertTrue(f._match_frame("12356", 5))
self.assertTrue(f._match_frame("12356", 10))
# filter with line number > 0
f = tracemalloc.Filter(True, "abc", 5)
self.assertFalse(f._match_frame("abc", 0))
self.assertTrue(f._match_frame("abc", 5))
self.assertFalse(f._match_frame("abc", 10))
self.assertFalse(f._match_frame("12356", 0))
self.assertFalse(f._match_frame("12356", 5))
self.assertFalse(f._match_frame("12356", 10))
f = tracemalloc.Filter(False, "abc", 5)
self.assertTrue(f._match_frame("abc", 0))
self.assertFalse(f._match_frame("abc", 5))
self.assertTrue(f._match_frame("abc", 10))
self.assertTrue(f._match_frame("12356", 0))
self.assertTrue(f._match_frame("12356", 5))
self.assertTrue(f._match_frame("12356", 10))
# filter with line number 0
f = tracemalloc.Filter(True, "abc", 0)
self.assertTrue(f._match_frame("abc", 0))
self.assertFalse(f._match_frame("abc", 5))
self.assertFalse(f._match_frame("abc", 10))
self.assertFalse(f._match_frame("12356", 0))
self.assertFalse(f._match_frame("12356", 5))
self.assertFalse(f._match_frame("12356", 10))
f = tracemalloc.Filter(False, "abc", 0)
self.assertFalse(f._match_frame("abc", 0))
self.assertTrue(f._match_frame("abc", 5))
self.assertTrue(f._match_frame("abc", 10))
self.assertTrue(f._match_frame("12356", 0))
self.assertTrue(f._match_frame("12356", 5))
self.assertTrue(f._match_frame("12356", 10))
def test_filter_match_filename(self):
def fnmatch(inclusive, filename, pattern):
f = tracemalloc.Filter(inclusive, pattern)
return f._match_frame(filename, 0)
self.assertTrue(fnmatch(True, "abc", "abc"))
self.assertFalse(fnmatch(True, "12356", "abc"))
self.assertFalse(fnmatch(True, "<unknown>", "abc"))
self.assertFalse(fnmatch(False, "abc", "abc"))
self.assertTrue(fnmatch(False, "12356", "abc"))
self.assertTrue(fnmatch(False, "<unknown>", "abc"))
def test_filter_match_filename_joker(self):
def fnmatch(filename, pattern):
filter = tracemalloc.Filter(True, pattern)
return filter._match_frame(filename, 0)
# empty string
self.assertFalse(fnmatch('abc', ''))
self.assertFalse(fnmatch('', 'abc'))
self.assertTrue(fnmatch('', ''))
self.assertTrue(fnmatch('', '*'))
# no *
self.assertTrue(fnmatch('abc', 'abc'))
self.assertFalse(fnmatch('abc', 'abcd'))
self.assertFalse(fnmatch('abc', 'def'))
# a*
self.assertTrue(fnmatch('abc', 'a*'))
self.assertTrue(fnmatch('abc', 'abc*'))
self.assertFalse(fnmatch('abc', 'b*'))
self.assertFalse(fnmatch('abc', 'abcd*'))
# a*b
self.assertTrue(fnmatch('abc', 'a*c'))
self.assertTrue(fnmatch('abcdcx', 'a*cx'))
self.assertFalse(fnmatch('abb', 'a*c'))
self.assertFalse(fnmatch('abcdce', 'a*cx'))
# a*b*c
self.assertTrue(fnmatch('abcde', 'a*c*e'))
self.assertTrue(fnmatch('abcbdefeg', 'a*bd*eg'))
self.assertFalse(fnmatch('abcdd', 'a*c*e'))
self.assertFalse(fnmatch('abcbdefef', 'a*bd*eg'))
# replace .pyc and .pyo suffix with .py
self.assertTrue(fnmatch('a.pyc', 'a.py'))
self.assertTrue(fnmatch('a.pyo', 'a.py'))
self.assertTrue(fnmatch('a.py', 'a.pyc'))
self.assertTrue(fnmatch('a.py', 'a.pyo'))
if os.name == 'nt':
# case insensitive
self.assertTrue(fnmatch('aBC', 'ABc'))
self.assertTrue(fnmatch('aBcDe', 'Ab*dE'))
self.assertTrue(fnmatch('a.pyc', 'a.PY'))
self.assertTrue(fnmatch('a.PYO', 'a.py'))
self.assertTrue(fnmatch('a.py', 'a.PYC'))
self.assertTrue(fnmatch('a.PY', 'a.pyo'))
else:
# case sensitive
self.assertFalse(fnmatch('aBC', 'ABc'))
self.assertFalse(fnmatch('aBcDe', 'Ab*dE'))
self.assertFalse(fnmatch('a.pyc', 'a.PY'))
self.assertFalse(fnmatch('a.PYO', 'a.py'))
self.assertFalse(fnmatch('a.py', 'a.PYC'))
self.assertFalse(fnmatch('a.PY', 'a.pyo'))
if os.name == 'nt':
# normalize alternate separator "/" to the standard separator "\"
self.assertTrue(fnmatch(r'a/b', r'a\b'))
self.assertTrue(fnmatch(r'a\b', r'a/b'))
self.assertTrue(fnmatch(r'a/b\c', r'a\b/c'))
self.assertTrue(fnmatch(r'a/b/c', r'a\b\c'))
else:
# there is no alternate separator
self.assertFalse(fnmatch(r'a/b', r'a\b'))
self.assertFalse(fnmatch(r'a\b', r'a/b'))
self.assertFalse(fnmatch(r'a/b\c', r'a\b/c'))
self.assertFalse(fnmatch(r'a/b/c', r'a\b\c'))
def test_filter_match_trace(self):
t1 = (("a.py", 2), ("b.py", 3))
t2 = (("b.py", 4), ("b.py", 5))
t3 = (("c.py", 5), ('<unknown>', 0))
unknown = (('<unknown>', 0),)
f = tracemalloc.Filter(True, "b.py", all_frames=True)
self.assertTrue(f._match_traceback(t1))
self.assertTrue(f._match_traceback(t2))
self.assertFalse(f._match_traceback(t3))
self.assertFalse(f._match_traceback(unknown))
f = tracemalloc.Filter(True, "b.py", all_frames=False)
self.assertFalse(f._match_traceback(t1))
self.assertTrue(f._match_traceback(t2))
self.assertFalse(f._match_traceback(t3))
self.assertFalse(f._match_traceback(unknown))
f = tracemalloc.Filter(False, "b.py", all_frames=True)
self.assertFalse(f._match_traceback(t1))
self.assertFalse(f._match_traceback(t2))
self.assertTrue(f._match_traceback(t3))
self.assertTrue(f._match_traceback(unknown))
f = tracemalloc.Filter(False, "b.py", all_frames=False)
self.assertTrue(f._match_traceback(t1))
self.assertFalse(f._match_traceback(t2))
self.assertTrue(f._match_traceback(t3))
self.assertTrue(f._match_traceback(unknown))
f = tracemalloc.Filter(False, "<unknown>", all_frames=False)
self.assertTrue(f._match_traceback(t1))
self.assertTrue(f._match_traceback(t2))
self.assertTrue(f._match_traceback(t3))
self.assertFalse(f._match_traceback(unknown))
f = tracemalloc.Filter(True, "<unknown>", all_frames=True)
self.assertFalse(f._match_traceback(t1))
self.assertFalse(f._match_traceback(t2))
self.assertTrue(f._match_traceback(t3))
self.assertTrue(f._match_traceback(unknown))
f = tracemalloc.Filter(False, "<unknown>", all_frames=True)
self.assertTrue(f._match_traceback(t1))
self.assertTrue(f._match_traceback(t2))
self.assertFalse(f._match_traceback(t3))
self.assertFalse(f._match_traceback(unknown))
class TestCommandLine(unittest.TestCase):
def test_env_var(self):
# not tracing by default
code = 'import tracemalloc; print(tracemalloc.is_tracing())'
ok, stdout, stderr = assert_python_ok('-c', code)
stdout = stdout.rstrip()
self.assertEqual(stdout, b'False')
# PYTHON* environment variables must be ignored when -E option is
# present
code = 'import tracemalloc; print(tracemalloc.is_tracing())'
ok, stdout, stderr = assert_python_ok('-E', '-c', code, PYTHONTRACEMALLOC='1')
stdout = stdout.rstrip()
self.assertEqual(stdout, b'False')
# tracing at startup
code = 'import tracemalloc; print(tracemalloc.is_tracing())'
ok, stdout, stderr = assert_python_ok('-c', code, PYTHONTRACEMALLOC='1')
stdout = stdout.rstrip()
self.assertEqual(stdout, b'True')
# start and set the number of frames
code = 'import tracemalloc; print(tracemalloc.get_traceback_limit())'
ok, stdout, stderr = assert_python_ok('-c', code, PYTHONTRACEMALLOC='10')
stdout = stdout.rstrip()
self.assertEqual(stdout, b'10')
def test_env_var_invalid(self):
for nframe in (-1, 0, 2**30):
with self.subTest(nframe=nframe):
with support.SuppressCrashReport():
ok, stdout, stderr = assert_python_failure(
'-c', 'pass',
PYTHONTRACEMALLOC=str(nframe))
self.assertIn(b'PYTHONTRACEMALLOC: invalid '
b'number of frames',
stderr)
def test_sys_xoptions(self):
for xoptions, nframe in (
('tracemalloc', 1),
('tracemalloc=1', 1),
('tracemalloc=15', 15),
):
with self.subTest(xoptions=xoptions, nframe=nframe):
code = 'import tracemalloc; print(tracemalloc.get_traceback_limit())'
ok, stdout, stderr = assert_python_ok('-X', xoptions, '-c', code)
stdout = stdout.rstrip()
self.assertEqual(stdout, str(nframe).encode('ascii'))
def test_sys_xoptions_invalid(self):
for nframe in (-1, 0, 2**30):
with self.subTest(nframe=nframe):
with support.SuppressCrashReport():
args = ('-X', 'tracemalloc=%s' % nframe, '-c', 'pass')
ok, stdout, stderr = assert_python_failure(*args)
self.assertIn(b'-X tracemalloc=NFRAME: invalid '
b'number of frames',
stderr)
def test_pymem_alloc0(self):
# Issue #21639: Check that PyMem_Malloc(0) with tracemalloc enabled
# does not crash.
code = 'import _testcapi; _testcapi.test_pymem_alloc0(); 1'
assert_python_ok('-X', 'tracemalloc', '-c', code)
def test_main():
support.run_unittest(
TestTracemallocEnabled,
TestSnapshot,
TestFilters,
TestCommandLine,
)
if __name__ == "__main__":
test_main()
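
# A minimal stand-alone sketch of the API exercised above (illustrative,
# not part of the test suite): trace allocations, take a snapshot, and
# print the top allocation sites grouped by source line.
#
#     import tracemalloc
#
#     tracemalloc.start(5)          # keep up to 5 frames per traceback
#     data = [bytes(1000) for _ in range(100)]
#     snapshot = tracemalloc.take_snapshot()
#     for stat in snapshot.statistics('lineno')[:3]:
#         print(stat)               # e.g. "demo.py:8: size=100 KiB, count=100, ..."
#     tracemalloc.stop()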
|
michael-dev2rights/ansible | refs/heads/ansible-d2r | test/units/module_utils/facts/other/test_ohai.py | 118 | # unit tests for ansible ohai fact collector
# -*- coding: utf-8 -*-
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type
from ansible.compat.tests.mock import Mock, patch
from .. base import BaseFactsTest
from ansible.module_utils.facts.other.ohai import OhaiFactCollector
ohai_json_output = r'''
{
"kernel": {
"name": "Linux",
"release": "4.9.14-200.fc25.x86_64",
"version": "#1 SMP Mon Mar 13 19:26:40 UTC 2017",
"machine": "x86_64",
"processor": "x86_64",
"os": "GNU/Linux",
"modules": {
"binfmt_misc": {
"size": "20480",
"refcount": "1"
},
"veth": {
"size": "16384",
"refcount": "0"
},
"xfs": {
"size": "1200128",
"refcount": "1"
},
"xt_addrtype": {
"size": "16384",
"refcount": "2"
},
"br_netfilter": {
"size": "24576",
"refcount": "0"
},
"dm_thin_pool": {
"size": "65536",
"refcount": "2"
},
"dm_persistent_data": {
"size": "69632",
"refcount": "1"
},
"dm_bio_prison": {
"size": "16384",
"refcount": "1"
},
"libcrc32c": {
"size": "16384",
"refcount": "2"
},
"rfcomm": {
"size": "77824",
"refcount": "14",
"version": "1.11"
},
"fuse": {
"size": "102400",
"refcount": "3"
},
"ccm": {
"size": "20480",
"refcount": "2"
},
"xt_CHECKSUM": {
"size": "16384",
"refcount": "2"
},
"iptable_mangle": {
"size": "16384",
"refcount": "1"
},
"ipt_MASQUERADE": {
"size": "16384",
"refcount": "7"
},
"nf_nat_masquerade_ipv4": {
"size": "16384",
"refcount": "1"
},
"iptable_nat": {
"size": "16384",
"refcount": "1"
},
"nf_nat_ipv4": {
"size": "16384",
"refcount": "1"
},
"nf_nat": {
"size": "28672",
"refcount": "2"
},
"nf_conntrack_ipv4": {
"size": "16384",
"refcount": "4"
},
"nf_defrag_ipv4": {
"size": "16384",
"refcount": "1"
},
"xt_conntrack": {
"size": "16384",
"refcount": "3"
},
"nf_conntrack": {
"size": "106496",
"refcount": "5"
},
"ip6t_REJECT": {
"size": "16384",
"refcount": "2"
},
"nf_reject_ipv6": {
"size": "16384",
"refcount": "1"
},
"tun": {
"size": "28672",
"refcount": "4"
},
"bridge": {
"size": "135168",
"refcount": "1",
"version": "2.3"
},
"stp": {
"size": "16384",
"refcount": "1"
},
"llc": {
"size": "16384",
"refcount": "2"
},
"ebtable_filter": {
"size": "16384",
"refcount": "0"
},
"ebtables": {
"size": "36864",
"refcount": "1"
},
"ip6table_filter": {
"size": "16384",
"refcount": "1"
},
"ip6_tables": {
"size": "28672",
"refcount": "1"
},
"cmac": {
"size": "16384",
"refcount": "3"
},
"uhid": {
"size": "20480",
"refcount": "2"
},
"bnep": {
"size": "20480",
"refcount": "2",
"version": "1.3"
},
"btrfs": {
"size": "1056768",
"refcount": "1"
},
"xor": {
"size": "24576",
"refcount": "1"
},
"raid6_pq": {
"size": "106496",
"refcount": "1"
},
"loop": {
"size": "28672",
"refcount": "6"
},
"arc4": {
"size": "16384",
"refcount": "2"
},
"snd_hda_codec_hdmi": {
"size": "45056",
"refcount": "1"
},
"intel_rapl": {
"size": "20480",
"refcount": "0"
},
"x86_pkg_temp_thermal": {
"size": "16384",
"refcount": "0"
},
"intel_powerclamp": {
"size": "16384",
"refcount": "0"
},
"coretemp": {
"size": "16384",
"refcount": "0"
},
"kvm_intel": {
"size": "192512",
"refcount": "0"
},
"kvm": {
"size": "585728",
"refcount": "1"
},
"irqbypass": {
"size": "16384",
"refcount": "1"
},
"crct10dif_pclmul": {
"size": "16384",
"refcount": "0"
},
"crc32_pclmul": {
"size": "16384",
"refcount": "0"
},
"iTCO_wdt": {
"size": "16384",
"refcount": "0",
"version": "1.11"
},
"ghash_clmulni_intel": {
"size": "16384",
"refcount": "0"
},
"mei_wdt": {
"size": "16384",
"refcount": "0"
},
"iTCO_vendor_support": {
"size": "16384",
"refcount": "1",
"version": "1.04"
},
"iwlmvm": {
"size": "364544",
"refcount": "0"
},
"intel_cstate": {
"size": "16384",
"refcount": "0"
},
"uvcvideo": {
"size": "90112",
"refcount": "0",
"version": "1.1.1"
},
"videobuf2_vmalloc": {
"size": "16384",
"refcount": "1"
},
"intel_uncore": {
"size": "118784",
"refcount": "0"
},
"videobuf2_memops": {
"size": "16384",
"refcount": "1"
},
"videobuf2_v4l2": {
"size": "24576",
"refcount": "1"
},
"videobuf2_core": {
"size": "40960",
"refcount": "2"
},
"intel_rapl_perf": {
"size": "16384",
"refcount": "0"
},
"mac80211": {
"size": "749568",
"refcount": "1"
},
"videodev": {
"size": "172032",
"refcount": "3"
},
"snd_usb_audio": {
"size": "180224",
"refcount": "3"
},
"e1000e": {
"size": "249856",
"refcount": "0",
"version": "3.2.6-k"
}
}
},
"os": "linux",
"os_version": "4.9.14-200.fc25.x86_64",
"lsb": {
"id": "Fedora",
"description": "Fedora release 25 (Twenty Five)",
"release": "25",
"codename": "TwentyFive"
},
"platform": "fedora",
"platform_version": "25",
"platform_family": "fedora",
"packages": {
"ansible": {
"epoch": "0",
"version": "2.2.1.0",
"release": "1.fc25",
"installdate": "1486050042",
"arch": "noarch"
},
"python3": {
"epoch": "0",
"version": "3.5.3",
"release": "3.fc25",
"installdate": "1490025957",
"arch": "x86_64"
},
"kernel": {
"epoch": "0",
"version": "4.9.6",
"release": "200.fc25",
"installdate": "1486047522",
"arch": "x86_64"
},
"glibc": {
"epoch": "0",
"version": "2.24",
"release": "4.fc25",
"installdate": "1483402427",
"arch": "x86_64"
}
},
"chef_packages": {
ohai": {
"version": "13.0.0",
"ohai_root": "/home/some_user/.gem/ruby/gems/ohai-13.0.0/lib/ohai"
}
},
"dmi": {
"dmidecode_version": "3.0"
},
"uptime_seconds": 2509008,
"uptime": "29 days 00 hours 56 minutes 48 seconds",
"idletime_seconds": 19455087,
"idletime": "225 days 04 hours 11 minutes 27 seconds",
"memory": {
"swap": {
"cached": "262436kB",
"total": "8069116kB",
"free": "5154396kB"
},
"hugepages": {
"total": "0",
"free": "0",
"reserved": "0",
"surplus": "0"
},
"total": "16110540kB",
"free": "3825844kB",
"buffers": "377240kB",
"cached": "3710084kB",
"active": "8104320kB",
"inactive": "3192920kB",
"dirty": "812kB",
"writeback": "0kB",
"anon_pages": "7124992kB",
"mapped": "580700kB",
"slab": "622848kB",
"slab_reclaimable": "307300kB",
"slab_unreclaim": "315548kB",
"page_tables": "157572kB",
"nfs_unstable": "0kB",
"bounce": "0kB",
"commit_limit": "16124384kB",
"committed_as": "31345068kB",
"vmalloc_total": "34359738367kB",
"vmalloc_used": "0kB",
"vmalloc_chunk": "0kB",
"hugepage_size": "2048kB"
},
"filesystem": {
"by_device": {
"devtmpfs": {
"kb_size": "8044124",
"kb_used": "0",
"kb_available": "8044124",
"percent_used": "0%",
"total_inodes": "2011031",
"inodes_used": "629",
"inodes_available": "2010402",
"inodes_percent_used": "1%",
"fs_type": "devtmpfs",
"mount_options": [
"rw",
"nosuid",
"seclabel",
"size=8044124k",
"nr_inodes=2011031",
"mode=755"
],
"mounts": [
"/dev"
]
},
"tmpfs": {
"kb_size": "1611052",
"kb_used": "72",
"kb_available": "1610980",
"percent_used": "1%",
"total_inodes": "2013817",
"inodes_used": "36",
"inodes_available": "2013781",
"inodes_percent_used": "1%",
"fs_type": "tmpfs",
"mount_options": [
"rw",
"nosuid",
"nodev",
"relatime",
"seclabel",
"size=1611052k",
"mode=700",
"uid=1000",
"gid=1000"
],
"mounts": [
"/dev/shm",
"/run",
"/sys/fs/cgroup",
"/tmp",
"/run/user/0",
"/run/user/1000"
]
},
"/dev/mapper/fedora_host--186-root": {
"kb_size": "51475068",
"kb_used": "42551284",
"kb_available": "6285960",
"percent_used": "88%",
"total_inodes": "3276800",
"inodes_used": "532908",
"inodes_available": "2743892",
"inodes_percent_used": "17%",
"fs_type": "ext4",
"mount_options": [
"rw",
"relatime",
"seclabel",
"data=ordered"
],
"uuid": "12312331-3449-4a6c-8179-a1feb2bca6ce",
"mounts": [
"/",
"/var/lib/docker/devicemapper"
]
},
"/dev/sda1": {
"kb_size": "487652",
"kb_used": "126628",
"kb_available": "331328",
"percent_used": "28%",
"total_inodes": "128016",
"inodes_used": "405",
"inodes_available": "127611",
"inodes_percent_used": "1%",
"fs_type": "ext4",
"mount_options": [
"rw",
"relatime",
"seclabel",
"data=ordered"
],
"uuid": "12312311-ef40-4691-a3b6-438c3f9bc1c0",
"mounts": [
"/boot"
]
},
"/dev/mapper/fedora_host--186-home": {
"kb_size": "185948124",
"kb_used": "105904724",
"kb_available": "70574680",
"percent_used": "61%",
"total_inodes": "11821056",
"inodes_used": "1266687",
"inodes_available": "10554369",
"inodes_percent_used": "11%",
"fs_type": "ext4",
"mount_options": [
"rw",
"relatime",
"seclabel",
"data=ordered"
],
"uuid": "2d3e4853-fa69-4ccf-8a6a-77b05ab0a42d",
"mounts": [
"/home"
]
},
"/dev/loop0": {
"kb_size": "512000",
"kb_used": "16672",
"kb_available": "429056",
"percent_used": "4%",
"fs_type": "btrfs",
"uuid": "0f031512-ab15-497d-9abd-3a512b4a9390",
"mounts": [
"/var/lib/machines"
]
},
"sysfs": {
"fs_type": "sysfs",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"seclabel"
],
"mounts": [
"/sys"
]
},
"proc": {
"fs_type": "proc",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime"
],
"mounts": [
"/proc"
]
},
"securityfs": {
"fs_type": "securityfs",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime"
],
"mounts": [
"/sys/kernel/security"
]
},
"devpts": {
"fs_type": "devpts",
"mount_options": [
"rw",
"nosuid",
"noexec",
"relatime",
"seclabel",
"gid=5",
"mode=620",
"ptmxmode=000"
],
"mounts": [
"/dev/pts"
]
},
"cgroup": {
"fs_type": "cgroup",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"net_cls",
"net_prio"
],
"mounts": [
"/sys/fs/cgroup/systemd",
"/sys/fs/cgroup/devices",
"/sys/fs/cgroup/cpuset",
"/sys/fs/cgroup/perf_event",
"/sys/fs/cgroup/hugetlb",
"/sys/fs/cgroup/cpu,cpuacct",
"/sys/fs/cgroup/blkio",
"/sys/fs/cgroup/freezer",
"/sys/fs/cgroup/memory",
"/sys/fs/cgroup/pids",
"/sys/fs/cgroup/net_cls,net_prio"
]
},
"pstore": {
"fs_type": "pstore",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"seclabel"
],
"mounts": [
"/sys/fs/pstore"
]
},
"configfs": {
"fs_type": "configfs",
"mount_options": [
"rw",
"relatime"
],
"mounts": [
"/sys/kernel/config"
]
},
"selinuxfs": {
"fs_type": "selinuxfs",
"mount_options": [
"rw",
"relatime"
],
"mounts": [
"/sys/fs/selinux"
]
},
"debugfs": {
"fs_type": "debugfs",
"mount_options": [
"rw",
"relatime",
"seclabel"
],
"mounts": [
"/sys/kernel/debug"
]
},
"hugetlbfs": {
"fs_type": "hugetlbfs",
"mount_options": [
"rw",
"relatime",
"seclabel"
],
"mounts": [
"/dev/hugepages"
]
},
"mqueue": {
"fs_type": "mqueue",
"mount_options": [
"rw",
"relatime",
"seclabel"
],
"mounts": [
"/dev/mqueue"
]
},
"systemd-1": {
"fs_type": "autofs",
"mount_options": [
"rw",
"relatime",
"fd=40",
"pgrp=1",
"timeout=0",
"minproto=5",
"maxproto=5",
"direct",
"pipe_ino=17610"
],
"mounts": [
"/proc/sys/fs/binfmt_misc"
]
},
"/var/lib/machines.raw": {
"fs_type": "btrfs",
"mount_options": [
"rw",
"relatime",
"seclabel",
"space_cache",
"subvolid=5",
"subvol=/"
],
"mounts": [
"/var/lib/machines"
]
},
"fusectl": {
"fs_type": "fusectl",
"mount_options": [
"rw",
"relatime"
],
"mounts": [
"/sys/fs/fuse/connections"
]
},
"gvfsd-fuse": {
"fs_type": "fuse.gvfsd-fuse",
"mount_options": [
"rw",
"nosuid",
"nodev",
"relatime",
"user_id=1000",
"group_id=1000"
],
"mounts": [
"/run/user/1000/gvfs"
]
},
"binfmt_misc": {
"fs_type": "binfmt_misc",
"mount_options": [
"rw",
"relatime"
],
"mounts": [
"/proc/sys/fs/binfmt_misc"
]
},
"/dev/mapper/docker-253:1-1180487-0868fce108cd2524a4823aad8d665cca018ead39550ca088c440ab05deec13f8": {
"fs_type": "xfs",
"mount_options": [
"rw",
"relatime",
"context=\"system_u:object_r:container_file_t:s0:c523",
"c681\"",
"nouuid",
"attr2",
"inode64",
"logbsize=64k",
"sunit=128",
"swidth=128",
"noquota"
],
"uuid": "00e2aa25-20d8-4ad7-b3a5-c501f2f4c123",
"mounts": [
"/var/lib/docker/devicemapper/mnt/0868fce108cd2524a4823aad8d665cca018ead39550ca088c440ab05deec13f8"
]
},
"shm": {
"fs_type": "tmpfs",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"context=\"system_u:object_r:container_file_t:s0:c523",
"c681\"",
"size=65536k"
],
"mounts": [
"/var/lib/docker/containers/426e513ed508a451e3f70440eed040761f81529e4bc4240e7522d331f3f3bc12/shm"
]
},
"nsfs": {
"fs_type": "nsfs",
"mount_options": [
"rw"
],
"mounts": [
"/run/docker/netns/1ce89fd79f3d"
]
},
"tracefs": {
"fs_type": "tracefs",
"mount_options": [
"rw",
"relatime"
],
"mounts": [
"/sys/kernel/debug/tracing"
]
},
"/dev/loop1": {
"fs_type": "xfs",
"uuid": "00e2aa25-20d8-4ad7-b3a5-c501f2f4c123",
"mounts": [
]
},
"/dev/mapper/docker-253:1-1180487-pool": {
"mounts": [
]
},
"/dev/sr0": {
"mounts": [
]
},
"/dev/loop2": {
"mounts": [
]
},
"/dev/sda": {
"mounts": [
]
},
"/dev/sda2": {
"fs_type": "LVM2_member",
"uuid": "66Ojcd-ULtu-1cZa-Tywo-mx0d-RF4O-ysA9jK",
"mounts": [
]
},
"/dev/mapper/fedora_host--186-swap": {
"fs_type": "swap",
"uuid": "eae6059d-2fbe-4d1c-920d-a80bbeb1ac6d",
"mounts": [
]
}
},
"by_mountpoint": {
"/dev": {
"kb_size": "8044124",
"kb_used": "0",
"kb_available": "8044124",
"percent_used": "0%",
"total_inodes": "2011031",
"inodes_used": "629",
"inodes_available": "2010402",
"inodes_percent_used": "1%",
"fs_type": "devtmpfs",
"mount_options": [
"rw",
"nosuid",
"seclabel",
"size=8044124k",
"nr_inodes=2011031",
"mode=755"
],
"devices": [
"devtmpfs"
]
},
"/dev/shm": {
"kb_size": "8055268",
"kb_used": "96036",
"kb_available": "7959232",
"percent_used": "2%",
"total_inodes": "2013817",
"inodes_used": "217",
"inodes_available": "2013600",
"inodes_percent_used": "1%",
"fs_type": "tmpfs",
"mount_options": [
"rw",
"nosuid",
"nodev",
"seclabel"
],
"devices": [
"tmpfs"
]
},
"/run": {
"kb_size": "8055268",
"kb_used": "2280",
"kb_available": "8052988",
"percent_used": "1%",
"total_inodes": "2013817",
"inodes_used": "1070",
"inodes_available": "2012747",
"inodes_percent_used": "1%",
"fs_type": "tmpfs",
"mount_options": [
"rw",
"nosuid",
"nodev",
"seclabel",
"mode=755"
],
"devices": [
"tmpfs"
]
},
"/sys/fs/cgroup": {
"kb_size": "8055268",
"kb_used": "0",
"kb_available": "8055268",
"percent_used": "0%",
"total_inodes": "2013817",
"inodes_used": "16",
"inodes_available": "2013801",
"inodes_percent_used": "1%",
"fs_type": "tmpfs",
"mount_options": [
"ro",
"nosuid",
"nodev",
"noexec",
"seclabel",
"mode=755"
],
"devices": [
"tmpfs"
]
},
"/": {
"kb_size": "51475068",
"kb_used": "42551284",
"kb_available": "6285960",
"percent_used": "88%",
"total_inodes": "3276800",
"inodes_used": "532908",
"inodes_available": "2743892",
"inodes_percent_used": "17%",
"fs_type": "ext4",
"mount_options": [
"rw",
"relatime",
"seclabel",
"data=ordered"
],
"uuid": "d34cf5e3-3449-4a6c-8179-a1feb2bca6ce",
"devices": [
"/dev/mapper/fedora_host--186-root"
]
},
"/tmp": {
"kb_size": "8055268",
"kb_used": "848396",
"kb_available": "7206872",
"percent_used": "11%",
"total_inodes": "2013817",
"inodes_used": "1353",
"inodes_available": "2012464",
"inodes_percent_used": "1%",
"fs_type": "tmpfs",
"mount_options": [
"rw",
"nosuid",
"nodev",
"seclabel"
],
"devices": [
"tmpfs"
]
},
"/boot": {
"kb_size": "487652",
"kb_used": "126628",
"kb_available": "331328",
"percent_used": "28%",
"total_inodes": "128016",
"inodes_used": "405",
"inodes_available": "127611",
"inodes_percent_used": "1%",
"fs_type": "ext4",
"mount_options": [
"rw",
"relatime",
"seclabel",
"data=ordered"
],
"uuid": "32caaec3-ef40-4691-a3b6-438c3f9bc1c0",
"devices": [
"/dev/sda1"
]
},
"/home": {
"kb_size": "185948124",
"kb_used": "105904724",
"kb_available": "70574680",
"percent_used": "61%",
"total_inodes": "11821056",
"inodes_used": "1266687",
"inodes_available": "10554369",
"inodes_percent_used": "11%",
"fs_type": "ext4",
"mount_options": [
"rw",
"relatime",
"seclabel",
"data=ordered"
],
"uuid": "2d3e4853-fa69-4ccf-8a6a-77b05ab0a42d",
"devices": [
"/dev/mapper/fedora_host--186-home"
]
},
"/var/lib/machines": {
"kb_size": "512000",
"kb_used": "16672",
"kb_available": "429056",
"percent_used": "4%",
"fs_type": "btrfs",
"uuid": "0f031512-ab15-497d-9abd-3a512b4a9390",
"devices": [
"/dev/loop0",
"/var/lib/machines.raw"
],
"mount_options": [
"rw",
"relatime",
"seclabel",
"space_cache",
"subvolid=5",
"subvol=/"
]
},
"/run/user/0": {
"kb_size": "1611052",
"kb_used": "0",
"kb_available": "1611052",
"percent_used": "0%",
"total_inodes": "2013817",
"inodes_used": "7",
"inodes_available": "2013810",
"inodes_percent_used": "1%",
"fs_type": "tmpfs",
"mount_options": [
"rw",
"nosuid",
"nodev",
"relatime",
"seclabel",
"size=1611052k",
"mode=700"
],
"devices": [
"tmpfs"
]
},
"/run/user/1000": {
"kb_size": "1611052",
"kb_used": "72",
"kb_available": "1610980",
"percent_used": "1%",
"total_inodes": "2013817",
"inodes_used": "36",
"inodes_available": "2013781",
"inodes_percent_used": "1%",
"fs_type": "tmpfs",
"mount_options": [
"rw",
"nosuid",
"nodev",
"relatime",
"seclabel",
"size=1611052k",
"mode=700",
"uid=1000",
"gid=1000"
],
"devices": [
"tmpfs"
]
},
"/sys": {
"fs_type": "sysfs",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"seclabel"
],
"devices": [
"sysfs"
]
},
"/proc": {
"fs_type": "proc",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime"
],
"devices": [
"proc"
]
},
"/sys/kernel/security": {
"fs_type": "securityfs",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime"
],
"devices": [
"securityfs"
]
},
"/dev/pts": {
"fs_type": "devpts",
"mount_options": [
"rw",
"nosuid",
"noexec",
"relatime",
"seclabel",
"gid=5",
"mode=620",
"ptmxmode=000"
],
"devices": [
"devpts"
]
},
"/sys/fs/cgroup/systemd": {
"fs_type": "cgroup",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"xattr",
"release_agent=/usr/lib/systemd/systemd-cgroups-agent",
"name=systemd"
],
"devices": [
"cgroup"
]
},
"/sys/fs/pstore": {
"fs_type": "pstore",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"seclabel"
],
"devices": [
"pstore"
]
},
"/sys/fs/cgroup/devices": {
"fs_type": "cgroup",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"devices"
],
"devices": [
"cgroup"
]
},
"/sys/fs/cgroup/cpuset": {
"fs_type": "cgroup",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"cpuset"
],
"devices": [
"cgroup"
]
},
"/sys/fs/cgroup/perf_event": {
"fs_type": "cgroup",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"perf_event"
],
"devices": [
"cgroup"
]
},
"/sys/fs/cgroup/hugetlb": {
"fs_type": "cgroup",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"hugetlb"
],
"devices": [
"cgroup"
]
},
"/sys/fs/cgroup/cpu,cpuacct": {
"fs_type": "cgroup",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"cpu",
"cpuacct"
],
"devices": [
"cgroup"
]
},
"/sys/fs/cgroup/blkio": {
"fs_type": "cgroup",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"blkio"
],
"devices": [
"cgroup"
]
},
"/sys/fs/cgroup/freezer": {
"fs_type": "cgroup",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"freezer"
],
"devices": [
"cgroup"
]
},
"/sys/fs/cgroup/memory": {
"fs_type": "cgroup",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"memory"
],
"devices": [
"cgroup"
]
},
"/sys/fs/cgroup/pids": {
"fs_type": "cgroup",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"pids"
],
"devices": [
"cgroup"
]
},
"/sys/fs/cgroup/net_cls,net_prio": {
"fs_type": "cgroup",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"net_cls",
"net_prio"
],
"devices": [
"cgroup"
]
},
"/sys/kernel/config": {
"fs_type": "configfs",
"mount_options": [
"rw",
"relatime"
],
"devices": [
"configfs"
]
},
"/sys/fs/selinux": {
"fs_type": "selinuxfs",
"mount_options": [
"rw",
"relatime"
],
"devices": [
"selinuxfs"
]
},
"/sys/kernel/debug": {
"fs_type": "debugfs",
"mount_options": [
"rw",
"relatime",
"seclabel"
],
"devices": [
"debugfs"
]
},
"/dev/hugepages": {
"fs_type": "hugetlbfs",
"mount_options": [
"rw",
"relatime",
"seclabel"
],
"devices": [
"hugetlbfs"
]
},
"/dev/mqueue": {
"fs_type": "mqueue",
"mount_options": [
"rw",
"relatime",
"seclabel"
],
"devices": [
"mqueue"
]
},
"/proc/sys/fs/binfmt_misc": {
"fs_type": "binfmt_misc",
"mount_options": [
"rw",
"relatime"
],
"devices": [
"systemd-1",
"binfmt_misc"
]
},
"/sys/fs/fuse/connections": {
"fs_type": "fusectl",
"mount_options": [
"rw",
"relatime"
],
"devices": [
"fusectl"
]
},
"/run/user/1000/gvfs": {
"fs_type": "fuse.gvfsd-fuse",
"mount_options": [
"rw",
"nosuid",
"nodev",
"relatime",
"user_id=1000",
"group_id=1000"
],
"devices": [
"gvfsd-fuse"
]
},
"/var/lib/docker/devicemapper": {
"fs_type": "ext4",
"mount_options": [
"rw",
"relatime",
"seclabel",
"data=ordered"
],
"uuid": "d34cf5e3-3449-4a6c-8179-a1feb2bca6ce",
"devices": [
"/dev/mapper/fedora_host--186-root"
]
},
"/var/lib/docker/devicemapper/mnt/0868fce108cd2524a4823aad8d665cca018ead39550ca088c440ab05deec13f8": {
"fs_type": "xfs",
"mount_options": [
"rw",
"relatime",
"context=\"system_u:object_r:container_file_t:s0:c523",
"c681\"",
"nouuid",
"attr2",
"inode64",
"logbsize=64k",
"sunit=128",
"swidth=128",
"noquota"
],
"uuid": "00e2aa25-20d8-4ad7-b3a5-c501f2f4c123",
"devices": [
"/dev/mapper/docker-253:1-1180487-0868fce108cd2524a4823aad8d665cca018ead39550ca088c440ab05deec13f8"
]
},
"/var/lib/docker/containers/426e513ed508a451e3f70440eed040761f81529e4bc4240e7522d331f3f3bc12/shm": {
"fs_type": "tmpfs",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"context=\"system_u:object_r:container_file_t:s0:c523",
"c681\"",
"size=65536k"
],
"devices": [
"shm"
]
},
"/run/docker/netns/1ce89fd79f3d": {
"fs_type": "nsfs",
"mount_options": [
"rw"
],
"devices": [
"nsfs"
]
},
"/sys/kernel/debug/tracing": {
"fs_type": "tracefs",
"mount_options": [
"rw",
"relatime"
],
"devices": [
"tracefs"
]
}
},
"by_pair": {
"devtmpfs,/dev": {
"device": "devtmpfs",
"kb_size": "8044124",
"kb_used": "0",
"kb_available": "8044124",
"percent_used": "0%",
"mount": "/dev",
"total_inodes": "2011031",
"inodes_used": "629",
"inodes_available": "2010402",
"inodes_percent_used": "1%",
"fs_type": "devtmpfs",
"mount_options": [
"rw",
"nosuid",
"seclabel",
"size=8044124k",
"nr_inodes=2011031",
"mode=755"
]
},
"tmpfs,/dev/shm": {
"device": "tmpfs",
"kb_size": "8055268",
"kb_used": "96036",
"kb_available": "7959232",
"percent_used": "2%",
"mount": "/dev/shm",
"total_inodes": "2013817",
"inodes_used": "217",
"inodes_available": "2013600",
"inodes_percent_used": "1%",
"fs_type": "tmpfs",
"mount_options": [
"rw",
"nosuid",
"nodev",
"seclabel"
]
},
"tmpfs,/run": {
"device": "tmpfs",
"kb_size": "8055268",
"kb_used": "2280",
"kb_available": "8052988",
"percent_used": "1%",
"mount": "/run",
"total_inodes": "2013817",
"inodes_used": "1070",
"inodes_available": "2012747",
"inodes_percent_used": "1%",
"fs_type": "tmpfs",
"mount_options": [
"rw",
"nosuid",
"nodev",
"seclabel",
"mode=755"
]
},
"tmpfs,/sys/fs/cgroup": {
"device": "tmpfs",
"kb_size": "8055268",
"kb_used": "0",
"kb_available": "8055268",
"percent_used": "0%",
"mount": "/sys/fs/cgroup",
"total_inodes": "2013817",
"inodes_used": "16",
"inodes_available": "2013801",
"inodes_percent_used": "1%",
"fs_type": "tmpfs",
"mount_options": [
"ro",
"nosuid",
"nodev",
"noexec",
"seclabel",
"mode=755"
]
},
"/dev/mapper/fedora_host--186-root,/": {
"device": "/dev/mapper/fedora_host--186-root",
"kb_size": "51475068",
"kb_used": "42551284",
"kb_available": "6285960",
"percent_used": "88%",
"mount": "/",
"total_inodes": "3276800",
"inodes_used": "532908",
"inodes_available": "2743892",
"inodes_percent_used": "17%",
"fs_type": "ext4",
"mount_options": [
"rw",
"relatime",
"seclabel",
"data=ordered"
],
"uuid": "d34cf5e3-3449-4a6c-8179-a1feb2bca6ce"
},
"tmpfs,/tmp": {
"device": "tmpfs",
"kb_size": "8055268",
"kb_used": "848396",
"kb_available": "7206872",
"percent_used": "11%",
"mount": "/tmp",
"total_inodes": "2013817",
"inodes_used": "1353",
"inodes_available": "2012464",
"inodes_percent_used": "1%",
"fs_type": "tmpfs",
"mount_options": [
"rw",
"nosuid",
"nodev",
"seclabel"
]
},
"/dev/sda1,/boot": {
"device": "/dev/sda1",
"kb_size": "487652",
"kb_used": "126628",
"kb_available": "331328",
"percent_used": "28%",
"mount": "/boot",
"total_inodes": "128016",
"inodes_used": "405",
"inodes_available": "127611",
"inodes_percent_used": "1%",
"fs_type": "ext4",
"mount_options": [
"rw",
"relatime",
"seclabel",
"data=ordered"
],
"uuid": "32caaec3-ef40-4691-a3b6-438c3f9bc1c0"
},
"/dev/mapper/fedora_host--186-home,/home": {
"device": "/dev/mapper/fedora_host--186-home",
"kb_size": "185948124",
"kb_used": "105904724",
"kb_available": "70574680",
"percent_used": "61%",
"mount": "/home",
"total_inodes": "11821056",
"inodes_used": "1266687",
"inodes_available": "10554369",
"inodes_percent_used": "11%",
"fs_type": "ext4",
"mount_options": [
"rw",
"relatime",
"seclabel",
"data=ordered"
],
"uuid": "2d3e4853-fa69-4ccf-8a6a-77b05ab0a42d"
},
"/dev/loop0,/var/lib/machines": {
"device": "/dev/loop0",
"kb_size": "512000",
"kb_used": "16672",
"kb_available": "429056",
"percent_used": "4%",
"mount": "/var/lib/machines",
"fs_type": "btrfs",
"uuid": "0f031512-ab15-497d-9abd-3a512b4a9390"
},
"tmpfs,/run/user/0": {
"device": "tmpfs",
"kb_size": "1611052",
"kb_used": "0",
"kb_available": "1611052",
"percent_used": "0%",
"mount": "/run/user/0",
"total_inodes": "2013817",
"inodes_used": "7",
"inodes_available": "2013810",
"inodes_percent_used": "1%",
"fs_type": "tmpfs",
"mount_options": [
"rw",
"nosuid",
"nodev",
"relatime",
"seclabel",
"size=1611052k",
"mode=700"
]
},
"tmpfs,/run/user/1000": {
"device": "tmpfs",
"kb_size": "1611052",
"kb_used": "72",
"kb_available": "1610980",
"percent_used": "1%",
"mount": "/run/user/1000",
"total_inodes": "2013817",
"inodes_used": "36",
"inodes_available": "2013781",
"inodes_percent_used": "1%",
"fs_type": "tmpfs",
"mount_options": [
"rw",
"nosuid",
"nodev",
"relatime",
"seclabel",
"size=1611052k",
"mode=700",
"uid=1000",
"gid=1000"
]
},
"sysfs,/sys": {
"device": "sysfs",
"mount": "/sys",
"fs_type": "sysfs",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"seclabel"
]
},
"proc,/proc": {
"device": "proc",
"mount": "/proc",
"fs_type": "proc",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime"
]
},
"securityfs,/sys/kernel/security": {
"device": "securityfs",
"mount": "/sys/kernel/security",
"fs_type": "securityfs",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime"
]
},
"devpts,/dev/pts": {
"device": "devpts",
"mount": "/dev/pts",
"fs_type": "devpts",
"mount_options": [
"rw",
"nosuid",
"noexec",
"relatime",
"seclabel",
"gid=5",
"mode=620",
"ptmxmode=000"
]
},
"cgroup,/sys/fs/cgroup/systemd": {
"device": "cgroup",
"mount": "/sys/fs/cgroup/systemd",
"fs_type": "cgroup",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"xattr",
"release_agent=/usr/lib/systemd/systemd-cgroups-agent",
"name=systemd"
]
},
"pstore,/sys/fs/pstore": {
"device": "pstore",
"mount": "/sys/fs/pstore",
"fs_type": "pstore",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"seclabel"
]
},
"cgroup,/sys/fs/cgroup/devices": {
"device": "cgroup",
"mount": "/sys/fs/cgroup/devices",
"fs_type": "cgroup",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"devices"
]
},
"cgroup,/sys/fs/cgroup/cpuset": {
"device": "cgroup",
"mount": "/sys/fs/cgroup/cpuset",
"fs_type": "cgroup",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"cpuset"
]
},
"cgroup,/sys/fs/cgroup/perf_event": {
"device": "cgroup",
"mount": "/sys/fs/cgroup/perf_event",
"fs_type": "cgroup",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"perf_event"
]
},
"cgroup,/sys/fs/cgroup/hugetlb": {
"device": "cgroup",
"mount": "/sys/fs/cgroup/hugetlb",
"fs_type": "cgroup",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"hugetlb"
]
},
"cgroup,/sys/fs/cgroup/cpu,cpuacct": {
"device": "cgroup",
"mount": "/sys/fs/cgroup/cpu,cpuacct",
"fs_type": "cgroup",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"cpu",
"cpuacct"
]
},
"cgroup,/sys/fs/cgroup/blkio": {
"device": "cgroup",
"mount": "/sys/fs/cgroup/blkio",
"fs_type": "cgroup",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"blkio"
]
},
"cgroup,/sys/fs/cgroup/freezer": {
"device": "cgroup",
"mount": "/sys/fs/cgroup/freezer",
"fs_type": "cgroup",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"freezer"
]
},
"cgroup,/sys/fs/cgroup/memory": {
"device": "cgroup",
"mount": "/sys/fs/cgroup/memory",
"fs_type": "cgroup",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"memory"
]
},
"cgroup,/sys/fs/cgroup/pids": {
"device": "cgroup",
"mount": "/sys/fs/cgroup/pids",
"fs_type": "cgroup",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"pids"
]
},
"cgroup,/sys/fs/cgroup/net_cls,net_prio": {
"device": "cgroup",
"mount": "/sys/fs/cgroup/net_cls,net_prio",
"fs_type": "cgroup",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"net_cls",
"net_prio"
]
},
"configfs,/sys/kernel/config": {
"device": "configfs",
"mount": "/sys/kernel/config",
"fs_type": "configfs",
"mount_options": [
"rw",
"relatime"
]
},
"selinuxfs,/sys/fs/selinux": {
"device": "selinuxfs",
"mount": "/sys/fs/selinux",
"fs_type": "selinuxfs",
"mount_options": [
"rw",
"relatime"
]
},
"debugfs,/sys/kernel/debug": {
"device": "debugfs",
"mount": "/sys/kernel/debug",
"fs_type": "debugfs",
"mount_options": [
"rw",
"relatime",
"seclabel"
]
},
"hugetlbfs,/dev/hugepages": {
"device": "hugetlbfs",
"mount": "/dev/hugepages",
"fs_type": "hugetlbfs",
"mount_options": [
"rw",
"relatime",
"seclabel"
]
},
"mqueue,/dev/mqueue": {
"device": "mqueue",
"mount": "/dev/mqueue",
"fs_type": "mqueue",
"mount_options": [
"rw",
"relatime",
"seclabel"
]
},
"systemd-1,/proc/sys/fs/binfmt_misc": {
"device": "systemd-1",
"mount": "/proc/sys/fs/binfmt_misc",
"fs_type": "autofs",
"mount_options": [
"rw",
"relatime",
"fd=40",
"pgrp=1",
"timeout=0",
"minproto=5",
"maxproto=5",
"direct",
"pipe_ino=17610"
]
},
"/var/lib/machines.raw,/var/lib/machines": {
"device": "/var/lib/machines.raw",
"mount": "/var/lib/machines",
"fs_type": "btrfs",
"mount_options": [
"rw",
"relatime",
"seclabel",
"space_cache",
"subvolid=5",
"subvol=/"
]
},
"fusectl,/sys/fs/fuse/connections": {
"device": "fusectl",
"mount": "/sys/fs/fuse/connections",
"fs_type": "fusectl",
"mount_options": [
"rw",
"relatime"
]
},
"gvfsd-fuse,/run/user/1000/gvfs": {
"device": "gvfsd-fuse",
"mount": "/run/user/1000/gvfs",
"fs_type": "fuse.gvfsd-fuse",
"mount_options": [
"rw",
"nosuid",
"nodev",
"relatime",
"user_id=1000",
"group_id=1000"
]
},
"/dev/mapper/fedora_host--186-root,/var/lib/docker/devicemapper": {
"device": "/dev/mapper/fedora_host--186-root",
"mount": "/var/lib/docker/devicemapper",
"fs_type": "ext4",
"mount_options": [
"rw",
"relatime",
"seclabel",
"data=ordered"
],
"uuid": "d34cf5e3-3449-4a6c-8179-a1feb2bca6ce"
},
"binfmt_misc,/proc/sys/fs/binfmt_misc": {
"device": "binfmt_misc",
"mount": "/proc/sys/fs/binfmt_misc",
"fs_type": "binfmt_misc",
"mount_options": [
"rw",
"relatime"
]
},
"/dev/mapper/docker-253:1-1180487-0868fce108cd2524a4823aad8d665cca018ead39550ca088c440ab05deec13f8,/var/lib/docker/devicemapper/mnt/0868fce108cd2524a4823aad8d665cca018ead39550ca088c440ab05deec13f8": {
"device": "/dev/mapper/docker-253:1-1180487-0868fce108cd2524a4823aad8d665cca018ead39550ca088c440ab05deec13f8",
"mount": "/var/lib/docker/devicemapper/mnt/0868fce108cd2524a4823aad8d665cca018ead39550ca088c440ab05deec13f8",
"fs_type": "xfs",
"mount_options": [
"rw",
"relatime",
"context=\"system_u:object_r:container_file_t:s0:c523",
"c681\"",
"nouuid",
"attr2",
"inode64",
"logbsize=64k",
"sunit=128",
"swidth=128",
"noquota"
],
"uuid": "00e2aa25-20d8-4ad7-b3a5-c501f2f4c123"
},
"shm,/var/lib/docker/containers/426e513ed508a451e3f70440eed040761f81529e4bc4240e7522d331f3f3bc12/shm": {
"device": "shm",
"mount": "/var/lib/docker/containers/426e513ed508a451e3f70440eed040761f81529e4bc4240e7522d331f3f3bc12/shm",
"fs_type": "tmpfs",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"context=\"system_u:object_r:container_file_t:s0:c523",
"c681\"",
"size=65536k"
]
},
"nsfs,/run/docker/netns/1ce89fd79f3d": {
"device": "nsfs",
"mount": "/run/docker/netns/1ce89fd79f3d",
"fs_type": "nsfs",
"mount_options": [
"rw"
]
},
"tracefs,/sys/kernel/debug/tracing": {
"device": "tracefs",
"mount": "/sys/kernel/debug/tracing",
"fs_type": "tracefs",
"mount_options": [
"rw",
"relatime"
]
},
"/dev/loop1,": {
"device": "/dev/loop1",
"fs_type": "xfs",
"uuid": "00e2aa25-20d8-4ad7-b3a5-c501f2f4c123"
},
"/dev/mapper/docker-253:1-1180487-pool,": {
"device": "/dev/mapper/docker-253:1-1180487-pool"
},
"/dev/sr0,": {
"device": "/dev/sr0"
},
"/dev/loop2,": {
"device": "/dev/loop2"
},
"/dev/sda,": {
"device": "/dev/sda"
},
"/dev/sda2,": {
"device": "/dev/sda2",
"fs_type": "LVM2_member",
"uuid": "66Ojcd-ULtu-1cZa-Tywo-mx0d-RF4O-ysA9jK"
},
"/dev/mapper/fedora_host--186-swap,": {
"device": "/dev/mapper/fedora_host--186-swap",
"fs_type": "swap",
"uuid": "eae6059d-2fbe-4d1c-920d-a80bbeb1ac6d"
}
}
},
"filesystem2": {
"by_device": {
"devtmpfs": {
"kb_size": "8044124",
"kb_used": "0",
"kb_available": "8044124",
"percent_used": "0%",
"total_inodes": "2011031",
"inodes_used": "629",
"inodes_available": "2010402",
"inodes_percent_used": "1%",
"fs_type": "devtmpfs",
"mount_options": [
"rw",
"nosuid",
"seclabel",
"size=8044124k",
"nr_inodes=2011031",
"mode=755"
],
"mounts": [
"/dev"
]
},
"tmpfs": {
"kb_size": "1611052",
"kb_used": "72",
"kb_available": "1610980",
"percent_used": "1%",
"total_inodes": "2013817",
"inodes_used": "36",
"inodes_available": "2013781",
"inodes_percent_used": "1%",
"fs_type": "tmpfs",
"mount_options": [
"rw",
"nosuid",
"nodev",
"relatime",
"seclabel",
"size=1611052k",
"mode=700",
"uid=1000",
"gid=1000"
],
"mounts": [
"/dev/shm",
"/run",
"/sys/fs/cgroup",
"/tmp",
"/run/user/0",
"/run/user/1000"
]
},
"/dev/mapper/fedora_host--186-root": {
"kb_size": "51475068",
"kb_used": "42551284",
"kb_available": "6285960",
"percent_used": "88%",
"total_inodes": "3276800",
"inodes_used": "532908",
"inodes_available": "2743892",
"inodes_percent_used": "17%",
"fs_type": "ext4",
"mount_options": [
"rw",
"relatime",
"seclabel",
"data=ordered"
],
"uuid": "d34cf5e3-3449-4a6c-8179-a1feb2bca6ce",
"mounts": [
"/",
"/var/lib/docker/devicemapper"
]
},
"/dev/sda1": {
"kb_size": "487652",
"kb_used": "126628",
"kb_available": "331328",
"percent_used": "28%",
"total_inodes": "128016",
"inodes_used": "405",
"inodes_available": "127611",
"inodes_percent_used": "1%",
"fs_type": "ext4",
"mount_options": [
"rw",
"relatime",
"seclabel",
"data=ordered"
],
"uuid": "32caaec3-ef40-4691-a3b6-438c3f9bc1c0",
"mounts": [
"/boot"
]
},
"/dev/mapper/fedora_host--186-home": {
"kb_size": "185948124",
"kb_used": "105904724",
"kb_available": "70574680",
"percent_used": "61%",
"total_inodes": "11821056",
"inodes_used": "1266687",
"inodes_available": "10554369",
"inodes_percent_used": "11%",
"fs_type": "ext4",
"mount_options": [
"rw",
"relatime",
"seclabel",
"data=ordered"
],
"uuid": "2d3e4853-fa69-4ccf-8a6a-77b05ab0a42d",
"mounts": [
"/home"
]
},
"/dev/loop0": {
"kb_size": "512000",
"kb_used": "16672",
"kb_available": "429056",
"percent_used": "4%",
"fs_type": "btrfs",
"uuid": "0f031512-ab15-497d-9abd-3a512b4a9390",
"mounts": [
"/var/lib/machines"
]
},
"sysfs": {
"fs_type": "sysfs",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"seclabel"
],
"mounts": [
"/sys"
]
},
"proc": {
"fs_type": "proc",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime"
],
"mounts": [
"/proc"
]
},
"securityfs": {
"fs_type": "securityfs",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime"
],
"mounts": [
"/sys/kernel/security"
]
},
"devpts": {
"fs_type": "devpts",
"mount_options": [
"rw",
"nosuid",
"noexec",
"relatime",
"seclabel",
"gid=5",
"mode=620",
"ptmxmode=000"
],
"mounts": [
"/dev/pts"
]
},
"cgroup": {
"fs_type": "cgroup",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"net_cls",
"net_prio"
],
"mounts": [
"/sys/fs/cgroup/systemd",
"/sys/fs/cgroup/devices",
"/sys/fs/cgroup/cpuset",
"/sys/fs/cgroup/perf_event",
"/sys/fs/cgroup/hugetlb",
"/sys/fs/cgroup/cpu,cpuacct",
"/sys/fs/cgroup/blkio",
"/sys/fs/cgroup/freezer",
"/sys/fs/cgroup/memory",
"/sys/fs/cgroup/pids",
"/sys/fs/cgroup/net_cls,net_prio"
]
},
"pstore": {
"fs_type": "pstore",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"seclabel"
],
"mounts": [
"/sys/fs/pstore"
]
},
"configfs": {
"fs_type": "configfs",
"mount_options": [
"rw",
"relatime"
],
"mounts": [
"/sys/kernel/config"
]
},
"selinuxfs": {
"fs_type": "selinuxfs",
"mount_options": [
"rw",
"relatime"
],
"mounts": [
"/sys/fs/selinux"
]
},
"debugfs": {
"fs_type": "debugfs",
"mount_options": [
"rw",
"relatime",
"seclabel"
],
"mounts": [
"/sys/kernel/debug"
]
},
"hugetlbfs": {
"fs_type": "hugetlbfs",
"mount_options": [
"rw",
"relatime",
"seclabel"
],
"mounts": [
"/dev/hugepages"
]
},
"mqueue": {
"fs_type": "mqueue",
"mount_options": [
"rw",
"relatime",
"seclabel"
],
"mounts": [
"/dev/mqueue"
]
},
"systemd-1": {
"fs_type": "autofs",
"mount_options": [
"rw",
"relatime",
"fd=40",
"pgrp=1",
"timeout=0",
"minproto=5",
"maxproto=5",
"direct",
"pipe_ino=17610"
],
"mounts": [
"/proc/sys/fs/binfmt_misc"
]
},
"/var/lib/machines.raw": {
"fs_type": "btrfs",
"mount_options": [
"rw",
"relatime",
"seclabel",
"space_cache",
"subvolid=5",
"subvol=/"
],
"mounts": [
"/var/lib/machines"
]
},
"fusectl": {
"fs_type": "fusectl",
"mount_options": [
"rw",
"relatime"
],
"mounts": [
"/sys/fs/fuse/connections"
]
},
"gvfsd-fuse": {
"fs_type": "fuse.gvfsd-fuse",
"mount_options": [
"rw",
"nosuid",
"nodev",
"relatime",
"user_id=1000",
"group_id=1000"
],
"mounts": [
"/run/user/1000/gvfs"
]
},
"binfmt_misc": {
"fs_type": "binfmt_misc",
"mount_options": [
"rw",
"relatime"
],
"mounts": [
"/proc/sys/fs/binfmt_misc"
]
},
"/dev/mapper/docker-253:1-1180487-0868fce108cd2524a4823aad8d665cca018ead39550ca088c440ab05deec13f8": {
"fs_type": "xfs",
"mount_options": [
"rw",
"relatime",
"context=\"system_u:object_r:container_file_t:s0:c523",
"c681\"",
"nouuid",
"attr2",
"inode64",
"logbsize=64k",
"sunit=128",
"swidth=128",
"noquota"
],
"uuid": "00e2aa25-20d8-4ad7-b3a5-c501f2f4c123",
"mounts": [
"/var/lib/docker/devicemapper/mnt/0868fce108cd2524a4823aad8d665cca018ead39550ca088c440ab05deec13f8"
]
},
"shm": {
"fs_type": "tmpfs",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"context=\"system_u:object_r:container_file_t:s0:c523",
"c681\"",
"size=65536k"
],
"mounts": [
"/var/lib/docker/containers/426e513ed508a451e3f70440eed040761f81529e4bc4240e7522d331f3f3bc12/shm"
]
},
"nsfs": {
"fs_type": "nsfs",
"mount_options": [
"rw"
],
"mounts": [
"/run/docker/netns/1ce89fd79f3d"
]
},
"tracefs": {
"fs_type": "tracefs",
"mount_options": [
"rw",
"relatime"
],
"mounts": [
"/sys/kernel/debug/tracing"
]
},
"/dev/loop1": {
"fs_type": "xfs",
"uuid": "00e2aa25-20d8-4ad7-b3a5-c501f2f4c123",
"mounts": [
]
},
"/dev/mapper/docker-253:1-1180487-pool": {
"mounts": [
]
},
"/dev/sr0": {
"mounts": [
]
},
"/dev/loop2": {
"mounts": [
]
},
"/dev/sda": {
"mounts": [
]
},
"/dev/sda2": {
"fs_type": "LVM2_member",
"uuid": "66Ojcd-ULtu-1cZa-Tywo-mx0d-RF4O-ysA9jK",
"mounts": [
]
},
"/dev/mapper/fedora_host--186-swap": {
"fs_type": "swap",
"uuid": "eae6059d-2fbe-4d1c-920d-a80bbeb1ac6d",
"mounts": [
]
}
},
"by_mountpoint": {
"/dev": {
"kb_size": "8044124",
"kb_used": "0",
"kb_available": "8044124",
"percent_used": "0%",
"total_inodes": "2011031",
"inodes_used": "629",
"inodes_available": "2010402",
"inodes_percent_used": "1%",
"fs_type": "devtmpfs",
"mount_options": [
"rw",
"nosuid",
"seclabel",
"size=8044124k",
"nr_inodes=2011031",
"mode=755"
],
"devices": [
"devtmpfs"
]
},
"/dev/shm": {
"kb_size": "8055268",
"kb_used": "96036",
"kb_available": "7959232",
"percent_used": "2%",
"total_inodes": "2013817",
"inodes_used": "217",
"inodes_available": "2013600",
"inodes_percent_used": "1%",
"fs_type": "tmpfs",
"mount_options": [
"rw",
"nosuid",
"nodev",
"seclabel"
],
"devices": [
"tmpfs"
]
},
"/run": {
"kb_size": "8055268",
"kb_used": "2280",
"kb_available": "8052988",
"percent_used": "1%",
"total_inodes": "2013817",
"inodes_used": "1070",
"inodes_available": "2012747",
"inodes_percent_used": "1%",
"fs_type": "tmpfs",
"mount_options": [
"rw",
"nosuid",
"nodev",
"seclabel",
"mode=755"
],
"devices": [
"tmpfs"
]
},
"/sys/fs/cgroup": {
"kb_size": "8055268",
"kb_used": "0",
"kb_available": "8055268",
"percent_used": "0%",
"total_inodes": "2013817",
"inodes_used": "16",
"inodes_available": "2013801",
"inodes_percent_used": "1%",
"fs_type": "tmpfs",
"mount_options": [
"ro",
"nosuid",
"nodev",
"noexec",
"seclabel",
"mode=755"
],
"devices": [
"tmpfs"
]
},
"/": {
"kb_size": "51475068",
"kb_used": "42551284",
"kb_available": "6285960",
"percent_used": "88%",
"total_inodes": "3276800",
"inodes_used": "532908",
"inodes_available": "2743892",
"inodes_percent_used": "17%",
"fs_type": "ext4",
"mount_options": [
"rw",
"relatime",
"seclabel",
"data=ordered"
],
"uuid": "d34cf5e3-3449-4a6c-8179-a1feb2bca6ce",
"devices": [
"/dev/mapper/fedora_host--186-root"
]
},
"/tmp": {
"kb_size": "8055268",
"kb_used": "848396",
"kb_available": "7206872",
"percent_used": "11%",
"total_inodes": "2013817",
"inodes_used": "1353",
"inodes_available": "2012464",
"inodes_percent_used": "1%",
"fs_type": "tmpfs",
"mount_options": [
"rw",
"nosuid",
"nodev",
"seclabel"
],
"devices": [
"tmpfs"
]
},
"/boot": {
"kb_size": "487652",
"kb_used": "126628",
"kb_available": "331328",
"percent_used": "28%",
"total_inodes": "128016",
"inodes_used": "405",
"inodes_available": "127611",
"inodes_percent_used": "1%",
"fs_type": "ext4",
"mount_options": [
"rw",
"relatime",
"seclabel",
"data=ordered"
],
"uuid": "32caaec3-ef40-4691-a3b6-438c3f9bc1c0",
"devices": [
"/dev/sda1"
]
},
"/home": {
"kb_size": "185948124",
"kb_used": "105904724",
"kb_available": "70574680",
"percent_used": "61%",
"total_inodes": "11821056",
"inodes_used": "1266687",
"inodes_available": "10554369",
"inodes_percent_used": "11%",
"fs_type": "ext4",
"mount_options": [
"rw",
"relatime",
"seclabel",
"data=ordered"
],
"uuid": "2d3e4853-fa69-4ccf-8a6a-77b05ab0a42d",
"devices": [
"/dev/mapper/fedora_host--186-home"
]
},
"/var/lib/machines": {
"kb_size": "512000",
"kb_used": "16672",
"kb_available": "429056",
"percent_used": "4%",
"fs_type": "btrfs",
"uuid": "0f031512-ab15-497d-9abd-3a512b4a9390",
"devices": [
"/dev/loop0",
"/var/lib/machines.raw"
],
"mount_options": [
"rw",
"relatime",
"seclabel",
"space_cache",
"subvolid=5",
"subvol=/"
]
},
"/run/user/0": {
"kb_size": "1611052",
"kb_used": "0",
"kb_available": "1611052",
"percent_used": "0%",
"total_inodes": "2013817",
"inodes_used": "7",
"inodes_available": "2013810",
"inodes_percent_used": "1%",
"fs_type": "tmpfs",
"mount_options": [
"rw",
"nosuid",
"nodev",
"relatime",
"seclabel",
"size=1611052k",
"mode=700"
],
"devices": [
"tmpfs"
]
},
"/run/user/1000": {
"kb_size": "1611052",
"kb_used": "72",
"kb_available": "1610980",
"percent_used": "1%",
"total_inodes": "2013817",
"inodes_used": "36",
"inodes_available": "2013781",
"inodes_percent_used": "1%",
"fs_type": "tmpfs",
"mount_options": [
"rw",
"nosuid",
"nodev",
"relatime",
"seclabel",
"size=1611052k",
"mode=700",
"uid=1000",
"gid=1000"
],
"devices": [
"tmpfs"
]
},
"/sys": {
"fs_type": "sysfs",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"seclabel"
],
"devices": [
"sysfs"
]
},
"/proc": {
"fs_type": "proc",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime"
],
"devices": [
"proc"
]
},
"/sys/kernel/security": {
"fs_type": "securityfs",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime"
],
"devices": [
"securityfs"
]
},
"/dev/pts": {
"fs_type": "devpts",
"mount_options": [
"rw",
"nosuid",
"noexec",
"relatime",
"seclabel",
"gid=5",
"mode=620",
"ptmxmode=000"
],
"devices": [
"devpts"
]
},
"/sys/fs/cgroup/systemd": {
"fs_type": "cgroup",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"xattr",
"release_agent=/usr/lib/systemd/systemd-cgroups-agent",
"name=systemd"
],
"devices": [
"cgroup"
]
},
"/sys/fs/pstore": {
"fs_type": "pstore",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"seclabel"
],
"devices": [
"pstore"
]
},
"/sys/fs/cgroup/devices": {
"fs_type": "cgroup",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"devices"
],
"devices": [
"cgroup"
]
},
"/sys/fs/cgroup/cpuset": {
"fs_type": "cgroup",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"cpuset"
],
"devices": [
"cgroup"
]
},
"/sys/fs/cgroup/perf_event": {
"fs_type": "cgroup",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"perf_event"
],
"devices": [
"cgroup"
]
},
"/sys/fs/cgroup/hugetlb": {
"fs_type": "cgroup",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"hugetlb"
],
"devices": [
"cgroup"
]
},
"/sys/fs/cgroup/cpu,cpuacct": {
"fs_type": "cgroup",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"cpu",
"cpuacct"
],
"devices": [
"cgroup"
]
},
"/sys/fs/cgroup/blkio": {
"fs_type": "cgroup",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"blkio"
],
"devices": [
"cgroup"
]
},
"/sys/fs/cgroup/freezer": {
"fs_type": "cgroup",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"freezer"
],
"devices": [
"cgroup"
]
},
"/sys/fs/cgroup/memory": {
"fs_type": "cgroup",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"memory"
],
"devices": [
"cgroup"
]
},
"/sys/fs/cgroup/pids": {
"fs_type": "cgroup",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"pids"
],
"devices": [
"cgroup"
]
},
"/sys/fs/cgroup/net_cls,net_prio": {
"fs_type": "cgroup",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"net_cls",
"net_prio"
],
"devices": [
"cgroup"
]
},
"/sys/kernel/config": {
"fs_type": "configfs",
"mount_options": [
"rw",
"relatime"
],
"devices": [
"configfs"
]
},
"/sys/fs/selinux": {
"fs_type": "selinuxfs",
"mount_options": [
"rw",
"relatime"
],
"devices": [
"selinuxfs"
]
},
"/sys/kernel/debug": {
"fs_type": "debugfs",
"mount_options": [
"rw",
"relatime",
"seclabel"
],
"devices": [
"debugfs"
]
},
"/dev/hugepages": {
"fs_type": "hugetlbfs",
"mount_options": [
"rw",
"relatime",
"seclabel"
],
"devices": [
"hugetlbfs"
]
},
"/dev/mqueue": {
"fs_type": "mqueue",
"mount_options": [
"rw",
"relatime",
"seclabel"
],
"devices": [
"mqueue"
]
},
"/proc/sys/fs/binfmt_misc": {
"fs_type": "binfmt_misc",
"mount_options": [
"rw",
"relatime"
],
"devices": [
"systemd-1",
"binfmt_misc"
]
},
"/sys/fs/fuse/connections": {
"fs_type": "fusectl",
"mount_options": [
"rw",
"relatime"
],
"devices": [
"fusectl"
]
},
"/run/user/1000/gvfs": {
"fs_type": "fuse.gvfsd-fuse",
"mount_options": [
"rw",
"nosuid",
"nodev",
"relatime",
"user_id=1000",
"group_id=1000"
],
"devices": [
"gvfsd-fuse"
]
},
"/var/lib/docker/devicemapper": {
"fs_type": "ext4",
"mount_options": [
"rw",
"relatime",
"seclabel",
"data=ordered"
],
"uuid": "d34cf5e3-3449-4a6c-8179-a1feb2bca6ce",
"devices": [
"/dev/mapper/fedora_host--186-root"
]
},
"/run/docker/netns/1ce89fd79f3d": {
"fs_type": "nsfs",
"mount_options": [
"rw"
],
"devices": [
"nsfs"
]
},
"/sys/kernel/debug/tracing": {
"fs_type": "tracefs",
"mount_options": [
"rw",
"relatime"
],
"devices": [
"tracefs"
]
}
},
"by_pair": {
"devtmpfs,/dev": {
"device": "devtmpfs",
"kb_size": "8044124",
"kb_used": "0",
"kb_available": "8044124",
"percent_used": "0%",
"mount": "/dev",
"total_inodes": "2011031",
"inodes_used": "629",
"inodes_available": "2010402",
"inodes_percent_used": "1%",
"fs_type": "devtmpfs",
"mount_options": [
"rw",
"nosuid",
"seclabel",
"size=8044124k",
"nr_inodes=2011031",
"mode=755"
]
},
"tmpfs,/dev/shm": {
"device": "tmpfs",
"kb_size": "8055268",
"kb_used": "96036",
"kb_available": "7959232",
"percent_used": "2%",
"mount": "/dev/shm",
"total_inodes": "2013817",
"inodes_used": "217",
"inodes_available": "2013600",
"inodes_percent_used": "1%",
"fs_type": "tmpfs",
"mount_options": [
"rw",
"nosuid",
"nodev",
"seclabel"
]
},
"tmpfs,/run": {
"device": "tmpfs",
"kb_size": "8055268",
"kb_used": "2280",
"kb_available": "8052988",
"percent_used": "1%",
"mount": "/run",
"total_inodes": "2013817",
"inodes_used": "1070",
"inodes_available": "2012747",
"inodes_percent_used": "1%",
"fs_type": "tmpfs",
"mount_options": [
"rw",
"nosuid",
"nodev",
"seclabel",
"mode=755"
]
},
"tmpfs,/sys/fs/cgroup": {
"device": "tmpfs",
"kb_size": "8055268",
"kb_used": "0",
"kb_available": "8055268",
"percent_used": "0%",
"mount": "/sys/fs/cgroup",
"total_inodes": "2013817",
"inodes_used": "16",
"inodes_available": "2013801",
"inodes_percent_used": "1%",
"fs_type": "tmpfs",
"mount_options": [
"ro",
"nosuid",
"nodev",
"noexec",
"seclabel",
"mode=755"
]
},
"/dev/mapper/fedora_host--186-root,/": {
"device": "/dev/mapper/fedora_host--186-root",
"kb_size": "51475068",
"kb_used": "42551284",
"kb_available": "6285960",
"percent_used": "88%",
"mount": "/",
"total_inodes": "3276800",
"inodes_used": "532908",
"inodes_available": "2743892",
"inodes_percent_used": "17%",
"fs_type": "ext4",
"mount_options": [
"rw",
"relatime",
"seclabel",
"data=ordered"
],
"uuid": "d34cf5e3-3449-4a6c-8179-a1feb2bca6ce"
},
"tmpfs,/tmp": {
"device": "tmpfs",
"kb_size": "8055268",
"kb_used": "848396",
"kb_available": "7206872",
"percent_used": "11%",
"mount": "/tmp",
"total_inodes": "2013817",
"inodes_used": "1353",
"inodes_available": "2012464",
"inodes_percent_used": "1%",
"fs_type": "tmpfs",
"mount_options": [
"rw",
"nosuid",
"nodev",
"seclabel"
]
},
"/dev/sda1,/boot": {
"device": "/dev/sda1",
"kb_size": "487652",
"kb_used": "126628",
"kb_available": "331328",
"percent_used": "28%",
"mount": "/boot",
"total_inodes": "128016",
"inodes_used": "405",
"inodes_available": "127611",
"inodes_percent_used": "1%",
"fs_type": "ext4",
"mount_options": [
"rw",
"relatime",
"seclabel",
"data=ordered"
],
"uuid": "32caaec3-ef40-4691-a3b6-438c3f9bc1c0"
},
"/dev/mapper/fedora_host--186-home,/home": {
"device": "/dev/mapper/fedora_host--186-home",
"kb_size": "185948124",
"kb_used": "105904724",
"kb_available": "70574680",
"percent_used": "61%",
"mount": "/home",
"total_inodes": "11821056",
"inodes_used": "1266687",
"inodes_available": "10554369",
"inodes_percent_used": "11%",
"fs_type": "ext4",
"mount_options": [
"rw",
"relatime",
"seclabel",
"data=ordered"
],
"uuid": "2d3e4853-fa69-4ccf-8a6a-77b05ab0a42d"
},
"/dev/loop0,/var/lib/machines": {
"device": "/dev/loop0",
"kb_size": "512000",
"kb_used": "16672",
"kb_available": "429056",
"percent_used": "4%",
"mount": "/var/lib/machines",
"fs_type": "btrfs",
"uuid": "0f031512-ab15-497d-9abd-3a512b4a9390"
},
"tmpfs,/run/user/0": {
"device": "tmpfs",
"kb_size": "1611052",
"kb_used": "0",
"kb_available": "1611052",
"percent_used": "0%",
"mount": "/run/user/0",
"total_inodes": "2013817",
"inodes_used": "7",
"inodes_available": "2013810",
"inodes_percent_used": "1%",
"fs_type": "tmpfs",
"mount_options": [
"rw",
"nosuid",
"nodev",
"relatime",
"seclabel",
"size=1611052k",
"mode=700"
]
},
"tmpfs,/run/user/1000": {
"device": "tmpfs",
"kb_size": "1611052",
"kb_used": "72",
"kb_available": "1610980",
"percent_used": "1%",
"mount": "/run/user/1000",
"total_inodes": "2013817",
"inodes_used": "36",
"inodes_available": "2013781",
"inodes_percent_used": "1%",
"fs_type": "tmpfs",
"mount_options": [
"rw",
"nosuid",
"nodev",
"relatime",
"seclabel",
"size=1611052k",
"mode=700",
"uid=1000",
"gid=1000"
]
},
"sysfs,/sys": {
"device": "sysfs",
"mount": "/sys",
"fs_type": "sysfs",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"seclabel"
]
},
"proc,/proc": {
"device": "proc",
"mount": "/proc",
"fs_type": "proc",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime"
]
},
"securityfs,/sys/kernel/security": {
"device": "securityfs",
"mount": "/sys/kernel/security",
"fs_type": "securityfs",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime"
]
},
"devpts,/dev/pts": {
"device": "devpts",
"mount": "/dev/pts",
"fs_type": "devpts",
"mount_options": [
"rw",
"nosuid",
"noexec",
"relatime",
"seclabel",
"gid=5",
"mode=620",
"ptmxmode=000"
]
},
"cgroup,/sys/fs/cgroup/systemd": {
"device": "cgroup",
"mount": "/sys/fs/cgroup/systemd",
"fs_type": "cgroup",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"xattr",
"release_agent=/usr/lib/systemd/systemd-cgroups-agent",
"name=systemd"
]
},
"pstore,/sys/fs/pstore": {
"device": "pstore",
"mount": "/sys/fs/pstore",
"fs_type": "pstore",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"seclabel"
]
},
"cgroup,/sys/fs/cgroup/devices": {
"device": "cgroup",
"mount": "/sys/fs/cgroup/devices",
"fs_type": "cgroup",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"devices"
]
},
"cgroup,/sys/fs/cgroup/cpuset": {
"device": "cgroup",
"mount": "/sys/fs/cgroup/cpuset",
"fs_type": "cgroup",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"cpuset"
]
},
"cgroup,/sys/fs/cgroup/perf_event": {
"device": "cgroup",
"mount": "/sys/fs/cgroup/perf_event",
"fs_type": "cgroup",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"perf_event"
]
},
"cgroup,/sys/fs/cgroup/hugetlb": {
"device": "cgroup",
"mount": "/sys/fs/cgroup/hugetlb",
"fs_type": "cgroup",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"hugetlb"
]
},
"cgroup,/sys/fs/cgroup/cpu,cpuacct": {
"device": "cgroup",
"mount": "/sys/fs/cgroup/cpu,cpuacct",
"fs_type": "cgroup",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"cpu",
"cpuacct"
]
},
"cgroup,/sys/fs/cgroup/blkio": {
"device": "cgroup",
"mount": "/sys/fs/cgroup/blkio",
"fs_type": "cgroup",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"blkio"
]
},
"cgroup,/sys/fs/cgroup/freezer": {
"device": "cgroup",
"mount": "/sys/fs/cgroup/freezer",
"fs_type": "cgroup",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"freezer"
]
},
"cgroup,/sys/fs/cgroup/memory": {
"device": "cgroup",
"mount": "/sys/fs/cgroup/memory",
"fs_type": "cgroup",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"memory"
]
},
"cgroup,/sys/fs/cgroup/pids": {
"device": "cgroup",
"mount": "/sys/fs/cgroup/pids",
"fs_type": "cgroup",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"pids"
]
},
"cgroup,/sys/fs/cgroup/net_cls,net_prio": {
"device": "cgroup",
"mount": "/sys/fs/cgroup/net_cls,net_prio",
"fs_type": "cgroup",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"net_cls",
"net_prio"
]
},
"configfs,/sys/kernel/config": {
"device": "configfs",
"mount": "/sys/kernel/config",
"fs_type": "configfs",
"mount_options": [
"rw",
"relatime"
]
},
"selinuxfs,/sys/fs/selinux": {
"device": "selinuxfs",
"mount": "/sys/fs/selinux",
"fs_type": "selinuxfs",
"mount_options": [
"rw",
"relatime"
]
},
"debugfs,/sys/kernel/debug": {
"device": "debugfs",
"mount": "/sys/kernel/debug",
"fs_type": "debugfs",
"mount_options": [
"rw",
"relatime",
"seclabel"
]
},
"hugetlbfs,/dev/hugepages": {
"device": "hugetlbfs",
"mount": "/dev/hugepages",
"fs_type": "hugetlbfs",
"mount_options": [
"rw",
"relatime",
"seclabel"
]
},
"mqueue,/dev/mqueue": {
"device": "mqueue",
"mount": "/dev/mqueue",
"fs_type": "mqueue",
"mount_options": [
"rw",
"relatime",
"seclabel"
]
},
"systemd-1,/proc/sys/fs/binfmt_misc": {
"device": "systemd-1",
"mount": "/proc/sys/fs/binfmt_misc",
"fs_type": "autofs",
"mount_options": [
"rw",
"relatime",
"fd=40",
"pgrp=1",
"timeout=0",
"minproto=5",
"maxproto=5",
"direct",
"pipe_ino=17610"
]
},
"/var/lib/machines.raw,/var/lib/machines": {
"device": "/var/lib/machines.raw",
"mount": "/var/lib/machines",
"fs_type": "btrfs",
"mount_options": [
"rw",
"relatime",
"seclabel",
"space_cache",
"subvolid=5",
"subvol=/"
]
},
"fusectl,/sys/fs/fuse/connections": {
"device": "fusectl",
"mount": "/sys/fs/fuse/connections",
"fs_type": "fusectl",
"mount_options": [
"rw",
"relatime"
]
},
"gvfsd-fuse,/run/user/1000/gvfs": {
"device": "gvfsd-fuse",
"mount": "/run/user/1000/gvfs",
"fs_type": "fuse.gvfsd-fuse",
"mount_options": [
"rw",
"nosuid",
"nodev",
"relatime",
"user_id=1000",
"group_id=1000"
]
},
"/dev/mapper/fedora_host--186-root,/var/lib/docker/devicemapper": {
"device": "/dev/mapper/fedora_host--186-root",
"mount": "/var/lib/docker/devicemapper",
"fs_type": "ext4",
"mount_options": [
"rw",
"relatime",
"seclabel",
"data=ordered"
],
"uuid": "d34cf5e3-3449-4a6c-8179-a1feb2bca6ce"
},
"binfmt_misc,/proc/sys/fs/binfmt_misc": {
"device": "binfmt_misc",
"mount": "/proc/sys/fs/binfmt_misc",
"fs_type": "binfmt_misc",
"mount_options": [
"rw",
"relatime"
]
},
"/dev/mapper/docker-253:1-1180487-0868fce108cd2524a4823aad8d665cca018ead39550ca088c440ab05deec13f8,/var/lib/docker/devicemapper/mnt/0868fce108cd2524a4823aad8d665cca018ead39550ca088c440ab05deec13f8": {
"device": "/dev/mapper/docker-253:1-1180487-0868fce108cd2524a4823aad8d665cca018ead39550ca088c440ab05deec13f8",
"mount": "/var/lib/docker/devicemapper/mnt/0868fce108cd2524a4823aad8d665cca018ead39550ca088c440ab05deec13f8",
"fs_type": "xfs",
"mount_options": [
"rw",
"relatime",
"context=\"system_u:object_r:container_file_t:s0:c523",
"c681\"",
"nouuid",
"attr2",
"inode64",
"logbsize=64k",
"sunit=128",
"swidth=128",
"noquota"
],
"uuid": "00e2aa25-20d8-4ad7-b3a5-c501f2f4c123"
},
"shm,/var/lib/docker/containers/426e513ed508a451e3f70440eed040761f81529e4bc4240e7522d331f3f3bc12/shm": {
"device": "shm",
"mount": "/var/lib/docker/containers/426e513ed508a451e3f70440eed040761f81529e4bc4240e7522d331f3f3bc12/shm",
"fs_type": "tmpfs",
"mount_options": [
"rw",
"nosuid",
"nodev",
"noexec",
"relatime",
"context=\"system_u:object_r:container_file_t:s0:c523",
"c681\"",
"size=65536k"
]
},
"nsfs,/run/docker/netns/1ce89fd79f3d": {
"device": "nsfs",
"mount": "/run/docker/netns/1ce89fd79f3d",
"fs_type": "nsfs",
"mount_options": [
"rw"
]
},
"tracefs,/sys/kernel/debug/tracing": {
"device": "tracefs",
"mount": "/sys/kernel/debug/tracing",
"fs_type": "tracefs",
"mount_options": [
"rw",
"relatime"
]
},
"/dev/loop1,": {
"device": "/dev/loop1",
"fs_type": "xfs",
"uuid": "00e2aa25-20d8-4ad7-b3a5-c501f2f4c123"
},
"/dev/mapper/docker-253:1-1180487-pool,": {
"device": "/dev/mapper/docker-253:1-1180487-pool"
},
"/dev/sr0,": {
"device": "/dev/sr0"
},
"/dev/loop2,": {
"device": "/dev/loop2"
},
"/dev/sda,": {
"device": "/dev/sda"
},
"/dev/sda2,": {
"device": "/dev/sda2",
"fs_type": "LVM2_member",
"uuid": "66Ojcd-ULtu-1cZa-Tywo-mx0d-RF4O-ysA9jK"
},
"/dev/mapper/fedora_host--186-swap,": {
"device": "/dev/mapper/fedora_host--186-swap",
"fs_type": "swap",
"uuid": "eae6059d-2fbe-4d1c-920d-a80bbeb1ac6d"
}
}
},
"virtualization": {
"systems": {
"kvm": "host"
},
"system": "kvm",
"role": "host",
"libvirt_version": "2.2.0",
"uri": "qemu:///system",
"capabilities": {
},
"nodeinfo": {
"cores": 4,
"cpus": 8,
"memory": 16110540,
"mhz": 2832,
"model": "x86_64",
"nodes": 1,
"sockets": 1,
"threads": 2
},
"domains": {
},
"networks": {
"vagrant-libvirt": {
"bridge_name": "virbr1",
"uuid": "877ddb27-b39c-427e-a7bf-1aa829389eeb"
},
"default": {
"bridge_name": "virbr0",
"uuid": "750d2567-23a8-470d-8a2b-71cd651e30d1"
}
},
"storage": {
"virt-images": {
"autostart": true,
"uuid": "d8a189fa-f98c-462f-9ea4-204eb77a96a1",
"allocation": 106412863488,
"available": 83998015488,
"capacity": 190410878976,
"state": 2,
"volumes": {
"rhel-atomic-host-standard-2014-7-1.qcow2": {
"key": "/home/some_user/virt-images/rhel-atomic-host-standard-2014-7-1.qcow2",
"name": "rhel-atomic-host-standard-2014-7-1.qcow2",
"path": "/home/some_user/virt-images/rhel-atomic-host-standard-2014-7-1.qcow2",
"allocation": 1087115264,
"capacity": 8589934592,
"type": 0
},
"atomic-beta-instance-7.qcow2": {
"key": "/home/some_user/virt-images/atomic-beta-instance-7.qcow2",
"name": "atomic-beta-instance-7.qcow2",
"path": "/home/some_user/virt-images/atomic-beta-instance-7.qcow2",
"allocation": 200704,
"capacity": 8589934592,
"type": 0
},
"os1-atomic-meta-data": {
"key": "/home/some_user/virt-images/os1-atomic-meta-data",
"name": "os1-atomic-meta-data",
"path": "/home/some_user/virt-images/os1-atomic-meta-data",
"allocation": 4096,
"capacity": 49,
"type": 0
},
"atomic-user-data": {
"key": "/home/some_user/virt-images/atomic-user-data",
"name": "atomic-user-data",
"path": "/home/some_user/virt-images/atomic-user-data",
"allocation": 4096,
"capacity": 512,
"type": 0
},
"qemu-snap.txt": {
"key": "/home/some_user/virt-images/qemu-snap.txt",
"name": "qemu-snap.txt",
"path": "/home/some_user/virt-images/qemu-snap.txt",
"allocation": 4096,
"capacity": 111,
"type": 0
},
"atomic-beta-instance-5.qcow2": {
"key": "/home/some_user/virt-images/atomic-beta-instance-5.qcow2",
"name": "atomic-beta-instance-5.qcow2",
"path": "/home/some_user/virt-images/atomic-beta-instance-5.qcow2",
"allocation": 339091456,
"capacity": 8589934592,
"type": 0
},
"meta-data": {
"key": "/home/some_user/virt-images/meta-data",
"name": "meta-data",
"path": "/home/some_user/virt-images/meta-data",
"allocation": 4096,
"capacity": 49,
"type": 0
},
"atomic-beta-instance-8.qcow2": {
"key": "/home/some_user/virt-images/atomic-beta-instance-8.qcow2",
"name": "atomic-beta-instance-8.qcow2",
"path": "/home/some_user/virt-images/atomic-beta-instance-8.qcow2",
"allocation": 322576384,
"capacity": 8589934592,
"type": 0
},
"user-data": {
"key": "/home/some_user/virt-images/user-data",
"name": "user-data",
"path": "/home/some_user/virt-images/user-data",
"allocation": 4096,
"capacity": 512,
"type": 0
},
"rhel-6-2015-10-16.qcow2": {
"key": "/home/some_user/virt-images/rhel-6-2015-10-16.qcow2",
"name": "rhel-6-2015-10-16.qcow2",
"path": "/home/some_user/virt-images/rhel-6-2015-10-16.qcow2",
"allocation": 7209422848,
"capacity": 17179869184,
"type": 0
},
"atomic_demo_notes.txt": {
"key": "/home/some_user/virt-images/atomic_demo_notes.txt",
"name": "atomic_demo_notes.txt",
"path": "/home/some_user/virt-images/atomic_demo_notes.txt",
"allocation": 4096,
"capacity": 354,
"type": 0
},
"packer-windows-2012-R2-standard": {
"key": "/home/some_user/virt-images/packer-windows-2012-R2-standard",
"name": "packer-windows-2012-R2-standard",
"path": "/home/some_user/virt-images/packer-windows-2012-R2-standard",
"allocation": 16761495552,
"capacity": 64424509440,
"type": 0
},
"atomic3-cidata.iso": {
"key": "/home/some_user/virt-images/atomic3-cidata.iso",
"name": "atomic3-cidata.iso",
"path": "/home/some_user/virt-images/atomic3-cidata.iso",
"allocation": 376832,
"capacity": 374784,
"type": 0
},
".atomic_demo_notes.txt.swp": {
"key": "/home/some_user/virt-images/.atomic_demo_notes.txt.swp",
"name": ".atomic_demo_notes.txt.swp",
"path": "/home/some_user/virt-images/.atomic_demo_notes.txt.swp",
"allocation": 12288,
"capacity": 12288,
"type": 0
},
"rhel7-2015-10-13.qcow2": {
"key": "/home/some_user/virt-images/rhel7-2015-10-13.qcow2",
"name": "rhel7-2015-10-13.qcow2",
"path": "/home/some_user/virt-images/rhel7-2015-10-13.qcow2",
"allocation": 4679413760,
"capacity": 12884901888,
"type": 0
}
}
},
"default": {
"autostart": true,
"uuid": "c8d9d160-efc0-4207-81c2-e79d6628f7e1",
"allocation": 43745488896,
"available": 8964980736,
"capacity": 52710469632,
"state": 2,
"volumes": {
"s3than-VAGRANTSLASH-trusty64_vagrant_box_image_0.0.1.img": {
"key": "/var/lib/libvirt/images/s3than-VAGRANTSLASH-trusty64_vagrant_box_image_0.0.1.img",
"name": "s3than-VAGRANTSLASH-trusty64_vagrant_box_image_0.0.1.img",
"path": "/var/lib/libvirt/images/s3than-VAGRANTSLASH-trusty64_vagrant_box_image_0.0.1.img",
"allocation": 1258622976,
"capacity": 42949672960,
"type": 0
},
"centos-7.0_vagrant_box_image.img": {
"key": "/var/lib/libvirt/images/centos-7.0_vagrant_box_image.img",
"name": "centos-7.0_vagrant_box_image.img",
"path": "/var/lib/libvirt/images/centos-7.0_vagrant_box_image.img",
"allocation": 1649414144,
"capacity": 42949672960,
"type": 0
},
"baremettle-VAGRANTSLASH-centos-5.10_vagrant_box_image_1.0.0.img": {
"key": "/var/lib/libvirt/images/baremettle-VAGRANTSLASH-centos-5.10_vagrant_box_image_1.0.0.img",
"name": "baremettle-VAGRANTSLASH-centos-5.10_vagrant_box_image_1.0.0.img",
"path": "/var/lib/libvirt/images/baremettle-VAGRANTSLASH-centos-5.10_vagrant_box_image_1.0.0.img",
"allocation": 810422272,
"capacity": 42949672960,
"type": 0
},
"centos-6_vagrant_box_image.img": {
"key": "/var/lib/libvirt/images/centos-6_vagrant_box_image.img",
"name": "centos-6_vagrant_box_image.img",
"path": "/var/lib/libvirt/images/centos-6_vagrant_box_image.img",
"allocation": 1423642624,
"capacity": 42949672960,
"type": 0
},
"centos5-ansible_default.img": {
"key": "/var/lib/libvirt/images/centos5-ansible_default.img",
"name": "centos5-ansible_default.img",
"path": "/var/lib/libvirt/images/centos5-ansible_default.img",
"allocation": 8986624,
"capacity": 42949672960,
"type": 0
},
"ubuntu_default.img": {
"key": "/var/lib/libvirt/images/ubuntu_default.img",
"name": "ubuntu_default.img",
"path": "/var/lib/libvirt/images/ubuntu_default.img",
"allocation": 3446833152,
"capacity": 42949672960,
"type": 0
}
}
},
"boot-scratch": {
"autostart": true,
"uuid": "e5ef4360-b889-4843-84fb-366e8fb30f20",
"allocation": 43745488896,
"available": 8964980736,
"capacity": 52710469632,
"state": 2,
"volumes": {
}
}
}
},
"network": {
"interfaces": {
"lo": {
"mtu": "65536",
"flags": [
"LOOPBACK",
"UP",
"LOWER_UP"
],
"encapsulation": "Loopback",
"addresses": {
"127.0.0.1": {
"family": "inet",
"prefixlen": "8",
"netmask": "255.0.0.0",
"scope": "Node",
"ip_scope": "LOOPBACK"
},
"::1": {
"family": "inet6",
"prefixlen": "128",
"scope": "Node",
"tags": [
],
"ip_scope": "LINK LOCAL LOOPBACK"
}
},
"state": "unknown"
},
"em1": {
"type": "em",
"number": "1",
"mtu": "1500",
"flags": [
"BROADCAST",
"MULTICAST",
"UP"
],
"encapsulation": "Ethernet",
"addresses": {
"3C:97:0E:E9:28:8E": {
"family": "lladdr"
}
},
"state": "down",
"link_speed": 0,
"duplex": "Unknown! (255)",
"port": "Twisted Pair",
"transceiver": "internal",
"auto_negotiation": "on",
"mdi_x": "Unknown (auto)",
"ring_params": {
"max_rx": 4096,
"max_rx_mini": 0,
"max_rx_jumbo": 0,
"max_tx": 4096,
"current_rx": 256,
"current_rx_mini": 0,
"current_rx_jumbo": 0,
"current_tx": 256
}
},
"wlp4s0": {
"type": "wlp4s",
"number": "0",
"mtu": "1500",
"flags": [
"BROADCAST",
"MULTICAST",
"UP",
"LOWER_UP"
],
"encapsulation": "Ethernet",
"addresses": {
"5C:51:4F:E6:A8:E3": {
"family": "lladdr"
},
"192.168.1.19": {
"family": "inet",
"prefixlen": "24",
"netmask": "255.255.255.0",
"broadcast": "192.168.1.255",
"scope": "Global",
"ip_scope": "RFC1918 PRIVATE"
},
"fe80::5e51:4fff:fee6:a8e3": {
"family": "inet6",
"prefixlen": "64",
"scope": "Link",
"tags": [
],
"ip_scope": "LINK LOCAL UNICAST"
}
},
"state": "up",
"arp": {
"192.168.1.33": "00:11:d9:39:3e:e0",
"192.168.1.20": "ac:3a:7a:a7:49:e8",
"192.168.1.17": "00:09:b0:d0:64:19",
"192.168.1.22": "ac:bc:32:82:30:bb",
"192.168.1.15": "00:11:32:2e:10:d5",
"192.168.1.1": "84:1b:5e:03:50:b2",
"192.168.1.34": "00:11:d9:5f:e8:e6",
"192.168.1.16": "dc:a5:f4:ac:22:3a",
"192.168.1.21": "74:c2:46:73:28:d8",
"192.168.1.27": "00:17:88:09:3c:bb",
"192.168.1.24": "08:62:66:90:a2:b8"
},
"routes": [
{
"destination": "default",
"family": "inet",
"via": "192.168.1.1",
"metric": "600",
"proto": "static"
},
{
"destination": "66.187.232.64",
"family": "inet",
"via": "192.168.1.1",
"metric": "600",
"proto": "static"
},
{
"destination": "192.168.1.0/24",
"family": "inet",
"scope": "link",
"metric": "600",
"proto": "kernel",
"src": "192.168.1.19"
},
{
"destination": "192.168.1.1",
"family": "inet",
"scope": "link",
"metric": "600",
"proto": "static"
},
{
"destination": "fe80::/64",
"family": "inet6",
"metric": "256",
"proto": "kernel"
}
],
"ring_params": {
"max_rx": 0,
"max_rx_mini": 0,
"max_rx_jumbo": 0,
"max_tx": 0,
"current_rx": 0,
"current_rx_mini": 0,
"current_rx_jumbo": 0,
"current_tx": 0
}
},
"virbr1": {
"type": "virbr",
"number": "1",
"mtu": "1500",
"flags": [
"BROADCAST",
"MULTICAST",
"UP"
],
"encapsulation": "Ethernet",
"addresses": {
"52:54:00:B4:68:A9": {
"family": "lladdr"
},
"192.168.121.1": {
"family": "inet",
"prefixlen": "24",
"netmask": "255.255.255.0",
"broadcast": "192.168.121.255",
"scope": "Global",
"ip_scope": "RFC1918 PRIVATE"
}
},
"state": "1",
"routes": [
{
"destination": "192.168.121.0/24",
"family": "inet",
"scope": "link",
"proto": "kernel",
"src": "192.168.121.1"
}
],
"ring_params": {
}
},
"virbr1-nic": {
"type": "virbr",
"number": "1-nic",
"mtu": "1500",
"flags": [
"BROADCAST",
"MULTICAST"
],
"encapsulation": "Ethernet",
"addresses": {
"52:54:00:B4:68:A9": {
"family": "lladdr"
}
},
"state": "disabled",
"link_speed": 10,
"duplex": "Full",
"port": "Twisted Pair",
"transceiver": "internal",
"auto_negotiation": "off",
"mdi_x": "Unknown",
"ring_params": {
}
},
"virbr0": {
"type": "virbr",
"number": "0",
"mtu": "1500",
"flags": [
"BROADCAST",
"MULTICAST",
"UP"
],
"encapsulation": "Ethernet",
"addresses": {
"52:54:00:CE:82:5E": {
"family": "lladdr"
},
"192.168.137.1": {
"family": "inet",
"prefixlen": "24",
"netmask": "255.255.255.0",
"broadcast": "192.168.137.255",
"scope": "Global",
"ip_scope": "RFC1918 PRIVATE"
}
},
"state": "1",
"routes": [
{
"destination": "192.168.137.0/24",
"family": "inet",
"scope": "link",
"proto": "kernel",
"src": "192.168.137.1"
}
],
"ring_params": {
}
},
"virbr0-nic": {
"type": "virbr",
"number": "0-nic",
"mtu": "1500",
"flags": [
"BROADCAST",
"MULTICAST"
],
"encapsulation": "Ethernet",
"addresses": {
"52:54:00:CE:82:5E": {
"family": "lladdr"
}
},
"state": "disabled",
"link_speed": 10,
"duplex": "Full",
"port": "Twisted Pair",
"transceiver": "internal",
"auto_negotiation": "off",
"mdi_x": "Unknown",
"ring_params": {
}
},
"docker0": {
"type": "docker",
"number": "0",
"mtu": "1500",
"flags": [
"BROADCAST",
"MULTICAST",
"UP",
"LOWER_UP"
],
"encapsulation": "Ethernet",
"addresses": {
"02:42:EA:15:D8:84": {
"family": "lladdr"
},
"172.17.0.1": {
"family": "inet",
"prefixlen": "16",
"netmask": "255.255.0.0",
"scope": "Global",
"ip_scope": "RFC1918 PRIVATE"
},
"fe80::42:eaff:fe15:d884": {
"family": "inet6",
"prefixlen": "64",
"scope": "Link",
"tags": [
],
"ip_scope": "LINK LOCAL UNICAST"
}
},
"state": "0",
"arp": {
"172.17.0.2": "02:42:ac:11:00:02",
"172.17.0.4": "02:42:ac:11:00:04",
"172.17.0.3": "02:42:ac:11:00:03"
},
"routes": [
{
"destination": "172.17.0.0/16",
"family": "inet",
"scope": "link",
"proto": "kernel",
"src": "172.17.0.1"
},
{
"destination": "fe80::/64",
"family": "inet6",
"metric": "256",
"proto": "kernel"
}
],
"ring_params": {
}
},
"vethf20ff12": {
"type": "vethf20ff1",
"number": "2",
"mtu": "1500",
"flags": [
"BROADCAST",
"MULTICAST",
"UP",
"LOWER_UP"
],
"encapsulation": "Ethernet",
"addresses": {
"AE:6E:2B:1E:A1:31": {
"family": "lladdr"
},
"fe80::ac6e:2bff:fe1e:a131": {
"family": "inet6",
"prefixlen": "64",
"scope": "Link",
"tags": [
],
"ip_scope": "LINK LOCAL UNICAST"
}
},
"state": "forwarding",
"routes": [
{
"destination": "fe80::/64",
"family": "inet6",
"metric": "256",
"proto": "kernel"
}
],
"link_speed": 10000,
"duplex": "Full",
"port": "Twisted Pair",
"transceiver": "internal",
"auto_negotiation": "off",
"mdi_x": "Unknown",
"ring_params": {
}
},
"tun0": {
"type": "tun",
"number": "0",
"mtu": "1360",
"flags": [
"MULTICAST",
"NOARP",
"UP",
"LOWER_UP"
],
"addresses": {
"10.10.120.68": {
"family": "inet",
"prefixlen": "21",
"netmask": "255.255.248.0",
"broadcast": "10.10.127.255",
"scope": "Global",
"ip_scope": "RFC1918 PRIVATE"
},
"fe80::365e:885c:31ca:7670": {
"family": "inet6",
"prefixlen": "64",
"scope": "Link",
"tags": [
"flags",
"800"
],
"ip_scope": "LINK LOCAL UNICAST"
}
},
"state": "unknown",
"routes": [
{
"destination": "10.0.0.0/8",
"family": "inet",
"via": "10.10.120.1",
"metric": "50",
"proto": "static"
},
{
"destination": "10.10.120.0/21",
"family": "inet",
"scope": "link",
"metric": "50",
"proto": "kernel",
"src": "10.10.120.68"
},
{
"destination": "fe80::/64",
"family": "inet6",
"metric": "256",
"proto": "kernel"
}
]
}
},
"default_interface": "wlp4s0",
"default_gateway": "192.168.1.1"
},
"counters": {
"network": {
"interfaces": {
"lo": {
"tx": {
"queuelen": "1",
"bytes": "202568405",
"packets": "1845473",
"errors": "0",
"drop": "0",
"carrier": "0",
"collisions": "0"
},
"rx": {
"bytes": "202568405",
"packets": "1845473",
"errors": "0",
"drop": "0",
"overrun": "0"
}
},
"em1": {
"tx": {
"queuelen": "1000",
"bytes": "673898037",
"packets": "1631282",
"errors": "0",
"drop": "0",
"carrier": "0",
"collisions": "0"
},
"rx": {
"bytes": "1536186718",
"packets": "1994394",
"errors": "0",
"drop": "0",
"overrun": "0"
}
},
"wlp4s0": {
"tx": {
"queuelen": "1000",
"bytes": "3927670539",
"packets": "15146886",
"errors": "0",
"drop": "0",
"carrier": "0",
"collisions": "0"
},
"rx": {
"bytes": "12367173401",
"packets": "23981258",
"errors": "0",
"drop": "0",
"overrun": "0"
}
},
"virbr1": {
"tx": {
"queuelen": "1000",
"bytes": "0",
"packets": "0",
"errors": "0",
"drop": "0",
"carrier": "0",
"collisions": "0"
},
"rx": {
"bytes": "0",
"packets": "0",
"errors": "0",
"drop": "0",
"overrun": "0"
}
},
"virbr1-nic": {
"tx": {
"queuelen": "1000",
"bytes": "0",
"packets": "0",
"errors": "0",
"drop": "0",
"carrier": "0",
"collisions": "0"
},
"rx": {
"bytes": "0",
"packets": "0",
"errors": "0",
"drop": "0",
"overrun": "0"
}
},
"virbr0": {
"tx": {
"queuelen": "1000",
"bytes": "0",
"packets": "0",
"errors": "0",
"drop": "0",
"carrier": "0",
"collisions": "0"
},
"rx": {
"bytes": "0",
"packets": "0",
"errors": "0",
"drop": "0",
"overrun": "0"
}
},
"virbr0-nic": {
"tx": {
"queuelen": "1000",
"bytes": "0",
"packets": "0",
"errors": "0",
"drop": "0",
"carrier": "0",
"collisions": "0"
},
"rx": {
"bytes": "0",
"packets": "0",
"errors": "0",
"drop": "0",
"overrun": "0"
}
},
"docker0": {
"rx": {
"bytes": "2471313",
"packets": "36915",
"errors": "0",
"drop": "0",
"overrun": "0"
},
"tx": {
"bytes": "413371670",
"packets": "127713",
"errors": "0",
"drop": "0",
"carrier": "0",
"collisions": "0"
}
},
"vethf20ff12": {
"rx": {
"bytes": "34391",
"packets": "450",
"errors": "0",
"drop": "0",
"overrun": "0"
},
"tx": {
"bytes": "17919115",
"packets": "108069",
"errors": "0",
"drop": "0",
"carrier": "0",
"collisions": "0"
}
},
"tun0": {
"tx": {
"queuelen": "100",
"bytes": "22343462",
"packets": "253442",
"errors": "0",
"drop": "0",
"carrier": "0",
"collisions": "0"
},
"rx": {
"bytes": "115160002",
"packets": "197529",
"errors": "0",
"drop": "0",
"overrun": "0"
}
}
}
}
},
"ipaddress": "192.168.1.19",
"macaddress": "5C:51:4F:E6:A8:E3",
"ip6address": "fe80::42:eaff:fe15:d884",
"cpu": {
"0": {
"vendor_id": "GenuineIntel",
"family": "6",
"model": "60",
"model_name": "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz",
"stepping": "3",
"mhz": "3238.714",
"cache_size": "6144 KB",
"physical_id": "0",
"core_id": "0",
"cores": "4",
"flags": [
"fpu",
"vme",
"de",
"pse",
"tsc",
"msr",
"pae",
"mce",
"cx8",
"apic",
"sep",
"mtrr",
"pge",
"mca",
"cmov",
"pat",
"pse36",
"clflush",
"dts",
"acpi",
"mmx",
"fxsr",
"sse",
"sse2",
"ss",
"ht",
"tm",
"pbe",
"syscall",
"nx",
"pdpe1gb",
"rdtscp",
"lm",
"constant_tsc",
"arch_perfmon",
"pebs",
"bts",
"rep_good",
"nopl",
"xtopology",
"nonstop_tsc",
"aperfmperf",
"eagerfpu",
"pni",
"pclmulqdq",
"dtes64",
"monitor",
"ds_cpl",
"vmx",
"smx",
"est",
"tm2",
"ssse3",
"sdbg",
"fma",
"cx16",
"xtpr",
"pdcm",
"pcid",
"sse4_1",
"sse4_2",
"x2apic",
"movbe",
"popcnt",
"tsc_deadline_timer",
"aes",
"xsave",
"avx",
"f16c",
"rdrand",
"lahf_lm",
"abm",
"epb",
"tpr_shadow",
"vnmi",
"flexpriority",
"ept",
"vpid",
"fsgsbase",
"tsc_adjust",
"bmi1",
"avx2",
"smep",
"bmi2",
"erms",
"invpcid",
"xsaveopt",
"dtherm",
"ida",
"arat",
"pln",
"pts"
]
},
"1": {
"vendor_id": "GenuineIntel",
"family": "6",
"model": "60",
"model_name": "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz",
"stepping": "3",
"mhz": "3137.200",
"cache_size": "6144 KB",
"physical_id": "0",
"core_id": "0",
"cores": "4",
"flags": [
"fpu",
"vme",
"de",
"pse",
"tsc",
"msr",
"pae",
"mce",
"cx8",
"apic",
"sep",
"mtrr",
"pge",
"mca",
"cmov",
"pat",
"pse36",
"clflush",
"dts",
"acpi",
"mmx",
"fxsr",
"sse",
"sse2",
"ss",
"ht",
"tm",
"pbe",
"syscall",
"nx",
"pdpe1gb",
"rdtscp",
"lm",
"constant_tsc",
"arch_perfmon",
"pebs",
"bts",
"rep_good",
"nopl",
"xtopology",
"nonstop_tsc",
"aperfmperf",
"eagerfpu",
"pni",
"pclmulqdq",
"dtes64",
"monitor",
"ds_cpl",
"vmx",
"smx",
"est",
"tm2",
"ssse3",
"sdbg",
"fma",
"cx16",
"xtpr",
"pdcm",
"pcid",
"sse4_1",
"sse4_2",
"x2apic",
"movbe",
"popcnt",
"tsc_deadline_timer",
"aes",
"xsave",
"avx",
"f16c",
"rdrand",
"lahf_lm",
"abm",
"epb",
"tpr_shadow",
"vnmi",
"flexpriority",
"ept",
"vpid",
"fsgsbase",
"tsc_adjust",
"bmi1",
"avx2",
"smep",
"bmi2",
"erms",
"invpcid",
"xsaveopt",
"dtherm",
"ida",
"arat",
"pln",
"pts"
]
},
"2": {
"vendor_id": "GenuineIntel",
"family": "6",
"model": "60",
"model_name": "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz",
"stepping": "3",
"mhz": "3077.050",
"cache_size": "6144 KB",
"physical_id": "0",
"core_id": "1",
"cores": "4",
"flags": [
"fpu",
"vme",
"de",
"pse",
"tsc",
"msr",
"pae",
"mce",
"cx8",
"apic",
"sep",
"mtrr",
"pge",
"mca",
"cmov",
"pat",
"pse36",
"clflush",
"dts",
"acpi",
"mmx",
"fxsr",
"sse",
"sse2",
"ss",
"ht",
"tm",
"pbe",
"syscall",
"nx",
"pdpe1gb",
"rdtscp",
"lm",
"constant_tsc",
"arch_perfmon",
"pebs",
"bts",
"rep_good",
"nopl",
"xtopology",
"nonstop_tsc",
"aperfmperf",
"eagerfpu",
"pni",
"pclmulqdq",
"dtes64",
"monitor",
"ds_cpl",
"vmx",
"smx",
"est",
"tm2",
"ssse3",
"sdbg",
"fma",
"cx16",
"xtpr",
"pdcm",
"pcid",
"sse4_1",
"sse4_2",
"x2apic",
"movbe",
"popcnt",
"tsc_deadline_timer",
"aes",
"xsave",
"avx",
"f16c",
"rdrand",
"lahf_lm",
"abm",
"epb",
"tpr_shadow",
"vnmi",
"flexpriority",
"ept",
"vpid",
"fsgsbase",
"tsc_adjust",
"bmi1",
"avx2",
"smep",
"bmi2",
"erms",
"invpcid",
"xsaveopt",
"dtherm",
"ida",
"arat",
"pln",
"pts"
]
},
"3": {
"vendor_id": "GenuineIntel",
"family": "6",
"model": "60",
"model_name": "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz",
"stepping": "3",
"mhz": "2759.655",
"cache_size": "6144 KB",
"physical_id": "0",
"core_id": "1",
"cores": "4",
"flags": [
"fpu",
"vme",
"de",
"pse",
"tsc",
"msr",
"pae",
"mce",
"cx8",
"apic",
"sep",
"mtrr",
"pge",
"mca",
"cmov",
"pat",
"pse36",
"clflush",
"dts",
"acpi",
"mmx",
"fxsr",
"sse",
"sse2",
"ss",
"ht",
"tm",
"pbe",
"syscall",
"nx",
"pdpe1gb",
"rdtscp",
"lm",
"constant_tsc",
"arch_perfmon",
"pebs",
"bts",
"rep_good",
"nopl",
"xtopology",
"nonstop_tsc",
"aperfmperf",
"eagerfpu",
"pni",
"pclmulqdq",
"dtes64",
"monitor",
"ds_cpl",
"vmx",
"smx",
"est",
"tm2",
"ssse3",
"sdbg",
"fma",
"cx16",
"xtpr",
"pdcm",
"pcid",
"sse4_1",
"sse4_2",
"x2apic",
"movbe",
"popcnt",
"tsc_deadline_timer",
"aes",
"xsave",
"avx",
"f16c",
"rdrand",
"lahf_lm",
"abm",
"epb",
"tpr_shadow",
"vnmi",
"flexpriority",
"ept",
"vpid",
"fsgsbase",
"tsc_adjust",
"bmi1",
"avx2",
"smep",
"bmi2",
"erms",
"invpcid",
"xsaveopt",
"dtherm",
"ida",
"arat",
"pln",
"pts"
]
},
"4": {
"vendor_id": "GenuineIntel",
"family": "6",
"model": "60",
"model_name": "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz",
"stepping": "3",
"mhz": "3419.000",
"cache_size": "6144 KB",
"physical_id": "0",
"core_id": "2",
"cores": "4",
"flags": [
"fpu",
"vme",
"de",
"pse",
"tsc",
"msr",
"pae",
"mce",
"cx8",
"apic",
"sep",
"mtrr",
"pge",
"mca",
"cmov",
"pat",
"pse36",
"clflush",
"dts",
"acpi",
"mmx",
"fxsr",
"sse",
"sse2",
"ss",
"ht",
"tm",
"pbe",
"syscall",
"nx",
"pdpe1gb",
"rdtscp",
"lm",
"constant_tsc",
"arch_perfmon",
"pebs",
"bts",
"rep_good",
"nopl",
"xtopology",
"nonstop_tsc",
"aperfmperf",
"eagerfpu",
"pni",
"pclmulqdq",
"dtes64",
"monitor",
"ds_cpl",
"vmx",
"smx",
"est",
"tm2",
"ssse3",
"sdbg",
"fma",
"cx16",
"xtpr",
"pdcm",
"pcid",
"sse4_1",
"sse4_2",
"x2apic",
"movbe",
"popcnt",
"tsc_deadline_timer",
"aes",
"xsave",
"avx",
"f16c",
"rdrand",
"lahf_lm",
"abm",
"epb",
"tpr_shadow",
"vnmi",
"flexpriority",
"ept",
"vpid",
"fsgsbase",
"tsc_adjust",
"bmi1",
"avx2",
"smep",
"bmi2",
"erms",
"invpcid",
"xsaveopt",
"dtherm",
"ida",
"arat",
"pln",
"pts"
]
},
"5": {
"vendor_id": "GenuineIntel",
"family": "6",
"model": "60",
"model_name": "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz",
"stepping": "3",
"mhz": "2752.569",
"cache_size": "6144 KB",
"physical_id": "0",
"core_id": "2",
"cores": "4",
"flags": [
"fpu",
"vme",
"de",
"pse",
"tsc",
"msr",
"pae",
"mce",
"cx8",
"apic",
"sep",
"mtrr",
"pge",
"mca",
"cmov",
"pat",
"pse36",
"clflush",
"dts",
"acpi",
"mmx",
"fxsr",
"sse",
"sse2",
"ss",
"ht",
"tm",
"pbe",
"syscall",
"nx",
"pdpe1gb",
"rdtscp",
"lm",
"constant_tsc",
"arch_perfmon",
"pebs",
"bts",
"rep_good",
"nopl",
"xtopology",
"nonstop_tsc",
"aperfmperf",
"eagerfpu",
"pni",
"pclmulqdq",
"dtes64",
"monitor",
"ds_cpl",
"vmx",
"smx",
"est",
"tm2",
"ssse3",
"sdbg",
"fma",
"cx16",
"xtpr",
"pdcm",
"pcid",
"sse4_1",
"sse4_2",
"x2apic",
"movbe",
"popcnt",
"tsc_deadline_timer",
"aes",
"xsave",
"avx",
"f16c",
"rdrand",
"lahf_lm",
"abm",
"epb",
"tpr_shadow",
"vnmi",
"flexpriority",
"ept",
"vpid",
"fsgsbase",
"tsc_adjust",
"bmi1",
"avx2",
"smep",
"bmi2",
"erms",
"invpcid",
"xsaveopt",
"dtherm",
"ida",
"arat",
"pln",
"pts"
]
},
"6": {
"vendor_id": "GenuineIntel",
"family": "6",
"model": "60",
"model_name": "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz",
"stepping": "3",
"mhz": "2953.619",
"cache_size": "6144 KB",
"physical_id": "0",
"core_id": "3",
"cores": "4",
"flags": [
"fpu",
"vme",
"de",
"pse",
"tsc",
"msr",
"pae",
"mce",
"cx8",
"apic",
"sep",
"mtrr",
"pge",
"mca",
"cmov",
"pat",
"pse36",
"clflush",
"dts",
"acpi",
"mmx",
"fxsr",
"sse",
"sse2",
"ss",
"ht",
"tm",
"pbe",
"syscall",
"nx",
"pdpe1gb",
"rdtscp",
"lm",
"constant_tsc",
"arch_perfmon",
"pebs",
"bts",
"rep_good",
"nopl",
"xtopology",
"nonstop_tsc",
"aperfmperf",
"eagerfpu",
"pni",
"pclmulqdq",
"dtes64",
"monitor",
"ds_cpl",
"vmx",
"smx",
"est",
"tm2",
"ssse3",
"sdbg",
"fma",
"cx16",
"xtpr",
"pdcm",
"pcid",
"sse4_1",
"sse4_2",
"x2apic",
"movbe",
"popcnt",
"tsc_deadline_timer",
"aes",
"xsave",
"avx",
"f16c",
"rdrand",
"lahf_lm",
"abm",
"epb",
"tpr_shadow",
"vnmi",
"flexpriority",
"ept",
"vpid",
"fsgsbase",
"tsc_adjust",
"bmi1",
"avx2",
"smep",
"bmi2",
"erms",
"invpcid",
"xsaveopt",
"dtherm",
"ida",
"arat",
"pln",
"pts"
]
},
"7": {
"vendor_id": "GenuineIntel",
"family": "6",
"model": "60",
"model_name": "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz",
"stepping": "3",
"mhz": "2927.087",
"cache_size": "6144 KB",
"physical_id": "0",
"core_id": "3",
"cores": "4",
"flags": [
"fpu",
"vme",
"de",
"pse",
"tsc",
"msr",
"pae",
"mce",
"cx8",
"apic",
"sep",
"mtrr",
"pge",
"mca",
"cmov",
"pat",
"pse36",
"clflush",
"dts",
"acpi",
"mmx",
"fxsr",
"sse",
"sse2",
"ss",
"ht",
"tm",
"pbe",
"syscall",
"nx",
"pdpe1gb",
"rdtscp",
"lm",
"constant_tsc",
"arch_perfmon",
"pebs",
"bts",
"rep_good",
"nopl",
"xtopology",
"nonstop_tsc",
"aperfmperf",
"eagerfpu",
"pni",
"pclmulqdq",
"dtes64",
"monitor",
"ds_cpl",
"vmx",
"smx",
"est",
"tm2",
"ssse3",
"sdbg",
"fma",
"cx16",
"xtpr",
"pdcm",
"pcid",
"sse4_1",
"sse4_2",
"x2apic",
"movbe",
"popcnt",
"tsc_deadline_timer",
"aes",
"xsave",
"avx",
"f16c",
"rdrand",
"lahf_lm",
"abm",
"epb",
"tpr_shadow",
"vnmi",
"flexpriority",
"ept",
"vpid",
"fsgsbase",
"tsc_adjust",
"bmi1",
"avx2",
"smep",
"bmi2",
"erms",
"invpcid",
"xsaveopt",
"dtherm",
"ida",
"arat",
"pln",
"pts"
]
},
"total": 8,
"real": 1,
"cores": 4
},
"etc": {
"passwd": {
"root": {
"dir": "/root",
"gid": 0,
"uid": 0,
"shell": "/bin/bash",
"gecos": "root"
},
"bin": {
"dir": "/bin",
"gid": 1,
"uid": 1,
"shell": "/sbin/nologin",
"gecos": "bin"
},
"daemon": {
"dir": "/sbin",
"gid": 2,
"uid": 2,
"shell": "/sbin/nologin",
"gecos": "daemon"
},
"adm": {
"dir": "/var/adm",
"gid": 4,
"uid": 3,
"shell": "/sbin/nologin",
"gecos": "adm"
},
"lp": {
"dir": "/var/spool/lpd",
"gid": 7,
"uid": 4,
"shell": "/sbin/nologin",
"gecos": "lp"
},
"sync": {
"dir": "/sbin",
"gid": 0,
"uid": 5,
"shell": "/bin/sync",
"gecos": "sync"
},
"shutdown": {
"dir": "/sbin",
"gid": 0,
"uid": 6,
"shell": "/sbin/shutdown",
"gecos": "shutdown"
},
"halt": {
"dir": "/sbin",
"gid": 0,
"uid": 7,
"shell": "/sbin/halt",
"gecos": "halt"
},
"mail": {
"dir": "/var/spool/mail",
"gid": 12,
"uid": 8,
"shell": "/sbin/nologin",
"gecos": "mail"
},
"operator": {
"dir": "/root",
"gid": 0,
"uid": 11,
"shell": "/sbin/nologin",
"gecos": "operator"
},
"games": {
"dir": "/usr/games",
"gid": 100,
"uid": 12,
"shell": "/sbin/nologin",
"gecos": "games"
},
"ftp": {
"dir": "/var/ftp",
"gid": 50,
"uid": 14,
"shell": "/sbin/nologin",
"gecos": "FTP User"
},
"nobody": {
"dir": "/",
"gid": 99,
"uid": 99,
"shell": "/sbin/nologin",
"gecos": "Nobody"
},
"avahi-autoipd": {
"dir": "/var/lib/avahi-autoipd",
"gid": 170,
"uid": 170,
"shell": "/sbin/nologin",
"gecos": "Avahi IPv4LL Stack"
},
"dbus": {
"dir": "/",
"gid": 81,
"uid": 81,
"shell": "/sbin/nologin",
"gecos": "System message bus"
},
"polkitd": {
"dir": "/",
"gid": 999,
"uid": 999,
"shell": "/sbin/nologin",
"gecos": "User for polkitd"
},
"abrt": {
"dir": "/etc/abrt",
"gid": 173,
"uid": 173,
"shell": "/sbin/nologin",
"gecos": ""
},
"usbmuxd": {
"dir": "/",
"gid": 113,
"uid": 113,
"shell": "/sbin/nologin",
"gecos": "usbmuxd user"
},
"colord": {
"dir": "/var/lib/colord",
"gid": 998,
"uid": 998,
"shell": "/sbin/nologin",
"gecos": "User for colord"
},
"geoclue": {
"dir": "/var/lib/geoclue",
"gid": 997,
"uid": 997,
"shell": "/sbin/nologin",
"gecos": "User for geoclue"
},
"rpc": {
"dir": "/var/lib/rpcbind",
"gid": 32,
"uid": 32,
"shell": "/sbin/nologin",
"gecos": "Rpcbind Daemon"
},
"rpcuser": {
"dir": "/var/lib/nfs",
"gid": 29,
"uid": 29,
"shell": "/sbin/nologin",
"gecos": "RPC Service User"
},
"nfsnobody": {
"dir": "/var/lib/nfs",
"gid": 65534,
"uid": 65534,
"shell": "/sbin/nologin",
"gecos": "Anonymous NFS User"
},
"qemu": {
"dir": "/",
"gid": 107,
"uid": 107,
"shell": "/sbin/nologin",
"gecos": "qemu user"
},
"rtkit": {
"dir": "/proc",
"gid": 172,
"uid": 172,
"shell": "/sbin/nologin",
"gecos": "RealtimeKit"
},
"radvd": {
"dir": "/",
"gid": 75,
"uid": 75,
"shell": "/sbin/nologin",
"gecos": "radvd user"
},
"tss": {
"dir": "/dev/null",
"gid": 59,
"uid": 59,
"shell": "/sbin/nologin",
"gecos": "Account used by the trousers package to sandbox the tcsd daemon"
},
"unbound": {
"dir": "/etc/unbound",
"gid": 995,
"uid": 996,
"shell": "/sbin/nologin",
"gecos": "Unbound DNS resolver"
},
"openvpn": {
"dir": "/etc/openvpn",
"gid": 994,
"uid": 995,
"shell": "/sbin/nologin",
"gecos": "OpenVPN"
},
"saslauth": {
"dir": "/run/saslauthd",
"gid": 76,
"uid": 994,
"shell": "/sbin/nologin",
"gecos": "\"Saslauthd user\""
},
"avahi": {
"dir": "/var/run/avahi-daemon",
"gid": 70,
"uid": 70,
"shell": "/sbin/nologin",
"gecos": "Avahi mDNS/DNS-SD Stack"
},
"pulse": {
"dir": "/var/run/pulse",
"gid": 992,
"uid": 993,
"shell": "/sbin/nologin",
"gecos": "PulseAudio System Daemon"
},
"gdm": {
"dir": "/var/lib/gdm",
"gid": 42,
"uid": 42,
"shell": "/sbin/nologin",
"gecos": ""
},
"gnome-initial-setup": {
"dir": "/run/gnome-initial-setup/",
"gid": 990,
"uid": 992,
"shell": "/sbin/nologin",
"gecos": ""
},
"nm-openconnect": {
"dir": "/",
"gid": 989,
"uid": 991,
"shell": "/sbin/nologin",
"gecos": "NetworkManager user for OpenConnect"
},
"sshd": {
"dir": "/var/empty/sshd",
"gid": 74,
"uid": 74,
"shell": "/sbin/nologin",
"gecos": "Privilege-separated SSH"
},
"chrony": {
"dir": "/var/lib/chrony",
"gid": 988,
"uid": 990,
"shell": "/sbin/nologin",
"gecos": ""
},
"tcpdump": {
"dir": "/",
"gid": 72,
"uid": 72,
"shell": "/sbin/nologin",
"gecos": ""
},
"some_user": {
"dir": "/home/some_user",
"gid": 1000,
"uid": 1000,
"shell": "/bin/bash",
"gecos": "some_user"
},
"systemd-journal-gateway": {
"dir": "/var/log/journal",
"gid": 191,
"uid": 191,
"shell": "/sbin/nologin",
"gecos": "Journal Gateway"
},
"postgres": {
"dir": "/var/lib/pgsql",
"gid": 26,
"uid": 26,
"shell": "/bin/bash",
"gecos": "PostgreSQL Server"
},
"dockerroot": {
"dir": "/var/lib/docker",
"gid": 977,
"uid": 984,
"shell": "/sbin/nologin",
"gecos": "Docker User"
},
"apache": {
"dir": "/usr/share/httpd",
"gid": 48,
"uid": 48,
"shell": "/sbin/nologin",
"gecos": "Apache"
},
"systemd-network": {
"dir": "/",
"gid": 974,
"uid": 982,
"shell": "/sbin/nologin",
"gecos": "systemd Network Management"
},
"systemd-resolve": {
"dir": "/",
"gid": 973,
"uid": 981,
"shell": "/sbin/nologin",
"gecos": "systemd Resolver"
},
"systemd-bus-proxy": {
"dir": "/",
"gid": 972,
"uid": 980,
"shell": "/sbin/nologin",
"gecos": "systemd Bus Proxy"
},
"systemd-journal-remote": {
"dir": "//var/log/journal/remote",
"gid": 970,
"uid": 979,
"shell": "/sbin/nologin",
"gecos": "Journal Remote"
},
"systemd-journal-upload": {
"dir": "//var/log/journal/upload",
"gid": 969,
"uid": 978,
"shell": "/sbin/nologin",
"gecos": "Journal Upload"
},
"setroubleshoot": {
"dir": "/var/lib/setroubleshoot",
"gid": 967,
"uid": 977,
"shell": "/sbin/nologin",
"gecos": ""
},
"oprofile": {
"dir": "/var/lib/oprofile",
"gid": 16,
"uid": 16,
"shell": "/sbin/nologin",
"gecos": "Special user account to be used by OProfile"
}
},
"group": {
"root": {
"gid": 0,
"members": [
]
},
"bin": {
"gid": 1,
"members": [
]
},
"daemon": {
"gid": 2,
"members": [
]
},
"sys": {
"gid": 3,
"members": [
]
},
"adm": {
"gid": 4,
"members": [
"logcheck"
]
},
"tty": {
"gid": 5,
"members": [
]
},
"disk": {
"gid": 6,
"members": [
]
},
"lp": {
"gid": 7,
"members": [
]
},
"mem": {
"gid": 8,
"members": [
]
},
"kmem": {
"gid": 9,
"members": [
]
},
"wheel": {
"gid": 10,
"members": [
]
},
"cdrom": {
"gid": 11,
"members": [
]
},
"mail": {
"gid": 12,
"members": [
]
},
"man": {
"gid": 15,
"members": [
]
},
"dialout": {
"gid": 18,
"members": [
"lirc"
]
},
"floppy": {
"gid": 19,
"members": [
]
},
"games": {
"gid": 20,
"members": [
]
},
"tape": {
"gid": 30,
"members": [
]
},
"video": {
"gid": 39,
"members": [
]
},
"ftp": {
"gid": 50,
"members": [
]
},
"lock": {
"gid": 54,
"members": [
"lirc"
]
},
"audio": {
"gid": 63,
"members": [
]
},
"nobody": {
"gid": 99,
"members": [
]
},
"users": {
"gid": 100,
"members": [
]
},
"utmp": {
"gid": 22,
"members": [
]
},
"utempter": {
"gid": 35,
"members": [
]
},
"avahi-autoipd": {
"gid": 170,
"members": [
]
},
"systemd-journal": {
"gid": 190,
"members": [
]
},
"dbus": {
"gid": 81,
"members": [
]
},
"polkitd": {
"gid": 999,
"members": [
]
},
"abrt": {
"gid": 173,
"members": [
]
},
"dip": {
"gid": 40,
"members": [
]
},
"usbmuxd": {
"gid": 113,
"members": [
]
},
"colord": {
"gid": 998,
"members": [
]
},
"geoclue": {
"gid": 997,
"members": [
]
},
"ssh_keys": {
"gid": 996,
"members": [
]
},
"rpc": {
"gid": 32,
"members": [
]
},
"rpcuser": {
"gid": 29,
"members": [
]
},
"nfsnobody": {
"gid": 65534,
"members": [
]
},
"kvm": {
"gid": 36,
"members": [
"qemu"
]
},
"qemu": {
"gid": 107,
"members": [
]
},
"rtkit": {
"gid": 172,
"members": [
]
},
"radvd": {
"gid": 75,
"members": [
]
},
"tss": {
"gid": 59,
"members": [
]
},
"unbound": {
"gid": 995,
"members": [
]
},
"openvpn": {
"gid": 994,
"members": [
]
},
"saslauth": {
"gid": 76,
"members": [
]
},
"avahi": {
"gid": 70,
"members": [
]
},
"brlapi": {
"gid": 993,
"members": [
]
},
"pulse": {
"gid": 992,
"members": [
]
},
"pulse-access": {
"gid": 991,
"members": [
]
},
"gdm": {
"gid": 42,
"members": [
]
},
"gnome-initial-setup": {
"gid": 990,
"members": [
]
},
"nm-openconnect": {
"gid": 989,
"members": [
]
},
"sshd": {
"gid": 74,
"members": [
]
},
"slocate": {
"gid": 21,
"members": [
]
},
"chrony": {
"gid": 988,
"members": [
]
},
"tcpdump": {
"gid": 72,
"members": [
]
},
"some_user": {
"gid": 1000,
"members": [
"some_user"
]
},
"docker": {
"gid": 986,
"members": [
"some_user"
]
}
},
"c": {
"gcc": {
"target": "x86_64-redhat-linux",
"configured_with": "../configure --enable-bootstrap --enable-languages=c,c++,objc,obj-c++,fortran,ada,go,lto --prefix=/usr --mandir=/usr/share/man --infodir=/usr/share/info --with-bugurl=http://bugzilla.redhat.com/bugzilla --enable-shared --enable-threads=posix --enable-checking=release --enable-multilib --with-system-zlib --enable-__cxa_atexit --disable-libunwind-exceptions --enable-gnu-unique-object --enable-linker-build-id --with-linker-hash-style=gnu --enable-plugin --enable-initfini-array --disable-libgcj --with-isl --enable-libmpx --enable-gnu-indirect-function --with-tune=generic --with-arch_32=i686 --build=x86_64-redhat-linux",
"thread_model": "posix",
"description": "gcc version 6.3.1 20161221 (Red Hat 6.3.1-1) (GCC) ",
"version": "6.3.1"
},
"glibc": {
"version": "2.24",
"description": "GNU C Library (GNU libc) stable release version 2.24, by Roland McGrath et al."
}
},
"lua": {
"version": "5.3.4"
},
"ruby": {
"platform": "x86_64-linux",
"version": "2.3.3",
"release_date": "2016-11-21",
"target": "x86_64-redhat-linux-gnu",
"target_cpu": "x86_64",
"target_vendor": "redhat",
"target_os": "linux",
"host": "x86_64-redhat-linux-gnu",
"host_cpu": "x86_64",
"host_os": "linux-gnu",
"host_vendor": "redhat",
"bin_dir": "/usr/bin",
"ruby_bin": "/usr/bin/ruby",
"gems_dir": "/home/some_user/.gem/ruby",
"gem_bin": "/usr/bin/gem"
}
},
"command": {
"ps": "ps -ef"
},
"root_group": "root",
"fips": {
"kernel": {
"enabled": false
}
},
"hostname": "myhostname",
"machinename": "myhostname",
"fqdn": "myhostname",
"domain": null,
"machine_id": "1234567abcede123456123456123456a",
"privateaddress": "192.168.1.100",
"keys": {
"ssh": {
}
},
"time": {
"timezone": "EDT"
},
"sessions": {
"by_session": {
"1918": {
"session": "1918",
"uid": "1000",
"user": "some_user",
"seat": null
},
"5": {
"session": "5",
"uid": "1000",
"user": "some_user",
"seat": "seat0"
},
"3": {
"session": "3",
"uid": "0",
"user": "root",
"seat": "seat0"
}
},
"by_user": {
"some_user": [
{
"session": "1918",
"uid": "1000",
"user": "some_user",
"seat": null
},
{
"session": "5",
"uid": "1000",
"user": "some_user",
"seat": "seat0"
}
],
"root": [
{
"session": "3",
"uid": "0",
"user": "root",
"seat": "seat0"
}
]
}
},
"hostnamectl": {
"static_hostname": "myhostname",
"icon_name": "computer-laptop",
"chassis": "laptop",
"machine_id": "24dc16bd7694404c825b517ab46d9d6b",
"machine_id": "12345123451234512345123451242323",
"boot_id": "3d5d5512341234123412341234123423",
"operating_system": "Fedora 25 (Workstation Edition)",
"cpe_os_name": "cpe",
"kernel": "Linux 4.9.14-200.fc25.x86_64",
"architecture": "x86-64"
},
"block_device": {
"dm-1": {
"size": "104857600",
"removable": "0",
"rotational": "0",
"physical_block_size": "512",
"logical_block_size": "512"
},
"loop1": {
"size": "209715200",
"removable": "0",
"rotational": "1",
"physical_block_size": "512",
"logical_block_size": "512"
},
"sr0": {
"size": "2097151",
"removable": "1",
"model": "DVD-RAM UJ8E2",
"rev": "SB01",
"state": "running",
"timeout": "30",
"vendor": "MATSHITA",
"queue_depth": "1",
"rotational": "1",
"physical_block_size": "512",
"logical_block_size": "512"
},
"dm-2": {
"size": "378093568",
"removable": "0",
"rotational": "0",
"physical_block_size": "512",
"logical_block_size": "512"
},
"loop2": {
"size": "4194304",
"removable": "0",
"rotational": "1",
"physical_block_size": "512",
"logical_block_size": "512"
},
"dm-0": {
"size": "16138240",
"removable": "0",
"rotational": "0",
"physical_block_size": "512",
"logical_block_size": "512"
},
"loop0": {
"size": "1024000",
"removable": "0",
"rotational": "1",
"physical_block_size": "512",
"logical_block_size": "512"
},
"sda": {
"size": "500118192",
"removable": "0",
"model": "SAMSUNG MZ7TD256",
"rev": "2L5Q",
"state": "running",
"timeout": "30",
"vendor": "ATA",
"queue_depth": "31",
"rotational": "0",
"physical_block_size": "512",
"logical_block_size": "512"
},
"dm-5": {
"size": "20971520",
"removable": "0",
"rotational": "1",
"physical_block_size": "512",
"logical_block_size": "512"
},
"dm-3": {
"size": "209715200",
"removable": "0",
"rotational": "1",
"physical_block_size": "512",
"logical_block_size": "512"
}
},
"sysconf": {
"LINK_MAX": 65000,
"_POSIX_LINK_MAX": 65000,
"MAX_CANON": 255,
"_POSIX_MAX_CANON": 255,
"MAX_INPUT": 255,
"_POSIX_MAX_INPUT": 255,
"NAME_MAX": 255,
"_POSIX_NAME_MAX": 255,
"PATH_MAX": 4096,
"_POSIX_PATH_MAX": 4096,
"PIPE_BUF": 4096,
"_POSIX_PIPE_BUF": 4096,
"SOCK_MAXBUF": null,
"_POSIX_ASYNC_IO": null,
"_POSIX_CHOWN_RESTRICTED": 1,
"_POSIX_NO_TRUNC": 1,
"_POSIX_PRIO_IO": null,
"_POSIX_SYNC_IO": null,
"_POSIX_VDISABLE": 0,
"ARG_MAX": 2097152,
"ATEXIT_MAX": 2147483647,
"CHAR_BIT": 8,
"CHAR_MAX": 127,
"CHAR_MIN": -128,
"CHILD_MAX": 62844,
"CLK_TCK": 100,
"INT_MAX": 2147483647,
"INT_MIN": -2147483648,
"IOV_MAX": 1024,
"LOGNAME_MAX": 256,
"LONG_BIT": 64,
"MB_LEN_MAX": 16,
"NGROUPS_MAX": 65536,
"NL_ARGMAX": 4096,
"NL_LANGMAX": 2048,
"NL_MSGMAX": 2147483647,
"NL_NMAX": 2147483647,
"NL_SETMAX": 2147483647,
"NL_TEXTMAX": 2147483647,
"NSS_BUFLEN_GROUP": 1024,
"NSS_BUFLEN_PASSWD": 1024,
"NZERO": 20,
"OPEN_MAX": 1024,
"PAGESIZE": 4096,
"PAGE_SIZE": 4096,
"PASS_MAX": 8192,
"PTHREAD_DESTRUCTOR_ITERATIONS": 4,
"PTHREAD_KEYS_MAX": 1024,
"PTHREAD_STACK_MIN": 16384,
"PTHREAD_THREADS_MAX": null,
"SCHAR_MAX": 127,
"SCHAR_MIN": -128,
"SHRT_MAX": 32767,
"SHRT_MIN": -32768,
"SSIZE_MAX": 32767,
"TTY_NAME_MAX": 32,
"TZNAME_MAX": 6,
"UCHAR_MAX": 255,
"UINT_MAX": 4294967295,
"UIO_MAXIOV": 1024,
"ULONG_MAX": 18446744073709551615,
"USHRT_MAX": 65535,
"WORD_BIT": 32,
"_AVPHYS_PAGES": 955772,
"_NPROCESSORS_CONF": 8,
"_NPROCESSORS_ONLN": 8,
"_PHYS_PAGES": 4027635,
"_POSIX_ARG_MAX": 2097152,
"_POSIX_ASYNCHRONOUS_IO": 200809,
"_POSIX_CHILD_MAX": 62844,
"_POSIX_FSYNC": 200809,
"_POSIX_JOB_CONTROL": 1,
"_POSIX_MAPPED_FILES": 200809,
"_POSIX_MEMLOCK": 200809,
"_POSIX_MEMLOCK_RANGE": 200809,
"_POSIX_MEMORY_PROTECTION": 200809,
"_POSIX_MESSAGE_PASSING": 200809,
"_POSIX_NGROUPS_MAX": 65536,
"_POSIX_OPEN_MAX": 1024,
"_POSIX_PII": null,
"_POSIX_PII_INTERNET": null,
"_POSIX_PII_INTERNET_DGRAM": null,
"_POSIX_PII_INTERNET_STREAM": null,
"_POSIX_PII_OSI": null,
"_POSIX_PII_OSI_CLTS": null,
"_POSIX_PII_OSI_COTS": null,
"_POSIX_PII_OSI_M": null,
"_POSIX_PII_SOCKET": null,
"_POSIX_PII_XTI": null,
"_POSIX_POLL": null,
"_POSIX_PRIORITIZED_IO": 200809,
"_POSIX_PRIORITY_SCHEDULING": 200809,
"_POSIX_REALTIME_SIGNALS": 200809,
"_POSIX_SAVED_IDS": 1,
"_POSIX_SELECT": null,
"_POSIX_SEMAPHORES": 200809,
"_POSIX_SHARED_MEMORY_OBJECTS": 200809,
"_POSIX_SSIZE_MAX": 32767,
"_POSIX_STREAM_MAX": 16,
"_POSIX_SYNCHRONIZED_IO": 200809,
"_POSIX_THREADS": 200809,
"_POSIX_THREAD_ATTR_STACKADDR": 200809,
"_POSIX_THREAD_ATTR_STACKSIZE": 200809,
"_POSIX_THREAD_PRIORITY_SCHEDULING": 200809,
"_POSIX_THREAD_PRIO_INHERIT": 200809,
"_POSIX_THREAD_PRIO_PROTECT": 200809,
"_POSIX_THREAD_ROBUST_PRIO_INHERIT": null,
"_POSIX_THREAD_ROBUST_PRIO_PROTECT": null,
"_POSIX_THREAD_PROCESS_SHARED": 200809,
"_POSIX_THREAD_SAFE_FUNCTIONS": 200809,
"_POSIX_TIMERS": 200809,
"TIMER_MAX": null,
"_POSIX_TZNAME_MAX": 6,
"_POSIX_VERSION": 200809,
"_T_IOV_MAX": null,
"_XOPEN_CRYPT": 1,
"_XOPEN_ENH_I18N": 1,
"_XOPEN_LEGACY": 1,
"_XOPEN_REALTIME": 1,
"_XOPEN_REALTIME_THREADS": 1,
"_XOPEN_SHM": 1,
"_XOPEN_UNIX": 1,
"_XOPEN_VERSION": 700,
"_XOPEN_XCU_VERSION": 4,
"_XOPEN_XPG2": 1,
"_XOPEN_XPG3": 1,
"_XOPEN_XPG4": 1,
"BC_BASE_MAX": 99,
"BC_DIM_MAX": 2048,
"BC_SCALE_MAX": 99,
"BC_STRING_MAX": 1000,
"CHARCLASS_NAME_MAX": 2048,
"COLL_WEIGHTS_MAX": 255,
"EQUIV_CLASS_MAX": null,
"EXPR_NEST_MAX": 32,
"LINE_MAX": 2048,
"POSIX2_BC_BASE_MAX": 99,
"POSIX2_BC_DIM_MAX": 2048,
"POSIX2_BC_SCALE_MAX": 99,
"POSIX2_BC_STRING_MAX": 1000,
"POSIX2_CHAR_TERM": 200809,
"POSIX2_COLL_WEIGHTS_MAX": 255,
"POSIX2_C_BIND": 200809,
"POSIX2_C_DEV": 200809,
"POSIX2_C_VERSION": 200809,
"POSIX2_EXPR_NEST_MAX": 32,
"POSIX2_FORT_DEV": null,
"POSIX2_FORT_RUN": null,
"_POSIX2_LINE_MAX": 2048,
"POSIX2_LINE_MAX": 2048,
"POSIX2_LOCALEDEF": 200809,
"POSIX2_RE_DUP_MAX": 32767,
"POSIX2_SW_DEV": 200809,
"POSIX2_UPE": null,
"POSIX2_VERSION": 200809,
"RE_DUP_MAX": 32767,
"PATH": "/usr/bin",
"CS_PATH": "/usr/bin",
"LFS_CFLAGS": null,
"LFS_LDFLAGS": null,
"LFS_LIBS": null,
"LFS_LINTFLAGS": null,
"LFS64_CFLAGS": "-D_LARGEFILE64_SOURCE",
"LFS64_LDFLAGS": null,
"LFS64_LIBS": null,
"LFS64_LINTFLAGS": "-D_LARGEFILE64_SOURCE",
"_XBS5_WIDTH_RESTRICTED_ENVS": "XBS5_LP64_OFF64",
"XBS5_WIDTH_RESTRICTED_ENVS": "XBS5_LP64_OFF64",
"_XBS5_ILP32_OFF32": null,
"XBS5_ILP32_OFF32_CFLAGS": null,
"XBS5_ILP32_OFF32_LDFLAGS": null,
"XBS5_ILP32_OFF32_LIBS": null,
"XBS5_ILP32_OFF32_LINTFLAGS": null,
"_XBS5_ILP32_OFFBIG": null,
"XBS5_ILP32_OFFBIG_CFLAGS": null,
"XBS5_ILP32_OFFBIG_LDFLAGS": null,
"XBS5_ILP32_OFFBIG_LIBS": null,
"XBS5_ILP32_OFFBIG_LINTFLAGS": null,
"_XBS5_LP64_OFF64": 1,
"XBS5_LP64_OFF64_CFLAGS": "-m64",
"XBS5_LP64_OFF64_LDFLAGS": "-m64",
"XBS5_LP64_OFF64_LIBS": null,
"XBS5_LP64_OFF64_LINTFLAGS": null,
"_XBS5_LPBIG_OFFBIG": null,
"XBS5_LPBIG_OFFBIG_CFLAGS": null,
"XBS5_LPBIG_OFFBIG_LDFLAGS": null,
"XBS5_LPBIG_OFFBIG_LIBS": null,
"XBS5_LPBIG_OFFBIG_LINTFLAGS": null,
"_POSIX_V6_ILP32_OFF32": null,
"POSIX_V6_ILP32_OFF32_CFLAGS": null,
"POSIX_V6_ILP32_OFF32_LDFLAGS": null,
"POSIX_V6_ILP32_OFF32_LIBS": null,
"POSIX_V6_ILP32_OFF32_LINTFLAGS": null,
"_POSIX_V6_WIDTH_RESTRICTED_ENVS": "POSIX_V6_LP64_OFF64",
"POSIX_V6_WIDTH_RESTRICTED_ENVS": "POSIX_V6_LP64_OFF64",
"_POSIX_V6_ILP32_OFFBIG": null,
"POSIX_V6_ILP32_OFFBIG_CFLAGS": null,
"POSIX_V6_ILP32_OFFBIG_LDFLAGS": null,
"POSIX_V6_ILP32_OFFBIG_LIBS": null,
"POSIX_V6_ILP32_OFFBIG_LINTFLAGS": null,
"_POSIX_V6_LP64_OFF64": 1,
"POSIX_V6_LP64_OFF64_CFLAGS": "-m64",
"POSIX_V6_LP64_OFF64_LDFLAGS": "-m64",
"POSIX_V6_LP64_OFF64_LIBS": null,
"POSIX_V6_LP64_OFF64_LINTFLAGS": null,
"_POSIX_V6_LPBIG_OFFBIG": null,
"POSIX_V6_LPBIG_OFFBIG_CFLAGS": null,
"POSIX_V6_LPBIG_OFFBIG_LDFLAGS": null,
"POSIX_V6_LPBIG_OFFBIG_LIBS": null,
"POSIX_V6_LPBIG_OFFBIG_LINTFLAGS": null,
"_POSIX_V7_ILP32_OFF32": null,
"POSIX_V7_ILP32_OFF32_CFLAGS": null,
"POSIX_V7_ILP32_OFF32_LDFLAGS": null,
"POSIX_V7_ILP32_OFF32_LIBS": null,
"POSIX_V7_ILP32_OFF32_LINTFLAGS": null,
"_POSIX_V7_WIDTH_RESTRICTED_ENVS": "POSIX_V7_LP64_OFF64",
"POSIX_V7_WIDTH_RESTRICTED_ENVS": "POSIX_V7_LP64_OFF64",
"_POSIX_V7_ILP32_OFFBIG": null,
"POSIX_V7_ILP32_OFFBIG_CFLAGS": null,
"POSIX_V7_ILP32_OFFBIG_LDFLAGS": null,
"POSIX_V7_ILP32_OFFBIG_LIBS": null,
"POSIX_V7_ILP32_OFFBIG_LINTFLAGS": null,
"_POSIX_V7_LP64_OFF64": 1,
"POSIX_V7_LP64_OFF64_CFLAGS": "-m64",
"POSIX_V7_LP64_OFF64_LDFLAGS": "-m64",
"POSIX_V7_LP64_OFF64_LIBS": null,
"POSIX_V7_LP64_OFF64_LINTFLAGS": null,
"_POSIX_V7_LPBIG_OFFBIG": null,
"POSIX_V7_LPBIG_OFFBIG_CFLAGS": null,
"POSIX_V7_LPBIG_OFFBIG_LDFLAGS": null,
"POSIX_V7_LPBIG_OFFBIG_LIBS": null,
"POSIX_V7_LPBIG_OFFBIG_LINTFLAGS": null,
"_POSIX_ADVISORY_INFO": 200809,
"_POSIX_BARRIERS": 200809,
"_POSIX_BASE": null,
"_POSIX_C_LANG_SUPPORT": null,
"_POSIX_C_LANG_SUPPORT_R": null,
"_POSIX_CLOCK_SELECTION": 200809,
"_POSIX_CPUTIME": 200809,
"_POSIX_THREAD_CPUTIME": 200809,
"_POSIX_DEVICE_SPECIFIC": null,
"_POSIX_DEVICE_SPECIFIC_R": null,
"_POSIX_FD_MGMT": null,
"_POSIX_FIFO": null,
"_POSIX_PIPE": null,
"_POSIX_FILE_ATTRIBUTES": null,
"_POSIX_FILE_LOCKING": null,
"_POSIX_FILE_SYSTEM": null,
"_POSIX_MONOTONIC_CLOCK": 200809,
"_POSIX_MULTI_PROCESS": null,
"_POSIX_SINGLE_PROCESS": null,
"_POSIX_NETWORKING": null,
"_POSIX_READER_WRITER_LOCKS": 200809,
"_POSIX_SPIN_LOCKS": 200809,
"_POSIX_REGEXP": 1,
"_REGEX_VERSION": null,
"_POSIX_SHELL": 1,
"_POSIX_SIGNALS": null,
"_POSIX_SPAWN": 200809,
"_POSIX_SPORADIC_SERVER": null,
"_POSIX_THREAD_SPORADIC_SERVER": null,
"_POSIX_SYSTEM_DATABASE": null,
"_POSIX_SYSTEM_DATABASE_R": null,
"_POSIX_TIMEOUTS": 200809,
"_POSIX_TYPED_MEMORY_OBJECTS": null,
"_POSIX_USER_GROUPS": null,
"_POSIX_USER_GROUPS_R": null,
"POSIX2_PBS": null,
"POSIX2_PBS_ACCOUNTING": null,
"POSIX2_PBS_LOCATE": null,
"POSIX2_PBS_TRACK": null,
"POSIX2_PBS_MESSAGE": null,
"SYMLOOP_MAX": null,
"STREAM_MAX": 16,
"AIO_LISTIO_MAX": null,
"AIO_MAX": null,
"AIO_PRIO_DELTA_MAX": 20,
"DELAYTIMER_MAX": 2147483647,
"HOST_NAME_MAX": 64,
"LOGIN_NAME_MAX": 256,
"MQ_OPEN_MAX": null,
"MQ_PRIO_MAX": 32768,
"_POSIX_DEVICE_IO": null,
"_POSIX_TRACE": null,
"_POSIX_TRACE_EVENT_FILTER": null,
"_POSIX_TRACE_INHERIT": null,
"_POSIX_TRACE_LOG": null,
"RTSIG_MAX": 32,
"SEM_NSEMS_MAX": null,
"SEM_VALUE_MAX": 2147483647,
"SIGQUEUE_MAX": 62844,
"FILESIZEBITS": 64,
"POSIX_ALLOC_SIZE_MIN": 4096,
"POSIX_REC_INCR_XFER_SIZE": null,
"POSIX_REC_MAX_XFER_SIZE": null,
"POSIX_REC_MIN_XFER_SIZE": 4096,
"POSIX_REC_XFER_ALIGN": 4096,
"SYMLINK_MAX": null,
"GNU_LIBC_VERSION": "glibc 2.24",
"GNU_LIBPTHREAD_VERSION": "NPTL 2.24",
"POSIX2_SYMLINKS": 1,
"LEVEL1_ICACHE_SIZE": 32768,
"LEVEL1_ICACHE_ASSOC": 8,
"LEVEL1_ICACHE_LINESIZE": 64,
"LEVEL1_DCACHE_SIZE": 32768,
"LEVEL1_DCACHE_ASSOC": 8,
"LEVEL1_DCACHE_LINESIZE": 64,
"LEVEL2_CACHE_SIZE": 262144,
"LEVEL2_CACHE_ASSOC": 8,
"LEVEL2_CACHE_LINESIZE": 64,
"LEVEL3_CACHE_SIZE": 6291456,
"LEVEL3_CACHE_ASSOC": 12,
"LEVEL3_CACHE_LINESIZE": 64,
"LEVEL4_CACHE_SIZE": 0,
"LEVEL4_CACHE_ASSOC": 0,
"LEVEL4_CACHE_LINESIZE": 0,
"IPV6": 200809,
"RAW_SOCKETS": 200809,
"_POSIX_IPV6": 200809,
"_POSIX_RAW_SOCKETS": 200809
},
"init_package": "systemd",
"shells": [
"/bin/sh",
"/bin/bash",
"/sbin/nologin",
"/usr/bin/sh",
"/usr/bin/bash",
"/usr/sbin/nologin",
"/usr/bin/zsh",
"/bin/zsh"
],
"ohai_time": 1492535225.41052,
"cloud_v2": null,
"cloud": null
}
''' # noqa
class TestOhaiCollector(BaseFactsTest):
__test__ = True
gather_subset = ['!all', 'ohai']
valid_subsets = ['ohai']
fact_namespace = 'ansible_ohai'
collector_class = OhaiFactCollector
def _mock_module(self):
mock_module = Mock()
mock_module.params = {'gather_subset': self.gather_subset,
'gather_timeout': 10,
'filter': '*'}
mock_module.get_bin_path = Mock(return_value='/not/actually/ohai')
mock_module.run_command = Mock(return_value=(0, ohai_json_output, ''))
return mock_module
@patch('ansible.module_utils.facts.other.ohai.OhaiFactCollector.get_ohai_output')
def test_bogus_json(self, mock_get_ohai_output):
module = self._mock_module()
# bogus json
mock_get_ohai_output.return_value = '{'
fact_collector = self.collector_class()
facts_dict = fact_collector.collect(module=module)
self.assertIsInstance(facts_dict, dict)
self.assertEqual(facts_dict, {})
@patch('ansible.module_utils.facts.other.ohai.OhaiFactCollector.run_ohai')
def test_ohai_non_zero_return_code(self, mock_run_ohai):
module = self._mock_module()
# valid json, but a non-zero ohai return code
mock_run_ohai.return_value = (1, '{}', '')
fact_collector = self.collector_class()
facts_dict = fact_collector.collect(module=module)
self.assertIsInstance(facts_dict, dict)
# This assumes that having no 'ohai' entry at all is the correct behavior
self.assertNotIn('ohai', facts_dict)
self.assertEqual(facts_dict, {})
|
htzy/bigfour | refs/heads/master | common/djangoapps/student/migrations/0009_auto__del_courseregistration__add_courseenrollment.py | 188 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'CourseRegistration'
db.delete_table('student_courseregistration')
# Adding model 'CourseEnrollment'
db.create_table('student_courseenrollment', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['auth.User'], unique=True)),
('course_id', self.gf('django.db.models.fields.IntegerField')()),
))
db.send_create_signal('student', ['CourseEnrollment'])
def backwards(self, orm):
# Adding model 'CourseRegistration'
db.create_table('student_courseregistration', (
('course_id', self.gf('django.db.models.fields.IntegerField')()),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['auth.User'], unique=True)),
))
db.send_create_signal('student', ['CourseRegistration'])
# Deleting model 'CourseEnrollment'
db.delete_table('student_courseenrollment')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'student.courseenrollment': {
'Meta': {'object_name': 'CourseEnrollment'},
'course_id': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.pendingemailchange': {
'Meta': {'object_name': 'PendingEmailChange'},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_email': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.pendingnamechange': {
'Meta': {'object_name': 'PendingNameChange'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'rationale': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.registration': {
'Meta': {'object_name': 'Registration', 'db_table': "'auth_registration'"},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.userprofile': {
'Meta': {'object_name': 'UserProfile', 'db_table': "'auth_userprofile'"},
'courseware': ('django.db.models.fields.CharField', [], {'default': "'course.xml'", 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'meta': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"})
},
'student.usertestgroup': {
'Meta': {'object_name': 'UserTestGroup'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'db_index': 'True', 'symmetrical': 'False'})
}
}
complete_apps = ['student']
|
runekaagaard/django-contrib-locking | refs/heads/master | tests/migrations/test_migrations_squashed_complex_multi_apps/app2/1_squashed_2.py | 385 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
replaces = [
("app2", "1_auto"),
("app2", "2_auto"),
]
dependencies = [("app1", "1_auto")]
operations = [
migrations.RunPython(migrations.RunPython.noop)
]
|
renatopp/vindinium-python | refs/heads/master | vindinium/utils/timer.py | 1 | import time
__all__ = ['Timer']
class Timer(object):
'''Timer helper.
A timer object measures how much time a block of code takes to execute.
Example:
You can use this class in two ways. First as a with statement::
with Timer() as timer:
# your code here
print timer.elapsed
Note that you can pass ``True`` to the Timer constructor to make it
print the elapsed time when the ``with`` block finishes.
Alternatively, you can use Timer like the tic toc functions of Matlab::
timer = Timer()
timer.tic()
# your code here
print timer.toc()
Attributes:
elapsed (float): the elapsed time between ``tic()`` and ``toc()``.
'''
def __init__(self, do_print=False):
'''Constructor.
Args:
do_print (bool): whether timer should print the result after
``with`` ends or not. Default to False.
'''
self._do_print = do_print
self._start_time = 0
self.elapsed = 0
def __enter__(self):
'''Enters with'''
self.tic()
return self
def __exit__(self, type, value, traceback):
'''Leaves with'''
self.toc()
if self._do_print:
print 'Elapsed time is %f seconds.' % self.elapsed
def tic(self):
'''Start the timer.'''
self._start_time = time.time()
def toc(self):
'''Stops the timer and returns the elapsed time.
Returns:
(float) the elapsed time.
'''
self.elapsed = time.time() - self._start_time
return self.elapsed
|
steeve/xbmctorrent | refs/heads/master | resources/site-packages/xbmcswift2/mockxbmc/xbmc.py | 20 | import tempfile
import os, errno
from xbmcswift2 import log
from xbmcswift2.cli.create import get_value
TEMP_DIR = os.path.join(tempfile.gettempdir(), 'xbmcswift2_debug')
log.info('Using temp directory %s', TEMP_DIR)
def _create_dir(path):
'''Creates necessary directories for the given path or does nothing
if the directories already exist.
'''
try:
os.makedirs(path)
except OSError, exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
def log(msg, level=0):
levels = [
'LOGDEBUG',
'LOGINFO',
'LOGNOTICE',
'LOGWARNING',
'LOGERROR',
'LOGSEVERE',
'LOGFATAL',
'LOGNONE',
]
#print '%s - %s' % (levels[level], msg)
def translatePath(path):
'''Creates folders in the OS's temp directory. Doesn't touch any
possible XBMC installation on the machine. It does as little work as
possible so that this function works seamlessly.
'''
valid_dirs = ['xbmc', 'home', 'temp', 'masterprofile', 'profile',
'subtitles', 'userdata', 'database', 'thumbnails', 'recordings',
'screenshots', 'musicplaylists', 'videoplaylists', 'cdrips', 'skin',
]
assert path.startswith('special://'), 'Not a valid special:// path.'
parts = path.split('/')[2:]
assert len(parts) > 1, 'Need at least a single root directory'
assert parts[0] in valid_dirs, '%s is not a valid root dir.' % parts[0]
# We don't want to swallow any potential IOErrors here, so only makedir for
# the root dir, the user is responsible for making any further child dirs
_create_dir(os.path.join(TEMP_DIR, parts[0]))
return os.path.join(TEMP_DIR, *parts)
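# A minimal usage sketch (hypothetical path; the behavior follows directly
# from the code above):
#
#   translatePath('special://profile/settings.xml')
#   # -> os.path.join(TEMP_DIR, 'profile', 'settings.xml'), creating the
#   # 'profile' root directory under TEMP_DIR if it does not exist yet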
class Keyboard(object):
def __init__(self, default='', heading='', hidden=False):
self._heading = heading
self._default = default
self._hidden = hidden
self._confirmed = False
self._input = None
def setDefault(self, default):
self._default = default
def setHeading(self, heading):
self._heading = heading
def setHiddenInput(self, hidden):
self._hidden = hidden
def doModal(self):
self._confirmed = False
try:
self._input = get_value(self._heading, self._default, hidden=self._hidden)
self._confirmed = True
except (KeyboardInterrupt, EOFError):
pass
def isConfirmed(self):
return self._confirmed
def getText(self):
return self._input
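# A usage sketch mirroring the xbmc.Keyboard API this mock stands in for
# (method names taken from the class above):
#
#   kb = Keyboard(default='', heading='Enter a name')
#   kb.doModal()
#   if kb.isConfirmed():
#       text = kb.getText()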
|
tantexian/sps-2014-12-4 | refs/heads/master | tools/install_venv_common.py | 166 | # Copyright 2013 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Provides methods needed by installation script for OpenStack development
virtual environments.
Since this script is used to bootstrap a virtualenv from the system's Python
environment, it should be kept strictly compatible with Python 2.6.
Synced in from openstack-common
"""
from __future__ import print_function
import optparse
import os
import subprocess
import sys
class InstallVenv(object):
def __init__(self, root, venv, requirements,
test_requirements, py_version,
project):
self.root = root
self.venv = venv
self.requirements = requirements
self.test_requirements = test_requirements
self.py_version = py_version
self.project = project
def die(self, message, *args):
print(message % args, file=sys.stderr)
sys.exit(1)
def check_python_version(self):
if sys.version_info < (2, 6):
self.die("Need Python Version >= 2.6")
def run_command_with_code(self, cmd, redirect_output=True,
check_exit_code=True):
"""Runs a command in an out-of-process shell.
Returns the output of that command. Working directory is self.root.
"""
if redirect_output:
stdout = subprocess.PIPE
else:
stdout = None
proc = subprocess.Popen(cmd, cwd=self.root, stdout=stdout)
output = proc.communicate()[0]
if check_exit_code and proc.returncode != 0:
self.die('Command "%s" failed.\n%s', ' '.join(cmd), output)
return (output, proc.returncode)
def run_command(self, cmd, redirect_output=True, check_exit_code=True):
return self.run_command_with_code(cmd, redirect_output,
check_exit_code)[0]
def get_distro(self):
if (os.path.exists('/etc/fedora-release') or
os.path.exists('/etc/redhat-release')):
return Fedora(
self.root, self.venv, self.requirements,
self.test_requirements, self.py_version, self.project)
else:
return Distro(
self.root, self.venv, self.requirements,
self.test_requirements, self.py_version, self.project)
def check_dependencies(self):
self.get_distro().install_virtualenv()
def create_virtualenv(self, no_site_packages=True):
"""Creates the virtual environment and installs PIP.
Creates the virtual environment and installs PIP only into the
virtual environment.
"""
if not os.path.isdir(self.venv):
print('Creating venv...', end=' ')
if no_site_packages:
self.run_command(['virtualenv', '-q', '--no-site-packages',
self.venv])
else:
self.run_command(['virtualenv', '-q', self.venv])
print('done.')
else:
print("venv already exists...")
pass
def pip_install(self, *args):
self.run_command(['tools/with_venv.sh',
'pip', 'install', '--upgrade'] + list(args),
redirect_output=False)
def install_dependencies(self):
print('Installing dependencies with pip (this can take a while)...')
# First things first, make sure our venv has the latest pip and
# setuptools and pbr
self.pip_install('pip>=1.4')
self.pip_install('setuptools')
self.pip_install('pbr')
self.pip_install('-r', self.requirements, '-r', self.test_requirements)
def parse_args(self, argv):
"""Parses command-line arguments."""
parser = optparse.OptionParser()
parser.add_option('-n', '--no-site-packages',
action='store_true',
help="Do not inherit packages from global Python "
"install")
return parser.parse_args(argv[1:])[0]
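# A hypothetical driver for this class (a sketch; real install scripts
# supply their own paths, Python version and project name):
#
#   venv = InstallVenv('.', '.venv', 'requirements.txt',
#                      'test-requirements.txt', 'python2.7', 'example')
#   venv.check_python_version()
#   venv.check_dependencies()
#   venv.create_virtualenv()
#   venv.install_dependencies()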
class Distro(InstallVenv):
def check_cmd(self, cmd):
return bool(self.run_command(['which', cmd],
check_exit_code=False).strip())
def install_virtualenv(self):
if self.check_cmd('virtualenv'):
return
if self.check_cmd('easy_install'):
print('Installing virtualenv via easy_install...', end=' ')
if self.run_command(['easy_install', 'virtualenv']):
print('Succeeded')
return
else:
print('Failed')
self.die('ERROR: virtualenv not found.\n\n%s development'
' requires virtualenv, please install it using your'
' favorite package management tool' % self.project)
class Fedora(Distro):
"""This covers all Fedora-based distributions.
Includes: Fedora, RHEL, CentOS, Scientific Linux
"""
def check_pkg(self, pkg):
return self.run_command_with_code(['rpm', '-q', pkg],
check_exit_code=False)[1] == 0
def install_virtualenv(self):
if self.check_cmd('virtualenv'):
return
if not self.check_pkg('python-virtualenv'):
self.die("Please install 'python-virtualenv'.")
super(Fedora, self).install_virtualenv()
|
NusaCode/NusaAccounting | refs/heads/master | atk/attributes/fck/editor/filemanager/connectors/py/upload.py | 53 | #!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
This is the "File Uploader" for Python
"""
import os
from fckutil import *
from fckcommands import * # default command's implementation
from fckconnector import FCKeditorConnectorBase # import base connector
import config as Config
class FCKeditorQuickUpload( FCKeditorConnectorBase,
UploadFileCommandMixin,
BaseHttpMixin, BaseHtmlMixin):
def doResponse(self):
"Main function. Process the request, set headers and return a string as response."
# Check if this connector is disabled
if not(Config.Enabled):
return self.sendUploadResults(1, "This file uploader is disabled. Please check the \"editor/filemanager/connectors/py/config.py\"")
command = 'QuickUpload'
# The file type (from the QueryString, by default 'File').
resourceType = self.request.get('Type','File')
currentFolder = getCurrentFolder(self.request.get("CurrentFolder",""))
# Check for invalid paths
if currentFolder is None:
return self.sendUploadResults(102, '', '', "")
# Check if it is an allowed command
if ( not command in Config.ConfigAllowedCommands ):
return self.sendUploadResults( 1, '', '', 'The %s command isn\'t allowed' % command )
if ( not resourceType in Config.ConfigAllowedTypes ):
return self.sendUploadResults( 1, '', '', 'Invalid type specified' )
# Setup paths
self.userFilesFolder = Config.QuickUploadAbsolutePath[resourceType]
self.webUserFilesFolder = Config.QuickUploadPath[resourceType]
if not self.userFilesFolder: # no absolute path given (dangerous...)
self.userFilesFolder = mapServerPath(self.environ,
self.webUserFilesFolder)
# Ensure that the directory exists.
if not os.path.exists(self.userFilesFolder):
try:
self.createServerFolder( self.userFilesFolder )
except:
return self.sendError(1, "This connector couldn\'t access the local user\'s files directories. Please check the UserFilesAbsolutePath in \"editor/filemanager/connectors/py/config.py\" and try again.")
# File upload doesn't have to return XML, so intercept here
return self.uploadFile(resourceType, currentFolder)
# Running from command line (plain old CGI)
if __name__ == '__main__':
try:
# Create a Connector Instance
conn = FCKeditorQuickUpload()
data = conn.doResponse()
for header in conn.headers:
if header is not None:
print '%s: %s' % header
print
print data
except:
print "Content-Type: text/plain"
print
import cgi
cgi.print_exception()
|
fengzhyuan/scikit-learn | refs/heads/master | examples/covariance/plot_robust_vs_empirical_covariance.py | 248 | r"""
=======================================
Robust vs Empirical covariance estimate
=======================================
The usual covariance maximum likelihood estimate is very sensitive to the
presence of outliers in the data set. In such a case, it would be better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set.
Minimum Covariance Determinant Estimator
----------------------------------------
The Minimum Covariance Determinant estimator is a robust, high-breakdown point
(i.e. it can be used to estimate the covariance matrix of highly contaminated
datasets, up to
:math:`\frac{n_\text{samples} - n_\text{features}-1}{2}` outliers) estimator of
covariance. The idea is to find
:math:`\frac{n_\text{samples} + n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant, yielding
a "pure" subset of observations from which to compute standards estimates of
location and covariance. After a correction step aiming at compensating the
fact that the estimates were learned from only a portion of the initial data,
we end up with robust estimates of the data set location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced by
P.J.Rousseuw in [1]_.
Evaluation
----------
In this example, we compare the estimation errors that are made when using
various types of location and covariance estimates on contaminated Gaussian
distributed data sets:
- The mean and the empirical covariance of the full dataset, which break
down as soon as there are outliers in the data set
- The robust MCD, that has a low error provided
:math:`n_\text{samples} > 5n_\text{features}`
- The mean and the empirical covariance of the observations that are known
to be good ones. This can be considered a "perfect" MCD estimation,
so one can trust our implementation by comparing to this case.
References
----------
.. [1] P. J. Rousseeuw. Least median of squares regression. J. Am
Stat Ass, 79:871, 1984.
.. [2] Johanna Hardin, David M Rocke. Journal of Computational and
Graphical Statistics. December 1, 2005, 14(4): 928-946.
.. [3] Zoubir A., Koivunen V., Chakhchoukh Y. and Muma M. (2012). Robust
estimation in signal processing: A tutorial-style treatment of
fundamental concepts. IEEE Signal Processing Magazine 29(4), 61-80.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.covariance import EmpiricalCovariance, MinCovDet
# example settings
n_samples = 80
n_features = 5
repeat = 10
range_n_outliers = np.concatenate(
(np.linspace(0, n_samples / 8, 5),
np.linspace(n_samples / 8, n_samples / 2, 5)[1:-1]))
# definition of arrays to store results
err_loc_mcd = np.zeros((range_n_outliers.size, repeat))
err_cov_mcd = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_full = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_full = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_pure = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_pure = np.zeros((range_n_outliers.size, repeat))
# computation
for i, n_outliers in enumerate(range_n_outliers):
for j in range(repeat):
rng = np.random.RandomState(i * j)
# generate data
X = rng.randn(n_samples, n_features)
# add some outliers
outliers_index = rng.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(rng.randint(2, size=(n_outliers, n_features)) - 0.5)
X[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
mcd = MinCovDet().fit(X)
# compare raw robust estimates with the true location and covariance
err_loc_mcd[i, j] = np.sum(mcd.location_ ** 2)
err_cov_mcd[i, j] = mcd.error_norm(np.eye(n_features))
# compare estimators learned from the full data set with true
# parameters
err_loc_emp_full[i, j] = np.sum(X.mean(0) ** 2)
err_cov_emp_full[i, j] = EmpiricalCovariance().fit(X).error_norm(
np.eye(n_features))
# compare with an empirical covariance learned from a pure data set
# (i.e. "perfect" mcd)
pure_X = X[inliers_mask]
pure_location = pure_X.mean(0)
pure_emp_cov = EmpiricalCovariance().fit(pure_X)
err_loc_emp_pure[i, j] = np.sum(pure_location ** 2)
err_cov_emp_pure[i, j] = pure_emp_cov.error_norm(np.eye(n_features))
# Display results
font_prop = matplotlib.font_manager.FontProperties(size=11)
plt.subplot(2, 1, 1)
plt.errorbar(range_n_outliers, err_loc_mcd.mean(1),
yerr=err_loc_mcd.std(1) / np.sqrt(repeat),
label="Robust location", color='m')
plt.errorbar(range_n_outliers, err_loc_emp_full.mean(1),
yerr=err_loc_emp_full.std(1) / np.sqrt(repeat),
label="Full data set mean", color='green')
plt.errorbar(range_n_outliers, err_loc_emp_pure.mean(1),
yerr=err_loc_emp_pure.std(1) / np.sqrt(repeat),
label="Pure data set mean", color='black')
plt.title("Influence of outliers on the location estimation")
plt.ylabel(r"Error ($||\mu - \hat{\mu}||_2^2$)")
plt.legend(loc="upper left", prop=font_prop)
plt.subplot(2, 1, 2)
x_size = range_n_outliers.size
plt.errorbar(range_n_outliers, err_cov_mcd.mean(1),
yerr=err_cov_mcd.std(1),
label="Robust covariance (mcd)", color='m')
plt.errorbar(range_n_outliers[:(x_size / 5 + 1)],
err_cov_emp_full.mean(1)[:(x_size / 5 + 1)],
yerr=err_cov_emp_full.std(1)[:(x_size / 5 + 1)],
label="Full data set empirical covariance", color='green')
plt.plot(range_n_outliers[(x_size / 5):(x_size / 2 - 1)],
err_cov_emp_full.mean(1)[(x_size / 5):(x_size / 2 - 1)], color='green',
ls='--')
plt.errorbar(range_n_outliers, err_cov_emp_pure.mean(1),
yerr=err_cov_emp_pure.std(1),
label="Pure data set empirical covariance", color='black')
plt.title("Influence of outliers on the covariance estimation")
plt.xlabel("Amount of contamination (%)")
plt.ylabel("RMSE")
plt.legend(loc="upper center", prop=font_prop)
plt.show()
|
mittya/duoclub | refs/heads/master | duoclub/photos/apps.py | 1 | # -*- coding: utf-8 -*-
from django.apps import AppConfig
class PhotosConfig(AppConfig):
name = 'photos'
verbose_name = '相册列表'
|
holmes/intellij-community | refs/heads/master | python/testData/formatter/wrapBeforeElse_after.py | 79 | id = 1 if looooooooooooooooooooooooong_vaaaaaaaaaaaaaaaar == 'loooooooooooooooong_vaaaaaaaaaaaaaaaaaaaaaaaaaalue' else \
list('foo')[0]
|
jank3/django | refs/heads/master | django/contrib/gis/gdal/raster/const.py | 238 | """
GDAL - Constant definitions
"""
from ctypes import (
c_byte, c_double, c_float, c_int16, c_int32, c_uint16, c_uint32,
)
# See http://www.gdal.org/gdal_8h.html#a22e22ce0a55036a96f652765793fb7a4
GDAL_PIXEL_TYPES = {
0: 'GDT_Unknown', # Unknown or unspecified type
1: 'GDT_Byte', # Eight bit unsigned integer
2: 'GDT_UInt16', # Sixteen bit unsigned integer
3: 'GDT_Int16', # Sixteen bit signed integer
4: 'GDT_UInt32', # Thirty-two bit unsigned integer
5: 'GDT_Int32', # Thirty-two bit signed integer
6: 'GDT_Float32', # Thirty-two bit floating point
7: 'GDT_Float64', # Sixty-four bit floating point
8: 'GDT_CInt16', # Complex Int16
9: 'GDT_CInt32', # Complex Int32
10: 'GDT_CFloat32', # Complex Float32
11: 'GDT_CFloat64', # Complex Float64
}
# A list of gdal datatypes that are integers.
GDAL_INTEGER_TYPES = [1, 2, 3, 4, 5]
# Lookup values to convert GDAL pixel type indices into ctypes objects.
# The GDAL band-io works with ctypes arrays to hold data to be written
# or to hold the space for data to be read into. The lookup below helps
# selecting the right ctypes object for a given gdal pixel type.
GDAL_TO_CTYPES = [
None, c_byte, c_uint16, c_int16, c_uint32, c_int32,
c_float, c_double, None, None, None, None
]
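# For example, a band of pixel type GDT_Float32 (index 6 above) maps to
# ctypes.c_float, so a read buffer could be allocated as (a sketch;
# ``width`` and ``height`` stand for the band dimensions):
#
#   buf_type = GDAL_TO_CTYPES[6]            # c_float
#   buf = (buf_type * (width * height))()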
# List of resampling algorithms that can be used to warp a GDALRaster.
GDAL_RESAMPLE_ALGORITHMS = {
'NearestNeighbour': 0,
'Bilinear': 1,
'Cubic': 2,
'CubicSpline': 3,
'Lanczos': 4,
'Average': 5,
'Mode': 6,
}
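# These names are meant to be passed as the resampling argument when
# warping a raster (a sketch, assuming the GDALRaster.warp API from
# django.contrib.gis.gdal):
#
#   warped = raster.warp({'width': 100, 'height': 100},
#                        resampling='Bilinear')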
|
ubc/edx-platform | refs/heads/release | openedx/core/djangoapps/course_groups/views.py | 67 | """
Views related to course groups functionality.
"""
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.decorators.http import require_POST
from django.contrib.auth.models import User
from django.core.paginator import Paginator, EmptyPage
from django.core.urlresolvers import reverse
from django.http import Http404, HttpResponseBadRequest
from django.views.decorators.http import require_http_methods
from util.json_request import expect_json, JsonResponse
from django.contrib.auth.decorators import login_required
from django.utils.translation import ugettext
import logging
import re
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from courseware.courses import get_course_with_access
from edxmako.shortcuts import render_to_response
from . import cohorts
from lms.djangoapps.django_comment_client.utils import get_discussion_category_map, get_discussion_categories_ids
from .models import CourseUserGroup, CourseUserGroupPartitionGroup
log = logging.getLogger(__name__)
def json_http_response(data):
"""
Return an HttpResponse with the data json-serialized and the right content
type header.
"""
return JsonResponse(data)
def split_by_comma_and_whitespace(cstr):
"""
Split a string both by commas and whitespace. Returns a list.
"""
return re.split(r'[\s,]+', cstr)
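# For example: split_by_comma_and_whitespace('a, b  c') == ['a', 'b', 'c']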
def link_cohort_to_partition_group(cohort, partition_id, group_id):
"""
Create cohort to partition_id/group_id link.
"""
CourseUserGroupPartitionGroup(
course_user_group=cohort,
partition_id=partition_id,
group_id=group_id,
).save()
def unlink_cohort_partition_group(cohort):
"""
Remove any existing cohort to partition_id/group_id link.
"""
CourseUserGroupPartitionGroup.objects.filter(course_user_group=cohort).delete()
# pylint: disable=invalid-name
def _get_course_cohort_settings_representation(course, course_cohort_settings):
"""
Returns a JSON representation of a course cohort settings.
"""
cohorted_course_wide_discussions, cohorted_inline_discussions = get_cohorted_discussions(
course, course_cohort_settings
)
return {
'id': course_cohort_settings.id,
'is_cohorted': course_cohort_settings.is_cohorted,
'cohorted_inline_discussions': cohorted_inline_discussions,
'cohorted_course_wide_discussions': cohorted_course_wide_discussions,
'always_cohort_inline_discussions': course_cohort_settings.always_cohort_inline_discussions,
}
def _get_cohort_representation(cohort, course):
"""
Returns a JSON representation of a cohort.
"""
group_id, partition_id = cohorts.get_group_info_for_cohort(cohort)
assignment_type = cohorts.get_assignment_type(cohort)
return {
'name': cohort.name,
'id': cohort.id,
'user_count': cohort.users.count(),
'assignment_type': assignment_type,
'user_partition_id': partition_id,
'group_id': group_id,
}
def get_cohorted_discussions(course, course_settings):
"""
Returns the course-wide and inline cohorted discussion ids separately.
"""
cohorted_course_wide_discussions = []
cohorted_inline_discussions = []
course_wide_discussions = [topic['id'] for __, topic in course.discussion_topics.items()]
all_discussions = get_discussion_categories_ids(course, None, include_all=True)
for cohorted_discussion_id in course_settings.cohorted_discussions:
if cohorted_discussion_id in course_wide_discussions:
cohorted_course_wide_discussions.append(cohorted_discussion_id)
elif cohorted_discussion_id in all_discussions:
cohorted_inline_discussions.append(cohorted_discussion_id)
return cohorted_course_wide_discussions, cohorted_inline_discussions
@require_http_methods(("GET", "PATCH"))
@ensure_csrf_cookie
@expect_json
@login_required
def course_cohort_settings_handler(request, course_key_string):
"""
The restful handler for cohort setting requests. Requires JSON.
This will raise 404 if user is not staff.
GET
Returns the JSON representation of cohort settings for the course.
PATCH
Updates the cohort settings for the course. Returns the JSON representation of updated settings.
"""
course_key = CourseKey.from_string(course_key_string)
course = get_course_with_access(request.user, 'staff', course_key)
cohort_settings = cohorts.get_course_cohort_settings(course_key)
if request.method == 'PATCH':
cohorted_course_wide_discussions, cohorted_inline_discussions = get_cohorted_discussions(
course, cohort_settings
)
settings_to_change = {}
if 'is_cohorted' in request.json:
settings_to_change['is_cohorted'] = request.json.get('is_cohorted')
if 'cohorted_course_wide_discussions' in request.json or 'cohorted_inline_discussions' in request.json:
cohorted_course_wide_discussions = request.json.get(
'cohorted_course_wide_discussions', cohorted_course_wide_discussions
)
cohorted_inline_discussions = request.json.get(
'cohorted_inline_discussions', cohorted_inline_discussions
)
settings_to_change['cohorted_discussions'] = cohorted_course_wide_discussions + cohorted_inline_discussions
if 'always_cohort_inline_discussions' in request.json:
settings_to_change['always_cohort_inline_discussions'] = request.json.get(
'always_cohort_inline_discussions'
)
if not settings_to_change:
return JsonResponse({"error": unicode("Bad Request")}, 400)
try:
cohort_settings = cohorts.set_course_cohort_settings(
course_key, **settings_to_change
)
except ValueError as err:
# Note: error message not translated because it is not exposed to the user (UI prevents this state).
return JsonResponse({"error": unicode(err)}, 400)
return JsonResponse(_get_course_cohort_settings_representation(course, cohort_settings))
@require_http_methods(("GET", "PUT", "POST", "PATCH"))
@ensure_csrf_cookie
@expect_json
@login_required
def cohort_handler(request, course_key_string, cohort_id=None):
"""
The restful handler for cohort requests. Requires JSON.
GET
If a cohort ID is specified, returns a JSON representation of the cohort
(name, id, user_count, assignment_type, user_partition_id, group_id).
If no cohort ID is specified, returns the JSON representation of all cohorts.
This is returned as a dict with the list of cohort information stored under the
key `cohorts`.
PUT or POST or PATCH
If a cohort ID is specified, updates the cohort with the specified ID. Currently the only
properties that can be updated are `name`, `user_partition_id` and `group_id`.
Returns the JSON representation of the updated cohort.
If no cohort ID is specified, creates a new cohort and returns the JSON representation of the updated
cohort.
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_key_string)
course = get_course_with_access(request.user, 'staff', course_key)
if request.method == 'GET':
if not cohort_id:
all_cohorts = [
_get_cohort_representation(c, course)
for c in cohorts.get_course_cohorts(course)
]
return JsonResponse({'cohorts': all_cohorts})
else:
cohort = cohorts.get_cohort_by_id(course_key, cohort_id)
return JsonResponse(_get_cohort_representation(cohort, course))
else:
name = request.json.get('name')
assignment_type = request.json.get('assignment_type')
if not name:
# Note: error message not translated because it is not exposed to the user (UI prevents this state).
return JsonResponse({"error": "Cohort name must be specified."}, 400)
if not assignment_type:
# Note: error message not translated because it is not exposed to the user (UI prevents this state).
return JsonResponse({"error": "Assignment type must be specified."}, 400)
# If cohort_id is specified, update the existing cohort. Otherwise, create a new cohort.
if cohort_id:
cohort = cohorts.get_cohort_by_id(course_key, cohort_id)
if name != cohort.name:
if cohorts.is_cohort_exists(course_key, name):
err_msg = ugettext("A cohort with the same name already exists.")
return JsonResponse({"error": unicode(err_msg)}, 400)
cohort.name = name
cohort.save()
try:
cohorts.set_assignment_type(cohort, assignment_type)
except ValueError as err:
return JsonResponse({"error": unicode(err)}, 400)
else:
try:
cohort = cohorts.add_cohort(course_key, name, assignment_type)
except ValueError as err:
return JsonResponse({"error": unicode(err)}, 400)
group_id = request.json.get('group_id')
if group_id is not None:
user_partition_id = request.json.get('user_partition_id')
if user_partition_id is None:
# Note: error message not translated because it is not exposed to the user (UI prevents this state).
return JsonResponse(
{"error": "If group_id is specified, user_partition_id must also be specified."}, 400
)
existing_group_id, existing_partition_id = cohorts.get_group_info_for_cohort(cohort)
if group_id != existing_group_id or user_partition_id != existing_partition_id:
unlink_cohort_partition_group(cohort)
link_cohort_to_partition_group(cohort, user_partition_id, group_id)
else:
# If group_id was specified as None, unlink the cohort if it previously was associated with a group.
existing_group_id, _ = cohorts.get_group_info_for_cohort(cohort)
if existing_group_id is not None:
unlink_cohort_partition_group(cohort)
return JsonResponse(_get_cohort_representation(cohort, course))
@ensure_csrf_cookie
def users_in_cohort(request, course_key_string, cohort_id):
"""
Return users in the cohort. Show up to 100 per page, and page
using the 'page' GET attribute in the call.
Returns:
Json dump of dictionary in the following format:
{'success': True,
'page': page,
'num_pages': paginator.num_pages,
'users': [{'username': ..., 'email': ..., 'name': ...}]
}
"""
# this is a string when we get it here
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_key_string)
get_course_with_access(request.user, 'staff', course_key)
# this will error if called with a non-int cohort_id. That's ok--it
# shouldn't happen for valid clients.
cohort = cohorts.get_cohort_by_id(course_key, int(cohort_id))
paginator = Paginator(cohort.users.all(), 100)
try:
page = int(request.GET.get('page'))
except (TypeError, ValueError):
# These strings aren't user-facing so don't translate them
return HttpResponseBadRequest('Requested page must be numeric')
else:
if page < 0:
return HttpResponseBadRequest('Requested page must be greater than zero')
try:
users = paginator.page(page)
except EmptyPage:
users = [] # When page > number of pages, return a blank page
user_info = [{'username': u.username,
'email': u.email,
'name': '{0} {1}'.format(u.first_name, u.last_name)}
for u in users]
return json_http_response({'success': True,
'page': page,
'num_pages': paginator.num_pages,
'users': user_info})
@ensure_csrf_cookie
@require_POST
def add_users_to_cohort(request, course_key_string, cohort_id):
"""
Return json dict of:
{'success': True,
'added': [{'username': ...,
'name': ...,
'email': ...}, ...],
'changed': [{'username': ...,
'name': ...,
'email': ...,
'previous_cohort': ...}, ...],
'present': [str1, str2, ...], # already there
'unknown': [str1, str2, ...]}
Raises Http404 if the cohort cannot be found for the given course.
"""
# this is a string when we get it here
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_key_string)
get_course_with_access(request.user, 'staff', course_key)
try:
cohort = cohorts.get_cohort_by_id(course_key, cohort_id)
except CourseUserGroup.DoesNotExist:
raise Http404("Cohort (ID {cohort_id}) not found for {course_key_string}".format(
cohort_id=cohort_id,
course_key_string=course_key_string
))
users = request.POST.get('users', '')
added = []
changed = []
present = []
unknown = []
for username_or_email in split_by_comma_and_whitespace(users):
if not username_or_email:
continue
try:
(user, previous_cohort) = cohorts.add_user_to_cohort(cohort, username_or_email)
info = {
'username': user.username,
'name': user.profile.name,
'email': user.email,
}
if previous_cohort:
info['previous_cohort'] = previous_cohort
changed.append(info)
else:
added.append(info)
except ValueError:
present.append(username_or_email)
except User.DoesNotExist:
unknown.append(username_or_email)
return json_http_response({'success': True,
'added': added,
'changed': changed,
'present': present,
'unknown': unknown})
@ensure_csrf_cookie
@require_POST
def remove_user_from_cohort(request, course_key_string, cohort_id):
"""
Expects 'username': username in POST data.
Return json dict of:
{'success': True} or
{'success': False,
'msg': error_msg}
"""
# this is a string when we get it here
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_key_string)
get_course_with_access(request.user, 'staff', course_key)
username = request.POST.get('username')
if username is None:
return json_http_response({'success': False,
'msg': 'No username specified'})
cohort = cohorts.get_cohort_by_id(course_key, cohort_id)
try:
user = User.objects.get(username=username)
cohort.users.remove(user)
return json_http_response({'success': True})
except User.DoesNotExist:
log.debug('no user')
return json_http_response({'success': False,
'msg': "No user '{0}'".format(username)})
def debug_cohort_mgmt(request, course_key_string):
"""
Debugging view for dev.
"""
# this is a string when we get it here
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_key_string)
# add staff check to make sure it's safe if it's accidentally deployed.
get_course_with_access(request.user, 'staff', course_key)
context = {'cohorts_url': reverse(
'cohorts',
kwargs={'course_key': course_key.to_deprecated_string()}
)}
return render_to_response('/course_groups/debug.html', context)
@expect_json
@login_required
def cohort_discussion_topics(request, course_key_string):
"""
The handler for cohort discussion categories requests.
    This will raise 404 if the user is not staff.
Returns the JSON representation of discussion topics w.r.t categories for the course.
Example:
>>> example = {
>>> "course_wide_discussions": {
>>> "entries": {
>>> "General": {
>>> "sort_key": "General",
>>> "is_cohorted": True,
>>> "id": "i4x-edx-eiorguegnru-course-foobarbaz"
>>> }
>>> }
>>> "children": ["General"]
>>> },
>>> "inline_discussions" : {
>>> "subcategories": {
>>> "Getting Started": {
>>> "subcategories": {},
>>> "children": [
>>> "Working with Videos",
>>> "Videos on edX"
>>> ],
>>> "entries": {
>>> "Working with Videos": {
>>> "sort_key": None,
>>> "is_cohorted": False,
>>> "id": "d9f970a42067413cbb633f81cfb12604"
>>> },
>>> "Videos on edX": {
>>> "sort_key": None,
>>> "is_cohorted": False,
>>> "id": "98d8feb5971041a085512ae22b398613"
>>> }
>>> }
>>> },
>>> "children": ["Getting Started"]
>>> },
>>> }
>>> }
"""
course_key = CourseKey.from_string(course_key_string)
course = get_course_with_access(request.user, 'staff', course_key)
discussion_topics = {}
discussion_category_map = get_discussion_category_map(
course, request.user, cohorted_if_in_list=True, exclude_unstarted=False
)
# We extract the data for the course wide discussions from the category map.
course_wide_entries = discussion_category_map.pop('entries')
course_wide_children = []
inline_children = []
for name in discussion_category_map['children']:
if name in course_wide_entries:
course_wide_children.append(name)
else:
inline_children.append(name)
discussion_topics['course_wide_discussions'] = {
'entries': course_wide_entries,
'children': course_wide_children
}
discussion_category_map['children'] = inline_children
discussion_topics['inline_discussions'] = discussion_category_map
return JsonResponse(discussion_topics)
|
olemis/zebra | refs/heads/master | tests/io/jsonrpclib/SimpleJSONRPCServer.py | 6 | import jsonrpclib
from jsonrpclib import Fault
from jsonrpclib.jsonrpc import USE_UNIX_SOCKETS
import SimpleXMLRPCServer
import SocketServer
import socket
import logging
import os
import types
import traceback
import sys
try:
import fcntl
except ImportError:
# For Windows
fcntl = None
def get_version(request):
    # must be a dict
    if 'jsonrpc' in request:
        return 2.0
    if 'id' in request:
        return 1.0
    return None
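# For illustration (not in the original source): how get_version classifies
# two minimal request dicts.
#
#   get_version({'jsonrpc': '2.0', 'method': 'ping', 'id': 1})  # -> 2.0
#   get_version({'method': 'ping', 'params': [], 'id': 1})      # -> 1.0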
def validate_request(request):
if type(request) is not types.DictType:
fault = Fault(
        -32600, 'Request must be a dict, not %s.' % type(request)
)
return fault
rpcid = request.get('id', None)
version = get_version(request)
if not version:
fault = Fault(-32600, 'Request %s invalid.' % request, rpcid=rpcid)
return fault
request.setdefault('params', [])
method = request.get('method', None)
params = request.get('params')
param_types = (types.ListType, types.DictType, types.TupleType)
if not method or type(method) not in types.StringTypes or \
type(params) not in param_types:
fault = Fault(
-32600, 'Invalid request parameters or method.', rpcid=rpcid
)
return fault
return True
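# For illustration (not in the original source): validate_request returns
# True for a well-formed request and a Fault instance otherwise.
#
#   validate_request({'jsonrpc': '2.0', 'method': 'echo',
#                     'params': ['hi'], 'id': 1})   # -> True
#   validate_request(['not', 'a', 'dict'])          # -> Fault(-32600, ...)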
class SimpleJSONRPCDispatcher(SimpleXMLRPCServer.SimpleXMLRPCDispatcher):
def __init__(self, encoding=None):
SimpleXMLRPCServer.SimpleXMLRPCDispatcher.__init__(self,
allow_none=True,
encoding=encoding)
def _marshaled_dispatch(self, data, dispatch_method = None):
response = None
try:
request = jsonrpclib.loads(data)
except Exception, e:
fault = Fault(-32700, 'Request %s invalid. (%s)' % (data, e))
response = fault.response()
return response
if not request:
fault = Fault(-32600, 'Request invalid -- no request data.')
return fault.response()
if type(request) is types.ListType:
# This SHOULD be a batch, by spec
responses = []
for req_entry in request:
result = validate_request(req_entry)
if type(result) is Fault:
responses.append(result.response())
continue
resp_entry = self._marshaled_single_dispatch(req_entry)
if resp_entry is not None:
responses.append(resp_entry)
if len(responses) > 0:
response = '[%s]' % ','.join(responses)
else:
response = ''
else:
result = validate_request(request)
if type(result) is Fault:
return result.response()
response = self._marshaled_single_dispatch(request)
return response
def _marshaled_single_dispatch(self, request):
        # TODO - Use multiprocessing and skip the response if
        # it is a notification
# Put in support for custom dispatcher here
# (See SimpleXMLRPCServer._marshaled_dispatch)
method = request.get('method')
params = request.get('params')
try:
response = self._dispatch(method, params)
except:
exc_type, exc_value, exc_tb = sys.exc_info()
fault = Fault(-32603, '%s:%s' % (exc_type, exc_value))
return fault.response()
        if 'id' not in request or request['id'] is None:
            # It's a notification
            return None
try:
response = jsonrpclib.dumps(response,
methodresponse=True,
rpcid=request['id']
)
return response
except:
exc_type, exc_value, exc_tb = sys.exc_info()
fault = Fault(-32603, '%s:%s' % (exc_type, exc_value))
return fault.response()
def _dispatch(self, method, params):
func = None
try:
func = self.funcs[method]
except KeyError:
if self.instance is not None:
if hasattr(self.instance, '_dispatch'):
return self.instance._dispatch(method, params)
else:
try:
func = SimpleXMLRPCServer.resolve_dotted_attribute(
self.instance,
method,
True
)
except AttributeError:
pass
if func is not None:
try:
if type(params) is types.ListType:
response = func(*params)
else:
response = func(**params)
return response
except TypeError:
return Fault(-32602, 'Invalid parameters.')
except:
err_lines = traceback.format_exc().splitlines()
trace_string = '%s | %s' % (err_lines[-3], err_lines[-1])
fault = jsonrpclib.Fault(-32603, 'Server error: %s' %
trace_string)
return fault
else:
return Fault(-32601, 'Method %s not supported.' % method)
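# For illustration (not in the original source): _marshaled_dispatch treats
# a JSON array as a batch, per the JSON-RPC 2.0 spec. A hypothetical payload:
#
#   '[{"jsonrpc": "2.0", "method": "ping", "id": 1},
#     {"jsonrpc": "2.0", "method": "ping", "id": 2}]'
#
# Each entry is validated and dispatched independently; entries without an
# "id" are notifications and contribute no response entry.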
class SimpleJSONRPCRequestHandler(
SimpleXMLRPCServer.SimpleXMLRPCRequestHandler):
def do_POST(self):
if not self.is_rpc_path_valid():
self.report_404()
return
try:
max_chunk_size = 10*1024*1024
size_remaining = int(self.headers["content-length"])
L = []
while size_remaining:
chunk_size = min(size_remaining, max_chunk_size)
L.append(self.rfile.read(chunk_size))
size_remaining -= len(L[-1])
data = ''.join(L)
response = self.server._marshaled_dispatch(data)
self.send_response(200)
except Exception, e:
self.send_response(500)
err_lines = traceback.format_exc().splitlines()
trace_string = '%s | %s' % (err_lines[-3], err_lines[-1])
fault = jsonrpclib.Fault(-32603, 'Server error: %s' % trace_string)
response = fault.response()
        if response is None:
            response = ''
self.send_header("Content-type", "application/json-rpc")
self.send_header("Content-length", str(len(response)))
self.end_headers()
self.wfile.write(response)
self.wfile.flush()
self.connection.shutdown(1)
class SimpleJSONRPCServer(SocketServer.TCPServer, SimpleJSONRPCDispatcher):
allow_reuse_address = True
def __init__(self, addr, requestHandler=SimpleJSONRPCRequestHandler,
logRequests=True, encoding=None, bind_and_activate=True,
address_family=socket.AF_INET):
self.logRequests = logRequests
SimpleJSONRPCDispatcher.__init__(self, encoding)
# TCPServer.__init__ has an extra parameter on 2.6+, so
# check Python version and decide on how to call it
vi = sys.version_info
self.address_family = address_family
if USE_UNIX_SOCKETS and address_family == socket.AF_UNIX:
# Unix sockets can't be bound if they already exist in the
# filesystem. The convention of e.g. X11 is to unlink
# before binding again.
if os.path.exists(addr):
try:
os.unlink(addr)
except OSError:
logging.warning("Could not unlink socket %s", addr)
# if python 2.5 and lower
if vi[0] < 3 and vi[1] < 6:
SocketServer.TCPServer.__init__(self, addr, requestHandler)
else:
SocketServer.TCPServer.__init__(self, addr, requestHandler,
bind_and_activate)
if fcntl is not None and hasattr(fcntl, 'FD_CLOEXEC'):
flags = fcntl.fcntl(self.fileno(), fcntl.F_GETFD)
flags |= fcntl.FD_CLOEXEC
fcntl.fcntl(self.fileno(), fcntl.F_SETFD, flags)
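# Illustrative usage sketch (not part of the original module); the address
# and the registered functions are hypothetical. register_function() is
# inherited from the SimpleXMLRPCDispatcher base class.
#
#   server = SimpleJSONRPCServer(('localhost', 8080))
#   server.register_function(pow)
#   server.register_function(lambda s: s, 'echo')
#   server.serve_forever()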
class CGIJSONRPCRequestHandler(SimpleJSONRPCDispatcher):
def __init__(self, encoding=None):
SimpleJSONRPCDispatcher.__init__(self, encoding)
def handle_jsonrpc(self, request_text):
response = self._marshaled_dispatch(request_text)
print 'Content-Type: application/json-rpc'
print 'Content-Length: %d' % len(response)
print
sys.stdout.write(response)
handle_xmlrpc = handle_jsonrpc
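# Illustrative CGI usage sketch (not part of the original module), mirroring
# the CGIXMLRPCRequestHandler convention from the standard library:
#
#   handler = CGIJSONRPCRequestHandler()
#   handler.register_function(pow)
#   handler.handle_jsonrpc(sys.stdin.read())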
|
FedoraScientific/salome-med | refs/heads/master | src/MEDOP/tui/xmedpy/tests/test_xmed_fieldOperations.py | 1 | #!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
# Copyright (C) 2007-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
# This file is a set of basic use cases to test (from the python
# context) the functions developed in the MED modules for the field
# operations.
#
# (gboulant - 16/6/2011)
#
import xmed
from xmed import properties
from xmed import fieldproxy
from xmed.fieldproxy import FieldProxy
#from xmed.fieldtools import dup, stat, get, save
#from xmed.fieldguide import doc
# Don't forget to set the globals dictionary for the field tools to
# work properly
xmed.setConsoleGlobals(globals())
# Load some test data in the MedDataManager
filepath = properties.testFilePath
xmed.dataManager.addDatasource(filepath)
fieldHandlerList = xmed.dataManager.getFieldHandlerList()
def setup():
"""
    This function defines a set of field variables for quick tests in
the python console. You just have to execute the function to get
the variables defined in the global context.
"""
fh1=fieldHandlerList[0]
fh2=fieldHandlerList[1]
f1 = FieldProxy(fh1)
f2 = FieldProxy(fh2)
return fh1, fh2, f1, f2
# Setup for quick tests in the python console
fh1, fh2, f1, f2 = setup()
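# Illustrative only (not part of the original test setup): with f1 and f2
# bound above, a quick interactive check might look like
#
#   res = f1 + f2
#   res.update(name="f1_plus_f2")
#
# where update() is the metadata-renaming call exercised in
# TEST_update_metadata below.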
def TEST_addition():
fieldHandler0 = fieldHandlerList[0]
fieldHandler1 = fieldHandlerList[1]
    # The addition can be done using field handlers directly
addFieldHandler = xmed.calculator.add(fieldHandler0, fieldHandler1)
print addFieldHandler
    # Or with a field proxy that eases the writing of operations
fieldProxy0 = FieldProxy(fieldHandler0)
fieldProxy1 = FieldProxy(fieldHandler1)
res = fieldProxy0 + fieldProxy1
if res is None: return False
return True
def TEST_arithmetics():
fieldProxy0 = FieldProxy(fieldHandlerList[0])
fieldProxy1 = FieldProxy(fieldHandlerList[1])
    # Standard operations where operands are fields
res = fieldProxy0 + fieldProxy1
if res is None: return False
res = fieldProxy0 - fieldProxy1
if res is None: return False
res = fieldProxy0 * fieldProxy1
if res is None: return False
    res = fieldProxy0 / fieldProxy1
    if res is None: return False
    # Standard operations with scalar operands
res = fieldProxy0 + 3.4
if res is None: return False
res = 3.4 + fieldProxy0
if res is None: return False
res = fieldProxy0 - 3.4
if res is None: return False
res = 3.4 - fieldProxy0
if res is None: return False
res = fieldProxy0 * 3.4
if res is None: return False
res = 3.4 * fieldProxy0
if res is None: return False
res = fieldProxy0 / 3.4
if res is None: return False
res = 3.4 / fieldProxy0
if res is None: return False
return True
def TEST_unary_operations():
fieldProxy0 = FieldProxy(fieldHandlerList[0])
res = fieldProxy0.dup()
if res is None: return False
res = xmed.dup(fieldProxy0)
if res is None: return False
res = pow(fieldProxy0,2)
if res is None: return False
return True
def TEST_composition():
    # In this test, we combine operands that are supposed
    # to be compatible. We expect that no error occurs.
fieldProxy0 = FieldProxy(fieldHandlerList[0])
fieldProxy1 = FieldProxy(fieldHandlerList[1])
res = pow(fieldProxy0,2) + fieldProxy1
if res is None: return False
return True
def TEST_litteral_equation():
fieldProxy0 = FieldProxy(fieldHandlerList[0])
res = fieldProxy0.ope("abs(u)^2")
if res is None: return False
return True
def TEST_use_restriction():
fieldProxy0 = FieldProxy(fieldHandlerList[0])
res = fieldProxy0("c=1;g='toto'")
if res is None: return False
return True
def TEST_modification_of_attributes():
fieldProxy0 = FieldProxy(fieldHandlerList[0])
id_ref = fieldProxy0.id
fieldname_ref = fieldProxy0.fieldname
meshname_ref = fieldProxy0.meshname
#
    # These operations are not allowed, or not in this way
#
# This should print that it is not allowed:
fieldProxy0.id = id_ref+3
if fieldProxy0.id != id_ref:
print "ERR: the id should be %d (%d found)"%(id_ref,fieldProxy0.id)
return False
# This should print that it must be done using the command update
fieldProxy0.fieldname = fieldname_ref+"toto"
if fieldProxy0.fieldname != fieldname_ref:
print "ERR: the fieldname should be %s (%s found)"%(fieldname_ref,fieldProxy0.fieldname)
return False
# This should print that it is not allowed:
fieldProxy0.meshname = meshname_ref+"titi"
if fieldProxy0.meshname != meshname_ref:
print "ERR: the meshname should be %s (%s found)"%(meshname_ref,fieldProxy0.meshname)
return False
return True
def TEST_update_metadata():
fieldProxyRef = FieldProxy(fieldHandlerList[0])
    field_id = fieldProxyRef.id
    name_ref = "toto"
    fieldProxyRef.update(name=name_ref)
    fieldProxyRes = xmed.get(field_id)
name_res = fieldProxyRes.fieldname
if name_res != name_ref:
print "ERR: the fieldname should be %s (%s found)"%(name_ref,name_res)
return False
return True
#
# =============================================================
# Unit tests
# =============================================================
#
import unittest
from salome.kernel import pyunittester
class MyTestSuite(unittest.TestCase):
def test_addition(self):
result = pyunittester.execAndConvertExceptionToBoolean(TEST_addition)
self.assertTrue(result)
def test_arithmetics(self):
result = pyunittester.execAndConvertExceptionToBoolean(TEST_arithmetics)
self.assertTrue(result)
def test_unary_operations(self):
result = pyunittester.execAndConvertExceptionToBoolean(TEST_unary_operations)
self.assertTrue(result)
def test_composition(self):
result = pyunittester.execAndConvertExceptionToBoolean(TEST_composition)
self.assertTrue(result)
def test_litteral_equation(self):
result = pyunittester.execAndConvertExceptionToBoolean(TEST_litteral_equation)
self.assertTrue(result)
def test_modification_of_attributes(self):
self.assertTrue(TEST_modification_of_attributes())
def test_update_metadata(self):
self.assertTrue(TEST_update_metadata())
def myunittests():
pyunittester.run(MyTestSuite)
def myusecases():
TEST_addition()
#TEST_arithmetics()
#TEST_unary_operations()
#TEST_update_metadata()
#TEST_composition()
if __name__ == "__main__":
#myusecases()
myunittests()
pass
|