| repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, nullable) |
|---|---|---|---|---|
| WikipediaLibrary/TWLight | refs/heads/master | TWLight/users/migrations/0066_move_editcounts_to_log.py | 1 |
# Generated by Django 3.0.11 on 2020-11-20 19:32
from django.db import migrations
def move_wp_editcounts_to_logs(apps, schema_editor):
Editor = apps.get_model("users", "Editor")
EditorLog = apps.get_model("users", "EditorLog")
for editor in Editor.objects.all():
if (
editor.wp_editcount_prev
and editor.wp_editcount_prev_updated
and (editor.wp_editcount_prev_updated != editor.wp_editcount_updated)
):
log = EditorLog()
log.editor = editor
log.editcount = editor.wp_editcount_prev
log.timestamp = editor.wp_editcount_prev_updated
log.save()
if editor.wp_editcount and editor.wp_editcount_updated:
log = EditorLog()
log.editor = editor
log.editcount = editor.wp_editcount
log.timestamp = editor.wp_editcount_updated
log.save()
class Migration(migrations.Migration):
dependencies = [
("users", "0065_editorlog"),
]
operations = [migrations.RunPython(move_wp_editcounts_to_logs)]
| ahnqirage/thrift | refs/heads/master | lib/py/src/transport/__init__.py | 348 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
__all__ = ['TTransport', 'TSocket', 'THttpClient', 'TZlibTransport']
| DiptoDas8/Biponi | refs/heads/master | lib/python2.7/site-packages/braintree/util/constants.py | 5 |
class Constants(object):
@staticmethod
def get_all_constant_values_from_class(klass):
return [klass.__dict__[item] for item in dir(klass) if not item.startswith("__")]
| liu602348184/django | refs/heads/master | django/contrib/sites/requests.py | 695 |
from __future__ import unicode_literals
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class RequestSite(object):
"""
A class that shares the primary interface of Site (i.e., it has
``domain`` and ``name`` attributes) but gets its data from a Django
HttpRequest object rather than from a database.
The save() and delete() methods raise NotImplementedError.
"""
def __init__(self, request):
self.domain = self.name = request.get_host()
def __str__(self):
return self.domain
def save(self, force_insert=False, force_update=False):
raise NotImplementedError('RequestSite cannot be saved.')
def delete(self):
raise NotImplementedError('RequestSite cannot be deleted.')
| agry/NGECore2 | refs/heads/master | scripts/expertise/expertise_en_sweeping_pirouette_1.py | 2 |
import sys
def addAbilities(core, actor, player):
if actor.getLevel() >= 26:
actor.addAbility("en_sweeping_pirouette_0")
if actor.getLevel() >= 38:
actor.addAbility("en_sweeping_pirouette_1")
if actor.getLevel() >= 50:
actor.addAbility("en_sweeping_pirouette_2")
if actor.getLevel() >= 62:
actor.addAbility("en_sweeping_pirouette_3")
if actor.getLevel() >= 74:
actor.addAbility("en_sweeping_pirouette_4")
if actor.getLevel() >= 86:
actor.addAbility("en_sweeping_pirouette_5")
return
def removeAbilities(core, actor, player):
actor.removeAbility("en_sweeping_pirouette_0")
return
| lancezlin/ml_template_py | refs/heads/master | lib/python2.7/site-packages/pip/_vendor/packaging/utils.py | 1126 |
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import re
_canonicalize_regex = re.compile(r"[-_.]+")
def canonicalize_name(name):
# This is taken from PEP 503.
return _canonicalize_regex.sub("-", name).lower()
| tvwerkhoven/pretty-plots | refs/heads/master | matplotlib-ref-density.py | 1 |
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
#
# @file matplotlib-ref-density.py -- matplotlib example script
# Copyright (C) 2011--2013 Tim van Werkhoven (timvanwerkhoven@gmail.com)
#
# This work is licensed under the Creative Commons Attribution-Share Alike
# 3.0 Unported License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-sa/3.0/ or send a letter to Creative
# Commons, 171 Second Street, Suite 300, San Francisco, California,94105, USA.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import pylab as plt
import numpy as np
import matplotlib
# Make colormap based on Paul Tol's best visibility gradients. See
# <http://www.sron.nl/~pault/> for more info on these colors. Also see
# <http://matplotlib.sourceforge.net/api/colors_api.html>
# and <http://www.scipy.org/Cookbook/Matplotlib/Show_colormaps> on some
# matplotlib examples
# Deviation around zero colormap (blue--red)
cols = []
for x in np.linspace(0,1, 256):
rcol = 0.237 - 2.13*x + 26.92*x**2 - 65.5*x**3 + 63.5*x**4 - 22.36*x**5
gcol = ((0.572 + 1.524*x - 1.811*x**2)/(1 - 0.291*x + 0.1574*x**2))**2
bcol = 1/(1.579 - 4.03*x + 12.92*x**2 - 31.4*x**3 + 48.6*x**4 - 23.36*x**5)
cols.append((rcol, gcol, bcol))
cm_plusmin = matplotlib.colors.LinearSegmentedColormap.from_list("PaulT_plusmin", cols)
# This colormap is very similar to the built-in cmap RdYlBu, see <http://www.scipy.org/Cookbook/Matplotlib/Show_colormaps>
# Linear colormap (white--red)
from scipy.special import erf
cols = []
for x in np.linspace(0,1, 256):
rcol = (1 - 0.392*(1 + erf((x - 0.869)/ 0.255)))
gcol = (1.021 - 0.456*(1 + erf((x - 0.527)/ 0.376)))
bcol = (1 - 0.493*(1 + erf((x - 0.272)/ 0.309)))
cols.append((rcol, gcol, bcol))
cm_linear = matplotlib.colors.LinearSegmentedColormap.from_list("PaulT_linear", cols)
# This colormap is very similar to the built-in cmap YlOrBr, see <http://www.scipy.org/Cookbook/Matplotlib/Show_colormaps>
# Linear colormap (rainbow)
cols = [(0,0,0)]
for x in np.linspace(0,1, 254):
rcol = (0.472-0.567*x+4.05*x**2)/(1.+8.72*x-19.17*x**2+14.1*x**3)
gcol = 0.108932-1.22635*x+27.284*x**2-98.577*x**3+163.3*x**4-131.395*x**5+40.634*x**6
bcol = 1./(1.97+3.54*x-68.5*x**2+243*x**3-297*x**4+125*x**5)
cols.append((rcol, gcol, bcol))
cols.append((1,1,1))
cm_rainbow = matplotlib.colors.LinearSegmentedColormap.from_list("PaulT_rainbow", cols)
# This colormap is close to the built-in cmap 'Spectral', see <http://www.scipy.org/Cookbook/Matplotlib/Show_colormaps>
# Plot examples
tmpim = np.arange(256).reshape(1,-1)
print "Generating matplotlib-ref-density-plusmin.*..."
plt.figure(1); plt.clf()
plt.title("www.sron.nl/~pault plus-min variation colormap (like RdYlBu)")
plt.imshow(tmpim, cmap=plt.get_cmap(cm_plusmin), aspect='auto')
plt.savefig("matplotlib-ref-density-plusmin.pdf")
plt.savefig("matplotlib-ref-density-plusmin.eps")
print "Generating matplotlib-ref-density-linear.*..."
plt.figure(2); plt.clf()
plt.title("www.sron.nl/~pault linear colormap (like YlOrBr)")
plt.imshow(tmpim, cmap=plt.get_cmap(cm_linear), aspect='auto')
plt.savefig("matplotlib-ref-density-linear.pdf")
plt.savefig("matplotlib-ref-density-linear.eps")
print "Generating matplotlib-ref-density-rainbow.*..."
plt.figure(3); plt.clf()
plt.title("www.sron.nl/~pault rainbow colormap (like Spectral)")
plt.imshow(tmpim, cmap=plt.get_cmap(cm_rainbow), aspect='auto')
plt.savefig("matplotlib-ref-rainbow.pdf")
plt.savefig("matplotlib-ref-rainbow.eps")
# EOF
| ProfessionalIT/maxigenios-website | refs/heads/master | sdk/google_appengine/google/appengine/_internal/django/core/files/images.py | 23 |
"""
Utility functions for handling images.
Requires PIL, as you might imagine.
"""
from google.appengine._internal.django.core.files import File
class ImageFile(File):
"""
A mixin for use alongside django.core.files.base.File, which provides
additional features for dealing with images.
"""
def _get_width(self):
return self._get_image_dimensions()[0]
width = property(_get_width)
def _get_height(self):
return self._get_image_dimensions()[1]
height = property(_get_height)
def _get_image_dimensions(self):
if not hasattr(self, '_dimensions_cache'):
close = self.closed
self.open()
self._dimensions_cache = get_image_dimensions(self, close=close)
return self._dimensions_cache
def get_image_dimensions(file_or_path, close=False):
"""
Returns the (width, height) of an image, given an open file or a path. Set
'close' to True to close the file at the end if it is initially in an open
state.
"""
# Try to import PIL in either of the two ways it can end up installed.
try:
from PIL import ImageFile as PILImageFile
except ImportError:
import ImageFile as PILImageFile
p = PILImageFile.Parser()
if hasattr(file_or_path, 'read'):
file = file_or_path
file_pos = file.tell()
file.seek(0)
else:
file = open(file_or_path, 'rb')
close = True
try:
while 1:
data = file.read(1024)
if not data:
break
p.feed(data)
if p.image:
return p.image.size
return None
finally:
if close:
file.close()
else:
file.seek(file_pos)
| jermowery/xos | refs/heads/master | xos/core/models/reservation.py | 3 |
import os
import datetime
from django.db import models
from core.models import PlCoreBase
from core.models import Instance
from core.models import Slice
from core.models import ServiceResource
# Create your models here.
class Reservation(PlCoreBase):
startTime = models.DateTimeField()
slice = models.ForeignKey(Slice, related_name="reservations")
duration = models.IntegerField(default=1)
def __unicode__(self): return u'%s to %s' % (self.startTime, self.endTime)
@property
def endTime(self):
return self.startTime + datetime.timedelta(hours=self.duration)
def can_update(self, user):
return user.can_update_slice(self.slice)
@staticmethod
def select_by_user(user):
if user.is_admin:
qs = Reservation.objects.all()
else:
slice_ids = [s.id for s in Slice.select_by_user(user)]
qs = Reservation.objects.filter(id__in=slice_ids)
return qs
class ReservedResource(PlCoreBase):
instance = models.ForeignKey(Instance, related_name="reservedresources")
resource = models.ForeignKey(ServiceResource, related_name="reservedresources")
quantity = models.IntegerField(default=1)
reservationSet = models.ForeignKey(Reservation, related_name="reservedresources")
class Meta(PlCoreBase.Meta):
verbose_name_plural = "Reserved Resources"
def __unicode__(self): return u'%d %s on %s' % (self.quantity, self.resource, self.instance)
def can_update(self, user):
return user.can_update(self.instance.slice)
@staticmethod
def select_by_user(user):
if user.is_admin:
qs = ReservedResource.objects.all()
else:
instance_ids = [s.id for s in Instance.select_by_user(user)]
qs = ReservedResource.objects.filter(id__in=instance_ids)
return qs
| SnappleCap/oh-mainline | refs/heads/master | vendor/packages/Django/tests/modeltests/tablespaces/models.py | 150 |
from django.db import models
# Since the test database doesn't have tablespaces, it's impossible for Django
# to create the tables for models where db_tablespace is set. To avoid this
# problem, we mark the models as unmanaged, and temporarily revert them to
# managed during each test. We also set them to use the same tables as the
# "reference" models to avoid errors when other tests run 'syncdb'
# (proxy_models_inheritance does).
class ScientistRef(models.Model):
name = models.CharField(max_length=50)
class ArticleRef(models.Model):
title = models.CharField(max_length=50, unique=True)
code = models.CharField(max_length=50, unique=True)
authors = models.ManyToManyField(ScientistRef, related_name='articles_written_set')
reviewers = models.ManyToManyField(ScientistRef, related_name='articles_reviewed_set')
class Scientist(models.Model):
name = models.CharField(max_length=50)
class Meta:
db_table = 'tablespaces_scientistref'
db_tablespace = 'tbl_tbsp'
managed = False
class Article(models.Model):
title = models.CharField(max_length=50, unique=True)
code = models.CharField(max_length=50, unique=True, db_tablespace='idx_tbsp')
authors = models.ManyToManyField(Scientist, related_name='articles_written_set')
reviewers = models.ManyToManyField(Scientist, related_name='articles_reviewed_set', db_tablespace='idx_tbsp')
class Meta:
db_table = 'tablespaces_articleref'
db_tablespace = 'tbl_tbsp'
managed = False
# Also set the tables for automatically created models
Authors = Article._meta.get_field('authors').rel.through
Authors._meta.db_table = 'tablespaces_articleref_authors'
Reviewers = Article._meta.get_field('reviewers').rel.through
Reviewers._meta.db_table = 'tablespaces_articleref_reviewers'
| lidan-fnst/samba | refs/heads/master | source4/dsdb/tests/python/dirsync.py | 2 |
#!/usr/bin/env python
#
# Unit tests for dirsync control
# Copyright (C) Matthieu Patou <mat@matws.net> 2011
# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2014
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import optparse
import sys
sys.path.insert(0, "bin/python")
import samba
from samba.tests.subunitrun import TestProgram, SubunitOptions
import samba.getopt as options
import base64
from ldb import LdbError, SCOPE_BASE
from ldb import Message, MessageElement, Dn
from ldb import FLAG_MOD_ADD, FLAG_MOD_DELETE
from samba.dcerpc import security, misc, drsblobs, security
from samba.ndr import ndr_unpack, ndr_pack
from samba.auth import system_session
from samba import gensec, sd_utils
from samba.samdb import SamDB
from samba.credentials import Credentials, DONT_USE_KERBEROS
import samba.tests
from samba.tests import delete_force
parser = optparse.OptionParser("dirsync.py [options] <host>")
sambaopts = options.SambaOptions(parser)
parser.add_option_group(sambaopts)
parser.add_option_group(options.VersionOptions(parser))
# use command line creds if available
credopts = options.CredentialsOptions(parser)
parser.add_option_group(credopts)
subunitopts = SubunitOptions(parser)
parser.add_option_group(subunitopts)
opts, args = parser.parse_args()
if len(args) < 1:
parser.print_usage()
sys.exit(1)
host = args.pop()
if not "://" in host:
ldaphost = "ldap://%s" % host
ldapshost = "ldaps://%s" % host
else:
ldaphost = host
start = host.rindex("://")
host = host.lstrip(start+3)
lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp)
#
# Tests start here
#
class DirsyncBaseTests(samba.tests.TestCase):
def setUp(self):
super(DirsyncBaseTests, self).setUp()
self.ldb_admin = SamDB(ldapshost, credentials=creds, session_info=system_session(lp), lp=lp)
self.base_dn = self.ldb_admin.domain_dn()
self.domain_sid = security.dom_sid(self.ldb_admin.get_domain_sid())
self.user_pass = samba.generate_random_password(12, 16)
self.configuration_dn = self.ldb_admin.get_config_basedn().get_linearized()
self.sd_utils = sd_utils.SDUtils(self.ldb_admin)
#used for anonymous login
print "baseDN: %s" % self.base_dn
def get_user_dn(self, name):
return "CN=%s,CN=Users,%s" % (name, self.base_dn)
def get_ldb_connection(self, target_username, target_password):
creds_tmp = Credentials()
creds_tmp.set_username(target_username)
creds_tmp.set_password(target_password)
creds_tmp.set_domain(creds.get_domain())
creds_tmp.set_realm(creds.get_realm())
creds_tmp.set_workstation(creds.get_workstation())
creds_tmp.set_gensec_features(creds_tmp.get_gensec_features()
| gensec.FEATURE_SEAL)
creds_tmp.set_kerberos_state(DONT_USE_KERBEROS) # kinit is too expensive to use in a tight loop
ldb_target = SamDB(url=ldaphost, credentials=creds_tmp, lp=lp)
return ldb_target
#tests on ldap add operations
class SimpleDirsyncTests(DirsyncBaseTests):
def setUp(self):
super(SimpleDirsyncTests, self).setUp()
# Regular user
self.dirsync_user = "test_dirsync_user"
self.simple_user = "test_simple_user"
self.admin_user = "test_admin_user"
self.ouname = None
self.ldb_admin.newuser(self.dirsync_user, self.user_pass)
self.ldb_admin.newuser(self.simple_user, self.user_pass)
self.ldb_admin.newuser(self.admin_user, self.user_pass)
self.desc_sddl = self.sd_utils.get_sd_as_sddl(self.base_dn)
user_sid = self.sd_utils.get_object_sid(self.get_user_dn(self.dirsync_user))
mod = "(OA;;CR;%s;;%s)" % (security.GUID_DRS_GET_CHANGES,
str(user_sid))
self.sd_utils.dacl_add_ace(self.base_dn, mod)
# add admins to the Domain Admins group
self.ldb_admin.add_remove_group_members("Domain Admins", [self.admin_user],
add_members_operation=True)
def tearDown(self):
super(SimpleDirsyncTests, self).tearDown()
delete_force(self.ldb_admin, self.get_user_dn(self.dirsync_user))
delete_force(self.ldb_admin, self.get_user_dn(self.simple_user))
delete_force(self.ldb_admin, self.get_user_dn(self.admin_user))
if self.ouname:
delete_force(self.ldb_admin, self.ouname)
self.sd_utils.modify_sd_on_dn(self.base_dn, self.desc_sddl)
try:
self.ldb_admin.deletegroup("testgroup")
except Exception:
pass
#def test_dirsync_errors(self):
def test_dirsync_supported(self):
"""Test the basic of the dirsync is supported"""
self.ldb_dirsync = self.get_ldb_connection(self.dirsync_user, self.user_pass)
self.ldb_simple = self.get_ldb_connection(self.simple_user, self.user_pass)
res = self.ldb_admin.search(self.base_dn, expression="samaccountname=*", controls=["dirsync:1:0:1"])
res = self.ldb_dirsync.search(self.base_dn, expression="samaccountname=*", controls=["dirsync:1:0:1"])
try:
self.ldb_simple.search(self.base_dn,
expression="samaccountname=*",
controls=["dirsync:1:0:1"])
except LdbError,l:
self.assertTrue(str(l).find("LDAP_INSUFFICIENT_ACCESS_RIGHTS") != -1)
def test_parentGUID_referrals(self):
res2 = self.ldb_admin.search(self.base_dn, scope=SCOPE_BASE, attrs=["objectGUID"])
res = self.ldb_admin.search(self.base_dn,
expression="name=Configuration",
controls=["dirsync:1:0:1"])
self.assertEqual(res2[0].get("objectGUID"), res[0].get("parentGUID"))
def test_ok_not_rootdc(self):
"""Test if it's ok to do dirsync on another NC that is not the root DC"""
self.ldb_admin.search(self.ldb_admin.get_config_basedn(),
expression="samaccountname=*",
controls=["dirsync:1:0:1"])
def test_dirsync_errors(self):
"""Test if dirsync returns the correct LDAP errors in case of pb"""
self.ldb_simple = self.get_ldb_connection(self.simple_user, self.user_pass)
self.ldb_dirsync = self.get_ldb_connection(self.dirsync_user, self.user_pass)
try:
self.ldb_simple.search(self.base_dn,
expression="samaccountname=*",
controls=["dirsync:1:0:1"])
except LdbError,l:
print l
self.assertTrue(str(l).find("LDAP_INSUFFICIENT_ACCESS_RIGHTS") != -1)
try:
self.ldb_simple.search("CN=Users,%s" % self.base_dn,
expression="samaccountname=*",
controls=["dirsync:1:0:1"])
except LdbError,l:
print l
self.assertTrue(str(l).find("LDAP_INSUFFICIENT_ACCESS_RIGHTS") != -1)
try:
self.ldb_simple.search("CN=Users,%s" % self.base_dn,
expression="samaccountname=*",
controls=["dirsync:1:1:1"])
except LdbError,l:
print l
self.assertTrue(str(l).find("LDAP_UNWILLING_TO_PERFORM") != -1)
try:
self.ldb_dirsync.search("CN=Users,%s" % self.base_dn,
expression="samaccountname=*",
controls=["dirsync:1:0:1"])
except LdbError,l:
print l
self.assertTrue(str(l).find("LDAP_INSUFFICIENT_ACCESS_RIGHTS") != -1)
try:
self.ldb_admin.search("CN=Users,%s" % self.base_dn,
expression="samaccountname=*",
controls=["dirsync:1:0:1"])
except LdbError,l:
print l
self.assertTrue(str(l).find("LDAP_INSUFFICIENT_ACCESS_RIGHTS") != -1)
try:
self.ldb_admin.search("CN=Users,%s" % self.base_dn,
expression="samaccountname=*",
controls=["dirsync:1:1:1"])
except LdbError,l:
print l
self.assertTrue(str(l).find("LDAP_UNWILLING_TO_PERFORM") != -1)
def test_dirsync_attributes(self):
"""Check behavior with some attributes """
res = self.ldb_admin.search(self.base_dn,
expression="samaccountname=*",
controls=["dirsync:1:0:1"])
# Check that nTSecurityDescriptor is returned as it's the case when doing dirsync
self.assertTrue(res.msgs[0].get("ntsecuritydescriptor") != None)
# Check that non replicated attributes are not returned
self.assertTrue(res.msgs[0].get("badPwdCount") == None)
# Check that non forward link are not returned
self.assertTrue(res.msgs[0].get("memberof") == None)
# Asking for instanceType will return also objectGUID
res = self.ldb_admin.search(self.base_dn,
expression="samaccountname=Administrator",
attrs=["instanceType"],
controls=["dirsync:1:0:1"])
self.assertTrue(res.msgs[0].get("objectGUID") != None)
self.assertTrue(res.msgs[0].get("instanceType") != None)
# We don't return an entry if asked for objectGUID
res = self.ldb_admin.search(self.base_dn,
expression="(distinguishedName=%s)" % str(self.base_dn),
attrs=["objectGUID"],
controls=["dirsync:1:0:1"])
self.assertEquals(len(res.msgs), 0)
# a request on the root of a NC didn't return parentGUID
res = self.ldb_admin.search(self.base_dn,
expression="(distinguishedName=%s)" % str(self.base_dn),
attrs=["name"],
controls=["dirsync:1:0:1"])
self.assertTrue(res.msgs[0].get("objectGUID") != None)
self.assertTrue(res.msgs[0].get("name") != None)
self.assertTrue(res.msgs[0].get("parentGUID") == None)
self.assertTrue(res.msgs[0].get("instanceType") != None)
# Asking for name will return also objectGUID and parentGUID
# and instanceType and of course name
res = self.ldb_admin.search(self.base_dn,
expression="samaccountname=Administrator",
attrs=["name"],
controls=["dirsync:1:0:1"])
self.assertTrue(res.msgs[0].get("objectGUID") != None)
self.assertTrue(res.msgs[0].get("name") != None)
self.assertTrue(res.msgs[0].get("parentGUID") != None)
self.assertTrue(res.msgs[0].get("instanceType") != None)
# Asking for dn will not return not only DN but more like if attrs=*
# parentGUID should be returned
res = self.ldb_admin.search(self.base_dn,
expression="samaccountname=Administrator",
attrs=["dn"],
controls=["dirsync:1:0:1"])
count = len(res.msgs[0])
res2 = self.ldb_admin.search(self.base_dn,
expression="samaccountname=Administrator",
controls=["dirsync:1:0:1"])
count2 = len(res2.msgs[0])
self.assertEqual(count, count2)
# Asking for cn will return nothing on objects that have CN as RDN
res = self.ldb_admin.search(self.base_dn,
expression="samaccountname=Administrator",
attrs=["cn"],
controls=["dirsync:1:0:1"])
self.assertEqual(len(res.msgs), 0)
# Asking for parentGUID will return nothing too
res = self.ldb_admin.search(self.base_dn,
expression="samaccountname=Administrator",
attrs=["parentGUID"],
controls=["dirsync:1:0:1"])
self.assertEqual(len(res.msgs), 0)
ouname="OU=testou,%s" % self.base_dn
self.ouname = ouname
self.ldb_admin.create_ou(ouname)
delta = Message()
delta.dn = Dn(self.ldb_admin, str(ouname))
delta["cn"] = MessageElement("test ou",
FLAG_MOD_ADD,
"cn" )
self.ldb_admin.modify(delta)
res = self.ldb_admin.search(self.base_dn,
expression="name=testou",
attrs=["cn"],
controls=["dirsync:1:0:1"])
self.assertEqual(len(res.msgs), 1)
self.assertEqual(len(res.msgs[0]), 3)
delete_force(self.ldb_admin, ouname)
def test_dirsync_with_controls(self):
"""Check that dirsync return correct informations when dealing with the NC"""
res = self.ldb_admin.search(self.base_dn,
expression="(distinguishedName=%s)" % str(self.base_dn),
attrs=["name"],
controls=["dirsync:1:0:10000", "extended_dn:1", "show_deleted:1"])
def test_dirsync_basenc(self):
"""Check that dirsync return correct informations when dealing with the NC"""
res = self.ldb_admin.search(self.base_dn,
expression="(distinguishedName=%s)" % str(self.base_dn),
attrs=["name"],
controls=["dirsync:1:0:10000"])
self.assertEqual(len(res.msgs), 1)
self.assertEqual(len(res.msgs[0]), 3)
res = self.ldb_admin.search(self.base_dn,
expression="(distinguishedName=%s)" % str(self.base_dn),
attrs=["ntSecurityDescriptor"],
controls=["dirsync:1:0:10000"])
self.assertEqual(len(res.msgs), 1)
self.assertEqual(len(res.msgs[0]), 3)
def test_dirsync_othernc(self):
"""Check that dirsync return information for entries that are normaly referrals (ie. other NCs)"""
res = self.ldb_admin.search(self.base_dn,
expression="(objectclass=configuration)",
attrs=["name"],
controls=["dirsync:1:0:10000"])
self.assertEqual(len(res.msgs), 1)
self.assertEqual(len(res.msgs[0]), 4)
res = self.ldb_admin.search(self.base_dn,
expression="(objectclass=configuration)",
attrs=["ntSecurityDescriptor"],
controls=["dirsync:1:0:10000"])
self.assertEqual(len(res.msgs), 1)
self.assertEqual(len(res.msgs[0]), 3)
res = self.ldb_admin.search(self.base_dn,
expression="(objectclass=domaindns)",
attrs=["ntSecurityDescriptor"],
controls=["dirsync:1:0:10000"])
nb = len(res.msgs)
# only sub nc returns a result when asked for objectGUID
res = self.ldb_admin.search(self.base_dn,
expression="(objectclass=domaindns)",
attrs=["objectGUID"],
controls=["dirsync:1:0:0"])
self.assertEqual(len(res.msgs), nb - 1)
if nb > 1:
self.assertTrue(res.msgs[0].get("objectGUID") != None)
else:
res = self.ldb_admin.search(self.base_dn,
expression="(objectclass=configuration)",
attrs=["objectGUID"],
controls=["dirsync:1:0:0"])
def test_dirsync_send_delta(self):
"""Check that dirsync return correct delta when sending the last cookie"""
res = self.ldb_admin.search(self.base_dn,
expression="(&(samaccountname=test*)(!(isDeleted=*)))",
controls=["dirsync:1:0:10000"])
ctl = str(res.controls[0]).split(":")
ctl[1] = "1"
ctl[2] = "0"
ctl[3] = "10000"
control = str(":".join(ctl))
res = self.ldb_admin.search(self.base_dn,
expression="(&(samaccountname=test*)(!(isDeleted=*)))",
controls=[control])
self.assertEqual(len(res), 0)
res = self.ldb_admin.search(self.base_dn,
expression="(&(objectClass=organizationalUnit)(!(isDeleted=*)))",
controls=["dirsync:1:0:100000"])
ctl = str(res.controls[0]).split(":")
ctl[1] = "1"
ctl[2] = "0"
ctl[3] = "10000"
control2 = str(":".join(ctl))
# Let's create an OU
ouname="OU=testou2,%s" % self.base_dn
self.ouname = ouname
self.ldb_admin.create_ou(ouname)
res = self.ldb_admin.search(self.base_dn,
expression="(&(objectClass=organizationalUnit)(!(isDeleted=*)))",
controls=[control2])
self.assertEqual(len(res), 1)
ctl = str(res.controls[0]).split(":")
ctl[1] = "1"
ctl[2] = "0"
ctl[3] = "10000"
control3 = str(":".join(ctl))
delta = Message()
delta.dn = Dn(self.ldb_admin, str(ouname))
delta["cn"] = MessageElement("test ou",
FLAG_MOD_ADD,
"cn" )
self.ldb_admin.modify(delta)
res = self.ldb_admin.search(self.base_dn,
expression="(&(objectClass=organizationalUnit)(!(isDeleted=*)))",
controls=[control3])
self.assertEqual(len(res.msgs), 1)
# 3 attributes: instanceType, cn and objectGUID
self.assertEqual(len(res.msgs[0]), 3)
delta = Message()
delta.dn = Dn(self.ldb_admin, str(ouname))
delta["cn"] = MessageElement([],
FLAG_MOD_DELETE,
"cn" )
self.ldb_admin.modify(delta)
res = self.ldb_admin.search(self.base_dn,
expression="(&(objectClass=organizationalUnit)(!(isDeleted=*)))",
controls=[control3])
self.assertEqual(len(res.msgs), 1)
# So we won't have much attribute returned but instanceType and GUID
# are.
# 3 attributes: instanceType and objectGUID and cn but empty
self.assertEqual(len(res.msgs[0]), 3)
ouname = "OU=newouname,%s" % self.base_dn
self.ldb_admin.rename(str(res[0].dn), str(Dn(self.ldb_admin, ouname)))
self.ouname = ouname
ctl = str(res.controls[0]).split(":")
ctl[1] = "1"
ctl[2] = "0"
ctl[3] = "10000"
control4 = str(":".join(ctl))
res = self.ldb_admin.search(self.base_dn,
expression="(&(objectClass=organizationalUnit)(!(isDeleted=*)))",
controls=[control3])
self.assertTrue(res[0].get("parentGUID") != None)
self.assertTrue(res[0].get("name") != None)
delete_force(self.ldb_admin, ouname)
def test_dirsync_linkedattributes(self):
"""Check that dirsync returnd deleted objects too"""
# Let's search for members
self.ldb_simple = self.get_ldb_connection(self.simple_user, self.user_pass)
res = self.ldb_simple.search(self.base_dn,
expression="(name=Administrators)",
controls=["dirsync:1:1:1"])
self.assertTrue(len(res[0].get("member")) > 0)
size = len(res[0].get("member"))
ctl = str(res.controls[0]).split(":")
ctl[1] = "1"
ctl[2] = "1"
ctl[3] = "10000"
control1 = str(":".join(ctl))
self.ldb_admin.add_remove_group_members("Administrators", [self.simple_user],
add_members_operation=True)
res = self.ldb_simple.search(self.base_dn,
expression="(name=Administrators)",
controls=[control1])
self.assertEqual(len(res[0].get("member")), size + 1)
ctl = str(res.controls[0]).split(":")
ctl[1] = "1"
ctl[2] = "1"
ctl[3] = "10000"
control1 = str(":".join(ctl))
# remove the user from the group
self.ldb_admin.add_remove_group_members("Administrators", [self.simple_user],
add_members_operation=False)
res = self.ldb_simple.search(self.base_dn,
expression="(name=Administrators)",
controls=[control1])
self.assertEqual(len(res[0].get("member")), size )
self.ldb_admin.newgroup("testgroup")
self.ldb_admin.add_remove_group_members("testgroup", [self.simple_user],
add_members_operation=True)
res = self.ldb_admin.search(self.base_dn,
expression="(name=testgroup)",
controls=["dirsync:1:0:1"])
self.assertEqual(len(res[0].get("member")), 1)
self.assertTrue(res[0].get("member") != "" )
ctl = str(res.controls[0]).split(":")
ctl[1] = "1"
ctl[2] = "0"
ctl[3] = "1"
control1 = str(":".join(ctl))
# Check that reasking the same question but with an updated cookie
# didn't return any results.
print control1
res = self.ldb_admin.search(self.base_dn,
expression="(name=testgroup)",
controls=[control1])
self.assertEqual(len(res), 0)
ctl = str(res.controls[0]).split(":")
ctl[1] = "1"
ctl[2] = "1"
ctl[3] = "10000"
control1 = str(":".join(ctl))
self.ldb_admin.add_remove_group_members("testgroup", [self.simple_user],
add_members_operation=False)
res = self.ldb_admin.search(self.base_dn,
expression="(name=testgroup)",
attrs=["member"],
controls=[control1])
self.ldb_admin.deletegroup("testgroup")
self.assertEqual(len(res[0].get("member")), 0)
def test_dirsync_deleted_items(self):
"""Check that dirsync returnd deleted objects too"""
# Let's create an OU
ouname="OU=testou3,%s" % self.base_dn
self.ouname = ouname
self.ldb_admin.create_ou(ouname)
res = self.ldb_admin.search(self.base_dn,
expression="(&(objectClass=organizationalUnit)(!(isDeleted=*)))",
controls=["dirsync:1:0:1"])
guid = None
for e in res:
if str(e["name"]) == "testou3":
guid = str(ndr_unpack(misc.GUID,e.get("objectGUID")[0]))
ctl = str(res.controls[0]).split(":")
ctl[1] = "1"
ctl[2] = "0"
ctl[3] = "10000"
control1 = str(":".join(ctl))
# So now delete the object and check that
# we can see the object but deleted when admin
delete_force(self.ldb_admin, ouname)
res = self.ldb_admin.search(self.base_dn,
expression="(objectClass=organizationalUnit)",
controls=[control1])
self.assertEqual(len(res), 1)
guid2 = str(ndr_unpack(misc.GUID,res[0].get("objectGUID")[0]))
self.assertEqual(guid2, guid)
self.assertTrue(res[0].get("isDeleted"))
self.assertTrue(res[0].get("name") != None)
def test_cookie_from_others(self):
res = self.ldb_admin.search(self.base_dn,
expression="(&(objectClass=organizationalUnit)(!(isDeleted=*)))",
controls=["dirsync:1:0:1"])
ctl = str(res.controls[0]).split(":")
cookie = ndr_unpack(drsblobs.ldapControlDirSyncCookie, base64.b64decode(str(ctl[4])))
cookie.blob.guid1 = misc.GUID("128a99bf-abcd-1234-abcd-1fb625e530db")
controls=["dirsync:1:0:0:%s" % base64.b64encode(ndr_pack(cookie))]
res = self.ldb_admin.search(self.base_dn,
expression="(&(objectClass=organizationalUnit)(!(isDeleted=*)))",
controls=controls)
class ExtendedDirsyncTests(SimpleDirsyncTests):
def test_dirsync_linkedattributes(self):
flag_incr_linked = 2147483648
self.ldb_simple = self.get_ldb_connection(self.simple_user, self.user_pass)
res = self.ldb_admin.search(self.base_dn,
attrs=["member"],
expression="(name=Administrators)",
controls=["dirsync:1:%d:1" % flag_incr_linked])
self.assertTrue(res[0].get("member;range=1-1") != None )
self.assertTrue(len(res[0].get("member;range=1-1")) > 0)
size = len(res[0].get("member;range=1-1"))
ctl = str(res.controls[0]).split(":")
ctl[1] = "1"
ctl[2] = "%d" % flag_incr_linked
ctl[3] = "10000"
control1 = str(":".join(ctl))
self.ldb_admin.add_remove_group_members("Administrators", [self.simple_user],
add_members_operation=True)
self.ldb_admin.add_remove_group_members("Administrators", [self.dirsync_user],
add_members_operation=True)
res = self.ldb_admin.search(self.base_dn,
expression="(name=Administrators)",
controls=[control1])
self.assertEqual(len(res[0].get("member;range=1-1")), 2)
ctl = str(res.controls[0]).split(":")
ctl[1] = "1"
ctl[2] = "%d" % flag_incr_linked
ctl[3] = "10000"
control1 = str(":".join(ctl))
# remove the user from the group
self.ldb_admin.add_remove_group_members("Administrators", [self.simple_user],
add_members_operation=False)
res = self.ldb_admin.search(self.base_dn,
expression="(name=Administrators)",
controls=[control1])
self.assertEqual(res[0].get("member;range=1-1"), None )
self.assertEqual(len(res[0].get("member;range=0-0")), 1)
ctl = str(res.controls[0]).split(":")
ctl[1] = "1"
ctl[2] = "%d" % flag_incr_linked
ctl[3] = "10000"
control2 = str(":".join(ctl))
self.ldb_admin.add_remove_group_members("Administrators", [self.dirsync_user],
add_members_operation=False)
res = self.ldb_admin.search(self.base_dn,
expression="(name=Administrators)",
controls=[control2])
self.assertEqual(res[0].get("member;range=1-1"), None )
self.assertEqual(len(res[0].get("member;range=0-0")), 1)
res = self.ldb_admin.search(self.base_dn,
expression="(name=Administrators)",
controls=[control1])
self.assertEqual(res[0].get("member;range=1-1"), None )
self.assertEqual(len(res[0].get("member;range=0-0")), 2)
def test_dirsync_deleted_items(self):
"""Check that dirsync returnd deleted objects too"""
# Let's create an OU
self.ldb_simple = self.get_ldb_connection(self.simple_user, self.user_pass)
ouname="OU=testou3,%s" % self.base_dn
self.ouname = ouname
self.ldb_admin.create_ou(ouname)
# Specify LDAP_DIRSYNC_OBJECT_SECURITY
res = self.ldb_simple.search(self.base_dn,
expression="(&(objectClass=organizationalUnit)(!(isDeleted=*)))",
controls=["dirsync:1:1:1"])
guid = None
for e in res:
if str(e["name"]) == "testou3":
guid = str(ndr_unpack(misc.GUID,e.get("objectGUID")[0]))
self.assertTrue(guid != None)
ctl = str(res.controls[0]).split(":")
ctl[1] = "1"
ctl[2] = "1"
ctl[3] = "10000"
control1 = str(":".join(ctl))
# So now delete the object and check that
# we can see the object but deleted when admin
# we just see the objectGUID when simple user
delete_force(self.ldb_admin, ouname)
res = self.ldb_simple.search(self.base_dn,
expression="(objectClass=organizationalUnit)",
controls=[control1])
self.assertEqual(len(res), 1)
guid2 = str(ndr_unpack(misc.GUID,res[0].get("objectGUID")[0]))
self.assertEqual(guid2, guid)
self.assertEqual(str(res[0].dn), "")
if not getattr(opts, "listtests", False):
lp = sambaopts.get_loadparm()
samba.tests.cmdline_credentials = credopts.get_credentials(lp)
TestProgram(module=__name__, opts=subunitopts)
| o5k/openerp-oemedical-v0.1 | refs/heads/master | openerp/addons/hr_timesheet_sheet/wizard/__init__.py | 443 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_timesheet_current
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| spirali/shampoo | refs/heads/master | src/base/paths.py | 1 |
#
# Copyright (C) 2014 Stanislav Bohm
#
# This file is part of Shampoo.
#
# Shampoo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# Shampoo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Shampoo. If not, see <http://www.gnu.org/licenses/>.
#
import os.path
SHAMPOO_ROOT = os.path.dirname(
os.path.dirname(
os.path.dirname(os.path.abspath(__file__))))
RESOURCES = os.path.join(SHAMPOO_ROOT, "resources")
SHADERS = os.path.join(RESOURCES, "shaders")
def makedir_if_not_exists(dirname):
if not os.path.isdir(dirname):
os.makedirs(dirname)
| silenceli/nova | refs/heads/master | nova/tests/unit/api/openstack/compute/test_microversions.py | 6 |
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo.config import cfg
from oslo.serialization import jsonutils
from nova.api.openstack import api_version_request as api_version
from nova import test
from nova.tests.unit.api.openstack import fakes
CONF = cfg.CONF
class MicroversionsTest(test.NoDBTestCase):
@mock.patch("nova.api.openstack.APIRouterV21.api_extension_namespace",
return_value='nova.api.v3.test_extensions')
def test_microversions_no_header(self, mock_namespace):
app = fakes.wsgi_app_v21(init_only='test-microversions')
req = fakes.HTTPRequest.blank('/v2/fake/microversions')
res = req.get_response(app)
self.assertEqual(200, res.status_int)
resp_json = jsonutils.loads(res.body)
self.assertEqual('val', resp_json['param'])
@mock.patch("nova.api.openstack.APIRouterV21.api_extension_namespace",
return_value='nova.api.v3.test_extensions')
def test_microversions_return_header(self, mock_namespace):
app = fakes.wsgi_app_v21(init_only='test-microversions')
req = fakes.HTTPRequest.blank('/v2/fake/microversions')
res = req.get_response(app)
self.assertEqual(200, res.status_int)
resp_json = jsonutils.loads(res.body)
self.assertEqual('val', resp_json['param'])
self.assertEqual("2.1", res.headers['X-OpenStack-Compute-API-Version'])
self.assertEqual("X-OpenStack-Compute-API-Version",
res.headers['Vary'])
@mock.patch("nova.api.openstack.api_version_request.max_api_version")
@mock.patch("nova.api.openstack.APIRouterV21.api_extension_namespace",
return_value='nova.api.v3.test_extensions')
def test_microversions_return_header_non_default(self, mock_namespace,
mock_maxver):
mock_maxver.return_value = api_version.APIVersionRequest("2.3")
app = fakes.wsgi_app_v21(init_only='test-microversions')
req = fakes.HTTPRequest.blank('/v2/fake/microversions')
req.headers = {'X-OpenStack-Compute-API-Version': '2.3'}
res = req.get_response(app)
self.assertEqual(200, res.status_int)
resp_json = jsonutils.loads(res.body)
self.assertEqual('val2', resp_json['param'])
self.assertEqual("2.3", res.headers['X-OpenStack-Compute-API-Version'])
self.assertEqual("X-OpenStack-Compute-API-Version",
res.headers['Vary'])
@mock.patch("nova.api.openstack.api_version_request.max_api_version")
@mock.patch("nova.api.openstack.APIRouterV21.api_extension_namespace",
return_value='nova.api.v3.test_extensions')
def test_microversions_return_header_fault(self, mock_namespace,
mock_maxver):
mock_maxver.return_value = api_version.APIVersionRequest("3.0")
app = fakes.wsgi_app_v21(init_only='test-microversions')
req = fakes.HTTPRequest.blank('/v2/fake/microversions')
req.headers = {'X-OpenStack-Compute-API-Version': '3.0'}
res = req.get_response(app)
self.assertEqual(400, res.status_int)
self.assertEqual("3.0", res.headers['X-OpenStack-Compute-API-Version'])
self.assertEqual("X-OpenStack-Compute-API-Version",
res.headers['Vary'])
@mock.patch("nova.api.openstack.api_version_request.max_api_version")
@mock.patch("nova.api.openstack.APIRouterV21.api_extension_namespace",
return_value='nova.api.v3.test_extensions')
def test_microversions_with_header(self, mock_namespace, mock_maxver):
mock_maxver.return_value = api_version.APIVersionRequest("2.3")
app = fakes.wsgi_app_v21(init_only='test-microversions')
req = fakes.HTTPRequest.blank('/v2/fake/microversions')
req.headers = {'X-OpenStack-Compute-API-Version': '2.3'}
res = req.get_response(app)
self.assertEqual(200, res.status_int)
resp_json = jsonutils.loads(res.body)
self.assertEqual('val2', resp_json['param'])
@mock.patch("nova.api.openstack.api_version_request.max_api_version")
@mock.patch("nova.api.openstack.APIRouterV21.api_extension_namespace",
return_value='nova.api.v3.test_extensions')
def test_microversions_with_header_exact_match(self, mock_namespace,
mock_maxver):
mock_maxver.return_value = api_version.APIVersionRequest("2.3")
app = fakes.wsgi_app_v21(init_only='test-microversions')
req = fakes.HTTPRequest.blank('/v2/fake/microversions')
req.headers = {'X-OpenStack-Compute-API-Version': '2.2'}
res = req.get_response(app)
self.assertEqual(200, res.status_int)
resp_json = jsonutils.loads(res.body)
self.assertEqual('val2', resp_json['param'])
@mock.patch("nova.api.openstack.api_version_request.max_api_version")
@mock.patch("nova.api.openstack.APIRouterV21.api_extension_namespace",
return_value='nova.api.v3.test_extensions')
def test_microversions2_no_2_1_version(self, mock_namespace, mock_maxver):
mock_maxver.return_value = api_version.APIVersionRequest("2.3")
app = fakes.wsgi_app_v21(init_only='test-microversions')
req = fakes.HTTPRequest.blank('/v2/fake/microversions2')
req.headers = {'X-OpenStack-Compute-API-Version': '2.3'}
res = req.get_response(app)
self.assertEqual(200, res.status_int)
resp_json = jsonutils.loads(res.body)
self.assertEqual('controller2_val1', resp_json['param'])
@mock.patch("nova.api.openstack.api_version_request.max_api_version")
@mock.patch("nova.api.openstack.APIRouterV21.api_extension_namespace",
return_value='nova.api.v3.test_extensions')
def test_microversions2_later_version(self, mock_namespace, mock_maxver):
mock_maxver.return_value = api_version.APIVersionRequest("3.1")
app = fakes.wsgi_app_v21(init_only='test-microversions')
req = fakes.HTTPRequest.blank('/v2/fake/microversions2')
req.headers = {'X-OpenStack-Compute-API-Version': '3.0'}
res = req.get_response(app)
self.assertEqual(202, res.status_int)
resp_json = jsonutils.loads(res.body)
self.assertEqual('controller2_val2', resp_json['param'])
@mock.patch("nova.api.openstack.api_version_request.max_api_version")
@mock.patch("nova.api.openstack.APIRouterV21.api_extension_namespace",
return_value='nova.api.v3.test_extensions')
def test_microversions2_version_too_high(self, mock_namespace,
mock_maxver):
mock_maxver.return_value = api_version.APIVersionRequest("3.5")
app = fakes.wsgi_app_v21(init_only='test-microversions')
req = fakes.HTTPRequest.blank('/v2/fake/microversions2')
req.headers = {'X-OpenStack-Compute-API-Version': '3.2'}
res = req.get_response(app)
self.assertEqual(404, res.status_int)
@mock.patch("nova.api.openstack.APIRouterV21.api_extension_namespace",
return_value='nova.api.v3.test_extensions')
def test_microversions2_version_too_low(self, mock_namespace):
app = fakes.wsgi_app_v21(init_only='test-microversions')
req = fakes.HTTPRequest.blank('/v2/fake/microversions2')
req.headers = {'X-OpenStack-Compute-API-Version': '2.1'}
res = req.get_response(app)
self.assertEqual(404, res.status_int)
@mock.patch("nova.api.openstack.api_version_request.max_api_version")
@mock.patch("nova.api.openstack.APIRouterV21.api_extension_namespace",
return_value='nova.api.v3.test_extensions')
def test_microversions_global_version_too_high(self, mock_namespace,
mock_maxver):
mock_maxver.return_value = api_version.APIVersionRequest("3.5")
app = fakes.wsgi_app_v21(init_only='test-microversions')
req = fakes.HTTPRequest.blank('/v2/fake/microversions2')
req.headers = {'X-OpenStack-Compute-API-Version': '3.7'}
res = req.get_response(app)
self.assertEqual(406, res.status_int)
res_json = jsonutils.loads(res.body)
self.assertEqual("Version 3.7 is not supported by the API. "
"Minimum is 2.1 and maximum is 3.5.",
res_json['computeFault']['message'])
@mock.patch("nova.api.openstack.api_version_request.max_api_version")
@mock.patch("nova.api.openstack.APIRouterV21.api_extension_namespace",
return_value='nova.api.v3.test_extensions')
def test_microversions_schema(self, mock_namespace, mock_maxver):
mock_maxver.return_value = api_version.APIVersionRequest("3.3")
app = fakes.wsgi_app_v21(init_only='test-microversions')
req = fakes.HTTPRequest.blank('/v2/fake/microversions3')
req.method = 'POST'
req.headers = {'X-OpenStack-Compute-API-Version': '2.2'}
req.environ['CONTENT_TYPE'] = "application/json"
req.body = jsonutils.dumps({'dummy': {'val': 'foo'}})
res = req.get_response(app)
self.assertEqual(200, res.status_int)
resp_json = jsonutils.loads(res.body)
self.assertEqual('create_val1', resp_json['param'])
self.assertEqual("2.2", res.headers['X-OpenStack-Compute-API-Version'])
self.assertEqual("X-OpenStack-Compute-API-Version",
res.headers['Vary'])
@mock.patch("nova.api.openstack.api_version_request.max_api_version")
@mock.patch("nova.api.openstack.APIRouterV21.api_extension_namespace",
return_value='nova.api.v3.test_extensions')
def test_microversions_schema_fail(self, mock_namespace, mock_maxver):
mock_maxver.return_value = api_version.APIVersionRequest("3.3")
app = fakes.wsgi_app_v21(init_only='test-microversions')
req = fakes.HTTPRequest.blank('/v2/fake/microversions3')
req.method = 'POST'
req.headers = {'X-OpenStack-Compute-API-Version': '2.2'}
req.environ['CONTENT_TYPE'] = "application/json"
req.body = jsonutils.dumps({'dummy': {'invalid_param': 'foo'}})
res = req.get_response(app)
self.assertEqual(400, res.status_int)
resp_json = jsonutils.loads(res.body)
self.assertTrue(resp_json['badRequest']['message'].startswith(
"Invalid input for field/attribute dummy."))
@mock.patch("nova.api.openstack.api_version_request.max_api_version")
@mock.patch("nova.api.openstack.APIRouterV21.api_extension_namespace",
return_value='nova.api.v3.test_extensions')
def test_microversions_schema_out_of_version_check(self, mock_namespace,
mock_maxver):
mock_maxver.return_value = api_version.APIVersionRequest("3.3")
app = fakes.wsgi_app_v21(init_only='test-microversions')
req = fakes.HTTPRequest.blank('/v2/fake/microversions3/1')
req.method = 'PUT'
req.headers = {'X-OpenStack-Compute-API-Version': '2.2'}
req.body = jsonutils.dumps({'dummy': {'inv_val': 'foo'}})
req.environ['CONTENT_TYPE'] = "application/json"
res = req.get_response(app)
self.assertEqual(200, res.status_int)
resp_json = jsonutils.loads(res.body)
self.assertEqual('update_val1', resp_json['param'])
self.assertEqual("2.2", res.headers['X-OpenStack-Compute-API-Version'])
@mock.patch("nova.api.openstack.api_version_request.max_api_version")
@mock.patch("nova.api.openstack.APIRouterV21.api_extension_namespace",
return_value='nova.api.v3.test_extensions')
def test_microversions_schema_second_version(self, mock_namespace,
mock_maxver):
mock_maxver.return_value = api_version.APIVersionRequest("3.3")
app = fakes.wsgi_app_v21(init_only='test-microversions')
req = fakes.HTTPRequest.blank('/v2/fake/microversions3/1')
req.headers = {'X-OpenStack-Compute-API-Version': '2.10'}
req.environ['CONTENT_TYPE'] = "application/json"
req.method = 'PUT'
req.body = jsonutils.dumps({'dummy': {'val2': 'foo'}})
res = req.get_response(app)
self.assertEqual(200, res.status_int)
resp_json = jsonutils.loads(res.body)
self.assertEqual('update_val1', resp_json['param'])
self.assertEqual("2.10",
res.headers['X-OpenStack-Compute-API-Version'])
| bhgv/bCNC | refs/heads/master | EditorPage.py | 1 |
# -*- coding: ascii -*-
# $Id$
#
# Author: vvlachoudis@gmail.com
# Date: 18-Jun-2015
__author__ = "Vasilis Vlachoudis"
__email__ = "vvlachoudis@gmail.com"
try:
from Tkinter import *
except ImportError:
from tkinter import *
import tkExtra
import Utils
import Ribbon
import CNCList
import CNCRibbon
from CNCCanvas import ACTION_MOVE, ACTION_ORIGIN
#===============================================================================
# Clipboard Group
#===============================================================================
class ClipboardGroup(CNCRibbon.ButtonGroup):
def __init__(self, master, app):
CNCRibbon.ButtonGroup.__init__(self, master, N_("Clipboard"), app)
self.grid2rows()
# ---
b = Ribbon.LabelButton(self.frame, self, "<<Paste>>",
image=Utils.icons["paste32"],
text=_("Paste"),
compound=TOP,
takefocus=FALSE,
background=Ribbon._BACKGROUND)
b.grid(row=0, column=0, rowspan=2, padx=0, pady=0, sticky=NSEW)
tkExtra.Balloon.set(b, _("Paste [Ctrl-V]"))
self.addWidget(b)
# ---
b = Ribbon.LabelButton(self.frame, self, "<<Cut>>",
image=Utils.icons["cut"],
text=_("Cut"),
compound=LEFT,
anchor=W,
takefocus=FALSE,
background=Ribbon._BACKGROUND)
tkExtra.Balloon.set(b, _("Cut [Ctrl-X]"))
b.grid(row=0, column=1, padx=0, pady=1, sticky=NSEW)
self.addWidget(b)
# ---
b = Ribbon.LabelButton(self.frame, self, "<<Copy>>",
image=Utils.icons["copy"],
text=_("Copy"),
compound=LEFT,
anchor=W,
takefocus=FALSE,
background=Ribbon._BACKGROUND)
tkExtra.Balloon.set(b, _("Copy [Ctrl-C]"))
b.grid(row=1, column=1, padx=0, pady=1, sticky=NSEW)
self.addWidget(b)
#===============================================================================
# Select Group
#===============================================================================
class SelectGroup(CNCRibbon.ButtonGroup):
def __init__(self, master, app):
CNCRibbon.ButtonGroup.__init__(self, master, N_("Select"), app)
self.grid3rows()
# ---
col,row=0,0
b = Ribbon.LabelButton(self.frame, app, "<<SelectAll>>",
image=Utils.icons["select_all"],
text=_("All"),
compound=LEFT,
anchor=W,
background=Ribbon._BACKGROUND)
b.grid(row=row, column=col, padx=0, pady=0, sticky=NSEW)
tkExtra.Balloon.set(b, _("Select all blocks [Ctrl-A]"))
self.addWidget(b)
# ---
col += 1
b = Ribbon.LabelButton(self.frame, app, "<<SelectNone>>",
image=Utils.icons["select_none"],
text=_("None"),
compound=LEFT,
anchor=W,
background=Ribbon._BACKGROUND)
b.grid(row=row, column=col, padx=0, pady=0, sticky=NSEW)
tkExtra.Balloon.set(b, _("Unselect all blocks [Ctrl-Shift-A]"))
self.addWidget(b)
# ---
col,row=0,1
b = Ribbon.LabelButton(self.frame, app, "<<SelectInvert>>",
image=Utils.icons["select_invert"],
text=_("Invert"),
compound=LEFT,
anchor=W,
background=Ribbon._BACKGROUND)
b.grid(row=row, column=col, padx=0, pady=0, sticky=NSEW)
tkExtra.Balloon.set(b, _("Invert selection [Ctrl-I]"))
self.addWidget(b)
# ---
col += 1
b = Ribbon.LabelButton(self.frame, app, "<<SelectLayer>>",
image=Utils.icons["select_layer"],
text=_("Layer"),
compound=LEFT,
anchor=W,
background=Ribbon._BACKGROUND)
b.grid(row=row, column=col, padx=0, pady=0, sticky=NSEW)
tkExtra.Balloon.set(b, _("Select all blocks from current layer"))
self.addWidget(b)
# ---
col, row = 0,2
self.filterString = tkExtra.LabelEntry(self.frame,
"Filter",
"DarkGray",
background="White",
width=16)
self.filterString.grid(row=row, column=col, columnspan=2, padx=0, pady=0, sticky=NSEW)
tkExtra.Balloon.set(self.filterString, _("Filter blocks"))
self.addWidget(self.filterString)
self.filterString.bind("<Return>", self.filter)
self.filterString.bind("<KP_Enter>", self.filter)
#-----------------------------------------------------------------------
def filter(self, event=None):
txt = self.filterString.get()
self.app.insertCommand("FILTER %s"%(txt), True)
#===============================================================================
# Edit Group
#===============================================================================
class EditGroup(CNCRibbon.ButtonMenuGroup):
def __init__(self, master, app):
CNCRibbon.ButtonMenuGroup.__init__(self, master, N_("Edit"), app,
[(_("Import"), "load", lambda a=app:a.insertCommand("IMPORT",True)),
(_("Inkscape"), "inkscape", lambda a=app:a.insertCommand("INKSCAPE all",True)),
(_("Round"), "digits", lambda s=app:s.insertCommand("ROUND", True)),
(_("Statistics"),"stats", app.showStats)
])
self.grid3rows()
# ---
col,row=0,0
b = Ribbon.LabelButton(self.frame, app, "<<Add>>",
image=Utils.icons["add"],
text=_("Add"),
compound=LEFT,
anchor=W,
background=Ribbon._BACKGROUND)
b.grid(row=row, column=col, padx=0, pady=0, sticky=NSEW)
tkExtra.Balloon.set(b, _("Insert a new block or line of code [Ins or Ctrl-Enter]"))
self.addWidget(b)
# ---
row += 1
b = Ribbon.LabelButton(self.frame, app, "<<Clone>>",
image=Utils.icons["clone"],
text=_("Clone"),
compound=LEFT,
anchor=W,
background=Ribbon._BACKGROUND)
b.grid(row=row, column=col, padx=0, pady=0, sticky=NSEW)
tkExtra.Balloon.set(b, _("Clone selected lines or blocks [Ctrl-D]"))
self.addWidget(b)
# ---
row += 1
b = Ribbon.LabelButton(self.frame, app, "<<Delete>>",
image=Utils.icons["x"],
text=_("Delete"),
compound=LEFT,
anchor=W,
background=Ribbon._BACKGROUND)
b.grid(row=row, column=col, padx=0, pady=0, sticky=NSEW)
tkExtra.Balloon.set(b, _("Delete selected lines or blocks [Del]"))
self.addWidget(b)
# ---
col,row=1,0
b = Ribbon.LabelButton(self.frame, self.app, "<<EnableToggle>>",
image=Utils.icons["toggle"],
#text=_("Toggle"),
#compound=LEFT,
anchor=W,
background=Ribbon._BACKGROUND)
b.grid(row=row, column=col, padx=0, pady=0, sticky=NSEW)
tkExtra.Balloon.set(b, _("Toggle enable/disable block of g-code [Ctrl-L]"))
self.addWidget(b)
menulist = [ (_("Enable"), "enable",
lambda a=self.app : a.event_generate("<<Enable>>")),
(_("Disable"), "disable",
lambda a=self.app : a.event_generate("<<Disable>>"))]
b = Ribbon.MenuButton(self.frame, menulist,
text=_("Active"),
image=Utils.icons["triangle_down"],
compound=RIGHT,
anchor=W,
background=Ribbon._BACKGROUND)
b.grid(row=row, column=col+1, padx=0, pady=0, sticky=NSEW)
tkExtra.Balloon.set(b, _("Enable or disable blocks of gcode"))
# ---
row += 1
b = Ribbon.LabelButton(self.frame, self.app, "<<Expand>>",
image=Utils.icons["expand"],
text=_("Expand"),
compound=LEFT,
anchor=W,
background=Ribbon._BACKGROUND)
b.grid(row=row, column=col, columnspan=2, padx=0, pady=0, sticky=NSEW)
tkExtra.Balloon.set(b, _("Toggle expand/collapse blocks of gcode [Ctrl-E]"))
self.addWidget(b)
# ---
row += 1
b = Ribbon.LabelButton(self.frame,
image=Utils.icons["comment"],
text=_("Comment"),
compound=LEFT,
anchor=W,
state=DISABLED,
background=Ribbon._BACKGROUND)
b.grid(row=row, column=col, columnspan=2, padx=0, pady=0, sticky=NSEW)
tkExtra.Balloon.set(b, _("(Un)Comment selected lines"))
self.addWidget(b)
# ---
col,row=3,1
b = Ribbon.LabelButton(self.frame, self.app, "<<ChangeColor>>",
image=Utils.icons["color"],
text=_("Color"),
compound=LEFT,
anchor=W,
background=Ribbon._BACKGROUND)
b.grid(row=row, column=col, padx=0, pady=0, sticky=NSEW)
tkExtra.Balloon.set(b, _("Change color for block of g-code"))
self.addWidget(b)
#===============================================================================
# Move Group
#===============================================================================
class MoveGroup(CNCRibbon.ButtonGroup):
def __init__(self, master, app):
CNCRibbon.ButtonGroup.__init__(self, master, N_("Move"), app)
self.grid3rows()
# ===
col,row = 0,0
b = Ribbon.LabelRadiobutton(self.frame,
image=Utils.icons["move32"],
text=_("Move"),
compound=TOP,
anchor=W,
variable=app.canvas.actionVar,
value=ACTION_MOVE,
command=app.canvas.setActionMove,
background=Ribbon._BACKGROUND)
b.grid(row=row, column=col, rowspan=3, padx=0, pady=0, sticky=NSEW)
tkExtra.Balloon.set(b, _("Move objects [M]"))
self.addWidget(b)
# ===
col += 1
row = 0
b = Ribbon.LabelButton(self.frame,
image=Utils.icons["TL"],
text=_("T-L"),
compound=LEFT,
anchor=W,
command=lambda s=app:s.insertCommand("MOVE TL",True),
background=Ribbon._BACKGROUND)
b.grid(row=row, column=col, padx=0, pady=0, sticky=NSEW)
tkExtra.Balloon.set(b, _("Move origin of g-code to Top-Left corner"))
self.addWidget(b)
# ---
row += 1
b = Ribbon.LabelButton(self.frame,
image=Utils.icons["LC"],
text=_("L"),
compound=LEFT,
anchor=W,
command=lambda s=app:s.insertCommand("MOVE LC",True),
background=Ribbon._BACKGROUND)
b.grid(row=row, column=col, padx=0, pady=0, sticky=NSEW)
tkExtra.Balloon.set(b, _("Move origin of g-code to Left side"))
self.addWidget(b)
# ---
row += 1
b = Ribbon.LabelButton(self.frame,
image=Utils.icons["BL"],
text=_("B-L"),
compound=LEFT,
anchor=W,
command=lambda s=app:s.insertCommand("MOVE BL",True),
background=Ribbon._BACKGROUND)
b.grid(row=row, column=col, padx=0, pady=0, sticky=NSEW)
tkExtra.Balloon.set(b, _("Move origin of g-code to Bottom-Left corner"))
self.addWidget(b)
# ====
col += 1
row = 0
b = Ribbon.LabelButton(self.frame,
image=Utils.icons["TC"],
text=_("Top"),
compound=LEFT,
anchor=W,
command=lambda s=app:s.insertCommand("MOVE TC",True),
background=Ribbon._BACKGROUND)
b.grid(row=row, column=col, padx=0, pady=0, sticky=NSEW)
tkExtra.Balloon.set(b, _("Move origin of g-code to Top side"))
self.addWidget(b)
# ---
row += 1
b = Ribbon.LabelButton(self.frame,
image=Utils.icons["center"],
text=_("Center"),
compound=LEFT,
anchor=W,
command=lambda s=app:s.insertCommand("MOVE CENTER",True),
background=Ribbon._BACKGROUND)
b.grid(row=row, column=col, padx=0, pady=0, sticky=NSEW)
tkExtra.Balloon.set(b, _("Move origin of g-code to center"))
self.addWidget(b)
# ---
row += 1
b = Ribbon.LabelButton(self.frame,
image=Utils.icons["BC"],
text=_("Bottom"),
compound=LEFT,
anchor=W,
command=lambda s=app:s.insertCommand("MOVE BC",True),
background=Ribbon._BACKGROUND)
b.grid(row=row, column=col, padx=0, pady=0, sticky=NSEW)
tkExtra.Balloon.set(b, _("Move origin of g-code to Bottom side"))
self.addWidget(b)
# ===
col += 1
row = 0
b = Ribbon.LabelButton(self.frame,
image=Utils.icons["TR"],
text=_("T-R"),
compound=LEFT,
anchor=W,
command=lambda s=app:s.insertCommand("MOVE TR",True),
background=Ribbon._BACKGROUND)
b.grid(row=row, column=col, padx=0, pady=0, sticky=NSEW)
tkExtra.Balloon.set(b, _("Move origin of g-code to Top-Right corner"))
self.addWidget(b)
# ---
row += 1
b = Ribbon.LabelButton(self.frame,
image=Utils.icons["RC"],
text=_("R"),
compound=LEFT,
anchor=W,
command=lambda s=app:s.insertCommand("MOVE RC",True),
background=Ribbon._BACKGROUND)
b.grid(row=row, column=col, padx=0, pady=0, sticky=NSEW)
tkExtra.Balloon.set(b, _("Move origin of g-code to Right side"))
self.addWidget(b)
# ---
row += 1
b = Ribbon.LabelButton(self.frame,
image=Utils.icons["BR"],
text=_("B-R"),
compound=LEFT,
anchor=W,
command=lambda s=app:s.insertCommand("MOVE BR",True),
background=Ribbon._BACKGROUND)
b.grid(row=row, column=col, padx=0, pady=0, sticky=NSEW)
tkExtra.Balloon.set(b, _("Move origin of g-code to Bottom-Right corner"))
self.addWidget(b)
# ---
col += 1
row = 1
b = Ribbon.LabelRadiobutton(self.frame,
image=Utils.icons["origin"],
text=_("Origin"),
compound=LEFT,
anchor=W,
variable=app.canvas.actionVar,
value=ACTION_ORIGIN,
command=app.canvas.setActionOrigin,
background=Ribbon._BACKGROUND)
b.grid(row=row, column=col, padx=0, pady=0, sticky=NSEW)
tkExtra.Balloon.set(b, _("Move all gcode such as origin is on mouse location [O]"))
self.addWidget(b)
#===============================================================================
# Order Group
#===============================================================================
class OrderGroup(CNCRibbon.ButtonMenuGroup):
def __init__(self, master, app):
CNCRibbon.ButtonMenuGroup.__init__(self, master, N_("Order"), app,
[(_("Optimize"), "optimize", lambda a=app:a.insertCommand("OPTIMIZE",True)),
])
self.grid2rows()
# ===
col,row=0,0
b = Ribbon.LabelButton(self.frame, self, "<Control-Key-Prior>",
image=Utils.icons["up"],
text=_("Up"),
compound=LEFT,
anchor=W,
background=Ribbon._BACKGROUND)
b.grid(row=row, column=col, padx=0, pady=0, sticky=NSEW)
tkExtra.Balloon.set(b, _("Move selected g-code up [Ctrl-Up, Ctrl-PgUp]"))
self.addWidget(b)
# ---
row += 1
b = Ribbon.LabelButton(self.frame, self, "<Control-Key-Next>",
image=Utils.icons["down"],
text=_("Down"),
compound=LEFT,
anchor=W,
background=Ribbon._BACKGROUND)
b.grid(row=row, column=col, padx=0, pady=0, sticky=NSEW)
tkExtra.Balloon.set(b, _("Move selected g-code down [Ctrl-Down, Ctrl-PgDn]"))
self.addWidget(b)
# ---
row += 1
b = Ribbon.LabelButton(self.frame, self, "<<Invert>>",
image=Utils.icons["swap"],
text=_("Invert"),
compound=LEFT,
anchor=W,
background=Ribbon._BACKGROUND)
b.grid(row=row, column=col, padx=0, pady=0, sticky=NSEW)
tkExtra.Balloon.set(b, _("Invert cutting order of selected blocks"))
self.addWidget(b)
#===============================================================================
# Transform Group
#===============================================================================
class TransformGroup(CNCRibbon.ButtonGroup):
def __init__(self, master, app):
CNCRibbon.ButtonGroup.__init__(self, master, N_("Transform"), app)
self.grid3rows()
# ---
col,row=0,0
b = Ribbon.LabelButton(self.frame,
image=Utils.icons["rotate90"],
text=_("CW"),
compound=LEFT,
anchor=W,
command=lambda s=app:s.insertCommand("ROTATE CW",True),
background=Ribbon._BACKGROUND)
b.grid(row=row, column=col, padx=0, pady=0, sticky=NSEW)
tkExtra.Balloon.set(b, _("Rotate selected gcode clock-wise (-90deg)"))
self.addWidget(b)
# ---
row += 1
b = Ribbon.LabelButton(self.frame,
image=Utils.icons["rotate180"],
text=_("Flip"),
compound=LEFT,
anchor=W,
command=lambda s=app:s.insertCommand("ROTATE FLIP",True),
background=Ribbon._BACKGROUND)
b.grid(row=row, column=col, padx=0, pady=0, sticky=NSEW)
tkExtra.Balloon.set(b, _("Rotate selected gcode by 180deg"))
self.addWidget(b)
# ---
row += 1
b = Ribbon.LabelButton(self.frame,
image=Utils.icons["rotate270"],
text=_("CCW"),
compound=LEFT,
anchor=W,
command=lambda s=app:s.insertCommand("ROTATE CCW",True),
background=Ribbon._BACKGROUND)
b.grid(row=row, column=col, padx=0, pady=0, sticky=NSEW)
tkExtra.Balloon.set(b, _("Rotate selected gcode counter-clock-wise (90deg)"))
self.addWidget(b)
# ---
col,row=1,0
b = Ribbon.LabelButton(self.frame,
image=Utils.icons["flip-horizontal"],
text=_("Horizontal"),
compound=LEFT,
anchor=W,
command=lambda s=app:s.insertCommand("MIRROR horizontal",True),
background=Ribbon._BACKGROUND)
b.grid(row=row, column=col, padx=0, pady=0, sticky=NSEW)
tkExtra.Balloon.set(b, _("Mirror horizontally X=-X selected gcode"))
self.addWidget(b)
# ---
row += 1
b = Ribbon.LabelButton(self.frame,
image=Utils.icons["flip-vertical"],
text=_("Vertical"),
compound=LEFT,
anchor=W,
command=lambda s=app:s.insertCommand("MIRROR vertical",True),
background=Ribbon._BACKGROUND)
b.grid(row=row, column=col, padx=0, pady=0, sticky=NSEW)
tkExtra.Balloon.set(b, _("Mirror vertically Y=-Y selected gcode"))
self.addWidget(b)
# submenu.add_command(label=_("Rotate command"), underline=0,
# command=lambda s=self:s.insertCommand("ROTATE ang x0 y0", False))
#===============================================================================
# Route Group
#===============================================================================
class RouteGroup(CNCRibbon.ButtonGroup):
def __init__(self, master, app):
CNCRibbon.ButtonGroup.__init__(self, master, N_("Route"), app)
self.grid3rows()
# ---
col,row=0,0
b = Ribbon.LabelButton(self.frame,
image=Utils.icons["conventional"],
text=_("Conventional"),
compound=LEFT,
anchor=W,
command=lambda s=app:s.insertCommand("DIRECTION CONVENTIONAL", True),
background=Ribbon._BACKGROUND)
b.grid(row=row, column=col, padx=0, pady=0, sticky=NSEW)
tkExtra.Balloon.set(b, _("Change cut direction to conventional for selected gcode blocks"))
self.addWidget(b)
# ---
row += 1
b = Ribbon.LabelButton(self.frame,
image=Utils.icons["climb"],
text=_("Climb"),
compound=LEFT,
anchor=W,
command=lambda s=app:s.insertCommand("DIRECTION CLIMB", True),
background=Ribbon._BACKGROUND)
b.grid(row=row, column=col, padx=0, pady=0, sticky=NSEW)
tkExtra.Balloon.set(b, _("Change cut direction to climb for selected gcode blocks"))
self.addWidget(b)
# ---
row += 1
b = Ribbon.LabelButton(self.frame,
image=Utils.icons["reverse"],
text=_("Reverse"),
compound=LEFT,
anchor=W,
command=lambda s=app:s.insertCommand("REVERSE", True),
background=Ribbon._BACKGROUND)
b.grid(row=row, column=col, padx=0, pady=0, sticky=NSEW)
tkExtra.Balloon.set(b, _("Reverse cut direction for selected gcode blocks"))
self.addWidget(b)
#===============================================================================
# Info Group
#===============================================================================
class InfoGroup(CNCRibbon.ButtonGroup):
def __init__(self, master, app):
CNCRibbon.ButtonGroup.__init__(self, master, N_("Info"), app)
self.grid2rows()
# ---
col,row=0,0
b = Ribbon.LabelButton(self.frame,
image=Utils.icons["stats"],
text=_("Statistics"),
compound=LEFT,
anchor=W,
command=app.showStats,
background=Ribbon._BACKGROUND)
b.grid(row=row, column=col, padx=0, pady=0, sticky=NSEW)
tkExtra.Balloon.set(b, _("Show statistics for enabled gcode"))
self.addWidget(b)
# ---
row += 1
b = Ribbon.LabelButton(self.frame,
image=Utils.icons["info"],
text=_("Info"),
compound=LEFT,
anchor=W,
command=app.showInfo,
background=Ribbon._BACKGROUND)
b.grid(row=row, column=col, padx=0, pady=0, sticky=NSEW)
tkExtra.Balloon.set(b, _("Show cutting information on selected blocks [Ctrl-n]"))
self.addWidget(b)
#===============================================================================
# Main Frame of Editor
#===============================================================================
class EditorFrame(CNCRibbon.PageFrame):
def __init__(self, master, app):
CNCRibbon.PageFrame.__init__(self, master, "Editor", app)
self.editor = CNCList.CNCListbox(self, app,
selectmode=EXTENDED,
exportselection=0,
background="White")
self.editor.pack(side=LEFT,expand=TRUE, fill=BOTH)
self.addWidget(self.editor)
sb = Scrollbar(self, orient=VERTICAL, command=self.editor.yview)
sb.pack(side=RIGHT, fill=Y)
self.editor.config(yscrollcommand=sb.set)
#===============================================================================
# Editor Page
#===============================================================================
class EditorPage(CNCRibbon.Page):
__doc__ = _("GCode editor")
_name_ = N_("Editor")
_icon_ = "edit"
#----------------------------------------------------------------------
# Add a widget to the widgets list so it can be enabled/disabled during a run
#----------------------------------------------------------------------
def register(self):
self._register((ClipboardGroup, SelectGroup, EditGroup, MoveGroup,
OrderGroup, TransformGroup, RouteGroup, InfoGroup),
(EditorFrame,))
|
3rdcycle/pyqtgraph
|
refs/heads/develop
|
pyqtgraph/multiprocess/remoteproxy.py
|
4
|
import os, time, sys, traceback, weakref
import numpy as np
try:
import __builtin__ as builtins
import cPickle as pickle
except ImportError:
import builtins
import pickle
# color printing for debugging
from ..util import cprint
class ClosedError(Exception):
"""Raised when an event handler receives a request to close the connection
or discovers that the connection has been closed."""
pass
class NoResultError(Exception):
"""Raised when a request for the return value of a remote call fails
because the call has not yet returned."""
pass
class RemoteEventHandler(object):
"""
This class handles communication between two processes. One instance is present on
each process and listens for communication from the other process. This enables
(amongst other things) ObjectProxy instances to look up their attributes and call
their methods.
This class is responsible for carrying out actions on behalf of the remote process.
Each instance holds one end of a Connection which allows python
objects to be passed between processes.
For the most common operations, see _import(), close(), and transfer()
To handle and respond to incoming requests, RemoteEventHandler requires that its
processRequests method is called repeatedly (this is usually handled by the Process
classes defined in multiprocess.processes).
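A minimal sketch of that pattern (illustrative names; the real loop lives in
the Process classes)::
handler = RemoteEventHandler(conn, 'worker', pid=remotePid)
while True:
try:
handler.processRequests() ## handle any pending requests from the peer
except ClosedError:
break ## the peer closed the connection
time.sleep(0.01)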
"""
handlers = {} ## maps {process ID : handler}. This allows unpickler to determine which process
## an object proxy belongs to
def __init__(self, connection, name, pid, debug=False):
self.debug = debug
self.conn = connection
self.name = name
self.results = {} ## reqId: (status, result); cache of request results received from the remote process
## status is either 'result' or 'error'
## if 'error', then result will be (exception, formatted exception)
## where exception may be None if it could not be passed through the Connection.
self.proxies = {} ## maps {weakref(proxy): proxyId}; used to inform the remote process when a proxy has been deleted.
## attributes that affect the behavior of the proxy.
## See ObjectProxy._setProxyOptions for description
self.proxyOptions = {
'callSync': 'sync', ## 'sync', 'async', 'off'
'timeout': 10, ## float
'returnType': 'auto', ## 'proxy', 'value', 'auto'
'autoProxy': False, ## bool
'deferGetattr': False, ## True, False
'noProxyTypes': [ type(None), str, int, float, tuple, list, dict, LocalObjectProxy, ObjectProxy ],
}
self.nextRequestId = 0
self.exited = False
RemoteEventHandler.handlers[pid] = self ## register this handler as the one communicating with pid
@classmethod
def getHandler(cls, pid):
try:
return cls.handlers[pid]
except:
print(pid, cls.handlers)
raise
def debugMsg(self, msg):
if not self.debug:
return
cprint.cout(self.debug, "[%d] %s\n" % (os.getpid(), str(msg)), -1)
def getProxyOption(self, opt):
return self.proxyOptions[opt]
def setProxyOptions(self, **kwds):
"""
Set the default behavior options for object proxies.
See ObjectProxy._setProxyOptions for more info.
"""
self.proxyOptions.update(kwds)
def processRequests(self):
"""Process all pending requests from the pipe, return
after no more events are immediately available. (non-blocking)
Returns the number of events processed.
"""
if self.exited:
self.debugMsg(' processRequests: exited already; raise ClosedError.')
raise ClosedError()
numProcessed = 0
while self.conn.poll():
try:
self.handleRequest()
numProcessed += 1
except ClosedError:
self.debugMsg('processRequests: got ClosedError from handleRequest; setting exited=True.')
self.exited = True
raise
#except IOError as err: ## let handleRequest take care of this.
#self.debugMsg(' got IOError from handleRequest; try again.')
#if err.errno == 4: ## interrupted system call; try again
#continue
#else:
#raise
except:
print("Error in process %s" % self.name)
sys.excepthook(*sys.exc_info())
if numProcessed > 0:
self.debugMsg('processRequests: finished %d requests' % numProcessed)
return numProcessed
def handleRequest(self):
"""Handle a single request from the remote process.
Blocks until a request is available."""
result = None
while True:
try:
## args, kwds are double-pickled to ensure this recv() call never fails
cmd, reqId, nByteMsgs, optStr = self.conn.recv()
break
except EOFError:
self.debugMsg(' handleRequest: got EOFError from recv; raise ClosedError.')
## remote process has shut down; end event loop
raise ClosedError()
except IOError as err:
if err.errno == 4: ## interrupted system call; try again
self.debugMsg(' handleRequest: got IOError 4 from recv; try again.')
continue
else:
self.debugMsg(' handleRequest: got IOError %d from recv (%s); raise ClosedError.' % (err.errno, err.strerror))
raise ClosedError()
self.debugMsg(" handleRequest: received %s %s" % (str(cmd), str(reqId)))
## read byte messages following the main request
byteData = []
if nByteMsgs > 0:
self.debugMsg(" handleRequest: reading %d byte messages" % nByteMsgs)
for i in range(nByteMsgs):
while True:
try:
byteData.append(self.conn.recv_bytes())
break
except EOFError:
self.debugMsg(" handleRequest: got EOF while reading byte messages; raise ClosedError.")
raise ClosedError()
except IOError as err:
if err.errno == 4:
self.debugMsg(" handleRequest: got IOError 4 while reading byte messages; try again.")
continue
else:
self.debugMsg(" handleRequest: got IOError while reading byte messages; raise ClosedError.")
raise ClosedError()
try:
if cmd == 'result' or cmd == 'error':
resultId = reqId
reqId = None ## prevents attempt to return information from this request
## (this is already a return from a previous request)
opts = pickle.loads(optStr)
self.debugMsg(" handleRequest: id=%s opts=%s" % (str(reqId), str(opts)))
#print os.getpid(), "received request:", cmd, reqId, opts
returnType = opts.get('returnType', 'auto')
if cmd == 'result':
self.results[resultId] = ('result', opts['result'])
elif cmd == 'error':
self.results[resultId] = ('error', (opts['exception'], opts['excString']))
elif cmd == 'getObjAttr':
result = getattr(opts['obj'], opts['attr'])
elif cmd == 'callObj':
obj = opts['obj']
fnargs = opts['args']
fnkwds = opts['kwds']
## If arrays were sent as byte messages, they must be re-inserted into the
## arguments
if len(byteData) > 0:
for i,arg in enumerate(fnargs):
if isinstance(arg, tuple) and len(arg) > 0 and arg[0] == '__byte_message__':
ind = arg[1]
dtype, shape = arg[2]
fnargs[i] = np.fromstring(byteData[ind], dtype=dtype).reshape(shape)
for k,arg in fnkwds.items():
if isinstance(arg, tuple) and len(arg) > 0 and arg[0] == '__byte_message__':
ind = arg[1]
dtype, shape = arg[2]
fnkwds[k] = np.fromstring(byteData[ind], dtype=dtype).reshape(shape)
if len(fnkwds) == 0: ## need to do this because some functions do not allow keyword arguments.
try:
result = obj(*fnargs)
except:
print("Failed to call object %s: %d, %s" % (obj, len(fnargs), fnargs[1:]))
raise
else:
result = obj(*fnargs, **fnkwds)
elif cmd == 'getObjValue':
result = opts['obj'] ## has already been unpickled into its local value
returnType = 'value'
elif cmd == 'transfer':
result = opts['obj']
returnType = 'proxy'
elif cmd == 'transferArray':
## read array data from next message:
result = np.fromstring(byteData[0], dtype=opts['dtype']).reshape(opts['shape'])
returnType = 'proxy'
elif cmd == 'import':
name = opts['module']
fromlist = opts.get('fromlist', [])
mod = builtins.__import__(name, fromlist=fromlist)
if len(fromlist) == 0:
parts = name.lstrip('.').split('.')
result = mod
for part in parts[1:]:
result = getattr(result, part)
else:
result = [getattr(mod, attrName) for attrName in fromlist]
elif cmd == 'del':
LocalObjectProxy.releaseProxyId(opts['proxyId'])
#del self.proxiedObjects[opts['objId']]
elif cmd == 'close':
if reqId is not None:
result = True
returnType = 'value'
exc = None
except:
exc = sys.exc_info()
if reqId is not None:
if exc is None:
self.debugMsg(" handleRequest: sending return value for %d: %s" % (reqId, str(result)))
#print "returnValue:", returnValue, result
if returnType == 'auto':
result = self.autoProxy(result, self.proxyOptions['noProxyTypes'])
elif returnType == 'proxy':
result = LocalObjectProxy(result)
try:
self.replyResult(reqId, result)
except:
sys.excepthook(*sys.exc_info())
self.replyError(reqId, *sys.exc_info())
else:
self.debugMsg(" handleRequest: returning exception for %d" % reqId)
self.replyError(reqId, *exc)
elif exc is not None:
sys.excepthook(*exc)
if cmd == 'close':
if opts.get('noCleanup', False) is True:
os._exit(0) ## exit immediately, do not pass GO, do not collect $200.
## (more importantly, do not call any code that would
## normally be invoked at exit)
else:
raise ClosedError()
def replyResult(self, reqId, result):
self.send(request='result', reqId=reqId, callSync='off', opts=dict(result=result))
def replyError(self, reqId, *exc):
print("error: %s %s %s" % (self.name, str(reqId), str(exc[1])))
excStr = traceback.format_exception(*exc)
try:
self.send(request='error', reqId=reqId, callSync='off', opts=dict(exception=exc[1], excString=excStr))
except:
self.send(request='error', reqId=reqId, callSync='off', opts=dict(exception=None, excString=excStr))
def send(self, request, opts=None, reqId=None, callSync='sync', timeout=10, returnType=None, byteData=None, **kwds):
"""Send a request or return packet to the remote process.
Generally it is not necessary to call this method directly; it is for internal use.
(The docstring has information that is nevertheless useful to the programmer
as it describes the internal protocol used to communicate between processes)
============== ====================================================================
**Arguments:**
request String describing the type of request being sent (see below)
reqId Integer uniquely linking a result back to the request that generated
it. (most requests leave this blank)
callSync 'sync': return the actual result of the request
'async': return a Request object which can be used to look up the
result later
'off': return no result
timeout Time in seconds to wait for a response when callSync=='sync'
opts Extra arguments sent to the remote process that determine the way
the request will be handled (see below)
returnType 'proxy', 'value', or 'auto'
byteData If specified, this is a list of objects to be sent as byte messages
to the remote process.
This is used to send large arrays without the cost of pickling.
============== ====================================================================
Description of request strings and options allowed for each:
============= ============= ========================================================
request option description
------------- ------------- --------------------------------------------------------
getObjAttr Request the remote process return (proxy to) an
attribute of an object.
obj reference to object whose attribute should be
returned
attr string name of attribute to return
returnType 'proxy', 'value', or 'auto' indicating whether to return
a proxy or the actual value.
callObj Request the remote process call a function or
method. If a request ID is given, then the call's
return value will be sent back (or information
about the error that occurred while running the
function)
obj the (reference to) object to call
args tuple of arguments to pass to callable
kwds dict of keyword arguments to pass to callable
returnType 'proxy', 'value', or 'auto' indicating whether to return
a proxy or the actual value.
getObjValue Request the remote process return the value of
a proxied object (must be picklable)
obj reference to object whose value should be returned
transfer Copy an object to the remote process and request
it return a proxy for the new object.
obj The object to transfer.
import Request the remote process import new symbols
and return proxy(ies) to the imported objects
module the string name of the module to import
fromlist optional list of string names to import from module
del Inform the remote process that a proxy has been
released (thus the remote process may be able to
release the original object)
proxyId id of proxy which is no longer referenced by
remote host
close Instruct the remote process to stop its event loop
and exit. Optionally, this request may return a
confirmation.
result Inform the remote process that its request has
been processed
result return value of a request
error Inform the remote process that its request failed
exception the Exception that was raised (or None if the
exception could not be pickled)
excString string-formatted version of the exception and
traceback
============= ============= ========================================================
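A rough sketch of a typical internal call (the names *handler* and *objProxy*
are illustrative only)::
## ask the remote process to call objProxy(1, 2, x=3) and wait for the result
result = handler.send(request='callObj',
opts=dict(obj=objProxy, args=(1, 2), kwds={'x': 3}),
callSync='sync')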
"""
#if len(kwds) > 0:
#print "Warning: send() ignored args:", kwds
if opts is None:
opts = {}
assert callSync in ['off', 'sync', 'async'], 'callSync must be one of "off", "sync", or "async"'
if reqId is None:
if callSync != 'off': ## requested return value; use the next available request ID
reqId = self.nextRequestId
self.nextRequestId += 1
else:
## If requestId is provided, this _must_ be a response to a previously received request.
assert request in ['result', 'error']
if returnType is not None:
opts['returnType'] = returnType
#print os.getpid(), "send request:", request, reqId, opts
## double-pickle args to ensure that at least status and request ID get through
try:
optStr = pickle.dumps(opts)
except:
print("==== Error pickling this object: ====")
print(opts)
print("=======================================")
raise
nByteMsgs = 0
if byteData is not None:
nByteMsgs = len(byteData)
## Send primary request
request = (request, reqId, nByteMsgs, optStr)
self.debugMsg('send request: cmd=%s nByteMsgs=%d id=%s opts=%s' % (str(request[0]), nByteMsgs, str(reqId), str(opts)))
self.conn.send(request)
## follow up by sending byte messages
if byteData is not None:
for obj in byteData: ## Remote process _must_ be prepared to read the same number of byte messages!
self.conn.send_bytes(obj)
self.debugMsg(' sent %d byte messages' % len(byteData))
self.debugMsg(' call sync: %s' % callSync)
if callSync == 'off':
return
req = Request(self, reqId, description=str(request), timeout=timeout)
if callSync == 'async':
return req
if callSync == 'sync':
try:
return req.result()
except NoResultError:
return req
def close(self, callSync='off', noCleanup=False, **kwds):
self.send(request='close', opts=dict(noCleanup=noCleanup), callSync=callSync, **kwds)
def getResult(self, reqId):
## raises NoResultError if the result is not available yet
#print self.results.keys(), os.getpid()
if reqId not in self.results:
try:
self.processRequests()
except ClosedError: ## even if remote connection has closed, we may have
## received new data during this call to processRequests()
pass
if reqId not in self.results:
raise NoResultError()
status, result = self.results.pop(reqId)
if status == 'result':
return result
elif status == 'error':
#print ''.join(result)
exc, excStr = result
if exc is not None:
print("===== Remote process raised exception on request: =====")
print(''.join(excStr))
print("===== Local Traceback to request follows: =====")
raise exc
else:
print(''.join(excStr))
raise Exception("Error getting result. See above for exception from remote process.")
else:
raise Exception("Internal error.")
def _import(self, mod, **kwds):
"""
Request the remote process import a module (or symbols from a module)
and return the proxied results. Uses built-in __import__() function, but
adds a bit more processing:
_import('module') => returns module
_import('module.submodule') => returns submodule
(note this differs from behavior of __import__)
_import('module', fromlist=[name1, name2, ...]) => returns [module.name1, module.name2, ...]
(this also differs from behavior of __import__)
"""
return self.send(request='import', callSync='sync', opts=dict(module=mod), **kwds)
def getObjAttr(self, obj, attr, **kwds):
return self.send(request='getObjAttr', opts=dict(obj=obj, attr=attr), **kwds)
def getObjValue(self, obj, **kwds):
return self.send(request='getObjValue', opts=dict(obj=obj), **kwds)
def callObj(self, obj, args, kwds, **opts):
opts = opts.copy()
args = list(args)
## Decide whether to send arguments by value or by proxy
noProxyTypes = opts.pop('noProxyTypes', None)
if noProxyTypes is None:
noProxyTypes = self.proxyOptions['noProxyTypes']
autoProxy = opts.pop('autoProxy', self.proxyOptions['autoProxy'])
if autoProxy is True:
args = [self.autoProxy(v, noProxyTypes) for v in args]
for k, v in kwds.items():
kwds[k] = self.autoProxy(v, noProxyTypes)
byteMsgs = []
## If there are arrays in the arguments, send those as byte messages.
## We do this because pickling arrays is too expensive.
for i,arg in enumerate(args):
if arg.__class__ == np.ndarray:
args[i] = ("__byte_message__", len(byteMsgs), (arg.dtype, arg.shape))
byteMsgs.append(arg)
for k,v in kwds.items():
if v.__class__ == np.ndarray:
kwds[k] = ("__byte_message__", len(byteMsgs), (v.dtype, v.shape))
byteMsgs.append(v)
return self.send(request='callObj', opts=dict(obj=obj, args=args, kwds=kwds), byteData=byteMsgs, **opts)
def registerProxy(self, proxy):
ref = weakref.ref(proxy, self.deleteProxy)
self.proxies[ref] = proxy._proxyId
def deleteProxy(self, ref):
proxyId = self.proxies.pop(ref)
try:
self.send(request='del', opts=dict(proxyId=proxyId), callSync='off')
except IOError: ## if remote process has closed down, there is no need to send delete requests anymore
pass
def transfer(self, obj, **kwds):
"""
Transfer an object by value to the remote host (the object must be picklable)
and return a proxy for the new remote object.
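A short usage sketch (assuming a Process-like handler *proc* and numpy imported as np)::
rlist = proc.transfer([1, 2, 3]) ## proxy to a remote copy of the list
rarr = proc.transfer(np.ones(10)) ## arrays are sent as byte messages, not pickled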
"""
if obj.__class__ is np.ndarray:
opts = {'dtype': obj.dtype, 'shape': obj.shape}
return self.send(request='transferArray', opts=opts, byteData=[obj], **kwds)
else:
return self.send(request='transfer', opts=dict(obj=obj), **kwds)
def autoProxy(self, obj, noProxyTypes):
## Return object wrapped in LocalObjectProxy _unless_ its type is in noProxyTypes.
for typ in noProxyTypes:
if isinstance(obj, typ):
return obj
return LocalObjectProxy(obj)
class Request(object):
"""
Request objects are returned when calling an ObjectProxy in asynchronous mode
or if a synchronous call has timed out. Use hasResult() to ask whether
the result of the call has been returned yet. Use result() to get
the returned value.
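A typical asynchronous round-trip, sketched with an arbitrary proxied method::
req = someProxy.someMethod(_callSync='async')
while not req.hasResult():
time.sleep(0.01)
value = req.result()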
"""
def __init__(self, process, reqId, description=None, timeout=10):
self.proc = process
self.description = description
self.reqId = reqId
self.gotResult = False
self._result = None
self.timeout = timeout
def result(self, block=True, timeout=None):
"""
Return the result for this request.
If block is True, wait until the result has arrived or *timeout* seconds passes.
If the timeout is reached, raise NoResultError. (use timeout=None to disable)
If block is False, raise NoResultError immediately if the result has not arrived yet.
If the process's connection has closed before the result arrives, raise ClosedError.
"""
if self.gotResult:
return self._result
if timeout is None:
timeout = self.timeout
if block:
start = time.time()
while not self.hasResult():
if self.proc.exited:
raise ClosedError()
time.sleep(0.005)
if timeout >= 0 and time.time() - start > timeout:
print("Request timed out: %s" % self.description)
import traceback
traceback.print_stack()
raise NoResultError()
return self._result
else:
self._result = self.proc.getResult(self.reqId) ## raises NoResultError if result is not available yet
self.gotResult = True
return self._result
def hasResult(self):
"""Returns True if the result for this request has arrived."""
try:
self.result(block=False)
except NoResultError:
pass
return self.gotResult
class LocalObjectProxy(object):
"""
Used for wrapping local objects to ensure that they are sent by proxy to a remote host.
Note that 'proxy' is just a shorter alias for LocalObjectProxy.
For example::
data = [1,2,3,4,5]
remotePlot.plot(data) ## by default, lists are pickled and sent by value
remotePlot.plot(proxy(data)) ## force the object to be sent by proxy
"""
nextProxyId = 0
proxiedObjects = {} ## maps {proxyId: object}
@classmethod
def registerObject(cls, obj):
## assign it a unique ID so we can keep a reference to the local object
pid = cls.nextProxyId
cls.nextProxyId += 1
cls.proxiedObjects[pid] = obj
#print "register:", cls.proxiedObjects
return pid
@classmethod
def lookupProxyId(cls, pid):
return cls.proxiedObjects[pid]
@classmethod
def releaseProxyId(cls, pid):
del cls.proxiedObjects[pid]
#print "release:", cls.proxiedObjects
def __init__(self, obj, **opts):
"""
Create a 'local' proxy object that, when sent to a remote host,
will appear as a normal ObjectProxy for *obj*.
Any extra keyword arguments are passed to proxy._setProxyOptions()
on the remote side.
"""
self.processId = os.getpid()
#self.objectId = id(obj)
self.typeStr = repr(obj)
#self.handler = handler
self.obj = obj
self.opts = opts
def __reduce__(self):
## a proxy is being pickled and sent to a remote process.
## every time this happens, a new proxy will be generated in the remote process,
## so we register a new ID each time so we can track when each one is released.
pid = LocalObjectProxy.registerObject(self.obj)
return (unpickleObjectProxy, (self.processId, pid, self.typeStr, None, self.opts))
## alias
proxy = LocalObjectProxy
def unpickleObjectProxy(processId, proxyId, typeStr, attributes=None, opts=None):
if processId == os.getpid():
obj = LocalObjectProxy.lookupProxyId(proxyId)
if attributes is not None:
for attr in attributes:
obj = getattr(obj, attr)
return obj
else:
proxy = ObjectProxy(processId, proxyId=proxyId, typeStr=typeStr)
if opts is not None:
proxy._setProxyOptions(**opts)
return proxy
class ObjectProxy(object):
"""
Proxy to an object stored by the remote process. Proxies are created
by calling Process._import(), Process.transfer(), or by requesting/calling
attributes on existing proxy objects.
For the most part, this object can be used exactly as if it
were a local object::
rsys = proc._import('sys') # returns proxy to sys module on remote process
rsys.stdout # proxy to remote sys.stdout
rsys.stdout.write # proxy to remote sys.stdout.write
rsys.stdout.write('hello') # calls sys.stdout.write('hello') on remote machine
# and returns the result (None)
When calling a proxy to a remote function, the call can be made synchronous
(result of call is returned immediately), asynchronous (result is returned later),
or return can be disabled entirely::
ros = proc._import('os')
## synchronous call; result is returned immediately
pid = ros.getpid()
## asynchronous call
request = ros.getpid(_callSync='async')
while not request.hasResult():
time.sleep(0.01)
pid = request.result()
## disable return when we know it isn't needed
rsys.stdout.write('hello', _callSync='off')
Additionally, values returned from a remote function call are automatically
returned either by value (must be picklable) or by proxy.
This behavior can be forced::
rnp = proc._import('numpy')
arrProxy = rnp.array([1,2,3,4], _returnType='proxy')
arrValue = rnp.array([1,2,3,4], _returnType='value')
The default callSync and returnType behaviors (as well as others) can be set
for each proxy individually using ObjectProxy._setProxyOptions() or globally using
proc.setProxyOptions().
"""
def __init__(self, processId, proxyId, typeStr='', parent=None):
object.__init__(self)
## can't set attributes directly because setattr is overridden.
self.__dict__['_processId'] = processId
self.__dict__['_typeStr'] = typeStr
self.__dict__['_proxyId'] = proxyId
self.__dict__['_attributes'] = ()
## attributes that affect the behavior of the proxy.
## in all cases, a value of None causes the proxy to ask
## its parent event handler to make the decision
self.__dict__['_proxyOptions'] = {
'callSync': None, ## 'sync', 'async', None
'timeout': None, ## float, None
'returnType': None, ## 'proxy', 'value', 'auto', None
'deferGetattr': None, ## True, False, None
'noProxyTypes': None, ## list of types to send by value instead of by proxy
}
self.__dict__['_handler'] = RemoteEventHandler.getHandler(processId)
self.__dict__['_handler'].registerProxy(self) ## handler will watch proxy; inform remote process when the proxy is deleted.
def _setProxyOptions(self, **kwds):
"""
Change the behavior of this proxy. For all options, a value of None
will cause the proxy to instead use the default behavior defined
by its parent Process.
Options are:
============= =============================================================
callSync 'sync', 'async', 'off', or None.
If 'async', then calling methods will return a Request object
which can be used to inquire later about the result of the
method call.
If 'sync', then calling a method
will block until the remote process has returned its result
or the timeout has elapsed (in this case, a Request object
is returned instead).
If 'off', then the remote process is instructed _not_ to
reply and the method call will return None immediately.
returnType 'auto', 'proxy', 'value', or None.
If 'proxy', then the value returned when calling a method
will be a proxy to the object on the remote process.
If 'value', then attempt to pickle the returned object and
send it back.
If 'auto', then the decision is made by consulting the
'noProxyTypes' option.
autoProxy bool or None. If True, arguments to __call__ are
automatically converted to proxy unless their type is
listed in noProxyTypes (see below). If False, arguments
are left untouched. Use proxy(obj) to manually convert
arguments before sending.
timeout float or None. Length of time to wait during synchronous
requests before returning a Request object instead.
deferGetattr True, False, or None.
If False, all attribute requests will be sent to the remote
process immediately and will block until a response is
received (or timeout has elapsed).
If True, requesting an attribute from the proxy returns a
new proxy immediately. The remote process is _not_ contacted
to make this request. This is faster, but it is possible to
request an attribute that does not exist on the proxied
object. In this case, AttributeError will not be raised
until an attempt is made to look up the attribute on the
remote process.
noProxyTypes List of object types that should _not_ be proxied when
sent to the remote process.
============= =============================================================
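For example (a sketch; any subset of the options above may be given)::
proxy._setProxyOptions(callSync='async', returnType='proxy')
req = proxy.someMethod() ## now returns a Request instead of blocking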
"""
self._proxyOptions.update(kwds)
def _getValue(self):
"""
Return the value of the proxied object
(the remote object must be picklable)
"""
return self._handler.getObjValue(self)
def _getProxyOption(self, opt):
val = self._proxyOptions[opt]
if val is None:
return self._handler.getProxyOption(opt)
return val
def _getProxyOptions(self):
return dict([(k, self._getProxyOption(k)) for k in self._proxyOptions])
def __reduce__(self):
return (unpickleObjectProxy, (self._processId, self._proxyId, self._typeStr, self._attributes))
def __repr__(self):
#objRepr = self.__getattr__('__repr__')(callSync='value')
return "<ObjectProxy for process %d, object 0x%x: %s >" % (self._processId, self._proxyId, self._typeStr)
def __getattr__(self, attr, **kwds):
"""
Calls __getattr__ on the remote object and returns the attribute
by value or by proxy depending on the options set (see
ObjectProxy._setProxyOptions and RemoteEventHandler.setProxyOptions)
If the option 'deferGetattr' is True for this proxy, then a new proxy object
is returned _without_ asking the remote object whether the named attribute exists.
This can save time when making multiple chained attribute requests,
but may also defer a possible AttributeError until later, making
them more difficult to debug.
"""
opts = self._getProxyOptions()
for k in opts:
if '_'+k in kwds:
opts[k] = kwds.pop('_'+k)
if opts['deferGetattr'] is True:
return self._deferredAttr(attr)
else:
#opts = self._getProxyOptions()
return self._handler.getObjAttr(self, attr, **opts)
def _deferredAttr(self, attr):
return DeferredObjectProxy(self, attr)
def __call__(self, *args, **kwds):
"""
Attempts to call the proxied object from the remote process.
Accepts extra keyword arguments:
_callSync 'off', 'sync', or 'async'
_returnType 'value', 'proxy', or 'auto'
If the remote call raises an exception on the remote process,
it will be re-raised on the local process.
"""
opts = self._getProxyOptions()
for k in opts:
if '_'+k in kwds:
opts[k] = kwds.pop('_'+k)
return self._handler.callObj(obj=self, args=args, kwds=kwds, **opts)
## Explicitly proxy special methods. Is there a better way to do this??
def _getSpecialAttr(self, attr):
## this just gives us an easy way to change the behavior of the special methods
return self._deferredAttr(attr)
def __getitem__(self, *args):
return self._getSpecialAttr('__getitem__')(*args)
def __setitem__(self, *args):
return self._getSpecialAttr('__setitem__')(*args, _callSync='off')
def __setattr__(self, *args):
return self._getSpecialAttr('__setattr__')(*args, _callSync='off')
def __str__(self, *args):
return self._getSpecialAttr('__str__')(*args, _returnType='value')
def __len__(self, *args):
return self._getSpecialAttr('__len__')(*args)
def __add__(self, *args):
return self._getSpecialAttr('__add__')(*args)
def __sub__(self, *args):
return self._getSpecialAttr('__sub__')(*args)
def __div__(self, *args):
return self._getSpecialAttr('__div__')(*args)
def __truediv__(self, *args):
return self._getSpecialAttr('__truediv__')(*args)
def __floordiv__(self, *args):
return self._getSpecialAttr('__floordiv__')(*args)
def __mul__(self, *args):
return self._getSpecialAttr('__mul__')(*args)
def __pow__(self, *args):
return self._getSpecialAttr('__pow__')(*args)
def __iadd__(self, *args):
return self._getSpecialAttr('__iadd__')(*args, _callSync='off')
def __isub__(self, *args):
return self._getSpecialAttr('__isub__')(*args, _callSync='off')
def __idiv__(self, *args):
return self._getSpecialAttr('__idiv__')(*args, _callSync='off')
def __itruediv__(self, *args):
return self._getSpecialAttr('__itruediv__')(*args, _callSync='off')
def __ifloordiv__(self, *args):
return self._getSpecialAttr('__ifloordiv__')(*args, _callSync='off')
def __imul__(self, *args):
return self._getSpecialAttr('__imul__')(*args, _callSync='off')
def __ipow__(self, *args):
return self._getSpecialAttr('__ipow__')(*args, _callSync='off')
def __rshift__(self, *args):
return self._getSpecialAttr('__rshift__')(*args)
def __lshift__(self, *args):
return self._getSpecialAttr('__lshift__')(*args)
def __irshift__(self, *args):
return self._getSpecialAttr('__irshift__')(*args, _callSync='off')
def __ilshift__(self, *args):
return self._getSpecialAttr('__ilshift__')(*args, _callSync='off')
def __eq__(self, *args):
return self._getSpecialAttr('__eq__')(*args)
def __ne__(self, *args):
return self._getSpecialAttr('__ne__')(*args)
def __lt__(self, *args):
return self._getSpecialAttr('__lt__')(*args)
def __gt__(self, *args):
return self._getSpecialAttr('__gt__')(*args)
def __le__(self, *args):
return self._getSpecialAttr('__le__')(*args)
def __ge__(self, *args):
return self._getSpecialAttr('__ge__')(*args)
def __and__(self, *args):
return self._getSpecialAttr('__and__')(*args)
def __or__(self, *args):
return self._getSpecialAttr('__or__')(*args)
def __xor__(self, *args):
return self._getSpecialAttr('__xor__')(*args)
def __iand__(self, *args):
return self._getSpecialAttr('__iand__')(*args, _callSync='off')
def __ior__(self, *args):
return self._getSpecialAttr('__ior__')(*args, _callSync='off')
def __ixor__(self, *args):
return self._getSpecialAttr('__ixor__')(*args, _callSync='off')
def __mod__(self, *args):
return self._getSpecialAttr('__mod__')(*args)
def __radd__(self, *args):
return self._getSpecialAttr('__radd__')(*args)
def __rsub__(self, *args):
return self._getSpecialAttr('__rsub__')(*args)
def __rdiv__(self, *args):
return self._getSpecialAttr('__rdiv__')(*args)
def __rfloordiv__(self, *args):
return self._getSpecialAttr('__rfloordiv__')(*args)
def __rtruediv__(self, *args):
return self._getSpecialAttr('__rtruediv__')(*args)
def __rmul__(self, *args):
return self._getSpecialAttr('__rmul__')(*args)
def __rpow__(self, *args):
return self._getSpecialAttr('__rpow__')(*args)
def __rrshift__(self, *args):
return self._getSpecialAttr('__rrshift__')(*args)
def __rlshift__(self, *args):
return self._getSpecialAttr('__rlshift__')(*args)
def __rand__(self, *args):
return self._getSpecialAttr('__rand__')(*args)
def __ror__(self, *args):
return self._getSpecialAttr('__ror__')(*args)
def __rxor__(self, *args):
return self._getSpecialAttr('__rxor__')(*args)
def __rmod__(self, *args):
return self._getSpecialAttr('__rmod__')(*args)
def __hash__(self):
## Required for python3 since __eq__ is defined.
return id(self)
class DeferredObjectProxy(ObjectProxy):
"""
This class represents an attribute (or sub-attribute) of a proxied object.
It is used to speed up attribute requests. Take the following scenario::
rsys = proc._import('sys')
rsys.stdout.write('hello')
For this simple example, a total of 4 synchronous requests are made to
the remote process:
1) import sys
2) getattr(sys, 'stdout')
3) getattr(stdout, 'write')
4) write('hello')
This takes a lot longer than running the equivalent code locally. To
speed things up, we can 'defer' the two attribute lookups so they are
only carried out when necessary::
rsys = proc._import('sys')
rsys._setProxyOptions(deferGetattr=True)
rsys.stdout.write('hello')
This example only makes two requests to the remote process; the two
attribute lookups immediately return DeferredObjectProxy instances
without contacting the remote process. When the call
to write() is made, all attribute requests are processed at the same time.
Note that if the attributes requested do not exist on the remote object,
making the call to write() will raise an AttributeError.
"""
def __init__(self, parentProxy, attribute):
## can't set attributes directly because setattr is overridden.
for k in ['_processId', '_typeStr', '_proxyId', '_handler']:
self.__dict__[k] = getattr(parentProxy, k)
self.__dict__['_parent'] = parentProxy ## make sure parent stays alive
self.__dict__['_attributes'] = parentProxy._attributes + (attribute,)
self.__dict__['_proxyOptions'] = parentProxy._proxyOptions.copy()
def __repr__(self):
return ObjectProxy.__repr__(self) + '.' + '.'.join(self._attributes)
def _undefer(self):
"""
Return a non-deferred ObjectProxy referencing the same object
"""
return self._parent.__getattr__(self._attributes[-1], _deferGetattr=False)
|
thomasrogers03/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/style/checkers/xcodeproj.py
|
194
|
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Checks Xcode project files."""
import re
class XcodeProjectFileChecker(object):
"""Processes Xcode project file lines for checking style."""
def __init__(self, file_path, handle_style_error):
self.file_path = file_path
self.handle_style_error = handle_style_error
self.handle_style_error.turn_off_line_filtering()
self._development_region_regex = re.compile('developmentRegion = (?P<region>.+);')
def _check_development_region(self, line_index, line):
"""Returns True when developmentRegion is detected."""
matched = self._development_region_regex.search(line)
if not matched:
return False
if matched.group('region') != 'English':
self.handle_style_error(line_index,
'xcodeproj/settings', 5,
'developmentRegion is not English.')
return True
def check(self, lines):
development_region_is_detected = False
for line_index, line in enumerate(lines):
if self._check_development_region(line_index, line):
development_region_is_detected = True
if not development_region_is_detected:
self.handle_style_error(len(lines),
'xcodeproj/settings', 5,
'Missing "developmentRegion = English".')
|
SilverLiningSystems/cxmanage-test2
|
refs/heads/master
|
cxmanage_api/tests/fabric_test.py
|
4
|
# pylint: disable=protected-access
# pylint: disable=too-many-public-methods
# Copyright (c) 2012-2013, Calxeda Inc.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Calxeda Inc. nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
"""Calxeda: fabric_test.py """
import random
import unittest
from mock import call
from cxmanage_api.fabric import Fabric
from cxmanage_api.tftp import InternalTftp, ExternalTftp
from cxmanage_api.firmware_package import FirmwarePackage
from cxmanage_api.cx_exceptions import CommandFailedError
from cxmanage_api.tests import DummyNode, DummyFailNode
class FabricTest(unittest.TestCase):
""" Test the various Fabric commands """
def setUp(self):
# Set up the controller and add targets
self.fabric = Fabric(DummyNode.ip_addresses[0], node=DummyNode)
self.nodes = [DummyNode(i) for i in DummyNode.ip_addresses]
self.fabric._nodes = dict((i, self.nodes[i])
for i in xrange(len(self.nodes)))
def test_tftp(self):
""" Test the tftp property """
tftp = InternalTftp()
self.fabric.tftp = tftp
self.assertTrue(self.fabric.tftp is tftp)
for node in self.nodes:
self.assertTrue(node.tftp is tftp)
tftp = ExternalTftp("127.0.0.1")
self.fabric.tftp = tftp
self.assertTrue(self.fabric.tftp is tftp)
for node in self.nodes:
self.assertTrue(node.tftp is tftp)
def test_get_mac_addresses(self):
""" Test get_mac_addresses command """
self.fabric.get_mac_addresses()
self.assertEqual(
self.nodes[0].method_calls,
[call.get_fabric_macaddrs()]
)
for node in self.nodes[1:]:
self.assertEqual(node.method_calls, [])
def test_get_uplink_info(self):
""" Test get_uplink_info command """
self.fabric.get_uplink_info()
for node in self.nodes:
self.assertEqual(node.method_calls, [call.get_uplink_info()])
def test_get_uplink_speed(self):
""" Test get_uplink_speed command """
self.fabric.get_uplink_speed()
for node in self.nodes:
self.assertEqual(node.method_calls, [call.get_uplink_speed()])
def test_get_uplink(self):
""" Test get_uplink command """
self.assertEqual(self.fabric.get_uplink(iface=0), 0)
def test_set_uplink(self):
""" Test set_uplink command """
iface, uplink = 0, 0
self.fabric.set_uplink(iface=iface, uplink=uplink)
self.assertEqual(
self.nodes[0].bmc.method_calls,
[call.fabric_config_set_uplink(iface=iface, uplink=uplink)]
)
def test_get_sensors(self):
""" Test get_sensors command """
self.fabric.get_sensors()
self.fabric.get_sensors("Node Power")
for node in self.nodes:
self.assertEqual(node.method_calls, [
call.get_sensors(""), call.get_sensors("Node Power")
])
def test_get_firmware_info(self):
""" Test get_firmware_info command """
self.fabric.get_firmware_info()
for node in self.nodes:
self.assertEqual(node.method_calls, [call.get_firmware_info()])
def test_is_updatable(self):
""" Test is_updatable command """
package = FirmwarePackage()
self.fabric.is_updatable(package)
for node in self.nodes:
self.assertEqual(node.method_calls, [
call.is_updatable(package, "INACTIVE", None)
])
def test_update_firmware(self):
""" Test update_firmware command """
package = FirmwarePackage()
self.fabric.update_firmware(package)
for node in self.nodes:
self.assertEqual(node.method_calls, [
call.update_firmware(package, "INACTIVE", None)
])
def test_config_reset(self):
""" Test config_reset command """
self.fabric.config_reset()
for node in self.nodes:
self.assertEqual(node.method_calls, [call.config_reset()])
def test_set_boot_order(self):
""" Test set_boot_order command """
boot_args = "disk0,pxe,retry"
self.fabric.set_boot_order(boot_args)
for node in self.nodes:
self.assertEqual(
node.method_calls, [call.set_boot_order(boot_args)]
)
def test_get_boot_order(self):
""" Test get_boot_order command """
self.fabric.get_boot_order()
for node in self.nodes:
self.assertEqual(node.method_calls, [call.get_boot_order()])
def test_set_pxe_interface(self):
""" Test set_pxe_interface command """
self.fabric.set_pxe_interface("eth0")
for node in self.nodes:
self.assertEqual(
node.method_calls, [call.set_pxe_interface("eth0")]
)
def test_get_pxe_interface(self):
""" Test get_pxe_interface command """
self.fabric.get_pxe_interface()
for node in self.nodes:
self.assertEqual(node.method_calls, [call.get_pxe_interface()])
def test_get_versions(self):
""" Test get_versions command """
self.fabric.get_versions()
for node in self.nodes:
self.assertEqual(node.method_calls, [call.get_versions()])
def test_get_ubootenv(self):
""" Test get_ubootenv command """
self.fabric.get_ubootenv()
for node in self.nodes:
self.assertEqual(node.method_calls, [call.get_ubootenv()])
def test_ipmitool_command(self):
""" Test ipmitool_command command """
ipmitool_args = "power status"
self.fabric.ipmitool_command(ipmitool_args)
for node in self.nodes:
self.assertEqual(
node.method_calls, [call.ipmitool_command(ipmitool_args)]
)
def test_get_server_ip(self):
""" Test get_server_ip command """
self.fabric.get_server_ip("interface", "ipv6", "aggressive")
for node in self.nodes:
self.assertEqual(node.method_calls,
[call.get_server_ip("interface", "ipv6", "aggressive")]
)
def test_failed_command(self):
""" Test a failed command """
fail_nodes = [DummyFailNode(i) for i in DummyNode.ip_addresses]
self.fabric._nodes = dict(
(i, fail_nodes[i]) for i in xrange(len(self.nodes))
)
try:
self.fabric.get_power()
self.fail()
except CommandFailedError:
for node in fail_nodes:
self.assertEqual(node.method_calls, [call.get_power()])
def test_primary_node(self):
"""Test the primary_node property
Currently it should always return node 0.
"""
self.assertEqual(self.fabric.primary_node, self.nodes[0])
def test_get_ipsrc(self):
"""Test the get_ipsrc method
"""
self.fabric.get_ipsrc()
self.assertEqual(self.fabric.primary_node.bmc.method_calls, [
call.fabric_config_get_ip_src()
])
def test_set_ipsrc(self):
"""Test the set_ipsrc method"""
ipsrc = random.randint(1, 2)
self.fabric.set_ipsrc(ipsrc)
self.assertEqual(self.fabric.primary_node.bmc.method_calls, [
call.fabric_config_set_ip_src(ipsrc)
])
def test_apply_fdc(self):
"""Test the apply_factory_default_config method"""
self.fabric.apply_factory_default_config()
self.assertEqual(self.fabric.primary_node.bmc.method_calls, [
call.fabric_config_factory_default()
])
def test_get_ipaddr_base(self):
"""Test the get_ipaddr_base method"""
self.fabric.get_ipaddr_base()
self.assertEqual(self.fabric.primary_node.bmc.method_calls, [
call.fabric_config_get_ip_addr_base()
])
def test_update_config(self):
"""Test the update_config method
"""
self.fabric.update_config()
self.assertEqual(self.fabric.primary_node.bmc.method_calls, [
call.fabric_config_update_config()
])
def test_get_linkspeed(self):
"""Test the get_linkspeed method
"""
self.fabric.get_linkspeed()
self.assertEqual(self.fabric.primary_node.bmc.method_calls, [
call.fabric_config_get_linkspeed()
])
def test_set_linkspeed(self):
"""Test the set_linkspeed method"""
valid_linkspeeds = [1, 2.5, 5, 7.5, 10]
linkspeed = random.choice(valid_linkspeeds)
self.fabric.set_linkspeed(linkspeed)
self.assertEqual(self.fabric.primary_node.bmc.method_calls, [
call.fabric_config_set_linkspeed(linkspeed)
])
def test_get_linkspeed_policy(self):
"""Test the get_linkspeed_policy method
"""
self.fabric.get_linkspeed_policy()
self.assertEqual(self.fabric.primary_node.bmc.method_calls, [
call.fabric_config_get_linkspeed_policy()
])
def test_set_linkspeed_policy(self):
"""Test the set_linkspeed_policy method"""
ls_policy = random.randint(0, 1)
self.fabric.set_linkspeed_policy(ls_policy)
self.assertEqual(self.fabric.primary_node.bmc.method_calls, [
call.fabric_config_set_linkspeed_policy(ls_policy)
])
def test_get_link_stats(self):
"""Test the get_link_stats() method."""
for i in range(0, 5):
self.fabric.get_link_stats(i)
for node in self.fabric.nodes.values():
node.get_link_stats.assert_called_with(i)
def test_get_linkmap(self):
"""Test the get_linkmap method"""
self.fabric.get_linkmap()
for node in self.fabric.nodes.values():
self.assertTrue(node.get_linkmap.called)
def test_get_routing_table(self):
"""Test the get_routing_table method"""
self.fabric.get_routing_table()
for node in self.fabric.nodes.values():
self.assertTrue(node.get_routing_table.called)
def test_get_depth_chart(self):
"""Test the depth_chart method"""
self.fabric.get_depth_chart()
for node in self.fabric.nodes.values():
self.assertTrue(node.get_depth_chart.called)
def test_get_link_users_factor(self):
"""Test the get_link_users_factor method
"""
self.fabric.get_link_users_factor()
self.assertEqual(self.fabric.primary_node.bmc.method_calls, [
call.fabric_config_get_link_users_factor()
])
def test_set_link_users_factor(self):
"""Test the set_link_users_factor method"""
lu_factor = random.randint(5, 50)
self.fabric.set_link_users_factor(lu_factor)
self.assertEqual(self.fabric.primary_node.bmc.method_calls, [
call.fabric_config_set_link_users_factor(lu_factor)
])
    def test_add_macaddr(self):
"""Test the add_macaddr method"""
valid_nodeids = [0, 1, 2, 3]
t_nodeid = random.choice(valid_nodeids)
valid_ifaces = [0, 1, 2]
t_iface = random.choice(valid_ifaces)
t_macaddr = "66:55:44:33:22:11"
        self.fabric.add_macaddr(t_nodeid, t_iface, t_macaddr)
self.assertTrue(self.fabric.primary_node.bmc.fabric_add_macaddr.called)
    def test_rm_macaddr(self):
"""Test the rm_macaddr method"""
valid_nodeids = [0, 1, 2, 3]
t_nodeid = random.choice(valid_nodeids)
valid_ifaces = [0, 1, 2]
t_iface = random.choice(valid_ifaces)
t_macaddr = "66:55:44:33:22:11"
        self.fabric.rm_macaddr(t_nodeid, t_iface, t_macaddr)
self.assertTrue(self.fabric.primary_node.bmc.fabric_rm_macaddr.called)
def test_set_macaddr_base(self):
"""Test the set_macaddr_base method"""
self.fabric.set_macaddr_base("00:11:22:33:44:55")
for node in self.fabric.nodes.values():
if node == self.fabric.primary_node:
self.assertEqual(node.bmc.method_calls, [
call.fabric_config_set_macaddr_base(
macaddr="00:11:22:33:44:55"
)
])
else:
self.assertEqual(node.bmc.method_calls, [])
def test_get_macaddr_base(self):
"""Test the get_macaddr_base method"""
self.assertEqual(self.fabric.get_macaddr_base(), "00:00:00:00:00:00")
for node in self.fabric.nodes.values():
if node == self.fabric.primary_node:
self.assertEqual(
node.bmc.method_calls,
[call.fabric_config_get_macaddr_base()]
)
else:
self.assertEqual(node.bmc.method_calls, [])
def test_set_macaddr_mask(self):
"""Test the set_macaddr_mask method"""
self.fabric.set_macaddr_mask("00:11:22:33:44:55")
for node in self.fabric.nodes.values():
if node == self.fabric.primary_node:
self.assertEqual(node.bmc.method_calls, [
call.fabric_config_set_macaddr_mask(
mask="00:11:22:33:44:55"
)
])
else:
self.assertEqual(node.bmc.method_calls, [])
def test_get_macaddr_mask(self):
"""Test the get_macaddr_mask method"""
self.assertEqual(self.fabric.get_macaddr_mask(), "00:00:00:00:00:00")
for node in self.fabric.nodes.values():
if node == self.fabric.primary_node:
self.assertEqual(
node.bmc.method_calls,
[call.fabric_config_get_macaddr_mask()]
)
else:
self.assertEqual(node.bmc.method_calls, [])
def test_composite_bmc(self):
""" Test the CompositeBMC member """
with self.assertRaises(AttributeError):
self.fabric.cbmc.fake_method()
self.fabric.cbmc.set_chassis_power("off")
results = self.fabric.cbmc.get_chassis_status()
self.assertEqual(len(results), len(self.fabric.nodes))
for node_id in self.fabric.nodes:
self.assertFalse(results[node_id].power_on)
for node in self.fabric.nodes.values():
self.assertEqual(node.bmc.method_calls, [
call.set_chassis_power("off"),
call.get_chassis_status()
])
|
lokirius/python-for-android
|
refs/heads/master
|
python3-alpha/extra_modules/pyxmpp2/version.py
|
46
|
# pylint: disable=C0111,C0103
version = '2.0alpha2'
|
percy-g2/Novathor_xperia_u8500
|
refs/heads/master
|
6.1.1.B.1.54/external/webkit/Tools/Scripts/webkitpy/layout_tests/port/gtk.py
|
15
|
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the Google name nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""WebKit Gtk implementation of the Port interface."""
import logging
import os
import signal
from webkitpy.layout_tests.port.webkit import WebKitPort
_log = logging.getLogger("webkitpy.layout_tests.port.gtk")
class GtkPort(WebKitPort):
"""WebKit Gtk implementation of the Port class."""
def __init__(self, **kwargs):
kwargs.setdefault('port_name', 'gtk')
WebKitPort.__init__(self, **kwargs)
def _tests_for_other_platforms(self):
# FIXME: This list could be dynamic based on platform name and
# pushed into base.Port.
        # This really needs to be automated.
return [
"platform/chromium",
"platform/win",
"platform/qt",
"platform/mac",
]
def _path_to_apache_config_file(self):
# FIXME: This needs to detect the distribution and change config files.
return self._filesystem.join(self.layout_tests_dir(), 'http', 'conf',
'apache2-debian-httpd.conf')
def _shut_down_http_server(self, server_pid):
"""Shut down the httpd web server. Blocks until it's fully
shut down.
Args:
server_pid: The process ID of the running server.
"""
# server_pid is not set when "http_server.py stop" is run manually.
if server_pid is None:
# FIXME: This isn't ideal, since it could conflict with
            # apache2 processes not started by http_server.py,
# but good enough for now.
self._executive.kill_all('apache2')
else:
try:
os.kill(server_pid, signal.SIGTERM)
# TODO(mmoss) Maybe throw in a SIGKILL just to be sure?
except OSError:
# Sometimes we get a bad PID (e.g. from a stale httpd.pid
# file), so if kill fails on the given PID, just try to
# 'killall' web servers.
self._shut_down_http_server(None)
def _path_to_driver(self):
return self._build_path('Programs', 'DumpRenderTree')
def check_build(self, needs_http):
if not self._check_driver():
return False
return True
def _path_to_apache(self):
if self._is_redhat_based():
return '/usr/sbin/httpd'
else:
return '/usr/sbin/apache2'
def _path_to_apache_config_file(self):
if self._is_redhat_based():
config_name = 'fedora-httpd.conf'
else:
config_name = 'apache2-debian-httpd.conf'
return self._filesystem.join(self.layout_tests_dir(), 'http', 'conf',
config_name)
def _path_to_wdiff(self):
if self._is_redhat_based():
return '/usr/bin/dwdiff'
else:
return '/usr/bin/wdiff'
def _is_redhat_based(self):
return self._filesystem.exists(self._filesystem.join('/etc', 'redhat-release'))
|
SHA2017-badge/micropython-esp32
|
refs/heads/master
|
tests/extmod/uzlib_decompio_gz.py
|
26
|
try:
import uzlib as zlib
import uio as io
except ImportError:
print("SKIP")
raise SystemExit
# gzip bitstream
buf = io.BytesIO(b'\x1f\x8b\x08\x08\x99\x0c\xe5W\x00\x03hello\x00\xcbH\xcd\xc9\xc9\x07\x00\x86\xa6\x106\x05\x00\x00\x00')
inp = zlib.DecompIO(buf, 16 + 8)
print(buf.seek(0, 1))
print(inp.read(1))
print(buf.seek(0, 1))
print(inp.read(2))
print(inp.read())
print(buf.seek(0, 1))
print(inp.read(1))
print(inp.read())
print(buf.seek(0, 1))
# Check FHCRC field
buf = io.BytesIO(b'\x1f\x8b\x08\x02\x99\x0c\xe5W\x00\x03\x00\x00\xcbH\xcd\xc9\xc9\x07\x00\x86\xa6\x106\x05\x00\x00\x00')
inp = zlib.DecompIO(buf, 16 + 8)
print(inp.read())
# Check FEXTRA field
buf = io.BytesIO(b'\x1f\x8b\x08\x04\x99\x0c\xe5W\x00\x03\x01\x00X\xcbH\xcd\xc9\xc9\x07\x00\x86\xa6\x106\x05\x00\x00\x00')
inp = zlib.DecompIO(buf, 16 + 8)
print(inp.read())
# broken header
buf = io.BytesIO(b'\x1f\x8c\x08\x08\x99\x0c\xe5W\x00\x03hello\x00\xcbH\xcd\xc9\xc9\x07\x00\x86\xa6\x106\x05\x00\x00\x00')
try:
inp = zlib.DecompIO(buf, 16 + 8)
except ValueError:
print("ValueError")
# broken crc32
buf = io.BytesIO(b'\x1f\x8b\x08\x08\x99\x0c\xe5W\x00\x03hello\x00\xcbH\xcd\xc9\xc9\x07\x00\x86\xa7\x106\x05\x00\x00\x00')
inp = zlib.DecompIO(buf, 16 + 8)
try:
inp.read(6)
except OSError as e:
print(repr(e))
# broken uncompressed size - not checked so far
#buf = io.BytesIO(b'\x1f\x8b\x08\x08\x99\x0c\xe5W\x00\x03hello\x00\xcbH\xcd\xc9\xc9\x07\x00\x86\xa6\x106\x06\x00\x00\x00')
#inp = zlib.DecompIO(buf, 16 + 8)
#inp.read(6)
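# Note added for clarity (not part of the original test): the second argument to
# DecompIO is the wbits value; mirroring CPython's zlib convention, adding 16 to
# the window size (here 16 + 8) tells the decompressor to expect a gzip-wrapped
# stream, which is why the buffers above start with the \x1f\x8b gzip magic bytes.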
|
adambreznicky/python
|
refs/heads/master
|
Early/EventsLayer.py
|
2
|
# ---------------------------------------------------------------------------
# EventsLayer.py
# Created on: 2013-05-21 12:09:49.00000
# (generated by ArcGIS/ModelBuilder)
# Description:
# ---------------------------------------------------------------------------
# Import arcpy module
import arcpy
# Local variables:
TXDOT_Roadways = "TXDOT_Roadways"
TPP_GIS_MCHAMB1_SUBFILES = "TPP_GIS.MCHAMB1.SUBFILES"
RDWY_Events = "RDWY_Events"
# Process: Make Route Event Layer
arcpy.MakeRouteEventLayer_lr(TXDOT_Roadways, "RTE_ID", TPP_GIS_MCHAMB1_SUBFILES, "RTE_ID LINE BMP EMP", RDWY_Events, "", "NO_ERROR_FIELD", "NO_ANGLE_FIELD", "NORMAL", "ANGLE", "LEFT", "POINT")
|
hachreak/invenio-communities
|
refs/heads/master
|
invenio_communities/tasks.py
|
4
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015, 2016 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""OpenAIRE service integration for Invenio repositories."""
from __future__ import absolute_import, print_function
from datetime import datetime
from celery import shared_task
from invenio_db import db
from .models import Community, InclusionRequest
@shared_task(ignore_result=True)
def delete_marked_communities():
"""Delete communities after holdout time."""
# TODO: Delete the community ID from all records metadata first
raise NotImplementedError()
    Community.query.filter(
        Community.delete_time > datetime.utcnow()).delete()
db.session.commit()
@shared_task(ignore_result=True)
def delete_expired_requests():
"""Delete expired inclusion requests."""
    InclusionRequest.query.filter(
        InclusionRequest.expiry_date > datetime.utcnow()).delete()
db.session.commit()
|
affansyed/bcc
|
refs/heads/master
|
tools/old/offcputime.py
|
4
|
#!/usr/bin/python
#
# offcputime Summarize off-CPU time by kernel stack trace
# For Linux, uses BCC, eBPF.
#
# USAGE: offcputime [-h] [-u] [-p PID] [-v] [-f] [duration]
#
# The current implementation uses an unrolled loop for x86_64, and was written
# as a proof of concept. This implementation should be replaced in the future
# with an appropriate bpf_ call, when available.
#
# Currently limited to a stack trace depth of 21 (maxdepth + 1).
#
# Copyright 2016 Netflix, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 13-Jan-2016 Brendan Gregg Created this.
from __future__ import print_function
from bcc import BPF
from time import sleep, strftime
import argparse
import signal
# arguments
examples = """examples:
./offcputime # trace off-CPU stack time until Ctrl-C
./offcputime 5 # trace for 5 seconds only
./offcputime -f 5 # 5 seconds, and output in folded format
./offcputime -u # don't include kernel threads (user only)
    ./offcputime -p 185     # trace for PID 185 only
"""
parser = argparse.ArgumentParser(
description="Summarize off-CPU time by kernel stack trace",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=examples)
parser.add_argument("-u", "--useronly", action="store_true",
help="user threads only (no kernel threads)")
parser.add_argument("-p", "--pid",
help="trace this PID only")
parser.add_argument("-v", "--verbose", action="store_true",
help="show raw addresses")
parser.add_argument("-f", "--folded", action="store_true",
help="output folded format")
parser.add_argument("duration", nargs="?", default=99999999,
help="duration of trace, in seconds")
args = parser.parse_args()
folded = args.folded
duration = int(args.duration)
debug = 0
maxdepth = 20 # and MAXDEPTH
if args.pid and args.useronly:
print("ERROR: use either -p or -u.")
exit()
# signal handler
def signal_ignore(signal, frame):
print()
# define BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <linux/sched.h>
#define MAXDEPTH 20
#define MINBLOCK_US 1
struct key_t {
char name[TASK_COMM_LEN];
// Skip saving the ip
u64 ret[MAXDEPTH];
};
BPF_HASH(counts, struct key_t);
BPF_HASH(start, u32);
static u64 get_frame(u64 *bp) {
if (*bp) {
// The following stack walker is x86_64 specific
u64 ret = 0;
if (bpf_probe_read(&ret, sizeof(ret), (void *)(*bp+8)))
return 0;
if (bpf_probe_read(bp, sizeof(*bp), (void *)*bp))
*bp = 0;
if (ret < __START_KERNEL_map)
return 0;
return ret;
}
return 0;
}
int oncpu(struct pt_regs *ctx, struct task_struct *prev) {
u32 pid = prev->pid;
u64 ts, *tsp;
// record previous thread sleep time
if (FILTER) {
ts = bpf_ktime_get_ns();
start.update(&pid, &ts);
}
// calculate current thread's delta time
pid = bpf_get_current_pid_tgid();
tsp = start.lookup(&pid);
if (tsp == 0)
return 0; // missed start or filtered
u64 delta = bpf_ktime_get_ns() - *tsp;
start.delete(&pid);
delta = delta / 1000;
if (delta < MINBLOCK_US)
return 0;
// create map key
u64 zero = 0, *val, bp = 0;
int depth = 0;
struct key_t key = {};
bpf_get_current_comm(&key.name, sizeof(key.name));
bp = ctx->bp;
// unrolled loop (MAXDEPTH):
if (!(key.ret[depth++] = get_frame(&bp))) goto out;
if (!(key.ret[depth++] = get_frame(&bp))) goto out;
if (!(key.ret[depth++] = get_frame(&bp))) goto out;
if (!(key.ret[depth++] = get_frame(&bp))) goto out;
if (!(key.ret[depth++] = get_frame(&bp))) goto out;
if (!(key.ret[depth++] = get_frame(&bp))) goto out;
if (!(key.ret[depth++] = get_frame(&bp))) goto out;
if (!(key.ret[depth++] = get_frame(&bp))) goto out;
if (!(key.ret[depth++] = get_frame(&bp))) goto out;
if (!(key.ret[depth++] = get_frame(&bp))) goto out;
if (!(key.ret[depth++] = get_frame(&bp))) goto out;
if (!(key.ret[depth++] = get_frame(&bp))) goto out;
if (!(key.ret[depth++] = get_frame(&bp))) goto out;
if (!(key.ret[depth++] = get_frame(&bp))) goto out;
if (!(key.ret[depth++] = get_frame(&bp))) goto out;
if (!(key.ret[depth++] = get_frame(&bp))) goto out;
if (!(key.ret[depth++] = get_frame(&bp))) goto out;
if (!(key.ret[depth++] = get_frame(&bp))) goto out;
if (!(key.ret[depth++] = get_frame(&bp))) goto out;
if (!(key.ret[depth++] = get_frame(&bp))) goto out;
out:
val = counts.lookup_or_init(&key, &zero);
(*val) += delta;
return 0;
}
"""
if args.pid:
filter = 'pid == %s' % args.pid
elif args.useronly:
filter = '!(prev->flags & PF_KTHREAD)'
else:
filter = '1'
bpf_text = bpf_text.replace('FILTER', filter)
if debug:
print(bpf_text)
# initialize BPF
b = BPF(text=bpf_text)
b.attach_kprobe(event="finish_task_switch", fn_name="oncpu")
matched = b.num_open_kprobes()
if matched == 0:
print("0 functions traced. Exiting.")
exit()
# header
if not folded:
print("Tracing off-CPU time (us) by kernel stack", end="")
if duration < 99999999:
print(" for %d secs." % duration)
else:
print("... Hit Ctrl-C to end.")
# output
while (1):
try:
sleep(duration)
except KeyboardInterrupt:
# as cleanup can take many seconds, trap Ctrl-C:
signal.signal(signal.SIGINT, signal_ignore)
if not folded:
print()
counts = b.get_table("counts")
for k, v in sorted(counts.items(), key=lambda counts: counts[1].value):
if folded:
# print folded stack output
line = k.name.decode() + ";"
for i in reversed(range(0, maxdepth)):
if k.ret[i] == 0:
continue
line = line + b.ksym(k.ret[i])
if i != 0:
line = line + ";"
print("%s %d" % (line, v.value))
else:
# print default multi-line stack output
for i in range(0, maxdepth):
if k.ret[i] == 0:
break
print(" %-16x %s" % (k.ret[i],
b.ksym(k.ret[i])))
print(" %-16s %s" % ("-", k.name))
print(" %d\n" % v.value)
counts.clear()
if not folded:
print("Detaching...")
exit()
|
technologiescollege/s2a_fr
|
refs/heads/portable
|
s2a/Python/Lib/site-packages/PyMata/pymata_serial.py
|
2
|
__author__ = 'Copyright (c) 2013 Alan Yorinks All rights reserved.'
"""
Created on Tue Sep 3 07:12:01 2013
@author: Alan Yorinks
Copyright (c) 2013-14 Alan Yorinks All rights reserved.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public
License as published by the Free Software Foundation; either
version 3 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import threading
import serial
import time
class PyMataSerial(threading.Thread):
"""
This class manages the serial port for Arduino serial communications
"""
# class variables
arduino = serial.Serial()
port_id = ""
baud_rate = 57600
timeout = 1
command_deque = None
def __init__(self, port_id, command_deque):
"""
Constructor:
        @param port_id: The serial port identifier to open
        @param command_deque: A reference to the deque shared with the _command_handler
"""
self.port_id = port_id
self.command_deque = command_deque
threading.Thread.__init__(self)
self.daemon = True
self.arduino = serial.Serial(self.port_id, self.baud_rate,
timeout=int(self.timeout))
#self.arduino.writeTimeout = 2
self.stop_event = threading.Event()
def stop(self):
self.stop_event.set()
def is_stopped(self):
return self.stop_event.is_set()
def open(self):
"""
open the serial port using the configuration data
returns a reference to this instance
"""
# open a serial port
        print '\nCommunicating with the Arduino board on port %s ...' % self.port_id
try:
# in case the port is already open, let's close it and then
#reopen it
self.arduino.close()
time.sleep(1)
self.arduino.open()
time.sleep(1)
return self.arduino
except Exception:
# opened failed - will report back to caller
raise
def close(self):
"""
Close the serial port
return: None
"""
try:
self.arduino.close()
except OSError:
pass
def write(self, data):
"""
write the data to the serial port
return: None
"""
self.arduino.write(data)
def run(self):
"""
This method continually runs. If an incoming character is available on the serial port
it is read and placed on the _command_deque
@return: Never Returns
"""
while not self.is_stopped():
# we can get an OSError: [Errno9] Bad file descriptor when shutting down
# just ignore it
try:
if self.arduino.inWaiting():
c = self.arduino.read()
self.command_deque.append(ord(c))
except OSError:
pass
except IOError:
self.stop()
self.close()
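# Minimal usage sketch (illustrative only; the port name below is a placeholder
# and this snippet is not part of the original library):
#
#   from collections import deque
#   command_deque = deque()
#   transport = PyMataSerial('/dev/ttyACM0', command_deque)
#   transport.open()     # (re)opens the serial port configured in the constructor
#   transport.start()    # run() begins appending incoming bytes to command_deque
#   ...
#   transport.stop()
#   transport.close()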
|
blrm/openshift-tools
|
refs/heads/stg
|
openshift/installer/vendored/openshift-ansible-3.10.0-0.29.0/roles/lib_utils/library/openshift_cert_expiry.py
|
7
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# pylint: disable=line-too-long,invalid-name
"""For details on this module see DOCUMENTATION (below)"""
import base64
import datetime
import io
import os
import subprocess
import yaml
# pylint import-error disabled because pylint cannot find the package
# when installed in a virtualenv
from ansible.module_utils.six.moves import configparser # pylint: disable=import-error
from ansible.module_utils.basic import AnsibleModule
try:
# You can comment this import out and include a 'pass' in this
# block if you're manually testing this module on a NON-ATOMIC
# HOST (or any host that just doesn't have PyOpenSSL
# available). That will force the `load_and_handle_cert` function
# to use the Fake OpenSSL classes.
import OpenSSL.crypto
HAS_OPENSSL = True
except ImportError:
# Some platforms (such as RHEL Atomic) may not have the Python
# OpenSSL library installed. In this case we will use a manual
# work-around to parse each certificate.
#
# Check for 'OpenSSL.crypto' in `sys.modules` later.
HAS_OPENSSL = False
DOCUMENTATION = '''
---
module: openshift_cert_expiry
short_description: Check OpenShift Container Platform (OCP) and Kube certificate expirations on a cluster
description:
- The M(openshift_cert_expiry) module has two basic functions: to flag certificates which will expire in a set window of time from now, and to notify you about certificates which have already expired.
- When the module finishes, a summary of the examination is returned. Each certificate in the summary has a C(health) key with a value of one of the following:
- C(ok) - not expired, and outside of the expiration C(warning_days) window.
- C(warning) - not expired, but will expire between now and the C(warning_days) window.
- C(expired) - an expired certificate.
  - Certificate flagging follows this logic:
- If the expiration date is before now then the certificate is classified as C(expired).
  - The certificate's time to live (expiration date - now) is calculated; if that window is less than C(warning_days), the certificate is classified as C(warning).
- All other conditions are classified as C(ok).
- The following keys are ALSO present in the certificate summary:
- C(cert_cn) - The common name of the certificate (additional CNs present in SAN extensions are omitted)
- C(days_remaining) - The number of days until the certificate expires.
- C(expiry) - The date the certificate expires on.
- C(path) - The full path to the certificate on the examined host.
version_added: "1.0"
options:
config_base:
description:
- Base path to OCP system settings.
required: false
default: /etc/origin
warning_days:
description:
- Flag certificates which will expire in C(warning_days) days from now.
required: false
default: 30
show_all:
description:
- Enable this option to show analysis of ALL certificates examined by this module.
- By default only certificates which have expired, or will expire within the C(warning_days) window will be reported.
required: false
default: false
author: "Tim Bielawa (@tbielawa) <tbielawa@redhat.com>"
'''
EXAMPLES = '''
# Default invocation, only notify about expired certificates or certificates which will expire within 30 days from now
- openshift_cert_expiry:
# Expand the warning window to show certificates expiring within a year from now
- openshift_cert_expiry: warning_days=365
# Show expired, soon to expire (now + 30 days), and all other certificates examined
- openshift_cert_expiry: show_all=true
'''
class FakeOpenSSLCertificate(object):
"""This provides a rough mock of what you get from
`OpenSSL.crypto.load_certificate()`. This is a work-around for
platforms missing the Python OpenSSL library.
"""
def __init__(self, cert_string):
"""`cert_string` is a certificate in the form you get from running a
.crt through 'openssl x509 -in CERT.cert -text'"""
self.cert_string = cert_string
self.serial = None
self.subject = None
self.extensions = []
self.not_after = None
self._parse_cert()
def _parse_cert(self):
"""Manually parse the certificate line by line"""
self.extensions = []
PARSING_ALT_NAMES = False
PARSING_HEX_SERIAL = False
for line in self.cert_string.split('\n'):
l = line.strip()
if PARSING_ALT_NAMES:
# We're parsing a 'Subject Alternative Name' line
self.extensions.append(
FakeOpenSSLCertificateSANExtension(l))
PARSING_ALT_NAMES = False
continue
if PARSING_HEX_SERIAL:
# Hex serials arrive colon-delimited
serial_raw = l.replace(':', '')
# Convert to decimal
self.serial = int('0x' + serial_raw, base=16)
PARSING_HEX_SERIAL = False
continue
# parse out the bits that we can
if l.startswith('Serial Number:'):
# Decimal format:
# Serial Number: 11 (0xb)
# => 11
# Hex Format (large serials):
# Serial Number:
# 0a:de:eb:24:04:75:ab:56:39:14:e9:5a:22:e2:85:bf
# => 14449739080294792594019643629255165375
if l.endswith(':'):
PARSING_HEX_SERIAL = True
continue
self.serial = int(l.split()[-2])
elif l.startswith('Not After :'):
# Not After : Feb 7 18:19:35 2019 GMT
# => strptime(str, '%b %d %H:%M:%S %Y %Z')
# => strftime('%Y%m%d%H%M%SZ')
# => 20190207181935Z
not_after_raw = l.partition(' : ')[-1]
# Last item: ('Not After', ' : ', 'Feb 7 18:19:35 2019 GMT')
not_after_parsed = datetime.datetime.strptime(not_after_raw, '%b %d %H:%M:%S %Y %Z')
self.not_after = not_after_parsed.strftime('%Y%m%d%H%M%SZ')
elif l.startswith('X509v3 Subject Alternative Name:'):
PARSING_ALT_NAMES = True
continue
elif l.startswith('Subject:'):
# O = system:nodes, CN = system:node:m01.example.com
self.subject = FakeOpenSSLCertificateSubjects(l.partition(': ')[-1])
def get_serial_number(self):
"""Return the serial number of the cert"""
return self.serial
def get_subject(self):
"""Subjects must implement get_components() and return dicts or
tuples. An 'openssl x509 -in CERT.cert -text' with 'Subject':
Subject: Subject: O=system:nodes, CN=system:node:m01.example.com
might return: [('O=system', 'nodes'), ('CN=system', 'node:m01.example.com')]
"""
return self.subject
def get_extension(self, i):
"""Extensions must implement get_short_name() and return the string
'subjectAltName'"""
return self.extensions[i]
def get_extension_count(self):
""" get_extension_count """
return len(self.extensions)
def get_notAfter(self):
"""Returns a date stamp as a string in the form
'20180922170439Z'. strptime the result with format param:
'%Y%m%d%H%M%SZ'."""
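        # For example, parsing the stamp shown above:
        #   datetime.datetime.strptime('20180922170439Z', '%Y%m%d%H%M%SZ')
        #   -> datetime.datetime(2018, 9, 22, 17, 4, 39)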
return self.not_after
class FakeOpenSSLCertificateSANExtension(object): # pylint: disable=too-few-public-methods
"""Mocks what happens when `get_extension` is called on a certificate
object"""
def __init__(self, san_string):
"""With `san_string` as you get from:
$ openssl x509 -in certificate.crt -text
"""
self.san_string = san_string
self.short_name = 'subjectAltName'
def get_short_name(self):
"""Return the 'type' of this extension. It's always the same though
because we only care about subjectAltName's"""
return self.short_name
def __str__(self):
"""Return this extension and the value as a simple string"""
return self.san_string
# pylint: disable=too-few-public-methods
class FakeOpenSSLCertificateSubjects(object):
"""Mocks what happens when `get_subject` is called on a certificate
object"""
def __init__(self, subject_string):
"""With `subject_string` as you get from:
$ openssl x509 -in certificate.crt -text
"""
self.subjects = []
for s in subject_string.split(', '):
name, _, value = s.partition(' = ')
self.subjects.append((name, value))
def get_components(self):
"""Returns a list of tuples"""
return self.subjects
######################################################################
def filter_paths(path_list):
"""`path_list` - A list of file paths to check. Only files which exist
will be returned
"""
return [p for p in path_list if os.path.exists(os.path.realpath(p))]
# pylint: disable=too-many-locals,too-many-branches
#
# TODO: Break this function down into smaller chunks
def load_and_handle_cert(cert_string, now, base64decode=False, ans_module=None):
"""Load a certificate, split off the good parts, and return some
useful data
Params:
- `cert_string` (string) - a certificate loaded into a string object
- `now` (datetime) - a datetime object of the time to calculate the certificate 'time_remaining' against
- `base64decode` (bool) - run base64.b64decode() on the input
- `ans_module` (AnsibleModule) - The AnsibleModule object for this module (so we can raise errors)
Returns:
A tuple of the form:
(cert_subject, cert_expiry_date, time_remaining, cert_serial_number)
"""
if base64decode:
_cert_string = base64.b64decode(cert_string).decode('utf-8')
else:
_cert_string = cert_string
# Disable this. We 'redefine' the type because we are working
# around a missing library on the target host.
#
# pylint: disable=redefined-variable-type
if HAS_OPENSSL:
# No work-around required
cert_loaded = OpenSSL.crypto.load_certificate(
OpenSSL.crypto.FILETYPE_PEM, _cert_string)
else:
# Missing library, work-around required. Run the 'openssl'
# command on it to decode it
cmd = 'openssl x509 -text'
try:
openssl_proc = subprocess.Popen(cmd.split(),
stdout=subprocess.PIPE,
stdin=subprocess.PIPE)
except OSError:
ans_module.fail_json(msg="Error: The 'OpenSSL' python library and CLI command were not found on the target host. Unable to parse any certificates. This host will not be included in generated reports.")
else:
openssl_decoded = openssl_proc.communicate(_cert_string.encode('utf-8'))[0].decode('utf-8')
cert_loaded = FakeOpenSSLCertificate(openssl_decoded)
######################################################################
# Read all possible names from the cert
cert_subjects = []
for name, value in cert_loaded.get_subject().get_components():
if isinstance(name, bytes) or isinstance(value, bytes):
name = name.decode('utf-8')
value = value.decode('utf-8')
cert_subjects.append('{}:{}'.format(name, value))
# To read SANs from a cert we must read the subjectAltName
# extension from the X509 Object. What makes this more difficult
# is that pyOpenSSL does not give extensions as an iterable
san = None
for i in range(cert_loaded.get_extension_count()):
ext = cert_loaded.get_extension(i)
if ext.get_short_name() == 'subjectAltName':
san = ext
if san is not None:
# The X509Extension object for subjectAltName prints as a
# string with the alt names separated by a comma and a
# space. Split the string by ', ' and then add our new names
# to the list of existing names
cert_subjects.extend(str(san).split(', '))
cert_subject = ', '.join(cert_subjects)
######################################################################
# Grab the expiration date
not_after = cert_loaded.get_notAfter()
# example get_notAfter() => 20180922170439Z
if isinstance(not_after, bytes):
not_after = not_after.decode('utf-8')
cert_expiry_date = datetime.datetime.strptime(
not_after,
'%Y%m%d%H%M%SZ')
time_remaining = cert_expiry_date - now
return (cert_subject, cert_expiry_date, time_remaining, cert_loaded.get_serial_number())
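# Illustrative call (the PEM text variable is hypothetical, not defined in this
# module):
#   subject, expiry, remaining, serial = load_and_handle_cert(
#       pem_text, datetime.datetime.now(), ans_module=module)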
def classify_cert(cert_meta, now, time_remaining, expire_window, cert_list):
"""Given metadata about a certificate under examination, classify it
into one of three categories, 'ok', 'warning', and 'expired'.
Params:
- `cert_meta` dict - A dict with certificate metadata. Required fields
include: 'cert_cn', 'path', 'expiry', 'days_remaining', 'health'.
- `now` (datetime) - a datetime object of the time to calculate the certificate 'time_remaining' against
- `time_remaining` (datetime.timedelta) - a timedelta for how long until the cert expires
- `expire_window` (datetime.timedelta) - a timedelta for how long the warning window is
- `cert_list` list - A list to shove the classified cert into
Return:
- `cert_list` - The updated list of classified certificates
"""
expiry_str = str(cert_meta['expiry'])
# Categorization
if cert_meta['expiry'] < now:
# This already expired, must NOTIFY
cert_meta['health'] = 'expired'
elif time_remaining < expire_window:
        # WARN about this upcoming expiration
cert_meta['health'] = 'warning'
else:
# Not expired or about to expire
cert_meta['health'] = 'ok'
cert_meta['expiry'] = expiry_str
cert_meta['serial_hex'] = hex(int(cert_meta['serial']))
cert_list.append(cert_meta)
return cert_list
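# Worked example of the classification above (hypothetical dates): with an
# expire_window of 30 days, a certificate whose 'expiry' is 10 days after `now`
# yields time_remaining < expire_window and is tagged 'warning'; one whose
# 'expiry' is already before `now` is tagged 'expired'; anything else is 'ok'.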
def tabulate_summary(certificates, kubeconfigs, etcd_certs, router_certs, registry_certs):
"""Calculate the summary text for when the module finishes
running. This includes counts of each classification and what have
you.
Params:
- `certificates` (list of dicts) - Processed `expire_check_result`
dicts with filled in `health` keys for system certificates.
- `kubeconfigs` - as above for kubeconfigs
- `etcd_certs` - as above for etcd certs
Return:
- `summary_results` (dict) - Counts of each cert type classification
and total items examined.
"""
items = certificates + kubeconfigs + etcd_certs + router_certs + registry_certs
summary_results = {
'system_certificates': len(certificates),
'kubeconfig_certificates': len(kubeconfigs),
'etcd_certificates': len(etcd_certs),
'router_certs': len(router_certs),
'registry_certs': len(registry_certs),
'total': len(items),
'ok': 0,
'warning': 0,
'expired': 0
}
summary_results['expired'] = len([c for c in items if c['health'] == 'expired'])
summary_results['warning'] = len([c for c in items if c['health'] == 'warning'])
summary_results['ok'] = len([c for c in items if c['health'] == 'ok'])
return summary_results
######################################################################
# This is our module MAIN function after all, so there's bound to be a
# lot of code bundled up into one block
#
# Reason: These checks are disabled because the issue was introduced
# during a period where the pylint checks weren't enabled for this file
# Status: temporarily disabled pending future refactoring
# pylint: disable=too-many-locals,too-many-statements,too-many-branches
def main():
"""This module examines certificates (in various forms) which compose
an OpenShift Container Platform cluster
"""
module = AnsibleModule(
argument_spec=dict(
config_base=dict(
required=False,
default="/etc/origin",
type='str'),
warning_days=dict(
required=False,
default=30,
type='int'),
show_all=dict(
required=False,
default=False,
type='bool')
),
supports_check_mode=True,
)
# Basic scaffolding for OpenShift specific certs
openshift_base_config_path = os.path.realpath(module.params['config_base'])
openshift_master_config_path = os.path.join(openshift_base_config_path,
"master", "master-config.yaml")
openshift_node_config_path = os.path.join(openshift_base_config_path,
"node", "node-config.yaml")
openshift_cert_check_paths = [
openshift_master_config_path,
openshift_node_config_path,
]
# Paths for Kubeconfigs. Additional kubeconfigs are conditionally
# checked later in the code
master_kube_configs = ['admin', 'openshift-master',
'openshift-node', 'openshift-router',
'openshift-registry']
kubeconfig_paths = []
for m_kube_config in master_kube_configs:
kubeconfig_paths.append(
os.path.join(openshift_base_config_path, "master", m_kube_config + ".kubeconfig")
)
# Validate some paths we have the ability to do ahead of time
openshift_cert_check_paths = filter_paths(openshift_cert_check_paths)
kubeconfig_paths = filter_paths(kubeconfig_paths)
# etcd, where do you hide your certs? Used when parsing etcd.conf
etcd_cert_params = [
"ETCD_TRUSTED_CA_FILE",
"ETCD_CERT_FILE",
"ETCD_PEER_TRUSTED_CA_FILE",
"ETCD_PEER_CERT_FILE",
]
# Expiry checking stuff
now = datetime.datetime.now()
# todo, catch exception for invalid input and return a fail_json
warning_days = int(module.params['warning_days'])
expire_window = datetime.timedelta(days=warning_days)
# Module stuff
#
# The results of our cert checking to return from the task call
check_results = {}
check_results['meta'] = {}
check_results['meta']['warning_days'] = warning_days
check_results['meta']['checked_at_time'] = str(now)
check_results['meta']['warn_before_date'] = str(now + expire_window)
check_results['meta']['show_all'] = str(module.params['show_all'])
# All the analyzed certs accumulate here
ocp_certs = []
######################################################################
# Sure, why not? Let's enable check mode.
if module.check_mode:
check_results['ocp_certs'] = []
module.exit_json(
check_results=check_results,
msg="Checked 0 total certificates. Expired/Warning/OK: 0/0/0. Warning window: %s days" % module.params['warning_days'],
rc=0,
changed=False
)
######################################################################
# Check for OpenShift Container Platform specific certs
######################################################################
for os_cert in filter_paths(openshift_cert_check_paths):
# Open up that config file and locate the cert and CA
with io.open(os_cert, 'r', encoding='utf-8') as fp:
cert_meta = {}
cfg = yaml.load(fp)
# cert files are specified in parsed `fp` as relative to the path
# of the original config file. 'master-config.yaml' with certFile
# = 'foo.crt' implies that 'foo.crt' is in the same
# directory. certFile = '../foo.crt' is in the parent directory.
cfg_path = os.path.dirname(fp.name)
servingInfoFile = cfg.get('servingInfo', {}).get('certFile')
if servingInfoFile:
cert_meta['certFile'] = os.path.join(cfg_path, servingInfoFile)
servingInfoCA = cfg.get('servingInfo', {}).get('clientCA')
if servingInfoCA:
cert_meta['clientCA'] = os.path.join(cfg_path, servingInfoCA)
serviceSigner = cfg.get('controllerConfig', {}).get('serviceServingCert', {}).get('signer', {}).get('certFile')
if serviceSigner:
cert_meta['serviceSigner'] = os.path.join(cfg_path, serviceSigner)
etcdClientCA = cfg.get('etcdClientInfo', {}).get('ca')
if etcdClientCA:
cert_meta['etcdClientCA'] = os.path.join(cfg_path, etcdClientCA)
etcdClientCert = cfg.get('etcdClientInfo', {}).get('certFile')
if etcdClientCert:
cert_meta['etcdClientCert'] = os.path.join(cfg_path, etcdClientCert)
kubeletCert = cfg.get('kubeletClientInfo', {}).get('certFile')
if kubeletCert:
cert_meta['kubeletCert'] = os.path.join(cfg_path, kubeletCert)
proxyClient = cfg.get('kubernetesMasterConfig', {}).get('proxyClientInfo', {}).get('certFile')
if proxyClient:
cert_meta['proxyClient'] = os.path.join(cfg_path, proxyClient)
######################################################################
# Load the certificate and the CA, parse their expiration dates into
# datetime objects so we can manipulate them later
for v in cert_meta.values():
with io.open(v, 'r', encoding='utf-8') as fp:
cert = fp.read()
(cert_subject,
cert_expiry_date,
time_remaining,
cert_serial) = load_and_handle_cert(cert, now, ans_module=module)
expire_check_result = {
'cert_cn': cert_subject,
'path': fp.name,
'expiry': cert_expiry_date,
'days_remaining': time_remaining.days,
'health': None,
'serial': cert_serial
}
classify_cert(expire_check_result, now, time_remaining, expire_window, ocp_certs)
######################################################################
# /Check for OpenShift Container Platform specific certs
######################################################################
######################################################################
# Check service Kubeconfigs
######################################################################
kubeconfigs = []
# There may be additional kubeconfigs to check, but their naming
# is less predictable than the ones we've already assembled.
try:
# Try to read the standard 'node-config.yaml' file to check if
# this host is a node.
with io.open(openshift_node_config_path, 'r', encoding='utf-8') as fp:
cfg = yaml.load(fp)
# OK, the config file exists, therefore this is a
# node. Nodes have their own kubeconfig files to
# communicate with the master API. Let's read the relative
# path to that file from the node config.
node_masterKubeConfig = cfg['masterKubeConfig']
# As before, the path to the 'masterKubeConfig' file is
# relative to `fp`
cfg_path = os.path.dirname(fp.name)
node_kubeconfig = os.path.join(cfg_path, node_masterKubeConfig)
with io.open(node_kubeconfig, 'r', encoding='utf8') as fp:
# Read in the nodes kubeconfig file and grab the good stuff
cfg = yaml.load(fp)
c = cfg['users'][0]['user']['client-certificate-data']
(cert_subject,
cert_expiry_date,
time_remaining,
cert_serial) = load_and_handle_cert(c, now, base64decode=True, ans_module=module)
expire_check_result = {
'cert_cn': cert_subject,
'path': fp.name,
'expiry': cert_expiry_date,
'days_remaining': time_remaining.days,
'health': None,
'serial': cert_serial
}
classify_cert(expire_check_result, now, time_remaining, expire_window, kubeconfigs)
except IOError:
# This is not a node
pass
for kube in filter_paths(kubeconfig_paths):
with io.open(kube, 'r', encoding='utf-8') as fp:
# TODO: Maybe consider catching exceptions here?
cfg = yaml.load(fp)
# Per conversation, "the kubeconfigs you care about:
# admin, router, registry should all be single
# value". Following that advice we only grab the data for
# the user at index 0 in the 'users' list. There should
# not be more than one user.
c = cfg['users'][0]['user']['client-certificate-data']
(cert_subject,
cert_expiry_date,
time_remaining,
cert_serial) = load_and_handle_cert(c, now, base64decode=True, ans_module=module)
expire_check_result = {
'cert_cn': cert_subject,
'path': fp.name,
'expiry': cert_expiry_date,
'days_remaining': time_remaining.days,
'health': None,
'serial': cert_serial
}
classify_cert(expire_check_result, now, time_remaining, expire_window, kubeconfigs)
######################################################################
# /Check service Kubeconfigs
######################################################################
######################################################################
# Check etcd certs
#
# Two things to check: 'external' etcd, and embedded etcd.
######################################################################
# FIRST: The 'external' etcd
#
# Some values may be duplicated, make this a set for now so we
# unique them all
etcd_certs_to_check = set([])
etcd_certs = []
etcd_cert_params.append('dne')
try:
with io.open('/etc/etcd/etcd.conf', 'r', encoding='utf-8') as fp:
# Add dummy header section.
config = io.StringIO()
config.write(u'[ETCD]\n')
config.write(fp.read().replace('%', '%%'))
config.seek(0, os.SEEK_SET)
etcd_config = configparser.ConfigParser()
etcd_config.readfp(config)
for param in etcd_cert_params:
try:
etcd_certs_to_check.add(etcd_config.get('ETCD', param))
except configparser.NoOptionError:
# That parameter does not exist, oh well...
pass
except IOError:
# No etcd to see here, move along
pass
for etcd_cert in filter_paths(etcd_certs_to_check):
with io.open(etcd_cert, 'r', encoding='utf-8') as fp:
c = fp.read()
(cert_subject,
cert_expiry_date,
time_remaining,
cert_serial) = load_and_handle_cert(c, now, ans_module=module)
expire_check_result = {
'cert_cn': cert_subject,
'path': fp.name,
'expiry': cert_expiry_date,
'days_remaining': time_remaining.days,
'health': None,
'serial': cert_serial
}
classify_cert(expire_check_result, now, time_remaining, expire_window, etcd_certs)
######################################################################
# /Check etcd certs
######################################################################
######################################################################
# Check router/registry certs
#
# These are saved as secrets in etcd. That means that we can not
# simply read a file to grab the data. Instead we're going to
# subprocess out to the 'oc get' command. On non-masters this
# command will fail, that is expected so we catch that exception.
######################################################################
router_certs = []
registry_certs = []
######################################################################
# First the router certs
try:
router_secrets_raw = subprocess.Popen('oc get -n default secret router-certs -o yaml'.split(),
stdout=subprocess.PIPE)
router_ds = yaml.load(router_secrets_raw.communicate()[0])
router_c = router_ds['data']['tls.crt']
router_path = router_ds['metadata']['selfLink']
except TypeError:
# YAML couldn't load the result, this is not a master
pass
except OSError:
# The OC command doesn't exist here. Move along.
pass
else:
(cert_subject,
cert_expiry_date,
time_remaining,
cert_serial) = load_and_handle_cert(router_c, now, base64decode=True, ans_module=module)
expire_check_result = {
'cert_cn': cert_subject,
'path': router_path,
'expiry': cert_expiry_date,
'days_remaining': time_remaining.days,
'health': None,
'serial': cert_serial
}
classify_cert(expire_check_result, now, time_remaining, expire_window, router_certs)
######################################################################
# Now for registry
try:
registry_secrets_raw = subprocess.Popen('oc get -n default secret registry-certificates -o yaml'.split(),
stdout=subprocess.PIPE)
registry_ds = yaml.load(registry_secrets_raw.communicate()[0])
registry_c = registry_ds['data']['registry.crt']
registry_path = registry_ds['metadata']['selfLink']
except TypeError:
# YAML couldn't load the result, this is not a master
pass
except OSError:
# The OC command doesn't exist here. Move along.
pass
else:
(cert_subject,
cert_expiry_date,
time_remaining,
cert_serial) = load_and_handle_cert(registry_c, now, base64decode=True, ans_module=module)
expire_check_result = {
'cert_cn': cert_subject,
'path': registry_path,
'expiry': cert_expiry_date,
'days_remaining': time_remaining.days,
'health': None,
'serial': cert_serial
}
classify_cert(expire_check_result, now, time_remaining, expire_window, registry_certs)
######################################################################
# /Check router/registry certs
######################################################################
res = tabulate_summary(ocp_certs, kubeconfigs, etcd_certs, router_certs, registry_certs)
msg = "Checked {count} total certificates. Expired/Warning/OK: {exp}/{warn}/{ok}. Warning window: {window} days".format(
count=res['total'],
exp=res['expired'],
warn=res['warning'],
ok=res['ok'],
window=int(module.params['warning_days']),
)
# By default we only return detailed information about expired or
# warning certificates. If show_all is true then we will print all
# the certificates examined.
if not module.params['show_all']:
check_results['ocp_certs'] = [crt for crt in ocp_certs if crt['health'] in ['expired', 'warning']]
check_results['kubeconfigs'] = [crt for crt in kubeconfigs if crt['health'] in ['expired', 'warning']]
check_results['etcd'] = [crt for crt in etcd_certs if crt['health'] in ['expired', 'warning']]
check_results['registry'] = [crt for crt in registry_certs if crt['health'] in ['expired', 'warning']]
check_results['router'] = [crt for crt in router_certs if crt['health'] in ['expired', 'warning']]
else:
check_results['ocp_certs'] = ocp_certs
check_results['kubeconfigs'] = kubeconfigs
check_results['etcd'] = etcd_certs
check_results['registry'] = registry_certs
check_results['router'] = router_certs
# Sort the final results to report in order of ascending safety
# time. That is to say, the certificates which will expire sooner
# will be at the front of the list and certificates which will
# expire later are at the end. Router and registry certs should be
# limited to just 1 result, so don't bother sorting those.
def cert_key(item):
''' return the days_remaining key '''
return item['days_remaining']
check_results['ocp_certs'] = sorted(check_results['ocp_certs'], key=cert_key)
check_results['kubeconfigs'] = sorted(check_results['kubeconfigs'], key=cert_key)
check_results['etcd'] = sorted(check_results['etcd'], key=cert_key)
# This module will never change anything, but we might want to
# change the return code parameter if there is some catastrophic
# error we noticed earlier
module.exit_json(
check_results=check_results,
summary=res,
msg=msg,
rc=0,
changed=False
)
if __name__ == '__main__':
main()
|
fitermay/intellij-community
|
refs/heads/master
|
python/testData/refactoring/extractmethod/Py479.after.py
|
79
|
class Foo:
def __init__(self):
self.tmp = False
def extract_method(self, condition1, condition2, condition3, condition4):
list = (1, 2, 3)
a = 6
b = False
if a in list or self.tmp:
if condition1:
print(condition1)
if b is not condition2:
print(b)
else:
self.bar(condition3, condition4)
def bar(self, condition3_new, condition4_new):
self.tmp2 = True
if condition3_new:
print(condition3_new)
if condition4_new:
print(condition4_new)
print("misterious extract method test")
f = Foo()
f.extract_method(True, True, True, True)
|
jeffery-do/Vizdoombot
|
refs/heads/master
|
doom/lib/python3.5/site-packages/theano/tensor/nnet/tests/test_nnet.py
|
3
|
from __future__ import print_function
import unittest
import numpy
from nose.plugins.skip import SkipTest
from six.moves import xrange
import theano
from theano import config
from theano import tensor as T
from theano import tensor
from theano import gof
from theano.tests import unittest_tools as utt
from theano import printing
from theano.tensor.nnet import (categorical_crossentropy,
crossentropy_categorical_1hot,
crossentropy_softmax_1hot,
crossentropy_softmax_1hot_with_bias,
crossentropy_softmax_1hot_with_bias_dx,
crossentropy_softmax_argmax_1hot_with_bias,
CrossentropySoftmax1HotWithBiasDx,
CrossentropySoftmaxArgmax1HotWithBias,
CrossentropyCategorical1Hot,
CrossentropyCategorical1HotGrad,
sigmoid, softplus, Softmax, softmax,
softmax_op, softmax_graph, SoftmaxWithBias,
softmax_with_bias, LogSoftmax, logsoftmax_op,
softmax_grad, SoftmaxGrad,
Prepend_scalar_constant_to_each_row,
Prepend_scalar_to_each_row,
relu,
h_softmax,
elu)
from theano.tensor import matrix, vector, lvector, scalar
class T_sigmoid(unittest.TestCase):
def setUp(self):
utt.seed_rng()
def test_elemwise(self):
utt.verify_grad(sigmoid, [numpy.random.rand(3, 4)])
class T_softplus(unittest.TestCase):
def setUp(self):
utt.seed_rng()
def test_elemwise(self):
utt.verify_grad(softplus, [numpy.random.rand(3, 4)])
class T_Softmax(utt.InferShapeTester):
def test0(self):
def f(a):
return softmax_op(a)[:, 0]
utt.verify_grad(f, [numpy.random.rand(3, 4)])
def test1(self):
def f(a):
return softmax_op(a)[:, 1]
utt.verify_grad(f, [numpy.random.rand(3, 4)])
def test2(self):
def f(a):
return softmax_op(a)[:, 2]
utt.verify_grad(f, [numpy.random.rand(3, 4)])
def test3(self):
def f(a):
return softmax_op(a)[:, 3]
utt.verify_grad(f, [numpy.random.rand(3, 4)])
def test_infer_shape(self):
admat = matrix()
admat_val = numpy.random.rand(3, 4).astype(config.floatX)
self._compile_and_check([admat], [Softmax()(admat)],
[admat_val], Softmax)
def test_vector(self):
x = T.vector()
f = theano.function([x], softmax_op(x))
xv = numpy.random.randn(6).astype(config.floatX)
assert numpy.allclose(f(xv), numpy.exp(xv) / numpy.exp(xv).sum())
def test_vector_grad(self):
def f(a):
return softmax_op(a)
utt.verify_grad(f, [numpy.random.rand(4)])
class T_SoftmaxWithBias(utt.InferShapeTester):
def test0(self):
def f(a, b):
return softmax_with_bias(a, b)[:, 0]
utt.verify_grad(f, [numpy.random.rand(3, 4),
numpy.random.rand(4)])
def test1(self):
def f(a, b):
return softmax_with_bias(a, b)[:, 1]
utt.verify_grad(f, [numpy.random.rand(3, 4),
numpy.random.rand(4)])
def test2(self):
def f(a, b):
return softmax_with_bias(a, b)[:, 2]
utt.verify_grad(f, [numpy.random.rand(3, 4),
numpy.random.rand(4)])
def test3(self):
def f(a, b):
return softmax_with_bias(a, b)[:, 3]
utt.verify_grad(f, [numpy.random.rand(3, 4),
numpy.random.rand(4)])
def test_broadcast(self):
        # test that we don't raise an error during optimization for no good
        # reason, as softmax_with_bias doesn't correctly support some/all
        # broadcasted input patterns
initial_W = numpy.asarray([[0.1, 0.1, 0.1],
[0.1, 0.1, 0.1],
[0.1, 0.1, 0.1]],
dtype=theano.config.floatX)
W = theano.shared(value=initial_W, name='W')
vbias = theano.shared(value=0.1, name='vbias') # 0.01
hid = T.vector('hid')
f = theano.function([hid],
T.nnet.softmax_op(T.dot(hid, W.T) + vbias))
ops = [node.op for node in f.maker.fgraph.toposort()]
assert softmax_with_bias not in ops
assert softmax_op in ops
f([0, 1, 0])
# print f.maker.fgraph.toposort()
def test_softmax_with_bias_trace(self):
a = theano.shared(
numpy.random.randn(3).astype(config.floatX))
b = theano.shared(numpy.float32(numpy.random.randn()))
sm = T.nnet.softmax(a + b)
f = theano.function([], sm)
self.assertTrue(hasattr(f.maker.fgraph.outputs[0].tag, 'trace'))
print('f.maker.fgraph.outputs[0]: {0}'.format(f.maker.fgraph.outputs[0], ))
def test_infer_shape(self):
admat = matrix()
advec = vector()
admat_val = numpy.random.rand(3, 4).astype(config.floatX)
advec_val = numpy.random.rand(4).astype(config.floatX)
self._compile_and_check([admat, advec],
[SoftmaxWithBias()(admat, advec)],
[admat_val, advec_val], SoftmaxWithBias)
class T_LogSoftmax(utt.InferShapeTester):
def test0(self):
def f(a):
return logsoftmax_op(a)[:, 0]
utt.verify_grad(f, [numpy.random.rand(3, 4)])
def test1(self):
def f(a):
return logsoftmax_op(a)[:, 1]
utt.verify_grad(f, [numpy.random.rand(3, 4)])
def test2(self):
def f(a):
return logsoftmax_op(a)[:, 2]
utt.verify_grad(f, [numpy.random.rand(3, 4)])
def test3(self):
def f(a):
return logsoftmax_op(a)[:, 3]
utt.verify_grad(f, [numpy.random.rand(3, 4)])
def test_matrix(self):
def f(a):
return logsoftmax_op(a)
utt.verify_grad(f, [numpy.random.rand(3, 4)])
def test_vector(self):
x = T.vector()
f = theano.function([x], logsoftmax_op(x))
xv = numpy.random.randn(6).astype(config.floatX)
assert numpy.allclose(f(xv),
numpy.log(numpy.exp(xv) / numpy.exp(xv).sum()))
def test_vector_grad(self):
def f(a):
return logsoftmax_op(a)
utt.verify_grad(f, [numpy.random.rand(4)])
def test_allclose(self):
m = theano.config.mode
m = theano.compile.get_mode(m)
m.check_isfinite = False
x, y = tensor.matrices('xy')
# regular softmax and crossentropy
sm = tensor.nnet.softmax(x)
cm = tensor.nnet.categorical_crossentropy(sm, y)
# numerically stable log-softmax with crossentropy
logsm = tensor.nnet.logsoftmax(x)
sm2 = tensor.exp(logsm) # just used to show equivalence with sm
cm2 = -tensor.sum(y * logsm, axis=1)
grad = tensor.grad(cm2.mean(), x)
# create some inputs into a softmax that are large and labels
a = numpy.exp(10 * numpy.random.rand(5, 10).astype(theano.config.floatX))
# create some one-hot coded labels
b = numpy.eye(5, 10).astype(theano.config.floatX)
# show equivalence of softmax and exponentiated numerically stable
# log-softmax
f1 = theano.function([x], [sm, sm2])
sm_, sm2_ = f1(a)
utt.assert_allclose(sm_, sm2_)
# now show that the two versions result in the same crossentropy cost
# this indicates that the forward function does provide some numerical
# stability
f2 = theano.function([x, y], [cm, cm2], mode=m)
cm_, cm2_ = f2(a, b)
utt.assert_allclose(cm_, cm2_)
# now, show that in the standard softmax case the gradients blow up
# while in the log-softmax case they don't
f3 = theano.function([x, y], [grad])
grad_ = f3(a, b)
assert numpy.all(numpy.isnan(grad_) == False)
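    # Note added for clarity: the stability of the log-softmax path comes from the
    # identity log(softmax(x))_i = x_i - log(sum_j(exp(x_j))), which avoids
    # exponentiating large inputs before the log is taken.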
def test_isclose(self):
def f(a):
return logsoftmax_op(a)
def test_local_softmax_optimization(self):
"""Test the Logsoftmax substitution
Check that Log(Softmax(x)) is substituted with Logsoftmax(x). Note that
only the forward pass is checked (i.e., doesn't check the gradient)
"""
x, y = tensor.matrices('xy')
sm = tensor.nnet.softmax(x)
logsm = tensor.log(sm)
f = theano.function([x], logsm)
self.assertTrue(hasattr(f.maker.fgraph.outputs[0].tag, 'trace'))
assert isinstance(f.maker.fgraph.outputs[0].owner.op,
theano.tensor.nnet.nnet.LogSoftmax)
def test_local_softmax_grad_optimization_and_big_input(self):
"""Test the Logsoftmax's grad substitution.
Check that Log(Softmax(x))'s grad is substituted with Logsoftmax(x)'s
grad and that the new operation does not explode for big inputs.
Note that only the grad is checked.
"""
m = theano.config.mode
m = theano.compile.get_mode(m)
m.check_isfinite = False
# some inputs that are large to make the gradient explode in the non
# optimized case
a = numpy.exp(10 * numpy.random.rand(5, 10).astype(theano.config.floatX))
def myfunc(x):
sm = tensor.nnet.softmax(x)
logsm = tensor.log(sm)
return logsm
# We set step to 0.1 because for big values we need a big epsilon
utt.verify_grad(myfunc, [a], eps=0.1, mode=m)
f = theano.function([], myfunc(a))
self.assertTrue(hasattr(f.maker.fgraph.outputs[0].tag, 'trace'))
class T_SoftmaxGrad(utt.InferShapeTester):
def test_infer_shape(self):
admat = matrix()
bdmat = matrix()
admat_val = numpy.random.rand(3, 4).astype(config.floatX)
bdmat_val = numpy.random.rand(3, 4).astype(config.floatX)
self._compile_and_check([admat, bdmat], [SoftmaxGrad()(admat, bdmat)],
[admat_val, bdmat_val], SoftmaxGrad)
class T_CrossentropySoftmax1Hot(unittest.TestCase):
def setUp(self):
utt.seed_rng()
def test0(self):
y_idx = [0, 1, 3]
def f(a, b):
return crossentropy_softmax_1hot_with_bias(a, b, y_idx)[0]
utt.verify_grad(f, [numpy.random.rand(3, 4),
numpy.random.rand(4)])
def test1(self):
y_idx = [0, 1, 3]
def f(a):
return crossentropy_softmax_1hot(a, y_idx)[0]
utt.verify_grad(f, [numpy.random.rand(3, 4)])
def test_vector(self):
y_idx = [3]
def f(a):
return crossentropy_softmax_1hot(T.shape_padleft(a), y_idx)[0]
utt.verify_grad(f, [numpy.random.rand(4)])
def test_vectors(self):
y_idx = [3]
def f(a, b):
return crossentropy_softmax_1hot(T.shape_padleft(a) + b, y_idx)[0]
utt.verify_grad(f, [numpy.random.rand(4), numpy.random.rand(4)])
class T_CrossentropySoftmax1HotWithBiasDx(utt.InferShapeTester):
def test0(self):
def ff(class_dtype):
def f(sm):
# Class indices
y = numpy.random.randint(low=0, high=5, size=10).astype(class_dtype)
return theano.tensor.nnet.crossentropy_softmax_1hot_with_bias_dx(
numpy.random.rand(10), # Gradient w.r.t. NLL.
sm, # Softmax output.
y)
return f
# Build a random softmax output whose rows sum to 1.
softmax_output = numpy.random.rand(10, 5)
softmax_output /= softmax_output.sum(axis=1).reshape(10, 1)
for dtype in ['uint8', 'int8', 'uint64', 'int64']:
utt.verify_grad(ff(dtype), [softmax_output])
def test1(self):
rng = numpy.random.RandomState(utt.fetch_seed())
softmax_output = rng.rand(10, 5)
softmax_output /= softmax_output.sum(axis=1).reshape(10, 1)
def f(dy):
return (theano.tensor.nnet.crossentropy_softmax_1hot_with_bias_dx(
dy,
softmax_output,
rng.randint(low=0, high=5, size=10)))
utt.verify_grad(f, [rng.rand(10)])
def test_infer_shape(self):
admat = matrix()
advec = vector()
alvec = lvector()
rng = numpy.random.RandomState(utt.fetch_seed())
admat_val = rng.rand(10, 5).astype(config.floatX)
admat_val /= admat_val.sum(axis=1).reshape(10, 1)
advec_val = rng.rand(10).astype(config.floatX)
alvec_val = rng.randint(low=0, high=5, size=10)
self._compile_and_check([advec, admat, alvec],
[CrossentropySoftmax1HotWithBiasDx()(advec, admat, alvec)],
[advec_val, admat_val, alvec_val],
CrossentropySoftmax1HotWithBiasDx)
def test_neg_idx(self):
admat = matrix()
advec = vector()
alvec = lvector()
rng = numpy.random.RandomState(utt.fetch_seed())
admat_val = rng.rand(10, 5).astype(config.floatX)
admat_val /= admat_val.sum(axis=1).reshape(10, 1)
advec_val = rng.rand(10).astype(config.floatX)
alvec_val = rng.randint(low=0, high=5, size=10)
alvec_val[1] = -1
out = CrossentropySoftmax1HotWithBiasDx()(advec, admat, alvec)
f = theano.function([advec, admat, alvec], out)
self.assertRaises(ValueError, f, advec_val, admat_val, alvec_val)
class T_CrossentropySoftmaxArgmax1HotWithBias(utt.InferShapeTester):
def setUp(self):
super(T_CrossentropySoftmaxArgmax1HotWithBias, self).setUp()
self.op = theano.tensor.nnet.crossentropy_softmax_argmax_1hot_with_bias
def test0(self):
n_classes = 5
n_samples = 3
# First test gradient when getting a gradient on the NLL output.
def grad_on_nll_dtype(dtype):
def grad_on_nll(x, b):
y_idx = numpy.random.randint(low=0, high=n_classes, size=n_samples).astype(dtype)
return self.op(x, b, y_idx=y_idx)[0]
return grad_on_nll
for dtype in ['uint8', 'int8', 'uint64', 'int64']:
utt.verify_grad(grad_on_nll_dtype(dtype),
[numpy.random.rand(n_samples, n_classes),
numpy.random.rand(n_classes)])
# Then test gradient when getting a gradient on the softmax output.
def grad_on_softmax(x, b):
return self.op(x, b, y_idx=numpy.random.randint(
low=0, high=n_classes, size=n_samples))[1]
utt.verify_grad(grad_on_softmax,
[numpy.random.rand(n_samples, n_classes),
numpy.random.rand(n_classes)])
def test_infer_shape(self):
admat = matrix()
advec = vector()
alvec = lvector()
rng = numpy.random.RandomState(utt.fetch_seed())
admat_val = rng.rand(3, 5).astype(config.floatX)
advec_val = rng.rand(5).astype(config.floatX)
alvec_val = rng.randint(low=0, high=5, size=3)
self._compile_and_check([admat, advec, alvec],
CrossentropySoftmaxArgmax1HotWithBias()(admat, advec, alvec),
[admat_val, advec_val, alvec_val],
CrossentropySoftmaxArgmax1HotWithBias)
def test_neg_idx(self):
admat = matrix()
advec = vector()
alvec = lvector()
rng = numpy.random.RandomState(utt.fetch_seed())
admat_val = rng.rand(3, 5).astype(config.floatX)
advec_val = rng.rand(5).astype(config.floatX)
alvec_val = rng.randint(low=0, high=5, size=3)
alvec_val[1] = -1
out = CrossentropySoftmaxArgmax1HotWithBias()(admat, advec, alvec)
f = theano.function([admat, advec, alvec], out)
self.assertRaises(ValueError, f, admat_val, advec_val, alvec_val)
class T_prepend(utt.InferShapeTester):
def test0(self):
x = tensor.matrix('x')
y = Prepend_scalar_constant_to_each_row(4.)(x)
f = theano.function([x], y)
m = numpy.random.rand(3, 5).astype(config.floatX)
my = f(m)
self.assertTrue(my.shape == (3, 6), my.shape)
self.assertTrue(numpy.all(my[:, 0] == 4.0))
def test1(self):
"basic functionality"
x = tensor.matrix('x')
y = Prepend_scalar_to_each_row()(5., x)
f = theano.function([x], y)
m = numpy.ones((3, 5), dtype="float32")
my = f(m)
self.assertTrue(my.shape == (3, 6))
self.assertTrue(numpy.all(my[:, 0] == 5.0))
def test_infer_shape(self):
admat = matrix()
adscal = scalar()
rng = numpy.random.RandomState(utt.fetch_seed())
admat_val = rng.rand(3, 5).astype(config.floatX)
adscal_val = numpy.asarray(rng.rand(), dtype=config.floatX).item()
self._compile_and_check([admat],
[Prepend_scalar_constant_to_each_row(adscal_val)(admat)],
[admat_val],
Prepend_scalar_constant_to_each_row)
self._compile_and_check([adscal, admat],
[Prepend_scalar_to_each_row()(adscal, admat)],
[adscal_val, admat_val],
Prepend_scalar_to_each_row)
class T_CrossentropyCategorical1HotGrad(utt.InferShapeTester):
def test_infer_shape(self):
advec = vector()
admat = matrix()
alvec = lvector()
rng = numpy.random.RandomState(utt.fetch_seed())
advec_val = rng.rand(3).astype(config.floatX)
admat_val = rng.rand(3, 2).astype(config.floatX)
alvec_val = [0, 1, 0]
self._compile_and_check([advec, admat, alvec],
[CrossentropyCategorical1HotGrad()(advec, admat, alvec)],
[advec_val, admat_val, alvec_val],
CrossentropyCategorical1HotGrad)
class T_CrossentropyCategorical1Hot(utt.InferShapeTester):
def test_grad(self):
x = tensor.matrix('x')
one_of_n = tensor.lvector('one_of_n')
op = crossentropy_categorical_1hot
xe = op(x, one_of_n)
f = theano.function([x, one_of_n], xe)
x_val = numpy.asarray([[.4, .6, .0], [.1, .8, .1]],
dtype=config.floatX)
xe_val = f(x_val, [0, 1])
assert numpy.allclose(xe_val, -numpy.log([.4, .8]))
def oplike(x):
return op(x, [0, 1])
tensor.verify_grad(oplike, [x_val], rng=numpy.random)
def test_infer_shape(self):
admat = matrix()
alvec = lvector()
rng = numpy.random.RandomState(utt.fetch_seed())
admat_val = rng.rand(3, 2).astype(config.floatX)
alvec_val = [0, 1, 0]
self._compile_and_check([admat, alvec],
[CrossentropyCategorical1Hot()(admat, alvec)],
[admat_val, alvec_val],
CrossentropyCategorical1Hot)
def test_softmax_optimizations(self):
x = tensor.matrix('x')
one_of_n = tensor.lvector('one_of_n')
op = crossentropy_categorical_1hot
xe = op(x, one_of_n)
fgraph = gof.FunctionGraph(
[x, one_of_n],
[op(softmax_op(x), one_of_n)])
assert fgraph.outputs[0].owner.op == op
theano.compile.mode.optdb.query(
theano.compile.mode.OPT_FAST_RUN).optimize(fgraph)
assert str(fgraph.outputs[0].owner.op) == 'OutputGuard'
assert (fgraph.outputs[0].owner.inputs[0].owner.op ==
crossentropy_softmax_argmax_1hot_with_bias)
def test_softmax_optimizations_vector(self):
x = tensor.vector('x')
one_of_n = tensor.lvector('one_of_n')
op = crossentropy_categorical_1hot
fgraph = gof.FunctionGraph(
[x, one_of_n],
[op(softmax_op(x), one_of_n)])
assert fgraph.outputs[0].owner.op == op
theano.compile.mode.optdb.query(
theano.compile.mode.OPT_FAST_RUN).optimize(fgraph)
assert str(fgraph.outputs[0].owner.op) == 'OutputGuard'
assert (fgraph.outputs[0].owner.inputs[0].owner.op ==
crossentropy_softmax_argmax_1hot_with_bias)
def test_softmax_optimizations_w_bias(self):
x = tensor.matrix('x')
b = tensor.vector('b')
one_of_n = tensor.lvector('one_of_n')
op = crossentropy_categorical_1hot
xe = op(x, one_of_n)
fgraph = gof.FunctionGraph(
[x, b, one_of_n],
[op(softmax_op(x + b), one_of_n)])
assert fgraph.outputs[0].owner.op == op
# print 'BEFORE'
# for node in fgraph.toposort():
# print node.op
# print printing.pprint(node.outputs[0])
# print '----'
theano.compile.mode.optdb.query(
theano.compile.mode.OPT_FAST_RUN).optimize(fgraph)
# print 'AFTER'
# for node in fgraph.toposort():
# print node.op
# print printing.pprint(node.outputs[0])
# print '===='
assert len(fgraph.toposort()) == 2
assert str(fgraph.outputs[0].owner.op) == 'OutputGuard'
assert (fgraph.outputs[0].owner.inputs[0].owner.op ==
crossentropy_softmax_argmax_1hot_with_bias)
def test_softmax_optimizations_w_bias2(self):
x = tensor.matrix('x')
b = tensor.vector('b')
c = tensor.vector('c')
one_of_n = tensor.lvector('one_of_n')
op = crossentropy_categorical_1hot
fgraph = gof.FunctionGraph(
[x, b, c, one_of_n],
[op(softmax_op(T.add(x, b, c)), one_of_n)])
assert fgraph.outputs[0].owner.op == op
# print 'BEFORE'
# for node in fgraph.toposort():
# print node.op
# print '----'
theano.compile.mode.optdb.query(
theano.compile.mode.OPT_FAST_RUN).optimize(fgraph)
# print 'AFTER'
# for node in fgraph.toposort():
# print node.op
# print '===='
assert len(fgraph.toposort()) == 3
assert str(fgraph.outputs[0].owner.op) == 'OutputGuard'
assert (fgraph.outputs[0].owner.inputs[0].owner.op ==
crossentropy_softmax_argmax_1hot_with_bias)
def test_softmax_optimizations_w_bias_vector(self):
x = tensor.vector('x')
b = tensor.vector('b')
one_of_n = tensor.lvector('one_of_n')
op = crossentropy_categorical_1hot
fgraph = gof.FunctionGraph(
[x, b, one_of_n],
[op(softmax_op(x + b), one_of_n)])
assert fgraph.outputs[0].owner.op == op
# print 'BEFORE'
# for node in fgraph.toposort():
# print node.op
# print printing.pprint(node.outputs[0])
# print '----'
theano.compile.mode.optdb.query(
theano.compile.mode.OPT_FAST_RUN).optimize(fgraph)
# print 'AFTER'
# for node in fgraph.toposort():
# print node.op
# print '===='
assert len(fgraph.toposort()) == 3
assert str(fgraph.outputs[0].owner.op) == 'OutputGuard'
assert (fgraph.outputs[0].owner.inputs[0].owner.op ==
crossentropy_softmax_argmax_1hot_with_bias)
def test_softmax_grad_optimizations(self):
x = tensor.matrix('x')
one_of_n = tensor.lvector('one_of_n')
op = crossentropy_categorical_1hot
xe = op(softmax_op(x), one_of_n)
sum_xe = tensor.sum(xe)
g_x = tensor.grad(sum_xe, x)
fgraph = gof.FunctionGraph(
[x, one_of_n],
[g_x])
self.assertTrue(hasattr(fgraph.outputs[0].tag, 'trace'))
# print 'BEFORE'
# for node in fgraph.toposort():
# print node.op, node.inputs
# print '----'
theano.compile.mode.optdb.query(
theano.compile.mode.OPT_FAST_RUN).optimize(fgraph)
# print 'AFTER'
# for node in fgraph.toposort():
# print node.op, node.inputs
has_cx1hot = False
has_cx1hotdx = False
has_softmax = False
has_softmaxdx = False
for node in fgraph.toposort():
if node.op == crossentropy_softmax_argmax_1hot_with_bias:
has_cx1hot = True
if node.op == crossentropy_softmax_1hot_with_bias_dx:
has_cx1hotdx = True
if node.op == softmax_op:
has_softmax = True
if node.op == softmax_grad:
has_softmaxdx = True
assert not has_cx1hot
assert has_cx1hotdx
assert has_softmax
assert not has_softmaxdx
def test_softmax_grad_optimizations_vector(self):
x = tensor.vector('x')
one_of_n = tensor.lvector('one_of_n')
op = crossentropy_categorical_1hot
xe = op(softmax_op(x), one_of_n)
sum_xe = tensor.sum(xe)
g_x = tensor.grad(sum_xe, x)
fgraph = gof.FunctionGraph(
[x, one_of_n],
[g_x])
# print 'BEFORE'
# for node in fgraph.toposort():
# print node.op, node.inputs
# print '----'
theano.compile.mode.optdb.query(
theano.compile.mode.OPT_FAST_RUN).optimize(fgraph)
# print 'AFTER'
# for node in fgraph.toposort():
# print node.op, node.inputs
has_cx1hot = False
has_cx1hotdx = False
has_softmax = False
has_softmaxdx = False
for node in fgraph.toposort():
if node.op == crossentropy_softmax_argmax_1hot_with_bias:
has_cx1hot = True
if node.op == crossentropy_softmax_1hot_with_bias_dx:
has_cx1hotdx = True
if node.op == softmax_op:
has_softmax = True
if node.op == softmax_grad:
has_softmaxdx = True
assert not has_cx1hot
assert has_cx1hotdx
assert has_softmax
assert not has_softmaxdx
def test_get_rid_of_advanced_indexing_version_of_xent(self):
verbose = 0
# TODO: add the optimization in FAST_COMPILE?
        # In the meantime, run it as 'FAST_RUN' instead
mode = theano.compile.mode.get_default_mode()
if mode == theano.compile.mode.get_mode('FAST_COMPILE'):
mode = 'FAST_RUN'
rng = numpy.random.RandomState(utt.fetch_seed())
x_val = rng.randn(3, 5).astype(config.floatX)
b_val = rng.randn(5).astype(config.floatX)
y_val = numpy.asarray([2, 4, 1])
x = T.matrix('x')
b = T.vector('b')
y = T.lvector('y')
# Basic case
expressions = [
T.sum(-T.log(softmax(x)[T.arange(y.shape[0]), y])),
-T.sum(T.log(softmax(x)[T.arange(y.shape[0]), y])),
-T.sum(T.log(softmax(x))[T.arange(y.shape[0]), y]),
T.sum(-T.log(softmax(x))[T.arange(y.shape[0]), y])
]
for expr in expressions:
# Verify the optimizer worked on the expressions
f = theano.function([x, y], expr, mode=mode)
self.assertTrue(hasattr(f.maker.fgraph.outputs[0].tag, 'trace'))
if verbose:
theano.printing.debugprint(f)
try:
ops = [node.op for node in f.maker.fgraph.toposort()]
assert len(ops) == 4
assert crossentropy_softmax_argmax_1hot_with_bias in ops
assert not [1 for o in ops
if isinstance(o, T.AdvancedSubtensor)]
f(x_val, y_val)
except Exception:
theano.printing.debugprint(f)
raise
# Also verify the gradient wrt x
g = theano.function([x, y], T.grad(expr, x), mode=mode)
self.assertTrue(hasattr(g.maker.fgraph.outputs[0].tag, 'trace'))
if verbose:
theano.printing.debugprint(g)
try:
ops = [node.op for node in g.maker.fgraph.toposort()]
assert len(ops) == 2
assert crossentropy_softmax_1hot_with_bias_dx in ops
assert softmax_op in ops
assert softmax_grad not in ops
g(x_val, y_val)
except Exception:
theano.printing.debugprint(g)
raise
# Test that a biased softmax is optimized correctly
bias_expressions = [
T.sum(-T.log(softmax(x + b)[T.arange(y.shape[0]), y])),
-T.sum(T.log(softmax(b + x)[T.arange(y.shape[0]), y])),
-T.sum(T.log(softmax(x + b))[T.arange(y.shape[0]), y]),
T.sum(-T.log(softmax(b + x))[T.arange(y.shape[0]), y])]
for expr in bias_expressions:
f = theano.function([x, b, y], expr, mode=mode)
self.assertTrue(hasattr(f.maker.fgraph.outputs[0].tag, 'trace'))
if verbose:
theano.printing.debugprint(f)
try:
ops = [node.op for node in f.maker.fgraph.toposort()]
assert len(ops) == 2 # [big_op, sum]
assert crossentropy_softmax_argmax_1hot_with_bias in ops
f(x_val, b_val, y_val)
except Exception:
theano.printing.debugprint(f)
raise
g = theano.function([x, b, y], T.grad(expr, x), mode=mode)
self.assertTrue(hasattr(g.maker.fgraph.outputs[0].tag, 'trace'))
if verbose:
theano.printing.debugprint(g)
try:
ops = [node.op for node in g.maker.fgraph.toposort()]
assert len(ops) == 2
assert crossentropy_softmax_1hot_with_bias_dx in ops
assert softmax_with_bias in ops
assert softmax_grad not in ops
g(x_val, b_val, y_val)
except Exception:
theano.printing.debugprint(g)
raise
# Test that using "mean" instead of sum works, too
mean_expressions = [
T.mean(-T.log(softmax(x)[T.arange(y.shape[0]), y])),
-T.mean(T.log(softmax(x)[T.arange(y.shape[0]), y])),
-T.mean(T.log(softmax(x))[T.arange(y.shape[0]), y]),
T.mean(-T.log(softmax(x))[T.arange(y.shape[0]), y])]
for expr in mean_expressions:
f = theano.function([x, y], expr, mode=mode)
self.assertTrue(hasattr(f.maker.fgraph.outputs[0].tag, 'trace'))
if verbose:
theano.printing.debugprint(f)
try:
ops = [node.op for node in f.maker.fgraph.toposort()]
assert len(ops) == 6
assert crossentropy_softmax_argmax_1hot_with_bias in ops
assert not [1 for o in ops
if isinstance(o, T.AdvancedSubtensor)]
f(x_val, y_val)
except Exception:
theano.printing.debugprint(f)
raise
g = theano.function([x, y], T.grad(expr, x), mode=mode)
self.assertTrue(hasattr(g.maker.fgraph.outputs[0].tag, 'trace'))
if verbose:
theano.printing.debugprint(g)
try:
ops = [node.op for node in g.maker.fgraph.toposort()]
assert len(ops) == 5
#there's an extra dimshuffle in there
# but I can't think of a good rule to get rid of it
assert crossentropy_softmax_1hot_with_bias_dx in ops
assert softmax_op in ops
assert softmax_grad not in ops
g(x_val, y_val)
except Exception:
theano.printing.debugprint(g)
raise
mean_bias_expressions = [
T.mean(-T.log(softmax(x + b)[T.arange(y.shape[0]), y])),
-T.mean(T.log(softmax(b + x)[T.arange(y.shape[0]), y])),
-T.mean(T.log(softmax(x + b))[T.arange(y.shape[0]), y]),
T.mean(-T.log(softmax(b + x))[T.arange(y.shape[0]), y])]
for expr in mean_bias_expressions:
f = theano.function([x, b, y], expr, mode=mode)
self.assertTrue(hasattr(f.maker.fgraph.outputs[0].tag, 'trace'))
if verbose:
theano.printing.debugprint(f)
try:
ops = [node.op for node in f.maker.fgraph.toposort()]
assert len(ops) == 4
assert crossentropy_softmax_argmax_1hot_with_bias in ops
assert not [1 for o in ops
if isinstance(o, T.AdvancedSubtensor)]
except Exception:
theano.printing.debugprint(f)
raise
g = theano.function([x, b, y], T.grad(expr, x), mode=mode)
self.assertTrue(hasattr(g.maker.fgraph.outputs[0].tag, 'trace'))
if verbose:
theano.printing.debugprint(g)
try:
ops = [node.op for node in g.maker.fgraph.toposort()]
assert len(ops) == 5
assert crossentropy_softmax_1hot_with_bias_dx in ops
assert softmax_with_bias in ops
assert softmax_grad not in ops
g(x_val, b_val, y_val)
except Exception:
theano.printing.debugprint(g)
raise
def test_xent_thing_int32(self):
verbose = 0
mode = theano.compile.mode.get_default_mode()
if mode == theano.compile.mode.get_mode('FAST_COMPILE'):
mode = 'FAST_RUN'
rng = numpy.random.RandomState(utt.fetch_seed())
x_val = rng.randn(3, 5).astype(config.floatX)
y_val = numpy.asarray([2, 4, 1], dtype='int64')
x = T.matrix('x')
y = T.lvector('y')
yi = T.cast(y, 'int32')
expressions = [
T.sum(-T.log(softmax(x)[T.arange(yi.shape[0]), yi])),
-T.sum(T.log(softmax(x)[T.arange(yi.shape[0]), yi])),
-T.sum(T.log(softmax(x))[T.arange(yi.shape[0]), yi]),
T.sum(-T.log(softmax(x))[T.arange(yi.shape[0]), yi])
]
for expr in expressions:
# Verify the optimizer worked on the expressions
f = theano.function([x, y], expr, mode=mode)
if verbose:
theano.printing.debugprint(f)
try:
ops = [node.op for node in f.maker.fgraph.toposort()]
assert len(ops) == 5
assert crossentropy_softmax_argmax_1hot_with_bias in ops
assert not [1 for o in ops
if isinstance(o, T.AdvancedSubtensor)]
f(x_val, y_val)
except Exception:
theano.printing.debugprint(f)
raise
# Also verify the gradient wrt x
g = theano.function([x, y], T.grad(expr, x), mode=mode)
if verbose:
theano.printing.debugprint(g)
try:
ops = [node.op for node in g.maker.fgraph.toposort()]
assert len(ops) == 3
assert crossentropy_softmax_1hot_with_bias_dx in ops
assert softmax_op in ops
assert softmax_grad not in ops
g(x_val, y_val)
except Exception:
theano.printing.debugprint(g)
raise
def test_optimize_xent_vector(self):
verbose = 0
mode = theano.compile.mode.get_default_mode()
if mode == theano.compile.mode.get_mode('FAST_COMPILE'):
mode = 'FAST_RUN'
rng = numpy.random.RandomState(utt.fetch_seed())
x_val = rng.randn(5).astype(config.floatX)
y_val = numpy.asarray([2])
x = T.vector('x')
y = T.lvector('y')
# Test that a biased softmax is optimized correctly
bias_expressions = [
T.sum(-T.log(softmax(x)[T.arange(y.shape[0]), y])),
-T.sum(T.log(softmax(x)[T.arange(y.shape[0]), y]))]
for expr in bias_expressions:
f = theano.function([x, y], expr, mode=mode)
if verbose:
printing.debugprint(f)
try:
ops = [node.op for node in f.maker.fgraph.toposort()]
assert len(ops) == 5
assert crossentropy_softmax_argmax_1hot_with_bias in ops
assert not [1 for o in ops
if isinstance(o, T.AdvancedSubtensor)]
f(x_val, y_val)
except Exception:
theano.printing.debugprint(f)
raise
g = theano.function([x, y], T.grad(expr, x), mode=mode)
if verbose:
printing.debugprint(g)
try:
ops = [node.op for node in g.maker.fgraph.toposort()]
assert len(ops) == 4
assert crossentropy_softmax_1hot_with_bias_dx in ops
assert softmax_op in ops
assert softmax_grad not in ops
g(x_val, y_val)
except Exception:
theano.printing.debugprint(g)
raise
def test_optimize_xent_vector2(self):
verbose = 0
mode = theano.compile.mode.get_default_mode()
if mode == theano.compile.mode.get_mode('FAST_COMPILE'):
mode = 'FAST_RUN'
rng = numpy.random.RandomState(utt.fetch_seed())
x_val = rng.randn(5).astype(config.floatX)
b_val = rng.randn(5).astype(config.floatX)
y_val = numpy.asarray([2])
x = T.vector('x')
b = T.vector('b')
y = T.lvector('y')
# Test that a biased softmax is optimized correctly
bias_expressions = [
T.sum(-T.log(softmax(x + b)[T.arange(y.shape[0]), y])),
-T.sum(T.log(softmax(b + x)[T.arange(y.shape[0]), y])),
-T.sum(T.log(softmax(x + b))[T.arange(y.shape[0]), y]),
T.sum(-T.log(softmax(b + x))[T.arange(y.shape[0]), y])]
for expr in bias_expressions:
f = theano.function([x, b, y], expr, mode=mode)
if verbose:
printing.debugprint(f)
try:
ops = [node.op for node in f.maker.fgraph.toposort()]
# [big_op, sum, dim_shuffle]
assert len(ops) == 3
assert crossentropy_softmax_argmax_1hot_with_bias in ops
assert not [1 for o in ops
if isinstance(o, T.AdvancedSubtensor)]
f(x_val, b_val, y_val)
except Exception:
theano.printing.debugprint(f)
raise
backup = config.warn.sum_div_dimshuffle_bug
config.warn.sum_div_dimshuffle_bug = False
try:
g = theano.function([x, b, y], T.grad(expr, x), mode=mode)
finally:
config.warn.sum_div_dimshuffle_bug = backup
if verbose:
printing.debugprint(g)
try:
ops = [node.op for node in g.maker.fgraph.toposort()]
assert len(ops) <= 6
assert crossentropy_softmax_1hot_with_bias_dx in ops
assert softmax_with_bias in ops
assert softmax_grad not in ops
g(x_val, b_val, y_val)
except Exception:
theano.printing.debugprint(g)
raise
def test_optimize_xent_vector3(self):
# Same as test_optimize_xent_vector2, but y is the result of
# a "flatten", and it used to make the constant-folding
# of arange(y.shape[0]) happen before the xent optimization
verbose = 0
mode = theano.compile.mode.get_default_mode()
if mode == theano.compile.mode.get_mode('FAST_COMPILE'):
mode = 'FAST_RUN'
rng = numpy.random.RandomState(utt.fetch_seed())
x_val = rng.randn(5).astype(config.floatX)
b_val = rng.randn(5).astype(config.floatX)
y_val = numpy.asarray([2])
x = T.vector('x')
b = T.vector('b')
y_ = T.lvector('y_')
y = y_.flatten()
# Test that a biased softmax is optimized correctly
bias_expressions = [
T.sum(-T.log(softmax(x + b)[T.arange(y.shape[0]), y])),
-T.sum(T.log(softmax(b + x)[T.arange(y.shape[0]), y])),
-T.sum(T.log(softmax(x + b))[T.arange(y.shape[0]), y]),
T.sum(-T.log(softmax(b + x))[T.arange(y.shape[0]), y])]
for expr in bias_expressions:
f = theano.function([x, b, y_], expr, mode=mode)
if verbose:
printing.debugprint(f)
try:
ops = [node.op for node in f.maker.fgraph.toposort()]
# [big_op, sum, dim_shuffle, flatten]
assert len(ops) <= 4
assert crossentropy_softmax_argmax_1hot_with_bias in ops
assert not [1 for o in ops
if isinstance(o, T.AdvancedSubtensor)]
f(x_val, b_val, y_val)
except Exception:
theano.printing.debugprint(f)
raise
backup = config.warn.sum_div_dimshuffle_bug
config.warn.sum_div_dimshuffle_bug = False
try:
g = theano.function([x, b, y], T.grad(expr, x), mode=mode)
finally:
config.warn.sum_div_dimshuffle_bug = backup
if verbose:
printing.debugprint(g)
try:
ops = [node.op for node in g.maker.fgraph.toposort()]
assert len(ops) <= 6
assert crossentropy_softmax_1hot_with_bias_dx in ops
assert softmax_with_bias in ops
assert softmax_grad not in ops
g(x_val, b_val, y_val)
except Exception:
theano.printing.debugprint(g)
raise
def test_optimize_xent_vector4(self):
# Same as test_optimize_xent_vector2, but y is the result of
# a "specify_shape" that indicates its length is 1, so the
        # constant-folding of arange(y.shape[0]) happens before the xent
# optimization
verbose = 0
mode = theano.compile.mode.get_default_mode()
if mode == theano.compile.mode.get_mode('FAST_COMPILE'):
mode = 'FAST_RUN'
rng = numpy.random.RandomState(utt.fetch_seed())
x_val = rng.randn(5).astype(config.floatX)
b_val = rng.randn(5).astype(config.floatX)
y_val = numpy.asarray([2])
x = T.vector('x')
b = T.vector('b')
y_ = T.lvector('y_')
y = T.specify_shape(y_, (1,))
# Test that a biased softmax is optimized correctly
bias_expressions = [
T.sum(-T.log(softmax(x + b)[T.arange(y.shape[0]), y])),
-T.sum(T.log(softmax(b + x)[T.arange(y.shape[0]), y])),
-T.sum(T.log(softmax(x + b))[T.arange(y.shape[0]), y]),
T.sum(-T.log(softmax(b + x))[T.arange(y.shape[0]), y])]
for expr in bias_expressions:
f = theano.function([x, b, y_], expr, mode=mode)
if verbose:
printing.debugprint(f)
try:
ops = [node.op for node in f.maker.fgraph.toposort()]
# [big_op, sum, dim_shuffle, specify_shape]
assert len(ops) <= 4
assert crossentropy_softmax_argmax_1hot_with_bias in ops
assert not [1 for o in ops
if isinstance(o, T.AdvancedSubtensor)]
f(x_val, b_val, y_val)
except Exception:
theano.printing.debugprint(f)
raise
backup = config.warn.sum_div_dimshuffle_bug
config.warn.sum_div_dimshuffle_bug = False
try:
g = theano.function([x, b, y], T.grad(expr, x), mode=mode)
finally:
config.warn.sum_div_dimshuffle_bug = backup
if verbose:
printing.debugprint(g)
try:
ops = [node.op for node in g.maker.fgraph.toposort()]
assert len(ops) <= 6
assert crossentropy_softmax_1hot_with_bias_dx in ops
assert softmax_with_bias in ops
assert softmax_grad not in ops
g(x_val, b_val, y_val)
except Exception:
theano.printing.debugprint(g)
raise
def test_crossentropy_softmax_1hot_with_bias_dxcale_cost(self):
# TODO: add the optimization in FAST_COMPILE?
# In the mean time, run it as 'FAST_RUN' instead
mode = theano.compile.mode.get_default_mode()
if mode == theano.compile.mode.get_mode('FAST_COMPILE'):
mode = 'FAST_RUN'
rng = numpy.random.RandomState(utt.fetch_seed())
x_val = rng.randn(3, 5).astype(config.floatX)
y_val = numpy.asarray([2, 4, 1])
x = T.matrix('x')
y = T.lvector('y')
a = T.scalar('a')
def validate_fn_graph(func):
# The graph of the function should not have softmax anymore
has_cx1hot = False
has_softmax = False
for node in func.maker.fgraph.toposort():
if node.op == crossentropy_softmax_argmax_1hot_with_bias:
has_cx1hot = True
if node.op == softmax_op:
has_softmax = True
assert has_cx1hot
assert not has_softmax
def validate_grad_graph(func):
# The graph of the gradient should not have softmaxgrad anymore
has_cx1hotdx = False
has_softmax = False
has_softmaxdx = False
for node in func.maker.fgraph.toposort():
if node.op == crossentropy_softmax_1hot_with_bias_dx:
has_cx1hotdx = True
if node.op == softmax_op:
has_softmax = True
if node.op == softmax_grad:
has_softmaxdx = True
assert has_cx1hotdx
assert has_softmax
assert not has_softmaxdx
# Cases to test
expressions = [
a * T.sum(-T.log(softmax(x)[T.arange(y.shape[0]), y])),
-a * T.sum(T.log(softmax(x)[T.arange(y.shape[0]), y])),
a * (-T.sum(T.log(softmax(x)[T.arange(y.shape[0]), y]))),
a * T.sum(T.log(softmax(x)[T.arange(y.shape[0]), y])),
a * T.sum(-T.log(softmax(x))[T.arange(y.shape[0]), y]),
-a * T.sum(T.log(softmax(x))[T.arange(y.shape[0]), y]),
a * (-T.sum(T.log(softmax(x))[T.arange(y.shape[0]), y])),
a * T.sum(T.log(softmax(x))[T.arange(y.shape[0]), y]),
a * T.mean(-T.log(softmax(x)[T.arange(y.shape[0]), y])),
-a * T.mean(T.log(softmax(x)[T.arange(y.shape[0]), y])),
a * (-T.mean(T.log(softmax(x)[T.arange(y.shape[0]), y]))),
a * T.mean(T.log(softmax(x)[T.arange(y.shape[0]), y])),
a * T.mean(-T.log(softmax(x))[T.arange(y.shape[0]), y]),
-a * T.mean(T.log(softmax(x))[T.arange(y.shape[0]), y]),
a * (-T.mean(T.log(softmax(x))[T.arange(y.shape[0]), y])),
a * T.mean(T.log(softmax(x))[T.arange(y.shape[0]), y]),
]
for expr in expressions:
# Verify the optimizer worked on the expressions
f = theano.function([x, y, a], expr, mode=mode)
try:
assert 5 <= len(f.maker.fgraph.toposort()) <= 10
validate_fn_graph(f)
f(x_val, y_val, 0.1)
except Exception:
theano.printing.debugprint(f)
raise
# Verify the gradient wrt x
g = theano.function([x, y, a], T.grad(expr, x), mode=mode)
try:
assert 3 <= len(g.maker.fgraph.toposort()) <= 6
validate_grad_graph(g)
g(x_val, y_val, 0.1)
except Exception:
theano.printing.debugprint(g)
raise
# Verify the gradient when providing output gradient
h = theano.function([x, y, a],
T.grad(expr, x, known_grads={expr: a * x.sum()}), mode=mode)
try:
assert 6 <= len(h.maker.fgraph.toposort()) <= 8
validate_grad_graph(h)
h(x_val, y_val, 0.1)
except Exception:
theano.printing.debugprint(h)
raise
def test_argmax_pushdown():
x = tensor.matrix()
for sm in [softmax_graph, softmax_op]:
# test that the max_and_argmax is pushed down if the max is not used
out = tensor.max_and_argmax(
sm(tensor.exp(tensor.tanh(sigmoid(x)))),
axis=-1)[1]
fgraph = gof.FunctionGraph(
[x],
[out])
theano.compile.mode.optdb.query(
theano.compile.mode.OPT_FAST_RUN).optimize(fgraph)
# print 'AFTER'
# for node in fgraph.toposort():
# print node.op
assert len(fgraph.toposort()) == 2 # an output_guard is second
assert fgraph.toposort()[0].op == tensor.basic._max_and_argmax
assert str(fgraph.toposort()[1].op) == 'OutputGuard'
x = tensor.matrix()
# test that the max_and_argmax is not pushed down if the max is used
out = tensor.max_and_argmax(
sm(tensor.exp(tensor.tanh(sigmoid(x)))),
axis=-1)[0]
fgraph = gof.FunctionGraph(
[x],
[out])
assert hasattr(fgraph.outputs[0].tag, 'trace')
backup = config.warn.argmax_pushdown_bug
config.warn.argmax_pushdown_bug = False
try:
theano.compile.mode.optdb.query(
theano.compile.mode.OPT_FAST_RUN).optimize(fgraph)
finally:
config.warn.argmax_pushdown_bug = backup
# print 'AFTER'
# for node in fgraph.toposort():
# print node.op
        assert len(fgraph.toposort()) == 4  # an output_guard is the last node
assert isinstance(fgraph.toposort()[0].op, tensor.Elemwise)
assert isinstance(fgraph.toposort()[1].op, Softmax)
assert isinstance(fgraph.toposort()[2].op, tensor.CAReduce)
assert isinstance(fgraph.toposort()[2].op.scalar_op, theano.scalar.Maximum)
assert str(fgraph.toposort()[3].op) == 'OutputGuard'
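# Illustrative aside (not part of the original test file): the pushdown exercised above
# is valid because softmax is a strictly increasing function of each entry within a row,
# so argmax(softmax(x)) and argmax(x) pick the same indices. A minimal numpy sketch:
def _sketch_argmax_pushdown_equivalence():
    rng = numpy.random.RandomState(0)
    xv = rng.randn(4, 7)
    ev = numpy.exp(xv - xv.max(axis=-1, keepdims=True))
    smv = ev / ev.sum(axis=-1, keepdims=True)
    # Pushing the argmax through the softmax does not change the result.
    assert numpy.array_equal(smv.argmax(axis=-1), xv.argmax(axis=-1))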
def test_argmax_pushdown_bias():
x = tensor.matrix()
b = tensor.vector()
out = tensor.argmax(softmax_with_bias(x, b), axis=-1)
fgraph = gof.FunctionGraph(
[x, b],
[out])
f = theano.function([x, b], out)
assert hasattr(f.maker.fgraph.outputs[0].tag, 'trace')
theano.compile.mode.optdb.query(
theano.compile.mode.OPT_FAST_RUN).optimize(fgraph)
# print 'AFTER'
# for node in fgraph.toposort():
# print node.op
assert len(fgraph.toposort()) == 4
assert isinstance(fgraph.toposort()[0].op, tensor.DimShuffle)
assert isinstance(fgraph.toposort()[1].op, tensor.Elemwise)
assert isinstance(fgraph.toposort()[2].op, tensor.MaxAndArgmax)
assert str(fgraph.toposort()[3].op) == 'OutputGuard'
x = tensor.matrix()
b = tensor.vector()
out = tensor.max_and_argmax(softmax_with_bias(x, b), axis=-1)[0]
fgraph = gof.FunctionGraph(
[x, b],
[out])
f = theano.function([x, b], out)
assert hasattr(f.maker.fgraph.outputs[0].tag, 'trace')
backup = config.warn.argmax_pushdown_bug
config.warn.argmax_pushdown_bug = False
try:
theano.compile.mode.optdb.query(
theano.compile.mode.OPT_FAST_RUN).optimize(fgraph)
finally:
config.warn.argmax_pushdown_bug = backup
# print 'AFTER'
# for node in fgraph.toposort():
# print node.op
assert len(fgraph.toposort()) == 3
assert isinstance(fgraph.toposort()[0].op, SoftmaxWithBias)
assert isinstance(fgraph.toposort()[1].op, tensor.CAReduce)
assert isinstance(fgraph.toposort()[1].op.scalar_op, theano.scalar.Maximum)
assert str(fgraph.toposort()[2].op) == 'OutputGuard'
def test_asymptotic_32():
"""
This test makes sure that our functions behave sensibly when
huge values are present
"""
# TODO: consider adding the optimization of crossentropy into the current
# mode for the purpose of running this test
for dtype in 'float32', 'float64':
if dtype == 'float32':
x = tensor.fmatrix()
x2 = tensor.fvector()
else:
x = tensor.dmatrix()
x2 = tensor.dvector()
y = tensor.lvector()
c = categorical_crossentropy(softmax(x + x2), y)
f = theano.function([x, y, x2], [c.sum(),
tensor.grad(c.sum(), x)], mode='FAST_RUN')
if 0:
for i, n in enumerate(f.maker.fgraph.toposort()):
print(i, n)
xval = numpy.zeros((5, 5), dtype=dtype).astype(dtype)
x2val = numpy.zeros(5, dtype=xval.dtype).astype(dtype)
for i in xrange(100):
cval, gxval = f(xval, numpy.arange(5), x2val)
xval -= 100.3 * gxval
# print cval, gxval
assert cval == 0 # no problem going to zero error
# what about when x gets really big?
xval = numpy.zeros((5, 5), dtype=dtype)
x2val = numpy.zeros(5, dtype=xval.dtype)
for i in xrange(100):
cval, gxval = f(xval, numpy.arange(5), x2val)
xval += 100000.3 * gxval
# print cval, gxval
assert cval > 61750000
assert gxval[0, 0] == -1.0
assert gxval[0, 1] == 0.25
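# Illustrative aside (not part of the original test file): the huge-value behaviour
# checked above rests on the usual max-subtraction trick. Subtracting the per-row
# maximum before exponentiating leaves the softmax mathematically unchanged but keeps
# exp() from overflowing. A minimal numpy sketch of a numerically stable softmax:
def _sketch_stable_softmax(xv):
    xv = numpy.asarray(xv, dtype='float64')
    zv = xv - xv.max(axis=-1, keepdims=True)
    ev = numpy.exp(zv)
    # exp(x - m) / sum(exp(x - m)) equals exp(x) / sum(exp(x)), without the overflow.
    return ev / ev.sum(axis=-1, keepdims=True)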
class Test_softmax_opt:
# Test that expressions of softmax in terms of exponentiated things
# divided by row sums are replaced by softmax expressions.
#
# Softmax_grad isn't that interesting as an Op, but it has the signature
# we look for when trying to insert CrossEntropySoftmax... grad. So, for
# now, we add softmax_grad to graphs. In the future, we may modify the
# CrossEntropySoftmax...grad to look for the more basic pattern.
#
def setUp(self):
utt.seed_rng()
self.rng = numpy.random.RandomState(utt.fetch_seed())
self.mode = theano.compile.mode.get_default_mode()
self.mode = self.mode.including('canonicalize')
def test_basic(self):
c = T.matrix()
p_y = T.exp(c) / T.exp(c).sum(axis=1).dimshuffle(0, 'x')
# test that function contains softmax and no div.
f = theano.function([c], p_y, mode=self.mode)
assert hasattr(f.maker.fgraph.outputs[0].tag, 'trace')
f_ops = [n.op for n in f.maker.fgraph.toposort()]
# print '--- f ='
# printing.debugprint(f)
# print '==='
assert len(f_ops) == 1
assert softmax_op in f_ops
f(self.rng.rand(3, 4).astype(config.floatX))
def test_basic_keepdims(self):
c = T.matrix()
p_y = T.exp(c) / T.exp(c).sum(axis=1, keepdims=True)
# test that function contains softmax and no div.
f = theano.function([c], p_y, mode=self.mode)
assert hasattr(f.maker.fgraph.outputs[0].tag, 'trace')
f_ops = [n.op for n in f.maker.fgraph.toposort()]
# print '--- f ='
# printing.debugprint(f)
# print '==='
assert len(f_ops) == 1
assert softmax_op in f_ops
f(self.rng.rand(3, 4).astype(config.floatX))
def test_grad(self):
c = T.matrix()
p_y = T.exp(c) / T.exp(c).sum(axis=1).dimshuffle(0, 'x')
# test that function contains softmax and softmaxgrad
w = T.matrix()
backup = config.warn.sum_div_dimshuffle_bug
config.warn.sum_div_dimshuffle_bug = False
try:
g = theano.function([c, w], T.grad((p_y * w).sum(), c))
            assert hasattr(g.maker.fgraph.outputs[0].tag, 'trace')
finally:
config.warn.sum_div_dimshuffle_bug = backup
g_ops = [n.op for n in g.maker.fgraph.toposort()]
# print '--- g ='
# printing.debugprint(g)
# print '==='
raise SkipTest('Optimization not enabled for the moment')
assert len(g_ops) == 2
assert softmax_op in g_ops
assert softmax_grad in g_ops
g(self.rng.rand(3, 4), self.rng.uniform(.5, 1, (3, 4)))
def test_transpose_basic(self):
# this should be a transposed softmax
c = T.matrix()
p_y = T.exp(c) / T.exp(c).sum(axis=0)
# test that function contains softmax and no div.
f = theano.function([c], p_y)
# printing.debugprint(f)
# test that function contains softmax and no div.
backup = config.warn.sum_div_dimshuffle_bug
config.warn.sum_div_dimshuffle_bug = False
try:
g = theano.function([c], T.grad(p_y.sum(), c))
            assert hasattr(g.maker.fgraph.outputs[0].tag, 'trace')
finally:
config.warn.sum_div_dimshuffle_bug = backup
# printing.debugprint(g)
raise SkipTest('Optimization not enabled for the moment')
def test_1d_basic(self):
# this should be a softmax, but of a one-row matrix
c = T.vector()
p_y = T.exp(c) / T.exp(c).sum()
# test that function contains softmax and no div.
f = theano.function([c], p_y)
        assert hasattr(f.maker.fgraph.outputs[0].tag, 'trace')
# printing.debugprint(f)
# test that function contains softmax and no div.
backup = config.warn.sum_div_dimshuffle_bug
config.warn.sum_div_dimshuffle_bug = False
try:
g = theano.function([c], T.grad(p_y.sum(), c))
            assert hasattr(g.maker.fgraph.outputs[0].tag, 'trace')
finally:
config.warn.sum_div_dimshuffle_bug = backup
# printing.debugprint(g)
raise SkipTest('Optimization not enabled for the moment')
# REPEAT 3 CASES in presence of log(softmax) with the advanced indexing
# etc.
def test_softmax_graph():
rng = numpy.random.RandomState(utt.fetch_seed())
x = theano.shared(rng.normal(size=(3, 4)))
def f(inputs):
y = softmax_graph(x)
return theano.grad(None, x, known_grads={y: inputs})
utt.verify_grad(f, [rng.rand(3, 4)])
def test_grad_softmax_grad():
rng = numpy.random.RandomState(utt.fetch_seed())
x = theano.shared(rng.normal(size=(3, 4)))
def f(inputs):
y = softmax_op(x)
return theano.grad(None, x, known_grads={y: inputs})
utt.verify_grad(f, [rng.rand(3, 4)])
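# Illustrative aside (not part of the original test file): both tests above push an
# output gradient through a softmax via known_grads. For y = softmax(x) applied
# row-wise, the vector-Jacobian product with an output gradient dy is
#     dx = y * (dy - sum(dy * y, axis=-1, keepdims=True)),
# which is the quantity the SoftmaxGrad op is expected to compute. A numpy sketch:
def _sketch_softmax_vjp(dy, y):
    dy = numpy.asarray(dy, dtype='float64')
    y = numpy.asarray(y, dtype='float64')
    return y * (dy - (dy * y).sum(axis=-1, keepdims=True))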
def test_stabilize_log_softmax():
mode = theano.compile.mode.get_default_mode()
mode = mode.including('local_log_softmax', 'specialize')
x = matrix()
y = softmax(x)
z = theano.tensor.log(y)
f = theano.function([x], z, mode=mode)
assert hasattr(f.maker.fgraph.outputs[0].tag, 'trace')
# check that the softmax has been optimized out
for node in f.maker.fgraph.toposort():
assert not isinstance(node.op, y.owner.op.__class__)
# call the function so debug mode can verify the optimized
# version matches the unoptimized version
rng = numpy.random.RandomState([2012, 8, 22])
f(numpy.cast[config.floatX](rng.randn(2, 3)))
def test_relu():
x = matrix('x')
seed = theano.tests.unittest_tools.fetch_seed()
rng = numpy.random.RandomState(seed)
X = rng.randn(20, 30).astype(config.floatX)
# test the base case, without custom alpha value
y = relu(x).eval({x: X})
assert numpy.allclose(y, numpy.maximum(X, 0))
# test for different constant alpha values (also outside of [0, 1])
for alpha in 0, 0.3, 1, 2, -0.3, -1, -2:
y = relu(x, alpha).eval({x: X})
assert numpy.allclose(y, numpy.where(X > 0, X, alpha * X))
# test for variable alpha (scalar, vector and matrix)
for alpha in scalar(), vector(), matrix():
# create value for alpha (correct ndim and broadcastable against X)
A = numpy.array(rng.randn(*X.shape[::-1][:alpha.ndim][::-1]),
dtype=config.floatX)
y = relu(x, alpha).eval({x: X, alpha: A})
assert numpy.allclose(y, numpy.where(X > 0, X, A * X), rtol=3e-5)
def test_h_softmax():
"""
Tests the output dimensions of the h_softmax when a target is provided or
not.
"""
#############
# Config
#############
input_size = 4
batch_size = 2
h_softmax_level1_size = 5
h_softmax_level2_size = 3
output_size = h_softmax_level1_size * h_softmax_level2_size
#############
# Initialize shared variables
#############
floatX = theano.config.floatX
shared = theano.shared
# First level of h_softmax
W1 = numpy.asarray(numpy.random.normal(
size=(input_size, h_softmax_level1_size)), dtype=floatX)
W1 = shared(W1)
b1 = shared(numpy.asarray(numpy.zeros((h_softmax_level1_size,)),
dtype=floatX))
# Second level of h_softmax
W2 = numpy.asarray(numpy.random.normal(
size=(h_softmax_level1_size, input_size, h_softmax_level2_size)),
dtype=floatX)
W2 = shared(W2)
b2 = shared(
numpy.asarray(numpy.zeros((h_softmax_level1_size,
h_softmax_level2_size)), dtype=floatX))
#############
# Build graph
#############
x = tensor.matrix('x')
y = tensor.ivector('y')
# This only computes the output corresponding to the target
y_hat_tg = h_softmax(x, batch_size, output_size, h_softmax_level1_size,
h_softmax_level2_size, W1, b1, W2, b2, y)
# This computes all the outputs
y_hat_all = h_softmax(x, batch_size, output_size, h_softmax_level1_size,
h_softmax_level2_size, W1, b1, W2, b2)
#############
# Compile functions
#############
fun_output_tg = theano.function([x, y], y_hat_tg)
fun_output = theano.function([x], y_hat_all)
#############
# Test
#############
x_mat = numpy.random.normal(size=(batch_size, input_size)).astype(floatX)
y_mat = numpy.random.randint(0, output_size, batch_size).astype('int32')
tg_output = fun_output_tg(x_mat, y_mat)
all_outputs = fun_output(x_mat)
assert(tg_output.shape == (batch_size,))
assert(all_outputs.shape == (batch_size, output_size))
# Verifies that the outputs computed by fun_output_tg are the same as those
# computed by fun_output.
utt.assert_allclose(
all_outputs[numpy.arange(0, batch_size), y_mat], tg_output)
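# Illustrative numpy aside (not the Theano implementation tested above): a two-level
# hierarchical softmax factorises p(class | x) as p(group | x) * p(member | group, x),
# which is why output_size is h_softmax_level1_size * h_softmax_level2_size. The
# class -> (class // level2_size, class % level2_size) layout implied by the final
# reshape is an assumption made only for this sketch.
def _sketch_two_level_softmax(x, W1, b1, W2, b2):
    def _softmax(z):
        e = numpy.exp(z - z.max(axis=-1, keepdims=True))
        return e / e.sum(axis=-1, keepdims=True)
    p_group = _softmax(numpy.dot(x, W1) + b1)                 # (batch, level1)
    p_member = numpy.array([_softmax(numpy.dot(x, W2[g]) + b2[g])
                            for g in range(W1.shape[1])])     # (level1, batch, level2)
    p_member = p_member.transpose(1, 0, 2)                    # (batch, level1, level2)
    p_all = p_group[:, :, None] * p_member                    # joint probabilities
    return p_all.reshape(x.shape[0], -1)                      # (batch, level1 * level2)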
def test_elu():
x = matrix('x')
seed = theano.tests.unittest_tools.fetch_seed()
rng = numpy.random.RandomState(seed)
X = rng.randn(20, 30).astype(config.floatX)
# test the base case, without custom alpha value
y = elu(x).eval({x: X})
utt.assert_allclose(y, numpy.where(X > 0, X, numpy.exp(X) - 1))
# test for different constant alpha values
for alpha in 1.5, 2, -1, -1.5, -2:
y = elu(x, alpha).eval({x: X})
utt.assert_allclose(y, numpy.where(X > 0, X, alpha * (numpy.exp(X) - 1)))
|
ryanpetrello/draughtcraft
|
refs/heads/master
|
draughtcraft/data/tools/cacheflush.py
|
1
|
from pecan.commands.base import BaseCommand
BLUE = '\033[94m'
ENDS = '\033[0m'
def redis_connector():
from pecan import conf
from redis import Redis
return Redis(**conf.redis)
class RedisFlushCommand(BaseCommand):
"""
Flush the Redis resource cache.
"""
def run(self, args):
super(RedisFlushCommand, self).run(args)
self.load_app()
print "=" * 80
print BLUE + "FLUSHING CACHE" + ENDS
print "=" * 80
redis = redis_connector()
redis.flushdb()
|
cortedeltimo/SickRage
|
refs/heads/master
|
tests/tv_tests.py
|
11
|
# coding=UTF-8
# Author: Dennis Lutter <lad1337@gmail.com>
# URL: https://sickrage.github.io
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
"""
Test tv
"""
import os.path
import sys
import unittest
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), '../lib')))
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from sickbeard.tv import TVEpisode, TVShow
import sickbeard
import tests.test_lib as test
class TVShowTests(test.SickbeardTestDBCase):
"""
Test tv shows
"""
def setUp(self):
"""
Set up tests
"""
super(TVShowTests, self).setUp()
sickbeard.showList = []
def test_init_indexerid(self):
"""
test init indexer id
"""
show = TVShow(1, 1, "en")
self.assertEqual(show.indexerid, 1)
def test_change_indexerid(self):
"""
test change indexer id
"""
show = TVShow(1, 1, "en")
show.name = "show name"
show.network = "cbs"
show.genre = "crime"
show.runtime = 40
show.status = "Ended"
show.default_ep_status = "5"
show.airs = "monday"
show.startyear = 1987
show.saveToDB()
show.loadFromDB()
show.indexerid = 2
show.saveToDB()
show.loadFromDB()
self.assertEqual(show.indexerid, 2)
def test_set_name(self):
"""
test set name
"""
show = TVShow(1, 1, "en")
show.name = "newName"
show.saveToDB()
show.loadFromDB()
self.assertEqual(show.name, "newName")
class TVEpisodeTests(test.SickbeardTestDBCase):
"""
Test tv episode
"""
def setUp(self):
"""
Set up
"""
super(TVEpisodeTests, self).setUp()
sickbeard.showList = []
def test_init_empty_db(self):
"""
test init empty db
"""
show = TVShow(1, 1, "en")
episode = TVEpisode(show, 1, 1)
episode.name = "asdasdasdajkaj"
episode.saveToDB()
episode.loadFromDB(1, 1)
self.assertEqual(episode.name, "asdasdasdajkaj")
class TVTests(test.SickbeardTestDBCase):
"""
Test tv
"""
def setUp(self):
"""
Set up
"""
super(TVTests, self).setUp()
sickbeard.showList = []
@staticmethod
def test_get_episode():
"""
Test get episodes
"""
show = TVShow(1, 1, "en")
show.name = "show name"
show.network = "cbs"
show.genre = "crime"
show.runtime = 40
show.status = "Ended"
show.default_ep_status = "5"
show.airs = "monday"
show.startyear = 1987
show.saveToDB()
sickbeard.showList = [show]
# TODO: implement
if __name__ == '__main__':
print("==================")
print("STARTING - TV TESTS")
print("==================")
print("######################################################################")
SUITE = unittest.TestLoader().loadTestsFromTestCase(TVShowTests)
unittest.TextTestRunner(verbosity=2).run(SUITE)
print("######################################################################")
SUITE = unittest.TestLoader().loadTestsFromTestCase(TVEpisodeTests)
unittest.TextTestRunner(verbosity=2).run(SUITE)
print("######################################################################")
SUITE = unittest.TestLoader().loadTestsFromTestCase(TVTests)
unittest.TextTestRunner(verbosity=2).run(SUITE)
|
lssfau/walberla
|
refs/heads/master
|
utilities/findMissingIncludeGuards.py
|
1
|
#!/usr/bin/env python3
import os
error = False
for root, dirnames, filenames in os.walk(".."):
for filename in filenames:
if filename.endswith(".h") and not filename.endswith(".impl.h"):
if "extern" not in root:
file = os.path.join(root, filename)
if "#pragma once" not in open(file).read():
print(file)
error = True
if error:
exit(-1)
|
smueller18/solar-thermal-climate-system
|
refs/heads/master
|
consumer/machine-state-prediction/consumer.py
|
1
|
#!/usr/bin/env python3
import os
import time
import logging.config
import pickle
import pandas as pd
from pca import PCAForPandas
import kafka_connector.avro_loop_consumer as avro_loop_consumer
from kafka_connector.avro_loop_consumer import AvroLoopConsumer
from tsfresh.feature_extraction import extract_features
from tsfresh.feature_extraction import settings
__author__ = u'Stephan Müller'
__copyright__ = u'2017, Stephan Müller'
__license__ = u'MIT'
__dirname__ = os.path.dirname(os.path.abspath(__file__))
KAFKA_HOSTS = os.getenv("KAFKA_HOSTS", "kafka:9092")
SCHEMA_REGISTRY_URL = os.getenv("SCHEMA_REGISTRY_URL", "http://schema-registry:8082")
CONSUMER_GROUP = os.getenv("CONSUMER_GROUP", "postgres")
TOPIC_NAME = os.getenv("TOPIC_NAME", "prod.machine_learning.aggregations_10minutes")
SHOW_CALCULATION_TIME = int(os.getenv("SHOW_CALCULATION_TIME", 0))
# Dict with column names
FC_PARAMETERS = os.getenv("FC_PARAMETERS", __dirname__ + "/data/fc-parameters.pkl")
# PCAForPandas object
PCA_MODEL = os.getenv("PCA_MODEL", __dirname__ + "/data/pca-model.pkl")
# ML model with predict function
ML_MODEL = os.getenv("ML_MODEL", __dirname__ + "/data/ml-model.pkl")
LOGGING_LEVEL = os.getenv("LOGGING_LEVEL", "INFO")
logging_format = "%(levelname)8s %(asctime)s %(name)s [%(filename)s:%(lineno)s - %(funcName)s() ] %(message)s"
logging.basicConfig(level=logging.getLevelName(LOGGING_LEVEL), format=logging_format)
logger = logging.getLogger('consumer')
with open(FC_PARAMETERS, 'rb') as f:
fc_parameters = pickle.load(f)
with open(PCA_MODEL, 'rb') as f:
pca_model = pickle.load(f)
with open(ML_MODEL, 'rb') as f:
ml_model = pickle.load(f)
def handle_message(msg):
if msg.key() is None or type(msg.key()) is not dict:
logger.warning("Key is none. Ignoring message.")
return
elif msg.value() is None or type(msg.value()) is not dict:
logger.warning("Value is none. Ignoring message.")
return
try:
time_begin = time.time()
timeseries = pd.melt(pd.DataFrame.from_dict(msg.value(), orient='index').transpose()).dropna()
timeseries['group_id'] = 0
if timeseries.isnull().sum().sum() > 0:
logger.warning("at least one field of timeseries is null")
return
X = extract_features(timeseries, column_id='group_id', column_kind="variable", column_value="value",
kind_to_fc_parameters=settings.from_columns(fc_parameters))
if X.isnull().sum().sum() > 0:
logger.warning("at least one field of extracted features is null")
return
kritisch = ml_model.predict(pca_model.transform(X))[0]
time_end = time.time()
start_prediction_interval = time.localtime(msg.key()['timestamp_end'] / 1000)
end_prediction_interval = time.localtime(msg.key()['timestamp_end'] / 1000 + 60*5)
print("Prediction for interval",
time.strftime("%H:%M:%S", start_prediction_interval),
"to",
time.strftime("%H:%M:%S", end_prediction_interval),
":",
"kritisch" if kritisch else "unkritisch"
)
if SHOW_CALCULATION_TIME == 1:
print("time for calculation", round(time_end - time_begin, 5), "seconds")
except Exception as e:
logger.exception(e)
consumer.stop()
config = avro_loop_consumer.default_config
config['enable.auto.commit'] = True
config['default.topic.config'] = dict()
config['default.topic.config']['auto.offset.reset'] = 'end'
consumer = AvroLoopConsumer(KAFKA_HOSTS, SCHEMA_REGISTRY_URL, CONSUMER_GROUP, [TOPIC_NAME], config=config)
consumer.loop(lambda msg: handle_message(msg))
|
weera00/xbmc
|
refs/heads/master
|
tools/Fake Episode Maker/openAnything.py
|
169
|
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2013 Team XBMC
# http://xbmc.org
#
# This Program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This Program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with XBMC; see the file COPYING. If not, see
# <http://www.gnu.org/licenses/>.
#
import sys, urllib2, urlparse, gzip  # sys is needed when source == '-' (read from stdin)
from StringIO import StringIO
USER_AGENT = 'OpenAnything/1.0 +http://diveintopython.org/http_web_services/'
class SmartRedirectHandler(urllib2.HTTPRedirectHandler):
def http_error_301(self, req, fp, code, msg, headers):
result = urllib2.HTTPRedirectHandler.http_error_301(
self, req, fp, code, msg, headers)
result.status = code
return result
def http_error_302(self, req, fp, code, msg, headers):
result = urllib2.HTTPRedirectHandler.http_error_302(
self, req, fp, code, msg, headers)
result.status = code
return result
class DefaultErrorHandler(urllib2.HTTPDefaultErrorHandler):
def http_error_default(self, req, fp, code, msg, headers):
result = urllib2.HTTPError(
req.get_full_url(), code, msg, headers, fp)
result.status = code
return result
def openAnything(source, etag=None, lastmodified=None, agent=USER_AGENT):
'''URL, filename, or string --> stream
This function lets you define parsers that take any input source
(URL, pathname to local or network file, or actual data as a string)
and deal with it in a uniform manner. Returned object is guaranteed
to have all the basic stdio read methods (read, readline, readlines).
Just .close() the object when you're done with it.
If the etag argument is supplied, it will be used as the value of an
If-None-Match request header.
If the lastmodified argument is supplied, it must be a formatted
date/time string in GMT (as returned in the Last-Modified header of
a previous request). The formatted date/time will be used
as the value of an If-Modified-Since request header.
If the agent argument is supplied, it will be used as the value of a
User-Agent request header.
'''
if hasattr(source, 'read'):
return source
if source == '-':
return sys.stdin
if urlparse.urlparse(source)[0] == 'http':
# open URL with urllib2
request = urllib2.Request(source)
request.add_header('User-Agent', agent)
if etag:
request.add_header('If-None-Match', etag)
if lastmodified:
request.add_header('If-Modified-Since', lastmodified)
request.add_header('Accept-encoding', 'gzip')
opener = urllib2.build_opener(SmartRedirectHandler(), DefaultErrorHandler())
return opener.open(request)
# try to open with native open function (if source is a filename)
try:
return open(source)
except (IOError, OSError):
pass
# treat source as string
return StringIO(str(source))
def fetch(source, etag=None, last_modified=None, agent=USER_AGENT):
'''Fetch data and metadata from a URL, file, stream, or string'''
result = {}
f = openAnything(source, etag, last_modified, agent)
result['data'] = f.read()
if hasattr(f, 'headers'):
# save ETag, if the server sent one
result['etag'] = f.headers.get('ETag')
# save Last-Modified header, if the server sent one
result['lastmodified'] = f.headers.get('Last-Modified')
if f.headers.get('content-encoding', '') == 'gzip':
# data came back gzip-compressed, decompress it
result['data'] = gzip.GzipFile(fileobj=StringIO(result['data'])).read()
if hasattr(f, 'url'):
result['url'] = f.url
result['status'] = 200
if hasattr(f, 'status'):
result['status'] = f.status
f.close()
return result
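# Illustrative usage sketch (not part of the original module; the URL below is
# hypothetical and used only for demonstration). fetch() returns a dict carrying
# 'data', 'status' and 'url', plus 'etag' and 'lastmodified' when the server sends
# them; feeding those two values back in on a later call turns the request into a
# conditional GET, so an unchanged resource typically comes back with status 304.
if __name__ == '__main__':
    first = fetch('http://example.com/feed.xml')
    print first['status'], len(first['data'])
    # Re-fetch with the cached validators; status 304 means "not modified".
    second = fetch('http://example.com/feed.xml',
                   etag=first.get('etag'),
                   last_modified=first.get('lastmodified'))
    print second['status']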
|
duanhongyi/kakfa
|
refs/heads/master
|
setup.py
|
1
|
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.md')).read()
setup(
name="kafka",
version="0.8.1-3",
install_requires=["distribute", "poolbase"],
packages=find_packages(),
author="duanhongyi",
author_email="duanhongyi@doopai.com",
url="https://github.com/duanhongyi/kafka",
license="Copyright 2012, Apache License, v2.0",
description="Pure Python client for Apache Kafka",
long_description=README,
)
|
higgsd/euler
|
refs/heads/master
|
py/14.py
|
1
|
# Project Euler 14: find the starting number below one million that produces the
# longest Collatz chain. Expected answer: 837799
N = 1000000
lut = {1: 1}  # memoised chain lengths, seeded with chain(1) = 1
m = 1   # longest chain length found so far
mv = 1  # starting number that produced it
for n in xrange(1, N):
if lut.has_key(n):
continue
    ch = [n]  # numbers visited before reaching a value with a known chain length
    nx = n
    # Follow the Collatz sequence until a memoised value is reached.
    while True:
if nx % 2 == 0:
nx /= 2
else:
nx = 3 * nx + 1
        if lut.has_key(nx):
            # Backfill chain lengths for every number visited on the way to nx.
            for x in ch[::-1]:
lut[x] = lut[nx] + 1
nx = x
if ch[0] < N and lut[ch[0]] > m:
m = lut[ch[0]]
mv = ch[0]
break
ch.append(nx)
print mv
|
mdaniel/intellij-community
|
refs/heads/master
|
python/testData/refactoring/extractmethod/GlobalVarAssignment.after.py
|
79
|
x = 0
def foo():
global x
bar()
def bar():
global x
x = 1
|
samkohn/Geiger-Counter
|
refs/heads/master
|
main.py
|
2
|
###############################
# Python script for analyzing pra data
###############################
# Imports
# External modules
import scipy as sp
import matplotlib as ml
import matplotlib.pyplot as pp
# Our modules
import analyzeData as ad
def plotCountRatePerTime(data, timeResolution = None):
    '''Plot the count rate over time using the given
    time resolution. If timeResolution is None, the data keeps its
    current binning (data.maxTimeResolution), i.e. it is not rebinned.'''
# Rebin if necessary
if timeResolution:
data = data.rebin(timeResolution)
times = sp.array(data.times)
counts = sp.array(data.counts)
# Plot
pp.plot(times, counts)
pp.show()
def plotHistOfCountRates(data, timeResolution = None, numOfBins = 10):
'''Plot the distribution of count rates. Splits the given DataSet
into separate 'samples' based on the timeResolution, so that
numSamples = totalTime/timeResolution. If a time resolution is
    not given, the maximum resolution available for the given DataSet is used.'''
if timeResolution:
data = data.rebin(timeResolution)
# Make a histogram of the count rates from the DataSet
hist, bin_edges = sp.histogram(data.counts, numOfBins)
print len(hist)
print len(bin_edges)
    pp.plot(bin_edges[:-1], hist, 'ro')  # plot counts against the left edge of each bin
pp.show()
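# Illustrative aside (not part of the original script): the rebinning described in the
# docstrings above amounts to summing groups of adjacent bins, which is what makes
# numSamples = totalTime / timeResolution. The real rebin() lives on the DataSet class
# in the local analyzeData module; the helper below is only a numpy-style sketch of the
# idea, dropping any leftover bins that do not fill a complete group.
def rebinCountsSketch(counts, factor):
    '''Sum every `factor` consecutive entries of a 1-D count array.'''
    counts = sp.array(counts)
    usable = len(counts) - len(counts) % factor
    return counts[:usable].reshape(-1, factor).sum(axis=1)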
def main():
'''All times are in seconds'''
data = ad.readInput('test.txt')
plotCountRatePerTime(data, 2)
pp.clf()
plotHistOfCountRates(data)
main()
|
rickerc/neutron_audit
|
refs/heads/cis-havana-staging
|
neutron/tests/unit/ml2/test_type_gre.py
|
1
|
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from testtools import matchers
from neutron.common import exceptions as exc
import neutron.db.api as db
from neutron.plugins.ml2 import db as ml2_db
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers import type_gre
from neutron.tests import base
TUNNEL_IP_ONE = "10.10.10.10"
TUNNEL_IP_TWO = "10.10.10.20"
TUN_MIN = 100
TUN_MAX = 109
TUNNEL_RANGES = [(TUN_MIN, TUN_MAX)]
UPDATED_TUNNEL_RANGES = [(TUN_MIN + 5, TUN_MAX + 5)]
class GreTypeTest(base.BaseTestCase):
def setUp(self):
super(GreTypeTest, self).setUp()
ml2_db.initialize()
self.driver = type_gre.GreTypeDriver()
self.driver.gre_id_ranges = TUNNEL_RANGES
self.driver._sync_gre_allocations()
self.session = db.get_session()
def test_validate_provider_segment(self):
segment = {api.NETWORK_TYPE: 'gre',
api.PHYSICAL_NETWORK: 'phys_net',
api.SEGMENTATION_ID: None}
with testtools.ExpectedException(exc.InvalidInput):
self.driver.validate_provider_segment(segment)
segment[api.PHYSICAL_NETWORK] = None
with testtools.ExpectedException(exc.InvalidInput):
self.driver.validate_provider_segment(segment)
def test_sync_tunnel_allocations(self):
self.assertIsNone(
self.driver.get_gre_allocation(self.session,
(TUN_MIN - 1))
)
self.assertFalse(
self.driver.get_gre_allocation(self.session,
(TUN_MIN)).allocated
)
self.assertFalse(
self.driver.get_gre_allocation(self.session,
(TUN_MIN + 1)).allocated
)
self.assertFalse(
self.driver.get_gre_allocation(self.session,
(TUN_MAX - 1)).allocated
)
self.assertFalse(
self.driver.get_gre_allocation(self.session,
(TUN_MAX)).allocated
)
self.assertIsNone(
self.driver.get_gre_allocation(self.session,
(TUN_MAX + 1))
)
self.driver.gre_id_ranges = UPDATED_TUNNEL_RANGES
self.driver._sync_gre_allocations()
self.assertIsNone(
self.driver.get_gre_allocation(self.session,
(TUN_MIN + 5 - 1))
)
self.assertFalse(
self.driver.get_gre_allocation(self.session,
(TUN_MIN + 5)).allocated
)
self.assertFalse(
self.driver.get_gre_allocation(self.session,
(TUN_MIN + 5 + 1)).allocated
)
self.assertFalse(
self.driver.get_gre_allocation(self.session,
(TUN_MAX + 5 - 1)).allocated
)
self.assertFalse(
self.driver.get_gre_allocation(self.session,
(TUN_MAX + 5)).allocated
)
self.assertIsNone(
self.driver.get_gre_allocation(self.session,
(TUN_MAX + 5 + 1))
)
def test_reserve_provider_segment(self):
segment = {api.NETWORK_TYPE: 'gre',
api.PHYSICAL_NETWORK: 'None',
api.SEGMENTATION_ID: 101}
self.driver.reserve_provider_segment(self.session, segment)
alloc = self.driver.get_gre_allocation(self.session,
segment[api.SEGMENTATION_ID])
self.assertTrue(alloc.allocated)
with testtools.ExpectedException(exc.TunnelIdInUse):
self.driver.reserve_provider_segment(self.session, segment)
self.driver.release_segment(self.session, segment)
alloc = self.driver.get_gre_allocation(self.session,
segment[api.SEGMENTATION_ID])
self.assertFalse(alloc.allocated)
segment[api.SEGMENTATION_ID] = 1000
self.driver.reserve_provider_segment(self.session, segment)
alloc = self.driver.get_gre_allocation(self.session,
segment[api.SEGMENTATION_ID])
self.assertTrue(alloc.allocated)
self.driver.release_segment(self.session, segment)
alloc = self.driver.get_gre_allocation(self.session,
segment[api.SEGMENTATION_ID])
self.assertEqual(None, alloc)
def test_allocate_tenant_segment(self):
tunnel_ids = set()
for x in xrange(TUN_MIN, TUN_MAX + 1):
segment = self.driver.allocate_tenant_segment(self.session)
self.assertThat(segment[api.SEGMENTATION_ID],
matchers.GreaterThan(TUN_MIN - 1))
self.assertThat(segment[api.SEGMENTATION_ID],
matchers.LessThan(TUN_MAX + 1))
tunnel_ids.add(segment[api.SEGMENTATION_ID])
segment = self.driver.allocate_tenant_segment(self.session)
self.assertEqual(None, segment)
segment = {api.NETWORK_TYPE: 'gre',
api.PHYSICAL_NETWORK: 'None',
api.SEGMENTATION_ID: tunnel_ids.pop()}
self.driver.release_segment(self.session, segment)
segment = self.driver.allocate_tenant_segment(self.session)
self.assertThat(segment[api.SEGMENTATION_ID],
matchers.GreaterThan(TUN_MIN - 1))
self.assertThat(segment[api.SEGMENTATION_ID],
matchers.LessThan(TUN_MAX + 1))
tunnel_ids.add(segment[api.SEGMENTATION_ID])
for tunnel_id in tunnel_ids:
segment[api.SEGMENTATION_ID] = tunnel_id
self.driver.release_segment(self.session, segment)
def test_gre_endpoints(self):
tun_1 = self.driver.add_endpoint(TUNNEL_IP_ONE)
tun_2 = self.driver.add_endpoint(TUNNEL_IP_TWO)
self.assertEqual(TUNNEL_IP_ONE, tun_1.ip_address)
self.assertEqual(TUNNEL_IP_TWO, tun_2.ip_address)
# Get all the endpoints
endpoints = self.driver.get_endpoints()
for endpoint in endpoints:
self.assertIn(endpoint['ip_address'],
[TUNNEL_IP_ONE, TUNNEL_IP_TWO])
class GreTypeMultiRangeTest(base.BaseTestCase):
TUN_MIN0 = 100
TUN_MAX0 = 101
TUN_MIN1 = 200
TUN_MAX1 = 201
TUNNEL_MULTI_RANGES = [(TUN_MIN0, TUN_MAX0), (TUN_MIN1, TUN_MAX1)]
def setUp(self):
super(GreTypeMultiRangeTest, self).setUp()
ml2_db.initialize()
self.driver = type_gre.GreTypeDriver()
self.driver.gre_id_ranges = self.TUNNEL_MULTI_RANGES
self.driver._sync_gre_allocations()
self.session = db.get_session()
self.addCleanup(db.clear_db)
def test_release_segment(self):
segments = [self.driver.allocate_tenant_segment(self.session)
for i in range(4)]
# Release them in random order. No special meaning.
for i in (0, 2, 1, 3):
self.driver.release_segment(self.session, segments[i])
for key in (self.TUN_MIN0, self.TUN_MAX0,
self.TUN_MIN1, self.TUN_MAX1):
alloc = self.driver.get_gre_allocation(self.session, key)
self.assertFalse(alloc.allocated)
|
hilaskis/UAV_MissionPlanner
|
refs/heads/master
|
Lib/struct.py
|
247
|
from _struct import *
from _struct import _clearcache
from _struct import __doc__
|
ukncsc/viper
|
refs/heads/master
|
viper/modules/peepdf/aespython/key_expander.py
|
43
|
#!/usr/bin/env python
"""
AES Key Expansion.
Expands 128, 192, or 256 bit key for use with AES
Running this file as __main__ will result in a self-test of the algorithm.
Algorithm per NIST FIPS-197 http://csrc.nist.gov/publications/fips/fips197/fips-197.pdf
Copyright (c) 2010, Adam Newman http://www.caller9.com/
Licensed under the MIT license http://www.opensource.org/licenses/mit-license.php
"""
__author__ = "Adam Newman"
#Normally use relative import. In test mode use local import.
try:
    from .aes_tables import sbox, rcon
except ValueError:
    from aes_tables import sbox, rcon
from operator import xor
class KeyExpander:
"""Perform AES Key Expansion"""
_expanded_key_length = {128 : 176, 192 : 208, 256 : 240}
def __init__(self, key_length):
self._key_length = key_length
self._n = key_length>>3
if key_length in self._expanded_key_length:
self._b = self._expanded_key_length[key_length]
else:
raise LookupError('Invalid Key Size')
def expand(self, new_key):
"""
Expand the encryption key per AES key schedule specifications
http://en.wikipedia.org/wiki/Rijndael_key_schedule#Key_schedule_description
"""
#First n bytes are copied from key
len_new_key = len(new_key)
if len_new_key != self._n:
raise RuntimeError('expand(): key size is invalid')
rcon_iter = 1
nex=new_key.extend
#Grow the key until it is the correct length
while 1:
#Copy last 4 bytes of extended key, apply core, increment i(rcon_iter),
#core Append the list of elements 1-3 and list comprised of element 0 (circular rotate left)
#core For each element of this new list, put the result of sbox into output array.
#xor with 4 bytes n bytes from end of extended key
keyarr=[sbox[i] for i in new_key[-3:]+new_key[-4:-3]]
#First byte of output array is XORed with rcon(iter)
keyarr[0] ^= rcon[rcon_iter]
nex(map(xor,keyarr, new_key[-self._n:4-self._n]))
rcon_iter += 1
len_new_key += 4
#Run three passes of 4 byte expansion using copy of 4 byte tail of extended key
#which is then xor'd with 4 bytes n bytes from end of extended key
for j in 0,1,2:
nex(map(xor,new_key[-4:], new_key[-self._n:4-self._n]))
len_new_key += 4
if len_new_key >= self._b:return new_key
else:
#If key length is 256 and key is not complete, add 4 bytes tail of extended key
#run through sbox before xor with 4 bytes n bytes from end of extended key
if self._key_length == 256:
nex(map(xor,[sbox[x] for x in new_key[-4:]], new_key[-self._n:4-self._n]))
len_new_key += 4
if len_new_key >= self._b:return new_key
#If key length is 192 or 256 and key is not complete, run 2 or 3 passes respectively
#of 4 byte tail of extended key xor with 4 bytes n bytes from end of extended key
if self._key_length != 128:
for j in ((0,1) if self._key_length == 192 else (0,1,2)):
nex(map(xor,new_key[-4:], new_key[-self._n:4-self._n]))
len_new_key += 4
if len_new_key >= self._b:return new_key
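# A minimal usage sketch (hypothetical values; assumes aes_tables provides sbox/rcon):
#
#     expander = KeyExpander(128)
#     key = [i for i in range(16)]       # a 16-byte (128-bit) key
#     expanded = expander.expand(key)    # extends the list in place and returns it
#     assert len(expanded) == 176        # per _expanded_key_length[128]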
import unittest
class TestKeyExpander(unittest.TestCase):
def test_keys(self):
"""Test All Key Expansions"""
import test_keys
test_data = test_keys.TestKeys()
for key_size in 128, 192, 256:
test_expander = KeyExpander(key_size)
test_expanded_key = test_expander.expand(test_data.test_key[key_size])
self.assertEqual (len([i for i, j in zip(test_expanded_key, test_data.test_expanded_key_validated[key_size]) if i == j]),
len(test_data.test_expanded_key_validated[key_size]),
msg='Key expansion ' + str(key_size) + ' bit')
if __name__ == "__main__":
unittest.main()
|
2014c2g14/c2g14
|
refs/heads/master
|
w2/static/Brython2.0.0-20140209-164925/Lib/_pyio.py
|
103
|
"""
Python implementation of the io module.
"""
import os
import abc
import codecs
import errno
# Import _thread instead of threading to reduce startup cost
try:
from _thread import allocate_lock as Lock
except ImportError:
from _dummy_thread import allocate_lock as Lock
import io
from io import (__all__, SEEK_SET, SEEK_CUR, SEEK_END)
valid_seek_flags = {0, 1, 2} # Hardwired values
if hasattr(os, 'SEEK_HOLE') :
valid_seek_flags.add(os.SEEK_HOLE)
valid_seek_flags.add(os.SEEK_DATA)
# open() uses st_blksize whenever we can
DEFAULT_BUFFER_SIZE = 8 * 1024 # bytes
# NOTE: Base classes defined here are registered with the "official" ABCs
# defined in io.py. We don't use real inheritance though, because we don't
# want to inherit the C implementations.
# Rebind for compatibility
BlockingIOError = BlockingIOError
def open(file, mode="r", buffering=-1, encoding=None, errors=None,
newline=None, closefd=True, opener=None):
r"""Open file and return a stream. Raise IOError upon failure.
file is either a text or byte string giving the name (and the path
if the file isn't in the current working directory) of the file to
be opened or an integer file descriptor of the file to be
wrapped. (If a file descriptor is given, it is closed when the
returned I/O object is closed, unless closefd is set to False.)
mode is an optional string that specifies the mode in which the file is
opened. It defaults to 'r' which means open for reading in text mode. Other
common values are 'w' for writing (truncating the file if it already
exists), 'x' for exclusive creation of a new file, and 'a' for appending
(which on some Unix systems, means that all writes append to the end of the
file regardless of the current seek position). In text mode, if encoding is
not specified the encoding used is platform dependent. (For reading and
writing raw bytes use binary mode and leave encoding unspecified.) The
available modes are:
========= ===============================================================
Character Meaning
--------- ---------------------------------------------------------------
'r' open for reading (default)
'w' open for writing, truncating the file first
'x' create a new file and open it for writing
'a' open for writing, appending to the end of the file if it exists
'b' binary mode
't' text mode (default)
'+' open a disk file for updating (reading and writing)
'U' universal newline mode (for backwards compatibility; unneeded
for new code)
========= ===============================================================
The default mode is 'rt' (open for reading text). For binary random
access, the mode 'w+b' opens and truncates the file to 0 bytes, while
'r+b' opens the file without truncation. The 'x' mode implies 'w' and
    raises a `FileExistsError` if the file already exists.
Python distinguishes between files opened in binary and text modes,
even when the underlying operating system doesn't. Files opened in
binary mode (appending 'b' to the mode argument) return contents as
bytes objects without any decoding. In text mode (the default, or when
't' is appended to the mode argument), the contents of the file are
returned as strings, the bytes having been first decoded using a
platform-dependent encoding or using the specified encoding if given.
buffering is an optional integer used to set the buffering policy.
Pass 0 to switch buffering off (only allowed in binary mode), 1 to select
line buffering (only usable in text mode), and an integer > 1 to indicate
the size of a fixed-size chunk buffer. When no buffering argument is
given, the default buffering policy works as follows:
* Binary files are buffered in fixed-size chunks; the size of the buffer
is chosen using a heuristic trying to determine the underlying device's
"block size" and falling back on `io.DEFAULT_BUFFER_SIZE`.
On many systems, the buffer will typically be 4096 or 8192 bytes long.
* "Interactive" text files (files for which isatty() returns True)
use line buffering. Other text files use the policy described above
for binary files.
encoding is the str name of the encoding used to decode or encode the
file. This should only be used in text mode. The default encoding is
platform dependent, but any encoding supported by Python can be
passed. See the codecs module for the list of supported encodings.
errors is an optional string that specifies how encoding errors are to
be handled---this argument should not be used in binary mode. Pass
'strict' to raise a ValueError exception if there is an encoding error
(the default of None has the same effect), or pass 'ignore' to ignore
errors. (Note that ignoring encoding errors can lead to data loss.)
See the documentation for codecs.register for a list of the permitted
encoding error strings.
newline is a string controlling how universal newlines works (it only
applies to text mode). It can be None, '', '\n', '\r', and '\r\n'. It works
as follows:
* On input, if newline is None, universal newlines mode is
enabled. Lines in the input can end in '\n', '\r', or '\r\n', and
these are translated into '\n' before being returned to the
caller. If it is '', universal newline mode is enabled, but line
endings are returned to the caller untranslated. If it has any of
the other legal values, input lines are only terminated by the given
string, and the line ending is returned to the caller untranslated.
* On output, if newline is None, any '\n' characters written are
translated to the system default line separator, os.linesep. If
newline is '', no translation takes place. If newline is any of the
other legal values, any '\n' characters written are translated to
the given string.
    closefd is a bool. If closefd is False, the underlying file descriptor will
be kept open when the file is closed. This does not work when a file name is
given and must be True in that case.
A custom opener can be used by passing a callable as *opener*. The
underlying file descriptor for the file object is then obtained by calling
*opener* with (*file*, *flags*). *opener* must return an open file
descriptor (passing os.open as *opener* results in functionality similar to
passing None).
open() returns a file object whose type depends on the mode, and
through which the standard file operations such as reading and writing
are performed. When open() is used to open a file in a text mode ('w',
'r', 'wt', 'rt', etc.), it returns a TextIOWrapper. When used to open
a file in a binary mode, the returned class varies: in read binary
mode, it returns a BufferedReader; in write binary and append binary
modes, it returns a BufferedWriter, and in read/write mode, it returns
a BufferedRandom.
It is also possible to use a string or bytearray as a file for both
reading and writing. For strings StringIO can be used like a file
opened in a text mode, and for bytes a BytesIO can be used like a file
opened in a binary mode.
"""
if not isinstance(file, (str, bytes, int)):
raise TypeError("invalid file: %r" % file)
if not isinstance(mode, str):
raise TypeError("invalid mode: %r" % mode)
if not isinstance(buffering, int):
raise TypeError("invalid buffering: %r" % buffering)
if encoding is not None and not isinstance(encoding, str):
raise TypeError("invalid encoding: %r" % encoding)
if errors is not None and not isinstance(errors, str):
raise TypeError("invalid errors: %r" % errors)
modes = set(mode)
if modes - set("axrwb+tU") or len(mode) > len(modes):
raise ValueError("invalid mode: %r" % mode)
creating = "x" in modes
reading = "r" in modes
writing = "w" in modes
appending = "a" in modes
updating = "+" in modes
text = "t" in modes
binary = "b" in modes
if "U" in modes:
if creating or writing or appending:
raise ValueError("can't use U and writing mode at once")
reading = True
if text and binary:
raise ValueError("can't have text and binary mode at once")
if creating + reading + writing + appending > 1:
raise ValueError("can't have read/write/append mode at once")
if not (creating or reading or writing or appending):
raise ValueError("must have exactly one of read/write/append mode")
if binary and encoding is not None:
raise ValueError("binary mode doesn't take an encoding argument")
if binary and errors is not None:
raise ValueError("binary mode doesn't take an errors argument")
if binary and newline is not None:
raise ValueError("binary mode doesn't take a newline argument")
raw = FileIO(file,
(creating and "x" or "") +
(reading and "r" or "") +
(writing and "w" or "") +
(appending and "a" or "") +
(updating and "+" or ""),
closefd, opener=opener)
line_buffering = False
if buffering == 1 or buffering < 0 and raw.isatty():
buffering = -1
line_buffering = True
if buffering < 0:
buffering = DEFAULT_BUFFER_SIZE
try:
bs = os.fstat(raw.fileno()).st_blksize
except (os.error, AttributeError):
pass
else:
if bs > 1:
buffering = bs
if buffering < 0:
raise ValueError("invalid buffering size")
if buffering == 0:
if binary:
return raw
raise ValueError("can't have unbuffered text I/O")
if updating:
buffer = BufferedRandom(raw, buffering)
elif creating or writing or appending:
buffer = BufferedWriter(raw, buffering)
elif reading:
buffer = BufferedReader(raw, buffering)
else:
raise ValueError("unknown mode: %r" % mode)
if binary:
return buffer
text = TextIOWrapper(buffer, encoding, errors, newline, line_buffering)
text.mode = mode
return text
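# Illustrative calls (hypothetical filenames) showing how the mode string maps onto
# the classes assembled above; not executed by the module:
#
#     f = open('data.bin', 'rb')   # FileIO wrapped in a BufferedReader
#     t = open('notes.txt', 'w', encoding='utf-8', newline='\n')
#                                  # BufferedWriter wrapped in a TextIOWrapper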
class DocDescriptor:
"""Helper for builtins.open.__doc__
"""
def __get__(self, obj, typ):
return (
"open(file, mode='r', buffering=-1, encoding=None, "
"errors=None, newline=None, closefd=True)\n\n" +
open.__doc__)
class OpenWrapper:
"""Wrapper for builtins.open
Trick so that open won't become a bound method when stored
as a class variable (as dbm.dumb does).
See initstdio() in Python/pythonrun.c.
"""
__doc__ = DocDescriptor()
def __new__(cls, *args, **kwargs):
return open(*args, **kwargs)
# In normal operation, both `UnsupportedOperation`s should be bound to the
# same object.
try:
UnsupportedOperation = io.UnsupportedOperation
except AttributeError:
class UnsupportedOperation(ValueError, IOError):
pass
class IOBase(metaclass=abc.ABCMeta):
"""The abstract base class for all I/O classes, acting on streams of
bytes. There is no public constructor.
This class provides dummy implementations for many methods that
derived classes can override selectively; the default implementations
represent a file that cannot be read, written or seeked.
Even though IOBase does not declare read, readinto, or write because
their signatures will vary, implementations and clients should
consider those methods part of the interface. Also, implementations
may raise UnsupportedOperation when operations they do not support are
called.
The basic type used for binary data read from or written to a file is
bytes. bytearrays are accepted too, and in some cases (such as
readinto) needed. Text I/O classes work with str data.
Note that calling any method (even inquiries) on a closed stream is
undefined. Implementations may raise IOError in this case.
IOBase (and its subclasses) support the iterator protocol, meaning
that an IOBase object can be iterated over yielding the lines in a
stream.
IOBase also supports the :keyword:`with` statement. In this example,
fp is closed after the suite of the with statement is complete:
    with open('spam.txt', 'w') as fp:
fp.write('Spam and eggs!')
"""
### Internal ###
def _unsupported(self, name):
"""Internal: raise an IOError exception for unsupported operations."""
raise UnsupportedOperation("%s.%s() not supported" %
(self.__class__.__name__, name))
### Positioning ###
def seek(self, pos, whence=0):
"""Change stream position.
Change the stream position to byte offset pos. Argument pos is
interpreted relative to the position indicated by whence. Values
for whence are ints:
* 0 -- start of stream (the default); offset should be zero or positive
* 1 -- current stream position; offset may be negative
* 2 -- end of stream; offset is usually negative
Some operating systems / file systems could provide additional values.
Return an int indicating the new absolute position.
"""
self._unsupported("seek")
def tell(self):
"""Return an int indicating the current stream position."""
return self.seek(0, 1)
def truncate(self, pos=None):
"""Truncate file to size bytes.
Size defaults to the current IO position as reported by tell(). Return
the new size.
"""
self._unsupported("truncate")
### Flush and close ###
def flush(self):
"""Flush write buffers, if applicable.
This is not implemented for read-only and non-blocking streams.
"""
self._checkClosed()
# XXX Should this return the number of bytes written???
__closed = False
def close(self):
"""Flush and close the IO object.
This method has no effect if the file is already closed.
"""
if not self.__closed:
try:
self.flush()
finally:
self.__closed = True
def __del__(self):
"""Destructor. Calls close()."""
# The try/except block is in case this is called at program
# exit time, when it's possible that globals have already been
# deleted, and then the close() call might fail. Since
# there's nothing we can do about such failures and they annoy
# the end users, we suppress the traceback.
try:
self.close()
except:
pass
### Inquiries ###
def seekable(self):
"""Return a bool indicating whether object supports random access.
If False, seek(), tell() and truncate() will raise UnsupportedOperation.
This method may need to do a test seek().
"""
return False
def _checkSeekable(self, msg=None):
"""Internal: raise UnsupportedOperation if file is not seekable
"""
if not self.seekable():
raise UnsupportedOperation("File or stream is not seekable."
if msg is None else msg)
def readable(self):
"""Return a bool indicating whether object was opened for reading.
If False, read() will raise UnsupportedOperation.
"""
return False
def _checkReadable(self, msg=None):
"""Internal: raise UnsupportedOperation if file is not readable
"""
if not self.readable():
raise UnsupportedOperation("File or stream is not readable."
if msg is None else msg)
def writable(self):
"""Return a bool indicating whether object was opened for writing.
If False, write() and truncate() will raise UnsupportedOperation.
"""
return False
def _checkWritable(self, msg=None):
"""Internal: raise UnsupportedOperation if file is not writable
"""
if not self.writable():
raise UnsupportedOperation("File or stream is not writable."
if msg is None else msg)
@property
def closed(self):
"""closed: bool. True iff the file has been closed.
For backwards compatibility, this is a property, not a predicate.
"""
return self.__closed
def _checkClosed(self, msg=None):
"""Internal: raise an ValueError if file is closed
"""
if self.closed:
raise ValueError("I/O operation on closed file."
if msg is None else msg)
### Context manager ###
def __enter__(self): # That's a forward reference
"""Context management protocol. Returns self (an instance of IOBase)."""
self._checkClosed()
return self
def __exit__(self, *args):
"""Context management protocol. Calls close()"""
self.close()
### Lower-level APIs ###
# XXX Should these be present even if unimplemented?
def fileno(self):
"""Returns underlying file descriptor (an int) if one exists.
An IOError is raised if the IO object does not use a file descriptor.
"""
self._unsupported("fileno")
def isatty(self):
"""Return a bool indicating whether this is an 'interactive' stream.
Return False if it can't be determined.
"""
self._checkClosed()
return False
### Readline[s] and writelines ###
def readline(self, limit=-1):
r"""Read and return a line of bytes from the stream.
If limit is specified, at most limit bytes will be read.
Limit should be an int.
The line terminator is always b'\n' for binary files; for text
files, the newlines argument to open can be used to select the line
terminator(s) recognized.
"""
# For backwards compatibility, a (slowish) readline().
if hasattr(self, "peek"):
def nreadahead():
readahead = self.peek(1)
if not readahead:
return 1
n = (readahead.find(b"\n") + 1) or len(readahead)
if limit >= 0:
n = min(n, limit)
return n
else:
def nreadahead():
return 1
if limit is None:
limit = -1
elif not isinstance(limit, int):
raise TypeError("limit must be an integer")
res = bytearray()
while limit < 0 or len(res) < limit:
b = self.read(nreadahead())
if not b:
break
res += b
if res.endswith(b"\n"):
break
return bytes(res)
def __iter__(self):
self._checkClosed()
return self
def __next__(self):
line = self.readline()
if not line:
raise StopIteration
return line
def readlines(self, hint=None):
"""Return a list of lines from the stream.
hint can be specified to control the number of lines read: no more
lines will be read if the total size (in bytes/characters) of all
lines so far exceeds hint.
"""
if hint is None or hint <= 0:
return list(self)
n = 0
lines = []
for line in self:
lines.append(line)
n += len(line)
if n >= hint:
break
return lines
def writelines(self, lines):
self._checkClosed()
for line in lines:
self.write(line)
io.IOBase.register(IOBase)
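# Sketch of the protocols IOBase provides via the methods above (illustrative only):
#
#     with open('spam.txt') as fp:   # __enter__/__exit__ close the file on exit
#         for line in fp:            # __iter__/__next__ delegate to readline()
#             pass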
class RawIOBase(IOBase):
"""Base class for raw binary I/O."""
# The read() method is implemented by calling readinto(); derived
# classes that want to support read() only need to implement
# readinto() as a primitive operation. In general, readinto() can be
# more efficient than read().
# (It would be tempting to also provide an implementation of
# readinto() in terms of read(), in case the latter is a more suitable
# primitive operation, but that would lead to nasty recursion in case
# a subclass doesn't implement either.)
def read(self, n=-1):
"""Read and return up to n bytes, where n is an int.
Returns an empty bytes object on EOF, or None if the object is
set not to block and has no data to read.
"""
if n is None:
n = -1
if n < 0:
return self.readall()
b = bytearray(n.__index__())
n = self.readinto(b)
if n is None:
return None
del b[n:]
return bytes(b)
def readall(self):
"""Read until EOF, using multiple read() call."""
res = bytearray()
while True:
data = self.read(DEFAULT_BUFFER_SIZE)
if not data:
break
res += data
if res:
return bytes(res)
else:
# b'' or None
return data
def readinto(self, b):
"""Read up to len(b) bytes into bytearray b.
Returns an int representing the number of bytes read (0 for EOF), or
None if the object is set not to block and has no data to read.
"""
self._unsupported("readinto")
def write(self, b):
"""Write the given buffer to the IO stream.
Returns the number of bytes written, which may be less than len(b).
"""
self._unsupported("write")
io.RawIOBase.register(RawIOBase)
from _io import FileIO
RawIOBase.register(FileIO)
class BufferedIOBase(IOBase):
"""Base class for buffered IO objects.
The main difference with RawIOBase is that the read() method
supports omitting the size argument, and does not have a default
implementation that defers to readinto().
In addition, read(), readinto() and write() may raise
BlockingIOError if the underlying raw stream is in non-blocking
mode and not ready; unlike their raw counterparts, they will never
return None.
A typical implementation should not inherit from a RawIOBase
implementation, but wrap one.
"""
def read(self, n=None):
"""Read and return up to n bytes, where n is an int.
If the argument is omitted, None, or negative, reads and
returns all data until EOF.
If the argument is positive, and the underlying raw stream is
not 'interactive', multiple raw reads may be issued to satisfy
the byte count (unless EOF is reached first). But for
interactive raw streams (XXX and for pipes?), at most one raw
read will be issued, and a short result does not imply that
EOF is imminent.
Returns an empty bytes array on EOF.
Raises BlockingIOError if the underlying raw stream has no
data at the moment.
"""
self._unsupported("read")
def read1(self, n=None):
"""Read up to n bytes with at most one read() system call,
where n is an int.
"""
self._unsupported("read1")
def readinto(self, b):
"""Read up to len(b) bytes into bytearray b.
Like read(), this may issue multiple reads to the underlying raw
stream, unless the latter is 'interactive'.
Returns an int representing the number of bytes read (0 for EOF).
Raises BlockingIOError if the underlying raw stream has no
data at the moment.
"""
# XXX This ought to work with anything that supports the buffer API
data = self.read(len(b))
n = len(data)
try:
b[:n] = data
except TypeError as err:
import array
if not isinstance(b, array.array):
raise err
b[:n] = array.array('b', data)
return n
def write(self, b):
"""Write the given bytes buffer to the IO stream.
Return the number of bytes written, which is never less than
len(b).
Raises BlockingIOError if the buffer is full and the
underlying raw stream cannot accept more data at the moment.
"""
self._unsupported("write")
def detach(self):
"""
Separate the underlying raw stream from the buffer and return it.
After the raw stream has been detached, the buffer is in an unusable
state.
"""
self._unsupported("detach")
io.BufferedIOBase.register(BufferedIOBase)
class _BufferedIOMixin(BufferedIOBase):
"""A mixin implementation of BufferedIOBase with an underlying raw stream.
This passes most requests on to the underlying raw stream. It
does *not* provide implementations of read(), readinto() or
write().
"""
def __init__(self, raw):
self._raw = raw
### Positioning ###
def seek(self, pos, whence=0):
new_position = self.raw.seek(pos, whence)
if new_position < 0:
raise IOError("seek() returned an invalid position")
return new_position
def tell(self):
pos = self.raw.tell()
if pos < 0:
raise IOError("tell() returned an invalid position")
return pos
def truncate(self, pos=None):
# Flush the stream. We're mixing buffered I/O with lower-level I/O,
# and a flush may be necessary to synch both views of the current
# file state.
self.flush()
if pos is None:
pos = self.tell()
# XXX: Should seek() be used, instead of passing the position
# XXX directly to truncate?
return self.raw.truncate(pos)
### Flush and close ###
def flush(self):
if self.closed:
raise ValueError("flush of closed file")
self.raw.flush()
def close(self):
if self.raw is not None and not self.closed:
try:
# may raise BlockingIOError or BrokenPipeError etc
self.flush()
finally:
self.raw.close()
def detach(self):
if self.raw is None:
raise ValueError("raw stream already detached")
self.flush()
raw = self._raw
self._raw = None
return raw
### Inquiries ###
def seekable(self):
return self.raw.seekable()
def readable(self):
return self.raw.readable()
def writable(self):
return self.raw.writable()
@property
def raw(self):
return self._raw
@property
def closed(self):
return self.raw.closed
@property
def name(self):
return self.raw.name
@property
def mode(self):
return self.raw.mode
def __getstate__(self):
raise TypeError("can not serialize a '{0}' object"
.format(self.__class__.__name__))
def __repr__(self):
clsname = self.__class__.__name__
try:
name = self.name
except AttributeError:
return "<_pyio.{0}>".format(clsname)
else:
return "<_pyio.{0} name={1!r}>".format(clsname, name)
### Lower-level APIs ###
def fileno(self):
return self.raw.fileno()
def isatty(self):
return self.raw.isatty()
class BytesIO(BufferedIOBase):
"""Buffered I/O implementation using an in-memory bytes buffer."""
def __init__(self, initial_bytes=None):
buf = bytearray()
if initial_bytes is not None:
buf += initial_bytes
self._buffer = buf
self._pos = 0
def __getstate__(self):
if self.closed:
raise ValueError("__getstate__ on closed file")
return self.__dict__.copy()
def getvalue(self):
"""Return the bytes value (contents) of the buffer
"""
if self.closed:
raise ValueError("getvalue on closed file")
return bytes(self._buffer)
def getbuffer(self):
"""Return a readable and writable view of the buffer.
"""
return memoryview(self._buffer)
def read(self, n=None):
if self.closed:
raise ValueError("read from closed file")
if n is None:
n = -1
if n < 0:
n = len(self._buffer)
if len(self._buffer) <= self._pos:
return b""
newpos = min(len(self._buffer), self._pos + n)
b = self._buffer[self._pos : newpos]
self._pos = newpos
return bytes(b)
def read1(self, n):
"""This is the same as read.
"""
return self.read(n)
def write(self, b):
if self.closed:
raise ValueError("write to closed file")
if isinstance(b, str):
raise TypeError("can't write str to binary stream")
n = len(b)
if n == 0:
return 0
pos = self._pos
if pos > len(self._buffer):
# Inserts null bytes between the current end of the file
# and the new write position.
padding = b'\x00' * (pos - len(self._buffer))
self._buffer += padding
self._buffer[pos:pos + n] = b
self._pos += n
return n
def seek(self, pos, whence=0):
if self.closed:
raise ValueError("seek on closed file")
try:
pos.__index__
except AttributeError as err:
raise TypeError("an integer is required") from err
if whence == 0:
if pos < 0:
raise ValueError("negative seek position %r" % (pos,))
self._pos = pos
elif whence == 1:
self._pos = max(0, self._pos + pos)
elif whence == 2:
self._pos = max(0, len(self._buffer) + pos)
else:
raise ValueError("unsupported whence value")
return self._pos
def tell(self):
if self.closed:
raise ValueError("tell on closed file")
return self._pos
def truncate(self, pos=None):
if self.closed:
raise ValueError("truncate on closed file")
if pos is None:
pos = self._pos
else:
try:
pos.__index__
except AttributeError as err:
raise TypeError("an integer is required") from err
if pos < 0:
raise ValueError("negative truncate position %r" % (pos,))
del self._buffer[pos:]
return pos
def readable(self):
if self.closed:
raise ValueError("I/O operation on closed file.")
return True
def writable(self):
if self.closed:
raise ValueError("I/O operation on closed file.")
return True
def seekable(self):
if self.closed:
raise ValueError("I/O operation on closed file.")
return True
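# A short usage sketch of the in-memory buffer (illustrative values):
#
#     b = BytesIO(b'abc')
#     b.seek(0, 2)                   # move to the end of the buffer
#     b.write(b'def')
#     assert b.getvalue() == b'abcdef'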
class BufferedReader(_BufferedIOMixin):
"""BufferedReader(raw[, buffer_size])
    A buffer for a readable, sequential RawIOBase object.
The constructor creates a BufferedReader for the given readable raw
stream and buffer_size. If buffer_size is omitted, DEFAULT_BUFFER_SIZE
is used.
"""
def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE):
"""Create a new buffered reader using the given readable raw IO object.
"""
if not raw.readable():
raise IOError('"raw" argument must be readable.')
_BufferedIOMixin.__init__(self, raw)
if buffer_size <= 0:
raise ValueError("invalid buffer size")
self.buffer_size = buffer_size
self._reset_read_buf()
self._read_lock = Lock()
def _reset_read_buf(self):
self._read_buf = b""
self._read_pos = 0
def read(self, n=None):
"""Read n bytes.
Returns exactly n bytes of data unless the underlying raw IO
stream reaches EOF or if the call would block in non-blocking
mode. If n is negative, read until EOF or until read() would
block.
"""
if n is not None and n < -1:
raise ValueError("invalid number of bytes to read")
with self._read_lock:
return self._read_unlocked(n)
def _read_unlocked(self, n=None):
nodata_val = b""
empty_values = (b"", None)
buf = self._read_buf
pos = self._read_pos
# Special case for when the number of bytes to read is unspecified.
if n is None or n == -1:
self._reset_read_buf()
if hasattr(self.raw, 'readall'):
chunk = self.raw.readall()
if chunk is None:
return buf[pos:] or None
else:
return buf[pos:] + chunk
chunks = [buf[pos:]] # Strip the consumed bytes.
current_size = 0
while True:
# Read until EOF or until read() would block.
try:
chunk = self.raw.read()
except InterruptedError:
continue
if chunk in empty_values:
nodata_val = chunk
break
current_size += len(chunk)
chunks.append(chunk)
return b"".join(chunks) or nodata_val
# The number of bytes to read is specified, return at most n bytes.
avail = len(buf) - pos # Length of the available buffered data.
if n <= avail:
# Fast path: the data to read is fully buffered.
self._read_pos += n
return buf[pos:pos+n]
# Slow path: read from the stream until enough bytes are read,
# or until an EOF occurs or until read() would block.
chunks = [buf[pos:]]
wanted = max(self.buffer_size, n)
while avail < n:
try:
chunk = self.raw.read(wanted)
except InterruptedError:
continue
if chunk in empty_values:
nodata_val = chunk
break
avail += len(chunk)
chunks.append(chunk)
        # n is more than avail only when an EOF occurred or when
# read() would have blocked.
n = min(n, avail)
out = b"".join(chunks)
self._read_buf = out[n:] # Save the extra data in the buffer.
self._read_pos = 0
return out[:n] if out else nodata_val
def peek(self, n=0):
"""Returns buffered bytes without advancing the position.
The argument indicates a desired minimal number of bytes; we
do at most one raw read to satisfy it. We never return more
than self.buffer_size.
"""
with self._read_lock:
return self._peek_unlocked(n)
def _peek_unlocked(self, n=0):
want = min(n, self.buffer_size)
have = len(self._read_buf) - self._read_pos
if have < want or have <= 0:
to_read = self.buffer_size - have
while True:
try:
current = self.raw.read(to_read)
except InterruptedError:
continue
break
if current:
self._read_buf = self._read_buf[self._read_pos:] + current
self._read_pos = 0
return self._read_buf[self._read_pos:]
def read1(self, n):
"""Reads up to n bytes, with at most one read() system call."""
# Returns up to n bytes. If at least one byte is buffered, we
# only return buffered bytes. Otherwise, we do one raw read.
if n < 0:
raise ValueError("number of bytes to read must be positive")
if n == 0:
return b""
with self._read_lock:
self._peek_unlocked(1)
return self._read_unlocked(
min(n, len(self._read_buf) - self._read_pos))
def tell(self):
return _BufferedIOMixin.tell(self) - len(self._read_buf) + self._read_pos
def seek(self, pos, whence=0):
if whence not in valid_seek_flags:
raise ValueError("invalid whence value")
with self._read_lock:
if whence == 1:
pos -= len(self._read_buf) - self._read_pos
pos = _BufferedIOMixin.seek(self, pos, whence)
self._reset_read_buf()
return pos
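# Illustrative sketch of the peek()/read() contract (wrapping the in-memory BytesIO
# defined above as the raw stream):
#
#     r = BufferedReader(BytesIO(b'hello world'))
#     r.peek(5)                      # buffered bytes, position unchanged
#     r.read(5)                      # b'hello', served from the internal buffer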
class BufferedWriter(_BufferedIOMixin):
"""A buffer for a writeable sequential RawIO object.
The constructor creates a BufferedWriter for the given writeable raw
stream. If the buffer_size is not given, it defaults to
DEFAULT_BUFFER_SIZE.
"""
def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE):
if not raw.writable():
raise IOError('"raw" argument must be writable.')
_BufferedIOMixin.__init__(self, raw)
if buffer_size <= 0:
raise ValueError("invalid buffer size")
self.buffer_size = buffer_size
self._write_buf = bytearray()
self._write_lock = Lock()
def write(self, b):
if self.closed:
raise ValueError("write to closed file")
if isinstance(b, str):
raise TypeError("can't write str to binary stream")
with self._write_lock:
# XXX we can implement some more tricks to try and avoid
# partial writes
if len(self._write_buf) > self.buffer_size:
# We're full, so let's pre-flush the buffer. (This may
# raise BlockingIOError with characters_written == 0.)
self._flush_unlocked()
before = len(self._write_buf)
self._write_buf.extend(b)
written = len(self._write_buf) - before
if len(self._write_buf) > self.buffer_size:
try:
self._flush_unlocked()
except BlockingIOError as e:
if len(self._write_buf) > self.buffer_size:
# We've hit the buffer_size. We have to accept a partial
# write and cut back our buffer.
overage = len(self._write_buf) - self.buffer_size
written -= overage
self._write_buf = self._write_buf[:self.buffer_size]
raise BlockingIOError(e.errno, e.strerror, written)
return written
def truncate(self, pos=None):
with self._write_lock:
self._flush_unlocked()
if pos is None:
pos = self.raw.tell()
return self.raw.truncate(pos)
def flush(self):
with self._write_lock:
self._flush_unlocked()
def _flush_unlocked(self):
if self.closed:
raise ValueError("flush of closed file")
while self._write_buf:
try:
n = self.raw.write(self._write_buf)
except InterruptedError:
continue
except BlockingIOError:
raise RuntimeError("self.raw should implement RawIOBase: it "
"should not raise BlockingIOError")
if n is None:
raise BlockingIOError(
errno.EAGAIN,
"write could not complete without blocking", 0)
if n > len(self._write_buf) or n < 0:
raise IOError("write() returned incorrect number of bytes")
del self._write_buf[:n]
def tell(self):
return _BufferedIOMixin.tell(self) + len(self._write_buf)
def seek(self, pos, whence=0):
if whence not in valid_seek_flags:
raise ValueError("invalid whence value")
with self._write_lock:
self._flush_unlocked()
return _BufferedIOMixin.seek(self, pos, whence)
class BufferedRWPair(BufferedIOBase):
"""A buffered reader and writer object together.
A buffered reader object and buffered writer object put together to
form a sequential IO object that can read and write. This is typically
used with a socket or two-way pipe.
reader and writer are RawIOBase objects that are readable and
writeable respectively. If the buffer_size is omitted it defaults to
DEFAULT_BUFFER_SIZE.
"""
# XXX The usefulness of this (compared to having two separate IO
# objects) is questionable.
def __init__(self, reader, writer, buffer_size=DEFAULT_BUFFER_SIZE):
"""Constructor.
The arguments are two RawIO instances.
"""
if not reader.readable():
raise IOError('"reader" argument must be readable.')
if not writer.writable():
raise IOError('"writer" argument must be writable.')
self.reader = BufferedReader(reader, buffer_size)
self.writer = BufferedWriter(writer, buffer_size)
def read(self, n=None):
if n is None:
n = -1
return self.reader.read(n)
def readinto(self, b):
return self.reader.readinto(b)
def write(self, b):
return self.writer.write(b)
def peek(self, n=0):
return self.reader.peek(n)
def read1(self, n):
return self.reader.read1(n)
def readable(self):
return self.reader.readable()
def writable(self):
return self.writer.writable()
def flush(self):
return self.writer.flush()
def close(self):
self.writer.close()
self.reader.close()
def isatty(self):
return self.reader.isatty() or self.writer.isatty()
@property
def closed(self):
return self.writer.closed
class BufferedRandom(BufferedWriter, BufferedReader):
"""A buffered interface to random access streams.
The constructor creates a reader and writer for a seekable stream,
raw, given in the first argument. If the buffer_size is omitted it
defaults to DEFAULT_BUFFER_SIZE.
"""
def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE):
raw._checkSeekable()
BufferedReader.__init__(self, raw, buffer_size)
BufferedWriter.__init__(self, raw, buffer_size)
def seek(self, pos, whence=0):
if whence not in valid_seek_flags:
raise ValueError("invalid whence value")
self.flush()
if self._read_buf:
# Undo read ahead.
with self._read_lock:
self.raw.seek(self._read_pos - len(self._read_buf), 1)
# First do the raw seek, then empty the read buffer, so that
# if the raw seek fails, we don't lose buffered data forever.
pos = self.raw.seek(pos, whence)
with self._read_lock:
self._reset_read_buf()
if pos < 0:
raise IOError("seek() returned invalid position")
return pos
def tell(self):
if self._write_buf:
return BufferedWriter.tell(self)
else:
return BufferedReader.tell(self)
def truncate(self, pos=None):
if pos is None:
pos = self.tell()
# Use seek to flush the read buffer.
return BufferedWriter.truncate(self, pos)
def read(self, n=None):
if n is None:
n = -1
self.flush()
return BufferedReader.read(self, n)
def readinto(self, b):
self.flush()
return BufferedReader.readinto(self, b)
def peek(self, n=0):
self.flush()
return BufferedReader.peek(self, n)
def read1(self, n):
self.flush()
return BufferedReader.read1(self, n)
def write(self, b):
if self._read_buf:
# Undo readahead
with self._read_lock:
self.raw.seek(self._read_pos - len(self._read_buf), 1)
self._reset_read_buf()
return BufferedWriter.write(self, b)
class TextIOBase(IOBase):
"""Base class for text I/O.
This class provides a character and line based interface to stream
I/O. There is no readinto method because Python's character strings
are immutable. There is no public constructor.
"""
def read(self, n=-1):
"""Read at most n characters from stream, where n is an int.
Read from underlying buffer until we have n characters or we hit EOF.
If n is negative or omitted, read until EOF.
Returns a string.
"""
self._unsupported("read")
def write(self, s):
"""Write string s to stream and returning an int."""
self._unsupported("write")
def truncate(self, pos=None):
"""Truncate size to pos, where pos is an int."""
self._unsupported("truncate")
def readline(self):
"""Read until newline or EOF.
Returns an empty string if EOF is hit immediately.
"""
self._unsupported("readline")
def detach(self):
"""
Separate the underlying buffer from the TextIOBase and return it.
After the underlying buffer has been detached, the TextIO is in an
unusable state.
"""
self._unsupported("detach")
@property
def encoding(self):
"""Subclasses should override."""
return None
@property
def newlines(self):
"""Line endings translated so far.
Only line endings translated during reading are considered.
Subclasses should override.
"""
return None
@property
def errors(self):
"""Error setting of the decoder or encoder.
Subclasses should override."""
return None
io.TextIOBase.register(TextIOBase)
class IncrementalNewlineDecoder(codecs.IncrementalDecoder):
r"""Codec used when reading a file in universal newlines mode. It wraps
another incremental decoder, translating \r\n and \r into \n. It also
records the types of newlines encountered. When used with
translate=False, it ensures that the newline sequence is returned in
one piece.
"""
def __init__(self, decoder, translate, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors=errors)
self.translate = translate
self.decoder = decoder
self.seennl = 0
self.pendingcr = False
def decode(self, input, final=False):
# decode input (with the eventual \r from a previous pass)
if self.decoder is None:
output = input
else:
output = self.decoder.decode(input, final=final)
if self.pendingcr and (output or final):
output = "\r" + output
self.pendingcr = False
# retain last \r even when not translating data:
# then readline() is sure to get \r\n in one pass
if output.endswith("\r") and not final:
output = output[:-1]
self.pendingcr = True
# Record which newlines are read
crlf = output.count('\r\n')
cr = output.count('\r') - crlf
lf = output.count('\n') - crlf
self.seennl |= (lf and self._LF) | (cr and self._CR) \
| (crlf and self._CRLF)
if self.translate:
if crlf:
output = output.replace("\r\n", "\n")
if cr:
output = output.replace("\r", "\n")
return output
def getstate(self):
if self.decoder is None:
buf = b""
flag = 0
else:
buf, flag = self.decoder.getstate()
flag <<= 1
if self.pendingcr:
flag |= 1
return buf, flag
def setstate(self, state):
buf, flag = state
self.pendingcr = bool(flag & 1)
if self.decoder is not None:
self.decoder.setstate((buf, flag >> 1))
def reset(self):
self.seennl = 0
self.pendingcr = False
if self.decoder is not None:
self.decoder.reset()
_LF = 1
_CR = 2
_CRLF = 4
@property
def newlines(self):
return (None,
"\n",
"\r",
("\r", "\n"),
"\r\n",
("\n", "\r\n"),
("\r", "\r\n"),
("\r", "\n", "\r\n")
)[self.seennl]
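# A small sketch of the newline translation (decoder=None passes text through):
#
#     dec = IncrementalNewlineDecoder(decoder=None, translate=True)
#     dec.decode('a\r\nb\r')         # -> 'a\nb'; the trailing '\r' is held back
#     dec.decode('', final=True)     # -> '\n'; the pending '\r' is flushed
#     dec.newlines                   # -> ('\r', '\r\n')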
class TextIOWrapper(TextIOBase):
r"""Character and line based layer over a BufferedIOBase object, buffer.
encoding gives the name of the encoding that the stream will be
decoded or encoded with. It defaults to locale.getpreferredencoding(False).
errors determines the strictness of encoding and decoding (see the
codecs.register) and defaults to "strict".
newline can be None, '', '\n', '\r', or '\r\n'. It controls the
handling of line endings. If it is None, universal newlines is
enabled. With this enabled, on input, the lines endings '\n', '\r',
or '\r\n' are translated to '\n' before being returned to the
caller. Conversely, on output, '\n' is translated to the system
default line separator, os.linesep. If newline is any other of its
legal values, that newline becomes the newline when the file is read
and it is returned untranslated. On output, '\n' is converted to the
newline.
If line_buffering is True, a call to flush is implied when a call to
write contains a newline character.
"""
_CHUNK_SIZE = 2048
# The write_through argument has no effect here since this
# implementation always writes through. The argument is present only
# so that the signature can match the signature of the C version.
def __init__(self, buffer, encoding=None, errors=None, newline=None,
line_buffering=False, write_through=False):
if newline is not None and not isinstance(newline, str):
raise TypeError("illegal newline type: %r" % (type(newline),))
if newline not in (None, "", "\n", "\r", "\r\n"):
raise ValueError("illegal newline value: %r" % (newline,))
if encoding is None:
try:
encoding = os.device_encoding(buffer.fileno())
except (AttributeError, UnsupportedOperation):
pass
if encoding is None:
try:
import locale
except ImportError:
# Importing locale may fail if Python is being built
encoding = "ascii"
else:
encoding = locale.getpreferredencoding(False)
if not isinstance(encoding, str):
raise ValueError("invalid encoding: %r" % encoding)
if errors is None:
errors = "strict"
else:
if not isinstance(errors, str):
raise ValueError("invalid errors: %r" % errors)
self._buffer = buffer
self._line_buffering = line_buffering
self._encoding = encoding
self._errors = errors
self._readuniversal = not newline
self._readtranslate = newline is None
self._readnl = newline
self._writetranslate = newline != ''
self._writenl = newline or os.linesep
self._encoder = None
self._decoder = None
self._decoded_chars = '' # buffer for text returned from decoder
self._decoded_chars_used = 0 # offset into _decoded_chars for read()
self._snapshot = None # info for reconstructing decoder state
self._seekable = self._telling = self.buffer.seekable()
self._has_read1 = hasattr(self.buffer, 'read1')
self._b2cratio = 0.0
if self._seekable and self.writable():
position = self.buffer.tell()
if position != 0:
try:
self._get_encoder().setstate(0)
except LookupError:
# Sometimes the encoder doesn't exist
pass
# self._snapshot is either None, or a tuple (dec_flags, next_input)
# where dec_flags is the second (integer) item of the decoder state
# and next_input is the chunk of input bytes that comes next after the
# snapshot point. We use this to reconstruct decoder states in tell().
# Naming convention:
# - "bytes_..." for integer variables that count input bytes
# - "chars_..." for integer variables that count decoded characters
def __repr__(self):
result = "<_pyio.TextIOWrapper"
try:
name = self.name
except AttributeError:
pass
else:
result += " name={0!r}".format(name)
try:
mode = self.mode
except AttributeError:
pass
else:
result += " mode={0!r}".format(mode)
return result + " encoding={0!r}>".format(self.encoding)
@property
def encoding(self):
return self._encoding
@property
def errors(self):
return self._errors
@property
def line_buffering(self):
return self._line_buffering
@property
def buffer(self):
return self._buffer
def seekable(self):
if self.closed:
raise ValueError("I/O operation on closed file.")
return self._seekable
def readable(self):
return self.buffer.readable()
def writable(self):
return self.buffer.writable()
def flush(self):
self.buffer.flush()
self._telling = self._seekable
def close(self):
if self.buffer is not None and not self.closed:
try:
self.flush()
finally:
self.buffer.close()
@property
def closed(self):
return self.buffer.closed
@property
def name(self):
return self.buffer.name
def fileno(self):
return self.buffer.fileno()
def isatty(self):
return self.buffer.isatty()
def write(self, s):
'Write data, where s is a str'
if self.closed:
raise ValueError("write to closed file")
if not isinstance(s, str):
raise TypeError("can't write %s to text stream" %
s.__class__.__name__)
length = len(s)
haslf = (self._writetranslate or self._line_buffering) and "\n" in s
if haslf and self._writetranslate and self._writenl != "\n":
s = s.replace("\n", self._writenl)
encoder = self._encoder or self._get_encoder()
# XXX What if we were just reading?
b = encoder.encode(s)
self.buffer.write(b)
if self._line_buffering and (haslf or "\r" in s):
self.flush()
self._snapshot = None
if self._decoder:
self._decoder.reset()
return length
def _get_encoder(self):
make_encoder = codecs.getincrementalencoder(self._encoding)
self._encoder = make_encoder(self._errors)
return self._encoder
def _get_decoder(self):
make_decoder = codecs.getincrementaldecoder(self._encoding)
decoder = make_decoder(self._errors)
if self._readuniversal:
decoder = IncrementalNewlineDecoder(decoder, self._readtranslate)
self._decoder = decoder
return decoder
# The following three methods implement an ADT for _decoded_chars.
# Text returned from the decoder is buffered here until the client
# requests it by calling our read() or readline() method.
def _set_decoded_chars(self, chars):
"""Set the _decoded_chars buffer."""
self._decoded_chars = chars
self._decoded_chars_used = 0
def _get_decoded_chars(self, n=None):
"""Advance into the _decoded_chars buffer."""
offset = self._decoded_chars_used
if n is None:
chars = self._decoded_chars[offset:]
else:
chars = self._decoded_chars[offset:offset + n]
self._decoded_chars_used += len(chars)
return chars
def _rewind_decoded_chars(self, n):
"""Rewind the _decoded_chars buffer."""
if self._decoded_chars_used < n:
raise AssertionError("rewind decoded_chars out of bounds")
self._decoded_chars_used -= n
def _read_chunk(self):
"""
Read and decode the next chunk of data from the BufferedReader.
"""
# The return value is True unless EOF was reached. The decoded
# string is placed in self._decoded_chars (replacing its previous
# value). The entire input chunk is sent to the decoder, though
# some of it may remain buffered in the decoder, yet to be
# converted.
if self._decoder is None:
raise ValueError("no decoder")
if self._telling:
# To prepare for tell(), we need to snapshot a point in the
# file where the decoder's input buffer is empty.
dec_buffer, dec_flags = self._decoder.getstate()
# Given this, we know there was a valid snapshot point
# len(dec_buffer) bytes ago with decoder state (b'', dec_flags).
# Read a chunk, decode it, and put the result in self._decoded_chars.
if self._has_read1:
input_chunk = self.buffer.read1(self._CHUNK_SIZE)
else:
input_chunk = self.buffer.read(self._CHUNK_SIZE)
eof = not input_chunk
decoded_chars = self._decoder.decode(input_chunk, eof)
self._set_decoded_chars(decoded_chars)
if decoded_chars:
self._b2cratio = len(input_chunk) / len(self._decoded_chars)
else:
self._b2cratio = 0.0
if self._telling:
# At the snapshot point, len(dec_buffer) bytes before the read,
# the next input to be decoded is dec_buffer + input_chunk.
self._snapshot = (dec_flags, dec_buffer + input_chunk)
return not eof
def _pack_cookie(self, position, dec_flags=0,
bytes_to_feed=0, need_eof=0, chars_to_skip=0):
# The meaning of a tell() cookie is: seek to position, set the
# decoder flags to dec_flags, read bytes_to_feed bytes, feed them
# into the decoder with need_eof as the EOF flag, then skip
# chars_to_skip characters of the decoded result. For most simple
# decoders, tell() will often just give a byte offset in the file.
return (position | (dec_flags<<64) | (bytes_to_feed<<128) |
(chars_to_skip<<192) | bool(need_eof)<<256)
def _unpack_cookie(self, bigint):
rest, position = divmod(bigint, 1<<64)
rest, dec_flags = divmod(rest, 1<<64)
rest, bytes_to_feed = divmod(rest, 1<<64)
need_eof, chars_to_skip = divmod(rest, 1<<64)
return position, dec_flags, bytes_to_feed, need_eof, chars_to_skip
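    # Illustrative round trip of the cookie layout (hypothetical values):
    #
    #     cookie = self._pack_cookie(1024, dec_flags=1, bytes_to_feed=3, chars_to_skip=2)
    #     self._unpack_cookie(cookie)    # -> (1024, 1, 3, 0, 2)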
def tell(self):
if not self._seekable:
raise UnsupportedOperation("underlying stream is not seekable")
if not self._telling:
raise IOError("telling position disabled by next() call")
self.flush()
position = self.buffer.tell()
decoder = self._decoder
if decoder is None or self._snapshot is None:
if self._decoded_chars:
# This should never happen.
raise AssertionError("pending decoded text")
return position
# Skip backward to the snapshot point (see _read_chunk).
dec_flags, next_input = self._snapshot
position -= len(next_input)
# How many decoded characters have been used up since the snapshot?
chars_to_skip = self._decoded_chars_used
if chars_to_skip == 0:
# We haven't moved from the snapshot point.
return self._pack_cookie(position, dec_flags)
# Starting from the snapshot position, we will walk the decoder
# forward until it gives us enough decoded characters.
saved_state = decoder.getstate()
try:
# Fast search for an acceptable start point, close to our
# current pos.
# Rationale: calling decoder.decode() has a large overhead
# regardless of chunk size; we want the number of such calls to
# be O(1) in most situations (common decoders, non-crazy input).
# Actually, it will be exactly 1 for fixed-size codecs (all
# 8-bit codecs, also UTF-16 and UTF-32).
skip_bytes = int(self._b2cratio * chars_to_skip)
skip_back = 1
assert skip_bytes <= len(next_input)
while skip_bytes > 0:
decoder.setstate((b'', dec_flags))
                # Decode up to the tentative start point
n = len(decoder.decode(next_input[:skip_bytes]))
if n <= chars_to_skip:
b, d = decoder.getstate()
if not b:
# Before pos and no bytes buffered in decoder => OK
dec_flags = d
chars_to_skip -= n
break
# Skip back by buffered amount and reset heuristic
skip_bytes -= len(b)
skip_back = 1
else:
# We're too far ahead, skip back a bit
skip_bytes -= skip_back
skip_back = skip_back * 2
else:
skip_bytes = 0
decoder.setstate((b'', dec_flags))
# Note our initial start point.
start_pos = position + skip_bytes
start_flags = dec_flags
if chars_to_skip == 0:
# We haven't moved from the start point.
return self._pack_cookie(start_pos, start_flags)
# Feed the decoder one byte at a time. As we go, note the
# nearest "safe start point" before the current location
# (a point where the decoder has nothing buffered, so seek()
# can safely start from there and advance to this location).
bytes_fed = 0
need_eof = 0
# Chars decoded since `start_pos`
chars_decoded = 0
for i in range(skip_bytes, len(next_input)):
bytes_fed += 1
chars_decoded += len(decoder.decode(next_input[i:i+1]))
dec_buffer, dec_flags = decoder.getstate()
if not dec_buffer and chars_decoded <= chars_to_skip:
# Decoder buffer is empty, so this is a safe start point.
start_pos += bytes_fed
chars_to_skip -= chars_decoded
start_flags, bytes_fed, chars_decoded = dec_flags, 0, 0
if chars_decoded >= chars_to_skip:
break
else:
# We didn't get enough decoded data; signal EOF to get more.
chars_decoded += len(decoder.decode(b'', final=True))
need_eof = 1
if chars_decoded < chars_to_skip:
raise IOError("can't reconstruct logical file position")
# The returned cookie corresponds to the last safe start point.
return self._pack_cookie(
start_pos, start_flags, bytes_fed, need_eof, chars_to_skip)
finally:
decoder.setstate(saved_state)
def truncate(self, pos=None):
self.flush()
if pos is None:
pos = self.tell()
return self.buffer.truncate(pos)
def detach(self):
if self.buffer is None:
raise ValueError("buffer is already detached")
self.flush()
buffer = self._buffer
self._buffer = None
return buffer
def seek(self, cookie, whence=0):
if self.closed:
raise ValueError("tell on closed file")
if not self._seekable:
raise UnsupportedOperation("underlying stream is not seekable")
if whence == 1: # seek relative to current position
if cookie != 0:
raise UnsupportedOperation("can't do nonzero cur-relative seeks")
# Seeking to the current position should attempt to
# sync the underlying buffer with the current position.
whence = 0
cookie = self.tell()
if whence == 2: # seek relative to end of file
if cookie != 0:
raise UnsupportedOperation("can't do nonzero end-relative seeks")
self.flush()
position = self.buffer.seek(0, 2)
self._set_decoded_chars('')
self._snapshot = None
if self._decoder:
self._decoder.reset()
return position
if whence != 0:
raise ValueError("unsupported whence (%r)" % (whence,))
if cookie < 0:
raise ValueError("negative seek position %r" % (cookie,))
self.flush()
# The strategy of seek() is to go back to the safe start point
# and replay the effect of read(chars_to_skip) from there.
start_pos, dec_flags, bytes_to_feed, need_eof, chars_to_skip = \
self._unpack_cookie(cookie)
# Seek back to the safe start point.
self.buffer.seek(start_pos)
self._set_decoded_chars('')
self._snapshot = None
# Restore the decoder to its state from the safe start point.
if cookie == 0 and self._decoder:
self._decoder.reset()
elif self._decoder or dec_flags or chars_to_skip:
self._decoder = self._decoder or self._get_decoder()
self._decoder.setstate((b'', dec_flags))
self._snapshot = (dec_flags, b'')
if chars_to_skip:
# Just like _read_chunk, feed the decoder and save a snapshot.
input_chunk = self.buffer.read(bytes_to_feed)
self._set_decoded_chars(
self._decoder.decode(input_chunk, need_eof))
self._snapshot = (dec_flags, input_chunk)
# Skip chars_to_skip of the decoded characters.
if len(self._decoded_chars) < chars_to_skip:
raise IOError("can't restore logical file position")
self._decoded_chars_used = chars_to_skip
# Finally, reset the encoder (merely useful for proper BOM handling)
try:
encoder = self._encoder or self._get_encoder()
except LookupError:
# Sometimes the encoder doesn't exist
pass
else:
if cookie != 0:
encoder.setstate(0)
else:
encoder.reset()
return cookie
def read(self, n=None):
self._checkReadable()
if n is None:
n = -1
decoder = self._decoder or self._get_decoder()
try:
n.__index__
except AttributeError as err:
raise TypeError("an integer is required") from err
if n < 0:
# Read everything.
result = (self._get_decoded_chars() +
decoder.decode(self.buffer.read(), final=True))
self._set_decoded_chars('')
self._snapshot = None
return result
else:
# Keep reading chunks until we have n characters to return.
eof = False
result = self._get_decoded_chars(n)
while len(result) < n and not eof:
eof = not self._read_chunk()
result += self._get_decoded_chars(n - len(result))
return result
def __next__(self):
self._telling = False
line = self.readline()
if not line:
self._snapshot = None
self._telling = self._seekable
raise StopIteration
return line
def readline(self, limit=None):
if self.closed:
raise ValueError("read from closed file")
if limit is None:
limit = -1
elif not isinstance(limit, int):
raise TypeError("limit must be an integer")
# Grab all the decoded text (we will rewind any extra bits later).
line = self._get_decoded_chars()
start = 0
# Make the decoder if it doesn't already exist.
if not self._decoder:
self._get_decoder()
pos = endpos = None
while True:
if self._readtranslate:
# Newlines are already translated, only search for \n
pos = line.find('\n', start)
if pos >= 0:
endpos = pos + 1
break
else:
start = len(line)
elif self._readuniversal:
# Universal newline search. Find any of \r, \r\n, \n
# The decoder ensures that \r\n are not split in two pieces
# In C we'd look for these in parallel of course.
nlpos = line.find("\n", start)
crpos = line.find("\r", start)
if crpos == -1:
if nlpos == -1:
# Nothing found
start = len(line)
else:
# Found \n
endpos = nlpos + 1
break
elif nlpos == -1:
# Found lone \r
endpos = crpos + 1
break
elif nlpos < crpos:
# Found \n
endpos = nlpos + 1
break
elif nlpos == crpos + 1:
# Found \r\n
endpos = crpos + 2
break
else:
# Found \r
endpos = crpos + 1
break
else:
# non-universal
pos = line.find(self._readnl)
if pos >= 0:
endpos = pos + len(self._readnl)
break
if limit >= 0 and len(line) >= limit:
endpos = limit # reached length limit
break
            # No line ending seen yet - get more data
while self._read_chunk():
if self._decoded_chars:
break
if self._decoded_chars:
line += self._get_decoded_chars()
else:
# end of file
self._set_decoded_chars('')
self._snapshot = None
return line
if limit >= 0 and endpos > limit:
endpos = limit # don't exceed limit
# Rewind _decoded_chars to just after the line ending we found.
self._rewind_decoded_chars(len(line) - endpos)
return line[:endpos]
@property
def newlines(self):
return self._decoder.newlines if self._decoder else None
class StringIO(TextIOWrapper):
"""Text I/O implementation using an in-memory buffer.
    The initial_value argument sets the initial value of the object. The
    newline argument works like the one of TextIOWrapper's constructor.
"""
def __init__(self, initial_value="", newline="\n"):
super(StringIO, self).__init__(BytesIO(),
encoding="utf-8",
errors="strict",
newline=newline)
# Issue #5645: make universal newlines semantics the same as in the
# C version, even under Windows.
if newline is None:
self._writetranslate = False
if initial_value is not None:
if not isinstance(initial_value, str):
raise TypeError("initial_value must be str or None, not {0}"
.format(type(initial_value).__name__))
initial_value = str(initial_value)
self.write(initial_value)
self.seek(0)
def getvalue(self):
self.flush()
return self.buffer.getvalue().decode(self._encoding, self._errors)
def __repr__(self):
# TextIOWrapper tells the encoding in its repr. In StringIO,
        # that's an implementation detail.
return object.__repr__(self)
@property
def errors(self):
return None
@property
def encoding(self):
return None
def detach(self):
# This doesn't make sense on StringIO.
self._unsupported("detach")
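# Editorial addition, not part of the original module: a minimal smoke test
# sketching how StringIO and the tell()/seek() cookie machinery above are
# meant to be used. It only runs when this file is executed directly.
if __name__ == "__main__":
    _s = StringIO("hello\nworld\n")
    assert _s.readline() == "hello\n"
    _cookie = _s.tell()        # opaque cookie, not a plain byte offset in general
    assert _s.readline() == "world\n"
    _s.seek(_cookie)           # rewind to the saved position
    assert _s.read() == "world\n"
    print("StringIO smoke test passed")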
|
Foxfanmedium/python_training
|
refs/heads/master
|
OnlineCoursera/mail_ru/Python_1/Week_3/1_classes_1.py
|
1
|
# num = 13
# print(isinstance(num, int))  # check whether num is an instance of the int class
# print(isinstance(num, float))
# class Human:
# pass
# class Robot:
# """Данный класс позволяет создавать роботов"""
#
#
# print(Robot)
# print(dir(Robot))
# class Planet:
# pass
# planet = Planet()
# print(planet)
#
# solar_system = []
# for i in range(8):
# planet = Planet()
# solar_system.append(planet)
#
# print(solar_system)
#
# solar_system = {}
# for i in range(8):
# planet = Planet()
# solar_system[planet] = True
#
# print(solar_system)
# class Planet:
# def __init__(self, name):
# self.name = name
#
#
# earth = Planet('Earth')
# print(earth.name)
# print(earth)
# class Planet:
# def __init__(self, name):
# self.name = name
#
# def __str__(self):
# return self.name
#
#
# earth = Planet('Earth')
# print(earth)
class Planet:
def __init__(self, name):
self.name = name
def __repr__(self):
return f"Planet {self.name}"
solar_system = []
planet_names = ["Mercury", "Venus", "Earth",
"Mars", "Jupiter", "Saturn", "Uranus", "Neptune"]
for name in planet_names:
planet = Planet(name)
solar_system.append(planet)
print(solar_system)
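# Editorial note (expected output, assuming the insertion order above): the
# list repr calls Planet.__repr__ for each element, so this prints roughly
#   [Planet Mercury, Planet Venus, Planet Earth, Planet Mars, Planet Jupiter,
#    Planet Saturn, Planet Uranus, Planet Neptune]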
|
android-ia/platform_external_chromium_org
|
refs/heads/master
|
tools/cr/cr/commands/install.py
|
113
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A module for the install command."""
import cr
class InstallCommand(cr.Command):
"""The implementation of the install command.
This first uses Builder.Build to bring the target up to date, and then
installs it using Installer.Reinstall.
  The builder adds its own command line arguments, and you can use those to
select which builder is used. Selecting the skip builder
(using --builder=skip) bypasses the build stage.
"""
def __init__(self):
super(InstallCommand, self).__init__()
self.help = 'Install a binary'
def AddArguments(self, subparsers):
parser = super(InstallCommand, self).AddArguments(subparsers)
cr.Builder.AddArguments(self, parser)
cr.Installer.AddArguments(self, parser)
cr.Target.AddArguments(self, parser, allow_multiple=True)
self.ConsumeArgs(parser, 'the installer')
return parser
def Run(self):
targets = cr.Target.GetTargets()
if not cr.Installer.Skipping():
cr.Builder.Build(targets, [])
cr.Installer.Reinstall(targets, cr.context.remains)
|
effa/flocs
|
refs/heads/master
|
common/flow_factors.py
|
3
|
from enum import Enum
class FlowFactors(Enum):
"""
Common constants to denote factors affecting flow
"""
STUDENT_BIAS = 0
TASK_BIAS = 1
LOOPS = 2
CONDITIONS = 3
LOGIC_EXPR = 4
COLORS = 5
TOKENS = 6
PITS = 7
@classmethod
def game_factors(cls):
return [cls.COLORS, cls.TOKENS, cls.PITS]
@classmethod
def concept_factors(cls):
return [cls.LOOPS, cls.CONDITIONS, cls.LOGIC_EXPR]
@classmethod
def common_factors(cls):
return cls.concept_factors() + cls.game_factors()
@classmethod
def student_factors(cls):
return [cls.STUDENT_BIAS] + cls.common_factors()
@classmethod
def task_factors(cls):
return [cls.TASK_BIAS] + cls.common_factors()
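# Editorial addition, not part of the original module: a small self-check
# sketching how the factor groups compose by list concatenation. It only
# runs when the module is executed directly.
if __name__ == "__main__":
    assert FlowFactors.student_factors()[0] is FlowFactors.STUDENT_BIAS
    assert FlowFactors.task_factors()[0] is FlowFactors.TASK_BIAS
    assert set(FlowFactors.common_factors()) == {
        FlowFactors.LOOPS, FlowFactors.CONDITIONS, FlowFactors.LOGIC_EXPR,
        FlowFactors.COLORS, FlowFactors.TOKENS, FlowFactors.PITS,
    }
    print("FlowFactors grouping checks passed")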
|
devendermishrajio/nova
|
refs/heads/master
|
nova/api/openstack/compute/schemas/quota_classes.py
|
43
|
# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack.compute.schemas import quota_sets
update = {
'type': 'object',
'properties': {
        'quota_class_set': {
            'type': 'object',
            'properties': quota_sets.quota_resources,
'additionalProperties': False,
},
},
'required': ['quota_class_set'],
'additionalProperties': False,
}
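# Editorial illustration (the property names below are an assumption; the
# authoritative set comes from quota_sets.quota_resources): a request body
# such as
#   {"quota_class_set": {"instances": 50, "cores": 50}}
# validates against the schema above, while unknown keys inside
# quota_class_set, or any key next to it, are rejected because both levels
# set additionalProperties to False.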
|
LaunchKey/launchkey-python
|
refs/heads/master
|
tests/test_entities_directory.py
|
2
|
import unittest
from launchkey.entities.directory import Directory, DeviceStatus, Device, \
DirectoryUserTOTP
class TestDirectoryUserTOTP(unittest.TestCase):
def test_input_attributes(self):
totp = DirectoryUserTOTP({
"secret": "A Secret",
"algorithm": "An Algorithm",
"period": 30,
"digits": 6
})
self.assertEqual("A Secret", totp.secret)
self.assertEqual("An Algorithm", totp.algorithm)
self.assertEqual(30, totp.period)
self.assertEqual(6, totp.digits)
def test_equal_entities(self):
totp_1 = DirectoryUserTOTP({
"secret": "A Secret",
"algorithm": "An Algorithm",
"period": 30,
"digits": 6
})
totp_2 = DirectoryUserTOTP({
"secret": "A Secret",
"algorithm": "An Algorithm",
"period": 30,
"digits": 6
})
self.assertTrue(totp_1 == totp_2)
def test_different_secret(self):
totp_1 = DirectoryUserTOTP({
"secret": "A Secret",
"algorithm": "An Algorithm",
"period": 30,
"digits": 6
})
totp_2 = DirectoryUserTOTP({
"secret": "Another Secret",
"algorithm": "An Algorithm",
"period": 30,
"digits": 6
})
self.assertFalse(totp_1 == totp_2)
self.assertTrue(totp_1 != totp_2)
def test_different_algorithm(self):
totp_1 = DirectoryUserTOTP({
"secret": "A Secret",
"algorithm": "An Algorithm",
"period": 30,
"digits": 6
})
totp_2 = DirectoryUserTOTP({
"secret": "A Secret",
"algorithm": "Another Algorithm",
"period": 30,
"digits": 6
})
self.assertFalse(totp_1 == totp_2)
self.assertTrue(totp_1 != totp_2)
def test_different_period(self):
totp_1 = DirectoryUserTOTP({
"secret": "A Secret",
"algorithm": "An Algorithm",
"period": 30,
"digits": 6
})
totp_2 = DirectoryUserTOTP({
"secret": "A Secret",
"algorithm": "An Algorithm",
"period": 60,
"digits": 6
})
self.assertFalse(totp_1 == totp_2)
self.assertTrue(totp_1 != totp_2)
def test_different_digits(self):
totp_1 = DirectoryUserTOTP({
"secret": "A Secret",
"algorithm": "An Algorithm",
"period": 30,
"digits": 6
})
totp_2 = DirectoryUserTOTP({
"secret": "A Secret",
"algorithm": "An Algorithm",
"period": 30,
"digits": 8
})
self.assertFalse(totp_1 == totp_2)
self.assertTrue(totp_1 != totp_2)
def test_different_entity(self):
totp_1 = DirectoryUserTOTP({
"secret": "A Secret",
"algorithm": "An Algorithm",
"period": 30,
"digits": 6
})
totp_2 = "Not a Directory User TOTP"
self.assertFalse(totp_1 == totp_2)
self.assertTrue(totp_1 != totp_2)
def test_repr(self):
totp = DirectoryUserTOTP({
"secret": "A Secret",
"algorithm": "An Algorithm",
"period": 30,
"digits": 6
})
self.assertEqual(
'DirectoryUserTOTP <secret="A Secret", algorithm="An Algorithm", '
'period=30, digits=6>',
repr(totp)
)
self.assertEqual(
'DirectoryUserTOTP <secret="A Secret", algorithm="An Algorithm", '
'period=30, digits=6>',
str(totp)
)
class TestDirectoryEntity(unittest.TestCase):
def test_input_attributes(self):
directory = Directory(
{
"service_ids": ['740c36bd-43cb-4238-8f4b-a75307c5ef62'],
"sdk_keys": ["7acf6dc0-8db8-40e4-8045-2a73471adc58"],
"premium": True,
"name": "Directory Name",
"android_key": "A Key",
"ios_certificate_fingerprint": "A Fingerprint",
"active": True,
"id": "d36f81de-7683-48aa-b3cb-d4c6bffef3c5",
"denial_context_inquiry_enabled": True,
"webhook_url": "https://my.webhook.url/path"
}
)
self.assertEqual(directory.service_ids,
['740c36bd-43cb-4238-8f4b-a75307c5ef62'])
self.assertEqual(directory.sdk_keys,
["7acf6dc0-8db8-40e4-8045-2a73471adc58"])
self.assertTrue(directory.premium)
self.assertEqual(directory.name, "Directory Name")
self.assertEqual(directory.android_key, "A Key")
self.assertEqual(directory.ios_certificate_fingerprint,
"A Fingerprint")
self.assertTrue(directory.active)
self.assertEqual(directory.id, "d36f81de-7683-48aa-b3cb-d4c6bffef3c5")
self.assertTrue(directory.denial_context_inquiry_enabled)
def test_equal_directories(self):
directory_1 = Directory(
{
"service_ids": ['740c36bd-43cb-4238-8f4b-a75307c5ef62'],
"sdk_keys": ["7acf6dc0-8db8-40e4-8045-2a73471adc58"],
"premium": True,
"name": "Directory Name",
"android_key": "A Key",
"ios_certificate_fingerprint": "A Fingerprint",
"active": True,
"id": "d36f81de-7683-48aa-b3cb-d4c6bffef3c5",
"denial_context_inquiry_enabled": True,
"webhook_url": "https://my.webhook.url/path"
}
)
directory_2 = Directory(
{
"service_ids": ['740c36bd-43cb-4238-8f4b-a75307c5ef62'],
"sdk_keys": ["7acf6dc0-8db8-40e4-8045-2a73471adc58"],
"premium": True,
"name": "Directory Name",
"android_key": "A Key",
"ios_certificate_fingerprint": "A Fingerprint",
"active": True,
"id": "d36f81de-7683-48aa-b3cb-d4c6bffef3c5",
"denial_context_inquiry_enabled": True,
"webhook_url": "https://my.webhook.url/path"
}
)
self.assertTrue(directory_1 == directory_2)
def test_different_service_ids(self):
directory_1 = Directory(
{
"service_ids": ['740c36bd-43cb-4238-8f4b-a75307c5ef62'],
"sdk_keys": ["7acf6dc0-8db8-40e4-8045-2a73471adc58"],
"premium": True,
"name": "Directory Name",
"android_key": "A Key",
"ios_certificate_fingerprint": "A Fingerprint",
"active": True,
"id": "d36f81de-7683-48aa-b3cb-d4c6bffef3c5",
"denial_context_inquiry_enabled": True,
"webhook_url": "https://my.webhook.url/path"
}
)
directory_2 = Directory(
{
"service_ids": ['740c36bd-43cb-4238-8f4b-a75307c5ef61'],
"sdk_keys": ["7acf6dc0-8db8-40e4-8045-2a73471adc58"],
"premium": True,
"name": "Directory Name",
"android_key": "A Key",
"ios_certificate_fingerprint": "A Fingerprint",
"active": True,
"id": "d36f81de-7683-48aa-b3cb-d4c6bffef3c5",
"denial_context_inquiry_enabled": True,
"webhook_url": "https://my.webhook.url/path"
}
)
self.assertFalse(directory_1 == directory_2)
def test_different_sdk_keys(self):
directory_1 = Directory(
{
"service_ids": ['740c36bd-43cb-4238-8f4b-a75307c5ef62'],
"sdk_keys": ["7acf6dc0-8db8-40e4-8045-2a73471adc58"],
"premium": True,
"name": "Directory Name",
"android_key": "A Key",
"ios_certificate_fingerprint": "A Fingerprint",
"active": True,
"id": "d36f81de-7683-48aa-b3cb-d4c6bffef3c5",
"denial_context_inquiry_enabled": True,
"webhook_url": "https://my.webhook.url/path"
}
)
directory_2 = Directory(
{
"service_ids": ['740c36bd-43cb-4238-8f4b-a75307c5ef62'],
"sdk_keys": ["7acf6dc0-8db8-40e4-8045-2a73471adc57"],
"premium": True,
"name": "Directory Name",
"android_key": "A Key",
"ios_certificate_fingerprint": "A Fingerprint",
"active": True,
"id": "d36f81de-7683-48aa-b3cb-d4c6bffef3c5",
"denial_context_inquiry_enabled": True,
"webhook_url": "https://my.webhook.url/path"
}
)
self.assertFalse(directory_1 == directory_2)
def test_different_premium(self):
directory_1 = Directory(
{
"service_ids": ['740c36bd-43cb-4238-8f4b-a75307c5ef62'],
"sdk_keys": ["7acf6dc0-8db8-40e4-8045-2a73471adc58"],
"premium": True,
"name": "Directory Name",
"android_key": "A Key",
"ios_certificate_fingerprint": "A Fingerprint",
"active": True,
"id": "d36f81de-7683-48aa-b3cb-d4c6bffef3c5",
"denial_context_inquiry_enabled": True,
"webhook_url": "https://my.webhook.url/path"
}
)
directory_2 = Directory(
{
"service_ids": ['740c36bd-43cb-4238-8f4b-a75307c5ef62'],
"sdk_keys": ["7acf6dc0-8db8-40e4-8045-2a73471adc58"],
"premium": False,
"name": "Directory Name",
"android_key": "A Key",
"ios_certificate_fingerprint": "A Fingerprint",
"active": True,
"id": "d36f81de-7683-48aa-b3cb-d4c6bffef3c5",
"denial_context_inquiry_enabled": True,
"webhook_url": "https://my.webhook.url/path"
}
)
self.assertFalse(directory_1 == directory_2)
def test_different_name(self):
directory_1 = Directory(
{
"service_ids": ['740c36bd-43cb-4238-8f4b-a75307c5ef62'],
"sdk_keys": ["7acf6dc0-8db8-40e4-8045-2a73471adc58"],
"premium": True,
"name": "Directory Name",
"android_key": "A Key",
"ios_certificate_fingerprint": "A Fingerprint",
"active": True,
"id": "d36f81de-7683-48aa-b3cb-d4c6bffef3c5",
"denial_context_inquiry_enabled": True,
"webhook_url": "https://my.webhook.url/path"
}
)
directory_2 = Directory(
{
"service_ids": ['740c36bd-43cb-4238-8f4b-a75307c5ef62'],
"sdk_keys": ["7acf6dc0-8db8-40e4-8045-2a73471adc58"],
"premium": True,
"name": "Directory Name 2",
"android_key": "A Key",
"ios_certificate_fingerprint": "A Fingerprint",
"active": True,
"id": "d36f81de-7683-48aa-b3cb-d4c6bffef3c5",
"denial_context_inquiry_enabled": True,
"webhook_url": "https://my.webhook.url/path"
}
)
self.assertFalse(directory_1 == directory_2)
def test_different_android_key(self):
directory_1 = Directory(
{
"service_ids": ['740c36bd-43cb-4238-8f4b-a75307c5ef62'],
"sdk_keys": ["7acf6dc0-8db8-40e4-8045-2a73471adc58"],
"premium": True,
"name": "Directory Name",
"android_key": "A Key",
"ios_certificate_fingerprint": "A Fingerprint",
"active": True,
"id": "d36f81de-7683-48aa-b3cb-d4c6bffef3c5",
"denial_context_inquiry_enabled": True,
"webhook_url": "https://my.webhook.url/path"
}
)
directory_2 = Directory(
{
"service_ids": ['740c36bd-43cb-4238-8f4b-a75307c5ef62'],
"sdk_keys": ["7acf6dc0-8db8-40e4-8045-2a73471adc58"],
"premium": True,
"name": "Directory Name",
"android_key": "Another Key",
"ios_certificate_fingerprint": "A Fingerprint",
"active": True,
"id": "d36f81de-7683-48aa-b3cb-d4c6bffef3c5",
"denial_context_inquiry_enabled": True,
"webhook_url": "https://my.webhook.url/path"
}
)
self.assertFalse(directory_1 == directory_2)
def test_different_ios_cert_fingerprint(self):
directory_1 = Directory(
{
"service_ids": ['740c36bd-43cb-4238-8f4b-a75307c5ef62'],
"sdk_keys": ["7acf6dc0-8db8-40e4-8045-2a73471adc58"],
"premium": True,
"name": "Directory Name",
"android_key": "A Key",
"ios_certificate_fingerprint": "A Fingerprint",
"active": True,
"id": "d36f81de-7683-48aa-b3cb-d4c6bffef3c5",
"denial_context_inquiry_enabled": True,
"webhook_url": "https://my.webhook.url/path"
}
)
directory_2 = Directory(
{
"service_ids": ['740c36bd-43cb-4238-8f4b-a75307c5ef62'],
"sdk_keys": ["7acf6dc0-8db8-40e4-8045-2a73471adc58"],
"premium": True,
"name": "Directory Name",
"android_key": "A Key",
"ios_certificate_fingerprint": "Another Fingerprint",
"active": True,
"id": "d36f81de-7683-48aa-b3cb-d4c6bffef3c5",
"denial_context_inquiry_enabled": True,
"webhook_url": "https://my.webhook.url/path"
}
)
self.assertFalse(directory_1 == directory_2)
def test_different_active(self):
directory_1 = Directory(
{
"service_ids": ['740c36bd-43cb-4238-8f4b-a75307c5ef62'],
"sdk_keys": ["7acf6dc0-8db8-40e4-8045-2a73471adc58"],
"premium": True,
"name": "Directory Name",
"android_key": "A Key",
"ios_certificate_fingerprint": "A Fingerprint",
"active": True,
"id": "d36f81de-7683-48aa-b3cb-d4c6bffef3c5",
"denial_context_inquiry_enabled": True,
"webhook_url": "https://my.webhook.url/path"
}
)
directory_2 = Directory(
{
"service_ids": ['740c36bd-43cb-4238-8f4b-a75307c5ef62'],
"sdk_keys": ["7acf6dc0-8db8-40e4-8045-2a73471adc58"],
"premium": True,
"name": "Directory Name",
"android_key": "A Key",
"ios_certificate_fingerprint": "A Fingerprint",
"active": False,
"id": "d36f81de-7683-48aa-b3cb-d4c6bffef3c5",
"denial_context_inquiry_enabled": True,
"webhook_url": "https://my.webhook.url/path"
}
)
self.assertFalse(directory_1 == directory_2)
def test_different_id(self):
directory_1 = Directory(
{
"service_ids": ['740c36bd-43cb-4238-8f4b-a75307c5ef62'],
"sdk_keys": ["7acf6dc0-8db8-40e4-8045-2a73471adc58"],
"premium": True,
"name": "Directory Name",
"android_key": "A Key",
"ios_certificate_fingerprint": "A Fingerprint",
"active": True,
"id": "d36f81de-7683-48aa-b3cb-d4c6bffef3c5",
"denial_context_inquiry_enabled": True,
"webhook_url": "https://my.webhook.url/path"
}
)
directory_2 = Directory(
{
"service_ids": ['740c36bd-43cb-4238-8f4b-a75307c5ef62'],
"sdk_keys": ["7acf6dc0-8db8-40e4-8045-2a73471adc58"],
"premium": True,
"name": "Directory Name",
"android_key": "A Key",
"ios_certificate_fingerprint": "A Fingerprint",
"active": True,
"id": "d36f81de-7683-48aa-b3cb-d4c6bffef3c4",
"denial_context_inquiry_enabled": True,
"webhook_url": "https://my.webhook.url/path"
}
)
self.assertFalse(directory_1 == directory_2)
def test_different_denial_context_inquery_enabled(self):
directory_1 = Directory(
{
"service_ids": ['740c36bd-43cb-4238-8f4b-a75307c5ef62'],
"sdk_keys": ["7acf6dc0-8db8-40e4-8045-2a73471adc58"],
"premium": True,
"name": "Directory Name",
"android_key": "A Key",
"ios_certificate_fingerprint": "A Fingerprint",
"active": True,
"id": "d36f81de-7683-48aa-b3cb-d4c6bffef3c5",
"denial_context_inquiry_enabled": True,
"webhook_url": "https://my.webhook.url/path"
}
)
directory_2 = Directory(
{
"service_ids": ['740c36bd-43cb-4238-8f4b-a75307c5ef62'],
"sdk_keys": ["7acf6dc0-8db8-40e4-8045-2a73471adc58"],
"premium": True,
"name": "Directory Name",
"android_key": "A Key",
"ios_certificate_fingerprint": "A Fingerprint",
"active": True,
"id": "d36f81de-7683-48aa-b3cb-d4c6bffef3c5",
"denial_context_inquiry_enabled": False,
"webhook_url": "https://my.webhook.url/path"
}
)
self.assertFalse(directory_1 == directory_2)
def test_different_webhook_url(self):
directory_1 = Directory(
{
"service_ids": ['740c36bd-43cb-4238-8f4b-a75307c5ef62'],
"sdk_keys": ["7acf6dc0-8db8-40e4-8045-2a73471adc58"],
"premium": True,
"name": "Directory Name",
"android_key": "A Key",
"ios_certificate_fingerprint": "A Fingerprint",
"active": True,
"id": "d36f81de-7683-48aa-b3cb-d4c6bffef3c5",
"denial_context_inquiry_enabled": True,
"webhook_url": "https://my.webhook.url/path"
}
)
directory_2 = Directory(
{
"service_ids": ['740c36bd-43cb-4238-8f4b-a75307c5ef62'],
"sdk_keys": ["7acf6dc0-8db8-40e4-8045-2a73471adc58"],
"premium": True,
"name": "Directory Name",
"android_key": "A Key",
"ios_certificate_fingerprint": "A Fingerprint",
"active": True,
"id": "d36f81de-7683-48aa-b3cb-d4c6bffef3c5",
"denial_context_inquiry_enabled": False,
"webhook_url": "https://my.webhook.url/otherpath"
}
)
self.assertFalse(directory_1 == directory_2)
def test_not_equal(self):
directory_1 = Directory(
{
"service_ids": ['740c36bd-43cb-4238-8f4b-a75307c5ef62'],
"sdk_keys": ["7acf6dc0-8db8-40e4-8045-2a73471adc58"],
"premium": True,
"name": "Directory Name",
"android_key": "A Key",
"ios_certificate_fingerprint": "A Fingerprint",
"active": True,
"id": "d36f81de-7683-48aa-b3cb-d4c6bffef3c5",
"denial_context_inquiry_enabled": True,
"webhook_url": "https://my.webhook.url/path"
}
)
directory_2 = Directory(
{
"service_ids": ['740c36bd-43cb-4238-8f4b-a75307c5ef62'],
"sdk_keys": ["7acf6dc0-8db8-40e4-8045-2a73471adc58"],
"premium": True,
"name": "Directory Name",
"android_key": "A Key",
"ios_certificate_fingerprint": "A Fingerprint",
"active": True,
"id": "d36f81de-7683-48aa-b3cb-d4c6bffef3c5",
"denial_context_inquiry_enabled": False,
"webhook_url": "https://my.webhook.url/otherpath"
}
)
self.assertTrue(directory_1 != directory_2)
def test_different_type(self):
directory = Directory(
{
"service_ids": ['740c36bd-43cb-4238-8f4b-a75307c5ef62'],
"sdk_keys": ["7acf6dc0-8db8-40e4-8045-2a73471adc58"],
"premium": True,
"name": "Directory Name",
"android_key": "A Key",
"ios_certificate_fingerprint": "A Fingerprint",
"active": True,
"id": "d36f81de-7683-48aa-b3cb-d4c6bffef3c5",
"denial_context_inquiry_enabled": True,
"webhook_url": "https://my.webhook.url/path"
}
)
not_directory = {
"service_ids": ['740c36bd-43cb-4238-8f4b-a75307c5ef62'],
"sdk_keys": ["7acf6dc0-8db8-40e4-8045-2a73471adc58"],
"premium": True,
"name": "Directory Name",
"android_key": "A Key",
"ios_certificate_fingerprint": "A Fingerprint",
"active": True,
"id": "d36f81de-7683-48aa-b3cb-d4c6bffef3c5",
"denial_context_inquiry_enabled": False,
"webhook_url": "https://my.webhook.url/path"
}
self.assertFalse(directory == not_directory)
def test_repr(self):
directory = Directory(
{
"service_ids": ['740c36bd-43cb-4238-8f4b-a75307c5ef62'],
"sdk_keys": ["7acf6dc0-8db8-40e4-8045-2a73471adc58"],
"premium": True,
"name": "Directory Name",
"android_key": "A Key",
"ios_certificate_fingerprint": "A Fingerprint",
"active": True,
"id": "d36f81de-7683-48aa-b3cb-d4c6bffef3c5",
"denial_context_inquiry_enabled": True,
"webhook_url": "https://my.webhook.url/path"
}
)
self.assertEqual(
str(directory),
'Directory <id="d36f81de-7683-48aa-b3cb-d4c6bffef3c5", '
'name="Directory Name", '
'service_ids=[\'740c36bd-43cb-4238-8f4b-a75307c5ef62\'], '
'sdk_keys=[\'7acf6dc0-8db8-40e4-8045-2a73471adc58\'], '
'premium=True, ios_certificate_fingerprint="A Fingerprint", '
'active=True, denial_context_inquiry_enabled=True, '
'webhook_url="https://my.webhook.url/path">'
)
class TestDeviceEntity(unittest.TestCase):
def test_device_repr(self):
device = Device(
{
"id": '740c36bd-43cb-4238-8f4b-a75307c5ef62',
"name": "A Device",
"status": 1,
"type": "Android"
}
)
self.assertEqual(
str(device),
'Device <id="740c36bd-43cb-4238-8f4b-a75307c5ef62", '
'name="A Device", status=DeviceStatus <status_code="LINKED", '
'is_active=True>, type="Android">'
)
class TestDeviceStatusEntity(unittest.TestCase):
def test_0(self):
status = DeviceStatus(0)
self.assertEqual(status.status_code, "LINK_PENDING")
self.assertFalse(status.is_active)
def test_device_repr_0(self):
device = DeviceStatus(0)
self.assertEqual(
str(device),
'DeviceStatus <status_code="LINK_PENDING", is_active=False>'
)
def test_1(self):
status = DeviceStatus(1)
self.assertEqual(status.status_code, "LINKED")
self.assertTrue(status.is_active)
def test_device_repr_1(self):
device = DeviceStatus(1)
self.assertEqual(
str(device),
'DeviceStatus <status_code="LINKED", is_active=True>'
)
def test_2(self):
status = DeviceStatus(2)
self.assertEqual(status.status_code, "UNLINK_PENDING")
self.assertTrue(status.is_active)
def test_device_repr_2(self):
device = DeviceStatus(2)
self.assertEqual(
str(device),
'DeviceStatus <status_code="UNLINK_PENDING", is_active=True>'
)
|
glwu/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Lib/xmlrpc/__init__.py
|
1383
|
# This directory is a Python package.
|
rspavel/spack
|
refs/heads/develop
|
var/spack/repos/builtin/packages/memcached/package.py
|
5
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Memcached(AutotoolsPackage):
"""
Memcached is a high performance multithreaded event-based key/value
cache store intended to be used in a distributed system.
"""
homepage = "https://github.com/memcached/memcached"
url = "https://github.com/memcached/memcached/archive/1.5.20.tar.gz"
version('1.5.20', sha256='ee93aff47123e0b464e9f007b651b14c89c19e0c20352d8d1c399febbb038cb6')
version('1.5.19', sha256='7af7a2e9b1f468d7f6056f23ce21c04936ce6891f8cb8cd54e133f489a8226e8')
version('1.5.18', sha256='0bf8154f53d2781164421acd195a1665ac2f77316263c3526206c38e402c4b0d')
version('1.5.17', sha256='cb30ad851e95c0190e6b7e59695f1ed2e51d65a9e6c82c893e043dc066053377')
version('1.5.16', sha256='a0c1a7e72186722d7c0e9d5527a63beb339b933d768687f183e163adf935c662')
version('1.5.15', sha256='4ef8627308e99bdd4200ef4f260fbcdd65a4ba634bd593ca02dbbfd71222e9f7')
version('1.5.14', sha256='ae8ed2ed853b840a8430d8575d4e91b87c550b111874b416c551001403ac6a74')
version('1.5.13', sha256='ae59a8b49be17afb344e57c8a8d64f9ae38b6efbc3f9115a422dbcb2b23795fc')
depends_on('autoconf', type='build')
depends_on('automake', type='build')
depends_on('libtool', type='build')
depends_on('m4', type='build')
depends_on('libevent', type='build')
def autoreconf(self, spec, prefix):
sh = which('sh')
sh('./autogen.sh')
autoreconf('--install', '--verbose', '--force')
def configure_args(self):
args = ['--with-libevent={0}'.format(self.spec['libevent'].prefix)]
return args
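# Editorial note, not part of the recipe: once this package is available in a
# Spack repository, a typical (hedged) invocation looks like
#   spack install memcached@1.5.20
#   spack load memcached
# with the libevent build dependency resolved automatically by the solver.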
|
magvugr/AT
|
refs/heads/master
|
EntVirtual/lib/python2.7/site-packages/setuptools/command/install_lib.py
|
454
|
from distutils.command.install_lib import install_lib as _install_lib
import os
class install_lib(_install_lib):
"""Don't add compiled flags to filenames of non-Python files"""
def _bytecode_filenames (self, py_filenames):
bytecode_files = []
for py_file in py_filenames:
if not py_file.endswith('.py'):
continue
if self.compile:
bytecode_files.append(py_file + "c")
if self.optimize > 0:
bytecode_files.append(py_file + "o")
return bytecode_files
def run(self):
self.build()
outfiles = self.install()
if outfiles is not None:
# always compile, in case we have any extension stubs to deal with
self.byte_compile(outfiles)
def get_exclusions(self):
exclude = {}
nsp = self.distribution.namespace_packages
if (nsp and self.get_finalized_command('install')
.single_version_externally_managed
):
for pkg in nsp:
parts = pkg.split('.')
while parts:
pkgdir = os.path.join(self.install_dir, *parts)
for f in '__init__.py', '__init__.pyc', '__init__.pyo':
exclude[os.path.join(pkgdir,f)] = 1
parts.pop()
return exclude
def copy_tree(
self, infile, outfile,
preserve_mode=1, preserve_times=1, preserve_symlinks=0, level=1
):
assert preserve_mode and preserve_times and not preserve_symlinks
exclude = self.get_exclusions()
if not exclude:
return _install_lib.copy_tree(self, infile, outfile)
# Exclude namespace package __init__.py* files from the output
from setuptools.archive_util import unpack_directory
from distutils import log
outfiles = []
def pf(src, dst):
if dst in exclude:
log.warn("Skipping installation of %s (namespace package)",dst)
return False
log.info("copying %s -> %s", src, os.path.dirname(dst))
outfiles.append(dst)
return dst
unpack_directory(infile, outfile, pf)
return outfiles
def get_outputs(self):
outputs = _install_lib.get_outputs(self)
exclude = self.get_exclusions()
if exclude:
return [f for f in outputs if f not in exclude]
return outputs
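# Editorial illustration of the _bytecode_filenames override above (not part
# of setuptools): with compile=True and optimize=1, only real .py sources get
# bytecode companions, so a data file passes through untouched:
#   cmd._bytecode_filenames(['pkg/mod.py', 'pkg/data.dat'])
#   -> ['pkg/mod.pyc', 'pkg/mod.pyo']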
|
0909023/Dev6B_English_Website
|
refs/heads/master
|
DjangoWebProject1/DjangoWebProject1/env/Scripts/django-admin.py
|
1
|
#!C:\Users\Alllexa\Source\Repos\Dev6B_English_Website\DjangoWebProject1\DjangoWebProject1\env\Scripts\python.exe
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
auduny/home-assistant
|
refs/heads/dev
|
homeassistant/components/thermoworks_smoke/sensor.py
|
7
|
"""
Support for getting the state of a Thermoworks Smoke Thermometer.
Requires Smoke Gateway Wifi with an internet connection.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.thermoworks_smoke/
"""
import logging
from requests import RequestException
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import TEMP_FAHRENHEIT, CONF_EMAIL, CONF_PASSWORD,\
CONF_MONITORED_CONDITIONS, CONF_EXCLUDE, ATTR_BATTERY_LEVEL
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
PROBE_1 = 'probe1'
PROBE_2 = 'probe2'
PROBE_1_MIN = 'probe1_min'
PROBE_1_MAX = 'probe1_max'
PROBE_2_MIN = 'probe2_min'
PROBE_2_MAX = 'probe2_max'
BATTERY_LEVEL = 'battery'
FIRMWARE = 'firmware'
SERIAL_REGEX = '^(?:[0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2}$'
# map types to labels
SENSOR_TYPES = {
PROBE_1: 'Probe 1',
PROBE_2: 'Probe 2',
PROBE_1_MIN: 'Probe 1 Min',
PROBE_1_MAX: 'Probe 1 Max',
PROBE_2_MIN: 'Probe 2 Min',
PROBE_2_MAX: 'Probe 2 Max',
}
# exclude these keys from thermoworks data
EXCLUDE_KEYS = [
FIRMWARE
]
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_EMAIL): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_MONITORED_CONDITIONS, default=[PROBE_1, PROBE_2]):
vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
vol.Optional(CONF_EXCLUDE, default=[]):
vol.All(cv.ensure_list, [cv.matches_regex(SERIAL_REGEX)]),
})
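# Editorial illustration (values are placeholders, not real credentials): a
# configuration.yaml entry accepted by PLATFORM_SCHEMA above looks roughly like
#
#   sensor:
#     - platform: thermoworks_smoke
#       email: YOUR_EMAIL
#       password: YOUR_PASSWORD
#       monitored_conditions:
#         - probe1
#         - probe2
#       exclude:
#         - "aa:bb:cc:dd:ee:ff"   # gateway serial numbers to skip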
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the thermoworks sensor."""
import thermoworks_smoke
from requests.exceptions import HTTPError
email = config[CONF_EMAIL]
password = config[CONF_PASSWORD]
monitored_variables = config[CONF_MONITORED_CONDITIONS]
excluded = config[CONF_EXCLUDE]
try:
mgr = thermoworks_smoke.initialize_app(email, password, True, excluded)
# list of sensor devices
dev = []
# get list of registered devices
for serial in mgr.serials():
for variable in monitored_variables:
dev.append(ThermoworksSmokeSensor(variable, serial, mgr))
add_entities(dev, True)
except HTTPError as error:
msg = "{}".format(error.strerror)
if 'EMAIL_NOT_FOUND' in msg or \
'INVALID_PASSWORD' in msg:
_LOGGER.error("Invalid email and password combination")
else:
_LOGGER.error(msg)
class ThermoworksSmokeSensor(Entity):
"""Implementation of a thermoworks smoke sensor."""
def __init__(self, sensor_type, serial, mgr):
"""Initialize the sensor."""
self._name = "{name} {sensor}".format(
name=mgr.name(serial), sensor=SENSOR_TYPES[sensor_type])
self.type = sensor_type
self._state = None
self._attributes = {}
self._unit_of_measurement = TEMP_FAHRENHEIT
self._unique_id = "{serial}-{type}".format(
serial=serial, type=sensor_type)
self.serial = serial
self.mgr = mgr
self.update_unit()
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unique_id(self):
"""Return the unique id for the sensor."""
return self._unique_id
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def device_state_attributes(self):
"""Return the state attributes."""
return self._attributes
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this sensor."""
return self._unit_of_measurement
def update_unit(self):
"""Set the units from the data."""
if PROBE_2 in self.type:
self._unit_of_measurement = self.mgr.units(self.serial, PROBE_2)
else:
self._unit_of_measurement = self.mgr.units(self.serial, PROBE_1)
def update(self):
"""Get the monitored data from firebase."""
from stringcase import camelcase, snakecase
try:
values = self.mgr.data(self.serial)
# set state from data based on type of sensor
self._state = values.get(camelcase(self.type))
# set units
self.update_unit()
# set basic attributes for all sensors
self._attributes = {
'time': values['time'],
'localtime': values['localtime']
}
# set extended attributes for main probe sensors
if self.type in [PROBE_1, PROBE_2]:
for key, val in values.items():
# add all attributes that don't contain any probe name
# or contain a matching probe name
if (
(self.type == PROBE_1 and key.find(PROBE_2) == -1)
or
(self.type == PROBE_2 and key.find(PROBE_1) == -1)
):
if key == BATTERY_LEVEL:
key = ATTR_BATTERY_LEVEL
else:
# strip probe label and convert to snake_case
key = snakecase(key.replace(self.type, ''))
# add to attrs
if key and key not in EXCLUDE_KEYS:
self._attributes[key] = val
# store actual unit because attributes are not converted
self._attributes['unit_of_min_max'] = self._unit_of_measurement
except (RequestException, ValueError, KeyError):
_LOGGER.warning("Could not update status for %s", self.name)
|
emencia/emencia-crma
|
refs/heads/master
|
crma/admin.py
|
1
|
"""Admin for pilot_academy.logs"""
# Import from the Standard Library
from os.path import join
# Import from Django
from django.conf import settings
from django.conf.urls import patterns, url
from django.contrib.admin import ModelAdmin
from django.contrib import admin
from django import forms
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.utils.translation import ugettext as _
# Import from here
from crma import models
# Import from Django Model-Translation
from modeltranslation.admin import TranslationAdmin
#
# EMAILS
#
class EmailAdmin(TranslationAdmin):
list_display = ('channel', 'subject', 'interval', 'tag', 'enabled')
list_filter = ('channel', 'enabled')
class Media:
js = (
'modeltranslation/js/force_jquery.js',
join(settings.STATIC_URL, 'js/jquery-ui.js'),
'modeltranslation/js/tabbed_translation_fields.js',
)
css = {
'screen': ('modeltranslation/css/tabbed_translation_fields.css',),
}
def get_urls(self):
urls = super(EmailAdmin, self).get_urls()
preview = self.admin_site.admin_view(self.preview)
return patterns('', url(r'^(?P<id>\d+)/preview/$', preview)) + urls
def preview(self, request, *args, **kw):
email = get_object_or_404(models.Email, id=kw['id'])
lang = request.GET.get('lang')
body = getattr(email, 'body_%s' % lang) if lang else email.body
return HttpResponse(body)
class EmailSchedulerAdmin(ModelAdmin):
list_display = ('from_address', 'contact', 'email', 'lang',
'ctime', 'sched_time', 'sent_time', 'status')
search_fields = ('from_address', 'email__subject', 'contact__email')
list_filter = ('status', 'lang', 'email__channel', 'email')
readonly_fields = ('extra_context', 'trace_error')
raw_id_fields = ('contact',)
fieldsets = (
(_("Emails"), {
'fields': (
'from_address', 'contact', 'email', 'lang', 'sched_time'
)}),
(_("Status"), {
'fields': (
('sent_time', 'status'),
)}),
(_("Context"), {
'fields': (
('extra_context', 'trace_error'),
)}),
)
def get_urls(self):
urls = super(EmailSchedulerAdmin, self).get_urls()
preview = self.admin_site.admin_view(self.preview)
return patterns('', url(r'^(?P<id>\d+)/preview/$', preview)) + urls
def preview(self, request, *args, **kw):
email = get_object_or_404(models.EmailScheduler, id=kw['id'])
return email.render(request)
class SubscriptionAdmin(ModelAdmin):
list_display = ('channel', 'contact', 'state')
search_fields = ('channel__title', 'contact__email')
list_filter = ('channel', 'state')
raw_id_fields = ('contact',)
fieldsets = (
(_("Emails"), {
'fields': (
'channel', 'contact',
)}),
(_("Subscribed info"), {
'fields': (
('state', 'unsubscribe_key'),
)}),
)
readonly_fields = ('unsubscribe_key', )
class ChannelAdmin(TranslationAdmin):
list_display = ('title', 'from_address', 'required')
list_filter = ('title', 'from_address', 'required')
prepopulated_fields = {"channel_id": ("title",)}
class Media:
js = (
'modeltranslation/js/force_jquery.js',
join(settings.STATIC_URL, 'js/jquery-ui.js'),
'modeltranslation/js/tabbed_translation_fields.js',
)
css = {
'screen': ('modeltranslation/css/tabbed_translation_fields.css',),
}
class ContactAdmin(ModelAdmin):
list_display = ('email', 'lang')
list_filter = ('lang',)
search_fields = ('email',)
class MembersInline(admin.TabularInline):
model = models.MailingList.members.through
extra = 0
raw_id_fields = ('contact',)
class MailingListForm(forms.ModelForm):
csv = forms.FileField(
required=False,
help_text=u'The CSV file must have a header line with two columns: '
u'"email" and "lang".',
)
class Meta:
model = models.MailingList
class MailingListAdmin(ModelAdmin):
list_display = ('title',)
filter_horizontal = ['members']
form = MailingListForm
exclude = ('members',)
inlines = [MembersInline]
def save_related(self, request, form, formsets, change):
proxy = super(MailingListAdmin, self)
proxy.save_related(request, form, formsets, change)
csv = form.cleaned_data['csv']
if csv:
obj = form.instance
obj.import_contacts(csv)
admin.site.register(models.Channel, ChannelAdmin)
admin.site.register(models.Email, EmailAdmin)
admin.site.register(models.EmailScheduler, EmailSchedulerAdmin)
admin.site.register(models.Subscription, SubscriptionAdmin)
admin.site.register(models.Contact, ContactAdmin)
admin.site.register(models.MailingList, MailingListAdmin)
|
NeovaHealth/odoo
|
refs/heads/8.0
|
addons/base_report_designer/plugin/openerp_report_designer/bin/script/compile_all.py
|
384
|
#########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer d29583@groovegarden.com
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
#############################################################################
import compileall
compileall.compile_dir('package')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
srkukarni/heron
|
refs/heads/master
|
integration_test/src/python/local_test_runner/test_scale_up.py
|
6
|
# Copyright 2016 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test_scale_up.py"""
import logging
import subprocess
from ..common import status
import test_template
class TestScaleUp(test_template.TestTemplate):
expected_container_count = 1
expected_instance_count = 3
def get_expected_container_count(self):
return self.expected_container_count
def get_expected_min_instance_count(self):
return self.expected_instance_count
def execute_test_case(self):
scale_up(self.params['cliPath'], self.params['cluster'], self.params['topologyName'])
self.expected_container_count += 1
self.expected_instance_count += 1
def pre_check_results(self, physical_plan_json):
instances = physical_plan_json['instances']
instance_count = len(instances)
if instance_count != self.expected_instance_count:
raise status.TestFailure("Found %s instances but expected %s: %s" %
(instance_count, self.expected_instance_count, instances))
def scale_up(heron_cli_path, test_cluster, topology_name):
splitcmd = [
heron_cli_path, 'update', '--verbose', test_cluster, topology_name,
'--component-parallelism=identity-bolt:2'
]
logging.info("Increasing number of component instances: %s", splitcmd)
if subprocess.call(splitcmd) != 0:
raise status.TestFailure("Unable to update topology %s" % topology_name)
logging.info("Increased number of component instances")
|
kouk/boto
|
refs/heads/develop
|
boto/mashups/__init__.py
|
782
|
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
|
patmun/pynetdicom
|
refs/heads/master
|
netdicom/test/wlmscp.py
|
2
|
"""
Worklist Manager SCP example.
This demonstrates a simple application entity that supports a number of
Query service classes. For this example to work, you need an SCU
sending to this host on the specified port.
usage: python wlmscp.py
"""
import sys
import datetime
import netdicom
import dcmtkscu
# callbacks
def OnAssociateRequest(association):
print "association requested", association
def OnReceiveEcho(self):
print "Echo received"
def OnReceiveFind(self, ds):
print "Received C-FIND"
for ii in range(5):
ds.PatientsName = 'titi' + str(ii)
print "sending fake response: patient name: %s" % ds.PatientsName
print ds
print
yield ds, 0xFF00
# responding to find request
# setup AE
print 'Create AE...'
MyAE = netdicom.AE('localhost', 9999,
SOPSCU=[],
SOPSCP=[netdicom.VerificationSOPClass,
netdicom.ModalityWorklistInformationFindSOPClass]
)
MyAE.OnAssociateRequest = OnAssociateRequest
MyAE.OnReceiveEcho = OnReceiveEcho
MyAE.OnReceiveFind = OnReceiveFind
# Start modality simulator
dcmtkscu.run_in_term(
'findscu -v -W -aec AE1 -k 0010,0020="*" -k 0010,0040="*" -k 0010,0030="*" '
'-k 0008,0052="PATIENT" -k 0008,0060="MR" -k 0040,0001="*" '
'localhost 9999')
# start AE
print "starting AE ... "
MyAE.start()
print "Entering processing loop..."
MyAE.QuitOnKeyboardInterrupt()
|
kaze/paasmaker
|
refs/heads/master
|
paasmaker/util/postgresdaemon.py
|
2
|
#
# Paasmaker - Platform as a Service
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
import os
import re
import signal
import shutil
import tempfile
import logging
import subprocess
import time
import unittest
import uuid
import platform
import paasmaker
from ..common.testhelpers import TestHelpers
from manageddaemon import ManagedDaemon, ManagedDaemonError
import tornado.testing
class PostgresDaemonError(ManagedDaemonError):
pass
class PostgresDaemon(ManagedDaemon):
"""
Start and manage a Postgres daemon in a custom data
directory, for use by other services/plugins.
If you provide a password to the configure method,
the user 'postgres' will have that password. From there,
you can interact with the daemon as normal.
You should use a port other than 5432 for it, so as to
not conflict with any system installation of Postgres.
"""
def _eat_output(self):
return open("%s/%s" % (self.parameters['working_dir'], str(uuid.uuid4())), 'w')
def configure(self, working_dir, postgres_binaries_path, port, bind_host, callback, error_callback, password=None):
"""
Configure this instance.
:arg str working_dir: The working directory.
:arg str postgres_binaries_path: The path to the binaries for Postgres.
:arg int port: The port to listen on.
:arg str bind_host: The address to bind to.
:arg callable callback: The callback to call once done.
:arg callable error_callback: The error callback on error.
:arg str|None password: An optional password for the
postgres user.
"""
self.parameters['working_dir'] = working_dir
self.parameters['postgres_binaries_path'] = postgres_binaries_path
self.parameters['port'] = port
self.parameters['host'] = bind_host
self.parameters['password'] = password
# Create the working dir. If this fails, let it bubble up.
if not os.path.exists(working_dir):
os.makedirs(working_dir)
# Now, we actually need to run pg_ctl initdb to get it all set up.
command_line = "%s -D %s --username=postgres" % (
os.path.join(self.parameters['postgres_binaries_path'], 'initdb'),
working_dir
)
pwfile = None
if password:
pwfile = tempfile.mkstemp()[1]
open(pwfile, 'w').write(password)
command_line += ' --auth=md5 --pwfile=' + pwfile
def installed_db(code):
if code == 0:
# Success!
self._fetch_output()
self.save_parameters()
callback("Successfully created Postgres database.")
else:
# Failed. Send back stdout/stderr.
raw_output = self._fetch_output()
error_callback("Failed to create Postgres database:\n" + raw_output)
paasmaker.util.popen.Popen(
command_line,
on_stdout=self._fetchable_output,
redirect_stderr=True,
on_exit=installed_db,
io_loop=self.configuration.io_loop,
)
def start(self, callback, error_callback):
"""
Start up the server for this instance.
"""
# Fire up the server.
logging.info("Starting up postgres server on port %d." % self.parameters['port'])
		# Use a string here instead of an array, because it was munging the
# sub arguments.
command_line = "%s start -D %s -o '-p %d -k %s'" % (
os.path.join(self.parameters['postgres_binaries_path'], 'pg_ctl'),
self.parameters['working_dir'],
self.parameters['port'],
self.parameters['working_dir']
)
paasmaker.util.popen.Popen(
command_line,
on_stdout=self._fetchable_output,
redirect_stderr=True,
io_loop=self.configuration.io_loop
)
def timeout(message):
# Fetch the output and call the error callback.
raw_output = self._fetch_output()
error_callback("Failed to start:\n" + raw_output)
logging.info("MySQL started, waiting for listening state.")
self._wait_until_port_inuse(
self.parameters['port'],
callback,
timeout
)
def is_running(self, keyword=None):
# TODO: This isn't async, but none of the rest is Async. Fix this.
command_line = [
os.path.join(self.parameters['postgres_binaries_path'], 'pg_ctl'),
'status',
'-D',
self.parameters['working_dir']
]
code = subprocess.call(
command_line,
stdout=self._eat_output(),
stderr=self._eat_output()
)
return code == 0
def stop(self, callback, error_callback, sig=signal.SIGTERM):
"""
Stop this instance of the Postgres server, allowing for it to be restarted later.
"""
command_line = [
os.path.join(self.parameters['postgres_binaries_path'], 'pg_ctl'),
'status',
'-D',
self.parameters['working_dir']
]
def found_pid(code):
if code == 0:
output = self._fetch_output()
pid = int(re.search('(\d+)', output).group(1))
try:
os.kill(pid, sig)
except OSError, ex:
# No such process. That's ok.
# Continue.
pass
# Wait for the process to finish.
self._wait_until_stopped(callback, error_callback)
else:
callback("Not running, no action taken.")
paasmaker.util.popen.Popen(
command_line,
on_stdout=self._fetchable_output,
redirect_stderr=True,
on_exit=found_pid,
io_loop=self.configuration.io_loop,
)
def destroy(self, callback, error_callback):
"""
Destroy this instance of Postgres, removing all assigned data.
"""
# Hard shutdown - we're about to delete the data anyway.
def stopped(message):
shutil.rmtree(self.parameters['working_dir'])
callback("Removed Postgres instance.")
self.stop(stopped, error_callback, signal.SIGKILL)
class PostgresDaemonTest(tornado.testing.AsyncTestCase, TestHelpers):
def _postgres_path(self):
if platform.system() == 'Darwin':
# Postgres binaries are in the path on OSX.
return ""
else:
# TODO: This is Ubuntu specific.
return "/usr/lib/postgresql/9.1/bin"
def setUp(self):
super(PostgresDaemonTest, self).setUp()
self.configuration = paasmaker.common.configuration.ConfigurationStub(0, [], io_loop=self.io_loop)
def tearDown(self):
if hasattr(self, 'server'):
self.server.destroy(self.stop, self.stop)
self.wait()
self.configuration.cleanup(self.stop, self.stop)
self.wait()
super(PostgresDaemonTest, self).tearDown()
def test_basic(self):
self.server = PostgresDaemon(self.configuration)
self.server.configure(
self.configuration.get_scratch_path_exists('postgres'),
self._postgres_path(),
self.configuration.get_free_port(),
'127.0.0.1', # TODO: This doesn't work yet.
self.stop,
self.stop
)
result = self.wait()
self.assertIn("Successfully", result, "Wrong message.")
self.server.start(self.stop, self.stop)
result = self.wait()
self.assertIn("In appropriate state", result, "Failed to start Postgres server.")
self.assertTrue(self.server.is_running())
self.server.stop(self.stop, self.stop)
result = self.wait()
self.assertFalse(self.server.is_running())
# Start it again.
self.server.start(self.stop, self.stop)
result = self.wait()
self.assertIn("In appropriate state", result, "Failed to start Postgres server.")
self.assertTrue(self.server.is_running())
|
krenzlin/bilthoven
|
refs/heads/master
|
tests/test_process_functions.py
|
1
|
from itertools import izip
from numpy import array, array_equal
def test_block_iterator():
from bilthoven.process_functions import block_iterator
data = range(10)
assert array_equal(list(block_iterator(data, 5, 5)), [array([0,1,2,3,4]), array([5,6,7,8,9])])
result_should = [array([0,1,2,3,4,5]), array([6,7,8,9])]
for ret, res in izip(block_iterator(data, 6, 6), result_should):
assert array_equal(ret, res)
result_should = [array([0,1,2,3,4]), array([3,4,5,6,7]), array([6,7,8,9])]
for ret, res in izip(block_iterator(data, 5, 3), result_should):
assert array_equal(ret, res)
def test_process():
from bilthoven.process_functions import process_single_channel
from bilthoven.transformations import reverse
data = array(range(10))
result_should = array([1,0,3,2,5,4,7,6,9,8])
result_is = process_single_channel(data, reverse, block_size=2)
assert array_equal(result_is, result_should)
|
yuyangit/tornado
|
refs/heads/master
|
maint/test/appengine/py27/runtests.py
|
1
|
../common/runtests.py
|
zhuwenping/python-for-android
|
refs/heads/master
|
python-build/python-libs/gdata/samples/blogger/BloggerExampleV1.py
|
133
|
#!/usr/bin/python
#
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file demonstrates how to use the Google Data API's Python client library
# to interface with the Blogger service. There are examples for the following
# operations:
#
# * Retrieving the list of all the user's blogs
# * Retrieving all posts on a single blog
# * Performing a date-range query for posts on a blog
# * Creating draft posts and publishing posts
# * Updating posts
# * Retrieving comments
# * Creating comments
# * Deleting comments
# * Deleting posts
__author__ = 'lkeppler@google.com (Luke Keppler)'
from gdata import service
import gdata
import atom
import getopt
import sys
class BloggerExample:
def __init__(self, email, password):
"""Creates a GDataService and provides ClientLogin auth details to it.
The email and password are required arguments for ClientLogin. The
'source' defined below is an arbitrary string, but should be used to
reference your name or the name of your organization, the app name and
version, with '-' between each of the three values."""
# Authenticate using ClientLogin.
self.service = service.GDataService(email, password)
self.service.source = 'Blogger_Python_Sample-1.0'
self.service.service = 'blogger'
self.service.server = 'www.blogger.com'
self.service.ProgrammaticLogin()
# Get the blog ID for the first blog.
feed = self.service.Get('/feeds/default/blogs')
self_link = feed.entry[0].GetSelfLink()
if self_link:
self.blog_id = self_link.href.split('/')[-1]
def PrintUserBlogTitles(self):
"""Prints a list of all the user's blogs."""
# Request the feed.
query = service.Query()
query.feed = '/feeds/default/blogs'
feed = self.service.Get(query.ToUri())
# Print the results.
print feed.title.text
for entry in feed.entry:
print "\t" + entry.title.text
print
def CreatePost(self, title, content, author_name, is_draft):
"""This method creates a new post on a blog. The new post can be stored as
a draft or published based on the value of the is_draft parameter. The
method creates a GDataEntry for the new post using the title, content,
author_name and is_draft parameters. With is_draft, True saves the post as
a draft, while False publishes the post. Then it uses the given
GDataService to insert the new post. If the insertion is successful, the
added post (GDataEntry) will be returned.
"""
# Create the entry to insert.
entry = gdata.GDataEntry()
entry.author.append(atom.Author(atom.Name(text=author_name)))
entry.title = atom.Title(title_type='xhtml', text=title)
entry.content = atom.Content(content_type='html', text=content)
if is_draft:
control = atom.Control()
control.draft = atom.Draft(text='yes')
entry.control = control
# Ask the service to insert the new entry.
return self.service.Post(entry,
'/feeds/' + self.blog_id + '/posts/default')
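# Minimal usage sketch (mirrors the calls made in run() further below; the
# variable names here are purely illustrative):
#   blogger = BloggerExample(email, password)
#   draft = blogger.CreatePost("Title", "<p>Body</p>", "Author", True)
#   public = blogger.CreatePost("Title", "<p>Body</p>", "Author", False)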
def PrintAllPosts(self):
"""This method displays the titles of all the posts in a blog. First it
requests the posts feed for the blogs and then it prints the results.
"""
# Request the feed.
feed = self.service.GetFeed('/feeds/' + self.blog_id + '/posts/default')
# Print the results.
print feed.title.text
for entry in feed.entry:
if not entry.title.text:
print "\tNo Title"
else:
print "\t" + entry.title.text
print
def PrintPostsInDateRange(self, start_time, end_time):
"""This method displays the title and modification time for any posts that
have been created or updated in the period between the start_time and
end_time parameters. The method creates the query, submits it to the
GDataService, and then displays the results.
Note that while the start_time is inclusive, the end_time is exclusive, so
specifying an end_time of '2007-07-01' will include those posts up until
2007-6-30 11:59:59PM.
The start_time specifies the beginning of the search period (inclusive),
while end_time specifies the end of the search period (exclusive).
"""
# Create query and submit a request.
query = service.Query()
query.feed = '/feeds/' + self.blog_id + '/posts/default'
query.updated_min = start_time
query.updated_max = end_time
query.orderby = 'updated'
feed = self.service.Get(query.ToUri())
# Print the results.
print feed.title.text + " posts between " + start_time + " and " + end_time
print feed.title.text
for entry in feed.entry:
if not entry.title.text:
print "\tNo Title"
else:
print "\t" + entry.title.text
print
def UpdatePostTitle(self, entry_to_update, new_title):
"""This method updates the title of the given post. The GDataEntry object
is updated with the new title, then a request is sent to the GDataService.
If the insertion is successful, the updated post will be returned.
Note that other characteristics of the post can also be modified by
updating the values of the entry object before submitting the request.
The entry_to_update is a GDataEntry containing the post to update.
The new_title is the text to use for the post's new title. Returns: a
GDataEntry containing the newly-updated post.
"""
# Set the new title in the Entry object
entry_to_update.title = atom.Title('xhtml', new_title)
# Grab the edit URI
edit_uri = entry_to_update.GetEditLink().href
return self.service.Put(entry_to_update, edit_uri)
def CreateComment(self, post_id, comment_text):
"""This method adds a comment to the specified post. First the comment
feed's URI is built using the given post ID. Then a GDataEntry is created
for the comment and submitted to the GDataService. The post_id is the ID
of the post on which to post comments. The comment_text is the text of the
comment to store. Returns: an entry containing the newly-created comment
NOTE: This functionality is not officially supported yet.
"""
# Build the comment feed URI
feed_uri = '/feeds/' + self.blog_id + '/' + post_id + '/comments/default'
# Create a new entry for the comment and submit it to the GDataService
entry = gdata.GDataEntry()
entry.content = atom.Content(content_type='xhtml', text=comment_text)
return self.service.Post(entry, feed_uri)
def PrintAllComments(self, post_id):
"""This method displays all the comments for the given post. First the
comment feed's URI is built using the given post ID. Then the method
requests the comments feed and displays the results. Takes the post_id
of the post on which to view comments.
"""
# Build comment feed URI and request comments on the specified post
feed_url = '/feeds/' + self.blog_id + '/comments/default'
feed = self.service.Get(feed_url)
# Display the results
print feed.title.text
for entry in feed.entry:
print "\t" + entry.title.text
print "\t" + entry.updated.text
print
def DeleteComment(self, post_id, comment_id):
"""This method removes the comment specified by the given edit_link_href, the
URI for editing the comment.
"""
feed_uri = '/feeds/' + self.blog_id + '/' + post_id + '/comments/default/' + comment_id
self.service.Delete(feed_uri)
def DeletePost(self, edit_link_href):
"""This method removes the post specified by the given edit_link_href, the
URI for editing the post.
"""
self.service.Delete(edit_link_href)
def run(self):
"""Runs each of the example methods defined above, demonstrating how to
interface with the Blogger service.
"""
# Demonstrate retrieving a list of the user's blogs.
self.PrintUserBlogTitles()
# Demonstrate how to create a draft post.
draft_post = self.CreatePost("Snorkling in Aruba",
"<p>We had <b>so</b> much fun snorkling in Aruba<p>",
"Post author", True)
print "Successfully created draft post: \"" + draft_post.title.text + "\".\n"
# Demonstrate how to publish a public post.
public_post = self.CreatePost("Back from vacation",
"<p>I didn't want to leave Aruba, but I ran out of money :(<p>",
"Post author", False)
print "Successfully created public post: \"" + public_post.title.text + "\".\n"
# Demonstrate various feed queries.
print "Now listing all posts."
self.PrintAllPosts()
print "Now listing all posts between 2007-04-04 and 2007-04-23."
self.PrintPostsInDateRange("2007-04-04", "2007-04-23")
# Demonstrate updating a post's title.
print "Now updating the title of the post we just created:"
public_post = self.UpdatePostTitle(public_post, "The party's over")
print "Successfully changed the post's title to \"" + public_post.title.text + "\".\n"
# Demonstrate how to retrieve the comments for a post.
# Get the post ID and build the comments feed URI for the specified post
self_id = public_post.id.text
tokens = self_id.split("-")
post_id = tokens[-1]
print "Now posting a comment on the post titled: \"" + public_post.title.text + "\"."
comment = self.CreateComment(post_id, "Did you see any sharks?")
print "Successfully posted \"" + comment.content.text + "\" on the post titled: \"" + public_post.title.text + "\".\n"
comment_id = comment.GetEditLink().href.split("/")[-1]
print "Now printing all comments"
self.PrintAllComments(post_id)
# Delete the comment we just posted
print "Now deleting the comment we just posted"
self.DeleteComment(post_id, comment_id)
print "Successfully deleted comment."
self.PrintAllComments(post_id)
# Get the post's edit URI
edit_uri = public_post.GetEditLink().href
# Demonstrate deleting posts.
print "Now deleting the post titled: \"" + public_post.title.text + "\"."
self.DeletePost(edit_uri)
print "Successfully deleted post."
self.PrintAllPosts()
def main():
"""The main function runs the BloggerExample application with the provided
username and password values. Authentication credentials are required.
NOTE: It is recommended that you run this sample using a test account."""
# parse command line options
try:
opts, args = getopt.getopt(sys.argv[1:], "", ["email=", "password="])
except getopt.error, msg:
print ('python BloggerExample.py --email [email] --password [password] ')
sys.exit(2)
email = ''
password = ''
# Process options
for o, a in opts:
if o == "--email":
email = a
elif o == "--password":
password = a
if email == '' or password == '':
print ('python BloggerExample.py --email [email] --password [password]')
sys.exit(2)
sample = BloggerExample(email, password)
sample.run()
if __name__ == '__main__':
main()
|
ThiagoGarciaAlves/intellij-community
|
refs/heads/master
|
python/testData/selectWord/list/before.py
|
83
|
x = [1, 2, 3, 4<caret>]
|
mclois/iteexe
|
refs/heads/master
|
twisted/manhole/ui/gtk2manhole.py
|
14
|
# -*- Python -*-
# $Id: gtk2manhole.py,v 1.9 2003/09/07 19:58:09 acapnotic Exp $
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""Manhole client with a GTK v2.x front-end.
"""
__version__ = '$Revision: 1.9 $'[11:-2]
from twisted import copyright
from twisted.internet import reactor
from twisted.python import components, failure, log, util
from twisted.spread import pb
from twisted.spread.ui import gtk2util
from twisted.manhole.service import IManholeClient
from zope.interface import implements
# The pygtk.require for version 2.0 has already been done by the reactor.
import gtk
import code, types, inspect
# TODO:
# Make wrap-mode a run-time option.
# Explorer.
# Code doesn't cleanly handle opening a second connection. Fix that.
# Make some acknowledgement of when a command has completed, even if
# it has no return value so it doesn't print anything to the console.
class OfflineError(Exception):
pass
class ManholeWindow(components.Componentized, gtk2util.GladeKeeper):
gladefile = util.sibpath(__file__, "gtk2manhole.glade")
_widgets = ('input','output','manholeWindow')
def __init__(self):
self.defaults = {}
gtk2util.GladeKeeper.__init__(self)
components.Componentized.__init__(self)
self.input = ConsoleInput(self._input)
self.input.toplevel = self
self.output = ConsoleOutput(self._output)
# Ugh. GladeKeeper actually isn't so good for composite objects.
# I want this connected to the ConsoleInput's handler, not something
# on this class.
self._input.connect("key_press_event", self.input._on_key_press_event)
def setDefaults(self, defaults):
self.defaults = defaults
def login(self):
client = self.getComponent(IManholeClient)
d = gtk2util.login(client, **self.defaults)
d.addCallback(self._cbLogin)
d.addCallback(client._cbLogin)
d.addErrback(self._ebLogin)
def _cbDisconnected(self, perspective):
self.output.append("%s went away. :(\n" % (perspective,), "local")
self._manholeWindow.set_title("Manhole")
def _cbLogin(self, perspective):
peer = perspective.broker.transport.getPeer()
self.output.append("Connected to %s\n" % (peer,), "local")
perspective.notifyOnDisconnect(self._cbDisconnected)
self._manholeWindow.set_title("Manhole - %s" % (peer))
return perspective
def _ebLogin(self, reason):
self.output.append("Login FAILED %s\n" % (reason.value,), "exception")
def _on_aboutMenuItem_activate(self, widget, *unused):
import sys
from os import path
self.output.append("""\
a Twisted Manhole client
Versions:
%(twistedVer)s
Python %(pythonVer)s on %(platform)s
GTK %(gtkVer)s / PyGTK %(pygtkVer)s
%(module)s %(modVer)s
http://twistedmatrix.com/
""" % {'twistedVer': copyright.longversion,
'pythonVer': sys.version.replace('\n', '\n '),
'platform': sys.platform,
'gtkVer': ".".join(map(str, gtk.gtk_version)),
'pygtkVer': ".".join(map(str, gtk.pygtk_version)),
'module': path.basename(__file__),
'modVer': __version__,
}, "local")
def _on_openMenuItem_activate(self, widget, userdata=None):
self.login()
def _on_manholeWindow_delete_event(self, widget, *unused):
reactor.stop()
def _on_quitMenuItem_activate(self, widget, *unused):
reactor.stop()
def on_reload_self_activate(self, *unused):
from twisted.python import rebuild
rebuild.rebuild(inspect.getmodule(self.__class__))
tagdefs = {
'default': {"family": "monospace"},
# These are message types we get from the server.
'stdout': {"foreground": "black"},
'stderr': {"foreground": "#AA8000"},
'result': {"foreground": "blue"},
'exception': {"foreground": "red"},
# Messages generated locally.
'local': {"foreground": "#008000"},
'log': {"foreground": "#000080"},
'command': {"foreground": "#666666"},
}
# TODO: Factor Python console stuff back out to pywidgets.
class ConsoleOutput:
_willScroll = None
def __init__(self, textView):
self.textView = textView
self.buffer = textView.get_buffer()
# TODO: Make this a singleton tag table.
for name, props in tagdefs.iteritems():
tag = self.buffer.create_tag(name)
# This can be done in the constructor in newer pygtk (post 1.99.14)
for k, v in props.iteritems():
tag.set_property(k, v)
self.buffer.tag_table.lookup("default").set_priority(0)
self._captureLocalLog()
def _captureLocalLog(self):
return log.startLogging(_Notafile(self, "log"), setStdout=False)
def append(self, text, kind=None):
# XXX: It seems weird to have to do this thing with always applying
# a 'default' tag. Can't we change the fundamental look instead?
tags = ["default"]
if kind is not None:
tags.append(kind)
self.buffer.insert_with_tags_by_name(self.buffer.get_end_iter(),
text, *tags)
# Silly things, the TextView needs to update itself before it knows
# where the bottom is.
if self._willScroll is None:
self._willScroll = gtk.idle_add(self._scrollDown)
def _scrollDown(self, *unused):
self.textView.scroll_to_iter(self.buffer.get_end_iter(), 0,
True, 1.0, 1.0)
self._willScroll = None
return False
class History:
def __init__(self, maxhist=10000):
self.ringbuffer = ['']
self.maxhist = maxhist
self.histCursor = 0
def append(self, htext):
self.ringbuffer.insert(-1, htext)
if len(self.ringbuffer) > self.maxhist:
self.ringbuffer.pop(0)
self.histCursor = len(self.ringbuffer) - 1
self.ringbuffer[-1] = ''
def move(self, prevnext=1):
'''
Return next/previous item in the history, stopping at top/bottom.
'''
hcpn = self.histCursor + prevnext
if hcpn >= 0 and hcpn < len(self.ringbuffer):
self.histCursor = hcpn
return self.ringbuffer[hcpn]
else:
return None
def histup(self, textbuffer):
if self.histCursor == len(self.ringbuffer) - 1:
si, ei = textbuffer.get_start_iter(), textbuffer.get_end_iter()
self.ringbuffer[-1] = textbuffer.get_text(si,ei)
newtext = self.move(-1)
if newtext is None:
return
textbuffer.set_text(newtext)
def histdown(self, textbuffer):
newtext = self.move(1)
if newtext is None:
return
textbuffer.set_text(newtext)
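# Illustrative sketch of the ring buffer above (written from the methods, not
# part of the original module):
#   h = History()
#   h.append('first'); h.append('second')  # ringbuffer == ['first', 'second', '']
#   h.move(-1)  # -> 'second'
#   h.move(-1)  # -> 'first'
#   h.move(1)   # -> 'second', walking back towards the empty "current" slot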
class ConsoleInput:
toplevel, rkeymap = None, None
__debug = False
def __init__(self, textView):
self.textView=textView
self.rkeymap = {}
self.history = History()
for name in dir(gtk.keysyms):
try:
self.rkeymap[getattr(gtk.keysyms, name)] = name
except TypeError:
pass
def _on_key_press_event(self, entry, event):
stopSignal = False
ksym = self.rkeymap.get(event.keyval, None)
mods = []
for prefix, mask in [('ctrl', gtk.gdk.CONTROL_MASK), ('shift', gtk.gdk.SHIFT_MASK)]:
if event.state & mask:
mods.append(prefix)
if mods:
ksym = '_'.join(mods + [ksym])
if ksym:
rvalue = getattr(
self, 'key_%s' % ksym, lambda *a, **kw: None)(entry, event)
if self.__debug:
print ksym
return rvalue
def getText(self):
buffer = self.textView.get_buffer()
iter1, iter2 = buffer.get_bounds()
text = buffer.get_text(iter1, iter2, False)
return text
def setText(self, text):
self.textView.get_buffer().set_text(text)
def key_Return(self, entry, event):
text = self.getText()
# Figure out if that Return meant "next line" or "execute."
try:
c = code.compile_command(text)
except SyntaxError, e:
# This could conceivably piss you off if the client's python
# doesn't accept keywords that are known to the manhole's
# python.
# `buffer` isn't otherwise bound here; fetch it before placing the cursor.
buffer = self.textView.get_buffer()
point = buffer.get_iter_at_line_offset(e.lineno, e.offset)
buffer.place_cursor(point)
# TODO: Componentize!
self.toplevel.output.append(str(e), "exception")
except (OverflowError, ValueError), e:
self.toplevel.output.append(str(e), "exception")
else:
if c is not None:
self.sendMessage()
# Don't insert Return as a newline in the buffer.
self.history.append(text)
self.clear()
# entry.emit_stop_by_name("key_press_event")
return True
else:
# not a complete code block
return False
return False
def key_Up(self, entry, event):
# if I'm at the top, previous history item.
textbuffer = self.textView.get_buffer()
if textbuffer.get_iter_at_mark(textbuffer.get_insert()).get_line() == 0:
self.history.histup(textbuffer)
return True
return False
def key_Down(self, entry, event):
textbuffer = self.textView.get_buffer()
if textbuffer.get_iter_at_mark(textbuffer.get_insert()).get_line() == (
textbuffer.get_line_count() - 1):
self.history.histdown(textbuffer)
return True
return False
key_ctrl_p = key_Up
key_ctrl_n = key_Down
def key_ctrl_shift_F9(self, entry, event):
if self.__debug:
import pdb; pdb.set_trace()
def clear(self):
buffer = self.textView.get_buffer()
buffer.delete(*buffer.get_bounds())
def sendMessage(self):
buffer = self.textView.get_buffer()
iter1, iter2 = buffer.get_bounds()
text = buffer.get_text(iter1, iter2, False)
self.toplevel.output.append(pythonify(text), 'command')
# TODO: Componentize better!
try:
return self.toplevel.getComponent(IManholeClient).do(text)
except OfflineError:
self.toplevel.output.append("Not connected, command not sent.\n",
"exception")
def pythonify(text):
'''
Make some text appear as though it was typed in at a Python prompt.
'''
lines = text.split('\n')
lines[0] = '>>> ' + lines[0]
return '\n... '.join(lines) + '\n'
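# Worked example for pythonify() above (derived directly from the code):
#   pythonify("x = 1\nprint x") == ">>> x = 1\n... print x\n"
# i.e. the first line gets the ">>> " prompt and later lines the "... "
# continuation prompt.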
class _Notafile:
"""Curry to make failure.printTraceback work with the output widget."""
def __init__(self, output, kind):
self.output = output
self.kind = kind
def write(self, txt):
self.output.append(txt, self.kind)
def flush(self):
pass
class ManholeClient(components.Adapter, pb.Referenceable):
implements(IManholeClient)
capabilities = {
# "Explorer": 'Set',
"Failure": 'Set'
}
def _cbLogin(self, perspective):
self.perspective = perspective
perspective.notifyOnDisconnect(self._cbDisconnected)
return perspective
def remote_console(self, messages):
for kind, content in messages:
if isinstance(content, types.StringTypes):
self.original.output.append(content, kind)
elif (kind == "exception") and isinstance(content, failure.Failure):
content.printTraceback(_Notafile(self.original.output,
"exception"))
else:
self.original.output.append(str(content), kind)
def remote_receiveExplorer(self, xplorer):
pass
def remote_listCapabilities(self):
return self.capabilities
def _cbDisconnected(self, perspective):
self.perspective = None
def do(self, text):
if self.perspective is None:
raise OfflineError
return self.perspective.callRemote("do", text)
components.backwardsCompatImplements(ManholeClient)
components.registerAdapter(ManholeClient, ManholeWindow, IManholeClient)
|
django-nonrel/django-nonrel
|
refs/heads/develop
|
tests/regressiontests/test_client_regress/models.py
|
21
|
# -*- coding: utf-8 -*-
"""
Regression tests for the Test Client, especially the customized assertions.
"""
import os
from django.conf import settings
from django.core.exceptions import SuspiciousOperation
from django.core.urlresolvers import reverse
from django.template import (TemplateDoesNotExist, TemplateSyntaxError,
Context, Template, loader)
import django.template.context
from django.test import Client, TestCase
from django.test.client import encode_file
from django.test.utils import ContextList
class AssertContainsTests(TestCase):
def setUp(self):
self.old_templates = settings.TEMPLATE_DIRS
settings.TEMPLATE_DIRS = (os.path.join(os.path.dirname(__file__), 'templates'),)
def tearDown(self):
settings.TEMPLATE_DIRS = self.old_templates
def test_contains(self):
"Responses can be inspected for content, including counting repeated substrings"
response = self.client.get('/test_client_regress/no_template_view/')
self.assertNotContains(response, 'never')
self.assertContains(response, 'never', 0)
self.assertContains(response, 'once')
self.assertContains(response, 'once', 1)
self.assertContains(response, 'twice')
self.assertContains(response, 'twice', 2)
try:
self.assertContains(response, 'text', status_code=999)
except AssertionError, e:
self.assertIn("Couldn't retrieve content: Response code was 200 (expected 999)", str(e))
try:
self.assertContains(response, 'text', status_code=999, msg_prefix='abc')
except AssertionError, e:
self.assertIn("abc: Couldn't retrieve content: Response code was 200 (expected 999)", str(e))
try:
self.assertNotContains(response, 'text', status_code=999)
except AssertionError, e:
self.assertIn("Couldn't retrieve content: Response code was 200 (expected 999)", str(e))
try:
self.assertNotContains(response, 'text', status_code=999, msg_prefix='abc')
except AssertionError, e:
self.assertIn("abc: Couldn't retrieve content: Response code was 200 (expected 999)", str(e))
try:
self.assertNotContains(response, 'once')
except AssertionError, e:
self.assertIn("Response should not contain 'once'", str(e))
try:
self.assertNotContains(response, 'once', msg_prefix='abc')
except AssertionError, e:
self.assertIn("abc: Response should not contain 'once'", str(e))
try:
self.assertContains(response, 'never', 1)
except AssertionError, e:
self.assertIn("Found 0 instances of 'never' in response (expected 1)", str(e))
try:
self.assertContains(response, 'never', 1, msg_prefix='abc')
except AssertionError, e:
self.assertIn("abc: Found 0 instances of 'never' in response (expected 1)", str(e))
try:
self.assertContains(response, 'once', 0)
except AssertionError, e:
self.assertIn("Found 1 instances of 'once' in response (expected 0)", str(e))
try:
self.assertContains(response, 'once', 0, msg_prefix='abc')
except AssertionError, e:
self.assertIn("abc: Found 1 instances of 'once' in response (expected 0)", str(e))
try:
self.assertContains(response, 'once', 2)
except AssertionError, e:
self.assertIn("Found 1 instances of 'once' in response (expected 2)", str(e))
try:
self.assertContains(response, 'once', 2, msg_prefix='abc')
except AssertionError, e:
self.assertIn("abc: Found 1 instances of 'once' in response (expected 2)", str(e))
try:
self.assertContains(response, 'twice', 1)
except AssertionError, e:
self.assertIn("Found 2 instances of 'twice' in response (expected 1)", str(e))
try:
self.assertContains(response, 'twice', 1, msg_prefix='abc')
except AssertionError, e:
self.assertIn("abc: Found 2 instances of 'twice' in response (expected 1)", str(e))
try:
self.assertContains(response, 'thrice')
except AssertionError, e:
self.assertIn("Couldn't find 'thrice' in response", str(e))
try:
self.assertContains(response, 'thrice', msg_prefix='abc')
except AssertionError, e:
self.assertIn("abc: Couldn't find 'thrice' in response", str(e))
try:
self.assertContains(response, 'thrice', 3)
except AssertionError, e:
self.assertIn("Found 0 instances of 'thrice' in response (expected 3)", str(e))
try:
self.assertContains(response, 'thrice', 3, msg_prefix='abc')
except AssertionError, e:
self.assertIn("abc: Found 0 instances of 'thrice' in response (expected 3)", str(e))
def test_unicode_contains(self):
"Unicode characters can be found in template context"
#Regression test for #10183
r = self.client.get('/test_client_regress/check_unicode/')
self.assertContains(r, u'さかき')
self.assertContains(r, '\xe5\xb3\xa0'.decode('utf-8'))
def test_unicode_not_contains(self):
"Unicode characters can be searched for, and not found in template context"
#Regression test for #10183
r = self.client.get('/test_client_regress/check_unicode/')
self.assertNotContains(r, u'はたけ')
self.assertNotContains(r, '\xe3\x81\xaf\xe3\x81\x9f\xe3\x81\x91'.decode('utf-8'))
class AssertTemplateUsedTests(TestCase):
fixtures = ['testdata.json']
def test_no_context(self):
"Template usage assertions work then templates aren't in use"
response = self.client.get('/test_client_regress/no_template_view/')
# Check that the no template case doesn't mess with the template assertions
self.assertTemplateNotUsed(response, 'GET Template')
try:
self.assertTemplateUsed(response, 'GET Template')
except AssertionError, e:
self.assertIn("No templates used to render the response", str(e))
try:
self.assertTemplateUsed(response, 'GET Template', msg_prefix='abc')
except AssertionError, e:
self.assertIn("abc: No templates used to render the response", str(e))
def test_single_context(self):
"Template assertions work when there is a single context"
response = self.client.get('/test_client/post_view/', {})
try:
self.assertTemplateNotUsed(response, 'Empty GET Template')
except AssertionError, e:
self.assertIn("Template 'Empty GET Template' was used unexpectedly in rendering the response", str(e))
try:
self.assertTemplateNotUsed(response, 'Empty GET Template', msg_prefix='abc')
except AssertionError, e:
self.assertIn("abc: Template 'Empty GET Template' was used unexpectedly in rendering the response", str(e))
try:
self.assertTemplateUsed(response, 'Empty POST Template')
except AssertionError, e:
self.assertIn("Template 'Empty POST Template' was not a template used to render the response. Actual template(s) used: Empty GET Template", str(e))
try:
self.assertTemplateUsed(response, 'Empty POST Template', msg_prefix='abc')
except AssertionError, e:
self.assertIn("abc: Template 'Empty POST Template' was not a template used to render the response. Actual template(s) used: Empty GET Template", str(e))
def test_multiple_context(self):
"Template assertions work when there are multiple contexts"
post_data = {
'text': 'Hello World',
'email': 'foo@example.com',
'value': 37,
'single': 'b',
'multi': ('b','c','e')
}
response = self.client.post('/test_client/form_view_with_template/', post_data)
self.assertContains(response, 'POST data OK')
try:
self.assertTemplateNotUsed(response, "form_view.html")
except AssertionError, e:
self.assertIn("Template 'form_view.html' was used unexpectedly in rendering the response", str(e))
try:
self.assertTemplateNotUsed(response, 'base.html')
except AssertionError, e:
self.assertIn("Template 'base.html' was used unexpectedly in rendering the response", str(e))
try:
self.assertTemplateUsed(response, "Valid POST Template")
except AssertionError, e:
self.assertIn("Template 'Valid POST Template' was not a template used to render the response. Actual template(s) used: form_view.html, base.html", str(e))
class AssertRedirectsTests(TestCase):
def test_redirect_page(self):
"An assertion is raised if the original page couldn't be retrieved as expected"
# This page will redirect with code 301, not 302
response = self.client.get('/test_client/permanent_redirect_view/')
try:
self.assertRedirects(response, '/test_client/get_view/')
except AssertionError, e:
self.assertIn("Response didn't redirect as expected: Response code was 301 (expected 302)", str(e))
try:
self.assertRedirects(response, '/test_client/get_view/', msg_prefix='abc')
except AssertionError, e:
self.assertIn("abc: Response didn't redirect as expected: Response code was 301 (expected 302)", str(e))
def test_lost_query(self):
"An assertion is raised if the redirect location doesn't preserve GET parameters"
response = self.client.get('/test_client/redirect_view/', {'var': 'value'})
try:
self.assertRedirects(response, '/test_client/get_view/')
except AssertionError, e:
self.assertIn("Response redirected to 'http://testserver/test_client/get_view/?var=value', expected 'http://testserver/test_client/get_view/'", str(e))
try:
self.assertRedirects(response, '/test_client/get_view/', msg_prefix='abc')
except AssertionError, e:
self.assertIn("abc: Response redirected to 'http://testserver/test_client/get_view/?var=value', expected 'http://testserver/test_client/get_view/'", str(e))
def test_incorrect_target(self):
"An assertion is raised if the response redirects to another target"
response = self.client.get('/test_client/permanent_redirect_view/')
try:
# Should redirect to get_view
self.assertRedirects(response, '/test_client/some_view/')
except AssertionError, e:
self.assertIn("Response didn't redirect as expected: Response code was 301 (expected 302)", str(e))
def test_target_page(self):
"An assertion is raised if the response redirect target cannot be retrieved as expected"
response = self.client.get('/test_client/double_redirect_view/')
try:
# The redirect target responds with a 301 code, not 200
self.assertRedirects(response, 'http://testserver/test_client/permanent_redirect_view/')
except AssertionError, e:
self.assertIn("Couldn't retrieve redirection page '/test_client/permanent_redirect_view/': response code was 301 (expected 200)", str(e))
try:
# The redirect target responds with a 301 code, not 200
self.assertRedirects(response, 'http://testserver/test_client/permanent_redirect_view/', msg_prefix='abc')
except AssertionError, e:
self.assertIn("abc: Couldn't retrieve redirection page '/test_client/permanent_redirect_view/': response code was 301 (expected 200)", str(e))
def test_redirect_chain(self):
"You can follow a redirect chain of multiple redirects"
response = self.client.get('/test_client_regress/redirects/further/more/', {}, follow=True)
self.assertRedirects(response, '/test_client_regress/no_template_view/',
status_code=301, target_status_code=200)
self.assertEqual(len(response.redirect_chain), 1)
self.assertEqual(response.redirect_chain[0], ('http://testserver/test_client_regress/no_template_view/', 301))
def test_multiple_redirect_chain(self):
"You can follow a redirect chain of multiple redirects"
response = self.client.get('/test_client_regress/redirects/', {}, follow=True)
self.assertRedirects(response, '/test_client_regress/no_template_view/',
status_code=301, target_status_code=200)
self.assertEqual(len(response.redirect_chain), 3)
self.assertEqual(response.redirect_chain[0], ('http://testserver/test_client_regress/redirects/further/', 301))
self.assertEqual(response.redirect_chain[1], ('http://testserver/test_client_regress/redirects/further/more/', 301))
self.assertEqual(response.redirect_chain[2], ('http://testserver/test_client_regress/no_template_view/', 301))
def test_redirect_chain_to_non_existent(self):
"You can follow a chain to a non-existent view"
response = self.client.get('/test_client_regress/redirect_to_non_existent_view2/', {}, follow=True)
self.assertRedirects(response, '/test_client_regress/non_existent_view/',
status_code=301, target_status_code=404)
def test_redirect_chain_to_self(self):
"Redirections to self are caught and escaped"
response = self.client.get('/test_client_regress/redirect_to_self/', {}, follow=True)
# The chain of redirects stops once the cycle is detected.
self.assertRedirects(response, '/test_client_regress/redirect_to_self/',
status_code=301, target_status_code=301)
self.assertEqual(len(response.redirect_chain), 2)
def test_circular_redirect(self):
"Circular redirect chains are caught and escaped"
response = self.client.get('/test_client_regress/circular_redirect_1/', {}, follow=True)
# The chain of redirects will get back to the starting point, but stop there.
self.assertRedirects(response, '/test_client_regress/circular_redirect_2/',
status_code=301, target_status_code=301)
self.assertEqual(len(response.redirect_chain), 4)
def test_redirect_chain_post(self):
"A redirect chain will be followed from an initial POST post"
response = self.client.post('/test_client_regress/redirects/',
{'nothing': 'to_send'}, follow=True)
self.assertRedirects(response,
'/test_client_regress/no_template_view/', 301, 200)
self.assertEqual(len(response.redirect_chain), 3)
def test_redirect_chain_head(self):
"A redirect chain will be followed from an initial HEAD request"
response = self.client.head('/test_client_regress/redirects/',
{'nothing': 'to_send'}, follow=True)
self.assertRedirects(response,
'/test_client_regress/no_template_view/', 301, 200)
self.assertEqual(len(response.redirect_chain), 3)
def test_redirect_chain_options(self):
"A redirect chain will be followed from an initial OPTIONS request"
response = self.client.options('/test_client_regress/redirects/',
{'nothing': 'to_send'}, follow=True)
self.assertRedirects(response,
'/test_client_regress/no_template_view/', 301, 200)
self.assertEqual(len(response.redirect_chain), 3)
def test_redirect_chain_put(self):
"A redirect chain will be followed from an initial PUT request"
response = self.client.put('/test_client_regress/redirects/',
{'nothing': 'to_send'}, follow=True)
self.assertRedirects(response,
'/test_client_regress/no_template_view/', 301, 200)
self.assertEqual(len(response.redirect_chain), 3)
def test_redirect_chain_delete(self):
"A redirect chain will be followed from an initial DELETE request"
response = self.client.delete('/test_client_regress/redirects/',
{'nothing': 'to_send'}, follow=True)
self.assertRedirects(response,
'/test_client_regress/no_template_view/', 301, 200)
self.assertEqual(len(response.redirect_chain), 3)
def test_redirect_chain_on_non_redirect_page(self):
"An assertion is raised if the original page couldn't be retrieved as expected"
# This page will redirect with code 301, not 302
response = self.client.get('/test_client/get_view/', follow=True)
try:
self.assertRedirects(response, '/test_client/get_view/')
except AssertionError, e:
self.assertIn("Response didn't redirect as expected: Response code was 200 (expected 302)", str(e))
try:
self.assertRedirects(response, '/test_client/get_view/', msg_prefix='abc')
except AssertionError, e:
self.assertIn("abc: Response didn't redirect as expected: Response code was 200 (expected 302)", str(e))
def test_redirect_on_non_redirect_page(self):
"An assertion is raised if the original page couldn't be retrieved as expected"
# This page will redirect with code 301, not 302
response = self.client.get('/test_client/get_view/')
try:
self.assertRedirects(response, '/test_client/get_view/')
except AssertionError, e:
self.assertIn("Response didn't redirect as expected: Response code was 200 (expected 302)", str(e))
try:
self.assertRedirects(response, '/test_client/get_view/', msg_prefix='abc')
except AssertionError, e:
self.assertIn("abc: Response didn't redirect as expected: Response code was 200 (expected 302)", str(e))
class AssertFormErrorTests(TestCase):
def test_unknown_form(self):
"An assertion is raised if the form name is unknown"
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b','c','e')
}
response = self.client.post('/test_client/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
try:
self.assertFormError(response, 'wrong_form', 'some_field', 'Some error.')
except AssertionError, e:
self.assertIn("The form 'wrong_form' was not used to render the response", str(e))
try:
self.assertFormError(response, 'wrong_form', 'some_field', 'Some error.', msg_prefix='abc')
except AssertionError, e:
self.assertIn("abc: The form 'wrong_form' was not used to render the response", str(e))
def test_unknown_field(self):
"An assertion is raised if the field name is unknown"
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b','c','e')
}
response = self.client.post('/test_client/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
try:
self.assertFormError(response, 'form', 'some_field', 'Some error.')
except AssertionError, e:
self.assertIn("The form 'form' in context 0 does not contain the field 'some_field'", str(e))
try:
self.assertFormError(response, 'form', 'some_field', 'Some error.', msg_prefix='abc')
except AssertionError, e:
self.assertIn("abc: The form 'form' in context 0 does not contain the field 'some_field'", str(e))
def test_noerror_field(self):
"An assertion is raised if the field doesn't have any errors"
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b','c','e')
}
response = self.client.post('/test_client/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
try:
self.assertFormError(response, 'form', 'value', 'Some error.')
except AssertionError, e:
self.assertIn("The field 'value' on form 'form' in context 0 contains no errors", str(e))
try:
self.assertFormError(response, 'form', 'value', 'Some error.', msg_prefix='abc')
except AssertionError, e:
self.assertIn("abc: The field 'value' on form 'form' in context 0 contains no errors", str(e))
def test_unknown_error(self):
"An assertion is raised if the field doesn't contain the provided error"
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b','c','e')
}
response = self.client.post('/test_client/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
try:
self.assertFormError(response, 'form', 'email', 'Some error.')
except AssertionError, e:
self.assertIn("The field 'email' on form 'form' in context 0 does not contain the error 'Some error.' (actual errors: [u'Enter a valid e-mail address.'])", str(e))
try:
self.assertFormError(response, 'form', 'email', 'Some error.', msg_prefix='abc')
except AssertionError, e:
self.assertIn("abc: The field 'email' on form 'form' in context 0 does not contain the error 'Some error.' (actual errors: [u'Enter a valid e-mail address.'])", str(e))
def test_unknown_nonfield_error(self):
"""
Checks that an assertion is raised if the form's non-field errors
don't contain the provided error.
"""
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b','c','e')
}
response = self.client.post('/test_client/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
try:
self.assertFormError(response, 'form', None, 'Some error.')
except AssertionError, e:
self.assertIn("The form 'form' in context 0 does not contain the non-field error 'Some error.' (actual errors: )", str(e))
try:
self.assertFormError(response, 'form', None, 'Some error.', msg_prefix='abc')
except AssertionError, e:
self.assertIn("abc: The form 'form' in context 0 does not contain the non-field error 'Some error.' (actual errors: )", str(e))
class LoginTests(TestCase):
fixtures = ['testdata']
def test_login_different_client(self):
"Check that using a different test client doesn't violate authentication"
# Create a second client, and log in.
c = Client()
login = c.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Get a redirection page with the second client.
response = c.get("/test_client_regress/login_protected_redirect_view/")
# At this point, self.client isn't logged in.
# Check that assertRedirects uses the original client, not the
# default client.
self.assertRedirects(response, "http://testserver/test_client_regress/get_view/")
class SessionEngineTests(TestCase):
fixtures = ['testdata']
def setUp(self):
self.old_SESSION_ENGINE = settings.SESSION_ENGINE
settings.SESSION_ENGINE = 'regressiontests.test_client_regress.session'
def tearDown(self):
settings.SESSION_ENGINE = self.old_SESSION_ENGINE
def test_login(self):
"A session engine that modifies the session key can be used to log in"
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Try to access a login protected page.
response = self.client.get("/test_client/login_protected_view/")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
class URLEscapingTests(TestCase):
def test_simple_argument_get(self):
"Get a view that has a simple string argument"
response = self.client.get(reverse('arg_view', args=['Slartibartfast']))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, 'Howdy, Slartibartfast')
def test_argument_with_space_get(self):
"Get a view that has a string argument that requires escaping"
response = self.client.get(reverse('arg_view', args=['Arthur Dent']))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, 'Hi, Arthur')
def test_simple_argument_post(self):
"Post for a view that has a simple string argument"
response = self.client.post(reverse('arg_view', args=['Slartibartfast']))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, 'Howdy, Slartibartfast')
def test_argument_with_space_post(self):
"Post for a view that has a string argument that requires escaping"
response = self.client.post(reverse('arg_view', args=['Arthur Dent']))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, 'Hi, Arthur')
class ExceptionTests(TestCase):
fixtures = ['testdata.json']
def test_exception_cleared(self):
"#5836 - A stale user exception isn't re-raised by the test client."
login = self.client.login(username='testclient',password='password')
self.assertTrue(login, 'Could not log in')
try:
response = self.client.get("/test_client_regress/staff_only/")
self.fail("General users should not be able to visit this page")
except SuspiciousOperation:
pass
# At this point, an exception has been raised, and should be cleared.
# This next operation should be successful; if it isn't we have a problem.
login = self.client.login(username='staff', password='password')
self.assertTrue(login, 'Could not log in')
try:
self.client.get("/test_client_regress/staff_only/")
except SuspiciousOperation:
self.fail("Staff should be able to visit this page")
class TemplateExceptionTests(TestCase):
def setUp(self):
# Reset the loaders so they don't try to render cached templates.
if loader.template_source_loaders is not None:
for template_loader in loader.template_source_loaders:
if hasattr(template_loader, 'reset'):
template_loader.reset()
self.old_templates = settings.TEMPLATE_DIRS
settings.TEMPLATE_DIRS = ()
def tearDown(self):
settings.TEMPLATE_DIRS = self.old_templates
def test_no_404_template(self):
"Missing templates are correctly reported by test client"
try:
response = self.client.get("/no_such_view/")
self.fail("Should get error about missing template")
except TemplateDoesNotExist:
pass
def test_bad_404_template(self):
"Errors found when rendering 404 error templates are re-raised"
settings.TEMPLATE_DIRS = (os.path.join(os.path.dirname(__file__), 'bad_templates'),)
try:
response = self.client.get("/no_such_view/")
self.fail("Should get error about syntax error in template")
except TemplateSyntaxError:
pass
# We need two different tests to check URLconf substitution - one to check
# it was changed, and another one (without self.urls) to check it was reverted on
# teardown. This pair of tests relies upon the alphabetical ordering of test execution.
class UrlconfSubstitutionTests(TestCase):
urls = 'regressiontests.test_client_regress.urls'
def test_urlconf_was_changed(self):
"TestCase can enforce a custom URLconf on a per-test basis"
url = reverse('arg_view', args=['somename'])
self.assertEqual(url, '/arg_view/somename/')
# This test needs to run *after* UrlconfSubstitutionTests; the zz prefix in the
# name is to ensure alphabetical ordering.
class zzUrlconfSubstitutionTests(TestCase):
def test_urlconf_was_reverted(self):
"URLconf is reverted to original value after modification in a TestCase"
url = reverse('arg_view', args=['somename'])
self.assertEqual(url, '/test_client_regress/arg_view/somename/')
class ContextTests(TestCase):
fixtures = ['testdata']
def test_single_context(self):
"Context variables can be retrieved from a single context"
response = self.client.get("/test_client_regress/request_data/", data={'foo':'whiz'})
self.assertEqual(response.context.__class__, Context)
self.assertTrue('get-foo' in response.context)
self.assertEqual(response.context['get-foo'], 'whiz')
self.assertEqual(response.context['request-foo'], 'whiz')
self.assertEqual(response.context['data'], 'sausage')
try:
response.context['does-not-exist']
self.fail('Should not be able to retrieve non-existent key')
except KeyError, e:
self.assertEqual(e.args[0], 'does-not-exist')
def test_inherited_context(self):
"Context variables can be retrieved from a list of contexts"
response = self.client.get("/test_client_regress/request_data_extended/", data={'foo':'whiz'})
self.assertEqual(response.context.__class__, ContextList)
self.assertEqual(len(response.context), 2)
self.assertTrue('get-foo' in response.context)
self.assertEqual(response.context['get-foo'], 'whiz')
self.assertEqual(response.context['request-foo'], 'whiz')
self.assertEqual(response.context['data'], 'bacon')
try:
response.context['does-not-exist']
self.fail('Should not be able to retrieve non-existent key')
except KeyError, e:
self.assertEqual(e.args[0], 'does-not-exist')
def test_15368(self):
# Need to insert a context processor that assumes certain things about
# the request instance. This triggers a bug caused by some ways of
# copying RequestContext.
try:
django.template.context._standard_context_processors = (lambda request: {'path': request.special_path},)
response = self.client.get("/test_client_regress/request_context_view/")
self.assertContains(response, 'Path: /test_client_regress/request_context_view/')
finally:
django.template.context._standard_context_processors = None
class SessionTests(TestCase):
fixtures = ['testdata.json']
def test_session(self):
"The session isn't lost if a user logs in"
# The session doesn't exist to start.
response = self.client.get('/test_client_regress/check_session/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, 'NO')
# This request sets a session variable.
response = self.client.get('/test_client_regress/set_session/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, 'set_session')
# Check that the session has been modified
response = self.client.get('/test_client_regress/check_session/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, 'YES')
# Log in
login = self.client.login(username='testclient',password='password')
self.assertTrue(login, 'Could not log in')
# Session should still contain the modified value
response = self.client.get('/test_client_regress/check_session/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, 'YES')
def test_logout(self):
"""Logout should work whether the user is logged in or not (#9978)."""
self.client.logout()
login = self.client.login(username='testclient',password='password')
self.assertTrue(login, 'Could not log in')
self.client.logout()
self.client.logout()
class RequestMethodTests(TestCase):
def test_get(self):
"Request a view via request method GET"
response = self.client.get('/test_client_regress/request_methods/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, 'request method: GET')
def test_post(self):
"Request a view via request method POST"
response = self.client.post('/test_client_regress/request_methods/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, 'request method: POST')
def test_head(self):
"Request a view via request method HEAD"
response = self.client.head('/test_client_regress/request_methods/')
self.assertEqual(response.status_code, 200)
# A HEAD request doesn't return any content.
self.assertNotEqual(response.content, 'request method: HEAD')
self.assertEqual(response.content, '')
def test_options(self):
"Request a view via request method OPTIONS"
response = self.client.options('/test_client_regress/request_methods/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, 'request method: OPTIONS')
def test_put(self):
"Request a view via request method PUT"
response = self.client.put('/test_client_regress/request_methods/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, 'request method: PUT')
def test_delete(self):
"Request a view via request method DELETE"
response = self.client.delete('/test_client_regress/request_methods/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, 'request method: DELETE')
class RequestMethodStringDataTests(TestCase):
def test_post(self):
"Request a view with string data via request method POST"
# Regression test for #11371
data = u'{"test": "json"}'
response = self.client.post('/test_client_regress/request_methods/', data=data, content_type='application/json')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, 'request method: POST')
def test_put(self):
"Request a view with string data via request method PUT"
# Regression test for #11371
data = u'{"test": "json"}'
response = self.client.put('/test_client_regress/request_methods/', data=data, content_type='application/json')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, 'request method: PUT')
class QueryStringTests(TestCase):
def test_get_like_requests(self):
# See: https://code.djangoproject.com/ticket/10571.
# Removed 'put' and 'delete' here as they are 'GET-like requests'
for method_name in ('get','head','options'):
# A GET-like request can pass a query string as data
method = getattr(self.client, method_name)
response = method("/test_client_regress/request_data/", data={'foo':'whiz'})
self.assertEqual(response.context['get-foo'], 'whiz')
self.assertEqual(response.context['request-foo'], 'whiz')
# A GET-like request can pass a query string as part of the URL
response = method("/test_client_regress/request_data/?foo=whiz")
self.assertEqual(response.context['get-foo'], 'whiz')
self.assertEqual(response.context['request-foo'], 'whiz')
# Data provided in the URL to a GET-like request is overridden by actual form data
response = method("/test_client_regress/request_data/?foo=whiz", data={'foo':'bang'})
self.assertEqual(response.context['get-foo'], 'bang')
self.assertEqual(response.context['request-foo'], 'bang')
response = method("/test_client_regress/request_data/?foo=whiz", data={'bar':'bang'})
self.assertEqual(response.context['get-foo'], None)
self.assertEqual(response.context['get-bar'], 'bang')
self.assertEqual(response.context['request-foo'], None)
self.assertEqual(response.context['request-bar'], 'bang')
def test_post_like_requests(self):
# A POST-like request can pass a query string as data
response = self.client.post("/test_client_regress/request_data/", data={'foo':'whiz'})
self.assertEqual(response.context['get-foo'], None)
self.assertEqual(response.context['post-foo'], 'whiz')
# A POST-like request can pass a query string as part of the URL
response = self.client.post("/test_client_regress/request_data/?foo=whiz")
self.assertEqual(response.context['get-foo'], 'whiz')
self.assertEqual(response.context['post-foo'], None)
self.assertEqual(response.context['request-foo'], 'whiz')
# Data provided in the URL is still available (as GET data) alongside the actual POST form data
response = self.client.post("/test_client_regress/request_data/?foo=whiz", data={'foo':'bang'})
self.assertEqual(response.context['get-foo'], 'whiz')
self.assertEqual(response.context['post-foo'], 'bang')
self.assertEqual(response.context['request-foo'], 'bang')
response = self.client.post("/test_client_regress/request_data/?foo=whiz", data={'bar':'bang'})
self.assertEqual(response.context['get-foo'], 'whiz')
self.assertEqual(response.context['get-bar'], None)
self.assertEqual(response.context['post-foo'], None)
self.assertEqual(response.context['post-bar'], 'bang')
self.assertEqual(response.context['request-foo'], 'whiz')
self.assertEqual(response.context['request-bar'], 'bang')
class UnicodePayloadTests(TestCase):
def test_simple_unicode_payload(self):
"A simple ASCII-only unicode JSON document can be POSTed"
# Regression test for #10571
json = u'{"english": "mountain pass"}'
response = self.client.post("/test_client_regress/parse_unicode_json/", json,
content_type="application/json")
self.assertEqual(response.content, json)
response = self.client.put("/test_client_regress/parse_unicode_json/", json,
content_type="application/json")
self.assertEqual(response.content, json)
def test_unicode_payload_utf8(self):
"A non-ASCII unicode data encoded as UTF-8 can be POSTed"
# Regression test for #10571
json = u'{"dog": "собака"}'
response = self.client.post("/test_client_regress/parse_unicode_json/", json,
content_type="application/json; charset=utf-8")
self.assertEqual(response.content, json.encode('utf-8'))
response = self.client.put("/test_client_regress/parse_unicode_json/", json,
content_type="application/json; charset=utf-8")
self.assertEqual(response.content, json.encode('utf-8'))
def test_unicode_payload_utf16(self):
"A non-ASCII unicode data encoded as UTF-16 can be POSTed"
# Regression test for #10571
json = u'{"dog": "собака"}'
response = self.client.post("/test_client_regress/parse_unicode_json/", json,
content_type="application/json; charset=utf-16")
self.assertEqual(response.content, json.encode('utf-16'))
response = self.client.put("/test_client_regress/parse_unicode_json/", json,
content_type="application/json; charset=utf-16")
self.assertEqual(response.content, json.encode('utf-16'))
def test_unicode_payload_non_utf(self):
"A non-ASCII unicode data as a non-UTF based encoding can be POSTed"
#Regression test for #10571
json = u'{"dog": "собака"}'
response = self.client.post("/test_client_regress/parse_unicode_json/", json,
content_type="application/json; charset=koi8-r")
self.assertEqual(response.content, json.encode('koi8-r'))
response = self.client.put("/test_client_regress/parse_unicode_json/", json,
content_type="application/json; charset=koi8-r")
self.assertEqual(response.content, json.encode('koi8-r'))
class DummyFile(object):
def __init__(self, filename):
self.name = filename
def read(self):
return 'TEST_FILE_CONTENT'
class UploadedFileEncodingTest(TestCase):
def test_file_encoding(self):
encoded_file = encode_file('TEST_BOUNDARY', 'TEST_KEY', DummyFile('test_name.bin'))
self.assertEqual('--TEST_BOUNDARY', encoded_file[0])
self.assertEqual('Content-Disposition: form-data; name="TEST_KEY"; filename="test_name.bin"', encoded_file[1])
self.assertEqual('TEST_FILE_CONTENT', encoded_file[-1])
def test_guesses_content_type_on_file_encoding(self):
self.assertEqual('Content-Type: application/octet-stream',
encode_file('IGNORE', 'IGNORE', DummyFile("file.bin"))[2])
self.assertEqual('Content-Type: text/plain',
encode_file('IGNORE', 'IGNORE', DummyFile("file.txt"))[2])
self.assertIn(encode_file('IGNORE', 'IGNORE', DummyFile("file.zip"))[2], (
'Content-Type: application/x-compress',
'Content-Type: application/x-zip',
'Content-Type: application/x-zip-compressed',
'Content-Type: application/zip',))
self.assertEqual('Content-Type: application/octet-stream',
encode_file('IGNORE', 'IGNORE', DummyFile("file.unknown"))[2])
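# Illustrative note (inferred from the assertions above): encode_file() returns
# the multipart/form-data lines for one file field, roughly
#   ['--TEST_BOUNDARY',
#    'Content-Disposition: form-data; name="TEST_KEY"; filename="test_name.bin"',
#    'Content-Type: application/octet-stream',
#    ...,
#    'TEST_FILE_CONTENT']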
class RequestHeadersTest(TestCase):
def test_client_headers(self):
"A test client can receive custom headers"
response = self.client.get("/test_client_regress/check_headers/", HTTP_X_ARG_CHECK='Testing 123')
self.assertEqual(response.content, "HTTP_X_ARG_CHECK: Testing 123")
self.assertEqual(response.status_code, 200)
def test_client_headers_redirect(self):
"Test client headers are preserved through redirects"
response = self.client.get("/test_client_regress/check_headers_redirect/", follow=True, HTTP_X_ARG_CHECK='Testing 123')
self.assertEqual(response.content, "HTTP_X_ARG_CHECK: Testing 123")
self.assertRedirects(response, '/test_client_regress/check_headers/',
status_code=301, target_status_code=200)
class ResponseTemplateDeprecationTests(TestCase):
"""
Response.template still works backwards-compatibly, but with a pending deprecation warning. Refs #12226.
"""
def test_response_template_data(self):
response = self.client.get("/test_client_regress/request_data/", data={'foo':'whiz'})
self.assertEqual(response.template.__class__, Template)
self.assertEqual(response.template.name, 'base.html')
def test_response_no_template(self):
response = self.client.get("/test_client_regress/request_methods/")
self.assertEqual(response.template, None)
class RawPostDataTest(TestCase):
"Access to request.raw_post_data from the test client."
def test_raw_post_data(self):
# Refs #14753
try:
response = self.client.get("/test_client_regress/raw_post_data/")
except AssertionError:
self.fail("Accessing request.raw_post_data from a view fetched with GET by the test client shouldn't fail.")
|
Yukarumya/Yukarum-Redfoxes
|
refs/heads/master
|
testing/web-platform/tests/service-workers/service-worker/resources/mime-type-worker.py
|
158
|
def main(request, response):
if 'mime' in request.GET:
return [('Content-Type', request.GET['mime'])], ""
return [], ""
|
weaver-viii/h2o-3
|
refs/heads/master
|
h2o-py/h2o/expr.py
|
2
|
import h2o
import math
class ExprNode:
""" Composable Expressions: This module contains code for the lazy expression DAG. """
def __init__(self,op,*args):
self._op=op # unary/binary/prefix op
self._children=[ExprNode._arg_to_expr(a) for a in args] # a list of ExprNode instances; the children of "this" node; (e.g. (+ left rite) self._children = [left,rite] )
def _eager(self,sb=None):
""" This call is mutually recursive with ExprNode._do_it and H2OFrame._do_it """
if sb is None: sb = []
sb += ["(",self._op," "]
for child in self._children: ExprNode._do_it(child,sb)
sb += [") "]
return sb
@staticmethod
def _do_it(child,sb):
if isinstance(child, h2o.H2OFrame): child._do_it(sb)
elif isinstance(child, ExprNode): child._eager(sb)
else: sb+=[str(child)+" "]
@staticmethod
def _arg_to_expr(arg):
if isinstance(arg, (ExprNode, h2o.H2OFrame)): return arg
elif isinstance(arg, bool): return "%{}".format("TRUE" if arg else "FALSE")
elif isinstance(arg, (int, float)): return "#{}".format("NaN" if math.isnan(arg) else arg)
elif isinstance(arg, (unicode,str)): return '"'+arg+'"'
elif isinstance(arg, slice): return "(: #{} #{})".format(0 if arg.start is None else arg.start,"NaN" if math.isnan(arg.stop) else arg.stop-1)
elif isinstance(arg, list): return ("(slist \"" + "\" \"".join(arg) + "\")") if isinstance(arg[0], (str,unicode)) else ("(dlist #" + " #".join([str(i) for i in arg])+")")
elif arg is None: return "()"
raise ValueError("Unexpected arg type: " + str(type(arg)))
@staticmethod
def _collapse_sb(sb): return ' '.join("".join(sb).replace("\n", "").split()).replace(" )", ")")
def _debug_print(self,pprint=True): return "".join(self._to_string(sb=[])) if pprint else ExprNode._collapse_sb(self._to_string(sb=[]))
def _to_string(self,depth=0,sb=None):
sb += ['\n', " "*depth, "("+self._op, " "]
for child in self._children:
if isinstance(child, h2o.H2OFrame) and not child._computed: child._ast._to_string(depth+2,sb)
elif isinstance(child, ExprNode): child._to_string(depth+2,sb)
else: sb+=['\n', ' '*(depth+2), str(child)]
sb+=['\n',' '*depth+") "] + ['\n'] * (depth==0) # add a \n if depth == 0
return sb
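# Illustrative sketch (added note, not part of the original module): assuming an
# already-computed H2OFrame named `fr`, an expression such as fr + 5 would be
# represented lazily as ExprNode("+", fr, 5); calling _eager() then emits the
# Rapids-style string, roughly:
#
#   node = ExprNode("+", fr, 5)
#   print(ExprNode._collapse_sb(node._eager()))   # e.g. "(+ <frame-ref> #5)"
#
# The "#" prefix on 5 comes from _arg_to_expr, which tags numbers with "#",
# booleans with "%TRUE"/"%FALSE", and strings with double quotes.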
|
ysung-pivotal/incubator-hawq
|
refs/heads/master
|
tools/bin/pythonSrc/PyGreSQL-4.0/setup.py
|
24
|
#!/usr/bin/env python
# $Id: setup.py,v 1.27 2008/11/21 17:08:17 cito Exp $
"""Setup script for PyGreSQL version 4.0
Authors and history:
* PyGreSQL written 1997 by D'Arcy J.M. Cain <darcy@druid.net>
* based on code written 1995 by Pascal Andre <andre@chimay.via.ecp.fr>
* setup script created 2000/04 Mark Alexander <mwa@gate.net>
* tweaked 2000/05 Jeremy Hylton <jeremy@cnri.reston.va.us>
* win32 support 2001/01 by Gerhard Haering <gerhard@bigfoot.de>
* tweaked 2006/02 and 2008/11 by Christoph Zwerschke <cito@online.de>
Prerequisites to be installed:
* Python including devel package (header files and distutils)
* PostgreSQL libs and devel packages (header files of client and server)
* PostgreSQL pg_config tool (usually included in the devel package)
(the Windows installer has it as part of the database server feature)
Tested with Python 2.5.2 and PostgreSQL 8.3.5. Older versions should work
as well, but you will need at least Python 2.3 and PostgreSQL 7.4.
Use as follows:
python setup.py build # to build the module
python setup.py install # to install it
You should use MinGW (www.mingw.org) for building on Win32:
python setup.py build -c mingw32 install # use MinGW
Note that Python newer than version 2.3 uses msvcr71 instead of msvcrt
as its common runtime library. So, if you are using MinGW to build PyGreSQL,
you should edit the file "%MinGWpath%/lib/gcc/%MinGWversion%/specs"
and change the entry that reads -lmsvcrt to -lmsvcr71.
See docs.python.org/doc/install/ for more information on
using distutils to install Python programs.
"""
version = "4.0"
import sys
if not (2, 2) < sys.version_info[:2] < (3, 0):
raise Exception("PyGreSQL %s requires a Python 2 version"
" newer than 2.2." % version)
import os
from distutils.core import setup
from distutils.extension import Extension
def pg_config(s):
"""Retrieve information about installed version of PostgreSQL."""
if os.path.exists("../../../../src/bin/pg_config/pg_config"):
f = os.popen("../../../../src/bin/pg_config/pg_config --%s" % s)
else:
"""If a VPATH build, it might not be there. Look other places"""
"""It should be the one in the path, because the makefile includes greenplum_path.sh """
f = os.popen("pg_config --%s" % s)
d = f.readline().strip()
if f.close() is not None:
raise Exception("pg_config tool is not available.")
if not d:
raise Exception("Could not get %s information." % s)
return d
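# Illustrative example (not part of the original script): pg_config('includedir')
# runs "pg_config --includedir" (or the in-tree copy if present) and returns the
# first line of its output stripped of whitespace, e.g. "/usr/include/postgresql"
# on a typical Linux install (that path is only an assumed example).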
def mk_include():
"""Create a temporary local include directory.
The directory will contain a copy of the PostgreSQL server header files,
where all features which are not necessary for PyGreSQL are disabled.
"""
os.mkdir('include')
for f in os.listdir(pg_include_dir_server):
if not f.endswith('.h'):
continue
d = open(os.path.join(pg_include_dir_server, f)).read()
if f == 'pg_config.h':
d += '\n'.join(('',
'#undef ENABLE_NLS',
'#undef USE_REPL_SNPRINTF',
'#undef USE_SSL',
'#undef USE_ZLIB',
'#undef HAVE_STDINT_H',
'#undef HAVE_SYS_TIME_H',
'#undef HAVE_UNISTD_H',
'#define _CRT_SECURE_NO_WARNINGS 1',
'#define _USE_32BIT_TIME_T 1',
''))
open(os.path.join('include', f), 'w').write(d)
def rm_include():
"""Remove the temporary local include directory."""
if os.path.exists('include'):
for f in os.listdir('include'):
os.remove(os.path.join('include', f))
os.rmdir('include')
pg_include_dir = pg_config('includedir')
pg_include_dir_server = pg_config('includedir-server')
rm_include()
mk_include()
include_dirs = ['include', pg_include_dir, pg_include_dir_server]
pg_libdir = pg_config('libdir')
library_dirs = [pg_libdir]
libraries=['pq']
if sys.platform == "win32":
include_dirs.append(os.path.join(pg_include_dir_server, 'port/win32'))
setup(
name="PyGreSQL",
version=version,
description="Python PostgreSQL Interfaces",
long_description = ("PyGreSQL is an open-source Python module"
" that interfaces to a PostgreSQL database."
" It embeds the PostgreSQL query library to allow easy use"
" of the powerful PostgreSQL features from a Python script."),
keywords="postgresql database api dbapi",
author="D'Arcy J. M. Cain",
author_email="darcy@PyGreSQL.org",
url="http://www.pygresql.org",
download_url = "ftp://ftp.pygresql.org/pub/distrib/",
platforms = ["any"],
license="Python",
py_modules=['pg', 'pgdb'],
ext_modules=[Extension(
'_pg', ['pgmodule.c'],
include_dirs = include_dirs,
library_dirs = library_dirs,
libraries = libraries,
extra_compile_args = ['-O2']
)],
classifiers=[
"Development Status :: 6 - Mature",
"Intended Audience :: Developers",
"License :: OSI Approved :: Python Software Foundation License",
"Operating System :: OS Independent",
"Programming Language :: C",
"Programming Language :: Python",
"Topic :: Database",
"Topic :: Database :: Front-Ends",
"Topic :: Software Development :: Libraries :: Python Modules"
]
)
rm_include()
|
pixelrebirth/ProjectMeiva
|
refs/heads/master
|
app/__init__.py
|
27
|
from flask import Flask
app = Flask(__name__)
from app import views
|
bokeh/bokeh
|
refs/heads/branch-2.4
|
examples/models/file/graphs.py
|
1
|
"""
Zachary's Karate Club graph
Data file from:
http://vlado.fmf.uni-lj.si/pub/networks/data/Ucinet/UciData.htm
Reference:
Zachary W. (1977).
An information flow model for conflict and fission in small groups.
Journal of Anthropological Research, 33, 452-473.
"""
import networkx as nx
from bokeh.io import curdoc, show
from bokeh.models import (BoxSelectTool, Circle, Column, EdgesAndLinkedNodes, HoverTool,
MultiLine, NodesAndLinkedEdges, Plot, Range1d, Row, TapTool)
from bokeh.palettes import Spectral4
from bokeh.plotting import from_networkx
G = nx.karate_club_graph()
def create_graph(layout_func, inspection_policy=None, selection_policy=None, **kwargs):
plot = Plot(width=400, height=400,
x_range=Range1d(-1.1,1.1), y_range=Range1d(-1.1,1.1))
graph_renderer = from_networkx(G, layout_func, **kwargs)
graph_renderer.node_renderer.glyph = Circle(size=15, fill_color=Spectral4[0])
graph_renderer.node_renderer.selection_glyph = Circle(size=15, fill_color=Spectral4[2])
graph_renderer.node_renderer.hover_glyph = Circle(size=15, fill_color=Spectral4[1])
graph_renderer.edge_renderer.glyph = MultiLine(line_color="#CCCCCC", line_alpha=0.8, line_width=5)
graph_renderer.edge_renderer.selection_glyph = MultiLine(line_color=Spectral4[2], line_width=5)
graph_renderer.edge_renderer.hover_glyph = MultiLine(line_color=Spectral4[1], line_width=5)
if inspection_policy is not None:
graph_renderer.inspection_policy = inspection_policy
if selection_policy is not None:
graph_renderer.selection_policy = selection_policy
plot.renderers.append(graph_renderer)
return plot
plot_1 = create_graph(nx.circular_layout, inspection_policy=NodesAndLinkedEdges(), scale=1, center=(0,0))
plot_1.title.text = "Circular Layout (NodesAndLinkedEdges inspection policy)"
plot_1.add_tools(HoverTool(tooltips=None))
plot_2 = create_graph(nx.spring_layout, selection_policy=NodesAndLinkedEdges(), scale=2, center=(0,0))
plot_2.title.text = "Spring Layout (NodesAndLinkedEdges selection policy)"
plot_2.add_tools(TapTool(), BoxSelectTool())
plot_3 = create_graph(nx.random_layout, inspection_policy=EdgesAndLinkedNodes(), center=(0,0))
plot_3.title.text = "Random Layout (EdgesAndLinkedNodes inspection policy)"
plot_3.add_tools(HoverTool(tooltips=None))
plot_4 = create_graph(nx.fruchterman_reingold_layout, selection_policy=EdgesAndLinkedNodes(), scale=2, center=(0,0), dim=2)
plot_4.title.text = "FR Layout (EdgesAndLinkedNodes selection policy)"
plot_4.add_tools(TapTool())
layout = Column(Row(plot_1, plot_2), Row(plot_3, plot_4))
doc = curdoc()
doc.add_root(layout)
show(layout)
|
niknow/scipy
|
refs/heads/master
|
scipy/_build_utils/__init__.py
|
96
|
import numpy as np
from ._fortran import *
from scipy._lib._version import NumpyVersion
# Don't use deprecated Numpy C API. Define this to a fixed version instead of
# NPY_API_VERSION in order not to break compilation for released Scipy versions
# when Numpy introduces a new deprecation. Use in setup.py::
#
# config.add_extension('_name', sources=['source_fname'], **numpy_nodepr_api)
#
if NumpyVersion(np.__version__) >= '1.10.0.dev':
numpy_nodepr_api = dict(define_macros=[("NPY_NO_DEPRECATED_API",
"NPY_1_9_API_VERSION")])
else:
numpy_nodepr_api = dict()
|
timpalpant/calibre
|
refs/heads/master
|
src/calibre/utils/fonts/win_fonts.py
|
14
|
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os, sys, atexit
from itertools import product
from calibre import prints, isbytestring
from calibre.constants import plugins, filesystem_encoding
from calibre.utils.fonts.utils import (is_truetype_font, get_font_names,
get_font_characteristics)
class WinFonts(object):
def __init__(self, winfonts):
self.w = winfonts
# Windows requires font files to be executable for them to be loaded,
# so instead we use this hack.
self.app_font_families = {}
for f in ('Serif', 'Sans', 'Mono'):
base = 'fonts/liberation/Liberation%s-%s.ttf'
self.app_font_families['Liberation %s'%f] = m = {}
for weight, is_italic in product( (self.w.FW_NORMAL, self.w.FW_BOLD), (False, True) ):
name = {(self.w.FW_NORMAL, False):'Regular',
(self.w.FW_NORMAL, True):'Italic',
(self.w.FW_BOLD, False):'Bold',
(self.w.FW_BOLD, True):'BoldItalic'}[(weight,
is_italic)]
m[(weight, is_italic)] = base%(f, name)
# import pprint
# pprint.pprint(self.app_font_families)
def font_families(self):
names = set()
for font in self.w.enum_font_families():
if (
font['is_truetype'] and
# Fonts with names starting with @ are designed for
# vertical text
not font['name'].startswith('@')
):
names.add(font['name'])
return sorted(names.union(frozenset(self.app_font_families)))
def get_normalized_name(self, is_italic, weight):
if is_italic:
ft = 'bi' if weight == self.w.FW_BOLD else 'italic'
else:
ft = 'bold' if weight == self.w.FW_BOLD else 'normal'
return ft
def fonts_for_family(self, family, normalize=True):
family = type(u'')(family)
ans = {}
for weight, is_italic in product( (self.w.FW_NORMAL, self.w.FW_BOLD), (False, True) ):
if family in self.app_font_families:
m = self.app_font_families[family]
path = m.get((weight, is_italic), None)
if path is None: continue
data = P(path, data=True)
else:
try:
data = self.w.font_data(family, is_italic, weight)
except Exception as e:
prints('Failed to get font data for font: %s [%s] with error: %s'%
(family, self.get_normalized_name(is_italic, weight), e))
continue
ok, sig = is_truetype_font(data)
if not ok:
prints('Not a supported font, sfnt_version: %r'%sig)
continue
ext = 'otf' if sig == b'OTTO' else 'ttf'
try:
weight, is_italic, is_bold, is_regular = get_font_characteristics(data)[:4]
except Exception as e:
prints('Failed to get font characteristic for font: %s [%s]'
' with error: %s'%(family,
self.get_normalized_name(is_italic, weight), e))
continue
try:
family_name, sub_family_name, full_name = get_font_names(data)
except:
pass
if normalize:
ft = {(True, True):'bi', (True, False):'italic', (False,
True):'bold', (False, False):'normal'}[(is_italic,
is_bold)]
else:
ft = (1 if is_italic else 0, weight//10)
if not (family_name or full_name):
# prints('Font %s [%s] has no names'%(family,
# self.get_normalized_name(is_italic, weight)))
family_name = family
name = full_name or family + ' ' + (sub_family_name or '')
try:
name.encode('ascii')
except ValueError:
try:
sub_family_name.encode('ascii')
subf = sub_family_name
except:
subf = ''
name = family + ((' ' + subf) if subf else '')
ans[ft] = (ext, name, data)
return ans
def add_system_font(self, path):
'''
WARNING: The file you are adding must have execute permissions or
windows will fail to add it. (ls -l in cygwin to check)
'''
if isbytestring(path):
path = path.decode(filesystem_encoding)
path = os.path.abspath(path)
ret = self.w.add_system_font(path)
if ret > 0:
atexit.register(self.remove_system_font, path)
return ret
def remove_system_font(self, path):
return self.w.remove_system_font(path)
def load_winfonts():
w, err = plugins['winfonts']
if w is None:
raise RuntimeError('Failed to load the winfonts module: %s'%err)
return WinFonts(w)
def test_ttf_reading():
for f in sys.argv[1:]:
raw = open(f).read()
print (os.path.basename(f))
get_font_characteristics(raw)
print()
def test():
base = os.path.abspath(__file__)
d = os.path.dirname
pluginsd = os.path.join(d(d(d(base))), 'plugins')
if os.path.exists(os.path.join(pluginsd, 'winfonts.pyd')):
sys.path.insert(0, pluginsd)
import winfonts
w = WinFonts(winfonts)
else:
w = load_winfonts()
print (w.w)
families = w.font_families()
print (families)
for family in families:
prints(family + ':')
for font, data in w.fonts_for_family(family).iteritems():
prints(' ', font, data[0], data[1], len(data[2]))
print ()
if __name__ == '__main__':
test()
|
jbbskinny/sympy
|
refs/heads/master
|
sympy/physics/tests/test_hydrogen.py
|
83
|
from sympy import exp, integrate, oo, S, simplify, sqrt, symbols
from sympy.core.compatibility import range
from sympy.physics.hydrogen import R_nl, E_nl, E_nl_dirac
from sympy.utilities.pytest import raises
n, r, Z = symbols('n r Z')
def feq(a, b, max_relative_error=1e-12, max_absolute_error=1e-12):
a = float(a)
b = float(b)
# if the numbers are close enough (absolutely), then they are equal
if abs(a - b) < max_absolute_error:
return True
# if not, they can still be equal if their relative error is small
if abs(b) > abs(a):
relative_error = abs((a - b)/b)
else:
relative_error = abs((a - b)/a)
return relative_error <= max_relative_error
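# A minimal illustration (added note, not part of the original tests): feq treats
# values as equal when they agree within the given absolute or relative tolerance,
# e.g. feq(1.0, 1.0 + 1e-13) is True with the defaults, while feq(1.0, 1.001) is False.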
def test_wavefunction():
a = 1/Z
R = {
(1, 0): 2*sqrt(1/a**3) * exp(-r/a),
(2, 0): sqrt(1/(2*a**3)) * exp(-r/(2*a)) * (1 - r/(2*a)),
(2, 1): S(1)/2 * sqrt(1/(6*a**3)) * exp(-r/(2*a)) * r/a,
(3, 0): S(2)/3 * sqrt(1/(3*a**3)) * exp(-r/(3*a)) *
(1 - 2*r/(3*a) + S(2)/27 * (r/a)**2),
(3, 1): S(4)/27 * sqrt(2/(3*a**3)) * exp(-r/(3*a)) *
(1 - r/(6*a)) * r/a,
(3, 2): S(2)/81 * sqrt(2/(15*a**3)) * exp(-r/(3*a)) * (r/a)**2,
(4, 0): S(1)/4 * sqrt(1/a**3) * exp(-r/(4*a)) *
(1 - 3*r/(4*a) + S(1)/8 * (r/a)**2 - S(1)/192 * (r/a)**3),
(4, 1): S(1)/16 * sqrt(5/(3*a**3)) * exp(-r/(4*a)) *
(1 - r/(4*a) + S(1)/80 * (r/a)**2) * (r/a),
(4, 2): S(1)/64 * sqrt(1/(5*a**3)) * exp(-r/(4*a)) *
(1 - r/(12*a)) * (r/a)**2,
(4, 3): S(1)/768 * sqrt(1/(35*a**3)) * exp(-r/(4*a)) * (r/a)**3,
}
for n, l in R:
assert simplify(R_nl(n, l, r, Z) - R[(n, l)]) == 0
def test_norm():
# Maximum "n" which is tested:
n_max = 2 # it works, but is slow, for n_max > 2
for n in range(n_max + 1):
for l in range(n):
assert integrate(R_nl(n, l, r)**2 * r**2, (r, 0, oo)) == 1
def test_hydrogen_energies():
assert E_nl(n, Z) == -Z**2/(2*n**2)
assert E_nl(n) == -1/(2*n**2)
assert E_nl(1, 47) == -S(47)**2/(2*1**2)
assert E_nl(2, 47) == -S(47)**2/(2*2**2)
assert E_nl(1) == -S(1)/(2*1**2)
assert E_nl(2) == -S(1)/(2*2**2)
assert E_nl(3) == -S(1)/(2*3**2)
assert E_nl(4) == -S(1)/(2*4**2)
assert E_nl(100) == -S(1)/(2*100**2)
raises(ValueError, lambda: E_nl(0))
def test_hydrogen_energies_relat():
# First test exact formulas for small "c" so that we get nice expressions:
assert E_nl_dirac(2, 0, Z=1, c=1) == 1/sqrt(2) - 1
assert simplify(E_nl_dirac(2, 0, Z=1, c=2) - ( (8*sqrt(3) + 16)
/ sqrt(16*sqrt(3) + 32) - 4)) == 0
assert simplify(E_nl_dirac(2, 0, Z=1, c=3) - ( (54*sqrt(2) + 81)
/ sqrt(108*sqrt(2) + 162) - 9)) == 0
# Now test for almost the correct speed of light, without floating point
# numbers:
assert simplify(E_nl_dirac(2, 0, Z=1, c=137) - ( (352275361 + 10285412 *
sqrt(1173)) / sqrt(704550722 + 20570824 * sqrt(1173)) - 18769)) == 0
assert simplify(E_nl_dirac(2, 0, Z=82, c=137) - ( (352275361 + 2571353 *
sqrt(12045)) / sqrt(704550722 + 5142706*sqrt(12045)) - 18769)) == 0
# Test using exact speed of light, and compare against the nonrelativistic
# energies:
for n in range(1, 5):
for l in range(n):
assert feq(E_nl_dirac(n, l), E_nl(n), 1e-5, 1e-5)
if l > 0:
assert feq(E_nl_dirac(n, l, False), E_nl(n), 1e-5, 1e-5)
Z = 2
for n in range(1, 5):
for l in range(n):
assert feq(E_nl_dirac(n, l, Z=Z), E_nl(n, Z), 1e-4, 1e-4)
if l > 0:
assert feq(E_nl_dirac(n, l, False, Z), E_nl(n, Z), 1e-4, 1e-4)
Z = 3
for n in range(1, 5):
for l in range(n):
assert feq(E_nl_dirac(n, l, Z=Z), E_nl(n, Z), 1e-3, 1e-3)
if l > 0:
assert feq(E_nl_dirac(n, l, False, Z), E_nl(n, Z), 1e-3, 1e-3)
# Test the exceptions:
raises(ValueError, lambda: E_nl_dirac(0, 0))
raises(ValueError, lambda: E_nl_dirac(1, -1))
raises(ValueError, lambda: E_nl_dirac(1, 0, False))
|
mhaberler/machinekit
|
refs/heads/master
|
src/emc/usr_intf/axis/scripts/mdi.py
|
27
|
#!/usr/bin/env python
# This is a component of AXIS, a front-end for LinuxCNC
# Copyright 2004, 2005, 2006 Jeff Epler <jepler@unpythonic.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
'''Manual Data Input - issue a single line of g-code to the running system
mdi.py may be specified on the commandline, e.g.,
bin/mdi g0 x0
'''
import sys, os
import linuxcnc
#if len(sys.argv) > 1:
# linuxcnc.nmlfile = sys.argv[1]
# del sys.argv[1]
c = linuxcnc.command()
s = linuxcnc.stat()
if len(sys.argv) > 1:
c.mode(linuxcnc.MODE_MDI)
c.mdi(" ".join(sys.argv[1:]))
else:
try:
while 1:
mdi = raw_input("MDI> ")
if mdi == '':
s.poll()
print s.position
else:
c.mode(linuxcnc.MODE_MDI)
c.mdi(mdi)
except (SystemExit, EOFError, KeyboardInterrupt): pass
# vim:sw=4:sts=4:et:
|
yitian134/chromium
|
refs/heads/master
|
chrome/test/gpu/generate_webgl_conformance_test_list.py
|
6
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Auto-generates the WebGL conformance test list header file.
Parses the WebGL conformance test *.txt file, which contains a list of URLs
for individual conformance tests (each on a new line). It recursively parses
*.txt files. For each test URL, the matching gtest call is created and
sent to the C++ header file.
"""
import getopt
import os
import re
import sys
COPYRIGHT = """\
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
"""
WARNING = """\
// DO NOT EDIT! This file is auto-generated by
// generate_webgl_conformance_test_list.py
// It is included by webgl_conformance_tests.cc
"""
HEADER_GUARD = """\
#ifndef CHROME_TEST_GPU_WEBGL_CONFORMANCE_TEST_LIST_AUTOGEN_H_
#define CHROME_TEST_GPU_WEBGL_CONFORMANCE_TEST_LIST_AUTOGEN_H_
"""
HEADER_GUARD_END = """
#endif // CHROME_TEST_GPU_WEBGL_CONFORMANCE_TEST_LIST_AUTOGEN_H_
"""
# Assume this script is run from the src/chrome/test/gpu directory.
INPUT_DIR = "../../../third_party/webgl_conformance"
INPUT_FILE = "00_test_list.txt"
OUTPUT_FILE = "webgl_conformance_test_list_autogen.h"
def main(argv):
"""Main function for the WebGL conformance test list generator.
"""
if not os.path.exists(os.path.join(INPUT_DIR, INPUT_FILE)):
print >> sys.stderr, "ERROR: WebGL conformance tests do not exist."
print >> sys.stderr, "Run the script from the directory containing it."
return 1
output = open(OUTPUT_FILE, "w")
output.write(COPYRIGHT)
output.write(WARNING)
output.write(HEADER_GUARD)
test_prefix = {}
unparsed_files = [INPUT_FILE]
while unparsed_files:
filename = unparsed_files.pop(0)
try:
input = open(os.path.join(INPUT_DIR, filename))
except IOError:
print >> sys.stderr, "WARNING: %s does not exist (skipped)." % filename
continue
for url in input:
url = re.sub("//.*", "", url)
url = re.sub("#.*", "", url)
url = url.strip()
# Some filenames have options before them, for example,
# --min-version 1.0.2 testname.html
pos = url.rfind(" ")
if pos != -1:
url = url[pos+1:]
if not url:
continue
# Cannot use os.path.join() because Windows would use "\\", but this path
# is sent through JavaScript.
if os.path.dirname(filename):
url = "%s/%s" % (os.path.dirname(filename), url)
# Queue all text files for parsing, because test list URLs are nested
# through .txt files.
if re.match(".+00_test_list\.txt\s*$", url):
unparsed_files.append(url)
# Convert the filename to a valid test name and output the gtest code.
else:
name = os.path.splitext(url)[0]
name = re.sub("\W+", "_", name)
if os.path.exists(os.path.join(INPUT_DIR, url)):
output.write('CONFORMANCE_TEST(%s,\n "%s");\n' % (name, url))
else:
print >> sys.stderr, "WARNING: %s does not exist (skipped)." % url
input.close()
output.write(HEADER_GUARD_END)
output.close()
return 0
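# Worked example (assumed input, for illustration only): a line such as
# "conformance/attribs/gl-enable-vertex-attrib.html" in 00_test_list.txt produces
# the gtest entry
#   CONFORMANCE_TEST(conformance_attribs_gl_enable_vertex_attrib,
#     "conformance/attribs/gl-enable-vertex-attrib.html");
# because the extension is stripped and every run of non-word characters in the
# name is replaced by "_" before the macro call is written.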
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
|
arju88nair/projectCulminate
|
refs/heads/master
|
venv/lib/python3.5/site-packages/markupsafe/__init__.py
|
144
|
# -*- coding: utf-8 -*-
"""
markupsafe
~~~~~~~~~~
Implements a Markup string.
:copyright: (c) 2010 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import re
import string
from collections import Mapping
from markupsafe._compat import text_type, string_types, int_types, \
unichr, iteritems, PY2
__version__ = "1.0"
__all__ = ['Markup', 'soft_unicode', 'escape', 'escape_silent']
_striptags_re = re.compile(r'(<!--.*?-->|<[^>]*>)')
_entity_re = re.compile(r'&([^& ;]+);')
class Markup(text_type):
r"""Marks a string as being safe for inclusion in HTML/XML output without
needing to be escaped. This implements the `__html__` interface a couple
of frameworks and web applications use. :class:`Markup` is a direct
subclass of `unicode` and provides all the methods of `unicode`, except that
it escapes arguments passed to its methods and always returns `Markup`.
The `escape` function returns markup objects so that double escaping can't
happen.
The constructor of the :class:`Markup` class can be used for three
different things: When passed a unicode object it's assumed to be safe,
when passed an object with an HTML representation (has an `__html__`
method) that representation is used, otherwise the object passed is
converted into a unicode string and then assumed to be safe:
>>> Markup("Hello <em>World</em>!")
Markup(u'Hello <em>World</em>!')
>>> class Foo(object):
... def __html__(self):
... return '<a href="#">foo</a>'
...
>>> Markup(Foo())
Markup(u'<a href="#">foo</a>')
If you want a passed object to always be treated as unsafe you can use the
:meth:`escape` classmethod to create a :class:`Markup` object:
>>> Markup.escape("Hello <em>World</em>!")
Markup(u'Hello <em>World</em>!')
Operations on a markup string are markup aware which means that all
arguments are passed through the :func:`escape` function:
>>> em = Markup("<em>%s</em>")
>>> em % "foo & bar"
Markup(u'<em>foo & bar</em>')
>>> strong = Markup("<strong>%(text)s</strong>")
>>> strong % {'text': '<blink>hacker here</blink>'}
Markup(u'<strong><blink>hacker here</blink></strong>')
>>> Markup("<em>Hello</em> ") + "<foo>"
Markup(u'<em>Hello</em> <foo>')
"""
__slots__ = ()
def __new__(cls, base=u'', encoding=None, errors='strict'):
if hasattr(base, '__html__'):
base = base.__html__()
if encoding is None:
return text_type.__new__(cls, base)
return text_type.__new__(cls, base, encoding, errors)
def __html__(self):
return self
def __add__(self, other):
if isinstance(other, string_types) or hasattr(other, '__html__'):
return self.__class__(super(Markup, self).__add__(self.escape(other)))
return NotImplemented
def __radd__(self, other):
if hasattr(other, '__html__') or isinstance(other, string_types):
return self.escape(other).__add__(self)
return NotImplemented
def __mul__(self, num):
if isinstance(num, int_types):
return self.__class__(text_type.__mul__(self, num))
return NotImplemented
__rmul__ = __mul__
def __mod__(self, arg):
if isinstance(arg, tuple):
arg = tuple(_MarkupEscapeHelper(x, self.escape) for x in arg)
else:
arg = _MarkupEscapeHelper(arg, self.escape)
return self.__class__(text_type.__mod__(self, arg))
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__,
text_type.__repr__(self)
)
def join(self, seq):
return self.__class__(text_type.join(self, map(self.escape, seq)))
join.__doc__ = text_type.join.__doc__
def split(self, *args, **kwargs):
return list(map(self.__class__, text_type.split(self, *args, **kwargs)))
split.__doc__ = text_type.split.__doc__
def rsplit(self, *args, **kwargs):
return list(map(self.__class__, text_type.rsplit(self, *args, **kwargs)))
rsplit.__doc__ = text_type.rsplit.__doc__
def splitlines(self, *args, **kwargs):
return list(map(self.__class__, text_type.splitlines(
self, *args, **kwargs)))
splitlines.__doc__ = text_type.splitlines.__doc__
def unescape(self):
r"""Unescape markup again into an text_type string. This also resolves
known HTML4 and XHTML entities:
>>> Markup("Main » <em>About</em>").unescape()
u'Main \xbb <em>About</em>'
"""
from markupsafe._constants import HTML_ENTITIES
def handle_match(m):
name = m.group(1)
if name in HTML_ENTITIES:
return unichr(HTML_ENTITIES[name])
try:
if name[:2] in ('#x', '#X'):
return unichr(int(name[2:], 16))
elif name.startswith('#'):
return unichr(int(name[1:]))
except ValueError:
pass
# Don't modify unexpected input.
return m.group()
return _entity_re.sub(handle_match, text_type(self))
def striptags(self):
r"""Unescape markup into an text_type string and strip all tags. This
also resolves known HTML4 and XHTML entities. Whitespace is
normalized to one:
>>> Markup("Main » <em>About</em>").striptags()
u'Main \xbb About'
"""
stripped = u' '.join(_striptags_re.sub('', self).split())
return Markup(stripped).unescape()
@classmethod
def escape(cls, s):
"""Escape the string. Works like :func:`escape` with the difference
that for subclasses of :class:`Markup` this function would return the
correct subclass.
"""
rv = escape(s)
if rv.__class__ is not cls:
return cls(rv)
return rv
def make_simple_escaping_wrapper(name):
orig = getattr(text_type, name)
def func(self, *args, **kwargs):
args = _escape_argspec(list(args), enumerate(args), self.escape)
_escape_argspec(kwargs, iteritems(kwargs), self.escape)
return self.__class__(orig(self, *args, **kwargs))
func.__name__ = orig.__name__
func.__doc__ = orig.__doc__
return func
for method in '__getitem__', 'capitalize', \
'title', 'lower', 'upper', 'replace', 'ljust', \
'rjust', 'lstrip', 'rstrip', 'center', 'strip', \
'translate', 'expandtabs', 'swapcase', 'zfill':
locals()[method] = make_simple_escaping_wrapper(method)
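# Illustration (an added note, not from the original source): because these
# wrappers escape their arguments, a call such as
#   Markup("<em>x</em>").replace("x", "<b>")
# returns Markup(u'<em>&lt;b&gt;</em>') -- the replacement text is escaped while
# the markup that was already marked safe is left untouched.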
# new in python 2.5
if hasattr(text_type, 'partition'):
def partition(self, sep):
return tuple(map(self.__class__,
text_type.partition(self, self.escape(sep))))
def rpartition(self, sep):
return tuple(map(self.__class__,
text_type.rpartition(self, self.escape(sep))))
# new in python 2.6
if hasattr(text_type, 'format'):
def format(*args, **kwargs):
self, args = args[0], args[1:]
formatter = EscapeFormatter(self.escape)
kwargs = _MagicFormatMapping(args, kwargs)
return self.__class__(formatter.vformat(self, args, kwargs))
def __html_format__(self, format_spec):
if format_spec:
raise ValueError('Unsupported format specification '
'for Markup.')
return self
# not in python 3
if hasattr(text_type, '__getslice__'):
__getslice__ = make_simple_escaping_wrapper('__getslice__')
del method, make_simple_escaping_wrapper
class _MagicFormatMapping(Mapping):
"""This class implements a dummy wrapper to fix a bug in the Python
standard library for string formatting.
See http://bugs.python.org/issue13598 for information about why
this is necessary.
"""
def __init__(self, args, kwargs):
self._args = args
self._kwargs = kwargs
self._last_index = 0
def __getitem__(self, key):
if key == '':
idx = self._last_index
self._last_index += 1
try:
return self._args[idx]
except LookupError:
pass
key = str(idx)
return self._kwargs[key]
def __iter__(self):
return iter(self._kwargs)
def __len__(self):
return len(self._kwargs)
if hasattr(text_type, 'format'):
class EscapeFormatter(string.Formatter):
def __init__(self, escape):
self.escape = escape
def format_field(self, value, format_spec):
if hasattr(value, '__html_format__'):
rv = value.__html_format__(format_spec)
elif hasattr(value, '__html__'):
if format_spec:
raise ValueError('No format specification allowed '
'when formatting an object with '
'its __html__ method.')
rv = value.__html__()
else:
# We need to make sure the format spec is unicode here as
# otherwise the wrong callback methods are invoked. For
# instance a byte string there would invoke __str__ and
# not __unicode__.
rv = string.Formatter.format_field(
self, value, text_type(format_spec))
return text_type(self.escape(rv))
def _escape_argspec(obj, iterable, escape):
"""Helper for various string-wrapped functions."""
for key, value in iterable:
if hasattr(value, '__html__') or isinstance(value, string_types):
obj[key] = escape(value)
return obj
class _MarkupEscapeHelper(object):
"""Helper for Markup.__mod__"""
def __init__(self, obj, escape):
self.obj = obj
self.escape = escape
__getitem__ = lambda s, x: _MarkupEscapeHelper(s.obj[x], s.escape)
__unicode__ = __str__ = lambda s: text_type(s.escape(s.obj))
__repr__ = lambda s: str(s.escape(repr(s.obj)))
__int__ = lambda s: int(s.obj)
__float__ = lambda s: float(s.obj)
# We have to import these down here because the speedups and native
# modules import the Markup type which is defined above.
try:
from markupsafe._speedups import escape, escape_silent, soft_unicode
except ImportError:
from markupsafe._native import escape, escape_silent, soft_unicode
if not PY2:
soft_str = soft_unicode
__all__.append('soft_str')
|
OpenWinCon/OpenWinNet
|
refs/heads/master
|
web-gui/myvenv/lib/python3.4/site-packages/django/utils/cache.py
|
99
|
"""
This module contains helper functions for controlling caching. It does so by
managing the "Vary" header of responses. It includes functions to patch the
header of response objects directly and decorators that change functions to do
that header-patching themselves.
For information on the Vary header, see:
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.44
Essentially, the "Vary" HTTP header defines which headers a cache should take
into account when building its cache key. Requests with the same path but
different header content for headers named in "Vary" need to get different
cache keys to prevent delivery of wrong content.
An example: i18n middleware would need to distinguish caches by the
"Accept-language" header.
"""
from __future__ import unicode_literals
import hashlib
import re
import time
from django.conf import settings
from django.core.cache import caches
from django.utils.encoding import force_bytes, force_text, iri_to_uri
from django.utils.http import http_date
from django.utils.timezone import get_current_timezone_name
from django.utils.translation import get_language
cc_delim_re = re.compile(r'\s*,\s*')
def patch_cache_control(response, **kwargs):
"""
This function patches the Cache-Control header by adding all
keyword arguments to it. The transformation is as follows:
* All keyword parameter names are turned to lowercase, and underscores
are converted to hyphens.
* If the value of a parameter is True (exactly True, not just a
true value), only the parameter name is added to the header.
* All other parameters are added with their value, after applying
str() to it.
"""
def dictitem(s):
t = s.split('=', 1)
if len(t) > 1:
return (t[0].lower(), t[1])
else:
return (t[0].lower(), True)
def dictvalue(t):
if t[1] is True:
return t[0]
else:
return '%s=%s' % (t[0], t[1])
if response.has_header('Cache-Control'):
cc = cc_delim_re.split(response['Cache-Control'])
cc = dict(dictitem(el) for el in cc)
else:
cc = {}
# If there's already a max-age header but we're being asked to set a new
# max-age, use the minimum of the two ages. In practice this happens when
# a decorator and a piece of middleware both operate on a given view.
if 'max-age' in cc and 'max_age' in kwargs:
kwargs['max_age'] = min(int(cc['max-age']), kwargs['max_age'])
# Allow overriding private caching and vice versa
if 'private' in cc and 'public' in kwargs:
del cc['private']
elif 'public' in cc and 'private' in kwargs:
del cc['public']
for (k, v) in kwargs.items():
cc[k.replace('_', '-')] = v
cc = ', '.join(dictvalue(el) for el in cc.items())
response['Cache-Control'] = cc
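# A small usage sketch (added note, not part of Django itself): given a response
# with no Cache-Control header,
#
#   patch_cache_control(response, max_age=3600, no_cache=True)
#
# would leave the header as "max-age=3600, no-cache" -- the underscore becomes a
# hyphen and the True value keeps only the parameter name, per the rules above.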
def get_max_age(response):
"""
Returns the max-age from the response Cache-Control header as an integer
(or ``None`` if it wasn't found or wasn't an integer).
"""
if not response.has_header('Cache-Control'):
return
cc = dict(_to_tuple(el) for el in
cc_delim_re.split(response['Cache-Control']))
if 'max-age' in cc:
try:
return int(cc['max-age'])
except (ValueError, TypeError):
pass
def _set_response_etag(response):
if not response.streaming:
response['ETag'] = '"%s"' % hashlib.md5(response.content).hexdigest()
return response
def patch_response_headers(response, cache_timeout=None):
"""
Adds some useful headers to the given HttpResponse object:
ETag, Last-Modified, Expires and Cache-Control
Each header is only added if it isn't already set.
cache_timeout is in seconds. The CACHE_MIDDLEWARE_SECONDS setting is used
by default.
"""
if cache_timeout is None:
cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
if cache_timeout < 0:
cache_timeout = 0 # Can't have max-age negative
if settings.USE_ETAGS and not response.has_header('ETag'):
if hasattr(response, 'render') and callable(response.render):
response.add_post_render_callback(_set_response_etag)
else:
response = _set_response_etag(response)
if not response.has_header('Last-Modified'):
response['Last-Modified'] = http_date()
if not response.has_header('Expires'):
response['Expires'] = http_date(time.time() + cache_timeout)
patch_cache_control(response, max_age=cache_timeout)
def add_never_cache_headers(response):
"""
Adds headers to a response to indicate that a page should never be cached.
"""
patch_response_headers(response, cache_timeout=-1)
def patch_vary_headers(response, newheaders):
"""
Adds (or updates) the "Vary" header in the given HttpResponse object.
newheaders is a list of header names that should be in "Vary". Existing
headers in "Vary" aren't removed.
"""
# Note that we need to keep the original order intact, because cache
# implementations may rely on the order of the Vary contents in, say,
# computing an MD5 hash.
if response.has_header('Vary'):
vary_headers = cc_delim_re.split(response['Vary'])
else:
vary_headers = []
# Use .lower() here so we treat headers as case-insensitive.
existing_headers = set(header.lower() for header in vary_headers)
additional_headers = [newheader for newheader in newheaders
if newheader.lower() not in existing_headers]
response['Vary'] = ', '.join(vary_headers + additional_headers)
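# Usage sketch (illustrative only): if a response already has "Vary: Cookie",
# then patch_vary_headers(response, ['Accept-Encoding']) results in
# "Vary: Cookie, Accept-Encoding"; repeating a header that is already present
# (in any casing) leaves the header unchanged.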
def has_vary_header(response, header_query):
"""
Checks to see if the response has a given header name in its Vary header.
"""
if not response.has_header('Vary'):
return False
vary_headers = cc_delim_re.split(response['Vary'])
existing_headers = set(header.lower() for header in vary_headers)
return header_query.lower() in existing_headers
def _i18n_cache_key_suffix(request, cache_key):
"""If necessary, adds the current locale or time zone to the cache key."""
if settings.USE_I18N or settings.USE_L10N:
# first check if LocaleMiddleware or another middleware added
# LANGUAGE_CODE to request, then fall back to the active language
# which in turn can also fall back to settings.LANGUAGE_CODE
cache_key += '.%s' % getattr(request, 'LANGUAGE_CODE', get_language())
if settings.USE_TZ:
# The datetime module doesn't restrict the output of tzname().
# Windows is known to use non-standard, locale-dependent names.
# User-defined tzinfo classes may return absolutely anything.
# Hence this paranoid conversion to create a valid cache key.
tz_name = force_text(get_current_timezone_name(), errors='ignore')
cache_key += '.%s' % tz_name.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
return cache_key
def _generate_cache_key(request, method, headerlist, key_prefix):
"""Returns a cache key from the headers given in the header list."""
ctx = hashlib.md5()
for header in headerlist:
value = request.META.get(header, None)
if value is not None:
ctx.update(force_bytes(value))
url = hashlib.md5(force_bytes(iri_to_uri(request.build_absolute_uri())))
cache_key = 'views.decorators.cache.cache_page.%s.%s.%s.%s' % (
key_prefix, method, url.hexdigest(), ctx.hexdigest())
return _i18n_cache_key_suffix(request, cache_key)
def _generate_cache_header_key(key_prefix, request):
"""Returns a cache key for the header cache."""
url = hashlib.md5(force_bytes(iri_to_uri(request.build_absolute_uri())))
cache_key = 'views.decorators.cache.cache_header.%s.%s' % (
key_prefix, url.hexdigest())
return _i18n_cache_key_suffix(request, cache_key)
def get_cache_key(request, key_prefix=None, method='GET', cache=None):
"""
Returns a cache key based on the request URL and query. It can be used
in the request phase because it pulls the list of headers to take into
account from the global URL registry and uses those to build a cache key
to check against.
If there is no headerlist stored, the page needs to be rebuilt, so this
function returns None.
"""
if key_prefix is None:
key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
cache_key = _generate_cache_header_key(key_prefix, request)
if cache is None:
cache = caches[settings.CACHE_MIDDLEWARE_ALIAS]
headerlist = cache.get(cache_key, None)
if headerlist is not None:
return _generate_cache_key(request, method, headerlist, key_prefix)
else:
return None
def learn_cache_key(request, response, cache_timeout=None, key_prefix=None, cache=None):
"""
Learns what headers to take into account for some request URL from the
response object. It stores those headers in a global URL registry so that
later access to that URL will know what headers to take into account
without building the response object itself. The headers are named in the
Vary header of the response, but we want to prevent response generation.
The list of headers to use for cache key generation is stored in the same
cache as the pages themselves. If the cache ages some data out of the
cache, this just means that we have to build the response once to get at
the Vary header and so at the list of headers to use for the cache key.
"""
if key_prefix is None:
key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
if cache_timeout is None:
cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
cache_key = _generate_cache_header_key(key_prefix, request)
if cache is None:
cache = caches[settings.CACHE_MIDDLEWARE_ALIAS]
if response.has_header('Vary'):
is_accept_language_redundant = settings.USE_I18N or settings.USE_L10N
# If i18n or l10n are used, the generated cache key will be suffixed
# with the current locale. Adding the raw value of Accept-Language is
# redundant in that case and would result in storing the same content
# under multiple keys in the cache. See #18191 for details.
headerlist = []
for header in cc_delim_re.split(response['Vary']):
header = header.upper().replace('-', '_')
if header == 'ACCEPT_LANGUAGE' and is_accept_language_redundant:
continue
headerlist.append('HTTP_' + header)
headerlist.sort()
cache.set(cache_key, headerlist, cache_timeout)
return _generate_cache_key(request, request.method, headerlist, key_prefix)
else:
# if there is no Vary header, we still need a cache key
# for the request.build_absolute_uri()
cache.set(cache_key, [], cache_timeout)
return _generate_cache_key(request, request.method, [], key_prefix)
def _to_tuple(s):
t = s.split('=', 1)
if len(t) == 2:
return t[0].lower(), t[1]
return t[0].lower(), True
|
kostko/itsy
|
refs/heads/master
|
itsy/exceptions.py
|
1
|
class DoesNotExist(Exception):
pass
class MissingVersionMetadata(Exception):
pass
class MutexNotAcquired(Exception):
pass
class DocumentNotSaved(Exception):
pass
class DeleteRestrictedByReference(Exception):
pass
|
fsteggink/stetl
|
refs/heads/master
|
tests/test_args.py
|
3
|
# testing: to be called by nosetests
import os
from stetl.etl import ETL
from tests.stetl_test_case import StetlTestCase
from stetl.main import parse_args
class ConfigTest(StetlTestCase):
"""Basic configuration tests"""
def setUp(self):
super(ConfigTest, self).setUp()
# Initialize Stetl
self.curr_dir = os.path.dirname(os.path.realpath(__file__))
self.cfg_dict = {'config_file': os.path.join(self.curr_dir, 'configs/copy_in_out_file.cfg')}
def clear_stetl_env(self):
# Restore the old environment
try:
del os.environ['stetl_out_file']
del os.environ['stetl_in_file']
except:
pass
def tearDown(self):
super(ConfigTest, self).tearDown()
self.clear_stetl_env()
def test_config_args_file_single(self):
"""
Test single -a argsfile option
:return:
"""
args_default = os.path.join(self.curr_dir, 'configs/copy_in_out_file_default.args')
args_parsed = parse_args(['-a', args_default])
# Test args substitution from args_dict
config_args = args_parsed.config_args
self.assertEqual(config_args['in_file'], 'default_infile.txt')
self.assertEqual(config_args['out_file'], 'default_outfile.txt')
def test_config_args_explicit_single(self):
"""
Test single -a "arg1=x arg2=y" option
:return:
"""
args_default = os.path.join(self.curr_dir, 'configs/copy_in_out_file_default.args')
args_parsed = parse_args(['-a', 'in_file=default_infile.txt out_file=default_outfile.txt'])
# Test args substitution from args_dict
config_args = args_parsed.config_args
self.assertEqual(config_args['in_file'], 'default_infile.txt')
self.assertEqual(config_args['out_file'], 'default_outfile.txt')
def test_config_args_file_multi(self):
"""
Test multiple: -a argsfile1 -a argsfile2 option with override
:return:
"""
args_default = os.path.join(self.curr_dir, 'configs/copy_in_out_file_default.args')
args_my = os.path.join(self.curr_dir, 'configs/copy_in_out_file_my.args')
args_parsed = parse_args(['-a', args_default, '-a', args_my])
# Test args substitution from args_dict
config_args = args_parsed.config_args
self.assertEqual(config_args['in_file'], 'my_infile.txt')
self.assertEqual(config_args['out_file'], 'default_outfile.txt')
def test_config_args_file_explicit_multi(self):
"""
Test multiple: -a argsfile1 -a arg=myarg option with override
:return:
"""
args_default = os.path.join(self.curr_dir, 'configs/copy_in_out_file_default.args')
args_parsed = parse_args(['-a', args_default, '-a', 'in_file=my_infile.txt'])
# Test args substitution from args_dict
config_args = args_parsed.config_args
self.assertEqual(config_args['in_file'], 'my_infile.txt')
self.assertEqual(config_args['out_file'], 'default_outfile.txt')
def test_args_dict(self):
args_dict = {'in_file': 'infile.txt', 'out_file': 'outfile.txt'}
etl = ETL(self.cfg_dict, args_dict)
# Test args substitution from args_dict
self.assertEqual(etl.configdict.get('input_file', 'file_path'), 'infile.txt')
self.assertEqual(etl.configdict.get('output_file', 'file_path'), 'outfile.txt')
def test_args_dict_env_override(self):
args_dict = {'in_file': 'infile.txt', 'out_file': 'outfile.txt'}
# Override in OS env
os.environ['stetl_in_file'] = 'env_infile.txt'
etl = ETL(self.cfg_dict, args_dict)
# Test args substitution from args_dict
self.assertEqual(etl.configdict.get('input_file', 'file_path'), os.environ['stetl_in_file'])
self.assertEqual(etl.configdict.get('output_file', 'file_path'), 'outfile.txt')
def test_args_dict_env_all(self):
"""
Substitute ALL args from OS env.
:return:
"""
# Set all args in in OS env
os.environ['stetl_in_file'] = 'env_infile.txt'
os.environ['stetl_out_file'] = 'env_outfile.txt'
args_dict = None
etl = ETL(self.cfg_dict, args_dict)
# Test args substitution from args_dict
self.assertEqual(etl.configdict.get('input_file', 'file_path'), os.environ['stetl_in_file'])
self.assertEqual(etl.configdict.get('output_file', 'file_path'), os.environ['stetl_out_file'])
|
xiaoxq/apollo
|
refs/heads/master
|
modules/tools/prediction/multiple_gpu_estimator/mlp_utils.py
|
3
|
#!/usr/bin/env python3
###############################################################################
# Modification Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import collections
import six
from tensorflow.contrib.learn.python.learn import run_config
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import device as pydev
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import device_setter
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training_util
import tensorflow as tf
class RunConfig(tf.contrib.learn.RunConfig):
def uid(self, whitelist=None):
"""Generates a 'Unique Identifier' based on all internal fields.
Caller should use the uid string to check `RunConfig` instance integrity
in one session use, but should not rely on the implementation details, which
are subject to change.
Args:
whitelist: A list of the string names of the properties uid should not
include. If `None`, defaults to `_DEFAULT_UID_WHITE_LIST`, which
includes most properties the user is allowed to change.
Returns:
A uid string.
"""
if whitelist is None:
whitelist = run_config._DEFAULT_UID_WHITE_LIST
state = {
k: v
for k, v in self.__dict__.items() if not k.startswith('__')
}
# Pop out the keys in whitelist.
for k in whitelist:
state.pop('_' + k, None)
ordered_state = collections.OrderedDict(
sorted(list(state.items()), key=lambda t: t[0]))
# For class instances without __repr__, some special care is required.
# Otherwise, the object address will be used.
if '_cluster_spec' in ordered_state:
ordered_state['_cluster_spec'] = collections.OrderedDict(
sorted(
list(ordered_state['_cluster_spec'].as_dict().items()),
key=lambda t: t[0]))
return ', '.join(
'%s=%r' % (k, v) for (k, v) in six.iteritems(ordered_state))
class ExamplesPerSecondHook(session_run_hook.SessionRunHook):
"""Hook to print out examples per second.
Total time is tracked and then divided by the total number of steps
to get the average step time and then batch_size is used to determine
the running average of examples per second. The examples per second for the
most recent interval is also logged.
"""
def __init__(
self,
batch_size,
every_n_steps=100,
every_n_secs=None,
):
"""Initializer for ExamplesPerSecondHook.
Args:
batch_size: Total batch size used to calculate examples/second from
global time.
every_n_steps: Log stats every n steps.
every_n_secs: Log stats every n seconds.
"""
if (every_n_steps is None) == (every_n_secs is None):
raise ValueError('exactly one of every_n_steps'
' and every_n_secs should be provided.')
self._timer = basic_session_run_hooks.SecondOrStepTimer(
every_steps=every_n_steps, every_secs=every_n_secs)
self._step_train_time = 0
self._total_steps = 0
self._batch_size = batch_size
def begin(self):
self._global_step_tensor = training_util.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError(
'Global step should be created to use StepCounterHook.')
def before_run(self, run_context): # pylint: disable=unused-argument
return basic_session_run_hooks.SessionRunArgs(self._global_step_tensor)
def after_run(self, run_context, run_values):
_ = run_context
global_step = run_values.results
if self._timer.should_trigger_for_step(global_step):
elapsed_time, elapsed_steps = self._timer.update_last_triggered_step(
global_step)
if elapsed_time is not None:
steps_per_sec = elapsed_steps / elapsed_time
self._step_train_time += elapsed_time
self._total_steps += elapsed_steps
average_examples_per_sec = self._batch_size * (
self._total_steps / self._step_train_time)
current_examples_per_sec = steps_per_sec * self._batch_size
# Average examples/sec followed by current examples/sec
logging.info('%s: %g (%g), step = %g', 'Average examples/sec',
average_examples_per_sec,
current_examples_per_sec, self._total_steps)
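# Usage sketch (an assumption, not from the original file): the hook is meant to
# be passed to an Estimator's train() call alongside other hooks, for example:
#
#   hook = ExamplesPerSecondHook(batch_size=128, every_n_steps=100)
#   estimator.train(input_fn=train_input_fn, hooks=[hook])
#
# where `estimator` and `train_input_fn` are placeholders for the caller's own
# tf.estimator.Estimator and input function.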
def local_device_setter(num_devices=1,
ps_device_type='cpu',
worker_device='/cpu:0',
ps_ops=None,
ps_strategy=None):
if ps_ops is None:
ps_ops = ['Variable', 'VariableV2', 'VarHandleOp']
if ps_strategy is None:
ps_strategy = device_setter._RoundRobinStrategy(num_devices)
if not six.callable(ps_strategy):
raise TypeError("ps_strategy must be callable")
def _local_device_chooser(op):
current_device = pydev.DeviceSpec.from_string(op.device or "")
node_def = op if isinstance(op, node_def_pb2.NodeDef) else op.node_def
if node_def.op in ps_ops:
ps_device_spec = pydev.DeviceSpec.from_string('/{}:{}'.format(
ps_device_type, ps_strategy(op)))
ps_device_spec.merge_from(current_device)
return ps_device_spec.to_string()
worker_device_spec = pydev.DeviceSpec.from_string(worker_device or "")
worker_device_spec.merge_from(current_device)
return worker_device_spec.to_string()
return _local_device_chooser
|
saiwing-yeung/scikit-learn
|
refs/heads/master
|
examples/cluster/plot_cluster_iris.py
|
350
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
K-means Clustering
=========================================================
The plots display firstly what a K-means algorithm would yield
using three clusters. It is then shown what the effect of a bad
initialization is on the classification process:
By setting n_init to only 1 (default is 10), the number of
times that the algorithm will be run with different centroid
seeds is reduced.
The next plot displays what using eight clusters would deliver
and finally the ground truth.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
estimators = {'k_means_iris_3': KMeans(n_clusters=3),
'k_means_iris_8': KMeans(n_clusters=8),
'k_means_iris_bad_init': KMeans(n_clusters=3, n_init=1,
init='random')}
fignum = 1
for name, est in estimators.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
est.fit(X)
labels = est.labels_
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=labels.astype(np.float))
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
fignum = fignum + 1
# Plot the ground truth
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
for name, label in [('Setosa', 0),
('Versicolour', 1),
('Virginica', 2)]:
ax.text3D(X[y == label, 3].mean(),
X[y == label, 0].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=y)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
plt.show()
|
t794104/ansible
|
refs/heads/devel
|
lib/ansible/plugins/doc_fragments/onyx.py
|
44
|
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
# Standard files documentation fragment
DOCUMENTATION = r'''
options:
provider:
description:
- A dict object containing connection details.
type: dict
suboptions:
host:
description:
- Specifies the DNS host name or address for connecting to the remote
device over the specified transport. The value of host is used as
the destination address for the transport.
type: str
required: true
port:
description:
- Specifies the port to use when building the connection to the remote device.
type: int
default: 22
username:
description:
- Configures the username to use to authenticate the connection to
the remote device. This value is used to authenticate
the SSH session. If the value is not specified in the task, the
value of environment variable C(ANSIBLE_NET_USERNAME) will be used instead.
type: str
password:
description:
- Specifies the password to use to authenticate the connection to
the remote device. This value is used to authenticate
the SSH session. If the value is not specified in the task, the
value of environment variable C(ANSIBLE_NET_PASSWORD) will be used instead.
type: str
timeout:
description:
- Specifies the timeout in seconds for communicating with the network device
for either connecting or sending commands. If the timeout is
exceeded before the operation is completed, the module will error.
type: int
default: 10
ssh_keyfile:
description:
- Specifies the SSH key to use to authenticate the connection to
the remote device. This value is the path to the
key used to authenticate the SSH session. If the value is not specified
in the task, the value of environment variable C(ANSIBLE_NET_SSH_KEYFILE)
will be used instead.
type: path
authorize:
description:
- Instructs the module to enter privileged mode on the remote device
before sending any commands. If not specified, the device will
attempt to execute all commands in non-privileged mode. If the value
is not specified in the task, the value of environment variable
C(ANSIBLE_NET_AUTHORIZE) will be used instead.
type: bool
default: no
auth_pass:
description:
- Specifies the password to use if required to enter privileged mode
on the remote device. If I(authorize) is false, then this argument
does nothing. If the value is not specified in the task, the value of
environment variable C(ANSIBLE_NET_AUTH_PASS) will be used instead.
type: str
'''
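# An illustrative playbook task consuming this fragment's provider options
# (the module name and credential values are placeholders, not part of this file):
#
# - onyx_command:
#     commands: show version
#     provider:
#       host: "{{ inventory_hostname }}"
#       username: admin
#       password: admin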
|
pgmillon/ansible
|
refs/heads/devel
|
test/units/modules/network/f5/test_bigip_profile_client_ssl.py
|
16
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_profile_client_ssl import ModuleParameters
from library.modules.bigip_profile_client_ssl import ApiParameters
from library.modules.bigip_profile_client_ssl import ModuleManager
from library.modules.bigip_profile_client_ssl import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_profile_client_ssl import ModuleParameters
from ansible.modules.network.f5.bigip_profile_client_ssl import ApiParameters
from ansible.modules.network.f5.bigip_profile_client_ssl import ModuleManager
from ansible.modules.network.f5.bigip_profile_client_ssl import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
name='foo',
parent='bar',
ciphers='!SSLv3:!SSLv2:ECDHE+AES-GCM+SHA256:ECDHE-RSA-AES128-CBC-SHA',
cert_key_chain=[
dict(
cert='bigip_ssl_cert1',
key='bigip_ssl_key1',
chain='bigip_ssl_cert1'
)
]
)
p = ModuleParameters(params=args)
assert p.name == 'foo'
assert p.parent == '/Common/bar'
assert p.ciphers == '!SSLv3:!SSLv2:ECDHE+AES-GCM+SHA256:ECDHE-RSA-AES128-CBC-SHA'
def test_api_parameters(self):
args = load_fixture('load_ltm_profile_clientssl.json')
p = ApiParameters(params=args)
assert p.name == 'foo'
assert p.ciphers == 'DEFAULT'
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_create(self, *args):
# Configure the arguments that would be sent to the Ansible module
set_module_args(dict(
name='foo',
parent='bar',
ciphers='!SSLv3:!SSLv2:ECDHE+AES-GCM+SHA256:ECDHE-RSA-AES128-CBC-SHA',
cert_key_chain=[
dict(
cert='bigip_ssl_cert1',
key='bigip_ssl_key1',
chain='bigip_ssl_cert1'
)
],
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.exists = Mock(return_value=False)
mm.create_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
|
andresriancho/PyGithub
|
refs/heads/master
|
github/Commit.py
|
1
|
# -*- coding: utf-8 -*-
# Copyright 2012 Vincent Jacques
# vincent@vincent-jacques.net
# This file is part of PyGithub. http://vincent-jacques.net/PyGithub
# PyGithub is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License along with PyGithub. If not, see <http://www.gnu.org/licenses/>.
import github.GithubObject
import github.PaginatedList
import github.GitCommit
import github.NamedUser
import github.CommitStatus
import github.File
import github.CommitStats
import github.CommitComment
class Commit(github.GithubObject.GithubObject):
@property
def author(self):
self._completeIfNotSet(self._author)
return self._NoneIfNotSet(self._author)
@property
def commit(self):
self._completeIfNotSet(self._commit)
return self._NoneIfNotSet(self._commit)
@property
def committer(self):
self._completeIfNotSet(self._committer)
return self._NoneIfNotSet(self._committer)
@property
def files(self):
self._completeIfNotSet(self._files)
return self._NoneIfNotSet(self._files)
@property
def parents(self):
self._completeIfNotSet(self._parents)
return self._NoneIfNotSet(self._parents)
@property
def sha(self):
self._completeIfNotSet(self._sha)
return self._NoneIfNotSet(self._sha)
@property
def stats(self):
self._completeIfNotSet(self._stats)
return self._NoneIfNotSet(self._stats)
@property
def url(self):
self._completeIfNotSet(self._url)
return self._NoneIfNotSet(self._url)
def create_comment(self, body, line=github.GithubObject.NotSet, path=github.GithubObject.NotSet, position=github.GithubObject.NotSet):
assert isinstance(body, (str, unicode)), body
assert line is github.GithubObject.NotSet or isinstance(line, (int, long)), line
assert path is github.GithubObject.NotSet or isinstance(path, (str, unicode)), path
assert position is github.GithubObject.NotSet or isinstance(position, (int, long)), position
post_parameters = {
"body": body,
}
if line is not github.GithubObject.NotSet:
post_parameters["line"] = line
if path is not github.GithubObject.NotSet:
post_parameters["path"] = path
if position is not github.GithubObject.NotSet:
post_parameters["position"] = position
headers, data = self._requester.requestAndCheck(
"POST",
self.url + "/comments",
None,
post_parameters
)
return github.CommitComment.CommitComment(self._requester, data, completed=True)
def create_status(self, state, target_url=github.GithubObject.NotSet, description=github.GithubObject.NotSet):
assert isinstance(state, (str, unicode)), state
assert target_url is github.GithubObject.NotSet or isinstance(target_url, (str, unicode)), target_url
assert description is github.GithubObject.NotSet or isinstance(description, (str, unicode)), description
post_parameters = {
"state": state,
}
if target_url is not github.GithubObject.NotSet:
post_parameters["target_url"] = target_url
if description is not github.GithubObject.NotSet:
post_parameters["description"] = description
headers, data = self._requester.requestAndCheck(
"POST",
self._parentUrl(self._parentUrl(self.url)) + "/statuses/" + self.sha,
None,
post_parameters
)
return github.CommitStatus.CommitStatus(self._requester, data, completed=True)
def get_comments(self):
return github.PaginatedList.PaginatedList(
github.CommitComment.CommitComment,
self._requester,
self.url + "/comments",
None
)
def get_statuses(self):
return github.PaginatedList.PaginatedList(
github.CommitStatus.CommitStatus,
self._requester,
self._parentUrl(self._parentUrl(self.url)) + "/statuses/" + self.sha,
None
)
@property
def _identity(self):
return self.sha
def _initAttributes(self):
self._author = github.GithubObject.NotSet
self._commit = github.GithubObject.NotSet
self._committer = github.GithubObject.NotSet
self._files = github.GithubObject.NotSet
self._parents = github.GithubObject.NotSet
self._sha = github.GithubObject.NotSet
self._stats = github.GithubObject.NotSet
self._url = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "author" in attributes: # pragma no branch
assert attributes["author"] is None or isinstance(attributes["author"], dict), attributes["author"]
self._author = None if attributes["author"] is None else github.NamedUser.NamedUser(self._requester, attributes["author"], completed=False)
if "commit" in attributes: # pragma no branch
assert attributes["commit"] is None or isinstance(attributes["commit"], dict), attributes["commit"]
self._commit = None if attributes["commit"] is None else github.GitCommit.GitCommit(self._requester, attributes["commit"], completed=False)
if "committer" in attributes: # pragma no branch
assert attributes["committer"] is None or isinstance(attributes["committer"], dict), attributes["committer"]
self._committer = None if attributes["committer"] is None else github.NamedUser.NamedUser(self._requester, attributes["committer"], completed=False)
if "files" in attributes: # pragma no branch
assert attributes["files"] is None or all(isinstance(element, dict) for element in attributes["files"]), attributes["files"]
self._files = None if attributes["files"] is None else [
github.File.File(self._requester, element, completed=False)
for element in attributes["files"]
]
if "parents" in attributes: # pragma no branch
assert attributes["parents"] is None or all(isinstance(element, dict) for element in attributes["parents"]), attributes["parents"]
self._parents = None if attributes["parents"] is None else [
Commit(self._requester, element, completed=False)
for element in attributes["parents"]
]
if "sha" in attributes: # pragma no branch
assert attributes["sha"] is None or isinstance(attributes["sha"], (str, unicode)), attributes["sha"]
self._sha = attributes["sha"]
if "stats" in attributes: # pragma no branch
assert attributes["stats"] is None or isinstance(attributes["stats"], dict), attributes["stats"]
self._stats = None if attributes["stats"] is None else github.CommitStats.CommitStats(self._requester, attributes["stats"], completed=False)
if "url" in attributes: # pragma no branch
assert attributes["url"] is None or isinstance(attributes["url"], (str, unicode)), attributes["url"]
self._url = attributes["url"]
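# A minimal usage sketch (not part of the original file). Commit objects are
# normally obtained through a Repository rather than constructed directly;
# the login, repository name and sha below are placeholders.
#
# import github
# gh = github.Github("login", "password")
# commit = gh.get_user().get_repo("some-repo").get_commit("badc0ffee0ddf00d")
# commit.create_comment("Nice change")
# for status in commit.get_statuses():
#     print status.state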
|
khalibartan/Antidote-DM
|
refs/heads/master
|
Antidotes DM/youtube_dl/extractor/globo.py
|
8
|
# coding: utf-8
from __future__ import unicode_literals
import random
import math
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_chr,
compat_ord,
)
from ..utils import (
ExtractorError,
float_or_none,
int_or_none,
str_or_none,
)
class GloboIE(InfoExtractor):
    _VALID_URL = r'(?:globo:|https?://.+?\.globo\.com/(?:[^/]+/)*(?:v/(?:[^/]+/)?|videos/))(?P<id>\d{7,})'
_API_URL_TEMPLATE = 'http://api.globovideos.com/videos/%s/playlist'
_SECURITY_URL_TEMPLATE = 'http://security.video.globo.com/videos/%s/hash?player=flash&version=17.0.0.132&resource_id=%s'
_RESIGN_EXPIRATION = 86400
_TESTS = [{
'url': 'http://g1.globo.com/carros/autoesporte/videos/t/exclusivos-do-g1/v/mercedes-benz-gla-passa-por-teste-de-colisao-na-europa/3607726/',
'md5': 'b3ccc801f75cd04a914d51dadb83a78d',
'info_dict': {
'id': '3607726',
'ext': 'mp4',
'title': 'Mercedes-Benz GLA passa por teste de colisão na Europa',
'duration': 103.204,
'uploader': 'Globo.com',
'uploader_id': '265',
},
}, {
'url': 'http://globoplay.globo.com/v/4581987/',
'md5': 'f36a1ecd6a50da1577eee6dd17f67eff',
'info_dict': {
'id': '4581987',
'ext': 'mp4',
'title': 'Acidentes de trânsito estão entre as maiores causas de queda de energia em SP',
'duration': 137.973,
'uploader': 'Rede Globo',
'uploader_id': '196',
},
}, {
'url': 'http://canalbrasil.globo.com/programas/sangue-latino/videos/3928201.html',
'only_matching': True,
}, {
'url': 'http://globosatplay.globo.com/globonews/v/4472924/',
'only_matching': True,
}, {
'url': 'http://globotv.globo.com/t/programa/v/clipe-sexo-e-as-negas-adeus/3836166/',
'only_matching': True,
}, {
'url': 'http://globotv.globo.com/canal-brasil/sangue-latino/t/todos-os-videos/v/ator-e-diretor-argentino-ricado-darin-fala-sobre-utopias-e-suas-perdas/3928201/',
'only_matching': True,
}, {
'url': 'http://canaloff.globo.com/programas/desejar-profundo/videos/4518560.html',
'only_matching': True,
}]
class MD5:
HEX_FORMAT_LOWERCASE = 0
HEX_FORMAT_UPPERCASE = 1
BASE64_PAD_CHARACTER_DEFAULT_COMPLIANCE = ''
BASE64_PAD_CHARACTER_RFC_COMPLIANCE = '='
PADDING = '=0xFF01DD'
hexcase = 0
b64pad = ''
def __init__(self):
pass
class JSArray(list):
def __getitem__(self, y):
try:
return list.__getitem__(self, y)
except IndexError:
return 0
def __setitem__(self, i, y):
try:
return list.__setitem__(self, i, y)
except IndexError:
self.extend([0] * (i - len(self) + 1))
self[-1] = y
@classmethod
def hex_md5(cls, param1):
return cls.rstr2hex(cls.rstr_md5(cls.str2rstr_utf8(param1)))
@classmethod
def b64_md5(cls, param1, param2=None):
return cls.rstr2b64(cls.rstr_md5(cls.str2rstr_utf8(param1, param2)))
@classmethod
def any_md5(cls, param1, param2):
return cls.rstr2any(cls.rstr_md5(cls.str2rstr_utf8(param1)), param2)
@classmethod
def rstr_md5(cls, param1):
return cls.binl2rstr(cls.binl_md5(cls.rstr2binl(param1), len(param1) * 8))
@classmethod
def rstr2hex(cls, param1):
_loc_2 = '0123456789ABCDEF' if cls.hexcase else '0123456789abcdef'
_loc_3 = ''
for _loc_5 in range(0, len(param1)):
_loc_4 = compat_ord(param1[_loc_5])
_loc_3 += _loc_2[_loc_4 >> 4 & 15] + _loc_2[_loc_4 & 15]
return _loc_3
@classmethod
def rstr2b64(cls, param1):
_loc_2 = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_'
_loc_3 = ''
_loc_4 = len(param1)
for _loc_5 in range(0, _loc_4, 3):
_loc_6_1 = compat_ord(param1[_loc_5]) << 16
_loc_6_2 = compat_ord(param1[_loc_5 + 1]) << 8 if _loc_5 + 1 < _loc_4 else 0
_loc_6_3 = compat_ord(param1[_loc_5 + 2]) if _loc_5 + 2 < _loc_4 else 0
_loc_6 = _loc_6_1 | _loc_6_2 | _loc_6_3
for _loc_7 in range(0, 4):
if _loc_5 * 8 + _loc_7 * 6 > len(param1) * 8:
_loc_3 += cls.b64pad
else:
_loc_3 += _loc_2[_loc_6 >> 6 * (3 - _loc_7) & 63]
return _loc_3
@staticmethod
def rstr2any(param1, param2):
_loc_3 = len(param2)
_loc_4 = []
_loc_9 = [0] * ((len(param1) >> 2) + 1)
for _loc_5 in range(0, len(_loc_9)):
_loc_9[_loc_5] = compat_ord(param1[_loc_5 * 2]) << 8 | compat_ord(param1[_loc_5 * 2 + 1])
while len(_loc_9) > 0:
_loc_8 = []
_loc_7 = 0
for _loc_5 in range(0, len(_loc_9)):
_loc_7 = (_loc_7 << 16) + _loc_9[_loc_5]
_loc_6 = math.floor(_loc_7 / _loc_3)
_loc_7 -= _loc_6 * _loc_3
if len(_loc_8) > 0 or _loc_6 > 0:
                    _loc_8.append(_loc_6)
            _loc_4.append(_loc_7)
_loc_9 = _loc_8
_loc_10 = ''
_loc_5 = len(_loc_4) - 1
while _loc_5 >= 0:
_loc_10 += param2[_loc_4[_loc_5]]
_loc_5 -= 1
return _loc_10
@classmethod
def str2rstr_utf8(cls, param1, param2=None):
_loc_3 = ''
_loc_4 = -1
if not param2:
param2 = cls.PADDING
param1 = param1 + param2[1:9]
while True:
_loc_4 += 1
if _loc_4 >= len(param1):
break
_loc_5 = compat_ord(param1[_loc_4])
_loc_6 = compat_ord(param1[_loc_4 + 1]) if _loc_4 + 1 < len(param1) else 0
if 55296 <= _loc_5 <= 56319 and 56320 <= _loc_6 <= 57343:
_loc_5 = 65536 + ((_loc_5 & 1023) << 10) + (_loc_6 & 1023)
_loc_4 += 1
if _loc_5 <= 127:
_loc_3 += compat_chr(_loc_5)
continue
if _loc_5 <= 2047:
_loc_3 += compat_chr(192 | _loc_5 >> 6 & 31) + compat_chr(128 | _loc_5 & 63)
continue
if _loc_5 <= 65535:
_loc_3 += compat_chr(224 | _loc_5 >> 12 & 15) + compat_chr(128 | _loc_5 >> 6 & 63) + compat_chr(
128 | _loc_5 & 63)
continue
if _loc_5 <= 2097151:
_loc_3 += compat_chr(240 | _loc_5 >> 18 & 7) + compat_chr(128 | _loc_5 >> 12 & 63) + compat_chr(
128 | _loc_5 >> 6 & 63) + compat_chr(128 | _loc_5 & 63)
return _loc_3
@staticmethod
def rstr2binl(param1):
_loc_2 = [0] * ((len(param1) >> 2) + 1)
for _loc_3 in range(0, len(_loc_2)):
_loc_2[_loc_3] = 0
for _loc_3 in range(0, len(param1) * 8, 8):
_loc_2[_loc_3 >> 5] |= (compat_ord(param1[_loc_3 // 8]) & 255) << _loc_3 % 32
return _loc_2
@staticmethod
def binl2rstr(param1):
_loc_2 = ''
for _loc_3 in range(0, len(param1) * 32, 8):
_loc_2 += compat_chr(param1[_loc_3 >> 5] >> _loc_3 % 32 & 255)
return _loc_2
@classmethod
def binl_md5(cls, param1, param2):
param1 = cls.JSArray(param1)
param1[param2 >> 5] |= 128 << param2 % 32
param1[(param2 + 64 >> 9 << 4) + 14] = param2
_loc_3 = 1732584193
_loc_4 = -271733879
_loc_5 = -1732584194
_loc_6 = 271733878
for _loc_7 in range(0, len(param1), 16):
_loc_8 = _loc_3
_loc_9 = _loc_4
_loc_10 = _loc_5
_loc_11 = _loc_6
_loc_3 = cls.md5_ff(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 0], 7, -680876936)
_loc_6 = cls.md5_ff(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 1], 12, -389564586)
_loc_5 = cls.md5_ff(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 2], 17, 606105819)
_loc_4 = cls.md5_ff(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 3], 22, -1044525330)
_loc_3 = cls.md5_ff(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 4], 7, -176418897)
_loc_6 = cls.md5_ff(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 5], 12, 1200080426)
_loc_5 = cls.md5_ff(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 6], 17, -1473231341)
_loc_4 = cls.md5_ff(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 7], 22, -45705983)
_loc_3 = cls.md5_ff(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 8], 7, 1770035416)
_loc_6 = cls.md5_ff(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 9], 12, -1958414417)
_loc_5 = cls.md5_ff(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 10], 17, -42063)
_loc_4 = cls.md5_ff(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 11], 22, -1990404162)
_loc_3 = cls.md5_ff(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 12], 7, 1804603682)
_loc_6 = cls.md5_ff(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 13], 12, -40341101)
_loc_5 = cls.md5_ff(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 14], 17, -1502002290)
_loc_4 = cls.md5_ff(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 15], 22, 1236535329)
_loc_3 = cls.md5_gg(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 1], 5, -165796510)
_loc_6 = cls.md5_gg(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 6], 9, -1069501632)
_loc_5 = cls.md5_gg(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 11], 14, 643717713)
_loc_4 = cls.md5_gg(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 0], 20, -373897302)
_loc_3 = cls.md5_gg(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 5], 5, -701558691)
_loc_6 = cls.md5_gg(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 10], 9, 38016083)
_loc_5 = cls.md5_gg(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 15], 14, -660478335)
_loc_4 = cls.md5_gg(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 4], 20, -405537848)
_loc_3 = cls.md5_gg(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 9], 5, 568446438)
_loc_6 = cls.md5_gg(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 14], 9, -1019803690)
_loc_5 = cls.md5_gg(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 3], 14, -187363961)
_loc_4 = cls.md5_gg(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 8], 20, 1163531501)
_loc_3 = cls.md5_gg(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 13], 5, -1444681467)
_loc_6 = cls.md5_gg(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 2], 9, -51403784)
_loc_5 = cls.md5_gg(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 7], 14, 1735328473)
_loc_4 = cls.md5_gg(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 12], 20, -1926607734)
_loc_3 = cls.md5_hh(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 5], 4, -378558)
_loc_6 = cls.md5_hh(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 8], 11, -2022574463)
_loc_5 = cls.md5_hh(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 11], 16, 1839030562)
_loc_4 = cls.md5_hh(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 14], 23, -35309556)
_loc_3 = cls.md5_hh(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 1], 4, -1530992060)
_loc_6 = cls.md5_hh(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 4], 11, 1272893353)
_loc_5 = cls.md5_hh(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 7], 16, -155497632)
_loc_4 = cls.md5_hh(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 10], 23, -1094730640)
_loc_3 = cls.md5_hh(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 13], 4, 681279174)
_loc_6 = cls.md5_hh(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 0], 11, -358537222)
_loc_5 = cls.md5_hh(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 3], 16, -722521979)
_loc_4 = cls.md5_hh(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 6], 23, 76029189)
_loc_3 = cls.md5_hh(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 9], 4, -640364487)
_loc_6 = cls.md5_hh(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 12], 11, -421815835)
_loc_5 = cls.md5_hh(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 15], 16, 530742520)
_loc_4 = cls.md5_hh(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 2], 23, -995338651)
_loc_3 = cls.md5_ii(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 0], 6, -198630844)
_loc_6 = cls.md5_ii(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 7], 10, 1126891415)
_loc_5 = cls.md5_ii(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 14], 15, -1416354905)
_loc_4 = cls.md5_ii(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 5], 21, -57434055)
_loc_3 = cls.md5_ii(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 12], 6, 1700485571)
_loc_6 = cls.md5_ii(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 3], 10, -1894986606)
_loc_5 = cls.md5_ii(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 10], 15, -1051523)
_loc_4 = cls.md5_ii(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 1], 21, -2054922799)
_loc_3 = cls.md5_ii(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 8], 6, 1873313359)
_loc_6 = cls.md5_ii(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 15], 10, -30611744)
_loc_5 = cls.md5_ii(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 6], 15, -1560198380)
_loc_4 = cls.md5_ii(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 13], 21, 1309151649)
_loc_3 = cls.md5_ii(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 4], 6, -145523070)
_loc_6 = cls.md5_ii(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 11], 10, -1120210379)
_loc_5 = cls.md5_ii(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 2], 15, 718787259)
_loc_4 = cls.md5_ii(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 9], 21, -343485551)
_loc_3 = cls.safe_add(_loc_3, _loc_8)
_loc_4 = cls.safe_add(_loc_4, _loc_9)
_loc_5 = cls.safe_add(_loc_5, _loc_10)
_loc_6 = cls.safe_add(_loc_6, _loc_11)
return [_loc_3, _loc_4, _loc_5, _loc_6]
@classmethod
def md5_cmn(cls, param1, param2, param3, param4, param5, param6):
return cls.safe_add(
cls.bit_rol(cls.safe_add(cls.safe_add(param2, param1), cls.safe_add(param4, param6)), param5), param3)
@classmethod
def md5_ff(cls, param1, param2, param3, param4, param5, param6, param7):
return cls.md5_cmn(param2 & param3 | ~param2 & param4, param1, param2, param5, param6, param7)
@classmethod
def md5_gg(cls, param1, param2, param3, param4, param5, param6, param7):
return cls.md5_cmn(param2 & param4 | param3 & ~param4, param1, param2, param5, param6, param7)
@classmethod
def md5_hh(cls, param1, param2, param3, param4, param5, param6, param7):
return cls.md5_cmn(param2 ^ param3 ^ param4, param1, param2, param5, param6, param7)
@classmethod
def md5_ii(cls, param1, param2, param3, param4, param5, param6, param7):
return cls.md5_cmn(param3 ^ (param2 | ~param4), param1, param2, param5, param6, param7)
@classmethod
def safe_add(cls, param1, param2):
_loc_3 = (param1 & 65535) + (param2 & 65535)
_loc_4 = (param1 >> 16) + (param2 >> 16) + (_loc_3 >> 16)
return cls.lshift(_loc_4, 16) | _loc_3 & 65535
@classmethod
def bit_rol(cls, param1, param2):
return cls.lshift(param1, param2) | (param1 & 0xFFFFFFFF) >> (32 - param2)
@staticmethod
def lshift(value, count):
r = (0xFFFFFFFF & value) << count
return -(~(r - 1) & 0xFFFFFFFF) if r > 0x7FFFFFFF else r
def _real_extract(self, url):
video_id = self._match_id(url)
video = self._download_json(
self._API_URL_TEMPLATE % video_id, video_id)['videos'][0]
title = video['title']
formats = []
for resource in video['resources']:
resource_id = resource.get('_id')
if not resource_id or resource_id.endswith('manifest'):
continue
security = self._download_json(
self._SECURITY_URL_TEMPLATE % (video_id, resource_id),
video_id, 'Downloading security hash for %s' % resource_id)
security_hash = security.get('hash')
if not security_hash:
message = security.get('message')
if message:
raise ExtractorError(
'%s returned error: %s' % (self.IE_NAME, message), expected=True)
continue
hash_code = security_hash[:2]
received_time = int(security_hash[2:12])
received_random = security_hash[12:22]
received_md5 = security_hash[22:]
sign_time = received_time + self._RESIGN_EXPIRATION
padding = '%010d' % random.randint(1, 10000000000)
signed_md5 = self.MD5.b64_md5(received_md5 + compat_str(sign_time) + padding)
signed_hash = hash_code + compat_str(received_time) + received_random + compat_str(sign_time) + padding + signed_md5
resource_url = resource['url']
signed_url = '%s?h=%s&k=%s' % (resource_url, signed_hash, 'flash')
if resource_id.endswith('m3u8') or resource_url.endswith('.m3u8'):
formats.extend(self._extract_m3u8_formats(
signed_url, resource_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id='hls', fatal=False))
else:
formats.append({
'url': signed_url,
'format_id': 'http-%s' % resource_id,
'height': int_or_none(resource.get('height')),
})
self._sort_formats(formats)
duration = float_or_none(video.get('duration'), 1000)
uploader = video.get('channel')
uploader_id = str_or_none(video.get('channel_id'))
return {
'id': video_id,
'title': title,
'duration': duration,
'uploader': uploader,
'uploader_id': uploader_id,
'formats': formats
}
class GloboArticleIE(InfoExtractor):
    _VALID_URL = r'https?://.+?\.globo\.com/(?:[^/]+/)*(?P<id>[^/]+)\.html'
_VIDEOID_REGEXES = [
r'\bdata-video-id=["\'](\d{7,})',
r'\bdata-player-videosids=["\'](\d{7,})',
r'\bvideosIDs\s*:\s*["\'](\d{7,})',
r'\bdata-id=["\'](\d{7,})',
r'<div[^>]+\bid=["\'](\d{7,})',
]
_TESTS = [{
'url': 'http://g1.globo.com/jornal-nacional/noticia/2014/09/novidade-na-fiscalizacao-de-bagagem-pela-receita-provoca-discussoes.html',
'md5': '307fdeae4390ccfe6ba1aa198cf6e72b',
'info_dict': {
'id': '3652183',
'ext': 'mp4',
'title': 'Receita Federal explica como vai fiscalizar bagagens de quem retorna ao Brasil de avião',
'duration': 110.711,
'uploader': 'Rede Globo',
'uploader_id': '196',
}
}, {
'url': 'http://gq.globo.com/Prazeres/Poder/noticia/2015/10/all-o-desafio-assista-ao-segundo-capitulo-da-serie.html',
'only_matching': True,
}, {
'url': 'http://gshow.globo.com/programas/tv-xuxa/O-Programa/noticia/2014/01/xuxa-e-junno-namoram-muuuito-em-luau-de-zeze-di-camargo-e-luciano.html',
'only_matching': True,
}]
@classmethod
def suitable(cls, url):
return False if GloboIE.suitable(url) else super(GloboArticleIE, cls).suitable(url)
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
video_id = self._search_regex(self._VIDEOID_REGEXES, webpage, 'video id')
return self.url_result('globo:%s' % video_id, 'Globo')
|
todaychi/hue
|
refs/heads/master
|
desktop/libs/libsentry/src/libsentry/client2.py
|
7
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from desktop.lib import thrift_util
from sentry_generic_policy_service import SentryGenericPolicyService
from sentry_generic_policy_service.ttypes import TListSentryRolesRequest, TListSentryPrivilegesRequest, TAuthorizable, TCreateSentryRoleRequest, \
TDropSentryRoleRequest, TAlterSentryRoleGrantPrivilegeRequest, TSentryPrivilege, TAlterSentryRoleGrantPrivilegeResponse, \
TAlterSentryRoleRevokePrivilegeRequest, TAlterSentryRoleAddGroupsRequest, TAlterSentryRoleDeleteGroupsRequest, \
TListSentryPrivilegesForProviderRequest, TSentryActiveRoleSet, TDropPrivilegesRequest, TRenamePrivilegesRequest, \
TListSentryPrivilegesByAuthRequest
from libsentry.sentry_site import get_sentry_server_authentication,\
get_sentry_server_principal
LOG = logging.getLogger(__name__)
"""
struct TAuthorizable {
1: required string type,
2: required string name
}
struct TSentryPrivilege {
1: required string component,
2: required string serviceName,
3: required list<TAuthorizable> authorizables,
4: required string action,
5: optional i64 createTime, # Set on server side
6: optional string grantorPrincipal, # Set on server side
7: optional TSentryGrantOption grantOption = sentry_policy_service.TSentryGrantOption.FALSE
}
"""
class SentryClient(object):
SENTRY_MECHANISMS = {'KERBEROS': 'GSSAPI', 'NOSASL': 'NOSASL', 'NONE': 'NONE'}
def __init__(self, host, port, username, component='hive'):
self.username = username
self.host = host
self.port = port
self.security = self._get_security()
self.component = component
self.client = thrift_util.get_client(
SentryGenericPolicyService.Client,
host,
port,
service_name="SentryGenericPolicyService",
username=self.username,
timeout_seconds=30,
multiple=True,
kerberos_principal=self.security['kerberos_principal_short_name'],
use_sasl=self.security['use_sasl'],
mechanism=self.security['mechanism']
)
def __str__(self):
return ', '.join(map(str, [self.host, self.port, self.component, self.username, self.security]))
def _get_security(self):
principal = get_sentry_server_principal()
if principal:
kerberos_principal_short_name = principal.split('/', 1)[0]
else:
kerberos_principal_short_name = None
use_sasl = get_sentry_server_authentication() == 'KERBEROS'
mechanism = SentryClient.SENTRY_MECHANISMS[get_sentry_server_authentication()]
return {
'kerberos_principal_short_name': kerberos_principal_short_name,
'use_sasl': use_sasl,
'mechanism': mechanism
}
def create_sentry_role(self, roleName):
request = TCreateSentryRoleRequest(requestorUserName=self.username, component=self.component, roleName=roleName)
return self.client.create_sentry_role(request)
def drop_sentry_role(self, roleName):
request = TDropSentryRoleRequest(requestorUserName=self.username, component=self.component, roleName=roleName)
return self.client.drop_sentry_role(request)
def alter_sentry_role_grant_privilege(self, roleName, tSentryPrivilege):
tSentryPrivilege['authorizables'] = [TAuthorizable(type=_auth['type'], name=_auth['name']) for _auth in tSentryPrivilege['authorizables']]
tSentryPrivilege = TSentryPrivilege(**tSentryPrivilege)
request = TAlterSentryRoleGrantPrivilegeRequest(requestorUserName=self.username, component=self.component, roleName=roleName, privilege=tSentryPrivilege)
return self.client.alter_sentry_role_grant_privilege(request)
def alter_sentry_role_revoke_privilege(self, roleName, tSentryPrivilege):
if tSentryPrivilege is not None:
tSentryPrivilege['authorizables'] = [TAuthorizable(type=_auth['type'], name=_auth['name']) for _auth in tSentryPrivilege['authorizables']]
tSentryPrivilege = TSentryPrivilege(**tSentryPrivilege)
request = TAlterSentryRoleRevokePrivilegeRequest(requestorUserName=self.username, component=self.component, roleName=roleName, privilege=tSentryPrivilege)
return self.client.alter_sentry_role_revoke_privilege(request)
def alter_sentry_role_add_groups(self, roleName, groups):
request = TAlterSentryRoleAddGroupsRequest(requestorUserName=self.username, component=self.component, roleName=roleName, groups=groups)
return self.client.alter_sentry_role_add_groups(request)
def alter_sentry_role_delete_groups(self, roleName, groups):
request = TAlterSentryRoleDeleteGroupsRequest(requestorUserName=self.username, component=self.component, roleName=roleName, groups=groups)
return self.client.alter_sentry_role_delete_groups(request)
def list_sentry_roles_by_group(self, groupName=None):
request = TListSentryRolesRequest(requestorUserName=self.username, component=self.component, groupName=groupName)
return self.client.list_sentry_roles_by_group(request)
def list_sentry_privileges_by_role(self, serviceName, roleName, authorizables=None):
if authorizables is not None:
authorizables = TAuthorizable(**authorizables)
request = TListSentryPrivilegesRequest(requestorUserName=self.username, component=self.component, roleName=roleName, serviceName=serviceName, authorizables=authorizables)
return self.client.list_sentry_privileges_by_role(request)
def drop_sentry_privilege(self, authorizable):
authorizable = TAuthorizable(**authorizable)
request = TDropPrivilegesRequest(requestorUserName=self.username, component=self.component, authorizable=authorizable)
return self.client.drop_sentry_privilege(request)
def rename_sentry_privilege(self, oldAuthorizable, newAuthorizable):
oldAuthorizable = TAuthorizable(**oldAuthorizable)
newAuthorizable = TAuthorizable(**newAuthorizable)
request = TRenamePrivilegesRequest(requestorUserName=self.username, component=self.component, oldAuthorizable=oldAuthorizable, newAuthorizable=newAuthorizable)
return self.client.rename_sentry_privilege(request)
def list_sentry_privileges_for_provider(self, groups, roleSet=None, authorizableHierarchy=None):
"""
struct TSentryActiveRoleSet {
1: required bool all,
2: required set<string> roles,
}
struct TListSentryPrivilegesForProviderResponse {
1: required sentry_common_service.TSentryResponseStatus status
2: required set<string> privileges
}
"""
if roleSet is not None:
roleSet = TSentryActiveRoleSet(**roleSet)
if authorizableHierarchy is not None:
authorizableHierarchy = TAuthorizable(**authorizableHierarchy)
request = TListSentryPrivilegesForProviderRequest(component=self.component, groups=groups, roleSet=roleSet, authorizableHierarchy=authorizableHierarchy)
return self.client.list_sentry_privileges_for_provider(request)
def list_sentry_privileges_by_authorizable(self, serviceName, authorizableSet, groups=None, roleSet=None):
authorizableSet = ['%s=%s' % (_auth['type'], _auth['name']) for _authSet in authorizableSet for _auth in _authSet['authorizables']]
if roleSet is not None:
roleSet = TSentryActiveRoleSet(**roleSet)
request = TListSentryPrivilegesByAuthRequest(requestorUserName=self.username, component=self.component, serviceName=serviceName, authorizablesSet=set(authorizableSet), groups=groups, roleSet=roleSet)
return self.client.list_sentry_privileges_by_authorizable(request)
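# A minimal usage sketch (not part of the original module); the host, port,
# user, role and group names below are placeholders.
#
# client = SentryClient('sentry-host.example.com', 8038, 'hue', component='solr')
# client.create_sentry_role('analyst')
# client.alter_sentry_role_add_groups('analyst', ['analysts'])
# roles = client.list_sentry_roles_by_group(groupName='analysts')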
|
duniter/duniter-python-api
|
refs/heads/master
|
duniterpy/api/bma/tx.py
|
2
|
"""
Copyright 2014-2021 Vincent Texier <vit@free.fr>
DuniterPy is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
DuniterPy is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import logging
from aiohttp import ClientResponse
from duniterpy.api.client import Client, RESPONSE_AIOHTTP
logger = logging.getLogger("duniter/tx")
MODULE = "tx"
HISTORY_SCHEMA = {
"type": "object",
"properties": {
"currency": {"type": "string"},
"pubkey": {"type": "string"},
"history": {
"type": "object",
"properties": {
"sent": {"$ref": "#/definitions/transaction_data"},
"received": {"$ref": "#/definitions/transaction_data"},
"sending": {"$ref": "#/definitions/transactioning_data"},
"receiving": {"$ref": "#/definitions/transactioning_data"},
"pending": {"$ref": "#/definitions/transactioning_data"},
},
"required": ["sent", "received", "sending", "receiving", "pending"],
},
},
"definitions": {
"transaction_data": {
"type": "array",
"items": {
"type": "object",
"properties": {
"version": {"type": "number"},
"issuers": {"type": "array", "items": {"type": "string"}},
"inputs": {"type": "array", "items": {"type": "string"}},
"outputs": {"type": "array", "items": {"type": "string"}},
"unlocks": {"type": "array", "items": {"type": "string"}},
"comment": {"type": "string"},
"signatures": {"type": "array", "items": {"type": "string"}},
"hash": {"type": "string"},
"block_number": {"type": "number"},
"time": {"type": "number"},
},
"required": [
"version",
"issuers",
"inputs",
"outputs",
"comment",
"signatures",
"hash",
"block_number",
"time",
],
},
},
"transactioning_data": {
"type": "array",
"items": {
"type": "object",
"properties": {
"version": {"type": "number"},
"issuers": {"type": "array", "items": {"type": "string"}},
"inputs": {"type": "array", "items": {"type": "string"}},
"outputs": {"type": "array", "items": {"type": "string"}},
"unlocks": {"type": "array", "items": {"type": "string"}},
"comment": {"type": "string"},
"signatures": {"type": "array", "items": {"type": "string"}},
"hash": {"type": "string"},
},
"required": [
"version",
"issuers",
"inputs",
"outputs",
"comment",
"signatures",
"hash",
],
},
},
},
"required": ["currency", "pubkey", "history"],
}
SOURCES_SCHEMA = {
"type": "object",
"properties": {
"currency": {"type": "string"},
"pubkey": {"type": "string"},
"sources": {
"type": "array",
"items": {
"type": "object",
"properties": {
"type": {"type": "string"},
"noffset": {"type": "number"},
"identifier": {"type": "string"},
"amount": {"type": "number"},
"base": {"type": "number"},
},
"required": ["type", "noffset", "identifier", "amount", "base"],
},
},
},
"required": ["currency", "pubkey", "sources"],
}
async def history(client: Client, pubkey: str) -> dict:
"""
    GET transaction history of the given public key
:param client: Client to connect to the api
:param pubkey: Public key
:return:
"""
return await client.get(MODULE + "/history/%s" % pubkey, schema=HISTORY_SCHEMA)
async def process(client: Client, transaction_signed_raw: str) -> ClientResponse:
"""
POST a transaction raw document
:param client: Client to connect to the api
:param transaction_signed_raw: Transaction signed raw document
:return:
"""
return await client.post(
MODULE + "/process",
{"transaction": transaction_signed_raw},
rtype=RESPONSE_AIOHTTP,
)
async def sources(client: Client, pubkey: str) -> dict:
"""
GET transaction sources
:param client: Client to connect to the api
:param pubkey: Public key
:return:
"""
return await client.get(MODULE + "/sources/%s" % pubkey, schema=SOURCES_SCHEMA)
async def pending(client: Client, pubkey: str) -> dict:
"""
GET pending transaction history for the given pubkey
:param client: Client to connect to the api
:param pubkey: Public key
:return:
"""
return await client.get(
MODULE + "/history/%s/pending" % pubkey, schema=HISTORY_SCHEMA
)
async def blocks(client: Client, pubkey: str, start: int, end: int) -> dict:
"""
    GET public key transaction history between start and end block numbers
:param client: Client to connect to the api
:param pubkey: Public key
:param start: Start from block number
:param end: End to block number
:return:
"""
return await client.get(
MODULE + "/history/%s/blocks/%s/%s" % (pubkey, start, end),
schema=HISTORY_SCHEMA,
)
async def times(client: Client, pubkey: str, start: int, end: int) -> dict:
"""
    GET public key transaction history between start and end timestamps
:param client: Client to connect to the api
:param pubkey: Public key
:param start: Start from timestamp
:param end: End to timestamp
:return:
"""
return await client.get(
MODULE + "/history/%s/times/%s/%s" % (pubkey, start, end), schema=HISTORY_SCHEMA
)
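# A minimal usage sketch (not part of the original module); the endpoint
# string and public key below are placeholders, and Client is the class
# imported above from duniterpy.api.client.
#
# import asyncio
#
# async def dump_history():
#     client = Client("BMAS g1.duniter.org 443")
#     tx_history = await history(client, "PUBLIC_KEY_PLACEHOLDER")
#     print(tx_history)
#     await client.close()
#
# asyncio.get_event_loop().run_until_complete(dump_history())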
|
berendkleinhaneveld/VTK
|
refs/heads/master
|
ThirdParty/Twisted/twisted/test/test_abstract.py
|
46
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for generic file descriptor based reactor support code.
"""
from __future__ import division, absolute_import
from twisted.trial.unittest import TestCase
from twisted.internet.abstract import isIPAddress
class AddressTests(TestCase):
"""
Tests for address-related functionality.
"""
def test_decimalDotted(self):
"""
L{isIPAddress} should return C{True} for any decimal dotted
representation of an IPv4 address.
"""
self.assertTrue(isIPAddress('0.1.2.3'))
self.assertTrue(isIPAddress('252.253.254.255'))
def test_shortDecimalDotted(self):
"""
L{isIPAddress} should return C{False} for a dotted decimal
representation with fewer or more than four octets.
"""
self.assertFalse(isIPAddress('0'))
self.assertFalse(isIPAddress('0.1'))
self.assertFalse(isIPAddress('0.1.2'))
self.assertFalse(isIPAddress('0.1.2.3.4'))
def test_invalidLetters(self):
"""
L{isIPAddress} should return C{False} for any non-decimal dotted
representation including letters.
"""
self.assertFalse(isIPAddress('a.2.3.4'))
self.assertFalse(isIPAddress('1.b.3.4'))
def test_invalidPunctuation(self):
"""
L{isIPAddress} should return C{False} for a string containing
strange punctuation.
"""
self.assertFalse(isIPAddress(','))
self.assertFalse(isIPAddress('1,2'))
self.assertFalse(isIPAddress('1,2,3'))
self.assertFalse(isIPAddress('1.,.3,4'))
def test_emptyString(self):
"""
L{isIPAddress} should return C{False} for the empty string.
"""
self.assertFalse(isIPAddress(''))
def test_invalidNegative(self):
"""
L{isIPAddress} should return C{False} for negative decimal values.
"""
self.assertFalse(isIPAddress('-1'))
self.assertFalse(isIPAddress('1.-2'))
self.assertFalse(isIPAddress('1.2.-3'))
self.assertFalse(isIPAddress('1.2.-3.4'))
def test_invalidPositive(self):
"""
L{isIPAddress} should return C{False} for a string containing
positive decimal values greater than 255.
"""
self.assertFalse(isIPAddress('256.0.0.0'))
self.assertFalse(isIPAddress('0.256.0.0'))
self.assertFalse(isIPAddress('0.0.256.0'))
self.assertFalse(isIPAddress('0.0.0.256'))
self.assertFalse(isIPAddress('256.256.256.256'))
|
ttuygun/VirusTotal-Dropzone
|
refs/heads/master
|
VirusTotal Virus Scan.dzbundle/requests/packages/chardet/mbcharsetprober.py
|
2923
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .charsetprober import CharSetProber
class MultiByteCharSetProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mDistributionAnalyzer = None
self._mCodingSM = None
self._mLastChar = [0, 0]
def reset(self):
CharSetProber.reset(self)
if self._mCodingSM:
self._mCodingSM.reset()
if self._mDistributionAnalyzer:
self._mDistributionAnalyzer.reset()
self._mLastChar = [0, 0]
def get_charset_name(self):
pass
def feed(self, aBuf):
aLen = len(aBuf)
for i in range(0, aLen):
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == constants.eError:
if constants._debug:
sys.stderr.write(self.get_charset_name()
+ ' prober hit error at byte ' + str(i)
+ '\n')
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if (self._mDistributionAnalyzer.got_enough_data() and
(self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
return self._mDistributionAnalyzer.get_confidence()
|
fergalmoran/dss
|
refs/heads/master
|
spa/management/commands/waveforms.py
|
1
|
from optparse import make_option
import os
from django.core.management.base import NoArgsCommand, BaseCommand
from spa.models.mix import Mix
from core.tasks import create_waveform_task
class Command(BaseCommand):
help = "Generate all outstanding waveforms"
option_list = BaseCommand.option_list + (
make_option('--nocelery',
action='store_true',
dest='nocelery',
default=False,
                    help='Run waveform generation inline instead of dispatching to the celery broker'),
)
@staticmethod
def _get_file(mix):
#Check for file in mix directory
processed_file = ""
try:
            absolute_path = mix.get_absolute_path()
            processed_file = absolute_path
            if not os.path.isfile(processed_file):
                processed_file = mix.get_cache_path()
                if not os.path.isfile(processed_file):
                    print "File for [%s] not found, tried\n\t%s\n\t%s" % (mix.title, absolute_path, processed_file)
return ""
except Exception, ex:
print "Error generating waveform: %s" % ex.message
return processed_file
def handle(self, *args, **options):
print "Scanning for missing waveforms"
unprocessed = Mix.objects.filter(waveform_generated=False)
for mix in unprocessed:
print "Found %s" % mix.slug
mix_file = self._get_file(mix)
            if mix_file != "":
if options['nocelery']:
create_waveform_task(in_file=mix_file, uid=mix.uid)
else:
create_waveform_task.delay(in_file=mix_file, uid=mix.uid)
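# Typical invocations (illustrative, not part of the original file):
#   python manage.py waveforms             # dispatch waveform tasks via celery
#   python manage.py waveforms --nocelery  # run the task inline in this process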
|
rkokkelk/CouchPotatoServer
|
refs/heads/master
|
libs/suds/sax/element.py
|
176
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Provides XML I{element} classes.
"""
from logging import getLogger
from suds import *
from suds.sax import *
from suds.sax.text import Text
from suds.sax.attribute import Attribute
import sys
if sys.version_info < (2, 4, 0):
from sets import Set as set
del sys
log = getLogger(__name__)
class Element:
"""
An XML element object.
    @ivar parent: The node containing this element
@type parent: L{Element}
@ivar prefix: The I{optional} namespace prefix.
@type prefix: basestring
    @ivar name: The I{unqualified} name of the element
@type name: basestring
@ivar expns: An explicit namespace (xmlns="...").
@type expns: (I{prefix}, I{name})
@ivar nsprefixes: A mapping of prefixes to namespaces.
@type nsprefixes: dict
@ivar attributes: A list of XML attributes.
@type attributes: [I{Attribute},]
@ivar text: The element's I{text} content.
@type text: basestring
@ivar children: A list of child elements.
@type children: [I{Element},]
@cvar matcher: A collection of I{lambda} for string matching.
@cvar specialprefixes: A dictionary of builtin-special prefixes.
"""
matcher = \
{
'eq': lambda a,b: a == b,
'startswith' : lambda a,b: a.startswith(b),
'endswith' : lambda a,b: a.endswith(b),
'contains' : lambda a,b: b in a
}
specialprefixes = { Namespace.xmlns[0] : Namespace.xmlns[1] }
@classmethod
def buildPath(self, parent, path):
"""
        Build the specified path as a/b/c where missing intermediate nodes are built
automatically.
@param parent: A parent element on which the path is built.
@type parent: I{Element}
@param path: A simple path separated by (/).
@type path: basestring
@return: The leaf node of I{path}.
@rtype: L{Element}
"""
for tag in path.split('/'):
child = parent.getChild(tag)
if child is None:
child = Element(tag, parent)
parent = child
return child
def __init__(self, name, parent=None, ns=None):
"""
        @param name: The element's (tag) name. May contain a prefix.
@type name: basestring
@param parent: An optional parent element.
@type parent: I{Element}
@param ns: An optional namespace
@type ns: (I{prefix}, I{name})
"""
self.rename(name)
self.expns = None
self.nsprefixes = {}
self.attributes = []
self.text = None
if parent is not None:
if isinstance(parent, Element):
self.parent = parent
else:
                raise Exception('parent (%s) not-valid' % parent.__class__.__name__)
else:
self.parent = None
self.children = []
self.applyns(ns)
def rename(self, name):
"""
Rename the element.
@param name: A new name for the element.
@type name: basestring
"""
if name is None:
raise Exception('name (%s) not-valid' % name)
else:
self.prefix, self.name = splitPrefix(name)
def setPrefix(self, p, u=None):
"""
Set the element namespace prefix.
@param p: A new prefix for the element.
@type p: basestring
@param u: A namespace URI to be mapped to the prefix.
@type u: basestring
@return: self
@rtype: L{Element}
"""
self.prefix = p
if p is not None and u is not None:
self.addPrefix(p, u)
return self
def qname(self):
"""
Get the B{fully} qualified name of this element
@return: The fully qualified name.
@rtype: basestring
"""
if self.prefix is None:
return self.name
else:
return '%s:%s' % (self.prefix, self.name)
def getRoot(self):
"""
Get the root (top) node of the tree.
@return: The I{top} node of this tree.
@rtype: I{Element}
"""
if self.parent is None:
return self
else:
return self.parent.getRoot()
def clone(self, parent=None):
"""
Deep clone of this element and children.
@param parent: An optional parent for the copied fragment.
@type parent: I{Element}
@return: A deep copy parented by I{parent}
@rtype: I{Element}
"""
root = Element(self.qname(), parent, self.namespace())
for a in self.attributes:
root.append(a.clone(self))
for c in self.children:
root.append(c.clone(self))
for item in self.nsprefixes.items():
root.addPrefix(item[0], item[1])
return root
def detach(self):
"""
Detach from parent.
@return: This element removed from its parent's
child list and I{parent}=I{None}
@rtype: L{Element}
"""
if self.parent is not None:
if self in self.parent.children:
self.parent.children.remove(self)
self.parent = None
return self
def set(self, name, value):
"""
Set an attribute's value.
@param name: The name of the attribute.
@type name: basestring
@param value: The attribute value.
@type value: basestring
@see: __setitem__()
"""
attr = self.getAttribute(name)
if attr is None:
attr = Attribute(name, value)
self.append(attr)
else:
attr.setValue(value)
def unset(self, name):
"""
Unset (remove) an attribute.
@param name: The attribute name.
@type name: str
@return: self
@rtype: L{Element}
"""
try:
attr = self.getAttribute(name)
self.attributes.remove(attr)
except:
pass
return self
def get(self, name, ns=None, default=None):
"""
Get the value of an attribute by name.
@param name: The name of the attribute.
@type name: basestring
@param ns: The optional attribute's namespace.
@type ns: (I{prefix}, I{name})
@param default: An optional value to be returned when either
            the attribute does not exist or has no value.
@type default: basestring
@return: The attribute's value or I{default}
@rtype: basestring
@see: __getitem__()
"""
attr = self.getAttribute(name, ns)
if attr is None or attr.value is None:
return default
else:
return attr.getValue()
def setText(self, value):
"""
Set the element's L{Text} content.
@param value: The element's text value.
@type value: basestring
@return: self
@rtype: I{Element}
"""
if isinstance(value, Text):
self.text = value
else:
self.text = Text(value)
return self
def getText(self, default=None):
"""
Get the element's L{Text} content with optional default
@param default: A value to be returned when no text content exists.
@type default: basestring
@return: The text content, or I{default}
@rtype: L{Text}
"""
if self.hasText():
return self.text
else:
return default
def trim(self):
"""
Trim leading and trailing whitespace.
@return: self
@rtype: L{Element}
"""
if self.hasText():
self.text = self.text.trim()
return self
def hasText(self):
"""
Get whether the element has I{text} and that it is not an empty
(zero length) string.
@return: True when has I{text}.
@rtype: boolean
"""
return ( self.text is not None and len(self.text) )
def namespace(self):
"""
Get the element's namespace.
@return: The element's namespace by resolving the prefix, the explicit
namespace or the inherited namespace.
@rtype: (I{prefix}, I{name})
"""
if self.prefix is None:
return self.defaultNamespace()
else:
return self.resolvePrefix(self.prefix)
def defaultNamespace(self):
"""
Get the default (unqualified namespace).
This is the expns of the first node (looking up the tree)
that has it set.
@return: The namespace of a node when not qualified.
@rtype: (I{prefix}, I{name})
"""
p = self
while p is not None:
if p.expns is not None:
return (None, p.expns)
else:
p = p.parent
return Namespace.default
def append(self, objects):
"""
Append the specified child based on whether it is an
        element or an attribute.
@param objects: A (single|collection) of attribute(s) or element(s)
to be added as children.
@type objects: (L{Element}|L{Attribute})
@return: self
@rtype: L{Element}
"""
if not isinstance(objects, (list, tuple)):
objects = (objects,)
for child in objects:
if isinstance(child, Element):
self.children.append(child)
child.parent = self
continue
if isinstance(child, Attribute):
self.attributes.append(child)
child.parent = self
continue
raise Exception('append %s not-valid' % child.__class__.__name__)
return self
def insert(self, objects, index=0):
"""
Insert an L{Element} content at the specified index.
@param objects: A (single|collection) of attribute(s) or element(s)
to be added as children.
@type objects: (L{Element}|L{Attribute})
@param index: The position in the list of children to insert.
@type index: int
@return: self
@rtype: L{Element}
"""
        if not isinstance(objects, (list, tuple)):
            objects = (objects,)
for child in objects:
if isinstance(child, Element):
self.children.insert(index, child)
child.parent = self
else:
raise Exception('insert %s not-valid' % child.__class__.__name__)
return self
def remove(self, child):
"""
Remove the specified child element or attribute.
@param child: A child to remove.
@type child: L{Element}|L{Attribute}
@return: The detached I{child} when I{child} is an element, else None.
@rtype: L{Element}|None
"""
if isinstance(child, Element):
return child.detach()
if isinstance(child, Attribute):
self.attributes.remove(child)
return None
def replaceChild(self, child, content):
"""
Replace I{child} with the specified I{content}.
@param child: A child element.
@type child: L{Element}
@param content: An element or collection of elements.
@type content: L{Element} or [L{Element},]
"""
if child not in self.children:
raise Exception('child not-found')
index = self.children.index(child)
self.remove(child)
if not isinstance(content, (list, tuple)):
content = (content,)
for node in content:
self.children.insert(index, node.detach())
node.parent = self
index += 1
def getAttribute(self, name, ns=None, default=None):
"""
Get an attribute by name and (optional) namespace
@param name: The name of a contained attribute (may contain prefix).
@type name: basestring
@param ns: An optional namespace
@type ns: (I{prefix}, I{name})
@param default: Returned when attribute not-found.
@type default: L{Attribute}
@return: The requested attribute object.
@rtype: L{Attribute}
"""
if ns is None:
prefix, name = splitPrefix(name)
if prefix is None:
ns = None
else:
ns = self.resolvePrefix(prefix)
for a in self.attributes:
if a.match(name, ns):
return a
return default
def getChild(self, name, ns=None, default=None):
"""
Get a child by (optional) name and/or (optional) namespace.
@param name: The name of a child element (may contain prefix).
@type name: basestring
@param ns: An optional namespace used to match the child.
@type ns: (I{prefix}, I{name})
@param default: Returned when child not-found.
@type default: L{Element}
@return: The requested child, or I{default} when not-found.
@rtype: L{Element}
"""
if ns is None:
prefix, name = splitPrefix(name)
if prefix is None:
ns = None
else:
ns = self.resolvePrefix(prefix)
for c in self.children:
if c.match(name, ns):
return c
return default
def childAtPath(self, path):
"""
Get a child at I{path} where I{path} is a (/) separated
list of element names that are expected to be children.
@param path: A (/) separated list of element names.
@type path: basestring
@return: The leaf node at the end of I{path}
@rtype: L{Element}
"""
result = None
node = self
for name in [p for p in path.split('/') if len(p) > 0]:
ns = None
prefix, name = splitPrefix(name)
if prefix is not None:
ns = node.resolvePrefix(prefix)
result = node.getChild(name, ns)
if result is None:
break
else:
node = result
return result
def childrenAtPath(self, path):
"""
Get a list of children at I{path} where I{path} is a (/) separated
list of element names that are expected to be children.
@param path: A (/) separated list of element names.
@type path: basestring
@return: The collection leaf nodes at the end of I{path}
@rtype: [L{Element},...]
"""
parts = [p for p in path.split('/') if len(p) > 0]
if len(parts) == 1:
result = self.getChildren(path)
else:
result = self.__childrenAtPath(parts)
return result
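# Illustrative sketch (not part of the original class): assuming a document
# such as <a><b><c/></b></a> parsed into a root Element, path lookups behave
# roughly like:
#
#     root.childAtPath('b/c')      # -> the <c/> Element
#     root.childAtPath('b/x')      # -> None (no such leaf)
#     root.childrenAtPath('b/c')   # -> [<c/>] (all matching leaf nodes)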
def getChildren(self, name=None, ns=None):
"""
Get a list of children by (optional) name and/or (optional) namespace.
@param name: The name of a child element (may contain prefix).
@type name: basestring
@param ns: An optional namespace used to match the child.
@type ns: (I{prefix}, I{name})
@return: The list of matching children.
@rtype: [L{Element},...]
"""
if ns is None:
if name is None:
return self.children
prefix, name = splitPrefix(name)
if prefix is None:
ns = None
else:
ns = self.resolvePrefix(prefix)
return [c for c in self.children if c.match(name, ns)]
def detachChildren(self):
"""
Detach and return this element's children.
@return: The element's children (detached).
@rtype: [L{Element},...]
"""
detached = self.children
self.children = []
for child in detached:
child.parent = None
return detached
def resolvePrefix(self, prefix, default=Namespace.default):
"""
Resolve the specified prefix to a namespace. The I{nsprefixes} is
searched. If not found, it walks up the tree until either resolved or
the top of the tree is reached. Searching up the tree provides for
inherited mappings.
@param prefix: A namespace prefix to resolve.
@type prefix: basestring
@param default: An optional value to be returned when the prefix
cannot be resolved.
@type default: (I{prefix},I{URI})
@return: The namespace that is mapped to I{prefix} in this context.
@rtype: (I{prefix},I{URI})
"""
n = self
while n is not None:
if prefix in n.nsprefixes:
return (prefix, n.nsprefixes[prefix])
if prefix in self.specialprefixes:
return (prefix, self.specialprefixes[prefix])
n = n.parent
return default
def addPrefix(self, p, u):
"""
Add or update a prefix mapping.
@param p: A prefix.
@type p: basestring
@param u: A namespace URI.
@type u: basestring
@return: self
@rtype: L{Element}
"""
self.nsprefixes[p] = u
return self
def updatePrefix(self, p, u):
"""
Update (redefine) a prefix mapping for the branch.
@param p: A prefix.
@type p: basestring
@param u: A namespace URI.
@type u: basestring
@return: self
@rtype: L{Element}
@note: This method traverses down the entire branch!
"""
if p in self.nsprefixes:
self.nsprefixes[p] = u
for c in self.children:
c.updatePrefix(p, u)
return self
def clearPrefix(self, prefix):
"""
Clear the specified prefix from the prefix mappings.
@param prefix: A prefix to clear.
@type prefix: basestring
@return: self
@rtype: L{Element}
"""
if prefix in self.nsprefixes:
del self.nsprefixes[prefix]
return self
def findPrefix(self, uri, default=None):
"""
Find the first prefix that has been mapped to a namespace URI.
The local mapping is searched, then it walks up the tree until
it reaches the top or finds a match.
@param uri: A namespace URI.
@type uri: basestring
@param default: A default prefix when not found.
@type default: basestring
@return: A mapped prefix.
@rtype: basestring
"""
for item in self.nsprefixes.items():
if item[1] == uri:
prefix = item[0]
return prefix
for item in self.specialprefixes.items():
if item[1] == uri:
prefix = item[0]
return prefix
if self.parent is not None:
return self.parent.findPrefix(uri, default)
else:
return default
def findPrefixes(self, uri, match='eq'):
"""
Find all prefixes that has been mapped to a namespace URI.
The local mapping is searched, then it walks up the tree until
it reaches the top collecting all matches.
@param uri: A namespace URI.
@type uri: basestring
@param match: A matching function L{Element.matcher}.
@type match: basestring
@return: A list of mapped prefixes.
@rtype: [basestring,...]
"""
result = []
for item in self.nsprefixes.items():
if self.matcher[match](item[1], uri):
prefix = item[0]
result.append(prefix)
for item in self.specialprefixes.items():
if self.matcher[match](item[1], uri):
prefix = item[0]
result.append(prefix)
if self.parent is not None:
result += self.parent.findPrefixes(uri, match)
return result
def promotePrefixes(self):
"""
Push prefix declarations up the tree as far as possible. Each prefix
mapping is pushed to the parent unless the parent already maps that
prefix to a different URI or uses the prefix itself.
This is propagated up the tree until the top is reached.
@return: self
@rtype: L{Element}
"""
for c in self.children:
c.promotePrefixes()
if self.parent is None:
return
for p,u in self.nsprefixes.items():
if p in self.parent.nsprefixes:
pu = self.parent.nsprefixes[p]
if pu == u:
del self.nsprefixes[p]
continue
if p != self.parent.prefix:
self.parent.nsprefixes[p] = u
del self.nsprefixes[p]
return self
def refitPrefixes(self):
"""
Refit namespace qualification by replacing prefixes
with explicit namespaces. Also purges prefix mapping table.
@return: self
@rtype: L{Element}
"""
for c in self.children:
c.refitPrefixes()
if self.prefix is not None:
ns = self.resolvePrefix(self.prefix)
if ns[1] is not None:
self.expns = ns[1]
self.prefix = None
self.nsprefixes = {}
return self
def normalizePrefixes(self):
"""
Normalize the namespace prefixes.
This generates unique prefixes for all namespaces, then refits all
prefixes and prefix mappings. It also refits attribute values that
contain a (:) qualified reference.
@return: self
@rtype: L{Element}
"""
PrefixNormalizer.apply(self)
return self
def isempty(self, content=True):
"""
Get whether the element is empty.
@param content: Test content (children & text) only; when False,
attributes are considered as well.
@type content: boolean
@return: True when the element has no content (and, when I{content} is
False, no attributes either).
@rtype: boolean
"""
noattrs = not len(self.attributes)
nochildren = not len(self.children)
notext = ( self.text is None )
nocontent = ( nochildren and notext )
if content:
return nocontent
else:
return ( nocontent and noattrs )
def isnil(self):
"""
Get whether the element is I{nil} as defined by having
the attribute I{xsi:nil="true"} set.
@return: True if I{nil}, else False
@rtype: boolean
"""
nilattr = self.getAttribute('nil', ns=Namespace.xsins)
if nilattr is None:
return False
else:
return ( nilattr.getValue().lower() == 'true' )
def setnil(self, flag=True):
"""
Set this node to I{nil} as defined by having an
attribute I{xsi:nil}=I{flag}.
@param flag: A flag indicating how I{xsi:nil} will be set.
@type flag: boolean
@return: self
@rtype: L{Element}
"""
p, u = Namespace.xsins
name = ':'.join((p, 'nil'))
self.set(name, str(flag).lower())
self.addPrefix(p, u)
if flag:
self.text = None
return self
def applyns(self, ns):
"""
Apply the namespace to this node. If the prefix is I{None} then
this element's explicit namespace I{expns} is set to the
URI defined by I{ns}. Otherwise, the I{ns} is simply mapped.
@param ns: A namespace.
@type ns: (I{prefix},I{URI})
"""
if ns is None:
return
if not isinstance(ns, (tuple,list)):
raise Exception('namespace must be tuple')
if ns[0] is None:
self.expns = ns[1]
else:
self.prefix = ns[0]
self.nsprefixes[ns[0]] = ns[1]
def str(self, indent=0):
"""
Get a string representation of this XML fragment.
@param indent: The indent to be used in formatting the output.
@type indent: int
@return: A I{pretty} string.
@rtype: basestring
"""
tab = '%*s'%(indent*3,'')
result = []
result.append('%s<%s' % (tab, self.qname()))
result.append(self.nsdeclarations())
for a in [unicode(a) for a in self.attributes]:
result.append(' %s' % a)
if self.isempty():
result.append('/>')
return ''.join(result)
result.append('>')
if self.hasText():
result.append(self.text.escape())
for c in self.children:
result.append('\n')
result.append(c.str(indent+1))
if len(self.children):
result.append('\n%s' % tab)
result.append('</%s>' % self.qname())
result = ''.join(result)
return result
def plain(self):
"""
Get a string representation of this XML fragment.
@return: A I{plain} string.
@rtype: basestring
"""
result = []
result.append('<%s' % self.qname())
result.append(self.nsdeclarations())
for a in [unicode(a) for a in self.attributes]:
result.append(' %s' % a)
if self.isempty():
result.append('/>')
return ''.join(result)
result.append('>')
if self.hasText():
result.append(self.text.escape())
for c in self.children:
result.append(c.plain())
result.append('</%s>' % self.qname())
result = ''.join(result)
return result
def nsdeclarations(self):
"""
Get a string representation for all namespace declarations
as xmlns="" and xmlns:p="".
@return: A separated list of declarations.
@rtype: basestring
"""
s = []
myns = (None, self.expns)
if self.parent is None:
pns = Namespace.default
else:
pns = (None, self.parent.expns)
if myns[1] != pns[1]:
if self.expns is not None:
d = ' xmlns="%s"' % self.expns
s.append(d)
for item in self.nsprefixes.items():
(p,u) = item
if self.parent is not None:
ns = self.parent.resolvePrefix(p)
if ns[1] == u: continue
d = ' xmlns:%s="%s"' % (p, u)
s.append(d)
return ''.join(s)
def match(self, name=None, ns=None):
"""
Match by (optional) name and/or (optional) namespace.
@param name: The optional element tag name.
@type name: str
@param ns: An optional namespace.
@type ns: (I{prefix}, I{name})
@return: True if matched.
@rtype: boolean
"""
if name is None:
byname = True
else:
byname = ( self.name == name )
if ns is None:
byns = True
else:
byns = ( self.namespace()[1] == ns[1] )
return ( byname and byns )
def branch(self):
"""
Get a flattened representation of the branch.
@return: A flat list of nodes.
@rtype: [L{Element},..]
"""
branch = [self]
for c in self.children:
branch += c.branch()
return branch
def ancestors(self):
"""
Get a list of ancestors.
@return: A list of ancestors.
@rtype: [L{Element},..]
"""
ancestors = []
p = self.parent
while p is not None:
ancestors.append(p)
p = p.parent
return ancestors
def walk(self, visitor):
"""
Walk the branch and call the visitor function
on each node.
@param visitor: A function.
@return: self
@rtype: L{Element}
"""
visitor(self)
for c in self.children:
c.walk(visitor)
return self
def prune(self):
"""
Prune the branch of empty nodes.
"""
pruned = []
for c in self.children:
c.prune()
if c.isempty(False):
pruned.append(c)
for p in pruned:
self.children.remove(p)
def __childrenAtPath(self, parts):
result = []
node = self
last = len(parts)-1
ancestors = parts[:last]
leaf = parts[last]
for name in ancestors:
ns = None
prefix, name = splitPrefix(name)
if prefix is not None:
ns = node.resolvePrefix(prefix)
child = node.getChild(name, ns)
if child is None:
break
else:
node = child
if child is not None:
ns = None
prefix, leaf = splitPrefix(leaf)
if prefix is not None:
ns = node.resolvePrefix(prefix)
result = child.getChildren(leaf)
return result
def __len__(self):
return len(self.children)
def __getitem__(self, index):
if isinstance(index, basestring):
return self.get(index)
else:
if index < len(self.children):
return self.children[index]
else:
return None
def __setitem__(self, index, value):
if isinstance(index, basestring):
self.set(index, value)
else:
if index < len(self.children) and \
isinstance(value, Element):
self.children.insert(index, value)
def __eq__(self, rhs):
return rhs is not None and \
isinstance(rhs, Element) and \
self.name == rhs.name and \
self.namespace()[1] == rhs.namespace()[1]
def __repr__(self):
return \
'Element (prefix=%s, name=%s)' % (self.prefix, self.name)
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
return self.str()
def __iter__(self):
return NodeIterator(self)
class NodeIterator:
"""
The L{Element} child node iterator.
@ivar pos: The current position
@type pos: int
@ivar children: A list of a child nodes.
@type children: [L{Element},..]
"""
def __init__(self, parent):
"""
@param parent: An element to iterate.
@type parent: L{Element}
"""
self.pos = 0
self.children = parent.children
def next(self):
"""
Get the next child.
@return: The next child.
@rtype: L{Element}
@raise StopIteration: At the end.
"""
try:
child = self.children[self.pos]
self.pos += 1
return child
except IndexError:
raise StopIteration()
class PrefixNormalizer:
"""
The prefix normalizer provides namespace prefix normalization.
@ivar node: A node to normalize.
@type node: L{Element}
@ivar branch: The nodes flattened branch.
@type branch: [L{Element},..]
@ivar namespaces: A unique list of namespaces (URI).
@type namespaces: [str,]
@ivar prefixes: A reverse dict of prefixes.
@type prefixes: {u, p}
"""
@classmethod
def apply(cls, node):
"""
Normalize the specified node.
@param node: A node to normalize.
@type node: L{Element}
@return: The normalized node.
@rtype: L{Element}
"""
pn = PrefixNormalizer(node)
return pn.refit()
def __init__(self, node):
"""
@param node: A node to normalize.
@type node: L{Element}
"""
self.node = node
self.branch = node.branch()
self.namespaces = self.getNamespaces()
self.prefixes = self.genPrefixes()
def getNamespaces(self):
"""
Get the I{unique} set of namespaces referenced in the branch.
@return: A set of namespaces.
@rtype: set
"""
s = set()
for n in self.branch + self.node.ancestors():
if self.permit(n.expns):
s.add(n.expns)
s = s.union(self.pset(n))
return s
def pset(self, n):
"""
Convert the node's nsprefixes into a set.
@param n: A node.
@type n: L{Element}
@return: A set of namespaces.
@rtype: set
"""
s = set()
for ns in n.nsprefixes.items():
if self.permit(ns):
s.add(ns[1])
return s
def genPrefixes(self):
"""
Generate a I{reverse} mapping of unique prefixes for all namespaces.
@return: A reverse dict of prefixes.
@rtype: {u, p}
"""
prefixes = {}
n = 0
for u in self.namespaces:
p = 'ns%d' % n
prefixes[u] = p
n += 1
return prefixes
def refit(self):
"""
Refit (normalize) the prefixes in the node.
"""
self.refitNodes()
self.refitMappings()
def refitNodes(self):
"""
Refit (normalize) all of the nodes in the branch.
"""
for n in self.branch:
if n.prefix is not None:
ns = n.namespace()
if self.permit(ns):
n.prefix = self.prefixes[ns[1]]
self.refitAttrs(n)
def refitAttrs(self, n):
"""
Refit (normalize) all of the attributes in the node.
@param n: A node.
@type n: L{Element}
"""
for a in n.attributes:
self.refitAddr(a)
def refitAddr(self, a):
"""
Refit (normalize) the attribute.
@param a: An attribute.
@type a: L{Attribute}
"""
if a.prefix is not None:
ns = a.namespace()
if self.permit(ns):
a.prefix = self.prefixes[ns[1]]
self.refitValue(a)
def refitValue(self, a):
"""
Refit (normalize) the attribute's value.
@param a: An attribute.
@type a: L{Attribute}
"""
p,name = splitPrefix(a.getValue())
if p is None: return
ns = a.resolvePrefix(p)
if self.permit(ns):
u = ns[1]
p = self.prefixes[u]
a.setValue(':'.join((p, name)))
def refitMappings(self):
"""
Refit (normalize) all of the nsprefix mappings.
"""
for n in self.branch:
n.nsprefixes = {}
n = self.node
for u, p in self.prefixes.items():
n.addPrefix(p, u)
def permit(self, ns):
"""
Get whether the I{ns} is to be normalized.
@param ns: A namespace.
@type ns: (p,u)
@return: True if to be included.
@rtype: boolean
"""
return not self.skip(ns)
def skip(self, ns):
"""
Get whether the I{ns} is to B{not} be normalized.
@param ns: A namespace.
@type ns: (p,u)
@return: True if to be skipped.
@rtype: boolean
"""
return ns is None or \
( ns == Namespace.default ) or \
( ns == Namespace.xsdns ) or \
( ns == Namespace.xsins) or \
( ns == Namespace.xmlns )
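# Illustrative usage sketch (not part of the original module). Assuming the
# usual Element constructor from this module, Element(name, ns=None,
# parent=None), prefix handling works roughly like:
#
#     root = Element('root')
#     root.addPrefix('tns', 'http://example.com/ns')
#     child = Element('tns:child')
#     root.append(child)
#     child.namespace()        # -> ('tns', 'http://example.com/ns'), inherited
#     root.normalizePrefixes() # rewrites prefixes as ns0, ns1, ...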
|
snegirigens/DLND
|
refs/heads/master
|
transfer-learning/tensorflow_vgg/utils.py
|
145
|
import skimage
import skimage.io
import skimage.transform
import numpy as np
# synset = [l.strip() for l in open('synset.txt').readlines()]
# returns image of shape [224, 224, 3]
# [height, width, depth]
def load_image(path):
# load image
img = skimage.io.imread(path)
img = img / 255.0
assert (0 <= img).all() and (img <= 1.0).all()
# print "Original Image Shape: ", img.shape
# we crop image from center
short_edge = min(img.shape[:2])
yy = int((img.shape[0] - short_edge) / 2)
xx = int((img.shape[1] - short_edge) / 2)
crop_img = img[yy: yy + short_edge, xx: xx + short_edge]
# resize to 224, 224
resized_img = skimage.transform.resize(crop_img, (224, 224), mode='constant')
return resized_img
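# Illustrative usage (not part of the original file; the image path is an
# assumption):
#
#     img = load_image("./test_data/starry_night.jpg")
#     img.shape                                # -> (224, 224, 3)
#     img.min() >= 0.0 and img.max() <= 1.0    # values are scaled to [0, 1]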
# returns the top1 string
def print_prob(prob, file_path):
synset = [l.strip() for l in open(file_path).readlines()]
# print prob
pred = np.argsort(prob)[::-1]
# Get top1 label
top1 = synset[pred[0]]
print(("Top1: ", top1, prob[pred[0]]))
# Get top5 label
top5 = [(synset[pred[i]], prob[pred[i]]) for i in range(5)]
print(("Top5: ", top5))
return top1
def load_image2(path, height=None, width=None):
# load image
img = skimage.io.imread(path)
img = img / 255.0
if height is not None and width is not None:
ny = height
nx = width
elif height is not None:
ny = height
nx = img.shape[1] * ny / img.shape[0]
elif width is not None:
nx = width
ny = img.shape[0] * nx / img.shape[1]
else:
ny = img.shape[0]
nx = img.shape[1]
return skimage.transform.resize(img, (ny, nx), mode='constant')
def test():
img = skimage.io.imread("./test_data/starry_night.jpg")
ny = 300
nx = img.shape[1] * ny / img.shape[0]
img = skimage.transform.resize(img, (ny, nx), mode='constant')
skimage.io.imsave("./test_data/test/output.jpg", img)
if __name__ == "__main__":
test()
|
junranhe/tf-faster-rcnn
|
refs/heads/master
|
lib/model/test.py
|
1
|
# --------------------------------------------------------
# Tensorflow Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Xinlei Chen
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import numpy as np
try:
import cPickle as pickle
except ImportError:
import pickle
import os
import math
from utils.timer import Timer
from utils.cython_nms import nms, nms_new
from utils.boxes_grid import get_boxes_grid
from utils.blob import im_list_to_blob
from model.config import cfg, get_output_dir
from model.bbox_transform import clip_boxes, bbox_transform_inv
def _get_image_blob(im):
"""Converts an image into a network input.
Arguments:
im (ndarray): a color image in BGR order
Returns:
blob (ndarray): a data blob holding an image pyramid
im_scale_factors (list): list of image scales (relative to im) used
in the image pyramid
"""
im_orig = im.astype(np.float32, copy=True)
im_orig -= cfg.PIXEL_MEANS
im_shape = im_orig.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
processed_ims = []
im_scale_factors = []
for target_size in cfg.TEST.SCALES:
im_scale = float(target_size) / float(im_size_min)
# Prevent the biggest axis from being more than MAX_SIZE
if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
interpolation=cv2.INTER_LINEAR)
im_scale_factors.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, np.array(im_scale_factors)
def _get_blobs(im):
"""Convert an image and RoIs within that image into network inputs."""
blobs = {}
blobs['data'], im_scale_factors = _get_image_blob(im)
return blobs, im_scale_factors
def _clip_boxes(boxes, im_shape):
"""Clip boxes to image boundaries."""
# x1 >= 0
boxes[:, 0::4] = np.maximum(boxes[:, 0::4], 0)
# y1 >= 0
boxes[:, 1::4] = np.maximum(boxes[:, 1::4], 0)
# x2 < im_shape[1]
boxes[:, 2::4] = np.minimum(boxes[:, 2::4], im_shape[1] - 1)
# y2 < im_shape[0]
boxes[:, 3::4] = np.minimum(boxes[:, 3::4], im_shape[0] - 1)
return boxes
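# Minimal sketch of the clipping behaviour (not part of the original file):
#
#     boxes = np.array([[-5.0, 10.0, 700.0, 300.0]])   # x1, y1, x2, y2
#     _clip_boxes(boxes, im_shape=(480, 640, 3))
#     # -> [[0.0, 10.0, 639.0, 300.0]]  (kept inside the 640x480 image)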
def _rescale_boxes(boxes, inds, scales):
"""Rescale boxes according to image rescaling."""
for i in range(boxes.shape[0]):
boxes[i,:] = boxes[i,:] / scales[int(inds[i])]
return boxes
def im_detect(sess, net, im):
blobs, im_scales = _get_blobs(im)
assert len(im_scales) == 1, "Only single-image batch implemented"
im_blob = blobs['data']
# im_info holds the network input height, width, and the image scale
# (the factor used to resize the original image; 1.0 means no resizing).
blobs['im_info'] = np.array([[im_blob.shape[1], im_blob.shape[2], im_scales[0]]], dtype=np.float32)
_, scores, bbox_pred, rois = net.test_image(sess, blobs['data'], blobs['im_info'])
boxes = rois[:, 1:5] / im_scales[0]
# print(scores.shape, bbox_pred.shape, rois.shape, boxes.shape)
scores = np.reshape(scores, [scores.shape[0], -1])
bbox_pred = np.reshape(bbox_pred, [bbox_pred.shape[0], -1])
if cfg.TEST.BBOX_REG:
# Apply bounding-box regression deltas
box_deltas = bbox_pred
pred_boxes = bbox_transform_inv(boxes, box_deltas)
pred_boxes = _clip_boxes(pred_boxes, im.shape)
else:
# Simply repeat the boxes, once for each class
pred_boxes = np.tile(boxes, (1, scores.shape[1]))
return scores, pred_boxes
def mult_im_detect(sess, mult_net, im, task_ids):
blobs, im_scales = _get_blobs(im)
assert len(im_scales) == 1, "Only single-image batch implemented"
im_blob = blobs['data']
# im_info holds the network input height, width, and the image scale
# (the factor used to resize the original image; 1.0 means no resizing).
blobs['im_info'] = np.array([[im_blob.shape[1], im_blob.shape[2], im_scales[0]]], dtype=np.float32)
outputs = mult_net.test_image(sess, blobs['data'], blobs['im_info'], task_ids)
#_, scores, bbox_pred, rois = net.test_image(sess, blobs['data'], blobs['im_info'])
res = []
for out in outputs:
scores , bbox_pred, rois = out
boxes = rois[:, 1:5] / im_scales[0]
# print(scores.shape, bbox_pred.shape, rois.shape, boxes.shape)
scores = np.reshape(scores, [scores.shape[0], -1])
bbox_pred = np.reshape(bbox_pred, [bbox_pred.shape[0], -1])
if cfg.TEST.BBOX_REG:
# Apply bounding-box regression deltas
box_deltas = bbox_pred
pred_boxes = bbox_transform_inv(boxes, box_deltas)
pred_boxes = _clip_boxes(pred_boxes, im.shape)
else:
# Simply repeat the boxes, once for each class
pred_boxes = np.tile(boxes, (1, scores.shape[1]))
res.append((scores, pred_boxes))
return res
def apply_nms(all_boxes, thresh):
"""Apply non-maximum suppression to all predicted boxes output by the
test_net method.
"""
num_classes = len(all_boxes)
num_images = len(all_boxes[0])
nms_boxes = [[[] for _ in range(num_images)] for _ in range(num_classes)]
for cls_ind in range(num_classes):
for im_ind in range(num_images):
dets = all_boxes[cls_ind][im_ind]
if len(dets) == 0:
continue
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
inds = np.where((x2 > x1) & (y2 > y1) & (scores > cfg.TEST.DET_THRESHOLD))[0]
dets = dets[inds,:]
if len(dets) == 0:
continue
keep = nms(dets, thresh)
if len(keep) == 0:
continue
nms_boxes[cls_ind][im_ind] = dets[keep, :].copy()
return nms_boxes
def test_net(sess, net, imdb, weights_filename, max_per_image=100, thresh=0.05):
"""Test a Fast R-CNN network on an image database."""
np.random.seed(cfg.RNG_SEED)
num_images = len(imdb.image_index)
# all detections are collected into:
# all_boxes[cls][image] = N x 5 array of detections in
# (x1, y1, x2, y2, score)
all_boxes = [[[] for _ in range(num_images)]
for _ in range(imdb.num_classes)]
output_dir = get_output_dir(imdb, weights_filename)
# timers
_t = {'im_detect' : Timer(), 'misc' : Timer()}
for i in range(num_images):
im = cv2.imread(imdb.image_path_at(i))
_t['im_detect'].tic()
scores, boxes = im_detect(sess, net, im)
_t['im_detect'].toc()
_t['misc'].tic()
# skip j = 0, because it's the background class
for j in range(1, imdb.num_classes):
inds = np.where(scores[:, j] > thresh)[0]
cls_scores = scores[inds, j]
cls_boxes = boxes[inds, j*4:(j+1)*4]
cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
.astype(np.float32, copy=False)
keep = nms(cls_dets, cfg.TEST.NMS)
cls_dets = cls_dets[keep, :]
all_boxes[j][i] = cls_dets
# Limit to max_per_image detections *over all classes*
if max_per_image > 0:
image_scores = np.hstack([all_boxes[j][i][:, -1]
for j in range(1, imdb.num_classes)])
if len(image_scores) > max_per_image:
image_thresh = np.sort(image_scores)[-max_per_image]
for j in range(1, imdb.num_classes):
keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
all_boxes[j][i] = all_boxes[j][i][keep, :]
_t['misc'].toc()
print('im_detect: {:d}/{:d} {:.3f}s {:.3f}s' \
.format(i + 1, num_images, _t['im_detect'].average_time,
_t['misc'].average_time))
det_file = os.path.join(output_dir, 'detections.pkl')
with open(det_file, 'wb') as f:
pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)
print('Evaluating detections')
imdb.evaluate_detections(all_boxes, output_dir)
|
hsorby/fifa_world_cup_2014
|
refs/heads/master
|
languages/it.py
|
126
|
# coding: utf8
{
'!=': '!=',
'!langcode!': 'it',
'!langname!': 'Italiano',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" è un\'espressione opzionale come "campo1=\'nuovo valore\'". Non si può fare "update" o "delete" dei risultati di un JOIN ',
'%(nrows)s records found': '%(nrows)s record trovati',
'%d seconds ago': '%d secondi fa',
'%s %%{row} deleted': '%s righe ("record") cancellate',
'%s %%{row} updated': '%s righe ("record") modificate',
'%s selected': '%s selezionato',
'%Y-%m-%d': '%d/%m/%Y',
'%Y-%m-%d %H:%M:%S': '%d/%m/%Y %H:%M:%S',
'<': '<',
'<=': '<=',
'=': '=',
'>': '>',
'>=': '>=',
'@markmin\x01An error occured, please [[reload %s]] the page': 'An error occured, please [[reload %s]] the page',
'@markmin\x01Number of entries: **%s**': 'Numero di entità: **%s**',
'About': 'About',
'Access Control': 'Controllo Accessi',
'Add': 'Aggiungi',
'Administrative Interface': 'Interfaccia Amministrativa',
'Administrative interface': 'Interfaccia amministrativa',
'Ajax Recipes': 'Ajax Recipes',
'An error occured, please %s the page': 'È stato rilevato un errore, prego %s la pagina',
'And': 'E',
'appadmin is disabled because insecure channel': 'Amministrazione (appadmin) disabilitata: comunicazione non sicura',
'Are you sure you want to delete this object?': 'Sicuro di voler cancellare questo oggetto ?',
'Available Databases and Tables': 'Database e tabelle disponibili',
'Back': 'Indietro',
'Buy this book': 'Compra questo libro',
'cache': 'cache',
'Cache': 'Cache',
'Cache Keys': 'Cache Keys',
'Cannot be empty': 'Non può essere vuoto',
'Change password': 'Cambia Password',
'change password': 'Cambia password',
'Check to delete': 'Seleziona per cancellare',
'Clear': 'Resetta',
'Clear CACHE?': 'Resetta CACHE?',
'Clear DISK': 'Resetta DISK',
'Clear RAM': 'Resetta RAM',
'Client IP': 'Client IP',
'Close': 'Chiudi',
'Cognome': 'Cognome',
'Community': 'Community',
'Components and Plugins': 'Componenti and Plugin',
'contains': 'contiene',
'Controller': 'Controller',
'Copyright': 'Copyright',
'Created By': 'Creato Da',
'Created On': 'Creato Il',
'CSV': 'CSV',
'CSV (hidden cols)': 'CSV (hidden cols)',
'Current request': 'Richiesta (request) corrente',
'Current response': 'Risposta (response) corrente',
'Current session': 'Sessione (session) corrente',
'customize me!': 'Personalizzami!',
'data uploaded': 'dati caricati',
'Database': 'Database',
'Database %s select': 'Database %s select',
'db': 'db',
'DB Model': 'Modello di DB',
'Delete': 'Cancella',
'Delete:': 'Cancella:',
'Demo': 'Demo',
'Deployment Recipes': 'Deployment Recipes',
'Description': 'Descrizione',
'design': 'progetta',
'DISK': 'DISK',
'Disk Cache Keys': 'Disk Cache Keys',
'Disk Cleared': 'Disk Cleared',
'Documentation': 'Documentazione',
"Don't know what to do?": 'Non sai cosa fare?',
'done!': 'fatto!',
'Download': 'Download',
'E-mail': 'E-mail',
'Edit': 'Modifica',
'Edit current record': 'Modifica record corrente',
'edit profile': 'modifica profilo',
'Edit This App': 'Modifica questa applicazione',
'Email and SMS': 'Email e SMS',
'Email non valida': 'Email non valida',
'enter a number between %(min)g and %(max)g': 'enter a number between %(min)g and %(max)g',
'enter an integer between %(min)g and %(max)g': 'inserisci un intero tra %(min)g e %(max)g',
'Errors': 'Errori',
'Errors in form, please check it out.': 'Errori nel form, ricontrollalo',
'export as csv file': 'esporta come file CSV',
'Export:': 'Esporta:',
'FAQ': 'FAQ',
'First name': 'Nome',
'Forgot username?': 'Dimenticato lo username?',
'Forms and Validators': 'Forms and Validators',
'Free Applications': 'Free Applications',
'Graph Model': 'Graph Model',
'Group %(group_id)s created': 'Group %(group_id)s created',
'Group ID': 'ID Gruppo',
'Group uniquely assigned to user %(id)s': 'Group uniquely assigned to user %(id)s',
'Groups': 'Groups',
'hello': 'hello',
'hello world': 'salve mondo',
'Hello World': 'Salve Mondo',
'Hello World in a flash!': 'Salve Mondo in un flash!',
'Home': 'Home',
'How did you get here?': 'Come sei arrivato qui?',
'HTML': 'HTML',
'import': 'importa',
'Import/Export': 'Importa/Esporta',
'Index': 'Indice',
'insert new': 'inserisci nuovo',
'insert new %s': 'inserisci nuovo %s',
'Internal State': 'Stato interno',
'Introduction': 'Introduzione',
'Invalid email': 'Email non valida',
'Invalid login': 'Login non valido',
'Invalid Query': 'Richiesta (query) non valida',
'invalid request': 'richiesta non valida',
'Is Active': "E' attivo",
'Key': 'Chiave',
'Last name': 'Cognome',
'Layout': 'Layout',
'Layout Plugins': 'Layout Plugins',
'Layouts': 'Layouts',
'Live Chat': 'Live Chat',
'Logged in': 'Loggato',
'Logged out': 'Disconnesso',
'login': 'accesso',
'Login': 'Login',
'logout': 'uscita',
'Logout': 'Logout',
'Lost Password': 'Password Smarrita',
'Lost password?': 'Password smarrita?',
'lost password?': 'dimenticato la password?',
'Main Menu': 'Menu principale',
'Manage Cache': 'Manage Cache',
'Menu Model': 'Menu Modelli',
'Modified By': 'Modificato da',
'Modified On': 'Modificato il',
'My Sites': 'My Sites',
'Name': 'Nome',
'New': 'Nuovo',
'New password': 'Nuova password',
'New Record': 'Nuovo elemento (record)',
'new record inserted': 'nuovo record inserito',
'next 100 rows': 'prossime 100 righe',
'No databases in this application': 'Nessun database presente in questa applicazione',
'No records found': 'Nessun record trovato',
'Nome': 'Nome',
'Non può essere vuoto': 'Non può essere vuoto',
'not authorized': 'non autorizzato',
'Object or table name': 'Oggeto o nome tabella',
'Old password': 'Vecchia password',
'Online examples': 'Vedere gli esempi',
'Or': 'O',
'or import from csv file': 'oppure importa da file CSV',
'Origin': 'Origine',
'Other Plugins': 'Other Plugins',
'Other Recipes': 'Other Recipes',
'Overview': 'Overview',
'Password': 'Password',
"Password fields don't match": 'I campi password non sono uguali',
'please input your password again': 'perfavore reimmeti la tua password',
'Plugins': 'Plugins',
'Powered by': 'Powered by',
'Preface': 'Preface',
'previous 100 rows': '100 righe precedenti',
'Profile': 'Profilo',
'pygraphviz library not found': 'pygraphviz library not found',
'Python': 'Python',
'Query:': 'Richiesta (query):',
'Quick Examples': 'Quick Examples',
'RAM': 'RAM',
'RAM Cache Keys': 'RAM Cache Keys',
'Ram Cleared': 'Ram Cleared',
'Recipes': 'Recipes',
'Record': 'Record',
'record does not exist': 'il record non esiste',
'Record ID': 'Record ID',
'Record id': 'Record id',
'Register': 'Registrati',
'register': 'registrazione',
'Registration identifier': 'Registration identifier',
'Registration key': 'Chiave di Registazione',
'Registration successful': 'Registrazione avvenuta',
'reload': 'reload',
'Remember me (for 30 days)': 'Ricordami (per 30 giorni)',
'Request reset password': 'Richiedi il reset della password',
'Reset Password key': 'Resetta chiave Password ',
'Role': 'Ruolo',
'Rows in Table': 'Righe nella tabella',
'Rows selected': 'Righe selezionate',
'Save model as...': 'Salva modello come...',
'Save profile': 'Salva profilo',
'Search': 'Ricerca',
'Semantic': 'Semantic',
'Services': 'Servizi',
'Size of cache:': 'Size of cache:',
'starts with': 'comincia con',
'state': 'stato',
'Statistics': 'Statistics',
'Stylesheet': 'Foglio di stile (stylesheet)',
'submit': 'Inviai',
'Submit': 'Invia',
'Support': 'Support',
'Sure you want to delete this object?': 'Vuoi veramente cancellare questo oggetto?',
'Table': 'tabella',
'Table name': 'Nome tabella',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'La richiesta (query) è una condizione come ad esempio "db.tabella1.campo1==\'valore\'". Una condizione come "db.tabella1.campo1==db.tabella2.campo2" produce un "JOIN" SQL.',
'The Core': 'The Core',
'The output of the file is a dictionary that was rendered by the view %s': 'L\'output del file è un "dictionary" che è stato visualizzato dalla vista %s',
'The Views': 'The Views',
'This App': 'This App',
'This email already has an account': 'This email already has an account',
'This is a copy of the scaffolding application': "Questa è una copia dell'applicazione di base (scaffold)",
'Time in Cache (h:m:s)': 'Time in Cache (h:m:s)',
'Timestamp': 'Ora (timestamp)',
'too short': 'troppo corto',
'Traceback': 'Traceback',
'TSV (Excel compatible)': 'TSV (Excel compatibile)',
'TSV (Excel compatible, hidden cols)': 'TSV (Excel compatibile, hidden cols)',
'Twitter': 'Twitter',
'unable to parse csv file': 'non riesco a decodificare questo file CSV',
'Update': 'Aggiorna',
'Update:': 'Aggiorna:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Per costruire richieste (query) più complesse si usano (...)&(...) come "e" (AND), (...)|(...) come "o" (OR), e ~(...) come negazione (NOT).',
'User %(id)s Logged-in': 'User %(id)s Logged-in',
'User %(id)s Logged-out': 'User %(id)s Logged-out',
'User %(id)s Password changed': 'User %(id)s Password changed',
'User %(id)s Password reset': 'User %(id)s Password reset',
'User %(id)s Profile updated': 'User %(id)s Profile updated',
'User %(id)s Registered': 'User %(id)s Registered',
'User ID': 'ID Utente',
'value already in database or empty': 'valore già presente nel database o vuoto',
'Verify Password': 'Verifica Password',
'Videos': 'Videos',
'View': 'Vista',
'Welcome': 'Benvenuto',
'Welcome %s': 'Benvenuto %s',
'Welcome to web2py': 'Benvenuto su web2py',
'Welcome to web2py!': 'Benvenuto in web2py!',
'Which called the function %s located in the file %s': 'che ha chiamato la funzione %s presente nel file %s',
'Working...': 'Working...',
'XML': 'XML',
'You are successfully running web2py': 'Stai eseguendo web2py con successo',
'You can modify this application and adapt it to your needs': 'Puoi modificare questa applicazione adattandola alle tue necessità',
'You visited the url %s': "Hai visitato l'URL %s",
}
|
dezelin/scons
|
refs/heads/master
|
scons-local/SCons/Tool/jar.py
|
6
|
"""SCons.Tool.jar
Tool-specific initialization for jar.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/jar.py 2014/07/05 09:42:21 garyo"
import SCons.Subst
import SCons.Util
def jarSources(target, source, env, for_signature):
"""Only include sources that are not a manifest file."""
try:
env['JARCHDIR']
except KeyError:
jarchdir_set = False
else:
jarchdir_set = True
jarchdir = env.subst('$JARCHDIR', target=target, source=source)
if jarchdir:
jarchdir = env.fs.Dir(jarchdir)
result = []
for src in source:
contents = src.get_text_contents()
if contents[:16] != "Manifest-Version":
if jarchdir_set:
_chdir = jarchdir
else:
try:
_chdir = src.attributes.java_classdir
except AttributeError:
_chdir = None
if _chdir:
# If we are changing the dir with -C, then sources should
# be relative to that directory.
src = SCons.Subst.Literal(src.get_path(_chdir))
result.append('-C')
result.append(_chdir)
result.append(src)
return result
def jarManifest(target, source, env, for_signature):
"""Look in sources for a manifest file, if any."""
for src in source:
contents = src.get_text_contents()
if contents[:16] == "Manifest-Version":
return src
return ''
def jarFlags(target, source, env, for_signature):
"""If we have a manifest, make sure that the 'm'
flag is specified."""
jarflags = env.subst('$JARFLAGS', target=target, source=source)
for src in source:
contents = src.get_text_contents()
if contents[:16] == "Manifest-Version":
if 'm' not in jarflags:
return jarflags + 'm'
break
return jarflags
def generate(env):
"""Add Builders and construction variables for jar to an Environment."""
SCons.Tool.CreateJarBuilder(env)
env['JAR'] = 'jar'
env['JARFLAGS'] = SCons.Util.CLVar('cf')
env['_JARFLAGS'] = jarFlags
env['_JARMANIFEST'] = jarManifest
env['_JARSOURCES'] = jarSources
env['_JARCOM'] = '$JAR $_JARFLAGS $TARGET $_JARMANIFEST $_JARSOURCES'
env['JARCOM'] = "${TEMPFILE('$_JARCOM')}"
env['JARSUFFIX'] = '.jar'
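# Illustrative SConstruct usage (not part of this tool module; file names are
# assumptions):
#
#     env = Environment(tools=['javac', 'jar'])
#     classes = env.Java(target='classes', source='src')
#     env.Jar(target='app.jar', source=classes + ['Manifest.txt'])
#
# A manifest source is detected by its leading "Manifest-Version" text and is
# passed to jar via the 'm' flag (see jarFlags above).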
def exists(env):
# As reported by Jan Nijtmans in issue #2730, the simple
# return env.Detect('jar')
# doesn't always work during initialization. For now, we
# stop trying to detect an executable (analogous to the
# javac Builder).
# TODO: Come up with a proper detect() routine...and enable it.
return 1
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
DMLoy/ECommerceBasic
|
refs/heads/master
|
lib/python2.7/site-packages/django/core/files/base.py
|
147
|
from __future__ import unicode_literals
import os
from io import BytesIO, StringIO, UnsupportedOperation
from django.utils.encoding import smart_text
from django.core.files.utils import FileProxyMixin
from django.utils import six
from django.utils.encoding import force_bytes, python_2_unicode_compatible
@python_2_unicode_compatible
class File(FileProxyMixin):
DEFAULT_CHUNK_SIZE = 64 * 2**10
def __init__(self, file, name=None):
self.file = file
if name is None:
name = getattr(file, 'name', None)
self.name = name
if hasattr(file, 'mode'):
self.mode = file.mode
def __str__(self):
return smart_text(self.name or '')
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self or "None")
def __bool__(self):
return bool(self.name)
def __nonzero__(self): # Python 2 compatibility
return type(self).__bool__(self)
def __len__(self):
return self.size
def _get_size(self):
if not hasattr(self, '_size'):
if hasattr(self.file, 'size'):
self._size = self.file.size
elif hasattr(self.file, 'name') and os.path.exists(self.file.name):
self._size = os.path.getsize(self.file.name)
elif hasattr(self.file, 'tell') and hasattr(self.file, 'seek'):
pos = self.file.tell()
self.file.seek(0, os.SEEK_END)
self._size = self.file.tell()
self.file.seek(pos)
else:
raise AttributeError("Unable to determine the file's size.")
return self._size
def _set_size(self, size):
self._size = size
size = property(_get_size, _set_size)
def _get_closed(self):
return not self.file or self.file.closed
closed = property(_get_closed)
def chunks(self, chunk_size=None):
"""
Read the file and yield chunks of ``chunk_size`` bytes (defaults to
``UploadedFile.DEFAULT_CHUNK_SIZE``).
"""
if not chunk_size:
chunk_size = self.DEFAULT_CHUNK_SIZE
try:
self.seek(0)
except (AttributeError, UnsupportedOperation):
pass
while True:
data = self.read(chunk_size)
if not data:
break
yield data
def multiple_chunks(self, chunk_size=None):
"""
Returns ``True`` if you can expect multiple chunks.
NB: If a particular file representation is in memory, subclasses should
always return ``False`` -- there's no good reason to read from memory in
chunks.
"""
if not chunk_size:
chunk_size = self.DEFAULT_CHUNK_SIZE
return self.size > chunk_size
def __iter__(self):
# Iterate over this file-like object by newlines
buffer_ = None
for chunk in self.chunks():
chunk_buffer = BytesIO(chunk)
for line in chunk_buffer:
if buffer_:
line = buffer_ + line
buffer_ = None
# If this is the end of a line, yield
# otherwise, wait for the next round
if line[-1:] in (b'\n', b'\r'):
yield line
else:
buffer_ = line
if buffer_ is not None:
yield buffer_
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
self.close()
def open(self, mode=None):
if not self.closed:
self.seek(0)
elif self.name and os.path.exists(self.name):
self.file = open(self.name, mode or self.mode)
else:
raise ValueError("The file cannot be reopened.")
def close(self):
self.file.close()
@python_2_unicode_compatible
class ContentFile(File):
"""
A File-like object that takes just raw content, rather than an actual file.
"""
def __init__(self, content, name=None):
if six.PY3:
stream_class = StringIO if isinstance(content, six.text_type) else BytesIO
else:
stream_class = BytesIO
content = force_bytes(content)
super(ContentFile, self).__init__(stream_class(content), name=name)
self.size = len(content)
def __str__(self):
return 'Raw content'
def __bool__(self):
return True
def __nonzero__(self): # Python 2 compatibility
return type(self).__bool__(self)
def open(self, mode=None):
self.seek(0)
def close(self):
pass
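# Illustrative usage (not part of the original module):
#
#     f = ContentFile(b"hello world", name="hello.txt")
#     f.size                           # -> 11
#     list(f.chunks(chunk_size=4))     # -> [b'hell', b'o wo', b'rld']
#     f.multiple_chunks(chunk_size=4)  # -> True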
|
hackerkid/zulip
|
refs/heads/master
|
tools/linter_lib/pep8.py
|
9
|
from typing import List
from zulint.linters import run_command
from zulint.printer import colors
def check_pep8(files: List[str]) -> bool:
if not files:
return False
return run_command("pep8", next(colors), ["pycodestyle", "--", *files]) != 0
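# Illustrative call (not part of the original file; the path is an assumption):
# returns True when pycodestyle reports at least one violation in the files.
#
#     failed = check_pep8(["zerver/models.py"])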
|
codesters/codesters
|
refs/heads/master
|
registration/__init__.py
|
50
|
VERSION = (1, 0, 0, 'final', 0)
def get_version():
"Returns a PEP 386-compliant version number from VERSION."
assert len(VERSION) == 5
assert VERSION[3] in ('alpha', 'beta', 'rc', 'final')
# Now build the two parts of the version number:
# main = X.Y[.Z]
# sub = .devN - for pre-alpha releases
# | {a|b|c}N - for alpha, beta and rc releases
parts = 2 if VERSION[2] == 0 else 3
main = '.'.join(str(x) for x in VERSION[:parts])
sub = ''
if VERSION[3] != 'final':
mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'c'}
sub = mapping[VERSION[3]] + str(VERSION[4])
return str(main + sub)
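# Worked example (not part of the original file): with VERSION = (1, 0, 0,
# 'final', 0), parts == 2 because VERSION[2] == 0, main == '1.0' and sub == ''
# (final release), so get_version() returns '1.0'. A pre-release such as
# (1, 1, 0, 'beta', 2) would yield '1.1b2'.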
|
tobegit3hub/deep_cnn
|
refs/heads/master
|
java_predict_client/src/main/proto/tensorflow/contrib/layers/python/ops/sparse_ops_test.py
|
12
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.contrib.layers.python.ops.sparse_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.layers.python.ops import sparse_ops
class SparseOpsTest(tf.test.TestCase):
def test_dense_to_sparse_tensor_1d(self):
with self.test_session() as sess:
st = sparse_ops.dense_to_sparse_tensor([1, 0, 2, 0])
result = sess.run(st)
self.assertEqual(result.indices.dtype, np.int64)
self.assertEqual(result.values.dtype, np.int32)
self.assertEqual(result.shape.dtype, np.int64)
self.assertAllEqual([[0], [2]], result.indices)
self.assertAllEqual([1, 2], result.values)
self.assertAllEqual([4], result.shape)
def test_dense_to_sparse_tensor_1d_float(self):
with self.test_session() as sess:
st = sparse_ops.dense_to_sparse_tensor([1.5, 0.0, 2.3, 0.0])
result = sess.run(st)
self.assertEqual(result.indices.dtype, np.int64)
self.assertEqual(result.values.dtype, np.float32)
self.assertEqual(result.shape.dtype, np.int64)
self.assertAllEqual([[0], [2]], result.indices)
self.assertAllClose([1.5, 2.3], result.values)
self.assertAllEqual([4], result.shape)
def test_dense_to_sparse_tensor_1d_bool(self):
with self.test_session() as sess:
st = sparse_ops.dense_to_sparse_tensor([True, False, True, False])
result = sess.run(st)
self.assertEqual(result.indices.dtype, np.int64)
self.assertEqual(result.values.dtype, np.bool)
self.assertEqual(result.shape.dtype, np.int64)
self.assertAllEqual([[0], [2]], result.indices)
self.assertAllEqual([True, True], result.values)
self.assertAllEqual([4], result.shape)
def test_dense_to_sparse_tensor_1d_str(self):
with self.test_session() as sess:
st = sparse_ops.dense_to_sparse_tensor([b'qwe', b'', b'ewq', b''])
result = sess.run(st)
self.assertEqual(result.indices.dtype, np.int64)
self.assertEqual(result.values.dtype, np.object)
self.assertEqual(result.shape.dtype, np.int64)
self.assertAllEqual([[0], [2]], result.indices)
self.assertAllEqual([b'qwe', b'ewq'], result.values)
self.assertAllEqual([4], result.shape)
def test_dense_to_sparse_tensor_1d_str_special_ignore(self):
with self.test_session() as sess:
st = sparse_ops.dense_to_sparse_tensor(
[b'qwe', b'', b'ewq', b''], ignore_value=b'qwe')
result = sess.run(st)
self.assertEqual(result.indices.dtype, np.int64)
self.assertEqual(result.values.dtype, np.object)
self.assertEqual(result.shape.dtype, np.int64)
self.assertAllEqual([[1], [2], [3]], result.indices)
self.assertAllEqual([b'', b'ewq', b''], result.values)
self.assertAllEqual([4], result.shape)
def test_dense_to_sparse_tensor_2d(self):
with self.test_session() as sess:
st = sparse_ops.dense_to_sparse_tensor([[1, 2, 0, 0], [3, 4, 5, 0]])
result = sess.run(st)
self.assertAllEqual([[0, 0], [0, 1], [1, 0], [1, 1], [1, 2]],
result.indices)
self.assertAllEqual([1, 2, 3, 4, 5], result.values)
self.assertAllEqual([2, 4], result.shape)
def test_dense_to_sparse_tensor_3d(self):
with self.test_session() as sess:
st = sparse_ops.dense_to_sparse_tensor([[[1, 2, 0, 0], [3, 4, 5, 0]],
[[7, 8, 0, 0], [9, 0, 0, 0]]])
result = sess.run(st)
self.assertAllEqual([[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [0, 1, 2],
[1, 0, 0], [1, 0, 1], [1, 1, 0]], result.indices)
self.assertAllEqual([1, 2, 3, 4, 5, 7, 8, 9], result.values)
self.assertAllEqual([2, 2, 4], result.shape)
def test_dense_to_sparse_tensor_1d_no_shape(self):
with self.test_session() as sess:
tensor = tf.placeholder(shape=[None], dtype=tf.int32)
st = sparse_ops.dense_to_sparse_tensor(tensor)
result = sess.run(st, feed_dict={tensor: [0, 100, 0, 3]})
self.assertAllEqual([[1], [3]], result.indices)
self.assertAllEqual([100, 3], result.values)
self.assertAllEqual([4], result.shape)
def test_dense_to_sparse_tensor_3d_no_shape(self):
with self.test_session() as sess:
tensor = tf.placeholder(shape=[None, None, None], dtype=tf.int32)
st = sparse_ops.dense_to_sparse_tensor(tensor)
result = sess.run(st,
feed_dict={
tensor: [[[1, 2, 0, 0], [3, 4, 5, 0]],
[[7, 8, 0, 0], [9, 0, 0, 0]]]
})
self.assertAllEqual([[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [0, 1, 2],
[1, 0, 0], [1, 0, 1], [1, 1, 0]], result.indices)
self.assertAllEqual([1, 2, 3, 4, 5, 7, 8, 9], result.values)
self.assertAllEqual([2, 2, 4], result.shape)
def test_convert_to_sparse_undef_shape(self):
with self.test_session():
with self.assertRaises(ValueError):
tensor = tf.placeholder(dtype=tf.int32)
sparse_ops.dense_to_sparse_tensor(tensor)
if __name__ == '__main__':
tf.test.main()
|
philrykoff/vertx-web
|
refs/heads/master
|
src/test/sockjs-protocol/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/__init__.py
|
1429
|
"""Modules copied from Python 3 standard libraries, for internal use only.
Individual classes and functions are found in d2._backport.misc. Intended
usage is to always import things missing from 3.1 from that module: the
built-in/stdlib objects will be used if found.
"""
|
pdf/beets
|
refs/heads/master
|
test/test_player.py
|
15
|
# This file is part of beets.
# Copyright 2013, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Tests for BPD and music playing.
"""
from _common import unittest
from beetsplug import bpd
class CommandParseTest(unittest.TestCase):
def test_no_args(self):
s = ur'command'
c = bpd.Command(s)
self.assertEqual(c.name, u'command')
self.assertEqual(c.args, [])
def test_one_unquoted_arg(self):
s = ur'command hello'
c = bpd.Command(s)
self.assertEqual(c.name, u'command')
self.assertEqual(c.args, [u'hello'])
def test_two_unquoted_args(self):
s = ur'command hello there'
c = bpd.Command(s)
self.assertEqual(c.name, u'command')
self.assertEqual(c.args, [u'hello', u'there'])
def test_one_quoted_arg(self):
s = ur'command "hello there"'
c = bpd.Command(s)
self.assertEqual(c.name, u'command')
self.assertEqual(c.args, [u'hello there'])
def test_heterogenous_args(self):
s = ur'command "hello there" sir'
c = bpd.Command(s)
self.assertEqual(c.name, u'command')
self.assertEqual(c.args, [u'hello there', u'sir'])
def test_quote_in_arg(self):
s = ur'command "hello \" there"'
c = bpd.Command(s)
self.assertEqual(c.args, [u'hello " there'])
def test_backslash_in_arg(self):
s = ur'command "hello \\ there"'
c = bpd.Command(s)
self.assertEqual(c.args, [u'hello \ there'])
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
vikatory/kbengine
|
refs/heads/master
|
kbe/res/scripts/common/Lib/test/test_cmd_line_script.py
|
75
|
# tests command line execution of scripts
import contextlib
import importlib
import importlib.machinery
import zipimport
import unittest
import sys
import os
import os.path
import py_compile
import subprocess
import textwrap
from test import support
from test.script_helper import (
make_pkg, make_script, make_zip_pkg, make_zip_script,
assert_python_ok, assert_python_failure, temp_dir,
spawn_python, kill_python)
verbose = support.verbose
example_args = ['test1', 'test2', 'test3']
test_source = """\
# Script may be run with optimisation enabled, so don't rely on assert
# statements being executed
def assertEqual(lhs, rhs):
if lhs != rhs:
raise AssertionError('%r != %r' % (lhs, rhs))
def assertIdentical(lhs, rhs):
if lhs is not rhs:
raise AssertionError('%r is not %r' % (lhs, rhs))
# Check basic code execution
result = ['Top level assignment']
def f():
result.append('Lower level reference')
f()
assertEqual(result, ['Top level assignment', 'Lower level reference'])
# Check population of magic variables
assertEqual(__name__, '__main__')
from importlib.machinery import BuiltinImporter
_loader = __loader__ if __loader__ is BuiltinImporter else type(__loader__)
print('__loader__==%a' % _loader)
print('__file__==%a' % __file__)
print('__cached__==%a' % __cached__)
print('__package__==%r' % __package__)
# Check PEP 451 details
import os.path
if __package__ is not None:
print('__main__ was located through the import system')
assertIdentical(__spec__.loader, __loader__)
expected_spec_name = os.path.splitext(os.path.basename(__file__))[0]
if __package__:
expected_spec_name = __package__ + "." + expected_spec_name
assertEqual(__spec__.name, expected_spec_name)
assertEqual(__spec__.parent, __package__)
assertIdentical(__spec__.submodule_search_locations, None)
assertEqual(__spec__.origin, __file__)
if __spec__.cached is not None:
assertEqual(__spec__.cached, __cached__)
# Check the sys module
import sys
assertIdentical(globals(), sys.modules[__name__].__dict__)
if __spec__ is not None:
# XXX: We're not currently making __main__ available under its real name
pass # assertIdentical(globals(), sys.modules[__spec__.name].__dict__)
from test import test_cmd_line_script
example_args_list = test_cmd_line_script.example_args
assertEqual(sys.argv[1:], example_args_list)
print('sys.argv[0]==%a' % sys.argv[0])
print('sys.path[0]==%a' % sys.path[0])
# Check the working directory
import os
print('cwd==%a' % os.getcwd())
"""
def _make_test_script(script_dir, script_basename, source=test_source):
to_return = make_script(script_dir, script_basename, source)
importlib.invalidate_caches()
return to_return
def _make_test_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename,
source=test_source, depth=1):
to_return = make_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename,
source, depth)
importlib.invalidate_caches()
return to_return
# There's no easy way to pass the script directory in to get
# -m to work (avoiding that is the whole point of making
# directories and zipfiles executable!)
# So we fake it for testing purposes with a custom launch script
launch_source = """\
import sys, os.path, runpy
sys.path.insert(0, %s)
runpy._run_module_as_main(%r)
"""
def _make_launch_script(script_dir, script_basename, module_name, path=None):
if path is None:
path = "os.path.dirname(__file__)"
else:
path = repr(path)
source = launch_source % (path, module_name)
to_return = make_script(script_dir, script_basename, source)
importlib.invalidate_caches()
return to_return
class CmdLineTest(unittest.TestCase):
def _check_output(self, script_name, exit_code, data,
expected_file, expected_argv0,
expected_path0, expected_package,
expected_loader):
if verbose > 1:
print("Output from test script %r:" % script_name)
print(data)
self.assertEqual(exit_code, 0)
printed_loader = '__loader__==%a' % expected_loader
printed_file = '__file__==%a' % expected_file
printed_package = '__package__==%r' % expected_package
printed_argv0 = 'sys.argv[0]==%a' % expected_argv0
printed_path0 = 'sys.path[0]==%a' % expected_path0
printed_cwd = 'cwd==%a' % os.getcwd()
if verbose > 1:
print('Expected output:')
print(printed_file)
print(printed_package)
print(printed_argv0)
print(printed_cwd)
self.assertIn(printed_loader.encode('utf-8'), data)
self.assertIn(printed_file.encode('utf-8'), data)
self.assertIn(printed_package.encode('utf-8'), data)
self.assertIn(printed_argv0.encode('utf-8'), data)
self.assertIn(printed_path0.encode('utf-8'), data)
self.assertIn(printed_cwd.encode('utf-8'), data)
def _check_script(self, script_name, expected_file,
expected_argv0, expected_path0,
expected_package, expected_loader,
*cmd_line_switches):
if not __debug__:
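            # When the test suite itself runs under -O/-OO, pass the same
            # optimisation flag to the child interpreter so its output matches.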
cmd_line_switches += ('-' + 'O' * sys.flags.optimize,)
run_args = cmd_line_switches + (script_name,) + tuple(example_args)
rc, out, err = assert_python_ok(*run_args, __isolated=False)
self._check_output(script_name, rc, out + err, expected_file,
expected_argv0, expected_path0,
expected_package, expected_loader)
def _check_import_error(self, script_name, expected_msg,
*cmd_line_switches):
run_args = cmd_line_switches + (script_name,)
rc, out, err = assert_python_failure(*run_args)
if verbose > 1:
print('Output from test script %r:' % script_name)
print(err)
print('Expected output: %r' % expected_msg)
self.assertIn(expected_msg.encode('utf-8'), err)
def test_dash_c_loader(self):
rc, out, err = assert_python_ok("-c", "print(__loader__)")
expected = repr(importlib.machinery.BuiltinImporter).encode("utf-8")
self.assertIn(expected, out)
def test_stdin_loader(self):
# Unfortunately, there's no way to automatically test the fully
# interactive REPL, since that code path only gets executed when
# stdin is an interactive tty.
p = spawn_python()
try:
p.stdin.write(b"print(__loader__)\n")
p.stdin.flush()
finally:
out = kill_python(p)
expected = repr(importlib.machinery.BuiltinImporter).encode("utf-8")
self.assertIn(expected, out)
@contextlib.contextmanager
def interactive_python(self, separate_stderr=False):
if separate_stderr:
p = spawn_python('-i', bufsize=1, stderr=subprocess.PIPE)
stderr = p.stderr
else:
p = spawn_python('-i', bufsize=1, stderr=subprocess.STDOUT)
stderr = p.stdout
try:
# Drain stderr until prompt
while True:
data = stderr.read(4)
if data == b">>> ":
break
stderr.readline()
yield p
finally:
kill_python(p)
stderr.close()
def check_repl_stdout_flush(self, separate_stderr=False):
with self.interactive_python(separate_stderr) as p:
p.stdin.write(b"print('foo')\n")
p.stdin.flush()
self.assertEqual(b'foo', p.stdout.readline().strip())
def check_repl_stderr_flush(self, separate_stderr=False):
with self.interactive_python(separate_stderr) as p:
p.stdin.write(b"1/0\n")
p.stdin.flush()
stderr = p.stderr if separate_stderr else p.stdout
self.assertIn(b'Traceback ', stderr.readline())
self.assertIn(b'File "<stdin>"', stderr.readline())
self.assertIn(b'ZeroDivisionError', stderr.readline())
def test_repl_stdout_flush(self):
self.check_repl_stdout_flush()
def test_repl_stdout_flush_separate_stderr(self):
self.check_repl_stdout_flush(True)
def test_repl_stderr_flush(self):
self.check_repl_stderr_flush()
def test_repl_stderr_flush_separate_stderr(self):
self.check_repl_stderr_flush(True)
def test_basic_script(self):
with temp_dir() as script_dir:
script_name = _make_test_script(script_dir, 'script')
self._check_script(script_name, script_name, script_name,
script_dir, None,
importlib.machinery.SourceFileLoader)
def test_script_compiled(self):
with temp_dir() as script_dir:
script_name = _make_test_script(script_dir, 'script')
py_compile.compile(script_name, doraise=True)
os.remove(script_name)
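            # Only the compiled form remains; make_legacy_pyc relocates the
            # __pycache__ bytecode next to the original source path so the
            # .pyc file can be executed directly.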
pyc_file = support.make_legacy_pyc(script_name)
self._check_script(pyc_file, pyc_file,
pyc_file, script_dir, None,
importlib.machinery.SourcelessFileLoader)
def test_directory(self):
with temp_dir() as script_dir:
script_name = _make_test_script(script_dir, '__main__')
self._check_script(script_dir, script_name, script_dir,
script_dir, '',
importlib.machinery.SourceFileLoader)
def test_directory_compiled(self):
with temp_dir() as script_dir:
script_name = _make_test_script(script_dir, '__main__')
py_compile.compile(script_name, doraise=True)
os.remove(script_name)
pyc_file = support.make_legacy_pyc(script_name)
self._check_script(script_dir, pyc_file, script_dir,
script_dir, '',
importlib.machinery.SourcelessFileLoader)
def test_directory_error(self):
with temp_dir() as script_dir:
msg = "can't find '__main__' module in %r" % script_dir
self._check_import_error(script_dir, msg)
def test_zipfile(self):
with temp_dir() as script_dir:
script_name = _make_test_script(script_dir, '__main__')
zip_name, run_name = make_zip_script(script_dir, 'test_zip', script_name)
self._check_script(zip_name, run_name, zip_name, zip_name, '',
zipimport.zipimporter)
def test_zipfile_compiled(self):
with temp_dir() as script_dir:
script_name = _make_test_script(script_dir, '__main__')
compiled_name = py_compile.compile(script_name, doraise=True)
zip_name, run_name = make_zip_script(script_dir, 'test_zip', compiled_name)
self._check_script(zip_name, run_name, zip_name, zip_name, '',
zipimport.zipimporter)
def test_zipfile_error(self):
with temp_dir() as script_dir:
script_name = _make_test_script(script_dir, 'not_main')
zip_name, run_name = make_zip_script(script_dir, 'test_zip', script_name)
msg = "can't find '__main__' module in %r" % zip_name
self._check_import_error(zip_name, msg)
def test_module_in_package(self):
with temp_dir() as script_dir:
pkg_dir = os.path.join(script_dir, 'test_pkg')
make_pkg(pkg_dir)
script_name = _make_test_script(pkg_dir, 'script')
launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.script')
self._check_script(launch_name, script_name, script_name,
script_dir, 'test_pkg',
importlib.machinery.SourceFileLoader)
def test_module_in_package_in_zipfile(self):
with temp_dir() as script_dir:
zip_name, run_name = _make_test_zip_pkg(script_dir, 'test_zip', 'test_pkg', 'script')
launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.script', zip_name)
self._check_script(launch_name, run_name, run_name,
zip_name, 'test_pkg', zipimport.zipimporter)
def test_module_in_subpackage_in_zipfile(self):
with temp_dir() as script_dir:
zip_name, run_name = _make_test_zip_pkg(script_dir, 'test_zip', 'test_pkg', 'script', depth=2)
launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.test_pkg.script', zip_name)
self._check_script(launch_name, run_name, run_name,
zip_name, 'test_pkg.test_pkg',
zipimport.zipimporter)
def test_package(self):
with temp_dir() as script_dir:
pkg_dir = os.path.join(script_dir, 'test_pkg')
make_pkg(pkg_dir)
script_name = _make_test_script(pkg_dir, '__main__')
launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg')
self._check_script(launch_name, script_name,
script_name, script_dir, 'test_pkg',
importlib.machinery.SourceFileLoader)
def test_package_compiled(self):
with temp_dir() as script_dir:
pkg_dir = os.path.join(script_dir, 'test_pkg')
make_pkg(pkg_dir)
script_name = _make_test_script(pkg_dir, '__main__')
compiled_name = py_compile.compile(script_name, doraise=True)
os.remove(script_name)
pyc_file = support.make_legacy_pyc(script_name)
launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg')
self._check_script(launch_name, pyc_file,
pyc_file, script_dir, 'test_pkg',
importlib.machinery.SourcelessFileLoader)
def test_package_error(self):
with temp_dir() as script_dir:
pkg_dir = os.path.join(script_dir, 'test_pkg')
make_pkg(pkg_dir)
msg = ("'test_pkg' is a package and cannot "
"be directly executed")
launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg')
self._check_import_error(launch_name, msg)
def test_package_recursion(self):
with temp_dir() as script_dir:
pkg_dir = os.path.join(script_dir, 'test_pkg')
make_pkg(pkg_dir)
main_dir = os.path.join(pkg_dir, '__main__')
make_pkg(main_dir)
msg = ("Cannot use package as __main__ module; "
"'test_pkg' is a package and cannot "
"be directly executed")
launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg')
self._check_import_error(launch_name, msg)
def test_issue8202(self):
        # Make sure package __init__ modules see "-m" in sys.argv[0] while
        # searching for the module to execute
with temp_dir() as script_dir:
with support.change_cwd(path=script_dir):
pkg_dir = os.path.join(script_dir, 'test_pkg')
make_pkg(pkg_dir, "import sys; print('init_argv0==%r' % sys.argv[0])")
script_name = _make_test_script(pkg_dir, 'script')
rc, out, err = assert_python_ok('-m', 'test_pkg.script', *example_args, __isolated=False)
if verbose > 1:
print(out)
expected = "init_argv0==%r" % '-m'
self.assertIn(expected.encode('utf-8'), out)
self._check_output(script_name, rc, out,
script_name, script_name, '', 'test_pkg',
importlib.machinery.SourceFileLoader)
def test_issue8202_dash_c_file_ignored(self):
# Make sure a "-c" file in the current directory
# does not alter the value of sys.path[0]
with temp_dir() as script_dir:
with support.change_cwd(path=script_dir):
with open("-c", "w") as f:
f.write("data")
rc, out, err = assert_python_ok('-c',
'import sys; print("sys.path[0]==%r" % sys.path[0])',
__isolated=False)
if verbose > 1:
print(out)
expected = "sys.path[0]==%r" % ''
self.assertIn(expected.encode('utf-8'), out)
def test_issue8202_dash_m_file_ignored(self):
# Make sure a "-m" file in the current directory
# does not alter the value of sys.path[0]
with temp_dir() as script_dir:
script_name = _make_test_script(script_dir, 'other')
with support.change_cwd(path=script_dir):
with open("-m", "w") as f:
f.write("data")
rc, out, err = assert_python_ok('-m', 'other', *example_args,
__isolated=False)
self._check_output(script_name, rc, out,
script_name, script_name, '', '',
importlib.machinery.SourceFileLoader)
def test_dash_m_error_code_is_one(self):
        # If a module invoked with the -m command line flag results in an
        # error, the return code reported to the shell is 1
with temp_dir() as script_dir:
with support.change_cwd(path=script_dir):
pkg_dir = os.path.join(script_dir, 'test_pkg')
make_pkg(pkg_dir)
script_name = _make_test_script(pkg_dir, 'other',
"if __name__ == '__main__': raise ValueError")
rc, out, err = assert_python_failure('-m', 'test_pkg.other', *example_args)
if verbose > 1:
print(out)
self.assertEqual(rc, 1)
def test_pep_409_verbiage(self):
# Make sure PEP 409 syntax properly suppresses
# the context of an exception
script = textwrap.dedent("""\
try:
raise ValueError
except:
raise NameError from None
""")
with temp_dir() as script_dir:
script_name = _make_test_script(script_dir, 'script', script)
exitcode, stdout, stderr = assert_python_failure(script_name)
text = stderr.decode('ascii').split('\n')
self.assertEqual(len(text), 4)
self.assertTrue(text[0].startswith('Traceback'))
self.assertTrue(text[1].startswith(' File '))
self.assertTrue(text[3].startswith('NameError'))
def test_non_ascii(self):
# Mac OS X denies the creation of a file with an invalid UTF-8 name.
        # Windows allows creating a file with an arbitrary bytes name, but
        # Python cannot pass an undecodable bytes argument to a subprocess.
if (support.TESTFN_UNDECODABLE
and sys.platform not in ('win32', 'darwin')):
name = os.fsdecode(support.TESTFN_UNDECODABLE)
elif support.TESTFN_NONASCII:
name = support.TESTFN_NONASCII
else:
self.skipTest("need support.TESTFN_NONASCII")
# Issue #16218
source = 'print(ascii(__file__))\n'
script_name = _make_test_script(os.curdir, name, source)
self.addCleanup(support.unlink, script_name)
rc, stdout, stderr = assert_python_ok(script_name)
self.assertEqual(
ascii(script_name),
stdout.rstrip().decode('ascii'),
'stdout=%r stderr=%r' % (stdout, stderr))
self.assertEqual(0, rc)
def test_issue20500_exit_with_exception_value(self):
script = textwrap.dedent("""\
import sys
error = None
try:
raise ValueError('some text')
except ValueError as err:
error = err
if error:
sys.exit(error)
""")
with temp_dir() as script_dir:
script_name = _make_test_script(script_dir, 'script', script)
exitcode, stdout, stderr = assert_python_failure(script_name)
text = stderr.decode('ascii')
self.assertEqual(text, "some text")
def test_main():
support.run_unittest(CmdLineTest)
support.reap_children()
if __name__ == '__main__':
test_main()
|