| repo_name (string, 5–100 chars) | ref (string, 12–67 chars) | path (string, 4–244 chars) | copies (string, 1–8 chars) | content (string, 0–1.05M chars, ⌀ = null) |
|---|---|---|---|---|
| kutenai/django | refs/heads/master | tests/nested_foreign_keys/tests.py | 174 |
from __future__ import unicode_literals
from django.test import TestCase
from .models import (
Event, Movie, Package, PackageNullFK, Person, Screening, ScreeningNullFK,
)
# These are tests for #16715. The basic scheme is always the same: 3 models with
# 2 relations. The first relation may be null, while the second is non-nullable.
# In some cases, Django would pick the wrong join type for the second relation,
# resulting in missing objects in the queryset.
#
# Model A
# | (Relation A/B : nullable)
# Model B
# | (Relation B/C : non-nullable)
# Model C
#
# Because of the possibility of NULL rows resulting from the LEFT OUTER JOIN
# between Model A and Model B (i.e. instances of A without reference to B),
# the second join must also be LEFT OUTER JOIN, so that we do not ignore
# instances of A that do not reference B.
#
# Relation A/B can either be an explicit foreign key or an implicit reverse
# relation such as introduced by one-to-one relations (through multi-table
# inheritance).
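# A minimal sketch of the models these tests presumably import (the real
# definitions live in ./models.py; the field details below are assumptions,
# shown only to make the A -> B -> C chain above concrete):
#
#     class Person(models.Model):
#         name = models.CharField(max_length=100)
#
#     class Movie(models.Model):
#         title = models.CharField(max_length=100)
#         director = models.ForeignKey(Person, on_delete=models.CASCADE)
#
#     class Event(models.Model):
#         pass
#
#     class Screening(Event):  # multi-table inheritance: implicit reverse O2O
#         movie = models.ForeignKey(Movie, on_delete=models.CASCADE)
#
#     class Package(models.Model):
#         screening = models.ForeignKey(Screening, null=True,
#                                       on_delete=models.CASCADE)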
class NestedForeignKeysTests(TestCase):
def setUp(self):
self.director = Person.objects.create(name='Terry Gilliam / Terry Jones')
self.movie = Movie.objects.create(title='Monty Python and the Holy Grail', director=self.director)
# This test failed in #16715 because in some cases INNER JOIN was selected
# for the second foreign key relation instead of LEFT OUTER JOIN.
def test_inheritance(self):
Event.objects.create()
Screening.objects.create(movie=self.movie)
self.assertEqual(len(Event.objects.all()), 2)
self.assertEqual(len(Event.objects.select_related('screening')), 2)
# This failed.
self.assertEqual(len(Event.objects.select_related('screening__movie')), 2)
self.assertEqual(len(Event.objects.values()), 2)
self.assertEqual(len(Event.objects.values('screening__pk')), 2)
self.assertEqual(len(Event.objects.values('screening__movie__pk')), 2)
self.assertEqual(len(Event.objects.values('screening__movie__title')), 2)
# This failed.
self.assertEqual(len(Event.objects.values('screening__movie__pk', 'screening__movie__title')), 2)
# Simple filter/exclude queries for good measure.
self.assertEqual(Event.objects.filter(screening__movie=self.movie).count(), 1)
self.assertEqual(Event.objects.exclude(screening__movie=self.movie).count(), 1)
# These all work because the second foreign key in the chain has null=True.
def test_inheritance_null_FK(self):
Event.objects.create()
ScreeningNullFK.objects.create(movie=None)
ScreeningNullFK.objects.create(movie=self.movie)
self.assertEqual(len(Event.objects.all()), 3)
self.assertEqual(len(Event.objects.select_related('screeningnullfk')), 3)
self.assertEqual(len(Event.objects.select_related('screeningnullfk__movie')), 3)
self.assertEqual(len(Event.objects.values()), 3)
self.assertEqual(len(Event.objects.values('screeningnullfk__pk')), 3)
self.assertEqual(len(Event.objects.values('screeningnullfk__movie__pk')), 3)
self.assertEqual(len(Event.objects.values('screeningnullfk__movie__title')), 3)
self.assertEqual(len(Event.objects.values('screeningnullfk__movie__pk', 'screeningnullfk__movie__title')), 3)
self.assertEqual(Event.objects.filter(screeningnullfk__movie=self.movie).count(), 1)
self.assertEqual(Event.objects.exclude(screeningnullfk__movie=self.movie).count(), 2)
def test_null_exclude(self):
screening = ScreeningNullFK.objects.create(movie=None)
ScreeningNullFK.objects.create(movie=self.movie)
self.assertEqual(
list(ScreeningNullFK.objects.exclude(movie__id=self.movie.pk)),
[screening])
# This test failed in #16715 because in some cases INNER JOIN was selected
# for the second foreign key relation instead of LEFT OUTER JOIN.
def test_explicit_ForeignKey(self):
Package.objects.create()
screening = Screening.objects.create(movie=self.movie)
Package.objects.create(screening=screening)
self.assertEqual(len(Package.objects.all()), 2)
self.assertEqual(len(Package.objects.select_related('screening')), 2)
self.assertEqual(len(Package.objects.select_related('screening__movie')), 2)
self.assertEqual(len(Package.objects.values()), 2)
self.assertEqual(len(Package.objects.values('screening__pk')), 2)
self.assertEqual(len(Package.objects.values('screening__movie__pk')), 2)
self.assertEqual(len(Package.objects.values('screening__movie__title')), 2)
# This failed.
self.assertEqual(len(Package.objects.values('screening__movie__pk', 'screening__movie__title')), 2)
self.assertEqual(Package.objects.filter(screening__movie=self.movie).count(), 1)
self.assertEqual(Package.objects.exclude(screening__movie=self.movie).count(), 1)
# These all work because the second foreign key in the chain has null=True.
def test_explicit_ForeignKey_NullFK(self):
PackageNullFK.objects.create()
screening = ScreeningNullFK.objects.create(movie=None)
screening_with_movie = ScreeningNullFK.objects.create(movie=self.movie)
PackageNullFK.objects.create(screening=screening)
PackageNullFK.objects.create(screening=screening_with_movie)
self.assertEqual(len(PackageNullFK.objects.all()), 3)
self.assertEqual(len(PackageNullFK.objects.select_related('screening')), 3)
self.assertEqual(len(PackageNullFK.objects.select_related('screening__movie')), 3)
self.assertEqual(len(PackageNullFK.objects.values()), 3)
self.assertEqual(len(PackageNullFK.objects.values('screening__pk')), 3)
self.assertEqual(len(PackageNullFK.objects.values('screening__movie__pk')), 3)
self.assertEqual(len(PackageNullFK.objects.values('screening__movie__title')), 3)
self.assertEqual(len(PackageNullFK.objects.values('screening__movie__pk', 'screening__movie__title')), 3)
self.assertEqual(PackageNullFK.objects.filter(screening__movie=self.movie).count(), 1)
self.assertEqual(PackageNullFK.objects.exclude(screening__movie=self.movie).count(), 2)
# Some additional tests for #16715. The only difference is the depth of the
# nesting as we now use 4 models instead of 3 (and thus 3 relations). This
# checks if promotion of join types works for deeper nesting too.
class DeeplyNestedForeignKeysTests(TestCase):
def setUp(self):
self.director = Person.objects.create(name='Terry Gilliam / Terry Jones')
self.movie = Movie.objects.create(title='Monty Python and the Holy Grail', director=self.director)
def test_inheritance(self):
Event.objects.create()
Screening.objects.create(movie=self.movie)
self.assertEqual(len(Event.objects.all()), 2)
self.assertEqual(len(Event.objects.select_related('screening__movie__director')), 2)
self.assertEqual(len(Event.objects.values()), 2)
self.assertEqual(len(Event.objects.values('screening__movie__director__pk')), 2)
self.assertEqual(len(Event.objects.values('screening__movie__director__name')), 2)
self.assertEqual(
len(Event.objects.values('screening__movie__director__pk', 'screening__movie__director__name')),
2
)
self.assertEqual(len(Event.objects.values('screening__movie__pk', 'screening__movie__director__pk')), 2)
self.assertEqual(len(Event.objects.values('screening__movie__pk', 'screening__movie__director__name')), 2)
self.assertEqual(len(Event.objects.values('screening__movie__title', 'screening__movie__director__pk')), 2)
self.assertEqual(len(Event.objects.values('screening__movie__title', 'screening__movie__director__name')), 2)
self.assertEqual(Event.objects.filter(screening__movie__director=self.director).count(), 1)
self.assertEqual(Event.objects.exclude(screening__movie__director=self.director).count(), 1)
def test_explicit_ForeignKey(self):
Package.objects.create()
screening = Screening.objects.create(movie=self.movie)
Package.objects.create(screening=screening)
self.assertEqual(len(Package.objects.all()), 2)
self.assertEqual(len(Package.objects.select_related('screening__movie__director')), 2)
self.assertEqual(len(Package.objects.values()), 2)
self.assertEqual(len(Package.objects.values('screening__movie__director__pk')), 2)
self.assertEqual(len(Package.objects.values('screening__movie__director__name')), 2)
self.assertEqual(
len(Package.objects.values('screening__movie__director__pk', 'screening__movie__director__name')),
2
)
self.assertEqual(len(Package.objects.values('screening__movie__pk', 'screening__movie__director__pk')), 2)
self.assertEqual(len(Package.objects.values('screening__movie__pk', 'screening__movie__director__name')), 2)
self.assertEqual(len(Package.objects.values('screening__movie__title', 'screening__movie__director__pk')), 2)
self.assertEqual(len(Package.objects.values('screening__movie__title', 'screening__movie__director__name')), 2)
self.assertEqual(Package.objects.filter(screening__movie__director=self.director).count(), 1)
self.assertEqual(Package.objects.exclude(screening__movie__director=self.director).count(), 1)
| aheadley/python-nepugia | refs/heads/master | nepugia/scripts/dump_items.py | 2 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright (c) 2015 Alex Headley <aheadley@waysaboutstuff.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
def main():
pass
if __name__ == '__main__':
main()
| maxsocl/django | refs/heads/master | tests/i18n/exclude/__init__.py | 428 |
# This package is used to test the --exclude option of
# the makemessages and compilemessages management commands.
# The locale directory for this app is generated automatically
# by the test cases.
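# For reference, the option under test is exercised with commands like the
# following (locale names here are only examples):
#     django-admin makemessages --locale=de --exclude=pt_BR
#     django-admin compilemessages --exclude=pt_BR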
from django.utils.translation import ugettext as _
# Translators: This comment should be extracted
dummy1 = _("This is a translatable string.")
# This comment should not be extracted
dummy2 = _("This is another translatable string.")
| newrocknj/horizon | refs/heads/master | openstack_dashboard/dashboards/admin/images/urls.py | 46 |
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import patterns
from django.conf.urls import url
from openstack_dashboard.dashboards.admin.images import views
urlpatterns = patterns(
'openstack_dashboard.dashboards.admin.images.views',
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^create/$', views.CreateView.as_view(), name='create'),
url(r'^(?P<image_id>[^/]+)/update/$',
views.UpdateView.as_view(), name='update'),
url(r'^(?P<id>[^/]+)/update_metadata/$',
views.UpdateMetadataView.as_view(), name='update_metadata'),
url(r'^(?P<image_id>[^/]+)/detail/$',
views.DetailView.as_view(), name='detail')
)
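# For orientation, these named routes resolve through the dashboard namespace,
# e.g. (an illustrative lookup, not code from this module):
#     from django.core.urlresolvers import reverse
#     url = reverse('horizon:admin:images:update', args=(image_id,))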
| culturagovbr/e-pracas | refs/heads/master | gestor/migrations/0001_initial.py | 1 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2018-04-03 16:00
from __future__ import unicode_literals
import core.models
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
('pracas', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='ArquivosProcessoVinculacao',
fields=[
('id_pub', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False, verbose_name='ID Público')),
('data_envio', models.DateTimeField(auto_now_add=True, verbose_name='Data de Envio do Arquivo')),
('tipo', models.CharField(max_length=15, verbose_name='Tipo de Arquivo')),
('arquivo', models.FileField(upload_to=core.models.upload_doc_to)),
('verificado', models.BooleanField(default=False, verbose_name='Arquivo verificado pelo gestor do Ministério')),
('comentarios', models.TextField(blank=True, null=True, verbose_name='Comentários sobre o arquivo')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Gestor',
fields=[
('id_pub', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False, verbose_name='ID Público')),
('atual', models.BooleanField(default=False, verbose_name='Gestor Atual')),
('data_inicio_gestao', models.DateField(default=django.utils.timezone.now, verbose_name='Data de Inicio da Gestão')),
('data_encerramento_gestao', models.DateField(null=True, verbose_name='Data de Encerramento da Gestão')),
('praca', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='gestor', to='pracas.Praca')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='gestor', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ProcessoVinculacao',
fields=[
('id_pub', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False, verbose_name='ID Público')),
('data_abertura', models.DateField(auto_now_add=True, verbose_name='Data de Abertura do Processo')),
('data_finalizacao', models.DateField(blank=True, default=datetime.date.today, null=True, verbose_name='Data de Conclusão do Processo de Vinculação')),
('aprovado', models.BooleanField(default=False, verbose_name='Processo aprovado')),
('finalizado', models.BooleanField(default=False, verbose_name='Processo finalizado')),
('despacho', models.TextField(blank=True, null=True, verbose_name='Despacho do Processo')),
('praca', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pracas.Praca')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='RegistroProcessoVinculacao',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('data', models.DateField(default=datetime.date.today, verbose_name='Data do Evento')),
('situacao', models.CharField(choices=[('c', 'Cancelado'), ('p', 'Pendente'), ('a', 'Aprovado')], max_length=1, verbose_name='Situacao')),
('descricao', models.TextField(blank=True, null=True, verbose_name='Descrição')),
('processo', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='registro', to='gestor.ProcessoVinculacao')),
],
options={
'ordering': ['-data'],
},
),
migrations.AddField(
model_name='arquivosprocessovinculacao',
name='processo',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='files', to='gestor.ProcessoVinculacao'),
),
migrations.AddField(
model_name='arquivosprocessovinculacao',
name='verificado_por',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| trhoden/ceph-deploy | refs/heads/master | ceph_deploy/util/arg_validators.py | 19 |
import socket
import argparse
import re
class RegexMatch(object):
"""
Performs regular expression match on value.
If the regular expression pattern matches, an argparse-compatible error
is raised with the given message.
"""
def __init__(self, pattern, statement=None):
self.string_pattern = pattern
self.pattern = re.compile(pattern)
self.statement = statement
if not self.statement:
self.statement = "must match pattern %s" % self.string_pattern
def __call__(self, string):
match = self.pattern.search(string)
if match:
raise argparse.ArgumentError(None, self.statement)
return string
class Hostname(object):
"""
Checks whether a given hostname is resolvable in DNS, otherwise raising an
argparse error.
"""
def __init__(self, _socket=None):
self.socket = _socket or socket # just used for testing
def __call__(self, string):
parts = string.split(':', 1)
name = parts[0]
host = parts[-1]
try:
self.socket.getaddrinfo(host, 0)
except self.socket.gaierror:
msg = "hostname: %s is not resolvable" % host
raise argparse.ArgumentError(None, msg)
try:
self.socket.getaddrinfo(name, 0, 0, 0, 0, self.socket.AI_NUMERICHOST)
except self.socket.gaierror:
return string # not an IP
else:
msg = '%s must be a hostname not an IP' % name
raise argparse.ArgumentError(None, msg)
return string
class Subnet(object):
"""
A really dumb validator to ensure that we are receiving a subnet (or
something that actually looks like a subnet).
It doesn't enforce at all the constraints of proper validation as that has
its own set of caveats that are difficult to implement given that
ceph-deploy doesn't (should not) include third party dependencies.
"""
def __call__(self, string):
ip = string.split('/')[0]
ip_parts = ip.split('.')
if len(ip_parts) != 4:
err = "subnet must have at least 4 numbers separated by dots like x.x.x.x/xx, but got: %s" % string
raise argparse.ArgumentError(None, err)
if [i for i in ip_parts[:4] if i.isalpha()]: # only numbers
err = "subnet must have digits separated by dots like x.x.x.x/xx, but got: %s" % string
raise argparse.ArgumentError(None, err)
if len(string.split('/')) != 2:
err = "subnet must contain a slash, like x.x.x.x/xx, but got: %s" % string
raise argparse.ArgumentError(None, err)
return string
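# Example wiring (a sketch; the option and argument names below are
# illustrative, not ceph-deploy's actual CLI):
#
#     parser = argparse.ArgumentParser()
#     parser.add_argument('--public-network', type=Subnet(),
#                         help='subnet in x.x.x.x/xx form')
#     parser.add_argument('host', type=Hostname(),
#                         help='a DNS-resolvable hostname (not an IP)')
#     args = parser.parse_args(['--public-network', '10.0.0.0/24', 'node1'])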
| vicky2135/lucious | refs/heads/master | oscar/lib/python2.7/site-packages/pygments/lexers/rebol.py | 25 |
# -*- coding: utf-8 -*-
"""
pygments.lexers.rebol
~~~~~~~~~~~~~~~~~~~~~
Lexers for the REBOL and related languages.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Generic, Whitespace
__all__ = ['RebolLexer', 'RedLexer']
class RebolLexer(RegexLexer):
"""
A `REBOL <http://www.rebol.com/>`_ lexer.
.. versionadded:: 1.1
"""
name = 'REBOL'
aliases = ['rebol']
filenames = ['*.r', '*.r3', '*.reb']
mimetypes = ['text/x-rebol']
flags = re.IGNORECASE | re.MULTILINE
escape_re = r'(?:\^\([0-9a-f]{1,4}\)*)'
def word_callback(lexer, match):
word = match.group()
if re.match(".*:$", word):
yield match.start(), Generic.Subheading, word
elif re.match(
r'(native|alias|all|any|as-string|as-binary|bind|bound\?|case|'
r'catch|checksum|comment|debase|dehex|exclude|difference|disarm|'
r'either|else|enbase|foreach|remove-each|form|free|get|get-env|if|'
r'in|intersect|loop|minimum-of|maximum-of|mold|new-line|'
r'new-line\?|not|now|prin|print|reduce|compose|construct|repeat|'
r'reverse|save|script\?|set|shift|switch|throw|to-hex|trace|try|'
r'type\?|union|unique|unless|unprotect|unset|until|use|value\?|'
r'while|compress|decompress|secure|open|close|read|read-io|'
r'write-io|write|update|query|wait|input\?|exp|log-10|log-2|'
r'log-e|square-root|cosine|sine|tangent|arccosine|arcsine|'
r'arctangent|protect|lowercase|uppercase|entab|detab|connected\?|'
r'browse|launch|stats|get-modes|set-modes|to-local-file|'
r'to-rebol-file|encloak|decloak|create-link|do-browser|bind\?|'
r'hide|draw|show|size-text|textinfo|offset-to-caret|'
r'caret-to-offset|local-request-file|rgb-to-hsv|hsv-to-rgb|'
r'crypt-strength\?|dh-make-key|dh-generate-key|dh-compute-key|'
r'dsa-make-key|dsa-generate-key|dsa-make-signature|'
r'dsa-verify-signature|rsa-make-key|rsa-generate-key|'
r'rsa-encrypt)$', word):
yield match.start(), Name.Builtin, word
elif re.match(
r'(add|subtract|multiply|divide|remainder|power|and~|or~|xor~|'
r'minimum|maximum|negate|complement|absolute|random|head|tail|'
r'next|back|skip|at|pick|first|second|third|fourth|fifth|sixth|'
r'seventh|eighth|ninth|tenth|last|path|find|select|make|to|copy\*|'
r'insert|remove|change|poke|clear|trim|sort|min|max|abs|cp|'
r'copy)$', word):
yield match.start(), Name.Function, word
elif re.match(
r'(error|source|input|license|help|install|echo|Usage|with|func|'
r'throw-on-error|function|does|has|context|probe|\?\?|as-pair|'
r'mod|modulo|round|repend|about|set-net|append|join|rejoin|reform|'
r'remold|charset|array|replace|move|extract|forskip|forall|alter|'
r'first+|also|take|for|forever|dispatch|attempt|what-dir|'
r'change-dir|clean-path|list-dir|dirize|rename|split-path|delete|'
r'make-dir|delete-dir|in-dir|confirm|dump-obj|upgrade|what|'
r'build-tag|process-source|build-markup|decode-cgi|read-cgi|'
r'write-user|save-user|set-user-name|protect-system|parse-xml|'
r'cvs-date|cvs-version|do-boot|get-net-info|desktop|layout|'
r'scroll-para|get-face|alert|set-face|uninstall|unfocus|'
r'request-dir|center-face|do-events|net-error|decode-url|'
r'parse-header|parse-header-date|parse-email-addrs|import-email|'
r'send|build-attach-body|resend|show-popup|hide-popup|open-events|'
r'find-key-face|do-face|viewtop|confine|find-window|'
r'insert-event-func|remove-event-func|inform|dump-pane|dump-face|'
r'flag-face|deflag-face|clear-fields|read-net|vbug|path-thru|'
r'read-thru|load-thru|do-thru|launch-thru|load-image|'
r'request-download|do-face-alt|set-font|set-para|get-style|'
r'set-style|make-face|stylize|choose|hilight-text|hilight-all|'
r'unlight-text|focus|scroll-drag|clear-face|reset-face|scroll-face|'
r'resize-face|load-stock|load-stock-block|notify|request|flash|'
r'request-color|request-pass|request-text|request-list|'
r'request-date|request-file|dbug|editor|link-relative-path|'
r'emailer|parse-error)$', word):
yield match.start(), Keyword.Namespace, word
elif re.match(
r'(halt|quit|do|load|q|recycle|call|run|ask|parse|view|unview|'
r'return|exit|break)$', word):
yield match.start(), Name.Exception, word
elif re.match('REBOL$', word):
yield match.start(), Generic.Heading, word
elif re.match("to-.*", word):
yield match.start(), Keyword, word
elif re.match('(\+|-|\*|/|//|\*\*|and|or|xor|=\?|=|==|<>|<|>|<=|>=)$',
word):
yield match.start(), Operator, word
elif re.match(".*\?$", word):
yield match.start(), Keyword, word
elif re.match(".*\!$", word):
yield match.start(), Keyword.Type, word
elif re.match("'.*", word):
yield match.start(), Name.Variable.Instance, word # lit-word
elif re.match("#.*", word):
yield match.start(), Name.Label, word # issue
elif re.match("%.*", word):
yield match.start(), Name.Decorator, word # file
else:
yield match.start(), Name.Variable, word
tokens = {
'root': [
(r'[^R]+', Comment),
(r'REBOL\s+\[', Generic.Strong, 'script'),
(r'R', Comment)
],
'script': [
(r'\s+', Text),
(r'#"', String.Char, 'char'),
(r'#\{[0-9a-f]*\}', Number.Hex),
(r'2#\{', Number.Hex, 'bin2'),
(r'64#\{[0-9a-z+/=\s]*\}', Number.Hex),
(r'"', String, 'string'),
(r'\{', String, 'string2'),
(r';#+.*\n', Comment.Special),
(r';\*+.*\n', Comment.Preproc),
(r';.*\n', Comment),
(r'%"', Name.Decorator, 'stringFile'),
(r'%[^(^{")\s\[\]]+', Name.Decorator),
(r'[+-]?([a-z]{1,3})?\$\d+(\.\d+)?', Number.Float), # money
(r'[+-]?\d+\:\d+(\:\d+)?(\.\d+)?', String.Other), # time
(r'\d+[\-/][0-9a-z]+[\-/]\d+(\/\d+\:\d+((\:\d+)?'
r'([.\d+]?([+-]?\d+:\d+)?)?)?)?', String.Other), # date
(r'\d+(\.\d+)+\.\d+', Keyword.Constant), # tuple
(r'\d+X\d+', Keyword.Constant), # pair
(r'[+-]?\d+(\'\d+)?([.,]\d*)?E[+-]?\d+', Number.Float),
(r'[+-]?\d+(\'\d+)?[.,]\d*', Number.Float),
(r'[+-]?\d+(\'\d+)?', Number),
(r'[\[\]()]', Generic.Strong),
(r'[a-z]+[^(^{"\s:)]*://[^(^{"\s)]*', Name.Decorator), # url
(r'mailto:[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator), # url
(r'[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator), # email
(r'comment\s"', Comment, 'commentString1'),
(r'comment\s\{', Comment, 'commentString2'),
(r'comment\s\[', Comment, 'commentBlock'),
(r'comment\s[^(\s{"\[]+', Comment),
(r'/[^(^{")\s/[\]]*', Name.Attribute),
(r'([^(^{")\s/[\]]+)(?=[:({"\s/\[\]])', word_callback),
(r'<[\w:.-]*>', Name.Tag),
(r'<[^(<>\s")]+', Name.Tag, 'tag'),
(r'([^(^{")\s]+)', Text),
],
'string': [
(r'[^(^")]+', String),
(escape_re, String.Escape),
(r'[(|)]+', String),
(r'\^.', String.Escape),
(r'"', String, '#pop'),
],
'string2': [
(r'[^(^{})]+', String),
(escape_re, String.Escape),
(r'[(|)]+', String),
(r'\^.', String.Escape),
(r'\{', String, '#push'),
(r'\}', String, '#pop'),
],
'stringFile': [
(r'[^(^")]+', Name.Decorator),
(escape_re, Name.Decorator),
(r'\^.', Name.Decorator),
(r'"', Name.Decorator, '#pop'),
],
'char': [
(escape_re + '"', String.Char, '#pop'),
(r'\^."', String.Char, '#pop'),
(r'."', String.Char, '#pop'),
],
'tag': [
(escape_re, Name.Tag),
(r'"', Name.Tag, 'tagString'),
(r'[^(<>\r\n")]+', Name.Tag),
(r'>', Name.Tag, '#pop'),
],
'tagString': [
(r'[^(^")]+', Name.Tag),
(escape_re, Name.Tag),
(r'[(|)]+', Name.Tag),
(r'\^.', Name.Tag),
(r'"', Name.Tag, '#pop'),
],
'tuple': [
(r'(\d+\.)+', Keyword.Constant),
(r'\d+', Keyword.Constant, '#pop'),
],
'bin2': [
(r'\s+', Number.Hex),
(r'([01]\s*){8}', Number.Hex),
(r'\}', Number.Hex, '#pop'),
],
'commentString1': [
(r'[^(^")]+', Comment),
(escape_re, Comment),
(r'[(|)]+', Comment),
(r'\^.', Comment),
(r'"', Comment, '#pop'),
],
'commentString2': [
(r'[^(^{})]+', Comment),
(escape_re, Comment),
(r'[(|)]+', Comment),
(r'\^.', Comment),
(r'\{', Comment, '#push'),
(r'\}', Comment, '#pop'),
],
'commentBlock': [
(r'\[', Comment, '#push'),
(r'\]', Comment, '#pop'),
(r'"', Comment, "commentString1"),
(r'\{', Comment, "commentString2"),
(r'[^(\[\]"{)]+', Comment),
],
}
def analyse_text(text):
"""
Check if code contains a REBOL header, in which case it is probably not R code
"""
if re.match(r'^\s*REBOL\s*\[', text, re.IGNORECASE):
# The code starts with REBOL header
return 1.0
elif re.search(r'\s*REBOL\s*\[', text, re.IGNORECASE):
# The code contains REBOL header but also some text before it
return 0.5
class RedLexer(RegexLexer):
"""
A `Red-language <http://www.red-lang.org/>`_ lexer.
.. versionadded:: 2.0
"""
name = 'Red'
aliases = ['red', 'red/system']
filenames = ['*.red', '*.reds']
mimetypes = ['text/x-red', 'text/x-red-system']
flags = re.IGNORECASE | re.MULTILINE
escape_re = r'(?:\^\([0-9a-f]{1,4}\)*)'
def word_callback(lexer, match):
word = match.group()
if re.match(".*:$", word):
yield match.start(), Generic.Subheading, word
elif re.match(r'(if|unless|either|any|all|while|until|loop|repeat|'
r'foreach|forall|func|function|does|has|switch|'
r'case|reduce|compose|get|set|print|prin|equal\?|'
r'not-equal\?|strict-equal\?|lesser\?|greater\?|lesser-or-equal\?|'
r'greater-or-equal\?|same\?|not|type\?|stats|'
r'bind|union|replace|charset|routine)$', word):
yield match.start(), Name.Builtin, word
elif re.match(r'(make|random|reflect|to|form|mold|absolute|add|divide|multiply|negate|'
r'power|remainder|round|subtract|even\?|odd\?|and~|complement|or~|xor~|'
r'append|at|back|change|clear|copy|find|head|head\?|index\?|insert|'
r'length\?|next|pick|poke|remove|reverse|select|sort|skip|swap|tail|tail\?|'
r'take|trim|create|close|delete|modify|open|open\?|query|read|rename|'
r'update|write)$', word):
yield match.start(), Name.Function, word
elif re.match(r'(yes|on|no|off|true|false|tab|cr|lf|newline|escape|slash|sp|space|null|'
r'none|crlf|dot|null-byte)$', word):
yield match.start(), Name.Builtin.Pseudo, word
elif re.match(r'(#system-global|#include|#enum|#define|#either|#if|#import|#export|'
r'#switch|#default|#get-definition)$', word):
yield match.start(), Keyword.Namespace, word
elif re.match(r'(system|halt|quit|quit-return|do|load|q|recycle|call|run|ask|parse|'
r'raise-error|return|exit|break|alias|push|pop|probe|\?\?|spec-of|body-of|'
r'quote|forever)$', word):
yield match.start(), Name.Exception, word
elif re.match(r'(action\?|block\?|char\?|datatype\?|file\?|function\?|get-path\?|zero\?|'
r'get-word\?|integer\?|issue\?|lit-path\?|lit-word\?|logic\?|native\?|'
r'op\?|paren\?|path\?|refinement\?|set-path\?|set-word\?|string\?|unset\?|'
r'any-struct\?|none\?|word\?|any-series\?)$', word):
yield match.start(), Keyword, word
elif re.match(r'(JNICALL|stdcall|cdecl|infix)$', word):
yield match.start(), Keyword.Namespace, word
elif re.match("to-.*", word):
yield match.start(), Keyword, word
elif re.match('(\+|-\*\*|-|\*\*|//|/|\*|and|or|xor|=\?|===|==|=|<>|<=|>=|'
'<<<|>>>|<<|>>|<|>%)$', word):
yield match.start(), Operator, word
elif re.match(".*\!$", word):
yield match.start(), Keyword.Type, word
elif re.match("'.*", word):
yield match.start(), Name.Variable.Instance, word # lit-word
elif re.match("#.*", word):
yield match.start(), Name.Label, word # issue
elif re.match("%.*", word):
yield match.start(), Name.Decorator, word # file
elif re.match(":.*", word):
yield match.start(), Generic.Subheading, word # get-word
else:
yield match.start(), Name.Variable, word
tokens = {
'root': [
(r'[^R]+', Comment),
(r'Red/System\s+\[', Generic.Strong, 'script'),
(r'Red\s+\[', Generic.Strong, 'script'),
(r'R', Comment)
],
'script': [
(r'\s+', Text),
(r'#"', String.Char, 'char'),
(r'#\{[0-9a-f\s]*\}', Number.Hex),
(r'2#\{', Number.Hex, 'bin2'),
(r'64#\{[0-9a-z+/=\s]*\}', Number.Hex),
(r'([0-9a-f]+)(h)((\s)|(?=[\[\]{}"()]))',
bygroups(Number.Hex, Name.Variable, Whitespace)),
(r'"', String, 'string'),
(r'\{', String, 'string2'),
(r';#+.*\n', Comment.Special),
(r';\*+.*\n', Comment.Preproc),
(r';.*\n', Comment),
(r'%"', Name.Decorator, 'stringFile'),
(r'%[^(^{")\s\[\]]+', Name.Decorator),
(r'[+-]?([a-z]{1,3})?\$\d+(\.\d+)?', Number.Float), # money
(r'[+-]?\d+\:\d+(\:\d+)?(\.\d+)?', String.Other), # time
(r'\d+[\-/][0-9a-z]+[\-/]\d+(/\d+:\d+((:\d+)?'
r'([\.\d+]?([+-]?\d+:\d+)?)?)?)?', String.Other), # date
(r'\d+(\.\d+)+\.\d+', Keyword.Constant), # tuple
(r'\d+X\d+', Keyword.Constant), # pair
(r'[+-]?\d+(\'\d+)?([.,]\d*)?E[+-]?\d+', Number.Float),
(r'[+-]?\d+(\'\d+)?[.,]\d*', Number.Float),
(r'[+-]?\d+(\'\d+)?', Number),
(r'[\[\]()]', Generic.Strong),
(r'[a-z]+[^(^{"\s:)]*://[^(^{"\s)]*', Name.Decorator), # url
(r'mailto:[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator), # url
(r'[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator), # email
(r'comment\s"', Comment, 'commentString1'),
(r'comment\s\{', Comment, 'commentString2'),
(r'comment\s\[', Comment, 'commentBlock'),
(r'comment\s[^(\s{"\[]+', Comment),
(r'/[^(^{^")\s/[\]]*', Name.Attribute),
(r'([^(^{^")\s/[\]]+)(?=[:({"\s/\[\]])', word_callback),
(r'<[\w:.-]*>', Name.Tag),
(r'<[^(<>\s")]+', Name.Tag, 'tag'),
(r'([^(^{")\s]+)', Text),
],
'string': [
(r'[^(^")]+', String),
(escape_re, String.Escape),
(r'[(|)]+', String),
(r'\^.', String.Escape),
(r'"', String, '#pop'),
],
'string2': [
(r'[^(^{})]+', String),
(escape_re, String.Escape),
(r'[(|)]+', String),
(r'\^.', String.Escape),
(r'\{', String, '#push'),
(r'\}', String, '#pop'),
],
'stringFile': [
(r'[^(^")]+', Name.Decorator),
(escape_re, Name.Decorator),
(r'\^.', Name.Decorator),
(r'"', Name.Decorator, '#pop'),
],
'char': [
(escape_re + '"', String.Char, '#pop'),
(r'\^."', String.Char, '#pop'),
(r'."', String.Char, '#pop'),
],
'tag': [
(escape_re, Name.Tag),
(r'"', Name.Tag, 'tagString'),
(r'[^(<>\r\n")]+', Name.Tag),
(r'>', Name.Tag, '#pop'),
],
'tagString': [
(r'[^(^")]+', Name.Tag),
(escape_re, Name.Tag),
(r'[(|)]+', Name.Tag),
(r'\^.', Name.Tag),
(r'"', Name.Tag, '#pop'),
],
'tuple': [
(r'(\d+\.)+', Keyword.Constant),
(r'\d+', Keyword.Constant, '#pop'),
],
'bin2': [
(r'\s+', Number.Hex),
(r'([01]\s*){8}', Number.Hex),
(r'\}', Number.Hex, '#pop'),
],
'commentString1': [
(r'[^(^")]+', Comment),
(escape_re, Comment),
(r'[(|)]+', Comment),
(r'\^.', Comment),
(r'"', Comment, '#pop'),
],
'commentString2': [
(r'[^(^{})]+', Comment),
(escape_re, Comment),
(r'[(|)]+', Comment),
(r'\^.', Comment),
(r'\{', Comment, '#push'),
(r'\}', Comment, '#pop'),
],
'commentBlock': [
(r'\[', Comment, '#push'),
(r'\]', Comment, '#pop'),
(r'"', Comment, "commentString1"),
(r'\{', Comment, "commentString2"),
(r'[^(\[\]"{)]+', Comment),
],
}
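# Quick usage sketch (standard pygments API; the sample snippet is invented):
#
#     from pygments import highlight
#     from pygments.formatters import HtmlFormatter
#     print(highlight('Red [Title: "demo"] print "hi"',
#                     RedLexer(), HtmlFormatter()))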
| ahb0327/intellij-community | refs/heads/master | python/testData/resolve/multiFile/fromImportSubModuleDunderAll/pkg1/__init__.py | 148 |
__all__ = ['m1']
| houzhenggang/hiwifi-openwrt-HC5661-HC5761 | refs/heads/master | staging_dir/host/lib64/scons-2.1.0/SCons/Tool/hpcc.py | 21 |
"""SCons.Tool.hpcc
Tool-specific initialization for HP aCC and cc.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/hpcc.py 5357 2011/09/09 21:31:03 bdeegan"
import SCons.Util
import cc
def generate(env):
"""Add Builders and construction variables for aCC & cc to an Environment."""
cc.generate(env)
env['CXX'] = 'aCC'
env['SHCCFLAGS'] = SCons.Util.CLVar('$CCFLAGS +Z')
def exists(env):
return env.Detect('aCC')
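# Example of selecting this tool explicitly (standard SCons usage; the source
# file name is illustrative):
#
#     env = Environment(tools=['hpcc'])
#     env.Program('hello', 'hello.c')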
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gltn/stdm | refs/heads/master | stdm/settings/projectionSelector.py | 1 |
'''
Name : ProjectionSelector
Description : Load generic projections selector dialog for user to select the srs id
Date : 17/Oct/13
copyright : (C) 2013 by Solomon Njoroge
email : njoroge.solomon@yahoo.com
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
'''
from qgis.PyQt.QtWidgets import QDialog
from qgis.gui import QgsProjectionSelectionDialog
class ProjectionSelector(QDialog):
def __init__(self, parent):
super(ProjectionSelector, self).__init__(parent)
self.parent = parent
def loadAvailableSystems(self):
coordSys = ""
crsDlg = QgsProjectionSelectionDialog(self.parent)
if crsDlg.exec_() == QDialog.Accepted:
coordSys = str(crsDlg.crs().authid())
return coordSys
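# Typical use (a sketch; `parent` is any QWidget that owns the dialog):
#
#     selector = ProjectionSelector(parent)
#     authid = selector.loadAvailableSystems()  # e.g. 'EPSG:4326', '' if cancelled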
| google/matched_markets | refs/heads/master | matched_markets/tests/test_tbr.py | 1 |
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for //ads/amt/geoexperiments/methodology/tbr.py."""
import os
from absl import flags
from matched_markets.examples import salesandcost
from matched_markets.methodology import semantics
from matched_markets.methodology import tbr
import unittest
class TBRTest(unittest.TestCase):
def setUp(self):
"""This method will be run before each of the test methods in the class."""
super(TBRTest, self).setUp()
# Load the salesandcost dataset.
csv_path = 'matched_markets/csv/'
csv_dir = os.path.join("", csv_path)
self.data = salesandcost.example_data_formatted(csv_dir)
# Data frame names for the salesandcost example.
self.key_response = 'sales'
self.key_cost = 'cost'
self.key_group = 'geo.group'
self.key_period = 'period'
self.key_geo = 'geo'
self.key_date = 'date'
# Semantics for groups and periods.
self.groups = semantics.GroupSemantics()
self.periods = semantics.PeriodSemantics()
def testSemanticsAvailable(self):
"""Check if semantics for the data are available."""
# Fully set up a TBR object.
tbr_model = tbr.TBR()
target = self.key_response
tbr_model.fit(self.data,
target,
key_response=self.key_response,
key_cost=self.key_cost,
key_group=self.key_group,
key_period=self.key_period,
key_date=self.key_date)
# Check one member of each of the col names, group and period semantics.
self.assertEqual(tbr_model.df_names.group, self.key_group)
self.assertEqual(tbr_model.groups.treatment, self.groups.treatment)
self.assertEqual(tbr_model.periods.cooldown, self.periods.cooldown)
def testAnalysisDataGenerated(self):
"""Checks whether the salesandcost example data is available."""
# Fully set up a TBR object.
tbr_model = tbr.TBR()
target = self.key_response
tbr_model.fit(self.data,
target,
key_response=self.key_response,
key_cost=self.key_cost,
key_group=self.key_group,
key_period=self.key_period,
key_date=self.key_date)
constructed_cols = set(tbr_model.analysis_data.keys())
correct_cols = {target, self.key_period}
self.assertCountEqual(constructed_cols, correct_cols)
def testPeriodIndexFailsWithEmptyPeriods(self):
"""Tests making a period index for an empty iterable raises a ValueError."""
# Fully set up a TBR object.
tbr_model = tbr.TBR()
target = self.key_response
tbr_model.fit(self.data,
target,
key_response=self.key_response,
key_cost=self.key_cost,
key_group=self.key_group,
key_period=self.key_period,
key_date=self.key_date)
with self.assertRaises(ValueError):
tbr_model._make_period_index([])
def testPeriodIndexWorksForZero(self):
"""Tests making a period index for an empty iterable raises a ValueError."""
# Fully set up a TBR object.
tbr_model = tbr.TBR()
target = self.key_response
tbr_model.fit(self.data,
target,
key_response=self.key_response,
key_cost=self.key_cost,
key_group=self.key_group,
key_period=self.key_period,
key_date=self.key_date)
num_in_period = sum(tbr_model.analysis_data[self.key_period] == 0)
index_count = sum(tbr_model._make_period_index(0))
self.assertEqual(index_count, num_in_period)
def testResponseModelCorrect(self):
"""Tests whether model for response has correct coefficients."""
# Fully set up a TBR object.
tbr_model = tbr.TBR()
target = self.key_response
tbr_model.fit(self.data,
target,
key_response=self.key_response,
key_cost=self.key_cost,
key_group=self.key_group,
key_period=self.key_period,
key_date=self.key_date)
# Extract slope coefficient from python model.
response_coef_py = tbr_model.pre_period_model.params[1]
# Slope coefficient under the R package.
response_coef_r = 0.9997001
self.assertAlmostEqual(response_coef_py, response_coef_r)
def testCausalCumulativePeriods(self):
"""Tests whether model for response has correct coefficients."""
# Fully set up a TBR object.
tbr_model = tbr.TBR()
target = self.key_response
# Engineer some 'causal' costs in the cooldown period.
data = self.data.copy()
cool_index = data[self.key_period] == 2
treat_index = data[self.key_group] == 2
data.loc[(cool_index & treat_index), target] += 100.0
tbr_model.fit(data, target,
key_response=self.key_response,
key_cost=self.key_cost,
key_group=self.key_group,
key_period=self.key_period,
key_date=self.key_date)
dist_test = tbr_model.causal_cumulative_distribution(periods=(1))
dist_cool = tbr_model.causal_cumulative_distribution(periods=(1, 2))
val_test = dist_test.mean()[-1]
val_cool = dist_cool.mean()[-1]
self.assertLessEqual(val_test, val_cool)
if __name__ == '__main__':
unittest.main()
| unioslo/cerebrum | refs/heads/master | Cerebrum/modules/event_publisher/scim.py | 1 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2016-2017 University of Oslo, Norway
#
# This file is part of Cerebrum.
#
# Cerebrum is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Cerebrum is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cerebrum; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
""" Implementation of SCIM messages.
https://tools.ietf.org/html/draft-hunt-idevent-scim-00#section-2.2
"""
from __future__ import absolute_import
import calendar
import datetime
import uuid
import six
from Cerebrum.config.configuration import (Configuration,
ConfigDescriptor,
Namespace)
from Cerebrum.config.settings import String
class EntityTypeToApiRouteMapConfig(Configuration):
"""Configuration for Entity Type -> API Route"""
entity = ConfigDescriptor(
String,
default=u'entities',
doc=u'API Route for entities')
person = ConfigDescriptor(
String,
default=u'persons',
doc=u'API Route for person entities')
account = ConfigDescriptor(
String,
default=u'accounts',
doc=u'API Route for account entities')
group = ConfigDescriptor(
String,
default=u'groups',
doc=u'API Route for group entities')
ou = ConfigDescriptor(
String,
default=u'ous',
doc=u'API Route for OU entities')
class ScimFormatterConfig(Configuration):
"""Configuration for scim events"""
issuer = ConfigDescriptor(
String,
default=u'cerebrum',
doc=u'Issuer field in scim')
urltemplate = ConfigDescriptor(
String,
default=u'https://cerebrum.example.com/v1/{entity_type}/{entity_id}',
doc=u'Format string for URL (use {entity_type} and {entity_id} as '
u'placeholders)')
keytemplate = ConfigDescriptor(
String,
default=u'no.uio.cerebrum.scim.{entity_type}.{event}',
doc=(u'Format string for routing key (use {entity_type} and {event} '
u'as placeholders)'))
entity_type_map = ConfigDescriptor(
Namespace,
config=EntityTypeToApiRouteMapConfig)
uri_prefix = ConfigDescriptor(
String,
default=u'urn:ietf:params:event:SCIM',
doc=u'Default URI Prefix for SCIM-events'
)
class ScimFormatter(object):
def __init__(self, config=None):
self.config = config or ScimFormatterConfig()
@staticmethod
def make_timestamp(dt_object=None):
""" Make a timestamp from a datetime object. """
if dt_object is None:
dt_object = datetime.datetime.utcnow()
return int(calendar.timegm(dt_object.utctimetuple()))
def get_entity_type_route(self, entity_type):
""" Get the API route for the given entity type. """
default = self.config.entity_type_map.entity
return getattr(self.config.entity_type_map, entity_type, default)
def build_url(self, entity_type, entity_id):
return self.config.urltemplate.format(entity_type=entity_type,
entity_id=entity_id)
def get_uri(self, action):
""" Format an uri for the message. """
return '{}:{}'.format(self.config.uri_prefix, action)
def get_key(self, entity_type, event):
return self.config.keytemplate.format(entity_type=entity_type,
event=event)
class EventScimFormatter(ScimFormatter):
""" Generate SCIM payload from Event objects. """
def __init__(self, config=None):
super(EventScimFormatter, self).__init__(config)
def get_entity_type(self, entity_ref):
""" Get and translate the entity_type of an EntityRef. """
return super(EventScimFormatter, self).get_entity_type_route(
entity_ref.entity_type
)
@staticmethod
def get_entity_id(entity_ref):
""" Get and translate the entity_id of an EntityRef. """
if entity_ref.entity_type in ('account', 'group'):
return entity_ref.ident
return six.text_type(entity_ref.entity_id)
def get_url(self, entity_ref):
""" Format an url to the EntityRef. """
entity_type = self.get_entity_type(entity_ref)
entity_id = self.get_entity_id(entity_ref)
return self.build_url(entity_type, entity_id)
def get_key(self, event_type, entity_ref):
""" Format a event key from the Event and EntityRef. """
entity_type = self.get_entity_type(entity_ref)
return super(EventScimFormatter, self).get_key(
entity_type=entity_type,
event=event_type.verb)
def __call__(self, event):
"""Create and return payload as jsonable dict."""
jti = six.text_type(uuid.uuid4())
event_uri = self.get_uri(event.event_type.verb)
issued_at = self.make_timestamp(event.timestamp)
issuer = self.config.issuer
audience = event.context
subject = self.get_url(event.subject)
payload = {
'jti': jti,
'eventUris': [event_uri, ],
'iat': issued_at,
'iss': issuer,
'aud': list(audience),
'sub': subject,
}
if event.attributes:
payload.setdefault(
event_uri,
dict())['attributes'] = list(event.attributes)
if event.objects:
payload.setdefault(
event_uri,
dict())['object'] = [self.get_url(o) for o in event.objects]
if event.scheduled is not None:
# assume datetime.datetime, although mx.DateTime will also work
# .strftime('%s') is not official and it will not work in Windows
payload['nbf'] = self.make_timestamp(event.scheduled)
payload['resourceType'] = self.get_entity_type(event.subject)
return payload
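# For orientation, a formatter call yields a payload shaped roughly like this
# (values are invented; the URI suffix depends on the event verb):
#
#     {
#         'jti': 'e3b0c442-98fc-...',
#         'eventUris': ['urn:ietf:params:event:SCIM:create'],
#         'iat': 1483228800,
#         'iss': 'cerebrum',
#         'aud': ['some-consumer'],
#         'sub': 'https://cerebrum.example.com/v1/persons/42',
#         'resourceType': 'persons',
#     }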
| ahotam/micropython | refs/heads/master | py/makeversionhdr.py | 17 |
"""
Generate header file with macros defining MicroPython version info.
This script works with Python 2.6, 2.7, 3.3 and 3.4.
"""
from __future__ import print_function
import sys
import os
import datetime
import subprocess
def get_version_info_from_git():
# Python 2.6 doesn't have check_output, so check for that
try:
subprocess.check_output
subprocess.check_call
except AttributeError:
return None
# Note: git describe doesn't work if no tag is available
try:
git_tag = subprocess.check_output(["git", "describe", "--dirty", "--always"], stderr=subprocess.STDOUT, universal_newlines=True).strip()
except subprocess.CalledProcessError as er:
if er.returncode == 128:
# git exit code of 128 means no repository found
return None
git_tag = ""
except OSError:
return None
try:
git_hash = subprocess.check_output(["git", "rev-parse", "--short", "HEAD"], stderr=subprocess.STDOUT, universal_newlines=True).strip()
except subprocess.CalledProcessError:
git_hash = "unknown"
except OSError:
return None
try:
# Check if there are any modified files.
subprocess.check_call(["git", "diff", "--no-ext-diff", "--quiet", "--exit-code"], stderr=subprocess.STDOUT)
# Check if there are any staged files.
subprocess.check_call(["git", "diff-index", "--cached", "--quiet", "HEAD", "--"], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
git_hash += "-dirty"
except OSError:
return None
# Try to extract MicroPython version from git tag
if git_tag.startswith("v"):
ver = git_tag[1:].split("-")[0].split(".")
if len(ver) == 2:
ver.append("0")
else:
ver = ["0", "0", "1"]
return git_tag, git_hash, ver
def get_version_info_from_docs_conf():
with open("%s/docs/conf.py" % sys.argv[0].rsplit("/", 2)[0]) as f:
for line in f:
if line.startswith("release = '"):
ver = line.strip()[10:].strip("'")
git_tag = "v" + ver
ver = ver.split(".")
if len(ver) == 2:
ver.append("0")
return git_tag, "<no hash>", ver
return None
def make_version_header(filename):
# Get version info using git, with fallback to docs/conf.py
info = get_version_info_from_git()
if info is None:
info = get_version_info_from_docs_conf()
git_tag, git_hash, ver = info
# Generate the file with the git and version info
file_data = """\
// This file was generated by py/makeversionhdr.py
#define MICROPY_GIT_TAG "%s"
#define MICROPY_GIT_HASH "%s"
#define MICROPY_BUILD_DATE "%s"
#define MICROPY_VERSION_MAJOR (%s)
#define MICROPY_VERSION_MINOR (%s)
#define MICROPY_VERSION_MICRO (%s)
#define MICROPY_VERSION_STRING "%s.%s.%s"
""" % (git_tag, git_hash, datetime.date.today().strftime("%Y-%m-%d"),
ver[0], ver[1], ver[2], ver[0], ver[1], ver[2])
# Check if the file contents changed from last time
write_file = True
if os.path.isfile(filename):
with open(filename, 'r') as f:
existing_data = f.read()
if existing_data == file_data:
write_file = False
# Only write the file if we need to
if write_file:
print("Generating %s" % filename)
with open(filename, 'w') as f:
f.write(file_data)
if __name__ == "__main__":
make_version_header(sys.argv[1])
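# Typical invocation from the MicroPython build (the output path below is the
# conventional one, given only as an example):
#
#     python py/makeversionhdr.py build/genhdr/mpversion.h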
| SimonSapin/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/third_party/pytest/bench/bench.py | 35 |
import sys
if __name__ == "__main__":
import cProfile
import pytest # NOQA
import pstats
script = sys.argv[1:] if len(sys.argv) > 1 else "empty.py"
stats = cProfile.run("pytest.cmdline.main(%r)" % script, "prof")
p = pstats.Stats("prof")
p.strip_dirs()
p.sort_stats("cumulative")
print(p.print_stats(500))
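# Run as, e.g. (the test-file argument is whatever should be profiled;
# "empty.py" is the default used above):
#
#     python bench.py
#     python bench.py path/to/test_file.py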
| Zhongqilong/kbengine | refs/heads/master | kbe/res/scripts/common/Lib/distutils/config.py | 91 |
"""distutils.pypirc
Provides the PyPIRCCommand class, the base class for the command classes
that uses .pypirc in the distutils.command package.
"""
import os
from configparser import ConfigParser
from distutils.cmd import Command
DEFAULT_PYPIRC = """\
[distutils]
index-servers =
pypi
[pypi]
username:%s
password:%s
"""
class PyPIRCCommand(Command):
"""Base command that knows how to handle the .pypirc file
"""
DEFAULT_REPOSITORY = 'https://pypi.python.org/pypi'
DEFAULT_REALM = 'pypi'
repository = None
realm = None
user_options = [
('repository=', 'r',
"url of repository [default: %s]" % \
DEFAULT_REPOSITORY),
('show-response', None,
'display full response text from server')]
boolean_options = ['show-response']
def _get_rc_file(self):
"""Returns rc file path."""
return os.path.join(os.path.expanduser('~'), '.pypirc')
def _store_pypirc(self, username, password):
"""Creates a default .pypirc file."""
rc = self._get_rc_file()
with os.fdopen(os.open(rc, os.O_CREAT | os.O_WRONLY, 0o600), 'w') as f:
f.write(DEFAULT_PYPIRC % (username, password))
def _read_pypirc(self):
"""Reads the .pypirc file."""
rc = self._get_rc_file()
if os.path.exists(rc):
self.announce('Using PyPI login from %s' % rc)
repository = self.repository or self.DEFAULT_REPOSITORY
realm = self.realm or self.DEFAULT_REALM
config = ConfigParser()
config.read(rc)
sections = config.sections()
if 'distutils' in sections:
# let's get the list of servers
index_servers = config.get('distutils', 'index-servers')
_servers = [server.strip() for server in
index_servers.split('\n')
if server.strip() != '']
if _servers == []:
# nothing set, let's try to get the default pypi
if 'pypi' in sections:
_servers = ['pypi']
else:
# the file is not properly defined, returning
# an empty dict
return {}
for server in _servers:
current = {'server': server}
current['username'] = config.get(server, 'username')
# optional params
for key, default in (('repository',
self.DEFAULT_REPOSITORY),
('realm', self.DEFAULT_REALM),
('password', None)):
if config.has_option(server, key):
current[key] = config.get(server, key)
else:
current[key] = default
# work around people having "repository" for the "pypi"
# section of their config set to the HTTP (rather than
# HTTPS) URL
if (server == 'pypi' and
repository in (self.DEFAULT_REPOSITORY, 'pypi')):
current['repository'] = self.DEFAULT_REPOSITORY
return current
if (current['server'] == repository or
current['repository'] == repository):
return current
elif 'server-login' in sections:
# old format
server = 'server-login'
if config.has_option(server, 'repository'):
repository = config.get(server, 'repository')
else:
repository = self.DEFAULT_REPOSITORY
return {'username': config.get(server, 'username'),
'password': config.get(server, 'password'),
'repository': repository,
'server': server,
'realm': self.DEFAULT_REALM}
return {}
def _read_pypi_response(self, response):
"""Read and decode a PyPI HTTP response."""
import cgi
content_type = response.getheader('content-type', 'text/plain')
encoding = cgi.parse_header(content_type)[1].get('charset', 'ascii')
return response.read().decode(encoding)
def initialize_options(self):
"""Initialize options."""
self.repository = None
self.realm = None
self.show_response = 0
def finalize_options(self):
"""Finalizes options."""
if self.repository is None:
self.repository = self.DEFAULT_REPOSITORY
if self.realm is None:
self.realm = self.DEFAULT_REALM
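# For reference, the file parsed by _read_pypirc looks like this (a sketch of
# the multi-server format; names and values are placeholders):
#
#     [distutils]
#     index-servers =
#         pypi
#         internal
#
#     [pypi]
#     username: user
#     password: secret
#
#     [internal]
#     repository: https://pypi.example.com
#     username: user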
| vonnenaut/realmofreckoning | refs/heads/master | character.py | 2 |
"""
Character stores information pertaining to the player. It is imported by Realm.py and deals with player attributes, inventory and coordinates.
"""
class Character(object):
""" Represents the player with methods for inventory management, searching areas, generating narrative, moving and dying. """
def __init__(self, sex, name, hp, stam, mp, gld, inv, coords):
self._sex = sex
self._name = name
self.hp = hp
self.stamina = stam
self.mp = mp
self.gold = gld
self.inventory = inv
self.newplayer = True
self._max_inv_size = 5
self.coords = list(coords)  # start at the supplied coordinates
def __str__(self):
return "\nPlayer attributes for " + str(self._name) + ":\nsex: " + str(self._sex) + "\nhit points: " + str(self.hp) + "\nstamina: " + str(self.stamina) + "\nmagic points: " + str(self.mp) + "\ngold: " + str(self.gold) + "\ninventory items: " + str(self.inventory) + "\nNew player? " + str(self.newplayer) + "\nMax # inventory items: " + str(self._max_inv_size) + "\nLocation: " + str(self.get_coords())
def set_coords(self, coords):
""" sets the player's coordinates in the Realm """
self.coords[0] = coords[0]
self.coords[1] = coords[1]
def get_coord(self, element):
""" returns the player's specified coordinate, x or y, in the Realm """
return self.coords[element]
def get_coords(self):
""" returns the player's coordinates, (x, y) in the Realm """
return self.coords
def attrib_list(self):
print(self)
def get_inventory(self):
return self.inventory
def add_to_inventory(self, item):
""" adds an item to player's inventory if inventory isn't yet full; otherwise returns a boolean indicating success (True) or failure (False) """
if len(self.inventory) < self._max_inv_size:
self.get_inventory().append(item)
return True
else:
return False
def teleport(self, coords):
""" move character to the specified coordinates. """
self.coords = coords
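# Example use (a sketch; the attribute values are arbitrary):
#
#     player = Character('f', 'Aria', hp=20, stam=10, mp=5, gld=30,
#                        inv=['dagger'], coords=[0, 0])
#     player.add_to_inventory('rope')   # True while fewer than 5 items held
#     player.teleport([2, 3])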
| nimasmi/wagtail | refs/heads/master | wagtail/admin/api/filters.py | 10 |
from rest_framework.filters import BaseFilterBackend
from wagtail.api.v2.utils import BadRequestError, parse_boolean
from wagtail.core import hooks
from wagtail.core.models import UserPagePermissionsProxy
class HasChildrenFilter(BaseFilterBackend):
"""
Filters the queryset by checking if the pages have children or not.
This is useful when you want to get just the branches or just the leaves.
"""
def filter_queryset(self, request, queryset, view):
if 'has_children' in request.GET:
try:
has_children_filter = parse_boolean(request.GET['has_children'])
except ValueError:
raise BadRequestError("has_children must be 'true' or 'false'")
if has_children_filter is True:
return queryset.filter(numchild__gt=0)
else:
return queryset.filter(numchild=0)
return queryset
class ForExplorerFilter(BaseFilterBackend):
def filter_queryset(self, request, queryset, view):
if request.GET.get('for_explorer'):
if not hasattr(queryset, '_filtered_by_child_of'):
raise BadRequestError("filtering by for_explorer without child_of is not supported")
parent_page = queryset._filtered_by_child_of
for hook in hooks.get_hooks('construct_explorer_page_queryset'):
queryset = hook(parent_page, queryset, request)
user_perms = UserPagePermissionsProxy(request.user)
queryset = queryset & user_perms.explorable_pages()
return queryset
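# Example requests these backends act on (the URL prefix is illustrative):
#
#     GET .../pages/?has_children=true
#     GET .../pages/?child_of=5&for_explorer=1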
| flodolo/bedrock | refs/heads/master | bedrock/firefox/templatetags/helpers.py | 2 |
from django.conf import settings
import jinja2
from django.template.loader import render_to_string
from django_jinja import library
from bedrock.firefox.firefox_details import firefox_desktop, firefox_android, firefox_ios
from bedrock.base.urlresolvers import reverse
from lib.l10n_utils import get_locale
def desktop_builds(channel, builds=None, locale=None, force_direct=False,
force_full_installer=False, force_funnelcake=False,
funnelcake_id=False, locale_in_transition=False, classified=False):
builds = builds or []
l_version = firefox_desktop.latest_builds(locale, channel)
# Developer Edition is now based on the Beta channel, so the build list
# should be generated from the Beta locales.
if channel == 'alpha':
l_version = firefox_desktop.latest_builds(locale, 'beta')
if l_version:
version, platforms = l_version
else:
locale = 'en-US'
version, platforms = firefox_desktop.latest_builds('en-US', channel)
for plat_os, plat_os_pretty in firefox_desktop.platforms(channel, classified):
os_pretty = plat_os_pretty
# Firefox Nightly: The Windows stub installer is now universal,
# automatically detecting a 32-bit and 64-bit desktop, so the
# win64-specific entry can be skipped.
if channel == 'nightly':
if plat_os == 'win':
continue
if plat_os == 'win64':
plat_os = 'win'
os_pretty = 'Windows 32/64-bit'
# And generate all the info
download_link = firefox_desktop.get_download_url(
channel, version, plat_os, locale,
force_direct=force_direct,
force_full_installer=force_full_installer,
force_funnelcake=force_funnelcake,
funnelcake_id=funnelcake_id,
locale_in_transition=locale_in_transition,
)
# If download_link_direct is False the data-direct-link attr
# will not be output, and the JS won't attempt the IE popup.
if force_direct:
# no need to run get_download_url again with the same args
download_link_direct = False
else:
download_link_direct = firefox_desktop.get_download_url(
channel, version, plat_os, locale,
force_direct=True,
force_full_installer=force_full_installer,
force_funnelcake=force_funnelcake,
funnelcake_id=funnelcake_id,
)
if download_link_direct == download_link:
download_link_direct = False
builds.append({'os': plat_os,
'os_pretty': os_pretty,
'download_link': download_link,
'download_link_direct': download_link_direct})
return builds
def android_builds(channel, builds=None):
builds = builds or []
link = firefox_android.get_download_url(channel.lower())
builds.append({'os': 'android',
'os_pretty': 'Android',
'download_link': link})
return builds
def ios_builds(channel, builds=None):
builds = builds or []
link = firefox_ios.get_download_url(channel)
builds.append({'os': 'ios',
'os_pretty': 'iOS',
'download_link': link})
return builds
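# --- Hedged sketch (not part of the original module) ---
# Each *_builds() helper above appends dicts of the same shape; the values
# below are illustrative placeholders, not real URLs:
#   {'os': 'android',
#    'os_pretty': 'Android',
#    'download_link': 'https://example.invalid/download'}
# Desktop entries additionally carry 'download_link_direct' (a direct URL,
# or False when it would merely duplicate 'download_link').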
@library.global_function
@jinja2.contextfunction
def download_firefox(ctx, channel='release', platform='all',
dom_id=None, locale=None, force_direct=False,
force_full_installer=False, force_funnelcake=False,
alt_copy=None, button_color='button-green',
locale_in_transition=False, download_location=None):
""" Output a "download firefox" button.
:param ctx: context from calling template.
:param channel: name of channel: 'release', 'beta', 'alpha', or 'nightly'.
:param platform: Target platform: 'desktop', 'android', 'ios', or 'all'.
:param dom_id: Use this string as the id attr on the element.
:param locale: The locale of the download. Default to locale of request.
:param force_direct: Force the download URL to be direct.
:param force_full_installer: Force the installer download to not be
the stub installer (for aurora).
:param force_funnelcake: Force the download version for en-US Windows to be
'latest', which bouncer will translate to the funnelcake build.
:param alt_copy: Specifies alternate copy to use for download buttons.
:param button_color: Color of download button. Default to 'button-green'.
:param locale_in_transition: Include the page locale in transitional download link.
:param download_location: Specify the location of download button for
GA reporting: 'primary cta', 'nav', 'sub nav', or 'other'.
"""
show_desktop = platform in ['all', 'desktop']
show_android = platform in ['all', 'android']
show_ios = platform in ['all', 'ios']
alt_channel = '' if channel == 'release' else channel
locale = locale or get_locale(ctx['request'])
funnelcake_id = ctx.get('funnelcake_id', False)
dom_id = dom_id or 'download-button-%s-%s' % (
'desktop' if platform == 'all' else platform, channel)
# Gather data about the build for each platform
builds = []
if show_desktop:
version = firefox_desktop.latest_version(channel)
builds = desktop_builds(channel, builds, locale, force_direct,
force_full_installer, force_funnelcake,
funnelcake_id, locale_in_transition)
if show_android:
version = firefox_android.latest_version(channel)
builds = android_builds(channel, builds)
if show_ios:
version = firefox_ios.latest_version(channel)
builds.append({'os': 'ios',
'os_pretty': 'iOS',
'download_link': firefox_ios.get_download_url()})
# Get the native name for current locale
langs = firefox_desktop.languages
locale_name = langs[locale]['native'] if locale in langs else locale
data = {
'locale_name': locale_name,
'version': version,
'product': 'firefox-%s' % platform,
'builds': builds,
'id': dom_id,
'channel': alt_channel,
'show_desktop': show_desktop,
'show_android': show_android,
'show_ios': show_ios,
'alt_copy': alt_copy,
'button_color': button_color,
'download_location': download_location
}
html = render_to_string('firefox/includes/download-button.html', data,
request=ctx['request'])
return jinja2.Markup(html)
@library.global_function
@jinja2.contextfunction
def download_firefox_desktop_list(ctx, channel='release', dom_id=None, locale=None,
force_full_installer=False):
"""
Return a HTML list of platform download links for Firefox desktop
:param channel: name of channel: 'release', 'beta', 'alpha' or 'nightly'.
:param dom_id: Use this string as the id attr on the element.
:param locale: The locale of the download. Default to locale of request.
:param force_full_installer: Force the installer download to not be
the stub installer (for aurora).
"""
dom_id = dom_id or 'download-platform-list-%s' % (channel)
locale = locale or get_locale(ctx['request'])
    # Don't pass funnelcake_id: funnelcake builds are often Windows-only.
builds = desktop_builds(channel, None, locale, True, force_full_installer,
False, False, False, True)
recommended_builds = []
traditional_builds = []
for plat in builds:
# Add 32-bit label for Windows and Linux builds.
if channel != 'nightly':
if plat['os'] == 'win':
plat['os_pretty'] = 'Windows 32-bit'
if plat['os'] == 'linux':
plat['os_pretty'] = 'Linux 32-bit'
        if (plat['os'] in firefox_desktop.platform_classification['recommended'] or
                (channel == 'nightly' and plat['os'] == 'win')):
recommended_builds.append(plat)
else:
traditional_builds.append(plat)
data = {
'id': dom_id,
'builds': {
'recommended': recommended_builds,
'traditional': traditional_builds,
},
}
html = render_to_string('firefox/includes/download-list.html', data,
request=ctx['request'])
return jinja2.Markup(html)
@library.global_function
def firefox_url(platform, page, channel=None):
"""
Return a product-related URL like /firefox/all/ or /mobile/beta/notes/.
Examples
========
In Template
-----------
{{ firefox_url('desktop', 'all', 'organizations') }}
{{ firefox_url('desktop', 'sysreq', channel) }}
{{ firefox_url('android', 'notes') }}
"""
kwargs = {}
anchor = None
# Tweak the channel name for the naming URL pattern in urls.py
if channel == 'release':
channel = None
if channel == 'alpha':
if platform == 'desktop':
channel = 'developer'
if platform == 'android':
channel = 'aurora'
if channel == 'esr':
channel = 'organizations'
# There is now only one /all page URL - issue 8096
if page == 'all':
if platform == 'desktop':
if channel == 'beta':
anchor = 'product-desktop-beta'
elif channel == 'developer':
anchor = 'product-desktop-developer'
elif channel == 'nightly':
anchor = 'product-desktop-nightly'
elif channel == 'organizations':
anchor = 'product-desktop-esr'
else:
anchor = 'product-desktop-release'
elif platform == 'android':
if channel == 'beta':
anchor = 'product-android-beta'
elif channel == 'nightly':
anchor = 'product-android-nightly'
else:
anchor = 'product-android-release'
else:
if channel:
kwargs['channel'] = channel
if platform != 'desktop':
kwargs['platform'] = platform
# Firefox for Android and iOS have the system requirements page on SUMO
if platform in ['android', 'ios'] and page == 'sysreq':
return settings.FIREFOX_MOBILE_SYSREQ_URL
anchor = '#' + anchor if anchor else ''
return reverse(f'firefox.{page}', kwargs=kwargs) + anchor
|
NervanaSystems/coach
|
refs/heads/master
|
rl_coach/tests/exploration_policies/test_greedy.py
|
1
|
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
import pytest
from rl_coach.spaces import DiscreteActionSpace, BoxActionSpace
from rl_coach.exploration_policies.greedy import Greedy
import numpy as np
@pytest.mark.unit_test
def test_get_action():
# discrete control
action_space = DiscreteActionSpace(3)
policy = Greedy(action_space)
best_action, _ = policy.get_action(np.array([10, 20, 30]))
assert best_action == 2
# continuous control
action_space = BoxActionSpace(np.array([10]))
policy = Greedy(action_space)
best_action = policy.get_action(np.array([1, 1, 1]))
assert np.all(best_action == np.array([1, 1, 1]))
@pytest.mark.unit_test
def test_get_control_param():
action_space = DiscreteActionSpace(3)
policy = Greedy(action_space)
assert policy.get_control_param() == 0
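# --- Hedged note (not part of the original tests) ---
# For a discrete space, Greedy reduces to an argmax over the action values,
# which is what the first assertion above exercises:
#   np.argmax(np.array([10, 20, 30])) == 2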
|
pk-sam/crosswalk-test-suite
|
refs/heads/master
|
tools/xml/xmlverifier.py
|
3
|
#!/usr/bin/python
#encoding:utf-8
# Copyright (c) 2014 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Liu, xin <xinx.liu@intel.com>
import os
import csv
import re
import sys
import platform
import logging
import logging.handlers
from xml.etree import ElementTree
LOG = None
LOG_LEVEL = logging.DEBUG
class ColorFormatter(logging.Formatter):
def __init__(self, msg):
logging.Formatter.__init__(self, msg)
def format(self, record):
red, green, yellow, blue = range(4)
colors = {'INFO': green, 'DEBUG': blue,
'WARNING': yellow, 'ERROR': red}
msg = record.msg
if msg[0] == "+":
msg = "\33[01m" + msg[1:] + "\033[0m"
elif msg[0] == "=":
msg = "\33[07m" + msg + "\033[0m"
levelname = record.levelname
if levelname in colors:
msg_color = "\033[0;%dm" % (
31 + colors[levelname]) + msg + "\033[0m"
record.msg = msg_color
return logging.Formatter.format(self, record)
def verify_xml(xml_dir, split_sign):
    if not os.path.isdir(xml_dir):
        if not os.path.isfile(xml_dir):
            LOG.error("%s is neither a directory nor a file" % xml_dir)
            return
        else:
            name, ext = os.path.splitext(xml_dir)
            if not ext == '.xml':
                print '%s is not an XML file' % xml_dir
                return
            else:
                verify_path(xml_dir, split_sign)
                return
    for path, dirs, files in os.walk(xml_dir):
        for filename in files:
            if filename == "tests.full.xml":
                verify_path(path + split_sign + filename, split_sign)
def verify_path(xml_path, split_sign):
LOG.info("+Verify xml: " + xml_path)
try:
root_node = ElementTree.parse(xml_path)
except Exception as e:
LOG.error("xml parse error")
return False
suite_node = root_node.find('suite')
set_nodes = suite_node.findall('set')
id_list = []
purpose_list = []
set_type = ['js', 'wrt', 'ref', 'qunit', 'script', 'pyunit', 'androidunit']
for set_node in set_nodes:
try:
if set_node.attrib['type'] not in set_type:
LOG.info("set wrong type: " + set_node.attrib['name'])
break
except Exception as e:
LOG.error("set no type: " + set_node.attrib['name'])
return False
if set_node.attrib['type'] == 'script':
break
case_nodes = set_node.findall('testcase')
        for case_node in case_nodes:
            check_path = os.path.dirname(xml_path)
            casepath = case_node.find('description/test_script_entry').text
            if casepath is None:
                break
            id_list.append(case_node.attrib['id'])
            purpose_list.append(case_node.attrib['purpose'])
            arraypath = casepath.split('?')[0].split(split_sign)
            if len(arraypath) < 3:
                break
            if arraypath.count('http:') > 0:
                del arraypath[0:5]
            else:
                del arraypath[0:3]
            for i in range(len(arraypath)):
                check_path += split_sign + arraypath[i]
            if not os.path.exists(check_path):
                LOG.info("path not found: " + check_path)
temp_array = []
for xid in range(len(id_list)):
if id_list.count(id_list[xid]) > 1 and id_list[xid] not in temp_array:
LOG.info(str(id_list.count(id_list[xid])) + " same id : " + id_list[xid])
temp_array.append(id_list[xid])
del temp_array[:]
for xpurpose in range(len(purpose_list)):
if purpose_list.count(purpose_list[xpurpose]) > 1 and purpose_list[xpurpose] not in temp_array:
LOG.info(str(purpose_list.count(purpose_list[xpurpose])) + " same purpose: " + purpose_list[xpurpose])
temp_array.append(purpose_list[xpurpose])
del temp_array[:]
LOG.info("===Verify case path, id and purpose finish===")
def echo_about():
"""
This function will print the user guide and stop toolkit.
"""
about = 'xmlverifier V1.0\n-v <path> | Verify case path, id, purpose and set type are right\n'
print about
sys.exit()
def main():
"""
    main function dispatches to the appropriate handler according to the command line arguments passed to the toolkit.
"""
global LOG
LOG = logging.getLogger("pack-tool")
LOG.setLevel(LOG_LEVEL)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(LOG_LEVEL)
stream_formatter = ColorFormatter("[%(asctime)s] %(message)s")
stream_handler.setFormatter(stream_formatter)
LOG.addHandler(stream_handler)
    sys_name = platform.system()
    if sys_name == 'Windows':
        split_sign = '\\'
    elif sys_name == 'Linux':
        split_sign = '/'
    else:
        split_sign = os.sep
    if len(sys.argv) != 3:
        print 'Error: Not enough arguments!'
        echo_about()
    else:
        {'-v': lambda : verify_xml(sys.argv[2], split_sign)}[sys.argv[1]]()
if __name__ == '__main__':
main()
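# --- Hedged usage note (not part of the original script) ---
# Invocation sketch; the path below is a placeholder:
#   python xmlverifier.py -v /path/to/tests.full.xml
# '-v' is the only switch the dispatch table in main() understands
# (see echo_about() above).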
|
dims/cinder
|
refs/heads/master
|
cinder/tests/unit/keymgr/mock_key_mgr.py
|
6
|
# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A mock implementation of a key manager that stores keys in a dictionary.
This key manager implementation is primarily intended for testing. In
particular, it does not store keys persistently. Lack of a centralized key
store also makes this implementation unsuitable for use among different
services.
Note: Instantiating this class multiple times will create separate key stores.
Keys created in one instance will not be accessible from other instances of
this class.
"""
import array
import binascii
import uuid
from cinder import exception
from cinder.keymgr import key
from cinder.keymgr import key_mgr
from cinder.volume import utils
class MockKeyManager(key_mgr.KeyManager):
"""Mocking manager for integration tests.
This mock key manager implementation supports all the methods specified
by the key manager interface. This implementation stores keys within a
dictionary, and as a result, it is not acceptable for use across different
services. Side effects (e.g., raising exceptions) for each method are
handled as specified by the key manager interface.
This key manager is not suitable for use in production deployments.
"""
def __init__(self):
self.keys = {}
def _generate_hex_key(self, **kwargs):
key_length = kwargs.get('key_length', 256)
# hex digit => 4 bits
hex_encoded = utils.generate_password(length=key_length // 4,
symbolgroups='0123456789ABCDEF')
return hex_encoded
def _generate_key(self, **kwargs):
_hex = self._generate_hex_key(**kwargs)
key_bytes = array.array('B', binascii.unhexlify(_hex)).tolist()
return key.SymmetricKey('AES', key_bytes)
def create_key(self, ctxt, **kwargs):
"""Creates a key.
This implementation returns a UUID for the created key. A
NotAuthorized exception is raised if the specified context is None.
"""
if ctxt is None:
raise exception.NotAuthorized()
key = self._generate_key(**kwargs)
return self.store_key(ctxt, key)
def _generate_key_id(self):
key_id = str(uuid.uuid4())
while key_id in self.keys:
key_id = str(uuid.uuid4())
return key_id
def store_key(self, ctxt, key, **kwargs):
"""Stores (i.e., registers) a key with the key manager."""
if ctxt is None:
raise exception.NotAuthorized()
key_id = self._generate_key_id()
self.keys[key_id] = key
return key_id
def copy_key(self, ctxt, key_id, **kwargs):
if ctxt is None:
raise exception.NotAuthorized()
copied_key_id = self._generate_key_id()
self.keys[copied_key_id] = self.keys[key_id]
return copied_key_id
def get_key(self, ctxt, key_id, **kwargs):
"""Retrieves the key identified by the specified id.
This implementation returns the key that is associated with the
specified UUID. A NotAuthorized exception is raised if the specified
context is None; a KeyError is raised if the UUID is invalid.
"""
if ctxt is None:
raise exception.NotAuthorized()
return self.keys[key_id]
def delete_key(self, ctxt, key_id, **kwargs):
"""Deletes the key identified by the specified id.
A NotAuthorized exception is raised if the context is None and a
KeyError is raised if the UUID is invalid.
"""
if ctxt is None:
raise exception.NotAuthorized()
del self.keys[key_id]
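# --- Hedged usage sketch (not part of the original module) ---
# Exercises the documented store/get/copy round trip. Any non-None object
# passes the context checks above; a real request context is not required.
if __name__ == '__main__':
    mgr = MockKeyManager()
    ctxt = object()  # stand-in context; None would raise NotAuthorized
    key_id = mgr.create_key(ctxt, key_length=128)
    copied_id = mgr.copy_key(ctxt, key_id)
    # copy_key registers the same key object under a new id
    assert mgr.get_key(ctxt, key_id) is mgr.get_key(ctxt, copied_id)
    mgr.delete_key(ctxt, key_id)
    assert copied_id in mgr.keys and key_id not in mgr.keys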
|
nitzmahone/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/openstack/os_stack.py
|
7
|
#!/usr/bin/python
# coding: utf-8 -*-
# (c) 2016, Mathieu Bultel <mbultel@redhat.com>
# (c) 2016, Steve Baker <sbaker@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_stack
short_description: Add/Remove Heat Stack
extends_documentation_fragment: openstack
version_added: "2.2"
author:
- "Mathieu Bultel (@matbu)"
- "Steve Baker (@steveb)"
description:
    - Add or remove a stack in OpenStack Heat
options:
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
name:
description:
        - Name of the stack that should be created; the name may contain letters and digits but no spaces
required: true
tag:
description:
        - Tag for the stack that should be created; the tag may contain letters and digits but no spaces
version_added: "2.5"
template:
description:
- Path of the template file to use for the stack creation
environment:
description:
- List of environment files that should be used for the stack creation
parameters:
description:
- Dictionary of parameters for the stack creation
rollback:
description:
- Rollback stack creation
type: bool
default: 'yes'
timeout:
description:
- Maximum number of seconds to wait for the stack creation
default: 3600
availability_zone:
description:
- Ignored. Present for backwards compatibility
requirements:
- "python >= 2.7"
- "openstacksdk"
'''
EXAMPLES = '''
---
- name: create stack
ignore_errors: True
register: stack_create
os_stack:
name: "{{ stack_name }}"
tag: "{{ tag_name }}"
state: present
template: "/path/to/my_stack.yaml"
environment:
- /path/to/resource-registry.yaml
- /path/to/environment.yaml
parameters:
bmc_flavor: m1.medium
bmc_image: CentOS
key_name: default
private_net: "{{ private_net_param }}"
node_count: 2
name: undercloud
image: CentOS
my_flavor: m1.large
external_net: "{{ external_net_param }}"
'''
RETURN = '''
id:
description: Stack ID.
type: string
sample: "97a3f543-8136-4570-920e-fd7605c989d6"
returned: always
stack:
description: stack info
type: complex
returned: always
contains:
action:
description: Action, could be Create or Update.
type: string
sample: "CREATE"
creation_time:
description: Time when the action has been made.
type: string
sample: "2016-07-05T17:38:12Z"
description:
description: Description of the Stack provided in the heat template.
type: string
sample: "HOT template to create a new instance and networks"
id:
description: Stack ID.
type: string
sample: "97a3f543-8136-4570-920e-fd7605c989d6"
name:
description: Name of the Stack
type: string
sample: "test-stack"
identifier:
description: Identifier of the current Stack action.
type: string
sample: "test-stack/97a3f543-8136-4570-920e-fd7605c989d6"
links:
description: Links to the current Stack.
type: list of dict
sample: "[{'href': 'http://foo:8004/v1/7f6a/stacks/test-stack/97a3f543-8136-4570-920e-fd7605c989d6']"
outputs:
description: Output returned by the Stack.
type: list of dict
sample: "{'description': 'IP address of server1 in private network',
'output_key': 'server1_private_ip',
'output_value': '10.1.10.103'}"
parameters:
description: Parameters of the current Stack
type: dict
sample: "{'OS::project_id': '7f6a3a3e01164a4eb4eecb2ab7742101',
'OS::stack_id': '97a3f543-8136-4570-920e-fd7605c989d6',
'OS::stack_name': 'test-stack',
'stack_status': 'CREATE_COMPLETE',
'stack_status_reason': 'Stack CREATE completed successfully',
'status': 'COMPLETE',
'template_description': 'HOT template to create a new instance and networks',
'timeout_mins': 60,
'updated_time': null}"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module
from ansible.module_utils._text import to_native
def _create_stack(module, stack, cloud, sdk):
try:
stack = cloud.create_stack(module.params['name'],
tags=module.params['tag'],
template_file=module.params['template'],
environment_files=module.params['environment'],
timeout=module.params['timeout'],
wait=True,
rollback=module.params['rollback'],
**module.params['parameters'])
stack = cloud.get_stack(stack.id, None)
if stack.stack_status == 'CREATE_COMPLETE':
return stack
else:
module.fail_json(msg="Failure in creating stack: {0}".format(stack))
except sdk.exceptions.OpenStackCloudException as e:
if hasattr(e, 'response'):
module.fail_json(msg=to_native(e), response=e.response.json())
else:
module.fail_json(msg=to_native(e))
def _update_stack(module, stack, cloud, sdk):
try:
stack = cloud.update_stack(
module.params['name'],
template_file=module.params['template'],
environment_files=module.params['environment'],
timeout=module.params['timeout'],
rollback=module.params['rollback'],
wait=module.params['wait'],
**module.params['parameters'])
if stack['stack_status'] == 'UPDATE_COMPLETE':
return stack
else:
module.fail_json(msg="Failure in updating stack: %s" %
stack['stack_status_reason'])
except sdk.exceptions.OpenStackCloudException as e:
if hasattr(e, 'response'):
module.fail_json(msg=to_native(e), response=e.response.json())
else:
module.fail_json(msg=to_native(e))
def _system_state_change(module, stack, cloud):
state = module.params['state']
if state == 'present':
if not stack:
return True
if state == 'absent' and stack:
return True
return False
def main():
argument_spec = openstack_full_argument_spec(
name=dict(required=True),
tag=dict(required=False, default=None),
template=dict(default=None),
environment=dict(default=None, type='list'),
parameters=dict(default={}, type='dict'),
rollback=dict(default=False, type='bool'),
timeout=dict(default=3600, type='int'),
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
state = module.params['state']
name = module.params['name']
# Check for required parameters when state == 'present'
if state == 'present':
for p in ['template']:
if not module.params[p]:
module.fail_json(msg='%s required with present state' % p)
sdk, cloud = openstack_cloud_from_module(module)
try:
stack = cloud.get_stack(name)
if module.check_mode:
module.exit_json(changed=_system_state_change(module, stack,
cloud))
if state == 'present':
if not stack:
stack = _create_stack(module, stack, cloud, sdk)
else:
stack = _update_stack(module, stack, cloud, sdk)
changed = True
module.exit_json(changed=changed,
stack=stack,
id=stack.id)
elif state == 'absent':
if not stack:
changed = False
else:
changed = True
if not cloud.delete_stack(name, wait=module.params['wait']):
module.fail_json(msg='delete stack failed for stack: %s' % name)
module.exit_json(changed=changed)
except sdk.exceptions.OpenStackCloudException as e:
module.fail_json(msg=to_native(e))
if __name__ == '__main__':
main()
|
andim27/magiccamp
|
refs/heads/master
|
build/lib/django/template/loaders/eggs.py
|
65
|
# Wrapper for loading templates from eggs via pkg_resources.resource_string.
try:
from pkg_resources import resource_string
except ImportError:
resource_string = None
from django.template import TemplateDoesNotExist
from django.template.loader import BaseLoader
from django.conf import settings
class Loader(BaseLoader):
is_usable = resource_string is not None
def load_template_source(self, template_name, template_dirs=None):
"""
        Loads templates from Python eggs via pkg_resources.resource_string.
For every installed app, it tries to get the resource (app, template_name).
"""
if resource_string is not None:
pkg_name = 'templates/' + template_name
for app in settings.INSTALLED_APPS:
try:
return (resource_string(app, pkg_name).decode(settings.FILE_CHARSET), 'egg:%s:%s' % (app, pkg_name))
                except Exception:
pass
raise TemplateDoesNotExist(template_name)
_loader = Loader()
def load_template_source(template_name, template_dirs=None):
import warnings
warnings.warn(
"'django.template.loaders.eggs.load_template_source' is deprecated; use 'django.template.loaders.eggs.Loader' instead.",
PendingDeprecationWarning
)
return _loader.load_template_source(template_name, template_dirs)
load_template_source.is_usable = resource_string is not None
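# --- Hedged configuration note (not part of the original module) ---
# Sketch of enabling this loader in a project's settings.py (setting name
# as used by Django of this era; the ordering is an example):
#   TEMPLATE_LOADERS = (
#       'django.template.loaders.eggs.Loader',
#       'django.template.loaders.filesystem.Loader',
#   )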
|
mehdidc/scikit-learn
|
refs/heads/master
|
sklearn/neighbors/classification.py
|
18
|
"""Nearest Neighbor Classification"""
# Authors: Jake Vanderplas <vanderplas@astro.washington.edu>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Sparseness support by Lars Buitinck <L.J.Buitinck@uva.nl>
# Multi-output support by Arnaud Joly <a.joly@ulg.ac.be>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from scipy import stats
from ..utils.extmath import weighted_mode
from .base import \
_check_weights, _get_weights, \
NeighborsBase, KNeighborsMixin,\
RadiusNeighborsMixin, SupervisedIntegerMixin
from ..base import ClassifierMixin
from ..utils import check_array
class KNeighborsClassifier(NeighborsBase, KNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing the k-nearest neighbors vote.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`k_neighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default = 'minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
    metric_params : dict, optional (default = None)
additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsClassifier
>>> neigh = KNeighborsClassifier(n_neighbors=3)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsClassifier(...)
>>> print(neigh.predict([[1.1]]))
[0]
>>> print(neigh.predict_proba([[0.9]]))
[[ 0.66666667 0.33333333]]
See also
--------
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
       Regarding the Nearest Neighbors algorithms, if it is found that two
       neighbors, neighbor `k+1` and `k`, have identical distances
       but different labels, the results will depend on the ordering of the
       training data.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5,
weights='uniform', algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, **kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array of shape [n_samples, n_features]
A 2-D array representing the test points.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
if weights is None:
mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
else:
mode, _ = weighted_mode(_y[neigh_ind, k], weights, axis=1)
mode = np.asarray(mode.ravel(), dtype=np.intp)
y_pred[:, k] = classes_k.take(mode)
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
def predict_proba(self, X):
"""Return probability estimates for the test data X.
Parameters
----------
X : array, shape = (n_samples, n_features)
A 2-D array representing the test points.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
of such arrays if n_outputs > 1.
The class probabilities of the input samples. Classes are ordered
by lexicographic order.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
if weights is None:
weights = np.ones_like(neigh_ind)
all_rows = np.arange(X.shape[0])
probabilities = []
for k, classes_k in enumerate(classes_):
pred_labels = _y[:, k][neigh_ind]
proba_k = np.zeros((n_samples, classes_k.size))
# a simple ':' index doesn't work right
for i, idx in enumerate(pred_labels.T): # loop is O(n_neighbors)
proba_k[all_rows, idx] += weights[:, i]
# normalize 'votes' into real [0,1] probabilities
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
probabilities.append(proba_k)
if not self.outputs_2d_:
probabilities = probabilities[0]
return probabilities
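# --- Hedged numeric sketch (not part of the original module) ---
# The vote-normalization step in KNeighborsClassifier.predict_proba above,
# worked on toy numbers:
#   proba_k              = [[2., 1., 1.],   # sample with 4 weighted votes
#                           [0., 0., 0.]]   # sample that collected no votes
#   normalizer           = [[4.], [1.]]     # zero row clamped to 1.0 (no NaN)
#   proba_k / normalizer = [[0.5, 0.25, 0.25], [0., 0., 0.]]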
class RadiusNeighborsClassifier(NeighborsBase, RadiusNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing a vote among neighbors within a given radius
Parameters
----------
radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
outlier_label : int, optional (default = None)
Label, which is given for outlier samples (samples with no
neighbors on given radius).
If set to None, ValueError is raised, when outlier is detected.
    metric_params : dict, optional (default = None)
additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsClassifier
>>> neigh = RadiusNeighborsClassifier(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsClassifier(...)
>>> print(neigh.predict([[1.5]]))
[0]
See also
--------
KNeighborsClassifier
RadiusNeighborsRegressor
KNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30, p=2, metric='minkowski',
outlier_label=None, metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric, p=p, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
self.outlier_label = outlier_label
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array of shape [n_samples, n_features]
A 2-D array representing the test points.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
neigh_dist, neigh_ind = self.radius_neighbors(X)
inliers = [i for i, nind in enumerate(neigh_ind) if len(nind) != 0]
outliers = [i for i, nind in enumerate(neigh_ind) if len(nind) == 0]
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
if self.outlier_label is not None:
neigh_dist[outliers] = 1e-6
elif outliers:
raise ValueError('No neighbors found for test samples %r, '
'you can try using larger radius, '
'give a label for outliers, '
'or consider removing them from your dataset.'
% outliers)
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
pred_labels = np.array([_y[ind, k] for ind in neigh_ind],
dtype=object)
if weights is None:
mode = np.array([stats.mode(pl)[0]
for pl in pred_labels[inliers]], dtype=np.int)
else:
mode = np.array([weighted_mode(pl, w)[0]
for (pl, w)
in zip(pred_labels[inliers], weights)],
dtype=np.int)
mode = mode.ravel()
y_pred[inliers, k] = classes_k.take(mode)
if outliers:
y_pred[outliers, :] = self.outlier_label
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
|
sobomax/libelperiodic
|
refs/heads/master
|
setup.py
|
1
|
#!/usr/bin/env python
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
from distutils.core import Extension
import os
elp_srcs = ['src/periodic.c', 'src/prdic_math.c',
            'src/prdic_fd.c',
            'src/prdic_pfd.c',
            'src/prdic_main_fd.c', 'src/prdic_main_pfd.c',
            'src/prdic_main.c',
            'src/prdic_recfilter.c', 'src/prdic_shmtrig.c',
            'src/prdic_sign.c']
module1 = Extension('_elperiodic', sources = elp_srcs,
                    extra_link_args = ['-Wl,--version-script=src/Symbol.map',])
def get_ex_mod():
if 'NO_PY_EXT' in os.environ:
return None
return [module1]
with open("README.md", "r") as fh:
long_description = fh.read()
kwargs = {
'name':'ElPeriodic',
'version':'1.2',
'description':'Phase-locked userland scheduling library',
'long_description': long_description,
'long_description_content_type': "text/markdown",
'author':'Maksym Sobolyev',
'author_email':'sobomax@gmail.com',
'url':'https://github.com/sobomax/libelperiodic',
'packages':['elperiodic',],
'package_dir':{'elperiodic':'python'},
'ext_modules': get_ex_mod(),
'python_requires': '>=2.7',
'classifiers': [
'License :: OSI Approved :: BSD License',
'Operating System :: POSIX',
'Programming Language :: C',
'Programming Language :: Python'
]
}
if __name__ == '__main__':
setup(**kwargs)
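# --- Hedged usage note (not part of the original file) ---
# Typical local invocations (standard setuptools/distutils commands):
#   python setup.py build              # compiles the _elperiodic C extension
#   NO_PY_EXT=1 python setup.py sdist  # pure-Python dist; get_ex_mod() above
#                                      # returns None when NO_PY_EXT is set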
|
Kryz/sentry
|
refs/heads/master
|
src/sentry/migrations/0112_auto__chg_field_option_value__chg_field_useroption_value__chg_field_pr.py
|
36
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
pass
def backwards(self, orm):
pass
models = {
'sentry.accessgroup': {
'Meta': {'unique_together': "(('team', 'name'),)", 'object_name': 'AccessGroup'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.User']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'symmetrical': 'False'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Event']", 'null': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.alert': {
'Meta': {'object_name': 'Alert'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'related_groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_alerts'", 'symmetrical': 'False', 'through': "orm['sentry.AlertRelatedGroup']", 'to': "orm['sentry.Group']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.alertrelatedgroup': {
'Meta': {'unique_together': "(('group', 'alert'),)", 'object_name': 'AlertRelatedGroup'},
'alert': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Alert']"}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'time_spent': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupcountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'GroupCountByMinute', 'db_table': "'sentry_messagecountbyminute'"},
'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'null': 'True', 'to': "orm['sentry.Project']"}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.pendingteammember': {
'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'pending_member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'),)", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': "orm['sentry.User']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Team']", 'null': 'True'})
},
'sentry.projectcountbyminute': {
'Meta': {'unique_together': "(('project', 'date'),)", 'object_name': 'ProjectCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'user_added': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'keys_added_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'team_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.TeamMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
'sentry.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': "orm['sentry.User']"})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
        },
        'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
}
}
complete_apps = ['sentry']
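# Illustrative sketch (not part of this migration): South hands the frozen
# model definitions above to forwards()/backwards() as `orm`, keyed by
# "app.Model" strings rather than direct imports. A data migration could do:
#
#     def forwards(self, orm):
#         # hypothetical backfill using the frozen Team model
#         for team in orm['sentry.Team'].objects.filter(slug=''):
#             team.slug = team.name.lower().replace(' ', '-')
#             team.save()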
|
superberny70/pelisalacarta
|
refs/heads/develop
|
python/main-classic/platformcode/xbmc_info_window.py
|
4
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# pelisalacarta 4
# Copyright 2015 tvalacarta@gmail.com
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#
# Distributed under the terms of GNU General Public License v3 (GPLv3)
# http://www.gnu.org/licenses/gpl-3.0.html
# ------------------------------------------------------------
# This file is part of pelisalacarta 4.
#
# pelisalacarta 4 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pelisalacarta 4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pelisalacarta 4. If not, see <http://www.gnu.org/licenses/>.
# ------------------------------------------------------------
import xbmcgui
from core import logger
from core.tmdb import Tmdb
ID_BUTTON_CLOSE = 10003
ID_BUTTON_PREVIOUS = 10025
ID_BUTTON_NEXT = 10026
ID_BUTTON_CANCEL = 10027
ID_BUTTON_OK = 10028
class InfoWindow(xbmcgui.WindowXMLDialog):
otmdb = None
item_title = ""
item_serie = ""
item_temporada = 0
item_episodio = 0
result = {}
    # FOR TMDB
@staticmethod
def get_language(lng):
        # Convert the language code into a readable name
languages = {
'aa': 'Afar', 'ab': 'Abkhazian', 'af': 'Afrikaans', 'ak': 'Akan', 'sq': 'Albanian', 'am': 'Amharic',
'ar': 'Arabic', 'an': 'Aragonese', 'as': 'Assamese', 'av': 'Avaric', 'ae': 'Avestan', 'ay': 'Aymara',
'az': 'Azerbaijani', 'ba': 'Bashkir', 'bm': 'Bambara', 'eu': 'Basque', 'be': 'Belarusian', 'bn': 'Bengali',
'bh': 'Bihari languages', 'bi': 'Bislama', 'bo': 'Tibetan', 'bs': 'Bosnian', 'br': 'Breton',
'bg': 'Bulgarian', 'my': 'Burmese', 'ca': 'Catalan; Valencian', 'cs': 'Czech', 'ch': 'Chamorro',
'ce': 'Chechen', 'zh': 'Chinese',
'cu': 'Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic', 'cv': 'Chuvash',
'kw': 'Cornish', 'co': 'Corsican', 'cr': 'Cree', 'cy': 'Welsh', 'da': 'Danish', 'de': 'German',
'dv': 'Divehi; Dhivehi; Maldivian', 'nl': 'Dutch; Flemish', 'dz': 'Dzongkha', 'en': 'English',
'eo': 'Esperanto', 'et': 'Estonian', 'ee': 'Ewe', 'fo': 'Faroese', 'fa': 'Persian', 'fj': 'Fijian',
'fi': 'Finnish', 'fr': 'French', 'fy': 'Western Frisian', 'ff': 'Fulah', 'Ga': 'Georgian',
'gd': 'Gaelic; Scottish Gaelic', 'ga': 'Irish', 'gl': 'Galician', 'gv': 'Manx',
'el': 'Greek, Modern (1453-)', 'gn': 'Guarani', 'gu': 'Gujarati', 'ht': 'Haitian; Haitian Creole',
'ha': 'Hausa', 'he': 'Hebrew', 'hz': 'Herero', 'hi': 'Hindi', 'ho': 'Hiri Motu', 'hr': 'Croatian',
'hu': 'Hungarian', 'hy': 'Armenian', 'ig': 'Igbo', 'is': 'Icelandic', 'io': 'Ido',
'ii': 'Sichuan Yi; Nuosu', 'iu': 'Inuktitut', 'ie': 'Interlingue; Occidental',
'ia': 'Interlingua (International Auxiliary Language Association)', 'id': 'Indonesian', 'ik': 'Inupiaq',
'it': 'Italian', 'jv': 'Javanese', 'ja': 'Japanese', 'kl': 'Kalaallisut; Greenlandic', 'kn': 'Kannada',
'ks': 'Kashmiri', 'ka': 'Georgian', 'kr': 'Kanuri', 'kk': 'Kazakh', 'km': 'Central Khmer',
'ki': 'Kikuyu; Gikuyu', 'rw': 'Kinyarwanda', 'ky': 'Kirghiz; Kyrgyz', 'kv': 'Komi', 'kg': 'Kongo',
'ko': 'Korean', 'kj': 'Kuanyama; Kwanyama', 'ku': 'Kurdish', 'lo': 'Lao', 'la': 'Latin', 'lv': 'Latvian',
'li': 'Limburgan; Limburger; Limburgish', 'ln': 'Lingala', 'lt': 'Lithuanian',
'lb': 'Luxembourgish; Letzeburgesch', 'lu': 'Luba-Katanga', 'lg': 'Ganda', 'mk': 'Macedonian',
'mh': 'Marshallese', 'ml': 'Malayalam', 'mi': 'Maori', 'mr': 'Marathi', 'ms': 'Malay', 'Mi': 'Micmac',
'mg': 'Malagasy', 'mt': 'Maltese', 'mn': 'Mongolian', 'na': 'Nauru', 'nv': 'Navajo; Navaho',
'nr': 'Ndebele, South; South Ndebele', 'nd': 'Ndebele, North; North Ndebele', 'ng': 'Ndonga',
'ne': 'Nepali', 'nn': 'Norwegian Nynorsk; Nynorsk, Norwegian', 'nb': 'Bokmål, Norwegian; Norwegian Bokmål',
'no': 'Norwegian', 'oc': 'Occitan (post 1500)', 'oj': 'Ojibwa', 'or': 'Oriya', 'om': 'Oromo',
'os': 'Ossetian; Ossetic', 'pa': 'Panjabi; Punjabi', 'pi': 'Pali', 'pl': 'Polish', 'pt': 'Portuguese',
'ps': 'Pushto; Pashto', 'qu': 'Quechua', 'ro': 'Romanian; Moldavian; Moldovan', 'rn': 'Rundi',
'ru': 'Russian', 'sg': 'Sango', 'rm': 'Romansh', 'sa': 'Sanskrit', 'si': 'Sinhala; Sinhalese',
'sk': 'Slovak', 'sl': 'Slovenian', 'se': 'Northern Sami', 'sm': 'Samoan', 'sn': 'Shona', 'sd': 'Sindhi',
'so': 'Somali', 'st': 'Sotho, Southern', 'es': 'Spanish', 'sc': 'Sardinian', 'sr': 'Serbian', 'ss': 'Swati',
'su': 'Sundanese', 'sw': 'Swahili', 'sv': 'Swedish', 'ty': 'Tahitian', 'ta': 'Tamil', 'tt': 'Tatar',
'te': 'Telugu', 'tg': 'Tajik', 'tl': 'Tagalog', 'th': 'Thai', 'ti': 'Tigrinya',
'to': 'Tonga (Tonga Islands)', 'tn': 'Tswana', 'ts': 'Tsonga', 'tk': 'Turkmen', 'tr': 'Turkish',
'tw': 'Twi', 'ug': 'Uighur; Uyghur', 'uk': 'Ukrainian', 'ur': 'Urdu', 'uz': 'Uzbek', 've': 'Venda',
'vi': 'Vietnamese', 'vo': 'Volapük', 'wa': 'Walloon', 'wo': 'Wolof', 'xh': 'Xhosa', 'yi': 'Yiddish',
'yo': 'Yoruba', 'za': 'Zhuang; Chuang', 'zu': 'Zulu'}
return languages.get(lng, lng)
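    # Usage sketch (illustrative): get_language('es') -> 'Spanish'; unknown
    # codes fall through unchanged, e.g. get_language('xx') -> 'xx'.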
def get_scraper_data(self, data_in):
self.otmdb = None
# logger.debug(str(data_in))
if self.listData:
            # Data common to all listings
infoLabels = self.scraper().get_infoLabels(origen=data_in)
if "original_language" in infoLabels:
infoLabels["language"] = self.get_language(infoLabels["original_language"])
infoLabels["puntuacion"] = "%s/10 (%s)" % (infoLabels.get("rating", "?"), infoLabels.get("votes", "N/A"))
self.result = infoLabels
def Start(self, data, caption="Información del vídeo", item=None, scraper=Tmdb):
        # Capture the parameters
self.caption = caption
self.item = item
self.indexList = -1
self.listData = None
self.return_value = None
self.scraper = scraper
logger.debug(data)
        if isinstance(data, list):
self.listData = data
self.indexList = 0
data = self.listData[self.indexList]
self.get_scraper_data(data)
        # Show the window
self.doModal()
return self.return_value
def __init__(self, *args):
self.caption = ""
self.item = None
self.listData = None
self.indexList = 0
self.return_value = None
self.scraper = Tmdb
def onInit(self):
if xbmcgui.__version__ == "1.2":
self.setCoordinateResolution(1)
else:
self.setCoordinateResolution(5)
        # Set the title and the images
self.getControl(10002).setLabel(self.caption)
self.getControl(10004).setImage(self.result.get("fanart", ""))
self.getControl(10005).setImage(self.result.get("thumbnail", "images/img_no_disponible.png"))
        # Load the data for the movie layout
if self.result.get("mediatype", "movie") == "movie":
self.getControl(10006).setLabel("Título:")
self.getControl(10007).setLabel(self.result.get("title", "N/A"))
self.getControl(10008).setLabel("Título original:")
self.getControl(10009).setLabel(self.result.get("originaltitle", "N/A"))
self.getControl(100010).setLabel("Idioma original:")
self.getControl(100011).setLabel(self.result.get("language", "N/A"))
self.getControl(100012).setLabel("Puntuación:")
self.getControl(100013).setLabel(self.result.get("puntuacion", "N/A"))
self.getControl(100014).setLabel("Lanzamiento:")
self.getControl(100015).setLabel(self.result.get("release_date", "N/A"))
self.getControl(100016).setLabel("Géneros:")
self.getControl(100017).setLabel(self.result.get("genre", "N/A"))
        # Load the data for the TV show layout
else:
self.getControl(10006).setLabel("Serie:")
self.getControl(10007).setLabel(self.result.get("title", "N/A"))
self.getControl(10008).setLabel("Idioma original:")
self.getControl(10009).setLabel(self.result.get("language", "N/A"))
self.getControl(100010).setLabel("Puntuación:")
self.getControl(100011).setLabel(self.result.get("puntuacion", "N/A"))
self.getControl(100012).setLabel("Géneros:")
self.getControl(100013).setLabel(self.result.get("genre", "N/A"))
if self.result.get("season"):
self.getControl(100014).setLabel("Título temporada:")
self.getControl(100015).setLabel(self.result.get("temporada_nombre", "N/A"))
self.getControl(100016).setLabel("Temporada:")
self.getControl(100017).setLabel(self.result.get("season", "N/A") + " de " +
self.result.get("seasons", "N/A"))
if self.result.get("episode"):
self.getControl(100014).setLabel("Título:")
self.getControl(100015).setLabel(self.result.get("episode_title", "N/A"))
self.getControl(100018).setLabel("Episodio:")
self.getControl(100019).setLabel(self.result.get("episode", "N/A") + " de " +
self.result.get("episodes", "N/A"))
self.getControl(100020).setLabel("Emisión:")
self.getControl(100021).setLabel(self.result.get("date", "N/A"))
        # Synopsis
        if self.result.get("plot"):  # .get() avoids a KeyError when no synopsis is present
self.getControl(100022).setLabel("Sinopsis:")
self.getControl(100023).setText(self.result.get("plot", "N/A"))
else:
self.getControl(100022).setLabel("")
self.getControl(100023).setText("")
        # Load the buttons if needed
        self.getControl(10024).setVisible(self.indexList > -1)  # Button group
        self.getControl(ID_BUTTON_PREVIOUS).setEnabled(self.indexList > 0)  # Previous
if self.listData:
m = len(self.listData)
else:
m = 1
        self.getControl(ID_BUTTON_NEXT).setEnabled(self.indexList + 1 != m)  # Next
self.getControl(100029).setLabel("(%s/%s)" % (self.indexList + 1, m)) # x/m
        # Put the focus on the button group; if "Previous" is disabled the focus
        # goes to "Next", and if "Next" is also disabled it falls back to "Cancel"
self.setFocus(self.getControl(10024))
return self.return_value
def onClick(self, _id):
logger.info("onClick id=" + repr(_id))
if _id == ID_BUTTON_PREVIOUS and self.indexList > 0:
self.indexList -= 1
self.get_scraper_data(self.listData[self.indexList])
self.onInit()
elif _id == ID_BUTTON_NEXT and self.indexList < len(self.listData) - 1:
self.indexList += 1
self.get_scraper_data(self.listData[self.indexList])
self.onInit()
elif _id == ID_BUTTON_OK or _id == ID_BUTTON_CLOSE or _id == ID_BUTTON_CANCEL:
self.close()
if _id == ID_BUTTON_OK:
self.return_value = self.listData[self.indexList]
else:
self.return_value = None
def onAction(self, action):
logger.info("action="+repr(action.getId()))
action = action.getId()
        # Get the focused control
focus = self.getFocusId()
        # Action 1: left arrow
if action == 1:
if focus == ID_BUTTON_OK:
self.setFocus(self.getControl(ID_BUTTON_CANCEL))
elif focus == ID_BUTTON_CANCEL:
if self.indexList + 1 != len(self.listData):
                    # go to the Next button
self.setFocus(self.getControl(ID_BUTTON_NEXT))
elif self.indexList > 0:
                    # go to the Previous button, since Next is disabled (we are at the end of the list)
self.setFocus(self.getControl(ID_BUTTON_PREVIOUS))
elif focus == ID_BUTTON_NEXT:
if self.indexList > 0:
                    # go to the Previous button
self.setFocus(self.getControl(ID_BUTTON_PREVIOUS))
        # Action 2: right arrow
elif action == 2:
if focus == ID_BUTTON_PREVIOUS:
if self.indexList + 1 != len(self.listData):
                    # go to the Next button
self.setFocus(self.getControl(ID_BUTTON_NEXT))
else:
                    # go to the Cancel button, since Next is disabled (we are at the end of the list)
self.setFocus(self.getControl(ID_BUTTON_CANCEL))
elif focus == ID_BUTTON_NEXT:
self.setFocus(self.getControl(ID_BUTTON_CANCEL))
elif focus == ID_BUTTON_CANCEL:
self.setFocus(self.getControl(ID_BUTTON_OK))
        # ESC or Back pressed: simulate a click on the Cancel button
if action in [10, 92]:
self.onClick(ID_BUTTON_CANCEL)
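# Usage sketch (illustrative; the skin XML filename and the `config` helper
# are assumptions, not part of this module):
#
#     window = InfoWindow("InfoWindow.xml", config.get_runtime_path())
#     selected = window.Start(tmdb_results, caption="Información del vídeo")
#     if selected is not None:
#         pass  # the user confirmed one of the listed results with "OK"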
|
dart-lang/sdk
|
refs/heads/master
|
samples-dev/swarm/gen_manifest.py
|
2
|
#!/usr/bin/env python3
# Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
#
"""
Usage: gen_manifest.py DIRECTORY EXTENSIONS CACHE-FILE HTML-FILES...
Outputs an app cache manifest file that (recursively) includes all files in
the directory matching the given extensions. Each HTML file is then
processed and a corresponding <name>-cache.html file is created, pointing at
the cache manifest file, which is saved as CACHE-FILE.
Example:
gen_manifest.py war *.css,*.html,*.js,*.png cache.manifest foo.html bar.html
Produces: foo-cache.html, bar-cache.html, and cache.manifest
"""
import datetime
import fnmatch
import os
import sys
cacheDir = sys.argv[1]
extensions = sys.argv[2].split(',')
manifestName = sys.argv[3]
htmlFiles = sys.argv[4:]
os.chdir(cacheDir)
print("Generating manifest from root path: " + cacheDir)
patterns = extensions + htmlFiles
def matches(file):
for pattern in patterns:
if fnmatch.fnmatch(file, pattern):
return True
return False
def findFiles(rootDir):
for root, dirs, files in os.walk(rootDir):
for f in files:
# yields this file relative to the given directory
yield os.path.join(root, f)[(len(rootDir) + 1):]
manifest = []
manifest.append("CACHE MANIFEST")
# include a timestamp comment to force the browser to refresh the cache manifest
manifest.append("# %s" % datetime.datetime.now().isoformat())
# print out each file to be included in the cache manifest
manifest.append("CACHE:")
manifest += (f for f in findFiles('.') if matches(f))
# force the browser to request any other files over the network,
# even when offline (better failure mode)
manifest.append("NETWORK:")
manifest.append("*")
with open(manifestName, 'w') as f:
f.writelines(m + '\n' for m in manifest)
print("Created manifest file: " + manifestName)
for htmlFile in htmlFiles:
cachedHtmlFile = htmlFile.replace('.html', '-cache.html')
    with open(htmlFile, 'r') as f:
        text = f.read()
text = text.replace('<html>', '<html manifest="%s">' % manifestName, 1)
with open(cachedHtmlFile, 'w') as output:
output.write(text)
print("Processed html file: %s -> %s" % (htmlFile, cachedHtmlFile))
print("Successfully generated manifest and html files")
|
40223112/w16test
|
refs/heads/master
|
static/Brython3.1.3-20150514-095342/Lib/ui/slider.py
|
603
|
from . import widget
from browser import doc,html
class Slider(widget.Widget):
def __init__(self, id=None, label=False):
self._div_shell=html.DIV(Class="ui-slider ui-slider-horizontal ui-widget ui-widget-content ui-corner-all")
widget.Widget.__init__(self, self._div_shell, 'slider', id)
self._handle=html.A(Class="ui-slider-handle ui-state-default ui-corner-all",
Href='#', style={'left': '0px'})
self._value=0
self._isMouseDown=False
self.m0 = [None, None]
def startSlide(ev):
self._isMouseDown=True
self._upperBound = self._div_shell.offsetWidth - self._handle.offsetWidth
pos = widget.getMousePosition(ev)
self._startMouseX=pos['x']
print('left', self._handle.style.left,'ev.x',ev.x)
self._lastElementLeft = int(self._handle.left)
print('left', self._lastElementLeft)
updatePosition(ev)
def updatePosition(ev):
            #pos = widget.getMousePosition(ev)
            #print('mouse pos', pos)
_newPos = self._lastElementLeft + ev.x - self._startMouseX
_newPos = max(0, _newPos)
_newPos = min(_newPos, self._upperBound)
self._handle.left = _newPos
print('new position',self._handle.style.left)
self._lastElementLeft = _newPos
def moving(e):
if self._isMouseDown:
updatePosition(e)
def dropCallback(e):
self._isMouseDown=False
self._handle.unbind('mousemove', moving)
self._handle.bind('mousemove', moving)
self._handle.bind('mouseup', dropCallback)
#self._handle.bind('mouseout', dropCallback)
self._handle.bind('mousedown', startSlide)
def mouseover(e):
_class=self._handle.getAttribute('class')
self._handle.setAttribute('class', '%s %s' % (_class, 'ui-state-hover'))
def mouseout(e):
self._isMouseDown=False
_class=self._handle.getAttribute('class')
self._handle.setAttribute('class', _class.replace('ui-state-hover', ''))
self._handle.bind('mouseover', mouseover)
self._handle.bind('mouseout', mouseout)
self._div_shell <= self._handle
def get_value(self):
return self._value
#def set_value(self, value):
# self._value=value
# self._handle.style.left='%spx' % value
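# Usage sketch (illustrative; assumes the page conventions of the surrounding
# `ui` package, and pokes at the widget's root element directly):
#
#     s = Slider(id='volume')
#     doc <= s._div_shell   # attach the slider's root element to the page
#     print(s.get_value())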
|
jendap/tensorflow
|
refs/heads/master
|
tensorflow/python/ops/ragged/ragged_gather_nd_op_test.py
|
10
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged_gather_ops.gather_nd."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_gather_ops
from tensorflow.python.ops.ragged import ragged_test_util
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedGatherNdOpTest(ragged_test_util.RaggedTensorTestCase,
parameterized.TestCase):
DOCSTRING_PARAMS = [[['000', '001'], ['010']],
[['100'], ['110', '111', '112'], ['120']],
[[], ['210']]] # pyformat: disable
@parameterized.parameters([
#=========================================================================
# Docstring Examples
#=========================================================================
dict(
descr='Docstring example 1',
params=ragged_factory_ops.constant_value(DOCSTRING_PARAMS),
indices=[[2], [0]],
expected=ragged_factory_ops.constant_value(
[[[], [b'210']], [[b'000', b'001'], [b'010']]])),
dict(
descr='Docstring example 2',
params=ragged_factory_ops.constant_value(DOCSTRING_PARAMS),
indices=[[2, 1], [0, 0]],
expected=ragged_factory_ops.constant_value(
[[b'210'], [b'000', b'001']])),
dict(
descr='Docstring example 3',
params=ragged_factory_ops.constant_value(DOCSTRING_PARAMS),
indices=[[0, 0, 1], [1, 1, 2]],
expected=[b'001', b'112']),
#=========================================================================
# Indices with 0 values (selects the entire params)
#=========================================================================
dict(
descr='params: [B1, (B2)], indices: [0], result: [B1, (B2)]',
params=ragged_factory_ops.constant_value([['a', 'b', 'c'], ['d']]),
indices=np.zeros([0], dtype=np.int32),
expected=ragged_factory_ops.constant_value(
[[b'a', b'b', b'c'], [b'd']])),
dict(
descr='params: [B1, (B2)], indices: [A1, 0], result: [A1, B1, (B2)]',
params=ragged_factory_ops.constant_value([['a', 'b', 'c'], ['d']]),
indices=np.zeros([3, 0], dtype=np.int32),
expected=ragged_factory_ops.constant_value(
[[[b'a', b'b', b'c'], [b'd']],
[[b'a', b'b', b'c'], [b'd']],
[[b'a', b'b', b'c'], [b'd']]])),
dict(
descr=('params: [B1, (B2)], indices: [A1, A2, 0], '
'result: [A1, A2, B1, (B2)]'),
params=ragged_factory_ops.constant_value([['a', 'b', 'c'], ['d']]),
indices=np.zeros([1, 3, 0], dtype=np.int32),
expected=ragged_factory_ops.constant_value(
[[[[b'a', b'b', b'c'], [b'd']],
[[b'a', b'b', b'c'], [b'd']],
[[b'a', b'b', b'c'], [b'd']]]])),
dict(
descr='params: [B1], indices: [A1, (A2), 0], result: [A1, (A2), B1]',
params=['a'],
indices=ragged_factory_ops.constant_value(
[[[], []], [[]]],
ragged_rank=1,
dtype=np.int32),
expected=ragged_factory_ops.constant_value(
[[[b'a'], [b'a']], [[b'a']]],
ragged_rank=1)),
#=========================================================================
# Indices with 1 value (selects row from params)
#=========================================================================
dict(
descr='params: [B1, (B2)], indices: [A1, 1], result: [A1, (B2)]',
params=ragged_factory_ops.constant_value([['a', 'b', 'c'], ['d']]),
indices=[[1], [0]],
expected=ragged_factory_ops.constant_value(
[[b'd'], [b'a', b'b', b'c']])),
dict(
descr=('params: [B1, (B2), (B3)], indices: [A1, 1], '
'result: [A1, (B2), (B3)]'),
params=ragged_factory_ops.constant_value(
[[['a', 'b', 'c'], ['d']], [['e', 'f']]]),
indices=[[1], [1]],
expected=ragged_factory_ops.constant_value(
[[[b'e', b'f']], [[b'e', b'f']]])),
dict(
descr=('params: [B1, B2, B3], indices: [A1, (A2), 1], '
'result: [A1, (A2), B2, B3]'),
params=[[['a']], [['b']]],
indices=ragged_factory_ops.constant_value([[[0]]], ragged_rank=1),
expected=ragged_factory_ops.constant_value(
[[[[b'a']]]], ragged_rank=1)),
#=========================================================================
# Indices with 2 values (selects row & col from params)
#=========================================================================
dict(
descr='params: [B1, (B2)], indices: [A1, 2], result: [A1]',
params=ragged_factory_ops.constant_value([['a', 'b', 'c'], ['d']]),
indices=[[1, 0], [0, 0], [0, 2]],
expected=ragged_factory_ops.constant_value([b'd', b'a', b'c'])),
dict(
descr=('params: [B1, (B2), (B3)], indices: [A1, 2], '
'result: [A1, (B3)]'),
params=ragged_factory_ops.constant_value(
[[['a', 'b', 'c'], ['d']], [['e', 'f']]]),
indices=[[1, 0], [0, 1], [0, 0]],
expected=ragged_factory_ops.constant_value(
[[b'e', b'f'], [b'd'], [b'a', b'b', b'c']])),
dict(
descr=('params: [B1, (B2), (B3)], indices: [A1, A2, 2], '
'result: [A1, (A2), (B3)]'),
params=ragged_factory_ops.constant_value(
[[['a', 'b', 'c'], ['d']], [['e', 'f']]]),
indices=[[[1, 0], [0, 1], [0, 0]]],
expected=ragged_factory_ops.constant_value(
[[[b'e', b'f'], [b'd'], [b'a', b'b', b'c']]])),
dict(
descr=('params: [B1, (B2), B3], indices: [A1, A2, 2], '
'result: [A1, A2, B3]'),
params=ragged_factory_ops.constant_value(
[[['a', 'b'], ['c', 'd']],
[['e', 'f']]],
ragged_rank=1),
indices=[[[1, 0], [0, 1], [0, 0]]],
expected=[[[b'e', b'f'], [b'c', b'd'], [b'a', b'b']]]),
dict(
descr=('params: [B1, (B2), B3], indices: [A1, A2, A3, 2], '
'result: [A1, A2, A3, B3]'),
params=ragged_factory_ops.constant_value(
[[['a', 'b'], ['c', 'd']],
[['e', 'f']]],
ragged_rank=1),
indices=[[[[1, 0], [0, 1], [0, 0]]]],
expected=[[[[b'e', b'f'], [b'c', b'd'], [b'a', b'b']]]]),
dict(
descr=('params: [B1, (B2), (B3)], indices: [A1, (A2), 2], '
'result: [A1, (A2), (B3)]'),
params=ragged_factory_ops.constant_value(
[[['a', 'b', 'c'], ['d']], [['e', 'f']]]),
indices=ragged_factory_ops.constant_value(
[[[1, 0], [0, 1]], [[0, 0]]],
ragged_rank=1),
expected=ragged_factory_ops.constant_value(
[[[b'e', b'f'], [b'd']], [[b'a', b'b', b'c']]])),
#=========================================================================
# Indices with 3 values
#=========================================================================
dict(
descr=('params: [B1, (B2), (B3)], indices: [A1, 3], '
'result: [A1]'),
params=ragged_factory_ops.constant_value(
[[['a', 'b', 'c'], ['d']], [['e', 'f']]]),
indices=[[1, 0, 1], [0, 0, 0], [0, 1, 0]],
expected=[b'f', b'a', b'd']),
dict(
descr=('params: [B1, (B2), B3], indices: [A1, 3], '
'result: [A1]'),
params=ragged_factory_ops.constant_value(
[[['a', 'b'], ['c', 'd']], [['e', 'f']]],
ragged_rank=1),
indices=[[1, 0, 1], [0, 0, 0], [0, 1, 1]],
expected=[b'f', b'a', b'd']),
dict(
descr=('params: [B1, (B2), (B3), B4], indices: [A1, 3], '
'result: [A1, B4]'),
params=ragged_factory_ops.constant_value(
[[[['a', 'b'], ['c', 'd']], [['e', 'f']]]],
ragged_rank=2),
indices=[[0, 0, 1], [0, 0, 0], [0, 1, 0]],
expected=[[b'c', b'd'], [b'a', b'b'], [b'e', b'f']]),
]) # pyformat: disable
def testRaggedGatherNd(self, descr, params, indices, expected):
result = ragged_gather_ops.gather_nd(params, indices)
self.assertRaggedEqual(result, expected)
def testRaggedGatherNdUnknownRankError(self):
if context.executing_eagerly():
return
params = ragged_factory_ops.constant([['a', 'b'], ['c', 'd']])
indices1 = array_ops.placeholder(dtypes.int32, shape=None)
indices2 = array_ops.placeholder(dtypes.int32, shape=[None])
with self.assertRaisesRegexp(ValueError,
'indices.rank be statically known.'):
ragged_gather_ops.gather_nd(params, indices1)
with self.assertRaisesRegexp(
ValueError, r'indices.shape\[-1\] must be statically known.'):
ragged_gather_ops.gather_nd(params, indices2)
@parameterized.parameters([
dict(
params=['a'],
indices=0,
error=(ValueError, errors.InvalidArgumentError)),
dict(
params=ragged_factory_ops.constant_value([['a']]),
indices=0,
message='indices.rank must be at least 1.'),
dict(
params=['a', 'b', 'c'],
indices=ragged_factory_ops.constant_value([[0]]),
message='The innermost dimension of indices may not be ragged'),
])
def testRaggedGatherNdStaticError(self,
params,
indices,
message=None,
error=ValueError):
with self.assertRaisesRegexp(error, message):
ragged_gather_ops.gather_nd(params, indices)
if __name__ == '__main__':
googletest.main()
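# Quick illustration (not part of the suite): with DOCSTRING_PARAMS above,
# each extra index coordinate walks one level deeper into the ragged tensor:
#
#     params = ragged_factory_ops.constant(DOCSTRING_PARAMS)
#     ragged_gather_ops.gather_nd(params, [[2, 1]])     # -> [['210']]
#     ragged_gather_ops.gather_nd(params, [[0, 0, 1]])  # -> ['001']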
|
project-zerus/fbthrift
|
refs/heads/master
|
thrift/lib/py/transport/TTwisted.py
|
16
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from zope.interface import implements, Interface, Attribute
from struct import unpack
from twisted.internet.protocol import Protocol, ServerFactory, ClientFactory, \
connectionDone
from twisted.internet import defer
from twisted.protocols import basic
from twisted.python import log
from thrift.protocol.THeaderProtocol import THeaderProtocolFactory
from thrift.server import TServer
from thrift.transport import TTransport
import sys
if sys.version_info[0] >= 3:
    # Thrift frames are byte strings, so use a bytes buffer on Python 3.
    from io import BytesIO as StringIO
else:
    from cStringIO import StringIO
class TMessageSenderTransport(TTransport.TTransportBase):
def __init__(self):
self.__wbuf = StringIO()
def write(self, buf):
self.__wbuf.write(buf)
def flush(self):
msg = self.__wbuf.getvalue()
self.__wbuf = StringIO()
self.sendMessage(msg)
def sendMessage(self, message):
raise NotImplementedError
class TCallbackTransport(TMessageSenderTransport):
def __init__(self, func):
TMessageSenderTransport.__init__(self)
self.func = func
def sendMessage(self, message):
self.func(message)
class ThriftClientProtocol(basic.Int32StringReceiver):
MAX_LENGTH = 1 << 24
def __init__(self, client_class, iprot_factory, oprot_factory=None):
self._client_class = client_class
self._iprot_factory = iprot_factory
if oprot_factory is None:
self._oprot_factory = iprot_factory
else:
self._oprot_factory = oprot_factory
self._errormsg = None
self.recv_map = {}
self.started = defer.Deferred()
def dispatch(self, msg):
self.sendString(msg)
def connectionMade(self):
tmo = TCallbackTransport(self.dispatch)
self.client = self._client_class(tmo, self._oprot_factory)
self.started.callback(self.client)
def connectionLost(self, reason=connectionDone):
if sys.version_info[0] >= 3:
client_req_iter = self.client._reqs.items()
else:
client_req_iter = self.client._reqs.iteritems()
for k, v in client_req_iter:
tex = TTransport.TTransportException(
type=TTransport.TTransportException.END_OF_FILE,
message=self._errormsg or 'Connection closed')
v.errback(tex)
def stringReceived(self, frame):
tr = TTransport.TMemoryBuffer(frame)
iprot = self._iprot_factory.getProtocol(tr)
(fname, mtype, rseqid) = iprot.readMessageBegin()
try:
method = self.recv_map[fname]
except KeyError:
method = getattr(self.client, 'recv_' + fname)
self.recv_map[fname] = method
method(iprot, mtype, rseqid)
def lengthLimitExceeded(self, length):
self._errormsg = 'Received frame too large (%s > %s)' % (
length, self.MAX_LENGTH)
self.transport.loseConnection()
class TwistedRpcConnectionContext(TServer.TConnectionContext):
def __init__(self, client_socket):
self._client_socket = client_socket
def getPeerName(self):
return self._client_socket.getpeername()
class ThriftServerProtocol(basic.Int32StringReceiver):
MAX_LENGTH = 1 << 24
def dispatch(self, msg):
self.sendString(msg)
def processError(self, error):
self.transport.loseConnection()
def processOk(self, _, tmo):
msg = tmo.getvalue()
if len(msg) > 0:
self.dispatch(msg)
def stringReceived(self, frame):
tmi = TTransport.TMemoryBuffer(frame)
tmo = TTransport.TMemoryBuffer()
iprot = self.factory.iprot_factory.getProtocol(tmi)
oprot = self.factory.oprot_factory.getProtocol(tmo)
server_ctx = TwistedRpcConnectionContext(self.transport.socket)
d = self.factory.processor.process(iprot, oprot, server_ctx)
d.addCallbacks(self.processOk, self.processError,
callbackArgs=(tmo,))
class ThriftHeaderServerProtocol(Protocol):
MAX_LENGTH = 1 << 24
recvd = b""
def dataReceived(self, recvd):
self.recvd = self.recvd + recvd
while len(self.recvd) >= 4:
length, = unpack(b"!I", self.recvd[:4])
if length > self.MAX_LENGTH:
self.transport.loseConnection()
return
if len(self.recvd) < length + 4:
break
packet = self.recvd[0:4 + length]
self.recvd = self.recvd[4 + length:]
self.stringReceived(packet)
def processError(self, error):
self.transport.loseConnection()
def processOk(self, _, tmo):
msg = tmo.getvalue()
if len(msg) > 0:
# HeaderTransport will have already done msg length checking,
# and already adds the frame size. Write directly.
self.transport.write(msg)
def stringReceived(self, frame):
tmi = TTransport.TMemoryBuffer(frame)
iprot = self.factory.iprot_factory.getProtocol(tmi)
oprot = iprot
tmo = tmi
server_ctx = TwistedRpcConnectionContext(self.transport.socket)
d = self.factory.processor.process(iprot, oprot, server_ctx)
d.addCallbacks(self.processOk, self.processError,
callbackArgs=(tmo,))
class IThriftServerFactory(Interface):
processor = Attribute("Thrift processor")
iprot_factory = Attribute("Input protocol factory")
oprot_factory = Attribute("Output protocol factory")
class IThriftClientFactory(Interface):
client_class = Attribute("Thrift client class")
iprot_factory = Attribute("Input protocol factory")
oprot_factory = Attribute("Output protocol factory")
class ThriftServerFactory(ServerFactory):
implements(IThriftServerFactory)
protocol = ThriftServerProtocol
def __init__(self, processor, iprot_factory, oprot_factory=None):
self.processor = processor
self.iprot_factory = iprot_factory
if oprot_factory is None:
self.oprot_factory = iprot_factory
else:
self.oprot_factory = oprot_factory
if isinstance(iprot_factory, THeaderProtocolFactory):
self.protocol = ThriftHeaderServerProtocol
class ThriftClientFactory(ClientFactory):
implements(IThriftClientFactory)
protocol = ThriftClientProtocol
def __init__(self, client_class, iprot_factory, oprot_factory=None):
self.client_class = client_class
self.iprot_factory = iprot_factory
if oprot_factory is None:
self.oprot_factory = iprot_factory
else:
self.oprot_factory = oprot_factory
def buildProtocol(self, addr):
p = self.protocol(self.client_class, self.iprot_factory,
self.oprot_factory)
p.factory = self
return p
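# Usage sketch (illustrative; MyService, iprot_factory, host and port are
# placeholders, not part of this module):
#
#     from twisted.internet import reactor
#     from twisted.internet.protocol import ClientCreator
#
#     d = ClientCreator(reactor, ThriftClientProtocol,
#                       MyService.Client, iprot_factory).connectTCP(host, 9090)
#     d.addCallback(lambda proto: proto.started)   # fires with the client
#     d.addCallback(lambda client: client.ping())  # hypothetical RPC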
|
aguijarro/DataSciencePython
|
refs/heads/master
|
DataWrangling/extracting_data_xml.py
|
1
|
#!/usr/bin/env python
# Your task here is to extract data from XML about the authors of an article
# and add it to a list, one item per author.
# See the provided data structure for the expected format.
# The tags for first name, surname and email should map directly
# to the dictionary keys.
import xml.etree.ElementTree as ET
import os
# set up the data file locations
DATADIR = '../Data/'
DATAFILE = "exampleResearchArticle.xml"
def get_root(fname):
tree = ET.parse(fname)
return tree.getroot()
def get_authors(root):
authors = []
for author in root.findall('./fm/bibl/aug/au'):
data = {"fnm": author.find('fnm').text,
"snm": author.find('snm').text,
"email": author.find('email').text
}
# data["fnm"] = author.find('./fnm').text
# data["snm"] = author.find('./snm').text
# data["email"] = author.find('./email').text
authors.append(data)
return authors
def test(article_file):
solution = [{'fnm': 'Omer', 'snm': 'Mei-Dan',
'email': 'omer@extremegate.com'
},
{'fnm': 'Mike', 'snm': 'Carmont',
'email': 'mcarmont@hotmail.com'
},
{'fnm': 'Lior', 'snm': 'Laver',
'email': 'laver17@gmail.com'
},
{'fnm': 'Meir', 'snm': 'Nyska',
'email': 'nyska@internet-zahav.net'
},
{'fnm': 'Hagay', 'snm': 'Kammar',
'email': 'kammarh@gmail.com'
},
{'fnm': 'Gideon', 'snm': 'Mann',
'email': 'gideon.mann.md@gmail.com'
},
{'fnm': 'Barnaby', 'snm': 'Clarck',
'email': 'barns.nz@gmail.com'
},
{'fnm': 'Eugene', 'snm': 'Kots',
'email': 'eukots@gmail.com'
}
]
root = get_root(article_file)
data = get_authors(root)
assert data[0] == solution[0]
assert data[1]["fnm"] == solution[1]["fnm"]
if __name__ == '__main__':
article_file = os.path.join(DATADIR, DATAFILE)
test(article_file)
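# For reference (illustrative), get_authors expects the document to contain
# XML shaped like:
#
#   <fm><bibl><aug>
#     <au><fnm>Omer</fnm><snm>Mei-Dan</snm><email>omer@extremegate.com</email></au>
#     ...
#   </aug></bibl></fm>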
|
grepme/CMPUT410Lab01
|
refs/heads/master
|
virt_env/virt1/lib/python2.7/site-packages/WebTest-2.0.17-py2.7.egg/webtest/app.py
|
4
|
# (c) 2005 Ian Bicking and contributors; written for Paste
# (http://pythonpaste.org)
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license.php
"""
Routines for testing WSGI applications.
Most interesting is TestApp
"""
from __future__ import unicode_literals
import os
import re
import json
import random
import fnmatch
import mimetypes
from base64 import b64encode
from six import StringIO
from six import BytesIO
from six import string_types
from six import binary_type
from six import text_type
from six.moves import http_cookiejar
from webtest.compat import urlparse
from webtest.compat import urlencode
from webtest.compat import to_bytes
from webtest.compat import escape_cookie_value
from webtest.response import TestResponse
from webtest import forms
from webtest import lint
from webtest import utils
import webob
__all__ = ['TestApp', 'TestRequest']
class AppError(Exception):
def __init__(self, message, *args):
if isinstance(message, binary_type):
message = message.decode('utf8')
str_args = ()
for arg in args:
if isinstance(arg, webob.Response):
body = arg.body
if isinstance(body, binary_type):
if arg.charset:
arg = body.decode(arg.charset)
else:
arg = repr(body)
elif isinstance(arg, binary_type):
try:
arg = arg.decode('utf8')
except UnicodeDecodeError:
arg = repr(arg)
str_args += (arg,)
message = message % str_args
Exception.__init__(self, message)
class CookiePolicy(http_cookiejar.DefaultCookiePolicy):
"""A subclass of DefaultCookiePolicy to allow cookie set for
Domain=localhost."""
def return_ok_domain(self, cookie, request):
if cookie.domain == '.localhost':
return True
return http_cookiejar.DefaultCookiePolicy.return_ok_domain(
self, cookie, request)
def set_ok_domain(self, cookie, request):
if cookie.domain == '.localhost':
return True
return http_cookiejar.DefaultCookiePolicy.set_ok_domain(
self, cookie, request)
class TestRequest(webob.BaseRequest):
"""A subclass of webob.Request"""
ResponseClass = TestResponse
class TestApp(object):
"""
    Wraps a WSGI application in a more convenient interface for
    testing. It uses an extended version of :class:`webob.BaseRequest`
and :class:`webob.Response`.
:param app:
        May be a WSGI application or a Paste Deploy app,
like ``'config:filename.ini#test'``.
.. versionadded:: 2.0
It can also be an actual full URL to an http server and webtest
will proxy requests with `wsgiproxy`.
:type app:
WSGI application
:param extra_environ:
A dictionary of values that should go
into the environment for each request. These can provide a
communication channel with the application.
:type extra_environ:
dict
    :param relative_to:
        A directory; file upload paths are resolved relative to it,
        as are ``config:`` URIs that aren't absolute.
:type relative_to:
string
:param cookiejar:
:class:`cookielib.CookieJar` alike API that keeps cookies
        across requests.
:type cookiejar:
CookieJar instance
.. attribute:: cookies
A convenient shortcut for a dict of all cookies in
``cookiejar``.
:param parser_features:
Passed to BeautifulSoup when parsing responses.
:type parser_features:
string or list
:param json_encoder:
Passed to json.dumps when encoding json
:type json_encoder:
A subclass of json.JSONEncoder
:param lint:
If True (default) then check that the application is WSGI compliant
:type lint:
A boolean
"""
RequestClass = TestRequest
def __init__(self, app, extra_environ=None, relative_to=None,
use_unicode=True, cookiejar=None, parser_features=None,
json_encoder=None, lint=True):
if 'WEBTEST_TARGET_URL' in os.environ:
app = os.environ['WEBTEST_TARGET_URL']
if isinstance(app, string_types):
if app.startswith('http'):
try:
from wsgiproxy import HostProxy
except ImportError: # pragma: no cover
raise ImportError((
'Using webtest with a real url requires WSGIProxy2. '
'Please install it with: '
'pip install WSGIProxy2'))
if '#' not in app:
app += '#httplib'
url, client = app.split('#', 1)
app = HostProxy(url, client=client)
else:
from paste.deploy import loadapp
# @@: Should pick up relative_to from calling module's
# __file__
app = loadapp(app, relative_to=relative_to)
self.app = app
self.lint = lint
self.relative_to = relative_to
if extra_environ is None:
extra_environ = {}
self.extra_environ = extra_environ
self.use_unicode = use_unicode
if cookiejar is None:
cookiejar = http_cookiejar.CookieJar(policy=CookiePolicy())
self.cookiejar = cookiejar
if parser_features is None:
parser_features = 'html.parser'
self.RequestClass.ResponseClass.parser_features = parser_features
if json_encoder is None:
json_encoder = json.JSONEncoder
self.JSONEncoder = json_encoder
def get_authorization(self):
"""Allow to set the HTTP_AUTHORIZATION environ key. Value should looks
like ``('Basic', ('user', 'password'))``
If value is None the the HTTP_AUTHORIZATION is removed
"""
return self.authorization_value
def set_authorization(self, value):
self.authorization_value = value
if value is not None:
invalid_value = (
"You should use a value like ('Basic', ('user', 'password'))"
)
if isinstance(value, (list, tuple)) and len(value) == 2:
authtype, val = value
if authtype == 'Basic' and val and \
isinstance(val, (list, tuple)):
val = ':'.join(list(val))
val = b64encode(to_bytes(val)).strip()
val = val.decode('latin1')
else:
raise ValueError(invalid_value)
value = str('%s %s' % (authtype, val))
else:
raise ValueError(invalid_value)
self.extra_environ.update({
'HTTP_AUTHORIZATION': value,
})
else:
if 'HTTP_AUTHORIZATION' in self.extra_environ:
del self.extra_environ['HTTP_AUTHORIZATION']
authorization = property(get_authorization, set_authorization)
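    # Usage sketch (illustrative): app.authorization = ('Basic', ('user', 'password'))
    # makes every request carry "Authorization: Basic dXNlcjpwYXNzd29yZA==".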
@property
def cookies(self):
return dict([(cookie.name, cookie.value) for cookie in self.cookiejar])
def set_cookie(self, name, value):
"""
Sets a cookie to be passed through with requests.
"""
value = escape_cookie_value(value)
cookie = http_cookiejar.Cookie(
version=0,
name=name,
value=value,
port=None,
port_specified=False,
domain='.localhost',
domain_specified=True,
domain_initial_dot=False,
path='/',
path_specified=True,
secure=False,
expires=None,
discard=False,
comment=None,
comment_url=None,
rest=None
)
self.cookiejar.set_cookie(cookie)
def reset(self):
"""
Resets the state of the application; currently just clears
saved cookies.
"""
self.cookiejar.clear()
def set_parser_features(self, parser_features):
"""
Changes the parser used by BeautifulSoup. See its documentation to
know the supported parsers.
"""
self.RequestClass.ResponseClass.parser_features = parser_features
def get(self, url, params=None, headers=None, extra_environ=None,
status=None, expect_errors=False, xhr=False):
"""
Do a GET request given the url path.
:param params:
A query string, or a dictionary that will be encoded
into a query string. You may also include a URL query
string on the ``url``.
:param headers:
Extra headers to send.
:type headers:
dictionary
:param extra_environ:
Environmental variables that should be added to the request.
:type extra_environ:
dictionary
:param status:
The HTTP status code you expect in response (if not 200 or 3xx).
You can also use a wildcard, like ``'3*'`` or ``'*'``.
:type status:
integer or string
:param expect_errors:
If this is False, then if anything is written to
environ ``wsgi.errors`` it will be an error.
If it is True, then non-200/3xx responses are also okay.
:type expect_errors:
boolean
:param xhr:
        If this is true, then marks the request as ajax. The same as
        headers={'X-REQUESTED-WITH': 'XMLHttpRequest', }
:type xhr:
boolean
:returns: :class:`webtest.TestResponse` instance.
"""
environ = self._make_environ(extra_environ)
url = str(url)
url = self._remove_fragment(url)
if params:
if not isinstance(params, string_types):
params = urlencode(params, doseq=True)
if str('?') in url:
url += str('&')
else:
url += str('?')
url += params
if str('?') in url:
url, environ['QUERY_STRING'] = url.split(str('?'), 1)
else:
environ['QUERY_STRING'] = str('')
req = self.RequestClass.blank(url, environ)
if xhr:
headers = self._add_xhr_header(headers)
if headers:
req.headers.update(headers)
return self.do_request(req, status=status,
expect_errors=expect_errors)
def post(self, url, params='', headers=None, extra_environ=None,
status=None, upload_files=None, expect_errors=False,
content_type=None, xhr=False):
"""
Do a POST request. Similar to :meth:`~webtest.TestApp.get`.
:param params:
            Put in the body of the request. If params is an
            iterator it will be urlencoded; if it is a string it will not
            be encoded, but placed in the body directly.
Can be a collections.OrderedDict with
:class:`webtest.forms.Upload` fields included::
app.post('/myurl', collections.OrderedDict([
('textfield1', 'value1'),
('uploadfield', webapp.Upload('filename.txt', 'contents'),
('textfield2', 'value2')])))
:param upload_files:
It should be a list of ``(fieldname, filename, file_content)``.
You can also use just ``(fieldname, filename)`` and the file
contents will be read from disk.
:type upload_files:
list
:param content_type:
HTTP content type, for example `application/json`.
:type content_type:
string
:param xhr:
        If this is true, then marks the request as ajax. The same as
        headers={'X-REQUESTED-WITH': 'XMLHttpRequest', }
:type xhr:
boolean
:returns: :class:`webtest.TestResponse` instance.
"""
if xhr:
headers = self._add_xhr_header(headers)
return self._gen_request('POST', url, params=params, headers=headers,
extra_environ=extra_environ, status=status,
upload_files=upload_files,
expect_errors=expect_errors,
content_type=content_type)
def put(self, url, params='', headers=None, extra_environ=None,
status=None, upload_files=None, expect_errors=False,
content_type=None, xhr=False):
"""
Do a PUT request. Similar to :meth:`~webtest.TestApp.post`.
:returns: :class:`webtest.TestResponse` instance.
"""
if xhr:
headers = self._add_xhr_header(headers)
return self._gen_request('PUT', url, params=params, headers=headers,
extra_environ=extra_environ, status=status,
upload_files=upload_files,
expect_errors=expect_errors,
content_type=content_type,
)
def patch(self, url, params='', headers=None, extra_environ=None,
status=None, upload_files=None, expect_errors=False,
content_type=None, xhr=False):
"""
Do a PATCH request. Similar to :meth:`~webtest.TestApp.post`.
:returns: :class:`webtest.TestResponse` instance.
"""
if xhr:
headers = self._add_xhr_header(headers)
return self._gen_request('PATCH', url, params=params, headers=headers,
extra_environ=extra_environ, status=status,
upload_files=upload_files,
expect_errors=expect_errors,
content_type=content_type)
def delete(self, url, params='', headers=None,
extra_environ=None, status=None, expect_errors=False,
content_type=None, xhr=False):
"""
Do a DELETE request. Similar to :meth:`~webtest.TestApp.get`.
:returns: :class:`webtest.TestResponse` instance.
"""
if xhr:
headers = self._add_xhr_header(headers)
return self._gen_request('DELETE', url, params=params, headers=headers,
extra_environ=extra_environ, status=status,
upload_files=None,
expect_errors=expect_errors,
content_type=content_type)
def options(self, url, headers=None, extra_environ=None,
status=None, expect_errors=False, xhr=False):
"""
        Do an OPTIONS request. Similar to :meth:`~webtest.TestApp.get`.
:returns: :class:`webtest.TestResponse` instance.
"""
if xhr:
headers = self._add_xhr_header(headers)
return self._gen_request('OPTIONS', url, headers=headers,
extra_environ=extra_environ, status=status,
upload_files=None,
expect_errors=expect_errors)
def head(self, url, headers=None, extra_environ=None,
status=None, expect_errors=False, xhr=False):
"""
Do a HEAD request. Similar to :meth:`~webtest.TestApp.get`.
:returns: :class:`webtest.TestResponse` instance.
"""
if xhr:
headers = self._add_xhr_header(headers)
return self._gen_request('HEAD', url, headers=headers,
extra_environ=extra_environ, status=status,
upload_files=None,
expect_errors=expect_errors)
post_json = utils.json_method('POST')
put_json = utils.json_method('PUT')
patch_json = utils.json_method('PATCH')
delete_json = utils.json_method('DELETE')
def encode_multipart(self, params, files):
"""
Encodes a set of parameters (typically a name/value list) and
a set of files (a list of (name, filename, file_body, mimetype)) into a
typical POST body, returning the (content_type, body).
"""
boundary = to_bytes(str(random.random()))[2:]
boundary = b'----------a_BoUnDaRy' + boundary + b'$'
lines = []
def _append_file(file_info):
key, filename, value, fcontent = self._get_file_info(file_info)
if isinstance(key, text_type):
try:
key = key.encode('ascii')
except: # pragma: no cover
raise # file name must be ascii
if isinstance(filename, text_type):
try:
filename = filename.encode('utf8')
except: # pragma: no cover
raise # file name must be ascii or utf8
if not fcontent:
fcontent = mimetypes.guess_type(filename.decode('utf8'))[0]
fcontent = to_bytes(fcontent)
fcontent = fcontent or b'application/octet-stream'
lines.extend([
b'--' + boundary,
b'Content-Disposition: form-data; ' +
b'name="' + key + b'"; filename="' + filename + b'"',
b'Content-Type: ' + fcontent, b'', value])
for key, value in params:
if isinstance(key, text_type):
try:
key = key.encode('ascii')
except: # pragma: no cover
                    raise  # field names are always ascii
if isinstance(value, forms.File):
if value.value:
_append_file([key] + list(value.value))
elif isinstance(value, forms.Upload):
file_info = [key, value.filename]
if value.content is not None:
file_info.append(value.content)
if value.content_type is not None:
file_info.append(value.content_type)
_append_file(file_info)
else:
if isinstance(value, text_type):
value = value.encode('utf8')
lines.extend([
b'--' + boundary,
b'Content-Disposition: form-data; name="' + key + b'"',
b'', value])
for file_info in files:
_append_file(file_info)
lines.extend([b'--' + boundary + b'--', b''])
body = b'\r\n'.join(lines)
boundary = boundary.decode('ascii')
content_type = 'multipart/form-data; boundary=%s' % boundary
return content_type, body
def request(self, url_or_req, status=None, expect_errors=False,
**req_params):
"""
Creates and executes a request. You may either pass in an
instantiated :class:`TestRequest` object, or you may pass in a
URL and keyword arguments to be passed to
:meth:`TestRequest.blank`.
You can use this to run a request without the intermediary
functioning of :meth:`TestApp.get` etc. For instance, to
test a WebDAV method::
resp = app.request('/new-col', method='MKCOL')
Note that the request won't have a body unless you specify it,
like::
resp = app.request('/test.txt', method='PUT', body='test')
You can use :class:`webtest.TestRequest`::
req = webtest.TestRequest.blank('/url/', method='GET')
resp = app.do_request(req)
"""
if isinstance(url_or_req, text_type):
url_or_req = str(url_or_req)
for (k, v) in req_params.items():
if isinstance(v, text_type):
req_params[k] = str(v)
if isinstance(url_or_req, string_types):
req = self.RequestClass.blank(url_or_req, **req_params)
else:
req = url_or_req.copy()
for name, value in req_params.items():
setattr(req, name, value)
req.environ['paste.throw_errors'] = True
for name, value in self.extra_environ.items():
req.environ.setdefault(name, value)
return self.do_request(req,
status=status,
expect_errors=expect_errors,
)
def do_request(self, req, status=None, expect_errors=None):
"""
Executes the given webob Request (``req``), with the expected
``status``. Generally :meth:`~webtest.TestApp.get` and
:meth:`~webtest.TestApp.post` are used instead.
To use this::
req = webtest.TestRequest.blank('url', ...args...)
resp = app.do_request(req)
.. note::
You can pass any keyword arguments to
``TestRequest.blank()``, which will be set on the request.
These can be arguments like ``content_type``, ``accept``, etc.
"""
errors = StringIO()
req.environ['wsgi.errors'] = errors
script_name = req.environ.get('SCRIPT_NAME', '')
if script_name and req.path_info.startswith(script_name):
req.path_info = req.path_info[len(script_name):]
# set framework hooks
req.environ['paste.testing'] = True
req.environ['paste.testing_variables'] = {}
# set request cookies
self.cookiejar.add_cookie_header(utils._RequestCookieAdapter(req))
# verify wsgi compatibility
app = lint.middleware(self.app) if self.lint else self.app
## FIXME: should it be an option to not catch exc_info?
res = req.get_response(app, catch_exc_info=True)
# be sure to decode the content
res.decode_content()
# set a few handy attributes
res._use_unicode = self.use_unicode
res.request = req
res.app = app
res.test_app = self
        # We do this to make sure the app_iter is exhausted:
try:
res.body
except TypeError: # pragma: no cover
pass
res.errors = errors.getvalue()
for name, value in req.environ['paste.testing_variables'].items():
if hasattr(res, name):
raise ValueError(
"paste.testing_variables contains the variable %r, but "
"the response object already has an attribute by that "
"name" % name)
setattr(res, name, value)
if not expect_errors:
self._check_status(status, res)
self._check_errors(res)
# merge cookies back in
self.cookiejar.extract_cookies(utils._ResponseCookieAdapter(res),
utils._RequestCookieAdapter(req))
return res
def _check_status(self, status, res):
if status == '*':
return
res_status = res.status
if (isinstance(status, string_types) and '*' in status):
if re.match(fnmatch.translate(status), res_status, re.I):
return
if isinstance(status, string_types):
if status == res_status:
return
if isinstance(status, (list, tuple)):
if res.status_int not in status:
raise AppError(
"Bad response: %s (not one of %s for %s)\n%s",
res_status, ', '.join(map(str, status)),
res.request.url, res)
return
if status is None:
if res.status_int >= 200 and res.status_int < 400:
return
raise AppError(
"Bad response: %s (not 200 OK or 3xx redirect for %s)\n%s",
res_status, res.request.url,
res)
if status != res.status_int:
raise AppError(
"Bad response: %s (not %s)", res_status, status)
def _check_errors(self, res):
errors = res.errors
if errors:
raise AppError(
"Application had errors logged:\n%s", errors)
def _make_environ(self, extra_environ=None):
environ = self.extra_environ.copy()
environ['paste.throw_errors'] = True
if extra_environ:
environ.update(extra_environ)
return environ
def _remove_fragment(self, url):
scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
return urlparse.urlunsplit((scheme, netloc, path, query, ""))
def _gen_request(self, method, url, params=utils.NoDefault,
headers=None, extra_environ=None, status=None,
upload_files=None, expect_errors=False,
content_type=None):
"""
Do a generic request.
"""
environ = self._make_environ(extra_environ)
inline_uploads = []
# this supports OrderedDict
if isinstance(params, dict) or hasattr(params, 'items'):
params = list(params.items())
if isinstance(params, (list, tuple)):
inline_uploads = [v for (k, v) in params
if isinstance(v, (forms.File, forms.Upload))]
if len(inline_uploads) > 0:
content_type, params = self.encode_multipart(
params, upload_files or ())
environ['CONTENT_TYPE'] = content_type
else:
params = utils.encode_params(params, content_type)
if upload_files or \
(content_type and
to_bytes(content_type).startswith(b'multipart')):
params = urlparse.parse_qsl(params, keep_blank_values=True)
content_type, params = self.encode_multipart(
params, upload_files or ())
environ['CONTENT_TYPE'] = content_type
elif params:
environ.setdefault('CONTENT_TYPE',
str('application/x-www-form-urlencoded'))
if content_type is not None:
environ['CONTENT_TYPE'] = content_type
environ['REQUEST_METHOD'] = str(method)
url = str(url)
url = self._remove_fragment(url)
req = self.RequestClass.blank(url, environ)
if isinstance(params, text_type):
params = params.encode(req.charset or 'utf8')
req.environ['wsgi.input'] = BytesIO(params)
req.content_length = len(params)
if headers:
req.headers.update(headers)
return self.do_request(req, status=status,
expect_errors=expect_errors)
def _get_file_info(self, file_info):
if len(file_info) == 2:
# It only has a filename
filename = file_info[1]
if self.relative_to:
filename = os.path.join(self.relative_to, filename)
f = open(filename, 'rb')
content = f.read()
f.close()
return (file_info[0], filename, content, None)
elif 3 <= len(file_info) <= 4:
content = file_info[2]
if not isinstance(content, binary_type):
raise ValueError('File content must be %s not %s'
% (binary_type, type(content)))
if len(file_info) == 3:
return tuple(file_info) + (None,)
else:
return file_info
else:
raise ValueError(
"upload_files need to be a list of tuples of (fieldname, "
"filename, filecontent, mimetype) or (fieldname, "
"filename, filecontent) or (fieldname, filename); "
"you gave: %r"
% repr(file_info)[:100])
@staticmethod
def _add_xhr_header(headers):
headers = headers or {}
        # if str() is removed here we get an error in lint.middleware
headers.update({'X-REQUESTED-WITH': str('XMLHttpRequest')})
return headers
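# A minimal usage sketch (editor's addition; assumes the surrounding class is
# webtest's TestApp and `make_wsgi_app` is a hypothetical factory for any WSGI
# callable). get() funnels through _gen_request()/do_request() above, so the
# status check and cookie merging happen automatically.
def _testapp_example(make_wsgi_app):
    app = TestApp(make_wsgi_app())
    res = app.get('/', status=200)  # raises AppError on any other status
    return res.body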
|
hongchaodeng/contrib
|
refs/heads/master
|
hack/verify-flags-underscore.py
|
34
|
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import json
import mmap
import os
import re
import sys
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("filenames", help="list of files to check, all files if unspecified", nargs='*')
parser.add_argument("-e", "--skip-exceptions", help="ignore hack/verify-flags/exceptions.txt and print all output", action="store_true")
args = parser.parse_args()
# Cargo culted from http://stackoverflow.com/questions/898669/how-can-i-detect-if-a-file-is-binary-non-text-in-python
def is_binary(pathname):
"""Return true if the given filename is binary.
@raise EnvironmentError: if the file does not exist or cannot be accessed.
@attention: found @ http://bytes.com/topic/python/answers/21222-determine-file-type-binary-text on 6/08/2010
@author: Trent Mick <TrentM@ActiveState.com>
@author: Jorge Orpinel <jorge@orpinel.com>"""
try:
with open(pathname, 'r') as f:
CHUNKSIZE = 1024
while 1:
chunk = f.read(CHUNKSIZE)
if '\0' in chunk: # found null byte
return True
if len(chunk) < CHUNKSIZE:
break # done
    except Exception:
return True
return False
def get_all_files(rootdir):
all_files = []
for root, dirs, files in os.walk(rootdir):
# don't visit certain dirs
if 'Godeps' in dirs:
dirs.remove('Godeps')
if 'third_party' in dirs:
dirs.remove('third_party')
if '.git' in dirs:
dirs.remove('.git')
if 'exceptions.txt' in files:
files.remove('exceptions.txt')
if 'known-flags.txt' in files:
files.remove('known-flags.txt')
if 'vendor' in dirs:
dirs.remove('vendor')
for name in files:
if name.endswith(".svg"):
continue
if name.endswith(".gliffy"):
continue
pathname = os.path.join(root, name)
if is_binary(pathname):
continue
all_files.append(pathname)
return all_files
def normalize_files(rootdir, files):
newfiles = []
a = ['Godeps', 'vendor', 'third_party', 'exceptions.txt', 'known-flags.txt']
for f in files:
if any(x in f for x in a):
continue
if f.endswith(".svg"):
continue
if f.endswith(".gliffy"):
continue
newfiles.append(f)
for i, f in enumerate(newfiles):
if not os.path.isabs(f):
newfiles[i] = os.path.join(rootdir, f)
return newfiles
def line_has_bad_flag(line, flagre):
results = flagre.findall(line)
for result in results:
if not "_" in result:
return False
# this should exclude many cases where jinja2 templates use kube flags
# as variables, except it uses _ for the variable name
if "{% set" + result + "= \"" in line:
return False
if "pillar[" + result + "]" in line:
return False
if "grains" + result in line:
return False
# These are usually yaml definitions
if result.endswith(":"):
return False
# something common in juju variables...
if "template_data[" + result + "]" in line:
return False
return True
return False
# The list of files might not be the whole repo. If someone only changed a
# couple of files we don't want to scan all of the golang files looking for
# flags. Instead, load the list of flags from hack/verify-flags/known-flags.txt.
# If scanning the golang files finds a new flag not in that file, return an
# error and tell the user to add the flag to the flag list.
def get_flags(rootdir, files):
# preload the 'known' flags
pathname = os.path.join(rootdir, "hack/verify-flags/known-flags.txt")
f = open(pathname, 'r')
flags = set(f.read().splitlines())
f.close()
# preload the 'known' flags which don't follow the - standard
pathname = os.path.join(rootdir, "hack/verify-flags/excluded-flags.txt")
f = open(pathname, 'r')
excluded_flags = set(f.read().splitlines())
f.close()
regexs = [ re.compile('Var[P]?\([^,]*, "([^"]*)"'),
re.compile('.String[P]?\("([^"]*)",[^,]+,[^)]+\)'),
re.compile('.Int[P]?\("([^"]*)",[^,]+,[^)]+\)'),
re.compile('.Bool[P]?\("([^"]*)",[^,]+,[^)]+\)'),
re.compile('.Duration[P]?\("([^"]*)",[^,]+,[^)]+\)'),
re.compile('.StringSlice[P]?\("([^"]*)",[^,]+,[^)]+\)') ]
new_flags = set()
new_excluded_flags = set()
# walk all the files looking for any flags being declared
for pathname in files:
if not pathname.endswith(".go"):
continue
f = open(pathname, 'r')
data = f.read()
f.close()
matches = []
for regex in regexs:
matches = matches + regex.findall(data)
for flag in matches:
if any(x in flag for x in excluded_flags):
continue
if "_" in flag:
new_excluded_flags.add(flag)
if not "-" in flag:
continue
if flag not in flags:
new_flags.add(flag)
if len(new_excluded_flags) != 0:
print("Found a flag declared with an _ but which is not explicitly listed as a valid flag name in hack/verify-flags/excluded-flags.txt")
print("Are you certain this flag should not have been declared with an - instead?")
l = list(new_excluded_flags)
l.sort()
print("%s" % "\n".join(l))
sys.exit(1)
if len(new_flags) != 0:
print("Found flags in golang files not in the list of known flags. Please add these to hack/verify-flags/known-flags.txt")
l = list(new_flags)
l.sort()
print("%s" % "\n".join(l))
sys.exit(1)
return list(flags)
def flags_to_re(flags):
"""turn the list of all flags we found into a regex find both - and _ versions"""
dashRE = re.compile('[-_]')
flagREs = []
for flag in flags:
# turn all flag names into regexs which will find both types
newre = dashRE.sub('[-_]', flag)
# only match if there is not a leading or trailing alphanumeric character
flagREs.append("[^\w${]" + newre + "[^\w]")
# turn that list of regex strings into a single large RE
flagRE = "|".join(flagREs)
flagRE = re.compile(flagRE)
return flagRE
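# Illustrative check (editor's addition): a declared flag like "log-dir"
# becomes the pattern "[^\w${]log[-_]dir[^\w]", catching both spellings as
# long as they are not embedded in a larger identifier or a ${VAR} expansion.
def _flags_to_re_example():
    flagRE = flags_to_re(["log-dir"])
    assert flagRE.search(" --log_dir ") is not None
    assert flagRE.search(" --log-dir ") is not None
    assert flagRE.search("my_log_dir_var") is None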
def load_exceptions(rootdir):
exceptions = set()
if args.skip_exceptions:
return exceptions
exception_filename = os.path.join(rootdir, "hack/verify-flags/exceptions.txt")
exception_file = open(exception_filename, 'r')
for exception in exception_file.read().splitlines():
out = exception.split(":", 1)
if len(out) != 2:
printf("Invalid line in exceptions file: %s" % exception)
continue
filename = out[0]
line = out[1]
exceptions.add((filename, line))
return exceptions
def main():
rootdir = os.path.dirname(__file__) + "/../"
rootdir = os.path.abspath(rootdir)
exceptions = load_exceptions(rootdir)
if len(args.filenames) > 0:
files = args.filenames
else:
files = get_all_files(rootdir)
files = normalize_files(rootdir, files)
flags = get_flags(rootdir, files)
flagRE = flags_to_re(flags)
bad_lines = []
    # walk all the files looking for any flag that was declared and now has an _
for pathname in files:
relname = os.path.relpath(pathname, rootdir)
f = open(pathname, 'r')
for line in f.read().splitlines():
if line_has_bad_flag(line, flagRE):
if (relname, line) not in exceptions:
bad_lines.append((relname, line))
f.close()
if len(bad_lines) != 0:
if not args.skip_exceptions:
print("Found illegal 'flag' usage. If these are false positives you should run `hack/verify-flags-underscore.py -e > hack/verify-flags/exceptions.txt` to update the list.")
bad_lines.sort()
for (relname, line) in bad_lines:
print("%s:%s" % (relname, line))
return 1
if __name__ == "__main__":
sys.exit(main())
|
michael-borisov/django-social-auth
|
refs/heads/master
|
social_auth/backends/contrib/belgiumeid.py
|
14
|
from social.backends.belgiumeid import BelgiumEIDOpenId as EIDBackend
|
mtasende/BLAS_for_Parallella
|
refs/heads/master
|
blis-master/windows/build/gen-check-rev-file.py
|
6
|
#! /usr/bin/env python
#
# BLIS
# An object-based framework for developing high-performance BLAS-like
# libraries.
#
# Copyright (C) 2014, The University of Texas at Austin
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - Neither the name of The University of Texas at Austin nor the names
# of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
# ------------------------------------------------------------------------------
# Import modules
import sys
import os
import os.path
import getopt
# Global variables for command line options, with default settings.
script_name = ""
verbose_flag = False
# Global constants
toplevel_dirpath = "."
svn_dirname = ".svn"
entries_filename = "entries"
revision_filename = "revision"
dummy_rev_string = "unknown"
# ------------------------------------------------------------------------------
def print_usage():
# Print help information.
print " "
print " %s" % script_name
print " "
print " Field G. Van Zee"
print " "
print " This script ensures that a revision file exists so nmake can include the"
print " revision number in the subdirectory paths to the build products."
print " "
print " If a .svn directory exists, the revision file is created (or updated)"
print " to contain the revision number contained in .svn\entries file."
print " Otherwise, if a .svn directory does not exist, the revision file is"
print " left untouched if it exists, and created with a dummy value if it does"
print " not."
print " "
print " This script is typically invoked by configure.cmd, but it can also be"
print " run manually."
print " "
print " Usage:"
print " %s" % script_name
print " "
print " The following options are accepted:"
print " "
print " -v verbose"
print " Be verbose. Output what's happening."
print " "
# Exit the script.
sys.exit()
# ------------------------------------------------------------------------------
def main():
	# Declare our global variables.
global script_name
global verbose_flag
# Get the script name so we can use it in our output.
( script_dir, script_name ) = os.path.split( sys.argv[0] )
try:
# Get the command line options.
options, args = getopt.getopt( sys.argv[1:], "v")
except getopt.GetoptError, err:
# print help information and exit:
print str( err ) # will print something like "option -a not recognized"
print_usage()
# Parse our expected command line options.
for o, a in options:
if o == "-v":
verbose_flag = True
else:
assert False, "unhandled option"
# Check the number of arguments after command line option processing.
n_args = len( args )
if n_args != 0:
print_usage()
# Construct the filepaths to the entries and revision files.
entries_filepath = os.path.join( toplevel_dirpath, svn_dirname, entries_filename )
revision_filepath = os.path.join( toplevel_dirpath, revision_filename )
# Test for the existence of the entries file (and by proxy, a working copy).
entries_file_exists = file_exists( entries_filepath )
# If the entries file exists, we are in a working copy, and thus we can
# overwrite the revision file with a potentially new value.
if entries_file_exists == True:
# Read the revision number from the entries file.
rev_num_str = read_revision_from_entries( entries_filepath )
# Be verbose if verbosity was requested.
if verbose_flag == True:
print "%s: Found working copy; writing revision string \"%s\" to %s" % ( script_name, rev_num_str, revision_filepath )
# Write the revision number to the revision file.
write_revision_to_file( rev_num_str, revision_filepath )
# If we can't find the entries file, we probably are in an exported
# copy: either an official snapshot, or a copy that someone exported
# manually--hopefully (and likely) the former.
else:
# Be verbose if verbosity was requested.
if verbose_flag == True:
print "%s: Found export. Checking for revision file..." % ( script_name )
# Test for the existence of the revision file.
rev_file_exists = file_exists( revision_filepath )
# If the revision file does not exist, create a dummy file so the
# configure script has something to work with.
if rev_file_exists == False:
# Be verbose if verbosity was requested.
if verbose_flag == True:
print "%s: Revision file not found. Writing dummy revision string \"%s\" to %s" % ( script_name, dummy_rev_string, revision_filepath )
# Write the dummy string to the revision file.
write_revision_to_file( dummy_rev_string, revision_filepath )
else:
# Get the revision number from the file just for the purposes of
# being verbose, if it was requested.
rev_num_str = read_revision_file( revision_filepath )
# Be verbose if verbosity was requested.
if verbose_flag == True:
print "%s: Revision file found containing revision string \"%s\". Export is valid snapshot!" % ( script_name, rev_num_str )
# ------------------------------------------------------------------------------
def file_exists( filepath ):
# Try to open the file read-only.
try:
fp = open( filepath, 'r' )
fp.close()
exists = True
except IOError, err:
exists = False
return exists
# ------------------------------------------------------------------------------
def read_revision_from_entries( entries_filepath ):
# Open the ignore list files as read-only.
entries_file = open( entries_filepath, 'r' )
# Read all lines in the entries file.
raw_list = entries_file.readlines()
# Close the file.
entries_file.close()
# Grab the fourth line, which is where the revision number lives, and strip
# it of whitespace (probably just a newline).
rev_num_str = raw_list[3].strip()
# Return the revision number string.
return rev_num_str
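# Editor's note: this assumes the pre-1.7 Subversion working-copy layout,
# where the directory's own entry opens the .svn\entries file and its
# revision number sits on the fourth line, e.g.:
#   10        (entries file format version)
#             (entry name; empty for the directory itself)
#   dir       (node kind)
#   4211      (revision number, the value read above)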
# ------------------------------------------------------------------------------
def write_revision_to_file( rev_string, revision_filepath ):
# Open the revision file for writing.
revision_file = open( revision_filepath, 'w' )
# Write the revision string to the file.
revision_file.write( rev_string )
# Close the file.
revision_file.close()
# ------------------------------------------------------------------------------
def read_revision_file( revision_filepath ):
# Open the revision file.
revision_file = open( revision_filepath, 'r' )
# Read the first (and only) line.
line = revision_file.readline()
# Close the file.
revision_file.close()
	# Grab the string and strip it of whitespace (should just be a newline).
rev_num_str = line.strip()
# Return the revision number string.
return rev_num_str
# ------------------------------------------------------------------------------
# Begin by executing main().
main()
|
mollstam/UnrealPy
|
refs/heads/master
|
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Scrapy-1.0.1/scrapy/core/__init__.py
|
216
|
"""
Scrapy core library classes and functions.
"""
|
gjhiggins/sprox
|
refs/heads/master
|
sprox/dummyentity.py
|
3
|
class DummyEntity:pass
|
rmed/blahrgbot
|
refs/heads/master
|
blahrgbot/helper.py
|
1
|
# -*- coding: utf-8 -*-
#
# blahrgbot
# https://github.com/rmed/blahrgbot
#
# The MIT License (MIT)
#
# Copyright (c) 2016 Rafael Medina García <rafamedgar@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Helper functions."""
from blahrgbot.conf import DB, SCREAM
# { 'filename': 'file.ogg', 'file_id': '1234', 'desc': 'description' }
def db_field_exists(field, value):
"""Check if a field with the given value exists."""
result = DB.get(SCREAM[field] == value)
if result:
return True
return False
def db_get_all():
"""Return file id and description of all clips.
Returns:
List with (id, description) tuples
"""
return [(a['file_id'], a['desc']) for a in DB.all()]
def db_get_file_id(clip_name):
"""Obtain file_id for a specific clip."""
result = DB.get(SCREAM.filename == clip_name)
if not result:
return None
return result['file_id']
def db_set_file_id(clip_name, file_id, desc='NO_DESC'):
"""Update or insert the file ID of a clip."""
exists = db_get_file_id(clip_name)
if exists:
DB.update({'file_id': file_id}, (SCREAM.filename == clip_name))
else:
DB.insert({'filename': clip_name, 'file_id': file_id, 'desc': desc})
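# A minimal usage sketch (editor's addition; the clip name, file ID and
# description are made-up values):
def _helper_example():
    db_set_file_id('scream.ogg', '1234', desc='a blood-curdling scream')
    assert db_get_file_id('scream.ogg') == '1234'
    assert db_field_exists('filename', 'scream.ogg')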
|
lsaffre/lino
|
refs/heads/master
|
lino/modlib/notify/consumers2.py
|
2
|
import json
from asgiref.sync import async_to_sync
from channels.generic.websocket import WebsocketConsumer
class LinoConsumer(WebsocketConsumer):
def connect(self):
self.accept()
if self.scope.get('user', False):
username = self.scope["user"].username
async_to_sync(self.channel_layer.group_add)(username, self.channel_name)
def disconnect(self, close_code):
pass
def receive(self, text_data):
text_data_json = json.loads(text_data)
message = text_data_json['message']
self.send(text_data=json.dumps({
'message': message
}))
def send_notification(self, text):
self.send(text_data=text['text'])
|
david30907d/feedback_django
|
refs/heads/master
|
example/spirit/comment/flag/__init__.py
|
12
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
default_app_config = 'spirit.comment.flag.apps.SpiritCommentFlagConfig'
|
IshankGulati/scikit-learn
|
refs/heads/master
|
sklearn/utils/tests/test_metaestimators.py
|
86
|
from sklearn.utils.testing import assert_true, assert_false
from sklearn.utils.metaestimators import if_delegate_has_method
class Prefix(object):
def func(self):
pass
class MockMetaEstimator(object):
"""This is a mock meta estimator"""
a_prefix = Prefix()
@if_delegate_has_method(delegate="a_prefix")
def func(self):
"""This is a mock delegated function"""
pass
def test_delegated_docstring():
assert_true("This is a mock delegated function"
in str(MockMetaEstimator.__dict__['func'].__doc__))
assert_true("This is a mock delegated function"
in str(MockMetaEstimator.func.__doc__))
assert_true("This is a mock delegated function"
in str(MockMetaEstimator().func.__doc__))
class MetaEst(object):
"""A mock meta estimator"""
def __init__(self, sub_est, better_sub_est=None):
self.sub_est = sub_est
self.better_sub_est = better_sub_est
@if_delegate_has_method(delegate='sub_est')
def predict(self):
pass
class MetaEstTestTuple(MetaEst):
"""A mock meta estimator to test passing a tuple of delegates"""
@if_delegate_has_method(delegate=('sub_est', 'better_sub_est'))
def predict(self):
pass
class MetaEstTestList(MetaEst):
"""A mock meta estimator to test passing a list of delegates"""
@if_delegate_has_method(delegate=['sub_est', 'better_sub_est'])
def predict(self):
pass
class HasPredict(object):
"""A mock sub-estimator with predict method"""
def predict(self):
pass
class HasNoPredict(object):
"""A mock sub-estimator with no predict method"""
pass
def test_if_delegate_has_method():
assert_true(hasattr(MetaEst(HasPredict()), 'predict'))
assert_false(hasattr(MetaEst(HasNoPredict()), 'predict'))
assert_false(
hasattr(MetaEstTestTuple(HasNoPredict(), HasNoPredict()), 'predict'))
assert_true(
hasattr(MetaEstTestTuple(HasPredict(), HasNoPredict()), 'predict'))
assert_false(
hasattr(MetaEstTestTuple(HasNoPredict(), HasPredict()), 'predict'))
assert_false(
hasattr(MetaEstTestList(HasNoPredict(), HasPredict()), 'predict'))
assert_true(
hasattr(MetaEstTestList(HasPredict(), HasPredict()), 'predict'))
|
KokareIITP/django
|
refs/heads/master
|
django/core/management/commands/inspectdb.py
|
108
|
from __future__ import unicode_literals
import keyword
import re
from collections import OrderedDict
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS, connections
class Command(BaseCommand):
help = "Introspects the database tables in the given database and outputs a Django model module."
requires_system_checks = False
db_module = 'django.db'
def add_arguments(self, parser):
parser.add_argument('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a database to '
'introspect. Defaults to using the "default" database.')
def handle(self, **options):
try:
for line in self.handle_inspection(options):
self.stdout.write("%s\n" % line)
except NotImplementedError:
raise CommandError("Database inspection isn't supported for the currently selected database backend.")
def handle_inspection(self, options):
connection = connections[options['database']]
# 'table_name_filter' is a stealth option
table_name_filter = options.get('table_name_filter')
table2model = lambda table_name: re.sub(r'[^a-zA-Z0-9]', '', table_name.title())
strip_prefix = lambda s: s[1:] if s.startswith("u'") else s
with connection.cursor() as cursor:
yield "# This is an auto-generated Django model module."
yield "# You'll have to do the following manually to clean this up:"
yield "# * Rearrange models' order"
yield "# * Make sure each model has one field with primary_key=True"
yield "# * Make sure each ForeignKey has `on_delete` set to the desidered behavior."
yield (
"# * Remove `managed = False` lines if you wish to allow "
"Django to create, modify, and delete the table"
)
yield "# Feel free to rename the models, but don't rename db_table values or field names."
yield "from __future__ import unicode_literals"
yield ''
yield 'from %s import models' % self.db_module
known_models = []
for table_name in connection.introspection.table_names(cursor):
if table_name_filter is not None and callable(table_name_filter):
if not table_name_filter(table_name):
continue
yield ''
yield ''
yield 'class %s(models.Model):' % table2model(table_name)
known_models.append(table2model(table_name))
try:
relations = connection.introspection.get_relations(cursor, table_name)
except NotImplementedError:
relations = {}
try:
indexes = connection.introspection.get_indexes(cursor, table_name)
except NotImplementedError:
indexes = {}
try:
constraints = connection.introspection.get_constraints(cursor, table_name)
except NotImplementedError:
constraints = {}
used_column_names = [] # Holds column names used in the table so far
for row in connection.introspection.get_table_description(cursor, table_name):
comment_notes = [] # Holds Field notes, to be displayed in a Python comment.
extra_params = OrderedDict() # Holds Field parameters such as 'db_column'.
column_name = row[0]
is_relation = column_name in relations
att_name, params, notes = self.normalize_col_name(
column_name, used_column_names, is_relation)
extra_params.update(params)
comment_notes.extend(notes)
used_column_names.append(att_name)
# Add primary_key and unique, if necessary.
if column_name in indexes:
if indexes[column_name]['primary_key']:
extra_params['primary_key'] = True
elif indexes[column_name]['unique']:
extra_params['unique'] = True
if is_relation:
rel_to = "self" if relations[column_name][1] == table_name else table2model(relations[column_name][1])
if rel_to in known_models:
field_type = 'ForeignKey(%s' % rel_to
else:
field_type = "ForeignKey('%s'" % rel_to
else:
# Calling `get_field_type` to get the field type string and any
# additional parameters and notes.
field_type, field_params, field_notes = self.get_field_type(connection, table_name, row)
extra_params.update(field_params)
comment_notes.extend(field_notes)
field_type += '('
# Don't output 'id = meta.AutoField(primary_key=True)', because
# that's assumed if it doesn't exist.
if att_name == 'id' and extra_params == {'primary_key': True}:
if field_type == 'AutoField(':
continue
elif field_type == 'IntegerField(' and not connection.features.can_introspect_autofield:
comment_notes.append('AutoField?')
# Add 'null' and 'blank', if the 'null_ok' flag was present in the
# table description.
if row[6]: # If it's NULL...
if field_type == 'BooleanField(':
field_type = 'NullBooleanField('
else:
extra_params['blank'] = True
extra_params['null'] = True
field_desc = '%s = %s%s' % (
att_name,
# Custom fields will have a dotted path
'' if '.' in field_type else 'models.',
field_type,
)
if field_type.startswith('ForeignKey('):
field_desc += ', models.DO_NOTHING'
if extra_params:
if not field_desc.endswith('('):
field_desc += ', '
field_desc += ', '.join(
'%s=%s' % (k, strip_prefix(repr(v)))
for k, v in extra_params.items())
field_desc += ')'
if comment_notes:
field_desc += ' # ' + ' '.join(comment_notes)
yield ' %s' % field_desc
for meta_line in self.get_meta(table_name, constraints):
yield meta_line
def normalize_col_name(self, col_name, used_column_names, is_relation):
"""
Modify the column name to make it Python-compatible as a field name
"""
field_params = {}
field_notes = []
new_name = col_name.lower()
if new_name != col_name:
field_notes.append('Field name made lowercase.')
if is_relation:
if new_name.endswith('_id'):
new_name = new_name[:-3]
else:
field_params['db_column'] = col_name
new_name, num_repl = re.subn(r'\W', '_', new_name)
if num_repl > 0:
field_notes.append('Field renamed to remove unsuitable characters.')
if new_name.find('__') >= 0:
while new_name.find('__') >= 0:
new_name = new_name.replace('__', '_')
if col_name.lower().find('__') >= 0:
# Only add the comment if the double underscore was in the original name
field_notes.append("Field renamed because it contained more than one '_' in a row.")
if new_name.startswith('_'):
new_name = 'field%s' % new_name
field_notes.append("Field renamed because it started with '_'.")
if new_name.endswith('_'):
new_name = '%sfield' % new_name
field_notes.append("Field renamed because it ended with '_'.")
if keyword.iskeyword(new_name):
new_name += '_field'
field_notes.append('Field renamed because it was a Python reserved word.')
if new_name[0].isdigit():
new_name = 'number_%s' % new_name
field_notes.append("Field renamed because it wasn't a valid Python identifier.")
if new_name in used_column_names:
num = 0
while '%s_%d' % (new_name, num) in used_column_names:
num += 1
new_name = '%s_%d' % (new_name, num)
field_notes.append('Field renamed because of name conflict.')
if col_name != new_name and field_notes:
field_params['db_column'] = col_name
return new_name, field_params, field_notes
def get_field_type(self, connection, table_name, row):
"""
Given the database connection, the table name, and the cursor row
description, this routine will return the given field type name, as
well as any additional keyword parameters and notes for the field.
"""
field_params = OrderedDict()
field_notes = []
try:
field_type = connection.introspection.get_field_type(row[1], row)
except KeyError:
field_type = 'TextField'
field_notes.append('This field type is a guess.')
# This is a hook for data_types_reverse to return a tuple of
# (field_type, field_params_dict).
if type(field_type) is tuple:
field_type, new_params = field_type
field_params.update(new_params)
# Add max_length for all CharFields.
if field_type == 'CharField' and row[3]:
field_params['max_length'] = int(row[3])
if field_type == 'DecimalField':
if row[4] is None or row[5] is None:
field_notes.append(
'max_digits and decimal_places have been guessed, as this '
'database handles decimal fields as float')
field_params['max_digits'] = row[4] if row[4] is not None else 10
field_params['decimal_places'] = row[5] if row[5] is not None else 5
else:
field_params['max_digits'] = row[4]
field_params['decimal_places'] = row[5]
return field_type, field_params, field_notes
def get_meta(self, table_name, constraints):
"""
Return a sequence comprising the lines of code necessary
to construct the inner Meta class for the model corresponding
to the given database table name.
"""
unique_together = []
for index, params in constraints.items():
if params['unique']:
columns = params['columns']
if len(columns) > 1:
# we do not want to include the u"" or u'' prefix
# so we build the string rather than interpolate the tuple
tup = '(' + ', '.join("'%s'" % c for c in columns) + ')'
unique_together.append(tup)
meta = ["",
" class Meta:",
" managed = False",
" db_table = '%s'" % table_name]
if unique_together:
tup = '(' + ', '.join(unique_together) + ',)'
meta += [" unique_together = %s" % tup]
return meta
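# An illustrative slice of the generated module (editor's addition; the table
# and columns are hypothetical):
#     class AuthUser(models.Model):
#         username = models.CharField(unique=True, max_length=150)
#         created_at = models.DateTimeField(blank=True, null=True)
#         class Meta:
#             managed = False
#             db_table = 'auth_user'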
|
katstalk/android_external_chromium_org
|
refs/heads/kk44
|
tools/cr/cr/base/client.py
|
23
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Client configuration management.
This module holds the code for detecting and configuring the current client and
its output directories.
It is responsible for writing out the client specific plugins that tell the
rest of the cr tool what the client is capable of.
"""
import os
import sys
import cr
import cr.auto.build
import cr.auto.client
# The config version currently supported.
VERSION = 0.5
# The default directory name to store config inside
CLIENT_CONFIG_PATH = '.cr'
# The partial filename to add to a directory to get it's config file.
CLIENT_CONFIG_FILE = os.path.join(CLIENT_CONFIG_PATH, 'config.py')
# The format string for the header of a config file.
CONFIG_FILE_PREFIX = """
# This is an autogenerated file
# it *will* be overwritten, and changes may be lost
# The system will autoload any other python file in the same folder.
import cr
OVERRIDES = cr.Config.From("""
# The format string for each value in a config file.
CONFIG_VAR_LINE = '\n {0} = {1!r},'
# The format string for the tail of a config file.
CONFIG_FILE_SUFFIX = '\n)\n'
# The name of the gclient config file
GCLIENT_FILENAME = '.gclient'
# The default config values installed by this module.
DEFAULT = cr.Config.From(
CR_ROOT_PATH=os.path.join('{GOOGLE_CODE}'),
CR_CLIENT_PATH=os.path.join('{CR_ROOT_PATH}', '{CR_CLIENT_NAME}'),
CR_SRC=os.path.join('{CR_CLIENT_PATH}', 'src'),
CR_BUILD_DIR=os.path.join('{CR_SRC}', '{CR_OUT_FULL}'),
)
# Config values determined at run time by this module.
DETECTED = cr.Config.From(
CR_CLIENT_PATH=lambda context: _DetectPath(),
# _DetectName not declared yet so pylint: disable=unnecessary-lambda
CR_CLIENT_NAME=lambda context: _DetectName(context),
)
_cached_path = None
_cached_name = None
def _DetectPath():
"""A dynamic value function that tries to detect the current client."""
global _cached_path
if _cached_path is not None:
return _cached_path
# See if we can detect the source tree root
_cached_path = os.getcwd()
while (_cached_path and
not os.path.exists(os.path.join(_cached_path, GCLIENT_FILENAME))):
old = _cached_path
_cached_path = os.path.dirname(_cached_path)
if _cached_path == old:
_cached_path = None
if _cached_path is not None:
dirname, basename = os.path.split(_cached_path)
if basename == 'src':
# we have the src path, base is one level up
_cached_path = dirname
if _cached_path is None:
_cached_path = cr.visitor.HIDDEN
return _cached_path
def _DetectName(context):
"""A dynamic value function that works out the name of the current client."""
global _cached_name
if _cached_name is not None:
return _cached_name
_cached_name = 'chromium'
path = context.Get('CR_CLIENT_PATH')
if path is None:
return
_cached_name = os.path.basename(path)
return _cached_name
def _GetConfigFilename(path):
return os.path.realpath(os.path.join(path, CLIENT_CONFIG_FILE))
def _IsOutputDir(path):
return os.path.isfile(_GetConfigFilename(path))
def _WriteConfig(writer, data):
writer.write(CONFIG_FILE_PREFIX)
for key, value in data.items():
writer.write(CONFIG_VAR_LINE.format(key, value))
writer.write(CONFIG_FILE_SUFFIX)
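# For example (editor's note), _WriteConfig(sys.stdout, {'CR_OUT_FULL': 'out_x86'})
# emits a plugin of the form:
#   # This is an autogenerated file
#   # it *will* be overwritten, and changes may be lost
#   # The system will autoload any other python file in the same folder.
#   import cr
#   OVERRIDES = cr.Config.From(
#     CR_OUT_FULL = 'out_x86',
#   )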
def AddArguments(parser):
parser.add_argument(
'-o', '--out', dest='_out', metavar='name',
default=None,
help='The name of the out directory to use. Overrides CR_OUT.'
)
def GetOutArgument(context):
return getattr(context.args, '_out', None)
def ApplyOutArgument(context):
# TODO(iancottrell): be flexible, allow out to do approximate match...
out = GetOutArgument(context)
if out:
context.derived.Set(CR_OUT_FULL=out)
def ReadGClient(context):
"""Loads the .gclient configuration for the current client.
This will load from CR_CLIENT_PATH.
Args:
    context: The active context to load configuration for.
Returns:
The dict of values set in the .gclient file.
"""
# Now attempt to load and parse the .gclient file
result = {}
try:
gclient_file = context.Substitute(
os.path.join('{CR_CLIENT_PATH}', GCLIENT_FILENAME))
with open(gclient_file, 'r') as spec_file:
# matching the behaviour of gclient, so pylint: disable=exec-used
exec(spec_file.read(), {}, result)
except IOError:
# no .gclient file, skip it
pass
return result
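# A .gclient file is itself a Python fragment (editor's note; the solution
# shown is illustrative only), typically of the form:
#   solutions = [
#     { "name": "src",
#       "url": "https://chromium.googlesource.com/chromium/src.git",
#     },
#   ]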
def LoadConfig(context):
"""Loads the client configuration for the given context.
This will load configuration if present from CR_CLIENT_PATH and then
CR_BUILD_DIR.
Args:
    context: The active context to load configuration for.
Returns:
True if configuration was fully loaded.
"""
# Load the root config, will help set default build dir
client_path = context.Find('CR_CLIENT_PATH')
if not client_path:
return False
cr.auto.client.__path__.append(os.path.join(client_path, CLIENT_CONFIG_PATH))
cr.loader.Scan()
# Now load build dir config
build_dir = context.Find('CR_BUILD_DIR')
if not build_dir:
return False
cr.auto.build.__path__.append(os.path.join(build_dir, CLIENT_CONFIG_PATH))
cr.loader.Scan()
return hasattr(cr.auto.build, 'config')
def WriteConfig(context, path, data):
"""Writes a configuration out to a file.
This writes all the key value pairs in data out to a config file below path.
Args:
context: The context to run under.
path: The base path to write the config plugin into.
data: The key value pairs to write.
"""
filename = _GetConfigFilename(path)
config_dir = os.path.dirname(filename)
if context.dry_run:
print 'makedirs', config_dir
print 'Write config to', filename
_WriteConfig(sys.stdout, data)
else:
try:
os.makedirs(config_dir)
except OSError:
if not os.path.isdir(config_dir):
raise
with open(filename, 'w') as writer:
_WriteConfig(writer, data)
def PrintInfo(context):
print 'Selected output directory is', context.Find('CR_BUILD_DIR')
try:
for name in cr.auto.build.config.OVERRIDES.exported.keys():
print ' ', name, '=', context.Get(name)
except AttributeError:
pass
|
bxlab/HiFive_Paper
|
refs/heads/master
|
Scripts/MB/Matrix_Balancing.py
|
1
|
import sys
import numpy
def BNEWT(A, tol=1e-6, x0=None, delta=0.1, Delta=3, fl=False):
# Initialize values
res = []
n = A.shape[0]
e = numpy.ones((n, 1), dtype=numpy.float64)
if x0 is None:
x0=numpy.copy(e)
g = 0.9
eta = etamax = 0.1
stop_tol = tol * 0.5
x = numpy.copy(x0)
rt = tol ** 2.0
v = x * numpy.dot(A, x)
rk = 1.0 - v
rho_km1 = numpy.dot(rk.T, rk)[0, 0]
rho_km2 = rho_km1
rold = rout = rho_km1
i = MVP = 0
if fl:
print >> sys.stderr, ("it in. it res stop cur\n"),
# Outer iteration
while rout > rt:
i += 1
k = 0
y = numpy.copy(e)
innertol = max(eta ** 2.0 * rout, rt)
# Inner iteration by CG
while rho_km1 > innertol:
k += 1
if k == 1:
Z = rk / v
p = numpy.copy(Z)
rho_km1 = numpy.dot(rk.T, Z)
else:
beta = rho_km1 / rho_km2
p = Z + beta * p
# Update search direction efficiently
w = x * numpy.dot(A, x * p) + v * p
alpha = rho_km1 / numpy.dot(p.T, w)[0, 0]
ap = alpha * p
# Test distance to boundary of cone
ynew = y + ap
if numpy.amin(ynew) <= delta:
if delta == 0:
break
ind = numpy.where(ap < 0.0)[0]
gamma = numpy.amin((delta - y[ind]) / ap[ind])
y += gamma * ap
break
if numpy.amax(ynew) >= Delta:
ind = numpy.where(ynew > Delta)[0]
gamma = numpy.amin((Delta - y[ind]) / ap[ind])
y += gamma * ap
break
y = numpy.copy(ynew)
rk -= alpha * w
rho_km2 = rho_km1
Z = rk / v
rho_km1 = numpy.dot(rk.T, Z)[0, 0]
x *= y
v = x * numpy.dot(A, x)
rk = 1.0 - v
rho_km1 = numpy.dot(rk.T, rk)[0, 0]
rout = rho_km1
MVP += k + 1
# Update inner iteration stopping criterion
rat = rout / rold
rold = rout
res_norm = rout ** 0.5
eta_o = eta
eta = g * rat
if g * eta_o ** 2.0 > 0.1:
eta = max(eta, g * eta_o ** 2.0)
eta = max(min(eta, etamax), stop_tol / res_norm)
if fl:
print >> sys.stderr, ("%03i %06i %03.3f %e %e\n") % (i, k, res_norm, rt, rout),
res.append(res_norm)
if fl:
print >> sys.stderr, ("Matrix-vector products = %06i\n") % (MVP),
return [x, res]
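# A minimal usage sketch (editor's addition, hypothetical helper): BNEWT
# returns a scaling vector x such that diag(x) * A * diag(x) is approximately
# doubly stochastic, i.e. every row of the rescaled matrix sums to 1.
def _bnewt_example():
    A = numpy.array([[1.0, 2.0], [2.0, 3.0]])
    x, res = BNEWT(A, tol=1e-8)
    balanced = x * A * x.T  # balanced[i, j] == x[i] * A[i, j] * x[j]
    assert numpy.allclose(balanced.sum(axis=1), 1.0, atol=1e-3)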
def BNEWT_sparse(A, tol=1e-6, x0=None, delta=0.1, Delta=3, fl=False):
# A should be an n by 3 array of index1, index2, count
# Initialize values
n = numpy.amax(A[:, :2]) + 1
res = []
e = numpy.ones((n, 1), dtype=numpy.float64)
if x0 is None:
x0=numpy.copy(e)
g = 0.9
eta = etamax = 0.1
stop_tol = tol * 0.5
x = numpy.copy(x0)
rt = tol ** 2.0
corr = (A[:, 2:3] * x[A[:, 0]] * x[A[:, 1]]).reshape(-1)
v = numpy.bincount(A[:, 0], weights=corr, minlength=n).reshape(-1, 1)
v += numpy.bincount(A[:, 1], weights=corr, minlength=n).reshape(-1, 1)
rk = 1.0 - v
rho_km1 = numpy.dot(rk.T, rk)[0, 0]
rho_km2 = rho_km1
rold = rout = rho_km1
i = MVP = 0
if fl:
print >> sys.stderr, ("it in. it res stop cur\n"),
# Outer iteration
while rout > rt:
i += 1
k = 0
y = numpy.copy(e)
innertol = max(eta ** 2.0 * rout, rt)
# Inner iteration by CG
while rho_km1 > innertol:
k += 1
if k == 1:
Z = rk / v
p = numpy.copy(Z)
rho_km1 = numpy.dot(rk.T, Z)
else:
beta = rho_km1 / rho_km2
p = Z + beta * p
# Update search direction efficiently
corr = A[:, 2:3] * x[A[:, 0]] * x[A[:, 1]]
w = numpy.bincount(A[:, 0], weights=(corr * p[A[:, 1]]).reshape(-1), minlength=n).reshape(-1, 1)
w += numpy.bincount(A[:, 1], weights=(corr * p[A[:, 0]]).reshape(-1), minlength=n).reshape(-1, 1)
w += v * p
alpha = rho_km1 / numpy.dot(p.T, w)[0, 0]
ap = alpha * p
# Test distance to boundary of cone
ynew = y + ap
if numpy.amin(ynew) <= delta:
if delta == 0:
break
ind = numpy.where(ap < 0.0)[0]
gamma = numpy.amin((delta - y[ind]) / ap[ind])
y += gamma * ap
break
if numpy.amax(ynew) >= Delta:
ind = numpy.where(ynew > Delta)[0]
gamma = numpy.amin((Delta - y[ind]) / ap[ind])
y += gamma * ap
break
y = numpy.copy(ynew)
rk -= alpha * w
rho_km2 = rho_km1
Z = rk / v
rho_km1 = numpy.dot(rk.T, Z)[0, 0]
x *= y
corr = (A[:, 2:3] * x[A[:, 0]] * x[A[:, 1]]).reshape(-1)
v = numpy.bincount(A[:, 0], weights=corr, minlength=n).reshape(-1, 1)
v += numpy.bincount(A[:, 1], weights=corr, minlength=n).reshape(-1, 1)
rk = 1.0 - v
rho_km1 = numpy.dot(rk.T, rk)[0, 0]
rout = rho_km1
MVP += k + 1
# Update inner iteration stopping criterion
rat = rout / rold
rold = rout
res_norm = rout ** 0.5
eta_o = eta
eta = g * rat
if g * eta_o ** 2.0 > 0.1:
eta = max(eta, g * eta_o ** 2.0)
eta = max(min(eta, etamax), stop_tol / res_norm)
if fl:
print >> sys.stderr, ("%03i %06i %03.3f %e %e\n") % (i, k, res_norm, rt, rout),
res.append(res_norm)
if fl:
print >> sys.stderr, ("Matrix-vector products = %06i\n") % (MVP),
return [x, res]
def BNEWT_sparse_binary(A, tol=1e-6, x0=None, delta=0.1, Delta=3, fl=False):
# A should be an n by 3 array of index1, index2, count
# Initialize values
n = numpy.amax(A[:, :2]) + 1
res = []
e = numpy.ones((n, 1), dtype=numpy.float64)
if x0 is None:
x0=numpy.copy(e)
g = 0.9
eta = etamax = 0.1
stop_tol = tol * 0.5
x = numpy.copy(x0)
rt = tol ** 2.0
corr = (x[A[:, 0]] * x[A[:, 1]]).reshape(-1)
v = numpy.bincount(A[:, 0], weights=corr, minlength=n).reshape(-1, 1)
v += numpy.bincount(A[:, 1], weights=corr, minlength=n).reshape(-1, 1)
rk = 1.0 - v
rho_km1 = numpy.dot(rk.T, rk)[0, 0]
rho_km2 = rho_km1
rold = rout = rho_km1
i = MVP = 0
if fl:
print >> sys.stderr, ("it in. it res stop cur\n"),
# Outer iteration
while rout > rt:
i += 1
k = 0
y = numpy.copy(e)
innertol = max(eta ** 2.0 * rout, rt)
# Inner iteration by CG
while rho_km1 > innertol:
k += 1
if k == 1:
Z = rk / v
p = numpy.copy(Z)
rho_km1 = numpy.dot(rk.T, Z)
else:
beta = rho_km1 / rho_km2
p = Z + beta * p
# Update search direction efficiently
corr = x[A[:, 0]] * x[A[:, 1]]
w = numpy.bincount(A[:, 0], weights=(corr * p[A[:, 1]]).reshape(-1), minlength=n).reshape(-1, 1)
w += numpy.bincount(A[:, 1], weights=(corr * p[A[:, 0]]).reshape(-1), minlength=n).reshape(-1, 1)
w += v * p
alpha = rho_km1 / numpy.dot(p.T, w)[0, 0]
ap = alpha * p
# Test distance to boundary of cone
ynew = y + ap
if numpy.amin(ynew) <= delta:
if delta == 0:
break
ind = numpy.where(ap < 0.0)[0]
gamma = numpy.amin((delta - y[ind]) / ap[ind])
y += gamma * ap
break
if numpy.amax(ynew) >= Delta:
ind = numpy.where(ynew > Delta)[0]
gamma = numpy.amin((Delta - y[ind]) / ap[ind])
y += gamma * ap
break
y = numpy.copy(ynew)
rk -= alpha * w
rho_km2 = rho_km1
Z = rk / v
rho_km1 = numpy.dot(rk.T, Z)[0, 0]
x *= y
corr = (x[A[:, 0]] * x[A[:, 1]]).reshape(-1)
v = numpy.bincount(A[:, 0], weights=corr, minlength=n).reshape(-1, 1)
v += numpy.bincount(A[:, 1], weights=corr, minlength=n).reshape(-1, 1)
rk = 1.0 - v
rho_km1 = numpy.dot(rk.T, rk)[0, 0]
rout = rho_km1
MVP += k + 1
# Update inner iteration stopping criterion
rat = rout / rold
rold = rout
res_norm = rout ** 0.5
eta_o = eta
eta = g * rat
if g * eta_o ** 2.0 > 0.1:
eta = max(eta, g * eta_o ** 2.0)
eta = max(min(eta, etamax), stop_tol / res_norm)
if fl:
print >> sys.stderr, ("%03i %06i %03.3f %e %e\n") % (i, k, res_norm, rt, rout),
res.append(res_norm)
if fl:
print >> sys.stderr, ("Matrix-vector products = %06i\n") % (MVP),
return [x, res]
|
thenetcircle/dino
|
refs/heads/master
|
dino/rest/resources/history.py
|
1
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import traceback
from datetime import datetime
from flask import request
from functools import lru_cache
from dino.rest.resources.base import BaseResource
from dino.admin.orm import storage_manager
from dino.utils import b64e
from dino.utils.decorators import timeit
logger = logging.getLogger(__name__)
__author__ = 'Oscar Eriksson <oscar.eriks@gmail.com>'
class HistoryResource(BaseResource):
def __init__(self):
super(HistoryResource, self).__init__()
self.last_cleared = datetime.utcnow()
self.request = request
def _get_lru_method(self):
return self.do_get_with_params
def _get_last_cleared(self):
return self.last_cleared
def _set_last_cleared(self, last_cleared):
self.last_cleared = last_cleared
@lru_cache()
def do_get_with_params(self, room_id, user_id, from_time, to_time):
msgs, from_time, to_time = storage_manager.find_history(room_id, user_id, from_time, to_time)
return msgs
@timeit(logger, 'on_rest_history')
def do_get(self):
the_json = self.validate_json()
logger.debug('GET request: %s' % str(the_json))
room_id = the_json.get('room_id', '')
user_id = the_json.get('user_id')
from_time = the_json.get('from_time')
to_time = the_json.get('to_time')
try:
messages = self.do_get_with_params(room_id, user_id, from_time, to_time)
for message in messages:
message['from_user_name'] = b64e(message['from_user_name'])
message['body'] = b64e(message['body'])
message['target_name'] = b64e(message['target_name'])
message['channel_name'] = b64e(message['channel_name'])
return messages
except Exception as e:
logger.error('could not get messages: %s' % str(e))
raise e
def validate_json(self):
try:
the_json = self.request.get_json(silent=True)
except Exception as e:
logger.error('error: %s' % str(e))
logger.exception(traceback.format_exc())
raise ValueError('invalid json')
if the_json is None:
logger.error('empty request body')
raise ValueError('empty request body')
return the_json
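# A request body for do_get() looks like this (editor's note; the values and
# timestamp format are illustrative, and from_time/to_time may be omitted
# since storage_manager.find_history resolves the actual range):
#   {
#       "room_id": "b0a917f4-8a15-4e4f-b8a9-f8a7f85812b3",
#       "user_id": "124352",
#       "from_time": "2017-01-01T00:00:00Z",
#       "to_time": "2017-02-01T00:00:00Z"
#   }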
|
wizmer/NeuroM
|
refs/heads/master
|
examples/histogram.py
|
2
|
#!/usr/bin/env python
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Simple Histogram function for multiple neurons."""
from itertools import chain
import numpy as np
from neurom.view import common
def histogram(neurons, feature, new_fig=True, subplot=False, normed=False, **kwargs):
"""
Plot a histogram of the selected feature for the population of neurons.
Parameters :
neurons : list
List of Neurons. Single neurons must be encapsulated in a list.
feature : str
The feature of interest.
bins : int
Number of bins for the histogram.
cumulative : bool
Sets cumulative histogram on.
subplot : bool
Default is False, which returns a matplotlib figure object. If True,
returns a matplotlib axis object, for use as a subplot.
Returns :
figure_output : list
[fig|ax, figdata, figtext]
The first item is either a figure object (if subplot is False) or an
axis object. The second item is an object containing the data used to
generate the figure. The final item is text used in report generation
as a figure legend. This text needs to be manually entered in each
figure file.
"""
bins = kwargs.get('bins', 25)
cumulative = kwargs.get('cumulative', False)
fig, ax = common.get_figure(new_fig=new_fig, subplot=subplot)
kwargs['xlabel'] = kwargs.get('xlabel', feature)
kwargs['ylabel'] = kwargs.get('ylabel', feature + ' fraction')
kwargs['title'] = kwargs.get('title', feature + ' histogram')
feature_values = [getattr(neu, 'get_' + feature)() for neu in neurons]
neu_labels = [neu.name for neu in neurons]
ax.hist(feature_values, bins=bins, cumulative=cumulative, label=neu_labels, normed=normed)
kwargs['no_legend'] = len(neu_labels) == 1
return common.plot_style(fig=fig, ax=ax, **kwargs)
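# A minimal usage sketch (editor's addition; 'section_lengths' is a
# hypothetical feature name, assuming each neuron exposes the matching
# get_<feature>() accessor that histogram() relies on above):
def _histogram_example(neurons):
    return histogram(neurons, 'section_lengths', bins=40, cumulative=True)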
def population_feature_values(pops, feature):
"""Extracts feature values per population
"""
pops_feature_values = []
for pop in pops:
feature_values = [getattr(neu, 'get_' + feature)() for neu in pop.neurons]
# ugly hack to chain in case of list of lists
if any([isinstance(p, (list, np.ndarray)) for p in feature_values]):
feature_values = list(chain(*feature_values))
pops_feature_values.append(feature_values)
return pops_feature_values
def population_histogram(pops, feature, new_fig=True, normed=False, subplot=False, **kwargs):
"""
Plot a histogram of the selected feature for the population of neurons.
Parameters :
populations : populations list
feature : str
The feature of interest.
bins : int
Number of bins for the histogram.
cumulative : bool
Sets cumulative histogram on.
subplot : bool
Default is False, which returns a matplotlib figure object. If True,
returns a matplotlib axis object, for use as a subplot.
Returns :
figure_output : list
[fig|ax, figdata, figtext]
The first item is either a figure object (if subplot is False) or an
axis object. The second item is an object containing the data used to
generate the figure. The final item is text used in report generation
as a figure legend. This text needs to be manually entered in each
figure file.
"""
bins = kwargs.get('bins', 25)
cumulative = kwargs.get('cumulative', False)
fig, ax = common.get_figure(new_fig=new_fig, subplot=subplot)
kwargs['xlabel'] = kwargs.get('xlabel', feature)
kwargs['ylabel'] = kwargs.get('ylabel', feature + ' fraction')
kwargs['title'] = kwargs.get('title', feature + ' histogram')
pops_feature_values = population_feature_values(pops, feature)
pops_labels = [pop.name for pop in pops]
ax.hist(pops_feature_values, bins=bins, cumulative=cumulative, label=pops_labels, normed=normed)
kwargs['no_legend'] = len(pops_labels) == 1
return common.plot_style(fig=fig, ax=ax, **kwargs)
|
kmoocdev2/edx-platform
|
refs/heads/real_2019
|
common/lib/xmodule/xmodule/tests/test_course_module.py
|
11
|
"""Tests the course modules and their functions"""
import ddt
import unittest
from datetime import datetime, timedelta
from dateutil import parser
import itertools
from fs.memoryfs import MemoryFS
from mock import Mock, patch
from pytz import utc
from xblock.runtime import KvsFieldData, DictKeyValueStore
import xmodule.course_module
from xmodule.modulestore.xml import ImportSystem, XMLModuleStore
from opaque_keys.edx.keys import CourseKey
ORG = 'test_org'
COURSE = 'test_course'
NOW = datetime.strptime('2013-01-01T01:00:00', '%Y-%m-%dT%H:%M:00').replace(tzinfo=utc)
_TODAY = datetime.now(utc)
_LAST_WEEK = _TODAY - timedelta(days=7)
_NEXT_WEEK = _TODAY + timedelta(days=7)
class CourseFieldsTestCase(unittest.TestCase):
shard = 1
def test_default_start_date(self):
self.assertEqual(
xmodule.course_module.CourseFields.start.default,
datetime(2030, 1, 1, tzinfo=utc)
)
class DummySystem(ImportSystem):
@patch('xmodule.modulestore.xml.OSFS', lambda dir: MemoryFS())
def __init__(self, load_error_modules):
xmlstore = XMLModuleStore("data_dir", source_dirs=[],
load_error_modules=load_error_modules)
course_id = CourseKey.from_string('/'.join([ORG, COURSE, 'test_run']))
course_dir = "test_dir"
error_tracker = Mock()
super(DummySystem, self).__init__(
xmlstore=xmlstore,
course_id=course_id,
course_dir=course_dir,
error_tracker=error_tracker,
load_error_modules=load_error_modules,
field_data=KvsFieldData(DictKeyValueStore()),
)
def get_dummy_course(start, announcement=None, is_new=None, advertised_start=None, end=None, certs='end'):
"""Get a dummy course"""
system = DummySystem(load_error_modules=True)
def to_attrb(n, v):
return '' if v is None else '{0}="{1}"'.format(n, v).lower()
is_new = to_attrb('is_new', is_new)
announcement = to_attrb('announcement', announcement)
advertised_start = to_attrb('advertised_start', advertised_start)
end = to_attrb('end', end)
start_xml = '''
<course org="{org}" course="{course}" display_organization="{org}_display" display_coursenumber="{course}_display"
graceperiod="1 day" url_name="test"
start="{start}"
{announcement}
{is_new}
{advertised_start}
{end}
certificates_display_behavior="{certs}">
<chapter url="hi" url_name="ch" display_name="CH">
<html url_name="h" display_name="H">Two houses, ...</html>
</chapter>
</course>
'''.format(
org=ORG,
course=COURSE,
start=start,
is_new=is_new,
announcement=announcement,
advertised_start=advertised_start,
end=end,
certs=certs,
)
return system.process_xml(start_xml)
class HasEndedMayCertifyTestCase(unittest.TestCase):
"""Double check the semantics around when to finalize courses."""
shard = 1
def setUp(self):
super(HasEndedMayCertifyTestCase, self).setUp()
system = DummySystem(load_error_modules=True)
#sample_xml = """
# <course org="{org}" course="{course}" display_organization="{org}_display" display_coursenumber="{course}_display"
# graceperiod="1 day" url_name="test"
# start="2012-01-01T12:00"
# {end}
# certificates_show_before_end={cert}>
# <chapter url="hi" url_name="ch" display_name="CH">
# <html url_name="h" display_name="H">Two houses, ...</html>
# </chapter>
# </course>
#""".format(org=ORG, course=COURSE)
past_end = (datetime.now() - timedelta(days=12)).strftime("%Y-%m-%dT%H:%M:00")
future_end = (datetime.now() + timedelta(days=12)).strftime("%Y-%m-%dT%H:%M:00")
self.past_show_certs = get_dummy_course("2012-01-01T12:00", end=past_end, certs='early_with_info')
self.past_show_certs_no_info = get_dummy_course("2012-01-01T12:00", end=past_end, certs='early_no_info')
self.past_noshow_certs = get_dummy_course("2012-01-01T12:00", end=past_end, certs='end')
self.future_show_certs = get_dummy_course("2012-01-01T12:00", end=future_end, certs='early_with_info')
self.future_show_certs_no_info = get_dummy_course("2012-01-01T12:00", end=future_end, certs='early_no_info')
self.future_noshow_certs = get_dummy_course("2012-01-01T12:00", end=future_end, certs='end')
#self.past_show_certs = system.process_xml(sample_xml.format(end=past_end, cert=True))
#self.past_noshow_certs = system.process_xml(sample_xml.format(end=past_end, cert=False))
#self.future_show_certs = system.process_xml(sample_xml.format(end=future_end, cert=True))
#self.future_noshow_certs = system.process_xml(sample_xml.format(end=future_end, cert=False))
def test_has_ended(self):
"""Check that has_ended correctly tells us when a course is over."""
self.assertTrue(self.past_show_certs.has_ended())
self.assertTrue(self.past_show_certs_no_info.has_ended())
self.assertTrue(self.past_noshow_certs.has_ended())
self.assertFalse(self.future_show_certs.has_ended())
self.assertFalse(self.future_show_certs_no_info.has_ended())
self.assertFalse(self.future_noshow_certs.has_ended())
def test_may_certify(self):
"""Check that may_certify correctly tells us when a course may wrap."""
self.assertTrue(self.past_show_certs.may_certify())
self.assertTrue(self.past_noshow_certs.may_certify())
self.assertTrue(self.past_show_certs_no_info.may_certify())
self.assertTrue(self.future_show_certs.may_certify())
self.assertTrue(self.future_show_certs_no_info.may_certify())
self.assertFalse(self.future_noshow_certs.may_certify())
class CourseSummaryHasEnded(unittest.TestCase):
""" Test for has_ended method when end date is missing timezone information. """
shard = 1
def test_course_end(self):
test_course = get_dummy_course("2012-01-01T12:00")
bad_end_date = parser.parse("2012-02-21 10:28:45")
summary = xmodule.course_module.CourseSummary(test_course.id, end=bad_end_date)
self.assertTrue(summary.has_ended())
@ddt.ddt
class IsNewCourseTestCase(unittest.TestCase):
"""Make sure the property is_new works on courses"""
shard = 1
def setUp(self):
super(IsNewCourseTestCase, self).setUp()
# Needed for test_is_newish
datetime_patcher = patch.object(
xmodule.course_metadata_utils, 'datetime',
Mock(wraps=datetime)
)
mocked_datetime = datetime_patcher.start()
mocked_datetime.now.return_value = NOW
self.addCleanup(datetime_patcher.stop)
@patch('xmodule.course_metadata_utils.datetime.now')
def test_sorting_score(self, gmtime_mock):
gmtime_mock.return_value = NOW
day1 = '2012-01-01T12:00'
day2 = '2012-01-02T12:00'
dates = [
# Announce date takes priority over actual start
# and courses announced on a later date are newer
# than courses announced for an earlier date
((day1, day2, None), (day1, day1, None), self.assertLess),
((day1, day1, None), (day2, day1, None), self.assertEqual),
# Announce dates take priority over advertised starts
((day1, day2, day1), (day1, day1, day1), self.assertLess),
((day1, day1, day2), (day2, day1, day2), self.assertEqual),
# Later start == newer course
((day2, None, None), (day1, None, None), self.assertLess),
((day1, None, None), (day1, None, None), self.assertEqual),
# Non-parseable advertised starts are ignored in preference to actual starts
((day2, None, "Spring"), (day1, None, "Fall"), self.assertLess),
((day1, None, "Spring"), (day1, None, "Fall"), self.assertEqual),
            # Partially parseable advertised starts should take priority over start dates
((day2, None, "October 2013"), (day2, None, "October 2012"), self.assertLess),
((day2, None, "October 2013"), (day1, None, "October 2013"), self.assertEqual),
# Parseable advertised starts take priority over start dates
((day1, None, day2), (day1, None, day1), self.assertLess),
((day2, None, day2), (day1, None, day2), self.assertEqual),
]
for a, b, assertion in dates:
a_score = get_dummy_course(start=a[0], announcement=a[1], advertised_start=a[2]).sorting_score
b_score = get_dummy_course(start=b[0], announcement=b[1], advertised_start=b[2]).sorting_score
print "Comparing %s to %s" % (a, b)
assertion(a_score, b_score)
start_advertised_settings = [
# start, advertised, result, is_still_default, date_time_result
('2012-12-02T12:00', None, 'Dec 02, 2012', False, u'Dec 02, 2012 at 12:00 UTC'),
('2012-12-02T12:00', '2011-11-01T12:00', 'Nov 01, 2011', False, u'Nov 01, 2011 at 12:00 UTC'),
('2012-12-02T12:00', 'Spring 2012', 'Spring 2012', False, 'Spring 2012'),
('2012-12-02T12:00', 'November, 2011', 'November, 2011', False, 'November, 2011'),
(xmodule.course_module.CourseFields.start.default, None, 'TBD', True, 'TBD'),
(xmodule.course_module.CourseFields.start.default, 'January 2014', 'January 2014', False, 'January 2014'),
]
def test_start_date_is_default(self):
for s in self.start_advertised_settings:
d = get_dummy_course(start=s[0], advertised_start=s[1])
self.assertEqual(d.start_date_is_still_default, s[3])
def test_display_organization(self):
descriptor = get_dummy_course(start='2012-12-02T12:00', is_new=True)
self.assertNotEqual(descriptor.location.org, descriptor.display_org_with_default)
self.assertEqual(descriptor.display_org_with_default, "{0}_display".format(ORG))
def test_display_coursenumber(self):
descriptor = get_dummy_course(start='2012-12-02T12:00', is_new=True)
self.assertNotEqual(descriptor.location.course, descriptor.display_number_with_default)
self.assertEqual(descriptor.display_number_with_default, "{0}_display".format(COURSE))
def test_is_newish(self):
descriptor = get_dummy_course(start='2012-12-02T12:00', is_new=True)
assert descriptor.is_newish is True
descriptor = get_dummy_course(start='2013-02-02T12:00', is_new=False)
assert descriptor.is_newish is False
descriptor = get_dummy_course(start='2013-02-02T12:00', is_new=True)
assert descriptor.is_newish is True
descriptor = get_dummy_course(start='2013-01-15T12:00')
assert descriptor.is_newish is True
descriptor = get_dummy_course(start='2013-03-01T12:00')
assert descriptor.is_newish is True
descriptor = get_dummy_course(start='2012-10-15T12:00')
assert descriptor.is_newish is False
descriptor = get_dummy_course(start='2012-12-31T12:00')
assert descriptor.is_newish is True
class DiscussionTopicsTestCase(unittest.TestCase):
shard = 1
def test_default_discussion_topics(self):
d = get_dummy_course('2012-12-02T12:00')
self.assertEqual({'General': {'id': 'i4x-test_org-test_course-course-test'}}, d.discussion_topics)
class TeamsConfigurationTestCase(unittest.TestCase):
"""
Tests for the configuration of teams and the helper methods for accessing them.
"""
shard = 1
def setUp(self):
super(TeamsConfigurationTestCase, self).setUp()
self.course = get_dummy_course('2012-12-02T12:00')
self.course.teams_configuration = dict()
self.count = itertools.count()
def add_team_configuration(self, max_team_size=3, topics=None):
""" Add a team configuration to the course. """
teams_configuration = {}
teams_configuration["topics"] = [] if topics is None else topics
if max_team_size is not None:
teams_configuration["max_team_size"] = max_team_size
self.course.teams_configuration = teams_configuration
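    # The configuration built above has, for example, the shape:
    #     {"max_team_size": 3,
    #      "topics": [{"id": "topic_id_0", "name": "Name 0",
    #                  "description": "Description 0"}]}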
def make_topic(self):
""" Make a sample topic dictionary. """
next_num = self.count.next()
topic_id = "topic_id_{}".format(next_num)
name = "Name {}".format(next_num)
description = "Description {}".format(next_num)
return {"name": name, "description": description, "id": topic_id}
def test_teams_enabled_new_course(self):
# Make sure we can detect when no teams exist.
self.assertFalse(self.course.teams_enabled)
# add topics
self.add_team_configuration(max_team_size=4, topics=[self.make_topic()])
self.assertTrue(self.course.teams_enabled)
# remove them again
self.add_team_configuration(max_team_size=4, topics=[])
self.assertFalse(self.course.teams_enabled)
def test_teams_enabled_max_size_only(self):
self.add_team_configuration(max_team_size=4)
self.assertFalse(self.course.teams_enabled)
def test_teams_enabled_no_max_size(self):
self.add_team_configuration(max_team_size=None, topics=[self.make_topic()])
self.assertTrue(self.course.teams_enabled)
def test_teams_max_size_no_teams_configuration(self):
self.assertIsNone(self.course.teams_max_size)
def test_teams_max_size_with_teams_configured(self):
size = 4
self.add_team_configuration(max_team_size=size, topics=[self.make_topic(), self.make_topic()])
self.assertTrue(self.course.teams_enabled)
self.assertEqual(size, self.course.teams_max_size)
def test_teams_topics_no_teams(self):
self.assertIsNone(self.course.teams_topics)
def test_teams_topics_no_topics(self):
self.add_team_configuration(max_team_size=4)
self.assertEqual(self.course.teams_topics, [])
def test_teams_topics_with_topics(self):
topics = [self.make_topic(), self.make_topic()]
self.add_team_configuration(max_team_size=4, topics=topics)
self.assertTrue(self.course.teams_enabled)
self.assertEqual(self.course.teams_topics, topics)
class SelfPacedTestCase(unittest.TestCase):
"""Tests for self-paced courses."""
shard = 1
def setUp(self):
super(SelfPacedTestCase, self).setUp()
self.course = get_dummy_course('2012-12-02T12:00')
def test_default(self):
self.assertFalse(self.course.self_paced)
class BypassHomeTestCase(unittest.TestCase):
"""Tests for setting which allows course home to be bypassed."""
shard = 1
def setUp(self):
super(BypassHomeTestCase, self).setUp()
self.course = get_dummy_course('2012-12-02T12:00')
def test_default(self):
self.assertFalse(self.course.bypass_home)
class CourseDescriptorTestCase(unittest.TestCase):
"""
Tests for a select few functions from CourseDescriptor.
I wrote these test functions in order to satisfy the coverage checker for
PR #8484, which modified some code within CourseDescriptor. However, this
    class definitely isn't a comprehensive test case for CourseDescriptor, as
    writing such a test case was out of the scope of the PR.
"""
shard = 1
def setUp(self):
"""
Initialize dummy testing course.
"""
super(CourseDescriptorTestCase, self).setUp()
self.course = get_dummy_course(start=_TODAY, end=_NEXT_WEEK)
def test_clean_id(self):
"""
Test CourseDescriptor.clean_id.
"""
self.assertEqual(
self.course.clean_id(),
"course_ORSXG5C7N5ZGOL3UMVZXIX3DN52XE43FF52GK43UL5ZHK3Q="
)
self.assertEqual(
self.course.clean_id(padding_char='$'),
"course_ORSXG5C7N5ZGOL3UMVZXIX3DN52XE43FF52GK43UL5ZHK3Q$"
)
def test_has_started(self):
"""
Test CourseDescriptor.has_started.
"""
self.course.start = _LAST_WEEK
self.assertTrue(self.course.has_started())
self.course.start = _NEXT_WEEK
self.assertFalse(self.course.has_started())
def test_number(self):
"""
Test CourseDescriptor.number.
"""
self.assertEqual(self.course.number, COURSE)
def test_set_default_certificate_available_date(self):
"""
The certificate_available_date field should default to two days
after the course end date.
"""
expected_certificate_available_date = self.course.end + timedelta(days=2)
self.assertEqual(expected_certificate_available_date, self.course.certificate_available_date)
|
Sorsly/subtle
|
refs/heads/master
|
google-cloud-sdk/lib/third_party/chardet/constants.py
|
237
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
_debug = 0
eDetecting = 0
eFoundIt = 1
eNotMe = 2
eStart = 0
eError = 1
eItsMe = 2
SHORTCUT_THRESHOLD = 0.95
import __builtin__
if not hasattr(__builtin__, 'False'):
False = 0
True = 1
else:
False = __builtin__.False
True = __builtin__.True
|
kfox1111/horizon
|
refs/heads/master
|
horizon/tables/__init__.py
|
48
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Convenience imports for public API components.
# Importing non-modules that are not used explicitly
from horizon.tables.actions import Action # noqa
from horizon.tables.actions import BatchAction # noqa
from horizon.tables.actions import DeleteAction # noqa
from horizon.tables.actions import FilterAction # noqa
from horizon.tables.actions import FixedFilterAction # noqa
from horizon.tables.actions import LinkAction # noqa
from horizon.tables.actions import UpdateAction # noqa
from horizon.tables.base import Column # noqa
from horizon.tables.base import DataTable # noqa
from horizon.tables.base import Row # noqa
from horizon.tables.views import DataTableView # noqa
from horizon.tables.views import MixedDataTableView # noqa
from horizon.tables.views import MultiTableMixin # noqa
from horizon.tables.views import MultiTableView # noqa
|
rghe/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/vyos/vyos_vlan.py
|
26
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: vyos_vlan
version_added: "2.5"
author: "Trishna Guha (@trishnaguha)"
short_description: Manage VLANs on VyOS network devices
description:
- This module provides declarative management of VLANs
on VyOS network devices.
notes:
- Tested against VYOS 1.1.7
options:
name:
description:
- Name of the VLAN.
address:
description:
- Configure Virtual interface address.
vlan_id:
description:
- ID of the VLAN. Range 0-4094.
required: true
interfaces:
description:
- List of interfaces that should be associated to the VLAN.
required: true
associated_interfaces:
description:
      - This is an intent option that checks the operational state of the given VLAN C(name)
        for associated interfaces. If the value of C(associated_interfaces) does not match
        the operational state of the VLAN on the device, the module fails.
version_added: "2.5"
delay:
description:
      - Time in seconds the play should wait before checking the declarative intent parameter values.
default: 10
aggregate:
    description: List of VLAN definitions.
purge:
description:
- Purge VLANs not defined in the I(aggregate) parameter.
default: no
state:
description:
- State of the VLAN configuration.
default: present
choices: ['present', 'absent']
extends_documentation_fragment: vyos
"""
EXAMPLES = """
- name: Create vlan
vyos_vlan:
vlan_id: 100
name: vlan-100
interfaces: eth1
state: present
- name: Add interfaces to VLAN
vyos_vlan:
vlan_id: 100
interfaces:
- eth1
- eth2
- name: Configure virtual interface address
vyos_vlan:
vlan_id: 100
interfaces: eth1
address: 172.26.100.37/24
- name: vlan interface config + intent
vyos_vlan:
vlan_id: 100
interfaces: eth0
associated_interfaces:
- eth0
- name: vlan intent check
vyos_vlan:
vlan_id: 100
associated_interfaces:
- eth3
- eth4
- name: Delete vlan
vyos_vlan:
vlan_id: 100
interfaces: eth1
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- set interfaces ethernet eth1 vif 100 description VLAN 100
- set interfaces ethernet eth1 vif 100 address 172.26.100.37/24
- delete interfaces ethernet eth1 vif 100
"""
import re
import time
from copy import deepcopy
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.utils import remove_default_spec
from ansible.module_utils.network.vyos.vyos import load_config, run_commands
from ansible.module_utils.network.vyos.vyos import vyos_argument_spec
def search_obj_in_list(vlan_id, lst):
obj = list()
for o in lst:
if o['vlan_id'] == vlan_id:
obj.append(o)
return obj
def map_obj_to_commands(updates, module):
commands = list()
want, have = updates
purge = module.params['purge']
for w in want:
vlan_id = w['vlan_id']
name = w['name']
address = w['address']
state = w['state']
interfaces = w['interfaces']
obj_in_have = search_obj_in_list(vlan_id, have)
if state == 'absent':
if obj_in_have:
for obj in obj_in_have:
for i in obj['interfaces']:
commands.append('delete interfaces ethernet {0} vif {1}'.format(i, vlan_id))
elif state == 'present':
if not obj_in_have:
if w['interfaces'] and w['vlan_id']:
for i in w['interfaces']:
cmd = 'set interfaces ethernet {0} vif {1}'.format(i, vlan_id)
if w['name']:
commands.append(cmd + ' description {}'.format(name))
elif w['address']:
commands.append(cmd + ' address {}'.format(address))
else:
commands.append(cmd)
if purge:
for h in have:
obj_in_want = search_obj_in_list(h['vlan_id'], want)
if not obj_in_want:
for i in h['interfaces']:
commands.append('delete interfaces ethernet {0} vif {1}'.format(i, h['vlan_id']))
return commands
def map_params_to_obj(module):
obj = []
aggregate = module.params.get('aggregate')
if aggregate:
for item in aggregate:
for key in item:
if item.get(key) is None:
item[key] = module.params[key]
d = item.copy()
if not d['vlan_id']:
module.fail_json(msg='vlan_id is required')
d['vlan_id'] = str(d['vlan_id'])
module._check_required_one_of(module.required_one_of, item)
obj.append(d)
else:
obj.append({
'vlan_id': str(module.params['vlan_id']),
'name': module.params['name'],
'address': module.params['address'],
'state': module.params['state'],
'interfaces': module.params['interfaces'],
'associated_interfaces': module.params['associated_interfaces']
})
return obj
def map_config_to_obj(module):
objs = []
interfaces = list()
output = run_commands(module, 'show interfaces')
lines = output[0].strip().splitlines()[3:]
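    # Sketch of the 'show interfaces' output this parser expects (columns
    # separated by two or more spaces; exact VyOS formatting may vary):
    #
    #   Interface        IP Address         S/L  Description
    #   ---------        ----------         ---  -----------
    #   eth1             -                  u/u
    #   eth1.100         172.26.100.37/24   u/u  vlan-100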
for l in lines:
splitted_line = re.split(r'\s{2,}', l.strip())
obj = {}
eth = splitted_line[0].strip("'")
if eth.startswith('eth'):
obj['interfaces'] = []
if '.' in eth:
interface = eth.split('.')[0]
obj['interfaces'].append(interface)
obj['vlan_id'] = eth.split('.')[-1]
else:
obj['interfaces'].append(eth)
obj['vlan_id'] = None
if splitted_line[1].strip("'") != '-':
obj['address'] = splitted_line[1].strip("'")
if len(splitted_line) > 3:
obj['name'] = splitted_line[3].strip("'")
obj['state'] = 'present'
objs.append(obj)
return objs
def check_declarative_intent_params(want, module, result):
have = None
obj_interface = list()
is_delay = False
for w in want:
if w.get('associated_interfaces') is None:
continue
if result['changed'] and not is_delay:
time.sleep(module.params['delay'])
is_delay = True
if have is None:
have = map_config_to_obj(module)
obj_in_have = search_obj_in_list(w['vlan_id'], have)
if obj_in_have:
for obj in obj_in_have:
obj_interface.extend(obj['interfaces'])
for w in want:
if w.get('associated_interfaces') is None:
continue
for i in w['associated_interfaces']:
            if i not in obj_interface:
module.fail_json(msg='Interface {0} not configured on vlan {1}'.format(i, w['vlan_id']))
def main():
""" main entry point for module execution
"""
element_spec = dict(
vlan_id=dict(type='int'),
name=dict(),
address=dict(),
interfaces=dict(type='list'),
associated_interfaces=dict(type='list'),
delay=dict(default=10, type='int'),
state=dict(default='present',
choices=['present', 'absent'])
)
aggregate_spec = deepcopy(element_spec)
# remove default in aggregate spec, to handle common arguments
remove_default_spec(aggregate_spec)
argument_spec = dict(
aggregate=dict(type='list', elements='dict', options=aggregate_spec),
purge=dict(default=False, type='bool')
)
argument_spec.update(element_spec)
argument_spec.update(vyos_argument_spec)
required_one_of = [['vlan_id', 'aggregate'],
['aggregate', 'interfaces', 'associated_interfaces']]
mutually_exclusive = [['vlan_id', 'aggregate']]
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
required_one_of=required_one_of,
mutually_exclusive=mutually_exclusive)
warnings = list()
result = {'changed': False}
if warnings:
result['warnings'] = warnings
want = map_params_to_obj(module)
have = map_config_to_obj(module)
commands = map_obj_to_commands((want, have), module)
result['commands'] = commands
if commands:
commit = not module.check_mode
load_config(module, commands, commit=commit)
result['changed'] = True
check_declarative_intent_params(want, module, result)
module.exit_json(**result)
if __name__ == '__main__':
main()
|
jabesq/home-assistant
|
refs/heads/dev
|
tests/components/person/__init__.py
|
39
|
"""The tests for the person component."""
|
lanceculnane/electricity-conservation
|
refs/heads/master
|
code/15regRF_mean.py
|
1
|
import numpy as np
import pandas as pd
import datetime
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import Imputer
full_pre = pd.read_csv("new.csv")
# full.fillna(0, inplace=True)
# Create an imputer that replaces missing values with the column mean.
imp = Imputer(missing_values='NaN', strategy='mean', axis=0)
imp2 = imp.fit(full_pre.ix[:66, 1:])  # fit: learn the mean of each column
full = imp.transform(full_pre.ix[:66, 1:])  # replace each NaN with its column mean
# y = full.pop('unix_difference')
# full['new_y'] = 0
# for i, row in full.iterrows():
# if row['unix_difference'] > 100000:
# full['new_y'][i] = 1
y = np.array([6.95, 10.45, 18.88, 18.97, 1.13, 5.58, 18.56, 5.16, 7.94, 6.97, 4.05, 10.28, 3.90, 7.07, 7.13, 9.73, 4.80, 5.01, 8.17, 18.57, 9.21, 13.63, 19.32, 15.98, 6.32, 4.61, 0.70, 17.94, 8.56, 16.24, 2.97, 20.88, 19.56, 8.00, 7.42, 5.29, 14.98, 15.60, 5.88, 9.87, 2.25, 10.72, 9.31, 10.97, 6.15, 8.53, 9.01, 4.47, 13.67, 6.95, 15.03, 42.05, 30.82, 6.88, 25.45, 9.15, 17.79, 1.20, 2.97, 8.26, 13.68, 7.33, 0.46, 2.63, 4.10, 10.42, 1.50]).T
df_num_X2 = full
# df_num_X2 = df_num_X2.values
X_train, X_test, y_train, y_test = train_test_split(df_num_X2, y, test_size=0.3, random_state=23)
rf = RandomForestRegressor(n_estimators=1000,oob_score=True, max_features=30, n_jobs=3, verbose=True, min_samples_leaf=8)
rf.fit(X_train, y_train)
y_pred = rf.predict(X_test)
mse = mean_squared_error(y_test, y_pred)
print 'MSE: ', mse
print 'RMSE...', np.sqrt(mse)
print 'OOB_error: ', rf.oob_score_
print y_pred
# df["rf_pred"] = rf.predict(df_rf)
|
msimacek/fedmsg_meta_fedora_infrastructure
|
refs/heads/develop
|
fedmsg_meta_fedora_infrastructure/tests/nuancier.py
|
5
|
# This file is part of fedmsg.
# Copyright (C) 2012 Red Hat, Inc.
#
# fedmsg is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# fedmsg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with fedmsg; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Authors: Ralph Bean <rbean@redhat.com>
#
""" Tests for nuancier messages """
import unittest
from fedmsg.tests.test_meta import Base
from .common import add_doc
class TestNuancierElectionUpdated(Base):
""" These messages are published when **an admin updates the details** of
an existing election on the "Nuancier" wallpaper voting app.
"""
expected_title = "nuancier.election.update"
expected_subti = 'ralph changed the following details on the ' + \
'"Fedora 21" election: election year, election name'
expected_link = "https://apps.fedoraproject.org/nuancier/election/1"
expected_icon = "https://apps.fedoraproject.org/img/icons/nuancier.png"
expected_secondary_icon = (
"https://seccdn.libravatar.org/avatar/"
"9c9f7784935381befc302fe3c814f9136e7a33953d0318761669b8643f4df55c"
"?s=64&d=retro")
expected_packages = set([])
expected_usernames = set(['ralph'])
expected_objects = set(['2014/Fedora 21/election/update'])
msg = {
"username": "threebean",
"i": 2,
"timestamp": 1392907947,
"msg_id": "2014-a97d68bd-bc9e-49e0-b028-f10297f36767",
"topic": "org.fedoraproject.dev.nuancier.election.update",
"msg": {
"updated": [
"election year",
"election name"
],
"election": {
"name": "Fedora 21",
"submission_date_start": 1392958800.0,
"date_end": 1393045200.0,
"date_start": 1392958800.0,
"year": "2014",
"id": 1
},
"agent": "ralph"
}
}
class TestNuancierElectionCreate(Base):
""" These messages are published when **an admin creates** a new election
on the "Nuancier" wallpaper voting app.
"""
expected_title = "nuancier.election.new"
expected_subti = 'ralph created a new election "Fedora 22"'
expected_link = "https://apps.fedoraproject.org/nuancier/election/4"
expected_icon = "https://apps.fedoraproject.org/img/icons/nuancier.png"
expected_secondary_icon = (
"https://seccdn.libravatar.org/avatar/"
"9c9f7784935381befc302fe3c814f9136e7a33953d0318761669b8643f4df55c"
"?s=64&d=retro")
expected_packages = set([])
expected_usernames = set(['ralph'])
expected_objects = set(['2015/Fedora 22/election/new'])
msg = {
"username": "threebean",
"i": 1,
"timestamp": 1392908460,
"msg_id": "2014-88577310-f466-4c88-8deb-dc98c8abc09e",
"topic": "org.fedoraproject.dev.nuancier.election.new",
"msg": {
"election": {
"name": "Fedora 22",
"submission_date_start": 1392786000.0,
"date_end": 1393045200.0,
"date_start": 1392786000.0,
"year": "2015",
"id": 4
},
"agent": "ralph"
}
}
class TestNuancierCandidateNew(Base):
""" These messages are published when **a contributor submits a new
candidate** for an existing election on the "Nuancier" wallpaper voting
app.
"""
expected_title = "nuancier.candidate.new"
expected_subti = 'ralph uploaded a new candidate for the ' + \
'"Fedora 22" wallpaper election'
expected_link = "http://www.cyclelicio.us/wp-content/" + \
"uploads/2013/07/skvidal.jpg"
expected_icon = "https://apps.fedoraproject.org/img/icons/nuancier.png"
expected_secondary_icon = (
"https://seccdn.libravatar.org/avatar/"
"9c9f7784935381befc302fe3c814f9136e7a33953d0318761669b8643f4df55c"
"?s=64&d=retro")
expected_packages = set([])
expected_usernames = set(['ralph'])
expected_objects = set(['2015/Fedora 22/Handsome/candidate/new'])
msg = {
"username": "threebean",
"i": 2,
"timestamp": 1392908853,
"msg_id": "2014-0838ce6a-9f99-41d9-84a4-e076665d3b2b",
"topic": "org.fedoraproject.dev.nuancier.candidate.new",
"msg": {
"agent": "ralph",
"candidate": {
"submitter": "ralph",
"author": "ralph",
"name": "Handsome",
"license": "CC-BY-SA",
"original_url": "http://www.cyclelicio.us/wp-content/"
"uploads/2013/07/skvidal.jpg"
},
"election": {
"name": "Fedora 22",
"submission_date_start": 1392786000.0,
"date_end": 1393045200.0,
"date_start": 1392958800.0,
"year": 2015,
"id": 4
}
}
}
class TestNuancierCandidateApprove(Base):
""" These messages are published when **an admin approves** a candidate
submission to the "Nuancier" wallpaper voting app.
"""
expected_title = "nuancier.candidate.approved"
expected_subti = 'gnokii approved ralph\'s "Handsome" submission ' + \
'to the "Fedora 22" wallpaper election'
expected_link = "http://www.cyclelicio.us/wp-content/" + \
"uploads/2013/07/skvidal.jpg"
expected_icon = "https://apps.fedoraproject.org/img/icons/nuancier.png"
expected_secondary_icon = (
"https://seccdn.libravatar.org/avatar/"
"f0f0eef56d80913ec82275ed76dafe440ef8b4bba0228d97e7fb2ecb275d9591"
"?s=64&d=retro")
expected_packages = set([])
expected_usernames = set(['ralph', 'gnokii'])
expected_objects = set(['2015/Fedora 22/Handsome/candidate/approved'])
msg = {
"username": "threebean",
"i": 1,
"timestamp": 1392916813,
"msg_id": "2014-32dce0de-5d80-4f9e-a445-d63b6f9e320f",
"topic": "org.fedoraproject.dev.nuancier.candidate.approved",
"msg": {
"agent": "gnokii",
"candidate": {
"submitter": "ralph",
"author": "ralph",
"name": "Handsome",
"license": "CC-BY-SA",
"original_url": "http://www.cyclelicio.us/wp-content/"
"uploads/2013/07/skvidal.jpg"
},
"election": {
"name": "Fedora 22",
"submission_date_start": 1392786000.0,
"date_end": 1393045200.0,
"date_start": 1392958800.0,
"year": 2015,
"id": 4
}
}
}
class TestNuancierCandidateDeny(Base):
""" These messages are published when **an admin denies** a candidate
submission to the "Nuancier" wallpaper voting app.
"""
expected_title = "nuancier.candidate.denied"
expected_subti = 'gnokii denied ralph\'s "Handsome" submission ' + \
'to the "Fedora 22" wallpaper election'
expected_link = "http://www.cyclelicio.us/wp-content/" + \
"uploads/2013/07/skvidal.jpg"
expected_icon = "https://apps.fedoraproject.org/img/icons/nuancier.png"
expected_secondary_icon = (
"https://seccdn.libravatar.org/avatar/"
"f0f0eef56d80913ec82275ed76dafe440ef8b4bba0228d97e7fb2ecb275d9591"
"?s=64&d=retro")
expected_packages = set([])
expected_usernames = set(['ralph', 'gnokii'])
expected_objects = set(['2015/Fedora 22/Handsome/candidate/denied'])
msg = {
"username": "threebean",
"i": 1,
"timestamp": 1392916813,
"msg_id": "2014-32dce0de-5d80-4f9e-a445-d63b6f9e320f",
"topic": "org.fedoraproject.dev.nuancier.candidate.denied",
"msg": {
"agent": "gnokii",
"candidate": {
"submitter": "ralph",
"author": "ralph",
"name": "Handsome",
"license": "CC-BY-SA",
"original_url": "http://www.cyclelicio.us/wp-content/"
"uploads/2013/07/skvidal.jpg"
},
"election": {
"name": "Fedora 22",
"submission_date_start": 1392786000.0,
"date_end": 1393045200.0,
"date_start": 1392958800.0,
"year": 2015,
"id": 4
}
}
}
add_doc(locals())
if __name__ == '__main__':
unittest.main()
|
BTCDDev/bitcoin
|
refs/heads/master
|
qa/rpc-tests/walletbackup.py
|
85
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Exercise the wallet backup code. Ported from walletbackup.sh.
Test case is:
4 nodes. Nodes 1, 2 and 3 send transactions between each other;
the fourth node is a miner.
1 2 3 each mine a block to start, then
Miner creates 100 blocks so 1 2 3 each have 50 mature
coins to spend.
Then 5 iterations of 1/2/3 sending coins amongst
themselves to get transactions in the wallets,
and the miner mining one block.
Wallets are backed up using dumpwallet/backupwallet.
Then 5 more iterations of transactions and mining a block.
Miner then generates 101 more blocks, so any
transaction fees paid mature.
Sanity check:
Sum(1,2,3,4 balances) == 114*50
1/2/3 are shutdown, and their wallets erased.
Then restore using wallet.dat backup. And
confirm 1/2/3/4 balances are same as before.
Shutdown again, restore using importwallet,
and confirm again balances are correct.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from random import randint
import logging
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO, stream=sys.stdout)
class WalletBackupTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 4
        # nodes 1, 2 and 3 are spenders; give them a keypool of 100
self.extra_args = [["-keypool=100"], ["-keypool=100"], ["-keypool=100"], []]
# This mirrors how the network was setup in the bash test
def setup_network(self, split=False):
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.extra_args)
connect_nodes(self.nodes[0], 3)
connect_nodes(self.nodes[1], 3)
connect_nodes(self.nodes[2], 3)
connect_nodes(self.nodes[2], 0)
self.is_network_split=False
self.sync_all()
def one_send(self, from_node, to_address):
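        # with probability 1/2, send a random multiple of 0.1 coin,
        # between 0.1 and 1.0 inclusive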
if (randint(1,2) == 1):
amount = Decimal(randint(1,10)) / Decimal(10)
self.nodes[from_node].sendtoaddress(to_address, amount)
def do_one_round(self):
a0 = self.nodes[0].getnewaddress()
a1 = self.nodes[1].getnewaddress()
a2 = self.nodes[2].getnewaddress()
self.one_send(0, a1)
self.one_send(0, a2)
self.one_send(1, a0)
self.one_send(1, a2)
self.one_send(2, a0)
self.one_send(2, a1)
# Have the miner (node3) mine a block.
# Must sync mempools before mining.
sync_mempools(self.nodes)
self.nodes[3].generate(1)
sync_blocks(self.nodes)
# As above, this mirrors the original bash test.
def start_three(self):
self.nodes[0] = start_node(0, self.options.tmpdir)
self.nodes[1] = start_node(1, self.options.tmpdir)
self.nodes[2] = start_node(2, self.options.tmpdir)
connect_nodes(self.nodes[0], 3)
connect_nodes(self.nodes[1], 3)
connect_nodes(self.nodes[2], 3)
connect_nodes(self.nodes[2], 0)
def stop_three(self):
stop_node(self.nodes[0], 0)
stop_node(self.nodes[1], 1)
stop_node(self.nodes[2], 2)
def erase_three(self):
os.remove(self.options.tmpdir + "/node0/regtest/wallet.dat")
os.remove(self.options.tmpdir + "/node1/regtest/wallet.dat")
os.remove(self.options.tmpdir + "/node2/regtest/wallet.dat")
def run_test(self):
logging.info("Generating initial blockchain")
self.nodes[0].generate(1)
sync_blocks(self.nodes)
self.nodes[1].generate(1)
sync_blocks(self.nodes)
self.nodes[2].generate(1)
sync_blocks(self.nodes)
self.nodes[3].generate(100)
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getbalance(), 50)
assert_equal(self.nodes[1].getbalance(), 50)
assert_equal(self.nodes[2].getbalance(), 50)
assert_equal(self.nodes[3].getbalance(), 0)
logging.info("Creating transactions")
# Five rounds of sending each other transactions.
for i in range(5):
self.do_one_round()
logging.info("Backing up")
tmpdir = self.options.tmpdir
self.nodes[0].backupwallet(tmpdir + "/node0/wallet.bak")
self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.dump")
self.nodes[1].backupwallet(tmpdir + "/node1/wallet.bak")
self.nodes[1].dumpwallet(tmpdir + "/node1/wallet.dump")
self.nodes[2].backupwallet(tmpdir + "/node2/wallet.bak")
self.nodes[2].dumpwallet(tmpdir + "/node2/wallet.dump")
logging.info("More transactions")
for i in range(5):
self.do_one_round()
# Generate 101 more blocks, so any fees paid mature
self.nodes[3].generate(101)
self.sync_all()
balance0 = self.nodes[0].getbalance()
balance1 = self.nodes[1].getbalance()
balance2 = self.nodes[2].getbalance()
balance3 = self.nodes[3].getbalance()
total = balance0 + balance1 + balance2 + balance3
# At this point, there are 214 blocks (103 for setup, then 10 rounds, then 101.)
# 114 are mature, so the sum of all wallets should be 114 * 50 = 5700.
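        # Arithmetic: 1+1+1+100 = 103 setup blocks, +10 round blocks,
        # +101 maturation blocks = 214 total; with the 100-block coinbase
        # maturity rule, 214 - 100 = 114 mature 50-coin rewards.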
assert_equal(total, 5700)
##
# Test restoring spender wallets from backups
##
logging.info("Restoring using wallet.dat")
self.stop_three()
self.erase_three()
# Start node2 with no chain
shutil.rmtree(self.options.tmpdir + "/node2/regtest/blocks")
shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate")
# Restore wallets from backup
shutil.copyfile(tmpdir + "/node0/wallet.bak", tmpdir + "/node0/regtest/wallet.dat")
shutil.copyfile(tmpdir + "/node1/wallet.bak", tmpdir + "/node1/regtest/wallet.dat")
shutil.copyfile(tmpdir + "/node2/wallet.bak", tmpdir + "/node2/regtest/wallet.dat")
logging.info("Re-starting nodes")
self.start_three()
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getbalance(), balance0)
assert_equal(self.nodes[1].getbalance(), balance1)
assert_equal(self.nodes[2].getbalance(), balance2)
logging.info("Restoring using dumped wallet")
self.stop_three()
self.erase_three()
        # Start node2 with no chain
shutil.rmtree(self.options.tmpdir + "/node2/regtest/blocks")
shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate")
self.start_three()
assert_equal(self.nodes[0].getbalance(), 0)
assert_equal(self.nodes[1].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 0)
self.nodes[0].importwallet(tmpdir + "/node0/wallet.dump")
self.nodes[1].importwallet(tmpdir + "/node1/wallet.dump")
self.nodes[2].importwallet(tmpdir + "/node2/wallet.dump")
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getbalance(), balance0)
assert_equal(self.nodes[1].getbalance(), balance1)
assert_equal(self.nodes[2].getbalance(), balance2)
if __name__ == '__main__':
WalletBackupTest().main()
|
bestwpw/crosswalk
|
refs/heads/master
|
build/android/merge_java_srcs.py
|
37
|
#!/usr/bin/env python
# Copyright (c) 2014 Intel Corporation. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import os
import re
import sys
import shutil
def DoCopy(path, target_path):
if os.path.isfile(path):
package = ''
    package_re = re.compile(
        r'^package (?P<package>([a-zA-Z0-9_]+\.)*[a-zA-Z0-9_]+);$')
for line in open(path).readlines():
match = package_re.match(line)
if match:
package = match.group('package')
break
sub_path = os.path.sep.join(package.split('.'))
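        # e.g. package "org.xwalk.core" maps to sub-path "org/xwalk/core"
        # (POSIX separators)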
shutil.copy(path, os.path.join(target_path, sub_path))
return
for dirpath, _, files in os.walk(path):
if not files:
continue
sub_path = os.path.relpath(dirpath, path)
target_dirpath = os.path.join(target_path, sub_path)
if not os.path.isdir(target_dirpath):
os.makedirs(target_dirpath)
for f in files:
fpath = os.path.join(dirpath, f)
# "interface type;" is invalid for normal android project,
# It's only for chromium's build system, ignore these aidl files.
if f.endswith('.aidl'):
invalid_lines = []
for line in open(fpath).readlines():
if re.match('^interface .*;$', line):
invalid_lines.append(line)
if invalid_lines:
continue
elif not f.endswith('.java'):
continue
shutil.copy(fpath, target_dirpath)
def main():
parser = optparse.OptionParser()
info = ('The java source dirs to merge.')
parser.add_option('--dirs', help=info)
info = ('The target to place all the sources.')
parser.add_option('--target-path', help=info)
options, _ = parser.parse_args()
if os.path.isdir(options.target_path):
shutil.rmtree(options.target_path)
os.makedirs(options.target_path)
for path in options.dirs.split(' '):
if path.startswith('"') and path.endswith('"'):
path = eval(path)
DoCopy(path, options.target_path)
if __name__ == '__main__':
sys.exit(main())
|
charles-cooper/raiden
|
refs/heads/master
|
raiden/tests/smart_contracts/netting_channel/test_updatetransfer.py
|
1
|
# -*- coding: utf-8 -*-
import pytest
from ethereum.tester import TransactionFailed
from coincurve import PrivateKey
from raiden.messages import DirectTransfer
from raiden.utils import privatekey_to_address
from raiden.tests.utils.transfer import make_direct_transfer_from_channel
def test_transfer_update_event(tester_state, tester_channels, tester_events):
""" The event TransferUpdated is emitted after a successful call to
updateTransfer.
"""
pkey0, pkey1, nettingchannel, channel0, channel1 = tester_channels[0]
address1 = privatekey_to_address(pkey1)
direct0 = make_direct_transfer_from_channel(channel0, channel1, amount=90, pkey=pkey0)
direct0_data = str(direct0.packed().data)
nettingchannel.close('', sender=pkey0)
previous_events = list(tester_events)
nettingchannel.updateTransfer(direct0_data, sender=pkey1)
assert len(previous_events) + 1 == len(tester_events)
assert tester_events[-1] == {
'_event_type': 'TransferUpdated',
'node_address': address1.encode('hex'),
'block_number': tester_state.block.number,
}
def test_update_fails_on_open_channel(tester_channels):
""" Cannot call updateTransfer on a open channel. """
pkey0, _, nettingchannel, channel0, channel1 = tester_channels[0]
transfer0 = make_direct_transfer_from_channel(channel0, channel1, amount=10, pkey=pkey0)
transfer0_data = str(transfer0.packed().data)
with pytest.raises(TransactionFailed):
nettingchannel.updateTransfer(transfer0_data, sender=pkey0)
def test_update_not_allowed_after_settlement_period(settle_timeout, tester_channels, tester_state):
""" updateTransfer cannot be called after the settlement period. """
pkey0, pkey1, nettingchannel, channel0, channel1 = tester_channels[0]
direct0 = make_direct_transfer_from_channel(channel0, channel1, amount=70, pkey=pkey0)
direct0_data = str(direct0.packed().data)
nettingchannel.close('', sender=pkey0)
tester_state.mine(number_of_blocks=settle_timeout + 1)
with pytest.raises(TransactionFailed):
nettingchannel.updateTransfer(direct0_data, sender=pkey1)
def test_update_not_allowed_for_the_closing_address(tester_channels):
""" Closing address cannot call updateTransfer. """
pkey0, pkey1, nettingchannel, channel0, channel1 = tester_channels[0]
transfer0 = make_direct_transfer_from_channel(channel0, channel1, amount=10, pkey=pkey0)
transfer0_data = str(transfer0.packed().data)
transfer1 = make_direct_transfer_from_channel(channel1, channel0, amount=10, pkey=pkey1)
transfer1_data = str(transfer1.packed().data)
nettingchannel.close('', sender=pkey0)
# do not accept a transfer from the party that closed
with pytest.raises(TransactionFailed):
nettingchannel.updateTransfer(transfer0_data, sender=pkey0)
# nor a transfer from the partner
with pytest.raises(TransactionFailed):
nettingchannel.updateTransfer(transfer1_data, sender=pkey0)
@pytest.mark.parametrize('number_of_nodes', [3])
def test_update_must_fail_with_a_nonparticipant_transfer(tester_channels, private_keys):
""" updateTransfer must not accept a transfer from a non participant. """
pkey0, pkey1, nettingchannel, channel0, channel1 = tester_channels[0]
nonparticipant_key = private_keys[2]
opened_block = nettingchannel.opened(sender=pkey0)
    # make a transfer targeting participant 1, signed by a non-participant
transfer_nonparticipant = DirectTransfer(
identifier=1,
nonce=1 + (opened_block * (2 ** 32)),
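        # the arithmetic above offsets the nonce by the channel's opened
        # block in the high bits, leaving the low 32 bits for the counter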
token=channel0.token_address,
transferred_amount=10,
recipient=channel1.our_address,
locksroot='',
)
nonparticipant_address = privatekey_to_address(nonparticipant_key)
nonparticipant_sign_key = PrivateKey(nonparticipant_key)
transfer_nonparticipant.sign(nonparticipant_sign_key, nonparticipant_address)
transfer_nonparticipant_data = str(transfer_nonparticipant.packed().data)
nettingchannel.close('', sender=pkey0)
with pytest.raises(TransactionFailed):
nettingchannel.updateTransfer(transfer_nonparticipant_data, sender=pkey1)
@pytest.mark.parametrize('number_of_nodes', [3])
def test_update_must_fail_with_a_wrong_recipient(tester_channels, private_keys):
""" updateTransfer must not accept a transfer from a non participant. """
pkey0, pkey1, nettingchannel, channel0, channel1 = tester_channels[0]
opened_block = nettingchannel.opened(sender=pkey0)
nonparticipant_address = privatekey_to_address(private_keys[2])
    # make a transfer whose recipient is not a channel participant
transfer_wrong_recipient = DirectTransfer(
identifier=1,
nonce=1 + (opened_block * (2 ** 32)),
token=channel0.token_address,
transferred_amount=10,
recipient=nonparticipant_address,
locksroot='',
)
our_address = privatekey_to_address(pkey0)
our_sign_key = PrivateKey(pkey0)
transfer_wrong_recipient.sign(our_sign_key, our_address)
transfer_wrong_recipient_data = str(transfer_wrong_recipient.packed().data)
nettingchannel.close('', sender=pkey0)
with pytest.raises(TransactionFailed):
nettingchannel.updateTransfer(transfer_wrong_recipient_data, sender=pkey1)
def test_update_called_multiple_times_same_transfer(tester_channels):
""" updateTransfer can be called only once. """
pkey0, pkey1, nettingchannel, channel0, channel1 = tester_channels[0]
transfer0 = make_direct_transfer_from_channel(channel0, channel1, amount=10, pkey=pkey0)
transfer0_data = str(transfer0.packed().data)
nettingchannel.close('', sender=pkey0)
nettingchannel.updateTransfer(transfer0_data, sender=pkey1)
with pytest.raises(TransactionFailed):
nettingchannel.updateTransfer(transfer0_data, sender=pkey1)
def test_update_called_multiple_times_new_transfer(tester_channels):
""" updateTransfer second call must fail even if there is a new transfer. """
pkey0, pkey1, nettingchannel, channel0, channel1 = tester_channels[0]
transfer0 = make_direct_transfer_from_channel(channel0, channel1, amount=10, pkey=pkey0)
transfer0_data = str(transfer0.packed().data)
transfer1 = make_direct_transfer_from_channel(channel0, channel1, amount=10, pkey=pkey0)
transfer1_data = str(transfer1.packed().data)
nettingchannel.close('', sender=pkey0)
nettingchannel.updateTransfer(transfer0_data, sender=pkey1)
with pytest.raises(TransactionFailed):
nettingchannel.updateTransfer(transfer1_data, sender=pkey1)
def test_update_called_multiple_times_older_transfer(tester_channels):
""" updateTransfer second call must fail even if called with an older transfer. """
pkey0, pkey1, nettingchannel, channel0, channel1 = tester_channels[0]
transfer0 = make_direct_transfer_from_channel(channel0, channel1, amount=10, pkey=pkey0)
transfer0_data = str(transfer0.packed().data)
transfer1 = make_direct_transfer_from_channel(channel0, channel1, amount=10, pkey=pkey0)
transfer1_data = str(transfer1.packed().data)
nettingchannel.close('', sender=pkey0)
nettingchannel.updateTransfer(transfer1_data, sender=pkey1)
with pytest.raises(TransactionFailed):
nettingchannel.updateTransfer(transfer0_data, sender=pkey1)
|
rjw57/foldbeam
|
refs/heads/master
|
webui/index.py
|
1
|
import logging
logging.basicConfig(level=logging.INFO)
from pyjamas.ui.Button import Button as ButtonBase
from pyjamas.ui.HorizontalPanel import HorizontalPanel
from pyjamas.ui.FlowPanel import FlowPanel
from pyjamas.ui.RootPanel import RootPanel
from pyjamas.ui.SimplePanel import SimplePanel
from pyjamas.ui.HTML import HTML
from pyjamas import Window
from HorizontalCollapsePanel import HorizontalCollapsePanel
from Sidebar import Sidebar
from Map import Map
from client.user import User
class Application(SimplePanel):
def __init__(self, *args, **kwargs):
super(Application, self).__init__(*args, **kwargs)
self.user = User('http://localhost:8888/user1')
self.user.addErrorListener(self._user_error)
self.user.addLoadedListener(self._update_user)
self._update_user(self.user)
self.user.get()
def _update_user(self, user):
if user.username is None:
return
user.maps.get()
user.maps.addLoadedListener(self._update_user_maps)
self._update_user_maps(self.user.maps)
def _user_error(self, user, status, response):
logging.error('Error loading user from %s: %s' % (user.get_resource_url(), status))
def _update_user_maps(self, maps):
if maps.items is None:
# Map list is not yet loaded
return
if len(maps.items) == 0:
logging.error('User has no maps')
return
m = maps.items[0]
m.addLoadedListener(self._update_map)
self._update_map(m)
def _update_map(self, m):
if m.name is None:
# Data is not yet loaded
return
m.layers.get()
sp = HorizontalPanel(Size=('100%', '100%'))
sidebar = Sidebar()
sidebar.setLayersCollection(m.layers)
sp.add(sidebar)
sp.setCellWidth(sidebar, '25%')
map_ = Map(Size=('100%', '100%'))
map_.set_map(m)
sp.add(map_)
self.setWidget(sp)
if __name__ == '__main__':
app = Application(StyleName='top-container')
RootPanel().add(app)
|
lukleh/TwistedBot
|
refs/heads/master
|
libs/twisted/persisted/__init__.py
|
42
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Twisted Persisted: utilities for managing persistence.
"""
|
nickchen-mitac/fork
|
refs/heads/master
|
src/ava/user/models.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from datetime import datetime
from ava.util import time_uuid
from ava import APP_NAME
class Notice(object):
"""
Represent user notices.
"""
INFO = 20
WARNING = 30
ERROR = 40
NOTIFY = 1 # notification.
CONFIRM = 2 # yes or no question.
ASK_TEXT = 3 # ask for a text input from the user.
ASK_SECRET = 4 # ask for a secret, like password, from the user
def __init__(self, **kwargs):
self._id = kwargs.get('id', time_uuid.oid())
self._title = kwargs.get('title', '')
self._message = kwargs.get('message', '')
self._kind = kwargs.get('kind', self.NOTIFY)
self._timestamp = kwargs.get('timestamp', datetime.now().isoformat())
self._priority = kwargs.get('priority', self.INFO)
self._app_icon = None
self._app_name = APP_NAME
self._reply_to = None
@property
def id(self):
return self._id
@property
def title(self):
return self._title
@property
def message(self):
return self._message
@property
def kind(self):
return self._kind
@property
def priority(self):
return self._priority
def to_dict(self):
return dict(
id=self._id,
message=self._message,
title=self._title,
priority=self._priority,
timestamp=self._timestamp,
kind=self._kind
)
def from_dict(self, d):
self._id = d.get('id')
self._message = d.get('message')
self._title = d.get('title')
self._priority = d.get('priority')
self._kind = d.get('kind')
self._timestamp = d.get('timestamp')
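# A minimal sketch of constructing and round-tripping a notice (field
# values here are illustrative):
def _example_notice_roundtrip():
    notice = Notice(title='Update ready', message='Restart to apply?',
                    kind=Notice.CONFIRM, priority=Notice.WARNING)
    data = notice.to_dict()
    restored = Notice()
    restored.from_dict(data)
    return restored.kind == Notice.CONFIRM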
|
nagyistoce/odoo-dev-odoo
|
refs/heads/8.0
|
addons/website_hr_recruitment/models/__init__.py
|
390
|
import hr_job
|
jell0720/cubes
|
refs/heads/master
|
cubes/ext.py
|
5
|
# -*- coding: utf-8 -*-
from .common import decamelize, to_identifier, coalesce_options
from .errors import ArgumentError, InternalError
from collections import defaultdict, OrderedDict
from pkg_resources import iter_entry_points
from textwrap import dedent
__all__ = [
"EXTENSION_TYPES",
"ExtensionFinder",
]
# Known extension types.
# Keys:
#     base: extension base class name
#     suffix: extension class suffix to be removed for default name (same as
#             base class name if not specified)
#     modules: a dictionary of extension names and module names to be loaded
#              lazily
EXTENSION_TYPES = {
"browser": "Aggregation browser",
"store": "Data store",
"model_provider": "Model provider",
"formatter": "Formatter",
"authorizer": "Authorizer",
"authenticator": "Authenticator",
"request_log_handler": "Request log handler",
}
# Information about built-in extensions. Supposedly faster loading (?).
#
_BUILTIN_EXTENSIONS = {
"authenticators": {
"admin_admin": "cubes.server.auth:AdminAdminAuthenticator",
"pass_parameter": "cubes.server.auth:PassParameterAuthenticator",
"http_basic_proxy": "cubes.server.auth:HTTPBasicProxyAuthenticator",
},
"authorizers": {
"simple": "cubes.auth:SimpleAuthorizer",
},
"browsers": {
"sql":"cubes.sql.browser:SQLBrowser",
"slicer":"cubes.server.browser:SlicerBrowser",
},
"formatters": {
"cross_table": "cubes.formatters:CrossTableFormatter",
"csv": "cubes.formatters:CSVFormatter",
"html_cross_table": "cubes.formatters:HTMLCrossTableFormatter",
},
"providers": {
"default":"cubes.providers:StaticModelProvider",
"slicer":"cubes.server.store:SlicerModelProvider",
},
"request_log_handlers": {
"default":"cubes.server.logging:DefaultRequestLogger",
"csv":"cubes.server.logging:CSVRequestLogger",
"json":"cubes.server.logging:JSONRequestLogger",
"sql":"cubes.sql.logging:SQLRequestLogger",
},
"stores": {
"sql":"cubes.sql.store:SQLStore",
"slicer":"cubes.server.store:SlicerStore",
},
}
_DEFAULT_OPTIONS = {
}
class _Extension(object):
"""
Cubes Extension wrapper.
    `options` – List of extension options. Each option is a dictionary
    with the following keys:
* `name` – option name
* `type` – option data type (default is ``string``)
* `description` – description (optional)
* `label` – human readable label (optional)
* `values` – valid values for the option.
"""
def __init__(self, type_, entry=None, factory=None, name=None):
if factory is not None and entry is not None:
raise ArgumentError("Can't set both extension factory and entry "
"(in extension '{}')".format(name))
elif factory is None and entry is None:
raise ArgumentError("Neither extension factory nor entry provided "
"(in extension '{}')".format(name))
self.type_ = type_
self.entry = entry
self.name = name or entry.name
# After loading...
self.options = []
self.option_types = {}
self._factory = None
if factory is not None:
self.factory = factory
@property
def factory(self):
if self._factory is not None:
return self._factory
elif self.entry:
# This must not fail or result in None
self.factory = self.entry.load()
return self._factory
else:
raise InternalError("No factory or entry set for extension '{}'"
.format(self.name))
@factory.setter
def factory(self, factory):
if factory is None:
raise InternalError("Can't set extension factory to None")
self._factory = factory
defaults = _DEFAULT_OPTIONS.get(self.type_, [])
if hasattr(self._factory, "__options__"):
options = self._factory.__options__ or []
else:
options = []
self.options = OrderedDict()
for option in defaults + options:
name = option["name"]
self.options[name] = option
self.option_types[name] = option.get("type", "string")
self.option_types = self.option_types or {}
@property
def is_builtin(self):
return self.entry is None
@property
def label(self):
if hasattr(self.factory, "__label__"):
return self.factory.__label__
else:
return decamelize(self.factory.__name__)
@property
def description(self):
if hasattr(self.factory, "__description__"):
desc = self.factory.__description__ or ""
return dedent(desc)
else:
return ""
def create(self, *args, **kwargs):
"""Creates an extension. First argument should be extension's name."""
factory = self.factory
kwargs = coalesce_options(dict(kwargs),
self.option_types)
return factory(*args, **kwargs)
class ExtensionFinder(object):
def __init__(self, type_):
self.type_ = type_
self.group = "cubes.{}".format(type_)
self.extensions = {}
self.builtins = _BUILTIN_EXTENSIONS.get(self.type_, {})
def discover(self, name=None):
"""Find all entry points."""
for obj in iter_entry_points(group=self.group, name=name):
ext = _Extension(self.type_, obj)
self.extensions[ext.name] = ext
def builtin(self, name):
try:
ext_mod = self.builtins[name]
except KeyError:
return None
(modname, attr) = ext_mod.split(":")
module = _load_module(modname)
factory = getattr(module, attr)
ext = _Extension(self.type_, name=name, factory=factory)
self.extensions[name] = ext
return ext
def names(self):
"""Return list of extension names."""
if not self.extensions:
self.discover()
names = list(self.builtins.keys())
names += self.extensions.keys()
return sorted(names)
def get(self, name):
"""Return extenson object by name. Load if necessary."""
ext = self.extensions.get(name)
if not ext:
ext = self.builtin(name)
if not ext:
self.discover()
try:
ext = self.extensions[name]
except KeyError:
raise InternalError("Unknown '{}' extension '{}'"
.format(self.type_, name))
return ext
def __call__(self, _ext_name, *args, **kwargs):
return self.create(_ext_name, *args, **kwargs)
def factory(self, name):
"""Return extension factory."""
ext = self.get(name)
if not ext.factory:
            raise InternalError("Unable to get factory for extension '{}'"
                                .format(name))
return ext.factory
def create(self, _ext_name, *args, **kwargs):
"""Create an instance of extension `_ext_name` with given arguments.
The keyword arguments are converted to their appropriate types
according to extensions `__options__` list. This allows options to be
specified as strings in a configuration files or configuration
variables."""
ext = self.get(_ext_name)
return ext.create(*args, **kwargs)
def register(self, _ext_name, factory):
        ext = _Extension(self.type_, name=_ext_name, factory=factory)
        self.extensions[_ext_name] = ext
return ext
def _load_module(modulepath):
"""Load module `modulepath` and return the last module object in the
module path."""
mod = __import__(modulepath)
path = []
for token in modulepath.split(".")[1:]:
path.append(token)
mod = getattr(mod, token)
return mod
authenticator = ExtensionFinder("authenticators")
authorizer = ExtensionFinder("authorizers")
browser = ExtensionFinder("browsers")
formatter = ExtensionFinder("formatters")
model_provider = ExtensionFinder("providers")
request_log_handler = ExtensionFinder("request_log_handlers")
store = ExtensionFinder("stores")
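# A minimal sketch of how the module-level finders above are used: resolve
# an extension by name (via the builtin table or entry points), then read
# its metadata or instantiate it. "csv" is one of the builtin formatters.
def _example_formatter_lookup():
    ext = formatter.get("csv")
    return ext.label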
|
schatt/zulip
|
refs/heads/master
|
zerver/lib/notifications.py
|
116
|
from confirmation.models import Confirmation
from django.conf import settings
from django.core.mail import EmailMultiAlternatives
from django.template import loader
from zerver.decorator import statsd_increment, uses_mandrill
from zerver.models import Recipient, ScheduledJob, UserMessage, \
Stream, get_display_recipient, get_user_profile_by_email, \
get_user_profile_by_id, receives_offline_notifications, \
get_context_for_message
import datetime
import re
import subprocess
import ujson
import urllib
from collections import defaultdict
def unsubscribe_token(user_profile):
# Leverage the Django confirmations framework to generate and track unique
# unsubscription tokens.
return Confirmation.objects.get_link_for_object(user_profile).split("/")[-1]
def one_click_unsubscribe_link(user_profile, endpoint):
"""
Generate a unique link that a logged-out user can visit to unsubscribe from
Zulip e-mails without having to first log in.
"""
token = unsubscribe_token(user_profile)
base_url = "https://" + settings.EXTERNAL_HOST
resource_path = "accounts/unsubscribe/%s/%s" % (endpoint, token)
return "%s/%s" % (base_url.rstrip("/"), resource_path)
def hashchange_encode(string):
# Do the same encoding operation as hashchange.encodeHashComponent on the
# frontend.
# `safe` has a default value of "/", but we want those encoded, too.
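    # e.g. hashchange_encode("dev help") -> "dev.20help"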
return urllib.quote(
string.encode("utf-8"), safe="").replace(".", "%2E").replace("%", ".")
def pm_narrow_url(participants):
participants.sort()
base_url = "https://%s/#narrow/pm-with/" % (settings.EXTERNAL_HOST,)
return base_url + hashchange_encode(",".join(participants))
def stream_narrow_url(stream):
base_url = "https://%s/#narrow/stream/" % (settings.EXTERNAL_HOST,)
return base_url + hashchange_encode(stream)
def topic_narrow_url(stream, topic):
base_url = "https://%s/#narrow/stream/" % (settings.EXTERNAL_HOST,)
return "%s%s/topic/%s" % (base_url, hashchange_encode(stream),
hashchange_encode(topic))
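# For example, with EXTERNAL_HOST set to the hypothetical
# "zulip.example.com", topic_narrow_url("dev help", "install") yields
# "https://zulip.example.com/#narrow/stream/dev.20help/topic/install".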
def build_message_list(user_profile, messages):
"""
Builds the message list object for the missed message email template.
The messages are collapsed into per-recipient and per-sender blocks, like
our web interface
"""
messages_to_render = []
def sender_string(message):
sender = ''
if message.recipient.type in (Recipient.STREAM, Recipient.HUDDLE):
sender = message.sender.full_name
return sender
def relative_to_full_url(content):
# URLs for uploaded content are of the form
# "/user_uploads/abc.png". Make them full paths.
#
# There's a small chance of colliding with non-Zulip URLs containing
# "/user_uploads/", but we don't have much information about the
# structure of the URL to leverage.
content = re.sub(
r"/user_uploads/(\S*)",
settings.EXTERNAL_HOST + r"/user_uploads/\1", content)
# Our proxying user-uploaded images seems to break inline images in HTML
# emails, so scrub the image but leave the link.
content = re.sub(
r"<img src=(\S+)/user_uploads/(\S+)>", "", content)
# URLs for emoji are of the form
# "static/third/gemoji/images/emoji/snowflake.png".
content = re.sub(
r"static/third/gemoji/images/emoji/",
settings.EXTERNAL_HOST + r"/static/third/gemoji/images/emoji/",
content)
return content
def fix_plaintext_image_urls(content):
# Replace image URLs in plaintext content of the form
# [image name](image url)
# with a simple hyperlink.
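        # e.g. "[image.png](https://example.com/image.png)" becomes
        # "https://example.com/image.png" (illustrative values).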
return re.sub(r"\[(\S*)\]\((\S*)\)", r"\2", content)
def fix_emoji_sizes(html):
return html.replace(' class="emoji"', ' height="20px"')
def build_message_payload(message):
plain = message.content
plain = fix_plaintext_image_urls(plain)
plain = relative_to_full_url(plain)
html = message.rendered_content
html = relative_to_full_url(html)
html = fix_emoji_sizes(html)
return {'plain': plain, 'html': html}
def build_sender_payload(message):
sender = sender_string(message)
return {'sender': sender,
'content': [build_message_payload(message)]}
def message_header(user_profile, message):
disp_recipient = get_display_recipient(message.recipient)
if message.recipient.type == Recipient.PERSONAL:
header = "You and %s" % (message.sender.full_name)
html_link = pm_narrow_url([message.sender.email])
header_html = "<a style='color: #ffffff;' href='%s'>%s</a>" % (html_link, header)
elif message.recipient.type == Recipient.HUDDLE:
other_recipients = [r['full_name'] for r in disp_recipient
if r['email'] != user_profile.email]
header = "You and %s" % (", ".join(other_recipients),)
html_link = pm_narrow_url([r["email"] for r in disp_recipient
if r["email"] != user_profile.email])
header_html = "<a style='color: #ffffff;' href='%s'>%s</a>" % (html_link, header)
else:
header = "%s > %s" % (disp_recipient, message.subject)
stream_link = stream_narrow_url(disp_recipient)
topic_link = topic_narrow_url(disp_recipient, message.subject)
header_html = "<a href='%s'>%s</a> > <a href='%s'>%s</a>" % (
stream_link, disp_recipient, topic_link, message.subject)
return {"plain": header,
"html": header_html,
"stream_message": message.recipient.type_name() == "stream"}
    # Collapse message list to
# [
# {
# "header": {
# "plain":"header",
# "html":"htmlheader"
# }
# "senders":[
# {
# "sender":"sender_name",
# "content":[
# {
# "plain":"content",
# "html":"htmlcontent"
# }
# {
# "plain":"content",
# "html":"htmlcontent"
# }
# ]
# }
# ]
# },
# ]
messages.sort(key=lambda message: message.pub_date)
for message in messages:
header = message_header(user_profile, message)
# If we want to collapse into the previous recipient block
if len(messages_to_render) > 0 and messages_to_render[-1]['header'] == header:
sender = sender_string(message)
sender_block = messages_to_render[-1]['senders']
# Same message sender, collapse again
if sender_block[-1]['sender'] == sender:
sender_block[-1]['content'].append(build_message_payload(message))
else:
# Start a new sender block
sender_block.append(build_sender_payload(message))
else:
# New recipient and sender block
recipient_block = {'header': header,
'senders': [build_sender_payload(message)]}
messages_to_render.append(recipient_block)
return messages_to_render
@statsd_increment("missed_message_reminders")
def do_send_missedmessage_events_reply_in_zulip(user_profile, missed_messages, message_count):
"""
Send a reminder email to a user if she's missed some PMs by being offline.
The email will have its reply to address set to a limited used email
address that will send a zulip message to the correct recipient. This
allows the user to respond to missed PMs, huddles, and @-mentions directly
from the email.
`user_profile` is the user to send the reminder to
`missed_messages` is a list of Message objects to remind about they should
all have the same recipient and subject
"""
    # If the user has disabled missed-message email notifications, do nothing.
if not user_profile.enable_offline_email_notifications:
return
recipients = set((msg.recipient_id, msg.subject) for msg in missed_messages)
if len(recipients) != 1:
raise ValueError(
'All missed_messages must have the same recipient and subject %r' %
recipients
)
template_payload = {
'name': user_profile.full_name,
'messages': build_message_list(user_profile, missed_messages),
'message_count': message_count,
'url': 'https://%s' % (settings.EXTERNAL_HOST,),
'reply_warning': False,
'external_host': settings.EXTERNAL_HOST,
        'mention': missed_messages[0].recipient.type == Recipient.STREAM,
'reply_to_zulip': True,
}
headers = {}
from zerver.lib.email_mirror import create_missed_message_address
address = create_missed_message_address(user_profile, missed_messages[0])
headers['Reply-To'] = address
senders = set(m.sender.full_name for m in missed_messages)
sender_str = ", ".join(senders)
plural_messages = 's' if len(missed_messages) > 1 else ''
subject = "Missed Zulip%s from %s" % (plural_messages, sender_str)
from_email = "%s (via Zulip) <%s>" % (sender_str, settings.NOREPLY_EMAIL_ADDRESS)
text_content = loader.render_to_string('zerver/missed_message_email.txt', template_payload)
html_content = loader.render_to_string('zerver/missed_message_email_html.txt', template_payload)
msg = EmailMultiAlternatives(subject, text_content, from_email, [user_profile.email],
headers = headers)
msg.attach_alternative(html_content, "text/html")
msg.send()
user_profile.last_reminder = datetime.datetime.now()
user_profile.save(update_fields=['last_reminder'])
@statsd_increment("missed_message_reminders")
def do_send_missedmessage_events(user_profile, missed_messages, message_count):
"""
Send a reminder email and/or push notifications to a user if she's missed some PMs by being offline
`user_profile` is the user to send the reminder to
`missed_messages` is a list of Message objects to remind about
"""
    # If the user has disabled missed-message email notifications, do nothing.
if not user_profile.enable_offline_email_notifications:
return
senders = set(m.sender.full_name for m in missed_messages)
sender_str = ", ".join(senders)
plural_messages = 's' if len(missed_messages) > 1 else ''
template_payload = {'name': user_profile.full_name,
'messages': build_message_list(user_profile, missed_messages),
'message_count': message_count,
'url': 'https://%s' % (settings.EXTERNAL_HOST,),
'reply_warning': False,
'external_host': settings.EXTERNAL_HOST}
headers = {}
if all(msg.recipient.type in (Recipient.HUDDLE, Recipient.PERSONAL)
for msg in missed_messages):
# If we have one huddle, set a reply-to to all of the members
# of the huddle except the user herself
disp_recipients = [", ".join(recipient['email']
for recipient in get_display_recipient(mesg.recipient)
if recipient['email'] != user_profile.email)
for mesg in missed_messages]
if all(msg.recipient.type == Recipient.HUDDLE for msg in missed_messages) and \
len(set(disp_recipients)) == 1:
headers['Reply-To'] = disp_recipients[0]
elif len(senders) == 1:
headers['Reply-To'] = missed_messages[0].sender.email
else:
template_payload['reply_warning'] = True
else:
# There are some @-mentions mixed in with personals
template_payload['mention'] = True
template_payload['reply_warning'] = True
headers['Reply-To'] = "Nobody <%s>" % (settings.NOREPLY_EMAIL_ADDRESS,)
# Give users a one-click unsubscribe link they can use to stop getting
# missed message emails without having to log in first.
unsubscribe_link = one_click_unsubscribe_link(user_profile, "missed_messages")
template_payload["unsubscribe_link"] = unsubscribe_link
subject = "Missed Zulip%s from %s" % (plural_messages, sender_str)
from_email = "%s (via Zulip) <%s>" % (sender_str, settings.NOREPLY_EMAIL_ADDRESS)
text_content = loader.render_to_string('zerver/missed_message_email.txt', template_payload)
html_content = loader.render_to_string('zerver/missed_message_email_html.txt', template_payload)
msg = EmailMultiAlternatives(subject, text_content, from_email, [user_profile.email],
headers = headers)
msg.attach_alternative(html_content, "text/html")
msg.send()
user_profile.last_reminder = datetime.datetime.now()
user_profile.save(update_fields=['last_reminder'])
def handle_missedmessage_emails(user_profile_id, missed_email_events):
message_ids = [event.get('message_id') for event in missed_email_events]
user_profile = get_user_profile_by_id(user_profile_id)
if not receives_offline_notifications(user_profile):
return
messages = [um.message for um in UserMessage.objects.filter(user_profile=user_profile,
message__id__in=message_ids,
flags=~UserMessage.flags.read)]
if not messages:
return
messages_by_recipient_subject = defaultdict(list)
for msg in messages:
messages_by_recipient_subject[(msg.recipient_id, msg.subject)].append(msg)
    message_count_by_recipient_subject = {
recipient_subject: len(msgs)
for recipient_subject, msgs in messages_by_recipient_subject.items()
}
for msg_list in messages_by_recipient_subject.values():
msg = min(msg_list, key=lambda msg: msg.pub_date)
if msg.recipient.type == Recipient.STREAM:
msg_list.extend(get_context_for_message(msg))
# Send an email per recipient subject pair
if user_profile.realm.domain == 'zulip.com':
for recipient_subject, msg_list in messages_by_recipient_subject.items():
unique_messages = {m.id: m for m in msg_list}
do_send_missedmessage_events_reply_in_zulip(
user_profile,
unique_messages.values(),
                message_count_by_recipient_subject[recipient_subject],
)
else:
all_messages = [
msg_
for msg_list in messages_by_recipient_subject.values()
for msg_ in msg_list
]
unique_messages = {m.id: m for m in all_messages}
do_send_missedmessage_events(
user_profile,
unique_messages.values(),
len(messages),
)
@uses_mandrill
def clear_followup_emails_queue(email, mail_client=None):
"""
Clear out queued emails (from Mandrill's queue) that would otherwise
be sent to a specific email address. Optionally specify which sender
to filter by (useful when there are more Zulip subsystems using our
mandrill account).
`email` is a string representing the recipient email
`from_email` is a string representing the zulip email account used
to send the email (for example `support@zulip.com` or `signups@zulip.com`)
"""
# SMTP mail delivery implementation
if not mail_client:
items = ScheduledJob.objects.filter(type=ScheduledJob.EMAIL, filter_string__iexact = email)
items.delete()
return
# Mandrill implementation
    for scheduled_email in mail_client.messages.list_scheduled(to=email):
        result = mail_client.messages.cancel_scheduled(id=scheduled_email["_id"])
        if result.get("status") == "error":
            print result.get("name"), result.get("error")
return
def log_digest_event(msg):
import logging
logging.basicConfig(filename=settings.DIGEST_LOG_PATH, level=logging.INFO)
logging.info(msg)
@uses_mandrill
def send_future_email(recipients, email_html, email_text, subject,
delay=datetime.timedelta(0), sender=None,
tags=[], mail_client=None):
"""
Sends email via Mandrill, with optional delay
'mail_client' is filled in by the decorator
"""
# When sending real emails while testing locally, don't accidentally send
# emails to non-zulip.com users.
if settings.DEVELOPMENT and \
settings.EMAIL_BACKEND != 'django.core.mail.backends.console.EmailBackend':
for recipient in recipients:
email = recipient.get("email")
if get_user_profile_by_email(email).realm.domain != "zulip.com":
raise ValueError("digest: refusing to send emails to non-zulip.com users.")
# message = {"from_email": "othello@zulip.com",
# "from_name": "Othello",
# "html": "<p>hello</p> there",
# "tags": ["signup-reminders"],
# "to": [{'email':"acrefoot@zulip.com", 'name': "thingamajig"}]
# }
# SMTP mail delivery implementation
if not mail_client:
if sender is None:
            # This will likely be overridden by settings.DEFAULT_FROM_EMAIL
sender = {'email': settings.NOREPLY_EMAIL_ADDRESS, 'name': 'Zulip'}
for recipient in recipients:
email_fields = {'email_html': email_html,
'email_subject': subject,
'email_text': email_text,
'recipient_email': recipient.get('email'),
'recipient_name': recipient.get('name'),
'sender_email': sender['email'],
'sender_name': sender['name']}
ScheduledJob.objects.create(type=ScheduledJob.EMAIL, filter_string=recipient.get('email'),
data=ujson.dumps(email_fields),
scheduled_timestamp=datetime.datetime.utcnow() + delay)
return
# Mandrill implementation
if sender is None:
sender = {'email': settings.NOREPLY_EMAIL_ADDRESS, 'name': 'Zulip'}
message = {'from_email': sender['email'],
'from_name': sender['name'],
'to': recipients,
'subject': subject,
'html': email_html,
'text': email_text,
'tags': tags,
}
    # Ignore any delay smaller than one minute, because it's cheaper just to
    # send such messages immediately.
if type(delay) is not datetime.timedelta:
raise TypeError("specified delay is of the wrong type: %s" % (type(delay),))
if delay < datetime.timedelta(minutes=1):
results = mail_client.messages.send(message=message, async=False, ip_pool="Main Pool")
else:
        send_time = (datetime.datetime.utcnow() + delay).strftime("%Y-%m-%d %H:%M:%S")
results = mail_client.messages.send(message=message, async=False, ip_pool="Main Pool", send_at=send_time)
problems = [result for result in results if (result['status'] in ('rejected', 'invalid'))]
if problems:
for problem in problems:
if problem["status"] == "rejected":
if problem["reject_reason"] == "hard-bounce":
# A hard bounce means the address doesn't exist or the
# recipient mail server is completely blocking
# delivery. Don't try to send further emails.
if "digest-emails" in tags:
from zerver.lib.actions import do_change_enable_digest_emails
bounce_email = problem["email"]
user_profile = get_user_profile_by_email(bounce_email)
do_change_enable_digest_emails(user_profile, False)
log_digest_event("%s\nTurned off digest emails for %s" % (
str(problems), bounce_email))
continue
elif problem["reject_reason"] == "soft-bounce":
# A soft bounce is temporary; let it try to resolve itself.
continue
raise Exception(
"While sending email (%s), encountered problems with these recipients: %r"
% (subject, problems))
return
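# A minimal usage sketch (the recipient below is hypothetical). With no
# Mandrill client configured, this schedules a ScheduledJob row that a
# queue processor is expected to deliver roughly two hours later:
#
#     send_future_email(
#         [{'email': 'user@example.com', 'name': 'A User'}],
#         email_html='<p>hello</p>',
#         email_text='hello',
#         subject='Reminder',
#         delay=datetime.timedelta(hours=2),
#         tags=['reminders'])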
def send_local_email_template_with_delay(recipients, template_prefix,
template_payload, delay,
tags=[], sender={'email': settings.NOREPLY_EMAIL_ADDRESS, 'name': 'Zulip'}):
html_content = loader.render_to_string(template_prefix + ".html", template_payload)
text_content = loader.render_to_string(template_prefix + ".text", template_payload)
subject = loader.render_to_string(template_prefix + ".subject", template_payload).strip()
return send_future_email(recipients,
html_content,
text_content,
subject,
delay=delay,
sender=sender,
tags=tags)
def enqueue_welcome_emails(email, name):
sender = {'email': 'wdaher@zulip.com', 'name': 'Waseem Daher'}
if settings.VOYAGER:
sender = {'email': settings.ZULIP_ADMINISTRATOR, 'name': 'Zulip'}
user_profile = get_user_profile_by_email(email)
unsubscribe_link = one_click_unsubscribe_link(user_profile, "welcome")
template_payload = {'name': name,
'not_voyager': not settings.VOYAGER,
'external_host': settings.EXTERNAL_HOST,
'unsubscribe_link': unsubscribe_link}
    # Send day 1 email
send_local_email_template_with_delay([{'email': email, 'name': name}],
"zerver/emails/followup/day1",
template_payload,
datetime.timedelta(hours=1),
tags=["followup-emails"],
sender=sender)
    # Send day 2 email
tomorrow = datetime.datetime.utcnow() + datetime.timedelta(hours=24)
# 11 AM EDT
tomorrow_morning = datetime.datetime(tomorrow.year, tomorrow.month, tomorrow.day, 15, 0)
assert(datetime.datetime.utcnow() < tomorrow_morning)
send_local_email_template_with_delay([{'email': email, 'name': name}],
"zerver/emails/followup/day2",
template_payload,
tomorrow_morning - datetime.datetime.utcnow(),
tags=["followup-emails"],
sender=sender)
def convert_html_to_markdown(html):
# On Linux, the tool installs as html2markdown, and there's a command called
# html2text that does something totally different. On OSX, the tool installs
# as html2text.
commands = ["html2markdown", "html2text"]
for command in commands:
try:
# A body width of 0 means do not try to wrap the text for us.
p = subprocess.Popen(
[command, "--body-width=0"], stdout=subprocess.PIPE,
stdin=subprocess.PIPE, stderr=subprocess.STDOUT)
break
        except OSError:
            continue
    else:
        # Neither tool is installed; fail with a clear error instead of a
        # NameError on the undefined `p` below.
        raise OSError("Unable to find html2markdown or html2text")
markdown = p.communicate(input=html.encode("utf-8"))[0].strip()
    # We want images to get linked and inline previewed, but html2text will
    # turn them into links of the form `![](https://example.com/image.png?hash)`,
    # which is ugly. Run a regex over the resulting description, turning links
    # of that form into `[image.png](https://example.com/image.png)`.
return re.sub(r"!\[\]\((\S*)/(\S*)\?(\S*)\)",
r"[\2](\1/\2)", markdown).decode("utf-8")
|
tannishk/airmozilla
|
refs/heads/master
|
airmozilla/manage/tests/views/test_events.py
|
2
|
import re
import cgi
import datetime
import json
import urllib
import os
import shutil
from cStringIO import StringIO
from nose.tools import eq_, ok_
import mock
import pyquery
from django.conf import settings
from django.contrib.auth.models import User, Group, Permission
from django.core import mail
from django.utils import timezone
from django.utils.timezone import utc
from django.core.files import File
from funfactory.urlresolvers import reverse
from airmozilla.main.models import (
Event,
EventOldSlug,
Location,
Template,
Channel,
Tag,
SuggestedEvent,
SuggestedEventComment,
VidlySubmission,
URLMatch,
URLTransform,
EventHitStats,
UserProfile,
CuratedGroup,
Picture
)
from airmozilla.base.tests.test_mozillians import (
Response,
GROUPS1,
GROUPS2
)
from airmozilla.uploads.models import Upload
from airmozilla.comments.models import Discussion
from airmozilla.manage.tests.test_vidly import (
SAMPLE_XML,
get_custom_XML,
SAMPLE_MEDIA_UPDATE_FAILED_XML,
SAMPLE_MEDIA_UPDATED_XML,
)
from airmozilla.staticpages.models import StaticPage
from airmozilla.manage.views.events import is_privacy_vidly_mismatch
from .base import ManageTestCase
class _Response(object):
def __init__(self, content, status_code=200):
self.content = self.text = content
self.status_code = status_code
class TestEvents(ManageTestCase):
event_base_data = {
'status': Event.STATUS_SCHEDULED,
'description': '...',
'privacy': 'public',
'location': '1',
'channels': '1',
'tags': 'xxx',
'template': '1',
'start_time': '2012-3-4 12:00',
'estimated_duration': '3600',
}
placeholder = 'airmozilla/manage/tests/firefox.png'
def test_event_request(self):
"""Event request responses and successful creation in the db."""
response = self.client.get(reverse('manage:event_request'))
eq_(response.status_code, 200)
with open(self.placeholder) as fp:
response_ok = self.client.post(
reverse('manage:event_request'),
dict(self.event_base_data, placeholder_img=fp,
title='Airmozilla Launch Test')
)
response_fail = self.client.post(
reverse('manage:event_request'),
{
'title': 'Test fails, not enough data!',
}
)
response_cancel = self.client.post(
reverse('manage:event_request'),
{
'cancel': 'yes'
}
)
self.assertRedirects(response_ok, reverse('manage:events'))
eq_(response_fail.status_code, 200)
event = Event.objects.get(title='Airmozilla Launch Test')
eq_(event.location, Location.objects.get(id=1))
eq_(event.creator, self.user)
eq_(response_cancel.status_code, 302)
self.assertRedirects(response_cancel, reverse('manage:events'))
def test_event_request_with_approvals(self):
group1 = Group.objects.create(name='testapprover')
group2 = Group.objects.create(name='Group2')
permission = Permission.objects.get(codename='change_approval')
group1.permissions.add(permission)
group2.permissions.add(permission)
group_user = User.objects.create_user(
'username',
'em@ail.com',
'secret'
)
group_user.groups.add(group2)
inactive_user = User.objects.create_user(
'longgone',
'long@gone.com',
'secret'
)
inactive_user.is_active = False
inactive_user.save()
inactive_user.groups.add(group2)
long_description_with_html = (
'The researchers took a "theoretical" approach instead, using '
'something known as the no-signalling conditions. They '
'considered an entangled system with a set of independent '
'physical attributes, some observable, some hidden variables. '
'\n\n'
'Next, they allowed the state of the hidden variables '
'to propagate faster than the speed of light, which let '
'them influence the measurements on the separated pieces of '
'the experiment. '
'\n\n'
'<ul>'
'<li>One</li>'
'<li>Two</li>'
'</ul>'
'\n\n'
'Baskin & Robbins'
)
with open(self.placeholder) as fp:
response = self.client.post(
reverse('manage:event_request'),
dict(self.event_base_data,
description=long_description_with_html,
placeholder_img=fp,
title='Airmozilla Launch Test',
approvals=[group1.pk, group2.pk])
)
eq_(response.status_code, 302)
event = Event.objects.get(title='Airmozilla Launch Test')
approvals = event.approval_set.all()
eq_(approvals.count(), 2)
# this should send an email to all people in those groups
email_sent = mail.outbox[-1]
ok_(group_user.email in email_sent.to)
ok_(inactive_user.email not in email_sent.to)
ok_(event.title in email_sent.subject)
ok_(reverse('manage:approvals') in email_sent.body)
ok_('Baskin & Robbins' in email_sent.body)
ok_('<li>One</li>' not in email_sent.body)
ok_('* One\n' in email_sent.body)
# edit it and drop the second group
response_ok = self.client.post(
reverse('manage:event_edit', kwargs={'id': event.id}),
dict(self.event_base_data, title='Different title',
approvals=[])
)
eq_(response_ok.status_code, 302)
event = Event.objects.get(title='Different title')
approvals = event.approval_set.all()
# it's impossible to un-set approvals
# see https://bugzilla.mozilla.org/show_bug.cgi?id=839024
eq_(approvals.count(), 2)
def test_events(self):
"""The events page responds successfully."""
response = self.client.get(reverse('manage:events'))
eq_(response.status_code, 200)
def test_events_with_basic_filtering(self):
event = Event.objects.get(title='Test event')
response = self.client.get(reverse('manage:events_data'))
eq_(response.status_code, 200)
results = json.loads(response.content)
eq_(results['events'][0]['id'], event.id)
event.status = Event.STATUS_PENDING
event.save()
response = self.client.get(reverse('manage:events_data'))
eq_(response.status_code, 200)
results = json.loads(response.content)
# still there
eq_(results['events'][0]['id'], event.id)
event.status = Event.STATUS_INITIATED
event.save()
response = self.client.get(reverse('manage:events_data'))
eq_(response.status_code, 200)
results = json.loads(response.content)
ok_(not results['events'])
def test_events_with_event_without_location(self):
event = Event.objects.get(title='Test event')
response = self.client.get(reverse('manage:events_data'))
eq_(response.status_code, 200)
results = json.loads(response.content)
result = results['events'][0]
# the "local" time this event starts is 12:30
ok_('12:30PM' in result['start_time'])
ok_('21 Jun 2012' in result['start_time'])
ok_('Mountain View' in result['location'])
event.location = None
event.save()
response = self.client.get(reverse('manage:events_data'))
eq_(response.status_code, 200)
results = json.loads(response.content)
result = results['events'][0]
ok_('7:30PM' in result['start_time'])
ok_('21 Jun 2012' in result['start_time'])
ok_('Mountain View' not in result['location'])
def test_events_data_with_popcorn(self):
event = Event.objects.get(title='Test event')
event.upcoming = False
event.popcorn_url = 'https://webmaker.org/123'
event.save()
response = self.client.get(reverse('manage:events_data'))
eq_(response.status_code, 200)
results = json.loads(response.content)
result = results['events'][0]
eq_(result['popcorn_url'], event.popcorn_url)
def test_events_data_with_latest_modify_date(self):
event = Event.objects.get(title='Test event')
response = self.client.get(reverse('manage:events_data'))
eq_(response.status_code, 200)
results = json.loads(response.content)
ok_(results['events'])
eq_(results['max_modified'], event.modified.isoformat())
first, = results['events']
eq_(first['modified'], results['max_modified'])
def test_events_data_since(self):
url = reverse('manage:events_data')
response = self.client.get(url)
eq_(response.status_code, 200)
results = json.loads(response.content)
ok_(results['events'])
response = self.client.get(url, {
'since': 'junk'
})
eq_(response.status_code, 400)
response = self.client.get(url, {
'since': results['max_modified']
})
eq_(response.status_code, 200)
results = json.loads(response.content)
ok_(not results['events'])
ok_(not results['max_modified'])
# go back a second in time
event = Event.objects.get(title='Test event')
max_modified = event.modified - datetime.timedelta(seconds=1)
response = self.client.get(url, {
'since': max_modified,
})
eq_(response.status_code, 200)
results = json.loads(response.content)
ok_(results['events'])
eq_(results['max_modified'], event.modified.isoformat())
def test_events_data_with_pictures_count(self):
event = Event.objects.get(title='Test event')
response = self.client.get(reverse('manage:events_data'))
eq_(response.status_code, 200)
results = json.loads(response.content)
result = results['events'][0]
ok_('pictures' not in result)
with open(self.placeholder) as fp:
Picture.objects.create(
file=File(fp),
event=event,
)
response = self.client.get(reverse('manage:events_data'))
eq_(response.status_code, 200)
results = json.loads(response.content)
result = results['events'][0]
eq_(result['pictures'], 1)
def test_events_data_with_has_picture(self):
event = Event.objects.get(title='Test event')
response = self.client.get(reverse('manage:events_data'))
eq_(response.status_code, 200)
results = json.loads(response.content)
result = results['events'][0]
ok_('picture' not in result)
with open(self.placeholder) as fp:
picture = Picture.objects.create(
file=File(fp),
)
event.picture = picture
event.save()
response = self.client.get(reverse('manage:events_data'))
eq_(response.status_code, 200)
results = json.loads(response.content)
result = results['events'][0]
eq_(result['picture'], picture.id)
def test_events_data_with_is_scheduled(self):
event = Event.objects.get(title='Test event')
assert event.status == Event.STATUS_SCHEDULED
response = self.client.get(reverse('manage:events_data'))
eq_(response.status_code, 200)
results = json.loads(response.content)
result = results['events'][0]
ok_(result['is_scheduled'])
def test_events_data_with_limit(self):
event = Event.objects.get(title='Test event')
Event.objects.create(
title='Contributors Only Event',
slug='event2',
description=event.description,
start_time=event.start_time,
privacy=Event.PRIVACY_PUBLIC,
placeholder_img=event.placeholder_img,
location=event.location,
status=Event.STATUS_PENDING,
)
Event.objects.create(
title='MoCo Only Event',
slug='event3',
description=event.description,
start_time=event.start_time,
privacy=Event.PRIVACY_PUBLIC,
placeholder_img=event.placeholder_img,
location=event.location,
status=Event.STATUS_PENDING,
)
url = reverse('manage:events_data')
response = self.client.get(url)
eq_(response.status_code, 200)
result = json.loads(response.content)
eq_(len(result['events']), 3)
response = self.client.get(url, {'limit': 2})
eq_(response.status_code, 200)
result = json.loads(response.content)
eq_(len(result['events']), 2)
response = self.client.get(url, {'limit': -2})
eq_(response.status_code, 200)
result = json.loads(response.content)
eq_(len(result['events']), 3)
def test_events_data_with_live_and_upcoming(self):
# some events will be annotated with is_live and is_upcoming
event = Event.objects.get(title='Test event')
now = timezone.now()
event2 = Event.objects.create(
title='Event 2',
slug='event2',
description=event.description,
start_time=now - datetime.timedelta(minutes=1),
privacy=Event.PRIVACY_PUBLIC,
placeholder_img=event.placeholder_img,
location=event.location,
status=Event.STATUS_SCHEDULED
)
assert not event2.archive_time
assert event2 in Event.objects.approved()
assert event2 in Event.objects.live()
event3 = Event.objects.create(
title='Event 3',
slug='event3',
description=event.description,
start_time=now + datetime.timedelta(days=1),
privacy=Event.PRIVACY_PUBLIC,
placeholder_img=event.placeholder_img,
location=event.location,
status=Event.STATUS_SCHEDULED
)
assert not event3.archive_time
assert event3 in Event.objects.approved()
assert event3 in Event.objects.upcoming()
assert event3 not in Event.objects.live()
url = reverse('manage:events_data')
response = self.client.get(url)
eq_(response.status_code, 200)
result = json.loads(response.content)
titles = [x['title'] for x in result['events']]
eq_(titles, ['Event 3', 'Event 2', 'Test event'])
event = result['events'][0]
ok_(not event.get('is_live'))
ok_(event['is_upcoming'])
event = result['events'][1]
ok_(event['is_live'])
ok_(not event.get('is_upcoming'))
event = result['events'][2]
ok_(not event.get('is_live'))
ok_(not event.get('is_upcoming'))
def test_events_data_with_thumbnail(self):
event = Event.objects.get(title='Test event')
with open(self.placeholder) as fp:
response = self.client.post(
reverse('manage:event_edit', args=(event.pk,)),
dict(self.event_base_data, placeholder_img=fp,
title=event.title)
)
eq_(response.status_code, 302)
url = reverse('manage:events_data')
response = self.client.get(url)
eq_(response.status_code, 200)
result = json.loads(response.content)
assert result['events'][0]['title'] == event.title
def test_events_data_without_any_picture(self):
event = Event.objects.get(title='Test event')
event.placeholder_img = None
event.save()
url = reverse('manage:events_data')
response = self.client.get(url)
eq_(response.status_code, 200)
result = json.loads(response.content)
ok_(result['events'][0]['nopicture'])
def test_events_data_pending_with_has_vidly_template(self):
event = Event.objects.get(title='Test event')
event.status = Event.STATUS_PENDING
event.save()
url = reverse('manage:events_data')
response = self.client.get(url)
eq_(response.status_code, 200)
result = json.loads(response.content)
row = result['events'][0]
assert row['title'] == event.title
ok_(row['is_pending'])
ok_(not row.get('has_vidly_template'))
template = event.template
template.name = 'Vid.ly Fun'
template.save()
assert event.has_vidly_template()
response = self.client.get(url)
eq_(response.status_code, 200)
result = json.loads(response.content)
row = result['events'][0]
ok_(row['is_pending'])
ok_(row.get('has_vidly_template'))
def test_events_seen_by_contributors(self):
# there should be one event of each level of privacy
event = Event.objects.get(title='Test event')
assert event.privacy == Event.PRIVACY_PUBLIC
event2 = Event.objects.create(
title='Contributors Only Event',
slug='event2',
description=event.description,
start_time=event.start_time,
privacy=Event.PRIVACY_CONTRIBUTORS,
placeholder_img=event.placeholder_img,
location=event.location,
status=Event.STATUS_SCHEDULED,
)
event3 = Event.objects.create(
title='MoCo Only Event',
slug='event3',
description=event.description,
start_time=event.start_time,
privacy=Event.PRIVACY_COMPANY,
placeholder_img=event.placeholder_img,
location=event.location,
status=Event.STATUS_SCHEDULED,
)
response = self.client.get(reverse('manage:events_data'))
eq_(response.status_code, 200)
result = json.loads(response.content)
titles = [x['title'] for x in result['events']]
ok_(event.title in titles)
ok_(event2.title in titles)
ok_(event3.title in titles)
# now log in as a contributor
contributor = User.objects.create_user(
'nigel', 'nigel@live.com', 'secret'
)
producers = Group.objects.create(name='Producer')
change_event_permission = Permission.objects.get(
codename='change_event'
)
change_event_others_permission = Permission.objects.get(
codename='change_event_others'
)
producers.permissions.add(change_event_permission)
producers.permissions.add(change_event_others_permission)
contributor.groups.add(producers)
contributor.is_staff = True
contributor.save()
UserProfile.objects.create(
user=contributor,
contributor=True
)
assert self.client.login(username='nigel', password='secret')
response = self.client.get(reverse('manage:events_data'))
eq_(response.status_code, 200)
result = json.loads(response.content)
titles = [x['title'] for x in result['events']]
ok_(event.title in titles)
ok_(event2.title in titles)
ok_(event3.title not in titles)
# you can edit the first two events
edit_url1 = reverse('manage:event_edit', kwargs={'id': event.id})
response = self.client.get(edit_url1)
eq_(response.status_code, 200)
edit_url2 = reverse('manage:event_edit', kwargs={'id': event2.id})
response = self.client.get(edit_url2)
eq_(response.status_code, 200)
edit_url3 = reverse('manage:event_edit', kwargs={'id': event3.id})
response = self.client.get(edit_url3)
eq_(response.status_code, 302)
def test_event_edit_slug(self):
"""Test editing an event - modifying an event's slug
results in a correct EventOldSlug."""
event = Event.objects.get(title='Test event')
response = self.client.get(reverse('manage:event_edit',
kwargs={'id': event.id}))
eq_(response.status_code, 200)
response_ok = self.client.post(
reverse('manage:event_edit', kwargs={'id': event.id}),
dict(self.event_base_data, title='Tested event')
)
self.assertRedirects(response_ok, reverse('manage:events'))
ok_(EventOldSlug.objects.get(slug='test-event', event=event))
event = Event.objects.get(title='Tested event')
eq_(event.slug, 'tested-event')
eq_(event.modified_user, self.user)
response_fail = self.client.post(
reverse('manage:event_edit', kwargs={'id': event.id}),
{
'title': 'not nearly enough data',
'status': Event.STATUS_SCHEDULED
}
)
eq_(response_fail.status_code, 200)
def test_event_edit_pin(self):
"""Test editing an event - modifying the pin"""
event = Event.objects.get(title='Test event')
response = self.client.get(reverse('manage:event_edit',
kwargs={'id': event.id}))
eq_(response.status_code, 200)
ok_('Pin' in response.content)
response = self.client.post(
reverse('manage:event_edit', kwargs={'id': event.id}),
dict(
self.event_base_data,
title='Tested event',
pin='1'
)
)
eq_(response.status_code, 200)
ok_('Pin too short' in response.content)
response = self.client.post(
reverse('manage:event_edit', kwargs={'id': event.id}),
dict(self.event_base_data, title='Tested event',
pin='12345')
)
self.assertRedirects(response, reverse('manage:events'))
ok_(Event.objects.get(pin='12345'))
def test_event_edit_unset_location(self):
"""Test editing an event - modifying the pin"""
event = Event.objects.get(title='Test event')
assert event.location.timezone == 'US/Pacific'
assert event.start_time.hour == 19
assert event.start_time.minute == 30
assert event.start_time.tzinfo == utc
url = reverse('manage:event_edit', kwargs={'id': event.id})
response = self.client.get(url)
eq_(response.status_code, 200)
# the event's start_time is 19:30 in UTC,
# which is 12:30 in US/Pacific
ok_('12:30' in response.content)
# now, set the location to None
response = self.client.post(
url,
dict(self.event_base_data, title='Test event',
location='',
start_time=event.start_time.strftime('%Y-%m-%d %H:%M'))
)
eq_(response.status_code, 302)
event = Event.objects.get(title='Test event')
# the start time should not have changed
assert event.start_time.hour == 19
assert event.start_time.minute == 30
assert event.start_time.tzinfo == utc
response = self.client.get(url)
eq_(response.status_code, 200)
# now, because no timezone is known, we have to rely on UTC
ok_('12:30' not in response.content)
ok_('19:30' in response.content)
def test_event_edit_templates(self):
"""Event editing results in correct template environments."""
event = Event.objects.get(title='Test event')
url = reverse('manage:event_edit', kwargs={'id': event.id})
response_ok = self.client.post(
url,
dict(self.event_base_data, title='template edit',
template_environment='tv1=\'hi\'\ntv2===')
)
self.assertRedirects(response_ok, reverse('manage:events'))
event = Event.objects.get(id=event.id)
eq_(event.template_environment, {'tv1': "'hi'", 'tv2': '=='})
response_edit_page = self.client.get(url)
eq_(response_edit_page.status_code, 200,
'Edit page renders OK with a specified template environment.')
response_fail = self.client.post(
url,
dict(self.event_base_data, title='template edit',
template_environment='failenvironment')
)
eq_(response_fail.status_code, 200)
def test_event_archive(self):
"""Event archive page loads and shows correct archive_time behavior."""
event = Event.objects.get(title='Test event')
event.archive_time = None
# also, make it non-public
event.privacy = Event.PRIVACY_COMPANY
event.save()
url = reverse('manage:event_archive', kwargs={'id': event.id})
response_ok = self.client.get(url)
eq_(response_ok.status_code, 200)
# the `token_protection` should be forced on
ok_('Required for non-public events' in response_ok.content)
response_ok = self.client.post(url)
self.assertRedirects(response_ok, reverse('manage:events'))
event_modified = Event.objects.get(id=event.id)
eq_(event_modified.status, Event.STATUS_SCHEDULED)
        now = timezone.now()
ok_(
abs(event_modified.archive_time - now)
<=
datetime.timedelta(1)
)
def test_event_archive_with_default_archive_template(self):
"""If you have a template that has `default_archive_template` True
then it should mention that on the event archive page."""
event = Event.objects.get(title='Test event')
event.archive_time = None
# also, make it non-public
event.privacy = Event.PRIVACY_COMPANY
event.save()
url = reverse('manage:event_archive', args=(event.pk,))
response = self.client.get(url)
eq_(response.status_code, 200)
assert not Template.objects.filter(default_archive_template=True)
ok_('default_archive_template' not in response.content)
template = Template.objects.create(
name='Foo',
default_archive_template=True
)
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('default_archive_template' in response.content)
ok_('value="%s"' % template.pk in response.content)
def test_event_archive_with_upload(self):
"""event archive an event that came from a suggested event that has
a file upload."""
event = Event.objects.get(title='Test event')
event.archive_time = None
event.save()
upload = Upload.objects.create(
user=self.user,
url='http://s3.com/some.flv',
size=12345
)
now = timezone.now()
tomorrow = now + datetime.timedelta(days=1)
location = Location.objects.get(id=1)
SuggestedEvent.objects.create(
user=self.user,
title='TITLE',
slug='SLUG',
short_description='SHORT DESCRIPTION',
description='DESCRIPTION',
start_time=tomorrow,
location=location,
placeholder_img=self.placeholder,
privacy=Event.PRIVACY_CONTRIBUTORS,
first_submitted=now,
accepted=event,
upload=upload
)
url = reverse('manage:event_archive', kwargs={'id': event.id})
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('http://s3.com/some.flv' in response.content)
def test_event_archive_with_vidly_template(self):
"""Event archive page loads and shows correct archive_time behavior."""
vidly_template = Template.objects.create(name='Vid.ly HD')
event = Event.objects.get(title='Test event')
event.archive_time = None
event.save()
url = reverse('manage:event_archive', kwargs={'id': event.id})
response_ok = self.client.post(url, {
'template': vidly_template.pk,
'template_environment': 'tag=abc123',
})
self.assertRedirects(response_ok, reverse('manage:events'))
event_modified = Event.objects.get(id=event.id)
eq_(event_modified.archive_time, None)
eq_(event_modified.status, Event.STATUS_PENDING)
@mock.patch('airmozilla.manage.vidly.urllib2')
def test_event_archive_with_vidly_template_with_vidly_submission(
self, p_urllib2
):
"""Event archive an event with a tag that has a VidlySubmission
that was successful. If you do that it should immediately
set an archive_time."""
def mocked_urlopen(request):
return StringIO(get_custom_XML(tag='abc123'))
p_urllib2.urlopen = mocked_urlopen
vidly_template = Template.objects.create(name='Vid.ly HD')
event = Event.objects.get(title='Test event')
event.archive_time = None
assert event.status == Event.STATUS_SCHEDULED
event.save()
VidlySubmission.objects.create(
event=event,
url='https://aws.com/file.mov',
tag='abc123',
)
url = reverse('manage:event_archive', kwargs={'id': event.id})
response_ok = self.client.post(url, {
'template': vidly_template.pk,
'template_environment': 'tag=abc123',
})
self.assertRedirects(response_ok, reverse('manage:events'))
event_modified = Event.objects.get(id=event.id)
eq_(event_modified.status, Event.STATUS_SCHEDULED)
ok_(event_modified.archive_time)
def test_event_duplication(self):
event = Event.objects.get(title='Test event')
url = reverse('manage:event_duplicate', args=(event.id,))
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('value="Test event"' in response.content)
ok_(
'value="%s"' % event.location_time.strftime('%Y-%m-%d %H:%M')
in response.content
)
def test_event_duplication_without_location(self):
event = Event.objects.get(title='Test event')
event.location = None
event.save()
url = reverse('manage:event_duplicate', args=(event.id,))
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('value="Test event"' in response.content)
ok_(
'value="%s"' % event.start_time.strftime('%Y-%m-%d %H:%M')
in response.content
)
def test_event_duplication_with_discussion(self):
event = Event.objects.get(title='Test event')
discussion = Discussion.objects.create(
event=event,
enabled=True,
closed=False,
notify_all=True,
moderate_all=True
)
bob = User.objects.create(username='bob', email='bob@mozilla.com')
discussion.moderators.add(bob)
url = reverse('manage:event_duplicate', args=(event.id,))
response = self.client.get(url)
eq_(response.status_code, 200)
data = {
'title': 'Different',
'description': event.description,
'short_description': event.short_description,
'location': event.location.pk,
'privacy': event.privacy,
'status': event.status,
'start_time': event.start_time.strftime('%Y-%m-%d %H:%M'),
'estimated_duration': event.estimated_duration,
'channels': [x.pk for x in event.channels.all()],
'enable_discussion': True,
}
response = self.client.post(url, data)
eq_(response.status_code, 302)
new_discussion = Discussion.objects.get(
event__title='Different'
)
eq_(new_discussion.notify_all, True)
eq_(new_discussion.moderate_all, True)
eq_(
list(new_discussion.moderators.all()),
list(discussion.moderators.all())
)
def test_event_duplication_with_curated_groups(self):
event = Event.objects.get(title='Test event')
CuratedGroup.objects.create(
event=event,
name='badasses'
)
url = reverse('manage:event_duplicate', args=(event.id,))
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('value="badasses"' in response.content)
data = {
'title': 'Different',
'description': event.description,
'short_description': event.short_description,
'location': event.location.pk,
'privacy': event.privacy,
'status': event.status,
'start_time': event.start_time.strftime('%Y-%m-%d %H:%M'),
'estimated_duration': event.estimated_duration,
'channels': [x.pk for x in event.channels.all()],
'enable_discussion': True,
'curated_groups': 'badasses'
}
response = self.client.post(url, data)
eq_(response.status_code, 302)
# this is expected to exist
ok_(CuratedGroup.objects.get(event__title='Different'))
def test_event_duplication_with_picture(self):
event = Event.objects.get(title='Test event')
with open(self.placeholder) as fp:
picture = Picture.objects.create(file=File(fp))
event.picture = picture
event.placeholder_img = None
event.save()
url = reverse('manage:event_duplicate', args=(event.id,))
response = self.client.get(url)
eq_(response.status_code, 200)
data = {
'title': 'Different',
'description': event.description,
'short_description': event.short_description,
'location': event.location.pk,
'privacy': event.privacy,
'status': event.status,
'start_time': event.start_time.strftime('%Y-%m-%d %H:%M'),
'estimated_duration': event.estimated_duration,
'channels': [x.pk for x in event.channels.all()],
'enable_discussion': True,
'picture': picture.id,
}
response = self.client.post(url, data)
eq_(response.status_code, 302)
event = Event.objects.get(title='Different')
eq_(event.picture, picture)
def test_event_duplication_custom_channels(self):
ch = Channel.objects.create(
name='Custom Culture',
slug='custom-culture'
)
event = Event.objects.get(title='Test event')
event.channels.filter(slug=settings.DEFAULT_CHANNEL_SLUG).delete()
event.channels.add(ch)
event.save()
url = reverse('manage:event_duplicate', args=(event.id,))
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('value="Test event"' in response.content)
# expect a <option> tag selected with this name
tags = re.findall(
            r'<option (.*?)>([\w\s]+)</option>',
response.content,
flags=re.M
)
for attrs, value in tags:
if value == ch.name:
ok_('selected' in attrs)
def test_event_preview_shortcut(self):
# become anonymous (reverse what setUp() does)
self.client.logout()
# view it anonymously
event = Event.objects.get(title='Test event')
url = reverse('main:event', args=(event.slug,))
response = self.client.get(url)
eq_(response.status_code, 200)
edit_url = reverse('manage:event_edit', args=(event.pk,))
ok_(edit_url not in response.content)
# now log in
assert self.client.login(username='fake', password='fake')
# check that you can view the edit page
response = self.client.get(edit_url)
eq_(response.status_code, 200)
# and now the real test
response = self.client.get(url)
eq_(response.status_code, 200)
ok_(edit_url in response.content)
@mock.patch('airmozilla.manage.vidly.urllib2')
def test_vidly_url_to_shortcode(self, p_urllib2):
event = Event.objects.get(title='Test event')
assert event.privacy == Event.PRIVACY_PUBLIC
url = reverse('manage:vidly_url_to_shortcode', args=(event.pk,))
def mocked_urlopen(request):
return StringIO("""
<?xml version="1.0"?>
<Response>
<Message>All medias have been added.</Message>
<MessageCode>2.1</MessageCode>
<BatchID>47520</BatchID>
<Success>
<MediaShortLink>
<SourceFile>http://www.com/file.flv</SourceFile>
<ShortLink>8oxv6x</ShortLink>
<MediaID>13969839</MediaID>
<QRCode>http://vid.ly/8oxv6x/qrcodeimg</QRCode>
<HtmlEmbed>code code</HtmlEmbed>
<EmailEmbed>more code code</EmailEmbed>
</MediaShortLink>
</Success>
</Response>
""")
p_urllib2.urlopen = mocked_urlopen
response = self.client.get(url)
eq_(response.status_code, 405)
response = self.client.post(url, {
'url': 'not a url'
})
eq_(response.status_code, 400)
match = URLMatch.objects.create(
name='Always Be Safe',
string='^http://'
)
URLTransform.objects.create(
match=match,
find='^http://',
replace_with='https://'
)
response = self.client.post(url, {
'url': 'http://www.com/'
})
eq_(response.status_code, 200)
content = json.loads(response.content)
eq_(content['shortcode'], '8oxv6x')
eq_(content['url'], 'https://www.com/')
arguments = list(p_urllib2.Request.mock_calls[0])[1]
# the first argument is the URL
ok_('vid.ly' in arguments[0])
# the second argument is querystring containing the XML used
data = cgi.parse_qs(arguments[1])
xml = data['xml'][0]
ok_('<HD>YES</HD>' not in xml)
ok_('<HD>NO</HD>' in xml)
ok_('<SourceFile>https://www.com/</SourceFile>' in xml)
# re-fetch it
match = URLMatch.objects.get(pk=match.pk)
eq_(match.use_count, 1)
@mock.patch('airmozilla.manage.vidly.urllib2')
def test_vidly_url_to_shortcode_with_forced_protection(self, p_urllib2):
event = Event.objects.get(title='Test event')
event.privacy = Event.PRIVACY_COMPANY
event.save()
url = reverse('manage:vidly_url_to_shortcode', args=(event.pk,))
def mocked_urlopen(request):
return StringIO("""
<?xml version="1.0"?>
<Response>
<Message>All medias have been added.</Message>
<MessageCode>2.1</MessageCode>
<BatchID>47520</BatchID>
<Success>
<MediaShortLink>
<SourceFile>http://www.com/file.flv</SourceFile>
<ShortLink>8oxv6x</ShortLink>
<MediaID>13969839</MediaID>
<QRCode>http://vid.ly/8oxv6x/qrcodeimg</QRCode>
<HtmlEmbed>code code</HtmlEmbed>
<EmailEmbed>more code code</EmailEmbed>
</MediaShortLink>
</Success>
</Response>
""")
p_urllib2.urlopen = mocked_urlopen
response = self.client.post(url, {
'url': 'http://www.com/'
})
eq_(response.status_code, 200)
content = json.loads(response.content)
eq_(content['shortcode'], '8oxv6x')
submission, = VidlySubmission.objects.all()
ok_(submission.token_protection)
ok_(not submission.hd)
@mock.patch('airmozilla.manage.vidly.urllib2')
def test_vidly_url_to_shortcode_with_hd(self, p_urllib2):
event = Event.objects.get(title='Test event')
url = reverse('manage:vidly_url_to_shortcode', args=(event.pk,))
def mocked_urlopen(request):
return StringIO("""
<?xml version="1.0"?>
<Response>
<Message>All medias have been added.</Message>
<MessageCode>2.1</MessageCode>
<BatchID>47520</BatchID>
<Success>
<MediaShortLink>
<SourceFile>http://www.com/file.flv</SourceFile>
<ShortLink>8oxv6x</ShortLink>
<MediaID>13969839</MediaID>
<QRCode>http://vid.ly/8oxv6x/qrcodeimg</QRCode>
<HtmlEmbed>code code</HtmlEmbed>
<EmailEmbed>more code code</EmailEmbed>
</MediaShortLink>
</Success>
</Response>
""")
p_urllib2.urlopen = mocked_urlopen
response = self.client.post(url, {
'url': 'http://www.com/',
'hd': True,
})
eq_(response.status_code, 200)
content = json.loads(response.content)
eq_(content['shortcode'], '8oxv6x')
arguments = list(p_urllib2.Request.mock_calls[0])[1]
# the first argument is the URL
ok_('vid.ly' in arguments[0])
# the second argument is querystring containing the XML used
data = cgi.parse_qs(arguments[1])
xml = data['xml'][0]
ok_('<HD>YES</HD>' in xml)
ok_('<HD>NO</HD>' not in xml)
def test_events_autocomplete(self):
event = Event.objects.get(title='Test event')
event2 = Event.objects.create(
title='The Other Cool Title Event',
description=event.description,
start_time=event.start_time,
)
eq_(Event.objects.all().count(), 2)
url = reverse('manage:event_autocomplete')
response = self.client.get(url)
eq_(response.status_code, 400)
response = self.client.get(url, {'q': 'something', 'max': 'nan'})
eq_(response.status_code, 400)
response = self.client.get(url, {'q': 'eVEnt'})
eq_(response.status_code, 200)
content = json.loads(response.content)
eq_(content, ['Test event', 'The Other Cool Title Event'])
response = self.client.get(url, {'q': 'EVen', 'max': 1})
eq_(response.status_code, 200)
content = json.loads(response.content)
eq_(content, ['Test event'])
response = self.client.get(url, {'q': 'E'})
eq_(response.status_code, 200)
content = json.loads(response.content)
eq_(content, [])
response = self.client.get(url, {'q': 'COOL'})
eq_(response.status_code, 200)
content = json.loads(response.content)
eq_(content, ['The Other Cool Title Event'])
response = self.client.get(url, {'q': 'COO'})
eq_(response.status_code, 200)
content = json.loads(response.content)
eq_(content, ['The Other Cool Title Event'])
response = self.client.get(url, {'q': 'THE'})
eq_(response.status_code, 200)
content = json.loads(response.content)
eq_(content, [])
# the autocomplete caches the same search
event2.title = event2.title.replace('Cool', 'Brilliant')
event2.save()
response = self.client.get(url, {'q': 'COol'})
eq_(response.status_code, 200)
content = json.loads(response.content)
eq_(content, ['The Other Cool Title Event'])
# but if the query is different it should work
response = self.client.get(url, {'q': 'brill'})
eq_(response.status_code, 200)
content = json.loads(response.content)
eq_(content, ['The Other Brilliant Title Event'])
def test_overwrite_old_slug(self):
# you create an event, change the slug and change it back
with open(self.placeholder) as fp:
response = self.client.post(
reverse('manage:event_request'),
dict(self.event_base_data, placeholder_img=fp,
title='Launch')
)
eq_(response.status_code, 302)
event = Event.objects.get(slug='launch')
url = reverse('main:event', args=('launch',))
response = self.client.get(url)
eq_(response.status_code, 200)
# now edit the slug
response = self.client.post(
reverse('manage:event_edit', kwargs={'id': event.pk}),
dict(self.event_base_data,
title='Different title',
slug='different',)
)
eq_(response.status_code, 302)
assert Event.objects.get(slug='different')
old_url = url
url = reverse('main:event', args=('different',))
response = self.client.get(url)
eq_(response.status_code, 200)
response = self.client.get(old_url)
eq_(response.status_code, 302)
self.assertRedirects(response, url)
# but suppose we change our mind back
response = self.client.post(
reverse('manage:event_edit', kwargs={'id': event.pk}),
dict(self.event_base_data,
title='Launch title',
slug='launch',)
)
eq_(response.status_code, 302)
event = Event.objects.get(slug='launch')
old_url = url
url = reverse('main:event', args=('launch',))
response = self.client.get(url)
eq_(response.status_code, 200)
response = self.client.get(old_url)
eq_(response.status_code, 302)
self.assertRedirects(response, url)
event.delete()
response = self.client.get(url)
eq_(response.status_code, 404)
response = self.client.get(old_url)
eq_(response.status_code, 404)
def test_overwrite_old_slug_twice(self):
# based on https://bugzilla.mozilla.org/show_bug.cgi?id=850742#c3
with open(self.placeholder) as fp:
response = self.client.post(
reverse('manage:event_request'),
dict(self.event_base_data, placeholder_img=fp,
title='Champagne')
)
eq_(response.status_code, 302)
event = Event.objects.get(slug='champagne')
# now edit the slug
response = self.client.post(
reverse('manage:event_edit', kwargs={'id': event.pk}),
dict(self.event_base_data,
title=event.title,
slug='somethingelse')
)
# back again
response = self.client.post(
reverse('manage:event_edit', kwargs={'id': event.pk}),
dict(self.event_base_data,
title=event.title,
slug='champagne')
)
# one last time
response = self.client.post(
reverse('manage:event_edit', kwargs={'id': event.pk}),
dict(self.event_base_data,
title=event.title,
slug='somethingelse')
)
url = reverse('main:event', args=('somethingelse',))
response = self.client.get(url)
eq_(response.status_code, 200)
old_url = reverse('main:event', args=('champagne',))
response = self.client.get(old_url)
eq_(response.status_code, 302)
self.assertRedirects(response, url)
def test_editing_event_without_location(self):
# Edit an event that doesn't have a location, and keep it that way.
# It should not affect the start_time.
event = Event.objects.get(title='Test event')
event.location = None
event.save()
start_time_before = event.start_time
url = reverse('manage:event_edit', args=(event.id,))
response = self.client.post(url, {
'title': event.title,
'description': event.description,
'short_description': event.short_description,
'location': '',
'status': event.status,
'slug': event.slug,
'start_time': event.start_time.strftime('%Y-%m-%d %H:%M'),
'channels': [x.id for x in event.channels.all()],
'tags': [x.id for x in event.tags.all()],
'estimated_duration': event.estimated_duration,
'privacy': event.privacy,
})
eq_(response.status_code, 302)
# reload and check the start_time
event = Event.objects.get(id=event.id)
start_time_after = event.start_time
eq_(start_time_before, start_time_after)
def test_editing_event_tags(self):
# you create an event, edit the tags and mix the case
with open(self.placeholder) as fp:
response = self.client.post(
reverse('manage:event_request'),
dict(self.event_base_data, placeholder_img=fp,
title='Launch')
)
eq_(response.status_code, 302)
event = Event.objects.get(slug='launch')
# now edit the tags
response = self.client.post(
reverse('manage:event_edit', kwargs={'id': event.pk}),
dict(self.event_base_data,
title=event.title,
tags='One, Two')
)
eq_(response.status_code, 302)
event = Event.objects.get(pk=event.pk)
ok_(Tag.objects.get(name='One') in list(event.tags.all()))
ok_(Tag.objects.get(name='Two') in list(event.tags.all()))
# Edit a tag that already exists
Tag.objects.create(name='three')
count_tags_before = Tag.objects.all().count()
response = self.client.post(
reverse('manage:event_edit', kwargs={'id': event.pk}),
dict(self.event_base_data,
title=event.title,
tags='One, Two, THREE')
)
count_tags_after = Tag.objects.all().count()
eq_(count_tags_before, count_tags_after)
def test_event_request_with_clashing_staticpage(self):
StaticPage.objects.create(
url='/egg-plants/',
title='Egg Plants',
)
with open(self.placeholder) as fp:
response = self.client.post(
reverse('manage:event_request'),
dict(self.event_base_data, placeholder_img=fp,
title='Egg Plants')
)
eq_(response.status_code, 200)
ok_('Form errors' in response.content)
def test_event_edit_with_clashing_staticpage(self):
        # If you edit the event and its slug already clashes with a
        # StaticPage, there's little we can do, since the StaticPage was
        # added afterwards.
with open(self.placeholder) as fp:
response = self.client.post(
reverse('manage:event_request'),
dict(self.event_base_data, placeholder_img=fp,
title='Champagne')
)
eq_(response.status_code, 302)
StaticPage.objects.create(
url='/egg-plants/',
title='Egg Plants',
)
event = Event.objects.get(slug='champagne')
# now edit the event without changing the slug
response = self.client.post(
reverse('manage:event_edit', kwargs={'id': event.pk}),
dict(self.event_base_data,
title="New Title",
slug=event.slug)
)
# should be ok
eq_(response.status_code, 302)
response = self.client.post(
reverse('manage:event_edit', kwargs={'id': event.pk}),
dict(self.event_base_data,
title="New Title",
slug='egg-plants')
)
# should NOT be ok
eq_(response.status_code, 200)
ok_('Form errors' in response.content)
def test_event_edit_with_vidly_submissions(self):
event = Event.objects.get(title='Test event')
url = reverse('manage:event_edit', args=(event.pk,))
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('id="vidly-submission"' not in response.content)
template = event.template
template.name = 'Vid.ly Fun'
template.save()
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('id="vidly-submission"' in response.content)
VidlySubmission.objects.create(
event=event,
url='http://www.file',
)
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('1 Vid.ly Submission' in response.content)
# a second one
VidlySubmission.objects.create(
event=event,
url='http://www.file.different.file',
)
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('2 Vid.ly Submissions' in response.content)
submissions_url = reverse(
'manage:event_vidly_submissions',
args=(event.pk,)
)
ok_(submissions_url in response.content)
@mock.patch('urllib2.urlopen')
def test_event_edit_with_stuck_pending(self, p_urlopen):
def mocked_urlopen(request):
return StringIO(SAMPLE_XML.strip())
p_urlopen.side_effect = mocked_urlopen
event = Event.objects.get(title='Test event')
event.template_environment = {'tag': 'abc123'}
event.status = Event.STATUS_PENDING
event.save()
url = reverse('manage:event_edit', args=(event.pk,))
template = event.template
template.name = 'Vid.ly Fun'
template.save()
submission = VidlySubmission.objects.create(
event=event,
url='http://www.file',
)
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('1 Vid.ly Submission' in response.content)
auto_archive_url = reverse(
'manage:event_archive_auto',
args=(event.pk,)
)
ok_(auto_archive_url not in response.content)
        # the reason it's not there is that the VidlySubmission
        # was made very recently.
        # It will appear if the VidlySubmission is old enough
submission.submission_time -= datetime.timedelta(hours=1)
submission.save()
response = self.client.get(url)
eq_(response.status_code, 200)
ok_(auto_archive_url in response.content)
# or if there is no VidlySubmission at all
submission.delete()
response = self.client.get(url)
eq_(response.status_code, 200)
ok_(auto_archive_url in response.content)
response = self.client.post(auto_archive_url)
eq_(response.status_code, 302)
event = Event.objects.get(pk=event.pk)
eq_(event.status, Event.STATUS_SCHEDULED)
ok_(event.archive_time)
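    def _auto_archive_visibility_sketch(self):
        # Illustrative restatement (assumed, not the view's actual code) of
        # the rule exercised above: the auto-archive shortcut is offered only
        # when there is no *recent* VidlySubmission. The one-hour cut-off is
        # a guess taken from the timedelta used in the test.
        def show_link(submission_age_hours):
            return submission_age_hours is None or submission_age_hours >= 1
        assert not show_link(0)    # brand new submission: link hidden
        assert show_link(1)        # old enough: link shown
        assert show_link(None)     # no submission at all: link shown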
def test_event_vidly_submissions(self):
event = Event.objects.get(title='Test event')
template = event.template
template.name = 'Vid.ly Fun'
template.save()
url = reverse('manage:event_vidly_submissions', args=(event.pk,))
response = self.client.get(url)
eq_(response.status_code, 200)
# add one
VidlySubmission.objects.create(
event=event,
url='http://something.long/url.file',
hd=True,
token_protection=False,
tag='abc123',
)
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('http://something.long/url.file' in response.content)
ok_('abc123' in response.content)
def test_event_vidly_submissions_with_active_submission(self):
event = Event.objects.get(title='Test event')
template = event.template
template.name = 'Vid.ly Fun'
template.save()
# add one
submission = VidlySubmission.objects.create(
event=event,
url='http://something.long/url.file',
hd=True,
token_protection=False,
tag='abc123',
)
assert 'Vid.ly' in event.template.name
event.template_environment = {'tag': 'abc123'}
event.save()
url = reverse('manage:event_vidly_submissions', args=(event.pk,))
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('Actively used' in response.content)
# If you have a submission with token_protection and the event
# is public, you'll get a warning message.
submission.token_protection = True
submission.save()
assert event.privacy == Event.PRIVACY_PUBLIC
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('Warning!' in response.content)
@mock.patch('airmozilla.manage.vidly.urllib2')
def test_delete_event_vidly_submissions(self, p_urllib2):
def mocked_urlopen(request):
return StringIO("""
<?xml version="1.0"?>
<Response>
<Message>Success</Message>
<MessageCode>0.0</MessageCode>
<Success>
<MediaShortLink>abc456</MediaShortLink>
</Success>
<Errors>
<Error>
<SourceFile>http://www.com</SourceFile>
<ErrorCode>1</ErrorCode>
<Description>ErrorDescriptionK</Description>
<Suggestion>ErrorSuggestionK</Suggestion>
</Error>
</Errors>
</Response>
""")
p_urllib2.urlopen = mocked_urlopen
event = Event.objects.get(title='Test event')
template = event.template
template.name = 'Vid.ly Fun'
template.save()
event.template_environment = {'tag': 'abc123'}
event.save()
url = reverse('manage:event_vidly_submissions', args=(event.pk,))
# add one
vs1 = VidlySubmission.objects.create(
event=event,
url='http://something.long/url.file',
hd=True,
token_protection=False,
tag='abc123',
)
vs2 = VidlySubmission.objects.create(
event=event,
url='http://something.long/url2.file',
hd=True,
token_protection=False,
tag='abc456',
)
vs3 = VidlySubmission.objects.create(
event=event,
url='http://something.long/url2.file.broken',
hd=True,
token_protection=False,
tag='xyz987',
)
response = self.client.post(url, {'id': [vs1.id, vs2.id, vs3.id]})
# because we're not allowed to delete vs1
eq_(response.status_code, 400)
response = self.client.post(url, {'id': [vs2.id, vs3.id]})
eq_(response.status_code, 302)
ok_(VidlySubmission.objects.filter(tag='abc123'))
ok_(not VidlySubmission.objects.filter(tag='abc456'))
# because it couldn't be deleted, we don't delete the record
ok_(VidlySubmission.objects.filter(tag='xyz987'))
# this time, do it by force
response = self.client.post(url, {'id': [vs3.id], 'forced': 1})
eq_(response.status_code, 302)
ok_(VidlySubmission.objects.filter(tag='abc123'))
ok_(not VidlySubmission.objects.filter(tag='xyz987'))
def test_delete_event_vidly_submissions_wo_tag(self):
event = Event.objects.get(title='Test event')
template = event.template
template.name = 'Vid.ly Fun'
template.save()
event.template_environment = {'tag': 'abc123'}
event.save()
submission = VidlySubmission.objects.create(
event=event,
url='http://something.long/url.file',
hd=True,
token_protection=False,
tag=None,
)
url = reverse('manage:event_vidly_submissions', args=(event.pk,))
response = self.client.post(url, {'id': [submission.id]})
eq_(response.status_code, 302)
# but it wouldn't be deleted
ok_(VidlySubmission.objects.get(id=submission.id))
response = self.client.post(
url,
{'id': [submission.id], 'forced': True}
)
eq_(response.status_code, 302)
ok_(not VidlySubmission.objects.filter(id=submission.id))
def test_event_vidly_submission(self):
event = Event.objects.get(title='Test event')
submission = VidlySubmission.objects.create(
event=event,
url='http://something.long/url.file',
hd=True,
token_protection=False,
tag='abc123',
submission_error='Something went wrong',
)
url = reverse(
'manage:event_vidly_submission',
args=(event.pk, submission.pk)
)
response = self.client.get(url)
eq_(response.status_code, 200)
data = json.loads(response.content)
eq_(data['submission_error'], 'Something went wrong')
# or as fields
response = self.client.get(url, {'as_fields': True})
eq_(response.status_code, 200)
data = json.loads(response.content)
ok_(data['fields'])
first_field = data['fields'][0]
ok_('key' in first_field)
ok_('value' in first_field)
def test_event_hit_stats(self):
event = Event.objects.get(title='Test event')
now = timezone.now()
event.start_time = now - datetime.timedelta(days=400)
event.archive_time = now - datetime.timedelta(days=365)
event.save()
EventHitStats.objects.create(
event=event,
total_hits=101,
shortcode='abc123',
)
url = reverse('manage:event_hit_stats')
response = self.client.get(url)
eq_(response.status_code, 200)
# 101 / 365 days ~= 0.3
ok_(u'1\xa0year' in unicode(response.content, 'utf-8'))
ok_('101' in response.content)
ok_('0.3' in response.content)
def test_event_hit_stats_include_excluded(self):
event = Event.objects.get(title='Test event')
poison = Channel.objects.create(
name='Poison',
exclude_from_trending=True
)
event.channels.add(poison)
now = timezone.now()
event.start_time = now - datetime.timedelta(days=400)
event.archive_time = now - datetime.timedelta(days=365)
event.save()
EventHitStats.objects.create(
event=event,
total_hits=101,
shortcode='abc123',
)
url = reverse('manage:event_hit_stats')
response = self.client.get(url)
eq_(response.status_code, 200)
ok_(event.title not in response.content)
response = self.client.get(url, {'include_excluded': True})
eq_(response.status_code, 200)
ok_(event.title in unicode(response.content, 'utf-8'))
def test_event_hit_stats_archived_today(self):
event = Event.objects.get(title='Test event')
now = timezone.now()
event.start_time = now
event.archive_time = now
event.save()
EventHitStats.objects.create(
event=event,
total_hits=1,
shortcode='abc123',
)
url = reverse('manage:event_hit_stats')
response = self.client.get(url)
eq_(response.status_code, 200)
ok_(event.title not in response.content)
def test_hit_statistics_with_filter(self):
event = Event.objects.get(slug='test-event')
event_hit = Event.objects.create(
title='Test event hit',
slug='test-event-hit',
description=event.description,
privacy=Event.PRIVACY_PUBLIC,
placeholder_img=event.placeholder_img,
location=event.location,
start_time='2012-06-22T19:30:00Z',
archive_time='2012-06-22T20:00:00Z',
)
EventHitStats.objects.create(
event=event,
total_hits=101,
shortcode='abc123',
)
EventHitStats.objects.create(
event=event_hit,
total_hits=102,
shortcode='abc456',
)
response = self.client.get(
reverse('manage:event_hit_stats'),
{
'title': event_hit.title,
}
)
eq_(response.status_code, 200)
view_url_event = reverse('main:event', args=(event.slug,))
view_url_event_hit = reverse('main:event', args=(event_hit.slug,))
eq_(response.content.count(view_url_event_hit), 1)
eq_(response.content.count(view_url_event), 0)
def test_event_edit_without_vidly_template(self):
"""based on https://bugzilla.mozilla.org/show_bug.cgi?id=879725"""
event = Event.objects.get(title='Test event')
event.status = Event.STATUS_PENDING
event.archive_time = None
event.template = None
event.save()
url = reverse('manage:event_edit', args=(event.pk,))
response = self.client.get(url)
eq_(response.status_code, 200)
def test_event_edit_with_suggested_event_comments(self):
event = Event.objects.get(title='Test event')
now = timezone.now()
suggested_event = SuggestedEvent.objects.create(
user=self.user,
title=event.title,
slug=event.slug,
description=event.description,
short_description=event.short_description,
location=event.location,
start_time=event.start_time,
accepted=event,
submitted=now,
)
SuggestedEventComment.objects.create(
suggested_event=suggested_event,
user=self.user,
comment='hi!\n"friend"'
)
url = reverse('manage:event_edit', args=(event.pk,))
response = self.client.get(url)
eq_(response.status_code, 200)
ok_(
'Additional comments from original requested event'
in response.content
)
ok_('hi!<br>"friend"' in response.content)
def test_event_edit_of_retracted_submitted_event(self):
event = Event.objects.get(title='Test event')
now = timezone.now()
suggested_event = SuggestedEvent.objects.create(
user=self.user,
title=event.title,
slug=event.slug,
description=event.description,
short_description=event.short_description,
location=event.location,
start_time=event.start_time,
accepted=event,
submitted=now,
)
url = reverse('manage:event_edit', args=(event.pk,))
response = self.client.get(url)
eq_(response.status_code, 200)
suggested_event.submitted = None
suggested_event.save()
response = self.client.get(url)
eq_(response.status_code, 200)
def test_event_location_time_create_and_edit(self):
"""test that the input can be local time but the event is stored in
UTC"""
paris = Location.objects.create(
name='Paris',
timezone='Europe/Paris'
)
with open(self.placeholder) as fp:
data = dict(
self.event_base_data,
placeholder_img=fp,
title='In Paris!',
start_time='2013-09-25 10:00',
location=paris.pk,
)
response = self.client.post(
reverse('manage:event_request'),
data
)
eq_(response.status_code, 302)
event = Event.objects.get(title='In Paris!')
eq_(event.start_time.tzinfo, utc)
eq_(event.start_time.hour, 8)
url = reverse('manage:event_edit', args=(event.pk,))
response = self.client.get(url)
eq_(response.status_code, 200)
# expect the Paris location to be pre-selected
ok_(
'<option value="%s" selected="selected">Paris</option>' % paris.pk
in response.content
)
start_time_tag = re.findall(
'<input.*?id="id_start_time".*?>',
response.content
)[0]
# expect to see the location time in there instead
ok_('10:00' in start_time_tag, start_time_tag)
# suppose now we want to make the event start at 13:00 in Paris
response = self.client.post(
url,
dict(
self.event_base_data,
location=paris.pk,
start_time='2013-09-25 13:00',
title='Different Now'
),
)
eq_(response.status_code, 302)
event = Event.objects.get(title='Different Now')
eq_(event.start_time.tzinfo, utc)
eq_(event.start_time.hour, 11)
# pull up the edit one more time
response = self.client.get(url)
eq_(response.status_code, 200)
start_time_tag = re.findall(
'<input.*?id="id_start_time".*?>',
response.content
)[0]
# expect to see the location time in there instead
ok_('13:00' in start_time_tag, start_time_tag)
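    def _local_time_to_utc_sketch(self):
        # Illustrative helper (not used by the tests): the local-time to UTC
        # conversion the edit view is expected to perform. pytz is assumed
        # here; Paris is on CEST (UTC+2) on 2013-09-25, hence the hours
        # asserted in the test above.
        import pytz
        paris_tz = pytz.timezone('Europe/Paris')
        local = paris_tz.localize(datetime.datetime(2013, 9, 25, 10, 0))
        assert local.astimezone(pytz.utc).hour == 8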
@mock.patch('logging.error')
@mock.patch('requests.get')
def test_editing_event_curated_groups(self, rget, rlogging):
def mocked_get(url, **options):
if 'offset=0' in url:
return Response(GROUPS1)
if 'offset=500' in url:
return Response(GROUPS2)
raise NotImplementedError(url)
rget.side_effect = mocked_get
event = Event.objects.get(title='Test event')
url = reverse('manage:event_edit', args=(event.pk,))
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('Curated groups' in response.content)
response = self.client.post(
url,
dict(self.event_base_data,
title=event.title,
curated_groups='Group 1, Group 2'
)
)
eq_(response.status_code, 302)
ok_(CuratedGroup.objects.get(event=event, name='Group 1'))
ok_(CuratedGroup.objects.get(event=event, name='Group 2'))
# edit it again
response = self.client.post(
url,
dict(self.event_base_data,
title=event.title,
curated_groups='Group 1, Group X'
)
)
eq_(response.status_code, 302)
ok_(CuratedGroup.objects.get(event=event, name='Group 1'))
ok_(CuratedGroup.objects.get(event=event, name='Group X'))
ok_(not CuratedGroup.objects.filter(event=event, name='Group 2'))
def test_event_upload(self):
event = Event.objects.get(title='Test event')
# there needs to exist a template which is the
# `default_archive_template` one
template, = Template.objects.all()
template.default_archive_template = True
template.save()
url = reverse('manage:event_upload', args=(event.pk,))
response = self.client.get(url)
eq_(response.status_code, 200)
# if the event has a file upload, you'd expect to see a link to it here
upload = Upload.objects.create(
user=self.user,
url='https://aws.com/file.foo',
file_name='file.foo',
size=123456,
event=event,
)
event.upload = upload
event.save()
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('file.foo' in response.content)
def test_event_upload_automation_details(self):
"""When you go to the event upload there are details embedded in
the page that is used by the javascript automation steps (which
are quite complex).
Here we just want to test that all those details are there as
expected.
"""
event = Event.objects.get(title='Test event')
# there needs to exist a template which is the
# `default_archive_template` one
template, = Template.objects.all()
template.default_archive_template = True
template.content = """
<iframe src="{{ key }}"></iframe>
"""
template.save()
url = reverse('manage:event_upload', args=(event.pk,))
response = self.client.get(url)
eq_(response.status_code, 200)
doc = pyquery.PyQuery(response.content)
element, = doc('form#upload')
eq_(
element.attrib['data-vidly-shortcut-url'],
reverse('manage:vidly_url_to_shortcode', args=(event.id,))
)
eq_(
element.attrib['data-event-archive-url'],
reverse('manage:event_archive', args=(event.id,))
)
eq_(
json.loads(element.attrib['data-vidly-submit-details']),
{
'email': self.user.email,
'hd': True,
'token_protection': False
}
)
assert event.privacy == Event.PRIVACY_PUBLIC
eq_(
json.loads(element.attrib['data-event-archive-details']),
{
'template': template.id,
'shortcode_key_name': 'key'
}
)
def test_event_transcript(self):
event = Event.objects.get(title='Test event')
event.transcript = "Some content"
event.save()
url = reverse('manage:event_transcript', args=(event.pk,))
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('Some content' in response.content)
response = self.client.post(url, {'transcript': 'New content'})
eq_(response.status_code, 302)
event = Event.objects.get(pk=event.pk)
eq_(event.transcript, 'New content')
@mock.patch('requests.get')
def test_event_transcript_scraping(self, rget):
def mocked_get(url, **options):
eq_(
url,
'https://etherpad.mozilla.org/ep/pad/export/foo-bar/latest?'
'format=txt'
)
return _Response(
"Content here",
200
)
rget.side_effect = mocked_get
event = Event.objects.get(title='Test event')
event.additional_links = """
https://etherpad.mozilla.org/foo-bar
"""
event.save()
url = reverse('manage:event_transcript', args=(event.pk,))
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('https://etherpad.mozilla.org/foo-bar' in response.content)
response = self.client.get(url, {
'urls': ['https://etherpad.mozilla.org/foo-bar']
})
eq_(response.status_code, 200)
ok_('Content here' in response.content)
@mock.patch('requests.get')
def test_event_transcript_scraping_not_working(self, rget):
def mocked_get(url, **options):
eq_(
url,
'https://etherpad.mozilla.org/ep/pad/export/foo-bar/latest?'
'format=txt'
)
return _Response(
None,
500
)
rget.side_effect = mocked_get
event = Event.objects.get(title='Test event')
event.additional_links = """
https://etherpad.mozilla.org/foo-bar
"""
event.save()
url = reverse('manage:event_transcript', args=(event.pk,))
response = self.client.get(url, {
'urls': ['https://etherpad.mozilla.org/foo-bar']
})
eq_(response.status_code, 200)
        ok_('Some things could not be scraped correctly' in response.content)
def test_stop_live_event(self):
event = Event.objects.get(title='Test event')
assert event in Event.objects.approved()
event.archive_time = None
now = timezone.now()
nowish = now - datetime.timedelta(minutes=1)
event.start_time = nowish
event.save()
assert event in Event.objects.live()
# there needs to exist a template which is the
# `default_archive_template` one
template, = Template.objects.all()
template.default_archive_template = True
template.save()
edit_url = reverse('manage:event_edit', args=(event.pk,))
response = self.client.get(edit_url)
eq_(response.status_code, 200)
url = reverse('manage:stop_live_event', args=(event.pk,))
ok_(url in response.content)
# let's click it
response = self.client.post(url)
eq_(response.status_code, 302)
self.assertRedirects(
response,
reverse('manage:event_upload', args=(event.pk,))
)
# reload the event and it should have changed status
event = Event.objects.get(pk=event.pk)
eq_(event.status, Event.STATUS_PROCESSING)
def test_event_redirect_thumbnail(self):
event = Event.objects.get(title='Test event')
with open(self.placeholder) as fp:
event.placeholder_img = File(fp)
event.save()
assert event.placeholder_img
url = reverse('manage:redirect_event_thumbnail', args=(event.id,))
response = self.client.get(url)
eq_(response.status_code, 302)
thumbnail_url = response['Location']
ok_(settings.MEDIA_URL in thumbnail_url)
def test_event_edit_with_hit_statistics(self):
event = Event.objects.get(title='Test event')
url = reverse('manage:event_edit', args=(event.id,))
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('Total Hits:' not in response.content)
event.template_environment = {'tag': 'abc123'}
event.save()
EventHitStats.objects.create(
event=event,
total_hits=1234,
shortcode=event.template_environment['tag']
)
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('Total Hits:' in response.content)
ok_('1,234' in response.content)
@mock.patch('airmozilla.manage.vidly.urllib2')
def test_is_privacy_vidly_mismatch(self, p_urllib2):
def mocked_urlopen(request):
xml_string = get_custom_XML(tag='abc123')
            assert '<Private>false</Private>' in xml_string
return StringIO(xml_string)
p_urllib2.urlopen = mocked_urlopen
event = Event.objects.get(title='Test event')
event.template = None
event.template_environment = {}
event.save()
# no template associated with event
ok_(not is_privacy_vidly_mismatch(event))
event.template = Template.objects.create(name='Nothing', content='x')
event.save()
# template not named Vid.ly something
ok_(not is_privacy_vidly_mismatch(event))
event.template.name = 'Vid.LY Something'
event.template.save()
# no template_environment['tag']
ok_(not is_privacy_vidly_mismatch(event))
event.template_environment['tag'] = 'abc123'
event.save()
assert event.privacy == Event.PRIVACY_PUBLIC, event.privacy
# doesn't mismatch fixture
ok_(not is_privacy_vidly_mismatch(event))
event.privacy = Event.PRIVACY_COMPANY
event.save()
# finally a mismatch!
ok_(is_privacy_vidly_mismatch(event))
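    def _privacy_mismatch_rule_sketch(self):
        # Illustrative sketch of the rule walked through above (assumed, not
        # the real is_privacy_vidly_mismatch implementation): a mismatch is a
        # non-public event whose Vid.ly media is not private, or vice versa.
        def mismatch(privacy, vidly_is_private):
            return (privacy != Event.PRIVACY_PUBLIC) != vidly_is_private
        assert not mismatch(Event.PRIVACY_PUBLIC, False)
        assert mismatch(Event.PRIVACY_COMPANY, False)
        assert mismatch(Event.PRIVACY_PUBLIC, True)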
@mock.patch('airmozilla.manage.vidly.urllib2.urlopen')
def test_event_edit_with_privacy_vidly_mismatch(self, p_urlopen):
def mocked_urlopen(request):
xml_sent = urllib.unquote_plus(request.data)
if 'UpdateMedia' in xml_sent:
xml_string = SAMPLE_MEDIA_UPDATED_XML
else:
xml_string = get_custom_XML(tag='abc123')
                # it's just a query
                assert '<Private>false</Private>' in xml_string
return StringIO(xml_string)
p_urlopen.side_effect = mocked_urlopen
vidly_template = Template.objects.create(
name='Vid.ly Something',
content=''
)
event = Event.objects.get(title='Test event')
assert event.privacy == Event.PRIVACY_PUBLIC
# let's make sure there is a VidlySubmission of this too
submission = VidlySubmission.objects.create(
tag='abc123',
event=event,
url='https://s3.com',
token_protection=False
)
with open(self.placeholder) as fp:
response = self.client.post(
reverse('manage:event_edit', args=(event.pk,)),
dict(self.event_base_data, placeholder_img=fp,
title=event.title,
template=vidly_template.id,
template_environment="tag=abc123",
privacy=Event.PRIVACY_COMPANY)
)
eq_(response.status_code, 302)
# reload the submission
submission = VidlySubmission.objects.get(id=submission.id)
ok_(submission.token_protection)
@mock.patch('airmozilla.manage.vidly.urllib2.urlopen')
def test_event_edit_with_privacy_vidly_mismatch_error(self, p_urlopen):
def mocked_urlopen(request):
xml_sent = urllib.unquote_plus(request.data)
if 'UpdateMedia' in xml_sent:
xml_string = SAMPLE_MEDIA_UPDATE_FAILED_XML
else:
xml_string = get_custom_XML(tag='abc123')
                # it's just a query
                assert '<Private>false</Private>' in xml_string
return StringIO(xml_string)
p_urlopen.side_effect = mocked_urlopen
vidly_template = Template.objects.create(
name='Vid.ly Something',
content=''
)
event = Event.objects.get(title='Test event')
assert event.privacy == Event.PRIVACY_PUBLIC
# let's make sure there is a VidlySubmission of this too
submission = VidlySubmission.objects.create(
tag='abc123',
event=event,
url='https://s3.com',
token_protection=False
)
with open(self.placeholder) as fp:
response = self.client.post(
reverse('manage:event_edit', args=(event.pk,)),
dict(self.event_base_data, placeholder_img=fp,
title=event.title,
template=vidly_template.id,
template_environment="tag=abc123",
privacy=Event.PRIVACY_COMPANY)
)
# Note that even though the UpdateMedia failed,
# it still goes ahead with the redirect.
eq_(response.status_code, 302)
# reload the submission
submission = VidlySubmission.objects.get(id=submission.id)
# it should not have changed
ok_(not submission.token_protection)
def test_edit_event_archive_time(self):
event = Event.objects.get(title='Test event')
url = reverse('manage:event_archive_time', args=(event.id,))
assert event.archive_time
response = self.client.get(url)
eq_(response.status_code, 200)
ok_(event.get_status_display() in response.content)
        # the input converts the time to the local timezone
ok_(
event.archive_time.strftime('%Y-%m-%d %H:%M:%S') in
response.content
)
response = self.client.post(url, {
'archive_time': ''
})
eq_(response.status_code, 302)
event = Event.objects.get(id=event.id)
eq_(event.archive_time, None)
response = self.client.post(url, {
'archive_time': '2015-04-01 12:00:00'
})
eq_(response.status_code, 302)
event = Event.objects.get(id=event.id)
dt = datetime.datetime(2015, 4, 1, 12, 0, 0)
dt = dt.replace(tzinfo=utc)
eq_(event.archive_time, dt)
@mock.patch('airmozilla.manage.views.events.boto.connect_s3')
@mock.patch('airmozilla.manage.vidly.urllib2')
def test_event_delete(self, p_urllib2, mocked_connect_s3):
assert not Upload.objects.all()
assert not VidlySubmission.objects.all()
assert not Picture.objects.all()
def mocked_urlopen(request):
return StringIO("""
<?xml version="1.0"?>
<Response>
<Message>Success</Message>
<MessageCode>0.0</MessageCode>
<Success>
<MediaShortLink>8oxv6x</MediaShortLink>
</Success>
<Errors>
<Error>
<SourceFile>http://www.com</SourceFile>
<ErrorCode>1</ErrorCode>
<Description>ErrorDescriptionK</Description>
<Suggestion>ErrorSuggestionK</Suggestion>
</Error>
</Errors>
</Response>
""")
sent_xml_strings = []
def mocked_Request(url, data, **kwargs):
sent_xml_strings.append(urllib.unquote_plus(data))
return mock.MagicMock()
p_urllib2.Request = mocked_Request
p_urllib2.urlopen = mocked_urlopen
event = Event.objects.get(title='Test event')
url = reverse('manage:event_delete', args=(event.id,))
response = self.client.post(url)
# because the event is not in state of removed
eq_(response.status_code, 404)
event.status = Event.STATUS_REMOVED
event.save()
# create some uploads
Upload.objects.create(
user=self.user,
event=event,
url='http://aws.com/file1.mov',
size=98765
)
Upload.objects.create(
user=self.user,
event=event,
url='http://aws.com/file2.mov',
size=123456
)
# create some vidly submissions
VidlySubmission.objects.create(
event=event,
tag='abc123',
)
VidlySubmission.objects.create(
event=event,
tag='xyz987',
)
# Create some pictures
file_paths = []
for i in range(3):
with open(self.placeholder) as fp:
picture = Picture.objects.create(
event=event,
file=File(fp),
notes=str(i)
)
assert os.path.isfile(picture.file.path)
file_paths.append(picture.file.path)
# associate the event with the last picture
event.picture = picture
event.save()
# finally, try to delete it again
response = self.client.post(url)
eq_(response.status_code, 302)
mocked_connect_s3().get_bucket.assert_called_once_with(
settings.S3_UPLOAD_BUCKET
)
mocked_connect_s3().get_bucket().delete_key.assert_any_call(
'/file2.mov'
)
mocked_connect_s3().get_bucket().delete_key.assert_any_call(
'/file1.mov'
)
eq_(len(sent_xml_strings), 2)
ok_('<Action>DeleteMedia</Action>' in sent_xml_strings[0])
ok_('<Action>DeleteMedia</Action>' in sent_xml_strings[1])
ok_('<MediaShortLink>xyz987</MediaShortLink>' in sent_xml_strings[0])
ok_('<MediaShortLink>abc123</MediaShortLink>' in sent_xml_strings[1])
for file_path in file_paths:
ok_(not os.path.isfile(file_path))
# We can do this because there weren't any of these before the
# test started.
ok_(not Upload.objects.all())
ok_(not VidlySubmission.objects.all())
ok_(not Picture.objects.all())
@mock.patch('requests.head')
@mock.patch('subprocess.Popen')
def test_event_fetch_duration(self, mock_popen, rhead):
ffmpeged_urls = []
def mocked_popen(command, **kwargs):
url = destination = None
if command[1] == '-i':
# doing a fetch info
url = command[2]
elif command[1] == '-ss':
# screen capturing
destination = command[-1]
assert os.path.isdir(os.path.dirname(destination))
else:
raise NotImplementedError(command)
ffmpeged_urls.append(url)
# sample_jpg = self.sample_jpg
class Inner:
def communicate(self):
out = err = ''
if url is not None:
if 'some.flv' in url:
err = """
Duration: 00:00:11.01, start: 0.000000, bitrate: 1076 kb/s
"""
else:
raise NotImplementedError(url)
# elif destination is not None:
# shutil.copyfile(sample_jpg, destination)
else:
raise NotImplementedError()
return out, err
return Inner()
mock_popen.side_effect = mocked_popen
def mocked_head(url, **options):
return Response(
'',
200
)
rhead.side_effect = mocked_head
event = Event.objects.get(title='Test event')
assert not event.duration
url = reverse('manage:event_fetch_duration', args=(event.id,))
eq_(self.client.get(url).status_code, 405)
response = self.client.post(url)
eq_(response.status_code, 200)
eq_(json.loads(response.content), {'duration': None})
event.upload = Upload.objects.create(
user=self.user,
url='http://s3domaincom/some.flv',
size=12345
)
event.save()
response = self.client.post(url)
eq_(response.status_code, 200)
eq_(json.loads(response.content), {'duration': 11})
event = Event.objects.get(id=event.id)
eq_(event.duration, 11)
eq_(len(ffmpeged_urls), 1)
# hit it a second time
response = self.client.post(url)
eq_(response.status_code, 200)
eq_(json.loads(response.content), {'duration': 11})
eq_(len(ffmpeged_urls), 1)
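    def _parse_ffmpeg_duration_sketch(self):
        # Illustrative only (assumed, not the app's actual parser): how the
        # "Duration: HH:MM:SS.xx" line that the mock above emits on stderr
        # can be reduced to the 11 whole seconds asserted in the test.
        err = "Duration: 00:00:11.01, start: 0.000000, bitrate: 1076 kb/s"
        hours, minutes, seconds = re.findall(
            r'Duration: (\d+):(\d+):(\d+)', err
        )[0]
        assert int(hours) * 3600 + int(minutes) * 60 + int(seconds) == 11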
@mock.patch('requests.head')
@mock.patch('subprocess.Popen')
def test_event_fetch_screencaptures(self, mock_popen, rhead):
ffmpeged_urls = []
def mocked_popen(command, **kwargs):
url = destination = None
if command[1] == '-ss':
# screen capturing
destination = command[-1]
assert os.path.isdir(os.path.dirname(destination))
else:
raise NotImplementedError(command)
ffmpeged_urls.append(url)
sample_jpg = 'airmozilla/manage/tests/presenting.jpg'
class Inner:
def communicate(self):
out = err = ''
if destination is not None:
shutil.copyfile(sample_jpg, destination)
else:
raise NotImplementedError()
return out, err
return Inner()
mock_popen.side_effect = mocked_popen
def mocked_head(url, **options):
return Response(
'',
200
)
rhead.side_effect = mocked_head
event = Event.objects.get(title='Test event')
assert not event.duration
url = reverse('manage:event_fetch_screencaptures', args=(event.id,))
eq_(self.client.get(url).status_code, 405)
response = self.client.post(url)
eq_(response.status_code, 200)
eq_(json.loads(response.content), {'pictures': 0})
event.upload = Upload.objects.create(
user=self.user,
url='http://s3domaincom/some.flv',
size=12345
)
event.save()
response = self.client.post(url)
eq_(response.status_code, 200)
eq_(json.loads(response.content), {'pictures': 0})
event.duration = 12
event.save()
response = self.client.post(url)
eq_(response.status_code, 200)
eq_(json.loads(response.content), {
'pictures': settings.SCREENCAPTURES_NO_PICTURES
})
assert Picture.objects.filter(event=event).count()
eq_(len(ffmpeged_urls), settings.SCREENCAPTURES_NO_PICTURES)
# hit it a second time
response = self.client.post(url)
eq_(response.status_code, 200)
eq_(json.loads(response.content), {
'pictures': settings.SCREENCAPTURES_NO_PICTURES
})
eq_(len(ffmpeged_urls), settings.SCREENCAPTURES_NO_PICTURES)
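    def _screencapture_offsets_sketch(self):
        # Illustrative only (assumed, not the app's actual algorithm): one
        # plausible way to spread SCREENCAPTURES_NO_PICTURES capture points
        # evenly across the 12 second duration used above, producing the
        # '-ss' offsets handed to ffmpeg.
        duration = 12
        no_pictures = settings.SCREENCAPTURES_NO_PICTURES
        offsets = [
            duration * (i + 1.0) / (no_pictures + 1)
            for i in range(no_pictures)
        ]
        assert len(offsets) == no_pictures
        assert all(0 < offset < duration for offset in offsets)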
|
function2/public
|
refs/heads/master
|
math/sha256.py
|
1
|
#!/usr/bin/env python3
# Simple (SLOW) program to calculate the SHA-256.
# Copyright 2015, Michael Seyfert <michael@codesand.org>
# http://codesand.org
# License GPL v2
import sys, struct
K = [
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2,
]
W = [0x0]*64
def rot(x,n):
return (x >> n) | ((x & ((1<<n)-1))<<(32-n))
def Ch(x,y,z):
return (x & y) ^ (~x & z)
def Maj(x,y,z):
return (x & y) ^ (x & z) ^ (y & z)
def Ea(x):
return rot(x,2) ^ rot(x,13) ^ rot(x,22)
def Eb(x):
return rot(x,6) ^ rot(x,11) ^ rot(x,25)
def ea(x):
return rot(x,7) ^ rot(x,18) ^ (x >> 3)
def eb(x):
return rot(x,17) ^ rot(x,19) ^ (x >> 10)
def compression_func(r,j):
t1 = r[7] + Eb(r[4]) + Ch(r[4],r[5],r[6]) + K[j] + W[j]
t2 = Ea(r[0]) + Maj(r[0], r[1], r[2])
r[7] = r[6]
r[6] = r[5]
r[5] = r[4]
r[4] = (r[3] + t1) & 0xffffffff
r[3] = r[2]
r[2] = r[1]
r[1] = r[0]
r[0] = (t1 + t2) & 0xffffffff
def sha256(b):
"""
Compute the SHA-256 of input variable 'b' which is of builtin type
bytes (a string of bytes).
Returns the hash as a list[] of 8 unsigned (32-bit) integers.
"""
l = 8 * len(b) # l is the length of the message, in bits.
k = (448 - (l + 1)) % 512 # python uses the mathematical modulo definition.
# Add to b to fill the input bits to a multiple of 512 bits.
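    # Padding layout: one 0x80 byte, (k-7)/8 zero bytes, then l as a
    # big-endian 64-bit integer; the appended bits total 1 + k + 64, so the
    # padded length l + 1 + k + 64 is a multiple of 512.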
b += (b'\x80' + bytes(int((k-7)/8)) + struct.pack(">q", l))
#initial H values.
h = [0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19]
m = [0]*16 # holds the next 512 bits of the input message.
r = [0]*8 # 8 registers used.
# The message is read 64 bytes (512 bits) each iteration.
N = len(b)//64
for i in range(N):
# Set the 16 32-bit M values. These are M_j^(i) (0 <= j <= 15)
for j in range(16):
m[j] = struct.unpack(">I",b[i*64+4*j : i*64+4*j+4])[0]
# Initialize the registers with the (i-1)st intermediate hash value.
for j in range(8):
r[j] = h[j]
# Setup the expanded message blocks W[0] to W[63].
for j in range(16):
W[j] = m[j]
for j in range(16,64):
W[j] = (eb(W[j-2]) + W[j-7] + ea(W[j-15]) + W[j-16]) & 0xffffffff
# Apply the compression functions to update the registers, from j=0 to 63.
for j in range(64):
compression_func(r,j)
# # Debug print
# print(hex(r[0]),hex(r[1]),hex(r[2]),hex(r[3]),
# hex(r[4]),hex(r[5]),hex(r[6]),hex(r[7]))
# Compute the new hash value.
for j in range(8):
h[j] = (r[j] + h[j]) & 0xffffffff
return h
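def _cross_check():
    # Illustrative self-test (not part of the original program): compares
    # this implementation against the stdlib's hashlib for a few samples.
    import hashlib
    for sample in (b"", b"abc", b"a" * 200):
        ours = "".join("%08x" % word for word in sha256(sample))
        assert ours == hashlib.sha256(sample).hexdigest(), sample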
if __name__ == "__main__":
h = sha256( open(sys.argv[1], "rb").read() )
print("".join([hex(h[k])[2:]+' ' for k in range(8)]))
|
sposs/DIRAC
|
refs/heads/integration
|
Core/DISET/private/Transports/SSL/__init__.py
|
38
|
# $HeadURL$
__RCSID__ = "$Id$"
|
saurabh6790/aimobilize-lib-backup
|
refs/heads/master
|
webnotes/widgets/calendar.py
|
34
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes import _
import json
@webnotes.whitelist()
def update_event(args, field_map):
args = webnotes._dict(json.loads(args))
field_map = webnotes._dict(json.loads(field_map))
w = webnotes.bean(args.doctype, args.name)
w.doc.fields[field_map.start] = args[field_map.start]
w.doc.fields[field_map.end] = args[field_map.end]
w.save()
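# Illustrative call (payload shape assumed, not taken from the original
# module); both arguments arrive as JSON strings from the client:
#
#   update_event(
#       args='{"doctype": "Event", "name": "EV00001", '
#            '"starts_on": "2013-01-01 10:00:00", '
#            '"ends_on": "2013-01-01 11:00:00"}',
#       field_map='{"start": "starts_on", "end": "ends_on"}',
#   )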
|
francois-travais/wedding-rest
|
refs/heads/master
|
setup.py
|
1
|
# -*- coding: utf8 -*-
from setuptools import setup
setup(
# Application name:
name="wedding_rest",
# Version number (initial):
version="1.0.0",
# Application author details:
author="Francois Travais",
author_email="francois.travais@gmail.com",
# Packages
packages=["wedding_rest"],
include_package_data=True,
# Details
url="https://github.com/francois-travais/wedding-rest.git",
license="GNU GPL v2",
description="REST services of my wedding website",
# Dependent packages (distributions)
install_requires=[
"Flask",
"pymongo",
"Flask-Cors",
],
zip_safe=False
)
|
Teamxrtc/webrtc-streaming-node
|
refs/heads/master
|
third_party/depot_tools/external_bin/gsutil/gsutil_4.15/gsutil/third_party/pyasn1/pyasn1/__init__.py
|
193
|
import sys
# http://www.python.org/dev/peps/pep-0396/
__version__ = '0.1.7'
if sys.version_info[:2] < (2, 4):
raise RuntimeError('PyASN1 requires Python 2.4 or later')
|
gabrielfalcao/lettuce
|
refs/heads/master
|
tests/integration/lib/Django-1.3/tests/modeltests/fixtures_model_package/__init__.py
|
45382
| |
edisonlz/fruit
|
refs/heads/master
|
web_project/base/site-packages/south/db/mysql.py
|
20
|
# MySQL-specific implementations for south
# Original author: Andrew Godwin
# Patches by: F. Gabriel Gosselin <gabrielNOSPAM@evidens.ca>
from south.db import generic
from south.db.generic import DryRunError, INVALID
from south.logger import get_logger
def delete_column_constraints(func):
"""
Decorates column operation functions for MySQL.
Deletes the constraints from the database and clears local cache.
"""
def _column_rm(self, table_name, column_name, *args, **opts):
# Delete foreign key constraints
try:
self.delete_foreign_key(table_name, column_name)
except ValueError:
pass # If no foreign key on column, OK because it checks first
# Delete constraints referring to this column
try:
reverse = self._lookup_reverse_constraint(table_name, column_name)
for cname, rtable, rcolumn in reverse:
self.delete_foreign_key(rtable, rcolumn)
except DryRunError:
pass
return func(self, table_name, column_name, *args, **opts)
return _column_rm
def copy_column_constraints(func):
"""
Decorates column operation functions for MySQL.
Determines existing constraints and copies them to a new column
"""
def _column_cp(self, table_name, column_old, column_new, *args, **opts):
# Copy foreign key constraint
try:
constraint = self._find_foreign_constraints(table_name, column_old)[0]
(ftable, fcolumn) = self._lookup_constraint_references(table_name, constraint)
if ftable and fcolumn:
fk_sql = self.foreign_key_sql(
table_name, column_new, ftable, fcolumn)
get_logger().debug("Foreign key SQL: " + fk_sql)
self.add_deferred_sql(fk_sql)
except IndexError:
pass # No constraint exists so ignore
except DryRunError:
pass
# Copy constraints referring to this column
try:
reverse = self._lookup_reverse_constraint(table_name, column_old)
for cname, rtable, rcolumn in reverse:
fk_sql = self.foreign_key_sql(
rtable, rcolumn, table_name, column_new)
self.add_deferred_sql(fk_sql)
except DryRunError:
pass
return func(self, table_name, column_old, column_new, *args, **opts)
return _column_cp
def invalidate_table_constraints(func):
"""
For MySQL we grab all table constraints simultaneously, so this is
effective.
It further solves the issues of invalidating referred table constraints.
"""
def _cache_clear(self, table, *args, **opts):
db_name = self._get_setting('NAME')
if db_name in self._constraint_cache:
del self._constraint_cache[db_name]
if db_name in self._reverse_cache:
del self._reverse_cache[db_name]
if db_name in self._constraint_references:
del self._constraint_references[db_name]
return func(self, table, *args, **opts)
return _cache_clear
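# Note (illustrative, mirroring the declarations further down): the three
# decorators above are meant to stack on column-mutating operations, e.g.
#
#   @copy_column_constraints
#   @delete_column_constraints
#   @invalidate_table_constraints
#   def rename_column(self, table_name, old, new):
#       ...
#
# so constraints are copied to the new column, dropped from the old one, and
# the per-database constraint cache is invalidated in one pass.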
class DatabaseOperations(generic.DatabaseOperations):
"""
MySQL implementation of database operations.
    MySQL has no DDL transaction support. This can confuse people when they
    ask how to roll back - hence the dry runs, etc., found in the migration
    code.
"""
backend_name = "mysql"
alter_string_set_type = ''
alter_string_set_null = 'MODIFY %(column)s %(type)s NULL;'
alter_string_drop_null = 'MODIFY %(column)s %(type)s NOT NULL;'
drop_index_string = 'DROP INDEX %(index_name)s ON %(table_name)s'
delete_primary_key_sql = "ALTER TABLE %(table)s DROP PRIMARY KEY"
delete_foreign_key_sql = "ALTER TABLE %(table)s DROP FOREIGN KEY %(constraint)s"
delete_unique_sql = "ALTER TABLE %s DROP INDEX %s"
rename_table_sql = "RENAME TABLE %s TO %s;"
allows_combined_alters = False
has_check_constraints = False
geom_types = ['geometry', 'point', 'linestring', 'polygon']
text_types = ['text', 'blob']
def __init__(self, db_alias):
self._constraint_references = {}
self._reverse_cache = {}
super(DatabaseOperations, self).__init__(db_alias)
def _is_valid_cache(self, db_name, table_name):
cache = self._constraint_cache
        # we cache the whole db at once, so table_name is valid as long as
        # the db cache exists and the entry has not been marked INVALID
return db_name in cache and cache[db_name].get(table_name, None) is not INVALID
def _fill_constraint_cache(self, db_name, table_name):
        # For MySQL, grab all constraints for this database at once; it's
        # just as cheap as fetching them for a single column.
self._constraint_cache[db_name] = {}
self._constraint_cache[db_name][table_name] = {}
self._reverse_cache[db_name] = {}
self._constraint_references[db_name] = {}
name_query = """
SELECT kc.`constraint_name`, kc.`column_name`, kc.`table_name`,
kc.`referenced_table_name`, kc.`referenced_column_name`
FROM information_schema.key_column_usage AS kc
WHERE
kc.table_schema = %s
"""
rows = self.execute(name_query, [db_name])
if not rows:
return
cnames = {}
for constraint, column, table, ref_table, ref_column in rows:
key = (table, constraint)
cnames.setdefault(key, set())
cnames[key].add((column, ref_table, ref_column))
type_query = """
SELECT c.constraint_name, c.table_name, c.constraint_type
FROM information_schema.table_constraints AS c
WHERE
c.table_schema = %s
"""
rows = self.execute(type_query, [db_name])
for constraint, table, kind in rows:
key = (table, constraint)
self._constraint_cache[db_name].setdefault(table, {})
try:
cols = cnames[key]
except KeyError:
cols = set()
for column_set in cols:
(column, ref_table, ref_column) = column_set
self._constraint_cache[db_name][table].setdefault(column, set())
if kind == 'FOREIGN KEY':
self._constraint_cache[db_name][table][column].add((kind,
constraint))
# Create constraint lookup, see constraint_references
self._constraint_references[db_name][(table,
constraint)] = (ref_table, ref_column)
# Create reverse table lookup, reverse_lookup
self._reverse_cache[db_name].setdefault(ref_table, {})
self._reverse_cache[db_name][ref_table].setdefault(ref_column,
set())
self._reverse_cache[db_name][ref_table][ref_column].add(
(constraint, table, column))
else:
self._constraint_cache[db_name][table][column].add((kind,
constraint))
def connection_init(self):
"""
Run before any SQL to let database-specific config be sent as a command,
e.g. which storage engine (MySQL) or transaction serialisability level.
"""
cursor = self._get_connection().cursor()
if self._has_setting('STORAGE_ENGINE') and self._get_setting('STORAGE_ENGINE'):
cursor.execute("SET storage_engine=%s;" % self._get_setting('STORAGE_ENGINE'))
def start_transaction(self):
super(DatabaseOperations, self).start_transaction()
self.execute("SET FOREIGN_KEY_CHECKS=0;")
@copy_column_constraints
@delete_column_constraints
@invalidate_table_constraints
def rename_column(self, table_name, old, new):
if old == new or self.dry_run:
return []
rows = [x for x in self.execute('DESCRIBE %s' % (self.quote_name(table_name),)) if x[0] == old]
if not rows:
raise ValueError("No column '%s' in '%s'." % (old, table_name))
params = (
self.quote_name(table_name),
self.quote_name(old),
self.quote_name(new),
rows[0][1],
rows[0][2] == "YES" and "NULL" or "NOT NULL",
rows[0][4] and "DEFAULT " or "",
rows[0][4] and "%s" or "",
rows[0][5] or "",
)
sql = 'ALTER TABLE %s CHANGE COLUMN %s %s %s %s %s %s %s;' % params
if rows[0][4]:
self.execute(sql, (rows[0][4],))
else:
self.execute(sql)
@delete_column_constraints
def delete_column(self, table_name, name):
super(DatabaseOperations, self).delete_column(table_name, name)
@invalidate_table_constraints
def rename_table(self, old_table_name, table_name):
super(DatabaseOperations, self).rename_table(old_table_name,
table_name)
@invalidate_table_constraints
def delete_table(self, table_name):
super(DatabaseOperations, self).delete_table(table_name)
def _lookup_constraint_references(self, table_name, cname):
"""
Provided an existing table and constraint, returns tuple of (foreign
table, column)
"""
db_name = self._get_setting('NAME')
try:
return self._constraint_references[db_name][(table_name, cname)]
except KeyError:
return None
def _lookup_reverse_constraint(self, table_name, column_name=None):
"""Look for the column referenced by a foreign constraint"""
db_name = self._get_setting('NAME')
if self.dry_run:
raise DryRunError("Cannot get constraints for columns.")
if not self._is_valid_cache(db_name, table_name):
# Piggy-back on lookup_constraint, ensures cache exists
self.lookup_constraint(db_name, table_name)
try:
table = self._reverse_cache[db_name][table_name]
            if column_name is None:
return [(y, tuple(y)) for x, y in table.items()]
else:
return tuple(table[column_name])
except KeyError:
return []
def _field_sanity(self, field):
"""
This particular override stops us sending DEFAULTs for BLOB/TEXT columns.
"""
        # MySQL also does not support defaults for geometry columns
type = self._db_type_for_alter_column(field).lower()
is_geom = True in [type.find(t) > -1 for t in self.geom_types]
is_text = True in [type.find(t) > -1 for t in self.text_types]
if is_geom or is_text:
field._suppress_default = True
return field
def _alter_set_defaults(self, field, name, params, sqls):
"""
MySQL does not support defaults on text or blob columns.
"""
type = params['type']
        # MySQL also does not support defaults for geometry columns
is_geom = True in [type.find(t) > -1 for t in self.geom_types]
is_text = True in [type.find(t) > -1 for t in self.text_types]
if not is_geom and not is_text:
super(DatabaseOperations, self)._alter_set_defaults(field, name, params, sqls)
|
40223117cda/2015_cd0505
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/marshal.py
|
1265
|
from json import *
|
Instanssi/Instanssi.org
|
refs/heads/master
|
Instanssi/store/methods/paytrail.py
|
1
|
# -*- coding: utf-8 -*-
from Instanssi.common.misc import get_url
from django.conf import settings
from django.urls import reverse
from django.http import HttpResponseRedirect, HttpResponse, Http404
from django.shortcuts import render, get_object_or_404
from Instanssi.store.models import StoreTransaction
from Instanssi.store.utils import paytrail, ta_common
# Logging related
import logging
logger = logging.getLogger(__name__)
def start_process(ta):
"""This should be used to start the paytrail payment process.
Will redirect as necessary."""
product_list = []
for store_item, item_variant, purchase_price in ta.get_distinct_storeitems_and_prices():
count = ta.get_storeitem_count(store_item, variant=item_variant)
product_list.append({
'title': '{}, {}'.format(store_item.name, item_variant.name) if item_variant else store_item.name,
'code': '{}:{}'.format(store_item.id, item_variant.id) if item_variant else str(store_item.id),
'amount': str(count),
'price': str(purchase_price),
'vat': '0',
'type': 1,
})
data = {
'orderNumber': str(ta.id),
'currency': 'EUR',
'locale': 'fi_FI',
'urlSet': {
'success': get_url(reverse('store:pm:paytrail-success')),
'failure': get_url(reverse('store:pm:paytrail-failure')),
'notification': get_url(reverse('store:pm:paytrail-notify')),
'pending': '',
},
'orderDetails': {
'includeVat': 1,
'contact': {
'telephone': ta.telephone,
'mobile': ta.mobile,
'email': ta.email,
'firstName': ta.firstname,
'lastName': ta.lastname,
'companyName': ta.company,
'address': {
'street': ta.street,
'postalCode': ta.postalcode,
'postalOffice': ta.city,
'country': ta.country.code
}
},
'products': product_list,
},
}
# Make a request
try:
msg = paytrail.request(settings.VMAKSUT_ID, settings.VMAKSUT_SECRET, data)
except paytrail.PaytrailException as ex:
a, b = ex.args
logger.exception('(%s) %s', b, a)
return reverse('store:pm:paytrail-failure')
except Exception as ex:
logger.exception('%s.', ex)
return reverse('store:pm:paytrail-failure')
# Save token, redirect
ta.token = msg['token']
ta.payment_method_name = 'Paytrail'
ta.save()
# All done, redirect user
return msg['url']
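# Illustrative view-level usage (names assumed, not part of this module):
# the caller redirects the user to whatever URL start_process() returns,
# which is either Paytrail's payment page or our own failure page.
#
#   def begin_payment(request, transaction_id):
#       ta = get_object_or_404(StoreTransaction, pk=transaction_id)
#       return HttpResponseRedirect(start_process(ta))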
def handle_failure(request):
""" Handles failure message from paytrail """
# Get parameters
order_number = request.GET.get('ORDER_NUMBER', '')
timestamp = request.GET.get('TIMESTAMP', '')
authcode = request.GET.get('RETURN_AUTHCODE', '')
secret = settings.VMAKSUT_SECRET
# Validate, and mark transaction as cancelled
if paytrail.validate_failure(order_number, timestamp, authcode, secret):
ta = get_object_or_404(StoreTransaction, pk=int(order_number))
ta_common.handle_cancellation(ta)
return HttpResponseRedirect(reverse('store:pm:paytrail-failure'))
return render(request, 'store/failure.html')
def handle_success(request):
""" Handles the success user redirect from Paytrail """
# Get parameters
order_number = request.GET.get('ORDER_NUMBER', '')
timestamp = request.GET.get('TIMESTAMP', '')
paid = request.GET.get('PAID', '')
method = request.GET.get('METHOD', '')
authcode = request.GET.get('RETURN_AUTHCODE', '')
secret = settings.VMAKSUT_SECRET
# Validate, and mark transaction as pending
if paytrail.validate_success(order_number, timestamp, paid, method, authcode, secret):
ta = get_object_or_404(StoreTransaction, pk=int(order_number))
ta_common.handle_pending(ta)
return HttpResponseRedirect(reverse('store:pm:paytrail-success'))
return render(request, 'store/success.html')
def handle_notify(request):
""" Handles the actual success notification from Paytrail """
# Get parameters
order_number = request.GET.get('ORDER_NUMBER', '')
timestamp = request.GET.get('TIMESTAMP', '')
paid = request.GET.get('PAID', '')
method = request.GET.get('METHOD', '')
authcode = request.GET.get('RETURN_AUTHCODE', '')
secret = settings.VMAKSUT_SECRET
# Validate & handle
if paytrail.validate_success(order_number, timestamp, paid, method, authcode, secret):
# Get transaction
ta = get_object_or_404(StoreTransaction, pk=int(order_number))
if ta.is_paid:
logger.warning('Somebody is trying to pay an already paid transaction (%s).', ta.id)
return HttpResponse("")
# Use common functions to handle the payment
# If handling the payment fails, cause 404.
# This will tell paytrail to try notifying again later.
if not ta_common.handle_payment(ta):
raise Http404
else:
logger.warning("Error while attempting to validate paytrail notification!")
raise Http404
# Just respond with something
return HttpResponse("")
|
apixandru/intellij-community
|
refs/heads/master
|
python/testData/refactoring/introduceField/uniqueName.py
|
83
|
class Foo:
def __init__(self):
self.s = ""
self.str = ""
def bar(self):
print "<caret>"
|
LiveZenLK/CeygateERP
|
refs/heads/master
|
addons/account/models/__init__.py
|
67
|
# -*- coding: utf-8 -*-
import partner
import account
import account_payment
import account_invoice
import account_bank_statement
import account_move
import chart_template
import account_analytic_line
import account_journal_dashboard
import product
import company
import res_config
import web_planner
|
svn2github/vbox
|
refs/heads/master
|
src/VBox/Devices/EFI/Firmware/BaseTools/Source/Python/Ecc/FileProfile.py
|
11
|
## @file
# fragments of source file
#
# Copyright (c) 2007, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
import re
import os
from ParserWarning import Warning
CommentList = []
PPDirectiveList = []
PredicateExpressionList = []
FunctionDefinitionList = []
VariableDeclarationList = []
EnumerationDefinitionList = []
StructUnionDefinitionList = []
TypedefDefinitionList = []
FunctionCallingList = []
## record file data when parsing source
#
# May raise Exception when opening file.
#
class FileProfile :
## The constructor
#
# @param self The object pointer
    #   @param  FileName    The file to be parsed
#
def __init__(self, FileName):
self.FileLinesList = []
self.FileLinesListFromFile = []
try:
fsock = open(FileName, "rb", 0)
try:
self.FileLinesListFromFile = fsock.readlines()
finally:
fsock.close()
except IOError:
raise Warning("Error when opening file %s" % FileName)
|
cyberplant/scrapy
|
refs/heads/master
|
tests/test_utils_reqser.py
|
103
|
# -*- coding: utf-8 -*-
import unittest
from scrapy.http import Request
from scrapy.spiders import Spider
from scrapy.utils.reqser import request_to_dict, request_from_dict
class RequestSerializationTest(unittest.TestCase):
def setUp(self):
self.spider = TestSpider()
def test_basic(self):
r = Request("http://www.example.com")
self._assert_serializes_ok(r)
def test_all_attributes(self):
r = Request("http://www.example.com",
callback='parse_item',
errback='handle_error',
method="POST",
body=b"some body",
headers={'content-encoding': 'text/html; charset=latin-1'},
cookies={'currency': u'руб'},
encoding='latin-1',
priority=20,
meta={'a': 'b'})
self._assert_serializes_ok(r)
def test_latin1_body(self):
r = Request("http://www.example.com", body=b"\xa3")
self._assert_serializes_ok(r)
def test_utf8_body(self):
r = Request("http://www.example.com", body=b"\xc2\xa3")
self._assert_serializes_ok(r)
def _assert_serializes_ok(self, request, spider=None):
d = request_to_dict(request, spider=spider)
request2 = request_from_dict(d, spider=spider)
self._assert_same_request(request, request2)
def _assert_same_request(self, r1, r2):
self.assertEqual(r1.url, r2.url)
self.assertEqual(r1.callback, r2.callback)
self.assertEqual(r1.errback, r2.errback)
self.assertEqual(r1.method, r2.method)
self.assertEqual(r1.body, r2.body)
self.assertEqual(r1.headers, r2.headers)
self.assertEqual(r1.cookies, r2.cookies)
self.assertEqual(r1.meta, r2.meta)
self.assertEqual(r1._encoding, r2._encoding)
self.assertEqual(r1.priority, r2.priority)
self.assertEqual(r1.dont_filter, r2.dont_filter)
def test_callback_serialization(self):
r = Request("http://www.example.com", callback=self.spider.parse_item,
errback=self.spider.handle_error)
self._assert_serializes_ok(r, spider=self.spider)
def test_unserializable_callback1(self):
r = Request("http://www.example.com", callback=lambda x: x)
self.assertRaises(ValueError, request_to_dict, r)
self.assertRaises(ValueError, request_to_dict, r, spider=self.spider)
def test_unserializable_callback2(self):
r = Request("http://www.example.com", callback=self.spider.parse_item)
self.assertRaises(ValueError, request_to_dict, r)
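    def _callback_name_roundtrip_sketch(self):
        # Illustrative sketch (not exercised by the suite, behaviour assumed
        # from the tests above): serializing with a spider replaces a bound
        # callback with its method name, which is why the spider-less calls
        # above raise ValueError.
        r = Request("http://www.example.com", callback=self.spider.parse_item)
        d = request_to_dict(r, spider=self.spider)
        assert d['callback'] == 'parse_item'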
class TestSpider(Spider):
name = 'test'
def parse_item(self, response):
pass
def handle_error(self, failure):
pass
|
hsaputra/tensorflow
|
refs/heads/master
|
tensorflow/python/training/saver_test.py
|
6
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for tensorflow.python.training.saver.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import math
import os
import random
import shutil
import tempfile
import time
import numpy as np
import six
from google.protobuf.any_pb2 import Any
from google.protobuf import text_format
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import queue_runner_pb2
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import function
from tensorflow.python.framework import graph_io
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary import summary
from tensorflow.python.training import adam
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import queue_runner_impl
from tensorflow.python.training import saver as saver_module
from tensorflow.python.training import saver_test_utils
from tensorflow.python.training.checkpoint_state_pb2 import CheckpointState
from tensorflow.python.util import compat
class SaverTest(test.TestCase):
def basicSaveRestore(self, variable_op):
save_path = os.path.join(self.get_temp_dir(), "basic_save_restore")
with self.test_session(graph=ops_lib.Graph()) as sess:
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = variable_op(10.0, name="v0")
v1 = variable_op(20.0, name="v1")
v2 = saver_test_utils.CheckpointedOp(name="v2")
v2_init = v2.insert("k1", 30.0)
# Initialize all variables
if context.in_graph_mode():
self.evaluate([variables.global_variables_initializer(), v2_init])
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
self.assertEqual(b"k1", self.evaluate(v2.keys()))
self.assertEqual(30.0, self.evaluate(v2.values()))
# Save the initialized values in the file at "save_path"
save = saver_module.Saver(
{
"v0": v0,
"v1": v1,
"v2": v2.saveable
}, restore_sequentially=True)
val = save.save(sess, save_path)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path, val)
# Start a second session. In that session the parameter nodes
# have not been initialized either.
with self.test_session(graph=ops_lib.Graph()) as sess:
v0 = variable_op(-1.0, name="v0")
v1 = variable_op(-1.0, name="v1")
v2 = saver_test_utils.CheckpointedOp(name="v2")
# Assert that the variables are not initialized.
if context.in_graph_mode():
self.assertEqual(
len(variables.report_uninitialized_variables().eval()), 2)
self.assertEqual(0, len(v2.keys().eval()))
self.assertEqual(0, len(v2.values().eval()))
# Restore the saved values in the parameter nodes.
save = saver_module.Saver({"v0": v0, "v1": v1, "v2": v2.saveable})
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
self.assertEqual(b"k1", self.evaluate(v2.keys()))
self.assertEqual(30.0, self.evaluate(v2.values()))
# Build another graph with 2 nodes, initialized
# differently, and a Restore node for them.
with self.test_session(graph=ops_lib.Graph()) as sess:
v0_2 = variable_op(1000.0, name="v0")
v1_2 = variable_op(2000.0, name="v1")
v2_2 = saver_test_utils.CheckpointedOp(name="v2")
v2_init = v2_2.insert("k1000", 3000.0)
# Check that the parameter nodes have been initialized.
if context.in_graph_mode():
init_all_op = [variables.global_variables_initializer(), v2_init]
self.evaluate(init_all_op)
# TODO(xpan): Why doesn't _mutable_hash_table_v2 create an empty
# table in eager mode, as it claims to?
self.assertEqual(b"k1000", self.evaluate(v2_2.keys()))
self.assertEqual(3000.0, self.evaluate(v2_2.values()))
self.assertEqual(1000.0, self.evaluate(v0_2))
self.assertEqual(2000.0, self.evaluate(v1_2))
# Restore the values saved earlier in the parameter nodes.
save2 = saver_module.Saver({"v0": v0_2, "v1": v1_2, "v2": v2_2.saveable})
save2.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, self.evaluate(v0_2))
self.assertEqual(20.0, self.evaluate(v1_2))
self.assertEqual(b"k1", self.evaluate(v2_2.keys()))
self.assertEqual(30.0, self.evaluate(v2_2.values()))
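# The pattern above in miniature (a sketch with illustrative names): the
# keys of the dict passed to Saver are the names under which values are
# stored in the checkpoint, independent of the variables' graph names.
#
#   save = saver_module.Saver({"v0": v0})  # stored as "v0" in the checkpoint
#   path = save.save(sess, save_path)      # returns the checkpoint prefix
#   save.restore(sess, path)               # assigns the saved values back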
def testBasic(self):
self.basicSaveRestore(variables.Variable)
@test_util.run_in_graph_and_eager_modes()
def testResourceBasic(self):
self.basicSaveRestore(resource_variable_ops.ResourceVariable)
def testResourceVariableReadOpsAddedDeterministically(self):
graph_defs = []
num_graphs = 10
for _ in range(num_graphs):
with ops_lib.Graph().as_default() as g:
for i in range(20):
resource_variable_ops.ResourceVariable(i, name="var%s" % i)
saver_module.Saver()
graph_defs.append(g.as_graph_def())
for i in range(num_graphs - 1):
self.assertEqual(graph_defs[i], graph_defs[i + 1])
def testEagerBasic(self):
with context.eager_mode():
ckpt_prefix = os.path.join(self.get_temp_dir(), "ckpt")
v1 = resource_variable_ops.ResourceVariable(3.14, name="v1")
v2 = resource_variable_ops.ResourceVariable([1, 2], name="v2")
save = saver_module.Saver([v1, v2])
save.save(None, ckpt_prefix)
v1.assign(0.0)
v2.assign([0, 0])
self.assertNear(0.0, self.evaluate(v1), 1e-5)
self.assertAllEqual([0, 0], self.evaluate(v2))
save.restore(None, ckpt_prefix)
self.assertNear(3.14, self.evaluate(v1), 1e-5)
self.assertAllEqual([1, 2], self.evaluate(v2))
def testEagerGraphCompatibility(self):
# Save from graph mode and restore from eager mode.
graph_ckpt_prefix = os.path.join(self.get_temp_dir(), "graph_ckpt")
with context.graph_mode():
with self.test_session(graph=ops_lib.Graph()) as sess:
# Create a graph model and save the checkpoint.
w1 = resource_variable_ops.ResourceVariable(1.0, name="w1")
w2 = resource_variable_ops.ResourceVariable(2.0, name="w2")
graph_saver = saver_module.Saver([w1, w2])
sess.run(variables.global_variables_initializer())
graph_saver.save(sess, graph_ckpt_prefix)
with context.eager_mode():
ops_lib._default_graph_stack.reset() # pylint: disable=protected-access
ops_lib.reset_default_graph()
w1 = resource_variable_ops.ResourceVariable(0.0, name="w1")
w2 = resource_variable_ops.ResourceVariable(0.0, name="w2")
graph_saver = saver_module.Saver([w1, w2])
graph_saver.restore(None, graph_ckpt_prefix)
self.assertAllEqual(self.evaluate(w1), 1.0)
self.assertAllEqual(self.evaluate(w2), 2.0)
# Save from eager mode and restore from graph mode.
eager_ckpt_prefix = os.path.join(self.get_temp_dir(), "eager_ckpt")
with context.eager_mode():
ops_lib._default_graph_stack.reset() # pylint: disable=protected-access
ops_lib.reset_default_graph()
w3 = resource_variable_ops.ResourceVariable(3.0, name="w3")
w4 = resource_variable_ops.ResourceVariable(4.0, name="w4")
graph_saver = saver_module.Saver([w3, w4])
graph_saver.save(None, eager_ckpt_prefix)
with context.graph_mode():
with self.test_session(graph=ops_lib.Graph()) as sess:
w3 = resource_variable_ops.ResourceVariable(0.0, name="w3")
w4 = resource_variable_ops.ResourceVariable(0.0, name="w4")
graph_saver = saver_module.Saver([w3, w4])
sess.run(variables.global_variables_initializer())
graph_saver.restore(sess, eager_ckpt_prefix)
self.assertAllEqual(w3.eval(), 3.0)
self.assertAllEqual(w4.eval(), 4.0)
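# A condensed view of the compatibility contract exercised above: the same
# Saver API serves both modes, with None standing in for the session when
# eager execution is enabled.
#
#   save.save(sess, prefix)  # graph mode: a live Session
#   save.save(None, prefix)  # eager mode: no session object exists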
@test_util.run_in_graph_and_eager_modes()
def testResourceSaveRestoreCachingDevice(self):
save_path = os.path.join(self.get_temp_dir(), "resource_cache")
with self.test_session(graph=ops_lib.Graph()) as sess:
v = resource_variable_ops.ResourceVariable([1], caching_device="/cpu:0",
name="v")
if context.in_graph_mode():
self.evaluate(variables.global_variables_initializer())
else:
sess = None
save = saver_module.Saver([v])
save.save(sess, save_path)
save2 = saver_module.Saver([v])
save2.restore(sess, save_path)
self.assertEqual(self.evaluate(v), [1])
def testSaveCopyRestoreWithSaveRelativePaths(self):
"""Save, copy checkpoint dir and restore from copied dir.
This only works for save_relative_paths=True.
"""
save_dir1 = os.path.join(self.get_temp_dir(), "save_dir1")
os.mkdir(save_dir1)
save_path1 = os.path.join(save_dir1, "save_copy_restore")
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = variables.Variable(10.0, name="v0")
v1 = variables.Variable(20.0, name="v1")
v2 = saver_test_utils.CheckpointedOp(name="v2")
v2_init = v2.insert("k1", 30.0)
save = saver_module.Saver(
var_list={
"v0": v0,
"v1": v1,
"v2": v2.saveable},
restore_sequentially=True,
save_relative_paths=True)
init_all_op = [variables.global_variables_initializer(), v2_init]
with self.test_session() as sess:
# Initialize all variables
sess.run(init_all_op)
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
self.assertEqual(b"k1", v2.keys().eval())
self.assertEqual(30.0, v2.values().eval())
# Save the initialized values in the file at "save_path"
val = save.save(sess, save_path1)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path1, val)
self.assertEqual(saver_module.latest_checkpoint(save_dir1), save_path1)
save_dir2 = os.path.join(self.get_temp_dir(), "save_dir2")
os.renames(save_dir1, save_dir2)
save_path2 = os.path.join(save_dir2, "save_copy_restore")
self.assertEqual(saver_module.latest_checkpoint(save_dir2), save_path2)
# Start a second session. In that session the parameter nodes
# have not been initialized either.
with self.test_session() as sess:
v0 = variables.Variable(-1.0, name="v0")
v1 = variables.Variable(-1.0, name="v1")
v2 = saver_test_utils.CheckpointedOp(name="v2")
save = saver_module.Saver({"v0": v0, "v1": v1, "v2": v2.saveable})
# Assert that the variables are not initialized.
self.assertEqual(
len(variables.report_uninitialized_variables().eval()), 2)
self.assertEqual(0, len(v2.keys().eval()))
self.assertEqual(0, len(v2.values().eval()))
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path2)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
self.assertEqual(b"k1", v2.keys().eval())
self.assertEqual(30.0, v2.values().eval())
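# What save_relative_paths=True changes, in brief (paths illustrative): the
# "checkpoint" state file records prefixes relative to its own directory
# rather than absolute paths, so the directory can be renamed or copied and
# latest_checkpoint() still resolves, as verified above.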
def testFilenameTensor(self):
v0 = variables.Variable(0, name="v0")
filename = b"somerandomfilename"
save = saver_module.Saver({"v0": v0}, filename=filename)
with self.test_session() as sess:
tensor = sess.graph.get_tensor_by_name(
save.saver_def.filename_tensor_name)
self.assertEqual(sess.run(tensor), filename)
def testInvalidPath(self):
v0 = variables.Variable(0, name="v0")
for ver in (saver_pb2.SaverDef.V1, saver_pb2.SaverDef.V2):
with self.test_session() as sess:
save = saver_module.Saver({"v0": v0}, write_version=ver)
with self.assertRaisesRegexp(errors.NotFoundError,
"Failed to find any matching files for"):
save.restore(sess, "invalid path")
def testInt64(self):
save_path = os.path.join(self.get_temp_dir(), "int64")
with self.test_session() as sess:
# Build a graph with 1 node, and Save and Restore nodes for it.
v = variables.Variable(np.int64(15), name="v")
save = saver_module.Saver({"v": v}, restore_sequentially=True)
variables.global_variables_initializer().run()
# Save the initialized values in the file at "save_path"
val = save.save(sess, save_path)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path, val)
with self.test_session() as sess:
v = variables.Variable(np.int64(-1), name="v")
save = saver_module.Saver({"v": v})
with self.assertRaisesWithPredicateMatch(
errors_impl.OpError, lambda e: "uninitialized value v" in e.message):
sess.run(v)
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(np.int64(15), v.eval())
def testSomeErrors(self):
with ops_lib.Graph().as_default():
v0 = variables.Variable([10.0], name="v0")
v1 = variables.Variable([20.0], name="v1")
v2 = variables.Variable([20.0], name="v2")
v2._set_save_slice_info(
variables.Variable.SaveSliceInfo("v1", [1], [0], [1]))
# By default the name used for "v2" will be "v1", which raises an error.
with self.assertRaisesRegexp(ValueError, "same name: v1"):
saver_module.Saver([v0, v1, v2])
# The names are different and will work.
saver_module.Saver({"vee1": v1, "other": [v2]})
# Partitioned variables also cause name conflicts.
p_v1 = variable_scope.get_variable(
"p_v1",
shape=[4, 5],
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=2))
p_v2 = variable_scope.get_variable(
"p_v2",
shape=[4, 5],
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=2))
p_v2._name = "p_v1"
with self.assertRaisesRegexp(ValueError, "same name: p_v1"):
saver_module.Saver([p_v1, p_v2])
def testSameName(self):
with ops_lib.Graph().as_default():
v0 = variables.Variable([10.0], name="v0")
v2 = saver_test_utils.CheckpointedOp(name="v2")
# Saving one variable under two names raises an error.
with self.assertRaisesRegexp(
ValueError, "The same saveable will be restored with two names: v0"):
saver_module.Saver({"v0": v0, "v0too": v0})
# Ditto for custom saveables.
with self.assertRaisesRegexp(
ValueError, "The same saveable will be restored with two names: v2"):
saver_module.Saver({"v2": v2.saveable, "v2too": v2.saveable})
# Verify non-duplicate names work.
saver_module.Saver({"v0": v0, "v2": v2.saveable})
def testBasicsWithListOfVariables(self):
save_path = os.path.join(self.get_temp_dir(), "basics_with_list")
with self.test_session(graph=ops_lib.Graph()) as sess:
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = variables.Variable(10.0, name="v0")
v1 = variables.Variable(20.0, name="v1")
v2 = saver_test_utils.CheckpointedOp(name="v2")
v2_init = v2.insert("k1", 30.0)
save = saver_module.Saver([v0, v1, v2.saveable])
variables.global_variables_initializer().run()
v2_init.run()
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
self.assertEqual(b"k1", v2.keys().eval())
self.assertEqual(30.0, v2.values().eval())
# Save the initialized values in the file at "save_path"
val = save.save(sess, save_path)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path, val)
# Start a second session. In that session the variables
# have not been initialized either.
with self.test_session(graph=ops_lib.Graph()) as sess:
v0 = variables.Variable(-1.0, name="v0")
v1 = variables.Variable(-1.0, name="v1")
v2 = saver_test_utils.CheckpointedOp(name="v2")
save = saver_module.Saver([v0, v1, v2.saveable])
with self.assertRaisesWithPredicateMatch(
errors_impl.OpError, lambda e: "uninitialized value v0" in e.message):
sess.run(v0)
with self.assertRaisesWithPredicateMatch(
errors_impl.OpError, lambda e: "uninitialized value v1" in e.message):
sess.run(v1)
self.assertEqual(0, len(v2.keys().eval()))
self.assertEqual(0, len(v2.values().eval()))
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
self.assertEqual(b"k1", v2.keys().eval())
self.assertEqual(30.0, v2.values().eval())
# Build another graph with 2 nodes, initialized
# differently, and a Restore node for them.
with self.test_session(graph=ops_lib.Graph()) as sess:
v0_2 = variables.Variable(1000.0, name="v0")
v1_2 = variables.Variable(2000.0, name="v1")
v2_2 = saver_test_utils.CheckpointedOp(name="v2")
save2 = saver_module.Saver([v0_2, v1_2, v2_2.saveable])
v2_2.insert("k1000", 3000.0).run()
variables.global_variables_initializer().run()
# Check that the parameter nodes have been initialized.
self.assertEqual(1000.0, v0_2.eval())
self.assertEqual(2000.0, v1_2.eval())
self.assertEqual(b"k1000", v2_2.keys().eval())
self.assertEqual(3000.0, v2_2.values().eval())
# Restore the values saved earlier in the parameter nodes.
save2.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, v0_2.eval())
self.assertEqual(20.0, v1_2.eval())
self.assertEqual(b"k1", v2_2.keys().eval())
self.assertEqual(30.0, v2_2.values().eval())
def _SaveAndLoad(self, var_name, var_value, other_value, save_path):
with self.test_session(graph=ops_lib.Graph()) as sess:
var = resource_variable_ops.ResourceVariable(var_value, name=var_name)
save = saver_module.Saver({var_name: var})
if context.in_graph_mode():
self.evaluate(var.initializer)
val = save.save(sess, save_path)
self.assertEqual(save_path, val)
with self.test_session(graph=ops_lib.Graph()) as sess:
var = resource_variable_ops.ResourceVariable(other_value, name=var_name)
save = saver_module.Saver({var_name: var})
save.restore(sess, save_path)
self.assertAllClose(var_value, self.evaluate(var))
def testCacheRereadsFile(self):
save_path = os.path.join(self.get_temp_dir(), "cache_rereads")
# Save and reload one Variable named "var0".
self._SaveAndLoad("var0", 0.0, 1.0, save_path)
# Save and reload one Variable named "var1" in the same file.
# The cached readers should know to re-read the file.
self._SaveAndLoad("var1", 1.1, 2.2, save_path)
def testAllowEmpty(self):
save_path = os.path.join(self.get_temp_dir(), "allow_empty")
with self.test_session() as sess:
_ = constant_op.constant(1)
save = saver_module.Saver(allow_empty=True)
val = save.save(sess, save_path)
self.assertIsNone(val)
with self.test_session() as sess:
save = saver_module.Saver(allow_empty=True)
save.restore(sess, save_path)
def testGPU(self):
if not test.is_gpu_available():
return
save_path = os.path.join(self.get_temp_dir(), "gpu")
with session.Session("", graph=ops_lib.Graph()) as sess:
with sess.graph.device(test.gpu_device_name()):
v0_1 = variables.Variable(123.45)
save = saver_module.Saver({"v0": v0_1})
variables.global_variables_initializer().run()
save.save(sess, save_path)
with session.Session("", graph=ops_lib.Graph()) as sess:
with sess.graph.device(test.gpu_device_name()):
v0_2 = variables.Variable(543.21)
save = saver_module.Saver({"v0": v0_2})
variables.global_variables_initializer().run()
def testSharedServerOnGPU(self):
if not test.is_gpu_available():
return
save_path = os.path.join(self.get_temp_dir(), "gpu")
with session.Session("", graph=ops_lib.Graph()) as sess:
with sess.graph.device(test.gpu_device_name()):
v0_1 = variables.Variable(123.45)
save = saver_module.Saver({"v0": v0_1}, sharded=True, allow_empty=True)
variables.global_variables_initializer().run()
save.save(sess, save_path)
with session.Session("", graph=ops_lib.Graph()) as sess:
with sess.graph.device(test.gpu_device_name()):
v0_2 = variables.Variable(543.21)
save = saver_module.Saver({"v0": v0_2}, sharded=True, allow_empty=True)
variables.global_variables_initializer().run()
def testVariables(self):
save_path = os.path.join(self.get_temp_dir(), "variables")
with session.Session("", graph=ops_lib.Graph()) as sess:
one = variables.Variable(1.0)
twos = variables.Variable([2.0, 2.0, 2.0])
v2 = saver_test_utils.CheckpointedOp(name="v2")
init = variables.global_variables_initializer()
save = saver_module.Saver()
init.run()
v2.insert("k1", 3.0).run()
save.save(sess, save_path)
with session.Session("", graph=ops_lib.Graph()) as sess:
one = variables.Variable(0.0)
twos = variables.Variable([0.0, 0.0, 0.0])
v2 = saver_test_utils.CheckpointedOp(name="v2")
# Saver with no arg, defaults to 'all variables'.
save = saver_module.Saver()
save.restore(sess, save_path)
self.assertAllClose(1.0, one.eval())
self.assertAllClose([2.0, 2.0, 2.0], twos.eval())
self.assertEqual(b"k1", v2.keys().eval())
self.assertEqual(3.0, v2.values().eval())
def testVarListShouldBeEmptyInDeferredBuild(self):
with ops_lib.Graph().as_default():
v = variables.Variable(1.0)
with self.assertRaisesRegexp(ValueError, "defer_build"):
saver_module.Saver([v], defer_build=True)
def testBuildShouldBeCalledBeforeSaveInCaseOfDeferBuild(self):
save_path = os.path.join(self.get_temp_dir(), "error_deferred_build")
with ops_lib.Graph().as_default(), session.Session() as sess:
variables.Variable(1.0)
saver = saver_module.Saver(defer_build=True)
with self.assertRaisesRegexp(RuntimeError, "build"):
saver.save(sess, save_path)
def testDeferredBuild(self):
save_path = os.path.join(self.get_temp_dir(), "deferred_build")
with session.Session("", graph=ops_lib.Graph()) as sess:
one = variables.Variable(1.0)
save = saver_module.Saver(defer_build=True)
# If build were not deferred, the saver could not save `twos`,
# which is created only after the Saver.
twos = variables.Variable([2.0, 2.0, 2.0])
init = variables.global_variables_initializer()
save.build()
init.run()
save.save(sess, save_path)
with session.Session("", graph=ops_lib.Graph()) as sess:
one = variables.Variable(0.0)
twos = variables.Variable([0.0, 0.0, 0.0])
# Saver with no arg, defaults to 'all variables'.
save = saver_module.Saver()
save.restore(sess, save_path)
self.assertAllClose(1.0, one.eval())
self.assertAllClose([2.0, 2.0, 2.0], twos.eval())
def testReshape(self):
save_path = os.path.join(self.get_temp_dir(), "variables_reshape")
with session.Session("", graph=ops_lib.Graph()) as sess:
var = variables.Variable([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
init = variables.global_variables_initializer()
save = saver_module.Saver()
init.run()
save.save(sess, save_path)
# Error when restoring with default reshape=False
with session.Session("", graph=ops_lib.Graph()) as sess:
var = variables.Variable([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]])
save = saver_module.Saver()
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Assign requires shapes of both tensors to match."):
save.restore(sess, save_path)
# Restored to new shape with reshape=True
with session.Session("", graph=ops_lib.Graph()) as sess:
var = variables.Variable([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]])
save = saver_module.Saver(reshape=True)
save.restore(sess, save_path)
self.assertAllClose([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], var.eval())
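# The behavior pinned down above, in brief: restore matches variables by
# checkpoint name and, by default, by exact shape; reshape=True relaxes only
# the shape check, refilling the new shape in row-major order (the saved
# [2, 3] tensor becomes the [3, 2] values asserted above).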
@test_util.run_in_graph_and_eager_modes()
def testSaveWithGlobalStep(self, pad_step_number=False):
save_path = os.path.join(self.get_temp_dir(), "ckpt_with_global_step")
global_step_int = 5
# Save and reload one Variable named "var0".
self._SaveAndLoad("var0", 0.0, 1.0, save_path)
for use_tensor in [True, False]:
with self.test_session(graph=ops_lib.Graph()):
var = resource_variable_ops.ResourceVariable(1.0, name="var0")
save = saver_module.Saver(
{
var._shared_name: var
}, pad_step_number=pad_step_number)
if context.in_graph_mode():
self.evaluate(var.initializer)
sess = ops_lib.get_default_session()
else:
sess = None
if use_tensor:
global_step = constant_op.constant(global_step_int)
val = save.save(sess, save_path, global_step=global_step)
else:
val = save.save(sess, save_path, global_step=global_step_int)
if pad_step_number:
expected_save_path = "%s-%s" % (save_path,
"{:08d}".format(global_step_int))
else:
expected_save_path = "%s-%d" % (save_path, global_step_int)
self.assertEqual(expected_save_path, val)
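# Summary of the naming rule asserted above (illustrative prefix):
#
#   save.save(sess, "/tmp/ckpt", global_step=5)
#     -> "/tmp/ckpt-5"         with pad_step_number=False (the default)
#     -> "/tmp/ckpt-00000005"  with pad_step_number=True (zero-padded to 8)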
def testSaveWithGlobalStepWithPadding(self):
self.testSaveWithGlobalStep(pad_step_number=True)
def testSaveToNonexistingPath(self):
file_io.write_string_to_file(
os.path.join(self.get_temp_dir(), "actually_a_file"), "")
paths = [
os.path.join(self.get_temp_dir(), "nonexisting_dir/path"),
os.path.join(self.get_temp_dir(), "other_nonexisting_dir/path1/path2"),
os.path.join(self.get_temp_dir(), "actually_a_file/path"),
]
for save_path in paths:
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = variables.Variable(10.0, name="v0")
v1 = variables.Variable(20.0, name="v1")
save = saver_module.Saver({"v0": v0, "v1": v1}, restore_sequentially=True)
init_all_op = variables.global_variables_initializer()
# When the parent directory doesn't exist, whether the save succeeds or
# fails is implementation-dependent, so we allow both outcomes.
try:
with self.test_session() as sess:
# Initialize all variables
sess.run(init_all_op)
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
# Save the graph.
save.save(sess, save_path)
with self.test_session() as sess:
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
except ValueError as exc:
error_msg_template = "Parent directory of {} doesn't exist, can't save."
self.assertEqual(error_msg_template.format(save_path), str(exc))
def testSaveToURI(self):
# ParseURI functions don't work on Windows yet.
# TODO(jhseu): Remove this check when it works.
if os.name == "nt":
self.skipTest("Local URI support doesn't work on Windows")
save_path = "file://" + os.path.join(self.get_temp_dir(), "uri")
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = variables.Variable(10.0, name="v0")
v1 = variables.Variable(20.0, name="v1")
save = saver_module.Saver({"v0": v0, "v1": v1}, restore_sequentially=True)
init_all_op = variables.global_variables_initializer()
with self.test_session() as sess:
# Initialize all variables
sess.run(init_all_op)
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
save.save(sess, save_path)
class SaveRestoreShardedTest(test.TestCase):
_WRITE_VERSION = saver_pb2.SaverDef.V1
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
def testBasics(self):
save_path = os.path.join(self.get_temp_dir(), "sharded_basics")
# Build a graph with 2 parameter nodes on different devices.
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v0 = variables.Variable(10, name="v0")
t0 = saver_test_utils.CheckpointedOp(name="t0")
with sess.graph.device("/cpu:1"):
v1 = variables.Variable(20, name="v1")
t1 = saver_test_utils.CheckpointedOp(name="t1")
save = saver_module.Saver(
{
"v0": v0,
"v1": v1,
"t0": t0.saveable,
"t1": t1.saveable
},
write_version=self._WRITE_VERSION,
sharded=True)
variables.global_variables_initializer().run()
t0.insert("k1", 30.0).run()
t1.insert("k2", 40.0).run()
val = save.save(sess, save_path)
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(save_path + "-?????-of-00002", val)
else:
self.assertEqual(save_path, val)
meta_graph_filename = save._MetaGraphFilename(val)
self.assertEqual(save_path + ".meta", meta_graph_filename)
if save._write_version is saver_pb2.SaverDef.V1:
# Restore different ops from shard 0 of the saved files.
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v0 = variables.Variable(111, name="v0")
t0 = saver_test_utils.CheckpointedOp(name="t0")
save = saver_module.Saver(
{
"v0": v0,
"t0": t0.saveable
},
write_version=self._WRITE_VERSION,
sharded=True)
variables.global_variables_initializer().run()
t0.insert("k11", 33.0).run()
self.assertEqual(111, v0.eval())
self.assertEqual(b"k11", t0.keys().eval())
self.assertEqual(33.0, t0.values().eval())
save.restore(sess, save_path + "-00000-of-00002")
self.assertEqual(10, v0.eval())
self.assertEqual(b"k1", t0.keys().eval())
self.assertEqual(30.0, t0.values().eval())
# Restore different ops from shard 1 of the saved files.
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v1 = variables.Variable(222)
t1 = saver_test_utils.CheckpointedOp(name="t1")
save = saver_module.Saver(
{
"v1": v1,
"t1": t1.saveable
},
write_version=self._WRITE_VERSION,
sharded=True)
variables.global_variables_initializer().run()
t1.insert("k22", 44.0).run()
self.assertEqual(222, v1.eval())
self.assertEqual(b"k22", t1.keys().eval())
self.assertEqual(44.0, t1.values().eval())
save.restore(sess, save_path + "-00001-of-00002")
self.assertEqual(20, v1.eval())
self.assertEqual(b"k2", t1.keys().eval())
self.assertEqual(40.0, t1.values().eval())
# Now try a restore with the sharded filename.
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v0 = variables.Variable(111, name="v0")
t0 = saver_test_utils.CheckpointedOp(name="t0")
with sess.graph.device("/cpu:1"):
v1 = variables.Variable(222, name="v1")
t1 = saver_test_utils.CheckpointedOp(name="t1")
save = saver_module.Saver(
{
"v0": v0,
"v1": v1,
"t0": t0.saveable,
"t1": t1.saveable
},
write_version=self._WRITE_VERSION,
sharded=True)
variables.global_variables_initializer().run()
t0.insert("k11", 33.0).run()
t1.insert("k22", 44.0).run()
self.assertEqual(111, v0.eval())
self.assertEqual(222, v1.eval())
self.assertEqual(b"k11", t0.keys().eval())
self.assertEqual(33.0, t0.values().eval())
self.assertEqual(b"k22", t1.keys().eval())
self.assertEqual(44.0, t1.values().eval())
save_path = os.path.join(self.get_temp_dir(), "sharded_basics")
if save._write_version is saver_pb2.SaverDef.V1:
save.restore(sess, save_path + "-?????-of-?????")
else:
save.restore(sess, save_path)
self.assertEqual(10, v0.eval())
self.assertEqual(20, v1.eval())
self.assertEqual(b"k1", t0.keys().eval())
self.assertEqual(30.0, t0.values().eval())
self.assertEqual(b"k2", t1.keys().eval())
self.assertEqual(40.0, t1.values().eval())
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(
saver_module.latest_checkpoint(self.get_temp_dir()),
os.path.join(self.get_temp_dir(), "sharded_basics-?????-of-00002"))
else:
self.assertEqual(
saver_module.latest_checkpoint(self.get_temp_dir()),
os.path.join(self.get_temp_dir(), "sharded_basics"))
def testSaverDef(self):
with self.test_session():
v0 = variables.Variable(123, name="v0")
save = saver_module.Saver({"v0": v0}, sharded=True)
sd = save.as_saver_def()
self.assertTrue(sd.sharded)
def _testPartitionedVariables(self, use_resource):
var_full_shape = [10, 3]
# Allows the save/restore mechanism to work with different slicings.
var_name = "my_var"
saved_dir = self._get_test_dir("partitioned_variables")
saved_path = os.path.join(saved_dir, "ckpt")
call_saver_with_dict = False # updated by test loop below
def _save(slices=None, partitioner=None):
with self.test_session(graph=ops_lib.Graph()) as sess:
# Calls .eval() to materialize the ndarray that initializes the full variable.
rnd = random_ops.random_uniform(var_full_shape).eval()
if slices:
assert not partitioner
# TODO(apassos): make create_partitioned_variables take use_resource
# option to make this test passable without creating a named
# variable_scope.
vs = partitioned_variables.create_partitioned_variables(
var_full_shape, slices, rnd, name=var_name)
elif partitioner:
vs = [
variable_scope.get_variable(
var_name,
shape=var_full_shape,
initializer=rnd,
partitioner=partitioner,
use_resource=use_resource)
]
else:
if use_resource:
vs = [resource_variable_ops.ResourceVariable(rnd, name=var_name)]
else:
vs = [variables.Variable(rnd, name=var_name)]
variables.global_variables_initializer().run()
if call_saver_with_dict:
saver = saver_module.Saver({var_name: (vs if slices else vs[0])})
else:
saver = saver_module.Saver(vs)
actual_path = saver.save(sess, saved_path)
self.assertEqual(saved_path, actual_path)
return rnd
def _restore(slices=None, partitioner=None):
with self.test_session(graph=ops_lib.Graph()) as sess:
if slices:
assert not partitioner
new_vs = partitioned_variables.create_partitioned_variables(
var_full_shape,
slices,
array_ops.zeros(var_full_shape), # != original contents.
name=var_name)
elif partitioner:
new_vs = [
variable_scope.get_variable(
var_name,
shape=var_full_shape,
initializer=array_ops.zeros(var_full_shape),
partitioner=partitioner)
]
else:
new_vs = [
variables.Variable(
array_ops.zeros(
shape=var_full_shape), # != original contents.
name=var_name)
]
variables.global_variables_initializer().run()
if call_saver_with_dict:
saver = saver_module.Saver({
var_name: (new_vs if slices else new_vs[0])
})
else:
saver = saver_module.Saver(new_vs)
saver.restore(sess, saved_path)
if partitioner:
return new_vs[0].as_tensor().eval()
elif slices and slices[0] != 1:
return array_ops.concat(new_vs, 0).eval()
elif slices and slices[1] != 1:
return array_ops.concat(new_vs, 1).eval()
else: # Non-sliced.
return new_vs[0].eval()
for call_saver_with_dict in {False, True}:
# Save PartitionedVariable and restore into full variable.
saved_full = _save(
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=2))
restored_full = _restore()
self.assertAllEqual(saved_full, restored_full)
# Saves 10 horizontal parts of a partitioned variable.
# Restores into a full variable, non-sliced.
saved_full = _save(slices=[10, 1])
restored_full = _restore()
self.assertAllEqual(saved_full, restored_full)
# Restores into a different number/orientation of slices.
restored_full = _restore(slices=[2, 1])  # 2 horizontal parts.
self.assertAllEqual(saved_full, restored_full)
restored_full = _restore(slices=[1, 3]) # 3 vertical parts.
self.assertAllEqual(saved_full, restored_full)
# Restores into a PartitionedVariable
restored_full = _restore(
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=2))
self.assertAllEqual(saved_full, restored_full)
# Now saves a full variable and restores it in slices.
saved_full = _save()
restored_full = _restore(slices=[1, 3])
self.assertAllEqual(saved_full, restored_full)
def testPartitionedVariable(self):
self._testPartitionedVariables(use_resource=False)
def testPartitionedResourceVariable(self):
self._testPartitionedVariables(use_resource=True)
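# The invariant these partitioned-variable cases establish: a checkpoint
# stores the full logical tensor for "my_var", so the restore side may use
# any slicing, or none, e.g. save with slices=[10, 1] and restore with
# slices=[1, 3] or into a single unpartitioned variable.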
class SaveRestoreShardedTestV2(SaveRestoreShardedTest):
_WRITE_VERSION = saver_pb2.SaverDef.V2
class MaxToKeepTest(test.TestCase):
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
def assertCheckpointState(self, model_checkpoint_path,
all_model_checkpoint_paths, save_dir):
checkpoint_state = saver_module.get_checkpoint_state(save_dir)
self.assertEqual(checkpoint_state.model_checkpoint_path,
model_checkpoint_path)
self.assertEqual(checkpoint_state.all_model_checkpoint_paths,
all_model_checkpoint_paths)
def testNonSharded(self):
save_dir = self._get_test_dir("max_to_keep_non_sharded")
with self.test_session() as sess:
v = variables.Variable(10.0, name="v")
save = saver_module.Saver({"v": v}, max_to_keep=2)
variables.global_variables_initializer().run()
self.assertEqual([], save.last_checkpoints)
s1 = save.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s1], save.last_checkpoints)
self.assertTrue(saver_module.checkpoint_exists(s1))
self.assertCheckpointState(
model_checkpoint_path=s1,
all_model_checkpoint_paths=[s1],
save_dir=save_dir)
s2 = save.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s1, s2], save.last_checkpoints)
self.assertTrue(saver_module.checkpoint_exists(s1))
self.assertTrue(saver_module.checkpoint_exists(s2))
self.assertCheckpointState(
model_checkpoint_path=s2,
all_model_checkpoint_paths=[s1, s2],
save_dir=save_dir)
s3 = save.save(sess, os.path.join(save_dir, "s3"))
self.assertEqual([s2, s3], save.last_checkpoints)
self.assertFalse(saver_module.checkpoint_exists(s1))
self.assertTrue(saver_module.checkpoint_exists(s2))
self.assertTrue(saver_module.checkpoint_exists(s3))
self.assertCheckpointState(
model_checkpoint_path=s3,
all_model_checkpoint_paths=[s2, s3],
save_dir=save_dir)
# Create a second helper, identical to the first.
save2 = saver_module.Saver(saver_def=save.as_saver_def())
save2.set_last_checkpoints(save.last_checkpoints)
# Create a third helper, with the same configuration but no knowledge of
# previous checkpoints.
save3 = saver_module.Saver(saver_def=save.as_saver_def())
# Exercise the first helper.
# Adding s2 again (old s2 is removed first, then new s2 appended)
s2 = save.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s3, s2], save.last_checkpoints)
self.assertFalse(saver_module.checkpoint_exists(s1))
self.assertFalse(
saver_module.checkpoint_exists(save._MetaGraphFilename(s1)))
self.assertTrue(saver_module.checkpoint_exists(s3))
self.assertTrue(
saver_module.checkpoint_exists(save._MetaGraphFilename(s3)))
self.assertTrue(saver_module.checkpoint_exists(s2))
self.assertTrue(
saver_module.checkpoint_exists(save._MetaGraphFilename(s2)))
self.assertCheckpointState(
model_checkpoint_path=s2,
all_model_checkpoint_paths=[s3, s2],
save_dir=save_dir)
# Adding s1 (s3 should now be deleted as oldest in list)
s1 = save.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s2, s1], save.last_checkpoints)
self.assertFalse(saver_module.checkpoint_exists(s3))
self.assertFalse(
saver_module.checkpoint_exists(save._MetaGraphFilename(s3)))
self.assertTrue(saver_module.checkpoint_exists(s2))
self.assertTrue(
saver_module.checkpoint_exists(save._MetaGraphFilename(s2)))
self.assertTrue(saver_module.checkpoint_exists(s1))
self.assertTrue(
saver_module.checkpoint_exists(save._MetaGraphFilename(s1)))
self.assertCheckpointState(
model_checkpoint_path=s1,
all_model_checkpoint_paths=[s2, s1],
save_dir=save_dir)
# Exercise the second helper.
# Adding s2 again (old s2 is removed first, then new s2 appended)
s2 = save2.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s3, s2], save2.last_checkpoints)
# Created by the first helper.
self.assertTrue(saver_module.checkpoint_exists(s1))
self.assertTrue(
saver_module.checkpoint_exists(save._MetaGraphFilename(s1)))
# Deleted by the first helper.
self.assertFalse(saver_module.checkpoint_exists(s3))
self.assertFalse(
saver_module.checkpoint_exists(save._MetaGraphFilename(s3)))
self.assertTrue(saver_module.checkpoint_exists(s2))
self.assertTrue(
saver_module.checkpoint_exists(save._MetaGraphFilename(s2)))
self.assertCheckpointState(
model_checkpoint_path=s2,
all_model_checkpoint_paths=[s3, s2],
save_dir=save_dir)
# Adding s1 (s3 should now be deleted as oldest in list)
s1 = save2.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s2, s1], save2.last_checkpoints)
self.assertFalse(saver_module.checkpoint_exists(s3))
self.assertFalse(
saver_module.checkpoint_exists(save._MetaGraphFilename(s3)))
self.assertTrue(saver_module.checkpoint_exists(s2))
self.assertTrue(
saver_module.checkpoint_exists(save._MetaGraphFilename(s2)))
self.assertTrue(saver_module.checkpoint_exists(s1))
self.assertTrue(
saver_module.checkpoint_exists(save._MetaGraphFilename(s1)))
self.assertCheckpointState(
model_checkpoint_path=s1,
all_model_checkpoint_paths=[s2, s1],
save_dir=save_dir)
# Exercise the third helper.
# Adding s2 again (but helper is unaware of previous s2)
s2 = save3.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s2], save3.last_checkpoints)
# Created by the first helper.
self.assertTrue(saver_module.checkpoint_exists(s1))
self.assertTrue(
saver_module.checkpoint_exists(save._MetaGraphFilename(s1)))
# Deleted by the first helper.
self.assertFalse(saver_module.checkpoint_exists(s3))
self.assertFalse(
saver_module.checkpoint_exists(save._MetaGraphFilename(s3)))
self.assertTrue(saver_module.checkpoint_exists(s2))
self.assertTrue(
saver_module.checkpoint_exists(save._MetaGraphFilename(s2)))
# Even though the file for s1 exists, this saver isn't aware of it, which
# is why it doesn't end up in the checkpoint state.
self.assertCheckpointState(
model_checkpoint_path=s2,
all_model_checkpoint_paths=[s2],
save_dir=save_dir)
# Adding s1 (s3 should not be deleted because helper is unaware of it)
s1 = save3.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s2, s1], save3.last_checkpoints)
self.assertFalse(saver_module.checkpoint_exists(s3))
self.assertFalse(
saver_module.checkpoint_exists(save._MetaGraphFilename(s3)))
self.assertTrue(saver_module.checkpoint_exists(s2))
self.assertTrue(
saver_module.checkpoint_exists(save._MetaGraphFilename(s2)))
self.assertTrue(saver_module.checkpoint_exists(s1))
self.assertTrue(
saver_module.checkpoint_exists(save._MetaGraphFilename(s1)))
self.assertCheckpointState(
model_checkpoint_path=s1,
all_model_checkpoint_paths=[s2, s1],
save_dir=save_dir)
def testSharded(self):
save_dir = self._get_test_dir("max_to_keep_sharded")
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v0 = variables.Variable(111, name="v0")
with sess.graph.device("/cpu:1"):
v1 = variables.Variable(222, name="v1")
save = saver_module.Saver(
{
"v0": v0,
"v1": v1
}, sharded=True, max_to_keep=2)
variables.global_variables_initializer().run()
self.assertEqual([], save.last_checkpoints)
s1 = save.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s1], save.last_checkpoints)
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(2, len(gfile.Glob(s1)))
else:
self.assertEqual(4, len(gfile.Glob(s1 + "*")))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1)))
s2 = save.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s1, s2], save.last_checkpoints)
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(2, len(gfile.Glob(s1)))
else:
self.assertEqual(4, len(gfile.Glob(s1 + "*")))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1)))
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(2, len(gfile.Glob(s2)))
else:
self.assertEqual(4, len(gfile.Glob(s2 + "*")))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2)))
s3 = save.save(sess, os.path.join(save_dir, "s3"))
self.assertEqual([s2, s3], save.last_checkpoints)
self.assertEqual(0, len(gfile.Glob(s1 + "*")))
self.assertFalse(gfile.Exists(save._MetaGraphFilename(s1)))
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(2, len(gfile.Glob(s2)))
else:
self.assertEqual(4, len(gfile.Glob(s2 + "*")))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2)))
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(2, len(gfile.Glob(s3)))
else:
self.assertEqual(4, len(gfile.Glob(s3 + "*")))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s3)))
def testNoMaxToKeep(self):
save_dir = self._get_test_dir("no_max_to_keep")
save_dir2 = self._get_test_dir("max_to_keep_0")
with self.test_session() as sess:
v = variables.Variable(10.0, name="v")
variables.global_variables_initializer().run()
# Test max_to_keep being None.
save = saver_module.Saver({"v": v}, max_to_keep=None)
self.assertEqual([], save.last_checkpoints)
s1 = save.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([], save.last_checkpoints)
self.assertTrue(saver_module.checkpoint_exists(s1))
s2 = save.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([], save.last_checkpoints)
self.assertTrue(saver_module.checkpoint_exists(s2))
# Test max_to_keep being 0.
save2 = saver_module.Saver({"v": v}, max_to_keep=0)
self.assertEqual([], save2.last_checkpoints)
s1 = save2.save(sess, os.path.join(save_dir2, "s1"))
self.assertEqual([], save2.last_checkpoints)
self.assertTrue(saver_module.checkpoint_exists(s1))
s2 = save2.save(sess, os.path.join(save_dir2, "s2"))
self.assertEqual([], save2.last_checkpoints)
self.assertTrue(saver_module.checkpoint_exists(s2))
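# max_to_keep semantics as exercised here and in testNonSharded: a positive
# N keeps a rolling window of the N most recent checkpoints and deletes
# older ones; with None or 0 nothing is ever deleted and last_checkpoints
# stays empty.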
def testNoMetaGraph(self):
save_dir = self._get_test_dir("no_meta_graph")
with self.test_session() as sess:
v = variables.Variable(10.0, name="v")
save = saver_module.Saver({"v": v})
variables.global_variables_initializer().run()
s1 = save.save(sess, os.path.join(save_dir, "s1"), write_meta_graph=False)
self.assertTrue(saver_module.checkpoint_exists(s1))
self.assertFalse(gfile.Exists(save._MetaGraphFilename(s1)))
class KeepCheckpointEveryNHoursTest(test.TestCase):
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
@test.mock.patch.object(saver_module, "time")
def testNonSharded(self, mock_time):
save_dir = self._get_test_dir("keep_checkpoint_every_n_hours")
with self.test_session() as sess:
v = variables.Variable([10.0], name="v")
# Run the initializer NOW to avoid the 0.5s overhead of the first Run()
# call, which throws the test timing off in fastbuild mode.
variables.global_variables_initializer().run()
# Create a saver that will keep the last 2 checkpoints plus one every 0.7
# seconds.
start_time = time.time()
mock_time.time.return_value = start_time
save = saver_module.Saver(
{
"v": v
}, max_to_keep=2, keep_checkpoint_every_n_hours=0.7 / 3600)
self.assertEqual([], save.last_checkpoints)
# Advance the mocked clock by 1 second so s1 will be old enough to keep.
# (A real sleep may return early, which is why time is mocked here.)
mock_time.time.return_value = start_time + 1.0
s1 = save.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s1], save.last_checkpoints)
s2 = save.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s1, s2], save.last_checkpoints)
# We now have 2 'last_checkpoints': [s1, s2]. The next call to Save()
# would normally delete s1, because max_to_keep is 2. However, s1 is
# older than 0.7s, so we must keep it.
s3 = save.save(sess, os.path.join(save_dir, "s3"))
self.assertEqual([s2, s3], save.last_checkpoints)
# s1 should still be here; we do not check yet, to reduce timing
# variance in the test.
# We now have 2 'last_checkpoints': [s2, s3], and s1 on disk. The next
# call to Save() will delete s2, because max_to_keep is 2 and because
# we already kept the old s1. s2 is very close in time to s1, so it
# gets deleted.
s4 = save.save(sess, os.path.join(save_dir, "s4"))
self.assertEqual([s3, s4], save.last_checkpoints)
# Check that s1 is still here, but s2 is gone.
self.assertTrue(saver_module.checkpoint_exists(s1))
self.assertFalse(saver_module.checkpoint_exists(s2))
self.assertTrue(saver_module.checkpoint_exists(s3))
self.assertTrue(saver_module.checkpoint_exists(s4))
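# Timeline of the mocked-clock scenario above (a sketch; keep interval is
# 0.7s, max_to_keep=2, and all four saves happen at start_time + 1.0s):
# when s3 evicts s1 from the rolling window, s1 is already past the 0.7s
# keep threshold and is retained on disk; by the time s4 evicts s2, the
# threshold has advanced past s2's timestamp, so s2 is simply deleted.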
class SaveRestoreWithVariableNameMap(test.TestCase):
def _testNonReshape(self, variable_op):
save_path = os.path.join(self.get_temp_dir(), "non_reshape")
with self.test_session(graph=ops_lib.Graph()) as sess:
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = variable_op(10.0, name="v0")
v1 = variable_op(20.0, name="v1")
save = saver_module.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1})
self.evaluate(variables.global_variables_initializer())
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
# Save the initialized values in the file at "save_path"
# Use a variable name map to set the saved tensor names
val = save.save(sess, save_path)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path, val)
# Verify that the original names are not in the Saved file
save = saver_module.Saver({"v0": v0, "v1": v1})
with self.assertRaisesOpError("not found in checkpoint"):
save.restore(sess, save_path)
# Verify that the mapped names are present in the Saved file and can be
# Restored using remapped names.
with self.test_session(graph=ops_lib.Graph()) as sess:
v0 = variable_op(-1.0, name="v0")
v1 = variable_op(-1.0, name="v1")
if context.in_graph_mode():
with self.assertRaisesOpError("uninitialized"):
self.evaluate(v0)
with self.assertRaisesOpError("uninitialized"):
self.evaluate(v1)
save = saver_module.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1})
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
if context.in_graph_mode():
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
# Add a prefix to the node names in the current graph and Restore using
# remapped names.
with self.test_session(graph=ops_lib.Graph()) as sess:
v0 = variable_op(-1.0, name="restore_prefix/v0")
v1 = variable_op(-1.0, name="restore_prefix/v1")
if context.in_graph_mode():
with self.assertRaisesOpError("uninitialized"):
self.evaluate(v0)
with self.assertRaisesOpError("uninitialized"):
self.evaluate(v1)
# Restore the saved values in the parameter nodes.
save = saver_module.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1})
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
@test_util.run_in_graph_and_eager_modes()
def testNonReshapeResourceVariable(self):
self._testNonReshape(resource_variable_ops.ResourceVariable)
def testNonReshapeVariable(self):
self._testNonReshape(variables.Variable)
class LatestCheckpointWithRelativePaths(test.TestCase):
@staticmethod
@contextlib.contextmanager
def tempWorkingDir(temppath):
cwd = os.getcwd()
os.chdir(temppath)
try:
yield
finally:
os.chdir(cwd)
@staticmethod
@contextlib.contextmanager
def tempDir():
tempdir = tempfile.mkdtemp()
try:
yield tempdir
finally:
shutil.rmtree(tempdir)
def testNameCollision(self):
# Make sure we have a clean directory to work in.
with self.tempDir() as tempdir:
# Jump to that directory until this test is done.
with self.tempWorkingDir(tempdir):
# Save training snapshots to a relative path.
traindir = "train/"
os.mkdir(traindir)
# Collides with the default name of the checkpoint state file.
filepath = os.path.join(traindir, "checkpoint")
with self.test_session() as sess:
unused_a = variables.Variable(0.0) # So that Saver saves something.
variables.global_variables_initializer().run()
# Should fail.
saver = saver_module.Saver(sharded=False)
with self.assertRaisesRegexp(ValueError, "collides with"):
saver.save(sess, filepath)
# Succeeds: the file will be named "checkpoint-<step>".
saver.save(sess, filepath, global_step=1)
self.assertIsNotNone(saver_module.latest_checkpoint(traindir))
# Succeeds: the file will be named "checkpoint-<i>-of-<n>".
saver = saver_module.Saver(sharded=True)
saver.save(sess, filepath)
self.assertIsNotNone(saver_module.latest_checkpoint(traindir))
# Succeeds: the file will be named "checkpoint-<step>-<i>-of-<n>".
saver = saver_module.Saver(sharded=True)
saver.save(sess, filepath, global_step=1)
self.assertIsNotNone(saver_module.latest_checkpoint(traindir))
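# The collision rule demonstrated above: a non-sharded save without a
# global step would write exactly "<dir>/checkpoint", the reserved name of
# the checkpoint state file, so Saver raises ValueError; appending a step
# number or shard suffix disambiguates the filenames.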
def testRelativePath(self):
# Make sure we have a clean directory to work in.
with self.tempDir() as tempdir:
# Jump to that directory until this test is done.
with self.tempWorkingDir(tempdir):
# Save training snapshots to a relative path.
traindir = "train/"
os.mkdir(traindir)
filename = "snapshot"
filepath = os.path.join(traindir, filename)
with self.test_session() as sess:
# Build a simple graph.
v0 = variables.Variable(0.0)
inc = v0.assign_add(1.0)
save = saver_module.Saver({"v0": v0})
# Record a short training history.
variables.global_variables_initializer().run()
save.save(sess, filepath, global_step=0)
inc.eval()
save.save(sess, filepath, global_step=1)
inc.eval()
save.save(sess, filepath, global_step=2)
with self.test_session() as sess:
# Build a new graph with different initialization.
v0 = variables.Variable(-1.0)
# Create a new saver.
save = saver_module.Saver({"v0": v0})
variables.global_variables_initializer().run()
# Get the most recent checkpoint name from the training history file.
name = saver_module.latest_checkpoint(traindir)
self.assertIsNotNone(name)
# Restore "v0" from that checkpoint.
save.restore(sess, name)
self.assertEqual(v0.eval(), 2.0)
class CheckpointStateTest(test.TestCase):
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
def testAbsPath(self):
save_dir = self._get_test_dir("abs_paths")
abs_path = os.path.join(save_dir, "model-0")
ckpt = saver_module.generate_checkpoint_state_proto(save_dir, abs_path)
self.assertEqual(ckpt.model_checkpoint_path, abs_path)
self.assertTrue(os.path.isabs(ckpt.model_checkpoint_path))
self.assertEqual(len(ckpt.all_model_checkpoint_paths), 1)
self.assertEqual(ckpt.all_model_checkpoint_paths[-1], abs_path)
def testRelPath(self):
train_dir = "train"
model = os.path.join(train_dir, "model-0")
# model_checkpoint_path should have no "train" directory part.
new_rel_path = "model-0"
ckpt = saver_module.generate_checkpoint_state_proto(train_dir, model)
self.assertEqual(ckpt.model_checkpoint_path, new_rel_path)
self.assertEqual(len(ckpt.all_model_checkpoint_paths), 1)
self.assertEqual(ckpt.all_model_checkpoint_paths[-1], new_rel_path)
def testAllModelCheckpointPaths(self):
save_dir = self._get_test_dir("all_models_test")
abs_path = os.path.join(save_dir, "model-0")
for paths in [None, [], ["model-2"]]:
ckpt = saver_module.generate_checkpoint_state_proto(
save_dir, abs_path, all_model_checkpoint_paths=paths)
self.assertEqual(ckpt.model_checkpoint_path, abs_path)
self.assertTrue(os.path.isabs(ckpt.model_checkpoint_path))
self.assertEqual(
len(ckpt.all_model_checkpoint_paths), len(paths) if paths else 1)
self.assertEqual(ckpt.all_model_checkpoint_paths[-1], abs_path)
def testUpdateCheckpointState(self):
save_dir = self._get_test_dir("update_checkpoint_state")
os.chdir(save_dir)
# Make a temporary train directory.
train_dir = "train"
os.mkdir(train_dir)
abs_path = os.path.join(save_dir, "model-0")
rel_path = os.path.join("train", "model-2")
saver_module.update_checkpoint_state(
train_dir, rel_path, all_model_checkpoint_paths=[abs_path, rel_path])
ckpt = saver_module.get_checkpoint_state(train_dir)
self.assertEqual(ckpt.model_checkpoint_path, rel_path)
self.assertEqual(len(ckpt.all_model_checkpoint_paths), 2)
self.assertEqual(ckpt.all_model_checkpoint_paths[-1], rel_path)
self.assertEqual(ckpt.all_model_checkpoint_paths[0], abs_path)
def testUpdateCheckpointStateSaveRelativePaths(self):
save_dir = self._get_test_dir("update_checkpoint_state")
os.chdir(save_dir)
abs_path2 = os.path.join(save_dir, "model-2")
rel_path2 = "model-2"
abs_path0 = os.path.join(save_dir, "model-0")
rel_path0 = "model-0"
saver_module._update_checkpoint_state( # pylint: disable=protected-access
save_dir=save_dir,
model_checkpoint_path=abs_path2,
all_model_checkpoint_paths=[rel_path0, abs_path2],
save_relative_paths=True)
# File should contain relative paths.
file_content = file_io.read_file_to_string(
os.path.join(save_dir, "checkpoint"))
ckpt = CheckpointState()
text_format.Merge(file_content, ckpt)
self.assertEqual(ckpt.model_checkpoint_path, rel_path2)
self.assertEqual(len(ckpt.all_model_checkpoint_paths), 2)
self.assertEqual(ckpt.all_model_checkpoint_paths[-1], rel_path2)
self.assertEqual(ckpt.all_model_checkpoint_paths[0], rel_path0)
# get_checkpoint_state should return absolute paths.
ckpt = saver_module.get_checkpoint_state(save_dir)
self.assertEqual(ckpt.model_checkpoint_path, abs_path2)
self.assertEqual(len(ckpt.all_model_checkpoint_paths), 2)
self.assertEqual(ckpt.all_model_checkpoint_paths[-1], abs_path2)
self.assertEqual(ckpt.all_model_checkpoint_paths[0], abs_path0)
def testCheckPointStateFailsWhenIncomplete(self):
save_dir = self._get_test_dir("checkpoint_state_fails_when_incomplete")
os.chdir(save_dir)
ckpt_path = os.path.join(save_dir, "checkpoint")
ckpt_file = open(ckpt_path, "w")
ckpt_file.write("")
ckpt_file.close()
with self.assertRaises(ValueError):
saver_module.get_checkpoint_state(save_dir)
def testCheckPointCompletesRelativePaths(self):
save_dir = self._get_test_dir("checkpoint_completes_relative_paths")
os.chdir(save_dir)
ckpt_path = os.path.join(save_dir, "checkpoint")
ckpt_file = open(ckpt_path, "w")
ckpt_file.write("""
model_checkpoint_path: "./model.ckpt-687529"
all_model_checkpoint_paths: "./model.ckpt-687500"
all_model_checkpoint_paths: "./model.ckpt-687529"
""")
ckpt_file.close()
ckpt = saver_module.get_checkpoint_state(save_dir)
self.assertEqual(ckpt.model_checkpoint_path,
os.path.join(save_dir, "./model.ckpt-687529"))
self.assertEqual(ckpt.all_model_checkpoint_paths[0],
os.path.join(save_dir, "./model.ckpt-687500"))
self.assertEqual(ckpt.all_model_checkpoint_paths[1],
os.path.join(save_dir, "./model.ckpt-687529"))
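# In short: get_checkpoint_state() joins relative entries onto the
# directory it reads from, e.g.
#
#   "./model.ckpt-687529" -> os.path.join(save_dir, "./model.ckpt-687529")
#
# which is the inverse of writing with save_relative_paths=True, verified
# earlier in this class.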
class MetaGraphTest(test.TestCase):
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
def testAddCollectionDef(self):
test_dir = self._get_test_dir("good_collection")
filename = os.path.join(test_dir, "metafile")
with self.test_session():
# Creates a graph.
v0 = variables.Variable(1.0, name="v0")
control_flow_ops.cond(
math_ops.less(v0, 10), lambda: math_ops.add(v0, 1),
lambda: math_ops.subtract(v0, 1))
control_flow_ops.while_loop(lambda i: math_ops.less(i, 10),
lambda i: math_ops.add(i, 1), [v0])
var = variables.Variable(constant_op.constant(0, dtype=dtypes.int64))
count_up_to = var.count_up_to(3)
input_queue = data_flow_ops.FIFOQueue(
30, dtypes.float32, shared_name="collection_queue")
qr = queue_runner_impl.QueueRunner(input_queue, [count_up_to])
variables.global_variables_initializer()
# Creates a saver.
save = saver_module.Saver({"v0": v0})
# Adds a set of collections.
ops_lib.add_to_collection("int_collection", 3)
ops_lib.add_to_collection("float_collection", 3.5)
ops_lib.add_to_collection("string_collection", "hello")
ops_lib.add_to_collection("variable_collection", v0)
# Add QueueRunners.
queue_runner_impl.add_queue_runner(qr)
# Adds user_defined proto in three formats: string, bytes and Any.
queue_runner = queue_runner_pb2.QueueRunnerDef(queue_name="test_queue")
ops_lib.add_to_collection("user_defined_string_collection",
str(queue_runner))
ops_lib.add_to_collection("user_defined_bytes_collection",
queue_runner.SerializeToString())
any_buf = Any()
any_buf.Pack(queue_runner)
ops_lib.add_to_collection("user_defined_any_collection", any_buf)
# Generates MetaGraphDef.
meta_graph_def = save.export_meta_graph(filename)
self.assertTrue(meta_graph_def.HasField("saver_def"))
self.assertTrue(meta_graph_def.HasField("graph_def"))
self.assertTrue(meta_graph_def.HasField("meta_info_def"))
self.assertNotEqual(meta_graph_def.meta_info_def.tensorflow_version, "")
self.assertNotEqual(meta_graph_def.meta_info_def.tensorflow_git_version,
"")
collection_def = meta_graph_def.collection_def
self.assertEqual(len(collection_def), 12)
with ops_lib.Graph().as_default():
# Restores from MetaGraphDef.
new_saver = saver_module.import_meta_graph(filename)
# Generates a new MetaGraphDef.
new_meta_graph_def = new_saver.export_meta_graph()
# It should be the same as the original.
test_util.assert_meta_graph_protos_equal(
self, meta_graph_def, new_meta_graph_def)
def testAddCollectionDefFails(self):
with self.test_session():
# Creates a graph.
v0 = variables.Variable(10.0, name="v0")
# Creates a saver.
save = saver_module.Saver({"v0": v0})
# Generates MetaGraphDef.
meta_graph_def = meta_graph_pb2.MetaGraphDef()
      # Verifies that a collection with an unsupported key is not added.
ops_lib.add_to_collection(save, 3)
save._add_collection_def(meta_graph_def, save)
self.assertEqual(len(meta_graph_def.collection_def), 0)
      # Verifies that a collection whose item type does not match the
      # expected type is not added.
ops_lib.add_to_collection("int_collection", 3)
ops_lib.add_to_collection("int_collection", 3.5)
save._add_collection_def(meta_graph_def, "int_collection")
self.assertEqual(len(meta_graph_def.collection_def), 0)
def _testMultiSaverCollectionSave(self, test_dir):
filename = os.path.join(test_dir, "metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
saver1_ckpt = os.path.join(test_dir, "saver1.ckpt")
with self.test_session(graph=ops_lib.Graph()) as sess:
# Creates a graph.
v0 = variables.Variable([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], name="v0")
v1 = variables.Variable(11.0, name="v1")
# Creates 2 savers.
saver0 = saver_module.Saver({"v0": v0}, name="saver0")
saver1 = saver_module.Saver({"v1": v1}, name="saver1")
ops_lib.add_to_collection("savers", saver0)
ops_lib.add_to_collection("savers", saver1)
variables.global_variables_initializer().run()
# Saves to different checkpoints.
saver0.save(sess, saver0_ckpt)
saver1.save(sess, saver1_ckpt)
# Generates MetaGraphDef.
meta_graph_def = saver_module.export_meta_graph(filename)
meta_graph_def0 = saver0.export_meta_graph()
meta_graph_def1 = saver1.export_meta_graph()
# Verifies that there is no saver_def in meta_graph_def.
self.assertFalse(meta_graph_def.HasField("saver_def"))
# Verifies that there is saver_def in meta_graph_def0 and 1.
self.assertTrue(meta_graph_def0.HasField("saver_def"))
self.assertTrue(meta_graph_def1.HasField("saver_def"))
# Verifies SAVERS is saved as bytes_list for meta_graph_def.
collection_def = meta_graph_def.collection_def["savers"]
kind = collection_def.WhichOneof("kind")
self.assertEqual(kind, "bytes_list")
# Verifies that there are 2 entries in SAVERS collection.
savers = getattr(collection_def, kind)
self.assertEqual(2, len(savers.value))
# Verifies SAVERS collection is saved as bytes_list for meta_graph_def0.
collection_def = meta_graph_def0.collection_def["savers"]
kind = collection_def.WhichOneof("kind")
self.assertEqual(kind, "bytes_list")
# Verifies that there are 2 entries in SAVERS collection.
savers = getattr(collection_def, kind)
self.assertEqual(2, len(savers.value))
def _testMultiSaverCollectionRestore(self, test_dir):
filename = os.path.join(test_dir, "metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
saver1_ckpt = os.path.join(test_dir, "saver1.ckpt")
with self.test_session(graph=ops_lib.Graph()) as sess:
# Imports from meta_graph.
saver_module.import_meta_graph(filename)
# Retrieves SAVERS collection. Verifies there are 2 entries.
savers = ops_lib.get_collection("savers")
self.assertEqual(2, len(savers))
# Retrieves saver0. Verifies that new_saver0 can restore v0, but not v1.
new_saver0 = savers[0]
new_saver0.restore(sess, saver0_ckpt)
v0 = sess.graph.get_tensor_by_name("v0:0")
v1 = sess.graph.get_tensor_by_name("v1:0")
self.assertAllEqual([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], v0.eval())
self.assertEqual([3, 2], v0.get_shape())
self.assertEqual([], v1.get_shape())
with self.assertRaisesWithPredicateMatch(
errors_impl.OpError, lambda e: "uninitialized value v1" in e.message):
sess.run(v1)
# Retrieves saver1. Verifies that new_saver1 can restore v1.
new_saver1 = savers[1]
new_saver1.restore(sess, saver1_ckpt)
v1 = sess.graph.get_tensor_by_name("v1:0")
self.assertEqual(11.0, v1.eval())
def testMultiSaverCollection(self):
test_dir = self._get_test_dir("saver_collection")
self._testMultiSaverCollectionSave(test_dir)
self._testMultiSaverCollectionRestore(test_dir)
def testClearExtraneousSavers(self):
test_dir = self._get_test_dir("clear_extraneous_savers")
filename = os.path.join(test_dir, "metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
saver1_ckpt = os.path.join(test_dir, "saver1.ckpt")
with self.test_session(graph=ops_lib.Graph()) as sess:
# Creates a graph.
v0 = variables.Variable([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], name="v0")
v1 = variables.Variable(11.0, name="v1")
# Creates 2 savers.
saver0 = saver_module.Saver({"v0": v0}, name="saver0")
saver1 = saver_module.Saver({"v1": v1}, name="saver1")
ops_lib.add_to_collection("savers", saver0)
ops_lib.add_to_collection("savers", saver1)
variables.global_variables_initializer().run()
# Saves to different checkpoints.
saver0.save(sess, saver0_ckpt)
saver1.save(sess, saver1_ckpt)
# Generates MetaGraphDef.
meta_graph_def = saver_module.export_meta_graph(filename)
meta_graph_def0 = saver0.export_meta_graph()
meta_graph_def1 = saver1.export_meta_graph(clear_extraneous_savers=True)
# Verifies that there is no saver_def in meta_graph_def.
self.assertFalse(meta_graph_def.HasField("saver_def"))
# Verifies that there is saver_def in meta_graph_def0 and 1.
self.assertTrue(meta_graph_def0.HasField("saver_def"))
self.assertTrue(meta_graph_def1.HasField("saver_def"))
# Verifies SAVERS is saved as bytes_list for meta_graph_def.
collection_def = meta_graph_def.collection_def["savers"]
kind = collection_def.WhichOneof("kind")
self.assertEqual(kind, "bytes_list")
# Verifies that there are 2 entries in SAVERS collection.
savers = getattr(collection_def, kind)
self.assertEqual(2, len(savers.value))
# Verifies SAVERS collection is saved as bytes_list for meta_graph_def1.
collection_def = meta_graph_def1.collection_def["savers"]
kind = collection_def.WhichOneof("kind")
self.assertEqual(kind, "bytes_list")
# Verifies that there is 1 entry in SAVERS collection.
savers = getattr(collection_def, kind)
self.assertEqual(1, len(savers.value))
# Verifies that saver0 graph nodes are omitted from the saver1 export
self.assertEqual(29, len(meta_graph_def0.graph_def.node))
self.assertEqual(19, len(meta_graph_def1.graph_def.node))
def testBinaryAndTextFormat(self):
test_dir = self._get_test_dir("binary_and_text")
filename = os.path.join(test_dir, "metafile")
with self.test_session(graph=ops_lib.Graph()):
# Creates a graph.
variables.Variable(10.0, name="v0")
# Exports the graph as binary format.
saver_module.export_meta_graph(filename, as_text=False)
with self.test_session(graph=ops_lib.Graph()):
# Imports the binary format graph.
saver = saver_module.import_meta_graph(filename)
self.assertIsNotNone(saver)
# Exports the graph as text format.
saver.export_meta_graph(filename, as_text=True)
with self.test_session(graph=ops_lib.Graph()):
# Imports the text format graph.
saver_module.import_meta_graph(filename)
# Writes wrong contents to the file.
graph_io.write_graph(saver.as_saver_def(),
os.path.dirname(filename),
os.path.basename(filename))
with self.test_session(graph=ops_lib.Graph()):
# Import should fail.
      with self.assertRaisesWithPredicateMatch(
          IOError, lambda e: "Cannot parse file" in str(e)):
saver_module.import_meta_graph(filename)
# Deletes the file
gfile.Remove(filename)
    with self.assertRaisesWithPredicateMatch(
        IOError, lambda e: "does not exist" in str(e)):
saver_module.import_meta_graph(filename)
def testSliceVariable(self):
test_dir = self._get_test_dir("slice_saver")
filename = os.path.join(test_dir, "metafile")
with self.test_session():
v1 = variables.Variable([20.0], name="v1")
v2 = variables.Variable([20.0], name="v2")
v2._set_save_slice_info(
variables.Variable.SaveSliceInfo("v1", [1], [0], [1]))
# The names are different and will work.
slice_saver = saver_module.Saver({"first": v1, "second": v2})
variables.global_variables_initializer().run()
# Exports to meta_graph
meta_graph_def = slice_saver.export_meta_graph(filename)
with ops_lib.Graph().as_default():
# Restores from MetaGraphDef.
new_saver = saver_module.import_meta_graph(filename)
self.assertIsNotNone(new_saver)
# Generates a new MetaGraphDef.
new_meta_graph_def = new_saver.export_meta_graph()
# It should be the same as the original.
self.assertProtoEquals(meta_graph_def, new_meta_graph_def)
def _testGraphExtensionSave(self, test_dir):
filename = os.path.join(test_dir, "metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
# Creates an inference graph.
# Hidden 1
images = constant_op.constant(1.2, dtypes.float32, shape=[100, 28])
with ops_lib.name_scope("hidden1"):
weights = variables.Variable(
random_ops.truncated_normal(
[28, 128], stddev=1.0 / math.sqrt(float(28))),
name="weights")
      # The use of control_flow_ops.cond here is purely to add test coverage
      # for saving and restoring a control flow context (which makes no sense
      # here from a machine learning perspective). Typically, the biases
      # would be a simple Variable without the condition.
biases = variables.Variable(
control_flow_ops.cond(
math_ops.less(random.random(), 0.5),
lambda: array_ops.ones([128]), lambda: array_ops.zeros([128])),
name="biases")
hidden1 = nn_ops.relu(math_ops.matmul(images, weights) + biases)
# Hidden 2
with ops_lib.name_scope("hidden2"):
weights = variables.Variable(
random_ops.truncated_normal(
[128, 32], stddev=1.0 / math.sqrt(float(128))),
name="weights")
      # The use of control_flow_ops.while_loop here is purely to add test
      # coverage for saving and restoring a control flow context (which makes
      # no sense here from a machine learning perspective). Typically, the
      # biases would be a simple Variable without the loop.
def loop_cond(it, _):
return it < 2
def loop_body(it, biases):
biases += constant_op.constant(0.1, shape=[32])
return it + 1, biases
_, biases = control_flow_ops.while_loop(
loop_cond, loop_body,
[constant_op.constant(0), variables.Variable(array_ops.zeros([32]))])
hidden2 = nn_ops.relu(math_ops.matmul(hidden1, weights) + biases)
# Linear
with ops_lib.name_scope("softmax_linear"):
weights = variables.Variable(
random_ops.truncated_normal(
[32, 10], stddev=1.0 / math.sqrt(float(32))),
name="weights")
biases = variables.Variable(array_ops.zeros([10]), name="biases")
logits = math_ops.matmul(hidden2, weights) + biases
ops_lib.add_to_collection("logits", logits)
init_all_op = variables.global_variables_initializer()
with self.test_session() as sess:
# Initializes all the variables.
sess.run(init_all_op)
# Runs to logit.
sess.run(logits)
# Creates a saver.
saver0 = saver_module.Saver()
saver0.save(sess, saver0_ckpt)
# Generates MetaGraphDef.
saver0.export_meta_graph(filename)
def _testGraphExtensionRestore(self, test_dir):
filename = os.path.join(test_dir, "metafile")
train_filename = os.path.join(test_dir, "train_metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
with self.test_session(graph=ops_lib.Graph()) as sess:
# Restores from MetaGraphDef.
new_saver = saver_module.import_meta_graph(filename)
# Generates a new MetaGraphDef.
new_saver.export_meta_graph()
# Restores from checkpoint.
new_saver.restore(sess, saver0_ckpt)
# Adds loss and train.
labels = constant_op.constant(0, dtypes.int32, shape=[100], name="labels")
batch_size = array_ops.size(labels)
labels = array_ops.expand_dims(labels, 1)
indices = array_ops.expand_dims(math_ops.range(0, batch_size), 1)
concated = array_ops.concat([indices, labels], 1)
onehot_labels = sparse_ops.sparse_to_dense(
concated, array_ops.stack([batch_size, 10]), 1.0, 0.0)
logits = ops_lib.get_collection("logits")[0]
cross_entropy = nn_ops.softmax_cross_entropy_with_logits(
labels=onehot_labels, logits=logits, name="xentropy")
loss = math_ops.reduce_mean(cross_entropy, name="xentropy_mean")
summary.scalar("loss", loss)
# Creates the gradient descent optimizer with the given learning rate.
optimizer = gradient_descent.GradientDescentOptimizer(0.01)
      # Creates the train_op.
train_op = optimizer.minimize(loss)
ops_lib.add_to_collection("train_op", train_op)
# Runs train_op.
sess.run(train_op)
# Generates MetaGraphDef.
saver_module.export_meta_graph(train_filename)
def _testRestoreFromTrainGraphWithControlContext(self, test_dir):
train_filename = os.path.join(test_dir, "train_metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
with self.test_session(graph=ops_lib.Graph()) as sess:
# Restores from MetaGraphDef.
new_saver = saver_module.import_meta_graph(train_filename)
# Restores from checkpoint.
new_saver.restore(sess, saver0_ckpt)
train_op = ops_lib.get_collection("train_op")[0]
sess.run(train_op)
def testGraphExtension(self):
test_dir = self._get_test_dir("graph_extension")
self._testGraphExtensionSave(test_dir)
self._testGraphExtensionRestore(test_dir)
self._testRestoreFromTrainGraphWithControlContext(test_dir)
def testStrippedOpListDef(self):
with self.test_session():
# Creates a graph.
v0 = variables.Variable(0.0)
var = variables.Variable(10.0)
math_ops.add(v0, var)
@function.Defun(dtypes.float32)
def minus_one(x):
return x - 1
minus_one(array_ops.identity(v0))
save = saver_module.Saver({"v0": v0})
variables.global_variables_initializer()
# Generates MetaGraphDef.
meta_graph_def = save.export_meta_graph()
ops = [o.name for o in meta_graph_def.meta_info_def.stripped_op_list.op]
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(ops, [
"Add", "Assign", "Const", "Identity", "NoOp", "RestoreV2",
"SaveSlices", "Sub", "VariableV2"
])
else:
self.assertEqual(ops, [
"Add", "Assign", "Const", "Identity", "NoOp", "RestoreV2", "SaveV2",
"Sub", "VariableV2"
])
# Test calling stripped_op_list_for_graph directly
op_list = meta_graph.stripped_op_list_for_graph(meta_graph_def.graph_def)
self.assertEqual(ops, [o.name for o in op_list.op])
for o in op_list.op:
self.assertEqual(o.summary, "")
self.assertEqual(o.description, "")
def testImportIntoNamescope(self):
    # Test that we can import a meta graph into a name scope.
test_dir = self._get_test_dir("import_into_namescope")
filename = os.path.join(test_dir, "ckpt")
image = array_ops.placeholder(dtypes.float32, [None, 784], name="image")
label = array_ops.placeholder(dtypes.float32, [None, 10], name="label")
with session.Session() as sess:
weights = variables.Variable(
random_ops.random_uniform([784, 10]), name="weights")
bias = variables.Variable(array_ops.zeros([10]), name="bias")
logit = nn_ops.relu(math_ops.matmul(image, weights) + bias, name="logits")
nn_ops.softmax(logit, name="prediction")
cost = nn_ops.softmax_cross_entropy_with_logits(labels=label,
logits=logit, name="cost")
adam.AdamOptimizer().minimize(cost, name="optimize")
saver = saver_module.Saver()
sess.run(variables.global_variables_initializer())
saver.save(sess, filename)
graph = ops_lib.Graph()
with session.Session(graph=graph) as sess:
new_saver = saver_module.import_meta_graph(
filename + ".meta", graph=graph, import_scope="new_model")
new_saver.restore(sess, filename)
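      # import_meta_graph prefixed every op and tensor name with the import
      # scope, so fetches and feeds must use the "new_model/" prefix.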
sess.run(["new_model/optimize"], {
"new_model/image:0": np.random.random([1, 784]),
"new_model/label:0": np.random.randint(
10, size=[1, 10])
})
def testClearDevicesOnImport(self):
# Test that we import a graph without its devices and run successfully.
with ops_lib.Graph().as_default():
with ops_lib.device("/job:ps/replica:0/task:0/device:GPU:0"):
image = array_ops.placeholder(dtypes.float32, [None, 784], name="image")
label = array_ops.placeholder(dtypes.float32, [None, 10], name="label")
weights = variables.Variable(
random_ops.random_uniform([784, 10]), name="weights")
bias = variables.Variable(array_ops.zeros([10]), name="bias")
logit = nn_ops.relu(math_ops.matmul(image, weights) + bias)
nn_ops.softmax(logit, name="prediction")
cost = nn_ops.softmax_cross_entropy_with_logits(labels=label,
logits=logit)
adam.AdamOptimizer().minimize(cost, name="optimize")
meta_graph_def = saver_module.export_meta_graph()
with session.Session(graph=ops_lib.Graph()) as sess:
saver_module.import_meta_graph(
meta_graph_def, clear_devices=False, import_scope="new_model")
# Device refers to GPU, which is not available here.
with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run(variables.global_variables_initializer())
with session.Session(graph=ops_lib.Graph()) as sess:
saver_module.import_meta_graph(
meta_graph_def, clear_devices=True, import_scope="new_model")
sess.run(variables.global_variables_initializer())
sess.run(["new_model/optimize"], {
"new_model/image:0": np.random.random([1, 784]),
"new_model/label:0": np.random.randint(
10, size=[1, 10])
})
def testClearDevicesOnExport(self):
# Test that we export a graph without its devices and run successfully.
with ops_lib.Graph().as_default():
with ops_lib.device("/job:ps/replica:0/task:0/device:GPU:0"):
image = array_ops.placeholder(dtypes.float32, [None, 784], name="image")
label = array_ops.placeholder(dtypes.float32, [None, 10], name="label")
weights = variables.Variable(
random_ops.random_uniform([784, 10]), name="weights")
bias = variables.Variable(array_ops.zeros([10]), name="bias")
logit = nn_ops.relu(math_ops.matmul(image, weights) + bias)
nn_ops.softmax(logit, name="prediction")
cost = nn_ops.softmax_cross_entropy_with_logits(labels=label,
logits=logit)
adam.AdamOptimizer().minimize(cost, name="optimize")
meta_graph_def = saver_module.export_meta_graph(clear_devices=True)
graph_io.write_graph(meta_graph_def, self.get_temp_dir(),
"meta_graph.pbtxt")
with session.Session(graph=ops_lib.Graph()) as sess:
saver_module.import_meta_graph(meta_graph_def, import_scope="new_model")
sess.run(variables.global_variables_initializer())
sess.run(["new_model/optimize"], {
"new_model/image:0": np.random.random([1, 784]),
"new_model/label:0": np.random.randint(
10, size=[1, 10])
})
def testPreserveDatasetAndFunctions(self):
with ops_lib.Graph().as_default() as g:
dataset = dataset_ops.Dataset.range(10).map(lambda x: x * x)
iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()
_ = array_ops.identity(next_element, name="output")
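      # The map() lambda is serialized as a function in the graph; the loop
      # below checks that it survives each export/import round trip.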
# Generate three MetaGraphDef protos using different code paths.
meta_graph_def_simple = saver_module.export_meta_graph()
meta_graph_def_devices_cleared = saver_module.export_meta_graph(
clear_devices=True)
meta_graph_def_from_graph_def = saver_module.export_meta_graph(
clear_devices=True, graph_def=g.as_graph_def())
for meta_graph_def in [meta_graph_def_simple,
meta_graph_def_devices_cleared,
meta_graph_def_from_graph_def]:
with session.Session(graph=ops_lib.Graph()) as sess:
saver_module.import_meta_graph(meta_graph_def, import_scope="new_model")
sess.run(variables.global_variables_initializer())
for i in range(10):
self.assertEqual(i * i, sess.run("new_model/output:0"))
with self.assertRaises(errors.OutOfRangeError):
sess.run("new_model/output:0")
class CheckpointReaderTest(test.TestCase):
_WRITE_VERSION = saver_pb2.SaverDef.V1
def testDebugString(self):
# Builds a graph.
v0 = variables.Variable(
[[1, 2, 3], [4, 5, 6]], dtype=dtypes.float32, name="v0")
v1 = variables.Variable(
[[[1], [2]], [[3], [4]], [[5], [6]]], dtype=dtypes.float32, name="v1")
init_all_op = variables.global_variables_initializer()
save = saver_module.Saver(
{
"v0": v0,
"v1": v1
}, write_version=self._WRITE_VERSION)
save_path = os.path.join(self.get_temp_dir(),
"ckpt_for_debug_string" + str(self._WRITE_VERSION))
with self.test_session() as sess:
sess.run(init_all_op)
# Saves a checkpoint.
save.save(sess, save_path)
# Creates a reader.
reader = pywrap_tensorflow.NewCheckpointReader(save_path)
# Verifies that the tensors exist.
self.assertTrue(reader.has_tensor("v0"))
self.assertTrue(reader.has_tensor("v1"))
debug_string = reader.debug_string()
# Verifies that debug string contains the right strings.
self.assertTrue(compat.as_bytes("v0 (DT_FLOAT) [2,3]") in debug_string)
self.assertTrue(compat.as_bytes("v1 (DT_FLOAT) [3,2,1]") in debug_string)
# Verifies get_variable_to_shape_map() returns the correct information.
var_map = reader.get_variable_to_shape_map()
self.assertEqual([2, 3], var_map["v0"])
self.assertEqual([3, 2, 1], var_map["v1"])
# Verifies get_tensor() returns the tensor value.
v0_tensor = reader.get_tensor("v0")
v1_tensor = reader.get_tensor("v1")
self.assertAllEqual(v0.eval(), v0_tensor)
self.assertAllEqual(v1.eval(), v1_tensor)
# Verifies get_tensor() fails for non-existent tensors.
with self.assertRaisesRegexp(errors.NotFoundError,
"v3 not found in checkpoint"):
reader.get_tensor("v3")
def testNonexistentPath(self):
with self.assertRaisesRegexp(errors.NotFoundError,
"Unsuccessful TensorSliceReader"):
pywrap_tensorflow.NewCheckpointReader("non-existent")
class CheckpointReaderForV2Test(CheckpointReaderTest):
_WRITE_VERSION = saver_pb2.SaverDef.V2
class WriteGraphTest(test.TestCase):
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
def testWriteGraph(self):
test_dir = self._get_test_dir("write_graph_dir")
variables.Variable([[1, 2, 3], [4, 5, 6]], dtype=dtypes.float32, name="v0")
path = graph_io.write_graph(ops_lib.get_default_graph(),
os.path.join(test_dir, "l1"), "graph.pbtxt")
truth = os.path.join(test_dir, "l1", "graph.pbtxt")
self.assertEqual(path, truth)
self.assertTrue(os.path.exists(path))
def testRecursiveCreate(self):
test_dir = self._get_test_dir("deep_dir")
variables.Variable([[1, 2, 3], [4, 5, 6]], dtype=dtypes.float32, name="v0")
path = graph_io.write_graph(ops_lib.get_default_graph().as_graph_def(),
os.path.join(test_dir, "l1", "l2", "l3"),
"graph.pbtxt")
truth = os.path.join(test_dir, "l1", "l2", "l3", "graph.pbtxt")
self.assertEqual(path, truth)
self.assertTrue(os.path.exists(path))
class SaverUtilsTest(test.TestCase):
def setUp(self):
self._base_dir = os.path.join(self.get_temp_dir(), "saver_utils_test")
gfile.MakeDirs(self._base_dir)
def tearDown(self):
gfile.DeleteRecursively(self._base_dir)
def testCheckpointExists(self):
for sharded in (False, True):
for version in (saver_pb2.SaverDef.V2, saver_pb2.SaverDef.V1):
with self.test_session(graph=ops_lib.Graph()) as sess:
unused_v = variables.Variable(1.0, name="v")
variables.global_variables_initializer().run()
saver = saver_module.Saver(sharded=sharded, write_version=version)
path = os.path.join(self._base_dir, "%s-%s" % (sharded, version))
self.assertFalse(
saver_module.checkpoint_exists(path)) # Not saved yet.
ckpt_prefix = saver.save(sess, path)
self.assertTrue(saver_module.checkpoint_exists(ckpt_prefix))
ckpt_prefix = saver_module.latest_checkpoint(self._base_dir)
self.assertTrue(saver_module.checkpoint_exists(ckpt_prefix))
def testGetCheckpointMtimes(self):
prefixes = []
for version in (saver_pb2.SaverDef.V2, saver_pb2.SaverDef.V1):
with self.test_session(graph=ops_lib.Graph()) as sess:
unused_v = variables.Variable(1.0, name="v")
variables.global_variables_initializer().run()
saver = saver_module.Saver(write_version=version)
prefixes.append(
saver.save(sess, os.path.join(self._base_dir, str(version))))
mtimes = saver_module.get_checkpoint_mtimes(prefixes)
self.assertEqual(2, len(mtimes))
self.assertTrue(mtimes[1] >= mtimes[0])
class ScopedGraphTest(test.TestCase):
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
def _testScopedSave(self, test_dir, exported_filename, ckpt_filename):
graph = ops_lib.Graph()
with graph.as_default():
# Creates an inference graph.
# Hidden 1
images = constant_op.constant(
1.2, dtypes.float32, shape=[100, 28], name="images")
with ops_lib.name_scope("hidden1"):
weights1 = variables.Variable(
random_ops.truncated_normal(
[28, 128], stddev=1.0 / math.sqrt(float(28))),
name="weights")
        # The use of control_flow_ops.cond here is purely to add test
        # coverage for saving and restoring a control flow context (which
        # makes no sense here from a machine learning perspective).
        # Typically, the biases would be a simple Variable without the
        # condition.
biases1 = variables.Variable(
control_flow_ops.cond(
math_ops.less(random.random(), 0.5),
lambda: array_ops.ones([128]), lambda: array_ops.zeros([128])),
name="biases")
hidden1 = nn_ops.relu(math_ops.matmul(images, weights1) + biases1)
# Hidden 2
with ops_lib.name_scope("hidden2"):
weights2 = variables.Variable(
random_ops.truncated_normal(
[128, 32], stddev=1.0 / math.sqrt(float(128))),
name="weights")
        # The use of control_flow_ops.while_loop here is purely to add test
        # coverage for saving and restoring a control flow context (which
        # makes no sense here from a machine learning perspective).
        # Typically, the biases would be a simple Variable without the loop.
def loop_cond(it, _):
return it < 2
def loop_body(it, biases2):
biases2 += constant_op.constant(0.1, shape=[32])
return it + 1, biases2
_, biases2 = control_flow_ops.while_loop(loop_cond, loop_body, [
constant_op.constant(0), variables.Variable(array_ops.zeros([32]))
])
hidden2 = nn_ops.relu(math_ops.matmul(hidden1, weights2) + biases2)
# Linear
with ops_lib.name_scope("softmax_linear"):
weights3 = variables.Variable(
random_ops.truncated_normal(
[32, 10], stddev=1.0 / math.sqrt(float(32))),
name="weights")
biases3 = variables.Variable(array_ops.zeros([10]), name="biases")
logits = math_ops.matmul(hidden2, weights3) + biases3
ops_lib.add_to_collection("logits", logits)
# Adds user_defined proto in three formats: string, bytes and Any.
# Any proto should just pass through.
queue_runner = queue_runner_pb2.QueueRunnerDef(queue_name="test_queue")
ops_lib.add_to_collection("user_defined_string_collection",
str(queue_runner))
ops_lib.add_to_collection("user_defined_bytes_collection",
queue_runner.SerializeToString())
any_buf = Any()
any_buf.Pack(queue_runner)
ops_lib.add_to_collection("user_defined_any_collection", any_buf)
_, var_list = meta_graph.export_scoped_meta_graph(
filename=os.path.join(test_dir, exported_filename),
graph=ops_lib.get_default_graph(),
export_scope="hidden1")
self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
with self.test_session(graph=graph) as sess:
sess.run(variables.global_variables_initializer())
saver = saver_module.Saver(var_list=var_list, max_to_keep=1)
saver.save(sess, os.path.join(test_dir, ckpt_filename), write_state=False)
def _testScopedRestore(self, test_dir, exported_filename,
new_exported_filename, ckpt_filename):
graph = ops_lib.Graph()
# Create all the missing inputs.
with graph.as_default():
new_image = constant_op.constant(
1.2, dtypes.float32, shape=[100, 28], name="images")
var_list = meta_graph.import_scoped_meta_graph(
os.path.join(test_dir, exported_filename),
graph=graph,
input_map={"$unbound_inputs_images": new_image},
import_scope="new_hidden1")
self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
hidden1 = graph.as_graph_element("new_hidden1/Relu:0")
weights1 = graph.as_graph_element("new_hidden1/weights:0")
biases1 = graph.as_graph_element("new_hidden1/biases:0")
with graph.as_default():
# Hidden 2
with ops_lib.name_scope("hidden2"):
weights = variables.Variable(
random_ops.truncated_normal(
[128, 32], stddev=1.0 / math.sqrt(float(128))),
name="weights")
        # The use of control_flow_ops.while_loop here is purely to add test
        # coverage for saving and restoring a control flow context (which
        # makes no sense here from a machine learning perspective).
        # Typically, the biases would be a simple Variable without the loop.
def loop_cond(it, _):
return it < 2
def loop_body(it, biases):
biases += constant_op.constant(0.1, shape=[32])
return it + 1, biases
_, biases = control_flow_ops.while_loop(loop_cond, loop_body, [
constant_op.constant(0), variables.Variable(array_ops.zeros([32]))
])
hidden2 = nn_ops.relu(math_ops.matmul(hidden1, weights) + biases)
# Linear
with ops_lib.name_scope("softmax_linear"):
weights = variables.Variable(
random_ops.truncated_normal(
[32, 10], stddev=1.0 / math.sqrt(float(32))),
name="weights")
biases = variables.Variable(array_ops.zeros([10]), name="biases")
logits = math_ops.matmul(hidden2, weights) + biases
ops_lib.add_to_collection("logits", logits)
      # The rest of the variables. var_list maps names (strings) to
      # Variables, so subtract its values rather than its keys; otherwise
      # the set difference is a no-op and the restored variables would be
      # re-initialized below.
      rest_variables = list(
          set(variables.global_variables()) - set(var_list.values()))
init_rest_op = variables.initialize_variables(rest_variables)
with self.test_session(graph=graph) as sess:
saver = saver_module.Saver(var_list=var_list, max_to_keep=1)
saver.restore(sess, os.path.join(test_dir, ckpt_filename))
# Verify that we have restored weights1 and biases1.
sess.run([weights1, biases1])
# Initialize the rest of the variables and run logits.
sess.run(init_rest_op)
sess.run(logits)
# Verifies that we can save the subgraph under "hidden1" and restore it
# into "new_hidden1" in the new graph.
def testScopedSaveAndRestore(self):
test_dir = self._get_test_dir("scoped_export_import")
ckpt_filename = "ckpt"
self._testScopedSave(test_dir, "exported_hidden1.pbtxt", ckpt_filename)
self._testScopedRestore(test_dir, "exported_hidden1.pbtxt",
"exported_new_hidden1.pbtxt", ckpt_filename)
  # Verifies that we can copy the subgraph under "hidden1" to a different
  # name scope in the same graph or in a different graph.
def testCopyScopedGraph(self):
test_dir = self._get_test_dir("scoped_copy")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
graph1 = ops_lib.Graph()
with graph1.as_default():
with ops_lib.name_scope("hidden1"):
images = constant_op.constant(
1.0, dtypes.float32, shape=[3, 2], name="images")
weights1 = variables.Variable(
[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], name="weights")
biases1 = variables.Variable([0.1] * 3, name="biases")
nn_ops.relu(math_ops.matmul(images, weights1) + biases1, name="relu")
# Run the graph and save scoped checkpoint.
with self.test_session(graph=graph1) as sess:
sess.run(variables.global_variables_initializer())
_, var_list_1 = meta_graph.export_scoped_meta_graph(
export_scope="hidden1")
saver = saver_module.Saver(var_list=var_list_1, max_to_keep=1)
saver.save(sess, saver0_ckpt, write_state=False)
expected = np.reshape([[5.0999999, 7.0999999, 9.10000038] * 3], (3, 3))
# Verifies copy to the same graph with the same name fails.
with graph1.as_default():
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "need to be different" in str(e)):
meta_graph.copy_scoped_meta_graph(
from_scope="hidden1", to_scope="hidden1")
# Verifies copy to the same graph.
with graph1.as_default():
var_list_2 = meta_graph.copy_scoped_meta_graph(
from_scope="hidden1", to_scope="hidden2")
with self.test_session(graph=graph1) as sess:
saver1 = saver_module.Saver(var_list=var_list_1, max_to_keep=1)
saver1.restore(sess, saver0_ckpt)
saver2 = saver_module.Saver(var_list=var_list_2, max_to_keep=1)
saver2.restore(sess, saver0_ckpt)
self.assertAllClose(expected, sess.run("hidden1/relu:0"))
self.assertAllClose(expected, sess.run("hidden2/relu:0"))
    # Verifies copy to a different graph.
graph2 = ops_lib.Graph()
new_var_list_1 = meta_graph.copy_scoped_meta_graph(
from_scope="hidden1",
to_scope="new_hidden1",
from_graph=graph1,
to_graph=graph2)
with self.test_session(graph=graph2) as sess:
saver3 = saver_module.Saver(var_list=new_var_list_1, max_to_keep=1)
saver3.restore(sess, saver0_ckpt)
self.assertAllClose(expected, sess.run("new_hidden1/relu:0"))
def testExportGraphDefWithScope(self):
test_dir = self._get_test_dir("export_graph_def")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
graph1 = ops_lib.Graph()
with graph1.as_default():
with ops_lib.name_scope("hidden1"):
images = constant_op.constant(
1.0, dtypes.float32, shape=[3, 2], name="images")
weights1 = variables.Variable(
[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], name="weights")
biases1 = variables.Variable([0.1] * 3, name="biases")
nn_ops.relu(math_ops.matmul(images, weights1) + biases1, name="relu")
# Run the graph and save scoped checkpoint.
with self.test_session(graph=graph1) as sess:
sess.run(variables.global_variables_initializer())
_, var_list_1 = meta_graph.export_scoped_meta_graph(
graph_def=graph1.as_graph_def(), export_scope="hidden1")
saver = saver_module.Saver(var_list=var_list_1, max_to_keep=1)
saver.save(sess, saver0_ckpt, write_state=False)
expected = np.reshape([[5.0999999, 7.0999999, 9.10000038] * 3], (3, 3))
# Verifies that we can run successfully after restoring.
graph2 = ops_lib.Graph()
new_var_list_1 = meta_graph.copy_scoped_meta_graph(
from_scope="hidden1",
to_scope="new_hidden1",
from_graph=graph1,
to_graph=graph2)
with self.test_session(graph=graph2) as sess:
saver3 = saver_module.Saver(var_list=new_var_list_1, max_to_keep=1)
saver3.restore(sess, saver0_ckpt)
self.assertAllClose(expected, sess.run("new_hidden1/relu:0"))
def testSerializeSaverWithScope(self):
test_dir = self._get_test_dir("export_graph_def")
saver1_ckpt = os.path.join(test_dir, "saver1.ckpt")
saver2_ckpt = os.path.join(test_dir, "saver2.ckpt")
graph = ops_lib.Graph()
with graph.as_default():
with ops_lib.name_scope("hidden1"):
variable1 = variables.Variable([1.0], name="variable1")
saver1 = saver_module.Saver(var_list=[variable1])
graph.add_to_collection(ops_lib.GraphKeys.SAVERS, saver1)
with ops_lib.name_scope("hidden2"):
variable2 = variables.Variable([2.0], name="variable2")
saver2 = saver_module.Saver(var_list=[variable2], name="hidden2/")
graph.add_to_collection(ops_lib.GraphKeys.SAVERS, saver2)
with self.test_session(graph=graph) as sess:
variables.global_variables_initializer().run()
saver1.save(sess, saver1_ckpt, write_state=False)
saver2.save(sess, saver2_ckpt, write_state=False)
graph1 = ops_lib.Graph()
var_dict1 = meta_graph.copy_scoped_meta_graph(
from_scope="hidden1",
to_scope="new_hidden1",
from_graph=graph,
to_graph=graph1)
self.assertEqual(1, len(var_dict1))
saver_list1 = graph1.get_collection(ops_lib.GraphKeys.SAVERS)
self.assertEqual(1, len(saver_list1))
with self.test_session(graph=graph1) as sess:
saver_list1[0].restore(sess, saver1_ckpt)
self.assertEqual(1.0, var_dict1["variable1:0"].eval())
graph2 = ops_lib.Graph()
var_dict2 = meta_graph.copy_scoped_meta_graph(
from_scope="hidden2",
to_scope="new_hidden2",
from_graph=graph,
to_graph=graph2)
self.assertEqual(1, len(var_dict2))
saver_list2 = graph2.get_collection(ops_lib.GraphKeys.SAVERS)
self.assertEqual(1, len(saver_list2))
with self.test_session(graph=graph2) as sess:
saver_list2[0].restore(sess, saver2_ckpt)
self.assertEqual(2.0, var_dict2["variable2:0"].eval())
# TODO(b/64763924): Remove after Jan 1st 2018.
class LenientNamesTest(test.TestCase):
def setUp(self):
super(LenientNamesTest, self).setUp()
os.putenv("TF_SAVER_LENIENT_NAMES", "True")
def tearDown(self):
os.putenv("TF_SAVER_LENIENT_NAMES", "")
super(LenientNamesTest, self).tearDown()
def testSaveRestore(self):
save_path = os.path.join(self.get_temp_dir(), "basic_save_restore")
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = variables.Variable(10.0, name="v0")
v1 = variables.Variable(20.0, name="v1")
v2 = saver_test_utils.CheckpointedOp(name="v2")
v2_init = v2.insert("k1", 30.0)
save = saver_module.Saver(
{
"v0:0": v0,
"v1": v1,
"v2": v2.saveable
}, restore_sequentially=True)
init_all_op = [variables.global_variables_initializer(), v2_init]
with self.test_session() as sess:
sess.run(init_all_op)
save.save(sess, save_path)
with self.test_session() as sess:
v0 = variables.Variable(-1.0, name="v0")
v1 = variables.Variable(-1.0, name="v1")
v2 = saver_test_utils.CheckpointedOp(name="v2")
save = saver_module.Saver({"v0": v0, "v1": v1, "v2": v2.saveable})
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
self.assertEqual(b"k1", v2.keys().eval())
self.assertEqual(30.0, v2.values().eval())
if __name__ == "__main__":
test.main()
|
limodou/uliweb
|
refs/heads/master
|
uliweb/utils/coloredlog.py
|
2
|
#coding=utf-8
import sys
import logging
import re
try:
import colorama
colorama.init()
except Exception:
colorama = None
_r_color_delimeter = re.compile(r'\{\{.*?\}\}')
#Available formatting constants are:
#Fore: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET.
#Back: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET.
#Style: DIM, NORMAL, BRIGHT, RESET_ALL
def colored(text, fore=None, back=None, style=None):
if colorama:
part = []
if fore:
part.append(getattr(colorama.Fore, fore.upper(), None))
if back:
part.append(getattr(colorama.Back, back.upper(), None))
if style:
part.append(getattr(colorama.Style, style.upper(), None))
part.append(text)
        # list() is required on Python 3, where filter() returns an iterator
        # without an append method.
        part = list(filter(None, part))
part.append(colorama.Fore.RESET + colorama.Back.RESET + colorama.Style.RESET_ALL)
return ''.join(part)
else:
return text
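# A minimal usage sketch (output degrades to plain text when colorama is
# not installed):
#
#   print(colored('failed', fore='red', style='bright'))
#   print(colored('ok', fore='green', back='black'))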
class ColoredStream(object):
def __init__(self, stream, color_delimeter=('{{', '}}')):
self.stream = stream
self.color_delimeter = color_delimeter
def write(self, buf):
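        # Expands inline markup of the form "{{fore|back|style:text}}";
        # trailing parts may be omitted, e.g. "{{red:error}}".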
def m(match):
c, text = match.group()[2:-2].split(':', 1)
v = list(c.split('|'))
v.extend(['', ''])
fore, back, style = v[:3]
msg = colored(text, fore, back, style)
return msg
b = _r_color_delimeter.sub(m, buf)
self.stream.write(b)
default_log_colors = {
'DEBUG': 'white',
'INFO': 'green',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'red',
}
class ColoredFormatter(logging.Formatter):
"""A formatter that allows colors to be placed in the format string.
Intended to help in creating more readable logging output."""
def __init__(self, format=None, datefmt=None,
log_colors=None, reset=True, style='%'):
"""
:Parameters:
- format (str): The format string to use
- datefmt (str): A format string for the date
- log_colors (dict):
A mapping of log level names to color names
- reset (bool):
            Implicitly append a color reset to all records unless False
- style ('%' or '{' or '$'):
The format style to use. No meaning prior to Python 3.2.
The ``format``, ``datefmt`` and ``style`` args are passed on to the
Formatter constructor.
"""
if sys.version_info > (3, 2):
super(ColoredFormatter, self).__init__(
format, datefmt, style=style)
elif sys.version_info > (2, 7):
super(ColoredFormatter, self).__init__(format, datefmt)
else:
logging.Formatter.__init__(self, format, datefmt)
        # Copy the defaults so per-instance overrides do not mutate the
        # shared module-level dict.
        self.log_colors = dict(default_log_colors)
        self.log_colors.update(log_colors or {})
self.reset = reset
def format(self, record):
        # Format the message first, then wrap it in the color configured for
        # the record's level, if any.
if sys.version_info > (2, 7):
message = super(ColoredFormatter, self).format(record)
else:
message = logging.Formatter.format(self, record)
if record.levelname in self.log_colors:
message = colored(message, self.log_colors[record.levelname])
return message
class ColoredStreamHandler(logging.StreamHandler):
def __init__(self, stream=None, color_delimeter=('{{', '}}')):
logging.StreamHandler.__init__(self, stream)
self.color_delimeter = color_delimeter
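        # Wrap the underlying stream so that inline "{{...}}" markup in
        # formatted records is expanded to ANSI sequences on write.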
self.stream = ColoredStream(self.stream, color_delimeter)
if __name__ == '__main__':
log = logging.getLogger('test')
log.addHandler(ColoredStreamHandler())
log.setLevel(logging.DEBUG)
log.info("Test {{white|red:Red text}} {{green:Green Text}} {{yellow|white|BRIGHT:bright}}")
|
chrsmithdemos/selenium
|
refs/heads/master
|
py/selenium/webdriver/common/by.py
|
44
|
# Copyright 2008-2009 WebDriver committers
# Copyright 2008-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The By implementation.
"""
class By(object):
"""
Set of supported locator strategies.
"""
ID = "id"
XPATH = "xpath"
LINK_TEXT = "link text"
PARTIAL_LINK_TEXT = "partial link text"
NAME = "name"
TAG_NAME = "tag name"
CLASS_NAME = "class name"
CSS_SELECTOR = "css selector"
@classmethod
def is_valid(cls, by):
for attr in dir(cls):
if by == getattr(cls, attr):
return True
return False
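# A minimal usage sketch (assumes an existing WebDriver instance `driver`):
#
#   element = driver.find_element(By.ID, "username")
#   links = driver.find_elements(By.CSS_SELECTOR, "a.external")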
|
FreddieShoreditch/image_folder_organiser
|
refs/heads/master
|
venv/lib/python2.7/site-packages/PIL/Jpeg2KImagePlugin.py
|
19
|
#
# The Python Imaging Library
# $Id$
#
# JPEG2000 file handling
#
# History:
# 2014-03-12 ajh Created
#
# Copyright (c) 2014 Coriolis Systems Limited
# Copyright (c) 2014 Alastair Houghton
#
# See the README file for information on usage and redistribution.
#
from PIL import Image, ImageFile
import struct
import os
import io
__version__ = "0.1"
def _parse_codestream(fp):
"""Parse the JPEG 2000 codestream to extract the size and component
count from the SIZ marker segment, returning a PIL (size, mode) tuple."""
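    # The caller has already consumed the SOC and SIZ marker codes, so the
    # next two bytes are Lsiz, the length of the SIZ marker segment.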
hdr = fp.read(2)
lsiz = struct.unpack('>H', hdr)[0]
siz = hdr + fp.read(lsiz - 2)
lsiz, rsiz, xsiz, ysiz, xosiz, yosiz, xtsiz, ytsiz, \
xtosiz, ytosiz, csiz \
= struct.unpack('>HHIIIIIIIIH', siz[:38])
ssiz = [None]*csiz
xrsiz = [None]*csiz
yrsiz = [None]*csiz
    # The per-component parameters (Ssiz, XRsiz, YRsiz) start at byte 38 of
    # the SIZ segment, immediately after the two-byte Csiz field.
    for i in range(csiz):
        ssiz[i], xrsiz[i], yrsiz[i] \
            = struct.unpack('>BBB', siz[38 + 3 * i:41 + 3 * i])
    size = (xsiz - xosiz, ysiz - yosiz)
    if csiz == 1:
        # The low 7 bits of Ssiz hold (bit depth - 1).
        if (ssiz[0] & 0x7f) > 8:
            mode = 'I;16'
        else:
            mode = 'L'
elif csiz == 2:
mode = 'LA'
elif csiz == 3:
mode = 'RGB'
elif csiz == 4:
mode = 'RGBA'
else:
mode = None
return (size, mode)
def _parse_jp2_header(fp):
"""Parse the JP2 header box to extract size, component count and
color space information, returning a PIL (size, mode) tuple."""
# Find the JP2 header box
header = None
while True:
lbox, tbox = struct.unpack('>I4s', fp.read(8))
if lbox == 1:
lbox = struct.unpack('>Q', fp.read(8))[0]
hlen = 16
else:
hlen = 8
if lbox < hlen:
raise SyntaxError('Invalid JP2 header length')
if tbox == b'jp2h':
header = fp.read(lbox - hlen)
break
else:
fp.seek(lbox - hlen, os.SEEK_CUR)
if header is None:
raise SyntaxError('could not find JP2 header')
size = None
mode = None
bpc = None
hio = io.BytesIO(header)
while True:
lbox, tbox = struct.unpack('>I4s', hio.read(8))
if lbox == 1:
lbox = struct.unpack('>Q', hio.read(8))[0]
hlen = 16
else:
hlen = 8
content = hio.read(lbox - hlen)
if tbox == b'ihdr':
height, width, nc, bpc, c, unkc, ipr \
= struct.unpack('>IIHBBBB', content)
size = (width, height)
if unkc:
if nc == 1 and (bpc & 0x7f) > 8:
mode = 'I;16'
elif nc == 1:
mode = 'L'
elif nc == 2:
mode = 'LA'
elif nc == 3:
mode = 'RGB'
elif nc == 4:
mode = 'RGBA'
break
elif tbox == b'colr':
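            # The spec requires 'ihdr' to be the first box inside 'jp2h',
            # so nc and bpc have already been read when 'colr' is reached.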
meth, prec, approx = struct.unpack('>BBB', content[:3])
if meth == 1:
cs = struct.unpack('>I', content[3:7])[0]
if cs == 16: # sRGB
if nc == 1 and (bpc & 0x7f) > 8:
mode = 'I;16'
elif nc == 1:
mode = 'L'
elif nc == 3:
mode = 'RGB'
elif nc == 4:
mode = 'RGBA'
break
elif cs == 17: # grayscale
if nc == 1 and (bpc & 0x7f) > 8:
mode = 'I;16'
elif nc == 1:
mode = 'L'
elif nc == 2:
mode = 'LA'
break
elif cs == 18: # sYCC
if nc == 3:
mode = 'RGB'
elif nc == 4:
mode = 'RGBA'
break
return (size, mode)
##
# Image plugin for JPEG2000 images.
class Jpeg2KImageFile(ImageFile.ImageFile):
format = "JPEG2000"
format_description = "JPEG 2000 (ISO 15444)"
def _open(self):
sig = self.fp.read(4)
if sig == b'\xff\x4f\xff\x51':
self.codec = "j2k"
self.size, self.mode = _parse_codestream(self.fp)
else:
sig = sig + self.fp.read(8)
if sig == b'\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a':
self.codec = "jp2"
self.size, self.mode = _parse_jp2_header(self.fp)
else:
raise SyntaxError('not a JPEG 2000 file')
if self.size is None or self.mode is None:
raise SyntaxError('unable to determine size/mode')
self.reduce = 0
self.layers = 0
fd = -1
length = -1
        # Try to obtain a real file descriptor so the codec can read the
        # stream directly; otherwise fall back to seek/tell to determine the
        # stream length.
        try:
            fd = self.fp.fileno()
            length = os.fstat(fd).st_size
        except Exception:
            fd = -1
            try:
                pos = self.fp.tell()
                self.fp.seek(0, 2)
                length = self.fp.tell()
                self.fp.seek(pos, 0)
            except Exception:
                length = -1
self.tile = [('jpeg2k', (0, 0) + self.size, 0,
(self.codec, self.reduce, self.layers, fd, length))]
def load(self):
if self.reduce:
power = 1 << self.reduce
adjust = power >> 1
self.size = (int((self.size[0] + adjust) / power),
int((self.size[1] + adjust) / power))
if self.tile:
# Update the reduce and layers settings
t = self.tile[0]
t3 = (t[3][0], self.reduce, self.layers, t[3][3], t[3][4])
self.tile = [(t[0], (0, 0) + self.size, t[2], t3)]
ImageFile.ImageFile.load(self)
def _accept(prefix):
return (prefix[:4] == b'\xff\x4f\xff\x51' or
prefix[:12] == b'\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a')
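# The two signatures above are the raw codestream SOC+SIZ marker pair
# (0xFF4F 0xFF51) and the 12-byte JP2 signature box.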
# ------------------------------------------------------------
# Save support
def _save(im, fp, filename):
if filename.endswith('.j2k'):
kind = 'j2k'
else:
kind = 'jp2'
# Get the keyword arguments
info = im.encoderinfo
offset = info.get('offset', None)
tile_offset = info.get('tile_offset', None)
tile_size = info.get('tile_size', None)
quality_mode = info.get('quality_mode', 'rates')
quality_layers = info.get('quality_layers', None)
num_resolutions = info.get('num_resolutions', 0)
cblk_size = info.get('codeblock_size', None)
precinct_size = info.get('precinct_size', None)
irreversible = info.get('irreversible', False)
progression = info.get('progression', 'LRCP')
cinema_mode = info.get('cinema_mode', 'no')
fd = -1
if hasattr(fp, "fileno"):
try:
fd = fp.fileno()
        except Exception:
fd = -1
im.encoderconfig = (
offset,
tile_offset,
tile_size,
quality_mode,
quality_layers,
num_resolutions,
cblk_size,
precinct_size,
irreversible,
progression,
cinema_mode,
fd
)
ImageFile._save(im, fp, [('jpeg2k', (0, 0)+im.size, 0, kind)])
# ------------------------------------------------------------
# Registry stuff
Image.register_open(Jpeg2KImageFile.format, Jpeg2KImageFile, _accept)
Image.register_save(Jpeg2KImageFile.format, _save)
Image.register_extension(Jpeg2KImageFile.format, '.jp2')
Image.register_extension(Jpeg2KImageFile.format, '.j2k')
Image.register_extension(Jpeg2KImageFile.format, '.jpc')
Image.register_extension(Jpeg2KImageFile.format, '.jpf')
Image.register_extension(Jpeg2KImageFile.format, '.jpx')
Image.register_extension(Jpeg2KImageFile.format, '.j2c')
Image.register_mime(Jpeg2KImageFile.format, 'image/jp2')
Image.register_mime(Jpeg2KImageFile.format, 'image/jpx')
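# A minimal usage sketch (assumes Pillow was built with OpenJPEG support;
# the keyword arguments correspond to the encoder options read in _save):
#
#   from PIL import Image
#   im = Image.open('input.jp2')
#   im.save('output.j2k', quality_mode='rates', quality_layers=[20])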
|
guillaume-philippon/aquilon
|
refs/heads/master
|
tests/broker/test_reconfigure.py
|
1
|
#!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the reconfigure command."""
import os
import re
from datetime import datetime
import unittest
if __name__ == "__main__":
from broker import utils
utils.import_depends()
from broker.brokertest import TestBrokerCommand
from broker.grntest import VerifyGrnsMixin
from broker.notificationtest import VerifyNotificationsMixin
class TestReconfigure(VerifyGrnsMixin, VerifyNotificationsMixin,
TestBrokerCommand):
linux_version_prev = None
linux_version_curr = None
# Note that some tests for reconfigure --list appear in
# test_make_aquilon.py.
@classmethod
def setUpClass(cls):
super(TestReconfigure, cls).setUpClass()
cls.linux_version_prev = cls.config.get("unittest",
"linux_version_prev")
cls.linux_version_curr = cls.config.get("unittest",
"linux_version_curr")
def test_1000_edit_machine_plenary(self):
# "aq reconfigure" should refresh the machine plenary. We verify that by
# intentionally breaking it first.
path = self.plenary_name("machine", "americas", "ut", "ut9",
"ut9s03p45")
with open(path, "a") as fp:
fp.write('\n"broken" = "template";\n')
command = ["cat", "--machine=ut9s03p45"]
out = self.commandtest(command)
self.matchoutput(out, '"broken" = "template";', command)
def test_1001_reconfigure_aquilon95(self):
command = ["reconfigure", "--hostname=aquilon95.aqd-unittest.ms.com"]
self.successtest(command)
def test_1002_verify_machine_plenary(self):
command = ["cat", "--machine=ut9s03p45"]
out = self.commandtest(command)
self.matchclean(out, "broken", command)
self.matchoutput(out, '"sysloc/room" = "utroom2";', command)
self.matchoutput(out, '"sysloc/bunker" = "bucket2.ut";', command)
self.matchoutput(out, '"sysloc/building" = "ut";', command)
self.matchoutput(out, '"sysloc/city" = "ny";', command)
self.matchoutput(out, '"sysloc/continent" = "na";', command)
self.matchoutput(out, '"sysloc/country" = "us";', command)
def test_1010_reconfigurelist_grn_pre(self):
hosts = ["aquilon95.aqd-unittest.ms.com",
"aquilon91.aqd-unittest.ms.com"]
for h in hosts:
command = "show host --hostname %s" % h
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Owned by GRN: grn:/ms/ei/aquilon/unittest", command)
def test_1011_list_grn(self):
hosts = ["aquilon95.aqd-unittest.ms.com",
"aquilon91.aqd-unittest.ms.com"]
scratchfile = self.writescratch("grnlist", "\n".join(hosts))
command = ["reconfigure", "--list", scratchfile,
"--grn=grn:/ms/ei/aquilon/aqd"]
self.successtest(command)
def test_1015_reconfigurelist_grn_post(self):
hosts = ["aquilon95.aqd-unittest.ms.com",
"aquilon91.aqd-unittest.ms.com"]
for h in hosts:
command = "show host --hostname %s" % h
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Owned by GRN: grn:/ms/ei/aquilon/aqd", command)
def test_1020_reconfigurelist_cleargrn_pre(self):
hosts = ["aquilon95.aqd-unittest.ms.com"]
for h in hosts:
command = "show host --hostname %s" % h
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Owned by GRN: grn:/ms/ei/aquilon/aqd", command)
def test_1021_reconfigurelist_cleargrn(self):
hosts = ["aquilon95.aqd-unittest.ms.com"]
scratchfile = self.writescratch("grnlist", "\n".join(hosts))
command = ["reconfigure", "--list", scratchfile, "--cleargrn"]
self.statustest(command)
def test_1025_reconfigurelist_cleargrn_post(self):
hosts = ["aquilon95.aqd-unittest.ms.com"]
for h in hosts:
command = "show host --hostname %s" % h
out = self.commandtest(command.split(" "))
self.searchclean(out, "^ Owned by GRN", command)
def test_1030_reconfigure_cleargrn(self):
command = "show host --hostname aquilon91.aqd-unittest.ms.com"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Owned by GRN: grn:/ms/ei/aquilon/aqd", command)
command = ["reconfigure", "--hostname", "aquilon91.aqd-unittest.ms.com",
"--cleargrn"]
out = self.successtest(command)
command = "show host --hostname aquilon91.aqd-unittest.ms.com"
out = self.commandtest(command.split(" "))
self.searchclean(out, "^ Owned by GRN", command)
def test_1040_reconfigure_membersof_cluster(self):
# This will exercise the cluster-aligned services code,
# which does not kick in at 'make' time because the hosts
# have not been bound to clusters yet.
command = ["reconfigure", "--membersof", "utecl1"]
out = self.statustest(command)
self.matchoutput(out, "/3 template(s) being processed",
command)
def test_1040_reconfigure_membersof_metacluster(self):
command = ["reconfigure", "--membersof", "utmc1"]
out = self.statustest(command)
self.matchoutput(out, "/5 template(s) being processed",
command)
def test_1050_cat_unittest02_pre(self):
command = "cat --hostname unittest02.one-nyp.ms.com --data"
out = self.commandtest(command.split(" "))
self.matchoutput(out, '"system/build" = "build";', command)
self.matchoutput(out, '"system/owner_eon_id" = %d;' %
self.grns["grn:/ms/ei/aquilon/unittest"], command)
    # The rebind test changed the service binding for afs; it is currently
    # set to q.ln.ms.com. The reconfigure will force it *back* to a correct
    # service map entry, in this case q.ny.ms.com.
def test_1051_reconfigure_unittest02(self):
basetime = datetime.now()
command = ["reconfigure", "--hostname", "unittest02.one-nyp.ms.com",
"--buildstatus", "ready", "--grn", "grn:/ms/ei/aquilon/aqd",
"--comments", "New host comments"]
err = self.statustest(command)
self.matchoutput(err,
"unittest02.one-nyp.ms.com adding binding for "
"service instance afs/q.ny.ms.com",
command)
self.matchoutput(err,
"unittest02.one-nyp.ms.com removing binding for "
"service instance afs/q.ln.ms.com",
command)
self.matchoutput(err, "Index rebuild and notifications will happen in "
"the background.", command)
self.wait_notification(basetime, 1)
def test_1055_show_unittest02(self):
command = "show host --hostname unittest02.one-nyp.ms.com"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Build Status: ready", command)
self.matchoutput(out, "Advertise Status: True", command)
self.matchoutput(out, "Owned by GRN: grn:/ms/ei/aquilon/aqd", command)
self.matchoutput(out, "Host Comments: New host comments", command)
def test_1055_cat_unittest02_data(self):
command = "cat --hostname unittest02.one-nyp.ms.com --data"
out = self.commandtest(command.split(" "))
self.matchoutput(out,
"structure template hostdata/unittest02.one-nyp.ms.com;",
command)
self.matchoutput(out,
'"hardware" = create("machine/americas/ut/ut3/ut3c5n10");',
command)
self.searchoutput(out,
r'"system/network/interfaces/eth0" = nlist\(\s*'
r'"bootproto", "static",\s*'
r'"broadcast", "%s",\s*'
r'"fqdn", "unittest02.one-nyp.ms.com",\s*'
r'"gateway", "%s",\s*'
r'"ip", "%s",\s*'
r'"netmask", "%s",\s*'
r'"network_environment", "internal",\s*'
r'"network_type", "unknown",\s*'
r'"route", list\(\s*'
r'nlist\(\s*'
r'"address", "250.250.0.0",\s*'
r'"gateway", "%s",\s*'
r'"netmask", "255.255.0.0"\s*\)\s*'
r'\)\s*\)' %
(self.net["unknown0"].broadcast,
self.net["unknown0"].gateway,
self.net["unknown0"].usable[0],
self.net["unknown0"].netmask,
self.net["unknown0"].gateway),
command)
self.matchoutput(out, '"system/advertise_status" = true;', command)
self.matchoutput(out, '"system/owner_eon_id" = %d;' %
self.grns["grn:/ms/ei/aquilon/aqd"], command)
command = "cat --hostname unittest02.one-nyp.ms.com"
out = self.commandtest(command.split(" "))
self.matchoutput(out,
"object template unittest02.one-nyp.ms.com;",
command)
self.searchoutput(out,
r'variable LOADPATH = list\(\s*"aquilon"\s*\);',
command)
self.matchoutput(out,
"""include { "archetype/base" };""",
command)
self.matchoutput(out,
"""\"/\" = create(\"hostdata/unittest02.one-nyp.ms.com\"""",
command)
self.matchoutput(out,
'include { "os/linux/%s/config" };' %
self.linux_version_prev,
command)
self.matchoutput(out,
"""include { "service/afs/q.ny.ms.com/client/config" };""",
command)
self.matchoutput(out,
"""include { "service/bootserver/unittest/client/config" };""",
command)
self.matchoutput(out,
"""include { "service/dns/unittest/client/config" };""",
command)
self.matchoutput(out,
"""include { "service/ntp/pa.ny.na/client/config" };""",
command)
self.matchoutput(out,
"""include { "personality/compileserver/config" };""",
command)
self.matchoutput(out,
"""include { "archetype/final" };""",
command)
def test_1056_clear_comments(self):
command = ["reconfigure", "--hostname", "unittest02.one-nyp.ms.com",
"--comments", ""]
self.statustest(command)
def test_1057_verify_comments(self):
command = ["show_host", "--hostname", "unittest02.one-nyp.ms.com"]
out = self.commandtest(command)
self.matchclean(out, "Host Comments", command)
# These settings have not changed - the command should still succeed.
    def test_1060_reconfigure_unittest00(self):
basetime = datetime.now()
command = ["reconfigure", "--hostname", "unittest00.one-nyp.ms.com"]
err = self.statustest(command)
self.matchoutput(err, "1/1 template", command)
self.matchclean(err, "removing binding", command)
self.matchclean(err, "adding binding", command)
self.matchoutput(err, "Index rebuild and notifications will happen in "
"the background.", command)
self.wait_notification(basetime, 1)
def test_1065_cat_unittest00_data(self):
command = "cat --hostname unittest00.one-nyp.ms.com --data"
out = self.commandtest(command.split(" "))
self.matchoutput(out,
"structure template hostdata/unittest00.one-nyp.ms.com;",
command)
self.matchoutput(out,
'"hardware" = create("machine/americas/ut/ut3/ut3c1n3");',
command)
self.searchoutput(out,
r'"system/network/interfaces/eth0" = nlist\(\s*'
r'"bootproto", "static",\s*'
r'"broadcast", "%s",\s*'
r'"fqdn", "unittest00.one-nyp.ms.com",\s*'
r'"gateway", "%s",\s*'
r'"ip", "%s",\s*'
r'"netmask", "%s",\s*'
r'"network_environment", "internal",\s*'
r'"network_type", "unknown",\s*'
r'"route", list\(\s*'
r'nlist\(\s*'
r'"address", "250.250.0.0",\s*'
r'"gateway", "%s",\s*'
r'"netmask", "255.255.0.0"\s*\)\s*'
r'\)\s*\)' %
(self.net["unknown0"].broadcast,
self.net["unknown0"].gateway,
self.net["unknown0"].usable[2],
self.net["unknown0"].netmask,
self.net["unknown0"].gateway),
command)
self.searchoutput(out,
r'"system/network/interfaces/eth1" = nlist\(\s*'
r'"bootproto", "static",\s*'
r'"broadcast", "%s",\s*'
r'"fqdn", "unittest00-e1.one-nyp.ms.com",\s*'
r'"gateway", "%s",\s*'
r'"ip", "%s",\s*'
r'"netmask", "%s",\s*'
r'"network_environment", "internal",\s*'
r'"network_type", "unknown",\s*'
r'"route", list\(\s*'
r'nlist\(\s*'
r'"address", "250.250.0.0",\s*'
r'"gateway", "%s",\s*'
r'"netmask", "255.255.0.0"\s*\)\s*'
r'\)\s*\)' %
(self.net["unknown0"].broadcast,
self.net["unknown0"].gateway,
self.net["unknown0"].usable[3],
self.net["unknown0"].netmask,
self.net["unknown0"].gateway),
command)
self.matchoutput(out, '"system/advertise_status" = false;', command)
def test_1065_cat_unittest00(self):
command = "cat --hostname unittest00.one-nyp.ms.com"
out = self.commandtest(command.split(" "))
self.matchoutput(out,
"""include { "archetype/base" };""",
command)
self.matchoutput(out,
"""\"/\" = create(\"hostdata/unittest00.one-nyp.ms.com\"""",
command)
self.matchoutput(out,
'include { "os/linux/%s/config" };' %
self.linux_version_prev,
command)
self.matchoutput(out,
"""include { "service/afs/q.ny.ms.com/client/config" };""",
command)
self.matchoutput(out,
"""include { "service/bootserver/unittest/client/config" };""",
command)
self.matchoutput(out,
"""include { "service/dns/unittest/client/config" };""",
command)
self.matchoutput(out,
"""include { "service/ntp/pa.ny.na/client/config" };""",
command)
self.matchoutput(out,
"""include { "personality/compileserver/config" };""",
command)
self.matchoutput(out,
"""include { "archetype/final" };""",
command)
def test_1070_reconfigure_windows_status(self):
# Not a compileable archetype, so there should be no messages from the
# compiler
command = ["reconfigure",
"--hostname", "unittest01.one-nyp.ms.com",
"--buildstatus", "ready"]
out = self.statustest(command)
self.matchoutput(out, "No object profiles: nothing to do.", command)
self.assertFalse(os.path.exists(
self.build_profile_name("unittest01.one-nyp.ms.com",
domain="ut-prod")))
def test_1071_reconfigure_windows_personality(self):
# Not a compileable archetype, so there should be no messages from the
# compiler
command = ["reconfigure", "--hostname", "unittest01.one-nyp.ms.com",
"--personality", "desktop"]
out = self.statustest(command)
self.matchoutput(out, "No object profiles: nothing to do.", command)
def test_1072_reconfigure_windows_os(self):
# Not a compileable archetype, so there should be no messages from the
# compiler
command = ["reconfigure", "--hostname", "unittest01.one-nyp.ms.com",
"--osversion", "nt61e"]
out = self.statustest(command)
self.matchoutput(out, "No object profiles: nothing to do.", command)
def test_1073_make_compileable(self):
# We need a domain which is guaranteed to be compileable...
self.statustest(["manage", "--hostname", "unittest01.one-nyp.ms.com",
"--domain", "unittest", "--force"])
self.statustest(["reconfigure", "--hostname", "unittest01.one-nyp.ms.com",
"--archetype", "aurora", "--personality", "generic",
"--osname", "linux", "--osversion", self.linux_version_prev])
self.assertTrue(os.path.exists(
self.build_profile_name("unittest01.one-nyp.ms.com",
domain="unittest")))
def test_1074_make_noncompileable(self):
self.statustest(["reconfigure", "--hostname", "unittest01.one-nyp.ms.com",
"--archetype", "windows", "--personality", "desktop",
"--osname", "windows", "--osversion", "nt61e"])
self.assertFalse(os.path.exists(
self.build_profile_name("unittest01.one-nyp.ms.com",
domain="unittest")))
self.statustest(["manage", "--hostname", "unittest01.one-nyp.ms.com",
"--domain", "ut-prod", "--force"])
def test_1075_show_unittest01(self):
command = "show host --hostname unittest01.one-nyp.ms.com"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Primary Name: unittest01.one-nyp.ms.com", command)
self.matchoutput(out, "Archetype: windows", command)
self.matchoutput(out, "Personality: desktop", command)
self.matchoutput(out, "Build Status: ready", command)
self.matchoutput(out, "Operating System: windows", command)
self.matchoutput(out, "Version: nt61e", command)
self.matchoutput(out, "Advertise Status: True", command)
self.matchoutput(out, "Domain: ut-prod", command)
def test_1080_reconfigure_os(self):
command = ["reconfigure",
"--hostname", "aquilon61.aqd-unittest.ms.com",
"--osname", "linux", "--osversion", self.linux_version_curr]
err = self.statustest(command)
self.matchoutput(err, "1/1 template", command)
self.matchclean(err, "removing binding", command)
self.matchclean(err, "adding binding", command)
def test_1085_reconfigure_os_split_args(self):
command = ["reconfigure",
"--hostname", "unittest17.aqd-unittest.ms.com",
"--osname", "linux", "--osversion", self.linux_version_curr]
err = self.statustest(command)
self.matchoutput(err, "1/1 template", command)
self.matchclean(err, "removing binding", command)
self.matchclean(err, "adding binding", command)
def test_1090_keepbindings(self):
command = ["reconfigure", "--keepbindings",
"--hostname", "aquilon86.aqd-unittest.ms.com",
"--personality", "inventory"]
err = self.statustest(command)
self.matchoutput(err, "1/1 template", command)
self.matchclean(err, "removing binding", command)
self.matchclean(err, "adding binding", command)
def test_1100_remove_bindings(self):
command = ["reconfigure",
"--hostname", "aquilon87.aqd-unittest.ms.com",
"--personality", "inventory"]
err = self.statustest(command)
self.matchoutput(err, "removing binding for service instance chooser1", command)
self.matchoutput(err, "removing binding for service instance chooser2", command)
self.matchoutput(err, "removing binding for service instance chooser3", command)
self.matchclean(err, "adding binding", command)
def test_1105_verify_services(self):
for service in ["chooser1", "chooser2", "chooser3"]:
command = ["search_host", "--service", service,
"--hostname", "aquilon87.aqd-unittest.ms.com"]
self.noouttest(command)
def test_1105_verify_plenary_data(self):
command = "cat --hostname aquilon87.aqd-unittest.ms.com --data"
out = self.commandtest(command.split(" "))
self.matchoutput(out,
"structure template hostdata/aquilon87.aqd-unittest.ms.com;",
command)
self.matchoutput(out,
'"hardware" = create("machine/americas/ut/ut9/ut9s03p37");',
command)
def test_1105_verify_plenary(self):
osversion = self.config.get("archetype_aquilon", "default_osversion")
command = "cat --hostname aquilon87.aqd-unittest.ms.com"
out = self.commandtest(command.split(" "))
self.matchclean(out, "chooser1", command)
self.matchclean(out, "chooser2", command)
self.matchclean(out, "chooser3", command)
self.matchoutput(out,
"""include { "archetype/base" };""",
command)
self.matchoutput(out,
"""\"/\" = create(\"hostdata/aquilon87.aqd-unittest.ms.com\"""",
command)
self.matchoutput(out,
'include { "os/linux/%s/config" };' % osversion,
command)
self.matchoutput(out,
"""include { "service/aqd/ny-prod/client/config" };""",
command)
self.matchoutput(out,
"""include { "service/ntp/pa.ny.na/client/config" };""",
command)
self.matchoutput(out,
"""include { "service/bootserver/unittest/client/config" };""",
command)
self.matchoutput(out,
"""include { "service/afs/q.ny.ms.com/client/config" };""",
command)
self.matchoutput(out,
"""include { "service/dns/unittest/client/config" };""",
command)
self.matchoutput(out,
"""include { "personality/inventory/config" };""",
command)
self.matchoutput(out,
"""include { "archetype/final" };""",
command)
def test_1110_reconfigure_debug(self):
command = ["reconfigure", "--debug",
"--hostname", "aquilon88.aqd-unittest.ms.com",
"--personality", "inventory"]
_, err = self.successtest(command)
self.matchoutput(err, "Creating service chooser", command)
def test_1120_reconfigure_aligned(self):
for i in range(1, 5):
command = ["reconfigure",
"--hostname", "evh%s.aqd-unittest.ms.com" % i]
self.statustest(command)
def test_1125_verify_aligned(self):
# Check that utecl1 is now aligned to a service and that
# all of its members are aligned to the same service.
# evh[234] should be bound to utecl1
command = "show esx cluster --cluster utecl1"
out = self.commandtest(command.split(" "))
m = re.search(r'Member Alignment: Service esx_management_server '
r'Instance (\S+)', out)
self.assertTrue(m, "Aligned instance not found in output:\n%s" % out)
instance = m.group(1)
# A better test might be to search for all hosts in the cluster
# and make sure they're all in this list. That search command
# does not exist yet, though.
command = ["search_host", "--service=esx_management_server",
"--instance=%s" % instance]
out = self.commandtest(command)
self.matchoutput(out, "evh2.aqd-unittest.ms.com", command)
self.matchoutput(out, "evh3.aqd-unittest.ms.com", command)
self.matchoutput(out, "evh4.aqd-unittest.ms.com", command)
def test_1130_list_camelcase(self):
hosts = ["Aquilon91.Aqd-Unittest.ms.com"]
scratchfile = self.writescratch("camelcase", "\n".join(hosts))
command = ["reconfigure", "--list", scratchfile]
self.successtest(command)
def test_1140_list_no_osversion(self):
hosts = ["aquilon91.aqd-unittest.ms.com"]
scratchfile = self.writescratch("missingosversion", "\n".join(hosts))
command = ["reconfigure", "--list", scratchfile, "--osname=linux"]
self.successtest(command)
def test_1150_list_no_osname(self):
hosts = ["aquilon91.aqd-unittest.ms.com"]
scratchfile = self.writescratch("missingosname", "\n".join(hosts))
command = ["reconfigure", "--list", scratchfile,
"--osversion=%s" % self.linux_version_prev]
self.successtest(command)
def test_1160_list_no_os_archetype(self):
hosts = ["aquilon91.aqd-unittest.ms.com"]
scratchfile = self.writescratch("missingosarchetype", "\n".join(hosts))
command = ["reconfigure", "--list", scratchfile,
"--osname=linux", "--osversion=%s" % self.linux_version_prev]
self.successtest(command)
def test_1170_os_required_service(self):
command = ["reconfigure", "--hostname", "aquilon69.aqd-unittest.ms.com",
"--osname", "solaris", "--osversion", "11.1-x86_64"]
out = self.statustest(command)
self.matchoutput(out,
"aquilon69.aqd-unittest.ms.com adding binding for "
"service instance ips/northamerica",
command)
def test_1175_cat_aquilon69(self):
command = ["cat", "--hostname", "aquilon69.aqd-unittest.ms.com"]
out = self.commandtest(command)
self.matchoutput(out,
'include { "service/ips/northamerica/client/config" };',
command)
def test_2000_windows_wrong_os(self):
command = ["reconfigure", "--hostname", "unittest01.one-nyp.ms.com",
"--osname", "linux", "--osversion", self.linux_version_prev]
err = self.notfoundtest(command)
self.matchoutput(err,
"Operating System linux, version %s, archetype "
"windows not found." % self.linux_version_prev,
command)
def test_2000_os_archetype_mismatch(self):
# Trying to change archetype, but there's no suitable OS
command = ["reconfigure", "--hostname", "unittest01.one-nyp.ms.com",
"--archetype", "aquilon", "--personality", "unixeng-test"]
err = self.notfoundtest(command)
self.matchoutput(err,
"Operating System windows, version nt61e, "
"archetype aquilon not found.",
command)
def test_2000_os_archetype_mismatch_list(self):
hosts = ["unittest01.one-nyp.ms.com"]
scratchfile = self.writescratch("hostlist", "\n".join(hosts))
command = ["reconfigure", "--list", scratchfile,
"--archetype", "aquilon", "--personality=unixeng-test"]
out = self.badrequesttest(command)
self.matchoutput(out,
"unittest01.one-nyp.ms.com: Operating System "
"windows, version nt61e, archetype aquilon not found.",
command)
def test_2000_missing_personality(self):
command = ["reconfigure",
"--hostname", "aquilon62.aqd-unittest.ms.com",
"--archetype", "windows"]
out = self.notfoundtest(command)
self.matchoutput(out,
"Personality inventory, archetype windows not found.",
command)
def test_2000_personality_not_allowed(self):
command = ["reconfigure", "--hostname=evh2.aqd-unittest.ms.com",
"--personality=esx_server"]
out = self.badrequesttest(command)
self.matchoutput(out,
"Personality vmhost/esx_server is not allowed by "
"ESX Cluster utecl1. Specify one of: "
"vmhost/vulcan-10g-server-prod.",
command)
def test_2000_personality_not_allowed_list(self):
hosts = ["evh2.aqd-unittest.ms.com"]
scratchfile = self.writescratch("persnotallowed", "\n".join(hosts))
command = ["reconfigure", "--list", scratchfile,
"--archetype=vmhost", "--personality=esx_server"]
out = self.badrequesttest(command)
self.matchoutput(out,
"evh2.aqd-unittest.ms.com: Personality "
"vmhost/esx_server is not allowed by ESX Cluster "
"utecl1. Specify one of: vmhost/vulcan-10g-server-prod.",
command)
def test_2000_hostlist_multiple_domains(self):
hosts = ["unittest02.one-nyp.ms.com",
"server1.aqd-unittest.ms.com",
"server2.aqd-unittest.ms.com",
"evh1.aqd-unittest.ms.com",
"aquilon91.aqd-unittest.ms.com"]
scratchfile = self.writescratch("diffdomains", "\n".join(hosts))
command = ["reconfigure", "--list", scratchfile]
out = self.badrequesttest(command)
self.matchoutput(out, "All objects must be in the same domain or sandbox:", command)
self.matchoutput(out, "3 objects in sandbox %s/utsandbox" % self.user, command)
self.matchoutput(out, "2 objects in domain unittest", command)
def test_2000_missing_required_service(self):
hosts = ["aquilon91.aqd-unittest.ms.com"]
scratchfile = self.writescratch("missingmap", "\n".join(hosts))
command = ["reconfigure", "--list", scratchfile,
"--archetype", "aquilon",
"--personality", "badpersonality2"]
out = self.badrequesttest(command)
self.matchoutput(out, "Could not find a relevant service map", command)
self.matchoutput(out, "The following hosts failed service binding:",
command)
self.matchoutput(out, "aquilon91.aqd-unittest.ms.com", command)
def test_2000_list_personality_no_archetype(self):
hosts = ["aquilon91.aqd-unittest.ms.com"]
scratchfile = self.writescratch("missingarchetype", "\n".join(hosts))
command = ["reconfigure", "--list", scratchfile,
"--personality=generic"]
out = self.badrequesttest(command)
self.matchoutput(out,
"Personality generic, archetype aquilon not found.",
command)
def test_2000_missing_personality_stage(self):
hosts = ["aquilon91.aqd-unittest.ms.com"]
scratchfile = self.writescratch("missingpersst", "\n".join(hosts))
command = ["reconfigure", "--list", scratchfile,
"--personality", "nostage",
"--personality_stage", "previous"]
out = self.badrequesttest(command)
self.matchoutput(out,
"Personality aquilon/nostage does not have stage "
"previous.",
command)
def test_2000_empty_hostlist(self):
hosts = ["#host", "#does", "", " #not ", "#exist"]
scratchfile = self.writescratch("empty", "\n".join(hosts))
command = ["reconfigure", "--list", scratchfile]
out = self.badrequesttest(command)
self.matchoutput(out, "Empty list.", command)
def test_2000_bad_hosts_in_list(self):
hosts = ["host-does-not-exist.aqd-unittest.ms.com",
"another-host-does-not-exist.aqd-unittest.ms.com",
"aquilon91.aqd-unittest.ms.com",
"host.domain-does-not-exist.ms.com"]
scratchfile = self.writescratch("missinghost", "\n".join(hosts))
# Use the deprecated option name here
command = ["reconfigure", "--hostlist", scratchfile]
out = self.badrequesttest(command)
self.matchoutput(out, "The --hostlist option is deprecated.", command)
self.matchoutput(out, "Invalid hosts in list:", command)
self.matchoutput(out,
"Host host-does-not-exist.aqd-unittest.ms.com not found.",
command)
self.matchoutput(out,
"Host another-host-does-not-exist.aqd-unittest.ms.com not found.",
command)
self.matchoutput(out,
"Host host.domain-does-not-exist.ms.com not found.",
command)
self.matchoutput(out,
"DNS Domain domain-does-not-exist.ms.com not found.",
command)
self.matchclean(out, "aquilon91.aqd-unittest.ms.com:", command)
def test_2000_over_list_limit(self):
hostlimit = self.config.getint("broker", "reconfigure_max_list_size")
hosts = []
for i in range(1, 20):
hosts.append("thishostdoesnotexist%d.aqd-unittest.ms.com" % i)
scratchfile = self.writescratch("reconfigurelistlimit", "\n".join(hosts))
command = ["reconfigure", "--list", scratchfile, "--personality=generic"]
out = self.badrequesttest(command)
self.matchoutput(out, "The number of hosts in list {0:d} can not be more "
"than {1:d}".format(len(hosts), hostlimit), command)
def test_2000_cluster_req(self):
command = ["reconfigure", "--hostname", "aquilon62.aqd-unittest.ms.com",
"--personality", "clustered"]
out = self.badrequesttest(command)
self.matchoutput(out, "Personality aquilon/clustered requires cluster "
"membership", command)
def test_2000_cluster_req_list(self):
hosts = ["aquilon62.aqd-unittest.ms.com"]
scratchfile = self.writescratch("cluster_req", "\n".join(hosts))
command = ["reconfigure", "--list", scratchfile,
"--personality", "clustered"]
out = self.badrequesttest(command)
self.matchoutput(out, "Personality aquilon/clustered requires cluster "
"membership", command)
def test_2000_promote_mixed_personality(self):
hosts = ["unittest00.one-nyp.ms.com",
"unittest12.aqd-unittest.ms.com"]
scratchfile = self.writescratch("promote_mixed_personality",
"\n".join(hosts))
command = ["reconfigure", "--list", scratchfile,
"--personality_stage", "next"]
out = self.badrequesttest(command)
self.matchoutput(out, "Promoting hosts in multiple personalities is "
"not supported.", command)
def test_3000_missing_required_params(self):
command = ["reconfigure",
"--hostname", "aquilon62.aqd-unittest.ms.com",
"--personality", "badpersonality"]
out = self.badrequesttest(command)
self.matchoutput(out, "cannot locate template named 'personality/badpersonality/espinfo'", command)
buildfile = self.build_profile_name("aquilon62.aqd-unittest.ms.com",
domain="utsandbox")
results = self.grepcommand(["-l", "badpersonality", buildfile])
self.assertFalse(results, "Found bad personality data in plenary "
"template for aquilon62.aqd-unittest.ms.com")
def test_3010_missing_personality_template_hostlist(self):
hosts = ["aquilon93.aqd-unittest.ms.com"]
scratchfile = self.writescratch("missingtemplate", "\n".join(hosts))
command = ["reconfigure", "--list", scratchfile,
"--archetype", "aquilon", "--personality", "badpersonality"]
out = self.badrequesttest(command)
self.matchoutput(out, "cannot locate template named 'personality/badpersonality/espinfo'", command)
self.assertFalse(os.path.exists(
self.build_profile_name("aquilon93.aqd-unittest.ms.com",
domain="utsandbox")))
servicedir = os.path.join(self.config.get("broker", "plenarydir"),
"servicedata")
results = self.grepcommand(["-rl", "aquilon93.aqd-unittest.ms.com",
servicedir])
self.assertFalse(results, "Found service plenary data that includes "
"aquilon93.aqd-unittest.ms.com")
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestReconfigure)
unittest.TextTestRunner(verbosity=2).run(suite)
|
listingmirror/boto
|
refs/heads/develop
|
boto/sqs/attributes.py
|
223
|
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents an SQS Attribute Name/Value set
"""
class Attributes(dict):
def __init__(self, parent):
self.parent = parent
self.current_key = None
self.current_value = None
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'Attribute':
self[self.current_key] = self.current_value
elif name == 'Name':
self.current_key = value
elif name == 'Value':
self.current_value = value
else:
setattr(self, name, value)
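# A minimal usage sketch, assuming illustrative SAX events: boto's XML parser
# fires endElement() for each closing tag; 'Name' and 'Value' buffer the pair,
# and the closing 'Attribute' tag commits it into the dict.
if __name__ == '__main__':
    attrs = Attributes(parent=None)
    for tag, value in [('Name', 'VisibilityTimeout'), ('Value', '30'),
                       ('Attribute', None)]:
        attrs.endElement(tag, value, connection=None)
    print(attrs)  # {'VisibilityTimeout': '30'}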
|
charbeljc/OCB
|
refs/heads/8.0
|
openerp/report/printscreen/ps_form.py
|
381
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import openerp
from openerp.report.interface import report_int
import openerp.tools as tools
from openerp.report import render
from lxml import etree
import time, os
class report_printscreen_list(report_int):
def __init__(self, name):
report_int.__init__(self, name)
def _parse_node(self, root_node):
result = []
for node in root_node:
if node.tag == 'field':
attrsa = node.attrib
attrs = {}
                if attrsa is not None:
for key,val in attrsa.items():
attrs[key] = val
result.append(attrs['name'])
else:
result.extend(self._parse_node(node))
return result
def _parse_string(self, view):
dom = etree.XML(view)
return self._parse_node(dom)
def create(self, cr, uid, ids, datas, context=None):
if not context:
context={}
datas['ids'] = ids
registry = openerp.registry(cr.dbname)
model = registry[datas['model']]
        # The title comes from the model's _description, as defined in its Python file.
self.title = model._description
result = model.fields_view_get(cr, uid, view_type='form', context=context)
fields_order = self._parse_string(result['arch'])
rows = model.read(cr, uid, datas['ids'], result['fields'].keys() )
self._create_table(uid, datas['ids'], result['fields'], fields_order, rows, context, model._description)
return self.obj.get(), 'pdf'
def _create_table(self, uid, ids, fields, fields_order, results, context, title=''):
        pageSize = [297.0, 210.0]
new_doc = etree.Element("report")
config = etree.SubElement(new_doc, 'config')
# build header
def _append_node(name, text):
n = etree.SubElement(config, name)
n.text = text
_append_node('date', time.strftime('%d/%m/%Y'))
_append_node('PageSize', '%.2fmm,%.2fmm' % tuple(pageSize))
_append_node('PageWidth', '%.2f' % (pageSize[0] * 2.8346,))
_append_node('PageHeight', '%.2f' %(pageSize[1] * 2.8346,))
_append_node('report-header', title)
l = []
t = 0
strmax = (pageSize[0]-40) * 2.8346
for f in fields_order:
s = 0
if fields[f]['type'] in ('date','time','float','integer'):
s = 60
strmax -= s
else:
t += fields[f].get('size', 56) / 28 + 1
l.append(s)
for pos in range(len(l)):
if not l[pos]:
s = fields[fields_order[pos]].get('size', 56) / 28 + 1
l[pos] = strmax * s / t
_append_node('tableSize', ','.join(map(str,l)) )
header = etree.SubElement(new_doc, 'header')
for f in fields_order:
field = etree.SubElement(header, 'field')
field.text = fields[f]['string'] or ''
lines = etree.SubElement(new_doc, 'lines')
for line in results:
node_line = etree.SubElement(lines, 'row')
for f in fields_order:
                if fields[f]['type'] == 'many2one' and line[f]:
                    line[f] = line[f][1]
                if fields[f]['type'] in ('one2many', 'many2many') and line[f]:
                    line[f] = '( ' + str(len(line[f])) + ' )'
                if fields[f]['type'] == 'float':
                    precision = (('digits' in fields[f]) and fields[f]['digits'][1]) or 2
                    line[f] = round(line[f], precision)
col = etree.SubElement(node_line, 'col', tree='no')
if line[f] is not None:
col.text = tools.ustr(line[f] or '')
else:
col.text = '/'
transform = etree.XSLT(
etree.parse(os.path.join(tools.config['root_path'],
'addons/base/report/custom_new.xsl')))
rml = etree.tostring(transform(new_doc))
self.obj = render.rml(rml, self.title)
self.obj.render()
return True
report_printscreen_list('report.printscreen.form')
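# A minimal sketch, assuming a simple form view arch and a hypothetical report
# name: _parse_string() flattens the view into the ordered list of field
# names, descending recursively through container nodes.
#
#   arch = '<form><group><field name="name"/><field name="date"/></group></form>'
#   report_printscreen_list('report.printscreen.demo')._parse_string(arch)
#   => ['name', 'date']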
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
tumbl3w33d/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/avi/avi_httppolicyset.py
|
28
|
#!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_httppolicyset
author: Gaurav Rastogi (@grastogi23) <grastogi@avinetworks.com>
short_description: Module for setup of HTTPPolicySet Avi RESTful Object
description:
    - This module is used to configure the HTTPPolicySet object.
    - More examples at U(https://github.com/avinetworks/devops).
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
cloud_config_cksum:
description:
- Checksum of cloud configuration for pool.
- Internally set by cloud connector.
created_by:
description:
- Creator name.
description:
description:
- User defined description for the object.
http_request_policy:
description:
- Http request policy for the virtual service.
http_response_policy:
description:
- Http response policy for the virtual service.
http_security_policy:
description:
- Http security policy for the virtual service.
is_internal_policy:
description:
- Boolean flag to set is_internal_policy.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
name:
description:
- Name of the http policy set.
required: true
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the http policy set.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Create an HTTP policy set to switch between testpool1 and testpool2
avi_httppolicyset:
controller: 10.10.27.90
username: admin
password: AviNetworks123!
name: test-HTTP-Policy-Set
tenant_ref: admin
http_request_policy:
rules:
- index: 1
enable: true
name: test-test1
match:
path:
match_case: INSENSITIVE
match_str:
- /test1
match_criteria: EQUALS
switching_action:
action: HTTP_SWITCHING_SELECT_POOL
status_code: HTTP_LOCAL_RESPONSE_STATUS_CODE_200
pool_ref: "/api/pool?name=testpool1"
- index: 2
enable: true
name: test-test2
match:
path:
match_case: INSENSITIVE
match_str:
- /test2
match_criteria: CONTAINS
switching_action:
action: HTTP_SWITCHING_SELECT_POOL
status_code: HTTP_LOCAL_RESPONSE_STATUS_CODE_200
pool_ref: "/api/pool?name=testpool2"
is_internal_policy: false
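
# A hedged sketch, assuming the rule and pool names below: setting
# avi_api_update_method to patch with avi_api_patch_op add appends a rule to
# the existing policy set instead of replacing the whole object.
- name: Add a rule to the HTTP policy set using HTTP PATCH
  avi_httppolicyset:
    controller: 10.10.27.90
    username: admin
    password: AviNetworks123!
    avi_api_update_method: patch
    avi_api_patch_op: add
    name: test-HTTP-Policy-Set
    tenant_ref: admin
    http_request_policy:
      rules:
        - index: 3
          enable: true
          name: test-test3
          match:
            path:
              match_case: INSENSITIVE
              match_str:
                - /test3
              match_criteria: EQUALS
          switching_action:
            action: HTTP_SWITCHING_SELECT_POOL
            status_code: HTTP_LOCAL_RESPONSE_STATUS_CODE_200
            pool_ref: "/api/pool?name=testpool1"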
"""
RETURN = '''
obj:
description: HTTPPolicySet (api/httppolicyset) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, avi_ansible_api, HAS_AVI)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
cloud_config_cksum=dict(type='str',),
created_by=dict(type='str',),
description=dict(type='str',),
http_request_policy=dict(type='dict',),
http_response_policy=dict(type='dict',),
http_security_policy=dict(type='dict',),
is_internal_policy=dict(type='bool',),
name=dict(type='str', required=True),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) or requests is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'httppolicyset',
set([]))
if __name__ == '__main__':
main()
|
mensler/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/nxos/nxos_gir.py
|
56
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nxos_gir
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Trigger a graceful removal or insertion (GIR) of the switch.
description:
- Trigger a graceful removal or insertion (GIR) of the switch.
author:
- Gabriele Gerbino (@GGabriele)
notes:
- C(state) has effect only in combination with
C(system_mode_maintenance_timeout) or
C(system_mode_maintenance_on_reload_reset_reason).
- Using C(system_mode_maintenance) and
C(system_mode_maintenance_dont_generate_profile) would make the module
fail, but the system mode will be triggered anyway.
options:
system_mode_maintenance:
description:
- When C(system_mode_maintenance=true) it puts all enabled
protocols in maintenance mode (using the isolate command).
When C(system_mode_maintenance=false) it puts all enabled
protocols in normal mode (using the no isolate command).
required: false
default: null
choices: ['true','false']
system_mode_maintenance_dont_generate_profile:
description:
- When C(system_mode_maintenance_dont_generate_profile=true) it
prevents the dynamic searching of enabled protocols and executes
commands configured in a maintenance-mode profile.
Use this option if you want the system to use a maintenance-mode
profile that you have created.
When C(system_mode_maintenance_dont_generate_profile=false) it
prevents the dynamic searching of enabled protocols and executes
commands configured in a normal-mode profile. Use this option if
you want the system to use a normal-mode profile that
you have created.
required: false
default: null
choices: ['true','false']
system_mode_maintenance_timeout:
description:
- Keeps the switch in maintenance mode for a specified
number of minutes. Range is 5-65535.
required: false
default: null
system_mode_maintenance_shutdown:
description:
- Shuts down all protocols, vPC domains, and interfaces except
the management interface (using the shutdown command).
This option is disruptive while C(system_mode_maintenance)
(which uses the isolate command) is not.
required: false
default: null
choices: ['true','false']
system_mode_maintenance_on_reload_reset_reason:
description:
- Boots the switch into maintenance mode automatically in the
event of a specified system crash.
required: false
default: null
choices: ['hw_error','svc_failure','kern_failure','wdog_timeout',
'fatal_error','lc_failure','match_any','manual_reload']
state:
description:
- Specify desired state of the resource.
required: true
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
# Trigger system maintenance mode
- nxos_gir:
system_mode_maintenance: true
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
# Trigger system normal mode
- nxos_gir:
system_mode_maintenance: false
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
# Configure on-reload reset-reason for maintenance mode
- nxos_gir:
system_mode_maintenance_on_reload_reset_reason: manual_reload
state: present
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
# Add on-reload reset-reason for maintenance mode
- nxos_gir:
system_mode_maintenance_on_reload_reset_reason: hw_error
state: present
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
# Remove on-reload reset-reason for maintenance mode
- nxos_gir:
system_mode_maintenance_on_reload_reset_reason: manual_reload
state: absent
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
# Set timeout for maintenance mode
- nxos_gir:
system_mode_maintenance_timeout: 30
state: present
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
# Remove timeout for maintenance mode
- nxos_gir:
system_mode_maintenance_timeout: 30
state: absent
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
'''
RETURN = '''
final_system_mode:
description: describe the last system mode
returned: verbose mode
type: string
sample: normal
updates:
description: commands sent to the device
returned: verbose mode
type: list
sample: ["terminal dont-ask", "system mode maintenance timeout 10"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import re
from ansible.module_utils.nxos import get_config, load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
def execute_show_command(command, module, command_type='cli_show_ascii'):
    # Both the 'cli' and 'nxapi' transports run the command the same way here,
    # so there is no need to branch (which previously left body unbound for
    # any other transport value).
    cmds = [command]
    body = run_commands(module, cmds)
    return body
def get_system_mode(module):
command = 'show system mode'
body = execute_show_command(command, module)[0]
if 'normal' in body.lower():
mode = 'normal'
else:
mode = 'maintenance'
return mode
def get_maintenance_timeout(module):
    command = 'show maintenance timeout'
    body = execute_show_command(command, module)[0]
    # The timeout value is the fifth whitespace-separated token of the output.
    timeout = body.split()[4]
    return timeout
def get_reset_reasons(module):
command = 'show maintenance on-reload reset-reasons'
body = execute_show_command(command, module)[0]
return body
def get_commands(module, state, mode):
commands = list()
system_mode = ''
if module.params['system_mode_maintenance'] is True and mode == 'normal':
commands.append('system mode maintenance')
elif (module.params['system_mode_maintenance'] is False and
mode == 'maintenance'):
commands.append('no system mode maintenance')
elif (module.params[
'system_mode_maintenance_dont_generate_profile'] is True and
mode == 'normal'):
commands.append('system mode maintenance dont-generate-profile')
elif (module.params[
'system_mode_maintenance_dont_generate_profile'] is False and
mode == 'maintenance'):
commands.append('no system mode maintenance dont-generate-profile')
elif module.params['system_mode_maintenance_timeout']:
timeout = get_maintenance_timeout(module)
if (state == 'present' and
timeout != module.params['system_mode_maintenance_timeout']):
commands.append('system mode maintenance timeout {0}'.format(
module.params['system_mode_maintenance_timeout']))
elif (state == 'absent' and
timeout == module.params['system_mode_maintenance_timeout']):
commands.append('no system mode maintenance timeout {0}'.format(
module.params['system_mode_maintenance_timeout']))
elif module.params['system_mode_maintenance_shutdown'] is True:
commands.append('system mode maintenance shutdown')
elif module.params['system_mode_maintenance_on_reload_reset_reason']:
reset_reasons = get_reset_reasons(module)
if (state == 'present' and
module.params['system_mode_maintenance_on_reload_reset_reason'].lower() not in reset_reasons.lower()):
commands.append('system mode maintenance on-reload '
'reset-reason {0}'.format(
module.params[
'system_mode_maintenance_on_reload_reset_reason']))
elif (state == 'absent' and
module.params[
'system_mode_maintenance_on_reload_reset_reason'].lower() in
reset_reasons.lower()):
commands.append('no system mode maintenance on-reload '
'reset-reason {0}'.format(
module.params[
'system_mode_maintenance_on_reload_reset_reason']))
if commands:
commands.insert(0, 'terminal dont-ask')
return commands
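# A worked sketch, assuming illustrative module params: with state='present',
# mode='normal' and system_mode_maintenance=True, get_commands() returns
# ['terminal dont-ask', 'system mode maintenance']; with state='absent' and a
# timeout matching the device's current value, it returns
# ['terminal dont-ask', 'no system mode maintenance timeout <timeout>'].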
def main():
argument_spec = dict(
system_mode_maintenance=dict(required=False, type='bool'),
system_mode_maintenance_dont_generate_profile=dict(required=False,
type='bool'),
system_mode_maintenance_timeout=dict(required=False, type='str'),
system_mode_maintenance_shutdown=dict(required=False, type='bool'),
system_mode_maintenance_on_reload_reset_reason=dict(required=False,
choices=['hw_error','svc_failure','kern_failure',
'wdog_timeout','fatal_error','lc_failure',
'match_any','manual_reload']),
state=dict(choices=['absent', 'present', 'default'],
default='present', required=False)
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=[[
'system_mode_maintenance',
'system_mode_maintenance_dont_generate_profile',
'system_mode_maintenance_timeout',
'system_mode_maintenance_shutdown',
'system_mode_maintenance_on_reload_reset_reason'
]],
required_one_of=[[
'system_mode_maintenance',
'system_mode_maintenance_dont_generate_profile',
'system_mode_maintenance_timeout',
'system_mode_maintenance_shutdown',
'system_mode_maintenance_on_reload_reset_reason'
]],
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
state = module.params['state']
mode = get_system_mode(module)
commands = get_commands(module, state, mode)
changed = False
if commands:
if module.check_mode:
module.exit_json(changed=True, commands=commands)
else:
load_config(module, commands)
changed = True
result = {}
result['changed'] = changed
if module._verbosity > 0:
final_system_mode = get_system_mode(module)
result['final_system_mode'] = final_system_mode
result['updates'] = commands
result['warnings'] = warnings
module.exit_json(**result)
if __name__ == '__main__':
main()
|
ajohnson23/depot_tools
|
refs/heads/master
|
third_party/logilab/common/testlib.py
|
64
|
# -*- coding: utf-8 -*-
# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Run tests.
This will find all modules whose name matches a given prefix in the test
directory, and run them. Various command line options provide
additional facilities.
Command line options:
-v verbose -- run tests in verbose mode with output to stdout
-q quiet -- don't print anything except if a test fails
-t testdir -- directory where the tests will be found
-x exclude -- add a test to exclude
-p profile -- profiled execution
-d dbc -- enable design-by-contract
-m match -- only run test matching the tag pattern which follow
If no non-option arguments are present, prefixes used are 'test',
'regrtest', 'smoketest' and 'unittest'.
"""
from __future__ import print_function
__docformat__ = "restructuredtext en"
# modified copy of some functions from test/regrtest.py from PyXml
# disable camel case warning
# pylint: disable=C0103
import sys
import os, os.path as osp
import re
import traceback
import inspect
import difflib
import tempfile
import math
import warnings
from shutil import rmtree
from operator import itemgetter
from itertools import dropwhile
from inspect import isgeneratorfunction
from six import string_types
from six.moves import builtins, range, configparser, input
from logilab.common.deprecation import deprecated
import unittest as unittest_legacy
if not getattr(unittest_legacy, "__package__", None):
try:
import unittest2 as unittest
from unittest2 import SkipTest
except ImportError:
raise ImportError("You have to install python-unittest2 to use %s" % __name__)
else:
import unittest
from unittest import SkipTest
from functools import wraps
from logilab.common.debugger import Debugger, colorize_source
from logilab.common.decorators import cached, classproperty
from logilab.common import textutils
__all__ = ['main', 'unittest_main', 'find_tests', 'run_test', 'spawn']
DEFAULT_PREFIXES = ('test', 'regrtest', 'smoketest', 'unittest',
'func', 'validation')
is_generator = deprecated('[lgc 0.63] use inspect.isgeneratorfunction')(isgeneratorfunction)
# used by unittest to count the number of relevant levels in the traceback
__unittest = 1
def with_tempdir(callable):
"""A decorator ensuring no temporary file left when the function return
Work only for temporary file create with the tempfile module"""
if isgeneratorfunction(callable):
def proxy(*args, **kwargs):
old_tmpdir = tempfile.gettempdir()
new_tmpdir = tempfile.mkdtemp(prefix="temp-lgc-")
tempfile.tempdir = new_tmpdir
try:
for x in callable(*args, **kwargs):
yield x
finally:
try:
rmtree(new_tmpdir, ignore_errors=True)
finally:
tempfile.tempdir = old_tmpdir
return proxy
@wraps(callable)
def proxy(*args, **kargs):
old_tmpdir = tempfile.gettempdir()
new_tmpdir = tempfile.mkdtemp(prefix="temp-lgc-")
tempfile.tempdir = new_tmpdir
try:
return callable(*args, **kargs)
finally:
try:
rmtree(new_tmpdir, ignore_errors=True)
finally:
tempfile.tempdir = old_tmpdir
return proxy
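# A minimal usage sketch, assuming a hypothetical build_artifacts() helper:
#
#   @with_tempdir
#   def build_artifacts():
#       path = tempfile.mkstemp()[1]  # lands in the decorator's private tmpdir
#       ...                           # the whole tmpdir is removed on return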
def in_tempdir(callable):
"""A decorator moving the enclosed function inside the tempfile.tempfdir
"""
@wraps(callable)
def proxy(*args, **kargs):
old_cwd = os.getcwd()
os.chdir(tempfile.tempdir)
try:
return callable(*args, **kargs)
finally:
os.chdir(old_cwd)
return proxy
def within_tempdir(callable):
"""A decorator run the enclosed function inside a tmpdir removed after execution
"""
proxy = with_tempdir(in_tempdir(callable))
proxy.__name__ = callable.__name__
return proxy
def find_tests(testdir,
prefixes=DEFAULT_PREFIXES, suffix=".py",
excludes=(),
remove_suffix=True):
"""
Return a list of all applicable test modules.
"""
tests = []
for name in os.listdir(testdir):
if not suffix or name.endswith(suffix):
for prefix in prefixes:
if name.startswith(prefix):
if remove_suffix and name.endswith(suffix):
name = name[:-len(suffix)]
if name not in excludes:
tests.append(name)
tests.sort()
return tests
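# A usage sketch, assuming a test directory containing test_foo.py,
# smoketest_bar.py and helper.py:
#
#   find_tests('tests')  => ['smoketest_bar', 'test_foo']
#
# Only modules matching one of DEFAULT_PREFIXES are kept, sorted, with the
# '.py' suffix stripped; helper.py matches no prefix and is dropped.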
## PostMortem Debug facilities #####
def start_interactive_mode(result):
"""starts an interactive shell so that the user can inspect errors
"""
debuggers = result.debuggers
descrs = result.error_descrs + result.fail_descrs
if len(debuggers) == 1:
# don't ask for test name if there's only one failure
debuggers[0].start()
else:
while True:
testindex = 0
print("Choose a test to debug:")
            # order debuggers in the same way as errors were printed
print("\n".join(['\t%s : %s' % (i, descr) for i, (_, descr)
in enumerate(descrs)]))
print("Type 'exit' (or ^D) to quit")
print()
try:
todebug = input('Enter a test name: ')
if todebug.strip().lower() == 'exit':
print()
break
else:
try:
testindex = int(todebug)
debugger = debuggers[descrs[testindex][0]]
except (ValueError, IndexError):
print("ERROR: invalid test number %r" % (todebug, ))
else:
debugger.start()
except (EOFError, KeyboardInterrupt):
print()
break
# test utils ##################################################################
class SkipAwareTestResult(unittest._TextTestResult):
def __init__(self, stream, descriptions, verbosity,
exitfirst=False, pdbmode=False, cvg=None, colorize=False):
super(SkipAwareTestResult, self).__init__(stream,
descriptions, verbosity)
self.skipped = []
self.debuggers = []
self.fail_descrs = []
self.error_descrs = []
self.exitfirst = exitfirst
self.pdbmode = pdbmode
self.cvg = cvg
self.colorize = colorize
self.pdbclass = Debugger
self.verbose = verbosity > 1
def descrs_for(self, flavour):
return getattr(self, '%s_descrs' % flavour.lower())
def _create_pdb(self, test_descr, flavour):
self.descrs_for(flavour).append( (len(self.debuggers), test_descr) )
if self.pdbmode:
self.debuggers.append(self.pdbclass(sys.exc_info()[2]))
def _iter_valid_frames(self, frames):
"""only consider non-testlib frames when formatting traceback"""
lgc_testlib = osp.abspath(__file__)
std_testlib = osp.abspath(unittest.__file__)
invalid = lambda fi: osp.abspath(fi[1]) in (lgc_testlib, std_testlib)
for frameinfo in dropwhile(invalid, frames):
yield frameinfo
def _exc_info_to_string(self, err, test):
"""Converts a sys.exc_info()-style tuple of values into a string.
This method is overridden here because we want to colorize
lines if --color is passed, and display local variables if
--verbose is passed
"""
exctype, exc, tb = err
output = ['Traceback (most recent call last)']
frames = inspect.getinnerframes(tb)
colorize = self.colorize
frames = enumerate(self._iter_valid_frames(frames))
for index, (frame, filename, lineno, funcname, ctx, ctxindex) in frames:
filename = osp.abspath(filename)
if ctx is None: # pyc files or C extensions for instance
source = '<no source available>'
else:
source = ''.join(ctx)
if colorize:
filename = textutils.colorize_ansi(filename, 'magenta')
source = colorize_source(source)
output.append(' File "%s", line %s, in %s' % (filename, lineno, funcname))
output.append(' %s' % source.strip())
if self.verbose:
output.append('%r == %r' % (dir(frame), test.__module__))
output.append('')
output.append(' ' + ' local variables '.center(66, '-'))
for varname, value in sorted(frame.f_locals.items()):
output.append(' %s: %r' % (varname, value))
if varname == 'self': # special handy processing for self
for varname, value in sorted(vars(value).items()):
output.append(' self.%s: %r' % (varname, value))
output.append(' ' + '-' * 66)
output.append('')
output.append(''.join(traceback.format_exception_only(exctype, exc)))
return '\n'.join(output)
def addError(self, test, err):
"""err -> (exc_type, exc, tcbk)"""
exc_type, exc, _ = err
if isinstance(exc, SkipTest):
assert exc_type == SkipTest
self.addSkip(test, exc)
else:
if self.exitfirst:
self.shouldStop = True
descr = self.getDescription(test)
super(SkipAwareTestResult, self).addError(test, err)
self._create_pdb(descr, 'error')
def addFailure(self, test, err):
if self.exitfirst:
self.shouldStop = True
descr = self.getDescription(test)
super(SkipAwareTestResult, self).addFailure(test, err)
self._create_pdb(descr, 'fail')
def addSkip(self, test, reason):
self.skipped.append((test, reason))
if self.showAll:
self.stream.writeln("SKIPPED")
elif self.dots:
self.stream.write('S')
def printErrors(self):
super(SkipAwareTestResult, self).printErrors()
self.printSkippedList()
def printSkippedList(self):
# format (test, err) compatible with unittest2
for test, err in self.skipped:
descr = self.getDescription(test)
self.stream.writeln(self.separator1)
self.stream.writeln("%s: %s" % ('SKIPPED', descr))
self.stream.writeln("\t%s" % err)
def printErrorList(self, flavour, errors):
for (_, descr), (test, err) in zip(self.descrs_for(flavour), errors):
self.stream.writeln(self.separator1)
self.stream.writeln("%s: %s" % (flavour, descr))
self.stream.writeln(self.separator2)
self.stream.writeln(err)
self.stream.writeln('no stdout'.center(len(self.separator2)))
self.stream.writeln('no stderr'.center(len(self.separator2)))
# Add deprecation warnings about new api used by module level fixtures in unittest2
# http://www.voidspace.org.uk/python/articles/unittest2.shtml#setupmodule-and-teardownmodule
class _DebugResult(object): # simplify import statement among unittest flavors..
"Used by the TestSuite to hold previous class when running in debug."
_previousTestClass = None
_moduleSetUpFailed = False
shouldStop = False
from logilab.common.decorators import monkeypatch
@monkeypatch(unittest.TestSuite)
def _handleModuleTearDown(self, result):
previousModule = self._get_previous_module(result)
if previousModule is None:
return
if result._moduleSetUpFailed:
return
try:
module = sys.modules[previousModule]
except KeyError:
return
# add testlib specific deprecation warning and switch to new api
if hasattr(module, 'teardown_module'):
warnings.warn('Please rename teardown_module() to tearDownModule() instead.',
DeprecationWarning)
setattr(module, 'tearDownModule', module.teardown_module)
# end of monkey-patching
tearDownModule = getattr(module, 'tearDownModule', None)
if tearDownModule is not None:
try:
tearDownModule()
except Exception as e:
if isinstance(result, _DebugResult):
raise
errorName = 'tearDownModule (%s)' % previousModule
self._addClassOrModuleLevelException(result, e, errorName)
@monkeypatch(unittest.TestSuite)
def _handleModuleFixture(self, test, result):
previousModule = self._get_previous_module(result)
currentModule = test.__class__.__module__
if currentModule == previousModule:
return
self._handleModuleTearDown(result)
result._moduleSetUpFailed = False
try:
module = sys.modules[currentModule]
except KeyError:
return
# add testlib specific deprecation warning and switch to new api
if hasattr(module, 'setup_module'):
warnings.warn('Please rename setup_module() to setUpModule() instead.',
DeprecationWarning)
setattr(module, 'setUpModule', module.setup_module)
# end of monkey-patching
setUpModule = getattr(module, 'setUpModule', None)
if setUpModule is not None:
try:
setUpModule()
except Exception as e:
if isinstance(result, _DebugResult):
raise
result._moduleSetUpFailed = True
errorName = 'setUpModule (%s)' % currentModule
self._addClassOrModuleLevelException(result, e, errorName)
# backward compatibility: TestSuite might be imported from lgc.testlib
TestSuite = unittest.TestSuite
class keywords(dict):
"""Keyword args (**kwargs) support for generative tests."""
class starargs(tuple):
"""Variable arguments (*args) for generative tests."""
def __new__(cls, *args):
return tuple.__new__(cls, args)
unittest_main = unittest.main
class InnerTestSkipped(SkipTest):
"""raised when a test is skipped"""
pass
def parse_generative_args(params):
args = []
varargs = ()
kwargs = {}
flags = 0 # 2 <=> starargs, 4 <=> kwargs
for param in params:
if isinstance(param, starargs):
varargs = param
if flags:
                raise TypeError('found starargs after keywords!')
flags |= 2
args += list(varargs)
elif isinstance(param, keywords):
kwargs = param
if flags & 4:
raise TypeError('got multiple keywords parameters')
flags |= 4
elif flags & 2 or flags & 4:
raise TypeError('found parameters after kwargs or args')
else:
args.append(param)
return args, kwargs
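# A worked example, assuming illustrative parameters yielded by a generative
# test after the test callable itself:
#
#   parse_generative_args([1, 2, starargs(3, 4), keywords(tolerance=0.1)])
#   => ([1, 2, 3, 4], {'tolerance': 0.1})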
class InnerTest(tuple):
def __new__(cls, name, *data):
instance = tuple.__new__(cls, data)
instance.name = name
return instance
class Tags(set):
"""A set of tag able validate an expression"""
def __init__(self, *tags, **kwargs):
self.inherit = kwargs.pop('inherit', True)
if kwargs:
raise TypeError("%s are an invalid keyword argument for this function" % kwargs.keys())
if len(tags) == 1 and not isinstance(tags[0], string_types):
tags = tags[0]
super(Tags, self).__init__(tags, **kwargs)
def __getitem__(self, key):
return key in self
def match(self, exp):
return eval(exp, {}, self)
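# A usage sketch, assuming illustrative tag names: match() evaluates a boolean
# expression where each name resolves to membership in the set.
#
#   Tags('web', 'slow').match('web and not slow')  => False
#   Tags('web', 'slow').match('web or fast')       => True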
# duplicate definition from unittest2 of the _deprecate decorator
def _deprecate(original_func):
def deprecated_func(*args, **kwargs):
warnings.warn(
('Please use %s instead.' % original_func.__name__),
DeprecationWarning, 2)
return original_func(*args, **kwargs)
return deprecated_func
class TestCase(unittest.TestCase):
"""A unittest.TestCase extension with some additional methods."""
maxDiff = None
pdbclass = Debugger
tags = Tags()
def __init__(self, methodName='runTest'):
super(TestCase, self).__init__(methodName)
self.__exc_info = sys.exc_info
self.__testMethodName = self._testMethodName
self._current_test_descr = None
self._options_ = None
@classproperty
@cached
def datadir(cls): # pylint: disable=E0213
"""helper attribute holding the standard test's data directory
NOTE: this is a logilab's standard
"""
mod = __import__(cls.__module__)
return osp.join(osp.dirname(osp.abspath(mod.__file__)), 'data')
# cache it (use a class method to cache on class since TestCase is
# instantiated for each test run)
@classmethod
def datapath(cls, *fname):
"""joins the object's datadir and `fname`"""
return osp.join(cls.datadir, *fname)
def set_description(self, descr):
"""sets the current test's description.
        This can be useful for generative tests because it allows specifying
a description per yield
"""
self._current_test_descr = descr
# override default's unittest.py feature
def shortDescription(self):
"""override default unittest shortDescription to handle correctly
generative tests
"""
if self._current_test_descr is not None:
return self._current_test_descr
return super(TestCase, self).shortDescription()
def quiet_run(self, result, func, *args, **kwargs):
try:
func(*args, **kwargs)
except (KeyboardInterrupt, SystemExit):
raise
except unittest.SkipTest as e:
if hasattr(result, 'addSkip'):
result.addSkip(self, str(e))
else:
warnings.warn("TestResult has no addSkip method, skips not reported",
RuntimeWarning, 2)
result.addSuccess(self)
return False
except:
result.addError(self, self.__exc_info())
return False
return True
def _get_test_method(self):
"""return the test method"""
return getattr(self, self._testMethodName)
def optval(self, option, default=None):
"""return the option value or default if the option is not define"""
return getattr(self._options_, option, default)
def __call__(self, result=None, runcondition=None, options=None):
"""rewrite TestCase.__call__ to support generative tests
This is mostly a copy/paste from unittest.py (i.e same
variable names, same logic, except for the generative tests part)
"""
from logilab.common.pytest import FILE_RESTART
if result is None:
result = self.defaultTestResult()
result.pdbclass = self.pdbclass
self._options_ = options
# if result.cvg:
# result.cvg.start()
testMethod = self._get_test_method()
if (getattr(self.__class__, "__unittest_skip__", False) or
getattr(testMethod, "__unittest_skip__", False)):
# If the class or method was skipped.
try:
skip_why = (getattr(self.__class__, '__unittest_skip_why__', '')
or getattr(testMethod, '__unittest_skip_why__', ''))
self._addSkip(result, skip_why)
finally:
result.stopTest(self)
return
if runcondition and not runcondition(testMethod):
return # test is skipped
result.startTest(self)
try:
if not self.quiet_run(result, self.setUp):
return
generative = isgeneratorfunction(testMethod)
# generative tests
if generative:
self._proceed_generative(result, testMethod,
runcondition)
else:
status = self._proceed(result, testMethod)
success = (status == 0)
if not self.quiet_run(result, self.tearDown):
return
if not generative and success:
if hasattr(options, "exitfirst") and options.exitfirst:
# add this test to restart file
try:
restartfile = open(FILE_RESTART, 'a')
try:
descr = '.'.join((self.__class__.__module__,
self.__class__.__name__,
self._testMethodName))
restartfile.write(descr+os.linesep)
finally:
restartfile.close()
except Exception:
print("Error while saving succeeded test into",
osp.join(os.getcwd(), FILE_RESTART),
file=sys.__stderr__)
raise
result.addSuccess(self)
finally:
# if result.cvg:
# result.cvg.stop()
result.stopTest(self)
def _proceed_generative(self, result, testfunc, runcondition=None):
# cancel startTest()'s increment
result.testsRun -= 1
success = True
try:
for params in testfunc():
if runcondition and not runcondition(testfunc,
skipgenerator=False):
if not (isinstance(params, InnerTest)
and runcondition(params)):
continue
if not isinstance(params, (tuple, list)):
params = (params, )
func = params[0]
args, kwargs = parse_generative_args(params[1:])
# increment test counter manually
result.testsRun += 1
status = self._proceed(result, func, args, kwargs)
if status == 0:
result.addSuccess(self)
success = True
else:
success = False
                # XXX Don't stop anymore if an error occurred
#if status == 2:
# result.shouldStop = True
if result.shouldStop: # either on error or on exitfirst + error
break
except:
            # if an error occurs between two yields
result.addError(self, self.__exc_info())
success = False
return success
def _proceed(self, result, testfunc, args=(), kwargs=None):
"""proceed the actual test
returns 0 on success, 1 on failure, 2 on error
Note: addSuccess can't be called here because we have to wait
for tearDown to be successfully executed to declare the test as
successful
"""
kwargs = kwargs or {}
try:
testfunc(*args, **kwargs)
except self.failureException:
result.addFailure(self, self.__exc_info())
return 1
except KeyboardInterrupt:
raise
except InnerTestSkipped as e:
result.addSkip(self, e)
return 1
except SkipTest as e:
result.addSkip(self, e)
return 0
except:
result.addError(self, self.__exc_info())
return 2
return 0
def defaultTestResult(self):
"""return a new instance of the defaultTestResult"""
return SkipAwareTestResult()
skip = _deprecate(unittest.TestCase.skipTest)
assertEquals = _deprecate(unittest.TestCase.assertEqual)
assertNotEquals = _deprecate(unittest.TestCase.assertNotEqual)
assertAlmostEquals = _deprecate(unittest.TestCase.assertAlmostEqual)
assertNotAlmostEquals = _deprecate(unittest.TestCase.assertNotAlmostEqual)
def innerSkip(self, msg=None):
"""mark a generative test as skipped for the <msg> reason"""
msg = msg or 'test was skipped'
raise InnerTestSkipped(msg)
@deprecated('Please use assertDictEqual instead.')
def assertDictEquals(self, dict1, dict2, msg=None, context=None):
"""compares two dicts
        If the two dicts differ, the first difference is shown in the error
message
:param dict1: a Python Dictionary
:param dict2: a Python Dictionary
:param msg: custom message (String) in case of failure
"""
dict1 = dict(dict1)
msgs = []
for key, value in dict2.items():
try:
if dict1[key] != value:
msgs.append('%r != %r for key %r' % (dict1[key], value,
key))
del dict1[key]
except KeyError:
msgs.append('missing %r key' % key)
if dict1:
msgs.append('dict2 is lacking %r' % dict1)
        if msg:
            self.fail(msg)
elif msgs:
if context is not None:
base = '%s\n' % context
else:
base = ''
self.fail(base + '\n'.join(msgs))
@deprecated('Please use assertCountEqual instead.')
def assertUnorderedIterableEquals(self, got, expected, msg=None):
"""compares two iterable and shows difference between both
:param got: the unordered Iterable that we found
:param expected: the expected unordered Iterable
:param msg: custom message (String) in case of failure
"""
got, expected = list(got), list(expected)
self.assertSetEqual(set(got), set(expected), msg)
        if len(got) != len(expected):
            if msg is None:
                msg_lines = ['Iterables have the same elements but not the same counts',
                             '\t<element>\t<expected>\t<got>']
                got_count = {}
                expected_count = {}
                for element in got:
                    got_count[element] = got_count.get(element, 0) + 1
                for element in expected:
                    expected_count[element] = expected_count.get(element, 0) + 1
                # we know that got_count.keys() == expected_count.keys()
                # because of assertSetEqual
                for element, count in got_count.items():
                    other_count = expected_count[element]
                    if other_count != count:
                        msg_lines.append('\t%s\t%s\t%s' % (element, other_count, count))
                msg = '\n'.join(msg_lines)
            self.fail(msg)
assertUnorderedIterableEqual = assertUnorderedIterableEquals
assertUnordIterEquals = assertUnordIterEqual = assertUnorderedIterableEqual
@deprecated('Please use assertSetEqual instead.')
    def assertSetEquals(self, got, expected, msg=None):
"""compares two sets and shows difference between both
Don't use it for iterables other than sets.
:param got: the Set that we found
:param expected: the second Set to be compared to the first one
:param msg: custom message (String) in case of failure
"""
if not(isinstance(got, set) and isinstance(expected, set)):
warnings.warn("the assertSetEquals function if now intended for set only."\
"use assertUnorderedIterableEquals instead.",
DeprecationWarning, 2)
return self.assertUnorderedIterableEquals(got, expected, msg)
        items = {}
        items['missing'] = expected - got
        items['unexpected'] = got - expected
        if any(items.values()):
            if msg is None:
                msg = '\n'.join('%s:\n\t%s' % (key, "\n\t".join(str(value) for value in values))
                                for key, values in items.items() if values)
            self.fail(msg)
@deprecated('Please use assertListEqual instead.')
def assertListEquals(self, list_1, list_2, msg=None):
"""compares two lists
        If the two lists differ, the first difference is shown in the error
message
:param list_1: a Python List
:param list_2: a second Python List
:param msg: custom message (String) in case of failure
"""
_l1 = list_1[:]
for i, value in enumerate(list_2):
try:
if _l1[0] != value:
from pprint import pprint
pprint(list_1)
pprint(list_2)
self.fail('%r != %r for index %d' % (_l1[0], value, i))
del _l1[0]
except IndexError:
if msg is None:
                    msg = 'list_1 has only %d elements, not %s '\
                          '(at least %r missing)' % (i, len(list_2), value)
self.fail(msg)
if _l1:
if msg is None:
msg = 'list_2 is lacking %r' % _l1
self.fail(msg)
@deprecated('Non-standard. Please use assertMultiLineEqual instead.')
def assertLinesEquals(self, string1, string2, msg=None, striplines=False):
"""compare two strings and assert that the text lines of the strings
are equal.
:param string1: a String
:param string2: a String
:param msg: custom message (String) in case of failure
:param striplines: Boolean to trigger line stripping before comparing
"""
lines1 = string1.splitlines()
lines2 = string2.splitlines()
if striplines:
lines1 = [l.strip() for l in lines1]
lines2 = [l.strip() for l in lines2]
self.assertListEqual(lines1, lines2, msg)
assertLineEqual = assertLinesEquals
@deprecated('Non-standard: please copy test method to your TestCase class')
def assertXMLWellFormed(self, stream, msg=None, context=2):
"""asserts the XML stream is well-formed (no DTD conformance check)
:param context: number of context lines in standard message
(show all data if negative).
Only available with element tree
"""
try:
from xml.etree.ElementTree import parse
self._assertETXMLWellFormed(stream, parse, msg)
except ImportError:
from xml.sax import make_parser, SAXParseException
parser = make_parser()
try:
parser.parse(stream)
except SAXParseException as ex:
if msg is None:
stream.seek(0)
for _ in range(ex.getLineNumber()):
line = stream.readline()
                    pointer = (' ' * (ex.getColumnNumber() - 1)) + '^'
msg = 'XML stream not well formed: %s\n%s%s' % (ex, line, pointer)
self.fail(msg)
@deprecated('Non-standard: please copy test method to your TestCase class')
def assertXMLStringWellFormed(self, xml_string, msg=None, context=2):
"""asserts the XML string is well-formed (no DTD conformance check)
:param context: number of context lines in standard message
(show all data if negative).
Only available with element tree
"""
try:
from xml.etree.ElementTree import fromstring
except ImportError:
from elementtree.ElementTree import fromstring
self._assertETXMLWellFormed(xml_string, fromstring, msg)
def _assertETXMLWellFormed(self, data, parse, msg=None, context=2):
"""internal function used by /assertXML(String)?WellFormed/ functions
:param data: xml_data
:param parse: appropriate parser function for this data
:param msg: error message
:param context: number of context lines in standard message
(show all data if negative).
Only available with element tree
"""
from xml.parsers.expat import ExpatError
try:
from xml.etree.ElementTree import ParseError
except ImportError:
# compatibility for <python2.7
ParseError = ExpatError
try:
parse(data)
except (ExpatError, ParseError) as ex:
if msg is None:
                if hasattr(data, 'readlines'):  # file-like object
data.seek(0)
lines = data.readlines()
else:
lines = data.splitlines(True)
nb_lines = len(lines)
context_lines = []
# catch when ParseError doesn't set valid lineno
if ex.lineno is not None:
if context < 0:
start = 1
end = nb_lines
else:
start = max(ex.lineno-context, 1)
end = min(ex.lineno+context, nb_lines)
line_number_length = len('%i' % end)
line_pattern = " %%%ii: %%s" % line_number_length
for line_no in range(start, ex.lineno):
context_lines.append(line_pattern % (line_no, lines[line_no-1]))
context_lines.append(line_pattern % (ex.lineno, lines[ex.lineno-1]))
                    context_lines.append('%s^\n' % (' ' * (1 + line_number_length + 2 + ex.offset)))
for line_no in range(ex.lineno+1, end+1):
context_lines.append(line_pattern % (line_no, lines[line_no-1]))
rich_context = ''.join(context_lines)
msg = 'XML stream not well formed: %s\n%s' % (ex, rich_context)
self.fail(msg)
@deprecated('Non-standard: please copy test method to your TestCase class')
def assertXMLEqualsTuple(self, element, tup):
"""compare an ElementTree Element to a tuple formatted as follow:
(tagname, [attrib[, children[, text[, tail]]]])"""
# check tag
self.assertTextEquals(element.tag, tup[0])
# check attrib
        if len(element.attrib) or len(tup) > 1:
            if len(tup) <= 1:
                self.fail("tuple %s has no attributes (%s expected)" % (
                    tup, dict(element.attrib)))
            self.assertDictEqual(element.attrib, tup[1])
        # check children
        if len(element) or len(tup) > 2:
            if len(tup) <= 2:
                self.fail("tuple %s has no children (%i expected)" % (
                    tup, len(element)))
            if len(element) != len(tup[2]):
                self.fail("tuple %s has %i children (%i expected)" % (
                    tup, len(tup[2]), len(element)))
for index in range(len(tup[2])):
self.assertXMLEqualsTuple(element[index], tup[2][index])
        # check text
        if element.text or len(tup) > 3:
            if len(tup) <= 3:
                self.fail("tuple %s has no text value (%r expected)" % (
                    tup, element.text))
            self.assertTextEquals(element.text, tup[3])
        # check tail
        if element.tail or len(tup) > 4:
            if len(tup) <= 4:
                self.fail("tuple %s has no tail value (%r expected)" % (
                    tup, element.tail))
            self.assertTextEquals(element.tail, tup[4])
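    # Illustrative example of the tuple format accepted above (sketch only):
    #     ('root', {'id': '1'}, [('child', {}, [], 'text')])
    # matches <root id="1"><child>text</child></root>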
def _difftext(self, lines1, lines2, junk=None, msg_prefix='Texts differ'):
junk = junk or (' ', '\t')
# result is a generator
result = difflib.ndiff(lines1, lines2, charjunk=lambda x: x in junk)
read = []
for line in result:
read.append(line)
# lines that don't start with a ' ' are diff ones
if not line.startswith(' '):
                self.fail('\n'.join(['%s\n' % msg_prefix] + read + list(result)))
@deprecated('Non-standard. Please use assertMultiLineEqual instead.')
def assertTextEquals(self, text1, text2, junk=None,
                         msg_prefix='Texts differ', striplines=False):
"""compare two multiline strings (using difflib and splitlines())
        :param text1: a Python string
        :param text2: a second Python string
        :param junk: a list of characters to ignore
:param msg_prefix: String (message prefix)
:param striplines: Boolean to trigger line stripping before comparing
"""
msg = []
        if not isinstance(text1, string_types):
            msg.append('text1 is not a string (%s)' % type(text1))
        if not isinstance(text2, string_types):
            msg.append('text2 is not a string (%s)' % type(text2))
if msg:
self.fail('\n'.join(msg))
lines1 = text1.strip().splitlines(True)
lines2 = text2.strip().splitlines(True)
if striplines:
lines1 = [line.strip() for line in lines1]
lines2 = [line.strip() for line in lines2]
self._difftext(lines1, lines2, junk, msg_prefix)
assertTextEqual = assertTextEquals
@deprecated('Non-standard: please copy test method to your TestCase class')
def assertStreamEquals(self, stream1, stream2, junk=None,
                           msg_prefix='Streams differ'):
"""compare two streams (using difflib and readlines())"""
        # if stream1 is stream2, readlines() on stream1 will also read lines
        # in stream2, so they'll appear different, although they're not
if stream1 is stream2:
return
# make sure we compare from the beginning of the stream
stream1.seek(0)
stream2.seek(0)
# compare
self._difftext(stream1.readlines(), stream2.readlines(), junk,
msg_prefix)
assertStreamEqual = assertStreamEquals
@deprecated('Non-standard: please copy test method to your TestCase class')
def assertFileEquals(self, fname1, fname2, junk=(' ', '\t')):
"""compares two files using difflib"""
        self.assertStreamEqual(open(fname1), open(fname2), junk,
                               msg_prefix='Files differ\n-:%s\n+:%s\n' % (fname1, fname2))
assertFileEqual = assertFileEquals
@deprecated('Non-standard: please copy test method to your TestCase class')
def assertDirEquals(self, path_a, path_b):
"""compares two files using difflib"""
assert osp.exists(path_a), "%s doesn't exists" % path_a
assert osp.exists(path_b), "%s doesn't exists" % path_b
all_a = [ (ipath[len(path_a):].lstrip('/'), idirs, ifiles)
for ipath, idirs, ifiles in os.walk(path_a)]
all_a.sort(key=itemgetter(0))
all_b = [ (ipath[len(path_b):].lstrip('/'), idirs, ifiles)
for ipath, idirs, ifiles in os.walk(path_b)]
all_b.sort(key=itemgetter(0))
iter_a, iter_b = iter(all_a), iter(all_b)
partial_iter = True
ipath_a, idirs_a, ifiles_a = data_a = None, None, None
while True:
try:
ipath_a, idirs_a, ifiles_a = datas_a = next(iter_a)
partial_iter = False
ipath_b, idirs_b, ifiles_b = datas_b = next(iter_b)
partial_iter = True
                self.assertTrue(ipath_a == ipath_b,
                                "unexpected %s in %s while looking for %s from %s" %
                                (ipath_a, path_a, ipath_b, path_b))
errors = {}
sdirs_a = set(idirs_a)
sdirs_b = set(idirs_b)
errors["unexpected directories"] = sdirs_a - sdirs_b
errors["missing directories"] = sdirs_b - sdirs_a
sfiles_a = set(ifiles_a)
sfiles_b = set(ifiles_b)
errors["unexpected files"] = sfiles_a - sfiles_b
errors["missing files"] = sfiles_b - sfiles_a
msgs = [ "%s: %s"% (name, items)
for name, items in errors.items() if items]
if msgs:
msgs.insert(0, "%s and %s differ :" % (
osp.join(path_a, ipath_a),
osp.join(path_b, ipath_b),
))
self.fail("\n".join(msgs))
for files in (ifiles_a, ifiles_b):
files.sort()
for index, path in enumerate(ifiles_a):
self.assertFileEquals(osp.join(path_a, ipath_a, path),
osp.join(path_b, ipath_b, ifiles_b[index]))
except StopIteration:
break
assertDirEqual = assertDirEquals
def assertIsInstance(self, obj, klass, msg=None, strict=False):
"""check if an object is an instance of a class
:param obj: the Python Object to be checked
:param klass: the target class
:param msg: a String for a custom message
:param strict: if True, check that the class of <obj> is <klass>;
else check with 'isinstance'
"""
if strict:
warnings.warn('[API] Non-standard. Strict parameter has vanished',
DeprecationWarning, stacklevel=2)
if msg is None:
if strict:
msg = '%r is not of class %s but of %s'
else:
msg = '%r is not an instance of %s but of %s'
msg = msg % (obj, klass, type(obj))
if strict:
self.assertTrue(obj.__class__ is klass, msg)
else:
self.assertTrue(isinstance(obj, klass), msg)
@deprecated('Please use assertIsNone instead.')
def assertNone(self, obj, msg=None):
"""assert obj is None
:param obj: Python Object to be tested
"""
        if msg is None:
            msg = "reference to %r when None expected" % (obj,)
        self.assertTrue(obj is None, msg)
@deprecated('Please use assertIsNotNone instead.')
def assertNotNone(self, obj, msg=None):
"""assert obj is not None"""
if msg is None:
msg = "unexpected reference to None"
        self.assertTrue(obj is not None, msg)
@deprecated('Non-standard. Please use assertAlmostEqual instead.')
def assertFloatAlmostEquals(self, obj, other, prec=1e-5,
relative=False, msg=None):
"""compares if two floats have a distance smaller than expected
precision.
:param obj: a Float
:param other: another Float to be comparted to <obj>
:param prec: a Float describing the precision
:param relative: boolean switching to relative/absolute precision
:param msg: a String for a custom message
"""
if msg is None:
msg = "%r != %r" % (obj, other)
if relative:
prec = prec*math.fabs(obj)
self.assertTrue(math.fabs(obj - other) < prec, msg)
def failUnlessRaises(self, excClass, callableObj=None, *args, **kwargs):
"""override default failUnlessRaises method to return the raised
exception instance.
Fail unless an exception of class excClass is thrown
by callableObj when invoked with arguments args and keyword
arguments kwargs. If a different type of exception is
thrown, it will not be caught, and the test case will be
deemed to have suffered an error, exactly as for an
unexpected exception.
CAUTION! There are subtle differences between Logilab and unittest2
- exc is not returned in standard version
- context capabilities in standard version
- try/except/else construction (minor)
:param excClass: the Exception to be raised
:param callableObj: a callable Object which should raise <excClass>
        :param args: a list of positional arguments for <callableObj>
        :param kwargs: a dict of keyword arguments for <callableObj>
"""
# XXX cube vcslib : test_branches_from_app
if callableObj is None:
_assert = super(TestCase, self).assertRaises
return _assert(excClass, callableObj, *args, **kwargs)
try:
callableObj(*args, **kwargs)
except excClass as exc:
class ProxyException:
def __init__(self, obj):
self._obj = obj
def __getattr__(self, attr):
warn_msg = ("This exception was retrieved with the old testlib way "
"`exc = self.assertRaises(Exc, callable)`, please use "
"the context manager instead'")
warnings.warn(warn_msg, DeprecationWarning, 2)
return self._obj.__getattribute__(attr)
return ProxyException(exc)
else:
if hasattr(excClass, '__name__'):
excName = excClass.__name__
else:
excName = str(excClass)
raise self.failureException("%s not raised" % excName)
assertRaises = failUnlessRaises
    if sys.version_info >= (3, 2):
        assertItemsEqual = unittest.TestCase.assertCountEqual
    else:
        assertCountEqual = unittest.TestCase.assertItemsEqual
    if sys.version_info < (2, 7):
def assertIsNotNone(self, value, *args, **kwargs):
self.assertNotEqual(None, value, *args, **kwargs)
TestCase.assertItemsEqual = deprecated('assertItemsEqual is deprecated, use assertCountEqual')(
TestCase.assertItemsEqual)
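# Illustrative sketch (not part of the original module): with the generative
# support implemented by TestCase.__call__/_proceed_generative above, a test
# method may yield (callable, args...) tuples and each yielded tuple is run
# and counted as an individual test, e.g.:
#
#     class SquareTC(TestCase):
#         def test_squares(self):
#             for i in range(3):
#                 yield self.assertEqual, i * i, i ** 2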
import doctest
class SkippedSuite(unittest.TestSuite):
def test(self):
"""just there to trigger test execution"""
self.skipped_test('doctest module has no DocTestSuite class')
class DocTestFinder(doctest.DocTestFinder):
def __init__(self, *args, **kwargs):
self.skipped = kwargs.pop('skipped', ())
doctest.DocTestFinder.__init__(self, *args, **kwargs)
def _get_test(self, obj, name, module, globs, source_lines):
"""override default _get_test method to be able to skip tests
according to skipped attribute's value
"""
if getattr(obj, '__name__', '') in self.skipped:
return None
return doctest.DocTestFinder._get_test(self, obj, name, module,
globs, source_lines)
class DocTest(TestCase):
"""trigger module doctest
I don't know how to make unittest.main consider the DocTestSuite instance
without this hack
"""
skipped = ()
    def __call__(self, result=None, runcondition=None, options=None):
        # pylint: disable=W0613
try:
finder = DocTestFinder(skipped=self.skipped)
suite = doctest.DocTestSuite(self.module, test_finder=finder)
# XXX iirk
doctest.DocTestCase._TestCase__exc_info = sys.exc_info
except AttributeError:
suite = SkippedSuite()
        # doctest may clobber the builtins dictionary
        # This happens to the "_" entry used by gettext
old_builtins = builtins.__dict__.copy()
try:
return suite.run(result)
finally:
builtins.__dict__.clear()
builtins.__dict__.update(old_builtins)
run = __call__
def test(self):
"""just there to trigger test execution"""
MAILBOX = None
class MockSMTP:
"""fake smtplib.SMTP"""
def __init__(self, host, port):
self.host = host
self.port = port
global MAILBOX
        self.received = MAILBOX = []
    def set_debuglevel(self, debuglevel):
        """ignore debug level"""
    def sendmail(self, fromaddr, toaddrs, body):
        """push sent mail in the mailbox"""
        self.received.append((fromaddr, toaddrs, body))
def quit(self):
"""ignore quit"""
class MockConfigParser(configparser.ConfigParser):
"""fake ConfigParser.ConfigParser"""
def __init__(self, options):
configparser.ConfigParser.__init__(self)
        for section, pairs in options.items():
            self.add_section(section)
            for key, value in pairs.items():
                self.set(section, key, value)
def write(self, _):
raise NotImplementedError()
class MockConnection:
"""fake DB-API 2.0 connexion AND cursor (i.e. cursor() return self)"""
def __init__(self, results):
self.received = []
self.states = []
self.results = results
def cursor(self):
"""Mock cursor method"""
return self
def execute(self, query, args=None):
"""Mock execute method"""
        self.received.append((query, args))
def fetchone(self):
"""Mock fetchone method"""
return self.results[0]
def fetchall(self):
"""Mock fetchall method"""
return self.results
    def commit(self):
        """Mock commit method"""
        self.states.append(('commit', len(self.received)))
def rollback(self):
"""Mock rollback method"""
        self.states.append(('rollback', len(self.received)))
def close(self):
"""Mock close method"""
pass
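def _mock_connection_demo():
    """Illustrative sketch (not part of the original module): MockConnection
    records executed queries and replays the canned result rows."""
    cnx = MockConnection([(1, 'a'), (2, 'b')])
    cursor = cnx.cursor()              # cursor() returns the connection itself
    cursor.execute("SELECT * FROM t")  # the call is recorded in cnx.received
    assert cnx.received == [("SELECT * FROM t", None)]
    assert cursor.fetchall() == [(1, 'a'), (2, 'b')]
    cnx.commit()                       # state changes land in cnx.states
    assert cnx.states == [('commit', 1)]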
def mock_object(**params):
"""creates an object using params to set attributes
>>> option = mock_object(verbose=False, index=range(5))
>>> option.verbose
False
>>> option.index
[0, 1, 2, 3, 4]
"""
return type('Mock', (), params)()
def create_files(paths, chroot):
"""Creates directories and files found in <path>.
:param paths: list of relative paths to files or directories
:param chroot: the root directory in which paths will be created
>>> from os.path import isdir, isfile
>>> isdir('/tmp/a')
False
>>> create_files(['a/b/foo.py', 'a/b/c/', 'a/b/c/d/e.py'], '/tmp')
>>> isdir('/tmp/a')
True
>>> isdir('/tmp/a/b/c')
True
>>> isfile('/tmp/a/b/c/d/e.py')
True
>>> isfile('/tmp/a/b/foo.py')
True
"""
dirs, files = set(), set()
for path in paths:
path = osp.join(chroot, path)
filename = osp.basename(path)
# path is a directory path
if filename == '':
dirs.add(path)
# path is a filename path
else:
dirs.add(osp.dirname(path))
files.add(path)
for dirpath in dirs:
if not osp.isdir(dirpath):
os.makedirs(dirpath)
for filepath in files:
open(filepath, 'w').close()
class AttrObject: # XXX cf mock_object
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def tag(*args, **kwargs):
"""descriptor adding tag to a function"""
def desc(func):
assert not hasattr(func, 'tags')
func.tags = Tags(*args, **kwargs)
return func
return desc
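# Illustrative usage sketch (relies on the Tags helper this module already
# uses above): tagging lets a runner select or skip tests by label, e.g.:
#
#     class TaggedTC(TestCase):
#         @tag('slow', 'network')
#         def test_remote_sync(self):
#             ...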
def require_version(version):
""" Compare version of python interpreter to the given one. Skip the test
if older.
"""
def check_require_version(f):
version_elements = version.split('.')
try:
compare = tuple([int(v) for v in version_elements])
except ValueError:
            raise ValueError('%s is not a correct version: should be X.Y[.Z].' % version)
current = sys.version_info[:3]
if current < compare:
def new_f(self, *args, **kwargs):
self.skipTest('Need at least %s version of python. Current version is %s.' % (version, '.'.join([str(element) for element in current])))
new_f.__name__ = f.__name__
return new_f
else:
return f
return check_require_version
def require_module(module):
""" Check if the given module is loaded. Skip the test if not.
"""
def check_require_module(f):
try:
__import__(module)
return f
except ImportError:
def new_f(self, *args, **kwargs):
                self.skipTest('%s cannot be imported.' % module)
new_f.__name__ = f.__name__
return new_f
return check_require_module
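# Illustrative usage sketch combining the two decorators above: the test is
# skipped unless the interpreter is recent enough and the module imports.
#
#     class CompatTC(TestCase):
#         @require_version('2.6')
#         @require_module('json')
#         def test_json_roundtrip(self):
#             import json
#             self.assertEqual(json.loads(json.dumps([1, 2])), [1, 2])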
|
maddox/home-assistant
|
refs/heads/dev
|
tests/util/test_dt.py
|
28
|
"""
tests.util.test_dt
~~~~~~~~~~~~~~~~~~
Tests Home Assistant date util methods.
"""
# pylint: disable=too-many-public-methods
import unittest
from datetime import datetime, timedelta
import homeassistant.util.dt as dt_util
TEST_TIME_ZONE = 'America/Los_Angeles'
class TestDateUtil(unittest.TestCase):
""" Tests util date methods. """
def setUp(self):
self.orig_default_time_zone = dt_util.DEFAULT_TIME_ZONE
def tearDown(self):
dt_util.set_default_time_zone(self.orig_default_time_zone)
def test_get_time_zone_retrieves_valid_time_zone(self):
""" Test getting a time zone. """
time_zone = dt_util.get_time_zone(TEST_TIME_ZONE)
self.assertIsNotNone(time_zone)
self.assertEqual(TEST_TIME_ZONE, time_zone.zone)
def test_get_time_zone_returns_none_for_garbage_time_zone(self):
""" Test getting a non existing time zone. """
time_zone = dt_util.get_time_zone("Non existing time zone")
self.assertIsNone(time_zone)
def test_set_default_time_zone(self):
""" Test setting default time zone. """
time_zone = dt_util.get_time_zone(TEST_TIME_ZONE)
dt_util.set_default_time_zone(time_zone)
# We cannot compare the timezones directly because of DST
self.assertEqual(time_zone.zone, dt_util.now().tzinfo.zone)
def test_utcnow(self):
""" Test the UTC now method. """
self.assertAlmostEqual(
dt_util.utcnow().replace(tzinfo=None),
datetime.utcnow(),
delta=timedelta(seconds=1))
def test_now(self):
""" Test the now method. """
dt_util.set_default_time_zone(dt_util.get_time_zone(TEST_TIME_ZONE))
self.assertAlmostEqual(
dt_util.as_utc(dt_util.now()).replace(tzinfo=None),
datetime.utcnow(),
delta=timedelta(seconds=1))
def test_as_utc_with_naive_object(self):
utcnow = datetime.utcnow()
self.assertEqual(utcnow,
dt_util.as_utc(utcnow).replace(tzinfo=None))
def test_as_utc_with_utc_object(self):
utcnow = dt_util.utcnow()
self.assertEqual(utcnow, dt_util.as_utc(utcnow))
def test_as_utc_with_local_object(self):
dt_util.set_default_time_zone(dt_util.get_time_zone(TEST_TIME_ZONE))
localnow = dt_util.now()
utcnow = dt_util.as_utc(localnow)
self.assertEqual(localnow, utcnow)
self.assertNotEqual(localnow.tzinfo, utcnow.tzinfo)
def test_as_local_with_naive_object(self):
now = dt_util.now()
self.assertAlmostEqual(
now, dt_util.as_local(datetime.utcnow()),
delta=timedelta(seconds=1))
def test_as_local_with_local_object(self):
now = dt_util.now()
        self.assertEqual(now, dt_util.as_local(now))
def test_as_local_with_utc_object(self):
dt_util.set_default_time_zone(dt_util.get_time_zone(TEST_TIME_ZONE))
utcnow = dt_util.utcnow()
localnow = dt_util.as_local(utcnow)
self.assertEqual(localnow, utcnow)
self.assertNotEqual(localnow.tzinfo, utcnow.tzinfo)
def test_utc_from_timestamp(self):
""" Test utc_from_timestamp method. """
self.assertEqual(
datetime(1986, 7, 9, tzinfo=dt_util.UTC),
dt_util.utc_from_timestamp(521251200))
def test_datetime_to_str(self):
""" Test datetime_to_str. """
self.assertEqual(
"12:00:00 09-07-1986",
dt_util.datetime_to_str(datetime(1986, 7, 9, 12, 0, 0)))
def test_datetime_to_local_str(self):
""" Test datetime_to_local_str. """
self.assertEqual(
dt_util.datetime_to_str(dt_util.now()),
dt_util.datetime_to_local_str(dt_util.utcnow()))
def test_str_to_datetime_converts_correctly(self):
""" Test str_to_datetime converts strings. """
self.assertEqual(
datetime(1986, 7, 9, 12, 0, 0, tzinfo=dt_util.UTC),
dt_util.str_to_datetime("12:00:00 09-07-1986"))
def test_str_to_datetime_returns_none_for_incorrect_format(self):
""" Test str_to_datetime returns None if incorrect format. """
self.assertIsNone(dt_util.str_to_datetime("not a datetime string"))
def test_strip_microseconds(self):
test_time = datetime(2015, 1, 1, microsecond=5000)
self.assertNotEqual(0, test_time.microsecond)
self.assertEqual(0, dt_util.strip_microseconds(test_time).microsecond)
|
sbesson/PyGithub
|
refs/heads/master
|
tests/BranchProtection.py
|
3
|
############################ Copyrights and license ############################
# #
# Copyright 2018 Steve Kowalik <steven@wedontsleep.org> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from . import Framework
class BranchProtection(Framework.TestCase):
def setUp(self):
super().setUp()
self.branch_protection = (
self.g.get_user()
.get_repo("PyGithub")
.get_branch("integrations")
.get_protection()
)
def testAttributes(self):
self.assertTrue(self.branch_protection.required_status_checks.strict)
self.assertEqual(
self.branch_protection.required_status_checks.contexts, ["foo/bar"]
)
self.assertEqual(
self.branch_protection.url,
"https://api.github.com/repos/jacquev6/PyGithub/branches/integrations/protection",
)
self.assertEqual(
self.branch_protection.__repr__(),
'BranchProtection(url="https://api.github.com/repos/jacquev6/PyGithub/branches/integrations/protection")',
)
|
myRisk/dynamicDNA
|
refs/heads/master
|
setuptools-15.2/setuptools/command/upload_docs.py
|
390
|
# -*- coding: utf-8 -*-
"""upload_docs
Implements a Distutils 'upload_docs' subcommand (upload documentation to
PyPI's pythonhosted.org).
"""
from base64 import standard_b64encode
from distutils import log
from distutils.errors import DistutilsOptionError
from distutils.command.upload import upload
import os
import socket
import zipfile
import tempfile
import sys
import shutil
from setuptools.compat import httplib, urlparse, unicode, iteritems, PY3
from pkg_resources import iter_entry_points
errors = 'surrogateescape' if PY3 else 'strict'
# This is not just a replacement for byte literals
# but works as a general purpose encoder
def b(s, encoding='utf-8'):
if isinstance(s, unicode):
return s.encode(encoding, errors)
return s
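# Illustrative note (not part of the original): b() encodes only text, so it
# is safe to apply to mixed values, e.g.
#     b('docs') == 'docs'.encode('utf-8')   # text is encoded
#     b(b'docs') == b'docs'                 # non-text input passes through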
class upload_docs(upload):
description = 'Upload documentation to PyPI'
user_options = [
('repository=', 'r',
"url of repository [default: %s]" % upload.DEFAULT_REPOSITORY),
('show-response', None,
'display full response text from server'),
('upload-dir=', None, 'directory to upload'),
]
boolean_options = upload.boolean_options
def has_sphinx(self):
if self.upload_dir is None:
for ep in iter_entry_points('distutils.commands', 'build_sphinx'):
return True
sub_commands = [('build_sphinx', has_sphinx)]
def initialize_options(self):
upload.initialize_options(self)
self.upload_dir = None
self.target_dir = None
def finalize_options(self):
upload.finalize_options(self)
if self.upload_dir is None:
if self.has_sphinx():
build_sphinx = self.get_finalized_command('build_sphinx')
self.target_dir = build_sphinx.builder_target_dir
else:
build = self.get_finalized_command('build')
self.target_dir = os.path.join(build.build_base, 'docs')
else:
self.ensure_dirname('upload_dir')
self.target_dir = self.upload_dir
self.announce('Using upload directory %s' % self.target_dir)
def create_zipfile(self, filename):
zip_file = zipfile.ZipFile(filename, "w")
try:
self.mkpath(self.target_dir) # just in case
for root, dirs, files in os.walk(self.target_dir):
if root == self.target_dir and not files:
raise DistutilsOptionError(
"no files found in upload directory '%s'"
% self.target_dir)
for name in files:
full = os.path.join(root, name)
relative = root[len(self.target_dir):].lstrip(os.path.sep)
dest = os.path.join(relative, name)
zip_file.write(full, dest)
finally:
zip_file.close()
def run(self):
# Run sub commands
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
tmp_dir = tempfile.mkdtemp()
name = self.distribution.metadata.get_name()
zip_file = os.path.join(tmp_dir, "%s.zip" % name)
try:
self.create_zipfile(zip_file)
self.upload_file(zip_file)
finally:
shutil.rmtree(tmp_dir)
def upload_file(self, filename):
f = open(filename, 'rb')
content = f.read()
f.close()
meta = self.distribution.metadata
data = {
':action': 'doc_upload',
'name': meta.get_name(),
'content': (os.path.basename(filename), content),
}
# set up the authentication
credentials = b(self.username + ':' + self.password)
credentials = standard_b64encode(credentials)
if PY3:
credentials = credentials.decode('ascii')
auth = "Basic " + credentials
# Build up the MIME payload for the POST data
boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
sep_boundary = b('\n--') + b(boundary)
end_boundary = sep_boundary + b('--')
body = []
for key, values in iteritems(data):
title = '\nContent-Disposition: form-data; name="%s"' % key
# handle multiple entries for the same name
if not isinstance(values, list):
values = [values]
for value in values:
if type(value) is tuple:
title += '; filename="%s"' % value[0]
value = value[1]
else:
value = b(value)
body.append(sep_boundary)
body.append(b(title))
body.append(b("\n\n"))
body.append(value)
if value and value[-1:] == b('\r'):
body.append(b('\n')) # write an extra newline (lurve Macs)
body.append(end_boundary)
body.append(b("\n"))
body = b('').join(body)
self.announce("Submitting documentation to %s" % (self.repository),
log.INFO)
# build the Request
# We can't use urllib2 since we need to send the Basic
# auth right with the first request
schema, netloc, url, params, query, fragments = \
urlparse(self.repository)
assert not params and not query and not fragments
if schema == 'http':
conn = httplib.HTTPConnection(netloc)
elif schema == 'https':
conn = httplib.HTTPSConnection(netloc)
else:
raise AssertionError("unsupported schema " + schema)
data = ''
try:
conn.connect()
conn.putrequest("POST", url)
content_type = 'multipart/form-data; boundary=%s' % boundary
conn.putheader('Content-type', content_type)
conn.putheader('Content-length', str(len(body)))
conn.putheader('Authorization', auth)
conn.endheaders()
conn.send(body)
except socket.error as e:
self.announce(str(e), log.ERROR)
return
r = conn.getresponse()
if r.status == 200:
self.announce('Server response (%s): %s' % (r.status, r.reason),
log.INFO)
elif r.status == 301:
location = r.getheader('Location')
if location is None:
location = 'https://pythonhosted.org/%s/' % meta.get_name()
self.announce('Upload successful. Visit %s' % location,
log.INFO)
else:
self.announce('Upload failed (%s): %s' % (r.status, r.reason),
log.ERROR)
if self.show_response:
print('-' * 75, r.read(), '-' * 75)
|
Open-Plus/opgui
|
refs/heads/master
|
lib/python/Components/Converter/EventName.py
|
8
|
from enigma import eEPGCache
from Components.Converter.Converter import Converter
from Components.Element import cached
from Components.Converter.genre import getGenreStringSub
class EventName(Converter, object):
NAME = 0
SHORT_DESCRIPTION = 1
EXTENDED_DESCRIPTION = 2
FULL_DESCRIPTION = 3
ID = 4
NAME_NOW = 5
NAME_NEXT = 6
NAME_NEXT2 = 7
GENRE = 8
RATING = 9
SRATING = 10
RAWRATING = 11
RATINGCOUNTRY = 12
EVENT_EXTRADATA = 13
EPG_SOURCE = 14
NEXT_DESCRIPTION = 21
THIRD_NAME = 22
THIRD_DESCRIPTION = 23
AUSSHORT = 0
AUSLONG = 1
AUSTEXT = {
"NC" : (" ", "Not Classified"),
"P" : ("P", "Preschool"),
"C" : ("C", "Children"),
"G" : ("G", "General"),
"PG" : ("PG", "Parental Guidance Recommended"),
"M" : ("M", "Mature Audience 15+"),
"MA" : ("MA", "Mature Adult Audience 15+"),
"AV" : ("AV", "Adult Audience, Strong Violence 15+"),
"R" : ("R", "Restricted 18+")
}
AUSRATINGS = {
0 : AUSTEXT["NC"],
1 : AUSTEXT["NC"],
2 : AUSTEXT["P"],
3 : AUSTEXT["P"],
4 : AUSTEXT["C"],
5 : AUSTEXT["C"],
6 : AUSTEXT["G"],
7 : AUSTEXT["G"],
8 : AUSTEXT["PG"],
9 : AUSTEXT["PG"],
10 : AUSTEXT["M"],
11 : AUSTEXT["M"],
12 : AUSTEXT["MA"],
13 : AUSTEXT["MA"],
14 : AUSTEXT["AV"],
15 : AUSTEXT["R"]
}
def __init__(self, type):
Converter.__init__(self, type)
self.epgcache = eEPGCache.getInstance()
args = type.split(',')
args = [arg.strip() for arg in args]
type = args.pop(0)
if "Separated" in args:
self.SEPARATOR = "\n\n"
else:
self.SEPARATOR = "\n"
if "Trimmed" in args:
self.TRIM = True
else:
self.TRIM = False
if type == "Description":
self.type = self.SHORT_DESCRIPTION
elif type == "ExtendedDescription":
self.type = self.EXTENDED_DESCRIPTION
elif type == "FullDescription":
self.type = self.FULL_DESCRIPTION
elif type == "ID":
self.type = self.ID
elif type == "NameNow" or type == "NowName":
self.type = self.NAME_NOW
elif type == "NameNext" or type == "NextName":
self.type = self.NAME_NEXT
elif type == "NameNextOnly" or type == "NextNameOnly":
self.type = self.NAME_NEXT2
elif type == "Genre":
self.type = self.GENRE
elif type == "Rating":
self.type = self.RATING
elif type == "SmallRating":
self.type = self.SRATING
elif type == "RawRating":
self.type = self.RAWRATING
elif type == "RatingCountry":
self.type = self.RATINGCOUNTRY
elif type == "EventExtraData":
self.type = self.EVENT_EXTRADATA
elif type == "EPGSource":
self.type = self.EPG_SOURCE
elif type == "NextDescription":
self.type = self.NEXT_DESCRIPTION
elif type == "ThirdName":
self.type = self.THIRD_NAME
elif type == "ThirdDescription":
self.type = self.THIRD_DESCRIPTION
else:
self.type = self.NAME
def trimText(self, text):
if self.TRIM:
return str(text).strip()
else:
return text
@cached
def getText(self):
event = self.source.event
if event is None:
return ""
if self.type == self.NAME:
return self.trimText(event.getEventName())
elif self.type == self.RATINGCOUNTRY:
rating = event.getParentalData()
if rating is None:
return ""
else:
return rating.getCountryCode()
elif self.type == self.RAWRATING:
rating = event.getParentalData()
if rating is None:
return ""
else:
return "%d" % int(rating.getRating())
elif self.type == self.SRATING:
rating = event.getParentalData()
if rating is None:
return ""
else:
country = rating.getCountryCode()
age = int(rating.getRating())
if country.upper() == "AUS":
errmsg = _("BC%d") % age
undef = (errmsg, "")
return _(self.AUSRATINGS.get(age, undef)[self.AUSSHORT])
else:
if age == 0:
return _("All ages")
elif age > 15:
return _("bc%d") % age
else:
age += 3
return " %d+" % age
elif self.type == self.RATING:
rating = event.getParentalData()
if rating is None:
return ""
else:
country = rating.getCountryCode()
age = int(rating.getRating())
if country.upper() == "AUS":
errmsg = _("Defined By Broadcaster (%d)") % age
undef = ("", errmsg)
return _(self.AUSRATINGS.get(age, undef)[self.AUSLONG])
else:
if age == 0:
return _("Rating undefined")
elif age > 15:
return _("Rating defined by broadcaster - %d") % age
else:
age += 3
return _("Minimum age %d years") % age
elif self.type == self.GENRE:
genre = event.getGenreData()
if genre is None:
return ""
else:
return self.trimText(getGenreStringSub(genre.getLevel1(), genre.getLevel2()))
elif self.type == self.NAME_NOW:
return pgettext("now/next: 'now' event label", "Now") + ": " + self.trimText(event.getEventName())
elif self.type == self.SHORT_DESCRIPTION:
return self.trimText(event.getShortDescription())
elif self.type == self.EXTENDED_DESCRIPTION:
return self.trimText(event.getExtendedDescription() or event.getShortDescription())
elif self.type == self.FULL_DESCRIPTION:
description = self.trimText(event.getShortDescription())
extended = self.trimText(event.getExtendedDescription())
if description and extended and description.replace('\n','') == extended.replace('\n',''):
return extended
if description and extended:
description += self.SEPARATOR
return description + extended
elif self.type == self.ID:
return str(event.getEventId())
elif self.type == self.EVENT_EXTRADATA:
pass
            # not included yet
            # ret = event.getExtraEventData()
elif self.type == self.EPG_SOURCE:
pass
            # not included yet
            # ret = event.getEPGSource()
        elif self.type in (self.NAME_NEXT, self.NAME_NEXT2) or self.type >= self.NEXT_DESCRIPTION:
try:
reference = self.source.service
info = reference and self.source.info
if info is None:
return
test = ['ITSECX', (reference.toString(), 1, -1, 1440)] # search next 24 hours
self.list = [] if self.epgcache is None else self.epgcache.lookupEvent(test)
if self.list:
if self.type == self.NAME_NEXT and self.list[1][1]:
return pgettext("now/next: 'next' event label", "Next") + ": " + self.trimText(self.list[1][1])
elif self.type == self.NAME_NEXT2 and self.list[1][1]:
return self.trimText(self.list[1][1])
elif self.type == self.NEXT_DESCRIPTION and (self.list[1][2] or self.list[1][3]):
description = self.trimText(self.list[1][2])
extended = self.trimText(self.list[1][3])
if (description and extended) and (description[0:20] != extended[0:20]):
description += self.SEPARATOR
return description + extended
elif self.type == self.THIRD_NAME and self.list[2][1]:
return pgettext("third event: 'third' event label", "Later") + ": " + self.trimText(self.list[2][1])
elif self.type == self.THIRD_DESCRIPTION and (self.list[2][2] or self.list[2][3]):
description = self.trimText(self.list[2][2])
extended = self.trimText(self.list[2][3])
if (description and extended) and (description[0:20] != extended[0:20]):
description += self.SEPARATOR
return description + extended
else:
# failed to return any epg data.
return ""
except:
# failed to return any epg data.
if self.type == self.NAME_NEXT:
return pgettext("now/next: 'next' event label", "Next") + ": " + self.trimText(event.getEventName())
return ""
text = property(getText)
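# Illustrative helper (not used by the converter above): mirrors how
# EventName.__init__ splits its skin argument string into the base type
# plus the "Separated" and "Trimmed" options.
def _parse_converter_args(type_string):
    args = [arg.strip() for arg in type_string.split(',')]
    base = args.pop(0)
    separator = "\n\n" if "Separated" in args else "\n"
    trimmed = "Trimmed" in args
    return base, separator, trimmed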
|
tom111/tatt
|
refs/heads/master
|
tatt/scriptwriter.py
|
1
|
""" Filling script templates """
import random
import os
import portage
import sys
from .gentooPackage import gentooPackage as gP
from .usecombis import findUseFlagCombis
from .tinderbox import stablerdeps
from .tool import unique
from portage.dep import dep_getkey
#### USE-COMBIS ########
def scriptTemplate(job, config, filename):
""" open snippet file and replace common placeholders """
try:
snippetfile=open(config['template-dir'] + filename, 'r')
except IOError:
print("template " + filename + " not found in " + config['template-dir'])
sys.exit(1)
reportname = job.name + ".report"
if job.type == "stable":
newkeyword = config['arch']
elif job.type == "keyword":
newkeyword = "~" + config['arch']
else:
print ("No job type? Can't continue. This is a bug")
sys.exit(1)
snippet = snippetfile.read()
snippet = snippet.replace("@@EMERGEOPTS@@", config['emergeopts'])
if job.bugnumber:
snippet = snippet.replace("@@BUG@@", job.bugnumber)
else:
snippet = snippet.replace("@@BUG@@", '')
snippet = snippet.replace("@@JOB@@", job.name)
snippet = snippet.replace("@@ARCH@@", config['arch'])
snippet = snippet.replace("@@REPODIR@@", config['repodir'])
snippet = snippet.replace("@@REPORTFILE@@", reportname)
snippet = snippet.replace("@@BUILDLOGDIR@@", config['buildlogdir'])
snippet = snippet.replace("@@NEWKEYWORD@@", newkeyword)
snippet = snippet.replace("@@TEMPLATEDIR@@", config['template-dir'])
return snippet
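def _template_demo():
    """Illustrative sketch only (not called by tatt): shows the placeholder
    substitution scriptTemplate() performs, here on a hypothetical one-line
    template; real templates are read from config['template-dir']."""
    snippet = 'USE="@@USE@@" emerge @@EMERGEOPTS@@ =@@CPV@@\n'
    snippet = snippet.replace("@@USE@@", "doc -test")
    snippet = snippet.replace("@@EMERGEOPTS@@", "--verbose")
    snippet = snippet.replace("@@CPV@@", "dev-python/tatt-0.3")
    return snippet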
def useCombiTestString(job, pack, config, port):
""" Build with diffent useflag combis """
usesnippet = scriptTemplate(job, config, "use-snippet")
usesnippet = usesnippet.replace("@@CPV@@", pack.packageString() )
    # test once with tests and the user's flags
# do this first to trigger bugs in some packages where the test suite relies on
# the package being already installed
usetestsnippet = scriptTemplate(job, config, "use-test-snippet")
usetestsnippet = usetestsnippet.replace("@@CPV@@", pack.packageString() )
s = usetestsnippet.replace("@@USE@@", "")
    usecombis = findUseFlagCombis(pack, config, port)
for uc in usecombis:
localsnippet = usesnippet.replace("@@USE@@", uc)
s = s + localsnippet
return s
def writeusecombiscript(job, config):
# job is a tatt job object
# config is a tatt configuration
useheader = scriptTemplate(job, config, "use-header")
if os.path.exists(config['template-dir'] + "use-loop"):
useloop = scriptTemplate(job, config, "use-loop")
else:
useloop = "@@LOOP_BODY@@"
outfilename = (job.name + "-useflags.sh")
reportname = (job.name + ".report")
if os.path.isfile(outfilename):
print("WARNING: Will overwrite " + outfilename)
outfile = open(outfilename, 'w')
outfile.write(useheader)
port = portage.db[portage.root]["porttree"].dbapi
for p in job.packageList:
loop = useloop.replace("@@LOOP_BODY@@", useCombiTestString(job, p, config, port))
loop = loop.replace("@@CPV@@", p.packageString())
outfile.write(loop)
if os.path.exists(config['template-dir'] + "use-footer"):
footer = scriptTemplate(job, config, "use-footer")
outfile.write(footer)
# Note: fchmod needs the filedescriptor which is an internal
# integer retrieved by fileno().
os.fchmod(outfile.fileno(), 0o744) # rwxr--r--
outfile.close()
######################################
############ RDEPS ################
def rdepTestString(job, rdep, config):
snip = scriptTemplate(job, config, "revdep-snippet")
uflags = []
for st in rdep[1]:
st = st.strip()
if len(st) == 0:
continue
if st[0] == "!":
uflags.append("-" + st[1:])
else:
uflags.append(st)
ustring = "USE=\'" + " ".join(uflags) + "\'"
snip = snip.replace("@@USE@@", ustring)
snip = snip.replace("@@CPV@@", rdep[0] )
snip = snip.replace("@@EMERGEOPTS@@", config['emergeopts'])
return snip
def writerdepscript(job, config):
# Populate the list of rdeps
# while at it also create a list of only the package names
rdeps = []
pkgs = []
for p in job.packageList:
atom = p.packageCatName()
pkgs.append(atom)
        rdeps = rdeps + stablerdeps(atom, config)
if len(rdeps) == 0:
print("No stable rdeps for " + job.name)
return
# now clean the list
    # first find all those entries that have no useflags and are main
    # packages of this job
    for i in range(len(rdeps) - 1, -1, -1):
r = rdeps[i]
hasU = False
for st in r[1]:
if len(st.strip()) > 0:
hasU = True
break
if hasU:
continue
if r[0] in pkgs:
rdeps.pop(i)
# If there are rdeps, write the script
rdepheader = scriptTemplate(job, config, "revdep-header")
outfilename = (job.name + "-rdeps.sh")
if os.path.isfile(outfilename):
print("WARNING: Will overwrite " + outfilename)
    outfile = open(outfilename, 'w')
outfile.write(rdepheader)
for r in rdeps:
# Todo: remove duplicates
outfile.write(rdepTestString(job, r, config))
os.fchmod(outfile.fileno(), 0o744)
outfile.close()
#######Write report script############
def writesucessreportscript(job, config):
outfilename = (job.name + "-success.sh")
if os.path.isfile(outfilename):
print("WARNING: Will overwrite " + outfilename)
updatebug = scriptTemplate(job, config, "updatebug")
    outfile = open(outfilename, 'w')
outfile.write(updatebug)
os.fchmod(outfile.fileno(), 0o744)
outfile.close()
print("Success Report script written to " + outfilename)
####### Write the commit script #########
def writecommitscript(job, config):
cheader = scriptTemplate(job, config, "commit-header")
csnippet = scriptTemplate(job, config, "commit-snippet")
csnippet2 = scriptTemplate(job, config, "commit-snippet-2")
cfooter = scriptTemplate(job, config, "commit-footer")
outfilename = (job.name + "-commit.sh")
if os.path.isfile(outfilename):
print("WARNING: Will overwrite " + outfilename)
    outfile = open(outfilename, 'w')
    outfile.write(cheader)
# Here's a catch: If there are multiple versions of the same package to be
# stabilized, then we want only one keywording block and one commit block
# for them. Therefore we split up the loop by sorting job.packlist
# accordingly, saving them in a hash-table with the package names as keys
# and the packages as values.
    packageHash = dict()
for pack in job.packageList:
if pack.packageCatName() in packageHash:
packageHash[pack.packageCatName()] = packageHash[pack.packageCatName()] + [pack]
else:
packageHash[pack.packageCatName()] = [pack]
# First round (ekeyword)
for pack in packageHash.keys():
# Prepare a list of ebuild names strings
ebuilds = [p.packageName()+"-"+p.packageVersion()+".ebuild" for p in packageHash[pack]]
s = csnippet.replace("@@EBUILD@@", " ".join(ebuilds))
s = s.replace("@@CP@@", pack)
outfile.write(s)
# Second round: repoman -d full checks and commit, should be done once per
# key of packageHash
for pack in packageHash.keys():
# Prepare a list of ebuild names strings
ebuilds = [p.packageName()+"-"+p.packageVersion()+".ebuild" for p in packageHash[pack]]
s = csnippet2.replace("@@EBUILD@@", " ".join(ebuilds))
s = s.replace("@@CP@@", pack)
outfile.write(s)
# Footer (committing)
outfile.write(cfooter)
os.fchmod(outfile.fileno(), 0o744)
outfile.close()
print("Commit script written to " + outfilename)
######## Write clean-up script ##############
def writeCleanUpScript(job, config, unmaskname):
script = scriptTemplate(job, config, "cleanup")
script = script.replace("@@KEYWORDFILE@@", unmaskname)
outfilename = (job.name + "-cleanup.sh")
if os.path.isfile(outfilename):
print("WARNING: Will overwrite " + outfilename)
    outfile = open(outfilename, 'w')
outfile.write(script)
os.fchmod(outfile.fileno(), 0o744)
outfile.close()
|
WeblateOrg/weblate
|
refs/heads/main
|
weblate/lang/management/commands/list_languages.py
|
2
|
#
# Copyright © 2012 - 2021 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
from django.utils.translation import activate, gettext
from weblate.lang.models import Language
from weblate.utils.management.base import BaseCommand
class Command(BaseCommand):
help = "List language definitions"
def add_arguments(self, parser):
parser.add_argument(
"--lower", action="store_true", help="Lowercase translated name"
)
parser.add_argument("locale", help="Locale for printing")
def handle(self, *args, **options):
"""Create default set of languages.
Optionally updating them to match current shipped definitions.
"""
activate(options["locale"])
for language in Language.objects.order():
name = gettext(language.name)
if options["lower"]:
name = name[0].lower() + name[1:]
self.stdout.write(f"| {language.code} || {language.name} || {name}")
self.stdout.write("|-")
|
follow99/django
|
refs/heads/master
|
django/contrib/gis/db/models/query.py
|
39
|
import warnings
from django.contrib.gis.db.models.fields import (
GeometryField, LineStringField, PointField, get_srid_info,
)
from django.contrib.gis.db.models.lookups import GISLookup
from django.contrib.gis.db.models.sql import (
AreaField, DistanceField, GeomField, GMLField,
)
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Area, Distance
from django.db import connections
from django.db.models.expressions import RawSQL
from django.db.models.fields import Field
from django.db.models.query import QuerySet
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
class GeoQuerySet(QuerySet):
"The Geographic QuerySet."
# ### GeoQuerySet Methods ###
def area(self, tolerance=0.05, **kwargs):
"""
Returns the area of the geographic field in an `area` attribute on
each element of this GeoQuerySet.
"""
# Performing setup here rather than in `_spatial_attribute` so that
# we can get the units for `AreaField`.
procedure_args, geo_field = self._spatial_setup(
'area', field_name=kwargs.get('field_name'))
s = {'procedure_args': procedure_args,
'geo_field': geo_field,
'setup': False,
}
connection = connections[self.db]
backend = connection.ops
if backend.oracle:
s['procedure_fmt'] = '%(geo_col)s,%(tolerance)s'
s['procedure_args']['tolerance'] = tolerance
s['select_field'] = AreaField('sq_m') # Oracle returns area in units of meters.
elif backend.postgis or backend.spatialite:
if backend.geography:
# Geography fields support area calculation, returns square meters.
s['select_field'] = AreaField('sq_m')
elif not geo_field.geodetic(connection):
# Getting the area units of the geographic field.
s['select_field'] = AreaField(Area.unit_attname(geo_field.units_name(connection)))
else:
# TODO: Do we want to support raw number areas for geodetic fields?
raise Exception('Area on geodetic coordinate systems not supported.')
return self._spatial_attribute('area', s, **kwargs)
def centroid(self, **kwargs):
"""
Returns the centroid of the geographic field in a `centroid`
attribute on each element of this GeoQuerySet.
"""
return self._geom_attribute('centroid', **kwargs)
def difference(self, geom, **kwargs):
"""
Returns the spatial difference of the geographic field in a `difference`
attribute on each element of this GeoQuerySet.
"""
return self._geomset_attribute('difference', geom, **kwargs)
def distance(self, geom, **kwargs):
"""
Returns the distance from the given geographic field name to the
given geometry in a `distance` attribute on each element of the
GeoQuerySet.
Keyword Arguments:
`spheroid` => If the geometry field is geodetic and PostGIS is
the spatial database, then the more accurate
spheroid calculation will be used instead of the
quicker sphere calculation.
`tolerance` => Used only for Oracle. The tolerance is
in meters -- a default of 5 centimeters (0.05)
is used.
"""
return self._distance_attribute('distance', geom, **kwargs)
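    # Illustrative usage sketch (Zipcode is a hypothetical geo-enabled model):
    #     qs = Zipcode.objects.distance(pnt)
    #     qs[0].distance   # a Distance object attached to each instance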
def envelope(self, **kwargs):
"""
Returns a Geometry representing the bounding box of the
Geometry field in an `envelope` attribute on each element of
the GeoQuerySet.
"""
return self._geom_attribute('envelope', **kwargs)
def force_rhr(self, **kwargs):
"""
Returns a modified version of the Polygon/MultiPolygon in which
all of the vertices follow the Right-Hand-Rule. By default,
this is attached as the `force_rhr` attribute on each element
of the GeoQuerySet.
"""
return self._geom_attribute('force_rhr', **kwargs)
def geojson(self, precision=8, crs=False, bbox=False, **kwargs):
"""
Returns a GeoJSON representation of the geometry field in a `geojson`
attribute on each element of the GeoQuerySet.
The `crs` and `bbox` keywords may be set to True if the user wants
the coordinate reference system and the bounding box to be included
in the GeoJSON representation of the geometry.
"""
backend = connections[self.db].ops
if not backend.geojson:
raise NotImplementedError('Only PostGIS 1.3.4+ and SpatiaLite 3.0+ '
'support GeoJSON serialization.')
if not isinstance(precision, six.integer_types):
raise TypeError('Precision keyword must be set with an integer.')
options = 0
if crs and bbox:
options = 3
elif bbox:
options = 1
elif crs:
options = 2
s = {'desc': 'GeoJSON',
'procedure_args': {'precision': precision, 'options': options},
'procedure_fmt': '%(geo_col)s,%(precision)s,%(options)s',
}
return self._spatial_attribute('geojson', s, **kwargs)
def geohash(self, precision=20, **kwargs):
"""
Returns a GeoHash representation of the given field in a `geohash`
attribute on each element of the GeoQuerySet.
        The `precision` keyword may be used to customize the number of
        _characters_ used in the output GeoHash; the default is 20.
"""
s = {'desc': 'GeoHash',
'procedure_args': {'precision': precision},
'procedure_fmt': '%(geo_col)s,%(precision)s',
}
return self._spatial_attribute('geohash', s, **kwargs)
def gml(self, precision=8, version=2, **kwargs):
"""
Returns GML representation of the given field in a `gml` attribute
on each element of the GeoQuerySet.
"""
backend = connections[self.db].ops
s = {'desc': 'GML', 'procedure_args': {'precision': precision}}
if backend.postgis:
s['procedure_fmt'] = '%(version)s,%(geo_col)s,%(precision)s'
s['procedure_args'] = {'precision': precision, 'version': version}
if backend.oracle:
s['select_field'] = GMLField()
return self._spatial_attribute('gml', s, **kwargs)
def intersection(self, geom, **kwargs):
"""
Returns the spatial intersection of the Geometry field in
an `intersection` attribute on each element of this
GeoQuerySet.
"""
return self._geomset_attribute('intersection', geom, **kwargs)
def kml(self, **kwargs):
"""
Returns KML representation of the geometry field in a `kml`
attribute on each element of this GeoQuerySet.
"""
s = {'desc': 'KML',
'procedure_fmt': '%(geo_col)s,%(precision)s',
'procedure_args': {'precision': kwargs.pop('precision', 8)},
}
return self._spatial_attribute('kml', s, **kwargs)
def length(self, **kwargs):
"""
Returns the length of the geometry field as a `Distance` object
stored in a `length` attribute on each element of this GeoQuerySet.
"""
return self._distance_attribute('length', None, **kwargs)
def mem_size(self, **kwargs):
"""
Returns the memory size (number of bytes) that the geometry field takes
in a `mem_size` attribute on each element of this GeoQuerySet.
"""
return self._spatial_attribute('mem_size', {}, **kwargs)
def num_geom(self, **kwargs):
"""
Returns the number of geometries if the field is a
GeometryCollection or Multi* Field in a `num_geom`
        attribute on each element of this GeoQuerySet; otherwise
        sets with None.
"""
return self._spatial_attribute('num_geom', {}, **kwargs)
def num_points(self, **kwargs):
"""
Returns the number of points in the first linestring in the
Geometry field in a `num_points` attribute on each element of
this GeoQuerySet; otherwise sets with None.
"""
return self._spatial_attribute('num_points', {}, **kwargs)
def perimeter(self, **kwargs):
"""
Returns the perimeter of the geometry field as a `Distance` object
stored in a `perimeter` attribute on each element of this GeoQuerySet.
"""
return self._distance_attribute('perimeter', None, **kwargs)
def point_on_surface(self, **kwargs):
"""
Returns a Point geometry guaranteed to lie on the surface of the
Geometry field in a `point_on_surface` attribute on each element
of this GeoQuerySet; otherwise sets with None.
"""
return self._geom_attribute('point_on_surface', **kwargs)
def reverse_geom(self, **kwargs):
"""
Reverses the coordinate order of the geometry, and attaches as a
`reverse` attribute on each element of this GeoQuerySet.
"""
s = {'select_field': GeomField()}
kwargs.setdefault('model_att', 'reverse_geom')
if connections[self.db].ops.oracle:
s['geo_field_type'] = LineStringField
return self._spatial_attribute('reverse', s, **kwargs)
def scale(self, x, y, z=0.0, **kwargs):
"""
Scales the geometry to a new size by multiplying the ordinates
with the given x,y,z scale factors.
"""
if connections[self.db].ops.spatialite:
if z != 0.0:
raise NotImplementedError('SpatiaLite does not support 3D scaling.')
s = {'procedure_fmt': '%(geo_col)s,%(x)s,%(y)s',
'procedure_args': {'x': x, 'y': y},
'select_field': GeomField(),
}
else:
s = {'procedure_fmt': '%(geo_col)s,%(x)s,%(y)s,%(z)s',
'procedure_args': {'x': x, 'y': y, 'z': z},
'select_field': GeomField(),
}
return self._spatial_attribute('scale', s, **kwargs)
def snap_to_grid(self, *args, **kwargs):
"""
Snap all points of the input geometry to the grid. How the
        geometry is snapped to the grid depends on how many arguments
        are given:
- 1 argument : A single size to snap both the X and Y grids to.
- 2 arguments: X and Y sizes to snap the grid to.
- 4 arguments: X, Y sizes and the X, Y origins.
"""
        if any(not isinstance(arg, (float,) + six.integer_types) for arg in args):
            raise TypeError('Size argument(s) for the grid must be float or integer values.')
nargs = len(args)
if nargs == 1:
size = args[0]
procedure_fmt = '%(geo_col)s,%(size)s'
procedure_args = {'size': size}
elif nargs == 2:
xsize, ysize = args
procedure_fmt = '%(geo_col)s,%(xsize)s,%(ysize)s'
procedure_args = {'xsize': xsize, 'ysize': ysize}
elif nargs == 4:
xsize, ysize, xorigin, yorigin = args
procedure_fmt = '%(geo_col)s,%(xorigin)s,%(yorigin)s,%(xsize)s,%(ysize)s'
procedure_args = {'xsize': xsize, 'ysize': ysize,
'xorigin': xorigin, 'yorigin': yorigin}
else:
raise ValueError('Must provide 1, 2, or 4 arguments to `snap_to_grid`.')
s = {'procedure_fmt': procedure_fmt,
'procedure_args': procedure_args,
'select_field': GeomField(),
}
return self._spatial_attribute('snap_to_grid', s, **kwargs)
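    # Illustrative calls for the three accepted argument forms (hypothetical
    # `Zone` model); these are sketches, not output from a real run:
    #
    #     Zone.objects.snap_to_grid(0.005)                  # one size for both X and Y
    #     Zone.objects.snap_to_grid(0.005, 0.01)            # separate X and Y sizes
    #     Zone.objects.snap_to_grid(0.005, 0.01, 0.5, 0.5)  # sizes plus X/Y origins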
def svg(self, relative=False, precision=8, **kwargs):
"""
Returns SVG representation of the geographic field in a `svg`
attribute on each element of this GeoQuerySet.
Keyword Arguments:
`relative` => If set to True, this will evaluate the path in
terms of relative moves (rather than absolute).
`precision` => May be used to set the maximum number of decimal
digits used in output (defaults to 8).
"""
relative = int(bool(relative))
if not isinstance(precision, six.integer_types):
raise TypeError('SVG precision keyword argument must be an integer.')
s = {
'desc': 'SVG',
'procedure_fmt': '%(geo_col)s,%(rel)s,%(precision)s',
'procedure_args': {
'rel': relative,
'precision': precision,
}
}
return self._spatial_attribute('svg', s, **kwargs)
def sym_difference(self, geom, **kwargs):
"""
Returns the symmetric difference of the geographic field in a
`sym_difference` attribute on each element of this GeoQuerySet.
"""
return self._geomset_attribute('sym_difference', geom, **kwargs)
def translate(self, x, y, z=0.0, **kwargs):
"""
Translates the geometry to a new location using the given numeric
parameters as offsets.
"""
if connections[self.db].ops.spatialite:
if z != 0.0:
raise NotImplementedError('SpatiaLite does not support 3D translation.')
s = {'procedure_fmt': '%(geo_col)s,%(x)s,%(y)s',
'procedure_args': {'x': x, 'y': y},
'select_field': GeomField(),
}
else:
s = {'procedure_fmt': '%(geo_col)s,%(x)s,%(y)s,%(z)s',
'procedure_args': {'x': x, 'y': y, 'z': z},
'select_field': GeomField(),
}
return self._spatial_attribute('translate', s, **kwargs)
def transform(self, srid=4326, **kwargs):
"""
Transforms the given geometry field to the given SRID. If no SRID is
provided, the transformation will default to using 4326 (WGS84).
"""
if not isinstance(srid, six.integer_types):
raise TypeError('An integer SRID must be provided.')
field_name = kwargs.get('field_name')
self._spatial_setup('transform', field_name=field_name)
self.query.add_context('transformed_srid', srid)
return self._clone()
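    # Sketch: chaining `transform` before a distance computation so the
    # geometry column is reprojected first (hypothetical `City` model and
    # `pnt` geometry):
    #
    #     City.objects.transform(3857).distance(pnt)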
def union(self, geom, **kwargs):
"""
Returns the union of the geographic field with the given
Geometry in a `union` attribute on each element of this GeoQuerySet.
"""
return self._geomset_attribute('union', geom, **kwargs)
# ### Private API -- Abstracted DRY routines. ###
def _spatial_setup(self, att, desc=None, field_name=None, geo_field_type=None):
"""
        Performs setup for executing the spatial function.
"""
# Does the spatial backend support this?
connection = connections[self.db]
func = getattr(connection.ops, att, False)
if desc is None:
desc = att
if not func:
raise NotImplementedError('%s stored procedure not available on '
'the %s backend.' %
(desc, connection.ops.name))
# Initializing the procedure arguments.
procedure_args = {'function': func}
# Is there a geographic field in the model to perform this
# operation on?
geo_field = self._geo_field(field_name)
if not geo_field:
raise TypeError('%s output only available on GeometryFields.' % func)
# If the `geo_field_type` keyword was used, then enforce that
# type limitation.
if geo_field_type is not None and not isinstance(geo_field, geo_field_type):
raise TypeError('"%s" stored procedures may only be called on %ss.' % (func, geo_field_type.__name__))
# Setting the procedure args.
procedure_args['geo_col'] = self._geocol_select(geo_field, field_name)
return procedure_args, geo_field
def _spatial_attribute(self, att, settings, field_name=None, model_att=None):
"""
DRY routine for calling a spatial stored procedure on a geometry column
and attaching its output as an attribute of the model.
Arguments:
att:
The name of the spatial attribute that holds the spatial
SQL function to call.
settings:
            Dictionary of internal settings to customize for the spatial procedure.
Public Keyword Arguments:
field_name:
The name of the geographic field to call the spatial
function on. May also be a lookup to a geometry field
as part of a foreign key relation.
model_att:
The name of the model attribute to attach the output of
the spatial function to.
"""
warnings.warn(
"The %s GeoQuerySet method is deprecated. See GeoDjango Functions "
"documentation to find the expression-based replacement." % att,
RemovedInDjango20Warning, stacklevel=2
)
# Default settings.
settings.setdefault('desc', None)
settings.setdefault('geom_args', ())
settings.setdefault('geom_field', None)
settings.setdefault('procedure_args', {})
settings.setdefault('procedure_fmt', '%(geo_col)s')
settings.setdefault('select_params', [])
connection = connections[self.db]
# Performing setup for the spatial column, unless told not to.
if settings.get('setup', True):
default_args, geo_field = self._spatial_setup(
att, desc=settings['desc'], field_name=field_name,
geo_field_type=settings.get('geo_field_type'))
for k, v in six.iteritems(default_args):
settings['procedure_args'].setdefault(k, v)
else:
geo_field = settings['geo_field']
# The attribute to attach to the model.
if not isinstance(model_att, six.string_types):
model_att = att
# Special handling for any argument that is a geometry.
for name in settings['geom_args']:
# Using the field's get_placeholder() routine to get any needed
# transformation SQL.
geom = geo_field.get_prep_value(settings['procedure_args'][name])
params = geo_field.get_db_prep_lookup('contains', geom, connection=connection)
geom_placeholder = geo_field.get_placeholder(geom, None, connection)
# Replacing the procedure format with that of any needed
# transformation SQL.
old_fmt = '%%(%s)s' % name
new_fmt = geom_placeholder % '%%s'
settings['procedure_fmt'] = settings['procedure_fmt'].replace(old_fmt, new_fmt)
settings['select_params'].extend(params)
# Getting the format for the stored procedure.
fmt = '%%(function)s(%s)' % settings['procedure_fmt']
# If the result of this function needs to be converted.
if settings.get('select_field'):
select_field = settings['select_field']
if connection.ops.oracle:
select_field.empty_strings_allowed = False
else:
select_field = Field()
# Finally, setting the extra selection attribute with
# the format string expanded with the stored procedure
# arguments.
self.query.add_annotation(
RawSQL(fmt % settings['procedure_args'], settings['select_params'], select_field),
model_att)
return self
def _distance_attribute(self, func, geom=None, tolerance=0.05, spheroid=False, **kwargs):
"""
DRY routine for GeoQuerySet distance attribute routines.
"""
# Setting up the distance procedure arguments.
procedure_args, geo_field = self._spatial_setup(func, field_name=kwargs.get('field_name'))
        # If the field is geodetic, default the distance attribute to meters
        # (Oracle and PostGIS spherical distances return meters). Otherwise,
        # use the units of the geometry field.
connection = connections[self.db]
geodetic = geo_field.geodetic(connection)
geography = geo_field.geography
if geodetic:
dist_att = 'm'
else:
dist_att = Distance.unit_attname(geo_field.units_name(connection))
# Shortcut booleans for what distance function we're using and
# whether the geometry field is 3D.
distance = func == 'distance'
length = func == 'length'
perimeter = func == 'perimeter'
if not (distance or length or perimeter):
raise ValueError('Unknown distance function: %s' % func)
geom_3d = geo_field.dim == 3
        # The field's get_db_prep_lookup() is used to get any
        # extra distance parameters. Here we set up the
        # parameters that will be passed in to the field's function.
lookup_params = [geom or 'POINT (0 0)', 0]
# Getting the spatial backend operations.
backend = connection.ops
# If the spheroid calculation is desired, either by the `spheroid`
# keyword or when calculating the length of geodetic field, make
# sure the 'spheroid' distance setting string is passed in so we
# get the correct spatial stored procedure.
if spheroid or (backend.postgis and geodetic and
(not geography) and length):
lookup_params.append('spheroid')
lookup_params = geo_field.get_prep_value(lookup_params)
params = geo_field.get_db_prep_lookup('distance_lte', lookup_params, connection=connection)
# The `geom_args` flag is set to true if a geometry parameter was
# passed in.
geom_args = bool(geom)
if backend.oracle:
if distance:
procedure_fmt = '%(geo_col)s,%(geom)s,%(tolerance)s'
elif length or perimeter:
procedure_fmt = '%(geo_col)s,%(tolerance)s'
procedure_args['tolerance'] = tolerance
else:
# Getting whether this field is in units of degrees since the field may have
# been transformed via the `transform` GeoQuerySet method.
srid = self.query.get_context('transformed_srid')
if srid:
u, unit_name, s = get_srid_info(srid, connection)
geodetic = unit_name.lower() in geo_field.geodetic_units
if geodetic and not connection.features.supports_distance_geodetic:
raise ValueError(
'This database does not support linear distance '
'calculations on geodetic coordinate systems.'
)
if distance:
if srid:
# Setting the `geom_args` flag to false because we want to handle
# transformation SQL here, rather than the way done by default
# (which will transform to the original SRID of the field rather
# than to what was transformed to).
geom_args = False
procedure_fmt = '%s(%%(geo_col)s, %s)' % (backend.transform, srid)
if geom.srid is None or geom.srid == srid:
# If the geom parameter srid is None, it is assumed the coordinates
# are in the transformed units. A placeholder is used for the
# geometry parameter. `GeomFromText` constructor is also needed
# to wrap geom placeholder for SpatiaLite.
if backend.spatialite:
procedure_fmt += ', %s(%%%%s, %s)' % (backend.from_text, srid)
else:
procedure_fmt += ', %%s'
else:
# We need to transform the geom to the srid specified in `transform()`,
# so wrapping the geometry placeholder in transformation SQL.
# SpatiaLite also needs geometry placeholder wrapped in `GeomFromText`
# constructor.
if backend.spatialite:
procedure_fmt += (', %s(%s(%%%%s, %s), %s)' % (
backend.transform, backend.from_text,
geom.srid, srid))
else:
procedure_fmt += ', %s(%%%%s, %s)' % (backend.transform, srid)
else:
# `transform()` was not used on this GeoQuerySet.
procedure_fmt = '%(geo_col)s,%(geom)s'
if not geography and geodetic:
                    # Spherical distance calculation is needed (because the geographic
                    # field is geodetic). However, the PostGIS ST_distance_sphere/spheroid()
                    # procedures may only do queries from point columns to point geometries,
                    # so some error checking is required.
if not backend.geography:
if not isinstance(geo_field, PointField):
raise ValueError('Spherical distance calculation only supported on PointFields.')
if not str(Geometry(six.memoryview(params[0].ewkb)).geom_type) == 'Point':
raise ValueError(
'Spherical distance calculation only supported with '
'Point Geometry parameters'
)
# The `function` procedure argument needs to be set differently for
# geodetic distance calculations.
if spheroid:
# Call to distance_spheroid() requires spheroid param as well.
procedure_fmt += ",'%(spheroid)s'"
procedure_args.update({'function': backend.distance_spheroid, 'spheroid': params[1]})
else:
procedure_args.update({'function': backend.distance_sphere})
elif length or perimeter:
procedure_fmt = '%(geo_col)s'
if not geography and geodetic and length:
# There's no `length_sphere`, and `length_spheroid` also
# works on 3D geometries.
procedure_fmt += ",'%(spheroid)s'"
procedure_args.update({'function': backend.length_spheroid, 'spheroid': params[1]})
elif geom_3d and connection.features.supports_3d_functions:
# Use 3D variants of perimeter and length routines on supported backends.
if perimeter:
procedure_args.update({'function': backend.perimeter3d})
elif length:
procedure_args.update({'function': backend.length3d})
# Setting up the settings for `_spatial_attribute`.
s = {'select_field': DistanceField(dist_att),
'setup': False,
'geo_field': geo_field,
'procedure_args': procedure_args,
'procedure_fmt': procedure_fmt,
}
if geom_args:
s['geom_args'] = ('geom',)
s['procedure_args']['geom'] = geom
elif geom:
# The geometry is passed in as a parameter because we handled
# transformation conditions in this routine.
s['select_params'] = [backend.Adapter(geom)]
return self._spatial_attribute(func, s, **kwargs)
def _geom_attribute(self, func, tolerance=0.05, **kwargs):
"""
DRY routine for setting up a GeoQuerySet method that attaches a
Geometry attribute (e.g., `centroid`, `point_on_surface`).
"""
s = {'select_field': GeomField()}
if connections[self.db].ops.oracle:
s['procedure_fmt'] = '%(geo_col)s,%(tolerance)s'
s['procedure_args'] = {'tolerance': tolerance}
return self._spatial_attribute(func, s, **kwargs)
def _geomset_attribute(self, func, geom, tolerance=0.05, **kwargs):
"""
DRY routine for setting up a GeoQuerySet method that attaches a
        Geometry attribute and takes a Geometry parameter. This is used
for geometry set-like operations (e.g., intersection, difference,
union, sym_difference).
"""
s = {
'geom_args': ('geom',),
'select_field': GeomField(),
'procedure_fmt': '%(geo_col)s,%(geom)s',
'procedure_args': {'geom': geom},
}
if connections[self.db].ops.oracle:
s['procedure_fmt'] += ',%(tolerance)s'
s['procedure_args']['tolerance'] = tolerance
return self._spatial_attribute(func, s, **kwargs)
def _geocol_select(self, geo_field, field_name):
"""
Helper routine for constructing the SQL to select the geographic
column. Takes into account if the geographic field is in a
ForeignKey relation to the current model.
"""
compiler = self.query.get_compiler(self.db)
opts = self.model._meta
if geo_field not in opts.fields:
# Is this operation going to be on a related geographic field?
# If so, it'll have to be added to the select related information
# (e.g., if 'location__point' was given as the field name).
# Note: the operation really is defined as "must add select related!"
self.query.add_select_related([field_name])
# Call pre_sql_setup() so that compiler.select gets populated.
compiler.pre_sql_setup()
for col, _, _ in compiler.select:
if col.output_field == geo_field:
return col.as_sql(compiler, compiler.connection)[0]
raise ValueError("%r not in compiler's related_select_cols" % geo_field)
elif geo_field not in opts.local_fields:
# This geographic field is inherited from another model, so we have to
# use the db table for the _parent_ model instead.
parent_model = geo_field.model._meta.concrete_model
return self._field_column(compiler, geo_field, parent_model._meta.db_table)
else:
return self._field_column(compiler, geo_field)
# Private API utilities, subject to change.
def _geo_field(self, field_name=None):
"""
Returns the first Geometry field encountered or the one specified via
the `field_name` keyword. The `field_name` may be a string specifying
the geometry field on this GeoQuerySet's model, or a lookup string
to a geometry field via a ForeignKey relation.
"""
if field_name is None:
            # Iterating until the first geographic field is found.
for field in self.model._meta.fields:
if isinstance(field, GeometryField):
return field
return False
else:
# Otherwise, check by the given field name -- which may be
# a lookup to a _related_ geographic field.
return GISLookup._check_geo_field(self.model._meta, field_name)
def _field_column(self, compiler, field, table_alias=None, column=None):
"""
Helper function that returns the database column for the given field.
The table and column are returned (quoted) in the proper format, e.g.,
`"geoapp_city"."point"`. If `table_alias` is not specified, the
database table associated with the model of this `GeoQuerySet` will be
used. If `column` is specified, it will be used instead of the value
in `field.column`.
"""
if table_alias is None:
table_alias = compiler.query.get_meta().db_table
return "%s.%s" % (compiler.quote_name_unless_alias(table_alias),
compiler.connection.ops.quote_name(column or field.column))
|
PaulWay/spacewalk
|
refs/heads/master
|
client/solaris/smartpm/smart/backends/deb/base.py
|
1
|
#
# Copyright (c) 2004 Conectiva, Inc.
#
# Written by Gustavo Niemeyer <niemeyer@conectiva.com>
#
# This file is part of Smart Package Manager.
#
# Smart Package Manager is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# Smart Package Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Smart Package Manager; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
from smart.backends.deb.debver import vercmp, checkdep, splitrelease
from smart.backends.deb.pm import DebPackageManager
from smart.util.strtools import isGlob
from smart.cache import *
import fnmatch
import string
import os, re
__all__ = ["DebPackage", "DebProvides", "DebNameProvides", "DebPreRequires",
"DebRequires", "DebUpgrades", "DebConflicts", "DebOrRequires",
"DebOrPreRequires"]
def getArchitecture():
arch = os.uname()[-1]
result = {"pentium": "i386",
"sparc64": "sparc",
"ppc": "powerpc",
"mipseb": "mips",
"shel": "sh"}.get(arch)
if result:
return result
elif len(arch) == 4 and arch[0] == "i" and arch.endswith("86"):
return "i386"
elif arch.startswith("hppa"):
return "hppa"
elif arch.startswith("alpha"):
return "alpha"
else:
return arch
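# Rough behavior sketch (inputs are hypothetical `uname` machine values):
#     "i686"     -> "i386"     (any iX86 collapses to i386)
#     "sparc64"  -> "sparc"    (via the lookup table)
#     "hppa2.0"  -> "hppa"
#     "x86_64"   -> "x86_64"   (no rule matches, returned unchanged)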
DEBARCH = sysconf.get("deb-arch", getArchitecture())
class DebPackage(Package):
packagemanager = DebPackageManager
def coexists(self, other):
if not isinstance(other, DebPackage):
return True
return False
def matches(self, relation, version):
if not relation:
return True
return checkdep(self.version, relation, version)
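    # Matching semantics sketch (versions are illustrative): with
    # self.version == "1.2-1",
    #     pkg.matches(">=", "1.0")  # True
    #     pkg.matches("<<", "1.0")  # False
    #     pkg.matches(None, None)   # True -- no relation means any version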
def search(self, searcher):
myname = self.name
myversion = self.version
ratio = 0
for nameversion, cutoff in searcher.nameversion:
_, ratio1 = globdistance(nameversion, myname, cutoff)
_, ratio2 = globdistance(nameversion,
"%s-%s" % (myname, myversion), cutoff)
_, ratio3 = globdistance(nameversion, "%s-%s" %
(myname, splitrelease(myversion)[0]),
cutoff)
ratio = max(ratio, ratio1, ratio2, ratio3)
if ratio:
searcher.addResult(self, ratio)
def __lt__(self, other):
rc = cmp(self.name, other.name)
if type(other) is DebPackage:
if rc == 0 and self.version != other.version:
rc = vercmp(self.version, other.version)
return rc == -1
class DebProvides(Provides): pass
class DebNameProvides(DebProvides): pass
class DebDepends(Depends):
def matches(self, prv):
if not isinstance(prv, DebProvides) and type(prv) is not Provides:
return False
if not self.version:
return True
if not prv.version:
return False
return checkdep(prv.version, self.relation, self.version)
class DebPreRequires(DebDepends,PreRequires): pass
class DebRequires(DebDepends,Requires): pass
class DebOrDepends(Depends):
def __init__(self, nrv):
name = " | ".join([(x[2] and " ".join(x) or x[0]) for x in nrv])
Depends.__init__(self, name, None, None)
self._nrv = nrv
def getInitArgs(self):
return (self.__class__, self._nrv)
def getMatchNames(self):
return [x[0] for x in self._nrv]
def matches(self, prv):
if not isinstance(prv, DebProvides) and type(prv) is not Provides:
return False
for name, relation, version in self._nrv:
if name == prv.name:
if not version:
return True
if not prv.version:
continue
if checkdep(prv.version, relation, version):
return True
return False
def __reduce__(self):
return (self.__class__, (self._nrv,))
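    # The `nrv` argument is a sequence of (name, relation, version) tuples;
    # e.g. the dependency "a (>= 1.0) | b" would arrive (assumed input) as
    # [("a", ">=", "1.0"), ("b", None, None)], giving the display name
    # "a >= 1.0 | b".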
class DebOrRequires(DebOrDepends,Requires): pass
class DebOrPreRequires(DebOrDepends,PreRequires): pass
class DebUpgrades(DebDepends,Upgrades):
def matches(self, prv):
if not isinstance(prv, DebNameProvides) and type(prv) is not Provides:
return False
if not self.version or not prv.version:
return True
return checkdep(prv.version, self.relation, self.version)
class DebConflicts(DebDepends,Conflicts): pass
|
fuselock/odoo
|
refs/heads/8.0
|
openerp/conf/__init__.py
|
442
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Library-wide configuration variables.
For now, configuration code is in openerp.tools.config. It is mainly in
unprocessed form, e.g. addons_path is a string with comma-separated
paths. The aim is to have code related to configuration (command line
parsing, configuration file loading and saving, ...) in this module
and provide real Python variables, e.g. addons_paths is really a list
of paths.
To properly initialize this module, openerp.tools.config.parse_config()
must be used.
"""
import deprecation
# Paths to search for OpenERP addons.
addons_paths = []
# List of server-wide modules to load. Those modules are supposed to provide
# features not necessarily tied to a particular database. This is in contrast
# to modules that are always bound to a specific database when they are
# installed (i.e. the majority of OpenERP addons). This is set with the --load
# command-line option.
server_wide_modules = []
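# Illustrative (assumed) values after openerp.tools.config.parse_config()
# has run; the real values depend on the command line and config file:
#     addons_paths = ['/usr/lib/openerp/addons', '/opt/custom/addons']
#     server_wide_modules = ['web']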
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
ataylor32/django
|
refs/heads/master
|
tests/reverse_lookup/tests.py
|
326
|
from __future__ import unicode_literals
from django.core.exceptions import FieldError
from django.test import TestCase
from .models import Choice, Poll, User
class ReverseLookupTests(TestCase):
def setUp(self):
john = User.objects.create(name="John Doe")
jim = User.objects.create(name="Jim Bo")
first_poll = Poll.objects.create(
question="What's the first question?",
creator=john
)
second_poll = Poll.objects.create(
question="What's the second question?",
creator=jim
)
Choice.objects.create(
poll=first_poll,
related_poll=second_poll,
name="This is the answer."
)
def test_reverse_by_field(self):
u1 = User.objects.get(
poll__question__exact="What's the first question?"
)
self.assertEqual(u1.name, "John Doe")
u2 = User.objects.get(
poll__question__exact="What's the second question?"
)
self.assertEqual(u2.name, "Jim Bo")
def test_reverse_by_related_name(self):
p1 = Poll.objects.get(poll_choice__name__exact="This is the answer.")
self.assertEqual(p1.question, "What's the first question?")
p2 = Poll.objects.get(
related_choice__name__exact="This is the answer.")
self.assertEqual(p2.question, "What's the second question?")
def test_reverse_field_name_disallowed(self):
"""
If a related_name is given you can't use the field name instead
"""
self.assertRaises(FieldError, Poll.objects.get,
choice__name__exact="This is the answer")
|
sarlalian/ansible
|
refs/heads/devel
|
lib/ansible/template/__init__.py
|
2
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import re
from jinja2 import Environment
from jinja2.loaders import FileSystemLoader
from jinja2.exceptions import TemplateSyntaxError, UndefinedError
from jinja2.utils import concat as j2_concat
from jinja2.runtime import StrictUndefined
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleFilterError, AnsibleUndefinedVariable
from ansible.plugins import _basedirs, filter_loader, lookup_loader, test_loader
from ansible.template.safe_eval import safe_eval
from ansible.template.template import AnsibleJ2Template
from ansible.template.vars import AnsibleJ2Vars
from ansible.utils.debug import debug
from numbers import Number
__all__ = ['Templar']
# A regex for checking to see if a variable we're trying to
# expand is just a single variable name.
# Primitive Types which we don't want Jinja to convert to strings.
NON_TEMPLATED_TYPES = ( bool, Number )
JINJA2_OVERRIDE = '#jinja2:'
def _preserve_backslashes(data, jinja_env):
"""Double backslashes within jinja2 expressions
A user may enter something like this in a playbook::
debug:
msg: "Test Case 1\\3; {{ test1_name | regex_replace('^(.*)_name$', '\\1')}}"
    The string inside of the {{ gets interpreted multiple times: first by
    yaml, then by python, and finally by jinja2 as part of its variable. Because
it is processed by both python and jinja2, the backslash escaped
characters get unescaped twice. This means that we'd normally have to use
four backslashes to escape that. This is painful for playbook authors as
they have to remember different rules for inside vs outside of a jinja2
expression (The backslashes outside of the "{{ }}" only get processed by
yaml and python. So they only need to be escaped once). The following
code fixes this by automatically performing the extra quoting of
backslashes inside of a jinja2 expression.
"""
if '\\' in data and '{{' in data:
new_data = []
d2 = jinja_env.preprocess(data)
in_var = False
for token in jinja_env.lex(d2):
if token[1] == 'variable_begin':
in_var = True
new_data.append(token[2])
elif token[1] == 'variable_end':
in_var = False
new_data.append(token[2])
elif in_var and token[1] == 'string':
# Double backslashes only if we're inside of a jinja2 variable
new_data.append(token[2].replace('\\','\\\\'))
else:
new_data.append(token[2])
data = ''.join(new_data)
return data
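# Behavior sketch (default jinja2 delimiters assumed): string tokens found
# *inside* a "{{ ... }}" expression get every backslash doubled, e.g. the
# quoted token r'\1' becomes r'\\1', while backslashes outside the
# expression are left untouched.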
class Templar:
'''
The main class for templating, with the main entry-point of template().
'''
def __init__(self, loader, shared_loader_obj=None, variables=dict()):
self._loader = loader
self._filters = None
self._tests = None
self._available_variables = variables
if loader:
self._basedir = loader.get_basedir()
else:
self._basedir = './'
if shared_loader_obj:
self._filter_loader = getattr(shared_loader_obj, 'filter_loader')
self._lookup_loader = getattr(shared_loader_obj, 'lookup_loader')
else:
self._filter_loader = filter_loader
self._lookup_loader = lookup_loader
# flags to determine whether certain failures during templating
# should result in fatal errors being raised
self._fail_on_lookup_errors = True
self._fail_on_filter_errors = True
self._fail_on_undefined_errors = C.DEFAULT_UNDEFINED_VAR_BEHAVIOR
self.environment = Environment(
trim_blocks=True,
undefined=StrictUndefined,
extensions=self._get_extensions(),
finalize=self._finalize,
loader=FileSystemLoader(self._basedir),
)
self.environment.template_class = AnsibleJ2Template
self.SINGLE_VAR = re.compile(r"^%s\s*(\w*)\s*%s$" % (self.environment.variable_start_string, self.environment.variable_end_string))
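        # With the default delimiters, SINGLE_VAR matches e.g. "{{ foo }}"
        # (capturing "foo") but not "{{ foo | upper }}" or "x {{ foo }}",
        # since only a bare word between the delimiters is accepted.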
def _count_newlines_from_end(self, in_str):
'''
Counts the number of newlines at the end of a string. This is used during
the jinja2 templating to ensure the count matches the input, since some newlines
may be thrown away during the templating.
'''
i = len(in_str)
while i > 0:
if in_str[i-1] != '\n':
break
i -= 1
return len(in_str) - i
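    # Behavior sketch:
    #     self._count_newlines_from_end("abc\n\n")  # -> 2
    #     self._count_newlines_from_end("abc")      # -> 0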
def _get_filters(self):
'''
Returns filter plugins, after loading and caching them if need be
'''
if self._filters is not None:
return self._filters.copy()
plugins = [x for x in self._filter_loader.all()]
self._filters = dict()
for fp in plugins:
self._filters.update(fp.filters())
self._filters.update(self._get_tests())
return self._filters.copy()
def _get_tests(self):
'''
        Returns test plugins, after loading and caching them if need be
'''
if self._tests is not None:
return self._tests.copy()
plugins = [x for x in test_loader.all()]
self._tests = dict()
for fp in plugins:
self._tests.update(fp.tests())
return self._tests.copy()
def _get_extensions(self):
'''
Return jinja2 extensions to load.
If some extensions are set via jinja_extensions in ansible.cfg, we try
to load them with the jinja environment.
'''
jinja_exts = []
if C.DEFAULT_JINJA2_EXTENSIONS:
# make sure the configuration directive doesn't contain spaces
# and split extensions in an array
jinja_exts = C.DEFAULT_JINJA2_EXTENSIONS.replace(" ", "").split(',')
return jinja_exts
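    # e.g. a (hypothetical) ansible.cfg setting of
    #     jinja_extensions = jinja2.ext.do, jinja2.ext.i18n
    # yields ['jinja2.ext.do', 'jinja2.ext.i18n'] after the replace/split.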
def set_available_variables(self, variables):
'''
Sets the list of template variables this Templar instance will use
to template things, so we don't have to pass them around between
internal methods.
'''
assert isinstance(variables, dict)
self._available_variables = variables.copy()
def template(self, variable, convert_bare=False, preserve_trailing_newlines=False, fail_on_undefined=None, overrides=None, convert_data=True):
'''
Templates (possibly recursively) any given data as input. If convert_bare is
set to True, the given data will be wrapped as a jinja2 variable ('{{foo}}')
before being sent through the template engine.
'''
try:
if convert_bare:
variable = self._convert_bare_variable(variable)
if isinstance(variable, basestring):
result = variable
if self._contains_vars(variable):
# Check to see if the string we are trying to render is just referencing a single
# var. In this case we don't want to accidentally change the type of the variable
# to a string by using the jinja template renderer. We just want to pass it.
only_one = self.SINGLE_VAR.match(variable)
if only_one:
var_name = only_one.group(1)
if var_name in self._available_variables:
resolved_val = self._available_variables[var_name]
if isinstance(resolved_val, NON_TEMPLATED_TYPES):
return resolved_val
elif resolved_val is None:
return C.DEFAULT_NULL_REPRESENTATION
result = self._do_template(variable, preserve_trailing_newlines=preserve_trailing_newlines, fail_on_undefined=fail_on_undefined, overrides=overrides)
if convert_data:
# if this looks like a dictionary or list, convert it to such using the safe_eval method
if (result.startswith("{") and not result.startswith(self.environment.variable_start_string)) or \
result.startswith("[") or result in ("True", "False"):
eval_results = safe_eval(result, locals=self._available_variables, include_exceptions=True)
if eval_results[1] is None:
result = eval_results[0]
else:
# FIXME: if the safe_eval raised an error, should we do something with it?
pass
return result
elif isinstance(variable, (list, tuple)):
return [self.template(v, convert_bare=convert_bare, preserve_trailing_newlines=preserve_trailing_newlines, fail_on_undefined=fail_on_undefined, overrides=overrides) for v in variable]
elif isinstance(variable, dict):
d = {}
# we don't use iteritems() here to avoid problems if the underlying dict
# changes sizes due to the templating, which can happen with hostvars
for k in variable.keys():
d[k] = self.template(variable[k], convert_bare=convert_bare, preserve_trailing_newlines=preserve_trailing_newlines, fail_on_undefined=fail_on_undefined, overrides=overrides)
return d
else:
return variable
except AnsibleFilterError:
if self._fail_on_filter_errors:
raise
else:
return variable
def _contains_vars(self, data):
'''
returns True if the data contains a variable pattern
'''
return self.environment.block_start_string in data or self.environment.variable_start_string in data
def _convert_bare_variable(self, variable):
'''
Wraps a bare string, which may have an attribute portion (ie. foo.bar)
in jinja2 variable braces so that it is evaluated properly.
'''
if isinstance(variable, basestring):
contains_filters = "|" in variable
first_part = variable.split("|")[0].split(".")[0].split("[")[0]
if (contains_filters or first_part in self._available_variables) and self.environment.variable_start_string not in variable:
return "%s%s%s" % (self.environment.variable_start_string, variable, self.environment.variable_end_string)
# the variable didn't meet the conditions to be converted,
# so just return it as-is
return variable
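    # Sketch: "foo.bar" becomes "{{foo.bar}}" when `foo` is an available
    # variable (or the string contains a filter pipe); an already-wrapped
    # "{{ foo }}" is returned unchanged.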
def _finalize(self, thing):
'''
A custom finalize method for jinja2, which prevents None from being returned
'''
return thing if thing is not None else ''
def _lookup(self, name, *args, **kwargs):
instance = self._lookup_loader.get(name.lower(), loader=self._loader, templar=self)
if instance is not None:
from ansible.utils.listify import listify_lookup_plugin_terms
loop_terms = listify_lookup_plugin_terms(terms=args, templar=self, loader=self._loader, fail_on_undefined=True, convert_bare=False)
# safely catch run failures per #5059
try:
ran = instance.run(loop_terms, variables=self._available_variables, **kwargs)
except (AnsibleUndefinedVariable, UndefinedError) as e:
raise AnsibleUndefinedVariable(e)
except Exception as e:
if self._fail_on_lookup_errors:
raise
ran = None
if ran:
ran = ",".join(ran)
return ran
else:
raise AnsibleError("lookup plugin (%s) not found" % name)
def _do_template(self, data, preserve_trailing_newlines=False, fail_on_undefined=None, overrides=None):
if fail_on_undefined is None:
fail_on_undefined = self._fail_on_undefined_errors
try:
# allows template header overrides to change jinja2 options.
if overrides is None:
myenv = self.environment.overlay()
else:
myenv = self.environment.overlay(overrides)
# Get jinja env overrides from template
if data.startswith(JINJA2_OVERRIDE):
eol = data.find('\n')
line = data[len(JINJA2_OVERRIDE):eol]
data = data[eol+1:]
for pair in line.split(','):
(key,val) = pair.split(':')
key = key.strip()
setattr(myenv, key, ast.literal_eval(val.strip()))
#FIXME: add tests
myenv.filters.update(self._get_filters())
myenv.tests.update(self._get_tests())
data = _preserve_backslashes(data, myenv)
try:
t = myenv.from_string(data)
except TemplateSyntaxError as e:
raise AnsibleError("template error while templating string: %s" % str(e))
except Exception as e:
if 'recursion' in str(e):
raise AnsibleError("recursive loop detected in template string: %s" % data)
else:
return data
t.globals['lookup'] = self._lookup
t.globals['finalize'] = self._finalize
jvars = AnsibleJ2Vars(self, t.globals)
new_context = t.new_context(jvars, shared=True)
rf = t.root_render_func(new_context)
try:
res = j2_concat(rf)
except TypeError as te:
if 'StrictUndefined' in str(te):
raise AnsibleUndefinedVariable(
"Unable to look up a name or access an attribute in template string. " + \
"Make sure your variable name does not contain invalid characters like '-'."
)
else:
debug("failing because of a type error, template data is: %s" % data)
raise AnsibleError("an unexpected type error occurred. Error was %s" % te)
if preserve_trailing_newlines:
                # The low level calls above do not preserve the newline
                # characters at the end of the input data, so we
                # calculate the difference in newlines and append them
                # to the resulting output for parity
res_newlines = self._count_newlines_from_end(res)
data_newlines = self._count_newlines_from_end(data)
if data_newlines > res_newlines:
res += '\n' * (data_newlines - res_newlines)
return res
except (UndefinedError, AnsibleUndefinedVariable) as e:
if fail_on_undefined:
raise AnsibleUndefinedVariable(e)
else:
#TODO: return warning about undefined var
return data
|
TimeTravel-0/hatching
|
refs/heads/master
|
vectorize.py
|
1
|
#!/usr/bin/env python
#
# this is ugly, experimental, slow, unoptimized code.
# its purpose is to verify my (probably) stupid idea
# of an "image to hatching pen plotter drawing".
#
# for now the script finds edges and fills areas in between
# with colors
# edge detection works as expected, but drawn edges
# (black lines...) are not handled in a special way
# = handled like a normal area/fill = results in double
# lines detected for comic-like input drawings
#
# motion vector recovery works ok-ish.
#
# this script now generates lots of data from an image file
# next, yet unimplemented, step is to combine all this data
# and generate polygon lines
#
# another, yet unimplemented, part is to write all the polygons
# out in HPGL form.
#
#
# see main() function for actual high level description of what is going on
#
# we start with a few simple image manipulation functions:
#
import sys
from lib_console import *
from lib_imagefile import *
from lib_colors import *
from lib_image_filters import *
from lib_paths import *
from lib_motionvectors import *
from lib_hpgl import *
def facewalk(img_in, mask, motionvectors):
'''trace hatching paths'''
mask_clone = pygame.Surface(mask.get_size())
mask_clone.blit(mask,(0,0))
radius = 5
paths = []
width, height = mask_clone.get_size()
while True:
path = []
position = find_pixel_with_color(mask_clone,(255,255,255))
if not position:
break
path.append(position)
mask_clone.set_at(position,(0,0,0)) # deactivate pixel
# now, find next pixel
while True:
vector = interpolate_motionvectors(motionvectors,position)
angle = vector[1]
#print "!!!",vector
dp = [math.cos(angle*2*math.pi/360)*radius,math.sin(angle*2*math.pi/360)*radius]
position = [position[0]+int(dp[0]),position[1]+int(dp[1])]
if position[0]<0 or position[0]>width-1 or position[1]<0 or position[1]>height-1:
break
if mask_clone.get_at(position) == (0,0,0):
break
path.append(position)
mask_clone.set_at(position,(0,0,0))
pygame.draw.circle(mask_clone, (0,0,0), position, radius-1, 0)
paths.append(path)
return paths
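# Sketch of the output: each path is a list of [x, y] pixel positions
# stepped `radius` pixels at a time along the locally interpolated vector
# angle, e.g. [[10, 10], [14, 13], [18, 16], ...].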
def main(input_file,mode=""):
'''main routine'''
if len(sys.argv)>=2: # 1 parameters
pygame.init()
# the idea is as follows:
# 1. find/mark edges, because edges mark areas
img_in = image_load(input_file)
display = image_gui(img_in.get_size())
image_show(display, img_in, False)
#img_blurx = lazyblur(img_in,3)
#image_show(display,img_blurx, True)
#time.sleep(5)
        # a) first we run a median filter to get rid of noise but keep edges
# as the median filter already gets a list of all pixels around the analyzed coordinate
# it got an additional part to calculate the contrast.
if mode != "bw":
img_median, img_border = median(img_in,3,"c",3)
image_show(display, img_median, True)
image_show(display, img_border, True)
image_save(img_border,fn_comb(input_file,"borderm"))
image_save(img_median,fn_comb(input_file,"median"))
img_in = img_median
else:
# just create inverted image...
img_white = image_create(img_in.get_size(),(255,255,255))
img_border = addmul(img_in,img_white,1.0,-1.0)
image_show(display, img_border, True)
#img_in = img_median
img_blend = img_border
        # e) create blurred image (average of local area)
img_blur = img_blend
for i in range(0,3):
img_blur = lazyblur(img_blur, 3)
image_show(display, img_blur, False)
        # f) unblurred - blurred edge image = better image for threshold usage (adapts to local variations)
img_blurdif = addmul(img_blend, img_blur, -1)
image_show(display, img_blurdif, True)
# j) walk the line
edgepaths = edgewalk(img_blurdif)
img_edgepath = image_render_paths(edgepaths,img_in.get_size())
image_show(display, img_edgepath, True)
image_save(img_edgepath,fn_comb(sys.argv[1],"epath"))
c = hpgl_usepen(1,(0,0,0))
c+= hpgl_frompaths(edgepaths)
hpgl_tofile(c, fn_comb(sys.argv[1],"epath","hpgl"))
# g) bolden edges
img_bold = bolden(img_blurdif,1)
image_show(display, img_bold, True)
# h) convert to black and white via limit
img_bnw = blacknwhite(img_bold,12)
image_show(display, img_bnw, True)
image_show(display, img_bnw, True)
# i) isles smaller than limit get eliminated
while True:
position = find_pixel_with_color(img_bnw,(255,255,255))
if not position:
break
pixelcount = floodfill(img_bnw, position, (128,128,128))
if pixelcount < 100:
print "isle at %s, %i pixels below limit"%(str(position), pixelcount)
floodfill(img_bnw, position, (0,0,0)) # eliminate it
img_bnw = blacknwhite(img_bnw,4)
# 2. for the room between edges: flood fill until no space left
facecount = 0
while True:
position = find_pixel_with_color(img_bnw,(0,0,0))
if not position:
break
#print position
pixelcount = floodfill(img_bnw, position, id_to_color(facecount))
if pixelcount < 25:
print "isle at %s, %i pixels below limit"%(str(position), pixelcount)
floodfill(img_bnw, position, (255,255,255))
else:
facecount+=1
image_show(display, img_bnw, False)
print "filled %i faces"%facecount
        # 3. with each flood fill a separate area/mask is defined
masks = []
masks_drawtmp = pygame.Surface(img_bnw.get_size())
for i in range(0,facecount):
masks.append(bolden( mask(img_bnw,id_to_color(i)) ,2))
masks_drawtmp = blend(masks_drawtmp, masks[i])
image_show(display, masks_drawtmp, False)
# 4. get average brightness from this area
masked_originals = []
masked_originals_drawtmp = pygame.Surface(img_bnw.get_size())
avgcolors = []
for i in range(0,facecount):
avgcolor = get_avg_color(img_in, masks[i])
avgcolors.append(avgcolor)
print avgcolor
masked_originals.append( multiply( image_create(img_bnw.get_size(),avgcolor), masks[i]) )
#masked_originals.append(multiply(masks[i],img_in))
masked_originals_drawtmp = blend(masked_originals_drawtmp, masked_originals[i])
image_show(display, masked_originals_drawtmp, False)
image_save(masked_originals_drawtmp,fn_comb(sys.argv[1],"tmp"))
# 5. motion vector find
motionvector_drawtmp = pygame.Surface(img_bnw.get_size())
motionvectorss = []
for i in range(0,facecount):
print "motion vector face %i"%i
motionvectors = motionsfind(img_in, bolden(masks[i],5),10) # 10px radius
motionvectorss.append(motionvectors)
cormax = 0
corvar_max = 0
for vector in motionvectors:
pos,ang,ofs,cor,corvar = vector
                # correlation: smaller = smaller difference in picture comparison; 0 = identical, which is good
                # corvar: correlation variance, bigger = better because we don't just probe a single-colored surface...
#
# correlation needs normalisation
if cormax<cor:
cormax=cor
if corvar_max<corvar:
corvar_max=corvar
for vector in motionvectors:
pos,ang,ofs,cor,corvar = vector
# brightness of contrast image correlates to "roughness" at this location
#roughness = float(255-max(img_blur.get_at(pos)[:2]))/255
rel_cor = float(cor)/float(cormax)
rel_corvar = float(corvar)/float(corvar_max)
ofs=ofs*(1-rel_cor) * rel_corvar
vector.append(ofs)
# ofs+=1
if ofs>0:
endpos = 10*math.cos(ang*2*math.pi/360)*ofs+pos[0],10*math.sin(ang*2*math.pi/360)*ofs+pos[1]
pygame.draw.line(motionvector_drawtmp, avgcolors[i], pos, endpos, 1)
image_show(display, motionvector_drawtmp, True)
image_save(motionvector_drawtmp,fn_comb(sys.argv[1],"vector"))
combv = []
for motionvectors in motionvectorss: # combine all gathered motion vectors
combv+=motionvectors
#print combv
motionvector_r = motionvector_rainbow(combv,img_bnw.get_size()) # calculate rainbow image from motion vectors
image_show(display, motionvector_r, True)
image_save(motionvector_r,fn_comb(sys.argv[1],"vectom"))
# 6. generate strokes/hatching for each area. it is not necessary to know the area outline as polygon, just check the individual pixels
# buggy!
img_strokepath = pygame.Surface(img_in.get_size())
strokepathss = []
#if False:
for i in range(0,len(masks)):
strokepaths = facewalk(img_in, masks[i], motionvectorss[i])
for polygon in strokepaths:
lastpoint = False
if len(polygon)>1:
for point in polygon:
if not lastpoint:
lastpoint = point
#pygame.draw.circle(img_strokepath, (0,255,0),point, 3)
pygame.draw.line(img_strokepath, avgcolors[i], lastpoint, point, 1)
lastpoint = point
strokepathss.append(strokepaths)
image_show(display, img_strokepath, True)
image_save(img_strokepath,fn_comb(sys.argv[1],"fpath"))
pygame.display.flip()
time.sleep(10)
if __name__ == "__main__":
import pygame
import sys
import time
import math
import random
if len(sys.argv) == 2:
main(sys.argv[1])
if len(sys.argv) == 3:
main(sys.argv[1],sys.argv[2])
|
rackerlabs/pelican-events
|
refs/heads/master
|
pelican_events/contents.py
|
1
|
from pelican.contents import Page
class Event(Page):
"""
    An Event is a meeting, conference, or other occurrence which can be
    displayed and indexed by a static site generator. It is intended to be
    used with the JSON generator and custom templates.
"""
base_properties = ('starts','ends','title','event_type','description','location')
mandatory_properties = ('starts','ends','title','event_type','location')
default_template = 'event'
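# A minimal (hypothetical) event page's metadata, showing the mandatory
# properties; the exact source format depends on the reader in use:
#
#     Title: Monthly Meetup
#     Starts: 2015-06-01 18:00
#     Ends: 2015-06-01 20:00
#     Event_type: meetup
#     Location: 1 Main St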
|