| repo_name (string, 5–100 chars) | ref (string, 12–67 chars) | path (string, 4–244 chars) | copies (string, 1–8 chars) | content (string, 0–1.05M chars, nullable ⌀) |
|---|---|---|---|---|
| ahmetdaglarbas/e-commerce | refs/heads/tez | oscar/apps/dashboard/pages/config.py | 58 |
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _


class PagesDashboardConfig(AppConfig):
    label = 'pages_dashboard'
    name = 'oscar.apps.dashboard.pages'
    verbose_name = _('Pages dashboard')
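# Descriptive note (not in the original file): Django loads this AppConfig
# when 'oscar.apps.dashboard.pages' appears in INSTALLED_APPS; 'label' gives
# the app a unique short name, and ugettext_lazy defers translating the
# verbose_name until it is actually rendered.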
| DemocracyLab/CivicTechExchange | refs/heads/master | oauth2/app_settings.py | 11 |
class AppSettings(object):

    def __init__(self, prefix):
        self.prefix = prefix

    def _setting(self, name, dflt):
        from django.conf import settings
        getter = getattr(settings,
                         'ALLAUTH_SETTING_GETTER',
                         lambda name, dflt: getattr(settings, name, dflt))
        return getter(self.prefix + name, dflt)

    @property
    def QUERY_EMAIL(self):
        """
        Request e-mail address from 3rd party account provider?
        E.g. using OpenID AX
        """
        from allauth.account import app_settings as account_settings
        return self._setting("QUERY_EMAIL",
                             account_settings.EMAIL_REQUIRED)

    @property
    def AUTO_SIGNUP(self):
        """
        Attempt to bypass the signup form by using fields (e.g. username,
        email) retrieved from the social account provider. If a conflict
        arises due to a duplicate e-mail address, the signup form will
        still kick in.
        """
        return self._setting("AUTO_SIGNUP", True)

    @property
    def PROVIDERS(self):
        """
        Provider specific settings
        """
        return self._setting("PROVIDERS", {})

    @property
    def EMAIL_REQUIRED(self):
        """
        The user is required to hand over an e-mail address when signing up
        """
        from allauth.account import app_settings as account_settings
        return self._setting("EMAIL_REQUIRED", account_settings.EMAIL_REQUIRED)

    @property
    def EMAIL_VERIFICATION(self):
        """
        See e-mail verification method
        """
        from allauth.account import app_settings as account_settings
        return self._setting("EMAIL_VERIFICATION",
                             account_settings.EMAIL_VERIFICATION)

    @property
    def ADAPTER(self):
        return self._setting('ADAPTER',
                             'allauth.socialaccount.adapter'
                             '.DefaultSocialAccountAdapter')

    @property
    def FORMS(self):
        return self._setting('FORMS', {})

    @property
    def STORE_TOKENS(self):
        return self._setting('STORE_TOKENS', True)

    @property
    def UID_MAX_LENGTH(self):
        return 191


# Ugly? Guido recommends this himself ...
# http://mail.python.org/pipermail/python-ideas/2012-May/014969.html
import sys  # noqa
app_settings = AppSettings('SOCIALACCOUNT_')
app_settings.__name__ = __name__
sys.modules[__name__] = app_settings
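# A minimal usage sketch (not part of the original module): with the
# 'SOCIALACCOUNT_' prefix used above, an attribute lookup such as
# app_settings.STORE_TOKENS reads the Django setting
# SOCIALACCOUNT_STORE_TOKENS (via ALLAUTH_SETTING_GETTER when one is
# defined) and falls back to the default True when the setting is absent.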
| MatthewWilkes/django | refs/heads/master | django/contrib/gis/db/models/sql/aggregates.py | 298 |
import warnings

from django.db.models.sql import aggregates
from django.db.models.sql.aggregates import *  # NOQA
from django.utils.deprecation import RemovedInDjango110Warning

__all__ = ['Collect', 'Extent', 'Extent3D', 'MakeLine', 'Union'] + aggregates.__all__

warnings.warn(
    "django.contrib.gis.db.models.sql.aggregates is deprecated. Use "
    "django.contrib.gis.db.models.aggregates instead.",
    RemovedInDjango110Warning, stacklevel=2)
| idea4bsd/idea4bsd | refs/heads/idea4bsd-master | python/testData/inspections/PyUnresolvedReferencesInspection/percentStringKeywordTupleArgument.py | 8 |
"%(<warning descr="Unresolved reference 'foo'">foo</warning>)s" % (1,2,3)
| lucafavatella/intellij-community | refs/heads/cli-wip | python/testData/refactoring/introduceVariable/py2862.after.py | 70 |
a = '/import/' + urllib.quote(projectId) + '/issues?' + urllib.urlencode({'assigneeGroup': assigneeGroup})
response = self._reqXml('PUT', a,
                        xml, 400).toxml().encode('utf-8')
| frippe12573/geonode | refs/heads/master | geonode/layers/__init__.py | 396 |
#########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
| ledtvavs/repository.ledtv | refs/heads/master | script.module.liveresolver/lib/liveresolver/modules/f4mproxy/utils/openssl_aes.py | 202 |
# Author: Trevor Perrin
# See the LICENSE file for legal information regarding use of this file.

"""OpenSSL/M2Crypto AES implementation."""

from .cryptomath import *
from .aes import *

if m2cryptoLoaded:

    def new(key, mode, IV):
        return OpenSSL_AES(key, mode, IV)

    class OpenSSL_AES(AES):

        def __init__(self, key, mode, IV):
            AES.__init__(self, key, mode, IV, "openssl")
            self.key = key
            self.IV = IV

        def _createContext(self, encrypt):
            context = m2.cipher_ctx_new()
            if len(self.key) == 16:
                cipherType = m2.aes_128_cbc()
            if len(self.key) == 24:
                cipherType = m2.aes_192_cbc()
            if len(self.key) == 32:
                cipherType = m2.aes_256_cbc()
            m2.cipher_init(context, cipherType, self.key, self.IV, encrypt)
            return context

        def encrypt(self, plaintext):
            AES.encrypt(self, plaintext)
            context = self._createContext(1)
            ciphertext = m2.cipher_update(context, plaintext)
            m2.cipher_ctx_free(context)
            self.IV = ciphertext[-self.block_size:]
            return bytearray(ciphertext)

        def decrypt(self, ciphertext):
            AES.decrypt(self, ciphertext)
            context = self._createContext(0)
            # I think M2Crypto has a bug - it fails to decrypt and return the last block passed in.
            # To work around this, we append sixteen zeros to the string, below:
            plaintext = m2.cipher_update(context, ciphertext + ('\0' * 16))
            # If this bug is ever fixed, then plaintext will end up having a garbage
            # plaintext block on the end. That's okay - the below code will discard it.
            plaintext = plaintext[:len(ciphertext)]
            m2.cipher_ctx_free(context)
            self.IV = ciphertext[-self.block_size:]
            return bytearray(plaintext)
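# Descriptive note (not in the original file): because encrypt() and
# decrypt() overwrite self.IV with the last ciphertext block, successive
# calls on the same OpenSSL_AES object continue one CBC stream rather than
# restarting from the IV that was given to the constructor.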
| Scoppio/agazeta-rest | refs/heads/master | arquivo/services/tobTokenServices.py | 1 |
# This service captures the data using Dora R. as base
# coding: utf-8
import os
import json
import logging
import requests
from arquivo.dao import tobTokenDAO
from django.db.utils import IntegrityError
from settings.base import MINNING_URLS

logger = logging.getLogger('sentry.errors')


def createTobToken(username, token, server, is_active=True):
    tobToken = tobTokenDAO.tobToken(username=username, token=token, server=server, is_active=is_active)
    try:
        tobToken.save()
        logger.info("Created TobToken for username %s", username)
    except Exception:
        logger.info("TobToken for username %s failed to be saved", username)
        raise IntegrityError("TobToken for username " + username + " failed to be saved")
    return tobToken


def createTobTokenAndInviteUserToRegister(server, email, username, token, subed, valid):
    # tobToken = createTobToken(server=server, email=email, token=token, username=username, subed=subed)
    # _email = "" if type(email) != str else email
    # TODO: Add invitation system to this system
    # logger.info("Created User % for email %s", _email)
    raise NotImplementedError


def verifyTobToken(username=None, token=None, tobToken=None):
    if tobToken:
        username = tobToken.username
        token = tobToken.token
    url = MINNING_URLS["Track-o-Bot"]
    try:
        response = requests.get(url, data={'page': 1, 'username': username, 'token': token})
    except Exception:
        logger.error("Failed to capture data from {}".format(url))
        return None  # without a response there is nothing further to check
    try:
        assert response.status_code == 200
        data = json.loads(response.text)
        if len(data['meta'].keys()):
            logger.info("Token %s is valid", username)
            return True
        else:
            logger.info("Token %s is invalid", username)
            return False
    except AssertionError:
        if response.status_code == 401:
            logger.info("The token %s is invalid", username)
            return False
        else:
            logger.error("The url responded with code %d", response.status_code)
            return None
    except Exception:
        logger.error("An unexpected error occurred when trying to read the json from %s", username)
        return None


def addTokenToUser(tobToken, user):
    tobToken.user = user
    tobToken.save()


def getValidTobTokensYield(limit: int = 0):
    """Get username, token and account from database"""
    sqlret = tobTokenDAO.findAllValidTokens(limit)
    logger.debug("{} tob tokens retrieved from database".format(len(sqlret)))
    for i, row in enumerate(sqlret):
        logger.debug("tob token {} of {} - username {} - account {}".format(i, len(sqlret), row.username, row.user))
        yield row


def getAllTobTokensYield(limit: int = 0):
    """Get username, token and account from database"""
    sqlret = tobTokenDAO.findAll(limit)
    logger.debug("{} tob tokens retrieved from database".format(len(sqlret)))
    for i, row in enumerate(sqlret):
        logger.debug("tob token {} of {} - username {} - account {}".format(i, len(sqlret), row.username, row.user))
        yield row


def getAllTobTokens():
    return tobTokenDAO.findAll()
| Big-B702/python-for-android | refs/heads/master | python3-alpha/python3-src/Lib/test/test_netrc.py | 63 |
import netrc, os, unittest, sys, textwrap
from test import support

temp_filename = support.TESTFN


class NetrcTestCase(unittest.TestCase):

    def tearDown(self):
        os.unlink(temp_filename)

    def make_nrc(self, test_data):
        test_data = textwrap.dedent(test_data)
        mode = 'w'
        if sys.platform != 'cygwin':
            mode += 't'
        with open(temp_filename, mode) as fp:
            fp.write(test_data)
        return netrc.netrc(temp_filename)

    def test_default(self):
        nrc = self.make_nrc("""\
            machine host1.domain.com login log1 password pass1 account acct1
            default login log2 password pass2
            """)
        self.assertEqual(nrc.hosts['host1.domain.com'],
                         ('log1', 'acct1', 'pass1'))
        self.assertEqual(nrc.hosts['default'], ('log2', None, 'pass2'))

    def test_macros(self):
        nrc = self.make_nrc("""\
            macdef macro1
            line1
            line2

            macdef macro2
            line3
            line4
            """)
        self.assertEqual(nrc.macros, {'macro1': ['line1\n', 'line2\n'],
                                      'macro2': ['line3\n', 'line4\n']})

    def _test_passwords(self, nrc, passwd):
        nrc = self.make_nrc(nrc)
        self.assertEqual(nrc.hosts['host.domain.com'], ('log', 'acct', passwd))

    def test_password_with_leading_hash(self):
        self._test_passwords("""\
            machine host.domain.com login log password #pass account acct
            """, '#pass')

    def test_password_with_trailing_hash(self):
        self._test_passwords("""\
            machine host.domain.com login log password pass# account acct
            """, 'pass#')

    def test_password_with_internal_hash(self):
        self._test_passwords("""\
            machine host.domain.com login log password pa#ss account acct
            """, 'pa#ss')

    def _test_comment(self, nrc, passwd='pass'):
        nrc = self.make_nrc(nrc)
        self.assertEqual(nrc.hosts['foo.domain.com'], ('bar', None, passwd))
        self.assertEqual(nrc.hosts['bar.domain.com'], ('foo', None, 'pass'))

    def test_comment_before_machine_line(self):
        self._test_comment("""\
            # comment
            machine foo.domain.com login bar password pass
            machine bar.domain.com login foo password pass
            """)

    def test_comment_before_machine_line_no_space(self):
        self._test_comment("""\
            #comment
            machine foo.domain.com login bar password pass
            machine bar.domain.com login foo password pass
            """)

    def test_comment_before_machine_line_hash_only(self):
        self._test_comment("""\
            #
            machine foo.domain.com login bar password pass
            machine bar.domain.com login foo password pass
            """)

    def test_comment_at_end_of_machine_line(self):
        self._test_comment("""\
            machine foo.domain.com login bar password pass # comment
            machine bar.domain.com login foo password pass
            """)

    def test_comment_at_end_of_machine_line_no_space(self):
        self._test_comment("""\
            machine foo.domain.com login bar password pass #comment
            machine bar.domain.com login foo password pass
            """)

    def test_comment_at_end_of_machine_line_pass_has_hash(self):
        self._test_comment("""\
            machine foo.domain.com login bar password #pass #comment
            machine bar.domain.com login foo password pass
            """, '#pass')


def test_main():
    support.run_unittest(NetrcTestCase)

if __name__ == "__main__":
    test_main()
| devaha/archagent | refs/heads/master | node_modules/grunt-plugin/node_modules/npm/node_modules/node-gyp/gyp/test/win/gyptest-link-entrypointsymbol.py | 342 |
#!/usr/bin/env python

# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Make sure entrypointsymbol setting is extracted properly.
"""

import TestGyp

import sys

if sys.platform == 'win32':
  test = TestGyp.TestGyp(formats=['msvs', 'ninja'])

  CHDIR = 'linker-flags'
  test.run_gyp('entrypointsymbol.gyp', chdir=CHDIR)
  test.build('entrypointsymbol.gyp', 'test_ok', chdir=CHDIR)
  test.build('entrypointsymbol.gyp', 'test_fail', chdir=CHDIR, status=1)
  test.pass_test()
| alexallah/django | refs/heads/master | tests/migrations2/test_migrations_2/0001_initial.py | 149 |
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [("migrations", "0002_second")]

    operations = [
        migrations.CreateModel(
            "OtherAuthor",
            [
                ("id", models.AutoField(primary_key=True)),
                ("name", models.CharField(max_length=255)),
                ("slug", models.SlugField(null=True)),
                ("age", models.IntegerField(default=0)),
                ("silly_field", models.BooleanField(default=False)),
            ],
        ),
    ]
| Vassy/odoo | refs/heads/master | addons/web_kanban_gauge/__openerp__.py | 428 |
{
    'name': 'Gauge Widget for Kanban',
    'category': 'Hidden',
    'description': """
This widget allows displaying gauges using the justgage library.
""",
    'version': '1.0',
    'depends': ['web_kanban'],
    'data': [
        'views/web_kanban_gauge.xml',
    ],
    'qweb': [
    ],
    'auto_install': True,
}
| pylixm/sae-django-demo | refs/heads/master | django1.7-sae/site-packages/django/contrib/comments/moderation.py | 60 |
"""
A generic comment-moderation system which allows configuration of
moderation options on a per-model basis.
To use, do two things:
1. Create or import a subclass of ``CommentModerator`` defining the
options you want.
2. Import ``moderator`` from this module and register one or more
models, passing the models and the ``CommentModerator`` options
class you want to use.
Example
-------
First, we define a simple model class which might represent entries in
a Weblog::
from django.db import models
class Entry(models.Model):
title = models.CharField(maxlength=250)
body = models.TextField()
pub_date = models.DateField()
enable_comments = models.BooleanField()
Then we create a ``CommentModerator`` subclass specifying some
moderation options::
from django.contrib.comments.moderation import CommentModerator, moderator
class EntryModerator(CommentModerator):
email_notification = True
enable_field = 'enable_comments'
And finally register it for moderation::
moderator.register(Entry, EntryModerator)
This sample class would apply two moderation steps to each new
comment submitted on an Entry:
* If the entry's ``enable_comments`` field is set to ``False``, the
comment will be rejected (immediately deleted).
* If the comment is successfully posted, an email notification of the
comment will be sent to site staff.
For a full list of built-in moderation options and other
configurability, see the documentation for the ``CommentModerator``
class.
"""
import datetime
from django.conf import settings
from django.core.mail import send_mail
from django.contrib.comments import signals
from django.db.models.base import ModelBase
from django.template import Context, loader
from django.contrib import comments
from django.contrib.sites.shortcuts import get_current_site
from django.utils import timezone
class AlreadyModerated(Exception):
"""
Raised when a model which is already registered for moderation is
attempting to be registered again.
"""
pass
class NotModerated(Exception):
"""
Raised when a model which is not registered for moderation is
attempting to be unregistered.
"""
pass
class CommentModerator(object):
"""
Encapsulates comment-moderation options for a given model.
This class is not designed to be used directly, since it doesn't
enable any of the available moderation options. Instead, subclass
it and override attributes to enable different options::
``auto_close_field``
If this is set to the name of a ``DateField`` or
``DateTimeField`` on the model for which comments are
being moderated, new comments for objects of that model
will be disallowed (immediately deleted) when a certain
number of days have passed after the date specified in
that field. Must be used in conjunction with
``close_after``, which specifies the number of days past
which comments should be disallowed. Default value is
``None``.
``auto_moderate_field``
Like ``auto_close_field``, but instead of outright
deleting new comments when the requisite number of days
have elapsed, it will simply set the ``is_public`` field
of new comments to ``False`` before saving them. Must be
used in conjunction with ``moderate_after``, which
specifies the number of days past which comments should be
moderated. Default value is ``None``.
``close_after``
If ``auto_close_field`` is used, this must specify the
number of days past the value of the field specified by
``auto_close_field`` after which new comments for an
object should be disallowed. Default value is ``None``.
``email_notification``
If ``True``, any new comment on an object of this model
which survives moderation will generate an email to site
staff. Default value is ``False``.
``enable_field``
If this is set to the name of a ``BooleanField`` on the
model for which comments are being moderated, new comments
on objects of that model will be disallowed (immediately
deleted) whenever the value of that field is ``False`` on
the object the comment would be attached to. Default value
is ``None``.
``moderate_after``
If ``auto_moderate_field`` is used, this must specify the number
of days past the value of the field specified by
``auto_moderate_field`` after which new comments for an
object should be marked non-public. Default value is
``None``.
Most common moderation needs can be covered by changing these
attributes, but further customization can be obtained by
subclassing and overriding the following methods. Each method will
be called with three arguments: ``comment``, which is the comment
being submitted, ``content_object``, which is the object the
comment will be attached to, and ``request``, which is the
``HttpRequest`` in which the comment is being submitted::
``allow``
Should return ``True`` if the comment should be allowed to
post on the content object, and ``False`` otherwise (in
which case the comment will be immediately deleted).
``email``
If email notification of the new comment should be sent to
site staff or moderators, this method is responsible for
sending the email.
``moderate``
Should return ``True`` if the comment should be moderated
(in which case its ``is_public`` field will be set to
``False`` before saving), and ``False`` otherwise (in
which case the ``is_public`` field will not be changed).
Subclasses which want to introspect the model for which comments
are being moderated can do so through the attribute ``_model``,
which will be the model class.
"""
auto_close_field = None
auto_moderate_field = None
close_after = None
email_notification = False
enable_field = None
moderate_after = None
def __init__(self, model):
self._model = model
def _get_delta(self, now, then):
"""
Internal helper which will return a ``datetime.timedelta``
representing the time between ``now`` and ``then``. Assumes
``now`` is a ``datetime.date`` or ``datetime.datetime`` later
than ``then``.
If ``now`` and ``then`` are not of the same type due to one of
them being a ``datetime.date`` and the other being a
``datetime.datetime``, both will be coerced to
``datetime.date`` before calculating the delta.
"""
if now.__class__ is not then.__class__:
now = datetime.date(now.year, now.month, now.day)
then = datetime.date(then.year, then.month, then.day)
if now < then:
raise ValueError("Cannot determine moderation rules because date field is set to a value in the future")
return now - then
def allow(self, comment, content_object, request):
"""
Determine whether a given comment is allowed to be posted on
a given object.
Return ``True`` if the comment should be allowed, ``False
otherwise.
"""
if self.enable_field:
if not getattr(content_object, self.enable_field):
return False
if self.auto_close_field and self.close_after is not None:
close_after_date = getattr(content_object, self.auto_close_field)
if close_after_date is not None and self._get_delta(timezone.now(), close_after_date).days >= self.close_after:
return False
return True
def moderate(self, comment, content_object, request):
"""
Determine whether a given comment on a given object should be
allowed to show up immediately, or should be marked non-public
and await approval.
Return ``True`` if the comment should be moderated (marked
non-public), ``False`` otherwise.
"""
if self.auto_moderate_field and self.moderate_after is not None:
moderate_after_date = getattr(content_object, self.auto_moderate_field)
if moderate_after_date is not None and self._get_delta(timezone.now(), moderate_after_date).days >= self.moderate_after:
return True
return False
def email(self, comment, content_object, request):
"""
Send email notification of a new comment to site staff when email
notifications have been requested.
"""
if not self.email_notification:
return
recipient_list = [manager_tuple[1] for manager_tuple in settings.MANAGERS]
t = loader.get_template('comments/comment_notification_email.txt')
c = Context({'comment': comment,
'content_object': content_object})
subject = '[%s] New comment posted on "%s"' % (get_current_site(request).name,
content_object)
message = t.render(c)
send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, recipient_list, fail_silently=True)
class Moderator(object):
"""
Handles moderation of a set of models.
An instance of this class will maintain a list of one or more
models registered for comment moderation, and their associated
moderation classes, and apply moderation to all incoming comments.
To register a model, obtain an instance of ``Moderator`` (this
module exports one as ``moderator``), and call its ``register``
method, passing the model class and a moderation class (which
should be a subclass of ``CommentModerator``). Note that both of
these should be the actual classes, not instances of the classes.
To cease moderation for a model, call the ``unregister`` method,
passing the model class.
For convenience, both ``register`` and ``unregister`` can also
accept a list of model classes in place of a single model; this
allows easier registration of multiple models with the same
``CommentModerator`` class.
The actual moderation is applied in two phases: one prior to
saving a new comment, and the other immediately after saving. The
pre-save moderation may mark a comment as non-public or mark it to
be removed; the post-save moderation may delete a comment which
was disallowed (there is currently no way to prevent the comment
being saved once before removal) and, if the comment is still
around, will send any notification emails the comment generated.
"""
def __init__(self):
self._registry = {}
self.connect()
def connect(self):
"""
Hook up the moderation methods to pre- and post-save signals
from the comment models.
"""
signals.comment_will_be_posted.connect(self.pre_save_moderation, sender=comments.get_model())
signals.comment_was_posted.connect(self.post_save_moderation, sender=comments.get_model())
def register(self, model_or_iterable, moderation_class):
"""
Register a model or a list of models for comment moderation,
using a particular moderation class.
Raise ``AlreadyModerated`` if any of the models are already
registered.
"""
if isinstance(model_or_iterable, ModelBase):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if model in self._registry:
raise AlreadyModerated("The model '%s' is already being moderated" % model._meta.model_name)
self._registry[model] = moderation_class(model)
def unregister(self, model_or_iterable):
"""
Remove a model or a list of models from the list of models
whose comments will be moderated.
Raise ``NotModerated`` if any of the models are not currently
registered for moderation.
"""
if isinstance(model_or_iterable, ModelBase):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if model not in self._registry:
raise NotModerated("The model '%s' is not currently being moderated" % model._meta.model_name)
del self._registry[model]
def pre_save_moderation(self, sender, comment, request, **kwargs):
"""
Apply any necessary pre-save moderation steps to new
comments.
"""
model = comment.content_type.model_class()
if model not in self._registry:
return
content_object = comment.content_object
moderation_class = self._registry[model]
# Comment will be disallowed outright (HTTP 403 response)
if not moderation_class.allow(comment, content_object, request):
return False
if moderation_class.moderate(comment, content_object, request):
comment.is_public = False
def post_save_moderation(self, sender, comment, request, **kwargs):
"""
Apply any necessary post-save moderation steps to new
comments.
"""
model = comment.content_type.model_class()
if model not in self._registry:
return
self._registry[model].email(comment, comment.content_object, request)
# Import this instance in your own code to use in registering
# your models for moderation.
moderator = Moderator()
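# Illustration of the auto-close arithmetic above (hypothetical numbers, not
# from the original file): with close_after = 14 and an object whose
# auto_close_field date lies 15 days in the past,
# _get_delta(timezone.now(), that_date).days == 15 >= 14, so allow()
# returns False and the incoming comment is deleted outright.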
| ThirdProject/android_external_chromium_org | refs/heads/cm-11.0 | tools/android/find_unused_resources.py | 146 |
#!/usr/bin/python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Lists unused Java strings and other resources."""

import optparse
import re
import subprocess
import sys


def GetLibraryResources(r_txt_paths):
  """Returns the resources packaged in a list of libraries.

  Args:
    r_txt_paths: paths to each library's generated R.txt file which lists the
        resources it contains.

  Returns:
    The resources in the libraries as a list of tuples (type, name). Example:
        [('drawable', 'arrow'), ('layout', 'month_picker'), ...]
  """
  resources = []
  for r_txt_path in r_txt_paths:
    with open(r_txt_path, 'r') as f:
      for line in f:
        line = line.strip()
        if not line:
          continue
        data_type, res_type, name, _ = line.split(None, 3)
        assert data_type in ('int', 'int[]')
        # Hide attrs, which are redundant with styleables and always appear
        # unused, and hide ids, which are innocuous even if unused.
        if res_type in ('attr', 'id'):
          continue
        resources.append((res_type, name))
  return resources


def GetUsedResources(source_paths, resource_types):
  """Returns the types and names of resources used in Java or resource files.

  Args:
    source_paths: a list of files or folders collectively containing all the
        Java files, resource files, and the AndroidManifest.xml.
    resource_types: a list of resource types to look for. Example:
        ['string', 'drawable']

  Returns:
    The resources referenced by the Java and resource files as a list of tuples
        (type, name). Example:
        [('drawable', 'app_icon'), ('layout', 'month_picker'), ...]
  """
  type_regex = '|'.join(map(re.escape, resource_types))
  patterns = [r'@(())(%s)/(\w+)' % type_regex,
              r'\b((\w+\.)*)R\.(%s)\.(\w+)' % type_regex]
  resources = []
  for pattern in patterns:
    p = subprocess.Popen(
        ['grep', '-REIhoe', pattern] + source_paths,
        stdout=subprocess.PIPE)
    grep_out, grep_err = p.communicate()
    # Check stderr instead of return code, since return code is 1 when no
    # matches are found.
    assert not grep_err, 'grep failed'
    matches = re.finditer(pattern, grep_out)
    for match in matches:
      package = match.group(1)
      if package == 'android.':
        continue
      type_ = match.group(3)
      name = match.group(4)
      resources.append((type_, name))
  return resources


def FormatResources(resources):
  """Formats a list of resources for printing.

  Args:
    resources: a list of resources, given as (type, name) tuples.
  """
  return '\n'.join(['%-12s %s' % (t, n) for t, n in sorted(resources)])


def ParseArgs(args):
  parser = optparse.OptionParser()
  parser.add_option('-v', help='Show verbose output', action='store_true')
  parser.add_option('-s', '--source-path', help='Specify a source folder path '
                    '(e.g. ui/android/java)', action='append', default=[])
  parser.add_option('-r', '--r-txt-path', help='Specify a "first-party" R.txt '
                    'file (e.g. out/Debug/content_shell_apk/R.txt)',
                    action='append', default=[])
  parser.add_option('-t', '--third-party-r-txt-path', help='Specify an R.txt '
                    'file for a third party library', action='append',
                    default=[])
  options, args = parser.parse_args(args=args)
  if args:
    parser.error('positional arguments not allowed')
  if not options.source_path:
    parser.error('at least one source folder path must be specified with -s')
  if not options.r_txt_path:
    parser.error('at least one R.txt path must be specified with -r')
  return (options.v, options.source_path, options.r_txt_path,
          options.third_party_r_txt_path)


def main(args=None):
  verbose, source_paths, r_txt_paths, third_party_r_txt_paths = ParseArgs(args)
  defined_resources = (set(GetLibraryResources(r_txt_paths)) -
                       set(GetLibraryResources(third_party_r_txt_paths)))
  resource_types = list(set([r[0] for r in defined_resources]))
  used_resources = set(GetUsedResources(source_paths, resource_types))
  unused_resources = defined_resources - used_resources
  undefined_resources = used_resources - defined_resources

  # aapt dump fails silently. Notify the user if things look wrong.
  if not defined_resources:
    print >> sys.stderr, (
        'Warning: No resources found. Did you provide the correct R.txt paths?')
  if not used_resources:
    print >> sys.stderr, (
        'Warning: No resources referenced from Java or resource files. Did you '
        'provide the correct source paths?')
  if undefined_resources:
    print >> sys.stderr, (
        'Warning: found %d "undefined" resources that are referenced by Java '
        'files or by other resources, but are not defined anywhere. Run with '
        '-v to see them.' % len(undefined_resources))

  if verbose:
    print '%d undefined resources:' % len(undefined_resources)
    print FormatResources(undefined_resources), '\n'
    print '%d resources defined:' % len(defined_resources)
    print FormatResources(defined_resources), '\n'
    print '%d used resources:' % len(used_resources)
    print FormatResources(used_resources), '\n'
    print '%d unused resources:' % len(unused_resources)
    print FormatResources(unused_resources)


if __name__ == '__main__':
  main()
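# Illustrative input/output (hypothetical R.txt line, not from the source):
# a line like "int drawable arrow 0x7f020000" splits into the fields
# ('int', 'drawable', 'arrow', ...) above and is recorded as the tuple
# ('drawable', 'arrow'); 'attr' and 'id' entries are deliberately skipped.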
| adrianholovaty/django | refs/heads/master | django/contrib/gis/gdal/tests/test_geom.py | 11 |
from binascii import b2a_hex
try:
    import cPickle as pickle
except ImportError:
    import pickle

from django.contrib.gis.gdal import (OGRGeometry, OGRGeomType, OGRException,
    OGRIndexError, SpatialReference, CoordTransform, GDAL_VERSION)
from django.contrib.gis.gdal.prototypes.geom import GEOJSON
from django.contrib.gis.geometry.test_data import TestDataMixin
from django.utils import unittest


class OGRGeomTest(unittest.TestCase, TestDataMixin):
    "This tests the OGR Geometry."

    def test00a_geomtype(self):
        "Testing OGRGeomType object."
        # OGRGeomType should initialize on all these inputs.
        try:
            g = OGRGeomType(1)
            g = OGRGeomType(7)
            g = OGRGeomType('point')
            g = OGRGeomType('GeometrycollectioN')
            g = OGRGeomType('LINearrING')
            g = OGRGeomType('Unknown')
        except:
            self.fail('Could not create an OGRGeomType object!')

        # Should throw TypeError on this input
        self.assertRaises(OGRException, OGRGeomType, 23)
        self.assertRaises(OGRException, OGRGeomType, 'fooD')
        self.assertRaises(OGRException, OGRGeomType, 9)

        # Equivalence can take strings, ints, and other OGRGeomTypes
        self.assertEqual(True, OGRGeomType(1) == OGRGeomType(1))
        self.assertEqual(True, OGRGeomType(7) == 'GeometryCollection')
        self.assertEqual(True, OGRGeomType('point') == 'POINT')
        self.assertEqual(False, OGRGeomType('point') == 2)
        self.assertEqual(True, OGRGeomType('unknown') == 0)
        self.assertEqual(True, OGRGeomType(6) == 'MULtiPolyGON')
        self.assertEqual(False, OGRGeomType(1) != OGRGeomType('point'))
        self.assertEqual(True, OGRGeomType('POINT') != OGRGeomType(6))

        # Testing the Django field name equivalent property.
        self.assertEqual('PointField', OGRGeomType('Point').django)
        self.assertEqual('GeometryField', OGRGeomType('Unknown').django)
        self.assertEqual(None, OGRGeomType('none').django)

        # 'Geometry' initialization implies an unknown geometry type.
        gt = OGRGeomType('Geometry')
        self.assertEqual(0, gt.num)
        self.assertEqual('Unknown', gt.name)

    def test00b_geomtype_25d(self):
        "Testing OGRGeomType object with 25D types."
        wkb25bit = OGRGeomType.wkb25bit
        self.assertTrue(OGRGeomType(wkb25bit + 1) == 'Point25D')
        self.assertTrue(OGRGeomType('MultiLineString25D') == (5 + wkb25bit))
        self.assertEqual('GeometryCollectionField', OGRGeomType('GeometryCollection25D').django)

    def test01a_wkt(self):
        "Testing WKT output."
        for g in self.geometries.wkt_out:
            geom = OGRGeometry(g.wkt)
            self.assertEqual(g.wkt, geom.wkt)

    def test01a_ewkt(self):
        "Testing EWKT input/output."
        for ewkt_val in ('POINT (1 2 3)', 'LINEARRING (0 0,1 1,2 1,0 0)'):
            # First with ewkt output when no SRID in EWKT
            self.assertEqual(ewkt_val, OGRGeometry(ewkt_val).ewkt)
            # Now test consumption with an SRID specified.
            ewkt_val = 'SRID=4326;%s' % ewkt_val
            geom = OGRGeometry(ewkt_val)
            self.assertEqual(ewkt_val, geom.ewkt)
            self.assertEqual(4326, geom.srs.srid)

    def test01b_gml(self):
        "Testing GML output."
        for g in self.geometries.wkt_out:
            geom = OGRGeometry(g.wkt)
            exp_gml = g.gml
            if GDAL_VERSION >= (1, 8):
                # In GDAL 1.8, the non-conformant GML tag <gml:GeometryCollection> was
                # replaced with <gml:MultiGeometry>.
                exp_gml = exp_gml.replace('GeometryCollection', 'MultiGeometry')
            self.assertEqual(exp_gml, geom.gml)

    def test01c_hex(self):
        "Testing HEX input/output."
        for g in self.geometries.hex_wkt:
            geom1 = OGRGeometry(g.wkt)
            self.assertEqual(g.hex, geom1.hex)
            # Constructing w/HEX
            geom2 = OGRGeometry(g.hex)
            self.assertEqual(geom1, geom2)

    def test01d_wkb(self):
        "Testing WKB input/output."
        for g in self.geometries.hex_wkt:
            geom1 = OGRGeometry(g.wkt)
            wkb = geom1.wkb
            self.assertEqual(b2a_hex(wkb).upper(), g.hex)
            # Constructing w/WKB.
            geom2 = OGRGeometry(wkb)
            self.assertEqual(geom1, geom2)

    def test01e_json(self):
        "Testing GeoJSON input/output."
        if not GEOJSON: return
        for g in self.geometries.json_geoms:
            geom = OGRGeometry(g.wkt)
            if not hasattr(g, 'not_equal'):
                self.assertEqual(g.json, geom.json)
                self.assertEqual(g.json, geom.geojson)
            self.assertEqual(OGRGeometry(g.wkt), OGRGeometry(geom.json))

    def test02_points(self):
        "Testing Point objects."
        prev = OGRGeometry('POINT(0 0)')
        for p in self.geometries.points:
            if not hasattr(p, 'z'):  # No 3D
                pnt = OGRGeometry(p.wkt)
                self.assertEqual(1, pnt.geom_type)
                self.assertEqual('POINT', pnt.geom_name)
                self.assertEqual(p.x, pnt.x)
                self.assertEqual(p.y, pnt.y)
                self.assertEqual((p.x, p.y), pnt.tuple)

    def test03_multipoints(self):
        "Testing MultiPoint objects."
        for mp in self.geometries.multipoints:
            mgeom1 = OGRGeometry(mp.wkt)  # First one from WKT
            self.assertEqual(4, mgeom1.geom_type)
            self.assertEqual('MULTIPOINT', mgeom1.geom_name)
            mgeom2 = OGRGeometry('MULTIPOINT')  # Creating empty multipoint
            mgeom3 = OGRGeometry('MULTIPOINT')
            for g in mgeom1:
                mgeom2.add(g)  # adding each point from the multipoints
                mgeom3.add(g.wkt)  # should take WKT as well
            self.assertEqual(mgeom1, mgeom2)  # they should equal
            self.assertEqual(mgeom1, mgeom3)
            self.assertEqual(mp.coords, mgeom2.coords)
            self.assertEqual(mp.n_p, mgeom2.point_count)

    def test04_linestring(self):
        "Testing LineString objects."
        prev = OGRGeometry('POINT(0 0)')
        for ls in self.geometries.linestrings:
            linestr = OGRGeometry(ls.wkt)
            self.assertEqual(2, linestr.geom_type)
            self.assertEqual('LINESTRING', linestr.geom_name)
            self.assertEqual(ls.n_p, linestr.point_count)
            self.assertEqual(ls.coords, linestr.tuple)
            self.assertEqual(True, linestr == OGRGeometry(ls.wkt))
            self.assertEqual(True, linestr != prev)
            self.assertRaises(OGRIndexError, linestr.__getitem__, len(linestr))
            prev = linestr

            # Testing the x, y properties.
            x = [tmpx for tmpx, tmpy in ls.coords]
            y = [tmpy for tmpx, tmpy in ls.coords]
            self.assertEqual(x, linestr.x)
            self.assertEqual(y, linestr.y)

    def test05_multilinestring(self):
        "Testing MultiLineString objects."
        prev = OGRGeometry('POINT(0 0)')
        for mls in self.geometries.multilinestrings:
            mlinestr = OGRGeometry(mls.wkt)
            self.assertEqual(5, mlinestr.geom_type)
            self.assertEqual('MULTILINESTRING', mlinestr.geom_name)
            self.assertEqual(mls.n_p, mlinestr.point_count)
            self.assertEqual(mls.coords, mlinestr.tuple)
            self.assertEqual(True, mlinestr == OGRGeometry(mls.wkt))
            self.assertEqual(True, mlinestr != prev)
            prev = mlinestr
            for ls in mlinestr:
                self.assertEqual(2, ls.geom_type)
                self.assertEqual('LINESTRING', ls.geom_name)
            self.assertRaises(OGRIndexError, mlinestr.__getitem__, len(mlinestr))

    def test06_linearring(self):
        "Testing LinearRing objects."
        prev = OGRGeometry('POINT(0 0)')
        for rr in self.geometries.linearrings:
            lr = OGRGeometry(rr.wkt)
            # self.assertEqual(101, lr.geom_type.num)
            self.assertEqual('LINEARRING', lr.geom_name)
            self.assertEqual(rr.n_p, len(lr))
            self.assertEqual(True, lr == OGRGeometry(rr.wkt))
            self.assertEqual(True, lr != prev)
            prev = lr

    def test07a_polygons(self):
        "Testing Polygon objects."

        # Testing `from_bbox` class method
        bbox = (-180, -90, 180, 90)
        p = OGRGeometry.from_bbox(bbox)
        self.assertEqual(bbox, p.extent)

        prev = OGRGeometry('POINT(0 0)')
        for p in self.geometries.polygons:
            poly = OGRGeometry(p.wkt)
            self.assertEqual(3, poly.geom_type)
            self.assertEqual('POLYGON', poly.geom_name)
            self.assertEqual(p.n_p, poly.point_count)
            self.assertEqual(p.n_i + 1, len(poly))

            # Testing area & centroid.
            self.assertAlmostEqual(p.area, poly.area, 9)
            x, y = poly.centroid.tuple
            self.assertAlmostEqual(p.centroid[0], x, 9)
            self.assertAlmostEqual(p.centroid[1], y, 9)

            # Testing equivalence
            self.assertEqual(True, poly == OGRGeometry(p.wkt))
            self.assertEqual(True, poly != prev)

            if p.ext_ring_cs:
                ring = poly[0]
                self.assertEqual(p.ext_ring_cs, ring.tuple)
                self.assertEqual(p.ext_ring_cs, poly[0].tuple)
                self.assertEqual(len(p.ext_ring_cs), ring.point_count)

            for r in poly:
                self.assertEqual('LINEARRING', r.geom_name)

    def test07b_closepolygons(self):
        "Testing closing Polygon objects."
        # Both rings in this geometry are not closed.
        poly = OGRGeometry('POLYGON((0 0, 5 0, 5 5, 0 5), (1 1, 2 1, 2 2, 2 1))')
        self.assertEqual(8, poly.point_count)
        print("\nBEGIN - expecting IllegalArgumentException; safe to ignore.\n")
        try:
            c = poly.centroid
        except OGRException:
            # Should raise an OGR exception, rings are not closed
            pass
        else:
            self.fail('Should have raised an OGRException!')
        print("\nEND - expecting IllegalArgumentException; safe to ignore.\n")

        # Closing the rings -- doesn't work on GDAL versions 1.4.1 and below:
        # http://trac.osgeo.org/gdal/ticket/1673
        if GDAL_VERSION <= (1, 4, 1): return
        poly.close_rings()
        self.assertEqual(10, poly.point_count)  # Two closing points should've been added
        self.assertEqual(OGRGeometry('POINT(2.5 2.5)'), poly.centroid)

    def test08_multipolygons(self):
        "Testing MultiPolygon objects."
        prev = OGRGeometry('POINT(0 0)')
        for mp in self.geometries.multipolygons:
            mpoly = OGRGeometry(mp.wkt)
            self.assertEqual(6, mpoly.geom_type)
            self.assertEqual('MULTIPOLYGON', mpoly.geom_name)
            if mp.valid:
                self.assertEqual(mp.n_p, mpoly.point_count)
                self.assertEqual(mp.num_geom, len(mpoly))
                self.assertRaises(OGRIndexError, mpoly.__getitem__, len(mpoly))
                for p in mpoly:
                    self.assertEqual('POLYGON', p.geom_name)
                    self.assertEqual(3, p.geom_type)
            self.assertEqual(mpoly.wkt, OGRGeometry(mp.wkt).wkt)

    def test09a_srs(self):
        "Testing OGR Geometries with Spatial Reference objects."
        for mp in self.geometries.multipolygons:
            # Creating a geometry w/spatial reference
            sr = SpatialReference('WGS84')
            mpoly = OGRGeometry(mp.wkt, sr)
            self.assertEqual(sr.wkt, mpoly.srs.wkt)

            # Ensuring that SRS is propagated to clones.
            klone = mpoly.clone()
            self.assertEqual(sr.wkt, klone.srs.wkt)

            # Ensuring all children geometries (polygons and their rings) all
            # return the assigned spatial reference as well.
            for poly in mpoly:
                self.assertEqual(sr.wkt, poly.srs.wkt)
                for ring in poly:
                    self.assertEqual(sr.wkt, ring.srs.wkt)

            # Ensuring SRS propagate in topological ops.
            a = OGRGeometry(self.geometries.topology_geoms[0].wkt_a, sr)
            b = OGRGeometry(self.geometries.topology_geoms[0].wkt_b, sr)
            diff = a.difference(b)
            union = a.union(b)
            self.assertEqual(sr.wkt, diff.srs.wkt)
            self.assertEqual(sr.srid, union.srs.srid)

            # Instantiating w/an integer SRID
            mpoly = OGRGeometry(mp.wkt, 4326)
            self.assertEqual(4326, mpoly.srid)
            mpoly.srs = SpatialReference(4269)
            self.assertEqual(4269, mpoly.srid)
            self.assertEqual('NAD83', mpoly.srs.name)

            # Incrementing through the multipolygon after the spatial reference
            # has been re-assigned.
            for poly in mpoly:
                self.assertEqual(mpoly.srs.wkt, poly.srs.wkt)
                poly.srs = 32140
                for ring in poly:
                    # Changing each ring in the polygon
                    self.assertEqual(32140, ring.srs.srid)
                    self.assertEqual('NAD83 / Texas South Central', ring.srs.name)
                    ring.srs = str(SpatialReference(4326))  # back to WGS84
                    self.assertEqual(4326, ring.srs.srid)

                    # Using the `srid` property.
                    ring.srid = 4322
                    self.assertEqual('WGS 72', ring.srs.name)
                    self.assertEqual(4322, ring.srid)

    def test09b_srs_transform(self):
        "Testing transform()."
        orig = OGRGeometry('POINT (-104.609 38.255)', 4326)
        trans = OGRGeometry('POINT (992385.4472045 481455.4944650)', 2774)

        # Using an srid, a SpatialReference object, and a CoordTransform object
        # or transformations.
        t1, t2, t3 = orig.clone(), orig.clone(), orig.clone()
        t1.transform(trans.srid)
        t2.transform(SpatialReference('EPSG:2774'))
        ct = CoordTransform(SpatialReference('WGS84'), SpatialReference(2774))
        t3.transform(ct)

        # Testing use of the `clone` keyword.
        k1 = orig.clone()
        k2 = k1.transform(trans.srid, clone=True)
        self.assertEqual(k1, orig)
        self.assertNotEqual(k1, k2)

        prec = 3
        for p in (t1, t2, t3, k2):
            self.assertAlmostEqual(trans.x, p.x, prec)
            self.assertAlmostEqual(trans.y, p.y, prec)

    def test09c_transform_dim(self):
        "Testing coordinate dimension is the same on transformed geometries."
        ls_orig = OGRGeometry('LINESTRING(-104.609 38.255)', 4326)
        ls_trans = OGRGeometry('LINESTRING(992385.4472045 481455.4944650)', 2774)

        prec = 3
        ls_orig.transform(ls_trans.srs)
        # Making sure the coordinate dimension is still 2D.
        self.assertEqual(2, ls_orig.coord_dim)
        self.assertAlmostEqual(ls_trans.x[0], ls_orig.x[0], prec)
        self.assertAlmostEqual(ls_trans.y[0], ls_orig.y[0], prec)

    def test10_difference(self):
        "Testing difference()."
        for i in xrange(len(self.geometries.topology_geoms)):
            a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
            b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
            d1 = OGRGeometry(self.geometries.diff_geoms[i].wkt)
            d2 = a.difference(b)
            self.assertEqual(d1, d2)
            self.assertEqual(d1, a - b)  # __sub__ is difference operator
            a -= b  # testing __isub__
            self.assertEqual(d1, a)

    def test11_intersection(self):
        "Testing intersects() and intersection()."
        for i in xrange(len(self.geometries.topology_geoms)):
            a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
            b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
            i1 = OGRGeometry(self.geometries.intersect_geoms[i].wkt)
            self.assertEqual(True, a.intersects(b))
            i2 = a.intersection(b)
            self.assertEqual(i1, i2)
            self.assertEqual(i1, a & b)  # __and__ is intersection operator
            a &= b  # testing __iand__
            self.assertEqual(i1, a)

    def test12_symdifference(self):
        "Testing sym_difference()."
        for i in xrange(len(self.geometries.topology_geoms)):
            a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
            b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
            d1 = OGRGeometry(self.geometries.sdiff_geoms[i].wkt)
            d2 = a.sym_difference(b)
            self.assertEqual(d1, d2)
            self.assertEqual(d1, a ^ b)  # __xor__ is symmetric difference operator
            a ^= b  # testing __ixor__
            self.assertEqual(d1, a)

    def test13_union(self):
        "Testing union()."
        for i in xrange(len(self.geometries.topology_geoms)):
            a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
            b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
            u1 = OGRGeometry(self.geometries.union_geoms[i].wkt)
            u2 = a.union(b)
            self.assertEqual(u1, u2)
            self.assertEqual(u1, a | b)  # __or__ is union operator
            a |= b  # testing __ior__
            self.assertEqual(u1, a)

    def test14_add(self):
        "Testing GeometryCollection.add()."
        # Can't insert a Point into a MultiPolygon.
        mp = OGRGeometry('MultiPolygon')
        pnt = OGRGeometry('POINT(5 23)')
        self.assertRaises(OGRException, mp.add, pnt)

        # GeometryCollection.add may take an OGRGeometry (if another collection
        # of the same type all child geoms will be added individually) or WKT.
        for mp in self.geometries.multipolygons:
            mpoly = OGRGeometry(mp.wkt)
            mp1 = OGRGeometry('MultiPolygon')
            mp2 = OGRGeometry('MultiPolygon')
            mp3 = OGRGeometry('MultiPolygon')

            for poly in mpoly:
                mp1.add(poly)  # Adding a geometry at a time
                mp2.add(poly.wkt)  # Adding WKT
            mp3.add(mpoly)  # Adding a MultiPolygon's entire contents at once.
            for tmp in (mp1, mp2, mp3): self.assertEqual(mpoly, tmp)

    def test15_extent(self):
        "Testing `extent` property."
        # The xmin, ymin, xmax, ymax of the MultiPoint should be returned.
        mp = OGRGeometry('MULTIPOINT(5 23, 0 0, 10 50)')
        self.assertEqual((0.0, 0.0, 10.0, 50.0), mp.extent)
        # Testing on the 'real world' Polygon.
        poly = OGRGeometry(self.geometries.polygons[3].wkt)
        ring = poly.shell
        x, y = ring.x, ring.y
        xmin, ymin = min(x), min(y)
        xmax, ymax = max(x), max(y)
        self.assertEqual((xmin, ymin, xmax, ymax), poly.extent)

    def test16_25D(self):
        "Testing 2.5D geometries."
        pnt_25d = OGRGeometry('POINT(1 2 3)')
        self.assertEqual('Point25D', pnt_25d.geom_type.name)
        self.assertEqual(3.0, pnt_25d.z)
        self.assertEqual(3, pnt_25d.coord_dim)
        ls_25d = OGRGeometry('LINESTRING(1 1 1,2 2 2,3 3 3)')
        self.assertEqual('LineString25D', ls_25d.geom_type.name)
        self.assertEqual([1.0, 2.0, 3.0], ls_25d.z)
        self.assertEqual(3, ls_25d.coord_dim)

    def test17_pickle(self):
        "Testing pickle support."
        g1 = OGRGeometry('LINESTRING(1 1 1,2 2 2,3 3 3)', 'WGS84')
        g2 = pickle.loads(pickle.dumps(g1))
        self.assertEqual(g1, g2)
        self.assertEqual(4326, g2.srs.srid)
        self.assertEqual(g1.srs.wkt, g2.srs.wkt)

    def test18_ogrgeometry_transform_workaround(self):
        "Testing coordinate dimensions on geometries after transformation."
        # A bug in GDAL versions prior to 1.7 changes the coordinate
        # dimension of a geometry after it has been transformed.
        # This test ensures that the bug workarounds employed within
        # `OGRGeometry.transform` indeed work.
        wkt_2d = "MULTILINESTRING ((0 0,1 1,2 2))"
        wkt_3d = "MULTILINESTRING ((0 0 0,1 1 1,2 2 2))"
        srid = 4326

        # For both the 2D and 3D MultiLineString, ensure _both_ the dimension
        # of the collection and the component LineString have the expected
        # coordinate dimension after transform.
        geom = OGRGeometry(wkt_2d, srid)
        geom.transform(srid)
        self.assertEqual(2, geom.coord_dim)
        self.assertEqual(2, geom[0].coord_dim)
        self.assertEqual(wkt_2d, geom.wkt)

        geom = OGRGeometry(wkt_3d, srid)
        geom.transform(srid)
        self.assertEqual(3, geom.coord_dim)
        self.assertEqual(3, geom[0].coord_dim)
        self.assertEqual(wkt_3d, geom.wkt)

    def test19_equivalence_regression(self):
        "Testing equivalence methods with non-OGRGeometry instances."
        self.assertNotEqual(None, OGRGeometry('POINT(0 0)'))
        self.assertEqual(False, OGRGeometry('LINESTRING(0 0, 1 1)') == 3)


def suite():
    s = unittest.TestSuite()
    s.addTest(unittest.makeSuite(OGRGeomTest))
    return s


def run(verbosity=2):
    unittest.TextTestRunner(verbosity=verbosity).run(suite())
| 163gal/Time-Line | refs/heads/master | libs/wxPython/lib/ClickableHtmlWindow.py | 6 |
## This file imports items from the wx package into the wxPython package for
## backwards compatibility. Some names will also have a 'wx' added on if
## that is how they used to be named in the old wxPython package.
import wx.lib.ClickableHtmlWindow
__doc__ = wx.lib.ClickableHtmlWindow.__doc__
wxPyClickableHtmlWindow = wx.lib.ClickableHtmlWindow.PyClickableHtmlWindow
| Distrotech/qtwebkit | refs/heads/distrotech-qtwebkit | Tools/Scripts/webkitpy/common/editdistance.py | 138 |
# Copyright (c) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from array import array


def edit_distance(str1, str2):
    unsignedShort = 'h'
    distances = [array(unsignedShort, (0,) * (len(str2) + 1)) for i in range(0, len(str1) + 1)]
    # distances[0][0] = 0 since distance between str1[:0] and str2[:0] is 0
    for i in range(1, len(str1) + 1):
        distances[i][0] = i  # Distance between str1[:i] and str2[:0] is i
    for j in range(1, len(str2) + 1):
        distances[0][j] = j  # Distance between str1[:0] and str2[:j] is j
    for i in range(0, len(str1)):
        for j in range(0, len(str2)):
            diff = 0 if str1[i] == str2[j] else 1
            # Deletion, Insertion, Identical / Replacement
            distances[i + 1][j + 1] = min(distances[i + 1][j] + 1, distances[i][j + 1] + 1, distances[i][j] + diff)
    return distances[len(str1)][len(str2)]
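# A worked example (not part of the original module): turning "kitten" into
# "sitting" needs three single-character edits (k->s, e->i, insert g), so
# edit_distance("kitten", "sitting") returns 3.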
| saghul/gyn | refs/heads/master | test/win/gyptest-link-incremental.py | 344 |
#!/usr/bin/env python

# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Make sure incremental linking setting is extracted properly.
"""

import TestGyp

import sys

if sys.platform == 'win32':
  test = TestGyp.TestGyp(formats=['msvs', 'ninja'])

  CHDIR = 'linker-flags'
  test.run_gyp('incremental.gyp', chdir=CHDIR)
  test.build('incremental.gyp', test.ALL, chdir=CHDIR)

  def HasILTTables(exe):
    full_path = test.built_file_path(exe, chdir=CHDIR)
    output = test.run_dumpbin('/disasm', full_path)
    return '@ILT+' in output

  # Default or unset is to be on.
  if not HasILTTables('test_incremental_unset.exe'):
    test.fail_test()
  if not HasILTTables('test_incremental_default.exe'):
    test.fail_test()
  if HasILTTables('test_incremental_no.exe'):
    test.fail_test()
  if not HasILTTables('test_incremental_yes.exe'):
    test.fail_test()

  test.pass_test()
| jorik041/scrapy | refs/heads/master | scrapy/core/downloader/handlers/http.py | 144 |
from scrapy import optional_features
from .http10 import HTTP10DownloadHandler

if 'http11' in optional_features:
    from .http11 import HTTP11DownloadHandler as HTTPDownloadHandler
else:
    HTTPDownloadHandler = HTTP10DownloadHandler


# backwards compatibility
class HttpDownloadHandler(HTTP10DownloadHandler):

    def __init__(self, *args, **kwargs):
        import warnings
        from scrapy.exceptions import ScrapyDeprecationWarning
        warnings.warn('HttpDownloadHandler is deprecated, import scrapy.core.downloader'
                      '.handlers.http10.HTTP10DownloadHandler instead',
                      category=ScrapyDeprecationWarning, stacklevel=1)
        super(HttpDownloadHandler, self).__init__(*args, **kwargs)
| isrohutamahopetechnik/MissionPlanner | refs/heads/master | Lib/site-packages/numpy/fft/fftpack_lite.py | 53 |
import sys

if sys.platform == 'cli':
    import clr
    clr.AddReference("fftpack_lite")
    from numpy__fft__fftpack_cython import *
| antoinecarme/pyaf | refs/heads/master | tests/croston/test_croston_fpp2_counts_example.py | 1 |
import pandas as pd


def create_dataset():
    lCounts = "0 2 0 1 0 11 0 0 0 0 2 0 6 3 0 0 0 0 0 7 0 0 0 0 0 0 0 3 1 0 0 1 0 1 0 0".split()
    lCounts = [float(c) for c in lCounts]
    N = len(lCounts)
    lDates = pd.date_range(start="2000-01-01", periods=N, freq='m')
    df = pd.DataFrame({"Date": lDates, "Signal": lCounts})
    return df


def create_model(croston_type):
    df_train = create_dataset()
    # print(df_train.head(N))

    import pyaf.ForecastEngine as autof
    lEngine = autof.cForecastEngine()
    lEngine.mOptions.set_active_trends(['ConstantTrend', 'LinearTrend'])
    lEngine.mOptions.set_active_periodics(['None'])
    lEngine.mOptions.set_active_transformations(['None'])
    lEngine.mOptions.set_active_autoregressions(['CROSTON'])
    lEngine.mOptions.mModelSelection_Criterion = "L2"
    lEngine.mOptions.mCrostonOptions.mMethod = croston_type
    lEngine.mOptions.mCrostonOptions.mZeroRate = 0.0

    # get the best time series model for predicting one week
    lEngine.train(iInputDS=df_train, iTime='Date', iSignal='Signal', iHorizon=7)
    lEngine.getModelInfo()

    lName = "outputs/fpp2_croston_" + str(croston_type) + "_"
    lEngine.standardPlots(lName)

    # predict one week
    df_forecast = lEngine.forecast(iInputDS=df_train, iHorizon=7)
    # list the columns of the forecast dataset
    print(df_forecast.columns)

    cols = ['Date', 'Signal', '_Signal',
            '_Signal_TransformedForecast', 'Signal_Forecast']
    # print the real forecasts
    print(df_forecast[cols].tail(12))
    print(df_forecast['Signal'].describe())
    print(df_forecast['Signal_Forecast'].describe())


create_model(None)
| skosukhin/spack | refs/heads/esiwace | lib/spack/docs/tutorial/examples/Cmake/0.package.py | 1 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
#
# This is a template package file for Spack. We've put "FIXME"
# next to all the things you'll want to change. Once you've handled
# them, you can save this file and test your package like this:
#
# spack install callpath
#
# You can edit this file again by typing:
#
# spack edit callpath
#
# See the Spack documentation for more information on packaging.
# If you submit this package back to Spack as a pull request,
# please first remove this boilerplate and all FIXME comments.
#
from spack import *
class Callpath(CMakePackage):
    """FIXME: Put a proper description of your package here."""

    # FIXME: Add a proper url for your package's homepage here.
    homepage = "http://www.example.com"
    url = "https://github.com/llnl/callpath/archive/v1.0.1.tar.gz"

    version('1.0.3', 'c89089b3f1c1ba47b09b8508a574294a')

    # FIXME: Add dependencies if required.
    # depends_on('foo')

    def cmake_args(self):
        # FIXME: Add arguments other than
        # FIXME: CMAKE_INSTALL_PREFIX and CMAKE_BUILD_TYPE
        # FIXME: If not needed delete this function
        args = []
        return args
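# Descriptive note (not in the original template; based on general Spack
# conventions): CMakePackage supplies the standard cmake/build/install
# phases, so a package like this only overrides cmake_args() to pass extra
# -D flags to cmake; returning an empty list keeps the generated defaults.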
| Ali-aqrabawi/ezclinic | refs/heads/master | lib/django/forms/models.py | 27 |
"""
Helper functions for creating Form classes from Django models
and database field objects.
"""
from __future__ import unicode_literals
from collections import OrderedDict
from itertools import chain
from django.core.exceptions import (
NON_FIELD_ERRORS, FieldError, ImproperlyConfigured, ValidationError,
)
from django.forms.fields import ChoiceField, Field
from django.forms.forms import BaseForm, DeclarativeFieldsMetaclass
from django.forms.formsets import BaseFormSet, formset_factory
from django.forms.utils import ErrorList
from django.forms.widgets import (
HiddenInput, MultipleHiddenInput, SelectMultiple,
)
from django.utils import six
from django.utils.encoding import force_text, smart_text
from django.utils.text import capfirst, get_text_list
from django.utils.translation import ugettext, ugettext_lazy as _
__all__ = (
'ModelForm', 'BaseModelForm', 'model_to_dict', 'fields_for_model',
'ModelChoiceField', 'ModelMultipleChoiceField', 'ALL_FIELDS',
'BaseModelFormSet', 'modelformset_factory', 'BaseInlineFormSet',
'inlineformset_factory', 'modelform_factory',
)
ALL_FIELDS = '__all__'
def construct_instance(form, instance, fields=None, exclude=None):
"""
Constructs and returns a model instance from the bound ``form``'s
``cleaned_data``, but does not save the returned instance to the
database.
"""
from django.db import models
opts = instance._meta
cleaned_data = form.cleaned_data
file_field_list = []
for f in opts.fields:
if not f.editable or isinstance(f, models.AutoField) \
or f.name not in cleaned_data:
continue
if fields is not None and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
# Leave defaults for fields that aren't in POST data, except for
# checkbox inputs because they don't appear in POST data if not checked.
if (f.has_default() and
form[f.name].field.widget.value_omitted_from_data(form.data, form.files, form.add_prefix(f.name))):
continue
# Defer saving file-type fields until after the other fields, so a
# callable upload_to can use the values from other fields.
if isinstance(f, models.FileField):
file_field_list.append(f)
else:
f.save_form_data(instance, cleaned_data[f.name])
for f in file_field_list:
f.save_form_data(instance, cleaned_data[f.name])
return instance
# ModelForms #################################################################
def model_to_dict(instance, fields=None, exclude=None):
"""
Returns a dict containing the data in ``instance`` suitable for passing as
a Form's ``initial`` keyword argument.
``fields`` is an optional list of field names. If provided, only the named
fields will be included in the returned dict.
``exclude`` is an optional list of field names. If provided, the named
fields will be excluded from the returned dict, even if they are listed in
the ``fields`` argument.
"""
opts = instance._meta
data = {}
for f in chain(opts.concrete_fields, opts.private_fields, opts.many_to_many):
if not getattr(f, 'editable', False):
continue
if fields and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
data[f.name] = f.value_from_object(instance)
return data
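# Illustrative usage of model_to_dict (``author`` and ``AuthorForm`` are
# hypothetical stand-ins for a model instance and its ModelForm):
#
#     initial = model_to_dict(author, fields=['name', 'email'])
#     form = AuthorForm(initial=initial)  # pre-populate the form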
def fields_for_model(model, fields=None, exclude=None, widgets=None,
formfield_callback=None, localized_fields=None,
labels=None, help_texts=None, error_messages=None,
field_classes=None):
"""
    Returns an ``OrderedDict`` containing form fields for the given model.
``fields`` is an optional list of field names. If provided, only the named
fields will be included in the returned fields.
``exclude`` is an optional list of field names. If provided, the named
fields will be excluded from the returned fields, even if they are listed
in the ``fields`` argument.
``widgets`` is a dictionary of model field names mapped to a widget.
``formfield_callback`` is a callable that takes a model field and returns
a form field.
``localized_fields`` is a list of names of fields which should be localized.
``labels`` is a dictionary of model field names mapped to a label.
``help_texts`` is a dictionary of model field names mapped to a help text.
``error_messages`` is a dictionary of model field names mapped to a
dictionary of error messages.
``field_classes`` is a dictionary of model field names mapped to a form
field class.
"""
field_list = []
ignored = []
opts = model._meta
# Avoid circular import
from django.db.models.fields import Field as ModelField
sortable_private_fields = [f for f in opts.private_fields if isinstance(f, ModelField)]
for f in sorted(chain(opts.concrete_fields, sortable_private_fields, opts.many_to_many)):
if not getattr(f, 'editable', False):
if (fields is not None and f.name in fields and
(exclude is None or f.name not in exclude)):
raise FieldError(
"'%s' cannot be specified for %s model form as it is a non-editable field" % (
f.name, model.__name__)
)
continue
if fields is not None and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
kwargs = {}
if widgets and f.name in widgets:
kwargs['widget'] = widgets[f.name]
if localized_fields == ALL_FIELDS or (localized_fields and f.name in localized_fields):
kwargs['localize'] = True
if labels and f.name in labels:
kwargs['label'] = labels[f.name]
if help_texts and f.name in help_texts:
kwargs['help_text'] = help_texts[f.name]
if error_messages and f.name in error_messages:
kwargs['error_messages'] = error_messages[f.name]
if field_classes and f.name in field_classes:
kwargs['form_class'] = field_classes[f.name]
if formfield_callback is None:
formfield = f.formfield(**kwargs)
elif not callable(formfield_callback):
raise TypeError('formfield_callback must be a function or callable')
else:
formfield = formfield_callback(f, **kwargs)
if formfield:
field_list.append((f.name, formfield))
else:
ignored.append(f.name)
field_dict = OrderedDict(field_list)
if fields:
field_dict = OrderedDict(
[(f, field_dict.get(f)) for f in fields
if ((not exclude) or (exclude and f not in exclude)) and (f not in ignored)]
)
return field_dict
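# Illustrative usage of fields_for_model (``Author`` is a hypothetical model):
#
#     form_fields = fields_for_model(Author, fields=['name', 'email'])
#     # -> OrderedDict mapping 'name' and 'email' to ready-made form fields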
class ModelFormOptions(object):
def __init__(self, options=None):
self.model = getattr(options, 'model', None)
self.fields = getattr(options, 'fields', None)
self.exclude = getattr(options, 'exclude', None)
self.widgets = getattr(options, 'widgets', None)
self.localized_fields = getattr(options, 'localized_fields', None)
self.labels = getattr(options, 'labels', None)
self.help_texts = getattr(options, 'help_texts', None)
self.error_messages = getattr(options, 'error_messages', None)
self.field_classes = getattr(options, 'field_classes', None)
class ModelFormMetaclass(DeclarativeFieldsMetaclass):
def __new__(mcs, name, bases, attrs):
base_formfield_callback = None
for b in bases:
if hasattr(b, 'Meta') and hasattr(b.Meta, 'formfield_callback'):
base_formfield_callback = b.Meta.formfield_callback
break
formfield_callback = attrs.pop('formfield_callback', base_formfield_callback)
new_class = super(ModelFormMetaclass, mcs).__new__(mcs, name, bases, attrs)
if bases == (BaseModelForm,):
return new_class
opts = new_class._meta = ModelFormOptions(getattr(new_class, 'Meta', None))
# We check if a string was passed to `fields` or `exclude`,
# which is likely to be a mistake where the user typed ('foo') instead
# of ('foo',)
for opt in ['fields', 'exclude', 'localized_fields']:
value = getattr(opts, opt)
if isinstance(value, six.string_types) and value != ALL_FIELDS:
msg = ("%(model)s.Meta.%(opt)s cannot be a string. "
"Did you mean to type: ('%(value)s',)?" % {
'model': new_class.__name__,
'opt': opt,
'value': value,
})
raise TypeError(msg)
if opts.model:
# If a model is defined, extract form fields from it.
if opts.fields is None and opts.exclude is None:
raise ImproperlyConfigured(
"Creating a ModelForm without either the 'fields' attribute "
"or the 'exclude' attribute is prohibited; form %s "
"needs updating." % name
)
if opts.fields == ALL_FIELDS:
# Sentinel for fields_for_model to indicate "get the list of
# fields from the model"
opts.fields = None
fields = fields_for_model(opts.model, opts.fields, opts.exclude,
opts.widgets, formfield_callback,
opts.localized_fields, opts.labels,
opts.help_texts, opts.error_messages,
opts.field_classes)
# make sure opts.fields doesn't specify an invalid field
none_model_fields = [k for k, v in six.iteritems(fields) if not v]
missing_fields = (set(none_model_fields) -
set(new_class.declared_fields.keys()))
if missing_fields:
message = 'Unknown field(s) (%s) specified for %s'
message = message % (', '.join(missing_fields),
opts.model.__name__)
raise FieldError(message)
# Override default model fields with any custom declared ones
# (plus, include all the other declared fields).
fields.update(new_class.declared_fields)
else:
fields = new_class.declared_fields
new_class.base_fields = fields
return new_class
class BaseModelForm(BaseForm):
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
initial=None, error_class=ErrorList, label_suffix=None,
empty_permitted=False, instance=None, use_required_attribute=None):
opts = self._meta
if opts.model is None:
raise ValueError('ModelForm has no model class specified.')
if instance is None:
# if we didn't get an instance, instantiate a new one
self.instance = opts.model()
object_data = {}
else:
self.instance = instance
object_data = model_to_dict(instance, opts.fields, opts.exclude)
# if initial was provided, it should override the values from instance
if initial is not None:
object_data.update(initial)
# self._validate_unique will be set to True by BaseModelForm.clean().
# It is False by default so overriding self.clean() and failing to call
# super will stop validate_unique from being called.
self._validate_unique = False
super(BaseModelForm, self).__init__(
data, files, auto_id, prefix, object_data, error_class,
label_suffix, empty_permitted, use_required_attribute=use_required_attribute,
)
# Apply ``limit_choices_to`` to each field.
for field_name in self.fields:
formfield = self.fields[field_name]
if hasattr(formfield, 'queryset') and hasattr(formfield, 'get_limit_choices_to'):
limit_choices_to = formfield.get_limit_choices_to()
if limit_choices_to is not None:
formfield.queryset = formfield.queryset.complex_filter(limit_choices_to)
def _get_validation_exclusions(self):
"""
For backwards-compatibility, several types of fields need to be
excluded from model validation. See the following tickets for
details: #12507, #12521, #12553
"""
exclude = []
# Build up a list of fields that should be excluded from model field
# validation and unique checks.
for f in self.instance._meta.fields:
field = f.name
# Exclude fields that aren't on the form. The developer may be
# adding these values to the model after form validation.
if field not in self.fields:
exclude.append(f.name)
# Don't perform model validation on fields that were defined
# manually on the form and excluded via the ModelForm's Meta
# class. See #12901.
elif self._meta.fields and field not in self._meta.fields:
exclude.append(f.name)
elif self._meta.exclude and field in self._meta.exclude:
exclude.append(f.name)
# Exclude fields that failed form validation. There's no need for
# the model fields to validate them as well.
elif field in self._errors.keys():
exclude.append(f.name)
# Exclude empty fields that are not required by the form, if the
# underlying model field is required. This keeps the model field
# from raising a required error. Note: don't exclude the field from
# validation if the model field allows blanks. If it does, the blank
# value may be included in a unique check, so cannot be excluded
# from validation.
else:
form_field = self.fields[field]
field_value = self.cleaned_data.get(field)
if not f.blank and not form_field.required and field_value in form_field.empty_values:
exclude.append(f.name)
return exclude
def clean(self):
self._validate_unique = True
return self.cleaned_data
def _update_errors(self, errors):
# Override any validation error messages defined at the model level
# with those defined at the form level.
opts = self._meta
# Allow the model generated by construct_instance() to raise
# ValidationError and have them handled in the same way as others.
if hasattr(errors, 'error_dict'):
error_dict = errors.error_dict
else:
error_dict = {NON_FIELD_ERRORS: errors}
for field, messages in error_dict.items():
if (field == NON_FIELD_ERRORS and opts.error_messages and
NON_FIELD_ERRORS in opts.error_messages):
error_messages = opts.error_messages[NON_FIELD_ERRORS]
elif field in self.fields:
error_messages = self.fields[field].error_messages
else:
continue
for message in messages:
if (isinstance(message, ValidationError) and
message.code in error_messages):
message.message = error_messages[message.code]
self.add_error(None, errors)
def _post_clean(self):
opts = self._meta
exclude = self._get_validation_exclusions()
# Foreign Keys being used to represent inline relationships
# are excluded from basic field value validation. This is for two
# reasons: firstly, the value may not be supplied (#12507; the
# case of providing new values to the admin); secondly the
# object being referred to may not yet fully exist (#12749).
# However, these fields *must* be included in uniqueness checks,
# so this can't be part of _get_validation_exclusions().
for name, field in self.fields.items():
if isinstance(field, InlineForeignKeyField):
exclude.append(name)
try:
self.instance = construct_instance(self, self.instance, opts.fields, opts.exclude)
except ValidationError as e:
self._update_errors(e)
try:
self.instance.full_clean(exclude=exclude, validate_unique=False)
except ValidationError as e:
self._update_errors(e)
# Validate uniqueness if needed.
if self._validate_unique:
self.validate_unique()
def validate_unique(self):
"""
Calls the instance's validate_unique() method and updates the form's
validation errors if any were raised.
"""
exclude = self._get_validation_exclusions()
try:
self.instance.validate_unique(exclude=exclude)
except ValidationError as e:
self._update_errors(e)
def _save_m2m(self):
"""
Save the many-to-many fields and generic relations for this form.
"""
cleaned_data = self.cleaned_data
exclude = self._meta.exclude
fields = self._meta.fields
opts = self.instance._meta
# Note that for historical reasons we want to include also
# private_fields here. (GenericRelation was previously a fake
# m2m field).
for f in chain(opts.many_to_many, opts.private_fields):
if not hasattr(f, 'save_form_data'):
continue
if fields and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
if f.name in cleaned_data:
f.save_form_data(self.instance, cleaned_data[f.name])
def save(self, commit=True):
"""
Save this form's self.instance object if commit=True. Otherwise, add
a save_m2m() method to the form which can be called after the instance
is saved manually at a later time. Return the model instance.
"""
if self.errors:
raise ValueError(
"The %s could not be %s because the data didn't validate." % (
self.instance._meta.object_name,
'created' if self.instance._state.adding else 'changed',
)
)
if commit:
# If committing, save the instance and the m2m data immediately.
self.instance.save()
self._save_m2m()
else:
# If not committing, add a method to the form to allow deferred
# saving of m2m data.
self.save_m2m = self._save_m2m
return self.instance
save.alters_data = True
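    # Illustrative commit=False usage (the standard deferred-save pattern;
    # ``request.user`` is a hypothetical extra value):
    #
    #     obj = form.save(commit=False)
    #     obj.created_by = request.user
    #     obj.save()
    #     form.save_m2m()  # only available after save(commit=False)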
class ModelForm(six.with_metaclass(ModelFormMetaclass, BaseModelForm)):
pass
def modelform_factory(model, form=ModelForm, fields=None, exclude=None,
formfield_callback=None, widgets=None, localized_fields=None,
labels=None, help_texts=None, error_messages=None,
field_classes=None):
"""
Returns a ModelForm containing form fields for the given model.
``fields`` is an optional list of field names. If provided, only the named
fields will be included in the returned fields. If omitted or '__all__',
all fields will be used.
``exclude`` is an optional list of field names. If provided, the named
fields will be excluded from the returned fields, even if they are listed
in the ``fields`` argument.
``widgets`` is a dictionary of model field names mapped to a widget.
``localized_fields`` is a list of names of fields which should be localized.
``formfield_callback`` is a callable that takes a model field and returns
a form field.
``labels`` is a dictionary of model field names mapped to a label.
``help_texts`` is a dictionary of model field names mapped to a help text.
``error_messages`` is a dictionary of model field names mapped to a
dictionary of error messages.
``field_classes`` is a dictionary of model field names mapped to a form
field class.
"""
# Create the inner Meta class. FIXME: ideally, we should be able to
# construct a ModelForm without creating and passing in a temporary
# inner class.
# Build up a list of attributes that the Meta object will have.
attrs = {'model': model}
if fields is not None:
attrs['fields'] = fields
if exclude is not None:
attrs['exclude'] = exclude
if widgets is not None:
attrs['widgets'] = widgets
if localized_fields is not None:
attrs['localized_fields'] = localized_fields
if labels is not None:
attrs['labels'] = labels
if help_texts is not None:
attrs['help_texts'] = help_texts
if error_messages is not None:
attrs['error_messages'] = error_messages
if field_classes is not None:
attrs['field_classes'] = field_classes
# If parent form class already has an inner Meta, the Meta we're
# creating needs to inherit from the parent's inner meta.
parent = (object,)
if hasattr(form, 'Meta'):
parent = (form.Meta, object)
Meta = type(str('Meta'), parent, attrs)
if formfield_callback:
Meta.formfield_callback = staticmethod(formfield_callback)
# Give this new form class a reasonable name.
class_name = model.__name__ + str('Form')
# Class attributes for the new form class.
form_class_attrs = {
'Meta': Meta,
'formfield_callback': formfield_callback
}
if (getattr(Meta, 'fields', None) is None and
getattr(Meta, 'exclude', None) is None):
raise ImproperlyConfigured(
"Calling modelform_factory without defining 'fields' or "
"'exclude' explicitly is prohibited."
)
# Instantiate type(form) in order to use the same metaclass as form.
return type(form)(class_name, (form,), form_class_attrs)
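# Illustrative usage of modelform_factory (``Author`` is a hypothetical model):
#
#     AuthorForm = modelform_factory(Author, fields=['name', 'birth_date'])
#     form = AuthorForm(data={'name': 'Ada', 'birth_date': '1815-12-10'})
#     if form.is_valid():
#         author = form.save()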
# ModelFormSets ##############################################################
class BaseModelFormSet(BaseFormSet):
"""
A ``FormSet`` for editing a queryset and/or adding new objects to it.
"""
model = None
# Set of fields that must be unique among forms of this set.
unique_fields = set()
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
queryset=None, **kwargs):
self.queryset = queryset
self.initial_extra = kwargs.pop('initial', None)
defaults = {'data': data, 'files': files, 'auto_id': auto_id, 'prefix': prefix}
defaults.update(kwargs)
super(BaseModelFormSet, self).__init__(**defaults)
def initial_form_count(self):
"""Returns the number of forms that are required in this FormSet."""
if not (self.data or self.files):
return len(self.get_queryset())
return super(BaseModelFormSet, self).initial_form_count()
def _existing_object(self, pk):
if not hasattr(self, '_object_dict'):
self._object_dict = {o.pk: o for o in self.get_queryset()}
return self._object_dict.get(pk)
def _get_to_python(self, field):
"""
If the field is a related field, fetch the concrete field's (that
is, the ultimate pointed-to field's) to_python.
"""
while field.remote_field is not None:
field = field.remote_field.get_related_field()
return field.to_python
def _construct_form(self, i, **kwargs):
if self.is_bound and i < self.initial_form_count():
pk_key = "%s-%s" % (self.add_prefix(i), self.model._meta.pk.name)
pk = self.data[pk_key]
pk_field = self.model._meta.pk
to_python = self._get_to_python(pk_field)
pk = to_python(pk)
kwargs['instance'] = self._existing_object(pk)
if i < self.initial_form_count() and 'instance' not in kwargs:
kwargs['instance'] = self.get_queryset()[i]
if i >= self.initial_form_count() and self.initial_extra:
# Set initial values for extra forms
try:
kwargs['initial'] = self.initial_extra[i - self.initial_form_count()]
except IndexError:
pass
return super(BaseModelFormSet, self)._construct_form(i, **kwargs)
def get_queryset(self):
if not hasattr(self, '_queryset'):
if self.queryset is not None:
qs = self.queryset
else:
qs = self.model._default_manager.get_queryset()
# If the queryset isn't already ordered we need to add an
# artificial ordering here to make sure that all formsets
# constructed from this queryset have the same form order.
if not qs.ordered:
qs = qs.order_by(self.model._meta.pk.name)
# Removed queryset limiting here. As per discussion re: #13023
# on django-dev, max_num should not prevent existing
# related objects/inlines from being displayed.
self._queryset = qs
return self._queryset
def save_new(self, form, commit=True):
"""Saves and returns a new model instance for the given form."""
return form.save(commit=commit)
def save_existing(self, form, instance, commit=True):
"""Saves and returns an existing model instance for the given form."""
return form.save(commit=commit)
def delete_existing(self, obj, commit=True):
"""Deletes an existing model instance."""
if commit:
obj.delete()
def save(self, commit=True):
"""Saves model instances for every form, adding and changing instances
as necessary, and returns the list of instances.
"""
if not commit:
self.saved_forms = []
def save_m2m():
for form in self.saved_forms:
form.save_m2m()
self.save_m2m = save_m2m
return self.save_existing_objects(commit) + self.save_new_objects(commit)
save.alters_data = True
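    # Illustrative commit=False usage (mirrors BaseModelForm.save; the
    # ``owner`` attribute is a hypothetical example):
    #
    #     instances = formset.save(commit=False)
    #     for obj in instances:
    #         obj.owner = request.user
    #         obj.save()
    #     formset.save_m2m()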
def clean(self):
self.validate_unique()
def validate_unique(self):
# Collect unique_checks and date_checks to run from all the forms.
all_unique_checks = set()
all_date_checks = set()
forms_to_delete = self.deleted_forms
valid_forms = [form for form in self.forms if form.is_valid() and form not in forms_to_delete]
for form in valid_forms:
exclude = form._get_validation_exclusions()
unique_checks, date_checks = form.instance._get_unique_checks(exclude=exclude)
all_unique_checks = all_unique_checks.union(set(unique_checks))
all_date_checks = all_date_checks.union(set(date_checks))
errors = []
# Do each of the unique checks (unique and unique_together)
for uclass, unique_check in all_unique_checks:
seen_data = set()
for form in valid_forms:
# Get the data for the set of fields that must be unique among the forms.
row_data = (
field if field in self.unique_fields else form.cleaned_data[field]
for field in unique_check if field in form.cleaned_data
)
# Reduce Model instances to their primary key values
row_data = tuple(d._get_pk_val() if hasattr(d, '_get_pk_val') else d
for d in row_data)
if row_data and None not in row_data:
# if we've already seen it then we have a uniqueness failure
if row_data in seen_data:
# poke error messages into the right places and mark
# the form as invalid
errors.append(self.get_unique_error_message(unique_check))
form._errors[NON_FIELD_ERRORS] = self.error_class([self.get_form_error()])
# remove the data from the cleaned_data dict since it was invalid
for field in unique_check:
if field in form.cleaned_data:
del form.cleaned_data[field]
# mark the data as seen
seen_data.add(row_data)
# iterate over each of the date checks now
for date_check in all_date_checks:
seen_data = set()
uclass, lookup, field, unique_for = date_check
for form in valid_forms:
# see if we have data for both fields
if (form.cleaned_data and form.cleaned_data[field] is not None and
form.cleaned_data[unique_for] is not None):
# if it's a date lookup we need to get the data for all the fields
if lookup == 'date':
date = form.cleaned_data[unique_for]
date_data = (date.year, date.month, date.day)
# otherwise it's just the attribute on the date/datetime
# object
else:
date_data = (getattr(form.cleaned_data[unique_for], lookup),)
data = (form.cleaned_data[field],) + date_data
# if we've already seen it then we have a uniqueness failure
if data in seen_data:
# poke error messages into the right places and mark
# the form as invalid
errors.append(self.get_date_error_message(date_check))
form._errors[NON_FIELD_ERRORS] = self.error_class([self.get_form_error()])
# remove the data from the cleaned_data dict since it was invalid
del form.cleaned_data[field]
# mark the data as seen
seen_data.add(data)
if errors:
raise ValidationError(errors)
def get_unique_error_message(self, unique_check):
if len(unique_check) == 1:
return ugettext("Please correct the duplicate data for %(field)s.") % {
"field": unique_check[0],
}
else:
return ugettext("Please correct the duplicate data for %(field)s, which must be unique.") % {
"field": get_text_list(unique_check, six.text_type(_("and"))),
}
def get_date_error_message(self, date_check):
return ugettext(
"Please correct the duplicate data for %(field_name)s "
"which must be unique for the %(lookup)s in %(date_field)s."
) % {
'field_name': date_check[2],
'date_field': date_check[3],
'lookup': six.text_type(date_check[1]),
}
def get_form_error(self):
return ugettext("Please correct the duplicate values below.")
def save_existing_objects(self, commit=True):
self.changed_objects = []
self.deleted_objects = []
if not self.initial_forms:
return []
saved_instances = []
forms_to_delete = self.deleted_forms
for form in self.initial_forms:
obj = form.instance
if form in forms_to_delete:
# If the pk is None, it means that the object can't be
# deleted again. Possible reason for this is that the
# object was already deleted from the DB. Refs #14877.
if obj.pk is None:
continue
self.deleted_objects.append(obj)
self.delete_existing(obj, commit=commit)
elif form.has_changed():
self.changed_objects.append((obj, form.changed_data))
saved_instances.append(self.save_existing(form, obj, commit=commit))
if not commit:
self.saved_forms.append(form)
return saved_instances
def save_new_objects(self, commit=True):
self.new_objects = []
for form in self.extra_forms:
if not form.has_changed():
continue
# If someone has marked an add form for deletion, don't save the
# object.
if self.can_delete and self._should_delete_form(form):
continue
self.new_objects.append(self.save_new(form, commit=commit))
if not commit:
self.saved_forms.append(form)
return self.new_objects
def add_fields(self, form, index):
"""Add a hidden field for the object's primary key."""
from django.db.models import AutoField, OneToOneField, ForeignKey
self._pk_field = pk = self.model._meta.pk
# If a pk isn't editable, then it won't be on the form, so we need to
# add it here so we can tell which object is which when we get the
# data back. Generally, pk.editable should be false, but for some
        # reason, auto_created pk fields and AutoFields have their editable
        # attribute set to True, so check for that as well.
def pk_is_not_editable(pk):
return (
(not pk.editable) or (pk.auto_created or isinstance(pk, AutoField)) or (
pk.remote_field and pk.remote_field.parent_link and
pk_is_not_editable(pk.remote_field.model._meta.pk)
)
)
if pk_is_not_editable(pk) or pk.name not in form.fields:
if form.is_bound:
# If we're adding the related instance, ignore its primary key
# as it could be an auto-generated default which isn't actually
# in the database.
pk_value = None if form.instance._state.adding else form.instance.pk
else:
try:
if index is not None:
pk_value = self.get_queryset()[index].pk
else:
pk_value = None
except IndexError:
pk_value = None
if isinstance(pk, OneToOneField) or isinstance(pk, ForeignKey):
qs = pk.remote_field.model._default_manager.get_queryset()
else:
qs = self.model._default_manager.get_queryset()
qs = qs.using(form.instance._state.db)
if form._meta.widgets:
widget = form._meta.widgets.get(self._pk_field.name, HiddenInput)
else:
widget = HiddenInput
form.fields[self._pk_field.name] = ModelChoiceField(qs, initial=pk_value, required=False, widget=widget)
super(BaseModelFormSet, self).add_fields(form, index)
def modelformset_factory(model, form=ModelForm, formfield_callback=None,
formset=BaseModelFormSet, extra=1, can_delete=False,
can_order=False, max_num=None, fields=None, exclude=None,
widgets=None, validate_max=False, localized_fields=None,
labels=None, help_texts=None, error_messages=None,
min_num=None, validate_min=False, field_classes=None):
"""
Returns a FormSet class for the given Django model class.
"""
meta = getattr(form, 'Meta', None)
if (getattr(meta, 'fields', fields) is None and
getattr(meta, 'exclude', exclude) is None):
raise ImproperlyConfigured(
"Calling modelformset_factory without defining 'fields' or "
"'exclude' explicitly is prohibited."
)
form = modelform_factory(model, form=form, fields=fields, exclude=exclude,
formfield_callback=formfield_callback,
widgets=widgets, localized_fields=localized_fields,
labels=labels, help_texts=help_texts,
error_messages=error_messages, field_classes=field_classes)
FormSet = formset_factory(form, formset, extra=extra, min_num=min_num, max_num=max_num,
can_order=can_order, can_delete=can_delete,
validate_min=validate_min, validate_max=validate_max)
FormSet.model = model
return FormSet
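# Illustrative usage of modelformset_factory (``Author`` is a hypothetical
# model):
#
#     AuthorFormSet = modelformset_factory(Author, fields=['name'], extra=2)
#     formset = AuthorFormSet(queryset=Author.objects.all())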
# InlineFormSets #############################################################
class BaseInlineFormSet(BaseModelFormSet):
"""A formset for child objects related to a parent."""
def __init__(self, data=None, files=None, instance=None,
save_as_new=False, prefix=None, queryset=None, **kwargs):
if instance is None:
self.instance = self.fk.remote_field.model()
else:
self.instance = instance
self.save_as_new = save_as_new
if queryset is None:
queryset = self.model._default_manager
if self.instance.pk is not None:
qs = queryset.filter(**{self.fk.name: self.instance})
else:
qs = queryset.none()
self.unique_fields = {self.fk.name}
super(BaseInlineFormSet, self).__init__(data, files, prefix=prefix,
queryset=qs, **kwargs)
# Add the generated field to form._meta.fields if it's defined to make
# sure validation isn't skipped on that field.
if self.form._meta.fields and self.fk.name not in self.form._meta.fields:
if isinstance(self.form._meta.fields, tuple):
self.form._meta.fields = list(self.form._meta.fields)
self.form._meta.fields.append(self.fk.name)
def initial_form_count(self):
if self.save_as_new:
return 0
return super(BaseInlineFormSet, self).initial_form_count()
def _construct_form(self, i, **kwargs):
form = super(BaseInlineFormSet, self)._construct_form(i, **kwargs)
if self.save_as_new:
            # Remove the primary key from the form's data; we are only
            # creating new instances.
form.data[form.add_prefix(self._pk_field.name)] = None
# Remove the foreign key from the form's data
form.data[form.add_prefix(self.fk.name)] = None
# Set the fk value here so that the form can do its validation.
fk_value = self.instance.pk
if self.fk.remote_field.field_name != self.fk.remote_field.model._meta.pk.name:
fk_value = getattr(self.instance, self.fk.remote_field.field_name)
fk_value = getattr(fk_value, 'pk', fk_value)
setattr(form.instance, self.fk.get_attname(), fk_value)
return form
@classmethod
def get_default_prefix(cls):
return cls.fk.remote_field.get_accessor_name(model=cls.model).replace('+', '')
def save_new(self, form, commit=True):
# Ensure the latest copy of the related instance is present on each
# form (it may have been saved after the formset was originally
# instantiated).
setattr(form.instance, self.fk.name, self.instance)
# Use commit=False so we can assign the parent key afterwards, then
# save the object.
obj = form.save(commit=False)
pk_value = getattr(self.instance, self.fk.remote_field.field_name)
setattr(obj, self.fk.get_attname(), getattr(pk_value, 'pk', pk_value))
if commit:
obj.save()
# form.save_m2m() can be called via the formset later on if commit=False
if commit and hasattr(form, 'save_m2m'):
form.save_m2m()
return obj
def add_fields(self, form, index):
super(BaseInlineFormSet, self).add_fields(form, index)
if self._pk_field == self.fk:
name = self._pk_field.name
kwargs = {'pk_field': True}
else:
# The foreign key field might not be on the form, so we poke at the
# Model field to get the label, since we need that for error messages.
name = self.fk.name
kwargs = {
'label': getattr(form.fields.get(name), 'label', capfirst(self.fk.verbose_name))
}
if self.fk.remote_field.field_name != self.fk.remote_field.model._meta.pk.name:
kwargs['to_field'] = self.fk.remote_field.field_name
# If we're adding a new object, ignore a parent's auto-generated key
# as it will be regenerated on the save request.
if self.instance._state.adding:
if kwargs.get('to_field') is not None:
to_field = self.instance._meta.get_field(kwargs['to_field'])
else:
to_field = self.instance._meta.pk
if to_field.has_default():
setattr(self.instance, to_field.attname, None)
form.fields[name] = InlineForeignKeyField(self.instance, **kwargs)
def get_unique_error_message(self, unique_check):
unique_check = [field for field in unique_check if field != self.fk.name]
return super(BaseInlineFormSet, self).get_unique_error_message(unique_check)
def _get_foreign_key(parent_model, model, fk_name=None, can_fail=False):
"""
Finds and returns the ForeignKey from model to parent if there is one
(returns None if can_fail is True and no such field exists). If fk_name is
provided, assume it is the name of the ForeignKey field. Unless can_fail is
True, an exception is raised if there is no ForeignKey from model to
parent_model.
"""
# avoid circular import
from django.db.models import ForeignKey
opts = model._meta
if fk_name:
fks_to_parent = [f for f in opts.fields if f.name == fk_name]
if len(fks_to_parent) == 1:
fk = fks_to_parent[0]
if not isinstance(fk, ForeignKey) or \
(fk.remote_field.model != parent_model and
fk.remote_field.model not in parent_model._meta.get_parent_list()):
raise ValueError(
"fk_name '%s' is not a ForeignKey to '%s'." % (fk_name, parent_model._meta.label)
)
elif len(fks_to_parent) == 0:
raise ValueError(
"'%s' has no field named '%s'." % (model._meta.label, fk_name)
)
else:
# Try to discover what the ForeignKey from model to parent_model is
fks_to_parent = [
f for f in opts.fields
if isinstance(f, ForeignKey) and (
f.remote_field.model == parent_model or
f.remote_field.model in parent_model._meta.get_parent_list()
)
]
if len(fks_to_parent) == 1:
fk = fks_to_parent[0]
elif len(fks_to_parent) == 0:
if can_fail:
return
raise ValueError(
"'%s' has no ForeignKey to '%s'." % (
model._meta.label,
parent_model._meta.label,
)
)
else:
raise ValueError(
"'%s' has more than one ForeignKey to '%s'." % (
model._meta.label,
parent_model._meta.label,
)
)
return fk
def inlineformset_factory(parent_model, model, form=ModelForm,
formset=BaseInlineFormSet, fk_name=None,
fields=None, exclude=None, extra=3, can_order=False,
can_delete=True, max_num=None, formfield_callback=None,
widgets=None, validate_max=False, localized_fields=None,
labels=None, help_texts=None, error_messages=None,
min_num=None, validate_min=False, field_classes=None):
"""
Returns an ``InlineFormSet`` for the given kwargs.
You must provide ``fk_name`` if ``model`` has more than one ``ForeignKey``
to ``parent_model``.
"""
fk = _get_foreign_key(parent_model, model, fk_name=fk_name)
# enforce a max_num=1 when the foreign key to the parent model is unique.
if fk.unique:
max_num = 1
kwargs = {
'form': form,
'formfield_callback': formfield_callback,
'formset': formset,
'extra': extra,
'can_delete': can_delete,
'can_order': can_order,
'fields': fields,
'exclude': exclude,
'min_num': min_num,
'max_num': max_num,
'widgets': widgets,
'validate_min': validate_min,
'validate_max': validate_max,
'localized_fields': localized_fields,
'labels': labels,
'help_texts': help_texts,
'error_messages': error_messages,
'field_classes': field_classes,
}
FormSet = modelformset_factory(model, **kwargs)
FormSet.fk = fk
return FormSet
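# Illustrative usage of inlineformset_factory (``Author`` and ``Book`` are
# hypothetical models, with Book holding a ForeignKey to Author):
#
#     BookFormSet = inlineformset_factory(Author, Book, fields=['title'])
#     formset = BookFormSet(instance=author)  # edit an author's books inline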
# Fields #####################################################################
class InlineForeignKeyField(Field):
"""
A basic integer field that deals with validating the given value to a
given parent instance in an inline.
"""
widget = HiddenInput
default_error_messages = {
'invalid_choice': _('The inline foreign key did not match the parent instance primary key.'),
}
def __init__(self, parent_instance, *args, **kwargs):
self.parent_instance = parent_instance
self.pk_field = kwargs.pop("pk_field", False)
self.to_field = kwargs.pop("to_field", None)
if self.parent_instance is not None:
if self.to_field:
kwargs["initial"] = getattr(self.parent_instance, self.to_field)
else:
kwargs["initial"] = self.parent_instance.pk
kwargs["required"] = False
super(InlineForeignKeyField, self).__init__(*args, **kwargs)
def clean(self, value):
if value in self.empty_values:
if self.pk_field:
return None
            # If there is no value, act as we did before.
return self.parent_instance
        # Ensure that we compare the values as equal types.
if self.to_field:
orig = getattr(self.parent_instance, self.to_field)
else:
orig = self.parent_instance.pk
if force_text(value) != force_text(orig):
raise ValidationError(self.error_messages['invalid_choice'], code='invalid_choice')
return self.parent_instance
def has_changed(self, initial, data):
return False
class ModelChoiceIterator(object):
def __init__(self, field):
self.field = field
self.queryset = field.queryset
def __iter__(self):
if self.field.empty_label is not None:
yield ("", self.field.empty_label)
queryset = self.queryset.all()
# Can't use iterator() when queryset uses prefetch_related()
if not queryset._prefetch_related_lookups:
queryset = queryset.iterator()
for obj in queryset:
yield self.choice(obj)
def __len__(self):
return (len(self.queryset) + (1 if self.field.empty_label is not None else 0))
def choice(self, obj):
return (self.field.prepare_value(obj), self.field.label_from_instance(obj))
class ModelChoiceField(ChoiceField):
"""A ChoiceField whose choices are a model QuerySet."""
# This class is a subclass of ChoiceField for purity, but it doesn't
# actually use any of ChoiceField's implementation.
default_error_messages = {
'invalid_choice': _('Select a valid choice. That choice is not one of'
' the available choices.'),
}
def __init__(self, queryset, empty_label="---------",
required=True, widget=None, label=None, initial=None,
help_text='', to_field_name=None, limit_choices_to=None,
*args, **kwargs):
if required and (initial is not None):
self.empty_label = None
else:
self.empty_label = empty_label
# Call Field instead of ChoiceField __init__() because we don't need
# ChoiceField.__init__().
Field.__init__(self, required, widget, label, initial, help_text,
*args, **kwargs)
self.queryset = queryset
self.limit_choices_to = limit_choices_to # limit the queryset later.
self.to_field_name = to_field_name
def get_limit_choices_to(self):
"""
Returns ``limit_choices_to`` for this form field.
If it is a callable, it will be invoked and the result will be
returned.
"""
if callable(self.limit_choices_to):
return self.limit_choices_to()
return self.limit_choices_to
def __deepcopy__(self, memo):
result = super(ChoiceField, self).__deepcopy__(memo)
# Need to force a new ModelChoiceIterator to be created, bug #11183
result.queryset = result.queryset
return result
def _get_queryset(self):
return self._queryset
def _set_queryset(self, queryset):
self._queryset = queryset
self.widget.choices = self.choices
queryset = property(_get_queryset, _set_queryset)
    # This method will be used to create object labels by the
    # ModelChoiceIterator. Override it to customize the label.
def label_from_instance(self, obj):
"""
This method is used to convert objects into strings; it's used to
generate the labels for the choices presented by this object. Subclasses
can override this method to customize the display of the choices.
"""
return smart_text(obj)
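    # Illustrative subclass (a common way to customize choice labels;
    # ``get_full_name`` is a hypothetical model method):
    #
    #     class AuthorChoiceField(ModelChoiceField):
    #         def label_from_instance(self, obj):
    #             return obj.get_full_name()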
def _get_choices(self):
# If self._choices is set, then somebody must have manually set
# the property self.choices. In this case, just return self._choices.
if hasattr(self, '_choices'):
return self._choices
# Otherwise, execute the QuerySet in self.queryset to determine the
# choices dynamically. Return a fresh ModelChoiceIterator that has not been
# consumed. Note that we're instantiating a new ModelChoiceIterator *each*
# time _get_choices() is called (and, thus, each time self.choices is
# accessed) so that we can ensure the QuerySet has not been consumed. This
# construct might look complicated but it allows for lazy evaluation of
# the queryset.
return ModelChoiceIterator(self)
choices = property(_get_choices, ChoiceField._set_choices)
def prepare_value(self, value):
if hasattr(value, '_meta'):
if self.to_field_name:
return value.serializable_value(self.to_field_name)
else:
return value.pk
return super(ModelChoiceField, self).prepare_value(value)
def to_python(self, value):
if value in self.empty_values:
return None
try:
key = self.to_field_name or 'pk'
value = self.queryset.get(**{key: value})
except (ValueError, TypeError, self.queryset.model.DoesNotExist):
raise ValidationError(self.error_messages['invalid_choice'], code='invalid_choice')
return value
def validate(self, value):
return Field.validate(self, value)
def has_changed(self, initial, data):
initial_value = initial if initial is not None else ''
data_value = data if data is not None else ''
return force_text(self.prepare_value(initial_value)) != force_text(data_value)
class ModelMultipleChoiceField(ModelChoiceField):
"""A MultipleChoiceField whose choices are a model QuerySet."""
widget = SelectMultiple
hidden_widget = MultipleHiddenInput
default_error_messages = {
'list': _('Enter a list of values.'),
'invalid_choice': _('Select a valid choice. %(value)s is not one of the'
' available choices.'),
'invalid_pk_value': _('"%(pk)s" is not a valid value for a primary key.')
}
def __init__(self, queryset, required=True, widget=None, label=None,
initial=None, help_text='', *args, **kwargs):
super(ModelMultipleChoiceField, self).__init__(
queryset, None, required, widget, label, initial, help_text,
*args, **kwargs
)
def to_python(self, value):
if not value:
return []
return list(self._check_values(value))
def clean(self, value):
value = self.prepare_value(value)
if self.required and not value:
raise ValidationError(self.error_messages['required'], code='required')
elif not self.required and not value:
return self.queryset.none()
if not isinstance(value, (list, tuple)):
raise ValidationError(self.error_messages['list'], code='list')
qs = self._check_values(value)
        # Since this overrides the inherited ModelChoiceField.clean,
        # we run custom validators here.
self.run_validators(value)
return qs
def _check_values(self, value):
"""
Given a list of possible PK values, returns a QuerySet of the
corresponding objects. Raises a ValidationError if a given value is
invalid (not a valid PK, not in the queryset, etc.)
"""
key = self.to_field_name or 'pk'
        # Deduplicate given values to avoid creating many querysets or
        # requiring the database backend to deduplicate efficiently.
try:
value = frozenset(value)
except TypeError:
# list of lists isn't hashable, for example
raise ValidationError(
self.error_messages['list'],
code='list',
)
for pk in value:
try:
self.queryset.filter(**{key: pk})
except (ValueError, TypeError):
raise ValidationError(
self.error_messages['invalid_pk_value'],
code='invalid_pk_value',
params={'pk': pk},
)
qs = self.queryset.filter(**{'%s__in' % key: value})
pks = set(force_text(getattr(o, key)) for o in qs)
for val in value:
if force_text(val) not in pks:
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': val},
)
return qs
def prepare_value(self, value):
if (hasattr(value, '__iter__') and
not isinstance(value, six.text_type) and
not hasattr(value, '_meta')):
return [super(ModelMultipleChoiceField, self).prepare_value(v) for v in value]
return super(ModelMultipleChoiceField, self).prepare_value(value)
def has_changed(self, initial, data):
if initial is None:
initial = []
if data is None:
data = []
if len(initial) != len(data):
return True
initial_set = set(force_text(value) for value in self.prepare_value(initial))
data_set = set(force_text(value) for value in data)
return data_set != initial_set
def modelform_defines_fields(form_class):
return (form_class is not None and (
hasattr(form_class, '_meta') and
(form_class._meta.fields is not None or
form_class._meta.exclude is not None)
))
|
pschwartz/ansible
|
refs/heads/devel
|
test/units/plugins/action/__init__.py
|
7690
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
|
voriux/Flexget
|
refs/heads/develop
|
flexget/plugins/__init__.py
|
44
|
"""Standard plugin package."""
|
snoopycrimecop/openmicroscopy
|
refs/heads/merge_ci
|
components/tools/OmeroPy/manualtests/populate_roi_test.py
|
3
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
...
"""
#
# Copyright (C) 2009 University of Dundee. All rights reserved.
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import unittest
import os
from omero.util.populate_roi import AbstractPlateAnalysisCtx
from omero.util.populate_roi import MIASPlateAnalysisCtx
from omero.util.populate_roi import FlexPlateAnalysisCtx
from omero.util.populate_roi import InCellPlateAnalysisCtx
from omero.rtypes import rstring, rint
from omero.model import OriginalFileI, ImageI, WellI, WellSampleI
class TestingServiceFactory(object):
"""
Testing service factory implementation.
"""
def getUpdateService(self):
return None
def getQueryService(self):
return None
class FromFileOriginalFileProvider(object):
"""
Provides a testing original file provider which provides file data
directly from disk.
"""
def __init__(self, service_factory):
pass
def get_original_file_data(self, original_file):
"""Returns a file handle to the path of the original file."""
return open(original_file.path.val)
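# Note (illustrative): the production provider in populate_roi streams file
# contents through the OMERO raw file store; this test double simply opens
# the recorded path on the local disk instead.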
class MIASParseRoiTest(unittest.TestCase):
LOG_FILE = "NEOlog2008-09-18-14h37m07s.txt"
RESULT_FILE = "Well0001_mode1_z000_t000_detail_2008-09-18-10h48m54s.txt"
ROOT = "/Users/callan/testimages/siRNA_PRIM1_03102008/"\
"001-365700055641/results/"
def setUp(self):
AbstractPlateAnalysisCtx.DEFAULT_ORIGINAL_FILE_PROVIDER = \
FromFileOriginalFileProvider
original_files = list()
# Create our container images and an original file image map
images = list()
n_images = 0
for row in range(16):
for column in range(24):
well = WellI(n_images, True)
well.column = rint(column)
well.row = rint(row)
well_sample = WellSampleI(n_images, True)
well_sample.well = well
image = ImageI(n_images, True)
image.addWellSample(well_sample)
images.append(image)
original_file_image_map = dict()
# Our required original file format
format = rstring('Companion/MIAS')
# Create original file representing the log file
o = OriginalFileI(1, True)
o.name = rstring(self.LOG_FILE)
o.path = rstring(os.path.join(self.ROOT, self.LOG_FILE))
o.mimetype = format
original_files.append(o) # [1] = o
original_file_image_map[1] = images[0]
# Create original file representing the result file
o = OriginalFileI(2, True)
o.name = rstring(self.RESULT_FILE)
o.path = rstring(os.path.join(self.ROOT, self.RESULT_FILE))
o.mimetype = format
original_files.append(o) # [2] = o
original_file_image_map[2] = images[0]
sf = TestingServiceFactory()
self.analysis_ctx = MIASPlateAnalysisCtx(
images, original_files, original_file_image_map, 1, sf)
def test_get_measurement_ctx(self):
ctx = self.analysis_ctx.get_measurement_ctx(0)
self.assertNotEqual(None, ctx)
def test_get_columns(self):
ctx = self.analysis_ctx.get_measurement_ctx(0)
columns = ctx.parse()
self.assertNotEqual(None, columns)
self.assertEqual(9, len(columns))
self.assertEqual('Image', columns[0].name)
self.assertEqual('ROI', columns[1].name)
self.assertEqual('Label', columns[2].name)
self.assertEqual('Row', columns[3].name)
self.assertEqual('Col', columns[4].name)
self.assertEqual('Nucleus Area', columns[5].name)
self.assertEqual('Cell Diam.', columns[6].name)
self.assertEqual('Cell Type', columns[7].name)
self.assertEqual('Mean Nucleus Intens.', columns[8].name)
for column in columns:
if column.name == "ROI":
continue
self.assertEqual(173, len(column.values))
class FlexParseRoiTest(unittest.TestCase):
ROOT = "/Users/callan/testimages/"
RESULT_FILE = "An_02_Me01_12132846(2009-06-17_11-56-17).res"
def setUp(self):
AbstractPlateAnalysisCtx.DEFAULT_ORIGINAL_FILE_PROVIDER = \
FromFileOriginalFileProvider
original_files = list()
# Create our container images and an original file image map
images = list()
n_images = 0
for row in range(16):
for column in range(24):
well = WellI(n_images, True)
well.column = rint(column)
well.row = rint(row)
well_sample = WellSampleI(n_images, True)
well_sample.well = well
image = ImageI(n_images, True)
image.addWellSample(well_sample)
images.append(image)
original_file_image_map = dict()
# Our required original file format
format = rstring('Companion/Flex')
# Create original file representing the result file
o = OriginalFileI(1, True)
o.name = rstring(self.RESULT_FILE)
o.path = rstring(os.path.join(self.ROOT, self.RESULT_FILE))
o.mimetype = format
original_files.append(o) # [1] = o
original_file_image_map[1] = images[0]
sf = TestingServiceFactory()
self.analysis_ctx = FlexPlateAnalysisCtx(
images, original_files, original_file_image_map, 1, sf)
def test_get_measurement_ctx(self):
ctx = self.analysis_ctx.get_measurement_ctx(0)
self.assertNotEqual(None, ctx)
def test_get_columns(self):
ctx = self.analysis_ctx.get_measurement_ctx(0)
columns = ctx.parse()
self.assertNotEqual(None, columns)
self.assertEqual(50, len(columns))
for column in columns:
self.assertEqual(384, len(column.values))
class InCellParseRoiTest(unittest.TestCase):
ROOT = "/Users/callan/testimages"
RESULT_FILE = "Mara_488 and hoechst_P-HisH3.xml"
def setUp(self):
AbstractPlateAnalysisCtx.DEFAULT_ORIGINAL_FILE_PROVIDER = \
FromFileOriginalFileProvider
original_files = list()
# Create our container images and an original file image map
images = list()
n_images = 0
for row in range(16):
for column in range(24):
well = WellI(n_images, True)
well.column = rint(column)
well.row = rint(row)
well_sample = WellSampleI(n_images, True)
well_sample.well = well
image = ImageI(n_images, True)
image.addWellSample(well_sample)
images.append(image)
original_file_image_map = dict()
# Our required original file format
format = rstring('Companion/InCell')
# Create original file representing the result file
o = OriginalFileI(1, True)
o.name = rstring(self.RESULT_FILE)
o.path = rstring(os.path.join(self.ROOT, self.RESULT_FILE))
o.mimetype = format
original_files.append(o) # [1] = o
        original_file_image_map[1] = images[0]
sf = TestingServiceFactory()
self.analysis_ctx = InCellPlateAnalysisCtx(
images, original_files, original_file_image_map, 1, sf)
def test_get_measurement_ctx(self):
ctx = self.analysis_ctx.get_measurement_ctx(0)
self.assertNotEqual(None, ctx)
def test_get_columns(self):
ctx = self.analysis_ctx.get_measurement_ctx(0)
columns = ctx.parse()
self.assertNotEqual(None, columns)
for column in columns:
print('Column: %s' % column.name)
self.assertEqual(33, len(columns))
for column in columns:
self.assertEqual(114149, len(column.values))
if __name__ == '__main__':
unittest.main()
|
kennedyshead/home-assistant
|
refs/heads/dev
|
tests/components/deconz/test_binary_sensor.py
|
2
|
"""deCONZ binary sensor platform tests."""
from unittest.mock import patch
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_MOTION,
DEVICE_CLASS_PROBLEM,
DEVICE_CLASS_VIBRATION,
)
from homeassistant.components.deconz.const import (
CONF_ALLOW_CLIP_SENSOR,
CONF_ALLOW_NEW_DEVICES,
CONF_MASTER_GATEWAY,
DOMAIN as DECONZ_DOMAIN,
)
from homeassistant.components.deconz.services import SERVICE_DEVICE_REFRESH
from homeassistant.const import (
ATTR_DEVICE_CLASS,
DEVICE_CLASS_TEMPERATURE,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
)
from homeassistant.helpers import entity_registry as er
from homeassistant.helpers.entity_registry import async_entries_for_config_entry
from .test_gateway import (
DECONZ_WEB_REQUEST,
mock_deconz_request,
setup_deconz_integration,
)
async def test_no_binary_sensors(hass, aioclient_mock):
"""Test that no sensors in deconz results in no sensor entities."""
await setup_deconz_integration(hass, aioclient_mock)
assert len(hass.states.async_all()) == 0
async def test_binary_sensors(hass, aioclient_mock, mock_deconz_websocket):
"""Test successful creation of binary sensor entities."""
data = {
"sensors": {
"1": {
"name": "Presence sensor",
"type": "ZHAPresence",
"state": {"dark": False, "presence": False},
"config": {"on": True, "reachable": True, "temperature": 10},
"uniqueid": "00:00:00:00:00:00:00:00-00",
},
"2": {
"name": "Temperature sensor",
"type": "ZHATemperature",
"state": {"temperature": False},
"config": {},
"uniqueid": "00:00:00:00:00:00:00:01-00",
},
"3": {
"name": "CLIP presence sensor",
"type": "CLIPPresence",
"state": {"presence": False},
"config": {},
"uniqueid": "00:00:00:00:00:00:00:02-00",
},
"4": {
"name": "Vibration sensor",
"type": "ZHAVibration",
"state": {
"orientation": [1, 2, 3],
"tiltangle": 36,
"vibration": True,
"vibrationstrength": 10,
},
"config": {"on": True, "reachable": True, "temperature": 10},
"uniqueid": "00:00:00:00:00:00:00:03-00",
},
}
}
with patch.dict(DECONZ_WEB_REQUEST, data):
config_entry = await setup_deconz_integration(hass, aioclient_mock)
assert len(hass.states.async_all()) == 5
presence_sensor = hass.states.get("binary_sensor.presence_sensor")
assert presence_sensor.state == STATE_OFF
assert presence_sensor.attributes[ATTR_DEVICE_CLASS] == DEVICE_CLASS_MOTION
presence_temp = hass.states.get("sensor.presence_sensor_temperature")
assert presence_temp.state == "0.1"
assert presence_temp.attributes[ATTR_DEVICE_CLASS] == DEVICE_CLASS_TEMPERATURE
assert hass.states.get("binary_sensor.temperature_sensor") is None
assert hass.states.get("binary_sensor.clip_presence_sensor") is None
vibration_sensor = hass.states.get("binary_sensor.vibration_sensor")
assert vibration_sensor.state == STATE_ON
assert vibration_sensor.attributes[ATTR_DEVICE_CLASS] == DEVICE_CLASS_VIBRATION
vibration_temp = hass.states.get("sensor.vibration_sensor_temperature")
assert vibration_temp.state == "0.1"
assert vibration_temp.attributes[ATTR_DEVICE_CLASS] == DEVICE_CLASS_TEMPERATURE
event_changed_sensor = {
"t": "event",
"e": "changed",
"r": "sensors",
"id": "1",
"state": {"presence": True},
}
await mock_deconz_websocket(data=event_changed_sensor)
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.presence_sensor").state == STATE_ON
await hass.config_entries.async_unload(config_entry.entry_id)
assert hass.states.get("binary_sensor.presence_sensor").state == STATE_UNAVAILABLE
await hass.config_entries.async_remove(config_entry.entry_id)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 0
async def test_tampering_sensor(hass, aioclient_mock, mock_deconz_websocket):
"""Verify tampering sensor works."""
data = {
"sensors": {
"1": {
"name": "Presence sensor",
"type": "ZHAPresence",
"state": {"dark": False, "presence": False, "tampered": False},
"config": {"on": True, "reachable": True, "temperature": 10},
"uniqueid": "00:00:00:00:00:00:00:00-00",
},
}
}
with patch.dict(DECONZ_WEB_REQUEST, data):
config_entry = await setup_deconz_integration(hass, aioclient_mock)
assert len(hass.states.async_all()) == 3
presence_tamper = hass.states.get("binary_sensor.presence_sensor_tampered")
assert presence_tamper.state == STATE_OFF
assert presence_tamper.attributes[ATTR_DEVICE_CLASS] == DEVICE_CLASS_PROBLEM
event_changed_sensor = {
"t": "event",
"e": "changed",
"r": "sensors",
"id": "1",
"state": {"tampered": True},
}
await mock_deconz_websocket(data=event_changed_sensor)
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.presence_sensor_tampered").state == STATE_ON
await hass.config_entries.async_unload(config_entry.entry_id)
assert (
hass.states.get("binary_sensor.presence_sensor_tampered").state
== STATE_UNAVAILABLE
)
await hass.config_entries.async_remove(config_entry.entry_id)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 0
async def test_allow_clip_sensor(hass, aioclient_mock):
"""Test that CLIP sensors can be allowed."""
data = {
"sensors": {
"1": {
"name": "Presence sensor",
"type": "ZHAPresence",
"state": {"presence": False},
"config": {"on": True, "reachable": True},
"uniqueid": "00:00:00:00:00:00:00:00-00",
},
"2": {
"name": "CLIP presence sensor",
"type": "CLIPPresence",
"state": {"presence": False},
"config": {},
"uniqueid": "00:00:00:00:00:00:00:02-00",
},
}
}
with patch.dict(DECONZ_WEB_REQUEST, data):
config_entry = await setup_deconz_integration(
hass, aioclient_mock, options={CONF_ALLOW_CLIP_SENSOR: True}
)
assert len(hass.states.async_all()) == 2
assert hass.states.get("binary_sensor.presence_sensor").state == STATE_OFF
assert hass.states.get("binary_sensor.clip_presence_sensor").state == STATE_OFF
# Disallow clip sensors
hass.config_entries.async_update_entry(
config_entry, options={CONF_ALLOW_CLIP_SENSOR: False}
)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 1
assert not hass.states.get("binary_sensor.clip_presence_sensor")
# Allow clip sensors
hass.config_entries.async_update_entry(
config_entry, options={CONF_ALLOW_CLIP_SENSOR: True}
)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 2
assert hass.states.get("binary_sensor.clip_presence_sensor").state == STATE_OFF
async def test_add_new_binary_sensor(hass, aioclient_mock, mock_deconz_websocket):
"""Test that adding a new binary sensor works."""
event_added_sensor = {
"t": "event",
"e": "added",
"r": "sensors",
"id": "1",
"sensor": {
"id": "Presence sensor id",
"name": "Presence sensor",
"type": "ZHAPresence",
"state": {"presence": False},
"config": {"on": True, "reachable": True},
"uniqueid": "00:00:00:00:00:00:00:00-00",
},
}
await setup_deconz_integration(hass, aioclient_mock)
assert len(hass.states.async_all()) == 0
await mock_deconz_websocket(data=event_added_sensor)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 1
assert hass.states.get("binary_sensor.presence_sensor").state == STATE_OFF
async def test_add_new_binary_sensor_ignored(
hass, aioclient_mock, mock_deconz_websocket
):
"""Test that adding a new binary sensor is not allowed."""
sensor = {
"name": "Presence sensor",
"type": "ZHAPresence",
"state": {"presence": False},
"config": {"on": True, "reachable": True},
"uniqueid": "00:00:00:00:00:00:00:00-00",
}
event_added_sensor = {
"t": "event",
"e": "added",
"r": "sensors",
"id": "1",
"sensor": sensor,
}
config_entry = await setup_deconz_integration(
hass,
aioclient_mock,
options={CONF_MASTER_GATEWAY: True, CONF_ALLOW_NEW_DEVICES: False},
)
assert len(hass.states.async_all()) == 0
await mock_deconz_websocket(data=event_added_sensor)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 0
assert not hass.states.get("binary_sensor.presence_sensor")
entity_registry = er.async_get(hass)
assert (
len(async_entries_for_config_entry(entity_registry, config_entry.entry_id)) == 0
)
aioclient_mock.clear_requests()
data = {"groups": {}, "lights": {}, "sensors": {"1": sensor}}
mock_deconz_request(aioclient_mock, config_entry.data, data)
await hass.services.async_call(DECONZ_DOMAIN, SERVICE_DEVICE_REFRESH)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 1
assert hass.states.get("binary_sensor.presence_sensor")
|
lamby/live-studio
|
refs/heads/master
|
contrib/django/contrib/sessions/models.py
|
146
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
class SessionManager(models.Manager):
def encode(self, session_dict):
"""
Returns the given session dictionary pickled and encoded as a string.
"""
return SessionStore().encode(session_dict)
def save(self, session_key, session_dict, expire_date):
s = self.model(session_key, self.encode(session_dict), expire_date)
if session_dict:
s.save()
else:
s.delete() # Clear sessions with no data.
return s
class Session(models.Model):
"""
Django provides full support for anonymous sessions. The session
framework lets you store and retrieve arbitrary data on a
per-site-visitor basis. It stores data on the server side and
abstracts the sending and receiving of cookies. Cookies contain a
session ID -- not the data itself.
The Django sessions framework is entirely cookie-based. It does
not fall back to putting session IDs in URLs. This is an intentional
design decision. Not only does that behavior make URLs ugly, it makes
your site vulnerable to session-ID theft via the "Referer" header.
For complete documentation on using Sessions in your code, consult
the sessions documentation that is shipped with Django (also available
on the Django Web site).
"""
session_key = models.CharField(_('session key'), max_length=40,
primary_key=True)
session_data = models.TextField(_('session data'))
expire_date = models.DateTimeField(_('expire date'), db_index=True)
objects = SessionManager()
class Meta:
db_table = 'django_session'
verbose_name = _('session')
verbose_name_plural = _('sessions')
def get_decoded(self):
return SessionStore().decode(self.session_data)
# At bottom to avoid circular import
from django.contrib.sessions.backends.db import SessionStore
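# --- Illustrative sketch (not part of the original module) -----------------
# Round-trips a session dict through the manager defined above. The key and
# payload are hypothetical; running this needs a configured Django project.
def _example_session_roundtrip():
    import datetime
    expiry = datetime.datetime.now() + datetime.timedelta(days=14)
    s = Session.objects.save('a' * 40, {'cart': [42]}, expiry)  # 40-char key
    return s.get_decoded()  # -> {'cart': [42]}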
|
frouty/odoogoeen
|
refs/heads/prod
|
addons/portal_stock/__openerp__.py
|
437
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Portal Stock',
'version': '0.1',
'complexity': 'easy',
'description': """
This module adds access rules to your portal if stock and portal are installed.
==========================================================================================
""",
'author': 'OpenERP SA',
'depends': ['sale_stock','portal'],
'data': [
'security/portal_security.xml',
'security/ir.model.access.csv',
],
'installable': True,
'auto_install': True,
'category': 'Hidden',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
mccarrmb/moztrap
|
refs/heads/master
|
moztrap/view/runtests/finders.py
|
5
|
"""
Finder for running tests.
"""
from django.core.urlresolvers import reverse
from ... import model
from ..lists import finder
class RunTestsFinder(finder.Finder):
template_base = "runtests/finder"
columns = [
finder.Column(
"products",
"_products.html",
model.Product.objects.order_by("name"),
),
finder.Column(
"productversions",
"_productversions.html",
model.ProductVersion.objects.all(),
),
finder.Column(
"runs",
"_runs.html",
model.Run.objects.filter(
status=model.Run.STATUS.active,
# only show stand-alone runs or runs that are a series.
# don't show runs that are individual members of a series.
# to run a member of a series, run the series, then specify
# the build id that has an existing series member for it.
# Having the series members here would be noisy and confusing.
series=None,
),
),
]
def child_query_url(self, obj):
if isinstance(obj, model.Run):
return reverse("runtests_environment", kwargs={"run_id": obj.id})
return super(RunTestsFinder, self).child_query_url(obj)
|
rprata/boost
|
refs/heads/master
|
libs/python/pyste/tests/infosUT.py
|
54
|
# Copyright Bruno da Silva de Oliveira 2003. Use, modification and
# distribution is subject to the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import sys
from Pyste.infos import *
from Pyste.policies import *
from Pyste.exporterutils import *
import unittest
#================================================================================
# InfosTest
#================================================================================
class InfosTest(unittest.TestCase):
def testFunctionInfo(self):
info = FunctionInfo('test::foo', 'foo.h')
rename(info, 'hello')
set_policy(info, return_internal_reference())
set_wrapper(info, FunctionWrapper('foo_wrapper'))
info = InfoWrapper(info)
self.assertEqual(info.rename, 'hello')
self.assertEqual(info.policy.Code(), 'return_internal_reference< 1 >')
self.assertEqual(info.wrapper.name, 'foo_wrapper')
def testClassInfo(self):
info = ClassInfo('test::IFoo', 'foo.h')
rename(info.name, 'Name')
rename(info.exclude, 'Exclude')
rename(info, 'Foo')
rename(info.Bar, 'bar')
set_policy(info.Baz, return_internal_reference())
rename(info.operator['>>'], 'from_string')
exclude(info.Bar)
set_wrapper(info.Baz, FunctionWrapper('baz_wrapper'))
info = InfoWrapper(info)
self.assertEqual(info.rename, 'Foo')
self.assertEqual(info['Bar'].rename, 'bar')
self.assertEqual(info['name'].rename, 'Name')
self.assertEqual(info['exclude'].rename, 'Exclude')
self.assertEqual(info['Bar'].exclude, True)
self.assertEqual(info['Baz'].policy.Code(), 'return_internal_reference< 1 >')
self.assertEqual(info['Baz'].wrapper.name, 'baz_wrapper')
self.assertEqual(info['operator']['>>'].rename, 'from_string')
if __name__ == '__main__':
unittest.main()
|
a13m/ansible
|
refs/heads/devel
|
v2/ansible/module_utils/gce.py
|
305
|
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Franck Cuny <franck.cuny@gmail.com>, 2014
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os
import pprint
# Provider and get_driver come from apache-libcloud; gce_connect() below also
# reads GCE_* environment variables through os, so these imports are required.
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
USER_AGENT_PRODUCT = "Ansible-gce"
USER_AGENT_VERSION = "v1"
def gce_connect(module, provider=None):
"""Return a Google Cloud Engine connection."""
service_account_email = module.params.get('service_account_email', None)
pem_file = module.params.get('pem_file', None)
project_id = module.params.get('project_id', None)
# If any of the values are not given as parameters, check the appropriate
# environment variables.
if not service_account_email:
service_account_email = os.environ.get('GCE_EMAIL', None)
if not project_id:
project_id = os.environ.get('GCE_PROJECT', None)
if not pem_file:
pem_file = os.environ.get('GCE_PEM_FILE_PATH', None)
# If we still don't have one or more of our credentials, attempt to
# get the remaining values from the libcloud secrets file.
if service_account_email is None or pem_file is None:
try:
import secrets
except ImportError:
secrets = None
if hasattr(secrets, 'GCE_PARAMS'):
if not service_account_email:
service_account_email = secrets.GCE_PARAMS[0]
if not pem_file:
pem_file = secrets.GCE_PARAMS[1]
keyword_params = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
if not project_id:
project_id = keyword_params.get('project', None)
# If we *still* don't have the credentials we need, then it's time to
# just fail out.
if service_account_email is None or pem_file is None or project_id is None:
module.fail_json(msg='Missing GCE connection parameters in libcloud '
'secrets file.')
return None
# Allow for passing in libcloud Google DNS (e.g, Provider.GOOGLE)
if provider is None:
provider = Provider.GCE
try:
gce = get_driver(provider)(service_account_email, pem_file,
datacenter=module.params.get('zone', None),
project=project_id)
gce.connection.user_agent_append("%s/%s" % (
USER_AGENT_PRODUCT, USER_AGENT_VERSION))
except (RuntimeError, ValueError), e:
module.fail_json(msg=str(e), changed=False)
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
return gce
def unexpected_error_msg(error):
"""Create an error string based on passed in error."""
return 'Unexpected response: ' + pprint.pformat(vars(error))
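# --- Illustrative sketch (not part of the original snippet) ----------------
# gce_connect() only needs an object exposing `params` (a dict) and
# `fail_json()`; this stand-in and the credential values are hypothetical.
class _FakeModule(object):
    def __init__(self, params):
        self.params = params
    def fail_json(self, msg=None, changed=False):
        raise SystemExit(msg)
# Example call (requires real credentials and apache-libcloud installed):
# gce = gce_connect(_FakeModule({'service_account_email': 'svc@example.com',
#                                'pem_file': '/path/to/key.pem',
#                                'project_id': 'my-project',
#                                'zone': 'us-central1-a'}))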
|
40223212/2015cdbg4_6-22
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/_weakrefset.py
|
766
|
# Access WeakSet through the weakref module.
# This code is separated-out because it is needed
# by abc.py to load everything else at startup.
from _weakref import ref
__all__ = ['WeakSet']
class _IterationGuard:
# This context manager registers itself in the current iterators of the
# weak container, such as to delay all removals until the context manager
# exits.
# This technique should be relatively thread-safe (since sets are).
def __init__(self, weakcontainer):
# Don't create cycles
self.weakcontainer = ref(weakcontainer)
def __enter__(self):
w = self.weakcontainer()
if w is not None:
w._iterating.add(self)
return self
def __exit__(self, e, t, b):
w = self.weakcontainer()
if w is not None:
s = w._iterating
s.remove(self)
if not s:
w._commit_removals()
class WeakSet:
def __init__(self, data=None):
self.data = set()
def _remove(item, selfref=ref(self)):
self = selfref()
if self is not None:
if self._iterating:
self._pending_removals.append(item)
else:
self.data.discard(item)
self._remove = _remove
# A list of keys to be removed
self._pending_removals = []
self._iterating = set()
if data is not None:
self.update(data)
def _commit_removals(self):
l = self._pending_removals
discard = self.data.discard
while l:
discard(l.pop())
def __iter__(self):
with _IterationGuard(self):
for itemref in self.data:
item = itemref()
if item is not None:
yield item
def __len__(self):
return len(self.data) - len(self._pending_removals)
def __contains__(self, item):
try:
wr = ref(item)
except TypeError:
return False
return wr in self.data
def __reduce__(self):
return (self.__class__, (list(self),),
getattr(self, '__dict__', None))
def add(self, item):
if self._pending_removals:
self._commit_removals()
self.data.add(ref(item, self._remove))
def clear(self):
if self._pending_removals:
self._commit_removals()
self.data.clear()
def copy(self):
return self.__class__(self)
def pop(self):
if self._pending_removals:
self._commit_removals()
while True:
try:
itemref = self.data.pop()
except KeyError:
raise KeyError('pop from empty WeakSet')
item = itemref()
if item is not None:
return item
def remove(self, item):
if self._pending_removals:
self._commit_removals()
self.data.remove(ref(item))
def discard(self, item):
if self._pending_removals:
self._commit_removals()
self.data.discard(ref(item))
def update(self, other):
if self._pending_removals:
self._commit_removals()
for element in other:
self.add(element)
def __ior__(self, other):
self.update(other)
return self
def difference(self, other):
newset = self.copy()
newset.difference_update(other)
return newset
__sub__ = difference
def difference_update(self, other):
self.__isub__(other)
def __isub__(self, other):
if self._pending_removals:
self._commit_removals()
if self is other:
self.data.clear()
else:
self.data.difference_update(ref(item) for item in other)
return self
def intersection(self, other):
return self.__class__(item for item in other if item in self)
__and__ = intersection
def intersection_update(self, other):
self.__iand__(other)
def __iand__(self, other):
if self._pending_removals:
self._commit_removals()
self.data.intersection_update(ref(item) for item in other)
return self
def issubset(self, other):
return self.data.issubset(ref(item) for item in other)
__le__ = issubset
def __lt__(self, other):
return self.data < set(ref(item) for item in other)
def issuperset(self, other):
return self.data.issuperset(ref(item) for item in other)
__ge__ = issuperset
def __gt__(self, other):
return self.data > set(ref(item) for item in other)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self.data == set(ref(item) for item in other)
def symmetric_difference(self, other):
newset = self.copy()
newset.symmetric_difference_update(other)
return newset
__xor__ = symmetric_difference
def symmetric_difference_update(self, other):
self.__ixor__(other)
def __ixor__(self, other):
if self._pending_removals:
self._commit_removals()
if self is other:
self.data.clear()
else:
self.data.symmetric_difference_update(ref(item, self._remove) for item in other)
return self
def union(self, other):
return self.__class__(e for s in (self, other) for e in s)
__or__ = union
def isdisjoint(self, other):
return len(self.intersection(other)) == 0
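# --- Illustrative sketch (not part of the original module) -----------------
# Entries disappear once their last strong reference dies; during iteration,
# _IterationGuard defers removals and commits them when iteration finishes.
def _example_weakset():
    class Token(object):
        pass
    t = Token()
    ws = WeakSet([t])
    assert len(ws) == 1
    del t             # drop the last strong reference
    import gc
    gc.collect()      # make collection deterministic on non-refcounting VMs
    assert len(ws) == 0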
|
lombritz/odoo
|
refs/heads/8.0
|
addons/account/wizard/account_chart.py
|
271
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_chart(osv.osv_memory):
"""
For Chart of Accounts
"""
_name = "account.chart"
_description = "Account chart"
_columns = {
'fiscalyear': fields.many2one('account.fiscalyear', \
'Fiscal year', \
help='Keep empty for all open fiscal years'),
'period_from': fields.many2one('account.period', 'Start period'),
'period_to': fields.many2one('account.period', 'End period'),
'target_move': fields.selection([('posted', 'All Posted Entries'),
('all', 'All Entries'),
], 'Target Moves', required=True),
}
def _get_fiscalyear(self, cr, uid, context=None):
"""Return default Fiscalyear value"""
return self.pool.get('account.fiscalyear').find(cr, uid, context=context)
def onchange_fiscalyear(self, cr, uid, ids, fiscalyear_id=False, context=None):
res = {}
if fiscalyear_id:
start_period = end_period = False
cr.execute('''
SELECT * FROM (SELECT p.id
FROM account_period p
LEFT JOIN account_fiscalyear f ON (p.fiscalyear_id = f.id)
WHERE f.id = %s
ORDER BY p.date_start ASC, p.special DESC
LIMIT 1) AS period_start
UNION ALL
SELECT * FROM (SELECT p.id
FROM account_period p
LEFT JOIN account_fiscalyear f ON (p.fiscalyear_id = f.id)
WHERE f.id = %s
AND p.date_start < NOW()
ORDER BY p.date_stop DESC
LIMIT 1) AS period_stop''', (fiscalyear_id, fiscalyear_id))
periods = [i[0] for i in cr.fetchall()]
if periods:
start_period = periods[0]
if len(periods) > 1:
end_period = periods[1]
res['value'] = {'period_from': start_period, 'period_to': end_period}
else:
res['value'] = {'period_from': False, 'period_to': False}
return res
def account_chart_open_window(self, cr, uid, ids, context=None):
"""
Opens chart of Accounts
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of account chart’s IDs
        @return: dictionary for the Open Account Chart window action on the given fiscal year, covering all entries or only posted entries
"""
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
period_obj = self.pool.get('account.period')
fy_obj = self.pool.get('account.fiscalyear')
if context is None:
context = {}
data = self.read(cr, uid, ids, context=context)[0]
result = mod_obj.get_object_reference(cr, uid, 'account', 'action_account_tree')
id = result and result[1] or False
result = act_obj.read(cr, uid, [id], context=context)[0]
fiscalyear_id = data.get('fiscalyear', False) and data['fiscalyear'][0] or False
result['periods'] = []
if data['period_from'] and data['period_to']:
period_from = data.get('period_from', False) and data['period_from'][0] or False
period_to = data.get('period_to', False) and data['period_to'][0] or False
result['periods'] = period_obj.build_ctx_periods(cr, uid, period_from, period_to)
result['context'] = str({'fiscalyear': fiscalyear_id, 'periods': result['periods'], \
'state': data['target_move']})
if fiscalyear_id:
result['name'] += ':' + fy_obj.read(cr, uid, [fiscalyear_id], context=context)[0]['code']
return result
_defaults = {
'target_move': 'posted',
'fiscalyear': _get_fiscalyear,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
cbeloni/pychronesapp
|
refs/heads/master
|
backend/venv/lib/python2.7/site-packages/pip/basecommand.py
|
392
|
"""Base Command class, and related routines"""
import os
import sys
import tempfile
import traceback
import time
import optparse
from pip import cmdoptions
from pip.locations import running_under_virtualenv
from pip.log import logger
from pip.download import PipSession
from pip.exceptions import (BadCommand, InstallationError, UninstallationError,
CommandError, PreviousBuildDirError)
from pip.backwardcompat import StringIO
from pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter
from pip.status_codes import (SUCCESS, ERROR, UNKNOWN_ERROR, VIRTUALENV_NOT_FOUND,
PREVIOUS_BUILD_DIR_ERROR)
from pip.util import get_prog
__all__ = ['Command']
class Command(object):
name = None
usage = None
hidden = False
def __init__(self):
parser_kw = {
'usage': self.usage,
'prog': '%s %s' % (get_prog(), self.name),
'formatter': UpdatingDefaultsHelpFormatter(),
'add_help_option': False,
'name': self.name,
'description': self.__doc__,
}
self.parser = ConfigOptionParser(**parser_kw)
# Commands should add options to this option group
optgroup_name = '%s Options' % self.name.capitalize()
self.cmd_opts = optparse.OptionGroup(self.parser, optgroup_name)
# Add the general options
gen_opts = cmdoptions.make_option_group(cmdoptions.general_group, self.parser)
self.parser.add_option_group(gen_opts)
def _build_session(self, options):
session = PipSession()
# Handle custom ca-bundles from the user
if options.cert:
session.verify = options.cert
# Handle timeouts
if options.timeout:
session.timeout = options.timeout
# Handle configured proxies
if options.proxy:
session.proxies = {
"http": options.proxy,
"https": options.proxy,
}
# Determine if we can prompt the user for authentication or not
session.auth.prompting = not options.no_input
return session
def setup_logging(self):
pass
def parse_args(self, args):
# factored out for testability
return self.parser.parse_args(args)
def main(self, args):
options, args = self.parse_args(args)
level = 1 # Notify
level += options.verbose
level -= options.quiet
level = logger.level_for_integer(4 - level)
complete_log = []
logger.add_consumers(
(level, sys.stdout),
(logger.DEBUG, complete_log.append),
)
if options.log_explicit_levels:
logger.explicit_levels = True
self.setup_logging()
#TODO: try to get these passing down from the command?
# without resorting to os.environ to hold these.
if options.no_input:
os.environ['PIP_NO_INPUT'] = '1'
if options.exists_action:
os.environ['PIP_EXISTS_ACTION'] = ' '.join(options.exists_action)
if options.require_venv:
# If a venv is required check if it can really be found
if not running_under_virtualenv():
logger.fatal('Could not find an activated virtualenv (required).')
sys.exit(VIRTUALENV_NOT_FOUND)
if options.log:
log_fp = open_logfile(options.log, 'a')
logger.add_consumers((logger.DEBUG, log_fp))
else:
log_fp = None
exit = SUCCESS
store_log = False
try:
status = self.run(options, args)
# FIXME: all commands should return an exit status
# and when it is done, isinstance is not needed anymore
if isinstance(status, int):
exit = status
except PreviousBuildDirError:
e = sys.exc_info()[1]
logger.fatal(str(e))
logger.info('Exception information:\n%s' % format_exc())
store_log = True
exit = PREVIOUS_BUILD_DIR_ERROR
except (InstallationError, UninstallationError):
e = sys.exc_info()[1]
logger.fatal(str(e))
logger.info('Exception information:\n%s' % format_exc())
store_log = True
exit = ERROR
except BadCommand:
e = sys.exc_info()[1]
logger.fatal(str(e))
logger.info('Exception information:\n%s' % format_exc())
store_log = True
exit = ERROR
except CommandError:
e = sys.exc_info()[1]
logger.fatal('ERROR: %s' % e)
logger.info('Exception information:\n%s' % format_exc())
exit = ERROR
except KeyboardInterrupt:
logger.fatal('Operation cancelled by user')
logger.info('Exception information:\n%s' % format_exc())
store_log = True
exit = ERROR
except:
logger.fatal('Exception:\n%s' % format_exc())
store_log = True
exit = UNKNOWN_ERROR
if store_log:
log_file_fn = options.log_file
text = '\n'.join(complete_log)
try:
log_file_fp = open_logfile(log_file_fn, 'w')
except IOError:
temp = tempfile.NamedTemporaryFile(delete=False)
log_file_fn = temp.name
log_file_fp = open_logfile(log_file_fn, 'w')
logger.fatal('Storing debug log for failure in %s' % log_file_fn)
log_file_fp.write(text)
log_file_fp.close()
if log_fp is not None:
log_fp.close()
return exit
def format_exc(exc_info=None):
if exc_info is None:
exc_info = sys.exc_info()
out = StringIO()
traceback.print_exception(*exc_info, **dict(file=out))
return out.getvalue()
def open_logfile(filename, mode='a'):
"""Open the named log file in append mode.
If the file already exists, a separator will also be printed to
the file to separate past activity from current activity.
"""
filename = os.path.expanduser(filename)
filename = os.path.abspath(filename)
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
exists = os.path.exists(filename)
log_fp = open(filename, mode)
if exists:
log_fp.write('%s\n' % ('-' * 60))
log_fp.write('%s run on %s\n' % (sys.argv[0], time.strftime('%c')))
return log_fp
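# --- Illustrative sketch (not part of the original module) -----------------
# open_logfile() creates missing parent directories and, when appending to an
# existing file, writes a dashed separator line first. The path is made up.
def _example_open_logfile():
    fp = open_logfile('/tmp/pip-demo/pip.log')  # mode defaults to 'a'
    fp.write('demo entry\n')
    fp.close()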
|
hlj2722/flasky
|
refs/heads/master
|
app/api_1_0/users.py
|
104
|
from flask import jsonify, request, current_app, url_for
from . import api
from ..models import User, Post
@api.route('/users/<int:id>')
def get_user(id):
user = User.query.get_or_404(id)
return jsonify(user.to_json())
@api.route('/users/<int:id>/posts/')
def get_user_posts(id):
user = User.query.get_or_404(id)
page = request.args.get('page', 1, type=int)
pagination = user.posts.order_by(Post.timestamp.desc()).paginate(
page, per_page=current_app.config['FLASKY_POSTS_PER_PAGE'],
error_out=False)
posts = pagination.items
prev = None
if pagination.has_prev:
        prev = url_for('api.get_user_posts', id=id, page=page-1, _external=True)
next = None
if pagination.has_next:
        next = url_for('api.get_user_posts', id=id, page=page+1, _external=True)
return jsonify({
'posts': [post.to_json() for post in posts],
'prev': prev,
'next': next,
'count': pagination.total
})
@api.route('/users/<int:id>/timeline/')
def get_user_followed_posts(id):
user = User.query.get_or_404(id)
page = request.args.get('page', 1, type=int)
pagination = user.followed_posts.order_by(Post.timestamp.desc()).paginate(
page, per_page=current_app.config['FLASKY_POSTS_PER_PAGE'],
error_out=False)
posts = pagination.items
prev = None
if pagination.has_prev:
        prev = url_for('api.get_user_followed_posts', id=id, page=page-1, _external=True)
next = None
if pagination.has_next:
        next = url_for('api.get_user_followed_posts', id=id, page=page+1, _external=True)
return jsonify({
'posts': [post.to_json() for post in posts],
'prev': prev,
'next': next,
'count': pagination.total
})
|
rgommers/statsmodels
|
refs/heads/master
|
statsmodels/iolib/tableformatting.py
|
12
|
"""
Summary Table formatting
This is here to help keep the formatting consistent across the different models
"""
gen_fmt = dict(
data_fmts = ["%s", "%s", "%s", "%s", "%s"],
empty_cell = '',
colwidths = 7, #17,
colsep=' ',
row_pre = ' ',
row_post = ' ',
table_dec_above='=',
table_dec_below=None,
header_dec_below=None,
header_fmt = '%s',
stub_fmt = '%s',
title_align='c',
header_align = 'r',
data_aligns = "r",
stubs_align = "l",
fmt = 'txt'
)
# Note: table_1l_fmt overrides the formatting below if it is
# appended to table_1l
fmt_1_right = dict(
data_fmts = ["%s", "%s", "%s", "%s", "%s"],
empty_cell = '',
colwidths = 16,
colsep=' ',
row_pre = '',
row_post = '',
table_dec_above='=',
table_dec_below=None,
header_dec_below=None,
header_fmt = '%s',
stub_fmt = '%s',
title_align='c',
header_align = 'r',
data_aligns = "r",
stubs_align = "l",
fmt = 'txt'
)
fmt_2 = dict(
data_fmts = ["%s", "%s", "%s", "%s"],
empty_cell = '',
colwidths = 10,
colsep=' ',
row_pre = ' ',
row_post = ' ',
table_dec_above='=',
table_dec_below='=',
header_dec_below='-',
header_fmt = '%s',
stub_fmt = '%s',
title_align='c',
header_align = 'r',
data_aligns = 'r',
stubs_align = 'l',
fmt = 'txt'
)
# new version
fmt_base = dict(
data_fmts = ["%s", "%s", "%s", "%s", "%s"],
empty_cell = '',
colwidths = 10,
colsep=' ',
row_pre = '',
row_post = '',
table_dec_above='=',
table_dec_below='=', #TODO need '=' at the last subtable
header_dec_below='-',
header_fmt = '%s',
stub_fmt = '%s',
title_align='c',
header_align = 'r',
data_aligns = 'r',
stubs_align = 'l',
fmt = 'txt'
)
import copy
fmt_2cols = copy.deepcopy(fmt_base)
fmt2 = dict(
data_fmts = ["%18s", "-%19s", "%18s", "%19s"], #TODO:
colsep=' ',
colwidths = 18,
stub_fmt = '-%21s',
)
fmt_2cols.update(fmt2)
fmt_params = copy.deepcopy(fmt_base)
fmt3 = dict(
data_fmts = ["%s", "%s", "%8s", "%s", "%23s"],
)
fmt_params.update(fmt3)
"""
Summary Table formatting
This is here to help keep the formatting consistent across the different models
"""
fmt_latex = {'colsep': ' & ',
'colwidths': None,
'data_aligns': 'r',
'data_fmt': '%s',
'data_fmts': ['%s'],
'empty': '',
'empty_cell': '',
'fmt': 'ltx',
'header': '%s',
'header_align': 'c',
'header_dec_below': '\\hline',
'header_fmt': '%s',
'missing': '--',
'row_dec_below': None,
'row_post': ' \\\\',
'strip_backslash': True,
'stub': '%s',
'stub_align': 'l',
'stub_fmt': '%s',
'table_dec_above': '\\hline',
'table_dec_below': '\\hline'}
fmt_txt = {'colsep': ' ',
'colwidths': None,
'data_aligns': 'r',
'data_fmts': ['%s'],
'empty': '',
'empty_cell': '',
'fmt': 'txt',
'header': '%s',
'header_align': 'c',
'header_dec_below': '-',
'header_fmt': '%s',
'missing': '--',
'row_dec_below': None,
'row_post': '',
'row_pre': '',
'stub': '%s',
'stub_align': 'l',
'stub_fmt': '%s',
'table_dec_above': '-',
'table_dec_below': None,
'title_align': 'c'}
|
perkinslr/pypyjs
|
refs/heads/master
|
website/js/pypy.js-0.2.0/lib/modules/test/outstanding_bugs.py
|
195
|
#
# This file is for everybody to add tests for bugs that aren't
# fixed yet. Please add a test case and appropriate bug description.
#
# When you fix one of the bugs, please move the test to the correct
# test_ module.
#
import unittest
from test import test_support
#
# No test cases for outstanding bugs at the moment.
#
def test_main():
#test_support.run_unittest()
pass
if __name__ == "__main__":
test_main()
|
azverkan/scons
|
refs/heads/master
|
test/Configure/CONFIGUREDIR.py
|
5
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test that the configure context directory can be specified by
setting the $CONFIGUREDIR construction variable.
"""
import TestSCons
test = TestSCons.TestSCons()
test.write("SConstruct", """\
def CustomTest(context):
context.Message('Executing Custom Test ... ')
context.Result(1)
env = Environment(CONFIGUREDIR = 'custom_config_dir')
conf = Configure(env, custom_tests = {'CustomTest' : CustomTest})
conf.CustomTest();
env = conf.Finish()
""")
test.run()
test.must_exist('custom_config_dir')
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
mapleoin/cuZmeura
|
refs/heads/master
|
ads/feeds.py
|
2
|
# -*- coding: utf-8 -*-
# This file is part of cuZmeură.
# Copyright (c) 2010 Ionuț Arțăriși
# cuZmeură is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
# cuZmeură is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with cuZmeură. If not, see <http://www.gnu.org/licenses/>.
from django.contrib.syndication.feeds import Feed
from ads.models import Article
class latest(Feed):
title = "Știrile cuZmeură"
link = "/feed/"
description = "Știri despre dezvoltarea și activitatea proiectului cuZmeură"
def items(self):
return Article.objects.order_by('-created_at')[:10]
|
dstrockis/outlook-autocategories
|
refs/heads/master
|
lib/rsa/asn1.py
|
97
|
# -*- coding: utf-8 -*-
#
# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ASN.1 definitions.
Not all ASN.1-handling code uses these definitions, but when it does, they should be here.
"""
from pyasn1.type import univ, namedtype, tag
class PubKeyHeader(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('oid', univ.ObjectIdentifier()),
namedtype.NamedType('parameters', univ.Null()),
)
class OpenSSLPubKey(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('header', PubKeyHeader()),
# This little hack (the implicit tag) allows us to get a Bit String as Octet String
namedtype.NamedType('key', univ.OctetString().subtype(
implicitTag=tag.Tag(tagClass=0, tagFormat=0, tagId=3))),
)
class AsnPubKey(univ.Sequence):
"""ASN.1 contents of DER encoded public key:
RSAPublicKey ::= SEQUENCE {
modulus INTEGER, -- n
    publicExponent INTEGER, -- e
    }
"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('modulus', univ.Integer()),
namedtype.NamedType('publicExponent', univ.Integer()),
)
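# --- Illustrative sketch (not part of the original module) -----------------
# Decoding a DER-encoded RSAPublicKey into (n, e) with pyasn1; `der_bytes`
# is a hypothetical placeholder for raw key material.
def _example_decode_pubkey(der_bytes):
    from pyasn1.codec.der import decoder
    (key, _) = decoder.decode(der_bytes, asn1Spec=AsnPubKey())
    return (int(key.getComponentByName('modulus')),
            int(key.getComponentByName('publicExponent')))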
|
openaid-IATI/OIPA
|
refs/heads/master
|
OIPA/api/chain/urls.py
|
2
|
from django.conf.urls import url
import api.chain.views
app_name = 'api'
urlpatterns = [
url(r'^$',
api.chain.views.ChainList.as_view(),
name='chain-list'),
url(r'^aggregations/',
api.chain.views.ChainAggregations.as_view(),
name='chain-aggregations'),
url(r'^nodes/',
api.chain.views.NodeList.as_view(),
name='chain-nodes'),
url(r'^errors/',
api.chain.views.ErrorList.as_view(),
name='chain-errors'),
url(r'^(?P<pk>[^@$&+,/:;=?]+)/links/',
api.chain.views.ChainLinkList.as_view(),
name='chain-link-list'),
url(r'^(?P<pk>[^@$&+,/:;=?]+)/nodes/',
api.chain.views.ChainNodeList.as_view(),
name='chain-node-list'),
url(r'^(?P<pk>[^@$&+,/:;=?]+)/errors/',
api.chain.views.ChainNodeErrorList.as_view(),
name='chain-error-list'),
url(r'^(?P<pk>[^@$&+,/:;=?]+)/activities/',
api.chain.views.ChainActivities.as_view(),
name='chain-activity-list'),
url(r'^(?P<pk>[^@$&+,/:;=?]+)/transactions/',
api.chain.views.ChainTransactionList.as_view(),
name='chain-transaction-list'),
url(r'^(?P<pk>[^@$&+,/:;=?]+)/',
api.chain.views.ChainDetail.as_view(),
name='chain-detail'),
]
|
opcode0x90/pyew
|
refs/heads/master
|
vstruct/defs/macho/loader.py
|
18
|
import vstruct
from vstruct.primitives import *
from vstruct.defs.macho.const import *
vm_prot_t = v_uint32
cpu_type_t = v_uint32
cpu_subtype_t = v_uint32
lc_str = v_uint32
class mach_header(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.magic = v_uint32() # mach magic number identifier
self.cputype = cpu_type_t() # cpu specifier
self.cpusubtype = cpu_subtype_t() # machine specifier
self.filetype = v_uint32() # type of file
self.ncmds = v_uint32() # number of load commands
self.sizeofcmds = v_uint32() # the size of all the load commands
self.flags = v_uint32() # flags
def vsParse(self, bytes, offset=0):
# Over-ride this so we can do the parse, and make sure we
# had the right endianness.
ret = vstruct.VStruct.vsParse(self, bytes, offset=offset)
if self.magic == MH_CIGAM:
self._vs_fmtbase = '>'
ret = vstruct.VStruct.vsParse(self, bytes, offset=offset)
return ret
class mach_header_64(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.magic = v_uint32() # mach magic number identifier
self.cputype = cpu_type_t() # cpu specifier
self.cpusubtype = cpu_subtype_t() # machine specifier
self.filetype = v_uint32() # type of file
self.ncmds = v_uint32() # number of load commands
self.sizeofcmds = v_uint32() # the size of all the load commands
self.flags = v_uint32() # flags
self.reserved = v_uint32() # reserved
# FIXME all commands should subclass this one!
class load_command(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.cmd = v_uint32() # type of load command
self.cmdsize = v_uint32() # total size of command in bytes
class segment_command(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.cmd = v_uint32() # LC_SEGMENT
self.cmdsize = v_uint32() # includes sizeof section structs
self.segname = v_str(size=16) # segment name
self.vmaddr = v_uint32() # memory address of this segment
self.vmsize = v_uint32() # memory size of this segment
self.fileoff = v_uint32() # file offset of this segment
self.filesize = v_uint32() # amount to map from the file
self.maxprot = vm_prot_t() # maximum VM protection
self.initprot = vm_prot_t() # initial VM protection
self.nsects = v_uint32() # number of sections in segment
self.flags = v_uint32() # flags
class segment_command_64(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.cmd = v_uint32() # LC_SEGMENT_64
self.cmdsize = v_uint32() # includes sizeof section_64 structs
        self.segname = v_str(size=16) # segment name
self.vmaddr = v_uint64() # memory address of this segment
self.vmsize = v_uint64() # memory size of this segment
self.fileoff = v_uint64() # file offset of this segment
self.filesize = v_uint64() # amount to map from the file
self.maxprot = vm_prot_t() # maximum VM protection
self.initprot = vm_prot_t() # initial VM protection
self.nsects = v_uint32() # number of sections in segment
self.flags = v_uint32() # flags
class section(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.sectname = v_str(size=16) # name of this section
self.segname = v_str(size=16) # segment this section goes in
self.addr = v_uint32() # memory address of this section
self.size = v_uint32() # size in bytes of this section
self.offset = v_uint32() # file offset of this section
self.align = v_uint32() # section alignment (power of 2)
self.reloff = v_uint32() # file offset of relocation entries
self.nreloc = v_uint32() # number of relocation entries
self.flags = v_uint32() # flags (section type and attributes)
self.reserved1 = v_uint32() # reserved (for offset or index)
self.reserved2 = v_uint32() # reserved (for count or sizeof)
class section_64(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.sectname = v_str(size=16) # name of this section
self.segname = v_str(size=16) # segment this section goes in
self.addr = v_uint64() # memory address of this section
self.size = v_uint64() # size in bytes of this section
self.offset = v_uint32() # file offset of this section
self.align = v_uint32() # section alignment (power of 2)
self.reloff = v_uint32() # file offset of relocation entries
self.nreloc = v_uint32() # number of relocation entries
self.flags = v_uint32() # flags (section type and attributes)
self.reserved1 = v_uint32() # reserved (for offset or index)
self.reserved2 = v_uint32() # reserved (for count or sizeof)
self.reserved3 = v_uint32() # reserved
class fvmlib_command(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.cmd = v_uint32() # LC_IDFVMLIB or LC_LOADFVMLIB
self.cmdsize = v_uint32() # includes pathname string
self.name = lc_str() # library's target pathname
self.minor_version = v_uint32() # library's minor version number
self.header_addr = v_uint32() # library's header address
class dylib_command(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.cmd = v_uint32() # LC_ID_DYLIB, LC_LOAD_{,WEAK_}DYLIB, LC_REEXPORT_DYLIB
self.cmdsize = v_uint32() # includes pathname string
self.name = lc_str() # library's path name
self.timestamp = v_uint32() # library's build time stamp
self.current_version = v_uint32() # library's current version number
self.compatibility_version = v_uint32() # library's compatibility vers number
self.namedata = v_bytes(size=0)
def vsParse(self, bytes, offset=0):
# So we can grab the name data
retoff = vstruct.VStruct.vsParse(self, bytes, offset=offset)
# Grab the name from the inline data...
name = bytes[ offset + self.name : offset + self.cmdsize ]
self.namedata = name.split('\x00', 1)[0]
return retoff
class sub_framework_command(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.cmd = v_uint32() # LC_SUB_FRAMEWORK
self.cmdsize = v_uint32() # includes umbrella string
self.umbrella = lc_str() # the umbrella framework name
class sub_client_command(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.cmd = v_uint32() # LC_SUB_CLIENT
self.cmdsize = v_uint32() # includes client string
self.client = lc_str() # the client name
class sub_umbrella_command(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.cmd = v_uint32() # LC_SUB_UMBRELLA
self.cmdsize = v_uint32() # includes sub_umbrella string
self.sub_umbrella = lc_str() # the sub_umbrella framework name
class sub_library_command(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.cmd = v_uint32() # LC_SUB_LIBRARY
self.cmdsize = v_uint32() # includes sub_library string
self.sub_library = lc_str() # the sub_library name
class prebound_dylib_command(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.cmd = v_uint32() # LC_PREBOUND_DYLIB
self.cmdsize = v_uint32() # includes strings
self.name = lc_str() # library's path name
self.nmodules = v_uint32() # number of modules in library
self.linked_modules = lc_str() # bit vector of linked modules
class dylinker_command(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.cmd = v_uint32() # LC_ID_DYLINKER or LC_LOAD_DYLINKER
self.cmdsize = v_uint32() # includes pathname string
self.name = lc_str() # dynamic linker's path name
class thread_command(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.cmd = v_uint32() # LC_THREAD or LC_UNIXTHREAD
self.cmdsize = v_uint32() # total size of this command
class routines_command(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.cmd = v_uint32() # LC_ROUTINES
self.cmdsize = v_uint32() # total size of this command
self.init_address = v_uint32() # address of initialization routine
self.init_module = v_uint32() # index into the module table that
self.reserved1 = v_uint32()
self.reserved2 = v_uint32()
self.reserved3 = v_uint32()
self.reserved4 = v_uint32()
self.reserved5 = v_uint32()
self.reserved6 = v_uint32()
class routines_command_64(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.cmd = v_uint32() # LC_ROUTINES_64
self.cmdsize = v_uint32() # total size of this command
self.init_address = v_uint64() # address of initialization routine
self.init_module = v_uint64() # index into the module table that
self.reserved1 = v_uint64()
self.reserved2 = v_uint64()
self.reserved3 = v_uint64()
self.reserved4 = v_uint64()
self.reserved5 = v_uint64()
self.reserved6 = v_uint64()
class symtab_command(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.cmd = v_uint32() # LC_SYMTAB
self.cmdsize = v_uint32() # sizeof(struct symtab_command)
self.symoff = v_uint32() # symbol table offset
self.nsyms = v_uint32() # number of symbol table entries
self.stroff = v_uint32() # string table offset
self.strsize = v_uint32() # string table size in bytes
class dysymtab_command(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.cmd = v_uint32() # LC_DYSYMTAB
self.cmdsize = v_uint32() # sizeof(struct dysymtab_command)
self.ilocalsym = v_uint32() # index to local symbols
self.nlocalsym = v_uint32() # number of local symbols
self.iextdefsym = v_uint32() # index to externally defined symbols
self.nextdefsym = v_uint32() # number of externally defined symbols
self.iundefsym = v_uint32() # index to undefined symbols
self.nundefsym = v_uint32() # number of undefined symbols
self.tocoff = v_uint32() # file offset to table of contents
self.ntoc = v_uint32() # number of entries in table of contents
self.modtaboff = v_uint32() # file offset to module table
self.nmodtab = v_uint32() # number of module table entries
self.extrefsymoff = v_uint32() # offset to referenced symbol table
self.nextrefsyms = v_uint32() # number of referenced symbol table entries
self.indirectsymoff = v_uint32() # file offset to the indirect symbol table
self.nindirectsyms = v_uint32() # number of indirect symbol table entries
self.extreloff = v_uint32() # offset to external relocation entries
self.nextrel = v_uint32() # number of external relocation entries
self.locreloff = v_uint32() # offset to local relocation entries
self.nlocrel = v_uint32() # number of local relocation entries
class dylib_table_of_contents(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.symbol_index = v_uint32() # the defined external symbol (index into the symbol table)
self.module_index = v_uint32() # index into the module table this symbol is defined in
class dylib_module(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.module_name = v_uint32() # the module name (index into string table)
self.iextdefsym = v_uint32() # index into externally defined symbols
self.nextdefsym = v_uint32() # number of externally defined symbols
self.irefsym = v_uint32() # index into reference symbol table
self.nrefsym = v_uint32() # number of reference symbol table entries
self.ilocalsym = v_uint32() # index into symbols for local symbols
self.nlocalsym = v_uint32() # number of local symbols
self.iextrel = v_uint32() # index into external relocation entries
self.nextrel = v_uint32() # number of external relocation entries
self.iinit_iterm = v_uint32() # low 16 bits are the index into the init section, high 16 bits are the index into the term section
self.ninit_nterm = v_uint32() # low 16 bits are the number of init section entries, high 16 bits are the number of term section entries
self.objc_module_info_addr = v_uint32() # the (__OBJC,__module_info) section
self.objc_module_info_size = v_uint32() # the (__OBJC,__module_info) section
class dylib_module_64(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.module_name = v_uint32() # the module name (index into string table)
self.iextdefsym = v_uint32() # index into externally defined symbols
self.nextdefsym = v_uint32() # number of externally defined symbols
self.irefsym = v_uint32() # index into reference symbol table
self.nrefsym = v_uint32() # number of reference symbol table entries
self.ilocalsym = v_uint32() # index into symbols for local symbols
self.nlocalsym = v_uint32() # number of local symbols
self.iextrel = v_uint32() # index into external relocation entries
self.nextrel = v_uint32() # number of external relocation entries
self.iinit_iterm = v_uint32() # low 16 bits are the index into the init section, high 16 bits are the index into the term section
self.ninit_nterm = v_uint32() # low 16 bits are the number of init section entries, high 16 bits are the number of term section entries
self.objc_module_info_size = v_uint32() # the (__OBJC,__module_info) section
self.objc_module_info_addr = v_uint64() # the (__OBJC,__module_info) section
class dylib_reference(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.flags = v_uint32() # flags to indicate the type of reference
class twolevel_hints_command(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.cmd = v_uint32() # LC_TWOLEVEL_HINTS
self.cmdsize = v_uint32() # sizeof(struct twolevel_hints_command)
self.offset = v_uint32() # offset to the hint table
self.nhints = v_uint32() # number of hints in the hint table
class twolevel_hint(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.itoc = v_uint32() # index into the table of contents
class prebind_cksum_command(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.cmd = v_uint32() # LC_PREBIND_CKSUM
self.cmdsize = v_uint32() # sizeof(struct prebind_cksum_command)
self.cksum = v_uint32() # the check sum or zero
class uuid_command(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.cmd = v_uint32() # LC_UUID
self.cmdsize = v_uint32() # sizeof(struct uuid_command)
        self.uuid = v_bytes(size=16) # the 128-bit uuid
class rpath_command(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.cmd = v_uint32() # LC_RPATH
self.cmdsize = v_uint32() # includes string
self.path = lc_str() # path to add to run path
class linkedit_data_command(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.cmd = v_uint32() # LC_CODE_SIGNATURE or LC_SEGMENT_SPLIT_INFO
self.cmdsize = v_uint32() # sizeof(struct linkedit_data_command)
self.dataoff = v_uint32() # file offset of data in __LINKEDIT segment
self.datasize = v_uint32() # file size of data in __LINKEDIT segment
class encryption_info_command(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.cmd = v_uint32() # LC_ENCRYPTION_INFO
self.cmdsize = v_uint32() # sizeof(struct encryption_info_command)
self.cryptoff = v_uint32() # file offset of encrypted range
self.cryptsize = v_uint32() # file size of encrypted range
self.cryptid = v_uint32() # which enryption system, 0 means not-encrypted yet
class symseg_command(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.cmd = v_uint32() # LC_SYMSEG
self.cmdsize = v_uint32() # sizeof(struct symseg_command)
self.offset = v_uint32() # symbol segment offset
self.size = v_uint32() # symbol segment size in bytes
class ident_command(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.cmd = v_uint32() # LC_IDENT
self.cmdsize = v_uint32() # strings that follow this command
class fvmfile_command(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.cmd = v_uint32() # LC_FVMFILE
self.cmdsize = v_uint32() # includes pathname string
self.name = lc_str() # files pathname
self.header_addr = v_uint32() # files virtual address
command_classes = {
LC_SEGMENT: segment_command,
LC_SYMTAB: symtab_command,
LC_LOAD_DYLIB: dylib_command,
}
def getCommandClass(cmdtype):
cls = command_classes.get(cmdtype)
if cls != None:
return cls
return load_command
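# --- Illustrative sketch (not part of the original module) -----------------
# Parsing the header of a (hypothetical) Mach-O blob held in `data`; the
# vsParse() override above re-parses as big-endian when it sees MH_CIGAM.
def _example_parse_header(data):
    hdr = mach_header()
    hdr.vsParse(data)
    return int(hdr.ncmds)  # number of load commands following the header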
|
qwefi/nova
|
refs/heads/master
|
nova/vnc/xvp_proxy.py
|
4
|
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Eventlet WSGI Services to proxy VNC for XCP protocol."""
import socket
import webob
import eventlet
import eventlet.green
import eventlet.greenio
import eventlet.wsgi
from oslo.config import cfg
from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova import context
from nova.openstack.common import log as logging
from nova import version
from nova import wsgi
LOG = logging.getLogger(__name__)
xvp_proxy_opts = [
cfg.IntOpt('xvpvncproxy_port',
default=6081,
help='Port that the XCP VNC proxy should bind to'),
cfg.StrOpt('xvpvncproxy_host',
default='0.0.0.0',
help='Address that the XCP VNC proxy should bind to'),
]
CONF = cfg.CONF
CONF.register_opts(xvp_proxy_opts)
class XCPVNCProxy(object):
"""Class to use the xvp auth protocol to proxy instance vnc consoles."""
def one_way_proxy(self, source, dest):
"""Proxy tcp connection from source to dest."""
while True:
try:
d = source.recv(32384)
except Exception:
d = None
# If recv fails, send a write shutdown the other direction
if d is None or len(d) == 0:
dest.shutdown(socket.SHUT_WR)
break
# If send fails, terminate proxy in both directions
try:
# sendall raises an exception on write error, unlike send
dest.sendall(d)
except Exception:
source.close()
dest.close()
break
def handshake(self, req, connect_info, sockets):
"""Execute hypervisor-specific vnc auth handshaking (if needed)."""
host = connect_info['host']
port = int(connect_info['port'])
server = eventlet.connect((host, port))
# Handshake as necessary
if connect_info.get('internal_access_path'):
server.sendall("CONNECT %s HTTP/1.1\r\n\r\n" %
connect_info['internal_access_path'])
data = ""
while True:
b = server.recv(1)
if b:
data += b
if data.find("\r\n\r\n") != -1:
if not data.split("\r\n")[0].find("200"):
LOG.audit(_("Error in handshake: %s"), data)
return
break
if not b or len(data) > 4096:
LOG.audit(_("Error in handshake: %s"), data)
return
client = req.environ['eventlet.input'].get_socket()
client.sendall("HTTP/1.1 200 OK\r\n\r\n")
sockets['client'] = client
sockets['server'] = server
def proxy_connection(self, req, connect_info, start_response):
"""Spawn bi-directional vnc proxy."""
sockets = {}
t0 = eventlet.spawn(self.handshake, req, connect_info, sockets)
t0.wait()
if not sockets.get('client') or not sockets.get('server'):
LOG.audit(_("Invalid request: %s"), req)
start_response('400 Invalid Request',
[('content-type', 'text/html')])
return "Invalid Request"
client = sockets['client']
server = sockets['server']
t1 = eventlet.spawn(self.one_way_proxy, client, server)
t2 = eventlet.spawn(self.one_way_proxy, server, client)
t1.wait()
t2.wait()
# Make sure our sockets are closed
server.close()
client.close()
def __call__(self, environ, start_response):
try:
req = webob.Request(environ)
LOG.audit(_("Request: %s"), req)
token = req.params.get('token')
if not token:
LOG.audit(_("Request made with missing token: %s"), req)
start_response('400 Invalid Request',
[('content-type', 'text/html')])
return "Invalid Request"
ctxt = context.get_admin_context()
api = consoleauth_rpcapi.ConsoleAuthAPI()
connect_info = api.check_token(ctxt, token)
if not connect_info:
LOG.audit(_("Request made with invalid token: %s"), req)
start_response('401 Not Authorized',
[('content-type', 'text/html')])
return "Not Authorized"
return self.proxy_connection(req, connect_info, start_response)
except Exception as e:
LOG.audit(_("Unexpected error: %s"), e)
class SafeHttpProtocol(eventlet.wsgi.HttpProtocol):
"""HttpProtocol wrapper to suppress IOErrors.
    The proxy code above always shuts down client connections, so we catch
    the IOError that is raised when the SocketServer tries to flush the
    connection.
"""
def finish(self):
try:
eventlet.green.BaseHTTPServer.BaseHTTPRequestHandler.finish(self)
except IOError:
pass
eventlet.greenio.shutdown_safe(self.connection)
self.connection.close()
def get_wsgi_server():
LOG.audit(_("Starting nova-xvpvncproxy node (version %s)"),
version.version_string_with_package())
return wsgi.Server("XCP VNC Proxy",
XCPVNCProxy(),
protocol=SafeHttpProtocol,
host=CONF.xvpvncproxy_host,
port=CONF.xvpvncproxy_port)
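# Illustrative wiring only (not part of the original module): nova's service
# launcher normally drives this server, but a minimal standalone run,
# assuming CONF has been fully initialized, might look like:
#
#     server = get_wsgi_server()
#     server.start()
#     server.wait()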
|
chauhanhardik/populo
|
refs/heads/master
|
common/djangoapps/student/migrations/0032_add_field_UserProfile_country_add_field_UserProfile_city.py
|
114
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'UserProfile.country'
db.add_column('auth_userprofile', 'country',
self.gf('django_countries.fields.CountryField')(max_length=2, null=True, blank=True),
keep_default=False)
# Adding field 'UserProfile.city'
db.add_column('auth_userprofile', 'city',
self.gf('django.db.models.fields.TextField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'UserProfile.country'
db.delete_column('auth_userprofile', 'country')
# Deleting field 'UserProfile.city'
db.delete_column('auth_userprofile', 'city')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'student.anonymoususerid': {
'Meta': {'object_name': 'AnonymousUserId'},
'anonymous_user_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.courseenrollment': {
'Meta': {'ordering': "('user', 'course_id')", 'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.courseenrollmentallowed': {
'Meta': {'unique_together': "(('email', 'course_id'),)", 'object_name': 'CourseEnrollmentAllowed'},
'auto_enroll': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'student.pendingemailchange': {
'Meta': {'object_name': 'PendingEmailChange'},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_email': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.pendingnamechange': {
'Meta': {'object_name': 'PendingNameChange'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'rationale': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.registration': {
'Meta': {'object_name': 'Registration', 'db_table': "'auth_registration'"},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.userprofile': {
'Meta': {'object_name': 'UserProfile', 'db_table': "'auth_userprofile'"},
'allow_certificate': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'city': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'courseware': ('django.db.models.fields.CharField', [], {'default': "'course.xml'", 'max_length': '255', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
'goals': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'level_of_education': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'mailing_address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'meta': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"}),
'year_of_birth': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'student.userstanding': {
'Meta': {'object_name': 'UserStanding'},
'account_status': ('django.db.models.fields.CharField', [], {'max_length': '31', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'standing_last_changed_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'standing'", 'unique': 'True', 'to': "orm['auth.User']"})
},
'student.usertestgroup': {
'Meta': {'object_name': 'UserTestGroup'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'db_index': 'True', 'symmetrical': 'False'})
}
}
complete_apps = ['student']
|
praveen-pal/edx-platform
|
refs/heads/master
|
cms/djangoapps/contentstore/course_info_model.py
|
9
|
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.modulestore import Location
from xmodule.modulestore.django import modulestore
from lxml import html, etree
import re
from django.http import HttpResponseBadRequest
import logging
import django.utils.html
# # TODO store as array of { date, content } and override course_info_module.definition_from_xml
# # This should be in a class which inherits from XmlDescriptor
log = logging.getLogger(__name__)
def get_course_updates(location):
"""
Retrieve the relevant course_info updates and unpack into the model which the client expects:
[{id : location.url() + idx to make unique, date : string, content : html string}]
"""
try:
course_updates = modulestore('direct').get_item(location)
except ItemNotFoundError:
modulestore('direct').create_and_save_xmodule(location)
course_updates = modulestore('direct').get_item(location)
# current db rep: {"_id" : locationjson, "definition" : { "data" : "<ol>[<li><h2>date</h2>content</li>]</ol>"} "metadata" : ignored}
location_base = course_updates.location.url()
# purely to handle free formed updates not done via editor. Actually kills them, but at least doesn't break.
try:
course_html_parsed = html.fromstring(course_updates.data)
    except Exception:
log.error("Cannot parse: " + course_updates.data)
escaped = django.utils.html.escape(course_updates.data)
course_html_parsed = html.fromstring("<ol><li>" + escaped + "</li></ol>")
# Confirm that root is <ol>, iterate over <li>, pull out <h2> subs and then rest of val
course_upd_collection = []
if course_html_parsed.tag == 'ol':
# 0 is the newest
for idx, update in enumerate(course_html_parsed):
if (len(update) == 0):
continue
elif (len(update) == 1):
# could enforce that update[0].tag == 'h2'
content = update[0].tail
else:
content = "\n".join([html.tostring(ele) for ele in update[1:]])
# make the id on the client be 1..len w/ 1 being the oldest and len being the newest
course_upd_collection.append({"id": location_base + "/" + str(len(course_html_parsed) - idx),
"date": update.findtext("h2"),
"content": content})
return course_upd_collection
def update_course_updates(location, update, passed_id=None):
"""
    Either add or update the given course update. It will add it if passed_id is absent or None, and update it if
    passed_id has a valid value. Until updates have distinct values, the passed_id is the location url + an index
    into the html structure.
"""
try:
course_updates = modulestore('direct').get_item(location)
except ItemNotFoundError:
return HttpResponseBadRequest()
# purely to handle free formed updates not done via editor. Actually kills them, but at least doesn't break.
try:
course_html_parsed = html.fromstring(course_updates.data)
    except Exception:
log.error("Cannot parse: " + course_updates.data)
escaped = django.utils.html.escape(course_updates.data)
course_html_parsed = html.fromstring("<ol><li>" + escaped + "</li></ol>")
# if there's no ol, create it
if course_html_parsed.tag != 'ol':
# surround whatever's there w/ an ol
if course_html_parsed.tag != 'li':
# but first wrap in an li
li = etree.Element('li')
li.append(course_html_parsed)
course_html_parsed = li
ol = etree.Element('ol')
ol.append(course_html_parsed)
course_html_parsed = ol
# No try/catch b/c failure generates an error back to client
new_html_parsed = html.fromstring('<li><h2>' + update['date'] + '</h2>' + update['content'] + '</li>')
# ??? Should this use the id in the json or in the url or does it matter?
if passed_id is not None:
idx = get_idx(passed_id)
# idx is count from end of list
course_html_parsed[-idx] = new_html_parsed
else:
course_html_parsed.insert(0, new_html_parsed)
idx = len(course_html_parsed)
passed_id = course_updates.location.url() + "/" + str(idx)
# update db record
course_updates.data = html.tostring(course_html_parsed)
modulestore('direct').update_item(location, course_updates.data)
if (len(new_html_parsed) == 1):
content = new_html_parsed[0].tail
else:
content = "\n".join([html.tostring(ele) for ele in new_html_parsed[1:]])
return {"id": passed_id,
"date": update['date'],
"content": content}
def delete_course_update(location, update, passed_id):
"""
Delete the given course_info update from the db.
Returns the resulting course_updates b/c their ids change.
"""
if not passed_id:
return HttpResponseBadRequest()
try:
course_updates = modulestore('direct').get_item(location)
except ItemNotFoundError:
return HttpResponseBadRequest()
# TODO use delete_blank_text parser throughout and cache as a static var in a class
# purely to handle free formed updates not done via editor. Actually kills them, but at least doesn't break.
try:
course_html_parsed = html.fromstring(course_updates.data)
    except Exception:
log.error("Cannot parse: " + course_updates.data)
escaped = django.utils.html.escape(course_updates.data)
course_html_parsed = html.fromstring("<ol><li>" + escaped + "</li></ol>")
if course_html_parsed.tag == 'ol':
# ??? Should this use the id in the json or in the url or does it matter?
idx = get_idx(passed_id)
# idx is count from end of list
element_to_delete = course_html_parsed[-idx]
if element_to_delete is not None:
course_html_parsed.remove(element_to_delete)
# update db record
course_updates.data = html.tostring(course_html_parsed)
store = modulestore('direct')
store.update_item(location, course_updates.data)
return get_course_updates(location)
def get_idx(passed_id):
"""
From the url w/ idx appended, get the idx.
"""
idx_matcher = re.search(r'.*?/?(\d+)$', passed_id)
if idx_matcher:
return int(idx_matcher.group(1))
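# Illustrative behaviour of get_idx (id values are hypothetical): for an id
# like 'i4x://org/course/course_info/updates/3' the regex captures the
# trailing digits and get_idx returns 3; a bare '3' also returns 3 because
# the leading path and slash are optional in the pattern.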
|
diagramsoftware/odoo
|
refs/heads/8.0
|
addons/l10n_be_coda/__openerp__.py
|
260
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Belgium - Import Bank CODA Statements',
'version': '2.1',
'author': 'Noviat',
'category': 'Accounting & Finance',
'description': '''
Module to import CODA bank statements.
======================================
Supported are CODA flat files in V2 format from Belgian bank accounts.
----------------------------------------------------------------------
* CODA v1 support.
* CODA v2.2 support.
* Foreign Currency support.
* Support for all data record types (0, 1, 2, 3, 4, 8, 9).
* Parsing & logging of all Transaction Codes and Structured Format
Communications.
* Automatic Financial Journal assignment via CODA configuration parameters.
* Support for multiple Journals per Bank Account Number.
* Support for multiple statements from different bank accounts in a single
CODA file.
* Support for 'parsing only' CODA Bank Accounts (defined as type='info' in
the CODA Bank Account configuration records).
* Multi-language CODA parsing, parsing configuration data provided for EN,
NL, FR.
The machine-readable CODA files are parsed and stored in human-readable form in
CODA Bank Statements. Bank Statements are also generated, containing the subset
of the CODA information (only those transaction lines that are required for the
creation of the Financial Accounting records). The CODA Bank Statement is a
'read-only' object and hence remains a reliable representation of the original
CODA file, whereas the Bank Statement is modified as required by accounting
business processes.
CODA Bank Accounts configured as type 'Info' will only generate CODA Bank Statements.
Removing one object in the CODA processing chain also removes the associated
objects; removing a CODA file that contains multiple Bank Statements will
remove those statements as well.
Instead of manually adjusting the generated Bank Statements, you can also
re-import the CODA after adding the missing information to the OpenERP
database, so that automatic reconciliation can succeed.
Remark on CODA V1 support:
~~~~~~~~~~~~~~~~~~~~~~~~~~
In some cases a transaction code, transaction category or structured
communication code has been given a new or clearer description in CODA V2.The
description provided by the CODA configuration tables is based upon the CODA
V2.2 specifications.
If required, you can manually adjust the descriptions via the CODA configuration menu.
''',
'depends': ['account_voucher', 'base_iban', 'l10n_be_invoice_bba'],
'demo': ['l10n_be_coda_demo.xml'],
'data': [
'l10n_be_coda_wizard.xml',
'l10n_be_coda_view.xml',
],
'auto_install': False,
'website': 'https://www.odoo.com/page/accounting',
'installable': True,
'license': 'AGPL-3',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
pombredanne/pulp
|
refs/heads/master
|
server/pulp/server/db/manage.py
|
6
|
"""
This module's main() function becomes the pulp-manage-db.py script.
"""
from gettext import gettext as _
from optparse import OptionParser
import logging
import os
import sys
import traceback
from pulp.plugins.loader.api import load_content_types
from pulp.plugins.loader.manager import PluginManager
from pulp.server import logs
from pulp.server.db import connection
from pulp.server.db.migrate import models
from pulp.server.db import model
from pulp.server.managers import factory
from pulp.server.managers.auth.role.cud import RoleManager, SUPER_USER_ROLE
from pulp.server.managers.auth.user.cud import UserManager
os.environ['DJANGO_SETTINGS_MODULE'] = 'pulp.server.webservices.settings'
_logger = None
class DataError(Exception):
"""
This Exception is used when we want to return the os.EX_DATAERR code.
"""
pass
class UnperformedMigrationException(Exception):
"""
    This exception is raised when there are unperformed migrations.
"""
pass
def parse_args():
"""
Parse the command line arguments into the flags that we accept. Returns the parsed options.
"""
parser = OptionParser()
parser.add_option('--test', action='store_true', dest='test',
default=False,
help=_('Run migration, but do not update version'))
parser.add_option('--dry-run', action='store_true', dest='dry_run', default=False,
help=_('Perform a dry run with no changes made. Returns 1 if there are '
'migrations to apply.'))
options, args = parser.parse_args()
if args:
parser.error(_('Unknown arguments: %s') % ', '.join(args))
return options
def migrate_database(options):
"""
Perform the migrations for each migration package found in pulp.server.db.migrations.
:param options: The command line parameters from the user
"""
migration_packages = models.get_migration_packages()
unperformed_migrations = False
for migration_package in migration_packages:
if migration_package.current_version > migration_package.latest_available_version:
msg = _('The database for migration package %(p)s is at version %(v)s, which is larger '
'than the latest version available, %(a)s.')
msg = msg % ({'p': migration_package.name, 'v': migration_package.current_version,
'a': migration_package.latest_available_version})
raise DataError(msg)
if migration_package.current_version == migration_package.latest_available_version:
message = _('Migration package %(p)s is up to date at version %(v)s')
message = message % {'p': migration_package.name,
'v': migration_package.latest_available_version}
_logger.info(message)
continue
try:
for migration in migration_package.unapplied_migrations:
message = _('Applying %(p)s version %(v)s')
message = message % {'p': migration_package.name, 'v': migration.version}
_logger.info(message)
if options.dry_run:
unperformed_migrations = True
message = _('Would have applied migration to %(p)s version %(v)s')
message = message % {'p': migration_package.name, 'v': migration.version}
else:
# We pass in !options.test to stop the apply_migration method from updating the
# package's current version when the --test flag is set
migration_package.apply_migration(migration,
update_current_version=not options.test)
message = _('Migration to %(p)s version %(v)s complete.')
message = message % {'p': migration_package.name,
'v': migration_package.current_version}
_logger.info(message)
except Exception:
# Log the error and what migration failed before allowing main() to handle the exception
error_message = _('Applying migration %(m)s failed.\n\nHalting migrations due to a '
'migration failure.')
error_message = error_message % {'m': migration.name}
_logger.critical(error_message)
raise
if not options.dry_run:
ensure_database_indexes()
if options.dry_run and unperformed_migrations:
raise UnperformedMigrationException
def ensure_database_indexes():
"""
Ensure that the minimal required indexes have been created for all collections.
    Gratuitously create indexes for the MongoEngine based models if they do not already exist.
"""
model.RepositoryContentUnit.ensure_indexes()
model.Repository.ensure_indexes()
model.ReservedResource.ensure_indexes()
model.TaskStatus.ensure_indexes()
model.Worker.ensure_indexes()
model.CeleryBeatLock.ensure_indexes()
    # Load all the model classes that the server knows about and ensure their indexes as well
plugin_manager = PluginManager()
for model_class in plugin_manager.unit_models.itervalues():
model_class.ensure_indexes()
def main():
"""
This is the high level entry method. It does logging if any Exceptions are raised.
"""
if os.getuid() == 0:
print >> sys.stderr, _('This must not be run as root, but as the same user apache runs as.')
return os.EX_USAGE
try:
options = parse_args()
_start_logging()
connection.initialize(max_timeout=1)
return _auto_manage_db(options)
except UnperformedMigrationException:
return 1
except DataError, e:
_logger.critical(str(e))
_logger.critical(''.join(traceback.format_exception(*sys.exc_info())))
return os.EX_DATAERR
except Exception, e:
_logger.critical(str(e))
_logger.critical(''.join(traceback.format_exception(*sys.exc_info())))
return os.EX_SOFTWARE
def _auto_manage_db(options):
"""
Find and apply all available database migrations, and install or update all available content
types.
:param options: The command line parameters from the user.
"""
unperformed_migrations = False
message = _('Loading content types.')
_logger.info(message)
# Note that if dry_run is False, None is always returned
old_content_types = load_content_types(dry_run=options.dry_run)
if old_content_types:
for content_type in old_content_types:
message = _(
'Would have created or updated the following type definition: ' + content_type.id)
_logger.info(message)
message = _('Content types loaded.')
_logger.info(message)
message = _('Ensuring the admin role and user are in place.')
_logger.info(message)
# Due to the silliness of the factory, we have to initialize it because the UserManager and
# RoleManager are going to try to use it.
factory.initialize()
role_manager = RoleManager()
if options.dry_run:
if not role_manager.get_role(SUPER_USER_ROLE):
unperformed_migrations = True
message = _('Would have created the admin role.')
_logger.info(message)
else:
role_manager.ensure_super_user_role()
user_manager = UserManager()
if options.dry_run:
if not user_manager.get_admins():
unperformed_migrations = True
message = _('Would have created the default admin user.')
_logger.info(message)
else:
user_manager.ensure_admin()
message = _('Admin role and user are in place.')
_logger.info(message)
message = _('Beginning database migrations.')
_logger.info(message)
migrate_database(options)
message = _('Database migrations complete.')
_logger.info(message)
if unperformed_migrations:
return 1
return os.EX_OK
def _start_logging():
"""
Call into Pulp to get the logging started, and set up the _logger to be used in this module.
"""
global _logger
logs.start_logging()
_logger = logging.getLogger(__name__)
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
_logger.root.addHandler(console_handler)
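# The pulp-manage-db script generated at install time is expected to be a
# thin wrapper around main(); an illustrative (hypothetical) equivalent:
#
#     if __name__ == '__main__':
#         sys.exit(main())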
|
outlierbio/ob-pipelines
|
refs/heads/master
|
ob_pipelines/apps/multiqc/test_aws_batch.py
|
1
|
import os.path as op
import ob_pipelines
from ob_pipelines.batch import BatchClient
module_dir = op.dirname(ob_pipelines.__file__) # __file__ is __init__.py, have to get parent folder
bc = BatchClient() # thin wrapper for boto3.client('batch')
# assuming job definition is already registered
parameters = {
'analysis_dir': 's3://com-dnli-ngs/test/1e4/'
}
job_id = bc.submit_job('multiqc', parameters)
bc.get_job_status(job_id)
bc.wait_on_job(job_id)
|
nadley/Sick-Beard
|
refs/heads/development
|
sickbeard/search_queue.py
|
29
|
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import datetime
import time
import sickbeard
from sickbeard import db, logger, common, exceptions, helpers
from sickbeard import generic_queue
from sickbeard import search
from sickbeard import ui
BACKLOG_SEARCH = 10
RSS_SEARCH = 20
MANUAL_SEARCH = 30
class SearchQueue(generic_queue.GenericQueue):
def __init__(self):
generic_queue.GenericQueue.__init__(self)
self.queue_name = "SEARCHQUEUE"
def is_in_queue(self, show, segment):
for cur_item in self.queue:
if isinstance(cur_item, BacklogQueueItem) and cur_item.show == show and cur_item.segment == segment:
return True
return False
def is_ep_in_queue(self, ep_obj):
for cur_item in self.queue:
if isinstance(cur_item, ManualSearchQueueItem) and cur_item.ep_obj == ep_obj:
return True
return False
def pause_backlog(self):
self.min_priority = generic_queue.QueuePriorities.HIGH
def unpause_backlog(self):
self.min_priority = 0
def is_backlog_paused(self):
# backlog priorities are NORMAL, this should be done properly somewhere
return self.min_priority >= generic_queue.QueuePriorities.NORMAL
def is_backlog_in_progress(self):
for cur_item in self.queue + [self.currentItem]:
if isinstance(cur_item, BacklogQueueItem):
return True
return False
def add_item(self, item):
if isinstance(item, RSSSearchQueueItem):
generic_queue.GenericQueue.add_item(self, item)
# don't do duplicates
elif isinstance(item, BacklogQueueItem) and not self.is_in_queue(item.show, item.segment):
generic_queue.GenericQueue.add_item(self, item)
elif isinstance(item, ManualSearchQueueItem) and not self.is_ep_in_queue(item.ep_obj):
generic_queue.GenericQueue.add_item(self, item)
else:
logger.log(u"Not adding item, it's already in the queue", logger.DEBUG)
class ManualSearchQueueItem(generic_queue.QueueItem):
def __init__(self, ep_obj):
generic_queue.QueueItem.__init__(self, 'Manual Search', MANUAL_SEARCH)
self.priority = generic_queue.QueuePriorities.HIGH
self.ep_obj = ep_obj
self.success = None
def execute(self):
generic_queue.QueueItem.execute(self)
logger.log("Searching for download for " + self.ep_obj.prettyName())
foundEpisode = search.findEpisode(self.ep_obj, manualSearch=True)
result = False
if not foundEpisode:
ui.notifications.message('No downloads were found', "Couldn't find a download for <i>%s</i>" % self.ep_obj.prettyName())
logger.log(u"Unable to find a download for "+self.ep_obj.prettyName())
else:
# just use the first result for now
logger.log(u"Downloading episode from " + foundEpisode.url)
result = search.snatchEpisode(foundEpisode)
providerModule = foundEpisode.provider
if not result:
ui.notifications.error('Error while attempting to snatch '+foundEpisode.name+', check your logs')
            elif providerModule is None:
ui.notifications.error('Provider is configured incorrectly, unable to download')
self.success = result
def finish(self):
# don't let this linger if something goes wrong
        if self.success is None:
self.success = False
generic_queue.QueueItem.finish(self)
class RSSSearchQueueItem(generic_queue.QueueItem):
def __init__(self):
generic_queue.QueueItem.__init__(self, 'RSS Search', RSS_SEARCH)
def execute(self):
generic_queue.QueueItem.execute(self)
self._changeMissingEpisodes()
logger.log(u"Beginning search for new episodes on RSS")
foundResults = search.searchForNeededEpisodes()
if not len(foundResults):
logger.log(u"No needed episodes found on the RSS feeds")
else:
for curResult in foundResults:
search.snatchEpisode(curResult)
time.sleep(2)
generic_queue.QueueItem.finish(self)
def _changeMissingEpisodes(self):
logger.log(u"Changing all old missing episodes to status WANTED")
curDate = datetime.date.today().toordinal()
myDB = db.DBConnection()
sqlResults = myDB.select("SELECT * FROM tv_episodes WHERE status = ? AND airdate < ?", [common.UNAIRED, curDate])
for sqlEp in sqlResults:
try:
show = helpers.findCertainShow(sickbeard.showList, int(sqlEp["showid"]))
except exceptions.MultipleShowObjectsException:
logger.log(u"ERROR: expected to find a single show matching " + sqlEp["showid"])
return None
            if show is None:
logger.log(u"Unable to find the show with ID "+str(sqlEp["showid"])+" in your show list! DB value was "+str(sqlEp), logger.ERROR)
return None
ep = show.getEpisode(sqlEp["season"], sqlEp["episode"])
with ep.lock:
if ep.show.paused:
ep.status = common.SKIPPED
else:
ep.status = common.WANTED
ep.saveToDB()
class BacklogQueueItem(generic_queue.QueueItem):
def __init__(self, show, segment):
generic_queue.QueueItem.__init__(self, 'Backlog', BACKLOG_SEARCH)
self.priority = generic_queue.QueuePriorities.LOW
self.thread_name = 'BACKLOG-'+str(show.tvdbid)
self.show = show
self.segment = segment
logger.log(u"Seeing if we need any episodes from "+self.show.name+" season "+str(self.segment))
myDB = db.DBConnection()
# see if there is anything in this season worth searching for
if not self.show.air_by_date:
statusResults = myDB.select("SELECT status FROM tv_episodes WHERE showid = ? AND season = ?", [self.show.tvdbid, self.segment])
else:
segment_year, segment_month = map(int, self.segment.split('-'))
min_date = datetime.date(segment_year, segment_month, 1)
# it's easier to just hard code this than to worry about rolling the year over or making a month length map
if segment_month == 12:
max_date = datetime.date(segment_year, 12, 31)
else:
max_date = datetime.date(segment_year, segment_month+1, 1) - datetime.timedelta(days=1)
statusResults = myDB.select("SELECT status FROM tv_episodes WHERE showid = ? AND airdate >= ? AND airdate <= ?",
[self.show.tvdbid, min_date.toordinal(), max_date.toordinal()])
anyQualities, bestQualities = common.Quality.splitQuality(self.show.quality) #@UnusedVariable
self.wantSeason = self._need_any_episodes(statusResults, bestQualities)
def execute(self):
generic_queue.QueueItem.execute(self)
results = search.findSeason(self.show, self.segment)
# download whatever we find
if results:
for curResult in results:
search.snatchEpisode(curResult)
time.sleep(5)
self.finish()
def _need_any_episodes(self, statusResults, bestQualities):
wantSeason = False
# check through the list of statuses to see if we want any
for curStatusResult in statusResults:
curCompositeStatus = int(curStatusResult["status"])
curStatus, curQuality = common.Quality.splitCompositeStatus(curCompositeStatus)
if bestQualities:
highestBestQuality = max(bestQualities)
else:
highestBestQuality = 0
# if we need a better one then say yes
if (curStatus in (common.DOWNLOADED, common.SNATCHED, common.SNATCHED_PROPER, common.SNATCHED_FRENCH) and curQuality < highestBestQuality) or curStatus == common.WANTED:
wantSeason = True
break
return wantSeason
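# Illustrative wiring (assumptions: `ep` is a TVEpisode object and `show` a
# TVShow object from sickbeard; the segment value is made up):
#
#     queue = SearchQueue()
#     queue.add_item(ManualSearchQueueItem(ep))
#     queue.add_item(BacklogQueueItem(show, 1))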
|
dchaplinsky/pep.org.ua
|
refs/heads/master
|
pepdb/core/migrations/0093_declaration_nacp_declaration.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0092_auto_20160804_1555'),
]
operations = [
migrations.AddField(
model_name='declaration',
name='nacp_declaration',
field=models.BooleanField(default=False, db_index=True, verbose_name='\u0414\u0435\u043a\u043b\u0430\u0440\u0430\u0446\u0456\u044f \u041d\u0410\u0417\u041a'),
),
]
|
kmatzen/ansible
|
refs/heads/devel
|
lib/ansible/plugins/lookup/indexed_items.py
|
127
|
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
def __init__(self, basedir=None, **kwargs):
self.basedir = basedir
def run(self, terms, variables, **kwargs):
if not isinstance(terms, list):
raise AnsibleError("with_indexed_items expects a list")
items = self._flatten(terms)
return zip(range(len(items)), items)
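# Illustrative playbook usage: with_indexed_items yields (index, item)
# pairs, so item.0 is the position and item.1 the value:
#
#     - debug: msg="{{ item.0 }} is {{ item.1 }}"
#       with_indexed_items: "{{ some_list }}"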
|
Lekanich/intellij-community
|
refs/heads/master
|
python/testData/mover/emptyLine_afterDown.py
|
83
|
if True:
a = 1
b = 2
|
ledtvavs/repository.ledtv
|
refs/heads/master
|
script.mrknow.urlresolver/lib/urlresolver9/plugins/grifthost.py
|
4
|
"""
grifthost urlresolver plugin
Copyright (C) 2015 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urllib
from lib import helpers
from lib import jsunpack
from urlresolver9 import common
from urlresolver9.resolver import UrlResolver, ResolverError
class GrifthostResolver(UrlResolver):
name = "grifthost"
domains = ["grifthost.com"]
    pattern = r'(?://|\.)(grifthost\.com)/(?:embed-)?([0-9a-zA-Z/]+)'
def __init__(self):
self.net = common.Net()
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
headers = {'User-Agent': common.FF_USER_AGENT}
response = self.net.http_GET(web_url, headers=headers)
html = response.content
data = helpers.get_hidden(html)
headers['Cookie'] = response.get_headers(as_dict=True).get('Set-Cookie', '')
html = self.net.http_POST(web_url, headers=headers, form_data=data).content
sources = helpers.scrape_sources(html)
return helpers.pick_source(sources) + helpers.append_headers(headers)
def get_url(self, host, media_id):
return 'http://grifthost.com/%s' % (media_id)
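# Illustrative call through the resolver framework (the media id here is
# made up):
#
#     resolver = GrifthostResolver()
#     stream = resolver.get_media_url('grifthost.com', 'abc123')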
|
woest85/PokemonGo-Map-1
|
refs/heads/develop
|
pogom/fakePogoApi.py
|
15
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
import struct
from time import time
from .utils import get_args
class FakePogoApi:
def __init__(self, mock):
# Fake a 24 hour auth token
self._auth_provider = type('', (object,), {"_ticket_expire": (time() + (3600 * 24)) * 1000})()
self.inited = False
self.mock = mock
def set_proxy(self, proxy_config):
pass
def activate_signature(self, library):
pass
    def set_position(self, lat, lng, alt):
        if not self.inited:
            args = get_args()
            # meters radius (very, very rough approximation -- deal with it)
            radius = 140 * args.step_limit
            requests.get('{}/login/{}/{}/{}'.format(self.mock, lat, lng, radius))
self.inited = True
def set_authentication(self, provider=None, oauth2_refresh_token=None, username=None, password=None):
pass
def i2f(self, i):
return struct.unpack('<d', struct.pack('<Q', i))[0]
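    # i2f reinterprets a 64-bit integer bit pattern as an IEEE-754 double;
    # an illustrative round-trip: i2f(4631107791820423168) == 42.0, since
    # 0x4045000000000000 is the bit pattern of 42.0.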
def get_map_objects(self, latitude=None, longitude=None, since_timestamp_ms=None, cell_id=None):
location = (self.i2f(latitude), self.i2f(longitude))
response = requests.get('{}/scan/{}/{}'.format(self.mock, *location))
return response.json()
|
midonet/python-midonetclient
|
refs/heads/master
|
src/midonetclient/neutron/bgp.py
|
2
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2014 Midokura Europe SARL, All Rights Reserved.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from midonetclient import url_provider
from midonetclient import util
from midonetclient import vendor_media_type as mt
LOG = logging.getLogger(__name__)
class BgpUrlProviderMixin(url_provider.UrlProviderMixin):
"""BGP URL provider mixin
This mixin provides URLs for BGP
"""
def bgp_url(self, bgp_id):
return self.template_url("bgpTemplate", bgp_id)
def bgps_url(self):
return self.resource_url("bgps")
def adroute_url(self, adroute_id):
return self.template_url("adRouteTemplate", adroute_id)
def adroutes_url(self, bgp_id):
return self.bgp_url(bgp_id) + "/ad_routes"
class BgpClientMixin(BgpUrlProviderMixin):
"""Bgp mixin
Mixin that defines all the Neutron bgp operations in MidoNet API.
"""
@util.convert_case
def create_bgp(self, bgp):
LOG.info("create_bgp %r", bgp)
return self.client.post(self.bgps_url(),
mt.APPLICATION_BGP_JSON, body=bgp)
def delete_bgp(self, rtr_id):
LOG.info("delete_bgp %r", rtr_id)
self.client.delete(self.bgp_url(rtr_id))
@util.convert_case
def get_bgp(self, rtr_id, fields=None):
LOG.info("get_bgp %r", rtr_id)
return self.client.get(self.bgp_url(rtr_id), mt.APPLICATION_BGP_JSON)
@util.convert_case
def get_bgps(self, filters=None, fields=None, sorts=None, limit=None,
marker=None, page_reverse=False):
LOG.info("get_bgps")
return self.client.get(self.bgps_url(),
mt.APPLICATION_BGP_COLLECTION_JSON)
@util.convert_case
def update_bgp(self, rtr_id, bgp):
LOG.info("update_bgp %r", bgp)
return self.client.put(self.bgp_url(rtr_id),
mt.APPLICATION_BGP_JSON, bgp)
@util.convert_case
def create_adroute(self, adroute):
LOG.info("create_adroute %r", adroute)
# convert_case converted to camel
return self.client.post(self.adroutes_url(adroute["bgpId"]),
mt.APPLICATION_AD_ROUTE_JSON, body=adroute)
def delete_adroute(self, adroute_id):
LOG.info("delete_adroute %r", adroute_id)
self.client.delete(self.adroute_url(adroute_id))
@util.convert_case
def get_adroute(self, adroute_id):
LOG.info("get_adroute %r", adroute_id)
return self.client.get(self.adroute_url(adroute_id),
mt.APPLICATION_AD_ROUTE_JSON)
@util.convert_case
def get_adroutes(self, bgp_id):
LOG.info("get_adroutes %r", bgp_id)
return self.client.get(self.adroutes_url(bgp_id),
mt.APPLICATION_AD_ROUTE_COLLECTION_JSON)
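# Illustrative call pattern (assumptions: these mixins are combined into a
# client object exposing self.client, and the bgp field names shown are
# made up for the example):
#
#     bgp = api.create_bgp({'local_as': 65000, 'peer_as': 65001})
#     routes = api.get_adroutes(bgp['id'])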
|
tdhopper/scikit-learn
|
refs/heads/master
|
sklearn/metrics/cluster/tests/test_unsupervised.py
|
230
|
import numpy as np
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn.metrics.cluster.unsupervised import silhouette_score
from sklearn.metrics import pairwise_distances
from sklearn.utils.testing import assert_false, assert_almost_equal
from sklearn.utils.testing import assert_raises_regexp
def test_silhouette():
# Tests the Silhouette Coefficient.
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
D = pairwise_distances(X, metric='euclidean')
# Given that the actual labels are used, we can assume that S would be
# positive.
silhouette = silhouette_score(D, y, metric='precomputed')
assert(silhouette > 0)
# Test without calculating D
silhouette_metric = silhouette_score(X, y, metric='euclidean')
assert_almost_equal(silhouette, silhouette_metric)
# Test with sampling
silhouette = silhouette_score(D, y, metric='precomputed',
sample_size=int(X.shape[0] / 2),
random_state=0)
silhouette_metric = silhouette_score(X, y, metric='euclidean',
sample_size=int(X.shape[0] / 2),
random_state=0)
assert(silhouette > 0)
assert(silhouette_metric > 0)
assert_almost_equal(silhouette_metric, silhouette)
# Test with sparse X
X_sparse = csr_matrix(X)
D = pairwise_distances(X_sparse, metric='euclidean')
silhouette = silhouette_score(D, y, metric='precomputed')
assert(silhouette > 0)
def test_no_nan():
# Assert Silhouette Coefficient != nan when there is 1 sample in a class.
# This tests for the condition that caused issue 960.
# Note that there is only one sample in cluster 0. This used to cause the
# silhouette_score to return nan (see bug #960).
labels = np.array([1, 0, 1, 1, 1])
# The distance matrix doesn't actually matter.
D = np.random.RandomState(0).rand(len(labels), len(labels))
silhouette = silhouette_score(D, labels, metric='precomputed')
assert_false(np.isnan(silhouette))
def test_correct_labelsize():
# Assert 1 < n_labels < n_samples
dataset = datasets.load_iris()
X = dataset.data
# n_labels = n_samples
y = np.arange(X.shape[0])
    assert_raises_regexp(ValueError,
                         r'Number of labels is %d\. Valid values are 2 '
                         r'to n_samples - 1 \(inclusive\)' % len(np.unique(y)),
                         silhouette_score, X, y)
# n_labels = 1
y = np.zeros(X.shape[0])
    assert_raises_regexp(ValueError,
                         r'Number of labels is %d\. Valid values are 2 '
                         r'to n_samples - 1 \(inclusive\)' % len(np.unique(y)),
                         silhouette_score, X, y)
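# Quick illustrative sanity check (not part of this test-suite): two tight,
# well-separated clusters score close to 1, e.g.
#
#     X = np.array([[0.0], [0.1], [10.0], [10.1]])
#     silhouette_score(X, np.array([0, 0, 1, 1]))  # ~0.99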
|
jagguli/intellij-community
|
refs/heads/master
|
python/lib/Lib/site-packages/django/db/models/base.py
|
71
|
import types
import sys
from itertools import izip
import django.db.models.manager # Imported to register signal handler.
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned, FieldError, ValidationError, NON_FIELD_ERRORS
from django.core import validators
from django.db.models.fields import AutoField, FieldDoesNotExist
from django.db.models.fields.related import (OneToOneRel, ManyToOneRel,
OneToOneField, add_lazy_relation)
from django.db.models.query import Q
from django.db.models.query_utils import DeferredAttribute
from django.db.models.deletion import Collector
from django.db.models.options import Options
from django.db import (connections, router, transaction, DatabaseError,
DEFAULT_DB_ALIAS)
from django.db.models import signals
from django.db.models.loading import register_models, get_model
from django.utils.translation import ugettext_lazy as _
import django.utils.copycompat as copy
from django.utils.functional import curry, update_wrapper
from django.utils.encoding import smart_str, force_unicode
from django.utils.text import get_text_list, capfirst
from django.conf import settings
class ModelBase(type):
"""
Metaclass for all models.
"""
def __new__(cls, name, bases, attrs):
super_new = super(ModelBase, cls).__new__
parents = [b for b in bases if isinstance(b, ModelBase)]
if not parents:
# If this isn't a subclass of Model, don't do anything special.
return super_new(cls, name, bases, attrs)
# Create the class.
module = attrs.pop('__module__')
new_class = super_new(cls, name, bases, {'__module__': module})
attr_meta = attrs.pop('Meta', None)
abstract = getattr(attr_meta, 'abstract', False)
if not attr_meta:
meta = getattr(new_class, 'Meta', None)
else:
meta = attr_meta
base_meta = getattr(new_class, '_meta', None)
if getattr(meta, 'app_label', None) is None:
# Figure out the app_label by looking one level up.
# For 'django.contrib.sites.models', this would be 'sites'.
model_module = sys.modules[new_class.__module__]
kwargs = {"app_label": model_module.__name__.split('.')[-2]}
else:
kwargs = {}
new_class.add_to_class('_meta', Options(meta, **kwargs))
if not abstract:
new_class.add_to_class('DoesNotExist', subclass_exception('DoesNotExist',
tuple(x.DoesNotExist
for x in parents if hasattr(x, '_meta') and not x._meta.abstract)
or (ObjectDoesNotExist,), module))
new_class.add_to_class('MultipleObjectsReturned', subclass_exception('MultipleObjectsReturned',
tuple(x.MultipleObjectsReturned
for x in parents if hasattr(x, '_meta') and not x._meta.abstract)
or (MultipleObjectsReturned,), module))
if base_meta and not base_meta.abstract:
# Non-abstract child classes inherit some attributes from their
# non-abstract parent (unless an ABC comes before it in the
# method resolution order).
if not hasattr(meta, 'ordering'):
new_class._meta.ordering = base_meta.ordering
if not hasattr(meta, 'get_latest_by'):
new_class._meta.get_latest_by = base_meta.get_latest_by
is_proxy = new_class._meta.proxy
if getattr(new_class, '_default_manager', None):
if not is_proxy:
# Multi-table inheritance doesn't inherit default manager from
# parents.
new_class._default_manager = None
new_class._base_manager = None
else:
# Proxy classes do inherit parent's default manager, if none is
# set explicitly.
new_class._default_manager = new_class._default_manager._copy_to_model(new_class)
new_class._base_manager = new_class._base_manager._copy_to_model(new_class)
# Bail out early if we have already created this class.
m = get_model(new_class._meta.app_label, name, False)
if m is not None:
return m
# Add all attributes to the class.
for obj_name, obj in attrs.items():
new_class.add_to_class(obj_name, obj)
# All the fields of any type declared on this model
new_fields = new_class._meta.local_fields + \
new_class._meta.local_many_to_many + \
new_class._meta.virtual_fields
field_names = set([f.name for f in new_fields])
# Basic setup for proxy models.
if is_proxy:
base = None
for parent in [cls for cls in parents if hasattr(cls, '_meta')]:
if parent._meta.abstract:
if parent._meta.fields:
raise TypeError("Abstract base class containing model fields not permitted for proxy model '%s'." % name)
else:
continue
if base is not None:
raise TypeError("Proxy model '%s' has more than one non-abstract model base class." % name)
else:
base = parent
if base is None:
raise TypeError("Proxy model '%s' has no non-abstract model base class." % name)
if (new_class._meta.local_fields or
new_class._meta.local_many_to_many):
raise FieldError("Proxy model '%s' contains model fields." % name)
while base._meta.proxy:
base = base._meta.proxy_for_model
new_class._meta.setup_proxy(base)
# Do the appropriate setup for any model parents.
o2o_map = dict([(f.rel.to, f) for f in new_class._meta.local_fields
if isinstance(f, OneToOneField)])
for base in parents:
original_base = base
if not hasattr(base, '_meta'):
# Things without _meta aren't functional models, so they're
# uninteresting parents.
continue
parent_fields = base._meta.local_fields + base._meta.local_many_to_many
# Check for clashes between locally declared fields and those
# on the base classes (we cannot handle shadowed fields at the
# moment).
for field in parent_fields:
if field.name in field_names:
raise FieldError('Local field %r in class %r clashes '
'with field of similar name from '
'base class %r' %
(field.name, name, base.__name__))
if not base._meta.abstract:
# Concrete classes...
while base._meta.proxy:
# Skip over a proxy class to the "real" base it proxies.
base = base._meta.proxy_for_model
if base in o2o_map:
field = o2o_map[base]
elif not is_proxy:
attr_name = '%s_ptr' % base._meta.module_name
field = OneToOneField(base, name=attr_name,
auto_created=True, parent_link=True)
new_class.add_to_class(attr_name, field)
else:
field = None
new_class._meta.parents[base] = field
else:
# .. and abstract ones.
for field in parent_fields:
new_class.add_to_class(field.name, copy.deepcopy(field))
# Pass any non-abstract parent classes onto child.
new_class._meta.parents.update(base._meta.parents)
# Inherit managers from the abstract base classes.
new_class.copy_managers(base._meta.abstract_managers)
# Proxy models inherit the non-abstract managers from their base,
# unless they have redefined any of them.
if is_proxy:
new_class.copy_managers(original_base._meta.concrete_managers)
# Inherit virtual fields (like GenericForeignKey) from the parent
# class
for field in base._meta.virtual_fields:
if base._meta.abstract and field.name in field_names:
raise FieldError('Local field %r in class %r clashes '\
'with field of similar name from '\
'abstract base class %r' % \
(field.name, name, base.__name__))
new_class.add_to_class(field.name, copy.deepcopy(field))
if abstract:
# Abstract base models can't be instantiated and don't appear in
# the list of models for an app. We do the final setup for them a
# little differently from normal models.
attr_meta.abstract = False
new_class.Meta = attr_meta
return new_class
new_class._prepare()
register_models(new_class._meta.app_label, new_class)
# Because of the way imports happen (recursively), we may or may not be
# the first time this model tries to register with the framework. There
# should only be one class for each model, so we always return the
# registered version.
return get_model(new_class._meta.app_label, name, False)
def copy_managers(cls, base_managers):
# This is in-place sorting of an Options attribute, but that's fine.
base_managers.sort()
for _, mgr_name, manager in base_managers:
val = getattr(cls, mgr_name, None)
if not val or val is manager:
new_manager = manager._copy_to_model(cls)
cls.add_to_class(mgr_name, new_manager)
def add_to_class(cls, name, value):
if hasattr(value, 'contribute_to_class'):
value.contribute_to_class(cls, name)
else:
setattr(cls, name, value)
def _prepare(cls):
"""
Creates some methods once self._meta has been populated.
"""
opts = cls._meta
opts._prepare(cls)
if opts.order_with_respect_to:
cls.get_next_in_order = curry(cls._get_next_or_previous_in_order, is_next=True)
cls.get_previous_in_order = curry(cls._get_next_or_previous_in_order, is_next=False)
# defer creating accessors on the foreign class until we are
# certain it has been created
def make_foreign_order_accessors(field, model, cls):
setattr(
field.rel.to,
'get_%s_order' % cls.__name__.lower(),
curry(method_get_order, cls)
)
setattr(
field.rel.to,
'set_%s_order' % cls.__name__.lower(),
curry(method_set_order, cls)
)
add_lazy_relation(
cls,
opts.order_with_respect_to,
opts.order_with_respect_to.rel.to,
make_foreign_order_accessors
)
# Give the class a docstring -- its definition.
if cls.__doc__ is None:
cls.__doc__ = "%s(%s)" % (cls.__name__, ", ".join([f.attname for f in opts.fields]))
if hasattr(cls, 'get_absolute_url'):
cls.get_absolute_url = update_wrapper(curry(get_absolute_url, opts, cls.get_absolute_url),
cls.get_absolute_url)
signals.class_prepared.send(sender=cls)
class ModelState(object):
"""
A class for storing instance state
"""
def __init__(self, db=None):
self.db = db
# If true, uniqueness validation checks will consider this a new, as-yet-unsaved object.
# Necessary for correct validation of new instances of objects with explicit (non-auto) PKs.
# This impacts validation only; it has no effect on the actual save.
self.adding = True
class Model(object):
__metaclass__ = ModelBase
_deferred = False
def __init__(self, *args, **kwargs):
signals.pre_init.send(sender=self.__class__, args=args, kwargs=kwargs)
# Set up the storage for instance state
self._state = ModelState()
        # There is a rather weird disparity here; if kwargs is set, args
        # still overrides it. It should be one or the other; don't duplicate
        # the work. The reason for the kwargs check is that the standard
        # iterator passes values in by args, and instantiation for iteration
        # is 33% faster.
args_len = len(args)
if args_len > len(self._meta.fields):
# Daft, but matches old exception sans the err msg.
raise IndexError("Number of args exceeds number of fields")
fields_iter = iter(self._meta.fields)
if not kwargs:
            # The ordering of the izip calls matters - izip throws
            # StopIteration when an iter throws it. So if the first iter
            # throws it, the second is *not* consumed. We rely on this, so
            # don't change the order without changing the logic.
for val, field in izip(args, fields_iter):
setattr(self, field.attname, val)
else:
# Slower, kwargs-ready version.
for val, field in izip(args, fields_iter):
setattr(self, field.attname, val)
kwargs.pop(field.name, None)
# Maintain compatibility with existing calls.
if isinstance(field.rel, ManyToOneRel):
kwargs.pop(field.attname, None)
# Now we're left with the unprocessed fields that *must* come from
# keywords, or default.
for field in fields_iter:
is_related_object = False
# This slightly odd construct is so that we can access any
# data-descriptor object (DeferredAttribute) without triggering its
# __get__ method.
if (field.attname not in kwargs and
isinstance(self.__class__.__dict__.get(field.attname), DeferredAttribute)):
# This field will be populated on request.
continue
if kwargs:
if isinstance(field.rel, ManyToOneRel):
try:
# Assume object instance was passed in.
rel_obj = kwargs.pop(field.name)
is_related_object = True
except KeyError:
try:
# Object instance wasn't passed in -- must be an ID.
val = kwargs.pop(field.attname)
except KeyError:
val = field.get_default()
else:
# Object instance was passed in. Special case: You can
# pass in "None" for related objects if it's allowed.
if rel_obj is None and field.null:
val = None
else:
try:
val = kwargs.pop(field.attname)
except KeyError:
# This is done with an exception rather than the
# default argument on pop because we don't want
# get_default() to be evaluated, and then not used.
# Refs #12057.
val = field.get_default()
else:
val = field.get_default()
if is_related_object:
# If we are passed a related instance, set it using the
# field.name instead of field.attname (e.g. "user" instead of
# "user_id") so that the object gets properly cached (and type
# checked) by the RelatedObjectDescriptor.
setattr(self, field.name, rel_obj)
else:
setattr(self, field.attname, val)
if kwargs:
for prop in kwargs.keys():
try:
if isinstance(getattr(self.__class__, prop), property):
setattr(self, prop, kwargs.pop(prop))
except AttributeError:
pass
if kwargs:
raise TypeError("'%s' is an invalid keyword argument for this function" % kwargs.keys()[0])
signals.post_init.send(sender=self.__class__, instance=self)
def __repr__(self):
try:
u = unicode(self)
except (UnicodeEncodeError, UnicodeDecodeError):
u = '[Bad Unicode data]'
return smart_str(u'<%s: %s>' % (self.__class__.__name__, u))
def __str__(self):
if hasattr(self, '__unicode__'):
return force_unicode(self).encode('utf-8')
return '%s object' % self.__class__.__name__
def __eq__(self, other):
return isinstance(other, self.__class__) and self._get_pk_val() == other._get_pk_val()
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self._get_pk_val())
def __reduce__(self):
"""
Provide pickling support. Normally, this just dispatches to Python's
standard handling. However, for models with deferred field loading, we
need to do things manually, as they're dynamically created classes and
only module-level classes can be pickled by the default path.
"""
data = self.__dict__
model = self.__class__
# The obvious thing to do here is to invoke super().__reduce__()
# for the non-deferred case. Don't do that.
        # On Python 2.4, there is something weird with __reduce__,
# and as a result, the super call will cause an infinite recursion.
# See #10547 and #12121.
defers = []
pk_val = None
if self._deferred:
from django.db.models.query_utils import deferred_class_factory
factory = deferred_class_factory
for field in self._meta.fields:
if isinstance(self.__class__.__dict__.get(field.attname),
DeferredAttribute):
defers.append(field.attname)
if pk_val is None:
# The pk_val and model values are the same for all
# DeferredAttribute classes, so we only need to do this
# once.
obj = self.__class__.__dict__[field.attname]
model = obj.model_ref()
else:
factory = simple_class_factory
return (model_unpickle, (model, defers, factory), data)
def _get_pk_val(self, meta=None):
if not meta:
meta = self._meta
return getattr(self, meta.pk.attname)
def _set_pk_val(self, value):
return setattr(self, self._meta.pk.attname, value)
pk = property(_get_pk_val, _set_pk_val)
def serializable_value(self, field_name):
"""
Returns the value of the field name for this instance. If the field is
a foreign key, returns the id value, instead of the object. If there's
no Field object with this name on the model, the model attribute's
value is returned directly.
Used to serialize a field's value (in the serializer, or form output,
for example). Normally, you would just access the attribute directly
and not use this method.
"""
try:
field = self._meta.get_field_by_name(field_name)[0]
except FieldDoesNotExist:
return getattr(self, field_name)
return getattr(self, field.attname)
def save(self, force_insert=False, force_update=False, using=None):
"""
Saves the current instance. Override this in a subclass if you want to
control the saving process.
The 'force_insert' and 'force_update' parameters can be used to insist
that the "save" must be an SQL insert or update (or equivalent for
non-SQL backends), respectively. Normally, they should not be set.
"""
if force_insert and force_update:
raise ValueError("Cannot force both insert and updating in model saving.")
self.save_base(using=using, force_insert=force_insert, force_update=force_update)
save.alters_data = True
def save_base(self, raw=False, cls=None, origin=None, force_insert=False,
force_update=False, using=None):
"""
Does the heavy-lifting involved in saving. Subclasses shouldn't need to
override this method. It's separate from save() in order to hide the
need for overrides of save() to pass around internal-only parameters
('raw', 'cls', and 'origin').
"""
using = using or router.db_for_write(self.__class__, instance=self)
connection = connections[using]
assert not (force_insert and force_update)
if cls is None:
cls = self.__class__
meta = cls._meta
if not meta.proxy:
origin = cls
else:
meta = cls._meta
if origin and not meta.auto_created:
signals.pre_save.send(sender=origin, instance=self, raw=raw, using=using)
# If we are in a raw save, save the object exactly as presented.
# That means that we don't try to be smart about saving attributes
# that might have come from the parent class - we just save the
# attributes we have been given to the class we have been given.
# We also go through this process to defer the save of proxy objects
# to their actual underlying model.
if not raw or meta.proxy:
if meta.proxy:
org = cls
else:
org = None
for parent, field in meta.parents.items():
# At this point, parent's primary key field may be unknown
# (for example, from administration form which doesn't fill
# this field). If so, fill it.
if field and getattr(self, parent._meta.pk.attname) is None and getattr(self, field.attname) is not None:
setattr(self, parent._meta.pk.attname, getattr(self, field.attname))
self.save_base(cls=parent, origin=org, using=using)
if field:
setattr(self, field.attname, self._get_pk_val(parent._meta))
if meta.proxy:
return
if not meta.proxy:
non_pks = [f for f in meta.local_fields if not f.primary_key]
# First, try an UPDATE. If that doesn't update anything, do an INSERT.
pk_val = self._get_pk_val(meta)
pk_set = pk_val is not None
record_exists = True
manager = cls._base_manager
if pk_set:
# Determine whether a record with the primary key already exists.
if (force_update or (not force_insert and
manager.using(using).filter(pk=pk_val).exists())):
# It does already exist, so do an UPDATE.
if force_update or non_pks:
values = [(f, None, (raw and getattr(self, f.attname) or f.pre_save(self, False))) for f in non_pks]
rows = manager.using(using).filter(pk=pk_val)._update(values)
if force_update and not rows:
raise DatabaseError("Forced update did not affect any rows.")
else:
record_exists = False
if not pk_set or not record_exists:
if meta.order_with_respect_to:
                # If this is a model with order_with_respect_to,
                # autopopulate the _order field.
field = meta.order_with_respect_to
order_value = manager.using(using).filter(**{field.name: getattr(self, field.attname)}).count()
self._order = order_value
if not pk_set:
if force_update:
raise ValueError("Cannot force an update in save() with no primary key.")
values = [(f, f.get_db_prep_save(raw and getattr(self, f.attname) or f.pre_save(self, True), connection=connection))
for f in meta.local_fields if not isinstance(f, AutoField)]
else:
values = [(f, f.get_db_prep_save(raw and getattr(self, f.attname) or f.pre_save(self, True), connection=connection))
for f in meta.local_fields]
record_exists = False
update_pk = bool(meta.has_auto_field and not pk_set)
if values:
# Create a new record.
result = manager._insert(values, return_id=update_pk, using=using)
else:
# Create a new record with defaults for everything.
result = manager._insert([(meta.pk, connection.ops.pk_default_value())], return_id=update_pk, raw_values=True, using=using)
if update_pk:
setattr(self, meta.pk.attname, result)
transaction.commit_unless_managed(using=using)
# Store the database on which the object was saved
self._state.db = using
# Once saved, this is no longer a to-be-added instance.
self._state.adding = False
# Signal that the save is complete
if origin and not meta.auto_created:
signals.post_save.send(sender=origin, instance=self,
created=(not record_exists), raw=raw, using=using)
save_base.alters_data = True
def delete(self, using=None):
using = using or router.db_for_write(self.__class__, instance=self)
assert self._get_pk_val() is not None, "%s object can't be deleted because its %s attribute is set to None." % (self._meta.object_name, self._meta.pk.attname)
collector = Collector(using=using)
collector.collect([self])
collector.delete()
delete.alters_data = True
def _get_FIELD_display(self, field):
value = getattr(self, field.attname)
return force_unicode(dict(field.flatchoices).get(value, value), strings_only=True)
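
    # The helper above is curried onto models as get_<field>_display() for any
    # field declared with ``choices`` (sketch; model and field are hypothetical):
    #
    #     person.get_gender_display()   # -> u"Male" when person.gender == "M"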
def _get_next_or_previous_by_FIELD(self, field, is_next, **kwargs):
if not self.pk:
raise ValueError("get_next/get_previous cannot be used on unsaved objects.")
op = is_next and 'gt' or 'lt'
order = not is_next and '-' or ''
param = smart_str(getattr(self, field.attname))
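        # Compare on the given field first, then tie-break on pk so that rows
        # with equal field values still have a deterministic ordering.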
q = Q(**{'%s__%s' % (field.name, op): param})
q = q|Q(**{field.name: param, 'pk__%s' % op: self.pk})
qs = self.__class__._default_manager.using(self._state.db).filter(**kwargs).filter(q).order_by('%s%s' % (order, field.name), '%spk' % order)
try:
return qs[0]
except IndexError:
raise self.DoesNotExist("%s matching query does not exist." % self.__class__._meta.object_name)
def _get_next_or_previous_in_order(self, is_next):
cachename = "__%s_order_cache" % is_next
if not hasattr(self, cachename):
op = is_next and 'gt' or 'lt'
order = not is_next and '-_order' or '_order'
order_field = self._meta.order_with_respect_to
obj = self._default_manager.filter(**{
order_field.name: getattr(self, order_field.attname)
}).filter(**{
'_order__%s' % op: self._default_manager.values('_order').filter(**{
self._meta.pk.name: self.pk
})
}).order_by(order)[:1].get()
setattr(self, cachename, obj)
return getattr(self, cachename)
def prepare_database_save(self, unused):
return self.pk
def clean(self):
"""
Hook for doing any extra model-wide validation after clean() has been
called on every field by self.clean_fields. Any ValidationError raised
by this method will not be associated with a particular field; it will
have a special-case association with the field defined by NON_FIELD_ERRORS.
"""
pass
def validate_unique(self, exclude=None):
"""
Checks unique constraints on the model and raises ``ValidationError``
if any failed.
"""
unique_checks, date_checks = self._get_unique_checks(exclude=exclude)
errors = self._perform_unique_checks(unique_checks)
date_errors = self._perform_date_checks(date_checks)
for k, v in date_errors.items():
errors.setdefault(k, []).extend(v)
if errors:
raise ValidationError(errors)
def _get_unique_checks(self, exclude=None):
"""
Gather a list of checks to perform. Since validate_unique could be
called from a ModelForm, some fields may have been excluded; we can't
perform a unique check on a model that is missing fields involved
in that check.
        Fields that did not validate should also be excluded, but they need
to be passed in via the exclude argument.
"""
if exclude is None:
exclude = []
unique_checks = []
unique_togethers = [(self.__class__, self._meta.unique_together)]
for parent_class in self._meta.parents.keys():
if parent_class._meta.unique_together:
unique_togethers.append((parent_class, parent_class._meta.unique_together))
for model_class, unique_together in unique_togethers:
for check in unique_together:
for name in check:
# If this is an excluded field, don't add this check.
if name in exclude:
break
else:
unique_checks.append((model_class, tuple(check)))
# These are checks for the unique_for_<date/year/month>.
date_checks = []
# Gather a list of checks for fields declared as unique and add them to
# the list of checks.
fields_with_class = [(self.__class__, self._meta.local_fields)]
for parent_class in self._meta.parents.keys():
fields_with_class.append((parent_class, parent_class._meta.local_fields))
for model_class, fields in fields_with_class:
for f in fields:
name = f.name
if name in exclude:
continue
if f.unique:
unique_checks.append((model_class, (name,)))
if f.unique_for_date and f.unique_for_date not in exclude:
date_checks.append((model_class, 'date', name, f.unique_for_date))
if f.unique_for_year and f.unique_for_year not in exclude:
date_checks.append((model_class, 'year', name, f.unique_for_year))
if f.unique_for_month and f.unique_for_month not in exclude:
date_checks.append((model_class, 'month', name, f.unique_for_month))
return unique_checks, date_checks
def _perform_unique_checks(self, unique_checks):
errors = {}
for model_class, unique_check in unique_checks:
# Try to look up an existing object with the same values as this
            # object's values for all the unique fields.
lookup_kwargs = {}
for field_name in unique_check:
f = self._meta.get_field(field_name)
lookup_value = getattr(self, f.attname)
if lookup_value is None:
# no value, skip the lookup
continue
if f.primary_key and not self._state.adding:
# no need to check for unique primary key when editing
continue
lookup_kwargs[str(field_name)] = lookup_value
# some fields were skipped, no reason to do the check
if len(unique_check) != len(lookup_kwargs.keys()):
continue
qs = model_class._default_manager.filter(**lookup_kwargs)
# Exclude the current object from the query if we are editing an
# instance (as opposed to creating a new one)
if not self._state.adding and self.pk is not None:
qs = qs.exclude(pk=self.pk)
if qs.exists():
if len(unique_check) == 1:
key = unique_check[0]
else:
key = NON_FIELD_ERRORS
errors.setdefault(key, []).append(self.unique_error_message(model_class, unique_check))
return errors
def _perform_date_checks(self, date_checks):
errors = {}
for model_class, lookup_type, field, unique_for in date_checks:
lookup_kwargs = {}
            # there's a ticket to add a date lookup; we can remove this special
            # case if that makes its way in
date = getattr(self, unique_for)
if lookup_type == 'date':
lookup_kwargs['%s__day' % unique_for] = date.day
lookup_kwargs['%s__month' % unique_for] = date.month
lookup_kwargs['%s__year' % unique_for] = date.year
else:
lookup_kwargs['%s__%s' % (unique_for, lookup_type)] = getattr(date, lookup_type)
lookup_kwargs[field] = getattr(self, field)
qs = model_class._default_manager.filter(**lookup_kwargs)
# Exclude the current object from the query if we are editing an
# instance (as opposed to creating a new one)
if not self._state.adding and self.pk is not None:
qs = qs.exclude(pk=self.pk)
if qs.exists():
errors.setdefault(field, []).append(
self.date_error_message(lookup_type, field, unique_for)
)
return errors
def date_error_message(self, lookup_type, field, unique_for):
opts = self._meta
return _(u"%(field_name)s must be unique for %(date_field)s %(lookup)s.") % {
'field_name': unicode(capfirst(opts.get_field(field).verbose_name)),
'date_field': unicode(capfirst(opts.get_field(unique_for).verbose_name)),
'lookup': lookup_type,
}
def unique_error_message(self, model_class, unique_check):
opts = model_class._meta
model_name = capfirst(opts.verbose_name)
# A unique field
if len(unique_check) == 1:
field_name = unique_check[0]
field_label = capfirst(opts.get_field(field_name).verbose_name)
# Insert the error into the error dict, very sneaky
return _(u"%(model_name)s with this %(field_label)s already exists.") % {
'model_name': unicode(model_name),
'field_label': unicode(field_label)
}
# unique_together
else:
field_labels = map(lambda f: capfirst(opts.get_field(f).verbose_name), unique_check)
field_labels = get_text_list(field_labels, _('and'))
return _(u"%(model_name)s with this %(field_label)s already exists.") % {
'model_name': unicode(model_name),
'field_label': unicode(field_labels)
}
def full_clean(self, exclude=None):
"""
        Calls clean_fields, clean, and validate_unique on the model,
        and raises a ``ValidationError`` for any errors that occurred.
"""
errors = {}
if exclude is None:
exclude = []
try:
self.clean_fields(exclude=exclude)
except ValidationError, e:
errors = e.update_error_dict(errors)
# Form.clean() is run even if other validation fails, so do the
# same with Model.clean() for consistency.
try:
self.clean()
except ValidationError, e:
errors = e.update_error_dict(errors)
# Run unique checks, but only for fields that passed validation.
for name in errors.keys():
if name != NON_FIELD_ERRORS and name not in exclude:
exclude.append(name)
try:
self.validate_unique(exclude=exclude)
except ValidationError, e:
errors = e.update_error_dict(errors)
if errors:
raise ValidationError(errors)
def clean_fields(self, exclude=None):
"""
Cleans all fields and raises a ValidationError containing message_dict
of all validation errors if any occur.
"""
if exclude is None:
exclude = []
errors = {}
for f in self._meta.fields:
if f.name in exclude:
continue
# Skip validation for empty fields with blank=True. The developer
# is responsible for making sure they have a valid value.
raw_value = getattr(self, f.attname)
if f.blank and raw_value in validators.EMPTY_VALUES:
continue
try:
setattr(self, f.attname, f.clean(raw_value, self))
except ValidationError, e:
errors[f.name] = e.messages
if errors:
raise ValidationError(errors)
############################################
# HELPER FUNCTIONS (CURRIED MODEL METHODS) #
############################################
# ORDERING METHODS #########################
def method_set_order(ordered_obj, self, id_list, using=None):
if using is None:
using = DEFAULT_DB_ALIAS
rel_val = getattr(self, ordered_obj._meta.order_with_respect_to.rel.field_name)
order_name = ordered_obj._meta.order_with_respect_to.name
# FIXME: It would be nice if there was an "update many" version of update
# for situations like this.
for i, j in enumerate(id_list):
ordered_obj.objects.filter(**{'pk': j, order_name: rel_val}).update(_order=i)
transaction.commit_unless_managed(using=using)
def method_get_order(ordered_obj, self):
rel_val = getattr(self, ordered_obj._meta.order_with_respect_to.rel.field_name)
order_name = ordered_obj._meta.order_with_respect_to.name
pk_name = ordered_obj._meta.pk.name
return [r[pk_name] for r in
ordered_obj.objects.filter(**{order_name: rel_val}).values(pk_name)]
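
# Illustrative usage (not part of this module; model names are hypothetical):
# with ``order_with_respect_to = 'question'`` on an Answer model, the curried
# helpers above surface on the related Question model as:
#
#     question.get_answer_order()          # -> ordered list of Answer pks
#     question.set_answer_order([3, 1, 2])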
##############################################
# HELPER FUNCTIONS (CURRIED MODEL FUNCTIONS) #
##############################################
def get_absolute_url(opts, func, self, *args, **kwargs):
return settings.ABSOLUTE_URL_OVERRIDES.get('%s.%s' % (opts.app_label, opts.module_name), func)(self, *args, **kwargs)
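
# Illustrative settings entry (hypothetical app/model names): the override is
# keyed by "app_label.module_name" and replaces the model's own method:
#
#     ABSOLUTE_URL_OVERRIDES = {
#         'blog.entry': lambda obj: "/entries/%s/" % obj.slug,
#     }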
########
# MISC #
########
class Empty(object):
pass
def simple_class_factory(model, attrs):
"""Used to unpickle Models without deferred fields.
We need to do this the hard way, rather than just using
the default __reduce__ implementation, because of a
__deepcopy__ problem in Python 2.4
"""
return model
def model_unpickle(model, attrs, factory):
"""
Used to unpickle Model subclasses with deferred fields.
"""
cls = factory(model, attrs)
return cls.__new__(cls)
model_unpickle.__safe_for_unpickle__ = True
if sys.version_info < (2, 5):
# Prior to Python 2.5, Exception was an old-style class
def subclass_exception(name, parents, unused):
return types.ClassType(name, parents, {})
else:
def subclass_exception(name, parents, module):
return type(name, parents, {'__module__': module})
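
# For reference, ModelBase uses this helper to build the per-model exception
# classes, along the lines of (sketch):
#
#     DoesNotExist = subclass_exception('DoesNotExist',
#                                       (ObjectDoesNotExist,), module)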
|
avinashkunuje/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/common/system/zipfileset_mock.py
|
167
|
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def make_factory(ziphashes):
"""ZipFileSet factory routine that looks up zipfiles in a dict;
each zipfile should also be a dict of member names -> contents."""
class MockZipFileSet(object):
def __init__(self, url):
self._url = url
self._ziphash = ziphashes[url]
def namelist(self):
return self._ziphash.keys()
def read(self, member):
return self._ziphash[member]
def close(self):
pass
def maker(url):
# We return None because there's no tempfile to delete.
return (None, MockZipFileSet(url))
return maker
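
if __name__ == '__main__':
    # Minimal self-check (not part of the original mock): build a factory
    # over one fake zip and read a member back. The URL and member names
    # are made up for illustration.
    factory = make_factory({'http://example.com/a.zip': {'member.txt': 'hello'}})
    tempfile_path, zipset = factory('http://example.com/a.zip')
    assert tempfile_path is None
    assert zipset.namelist() == ['member.txt']
    assert zipset.read('member.txt') == 'hello'
    zipset.close()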
|
dev-zzo/pwn-tools
|
refs/heads/master
|
embedded/stm32bldr.py
|
1
|
"""
STM32 Bootloader tool
See the following STMicro application notes:
* AN2606 for the general description
* AN3155 for the protocol
Dependencies:
* pySerial
"""
import binascii
import struct
import serial
def log(text):
print(text)
# AN2606: 3.2 Bootloader identification
__bl_interfaces = [
(),
    ( 'usart', ),   # trailing comma keeps the single entry a tuple
( 'usart', 'usart2' ),
( 'usart', 'can', 'dfu' ),
( 'usart', 'dfu' ),
( 'usart', 'i2c' ),
    ( 'i2c', ),
( 'usart', 'can', 'dfu', 'i2c' ),
( 'i2c', 'spi' ),
( 'usart', 'can', 'dfu', 'i2c', 'spi' ),
( 'usart', 'dfu', 'i2c' ),
( 'usart', 'i2c', 'spi' ),
( 'usart', 'spi' ),
( 'usart', 'dfu', 'i2c', 'spi' ),
]
# AN2606: 48 Device-dependent bootloader parameters
__products = {
"\x04\x10": { 'name': "STM32F10xxxx (medium density)", 'flash_base': 0x08000000, 'flash_size': 0x20000, 'ram_base': 0x20000000, 'ram_size': 0x5000, 'ram_valid': 0x20000200 }
# TODO: add more devices!
}
class BootloaderError(Exception):
"Generic bootloader error"
pass
class TimeoutError(BootloaderError):
"Communications timeout"
pass
class ProtocolError(BootloaderError):
"Data exchange protocol error"
pass
class CommandError(BootloaderError):
"Command execution error"
pass
ACK = "\x79"
NAK = "\x1F"
def _append_checksum(data):
"Compute and append the checksum"
cs = 0
if len(data) == 1:
cs = (~ord(data)) & 0xFF
else:
for x in data:
cs ^= ord(x)
return data + chr(cs)
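
# Examples (per AN3155): a single byte is followed by its complement, while
# longer payloads append the XOR of all bytes:
#
#     _append_checksum("\x00") == "\x00\xff"
#     _append_checksum("\x12\x34") == "\x12\x34\x26"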
class Stm32Bootloader(object):
"Encapsulates the bootloader functionality"
def __init__(self, port, autobaud=True):
self._p = port
if autobaud:
self._run_autobaud()
def _run_autobaud(self):
"Automatic baud rate detection procedure"
self._p.write("\x7F")
        if self._receive_ack():
log("Autobaud procedure successful (got ACK)")
else:
log("Autobaud procedure successful (got NAK; assuming baud rate is correct)")
def _receive_bytes(self, count):
"Receive N bytes from the port"
buffer = ''
while count > 0:
chunk = self._p.read(count)
if not chunk:
raise TimeoutError("receiving data")
buffer += chunk
count -= len(chunk)
return buffer
def _receive_ack(self):
"Receive and verify the ACK byte"
ack = self._p.read()
if not ack:
raise TimeoutError("receiving ACK")
if ack == ACK:
return True
if ack == NAK:
return False
raise ProtocolError("unexpected response: %02x" % ord(ack))
def _send_data_check_ack(self, data):
self._p.write(_append_checksum(data))
return self._receive_ack()
def _receive_data_check_ack(self, count):
data = self._receive_bytes(count)
if not self._receive_ack():
raise ProtocolError("expected ACK; got NAK instead")
return data
def get_blinfo(self):
"Retrieve the bootloader version and the list of supported commands"
if not self._send_data_check_ack("\x00"):
raise CommandError("command failed")
count = struct.unpack('B', self._receive_bytes(1))[0] + 1
rsp = self._receive_data_check_ack(count)
version = ord(rsp[0]) & 0xFF
supported_cmds = rsp[1:]
return { 'version': version, 'supported_cmds': supported_cmds }
def get_pid(self):
"Retrieve the product ID (2 bytes currently)"
if not self._send_data_check_ack("\x02"):
raise CommandError("command failed")
count = struct.unpack('B', self._receive_bytes(1))[0] + 1
rsp = self._receive_data_check_ack(count)
return rsp
def read_memory(self, addr, count):
"Read memory region"
if not self._send_data_check_ack("\x11"):
raise CommandError("read protection is enabled")
if not self._send_data_check_ack(struct.pack('>I', addr)):
raise CommandError("address is rejected by the device")
if not self._send_data_check_ack(struct.pack('B', count - 1)):
raise CommandError("count is rejected by the device")
rsp = self._receive_bytes(count)
return rsp
def write_memory(self, addr, data):
"Write memory region"
if not self._send_data_check_ack("\x31"):
raise CommandError("read protection is enabled")
if not self._send_data_check_ack(struct.pack('>I', addr)):
raise CommandError("address is rejected by the device")
if not self._send_data_check_ack(struct.pack('B', len(data) - 1) + data):
raise CommandError("checksum error")
# NOTE: according to the diagram in AN3155,
# NAK is not sent if memory address is invalid
def erase_memory(self, pages):
"Erase memory pages"
if not self._send_data_check_ack("\x43"):
raise CommandError("read protection is enabled")
        if pages is None:
            # Whole device: AN3155 defines 0xFF as the global erase code
            # (its appended checksum byte is then 0x00)
            if not self._send_data_check_ack("\xFF"):
                raise CommandError("global erase rejected by the device")
else:
# Specific pages
data = struct.pack('B%dB' % len(pages), len(pages) - 1, *pages)
if not self._send_data_check_ack(data):
raise CommandError("checksum error")
def write_protect(self, sectors):
"Apply write protection to flash sectors"
if not self._send_data_check_ack("\x63"):
raise CommandError("read protection is enabled")
data = struct.pack('B%dB' % len(sectors), len(sectors) - 1, *sectors)
if not self._send_data_check_ack(data):
raise CommandError("checksum error")
def write_unprotect(self):
"Remove write protection from all flash"
if not self._send_data_check_ack("\x73"):
raise CommandError("read protection is enabled")
self._receive_ack()
def readout_protect(self):
"Enable readout protection on the device"
if not self._send_data_check_ack("\x82"):
raise CommandError("read protection is enabled")
self._receive_ack()
def readout_unprotect(self):
"Disable readout protection on the device"
if not self._send_data_check_ack("\x92"):
raise CommandError("something went wrong")
self._receive_ack()
def go(self, addr):
"Start executing code from the specified address"
if not self._send_data_check_ack("\x21"):
raise CommandError("read protection is enabled")
if not self._send_data_check_ack(struct.pack('>I', addr)):
raise CommandError("address is rejected by the device")
# End
if __name__ == '__main__':
import sys
if len(sys.argv) < 2:
sys.exit(1)
port = sys.argv[1]
baudrate = 57600
p = serial.Serial(port, baudrate, parity=serial.PARITY_EVEN, timeout=2)
bl = Stm32Bootloader(p)
blid = bl.get_blinfo()['version']
log("Bootloader version: %02x" % blid)
bl_ifs = __bl_interfaces[blid >> 4]
log("Bootloader interfaces: %s" % str(bl_ifs))
pid = bl.get_pid()
log("Product ID: %s" % binascii.hexlify(pid))
product = __products[pid]
log("Product: %s" % product['name'])
flash_base = product['flash_base']
flash_size = product['flash_size']
block_size = 0x100
log("Dumping memory: %08x:%08x" % (flash_base, flash_base + flash_size))
with open('flash_dump.bin', 'wb') as fp:
for offset in xrange(0, flash_size, block_size):
data = bl.read_memory(flash_base + offset, block_size)
fp.write(data)
log("Dumping completed")
# EOF
|
Joergen/olympia
|
refs/heads/master
|
apps/amo/urls.py
|
1
|
from waffle.views import wafflejs
from django.conf.urls import include, patterns, url
from django.views.decorators.cache import never_cache
from . import install, views
services_patterns = patterns(
'',
url('^monitor(.json)?$', never_cache(views.monitor),
name='amo.monitor'),
url('^loaded$', never_cache(views.loaded), name='amo.loaded'),
url('^csp/report$', views.cspreport, name='amo.csp.report'),
url('^builder-pingback', views.builder_pingback,
name='amo.builder-pingback'),
url('^timing/record$', views.record, name='amo.timing.record'),
url('^pfs.php$', views.plugin_check_redirect, name='api.plugincheck'),
url('^install.php$', install.install, name='api.install'),
)
urlpatterns = patterns(
'',
url('^robots.txt$', views.robots, name='robots.txt'),
url('^contribute.json$', views.contribute, name='contribute.json'),
url(r'^wafflejs$', wafflejs, name='wafflejs'),
('^services/', include(services_patterns)),
url('^opensearch.xml$', 'api.views.render_xml',
{'template': 'amo/opensearch.xml'},
name='amo.opensearch'),
)
|
Jonekee/chromium.src
|
refs/heads/nw12
|
tools/telemetry/telemetry/page/page_set.py
|
11
|
# Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import csv
import inspect
import os
from telemetry.page import page as page_module
from telemetry.user_story import user_story_set
from telemetry.util import cloud_storage
PUBLIC_BUCKET = cloud_storage.PUBLIC_BUCKET
PARTNER_BUCKET = cloud_storage.PARTNER_BUCKET
INTERNAL_BUCKET = cloud_storage.INTERNAL_BUCKET
class PageSetError(Exception):
pass
class PageSet(user_story_set.UserStorySet):
def __init__(self, file_path=None, archive_data_file='', user_agent_type=None,
serving_dirs=None, bucket=None):
    # The default value of file_path is the location of the file that
    # defines this page set instance's class.
# TODO(chrishenry): Move this logic to user_story_set. Consider passing
# a base_dir directly. Alternatively, kill this and rely on the default
# behavior of using the instance's class file location.
if file_path is None:
file_path = inspect.getfile(self.__class__)
      # Turn a pyc file into a py file if we can
if file_path.endswith('.pyc') and os.path.exists(file_path[:-1]):
file_path = file_path[:-1]
self.file_path = file_path
super(PageSet, self).__init__(
archive_data_file=archive_data_file, cloud_storage_bucket=bucket,
serving_dirs=serving_dirs)
# These attributes can be set dynamically by the page set.
self.user_agent_type = user_agent_type
@property
def pages(self):
return self.user_stories
def AddUserStory(self, user_story):
assert isinstance(user_story, page_module.Page)
assert user_story.page_set is self
super(PageSet, self).AddUserStory(user_story)
@property
def base_dir(self):
if os.path.isfile(self.file_path):
return os.path.dirname(self.file_path)
else:
return self.file_path
def ReorderPageSet(self, results_file):
"""Reorders this page set based on the results of a past run."""
page_set_dict = {}
for page in self.user_stories:
page_set_dict[page.url] = page
user_stories = []
with open(results_file, 'rb') as csv_file:
csv_reader = csv.reader(csv_file)
csv_header = csv_reader.next()
if 'url' not in csv_header:
raise Exception('Unusable results_file.')
url_index = csv_header.index('url')
      for csv_row in csv_reader:
        if csv_row[url_index] in page_set_dict:
          # Append to the local list; calling AddUserStory() here would
          # re-add pages that are already in this page set.
          user_stories.append(page_set_dict[csv_row[url_index]])
        else:
          raise Exception('Unusable results_file.')
    return user_stories
|
noba3/KoTos
|
refs/heads/master
|
addons/plugin.video.mega/resources/lib/platform_libraries/Linux/64bit/Crypto/Cipher/__init__.py
|
271
|
"""Secret-key encryption algorithms.
Secret-key encryption algorithms transform plaintext in some way that
is dependent on a key, producing ciphertext. This transformation can
easily be reversed, if (and, hopefully, only if) one knows the key.
The encryption modules here all support the interface described in PEP
272, "API for Block Encryption Algorithms".
If you don't know which algorithm to choose, use AES because it's
standard and has undergone a fair bit of examination.
Crypto.Cipher.AES Advanced Encryption Standard
Crypto.Cipher.ARC2 Alleged RC2
Crypto.Cipher.ARC4 Alleged RC4
Crypto.Cipher.Blowfish
Crypto.Cipher.CAST
Crypto.Cipher.DES The Data Encryption Standard. Very commonly used
in the past, but today its 56-bit keys are too small.
Crypto.Cipher.DES3 Triple DES.
Crypto.Cipher.IDEA
Crypto.Cipher.RC5
Crypto.Cipher.XOR The simple XOR cipher.
"""
__all__ = ['AES', 'ARC2', 'ARC4',
'Blowfish', 'CAST', 'DES', 'DES3', 'IDEA', 'RC5',
'XOR'
]
__revision__ = "$Id: __init__.py,v 1.7 2003/02/28 15:28:35 akuchling Exp $"
|
alianmohammad/gem5-linux-kernel
|
refs/heads/master
|
tools/perf/scripts/python/sctop.py
|
11180
|
# system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
|
TheWardoctor/Wardoctors-repo
|
refs/heads/master
|
script.extendedinfo/resources/lib/dialogs/BaseClasses.py
|
2
|
# -*- coding: utf8 -*-
# Copyright (C) 2015 - Philipp Temminghoff <phil65@kodi.tv>
# This program is Free Software see LICENSE file for details
import xbmcgui
class WindowXML(xbmcgui.WindowXML):
def __init__(self, *args, **kwargs):
xbmcgui.WindowXML.__init__(self)
self.window_type = "window"
def onInit(self):
self.window_id = xbmcgui.getCurrentWindowId()
self.window = xbmcgui.Window(self.window_id)
class DialogXML(xbmcgui.WindowXMLDialog):
def __init__(self, *args, **kwargs):
xbmcgui.WindowXMLDialog.__init__(self)
self.window_type = "dialog"
def onInit(self):
self.window_id = xbmcgui.getCurrentWindowDialogId()
self.window = xbmcgui.Window(self.window_id)
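
# Usage sketch (requires a running Kodi instance; the XML skin file and
# addon path below are hypothetical):
#
#     dialog = DialogXML('script-myaddon-DialogInfo.xml', ADDON_PATH)
#     dialog.doModal()
#     del dialog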
|
Wyn10/Cnchi
|
refs/heads/master
|
cnchi/ui/html/pages/_04_location/location.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# location.py
#
# Copyright © 2016 Antergos
#
# This file is part of The Antergos Build Server, (AntBS).
#
# AntBS is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# AntBS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# The following additional terms are in effect as per Section 7 of the license:
#
# The preservation of all legal notices and author attributions in
# the material or in the Appropriate Legal Notices displayed
# by works containing it is required.
#
# You should have received a copy of the GNU General Public License
# along with AntBS; If not, see <http://www.gnu.org/licenses/>.
# Standard Lib
from _base_object import (
json
)
from modules.location import LocationModule
from modules.keymap import KeymapModule
from .._00_base.html_page import HTMLPage, bg_thread
class LocationPage(HTMLPage):
"""
The first page shown when the app starts.
Class Attributes:
Also see `HTMLPage.__doc__`
"""
def __init__(self, name='location', index=0, *args, **kwargs):
"""
Attributes:
Also see `HTMLPage.__doc__`.
Args:
name (str): A name for this widget.
"""
super().__init__(name=name, index=index, *args, **kwargs)
self._module = None
self.locations = None
self.timezone_map_enabled = False
self.locations_items = []
self.keyboard_layouts = []
self.page_tabs_requested = []
self.signals.extend(['show-all-locations', 'load-keyboard-layouts', 'enable-timezone-map'])
self.tabs.extend([
(_('Location'), True),
(_('Keyboard Layout'), False),
(_('Timezone'), False)
])
self._create_and_connect_signals()
self._initialize_page_data()
def _get_default_template_vars(self):
signals = json.dumps(self.signals)
tpl_vars = super()._get_default_template_vars()
tpl_vars.update({
'signals': signals,
'tabs': self.tabs,
'locations': self.locations_items,
'show_all_locations': self._pages_data.location.show_all_locations,
'list_items': [],
'keyboard_layouts': self.keyboard_layouts,
'timezone_map_enabled': self.timezone_map_enabled
})
return tpl_vars
def _get_initial_page_data(self):
return {
'show_all_locations': False,
'keyboard_layout': None,
'keyboard_variant': None
}
def enable_timezone_map_cb(self, *args):
self.timezone_map_enabled = True
@bg_thread
def load_keyboard_layouts_cb(self, *args):
if not self.keyboard_layouts:
keymap_module = KeymapModule()
keymap_module.initialize()
self.keyboard_layouts = keymap_module.get_keyboard_layouts_list()
self.logger.debug(self.keyboard_layouts)
def prepare(self):
""" Prepare to become the current (visible) page. """
self._module = LocationModule()
self.locations = self._module.get_location_collection_items()
self.locations_items = [
sorted(langs, key=lambda d: d['language'])
for langs in self.locations
]
def store_values(self):
""" This must be implemented by subclasses """
raise NotImplementedError
def show_all_locations_cb(self, *args):
current_val = self._pages_data.location.show_all_locations
self._pages_data.location.show_all_locations = self.toggle_bool(current_val)
|
dd00/commandergenius
|
refs/heads/dd00
|
project/jni/python/src/Lib/test/test_dis.py
|
58
|
# Minimal tests for dis module
from test.test_support import run_unittest
import unittest
import sys
import dis
import StringIO
def _f(a):
print a
return 1
dis_f = """\
%-4d 0 LOAD_FAST 0 (a)
3 PRINT_ITEM
4 PRINT_NEWLINE
%-4d 5 LOAD_CONST 1 (1)
8 RETURN_VALUE
"""%(_f.func_code.co_firstlineno + 1,
_f.func_code.co_firstlineno + 2)
def bug708901():
for res in range(1,
10):
pass
dis_bug708901 = """\
%-4d 0 SETUP_LOOP 23 (to 26)
3 LOAD_GLOBAL 0 (range)
6 LOAD_CONST 1 (1)
%-4d 9 LOAD_CONST 2 (10)
12 CALL_FUNCTION 2
15 GET_ITER
>> 16 FOR_ITER 6 (to 25)
19 STORE_FAST 0 (res)
%-4d 22 JUMP_ABSOLUTE 16
>> 25 POP_BLOCK
>> 26 LOAD_CONST 0 (None)
29 RETURN_VALUE
"""%(bug708901.func_code.co_firstlineno + 1,
bug708901.func_code.co_firstlineno + 2,
bug708901.func_code.co_firstlineno + 3)
def bug1333982(x=[]):
assert 0, ([s for s in x] +
1)
pass
dis_bug1333982 = """\
%-4d 0 LOAD_CONST 1 (0)
3 JUMP_IF_TRUE 41 (to 47)
6 POP_TOP
7 LOAD_GLOBAL 0 (AssertionError)
10 BUILD_LIST 0
13 DUP_TOP
14 STORE_FAST 1 (_[1])
17 LOAD_FAST 0 (x)
20 GET_ITER
>> 21 FOR_ITER 13 (to 37)
24 STORE_FAST 2 (s)
27 LOAD_FAST 1 (_[1])
30 LOAD_FAST 2 (s)
33 LIST_APPEND
34 JUMP_ABSOLUTE 21
>> 37 DELETE_FAST 1 (_[1])
%-4d 40 LOAD_CONST 2 (1)
43 BINARY_ADD
44 RAISE_VARARGS 2
>> 47 POP_TOP
%-4d 48 LOAD_CONST 0 (None)
51 RETURN_VALUE
"""%(bug1333982.func_code.co_firstlineno + 1,
bug1333982.func_code.co_firstlineno + 2,
bug1333982.func_code.co_firstlineno + 3)
_BIG_LINENO_FORMAT = """\
%3d 0 LOAD_GLOBAL 0 (spam)
3 POP_TOP
4 LOAD_CONST 0 (None)
7 RETURN_VALUE
"""
class DisTests(unittest.TestCase):
def do_disassembly_test(self, func, expected):
s = StringIO.StringIO()
save_stdout = sys.stdout
sys.stdout = s
dis.dis(func)
sys.stdout = save_stdout
got = s.getvalue()
# Trim trailing blanks (if any).
lines = got.split('\n')
lines = [line.rstrip() for line in lines]
expected = expected.split("\n")
import difflib
if expected != lines:
self.fail(
"events did not match expectation:\n" +
"\n".join(difflib.ndiff(expected,
lines)))
def test_opmap(self):
self.assertEqual(dis.opmap["STOP_CODE"], 0)
self.assertEqual(dis.opmap["LOAD_CONST"] in dis.hasconst, True)
self.assertEqual(dis.opmap["STORE_NAME"] in dis.hasname, True)
def test_opname(self):
self.assertEqual(dis.opname[dis.opmap["LOAD_FAST"]], "LOAD_FAST")
def test_boundaries(self):
self.assertEqual(dis.opmap["EXTENDED_ARG"], dis.EXTENDED_ARG)
self.assertEqual(dis.opmap["STORE_NAME"], dis.HAVE_ARGUMENT)
def test_dis(self):
self.do_disassembly_test(_f, dis_f)
def test_bug_708901(self):
self.do_disassembly_test(bug708901, dis_bug708901)
def test_bug_1333982(self):
# This one is checking bytecodes generated for an `assert` statement,
# so fails if the tests are run with -O. Skip this test then.
if __debug__:
self.do_disassembly_test(bug1333982, dis_bug1333982)
def test_big_linenos(self):
def func(count):
namespace = {}
func = "def foo():\n " + "".join(["\n "] * count + ["spam\n"])
exec func in namespace
return namespace['foo']
# Test all small ranges
for i in xrange(1, 300):
expected = _BIG_LINENO_FORMAT % (i + 2)
self.do_disassembly_test(func(i), expected)
# Test some larger ranges too
for i in xrange(300, 5000, 10):
expected = _BIG_LINENO_FORMAT % (i + 2)
self.do_disassembly_test(func(i), expected)
def test_main():
run_unittest(DisTests)
if __name__ == "__main__":
test_main()
|
75651/kbengine_cloud
|
refs/heads/master
|
kbe/res/scripts/common/Lib/idlelib/PyShell.py
|
59
|
#! /usr/bin/env python3
import getopt
import os
import os.path
import re
import socket
import subprocess
import sys
import threading
import time
import tokenize
import traceback
import types
import io
import linecache
from code import InteractiveInterpreter
from platform import python_version, system
try:
from tkinter import *
except ImportError:
print("** IDLE can't import Tkinter.\n"
"Your Python may not be configured for Tk. **", file=sys.__stderr__)
sys.exit(1)
import tkinter.messagebox as tkMessageBox
from idlelib.EditorWindow import EditorWindow, fixwordbreaks
from idlelib.FileList import FileList
from idlelib.ColorDelegator import ColorDelegator
from idlelib.UndoDelegator import UndoDelegator
from idlelib.OutputWindow import OutputWindow
from idlelib.configHandler import idleConf
from idlelib import idlever
from idlelib import rpc
from idlelib import Debugger
from idlelib import RemoteDebugger
from idlelib import macosxSupport
HOST = '127.0.0.1' # python execution server on localhost loopback
PORT = 0 # someday pass in host, port for remote debug capability
# Override warnings module to write to warning_stream. Initialize to send IDLE
# internal warnings to the console. ScriptBinding.check_syntax() will
# temporarily redirect the stream to the shell window to display warnings when
# checking user's code.
warning_stream = sys.__stderr__ # None, at least on Windows, if no console.
import warnings
def idle_formatwarning(message, category, filename, lineno, line=None):
"""Format warnings the IDLE way."""
s = "\nWarning (from warnings module):\n"
s += ' File \"%s\", line %s\n' % (filename, lineno)
if line is None:
line = linecache.getline(filename, lineno)
line = line.strip()
if line:
s += " %s\n" % line
s += "%s: %s\n" % (category.__name__, message)
return s
def idle_showwarning(
message, category, filename, lineno, file=None, line=None):
"""Show Idle-format warning (after replacing warnings.showwarning).
The differences are the formatter called, the file=None replacement,
    (file can be None), the capture of the consequent AttributeError,
and the output of a hard-coded prompt.
"""
if file is None:
file = warning_stream
try:
file.write(idle_formatwarning(
message, category, filename, lineno, line=line))
file.write(">>> ")
except (AttributeError, OSError):
pass # if file (probably __stderr__) is invalid, skip warning.
_warnings_showwarning = None
def capture_warnings(capture):
"Replace warning.showwarning with idle_showwarning, or reverse."
global _warnings_showwarning
if capture:
if _warnings_showwarning is None:
_warnings_showwarning = warnings.showwarning
warnings.showwarning = idle_showwarning
else:
if _warnings_showwarning is not None:
warnings.showwarning = _warnings_showwarning
_warnings_showwarning = None
capture_warnings(True)
def extended_linecache_checkcache(filename=None,
orig_checkcache=linecache.checkcache):
"""Extend linecache.checkcache to preserve the <pyshell#...> entries
Rather than repeating the linecache code, patch it to save the
<pyshell#...> entries, call the original linecache.checkcache()
(skipping them), and then restore the saved entries.
orig_checkcache is bound at definition time to the original
method, allowing it to be patched.
"""
cache = linecache.cache
save = {}
for key in list(cache):
if key[:1] + key[-1:] == '<>':
save[key] = cache.pop(key)
orig_checkcache(filename)
cache.update(save)
# Patch linecache.checkcache():
linecache.checkcache = extended_linecache_checkcache
class PyShellEditorWindow(EditorWindow):
"Regular text edit window in IDLE, supports breakpoints"
def __init__(self, *args):
self.breakpoints = []
EditorWindow.__init__(self, *args)
self.text.bind("<<set-breakpoint-here>>", self.set_breakpoint_here)
self.text.bind("<<clear-breakpoint-here>>", self.clear_breakpoint_here)
self.text.bind("<<open-python-shell>>", self.flist.open_shell)
self.breakpointPath = os.path.join(idleConf.GetUserCfgDir(),
'breakpoints.lst')
# whenever a file is changed, restore breakpoints
def filename_changed_hook(old_hook=self.io.filename_change_hook,
self=self):
self.restore_file_breaks()
old_hook()
self.io.set_filename_change_hook(filename_changed_hook)
if self.io.filename:
self.restore_file_breaks()
self.color_breakpoint_text()
rmenu_specs = [
("Cut", "<<cut>>", "rmenu_check_cut"),
("Copy", "<<copy>>", "rmenu_check_copy"),
("Paste", "<<paste>>", "rmenu_check_paste"),
(None, None, None),
("Set Breakpoint", "<<set-breakpoint-here>>", None),
("Clear Breakpoint", "<<clear-breakpoint-here>>", None)
]
def color_breakpoint_text(self, color=True):
"Turn colorizing of breakpoint text on or off"
if color:
theme = idleConf.GetOption('main','Theme','name')
cfg = idleConf.GetHighlight(theme, "break")
else:
cfg = {'foreground': '', 'background': ''}
self.text.tag_config('BREAK', cfg)
def set_breakpoint(self, lineno):
text = self.text
filename = self.io.filename
text.tag_add("BREAK", "%d.0" % lineno, "%d.0" % (lineno+1))
try:
i = self.breakpoints.index(lineno)
except ValueError: # only add if missing, i.e. do once
self.breakpoints.append(lineno)
try: # update the subprocess debugger
debug = self.flist.pyshell.interp.debugger
debug.set_breakpoint_here(filename, lineno)
except: # but debugger may not be active right now....
pass
def set_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
self.set_breakpoint(lineno)
def clear_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
try:
self.breakpoints.remove(lineno)
except:
pass
text.tag_remove("BREAK", "insert linestart",\
"insert lineend +1char")
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_breakpoint_here(filename, lineno)
except:
pass
def clear_file_breaks(self):
if self.breakpoints:
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
self.breakpoints = []
text.tag_remove("BREAK", "1.0", END)
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_file_breaks(filename)
except:
pass
def store_file_breaks(self):
"Save breakpoints when file is saved"
# XXX 13 Dec 2002 KBK Currently the file must be saved before it can
# be run. The breaks are saved at that time. If we introduce
# a temporary file save feature the save breaks functionality
# needs to be re-verified, since the breaks at the time the
# temp file is created may differ from the breaks at the last
# permanent save of the file. Currently, a break introduced
# after a save will be effective, but not persistent.
# This is necessary to keep the saved breaks synched with the
# saved file.
#
# Breakpoints are set as tagged ranges in the text. Certain
# kinds of edits cause these ranges to be deleted: Inserting
# or deleting a line just before a breakpoint, and certain
# deletions prior to a breakpoint. These issues need to be
# investigated and understood. It's not clear if they are
# Tk issues or IDLE issues, or whether they can actually
# be fixed. Since a modified file has to be saved before it is
# run, and since self.breakpoints (from which the subprocess
# debugger is loaded) is updated during the save, the visible
# breaks stay synched with the subprocess even if one of these
# unexpected breakpoint deletions occurs.
breaks = self.breakpoints
filename = self.io.filename
try:
with open(self.breakpointPath, "r") as fp:
lines = fp.readlines()
except OSError:
lines = []
try:
with open(self.breakpointPath, "w") as new_file:
for line in lines:
if not line.startswith(filename + '='):
new_file.write(line)
self.update_breakpoints()
breaks = self.breakpoints
if breaks:
new_file.write(filename + '=' + str(breaks) + '\n')
except OSError as err:
if not getattr(self.root, "breakpoint_error_displayed", False):
self.root.breakpoint_error_displayed = True
tkMessageBox.showerror(title='IDLE Error',
message='Unable to update breakpoint list:\n%s'
% str(err),
parent=self.text)
def restore_file_breaks(self):
self.text.update() # this enables setting "BREAK" tags to be visible
if self.io is None:
# can happen if IDLE closes due to the .update() call
return
filename = self.io.filename
if filename is None:
return
if os.path.isfile(self.breakpointPath):
with open(self.breakpointPath, "r") as fp:
lines = fp.readlines()
for line in lines:
if line.startswith(filename + '='):
breakpoint_linenumbers = eval(line[len(filename)+1:])
for breakpoint_linenumber in breakpoint_linenumbers:
self.set_breakpoint(breakpoint_linenumber)
def update_breakpoints(self):
"Retrieves all the breakpoints in the current window"
text = self.text
ranges = text.tag_ranges("BREAK")
linenumber_list = self.ranges_to_linenumbers(ranges)
self.breakpoints = linenumber_list
def ranges_to_linenumbers(self, ranges):
lines = []
for index in range(0, len(ranges), 2):
lineno = int(float(ranges[index].string))
end = int(float(ranges[index+1].string))
while lineno < end:
lines.append(lineno)
lineno += 1
return lines
# XXX 13 Dec 2002 KBK Not used currently
# def saved_change_hook(self):
# "Extend base method - clear breaks if module is modified"
# if not self.get_saved():
# self.clear_file_breaks()
# EditorWindow.saved_change_hook(self)
def _close(self):
"Extend base method - clear breaks when module is closed"
self.clear_file_breaks()
EditorWindow._close(self)
class PyShellFileList(FileList):
"Extend base class: IDLE supports a shell and breakpoints"
# override FileList's class variable, instances return PyShellEditorWindow
# instead of EditorWindow when new edit windows are created.
EditorWindow = PyShellEditorWindow
pyshell = None
def open_shell(self, event=None):
if self.pyshell:
self.pyshell.top.wakeup()
else:
self.pyshell = PyShell(self)
if self.pyshell:
if not self.pyshell.begin():
return None
return self.pyshell
class ModifiedColorDelegator(ColorDelegator):
"Extend base class: colorizer for the shell window itself"
def __init__(self):
ColorDelegator.__init__(self)
self.LoadTagDefs()
def recolorize_main(self):
self.tag_remove("TODO", "1.0", "iomark")
self.tag_add("SYNC", "1.0", "iomark")
ColorDelegator.recolorize_main(self)
def LoadTagDefs(self):
ColorDelegator.LoadTagDefs(self)
theme = idleConf.GetOption('main','Theme','name')
self.tagdefs.update({
"stdin": {'background':None,'foreground':None},
"stdout": idleConf.GetHighlight(theme, "stdout"),
"stderr": idleConf.GetHighlight(theme, "stderr"),
"console": idleConf.GetHighlight(theme, "console"),
})
def removecolors(self):
# Don't remove shell color tags before "iomark"
for tag in self.tagdefs:
self.tag_remove(tag, "iomark", "end")
class ModifiedUndoDelegator(UndoDelegator):
"Extend base class: forbid insert/delete before the I/O mark"
def insert(self, index, chars, tags=None):
try:
if self.delegate.compare(index, "<", "iomark"):
self.delegate.bell()
return
except TclError:
pass
UndoDelegator.insert(self, index, chars, tags)
def delete(self, index1, index2=None):
try:
if self.delegate.compare(index1, "<", "iomark"):
self.delegate.bell()
return
except TclError:
pass
UndoDelegator.delete(self, index1, index2)
class MyRPCClient(rpc.RPCClient):
def handle_EOF(self):
"Override the base class - just re-raise EOFError"
raise EOFError
class ModifiedInterpreter(InteractiveInterpreter):
def __init__(self, tkconsole):
self.tkconsole = tkconsole
locals = sys.modules['__main__'].__dict__
InteractiveInterpreter.__init__(self, locals=locals)
self.save_warnings_filters = None
self.restarting = False
self.subprocess_arglist = None
self.port = PORT
self.original_compiler_flags = self.compile.compiler.flags
_afterid = None
rpcclt = None
rpcsubproc = None
def spawn_subprocess(self):
if self.subprocess_arglist is None:
self.subprocess_arglist = self.build_subprocess_arglist()
self.rpcsubproc = subprocess.Popen(self.subprocess_arglist)
def build_subprocess_arglist(self):
assert (self.port!=0), (
"Socket should have been assigned a port number.")
w = ['-W' + s for s in sys.warnoptions]
# Maybe IDLE is installed and is being accessed via sys.path,
# or maybe it's not installed and the idle.py script is being
# run from the IDLE source directory.
del_exitf = idleConf.GetOption('main', 'General', 'delete-exitfunc',
default=False, type='bool')
if __name__ == 'idlelib.PyShell':
command = "__import__('idlelib.run').run.main(%r)" % (del_exitf,)
else:
command = "__import__('run').main(%r)" % (del_exitf,)
return [sys.executable] + w + ["-c", command, str(self.port)]
def start_subprocess(self):
addr = (HOST, self.port)
# GUI makes several attempts to acquire socket, listens for connection
for i in range(3):
time.sleep(i)
try:
self.rpcclt = MyRPCClient(addr)
break
except OSError as err:
pass
else:
self.display_port_binding_error()
return None
# if PORT was 0, system will assign an 'ephemeral' port. Find it out:
self.port = self.rpcclt.listening_sock.getsockname()[1]
# if PORT was not 0, probably working with a remote execution server
if PORT != 0:
# To allow reconnection within the 2MSL wait (cf. Stevens TCP
# V1, 18.6), set SO_REUSEADDR. Note that this can be problematic
# on Windows since the implementation allows two active sockets on
# the same address!
self.rpcclt.listening_sock.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
self.spawn_subprocess()
#time.sleep(20) # test to simulate GUI not accepting connection
# Accept the connection from the Python execution server
self.rpcclt.listening_sock.settimeout(10)
try:
self.rpcclt.accept()
except socket.timeout as err:
self.display_no_subprocess_error()
return None
self.rpcclt.register("console", self.tkconsole)
self.rpcclt.register("stdin", self.tkconsole.stdin)
self.rpcclt.register("stdout", self.tkconsole.stdout)
self.rpcclt.register("stderr", self.tkconsole.stderr)
self.rpcclt.register("flist", self.tkconsole.flist)
self.rpcclt.register("linecache", linecache)
self.rpcclt.register("interp", self)
self.transfer_path(with_cwd=True)
self.poll_subprocess()
return self.rpcclt
def restart_subprocess(self, with_cwd=False):
if self.restarting:
return self.rpcclt
self.restarting = True
# close only the subprocess debugger
debug = self.getdebugger()
if debug:
try:
# Only close subprocess debugger, don't unregister gui_adap!
RemoteDebugger.close_subprocess_debugger(self.rpcclt)
except:
pass
# Kill subprocess, spawn a new one, accept connection.
self.rpcclt.close()
self.terminate_subprocess()
console = self.tkconsole
was_executing = console.executing
console.executing = False
self.spawn_subprocess()
try:
self.rpcclt.accept()
except socket.timeout as err:
self.display_no_subprocess_error()
return None
self.transfer_path(with_cwd=with_cwd)
console.stop_readline()
# annotate restart in shell window and mark it
console.text.delete("iomark", "end-1c")
if was_executing:
console.write('\n')
console.showprompt()
halfbar = ((int(console.width) - 16) // 2) * '='
console.write(halfbar + ' RESTART ' + halfbar)
console.text.mark_set("restart", "end-1c")
console.text.mark_gravity("restart", "left")
console.showprompt()
# restart subprocess debugger
if debug:
# Restarted debugger connects to current instance of debug GUI
gui = RemoteDebugger.restart_subprocess_debugger(self.rpcclt)
# reload remote debugger breakpoints for all PyShellEditWindows
debug.load_breakpoints()
self.compile.compiler.flags = self.original_compiler_flags
self.restarting = False
return self.rpcclt
def __request_interrupt(self):
self.rpcclt.remotecall("exec", "interrupt_the_server", (), {})
def interrupt_subprocess(self):
threading.Thread(target=self.__request_interrupt).start()
def kill_subprocess(self):
if self._afterid is not None:
self.tkconsole.text.after_cancel(self._afterid)
try:
self.rpcclt.listening_sock.close()
except AttributeError: # no socket
pass
try:
self.rpcclt.close()
except AttributeError: # no socket
pass
self.terminate_subprocess()
self.tkconsole.executing = False
self.rpcclt = None
def terminate_subprocess(self):
"Make sure subprocess is terminated"
try:
self.rpcsubproc.kill()
except OSError:
# process already terminated
return
else:
try:
self.rpcsubproc.wait()
except OSError:
return
def transfer_path(self, with_cwd=False):
if with_cwd: # Issue 13506
path = [''] # include Current Working Directory
path.extend(sys.path)
else:
path = sys.path
self.runcommand("""if 1:
import sys as _sys
_sys.path = %r
del _sys
\n""" % (path,))
active_seq = None
def poll_subprocess(self):
clt = self.rpcclt
if clt is None:
return
try:
response = clt.pollresponse(self.active_seq, wait=0.05)
except (EOFError, OSError, KeyboardInterrupt):
# lost connection or subprocess terminated itself, restart
# [the KBI is from rpc.SocketIO.handle_EOF()]
if self.tkconsole.closing:
return
response = None
self.restart_subprocess()
if response:
self.tkconsole.resetoutput()
self.active_seq = None
how, what = response
console = self.tkconsole.console
if how == "OK":
if what is not None:
print(repr(what), file=console)
elif how == "EXCEPTION":
if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
self.remote_stack_viewer()
elif how == "ERROR":
errmsg = "PyShell.ModifiedInterpreter: Subprocess ERROR:\n"
print(errmsg, what, file=sys.__stderr__)
print(errmsg, what, file=console)
# we received a response to the currently active seq number:
try:
self.tkconsole.endexecuting()
except AttributeError: # shell may have closed
pass
# Reschedule myself
if not self.tkconsole.closing:
self._afterid = self.tkconsole.text.after(
self.tkconsole.pollinterval, self.poll_subprocess)
debugger = None
def setdebugger(self, debugger):
self.debugger = debugger
def getdebugger(self):
return self.debugger
def open_remote_stack_viewer(self):
"""Initiate the remote stack viewer from a separate thread.
This method is called from the subprocess, and by returning from this
method we allow the subprocess to unblock. After a bit the shell
requests the subprocess to open the remote stack viewer which returns a
static object looking at the last exception. It is queried through
the RPC mechanism.
"""
self.tkconsole.text.after(300, self.remote_stack_viewer)
return
def remote_stack_viewer(self):
from idlelib import RemoteObjectBrowser
oid = self.rpcclt.remotequeue("exec", "stackviewer", ("flist",), {})
if oid is None:
self.tkconsole.root.bell()
return
item = RemoteObjectBrowser.StubObjectTreeItem(self.rpcclt, oid)
from idlelib.TreeWidget import ScrolledCanvas, TreeNode
top = Toplevel(self.tkconsole.root)
theme = idleConf.GetOption('main','Theme','name')
background = idleConf.GetHighlight(theme, 'normal')['background']
sc = ScrolledCanvas(top, bg=background, highlightthickness=0)
sc.frame.pack(expand=1, fill="both")
node = TreeNode(sc.canvas, None, item)
node.expand()
# XXX Should GC the remote tree when closing the window
gid = 0
def execsource(self, source):
"Like runsource() but assumes complete exec source"
filename = self.stuffsource(source)
self.execfile(filename, source)
def execfile(self, filename, source=None):
"Execute an existing file"
if source is None:
with tokenize.open(filename) as fp:
source = fp.read()
try:
code = compile(source, filename, "exec")
except (OverflowError, SyntaxError):
self.tkconsole.resetoutput()
print('*** Error in script or command!\n'
'Traceback (most recent call last):',
file=self.tkconsole.stderr)
InteractiveInterpreter.showsyntaxerror(self, filename)
self.tkconsole.showprompt()
else:
self.runcode(code)
def runsource(self, source):
"Extend base class method: Stuff the source in the line cache first"
filename = self.stuffsource(source)
self.more = 0
self.save_warnings_filters = warnings.filters[:]
warnings.filterwarnings(action="error", category=SyntaxWarning)
# at the moment, InteractiveInterpreter expects str
assert isinstance(source, str)
#if isinstance(source, str):
# from idlelib import IOBinding
# try:
# source = source.encode(IOBinding.encoding)
# except UnicodeError:
# self.tkconsole.resetoutput()
# self.write("Unsupported characters in input\n")
# return
try:
# InteractiveInterpreter.runsource() calls its runcode() method,
# which is overridden (see below)
return InteractiveInterpreter.runsource(self, source, filename)
finally:
if self.save_warnings_filters is not None:
warnings.filters[:] = self.save_warnings_filters
self.save_warnings_filters = None
def stuffsource(self, source):
"Stuff source in the filename cache"
filename = "<pyshell#%d>" % self.gid
self.gid = self.gid + 1
lines = source.split("\n")
linecache.cache[filename] = len(source)+1, 0, lines, filename
return filename
def prepend_syspath(self, filename):
"Prepend sys.path with file's directory if not already included"
self.runcommand("""if 1:
_filename = %r
import sys as _sys
from os.path import dirname as _dirname
_dir = _dirname(_filename)
if not _dir in _sys.path:
_sys.path.insert(0, _dir)
del _filename, _sys, _dirname, _dir
\n""" % (filename,))
def showsyntaxerror(self, filename=None):
"""Override Interactive Interpreter method: Use Colorizing
Color the offending position instead of printing it and pointing at it
with a caret.
"""
tkconsole = self.tkconsole
text = tkconsole.text
text.tag_remove("ERROR", "1.0", "end")
type, value, tb = sys.exc_info()
msg = getattr(value, 'msg', '') or value or "<no detail available>"
lineno = getattr(value, 'lineno', '') or 1
offset = getattr(value, 'offset', '') or 0
if offset == 0:
lineno += 1 #mark end of offending line
if lineno == 1:
pos = "iomark + %d chars" % (offset-1)
else:
pos = "iomark linestart + %d lines + %d chars" % \
(lineno-1, offset-1)
tkconsole.colorize_syntax_error(text, pos)
tkconsole.resetoutput()
self.write("SyntaxError: %s\n" % msg)
tkconsole.showprompt()
def showtraceback(self):
"Extend base class method to reset output properly"
self.tkconsole.resetoutput()
self.checklinecache()
InteractiveInterpreter.showtraceback(self)
if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
self.tkconsole.open_stack_viewer()
def checklinecache(self):
c = linecache.cache
for key in list(c.keys()):
if key[:1] + key[-1:] != "<>":
del c[key]
def runcommand(self, code):
"Run the code without invoking the debugger"
# The code better not raise an exception!
if self.tkconsole.executing:
self.display_executing_dialog()
return 0
if self.rpcclt:
self.rpcclt.remotequeue("exec", "runcode", (code,), {})
else:
exec(code, self.locals)
return 1
def runcode(self, code):
"Override base class method"
if self.tkconsole.executing:
self.restart_subprocess()
self.checklinecache()
if self.save_warnings_filters is not None:
warnings.filters[:] = self.save_warnings_filters
self.save_warnings_filters = None
debugger = self.debugger
try:
self.tkconsole.beginexecuting()
if not debugger and self.rpcclt is not None:
self.active_seq = self.rpcclt.asyncqueue("exec", "runcode",
(code,), {})
elif debugger:
debugger.run(code, self.locals)
else:
exec(code, self.locals)
except SystemExit:
if not self.tkconsole.closing:
if tkMessageBox.askyesno(
"Exit?",
"Do you want to exit altogether?",
default="yes",
master=self.tkconsole.text):
raise
else:
self.showtraceback()
else:
raise
except:
if use_subprocess:
print("IDLE internal error in runcode()",
file=self.tkconsole.stderr)
self.showtraceback()
self.tkconsole.endexecuting()
else:
if self.tkconsole.canceled:
self.tkconsole.canceled = False
print("KeyboardInterrupt", file=self.tkconsole.stderr)
else:
self.showtraceback()
finally:
if not use_subprocess:
try:
self.tkconsole.endexecuting()
except AttributeError: # shell may have closed
pass
def write(self, s):
"Override base class method"
return self.tkconsole.stderr.write(s)
def display_port_binding_error(self):
tkMessageBox.showerror(
"Port Binding Error",
"IDLE can't bind to a TCP/IP port, which is necessary to "
"communicate with its Python execution server. This might be "
"because no networking is installed on this computer. "
"Run IDLE with the -n command line switch to start without a "
"subprocess and refer to Help/IDLE Help 'Running without a "
"subprocess' for further details.",
master=self.tkconsole.text)
def display_no_subprocess_error(self):
tkMessageBox.showerror(
"Subprocess Startup Error",
"IDLE's subprocess didn't make connection. Either IDLE can't "
"start a subprocess or personal firewall software is blocking "
"the connection.",
master=self.tkconsole.text)
def display_executing_dialog(self):
tkMessageBox.showerror(
"Already executing",
"The Python Shell window is already executing a command; "
"please wait until it is finished.",
master=self.tkconsole.text)
class PyShell(OutputWindow):
shell_title = "Python " + python_version() + " Shell"
# Override classes
ColorDelegator = ModifiedColorDelegator
UndoDelegator = ModifiedUndoDelegator
# Override menus
menu_specs = [
("file", "_File"),
("edit", "_Edit"),
("debug", "_Debug"),
("options", "_Options"),
("windows", "_Windows"),
("help", "_Help"),
]
if sys.platform == "darwin":
menu_specs[-2] = ("windows", "_Window")
# New classes
from idlelib.IdleHistory import History
def __init__(self, flist=None):
if use_subprocess:
ms = self.menu_specs
if ms[2][0] != "shell":
ms.insert(2, ("shell", "She_ll"))
self.interp = ModifiedInterpreter(self)
if flist is None:
root = Tk()
fixwordbreaks(root)
root.withdraw()
flist = PyShellFileList(root)
#
OutputWindow.__init__(self, flist, None, None)
#
## self.config(usetabs=1, indentwidth=8, context_use_ps1=1)
self.usetabs = True
# indentwidth must be 8 when using tabs. See note in EditorWindow:
self.indentwidth = 8
self.context_use_ps1 = True
#
text = self.text
text.configure(wrap="char")
text.bind("<<newline-and-indent>>", self.enter_callback)
text.bind("<<plain-newline-and-indent>>", self.linefeed_callback)
text.bind("<<interrupt-execution>>", self.cancel_callback)
text.bind("<<end-of-file>>", self.eof_callback)
text.bind("<<open-stack-viewer>>", self.open_stack_viewer)
text.bind("<<toggle-debugger>>", self.toggle_debugger)
text.bind("<<toggle-jit-stack-viewer>>", self.toggle_jit_stack_viewer)
if use_subprocess:
text.bind("<<view-restart>>", self.view_restart_mark)
text.bind("<<restart-shell>>", self.restart_shell)
#
self.save_stdout = sys.stdout
self.save_stderr = sys.stderr
self.save_stdin = sys.stdin
from idlelib import IOBinding
self.stdin = PseudoInputFile(self, "stdin", IOBinding.encoding)
self.stdout = PseudoOutputFile(self, "stdout", IOBinding.encoding)
self.stderr = PseudoOutputFile(self, "stderr", IOBinding.encoding)
self.console = PseudoOutputFile(self, "console", IOBinding.encoding)
if not use_subprocess:
sys.stdout = self.stdout
sys.stderr = self.stderr
sys.stdin = self.stdin
try:
# page help() text to shell.
import pydoc # import must be done here to capture i/o rebinding.
# XXX KBK 27Dec07 use a textView someday, but must work w/o subproc
pydoc.pager = pydoc.plainpager
except:
sys.stderr = sys.__stderr__
raise
#
self.history = self.History(self.text)
#
self.pollinterval = 50 # millisec
def get_standard_extension_names(self):
return idleConf.GetExtensions(shell_only=True)
reading = False
executing = False
canceled = False
endoffile = False
closing = False
_stop_readline_flag = False
def set_warning_stream(self, stream):
global warning_stream
warning_stream = stream
def get_warning_stream(self):
return warning_stream
def toggle_debugger(self, event=None):
if self.executing:
tkMessageBox.showerror("Don't debug now",
"You can only toggle the debugger when idle",
master=self.text)
self.set_debugger_indicator()
return "break"
else:
db = self.interp.getdebugger()
if db:
self.close_debugger()
else:
self.open_debugger()
def set_debugger_indicator(self):
db = self.interp.getdebugger()
self.setvar("<<toggle-debugger>>", not not db)
def toggle_jit_stack_viewer(self, event=None):
pass # All we need is the variable
def close_debugger(self):
db = self.interp.getdebugger()
if db:
self.interp.setdebugger(None)
db.close()
if self.interp.rpcclt:
RemoteDebugger.close_remote_debugger(self.interp.rpcclt)
self.resetoutput()
self.console.write("[DEBUG OFF]\n")
sys.ps1 = ">>> "
self.showprompt()
self.set_debugger_indicator()
def open_debugger(self):
if self.interp.rpcclt:
dbg_gui = RemoteDebugger.start_remote_debugger(self.interp.rpcclt,
self)
else:
dbg_gui = Debugger.Debugger(self)
self.interp.setdebugger(dbg_gui)
dbg_gui.load_breakpoints()
sys.ps1 = "[DEBUG ON]\n>>> "
self.showprompt()
self.set_debugger_indicator()
def beginexecuting(self):
"Helper for ModifiedInterpreter"
self.resetoutput()
self.executing = 1
def endexecuting(self):
"Helper for ModifiedInterpreter"
self.executing = 0
self.canceled = 0
self.showprompt()
def close(self):
"Extend EditorWindow.close()"
if self.executing:
response = tkMessageBox.askokcancel(
"Kill?",
"The program is still running!\n Do you want to kill it?",
default="ok",
parent=self.text)
if response is False:
return "cancel"
self.stop_readline()
self.canceled = True
self.closing = True
return EditorWindow.close(self)
def _close(self):
"Extend EditorWindow._close(), shut down debugger and execution server"
self.close_debugger()
if use_subprocess:
self.interp.kill_subprocess()
# Restore std streams
sys.stdout = self.save_stdout
sys.stderr = self.save_stderr
sys.stdin = self.save_stdin
# Break cycles
self.interp = None
self.console = None
self.flist.pyshell = None
self.history = None
EditorWindow._close(self)
def ispythonsource(self, filename):
"Override EditorWindow method: never remove the colorizer"
return True
def short_title(self):
return self.shell_title
COPYRIGHT = \
'Type "copyright", "credits" or "license()" for more information.'
def begin(self):
self.text.mark_set("iomark", "insert")
self.resetoutput()
if use_subprocess:
nosub = ''
client = self.interp.start_subprocess()
if not client:
self.close()
return False
else:
nosub = ("==== No Subprocess ====\n\n" +
"WARNING: Running IDLE without a Subprocess is deprecated\n" +
"and will be removed in a later version. See Help/IDLE Help\n" +
"for details.\n\n")
sys.displayhook = rpc.displayhook
self.write("Python %s on %s\n%s\n%s" %
(sys.version, sys.platform, self.COPYRIGHT, nosub))
self.showprompt()
import tkinter
tkinter._default_root = None # 03Jan04 KBK What's this?
return True
def stop_readline(self):
if not self.reading: # no nested mainloop to exit.
return
self._stop_readline_flag = True
self.top.quit()
def readline(self):
save = self.reading
try:
self.reading = 1
self.top.mainloop() # nested mainloop()
finally:
self.reading = save
if self._stop_readline_flag:
self._stop_readline_flag = False
return ""
line = self.text.get("iomark", "end-1c")
if len(line) == 0: # may be EOF if we quit our mainloop with Ctrl-C
line = "\n"
self.resetoutput()
if self.canceled:
self.canceled = 0
if not use_subprocess:
raise KeyboardInterrupt
if self.endoffile:
self.endoffile = 0
line = ""
return line
def isatty(self):
return True
def cancel_callback(self, event=None):
try:
if self.text.compare("sel.first", "!=", "sel.last"):
return # Active selection -- always use default binding
except:
pass
if not (self.executing or self.reading):
self.resetoutput()
self.interp.write("KeyboardInterrupt\n")
self.showprompt()
return "break"
self.endoffile = 0
self.canceled = 1
if (self.executing and self.interp.rpcclt):
if self.interp.getdebugger():
self.interp.restart_subprocess()
else:
self.interp.interrupt_subprocess()
if self.reading:
self.top.quit() # exit the nested mainloop() in readline()
return "break"
def eof_callback(self, event):
if self.executing and not self.reading:
return # Let the default binding (delete next char) take over
if not (self.text.compare("iomark", "==", "insert") and
self.text.compare("insert", "==", "end-1c")):
return # Let the default binding (delete next char) take over
if not self.executing:
self.resetoutput()
self.close()
else:
self.canceled = 0
self.endoffile = 1
self.top.quit()
return "break"
def linefeed_callback(self, event):
# Insert a linefeed without entering anything (still autoindented)
if self.reading:
self.text.insert("insert", "\n")
self.text.see("insert")
else:
self.newline_and_indent_event(event)
return "break"
def enter_callback(self, event):
if self.executing and not self.reading:
return # Let the default binding (insert '\n') take over
# If some text is selected, recall the selection
# (but only if it is before the I/O mark)
try:
sel = self.text.get("sel.first", "sel.last")
if sel:
if self.text.compare("sel.last", "<=", "iomark"):
self.recall(sel, event)
return "break"
except:
pass
# If we're strictly before the line containing iomark, recall
# the current line, less a leading prompt, less leading or
# trailing whitespace
if self.text.compare("insert", "<", "iomark linestart"):
# Check if there's a relevant stdin range -- if so, use it
prev = self.text.tag_prevrange("stdin", "insert")
if prev and self.text.compare("insert", "<", prev[1]):
self.recall(self.text.get(prev[0], prev[1]), event)
return "break"
next = self.text.tag_nextrange("stdin", "insert")
if next and self.text.compare("insert lineend", ">=", next[0]):
self.recall(self.text.get(next[0], next[1]), event)
return "break"
# No stdin mark -- just get the current line, less any prompt
indices = self.text.tag_nextrange("console", "insert linestart")
if indices and \
self.text.compare(indices[0], "<=", "insert linestart"):
self.recall(self.text.get(indices[1], "insert lineend"), event)
else:
self.recall(self.text.get("insert linestart", "insert lineend"), event)
return "break"
# If we're between the beginning of the line and the iomark, i.e.
# in the prompt area, move to the end of the prompt
if self.text.compare("insert", "<", "iomark"):
self.text.mark_set("insert", "iomark")
# If we're in the current input and there's only whitespace
# beyond the cursor, erase that whitespace first
s = self.text.get("insert", "end-1c")
if s and not s.strip():
self.text.delete("insert", "end-1c")
# If we're in the current input before its last line,
# insert a newline right at the insert point
if self.text.compare("insert", "<", "end-1c linestart"):
self.newline_and_indent_event(event)
return "break"
# We're in the last line; append a newline and submit it
self.text.mark_set("insert", "end-1c")
if self.reading:
self.text.insert("insert", "\n")
self.text.see("insert")
else:
self.newline_and_indent_event(event)
self.text.tag_add("stdin", "iomark", "end-1c")
self.text.update_idletasks()
if self.reading:
self.top.quit() # Break out of recursive mainloop()
else:
self.runit()
return "break"
def recall(self, s, event):
# remove leading and trailing empty or whitespace lines
s = re.sub(r'^\s*\n', '', s)
s = re.sub(r'\n\s*$', '', s)
lines = s.split('\n')
self.text.undo_block_start()
try:
self.text.tag_remove("sel", "1.0", "end")
self.text.mark_set("insert", "end-1c")
prefix = self.text.get("insert linestart", "insert")
if prefix.rstrip().endswith(':'):
self.newline_and_indent_event(event)
prefix = self.text.get("insert linestart", "insert")
self.text.insert("insert", lines[0].strip())
if len(lines) > 1:
orig_base_indent = re.search(r'^([ \t]*)', lines[0]).group(0)
new_base_indent = re.search(r'^([ \t]*)', prefix).group(0)
for line in lines[1:]:
if line.startswith(orig_base_indent):
# replace orig base indentation with new indentation
line = new_base_indent + line[len(orig_base_indent):]
self.text.insert('insert', '\n'+line.rstrip())
finally:
self.text.see("insert")
self.text.undo_block_stop()
def runit(self):
line = self.text.get("iomark", "end-1c")
# Strip off last newline and surrounding whitespace.
# (To allow you to hit return twice to end a statement.)
i = len(line)
while i > 0 and line[i-1] in " \t":
i = i-1
if i > 0 and line[i-1] == "\n":
i = i-1
while i > 0 and line[i-1] in " \t":
i = i-1
line = line[:i]
more = self.interp.runsource(line)
def open_stack_viewer(self, event=None):
if self.interp.rpcclt:
return self.interp.remote_stack_viewer()
try:
sys.last_traceback
except:
tkMessageBox.showerror("No stack trace",
"There is no stack trace yet.\n"
"(sys.last_traceback is not defined)",
master=self.text)
return
from idlelib.StackViewer import StackBrowser
sv = StackBrowser(self.root, self.flist)
def view_restart_mark(self, event=None):
self.text.see("iomark")
self.text.see("restart")
def restart_shell(self, event=None):
"Callback for Run/Restart Shell Cntl-F6"
self.interp.restart_subprocess(with_cwd=True)
def showprompt(self):
self.resetoutput()
try:
s = str(sys.ps1)
except:
s = ""
self.console.write(s)
self.text.mark_set("insert", "end-1c")
self.set_line_and_column()
self.io.reset_undo()
def resetoutput(self):
source = self.text.get("iomark", "end-1c")
if self.history:
self.history.store(source)
if self.text.get("end-2c") != "\n":
self.text.insert("end-1c", "\n")
self.text.mark_set("iomark", "end-1c")
self.set_line_and_column()
def write(self, s, tags=()):
if isinstance(s, str) and len(s) and max(s) > '\uffff':
# Tk doesn't support outputting non-BMP characters
# Let's assume the printed string is not very long,
# find first non-BMP character and construct informative
# UnicodeEncodeError exception.
for start, char in enumerate(s):
if char > '\uffff':
break
raise UnicodeEncodeError("UCS-2", char, start, start+1,
'Non-BMP character not supported in Tk')
try:
self.text.mark_gravity("iomark", "right")
count = OutputWindow.write(self, s, tags, "iomark")
self.text.mark_gravity("iomark", "left")
except:
raise ###pass # ### 11Aug07 KBK if we are expecting exceptions
# let's find out what they are and be specific.
if self.canceled:
self.canceled = 0
if not use_subprocess:
raise KeyboardInterrupt
return count
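    # --- illustrative note (a sketch, not part of IDLE) -----------------------
    # Because of the guard at the top of write(), printing an astral-plane
    # character in the shell raises immediately instead of confusing Tk,
    # roughly:
    #
    #     >>> print('\U0001f600')
    #     UnicodeEncodeError: 'UCS-2' codec can't encode character '\U0001f600'
    #     in position 0: Non-BMP character not supported in Tk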
def rmenu_check_cut(self):
try:
if self.text.compare('sel.first', '<', 'iomark'):
return 'disabled'
except TclError: # no selection, so the index 'sel.first' doesn't exist
return 'disabled'
return super().rmenu_check_cut()
def rmenu_check_paste(self):
if self.text.compare('insert','<','iomark'):
return 'disabled'
return super().rmenu_check_paste()
class PseudoFile(io.TextIOBase):
def __init__(self, shell, tags, encoding=None):
self.shell = shell
self.tags = tags
self._encoding = encoding
@property
def encoding(self):
return self._encoding
@property
def name(self):
return '<%s>' % self.tags
def isatty(self):
return True
class PseudoOutputFile(PseudoFile):
def writable(self):
return True
def write(self, s):
if self.closed:
raise ValueError("write to closed file")
if type(s) is not str:
if not isinstance(s, str):
raise TypeError('must be str, not ' + type(s).__name__)
# See issue #19481
s = str.__str__(s)
return self.shell.write(s, self.tags)
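# --- illustrative note (a sketch, not part of IDLE) ---------------------------
# The type()/isinstance() pair in write() above accepts str subclasses but
# normalizes them with str.__str__() (see issue #19481), so the shell never
# invokes a subclass's overridden methods while writing:
#
#     class Odd(str):
#         def __str__(self):
#             return 'something else entirely'
#
#     # PseudoOutputFile(shell, 'stdout').write(Odd('ok')) passes the plain
#     # value 'ok', not the overridden __str__ result, on to shell.write().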
class PseudoInputFile(PseudoFile):
def __init__(self, shell, tags, encoding=None):
PseudoFile.__init__(self, shell, tags, encoding)
self._line_buffer = ''
def readable(self):
return True
def read(self, size=-1):
if self.closed:
raise ValueError("read from closed file")
if size is None:
size = -1
elif not isinstance(size, int):
raise TypeError('must be int, not ' + type(size).__name__)
result = self._line_buffer
self._line_buffer = ''
if size < 0:
while True:
line = self.shell.readline()
if not line: break
result += line
else:
while len(result) < size:
line = self.shell.readline()
if not line: break
result += line
self._line_buffer = result[size:]
result = result[:size]
return result
def readline(self, size=-1):
if self.closed:
raise ValueError("read from closed file")
if size is None:
size = -1
elif not isinstance(size, int):
raise TypeError('must be int, not ' + type(size).__name__)
line = self._line_buffer or self.shell.readline()
if size < 0:
size = len(line)
eol = line.find('\n', 0, size)
if eol >= 0:
size = eol + 1
self._line_buffer = line[size:]
return line[:size]
def close(self):
self.shell.close()
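# --- illustrative sketch (not part of IDLE) -----------------------------------
# The buffering contract of PseudoInputFile, demonstrated with a hypothetical
# FakeShell whose readline() hands out pre-canned lines:
#
#     class FakeShell:
#         def __init__(self, lines):
#             self._lines = list(lines)
#         def readline(self):
#             return self._lines.pop(0) if self._lines else ''
#
#     stdin = PseudoInputFile(FakeShell(['spam\n', 'eggs\n']), 'stdin', 'utf-8')
#     stdin.read(2)     # -> 'sp'; the rest of the line is buffered
#     stdin.readline()  # -> 'am\n'; readline() drains the buffer first
#     stdin.readline()  # -> 'eggs\n'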
usage_msg = """\
USAGE: idle [-deins] [-t title] [file]*
idle [-dns] [-t title] (-c cmd | -r file) [arg]*
idle [-dns] [-t title] - [arg]*
-h print this help message and exit
-n run IDLE without a subprocess (DEPRECATED,
see Help/IDLE Help for details)
The following options will override the IDLE 'settings' configuration:
-e open an edit window
-i open a shell window
The following options imply -i and will open a shell:
-c cmd run the command in a shell, or
-r file run script from file
-d enable the debugger
-s run $IDLESTARTUP or $PYTHONSTARTUP before anything else
-t title set title of shell window
A default edit window will be bypassed when -c, -r, or - are used.
[arg]* are passed to the command (-c) or script (-r) in sys.argv[1:].
Examples:
idle
Open an edit window or shell depending on IDLE's configuration.
idle foo.py foobar.py
Edit the files, also open a shell if configured to start with shell.
idle -est "Baz" foo.py
Run $IDLESTARTUP or $PYTHONSTARTUP, edit foo.py, and open a shell
window with the title "Baz".
idle -c "import sys; print(sys.argv)" "foo"
Open a shell window and run the command, passing "-c" in sys.argv[0]
and "foo" in sys.argv[1].
idle -d -s -r foo.py "Hello World"
Open a shell window, run a startup script, enable the debugger, and
run foo.py, passing "foo.py" in sys.argv[0] and "Hello World" in
sys.argv[1].
echo "import sys; print(sys.argv)" | idle - "foobar"
Open a shell window, run the script piped in, passing '' in sys.argv[0]
and "foobar" in sys.argv[1].
"""
def main():
global flist, root, use_subprocess
capture_warnings(True)
use_subprocess = True
enable_shell = False
enable_edit = False
debug = False
cmd = None
script = None
startup = False
try:
opts, args = getopt.getopt(sys.argv[1:], "c:deihnr:st:")
except getopt.error as msg:
print("Error: %s\n%s" % (msg, usage_msg), file=sys.stderr)
sys.exit(2)
for o, a in opts:
if o == '-c':
cmd = a
enable_shell = True
if o == '-d':
debug = True
enable_shell = True
if o == '-e':
enable_edit = True
if o == '-h':
sys.stdout.write(usage_msg)
sys.exit()
if o == '-i':
enable_shell = True
if o == '-n':
print(" Warning: running IDLE without a subprocess is deprecated.",
file=sys.stderr)
use_subprocess = False
if o == '-r':
script = a
if os.path.isfile(script):
pass
else:
print("No script file: ", script)
sys.exit()
enable_shell = True
if o == '-s':
startup = True
enable_shell = True
if o == '-t':
PyShell.shell_title = a
enable_shell = True
if args and args[0] == '-':
cmd = sys.stdin.read()
enable_shell = True
# process sys.argv and sys.path:
for i in range(len(sys.path)):
sys.path[i] = os.path.abspath(sys.path[i])
if args and args[0] == '-':
sys.argv = [''] + args[1:]
elif cmd:
sys.argv = ['-c'] + args
elif script:
sys.argv = [script] + args
elif args:
enable_edit = True
pathx = []
for filename in args:
pathx.append(os.path.dirname(filename))
for dir in pathx:
dir = os.path.abspath(dir)
if not dir in sys.path:
sys.path.insert(0, dir)
else:
dir = os.getcwd()
if dir not in sys.path:
sys.path.insert(0, dir)
# check the IDLE settings configuration (but command line overrides)
edit_start = idleConf.GetOption('main', 'General',
'editor-on-startup', type='bool')
enable_edit = enable_edit or edit_start
enable_shell = enable_shell or not enable_edit
# start editor and/or shell windows:
root = Tk(className="Idle")
# set application icon
icondir = os.path.join(os.path.dirname(__file__), 'Icons')
if system() == 'Windows':
iconfile = os.path.join(icondir, 'idle.ico')
root.wm_iconbitmap(default=iconfile)
elif TkVersion >= 8.5:
ext = '.png' if TkVersion >= 8.6 else '.gif'
iconfiles = [os.path.join(icondir, 'idle_%d%s' % (size, ext))
for size in (16, 32, 48)]
icons = [PhotoImage(file=iconfile) for iconfile in iconfiles]
root.wm_iconphoto(True, *icons)
fixwordbreaks(root)
root.withdraw()
flist = PyShellFileList(root)
macosxSupport.setupApp(root, flist)
if enable_edit:
if not (cmd or script):
for filename in args[:]:
if flist.open(filename) is None:
# filename is actually a directory; skip it
args.remove(filename)
if not args:
flist.new()
if enable_shell:
shell = flist.open_shell()
if not shell:
return # couldn't open shell
if macosxSupport.isAquaTk() and flist.dict:
# On OSX: when the user has double-clicked on a file that causes
# IDLE to be launched, the shell window will open just in front of
# the file she wants to see. Lower the interpreter window when
# there are open files.
shell.top.lower()
else:
shell = flist.pyshell
# Handle remaining options. If any of these are set, enable_shell
# was set also, so shell must be true to reach here.
if debug:
shell.open_debugger()
if startup:
filename = os.environ.get("IDLESTARTUP") or \
os.environ.get("PYTHONSTARTUP")
if filename and os.path.isfile(filename):
shell.interp.execfile(filename)
if cmd or script:
shell.interp.runcommand("""if 1:
import sys as _sys
_sys.argv = %r
del _sys
\n""" % (sys.argv,))
if cmd:
shell.interp.execsource(cmd)
elif script:
shell.interp.prepend_syspath(script)
shell.interp.execfile(script)
elif shell:
# If there is a shell window and no cmd or script in progress,
# check for problematic OS X Tk versions and print a warning
# message in the IDLE shell window; this is less intrusive
# than always opening a separate window.
tkversionwarning = macosxSupport.tkVersionWarning(root)
if tkversionwarning:
shell.interp.runcommand("print('%s')" % tkversionwarning)
while flist.inversedict: # keep IDLE running while files are open.
root.mainloop()
root.destroy()
capture_warnings(False)
if __name__ == "__main__":
sys.modules['PyShell'] = sys.modules['__main__']
main()
capture_warnings(False) # Make sure turned off; see issue 18081
|
trek10inc/awsume
|
refs/heads/master
|
awsume/configure/autocomplete.py
|
1
|
import os, pathlib
BASH_AUTOCOMPLETE_SCRIPT = """
_awsume() {
local cur prev opts
COMPREPLY=()
cur="${COMP_WORDS[COMP_CWORD]}"
prev="${COMP_WORDS[COMP_CWORD-1]}"
opts=$(awsume-autocomplete)
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
return 0
}
complete -F _awsume awsume
"""
ZSH_AUTOCOMPLETE_SCRIPT = """
#Auto-Complete function for AWSume
fpath=(~/.awsume/zsh-autocomplete/ $fpath)
"""
ZSH_AUTOCOMPLETE_FUNCTION = """#compdef awsume
_arguments "*: :($(awsume-autocomplete))"
"""
FISH_AUTOCOMPLETE_SCRIPT = """
complete --command awsume --arguments '(awsume-autocomplete)'
"""
POWERSHELL_AUTOCOMPLETE_SCRIPT = """
Register-ArgumentCompleter -Native -CommandName awsume -ScriptBlock {
param($wordToComplete, $commandAst, $cursorPosition)
$(awsume-autocomplete) |
Where-Object { $_ -like "$wordToComplete*" } |
Sort-Object |
ForEach-Object {
[System.Management.Automation.CompletionResult]::new($_, $_, 'ParameterValue', $_)
}
}
"""
SCRIPTS = {
'bash': BASH_AUTOCOMPLETE_SCRIPT,
'zsh': ZSH_AUTOCOMPLETE_SCRIPT,
'powershell': POWERSHELL_AUTOCOMPLETE_SCRIPT,
'fish': FISH_AUTOCOMPLETE_SCRIPT,
}
def main(shell: str, autocomplete_file: str):
autocomplete_file = str(pathlib.Path(autocomplete_file).expanduser())
autocomplete_script = SCRIPTS[shell]
basedir = os.path.dirname(autocomplete_file)
if basedir and not os.path.exists(basedir):
os.makedirs(basedir)
open(autocomplete_file, 'a').close()
with open(autocomplete_file, 'r') as f:
    script_present = autocomplete_script in f.read()
if script_present:
    print('Autocomplete script already in ' + autocomplete_file)
else:
    with open(autocomplete_file, 'a') as f:
        f.write('\n#Auto-Complete function for AWSume')
        f.write(autocomplete_script)
    print('Wrote autocomplete script to ' + autocomplete_file)
# install autocomplete function if zsh
if shell == 'zsh':
zsh_autocomplete_function_file = str(pathlib.Path('~/.awsume/zsh-autocomplete/_awsume').expanduser())
basedir = os.path.dirname(zsh_autocomplete_function_file)
if basedir and not os.path.exists(basedir):
os.makedirs(basedir)
if not os.path.isfile(zsh_autocomplete_function_file):
open(zsh_autocomplete_function_file, 'w').close()
with open(zsh_autocomplete_function_file, 'r') as f:
    function_present = ZSH_AUTOCOMPLETE_FUNCTION in f.read()
if function_present:
    print('Zsh function already in ' + zsh_autocomplete_function_file)
else:
    with open(zsh_autocomplete_function_file, 'a') as f:
        f.write(ZSH_AUTOCOMPLETE_FUNCTION)
    print('Wrote zsh function to ' + zsh_autocomplete_function_file)
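# --- illustrative usage (a sketch) ---------------------------------------------
# Appending the bash completion script to a profile file; the rc-file path is
# an assumption, any writable shell startup file works:
#
#     main('bash', '~/.bash_profile')
#
# main() expands '~', creates missing parent directories, and is idempotent:
# a second run finds the script already present and does not append it again.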
|
skython/eXe
|
refs/heads/master
|
twisted/web/rewrite.py
|
14
|
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
#
from twisted.web import resource
class RewriterResource(resource.Resource):
def __init__(self, orig, *rewriteRules):
resource.Resource.__init__(self)
self.resource = orig
self.rewriteRules = list(rewriteRules)
def _rewrite(self, request):
for rewriteRule in self.rewriteRules:
rewriteRule(request)
def getChild(self, path, request):
request.postpath.insert(0, path)
request.prepath.pop()
self._rewrite(request)
path = request.postpath.pop(0)
request.prepath.append(path)
return self.resource.getChildWithDefault(path, request)
def render(self, request):
self._rewrite(request)
return self.resource.render(request)
def tildeToUsers(request):
if request.postpath and request.postpath[0][:1]=='~':
request.postpath[:1] = ['users', request.postpath[0][1:]]
request.path = '/'+'/'.join(request.prepath+request.postpath)
def alias(aliasPath, sourcePath):
"""
I am not a very good aliaser. But I'm the best I can be. If I'm
aliasing to a Resource that generates links, and it uses any parts
of request.prepath to do so, the links will not be relative to the
aliased path, but rather to the aliased-to path. This means I can't
alias static.File directory listings all that nicely. However, I can
still be useful, as many resources will play nice.
"""
sourcePath = sourcePath.split('/')
aliasPath = aliasPath.split('/')
def rewriter(request):
if request.postpath[:len(aliasPath)] == aliasPath:
after = request.postpath[len(aliasPath):]
request.postpath = sourcePath + after
request.path = '/'+'/'.join(request.prepath+request.postpath)
return rewriter
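# --- illustrative usage (a sketch, not part of Twisted) ------------------------
# Serving /foo/... from /bar/... and mapping /~alice to /users/alice, where
# `root` is assumed to be any resource.Resource instance:
#
#     rewritten = RewriterResource(root, alias('foo', 'bar'), tildeToUsers)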
|
comiconomenclaturist/Airtime
|
refs/heads/2.5.x
|
dev_tools/compare_cc_files_to_fs.py
|
10
|
import os
import time
import shutil
import sys
import logging
from configobj import ConfigObj
from subprocess import Popen, PIPE
from api_clients import api_client as apc
"""
This script compares what the database has to what your filesystem has and
reports any differences. It will *NOT* make any changes, unlike media-monitor,
which uses similar code when it starts up (but then makes changes if something
is different).
"""
class AirtimeMediaMonitorBootstrap():
"""AirtimeMediaMonitorBootstrap constructor
Keyword Arguments:
logger -- reference to the media-monitor logging facility
pe -- reference to an instance of ProcessEvent
api_clients -- reference of api_clients to communicate with airtime-server
"""
def __init__(self):
config = ConfigObj('/etc/airtime/airtime.conf')
self.api_client = apc.api_client_factory(config)
"""
try:
logging.config.fileConfig("logging.cfg")
except Exception, e:
print 'Error configuring logging: ', e
sys.exit(1)
"""
self.logger = logging.getLogger()
self.logger.info("Adding %s on watch list...", "xxx")
self.scan()
"""On bootup we want to scan all directories and look for files that
weren't there or files that changed before media-monitor process
went offline.
"""
def scan(self):
directories = self.get_list_of_watched_dirs()
self.logger.info("watched directories found: %s", directories)
for id, dir in directories.iteritems():
self.logger.debug("%s, %s", id, dir)
#CHANGED!!!
#self.sync_database_to_filesystem(id, api_client.encode_to(dir, "utf-8"))
self.sync_database_to_filesystem(id, dir)
"""Gets a list of files that the Airtime database knows for a specific directory.
You need to provide the directory's row ID, which is obtained when calling
get_list_of_watched_dirs function.
dir_id -- row id of the directory in the cc_watched_dirs database table
"""
def list_db_files(self, dir_id):
return self.api_client.list_all_db_files(dir_id)
"""
returns the path and the database row id for this path for all watched directories. Also
returns the Stor directory, which can be identified by its row id (always has value of "1")
"""
def get_list_of_watched_dirs(self):
json = self.api_client.list_all_watched_dirs()
return json["dirs"]
def scan_dir_for_existing_files(self, dir):
command = 'find "%s" -type f \\( -iname "*.ogg" -o -iname "*.mp3" \\) -readable' % dir.replace('"', '\\"')
self.logger.debug(command)
#CHANGED!!
stdout = self.exec_command(command).decode("UTF-8")
return stdout.splitlines()
def exec_command(self, command):
p = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
self.logger.warn("command \n%s\n return with a non-zero return value", command)
self.logger.error(stderr)
return stdout
"""
This function takes in a path name provided by the database (and its corresponding row id)
and reads the list of files in the local file system. Its purpose is to discover which files
exist on the file system but not in the database and vice versa, as well as which files have
been modified since the database was last updated. In each case, this method will call an
appropriate method to ensure that the database actually represents the filesystem.
dir_id -- row id of the directory in the cc_watched_dirs database table
dir -- pathname of the directory
"""
def sync_database_to_filesystem(self, dir_id, dir):
"""
set to hold new and/or modified files. We use a set so it is OK if files are
added twice, because some of the tests for new files return result sets that
are not mutually exclusive of each other.
"""
db_known_files_set = set()
files = self.list_db_files(dir_id)
for file in files['files']:
db_known_files_set.add(file)
existing_files = self.scan_dir_for_existing_files(dir)
existing_files_set = set()
for file_path in existing_files:
if len(file_path.strip(" \n")) > 0:
existing_files_set.add(file_path[len(dir):])
deleted_files_set = db_known_files_set - existing_files_set
new_files_set = existing_files_set - db_known_files_set
print ("DB Known files: \n%s\n\n"%len(db_known_files_set))
print ("FS Known files: \n%s\n\n"%len(existing_files_set))
print ("Deleted files: \n%s\n\n"%deleted_files_set)
print ("New files: \n%s\n\n"%new_files_set)
if __name__ == "__main__":
AirtimeMediaMonitorBootstrap()
|
zachjanicki/osf.io
|
refs/heads/develop
|
framework/utils.py
|
65
|
from __future__ import absolute_import
import re
from werkzeug.utils import secure_filename as werkzeug_secure_filename
def iso8601format(dt):
"""Given a datetime object, return an associated ISO-8601 string"""
return dt.strftime('%Y-%m-%dT%H:%M:%SZ') if dt else ''
def secure_filename(filename):
"""Return a secure version of a filename.
Uses ``werkzeug.utils.secure_filename``, but explicitly allows for leading
underscores.
:param str filename: A filename to sanitize
:return: Secure version of filename
"""
secure = werkzeug_secure_filename(filename)
# Check for leading underscores, and add them back in
try:
secure = re.search('^_+', filename).group() + secure
except AttributeError:
pass
return secure
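# --- illustrative usage (a sketch; exact output depends on the werkzeug version)
#
#     >>> secure_filename('__init__.py')
#     '__init__.py'    # werkzeug alone would strip the leading underscores
#     >>> secure_filename('../../etc/passwd')
#     'etc_passwd'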
|
LeBarbouze/tunacell
|
refs/heads/master
|
docs/conf.py
|
1
|
# -*- coding: utf-8 -*-
#
# tuna documentation build configuration file, created by
# sphinx-quickstart on Sun May 7 19:17:53 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'tunacell'
copyright = u'2017-2019, Joachim Rambeau'
author = u'Joachim Rambeau'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
try:
from tunacell import __version__
# The short X.Y version.
version = '.'.join(__version__.split('.')[:2])
# The full version, including alpha/beta/rc tags.
release = __version__
except ImportError:
version = release = 'dev'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'tunadoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'tunacell.tex', u'tunacell Documentation',
u'Joachim Rambeau', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'tunacell', u'tunacell Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'tunacell', u'tunacell Documentation',
author, 'tunacell', 'One line description of project.',
'Miscellaneous'),
]
|
TheTypoMaster/my-vim-set-mac
|
refs/heads/master
|
.vim/bundle/YouCompleteMe/third_party/ycmd/cpp/ycm/tests/gmock/scripts/generator/cpp/gmock_class_test.py
|
51
|
#!/usr/bin/env python
#
# Copyright 2009 Neal Norwitz All Rights Reserved.
# Portions Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for gmock.scripts.generator.cpp.gmock_class."""
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
import os
import sys
import unittest
# Allow the cpp imports below to work when run as a standalone script.
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from cpp import ast
from cpp import gmock_class
class TestCase(unittest.TestCase):
"""Helper class that adds assert methods."""
def StripLeadingWhitespace(self, lines):
"""Strip leading whitespace in each line in 'lines'."""
return '\n'.join([s.lstrip() for s in lines.split('\n')])
def assertEqualIgnoreLeadingWhitespace(self, expected_lines, lines):
"""Specialized assert that ignores the indent level."""
self.assertEqual(expected_lines, self.StripLeadingWhitespace(lines))
class GenerateMethodsTest(TestCase):
def GenerateMethodSource(self, cpp_source):
"""Convert C++ source to Google Mock output source lines."""
method_source_lines = []
# <test> is a pseudo-filename, it is not read or written.
builder = ast.BuilderFromSource(cpp_source, '<test>')
ast_list = list(builder.Generate())
gmock_class._GenerateMethods(method_source_lines, cpp_source, ast_list[0])
return '\n'.join(method_source_lines)
def testSimpleMethod(self):
source = """
class Foo {
public:
virtual int Bar();
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD0(Bar,\nint());',
self.GenerateMethodSource(source))
def testSimpleConstructorsAndDestructor(self):
source = """
class Foo {
public:
Foo();
Foo(int x);
Foo(const Foo& f);
Foo(Foo&& f);
~Foo();
virtual int Bar() = 0;
};
"""
# The constructors and destructor should be ignored.
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD0(Bar,\nint());',
self.GenerateMethodSource(source))
def testVirtualDestructor(self):
source = """
class Foo {
public:
virtual ~Foo();
virtual int Bar() = 0;
};
"""
# The destructor should be ignored.
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD0(Bar,\nint());',
self.GenerateMethodSource(source))
def testExplicitlyDefaultedConstructorsAndDestructor(self):
source = """
class Foo {
public:
Foo() = default;
Foo(const Foo& f) = default;
Foo(Foo&& f) = default;
~Foo() = default;
virtual int Bar() = 0;
};
"""
# The constructors and destructor should be ignored.
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD0(Bar,\nint());',
self.GenerateMethodSource(source))
def testExplicitlyDeletedConstructorsAndDestructor(self):
source = """
class Foo {
public:
Foo() = delete;
Foo(const Foo& f) = delete;
Foo(Foo&& f) = delete;
~Foo() = delete;
virtual int Bar() = 0;
};
"""
# The constructors and destructor should be ignored.
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD0(Bar,\nint());',
self.GenerateMethodSource(source))
def testSimpleOverrideMethod(self):
source = """
class Foo {
public:
int Bar() override;
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD0(Bar,\nint());',
self.GenerateMethodSource(source))
def testSimpleConstMethod(self):
source = """
class Foo {
public:
virtual void Bar(bool flag) const;
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_CONST_METHOD1(Bar,\nvoid(bool flag));',
self.GenerateMethodSource(source))
def testExplicitVoid(self):
source = """
class Foo {
public:
virtual int Bar(void);
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD0(Bar,\nint(void));',
self.GenerateMethodSource(source))
def testStrangeNewlineInParameter(self):
source = """
class Foo {
public:
virtual void Bar(int
a) = 0;
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD1(Bar,\nvoid(int a));',
self.GenerateMethodSource(source))
def testDefaultParameters(self):
source = """
class Foo {
public:
virtual void Bar(int a, char c = 'x') = 0;
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD2(Bar,\nvoid(int, char));',
self.GenerateMethodSource(source))
def testMultipleDefaultParameters(self):
source = """
class Foo {
public:
virtual void Bar(int a = 42, char c = 'x') = 0;
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD2(Bar,\nvoid(int, char));',
self.GenerateMethodSource(source))
def testRemovesCommentsWhenDefaultsArePresent(self):
source = """
class Foo {
public:
virtual void Bar(int a = 42 /* a comment */,
char /* other comment */ c= 'x') = 0;
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD2(Bar,\nvoid(int, char));',
self.GenerateMethodSource(source))
def testDoubleSlashCommentsInParameterListAreRemoved(self):
source = """
class Foo {
public:
virtual void Bar(int a, // inline comments should be elided.
int b // inline comments should be elided.
) const = 0;
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_CONST_METHOD2(Bar,\nvoid(int a, int b));',
self.GenerateMethodSource(source))
def testCStyleCommentsInParameterListAreNotRemoved(self):
# NOTE(nnorwitz): I'm not sure if it's the best behavior to keep these
# comments. Also note that C style comments after the last parameter
# are still elided.
source = """
class Foo {
public:
virtual const string& Bar(int /* keeper */, int b);
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD2(Bar,\nconst string&(int /* keeper */, int b));',
self.GenerateMethodSource(source))
def testArgsOfTemplateTypes(self):
source = """
class Foo {
public:
virtual int Bar(const vector<int>& v, map<int, string>* output);
};"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD2(Bar,\n'
'int(const vector<int>& v, map<int, string>* output));',
self.GenerateMethodSource(source))
def testReturnTypeWithOneTemplateArg(self):
source = """
class Foo {
public:
virtual vector<int>* Bar(int n);
};"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD1(Bar,\nvector<int>*(int n));',
self.GenerateMethodSource(source))
def testReturnTypeWithManyTemplateArgs(self):
source = """
class Foo {
public:
virtual map<int, string> Bar();
};"""
# Comparing the comment text is brittle - we'll think of something
# better in case this gets annoying, but for now let's keep it simple.
self.assertEqualIgnoreLeadingWhitespace(
'// The following line won\'t really compile, as the return\n'
'// type has multiple template arguments. To fix it, use a\n'
'// typedef for the return type.\n'
'MOCK_METHOD0(Bar,\nmap<int, string>());',
self.GenerateMethodSource(source))
def testSimpleMethodInTemplatedClass(self):
source = """
template<class T>
class Foo {
public:
virtual int Bar();
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD0_T(Bar,\nint());',
self.GenerateMethodSource(source))
def testPointerArgWithoutNames(self):
source = """
class Foo {
virtual int Bar(C*);
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD1(Bar,\nint(C*));',
self.GenerateMethodSource(source))
def testReferenceArgWithoutNames(self):
source = """
class Foo {
virtual int Bar(C&);
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD1(Bar,\nint(C&));',
self.GenerateMethodSource(source))
def testArrayArgWithoutNames(self):
source = """
class Foo {
virtual int Bar(C[]);
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD1(Bar,\nint(C[]));',
self.GenerateMethodSource(source))
class GenerateMocksTest(TestCase):
def GenerateMocks(self, cpp_source):
"""Convert C++ source to complete Google Mock output source."""
# <test> is a pseudo-filename, it is not read or written.
filename = '<test>'
builder = ast.BuilderFromSource(cpp_source, filename)
ast_list = list(builder.Generate())
lines = gmock_class._GenerateMocks(filename, cpp_source, ast_list, None)
return '\n'.join(lines)
def testNamespaces(self):
source = """
namespace Foo {
namespace Bar { class Forward; }
namespace Baz {
class Test {
public:
virtual void Foo();
};
} // namespace Baz
} // namespace Foo
"""
expected = """\
namespace Foo {
namespace Baz {
class MockTest : public Test {
public:
MOCK_METHOD0(Foo,
void());
};
} // namespace Baz
} // namespace Foo
"""
self.assertEqualIgnoreLeadingWhitespace(
expected, self.GenerateMocks(source))
def testClassWithStorageSpecifierMacro(self):
source = """
class STORAGE_SPECIFIER Test {
public:
virtual void Foo();
};
"""
expected = """\
class MockTest : public Test {
public:
MOCK_METHOD0(Foo,
void());
};
"""
self.assertEqualIgnoreLeadingWhitespace(
expected, self.GenerateMocks(source))
def testTemplatedForwardDeclaration(self):
source = """
template <class T> class Forward; // Forward declaration should be ignored.
class Test {
public:
virtual void Foo();
};
"""
expected = """\
class MockTest : public Test {
public:
MOCK_METHOD0(Foo,
void());
};
"""
self.assertEqualIgnoreLeadingWhitespace(
expected, self.GenerateMocks(source))
def testTemplatedClass(self):
source = """
template <typename S, typename T>
class Test {
public:
virtual void Foo();
};
"""
expected = """\
template <typename T0, typename T1>
class MockTest : public Test<T0, T1> {
public:
MOCK_METHOD0_T(Foo,
void());
};
"""
self.assertEqualIgnoreLeadingWhitespace(
expected, self.GenerateMocks(source))
if __name__ == '__main__':
unittest.main()
|
streamlink/streamlink
|
refs/heads/master
|
tests/plugins/test_rtve.py
|
3
|
from streamlink.plugins.rtve import Rtve
from tests.plugins import PluginCanHandleUrl
class TestPluginCanHandleUrlRtve(PluginCanHandleUrl):
__plugin__ = Rtve
should_match = [
'http://www.rtve.es/directo/la-1',
'http://www.rtve.es/directo/la-2/',
'http://www.rtve.es/directo/teledeporte/',
'http://www.rtve.es/directo/canal-24h/',
'http://www.rtve.es/infantil/directo/',
]
should_not_match = [
'https://www.rtve.es',
]
|
bionomicron/Redirector
|
refs/heads/master
|
core/reader/FluxModelParser.py
|
1
|
#!/usr/bin/env python
'''
@author: Graham Rockwell
@organization: Church Lab Harvard Genetics
@version: 03/04/2013
--Rename to metabolic-model parser
'''
from core.do.FluxModel import FluxModel, MetabolicNetwork, Reaction, Objective, FluxLimit
from core.do.FluxModelTools import ReactionParser
from util.FlatFileParser import FlatFileParser
class TagedElement(dict):
def __init__(self):
self.annotation = {}
def __str__(self):
result = dict.__str__(self)
result += str(self.annotation)
return result
class SmallMoleculeFlatFileParser( FlatFileParser ):
'''
@summary: Parser for compound / small molecule flat files
'''
def __init__(self,delimiter='\t',comment='#'):
FlatFileParser.__init__( self, delimiter, comment )
requiredHeaders = {"Abbreviation":"Name", "OfficialName":"OfficialName"}
optionalHeaders = {"concentration":"concentration","lower":"lower","upper":"upper","compartment":"compartment"}
self.addRequiredHeaders( requiredHeaders )
self.addOptionalHeaders( optionalHeaders )
def parse( self, file ):
result = {} # currently reactions are parsed in an unordered form.
self.startFile(file)
d = self.getTagedLine()
while d != None:
if d != "":
m = {}
name = d["Name"]
if d["compartment"] != '':
name = name + "[" + d["compartment"] + "]"
m["name"] = name
m["officialName"] = d["OfficialName"]
for term in self.optionalHeaderMap.keys():
if term in d.keys():
m[term] = d[term]
d = self.getTagedLine()
result[name] = m
self.closeFile()
return result
class ReactionFlatFileParser( FlatFileParser ):
'''
@summary: Parser for reaction flat files
'''
def __init__( self, delimiter='\t', comment='#' ):
FlatFileParser.__init__( self, delimiter, comment )
requiredHeaders = {"Abbreviation":"Name", "OfficialName":"OfficialName", "Equation":"Equation"}
optionalHeaders = {}
self.addRequiredHeaders( requiredHeaders )
self.addOptionalHeaders( optionalHeaders )
def parse( self, rxnfile ):
rxnParser = ReactionParser()
result = {} # currently reactions are parsed in an unordered form.
self.startFile(rxnfile)
d = self.getTagedLine()
while d != None:
if d != "":
reaction = rxnParser.parseEquation(d["Equation"])
reaction.setId(d["Name"])
reaction.setName(d["OfficialName"])
reaction.setEquation(d["Equation"])
reaction.annotation = d.annotation
result[d["Name"]] = reaction
d = self.getTagedLine()
self.closeFile()
return result
class MetabolicNetworkFlatFileParser( FlatFileParser ):
'''
@summary: parser for metabolic network files
@note: currently only reaction file matters
'''
def parse( self, rxnfile, metabolitefile = ''):
precursor_predicate = {}
rxnParser = ReactionFlatFileParser()
smmParser = SmallMoleculeFlatFileParser()
#smm = smmParser.parse( metabolitefile )
rxn = rxnParser.parse( rxnfile )
result = MetabolicNetwork( 'MetabolicNetwork' )
result.addReactionMap( rxn )
return result
class FluxLimitFlatFileParser( FlatFileParser ):
'''
@summary: parser for reaction limit flat file
'''
def __init__( self, delimiter='\t', comment='#' ):
FlatFileParser.__init__( self, delimiter, comment )
self.setHeader( ["Flux", "Lower", "Upper"] )
self.noLimit = ['None']
self.posInf = 1e3
self.negInf = -1e3
def parse( self, fluxlimitfile ):
fluxLimit = FluxLimit()
lines = open( fluxlimitfile, 'r' )
isHeader = True
for line in lines:
if not self.isComment( line ):
if isHeader:
self.checkHeader( line )
isHeader = False
else:
d = self.parseTagedLine( line )
lower = d["Lower"]
upper = d["Upper"]
if lower in self.noLimit:
lower = None
elif self.negInf != None:
if float(lower) < self.negInf:
lower = None
if upper in self.noLimit:
upper = None
elif self.posInf != None:
if float(upper) > self.posInf:
upper = None
fluxLimit[d["Flux"]] = ( lower, upper )
return fluxLimit
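# Illustrative input sketch (values hypothetical): given a tab-delimited
# limits file
#   Flux    Lower   Upper
#   R1      0.0     10.0
#   R2      None    5.0
# parse() yields fluxLimit["R1"] == ('0.0', '10.0') and
# fluxLimit["R2"] == (None, '5.0'); the 'None' sentinel, or any value
# beyond posInf/negInf, becomes an open (None) bound.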
class ObjectiveFlatFileParser( FlatFileParser ):
'''
@summary: Parser for objective flat file
'''
def __init__( self, delimiter='\t', comment='#' ):
FlatFileParser.__init__( self, delimiter, comment )
self.setHeader( ["Flux", "Coefficient"] )
def parse( self, objectivefile ):
objective = Objective()
lines = open( objectivefile, 'r' )
isHeader = True
for line in lines:
if not self.isComment( line ):
if isHeader:
self.checkHeader( line )
isHeader = False
else:
d = self.parseTagedLine( line )
objective[d["Flux"]] = d["Coefficient"]
return objective
#===========================================================
#==========FluxAnalysisFlatFileParser=======================
#===========================================================
class FluxAnalysisFlatFileParser( FlatFileParser ):
def __init__( self, delimiter='\t', comment='#' ):
FlatFileParser.__init__( self, delimiter, comment )
self.reactionParser = ReactionParser()
requiredHeaders = {
"Abbreviation":"name",
"Equation":"equation",
"Lower Bound":"lowerBound",
"Upper Bound":"upperBound"
}
optionalHeaders = {"objective":"objective"}
self.addRequiredHeaders( requiredHeaders )
self.addOptionalHeaders( optionalHeaders )
self.setObjectiveTag( "objective function" )
def setObjectiveTag( self, objectiveTag ):
self.objectiveTag = objectiveTag
def parseReactionLine( self, line ):
name = line["name"]
equation = line["equation"]
reaction = self.reactionParser.parse( name, "", "", equation )
return reaction
def parseLimitLine( self, line ):
lower = line["lowerBound"]
upper = line["upperBound"]
return ( lower, upper )
def parseObjectiveLine( self, line ):
return -1  # objective rows carry no explicit coefficient; a fixed -1 is used
def parse( self, analysisFile ):
lines = self.parseFile( analysisFile )
objective = Objective()
limit = FluxLimit()
reactions = {}
for line in lines:
name = line["name"]
reaction = self.parseReactionLine( line )
reactions[name] = reaction
if line["objective"] == self.objectiveTag:
objectiveValue = self.parseObjectiveLine( line )
objective[name] = objectiveValue
else:
limitValue = self.parseLimitLine( line )
if reaction.direction == 'IRREVERSIBLE-LEFT-TO-RIGHT':
( lower, upper ) = limitValue
limitValue = ( 0.0, upper )
limit[name] = limitValue
return ( reactions, objective, limit )
#============================================================
#================FluxModelFlatFileParser=====================
#============================================================
class FluxModelFlatFileParser( FlatFileParser ):
def __init__(self):
self.directionCheck = True
self.posInf = 1e5
self.negInf = -1e5
self.networkParser = MetabolicNetworkFlatFileParser()
self.exchangeParser = FluxAnalysisFlatFileParser()
self.fluxLimitParser = FluxLimitFlatFileParser()
self.objectiveParser = ObjectiveFlatFileParser()
self.smmParser = SmallMoleculeFlatFileParser()
self.annotationParser = AnnotationFlatFileParser()
self.modelName = ''
self.basedir = ''
self.rxnfile = ''
self.fluxlimitfile = ''
self.smmfile = ''
self.objectivefile = ''
self.geneannotationfile = ''
def checkDirectionality(self,network,fluxLimits):
for reactionName in network.getOrderedReactionNames():
reaction = network.getReaction(reactionName)
if reactionName in fluxLimits.keys():
limit = fluxLimits[reactionName]
else:
#limit = (self.negInf,self.posInf)
limit = (None,None)
(lower,upper) = limit
direction = reaction.getDirection()
if reaction.getDirection() == 'IRREVERSIBLE-LEFT-TO-RIGHT':
if lower == None:
limit = (0.0,upper)
elif lower < 0.0:
limit = (0.0,upper)
if reaction.getDirection() == 'IRREVERSIBLE-RIGHT-TO-LEFT':
if upper == None:
limit = (lower,0.0)
elif upper > 0.0:
limit = (lower,0.0)
fluxLimits[reactionName] = limit
return fluxLimits
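# Worked sketch of the clamping rule above (values hypothetical): an
# IRREVERSIBLE-LEFT-TO-RIGHT reaction with limit (-10.0, 10.0) is clamped
# to (0.0, 10.0), an IRREVERSIBLE-RIGHT-TO-LEFT reaction with the same
# limit becomes (-10.0, 0.0), and reversible reactions keep their limits
# unchanged.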
def parse( self, rxnfile, metabolitefile, fluxlimitfile, objectivefile, analysisName, exchangeFile = None):
#networkParser = MetabolicNetworkFlatFileParser()
network = self.networkParser.parse( rxnfile, metabolitefile )
fluxModel = FluxModel( network )
if fluxlimitfile != '' and fluxlimitfile != None:
fluxLimits= self.fluxLimitParser.parse( fluxlimitfile )
else:
fluxLimits = FluxLimit()
if objectivefile != '' and objectivefile != None:
objective= self.objectiveParser.parse( objectivefile )
else:
objective = Objective()
if exchangeFile != '' and exchangeFile != None:
( reactions, objective, limit ) = self.exchangeParser.parse(exchangeFile)
network.addReactionMap(reactions)
fluxLimits.extend(limit)
if self.directionCheck:
fluxLimits = self.checkDirectionality(network, fluxLimits)
fluxModel.addAnalysis( analysisName, objective, fluxLimits )
return fluxModel
def generateModel(self,modelName=None):
if modelName == None:
modelName = self.modelName
dir = self.basedir
network = None
fluxLimits = None
objective = None
metaboliteData = None
if self.rxnfile != '':
rxnFile =dir + self.rxnfile
network = self.networkParser.parse(rxnFile)
else:
network = MetabolicNetwork('')
if self.smmfile != '':
smmFile = dir + self.smmfile
metaboliteData = self.smmParser.parse(smmFile)
if self.fluxlimitfile != '':
fluxLimitFile = dir + self.fluxlimitfile
fluxLimits = self.fluxLimitParser.parse(fluxLimitFile)
else:
fluxLimits = FluxLimit()
if self.objectivefile != '':
objectiveFile = dir + self.objectivefile
objective = self.objectiveParser.parse(objectiveFile)
else:
objective = Objective()
if self.directionCheck:
fluxLimits = self.checkDirectionality(network, fluxLimits)
#Add data to collector
if network != None:
result = FluxModel(network)
if metaboliteData != None:
result.setMetaboliteData(metaboliteData)
if objective != None and fluxLimits != None:
result.addAnalysis( modelName, objective, fluxLimits )
return result
def parseGenConfig(self,config,modelName):
dir = config["basedir"]
network = None
fluxLimits = None
smmData = None
objective = None
metaboliteData = None
if config["rxnfile"] != '':
rxnFile =dir + config["rxnfile"]
network = self.networkParser.parse(rxnFile)
else:
network = MetabolicNetwork('')
if config["smmfile"] != '':
smmFile = dir + config["smmfile"]
metaboliteData = self.smmParser.parse(smmFile)
if config["fluxlimitfile"] != '':
fluxLimitFile = dir + config["fluxlimitfile"]
fluxLimits = self.fluxLimitParser.parse(fluxLimitFile)
else:
fluxLimits = FluxLimit()
if config["objectivefile"] != '':
objectiveFile = dir + config["objectivefile"]
objective = self.objectiveParser.parse(objectiveFile)
else:
objective = Objective()
if config["exchangefile"] != '':
exchangeFile = dir + config["exchangefile"]
( reactions, objective, limit ) = self.exchangeParser.parse(exchangeFile)
network.addReactionMap(reactions)
fluxLimits.extend(limit)
if self.directionCheck:
fluxLimits = self.checkDirectionality(network, fluxLimits)
#Add data to collector
if network != None:
result = FluxModel(network)
if metaboliteData != None:
result.setMetaboliteData(metaboliteData)
if objective != None and fluxLimits != None:
result.addAnalysis( modelName, objective, fluxLimits )
return result
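# Illustrative config mapping for parseGenConfig (keys taken from the
# lookups above; paths hypothetical):
#   config = {"basedir": "data/", "rxnfile": "reactions.txt",
#             "smmfile": "metabolites.txt", "fluxlimitfile": "limits.txt",
#             "objectivefile": "objective.txt", "exchangefile": ""}
#   fluxModel = FluxModelFlatFileParser().parseGenConfig(config, "wildtype")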
def parseConfig(self,config,modelName):
rxnFile =config.getRxnFile()
smmFile = config.getSmmFile()
limitFile = config.getFluxLimitFile()
objectiveFile = config.getObjectiveFile()
exchangeFile = config.getExchangeFile()
fluxModel = self.parse(rxnFile,smmFile,limitFile,objectiveFile,modelName,exchangeFile)
return fluxModel
def parseMultipleAnalysis( self, rxnfile, metabolitefile, fluxlimitfiles, objectivefiles, analysisNames ):
networkParser = MetabolicNetworkFlatFileParser()
network = networkParser.parse( rxnfile, metabolitefile )
fluxModel = FluxModel( network )
fluxLimits = {}
objective = {}
analysis_names = []
fluxLimitParser = FluxLimitFlatFileParser()
objectiveParser = ObjectiveFlatFileParser()
for analysisName in analysisNames:
fluxlimitfile = fluxlimitfiles[analysisName]
objectivefile = objectivefiles[analysisName]
fluxLimits= fluxLimitParser.parse( fluxlimitfile )
objective= objectiveParser.parse( objectivefile )
fluxModel.addAnalysis( analysisName, objective, fluxLimits )
return fluxModel
def parseSuperCross( self, rxnfile, metabolitefile, fluxlimitfiles, objectivefiles ):
analysisNames = []
newFluxLimitFileList = {}
newObjectiveFileList = {}
for objectivefile in objectivefiles:
for limitfile in fluxlimitfiles:
name = objectivefile + "-" + limitfile
analysisNames.append( name )
newFluxLimitFileList[name] = limitfile
newObjectiveFileList[name] = objectivefile
fluxModel = self.parseMultipleAnalysis( rxnfile, metabolitefile, newFluxLimitFileList, newObjectiveFileList, analysisNames )
return fluxModel
class AnnotationFlatFileParser( FlatFileParser ):
def __init__( self, delimiter='\t', comment='#' ):
FlatFileParser.__init__( self, delimiter, comment )
requiredHeaders = {"bNumber":"Name", "gene":"gene", "function":"function"}
optionalHeaders = {}
self.addRequiredHeaders( requiredHeaders )
self.addOptionalHeaders( optionalHeaders )
def parse( self, file ):
result = {}
self.startFile(file)
d = self.getTagedLine()
while d != None:
if d != "":
result[d["Name"]] = [d["gene"],d["function"]]
d = self.getTagedLine()
self.closeFile()
return result
#=====================================
#=============ParseError==============
#=====================================
class ParseError( Exception ):
pass
|
cg31/tensorflow
|
refs/heads/master
|
tensorflow/python/training/session_manager_test.py
|
11
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SessionManager."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from tensorflow.python.framework import errors
from tensorflow.python.platform import gfile
class SessionManagerTest(tf.test.TestCase):
def testPrepareSessionSucceeds(self):
with tf.Graph().as_default():
v = tf.Variable([1.0, 2.0, 3.0], name="v")
sm = tf.train.SessionManager(ready_op=tf.report_uninitialized_variables())
sess = sm.prepare_session("", init_op=tf.initialize_all_variables())
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
def testPrepareSessionSucceedsWithInitFeedDict(self):
with tf.Graph().as_default():
p = tf.placeholder(tf.float32, shape=(3,))
v = tf.Variable(p, name="v")
sm = tf.train.SessionManager(ready_op=tf.report_uninitialized_variables())
sess = sm.prepare_session("",
init_op=tf.initialize_all_variables(),
init_feed_dict={p: [1.0, 2.0, 3.0]})
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
def testPrepareSessionSucceedsWithInitFn(self):
with tf.Graph().as_default():
v = tf.Variable([125], name="v")
sm = tf.train.SessionManager(ready_op=tf.report_uninitialized_variables())
sess = sm.prepare_session("",
init_fn=lambda sess: sess.run(v.initializer))
self.assertAllClose([125], sess.run(v))
def testPrepareSessionFails(self):
checkpoint_dir = os.path.join(self.get_temp_dir(), "prepare_session")
checkpoint_dir2 = os.path.join(self.get_temp_dir(), "prepare_session2")
try:
gfile.DeleteRecursively(checkpoint_dir)
gfile.DeleteRecursively(checkpoint_dir2)
except errors.OpError:
pass # Ignore
gfile.MakeDirs(checkpoint_dir)
with tf.Graph().as_default():
v = tf.Variable([1.0, 2.0, 3.0], name="v")
sm = tf.train.SessionManager(ready_op=tf.report_uninitialized_variables())
saver = tf.train.Saver({"v": v})
sess = sm.prepare_session("", init_op=tf.initialize_all_variables(),
saver=saver, checkpoint_dir=checkpoint_dir)
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
checkpoint_filename = os.path.join(checkpoint_dir,
"prepare_session_checkpoint")
saver.save(sess, checkpoint_filename)
# Create a new Graph and SessionManager and recover.
with tf.Graph().as_default():
# Renames the checkpoint directory.
os.rename(checkpoint_dir, checkpoint_dir2)
gfile.MakeDirs(checkpoint_dir)
v = tf.Variable([6.0, 7.0, 8.0], name="v")
with self.test_session():
self.assertEqual(False, tf.is_variable_initialized(v).eval())
sm = tf.train.SessionManager(ready_op=tf.report_uninitialized_variables())  # rebind sm to the new graph
saver = tf.train.Saver({"v": v})
# This should fail as there's no checkpoint within 2 seconds.
with self.assertRaisesRegexp(
RuntimeError, "no init_op or init_fn or local_init_op was given"):
sess = sm.prepare_session("", init_op=None, saver=saver,
checkpoint_dir=checkpoint_dir,
wait_for_checkpoint=True, max_wait_secs=2)
# Rename the checkpoint directory back.
gfile.DeleteRecursively(checkpoint_dir)
os.rename(checkpoint_dir2, checkpoint_dir)
# This should succeed as there's a checkpoint now.
sess = sm.prepare_session("", init_op=None, saver=saver,
checkpoint_dir=checkpoint_dir,
wait_for_checkpoint=True, max_wait_secs=2)
self.assertEqual(
True, tf.is_variable_initialized(
sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
def testRecoverSession(self):
# Create a checkpoint.
checkpoint_dir = os.path.join(self.get_temp_dir(), "recover_session")
try:
gfile.DeleteRecursively(checkpoint_dir)
except errors.OpError:
pass # Ignore
gfile.MakeDirs(checkpoint_dir)
with tf.Graph().as_default():
v = tf.Variable(1, name="v")
sm = tf.train.SessionManager(ready_op=tf.report_uninitialized_variables())
saver = tf.train.Saver({"v": v})
sess, initialized = sm.recover_session("", saver=saver,
checkpoint_dir=checkpoint_dir)
self.assertFalse(initialized)
sess.run(v.initializer)
self.assertEquals(1, sess.run(v))
saver.save(sess, os.path.join(checkpoint_dir,
"recover_session_checkpoint"))
# Create a new Graph and SessionManager and recover.
with tf.Graph().as_default():
v = tf.Variable(2, name="v")
with self.test_session():
self.assertEqual(False, tf.is_variable_initialized(v).eval())
sm2 = tf.train.SessionManager(
ready_op=tf.report_uninitialized_variables())
saver = tf.train.Saver({"v": v})
sess, initialized = sm2.recover_session("", saver=saver,
checkpoint_dir=checkpoint_dir)
self.assertTrue(initialized)
self.assertEqual(
True, tf.is_variable_initialized(
sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
self.assertEquals(1, sess.run(v))
def testWaitForSessionReturnsNoneAfterTimeout(self):
with tf.Graph().as_default():
tf.Variable(1, name="v")
sm = tf.train.SessionManager(ready_op=tf.report_uninitialized_variables(),
recovery_wait_secs=1)
# Set max_wait_secs to allow us to try a few times.
with self.assertRaises(errors.DeadlineExceededError):
sm.wait_for_session(master="", max_wait_secs=3)
def testInitWithNoneLocalInitOpError(self):
# Creating a SessionManager with a None local_init_op but
# non-None ready_for_local_init_op raises ValueError
with self.assertRaisesRegexp(ValueError,
"If you pass a ready_for_local_init_op "
"you must also pass a local_init_op "):
tf.train.SessionManager(
ready_for_local_init_op=tf.report_uninitialized_variables(
tf.all_variables()),
local_init_op=None)
def testRecoverSessionWithReadyForLocalInitOp(self):
# Create a checkpoint.
checkpoint_dir = os.path.join(self.get_temp_dir(),
"recover_session_ready_for_local_init")
try:
gfile.DeleteRecursively(checkpoint_dir)
except errors.OpError:
pass # Ignore
gfile.MakeDirs(checkpoint_dir)
with tf.Graph().as_default():
v = tf.Variable(1, name="v")
sm = tf.train.SessionManager(ready_op=tf.report_uninitialized_variables())
saver = tf.train.Saver({"v": v})
sess, initialized = sm.recover_session(
"", saver=saver, checkpoint_dir=checkpoint_dir)
self.assertFalse(initialized)
sess.run(v.initializer)
self.assertEquals(1, sess.run(v))
saver.save(sess, os.path.join(checkpoint_dir,
"recover_session_checkpoint"))
# Create a new Graph and SessionManager and recover.
with tf.Graph().as_default():
v = tf.Variable(2, name="v")
w = tf.Variable(
v,
trainable=False,
collections=[tf.GraphKeys.LOCAL_VARIABLES],
name="w")
with self.test_session():
self.assertEqual(False, tf.is_variable_initialized(v).eval())
self.assertEqual(False, tf.is_variable_initialized(w).eval())
sm2 = tf.train.SessionManager(
ready_op=tf.report_uninitialized_variables(),
ready_for_local_init_op=tf.report_uninitialized_variables(
tf.all_variables()),
local_init_op=w.initializer)
saver = tf.train.Saver({"v": v})
sess, initialized = sm2.recover_session(
"", saver=saver, checkpoint_dir=checkpoint_dir)
self.assertTrue(initialized)
self.assertEqual(
True,
tf.is_variable_initialized(sess.graph.get_tensor_by_name("v:0")).eval(
session=sess))
self.assertEqual(
True,
tf.is_variable_initialized(sess.graph.get_tensor_by_name("w:0")).eval(
session=sess))
self.assertEquals(1, sess.run(v))
self.assertEquals(1, sess.run(w))
def testRecoverSessionWithReadyForLocalInitOpFailsToReadyLocal(self):
# We use ready_for_local_init_op=tf.report_uninitialized_variables(),
# which causes recover_session to not run local_init_op, and to return
# initialized=False
# Create a checkpoint.
checkpoint_dir = os.path.join(
self.get_temp_dir(),
"recover_session_ready_for_local_init_fails_to_ready_local")
try:
gfile.DeleteRecursively(checkpoint_dir)
except errors.OpError:
pass # Ignore
gfile.MakeDirs(checkpoint_dir)
with tf.Graph().as_default():
v = tf.Variable(1, name="v")
sm = tf.train.SessionManager(ready_op=tf.report_uninitialized_variables())
saver = tf.train.Saver({"v": v})
sess, initialized = sm.recover_session(
"", saver=saver, checkpoint_dir=checkpoint_dir)
self.assertFalse(initialized)
sess.run(v.initializer)
self.assertEquals(1, sess.run(v))
saver.save(sess, os.path.join(checkpoint_dir,
"recover_session_checkpoint"))
# Create a new Graph and SessionManager and recover.
with tf.Graph().as_default():
v = tf.Variable(2, name="v")
w = tf.Variable(
v,
trainable=False,
collections=[tf.GraphKeys.LOCAL_VARIABLES],
name="w")
with self.test_session():
self.assertEqual(False, tf.is_variable_initialized(v).eval())
self.assertEqual(False, tf.is_variable_initialized(w).eval())
sm2 = tf.train.SessionManager(
ready_op=tf.report_uninitialized_variables(),
ready_for_local_init_op=tf.report_uninitialized_variables(),
local_init_op=w.initializer)
saver = tf.train.Saver({"v": v})
sess, initialized = sm2.recover_session(
"", saver=saver, checkpoint_dir=checkpoint_dir)
self.assertFalse(initialized)
self.assertEqual(
True,
tf.is_variable_initialized(sess.graph.get_tensor_by_name("v:0")).eval(
session=sess))
self.assertEqual(
False,
tf.is_variable_initialized(sess.graph.get_tensor_by_name("w:0")).eval(
session=sess))
self.assertEquals(1, sess.run(v))
def testRecoverSessionNoChkptStillRunsLocalInitOp(self):
# This test checks for backwards compatibility.
# In particular, we continue to ensure that recover_session will execute
# local_init_op exactly once, regardless of whether the session was
# successfully recovered.
with tf.Graph().as_default():
w = tf.Variable(
1,
trainable=False,
collections=[tf.GraphKeys.LOCAL_VARIABLES],
name="w")
with self.test_session():
self.assertEqual(False, tf.is_variable_initialized(w).eval())
sm2 = tf.train.SessionManager(
ready_op=tf.report_uninitialized_variables(),
ready_for_local_init_op=None,
local_init_op=w.initializer)
# Try to recover session from None
sess, initialized = sm2.recover_session(
"", saver=None, checkpoint_dir=None)
# Succeeds because recover_session still runs local_init_op
self.assertFalse(initialized)
self.assertEqual(
True,
tf.is_variable_initialized(sess.graph.get_tensor_by_name("w:0")).eval(
session=sess))
self.assertEquals(1, sess.run(w))
def testRecoverSessionFailsStillRunsLocalInitOp(self):
# Create a checkpoint.
checkpoint_dir = os.path.join(
self.get_temp_dir(),
"recover_session_ready_for_local_init_fails_stil_run")
try:
gfile.DeleteRecursively(checkpoint_dir)
except errors.OpError:
pass # Ignore
gfile.MakeDirs(checkpoint_dir)
# Create a new Graph and SessionManager and recover.
with tf.Graph().as_default():
v = tf.Variable(2, name="v")
w = tf.Variable(
1,
trainable=False,
collections=[tf.GraphKeys.LOCAL_VARIABLES],
name="w")
with self.test_session():
self.assertEqual(False, tf.is_variable_initialized(v).eval())
self.assertEqual(False, tf.is_variable_initialized(w).eval())
sm2 = tf.train.SessionManager(
ready_op=tf.report_uninitialized_variables(),
ready_for_local_init_op=None,
local_init_op=w.initializer)
saver = tf.train.Saver({"v": v})
sess, initialized = sm2.recover_session(
"",
saver=saver,
checkpoint_dir=checkpoint_dir,
wait_for_checkpoint=False)
self.assertFalse(initialized)
self.assertEqual(
False,
tf.is_variable_initialized(sess.graph.get_tensor_by_name("v:0")).eval(
session=sess))
self.assertEqual(
True,
tf.is_variable_initialized(sess.graph.get_tensor_by_name("w:0")).eval(
session=sess))
self.assertEquals(1, sess.run(w))
def testWaitForSessionLocalInit(self):
server = tf.train.Server.create_local_server()
with tf.Graph().as_default() as graph:
v = tf.Variable(1, name="v")
w = tf.Variable(
v,
trainable=False,
collections=[tf.GraphKeys.LOCAL_VARIABLES],
name="w")
sm = tf.train.SessionManager(
graph=graph,
ready_op=tf.report_uninitialized_variables(),
ready_for_local_init_op=tf.report_uninitialized_variables(
tf.all_variables()),
local_init_op=w.initializer)
# Initialize v but not w
s = tf.Session(server.target, graph=graph)
s.run(v.initializer)
sess = sm.wait_for_session(server.target, max_wait_secs=3)
self.assertEqual(
True,
tf.is_variable_initialized(sess.graph.get_tensor_by_name("v:0")).eval(
session=sess))
self.assertEqual(
True,
tf.is_variable_initialized(sess.graph.get_tensor_by_name("w:0")).eval(
session=sess))
self.assertEquals(1, sess.run(v))
self.assertEquals(1, sess.run(w))
def testWaitForSessionWithReadyForLocalInitOpFailsToReadyLocal(self):
with tf.Graph().as_default() as graph:
v = tf.Variable(1, name="v")
w = tf.Variable(
v,
trainable=False,
collections=[tf.GraphKeys.LOCAL_VARIABLES],
name="w")
sm = tf.train.SessionManager(
graph=graph,
ready_op=tf.report_uninitialized_variables(),
ready_for_local_init_op=tf.report_uninitialized_variables(),
local_init_op=w.initializer)
with self.assertRaises(tf.errors.DeadlineExceededError):
# Times out because w fails to be initialized,
# because of overly restrictive ready_for_local_init_op
sm.wait_for_session("", max_wait_secs=3)
def testWaitForSessionInsufficientReadyForLocalInitCheck(self):
with tf.Graph().as_default() as graph:
v = tf.Variable(1, name="v")
w = tf.Variable(
v,
trainable=False,
collections=[tf.GraphKeys.LOCAL_VARIABLES],
name="w")
sm = tf.train.SessionManager(
graph=graph,
ready_op=tf.report_uninitialized_variables(),
ready_for_local_init_op=None,
local_init_op=w.initializer)
with self.assertRaisesRegexp(tf.errors.FailedPreconditionError,
"Attempting to use uninitialized value v"):
sm.wait_for_session("", max_wait_secs=3)
def testPrepareSessionWithReadyForLocalInitOp(self):
with tf.Graph().as_default():
v = tf.Variable(1, name="v")
w = tf.Variable(
v,
trainable=False,
collections=[tf.GraphKeys.LOCAL_VARIABLES],
name="w")
with self.test_session():
self.assertEqual(False, tf.is_variable_initialized(v).eval())
self.assertEqual(False, tf.is_variable_initialized(w).eval())
sm2 = tf.train.SessionManager(
ready_op=tf.report_uninitialized_variables(),
ready_for_local_init_op=tf.report_uninitialized_variables(
tf.all_variables()),
local_init_op=w.initializer)
sess = sm2.prepare_session("", init_op=v.initializer)
self.assertEqual(
True,
tf.is_variable_initialized(sess.graph.get_tensor_by_name("v:0")).eval(
session=sess))
self.assertEqual(
True,
tf.is_variable_initialized(sess.graph.get_tensor_by_name("w:0")).eval(
session=sess))
self.assertEquals(1, sess.run(v))
self.assertEquals(1, sess.run(w))
def testPrepareSessionDidNotInitLocalVariable(self):
with tf.Graph().as_default():
v = tf.Variable(1, name="v")
w = tf.Variable(
v,
trainable=False,
collections=[tf.GraphKeys.LOCAL_VARIABLES],
name="w")
with self.test_session():
self.assertEqual(False, tf.is_variable_initialized(v).eval())
self.assertEqual(False, tf.is_variable_initialized(w).eval())
sm2 = tf.train.SessionManager(
ready_op=tf.report_uninitialized_variables())
with self.assertRaisesRegexp(RuntimeError,
"Init operations did not make model ready"):
sm2.prepare_session("", init_op=v.initializer)
def testPrepareSessionWithReadyNotReadyForLocal(self):
with tf.Graph().as_default():
v = tf.Variable(1, name="v")
w = tf.Variable(
v,
trainable=False,
collections=[tf.GraphKeys.LOCAL_VARIABLES],
name="w")
with self.test_session():
self.assertEqual(False, tf.is_variable_initialized(v).eval())
self.assertEqual(False, tf.is_variable_initialized(w).eval())
sm2 = tf.train.SessionManager(
ready_op=tf.report_uninitialized_variables(),
ready_for_local_init_op=tf.report_uninitialized_variables(
tf.all_variables()),
local_init_op=w.initializer)
with self.assertRaisesRegexp(
RuntimeError,
"Init operations did not make model ready for local_init"):
sm2.prepare_session("", init_op=None)
def testPrepareSessionWithInsufficientReadyForLocalInitCheck(self):
with tf.Graph().as_default():
v = tf.Variable(1, name="v")
w = tf.Variable(
v,
trainable=False,
collections=[tf.GraphKeys.LOCAL_VARIABLES],
name="w")
with self.test_session():
self.assertEqual(False, tf.is_variable_initialized(v).eval())
self.assertEqual(False, tf.is_variable_initialized(w).eval())
sm2 = tf.train.SessionManager(
ready_op=tf.report_uninitialized_variables(),
ready_for_local_init_op=None,
local_init_op=w.initializer)
with self.assertRaisesRegexp(tf.errors.FailedPreconditionError,
"Attempting to use uninitialized value v"):
sm2.prepare_session("", init_op=None)
class ObsoleteSessionManagerTest(tf.test.TestCase):
def testPrepareSessionSucceeds(self):
with tf.Graph().as_default():
v = tf.Variable([1.0, 2.0, 3.0], name="v")
sm = tf.train.SessionManager(ready_op=tf.assert_variables_initialized())
sess = sm.prepare_session("", init_op=tf.initialize_all_variables())
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
def testPrepareSessionSucceedsWithInitFeedDict(self):
with tf.Graph().as_default():
p = tf.placeholder(tf.float32, shape=(3,))
v = tf.Variable(p, name="v")
sm = tf.train.SessionManager(ready_op=tf.assert_variables_initialized())
sess = sm.prepare_session("",
init_op=tf.initialize_all_variables(),
init_feed_dict={p: [1.0, 2.0, 3.0]})
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
def testPrepareSessionSucceedsWithInitFn(self):
with tf.Graph().as_default():
v = tf.Variable([125], name="v")
sm = tf.train.SessionManager(ready_op=tf.assert_variables_initialized())
sess = sm.prepare_session("",
init_fn=lambda sess: sess.run(v.initializer))
self.assertAllClose([125], sess.run(v))
def testPrepareSessionFails(self):
checkpoint_dir = os.path.join(self.get_temp_dir(), "prepare_session")
checkpoint_dir2 = os.path.join(self.get_temp_dir(), "prepare_session2")
try:
gfile.DeleteRecursively(checkpoint_dir)
gfile.DeleteRecursively(checkpoint_dir2)
except errors.OpError:
pass # Ignore
gfile.MakeDirs(checkpoint_dir)
with tf.Graph().as_default():
v = tf.Variable([1.0, 2.0, 3.0], name="v")
sm = tf.train.SessionManager(ready_op=tf.assert_variables_initialized())
saver = tf.train.Saver({"v": v})
sess = sm.prepare_session("", init_op=tf.initialize_all_variables(),
saver=saver, checkpoint_dir=checkpoint_dir)
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
checkpoint_filename = os.path.join(checkpoint_dir,
"prepare_session_checkpoint")
saver.save(sess, checkpoint_filename)
# Create a new Graph and SessionManager and recover.
with tf.Graph().as_default():
# Renames the checkpoint directory.
os.rename(checkpoint_dir, checkpoint_dir2)
gfile.MakeDirs(checkpoint_dir)
v = tf.Variable([6.0, 7.0, 8.0], name="v")
with self.test_session():
self.assertEqual(False, tf.is_variable_initialized(v).eval())
sm = tf.train.SessionManager(ready_op=tf.assert_variables_initialized())  # rebind sm to the new graph
saver = tf.train.Saver({"v": v})
# This should fail as there's no checkpoint within 2 seconds.
with self.assertRaisesRegexp(
RuntimeError, "no init_op or init_fn or local_init_op was given"):
sess = sm.prepare_session("", init_op=None, saver=saver,
checkpoint_dir=checkpoint_dir,
wait_for_checkpoint=True, max_wait_secs=2)
# Rename the checkpoint directory back.
gfile.DeleteRecursively(checkpoint_dir)
os.rename(checkpoint_dir2, checkpoint_dir)
# This should succeed as there's a checkpoint now.
sess = sm.prepare_session("", init_op=None, saver=saver,
checkpoint_dir=checkpoint_dir,
wait_for_checkpoint=True, max_wait_secs=2)
self.assertEqual(
True, tf.is_variable_initialized(
sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
def testRecoverSession(self):
# Create a checkpoint.
checkpoint_dir = os.path.join(self.get_temp_dir(), "recover_session")
try:
gfile.DeleteRecursively(checkpoint_dir)
except errors.OpError:
pass # Ignore
gfile.MakeDirs(checkpoint_dir)
with tf.Graph().as_default():
v = tf.Variable(1, name="v")
sm = tf.train.SessionManager(ready_op=tf.assert_variables_initialized())
saver = tf.train.Saver({"v": v})
sess, initialized = sm.recover_session("", saver=saver,
checkpoint_dir=checkpoint_dir)
self.assertFalse(initialized)
sess.run(v.initializer)
self.assertEquals(1, sess.run(v))
saver.save(sess, os.path.join(checkpoint_dir,
"recover_session_checkpoint"))
# Create a new Graph and SessionManager and recover.
with tf.Graph().as_default():
v = tf.Variable(2, name="v")
with self.test_session():
self.assertEqual(False, tf.is_variable_initialized(v).eval())
sm2 = tf.train.SessionManager(ready_op=tf.assert_variables_initialized())
saver = tf.train.Saver({"v": v})
sess, initialized = sm2.recover_session("", saver=saver,
checkpoint_dir=checkpoint_dir)
self.assertTrue(initialized)
self.assertEqual(
True, tf.is_variable_initialized(
sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
self.assertEquals(1, sess.run(v))
def testWaitForSessionReturnsNoneAfterTimeout(self):
with tf.Graph().as_default():
tf.Variable(1, name="v")
sm = tf.train.SessionManager(ready_op=tf.assert_variables_initialized(),
recovery_wait_secs=1)
# Set max_wait_secs to allow us to try a few times.
with self.assertRaises(errors.DeadlineExceededError):
sm.wait_for_session(master="", max_wait_secs=3)
if __name__ == "__main__":
tf.test.main()
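# Hedged summary of the contract exercised by the tests above (calls are
# illustrative):
#   sm = tf.train.SessionManager(ready_op=tf.report_uninitialized_variables())
#   sess = sm.prepare_session("", init_op=tf.initialize_all_variables())
#   sess, initialized = sm.recover_session("", saver=saver,
#                                          checkpoint_dir=checkpoint_dir)
# prepare_session always returns a session that passes the ready_op check,
# initializing or restoring as needed, while recover_session returns
# initialized=False when no checkpoint could be restored, leaving
# initialization to the caller.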
|
merivercap/bitshares-2-ui
|
refs/heads/bitshares
|
docs/conf.py
|
11
|
# -*- coding: utf-8 -*-
#
# Graphene UI documentation build configuration file, created by
# sphinx-quickstart on Mon Feb 8 19:12:18 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Graphene UI'
copyright = u'2016, Sigve Kvalsvik'
author = u'Sigve Kvalsvik'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'2.0.160203'
# The full version, including alpha/beta/rc tags.
release = u'2.0.160203'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'GrapheneGUIdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'GrapheneGUI.tex', u'Graphene UI Documentation',
u'Sigve Kvalsvik', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'graphenegui', u'Graphene UI Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'GrapheneGUI', u'Graphene UI Documentation',
author, 'GrapheneGUI', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
enthought/etsproxy
|
refs/heads/master
|
enthought/chaco/lasso_overlay.py
|
1
|
# proxy module
from __future__ import absolute_import
from chaco.lasso_overlay import *
|
jhuapl-marti/marti
|
refs/heads/master
|
crits/comments/urls.py
|
17
|
from django.conf.urls import patterns
urlpatterns = patterns('crits.comments.views',
(r'^remove/(?P<obj_id>\S+)/$', 'remove_comment'),
(r'^(?P<method>\S+)/(?P<obj_type>\S+)/(?P<obj_id>\S+)/$', 'add_update_comment'),
(r'^activity/$', 'activity'),
(r'^activity/(?P<atype>\S+)/(?P<value>\S+)/$', 'activity'),
(r'^activity/get_new_comments/$', 'get_new_comments'),
(r'^search/(?P<stype>[A-Za-z0-9\-\._]+)/(?P<sterm>.+?)/$', 'comment_search'),
(r'^list/$', 'comments_listing'),
(r'^list/(?P<option>\S+)/$', 'comments_listing'),
)
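# Hypothetical request sketch (not part of the original file): with this
# urlconf included under a "comments/" prefix, a request such as
#   GET /comments/activity/byuser/alice/
# resolves to crits.comments.views.activity with atype='byuser' and
# value='alice'.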
|
jollaman999/jolla-kernel_G_Gen3-Stock
|
refs/heads/test1
|
tools/perf/util/setup.py
|
4998
|
#!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
self.build_lib = build_lib
self.build_temp = build_tmp
class install_lib(_install_lib):
def finalize_options(self):
_install_lib.finalize_options(self)
self.build_dir = build_lib
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
ext_sources = [f.strip() for f in file('util/python-ext-sources')
if len(f.strip()) > 0 and f[0] != '#']
perf = Extension('perf',
sources = ext_sources,
include_dirs = ['util/include'],
extra_compile_args = cflags,
)
setup(name='perf',
version='0.1',
description='Interface with the Linux profiling infrastructure',
author='Arnaldo Carvalho de Melo',
author_email='acme@redhat.com',
license='GPLv2',
url='http://perf.wiki.kernel.org',
ext_modules=[perf],
cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
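# Hedged invocation sketch (paths illustrative): the perf build system is
# expected to export the staging directories before running this script,
# e.g.
#   PYTHON_EXTBUILD_LIB=/tmp/build/lib/ PYTHON_EXTBUILD_TMP=/tmp/build/tmp/ \
#       python2 util/setup.py --quiet build_ext
# so that the build_ext/install_lib subclasses above pick up build_lib and
# build_tmp from the environment.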
|
YunoHost/apps
|
refs/heads/master
|
i18n-tools/generate_translation_file.py
|
3
|
import os
import sys
import json
if __name__ == '__main__':
if os.path.exists("locales/en.json"):
print "This script should be run only once, the first time to generate locales/, after that you should use update_translations.py"
print "Abort"
sys.exit(1)
other_langs = {}
keys = []
en = {}
for builded_file in sys.argv[1:]:
builded_file = json.load(open(builded_file, "r"))
for app, data in builded_file.items():
if "en" in data["manifest"]["description"]:
key = "%s_manifest_description" % app
en[key] = data["manifest"]["description"]["en"]
keys.append(key)
for i in data["manifest"]["description"]:
if i not in other_langs:
other_langs[i] = {x: "" for x in keys}
for i, translations in other_langs.items():
translations[key] = data["manifest"]["description"].get(i, "")
for category, questions in data["manifest"]["arguments"].items():
for question in questions:
if "en" not in question["ask"]:
continue
key = "%s_manifest_arguments_%s_%s" % (app, category, question["name"])
en[key] = question["ask"]["en"]
keys.append(key)
for i in question["ask"]:
if i not in other_langs:
other_langs[i] = {x: "" for x in keys}
for i, translations in other_langs.items():
translations[key] = question["ask"].get(i, "")
if not os.path.exists("locales"):
os.makedirs("locales")
open("locales/en.json", "w").write(json.dumps(en, sort_keys=True, indent=4))
for i, translations in other_langs.items():
open("locales/%s.json" % i, "w").write(json.dumps(translations, sort_keys=True, indent=4))
|
tedelhourani/ansible
|
refs/heads/devel
|
examples/scripts/uptime.py
|
17
|
#!/usr/bin/env python
from collections import namedtuple
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.inventory.manager import InventoryManager
from ansible.parsing.dataloader import DataLoader
from ansible.playbook.play import Play
from ansible.plugins.callback import CallbackBase
from ansible.vars.manager import VariableManager
# Create a callback object so we can capture the output
class ResultsCollector(CallbackBase):
def __init__(self, *args, **kwargs):
super(ResultsCollector, self).__init__(*args, **kwargs)
self.host_ok = {}
self.host_unreachable = {}
self.host_failed = {}
def v2_runner_on_unreachable(self, result):
self.host_unreachable[result._host.get_name()] = result
def v2_runner_on_ok(self, result, *args, **kwargs):
self.host_ok[result._host.get_name()] = result
def v2_runner_on_failed(self, result, *args, **kwargs):
self.host_failed[result._host.get_name()] = result
def main():
host_list = ['localhost', 'www.example.com', 'www.google.com']
Options = namedtuple('Options', ['connection', 'module_path', 'forks', 'remote_user',
'private_key_file', 'ssh_common_args', 'ssh_extra_args', 'sftp_extra_args',
'scp_extra_args', 'become', 'become_method', 'become_user', 'verbosity', 'check',
'diff'])
# initialize needed objects
loader = DataLoader()
options = Options(connection='smart', module_path='/usr/share/ansible', forks=100,
remote_user=None, private_key_file=None, ssh_common_args=None, ssh_extra_args=None,
sftp_extra_args=None, scp_extra_args=None, become=None, become_method=None,
become_user=None, verbosity=None, check=False, diff=False)
passwords = dict()
# create inventory and pass to var manager
inventory = InventoryManager(loader=loader, sources=','.join(host_list))
variable_manager = VariableManager(loader=loader, inventory=inventory)
# create play with tasks
play_source = dict(
name="Ansible Play",
hosts=host_list,
gather_facts='no',
tasks=[dict(action=dict(module='command', args=dict(cmd='/usr/bin/uptime')))]
)
play = Play().load(play_source, variable_manager=variable_manager, loader=loader)
# actually run it
tqm = None
callback = ResultsCollector()
try:
tqm = TaskQueueManager(
inventory=inventory,
variable_manager=variable_manager,
loader=loader,
options=options,
passwords=passwords,
)
tqm._stdout_callback = callback
result = tqm.run(play)
finally:
if tqm is not None:
tqm.cleanup()
print("UP ***********")
for host, result in callback.host_ok.items():
print('{} >>> {}'.format(host, result._result['stdout']))
print("FAILED *******")
for host, result in callback.host_failed.items():
print('{} >>> {}'.format(host, result._result['msg']))
print("DOWN *********")
for host, result in callback.host_unreachable.items():
print('{} >>> {}'.format(host, result._result['msg']))
if __name__ == '__main__':
main()
|
goliveirab/odoo
|
refs/heads/8.0
|
addons/hr_holidays/report/hr_holidays_report.py
|
341
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import fields,osv
class hr_holidays_remaining_leaves_user(osv.osv):
_name = "hr.holidays.remaining.leaves.user"
_description = "Total holidays by type"
_auto = False
_columns = {
'name': fields.char('Employee'),
'no_of_leaves': fields.integer('Remaining leaves'),
'user_id': fields.many2one('res.users', 'User'),
'leave_type': fields.char('Leave Type'),
}
def init(self, cr):
tools.drop_view_if_exists(cr, 'hr_holidays_remaining_leaves_user')
cr.execute("""
CREATE or REPLACE view hr_holidays_remaining_leaves_user as (
SELECT
min(hrs.id) as id,
rr.name as name,
sum(hrs.number_of_days) as no_of_leaves,
rr.user_id as user_id,
hhs.name as leave_type
FROM
hr_holidays as hrs, hr_employee as hre,
resource_resource as rr,hr_holidays_status as hhs
WHERE
hrs.employee_id = hre.id and
hre.resource_id = rr.id and
hhs.id = hrs.holiday_status_id
GROUP BY
rr.name,rr.user_id,hhs.name
)
""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
CCI-MOC/nova
|
refs/heads/k2k-liberty
|
nova/spice/__init__.py
|
72
|
#!/usr/bin/env python
# Copyright (c) 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for SPICE Proxying."""
from oslo_config import cfg
spice_opts = [
cfg.StrOpt('html5proxy_base_url',
default='http://127.0.0.1:6082/spice_auto.html',
help='Location of spice HTML5 console proxy, in the form '
'"http://127.0.0.1:6082/spice_auto.html"'),
cfg.StrOpt('server_listen',
default='127.0.0.1',
help='IP address on which instance spice server should listen'),
cfg.StrOpt('server_proxyclient_address',
default='127.0.0.1',
help='The address to which proxy clients '
'(like nova-spicehtml5proxy) should connect'),
cfg.BoolOpt('enabled',
default=False,
help='Enable spice related features'),
cfg.BoolOpt('agent_enabled',
default=True,
help='Enable spice guest agent support'),
cfg.StrOpt('keymap',
default='en-us',
help='Keymap for spice'),
]
CONF = cfg.CONF
CONF.register_opts(spice_opts, group='spice')
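# Hedged usage sketch (option names mirror the registrations above): once
# this module is imported, other nova code can read the values through the
# global CONF object, e.g.
#   if CONF.spice.enabled:
#       base_url = CONF.spice.html5proxy_base_url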
|
Eziohao/DV-router
|
refs/heads/master
|
node_modules/bellman-ford/node_modules/node-gyp/gyp/pylib/gyp/win_tool.py
|
231
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions for Windows builds.
These functions are executed via gyp-win-tool when using the ninja generator.
"""
from ctypes import windll, wintypes
import os
import shutil
import subprocess
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
def main(args):
executor = WinTool()
exit_code = executor.Dispatch(args)
if exit_code is not None:
sys.exit(exit_code)
class LinkLock(object):
"""A flock-style lock to limit the number of concurrent links to one.
Uses a session-local mutex based on the file's directory.
"""
def __enter__(self):
name = 'Local\\%s' % BASE_DIR.replace('\\', '_').replace(':', '_')
self.mutex = windll.kernel32.CreateMutexW(
wintypes.c_int(0),
wintypes.c_int(0),
wintypes.create_unicode_buffer(name))
assert self.mutex
result = windll.kernel32.WaitForSingleObject(
self.mutex, wintypes.c_int(0xFFFFFFFF))
# 0x80 means another process was killed without releasing the mutex, but
# that this process has been given ownership. This is fine for our
# purposes.
assert result in (0, 0x80), (
"%s, %s" % (result, windll.kernel32.GetLastError()))
def __exit__(self, type, value, traceback):
windll.kernel32.ReleaseMutex(self.mutex)
windll.kernel32.CloseHandle(self.mutex)
class WinTool(object):
"""This class performs all the Windows tooling steps. The methods can either
be executed directly, or dispatched from an argument list."""
def Dispatch(self, args):
"""Dispatches a string command to a method."""
if len(args) < 1:
raise Exception("Not enough arguments")
method = "Exec%s" % self._CommandifyName(args[0])
return getattr(self, method)(*args[1:])
def _CommandifyName(self, name_string):
"""Transforms a tool name like recursive-mirror to RecursiveMirror."""
return name_string.title().replace('-', '')
def _GetEnv(self, arch):
"""Gets the saved environment from a file for a given architecture."""
# The environment is saved as an "environment block" (see CreateProcess
# and msvs_emulation for details). We convert to a dict here.
# Drop last 2 NULs, one for list terminator, one for trailing vs. separator.
pairs = open(arch).read()[:-2].split('\0')
kvs = [item.split('=', 1) for item in pairs]
return dict(kvs)
def ExecStamp(self, path):
"""Simple stamp command."""
open(path, 'w').close()
def ExecRecursiveMirror(self, source, dest):
"""Emulation of rm -rf out && cp -af in out."""
if os.path.exists(dest):
if os.path.isdir(dest):
shutil.rmtree(dest)
else:
os.unlink(dest)
if os.path.isdir(source):
shutil.copytree(source, dest)
else:
shutil.copy2(source, dest)
def ExecLinkWrapper(self, arch, *args):
"""Filter diagnostic output from link that looks like:
' Creating library ui.dll.lib and object ui.dll.exp'
This happens when there are exports from the dll or exe.
"""
with LinkLock():
env = self._GetEnv(arch)
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
for line in out.splitlines():
if not line.startswith(' Creating library '):
print line
return popen.returncode
def ExecManifestWrapper(self, arch, *args):
"""Run manifest tool with environment set. Strip out undesirable warning
(some XML blocks are recognized by the OS loader, but not the manifest
tool)."""
env = self._GetEnv(arch)
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
for line in out.splitlines():
if line and 'manifest authoring warning 81010002' not in line:
print line
return popen.returncode
def ExecMidlWrapper(self, arch, outdir, tlb, h, dlldata, iid, proxy, idl,
*flags):
"""Filter noisy filenames output from MIDL compile step that isn't
quietable via command line flags.
"""
args = ['midl', '/nologo'] + list(flags) + [
'/out', outdir,
'/tlb', tlb,
'/h', h,
'/dlldata', dlldata,
'/iid', iid,
'/proxy', proxy,
idl]
env = self._GetEnv(arch)
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
# Filter junk out of stdout, and write filtered versions. Output we want
# to filter is pairs of lines that look like this:
# Processing C:\Program Files (x86)\Microsoft SDKs\...\include\objidl.idl
# objidl.idl
lines = out.splitlines()
prefix = 'Processing '
processing = set(os.path.basename(x) for x in lines if x.startswith(prefix))
for line in lines:
if not line.startswith(prefix) and line not in processing:
print line
return popen.returncode
def ExecAsmWrapper(self, arch, *args):
"""Filter logo banner from invocations of asm.exe."""
env = self._GetEnv(arch)
# MSVS doesn't assemble x64 asm files.
if arch == 'environment.x64':
return 0
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
for line in out.splitlines():
if (not line.startswith('Copyright (C) Microsoft Corporation') and
not line.startswith('Microsoft (R) Macro Assembler') and
not line.startswith(' Assembling: ') and
line):
print line
return popen.returncode
def ExecRcWrapper(self, arch, *args):
"""Filter logo banner from invocations of rc.exe. Older versions of RC
don't support the /nologo flag."""
env = self._GetEnv(arch)
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
for line in out.splitlines():
if (not line.startswith('Microsoft (R) Windows (R) Resource Compiler') and
not line.startswith('Copyright (C) Microsoft Corporation') and
line):
print line
return popen.returncode
def ExecActionWrapper(self, arch, rspfile, *dir):
"""Runs an action command line from a response file using the environment
for |arch|. If |dir| is supplied, use that as the working directory."""
env = self._GetEnv(arch)
args = open(rspfile).read()
dir = dir[0] if dir else None
popen = subprocess.Popen(args, shell=True, env=env, cwd=dir)
popen.wait()
return popen.returncode
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
pedrobaeza/odoo
|
refs/heads/master
|
addons/mass_mailing/models/mail_thread.py
|
65
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import logging
from openerp import tools
from openerp.addons.mail.mail_message import decode
from openerp.addons.mail.mail_thread import decode_header
from openerp.osv import osv
_logger = logging.getLogger(__name__)
class MailThread(osv.AbstractModel):
""" Update MailThread to add the feature of bounced emails and replied emails
in message_process. """
_name = 'mail.thread'
_inherit = ['mail.thread']
def message_route_check_bounce(self, cr, uid, message, context=None):
""" Override to verify that the email_to is the bounce alias. If it is the
case, log the bounce, set the parent and related document as bounced and
return False to end the routing process. """
bounce_alias = self.pool['ir.config_parameter'].get_param(cr, uid, "mail.bounce.alias", context=context)
message_id = message.get('Message-Id')
email_from = decode_header(message, 'From')
email_to = decode_header(message, 'To')
# 0. Verify whether this is a bounced email (wrong destination,...) -> use it to collect data, such as dead leads
if bounce_alias in email_to:
bounce_match = tools.bounce_re.search(email_to)
if bounce_match:
bounced_model, bounced_thread_id = None, False
bounced_mail_id = bounce_match.group(1)
stat_ids = self.pool['mail.mail.statistics'].set_bounced(cr, uid, mail_mail_ids=[bounced_mail_id], context=context)
for stat in self.pool['mail.mail.statistics'].browse(cr, uid, stat_ids, context=context):
bounced_model = stat.model
bounced_thread_id = stat.res_id
_logger.info('Routing mail from %s to %s with Message-Id %s: bounced mail from mail %s, model: %s, thread_id: %s',
email_from, email_to, message_id, bounced_mail_id, bounced_model, bounced_thread_id)
if bounced_model and bounced_model in self.pool and hasattr(self.pool[bounced_model], 'message_receive_bounce') and bounced_thread_id:
self.pool[bounced_model].message_receive_bounce(cr, uid, [bounced_thread_id], mail_id=bounced_mail_id, context=context)
return False
return True
def message_route(self, cr, uid, message, message_dict, model=None, thread_id=None,
custom_values=None, context=None):
if not self.message_route_check_bounce(cr, uid, message, context=context):
return []
return super(MailThread, self).message_route(cr, uid, message, message_dict, model, thread_id, custom_values, context)
def message_receive_bounce(self, cr, uid, ids, mail_id=None, context=None):
"""Called by ``message_process`` when a bounce email (such as Undelivered
Mail Returned to Sender) is received for an existing thread. The default
        behavior is to check if an integer ``message_bounce`` column exists.
If it is the case, its content is incremented. """
if self._all_columns.get('message_bounce'):
for obj in self.browse(cr, uid, ids, context=context):
self.write(cr, uid, [obj.id], {'message_bounce': obj.message_bounce + 1}, context=context)
def message_route_process(self, cr, uid, message, message_dict, routes, context=None):
""" Override to update the parent mail statistics. The parent is found
by using the References header of the incoming message and looking for
matching message_id in mail.mail.statistics. """
if message.get('References'):
message_ids = [x.strip() for x in decode(message['References']).split()]
self.pool['mail.mail.statistics'].set_replied(cr, uid, mail_message_ids=message_ids, context=context)
return super(MailThread, self).message_route_process(cr, uid, message, message_dict, routes, context=context)
|
hackBCA/missioncontrol
|
refs/heads/master
|
application/mod_admin/controllers.py
|
1
|
from application import CONFIG, app
from application.mod_user.models import *
from application.mod_user.controllers import get_user
import re
import sendgrid
import bcrypt
import time
class UserExistsError(Exception):
    """Raised when the email already exists in the database."""
sg = sendgrid.SendGridClient(CONFIG["SENDGRID_API_KEY"])
def get_users():
entries = StaffUserEntry.objects()
return entries
def add_user(email, firstname, lastname, password, roles):
existingUser = get_user(email)
if existingUser is not None:
raise UserExistsError
hashed = bcrypt.hashpw(password.encode("utf-8"), bcrypt.gensalt())
new_entry = StaffUserEntry(email = email, hashed = hashed, firstname = firstname, lastname = lastname, roles = roles)
new_entry.save()
def edit_user(firstname, lastname, email, roles):
existingUser = get_user(email)
existingUser.firstname = firstname
existingUser.lastname = lastname
existingUser.email = email
existingUser.roles = roles
existingUser.save()
def delete_user(email):
existingUser = get_user(email)
existingUser.delete()
def get_user_by_id(uid):
entries = StaffUserEntry.objects(id = uid)
if entries.count() == 1:
return entries[0]
return None
|
HenriWahl/Nagstamon
|
refs/heads/master
|
Nagstamon/Servers/__init__.py
|
1
|
# encoding: utf-8
# Nagstamon - Nagios status monitor for your desktop
# Copyright (C) 2008-2021 Henri Wahl <henri@nagstamon.de> et al.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""Module Servers"""
import urllib.request
import urllib.error
import urllib.parse
from collections import OrderedDict
# we need this for uncommon modules
try:
import icinga2api
icinga2api_is_available = True
except ImportError:
icinga2api_is_available = False
# load all existing server types
from Nagstamon.Servers.Nagios import NagiosServer
from Nagstamon.Servers.Centreon import CentreonServer
from Nagstamon.Servers.Icinga import IcingaServer
from Nagstamon.Servers.IcingaWeb2 import IcingaWeb2Server
from Nagstamon.Servers.Multisite import MultisiteServer
from Nagstamon.Servers.op5Monitor import Op5MonitorServer
from Nagstamon.Servers.Opsview import OpsviewServer
from Nagstamon.Servers.Thruk import ThrukServer
from Nagstamon.Servers.Zabbix import ZabbixServer
from Nagstamon.Servers.ZabbixProblemBased import ZabbixProblemBasedServer
from Nagstamon.Servers.Livestatus import LivestatusServer
from Nagstamon.Servers.Zenoss import ZenossServer
from Nagstamon.Servers.Monitos3 import Monitos3Server
from Nagstamon.Servers.Monitos4x import Monitos4xServer
from Nagstamon.Servers.SnagView3 import SnagViewServer
from Nagstamon.Servers.Sensu import SensuServer
from Nagstamon.Servers.SensuGo import SensuGoServer
from Nagstamon.Servers.Prometheus import PrometheusServer
from Nagstamon.Servers.Alertmanager import AlertmanagerServer
from Nagstamon.Config import conf
from Nagstamon.Helpers import STATES
# conditional load if module is available
if icinga2api_is_available is True:
from Nagstamon.Servers.Icinga2API import Icinga2APIServer
# dictionary for servers
servers = OrderedDict()
# contains dict with available server classes
# key is type of server, value is server class
# used for automatic config generation
# and holding this information in one place
SERVER_TYPES = OrderedDict()
def register_server(server):
"""
    Once a new server class is created, it should be registered with this
    function to be visible in the config and accessible in the application.
"""
if server.TYPE not in SERVER_TYPES:
SERVER_TYPES[server.TYPE] = server
def get_enabled_servers():
"""
    Return the list of enabled servers whose connections should be used for checks.
"""
return([x for x in servers.values() if x.enabled is True])
def get_worst_status():
"""
get worst status of all servers
"""
worst_status = 'UP'
for server in get_enabled_servers():
worst_status_current = server.get_worst_status_current()
if STATES.index(worst_status_current) > STATES.index(worst_status):
worst_status = worst_status_current
del worst_status_current
return worst_status
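# Note (illustrative, assuming STATES orders severities ascending, e.g.
# UP < WARNING < ... < DOWN): one server at 'WARNING' and another at 'DOWN'
# make get_worst_status() return 'DOWN'.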
def get_status_count():
"""
get all states of all servers and count them
"""
state_count = {'UNKNOWN': 0,
'INFORMATION': 0,
'WARNING': 0,
'AVERAGE': 0,
'HIGH': 0,
'CRITICAL': 0,
'DISASTER': 0,
'UNREACHABLE': 0,
'DOWN': 0}
for server in get_enabled_servers():
state_count['UNKNOWN'] += server.unknown
state_count['INFORMATION'] += server.information
state_count['WARNING'] += server.warning
state_count['AVERAGE'] += server.average
state_count['HIGH'] += server.high
state_count['CRITICAL'] += server.critical
state_count['DISASTER'] += server.disaster
state_count['UNREACHABLE'] += server.unreachable
state_count['DOWN'] += server.down
return(state_count)
def get_errors():
"""
find out if any server has any error, used by statusbar error label
"""
for server in get_enabled_servers():
if server.has_error:
return True
# return default value
return False
def create_server(server=None):
# create Server from config
if server.type not in SERVER_TYPES:
print(('Server type not supported: %s' % server.type))
return
# give argument servername so CentreonServer could use it for initializing MD5 cache
new_server = SERVER_TYPES[server.type](name=server.name)
new_server.type = server.type
new_server.enabled = server.enabled
new_server.monitor_url = server.monitor_url
new_server.monitor_cgi_url = server.monitor_cgi_url
new_server.username = server.username
new_server.password = server.password
new_server.use_proxy = server.use_proxy
new_server.use_proxy_from_os = server.use_proxy_from_os
new_server.proxy_address = server.proxy_address
new_server.proxy_username = server.proxy_username
new_server.proxy_password = server.proxy_password
new_server.authentication = server.authentication
new_server.timeout = server.timeout
# SSL/TLS
new_server.ignore_cert = server.ignore_cert
new_server.custom_cert_use = server.custom_cert_use
new_server.custom_cert_ca_file = server.custom_cert_ca_file
# ECP authentication
new_server.idp_ecp_endpoint = server.idp_ecp_endpoint
# if password is not to be saved ask for it at startup
if (server.enabled is True and server.save_password is False and
server.use_autologin is False):
new_server.refresh_authentication = True
# Special FX
# Centreon
new_server.use_autologin = server.use_autologin
new_server.autologin_key = server.autologin_key
# Icinga
new_server.use_display_name_host = server.use_display_name_host
new_server.use_display_name_service = server.use_display_name_service
# IcingaWeb2
new_server.no_cookie_auth = server.no_cookie_auth
# Checkmk Multisite
new_server.force_authuser = server.force_authuser
new_server.checkmk_view_hosts = server.checkmk_view_hosts
new_server.checkmk_view_services = server.checkmk_view_services
# OP5 api filters
new_server.host_filter = server.host_filter
new_server.service_filter = server.service_filter
# Zabbix
new_server.use_description_name_service = server.use_description_name_service
# Prometheus & Alertmanager
new_server.alertmanager_filter = server.alertmanager_filter
new_server.map_to_hostname = server.map_to_hostname
new_server.map_to_servicename = server.map_to_servicename
new_server.map_to_status_information = server.map_to_status_information
# server's individual preparations for HTTP connections (for example cookie creation)
# is done in GetStatus() method of monitor
if server.enabled is True:
new_server.enabled = True
# start with high thread counter so server update thread does not have to wait
new_server.thread_counter = conf.update_interval_seconds
# debug
if conf.debug_mode is True:
new_server.Debug(server=server.name, debug="Created server.")
return new_server
# moved registration process here because of circular dependencies
servers_list = [CentreonServer, IcingaServer, IcingaWeb2Server, MultisiteServer, NagiosServer,
Op5MonitorServer, OpsviewServer, ThrukServer, ZabbixServer, SensuServer,
SensuGoServer, LivestatusServer, ZenossServer, Monitos3Server, Monitos4xServer,
SnagViewServer, PrometheusServer, AlertmanagerServer, ZabbixProblemBasedServer]
# we use these servers conditionally if modules are available only
if icinga2api_is_available is True:
servers_list.append(Icinga2APIServer)
for server in servers_list:
register_server(server)
# create servers
for server in conf.servers.values():
created_server = create_server(server)
if created_server is not None:
servers[server.name] = created_server
# for the next time no auth needed
servers[server.name].refresh_authentication = False
|
ujenmr/ansible
|
refs/heads/devel
|
lib/ansible/module_utils/docker/common.py
|
5
|
#
# Copyright 2016 Red Hat | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import re
from datetime import timedelta
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule, env_fallback
from ansible.module_utils.six.moves.urllib.parse import urlparse
from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE, BOOLEANS_FALSE
HAS_DOCKER_PY = True
HAS_DOCKER_PY_2 = False
HAS_DOCKER_PY_3 = False
HAS_DOCKER_ERROR = None
try:
from requests.exceptions import SSLError
from docker import __version__ as docker_version
from docker.errors import APIError, NotFound, TLSParameterError
from docker.tls import TLSConfig
from docker import auth
if LooseVersion(docker_version) >= LooseVersion('3.0.0'):
HAS_DOCKER_PY_3 = True
from docker import APIClient as Client
elif LooseVersion(docker_version) >= LooseVersion('2.0.0'):
HAS_DOCKER_PY_2 = True
from docker import APIClient as Client
else:
from docker import Client
except ImportError as exc:
HAS_DOCKER_ERROR = str(exc)
HAS_DOCKER_PY = False
# The next 2 imports ``docker.models`` and ``docker.ssladapter`` are used
# to ensure the user does not have both ``docker`` and ``docker-py`` modules
# installed, as they utilize the same namespace and are incompatible
try:
# docker
import docker.models # noqa: F401
HAS_DOCKER_MODELS = True
except ImportError:
HAS_DOCKER_MODELS = False
try:
# docker-py
import docker.ssladapter # noqa: F401
HAS_DOCKER_SSLADAPTER = True
except ImportError:
HAS_DOCKER_SSLADAPTER = False
DEFAULT_DOCKER_HOST = 'unix://var/run/docker.sock'
DEFAULT_TLS = False
DEFAULT_TLS_VERIFY = False
DEFAULT_TLS_HOSTNAME = 'localhost'
MIN_DOCKER_VERSION = "1.8.0"
DEFAULT_TIMEOUT_SECONDS = 60
DOCKER_COMMON_ARGS = dict(
docker_host=dict(type='str', default=DEFAULT_DOCKER_HOST, fallback=(env_fallback, ['DOCKER_HOST']), aliases=['docker_url']),
tls_hostname=dict(type='str', default=DEFAULT_TLS_HOSTNAME, fallback=(env_fallback, ['DOCKER_TLS_HOSTNAME'])),
api_version=dict(type='str', default='auto', fallback=(env_fallback, ['DOCKER_API_VERSION']), aliases=['docker_api_version']),
timeout=dict(type='int', default=DEFAULT_TIMEOUT_SECONDS, fallback=(env_fallback, ['DOCKER_TIMEOUT'])),
cacert_path=dict(type='path', aliases=['tls_ca_cert']),
cert_path=dict(type='path', aliases=['tls_client_cert']),
key_path=dict(type='path', aliases=['tls_client_key']),
ssl_version=dict(type='str', fallback=(env_fallback, ['DOCKER_SSL_VERSION'])),
tls=dict(type='bool', default=DEFAULT_TLS, fallback=(env_fallback, ['DOCKER_TLS'])),
tls_verify=dict(type='bool', default=DEFAULT_TLS_VERIFY, fallback=(env_fallback, ['DOCKER_TLS_VERIFY'])),
debug=dict(type='bool', default=False)
)
DOCKER_MUTUALLY_EXCLUSIVE = [
['tls', 'tls_verify']
]
DOCKER_REQUIRED_TOGETHER = [
['cert_path', 'key_path']
]
DEFAULT_DOCKER_REGISTRY = 'https://index.docker.io/v1/'
EMAIL_REGEX = r'[^@]+@[^@]+\.[^@]+'
BYTE_SUFFIXES = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
if not HAS_DOCKER_PY:
docker_version = None
    # No docker-py. Create a placeholder client to allow
    # instantiation of AnsibleModule and proper error handling
class Client(object): # noqa: F811
def __init__(self, **kwargs):
pass
class APIError(Exception): # noqa: F811
pass
class NotFound(Exception): # noqa: F811
pass
def is_image_name_id(name):
"""Checks whether the given image name is in fact an image ID (hash)."""
if re.match('^sha256:[0-9a-fA-F]{64}$', name):
return True
return False
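# Quick sketch (values chosen for illustration only):
#   is_image_name_id('sha256:' + 'a' * 64)  -> True
#   is_image_name_id('ubuntu:18.04')        -> False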
def sanitize_result(data):
"""Sanitize data object for return to Ansible.
When the data object contains types such as docker.types.containers.HostConfig,
Ansible will fail when these are returned via exit_json or fail_json.
HostConfig is derived from dict, but its constructor requires additional
arguments. This function sanitizes data structures by recursively converting
everything derived from dict to dict and everything derived from list (and tuple)
to a list.
"""
if isinstance(data, dict):
return dict((k, sanitize_result(v)) for k, v in data.items())
elif isinstance(data, (list, tuple)):
return [sanitize_result(v) for v in data]
else:
return data
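# Minimal sketch of the conversion (hypothetical input): dict- and
# tuple-derived containers collapse to plain builtins, e.g.
#   sanitize_result({'a': ({'b': 1},)})  ->  {'a': [{'b': 1}]}
# Tuples deliberately come back as lists, keeping results JSON-friendly.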
class DockerBaseClass(object):
def __init__(self):
self.debug = False
def log(self, msg, pretty_print=False):
pass
# if self.debug:
# log_file = open('docker.log', 'a')
# if pretty_print:
# log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': ')))
# log_file.write(u'\n')
# else:
# log_file.write(msg + u'\n')
class AnsibleDockerClient(Client):
def __init__(self, argument_spec=None, supports_check_mode=False, mutually_exclusive=None,
required_together=None, required_if=None, min_docker_version=MIN_DOCKER_VERSION,
min_docker_api_version=None, option_minimal_versions=None,
option_minimal_versions_ignore_params=None, fail_results=None):
# Modules can put information in here which will always be returned
# in case client.fail() is called.
self.fail_results = fail_results or {}
merged_arg_spec = dict()
merged_arg_spec.update(DOCKER_COMMON_ARGS)
if argument_spec:
merged_arg_spec.update(argument_spec)
self.arg_spec = merged_arg_spec
mutually_exclusive_params = []
mutually_exclusive_params += DOCKER_MUTUALLY_EXCLUSIVE
if mutually_exclusive:
mutually_exclusive_params += mutually_exclusive
required_together_params = []
required_together_params += DOCKER_REQUIRED_TOGETHER
if required_together:
required_together_params += required_together
self.module = AnsibleModule(
argument_spec=merged_arg_spec,
supports_check_mode=supports_check_mode,
mutually_exclusive=mutually_exclusive_params,
required_together=required_together_params,
required_if=required_if)
NEEDS_DOCKER_PY2 = (LooseVersion(min_docker_version) >= LooseVersion('2.0.0'))
self.docker_py_version = LooseVersion(docker_version)
if HAS_DOCKER_MODELS and HAS_DOCKER_SSLADAPTER:
self.fail("Cannot have both the docker-py and docker python modules installed together as they use the same namespace and "
"cause a corrupt installation. Please uninstall both packages, and re-install only the docker-py or docker python "
"module. It is recommended to install the docker module if no support for Python 2.6 is required. "
"Please note that simply uninstalling one of the modules can leave the other module in a broken state.")
if not HAS_DOCKER_PY:
if NEEDS_DOCKER_PY2:
msg = "Failed to import docker - %s. Try `pip install docker`"
else:
msg = "Failed to import docker or docker-py - %s. Try `pip install docker` or `pip install docker-py` (Python 2.6)"
self.fail(msg % HAS_DOCKER_ERROR)
if self.docker_py_version < LooseVersion(min_docker_version):
if NEEDS_DOCKER_PY2:
if docker_version < LooseVersion('2.0'):
msg = "Error: docker-py version is %s, while this module requires docker %s. Try `pip uninstall docker-py` and then `pip install docker`"
else:
msg = "Error: docker version is %s. Minimum version required is %s. Use `pip install --upgrade docker` to upgrade."
else:
# The minimal required version is < 2.0 (and the current version as well).
# Advertise docker (instead of docker-py) for non-Python-2.6 users.
msg = ("Error: docker / docker-py version is %s. Minimum version required is %s. "
"Hint: if you do not need Python 2.6 support, try `pip uninstall docker-py` followed by `pip install docker`")
self.fail(msg % (docker_version, min_docker_version))
self.debug = self.module.params.get('debug')
self.check_mode = self.module.check_mode
self._connect_params = self._get_connect_params()
try:
super(AnsibleDockerClient, self).__init__(**self._connect_params)
except APIError as exc:
self.fail("Docker API error: %s" % exc)
except Exception as exc:
self.fail("Error connecting: %s" % exc)
if min_docker_api_version is not None:
self.docker_api_version_str = self.version()['ApiVersion']
self.docker_api_version = LooseVersion(self.docker_api_version_str)
if self.docker_api_version < LooseVersion(min_docker_api_version):
self.fail('docker API version is %s. Minimum version required is %s.' % (self.docker_api_version_str, min_docker_api_version))
if option_minimal_versions is not None:
self._get_minimal_versions(option_minimal_versions, option_minimal_versions_ignore_params)
def log(self, msg, pretty_print=False):
pass
# if self.debug:
# log_file = open('docker.log', 'a')
# if pretty_print:
# log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': ')))
# log_file.write(u'\n')
# else:
# log_file.write(msg + u'\n')
def fail(self, msg, **kwargs):
self.fail_results.update(kwargs)
self.module.fail_json(msg=msg, **sanitize_result(self.fail_results))
@staticmethod
def _get_value(param_name, param_value, env_variable, default_value):
if param_value is not None:
# take module parameter value
if param_value in BOOLEANS_TRUE:
return True
if param_value in BOOLEANS_FALSE:
return False
return param_value
if env_variable is not None:
env_value = os.environ.get(env_variable)
if env_value is not None:
# take the env variable value
if param_name == 'cert_path':
return os.path.join(env_value, 'cert.pem')
if param_name == 'cacert_path':
return os.path.join(env_value, 'ca.pem')
if param_name == 'key_path':
return os.path.join(env_value, 'key.pem')
if env_value in BOOLEANS_TRUE:
return True
if env_value in BOOLEANS_FALSE:
return False
return env_value
# take the default
return default_value
@property
def auth_params(self):
# Get authentication credentials.
        # Precedence: module parameters -> environment variables -> defaults.
self.log('Getting credentials')
params = dict()
for key in DOCKER_COMMON_ARGS:
params[key] = self.module.params.get(key)
if self.module.params.get('use_tls'):
# support use_tls option in docker_image.py. This will be deprecated.
use_tls = self.module.params.get('use_tls')
if use_tls == 'encrypt':
params['tls'] = True
if use_tls == 'verify':
params['tls_verify'] = True
result = dict(
docker_host=self._get_value('docker_host', params['docker_host'], 'DOCKER_HOST',
DEFAULT_DOCKER_HOST),
tls_hostname=self._get_value('tls_hostname', params['tls_hostname'],
'DOCKER_TLS_HOSTNAME', DEFAULT_TLS_HOSTNAME),
api_version=self._get_value('api_version', params['api_version'], 'DOCKER_API_VERSION',
'auto'),
cacert_path=self._get_value('cacert_path', params['cacert_path'], 'DOCKER_CERT_PATH', None),
cert_path=self._get_value('cert_path', params['cert_path'], 'DOCKER_CERT_PATH', None),
key_path=self._get_value('key_path', params['key_path'], 'DOCKER_CERT_PATH', None),
ssl_version=self._get_value('ssl_version', params['ssl_version'], 'DOCKER_SSL_VERSION', None),
tls=self._get_value('tls', params['tls'], 'DOCKER_TLS', DEFAULT_TLS),
            tls_verify=self._get_value('tls_verify', params['tls_verify'], 'DOCKER_TLS_VERIFY',
DEFAULT_TLS_VERIFY),
timeout=self._get_value('timeout', params['timeout'], 'DOCKER_TIMEOUT',
DEFAULT_TIMEOUT_SECONDS),
)
if result['tls_hostname'] is None:
# get default machine name from the url
parsed_url = urlparse(result['docker_host'])
if ':' in parsed_url.netloc:
result['tls_hostname'] = parsed_url.netloc[:parsed_url.netloc.rindex(':')]
else:
                result['tls_hostname'] = parsed_url.netloc
return result
def _get_tls_config(self, **kwargs):
self.log("get_tls_config:")
for key in kwargs:
self.log(" %s: %s" % (key, kwargs[key]))
try:
tls_config = TLSConfig(**kwargs)
return tls_config
except TLSParameterError as exc:
self.fail("TLS config error: %s" % exc)
def _get_connect_params(self):
auth = self.auth_params
self.log("connection params:")
for key in auth:
self.log(" %s: %s" % (key, auth[key]))
if auth['tls'] or auth['tls_verify']:
auth['docker_host'] = auth['docker_host'].replace('tcp://', 'https://')
if auth['tls'] and auth['cert_path'] and auth['key_path']:
# TLS with certs and no host verification
tls_config = self._get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
verify=False,
ssl_version=auth['ssl_version'])
return dict(base_url=auth['docker_host'],
tls=tls_config,
version=auth['api_version'],
timeout=auth['timeout'])
if auth['tls']:
            # TLS with no certs and no host verification
tls_config = self._get_tls_config(verify=False,
ssl_version=auth['ssl_version'])
return dict(base_url=auth['docker_host'],
tls=tls_config,
version=auth['api_version'],
timeout=auth['timeout'])
if auth['tls_verify'] and auth['cert_path'] and auth['key_path']:
# TLS with certs and host verification
if auth['cacert_path']:
tls_config = self._get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
ca_cert=auth['cacert_path'],
verify=True,
assert_hostname=auth['tls_hostname'],
ssl_version=auth['ssl_version'])
else:
tls_config = self._get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
verify=True,
assert_hostname=auth['tls_hostname'],
ssl_version=auth['ssl_version'])
return dict(base_url=auth['docker_host'],
tls=tls_config,
version=auth['api_version'],
timeout=auth['timeout'])
if auth['tls_verify'] and auth['cacert_path']:
# TLS with cacert only
tls_config = self._get_tls_config(ca_cert=auth['cacert_path'],
assert_hostname=auth['tls_hostname'],
verify=True,
ssl_version=auth['ssl_version'])
return dict(base_url=auth['docker_host'],
tls=tls_config,
version=auth['api_version'],
timeout=auth['timeout'])
if auth['tls_verify']:
# TLS with verify and no certs
tls_config = self._get_tls_config(verify=True,
assert_hostname=auth['tls_hostname'],
ssl_version=auth['ssl_version'])
return dict(base_url=auth['docker_host'],
tls=tls_config,
version=auth['api_version'],
timeout=auth['timeout'])
# No TLS
return dict(base_url=auth['docker_host'],
version=auth['api_version'],
timeout=auth['timeout'])
def _handle_ssl_error(self, error):
match = re.match(r"hostname.*doesn\'t match (\'.*\')", str(error))
if match:
self.fail("You asked for verification that Docker host name matches %s. The actual hostname is %s. "
"Most likely you need to set DOCKER_TLS_HOSTNAME or pass tls_hostname with a value of %s. "
"You may also use TLS without verification by setting the tls parameter to true."
% (self.auth_params['tls_hostname'], match.group(1), match.group(1)))
self.fail("SSL Exception: %s" % (error))
def _get_minimal_versions(self, option_minimal_versions, ignore_params=None):
self.option_minimal_versions = dict()
for option in self.module.argument_spec:
if ignore_params is not None:
if option in ignore_params:
continue
self.option_minimal_versions[option] = dict()
self.option_minimal_versions.update(option_minimal_versions)
for option, data in self.option_minimal_versions.items():
# Test whether option is supported, and store result
support_docker_py = True
support_docker_api = True
if 'docker_py_version' in data:
support_docker_py = self.docker_py_version >= LooseVersion(data['docker_py_version'])
if 'docker_api_version' in data:
support_docker_api = self.docker_api_version >= LooseVersion(data['docker_api_version'])
data['supported'] = support_docker_py and support_docker_api
# Fail if option is not supported but used
if not data['supported']:
# Test whether option is specified
if 'detect_usage' in data:
used = data['detect_usage'](self)
else:
used = self.module.params.get(option) is not None
if used and 'default' in self.module.argument_spec[option]:
used = self.module.params[option] != self.module.argument_spec[option]['default']
if used:
# If the option is used, compose error message.
if 'usage_msg' in data:
usg = data['usage_msg']
else:
usg = 'set %s option' % (option, )
if not support_docker_api:
msg = 'docker API version is %s. Minimum version required is %s to %s.'
msg = msg % (self.docker_api_version_str, data['docker_api_version'], usg)
elif not support_docker_py:
if LooseVersion(data['docker_py_version']) < LooseVersion('2.0.0'):
msg = ("docker-py version is %s. Minimum version required is %s to %s. "
"Consider switching to the 'docker' package if you do not require Python 2.6 support.")
elif self.docker_py_version < LooseVersion('2.0.0'):
msg = ("docker-py version is %s. Minimum version required is %s to %s. "
"You have to switch to the Python 'docker' package. First uninstall 'docker-py' before "
"installing 'docker' to avoid a broken installation.")
else:
msg = "docker version is %s. Minimum version required is %s to %s."
msg = msg % (docker_version, data['docker_py_version'], usg)
else:
# should not happen
msg = 'Cannot %s with your configuration.' % (usg, )
self.fail(msg)
def get_container(self, name=None):
'''
Lookup a container and return the inspection results.
'''
if name is None:
return None
search_name = name
if not name.startswith('/'):
search_name = '/' + name
result = None
try:
for container in self.containers(all=True):
self.log("testing container: %s" % (container['Names']))
if isinstance(container['Names'], list) and search_name in container['Names']:
result = container
break
if container['Id'].startswith(name):
result = container
break
if container['Id'] == name:
result = container
break
except SSLError as exc:
self._handle_ssl_error(exc)
except Exception as exc:
self.fail("Error retrieving container list: %s" % exc)
if result is not None:
try:
self.log("Inspecting container Id %s" % result['Id'])
result = self.inspect_container(container=result['Id'])
self.log("Completed container inspection")
except NotFound as dummy:
return None
except Exception as exc:
self.fail("Error inspecting container: %s" % exc)
return result
def get_network(self, name=None, id=None):
'''
Lookup a network and return the inspection results.
'''
if name is None and id is None:
return None
result = None
if id is None:
try:
for network in self.networks():
self.log("testing network: %s" % (network['Name']))
if name == network['Name']:
result = network
break
if network['Id'].startswith(name):
result = network
break
except SSLError as exc:
self._handle_ssl_error(exc)
except Exception as exc:
self.fail("Error retrieving network list: %s" % exc)
if result is not None:
id = result['Id']
if id is not None:
try:
self.log("Inspecting network Id %s" % id)
result = self.inspect_network(id)
self.log("Completed network inspection")
except NotFound as dummy:
return None
except Exception as exc:
self.fail("Error inspecting network: %s" % exc)
return result
def find_image(self, name, tag):
'''
Lookup an image (by name and tag) and return the inspection results.
'''
if not name:
return None
self.log("Find image %s:%s" % (name, tag))
images = self._image_lookup(name, tag)
if len(images) == 0:
            # In API <= 1.20, images pulled from Docker Hub are named 'docker.io/<name>'
registry, repo_name = auth.resolve_repository_name(name)
if registry == 'docker.io':
# If docker.io is explicitly there in name, the image
# isn't found in some cases (#41509)
self.log("Check for docker.io image: %s" % repo_name)
images = self._image_lookup(repo_name, tag)
if len(images) == 0 and repo_name.startswith('library/'):
# Sometimes library/xxx images are not found
lookup = repo_name[len('library/'):]
self.log("Check for docker.io image: %s" % lookup)
images = self._image_lookup(lookup, tag)
if len(images) == 0:
# Last case: if docker.io wasn't there, it can be that
# the image wasn't found either (#15586)
lookup = "%s/%s" % (registry, repo_name)
self.log("Check for docker.io image: %s" % lookup)
images = self._image_lookup(lookup, tag)
if len(images) > 1:
self.fail("Registry returned more than one result for %s:%s" % (name, tag))
if len(images) == 1:
try:
inspection = self.inspect_image(images[0]['Id'])
except Exception as exc:
self.fail("Error inspecting image %s:%s - %s" % (name, tag, str(exc)))
return inspection
self.log("Image %s:%s not found." % (name, tag))
return None
def find_image_by_id(self, id):
'''
Lookup an image (by ID) and return the inspection results.
'''
if not id:
return None
self.log("Find image %s (by ID)" % id)
try:
inspection = self.inspect_image(id)
except Exception as exc:
self.fail("Error inspecting image ID %s - %s" % (id, str(exc)))
return inspection
def _image_lookup(self, name, tag):
'''
Including a tag in the name parameter sent to the docker-py images method does not
work consistently. Instead, get the result set for name and manually check if the tag
exists.
'''
try:
response = self.images(name=name)
except Exception as exc:
self.fail("Error searching for image %s - %s" % (name, str(exc)))
images = response
if tag:
lookup = "%s:%s" % (name, tag)
images = []
for image in response:
tags = image.get('RepoTags')
if tags and lookup in tags:
images = [image]
break
return images
def pull_image(self, name, tag="latest"):
'''
Pull an image
'''
self.log("Pulling image %s:%s" % (name, tag))
old_tag = self.find_image(name, tag)
try:
for line in self.pull(name, tag=tag, stream=True, decode=True):
self.log(line, pretty_print=True)
if line.get('error'):
if line.get('errorDetail'):
error_detail = line.get('errorDetail')
self.fail("Error pulling %s - code: %s message: %s" % (name,
error_detail.get('code'),
error_detail.get('message')))
else:
self.fail("Error pulling %s - %s" % (name, line.get('error')))
except Exception as exc:
self.fail("Error pulling image %s:%s - %s" % (name, tag, str(exc)))
new_tag = self.find_image(name, tag)
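        # note: the second return value is True when the pull left the local image unchanged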
return new_tag, old_tag == new_tag
def compare_dict_allow_more_present(av, bv):
'''
Compare two dictionaries for whether every entry of the first is in the second.
'''
for key, value in av.items():
if key not in bv:
return False
if bv[key] != value:
return False
return True
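# Sketch of the "allow more present" semantics (hypothetical values):
#   compare_dict_allow_more_present({'a': 1}, {'a': 1, 'b': 2})  -> True
#   compare_dict_allow_more_present({'a': 1, 'b': 2}, {'a': 1})  -> False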
def compare_generic(a, b, method, type):
'''
Compare values a and b as described by method and type.
Returns ``True`` if the values compare equal, and ``False`` if not.
``a`` is usually the module's parameter, while ``b`` is a property
of the current object. ``a`` must not be ``None`` (except for
``type == 'value'``).
Valid values for ``method`` are:
- ``ignore`` (always compare as equal);
- ``strict`` (only compare if really equal)
- ``allow_more_present`` (allow b to have elements which a does not have).
Valid values for ``type`` are:
- ``value``: for simple values (strings, numbers, ...);
- ``list``: for ``list``s or ``tuple``s where order matters;
- ``set``: for ``list``s, ``tuple``s or ``set``s where order does not
matter;
- ``set(dict)``: for ``list``s, ``tuple``s or ``sets`` where order does
not matter and which contain ``dict``s; ``allow_more_present`` is used
for the ``dict``s, and these are assumed to be dictionaries of values;
- ``dict``: for dictionaries of values.
'''
if method == 'ignore':
return True
# If a or b is None:
if a is None or b is None:
# If both are None: equality
if a == b:
return True
# Otherwise, not equal for values, and equal
# if the other is empty for set/list/dict
if type == 'value':
return False
# For allow_more_present, allow a to be None
if method == 'allow_more_present' and a is None:
return True
# Otherwise, the iterable object which is not None must have length 0
return len(b if a is None else a) == 0
# Do proper comparison (both objects not None)
if type == 'value':
return a == b
elif type == 'list':
if method == 'strict':
return a == b
else:
i = 0
for v in a:
while i < len(b) and b[i] != v:
i += 1
if i == len(b):
return False
i += 1
return True
elif type == 'dict':
if method == 'strict':
return a == b
else:
return compare_dict_allow_more_present(a, b)
elif type == 'set':
set_a = set(a)
set_b = set(b)
if method == 'strict':
return set_a == set_b
else:
return set_b >= set_a
elif type == 'set(dict)':
for av in a:
found = False
for bv in b:
if compare_dict_allow_more_present(av, bv):
found = True
break
if not found:
return False
if method == 'strict':
# If we would know that both a and b do not contain duplicates,
# we could simply compare len(a) to len(b) to finish this test.
# We can assume that b has no duplicates (as it is returned by
# docker), but we don't know for a.
for bv in b:
found = False
for av in a:
if compare_dict_allow_more_present(av, bv):
found = True
break
if not found:
return False
return True
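# A few illustrative calls (hypothetical values, mirroring the docstring):
#   compare_generic('x', 'x', 'strict', 'value')                         -> True
#   compare_generic([1, 2], [0, 1, 9, 2], 'allow_more_present', 'list')  -> True  (ordered subsequence)
#   compare_generic([1, 2], [2, 1], 'strict', 'set')                     -> True  (order ignored)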
class DifferenceTracker(object):
def __init__(self):
self._diff = []
def add(self, name, parameter=None, active=None):
self._diff.append(dict(
name=name,
parameter=parameter,
active=active,
))
def merge(self, other_tracker):
self._diff.extend(other_tracker._diff)
@property
def empty(self):
return len(self._diff) == 0
def get_before_after(self):
'''
Return texts ``before`` and ``after``.
'''
before = dict()
after = dict()
for item in self._diff:
before[item['name']] = item['active']
after[item['name']] = item['parameter']
return before, after
def get_legacy_docker_container_diffs(self):
'''
Return differences in the docker_container legacy format.
'''
result = []
for entry in self._diff:
item = dict()
item[entry['name']] = dict(
parameter=entry['parameter'],
container=entry['active'],
)
result.append(item)
return result
def get_legacy_docker_diffs(self):
'''
Return differences in the docker_container legacy format.
'''
result = [entry['name'] for entry in self._diff]
return result
def clean_dict_booleans_for_docker_api(data):
'''
Go doesn't like Python booleans 'True' or 'False', while Ansible is just
fine with them in YAML. As such, they need to be converted in cases where
we pass dictionaries to the Docker API (e.g. docker_network's
driver_options and docker_prune's filters).
'''
result = dict()
if data is not None:
for k, v in data.items():
if v is True:
v = 'true'
elif v is False:
v = 'false'
else:
v = str(v)
result[str(k)] = v
return result
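# Sketch with an assumed driver_options-style input:
#   clean_dict_booleans_for_docker_api({'enable_icc': True, 'mtu': 1500})
#   ->  {'enable_icc': 'true', 'mtu': '1500'}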
def convert_duration_to_nanosecond(time_str):
"""
    Return the time duration in nanoseconds.
"""
if not isinstance(time_str, str):
raise ValueError('Missing unit in duration - %s' % time_str)
regex = re.compile(
r'^(((?P<hours>\d+)h)?'
r'((?P<minutes>\d+)m(?!s))?'
r'((?P<seconds>\d+)s)?'
r'((?P<milliseconds>\d+)ms)?'
r'((?P<microseconds>\d+)us)?)$'
)
parts = regex.match(time_str)
if not parts:
raise ValueError('Invalid time duration - %s' % time_str)
parts = parts.groupdict()
time_params = {}
for (name, value) in parts.items():
if value:
time_params[name] = int(value)
delta = timedelta(**time_params)
time_in_nanoseconds = (
delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10 ** 6
) * 10 ** 3
return time_in_nanoseconds
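# For instance (durations chosen for illustration):
#   convert_duration_to_nanosecond('1h30m')  -> 5400000000000   (90 minutes in ns)
#   convert_duration_to_nanosecond('500ms')  -> 500000000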
def parse_healthcheck(healthcheck):
"""
    Return a dictionary of healthcheck parameters and a boolean indicating
    whether the healthcheck defined in the image was requested to be disabled.
"""
if (not healthcheck) or (not healthcheck.get('test')):
return None, None
result = dict()
# All supported healthcheck parameters
options = dict(
test='test',
interval='interval',
timeout='timeout',
start_period='start_period',
retries='retries'
)
duration_options = ['interval', 'timeout', 'start_period']
for (key, value) in options.items():
if value in healthcheck:
if healthcheck.get(value) is None:
# due to recursive argument_spec, all keys are always present
# (but have default value None if not specified)
continue
if value in duration_options:
time = convert_duration_to_nanosecond(healthcheck.get(value))
if time:
result[key] = time
elif healthcheck.get(value):
result[key] = healthcheck.get(value)
if key == 'test':
if isinstance(result[key], (tuple, list)):
result[key] = [str(e) for e in result[key]]
else:
result[key] = ['CMD-SHELL', str(result[key])]
elif key == 'retries':
try:
result[key] = int(result[key])
except ValueError:
raise ValueError(
'Cannot parse number of retries for healthcheck. '
'Expected an integer, got "{0}".'.format(result[key])
)
if result['test'] == ['NONE']:
# If the user explicitly disables the healthcheck, return None
# as the healthcheck object, and set disable_healthcheck to True
return None, True
return result, False
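# Illustration with an assumed module-style healthcheck dict:
#   parse_healthcheck({'test': 'curl -f http://localhost/', 'interval': '30s', 'retries': 3})
#   -> ({'test': ['CMD-SHELL', 'curl -f http://localhost/'],
#        'interval': 30000000000, 'retries': 3}, False)
#   parse_healthcheck({'test': ['NONE']})  -> (None, True)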
|
andersroos/rankedftw
|
refs/heads/master
|
main/migrations/0014_add_on_delete_property.py
|
1
|
# Generated by Django 2.2 on 2019-04-07 17:04
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('main', '0013_fixing_indices'),
]
operations = [
migrations.AlterField(
model_name='player',
name='season',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='main.Season'),
),
migrations.AlterField(
model_name='rankingdata',
name='ranking',
field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='ranking_data', to='main.Ranking'),
),
migrations.AlterField(
model_name='rankingstats',
name='ranking',
field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='main.Ranking'),
),
migrations.AlterField(
model_name='team',
name='season',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='main.Season'),
),
]
|
zstackio/zstack-woodpecker
|
refs/heads/master
|
integrationtest/vm/multihosts/multi_shared_ps/test_san_vm_other_ps_volume_sp.py
|
1
|
'''
New Integration Test for multiple shared primary storage
@author: Legion
'''
import os
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.resource_operations as res_ops
test_obj_dict = test_state.TestStateDict()
test_stub = test_lib.lib_get_test_stub()
multi_ps = test_stub.MultiSharedPS()
case_flavor = dict(volume_created_with_vm = dict(with_vm=True, root_sp=True, shared=False, ps2_vm=False),
volume_single_created = dict(with_vm=False, root_sp=False, shared=False, ps2_vm=False),
shared_volume_ceph_vm = dict(with_vm=False, root_sp=True, shared=True, ps2_vm=False),
shared_volume_2ps_vm = dict(with_vm=False, root_sp=False, shared=True, ps2_vm=True)
)
def test():
ps_inv = res_ops.query_resource(res_ops.PRIMARY_STORAGE)
sblk_ps = [ps for ps in ps_inv if ps.type == 'SharedBlock']
if not sblk_ps:
        test_util.test_skip('Skip test as there is no SharedBlock primary storage')
flavor = case_flavor[os.getenv('CASE_FLAVOR')]
if flavor['with_vm']:
multi_ps.create_vm(ps_type='SharedBlock', with_data_vol=True, one_volume=True)
else:
multi_ps.create_vm(ps_type='SharedBlock')
if flavor['shared']:
if flavor['ps2_vm']:
multi_ps.create_vm(except_ps_type='SharedBlock')
else:
multi_ps.create_vm(ps_type='SharedBlock')
multi_ps.create_data_volume(shared=True, except_ps_type='SharedBlock')
else:
multi_ps.create_data_volume(except_ps_type='SharedBlock')
if flavor['root_sp']:
multi_ps.create_snapshot(target='vm')
else:
multi_ps.create_snapshot(target='volume')
for vol_uuid in multi_ps.snapshot.keys():
multi_ps.revert_sp(vol_uuid)
multi_ps.create_snapshot(vol_uuid_list=[vol_uuid])
multi_ps.create_snapshot(vol_uuid_list=[vol_uuid])
if not flavor['shared']:
multi_ps.sp_check()
if flavor['shared']:
for data_volume in multi_ps.data_volume.values():
for vm in multi_ps.vm:
data_volume.detach(vm.get_vm().uuid)
if flavor['ps2_vm']:
multi_ps.migrate_vm()
else:
multi_ps.migrate_vm(multi_ps.vm)
for data_volume in multi_ps.data_volume.values():
for vm in multi_ps.vm:
data_volume.attach(vm)
else:
multi_ps.migrate_vm()
for vol_uuid in multi_ps.snapshot.keys():
multi_ps.revert_sp(vol_uuid)
multi_ps.delete_snapshot(vol_uuid)
    test_util.test_pass('SharedBlock VM with other PS Volume Snapshot Test Success')
#Will be called only if exception happens in test().
def error_cleanup():
if multi_ps.vm:
try:
for vm in multi_ps.vm:
vm.destroy()
except:
pass
|
javiplx/cobbler-2.x
|
refs/heads/master
|
cobbler/codes.py
|
2
|
"""
various codes and constants used by Cobbler
Copyright 2006-2009, Red Hat, Inc
Michael DeHaan <mdehaan@redhat.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
import utils
# OS variants table. This is a variant of the data from
# ls /usr/lib/python2.X/site-packages/virtinst/FullVirtGuest.py
# but replicated here as we can't assume cobbler is installed on a system with libvirt.
# in many cases it will not be (i.e. old EL4 server, etc) and we need this info to
# know how to validate --os-variant and --os-version.
#
# The keys of this hash correspond with the --breed flag in Cobbler.
# --breed has physical provisioning semantics as well as virt semantics.
#
# presence of something in this table does /not/ mean it's supported.
# for instance, currently, "redhat", "debian", and "suse" do something interesting.
# the rest are undefined (for now), this will evolve.
VALID_OS_BREEDS = [
"redhat", "debian", "ubuntu", "suse", "generic", "windows", "unix", "vmware", "freebsd", "other"
]
VALID_OS_VERSIONS = {
"redhat" : [ "rhel3", "rhel4", "rhel5", "rhel6", "fedora14", "fedora15", "generic24", "generic26", "virtio26", "other" ],
"suse" : [ "sles9", "sles10", "sles11", "generic24", "generic26", "virtio26", "other" ],
"debian" : [ "lenny", "squeeze", "stable", "testing", "unstable", "generic24", "generic26", "other" ],
"ubuntu" : [ "hardy", "lucid", "maverick", "natty" ],
"generic" : [ "generic24", "generic26", "other" ],
"windows" : [ "winxp", "win2k", "win2k3", "vista", "other" ],
"unix" : [ "solaris9", "solaris10", "freebsd7", "freebsd8", "other" ],
"vmware" : [ "esx4", "esxi4" ],
"freebsd" : [ "freebsd8.0", "freebsd8.1", "freebsd8.2" ],
"other" : [ "msdos", "netware4", "netware5", "netware6", "generic", "other" ]
}
VALID_REPO_BREEDS = [
# "rsync", "rhn", "yum", "apt"
"rsync", "rhn", "yum"
]
def uniquify(seq, idfun=None):
# this is odd (older mod_python scoping bug?) but we can't use
# utils.uniquify here because on older distros (RHEL4/5)
# mod_python gets another utils. As a result,
# it is duplicated here for now. Bad, but ... now you know.
#
# credit: http://www.peterbe.com/plog/uniqifiers-benchmark
# FIXME: if this is actually slower than some other way, overhaul it
if idfun is None:
def idfun(x):
return x
seen = {}
result = []
for item in seq:
marker = idfun(item)
if marker in seen:
continue
seen[marker] = 1
result.append(item)
return result
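# e.g. (illustrative) uniquify([3, 1, 3, 2, 1]) returns [3, 1, 2]; the first
# occurrence of each item wins and relative order is preserved.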
def get_all_os_versions():
"""
Collapse the above list of OS versions for usage/display by the CLI/webapp.
"""
results = ['']
for x in VALID_OS_VERSIONS.keys():
for y in VALID_OS_VERSIONS[x]:
results.append(y)
results = uniquify(results)
results.sort()
return results
|
cpaulik/scipy
|
refs/heads/master
|
scipy/_lib/_numpy_compat.py
|
71
|
"""Functions copypasted from newer versions of numpy.
"""
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from scipy._lib._version import NumpyVersion
if NumpyVersion(np.__version__) > '1.7.0.dev':
_assert_warns = np.testing.assert_warns
else:
def _assert_warns(warning_class, func, *args, **kw):
r"""
Fail unless the given callable throws the specified warning.
This definition is copypasted from numpy 1.9.0.dev.
The version in earlier numpy returns None.
Parameters
----------
warning_class : class
The class defining the warning that `func` is expected to throw.
func : callable
The callable to test.
*args : Arguments
Arguments passed to `func`.
**kwargs : Kwargs
Keyword arguments passed to `func`.
Returns
-------
The value returned by `func`.
"""
with warnings.catch_warnings(record=True) as l:
warnings.simplefilter('always')
result = func(*args, **kw)
if not len(l) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
            if l[0].category is not warning_class:
                raise AssertionError("First warning for %s is not a "
                                     "%s (is %s)" % (func.__name__, warning_class, l[0]))
return result
|
nycholas/ask-undrgz
|
refs/heads/master
|
src/ask-undrgz/django/contrib/gis/gdal/srs.py
|
30
|
"""
  The Spatial Reference class, represents OGR Spatial Reference objects.
Example:
>>> from django.contrib.gis.gdal import SpatialReference
>>> srs = SpatialReference('WGS84')
>>> print srs
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
TOWGS84[0,0,0,0,0,0,0],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.01745329251994328,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]]
>>> print srs.proj
+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs
>>> print srs.ellipsoid
(6378137.0, 6356752.3142451793, 298.25722356300003)
>>> print srs.projected, srs.geographic
False True
>>> srs.import_epsg(32140)
>>> print srs.name
NAD83 / Texas South Central
"""
import re
from ctypes import byref, c_char_p, c_int, c_void_p
# Getting the error checking routine and exceptions
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import OGRException, SRSException
from django.contrib.gis.gdal.prototypes import srs as capi
#### Spatial Reference class. ####
class SpatialReference(GDALBase):
"""
A wrapper for the OGRSpatialReference object. According to the GDAL website,
the SpatialReference object "provide[s] services to represent coordinate
systems (projections and datums) and to transform between them."
"""
#### Python 'magic' routines ####
def __init__(self, srs_input=''):
"""
Creates a GDAL OSR Spatial Reference object from the given input.
The input may be string of OGC Well Known Text (WKT), an integer
EPSG code, a PROJ.4 string, and/or a projection "well known" shorthand
string (one of 'WGS84', 'WGS72', 'NAD27', 'NAD83').
"""
buf = c_char_p('')
srs_type = 'user'
if isinstance(srs_input, basestring):
# Encoding to ASCII if unicode passed in.
if isinstance(srs_input, unicode):
srs_input = srs_input.encode('ascii')
try:
# If SRID is a string, e.g., '4326', then make acceptable
# as user input.
srid = int(srs_input)
srs_input = 'EPSG:%d' % srid
except ValueError:
pass
elif isinstance(srs_input, (int, long)):
# EPSG integer code was input.
srs_type = 'epsg'
elif isinstance(srs_input, self.ptr_type):
srs = srs_input
srs_type = 'ogr'
else:
raise TypeError('Invalid SRS type "%s"' % srs_type)
if srs_type == 'ogr':
# Input is already an SRS pointer.
srs = srs_input
else:
# Creating a new SRS pointer, using the string buffer.
srs = capi.new_srs(buf)
# If the pointer is NULL, throw an exception.
if not srs:
raise SRSException('Could not create spatial reference from: %s' % srs_input)
else:
self.ptr = srs
# Importing from either the user input string or an integer SRID.
if srs_type == 'user':
self.import_user_input(srs_input)
elif srs_type == 'epsg':
self.import_epsg(srs_input)
def __del__(self):
"Destroys this spatial reference."
if self._ptr: capi.release_srs(self._ptr)
def __getitem__(self, target):
"""
Returns the value of the given string attribute node, None if the node
doesn't exist. Can also take a tuple as a parameter, (target, child),
where child is the index of the attribute in the WKT. For example:
        >>> wkt = 'GEOGCS["WGS 84", DATUM["WGS_1984, ... AUTHORITY["EPSG","4326"]]'
>>> srs = SpatialReference(wkt) # could also use 'WGS84', or 4326
>>> print srs['GEOGCS']
WGS 84
>>> print srs['DATUM']
WGS_1984
>>> print srs['AUTHORITY']
EPSG
>>> print srs['AUTHORITY', 1] # The authority value
4326
>>> print srs['TOWGS84', 4] # the fourth value in this wkt
0
        >>> print srs['UNIT|AUTHORITY'] # For the units authority, have to use the pipe symbol.
EPSG
        >>> print srs['UNIT|AUTHORITY', 1] # The authority value for the units
9122
"""
if isinstance(target, tuple):
return self.attr_value(*target)
else:
return self.attr_value(target)
def __str__(self):
"The string representation uses 'pretty' WKT."
return self.pretty_wkt
#### SpatialReference Methods ####
def attr_value(self, target, index=0):
"""
The attribute value for the given target node (e.g. 'PROJCS'). The index
keyword specifies an index of the child node to return.
"""
if not isinstance(target, basestring) or not isinstance(index, int):
raise TypeError
return capi.get_attr_value(self.ptr, target, index)
def auth_name(self, target):
"Returns the authority name for the given string target node."
return capi.get_auth_name(self.ptr, target)
def auth_code(self, target):
"Returns the authority code for the given string target node."
return capi.get_auth_code(self.ptr, target)
def clone(self):
"Returns a clone of this SpatialReference object."
return SpatialReference(capi.clone_srs(self.ptr))
def from_esri(self):
"Morphs this SpatialReference from ESRI's format to EPSG."
capi.morph_from_esri(self.ptr)
def identify_epsg(self):
"""
This method inspects the WKT of this SpatialReference, and will
add EPSG authority nodes where an EPSG identifier is applicable.
"""
capi.identify_epsg(self.ptr)
def to_esri(self):
"Morphs this SpatialReference to ESRI's format."
capi.morph_to_esri(self.ptr)
def validate(self):
"Checks to see if the given spatial reference is valid."
capi.srs_validate(self.ptr)
#### Name & SRID properties ####
@property
def name(self):
"Returns the name of this Spatial Reference."
if self.projected: return self.attr_value('PROJCS')
elif self.geographic: return self.attr_value('GEOGCS')
elif self.local: return self.attr_value('LOCAL_CS')
else: return None
@property
def srid(self):
"Returns the SRID of top-level authority, or None if undefined."
try:
return int(self.attr_value('AUTHORITY', 1))
except (TypeError, ValueError):
return None
#### Unit Properties ####
@property
def linear_name(self):
"Returns the name of the linear units."
units, name = capi.linear_units(self.ptr, byref(c_char_p()))
return name
@property
def linear_units(self):
"Returns the value of the linear units."
units, name = capi.linear_units(self.ptr, byref(c_char_p()))
return units
@property
def angular_name(self):
"Returns the name of the angular units."
units, name = capi.angular_units(self.ptr, byref(c_char_p()))
return name
@property
def angular_units(self):
"Returns the value of the angular units."
units, name = capi.angular_units(self.ptr, byref(c_char_p()))
return units
@property
def units(self):
"""
Returns a 2-tuple of the units value and the units name,
        and automatically determines whether to return the linear
or angular units.
"""
if self.projected or self.local:
return capi.linear_units(self.ptr, byref(c_char_p()))
elif self.geographic:
return capi.angular_units(self.ptr, byref(c_char_p()))
else:
return (None, None)
#### Spheroid/Ellipsoid Properties ####
@property
def ellipsoid(self):
"""
Returns a tuple of the ellipsoid parameters:
(semimajor axis, semiminor axis, and inverse flattening)
"""
return (self.semi_major, self.semi_minor, self.inverse_flattening)
@property
def semi_major(self):
"Returns the Semi Major Axis for this Spatial Reference."
return capi.semi_major(self.ptr, byref(c_int()))
@property
def semi_minor(self):
"Returns the Semi Minor Axis for this Spatial Reference."
return capi.semi_minor(self.ptr, byref(c_int()))
@property
def inverse_flattening(self):
"Returns the Inverse Flattening for this Spatial Reference."
return capi.invflattening(self.ptr, byref(c_int()))
#### Boolean Properties ####
@property
def geographic(self):
"""
Returns True if this SpatialReference is geographic
(root node is GEOGCS).
"""
return bool(capi.isgeographic(self.ptr))
@property
def local(self):
"Returns True if this SpatialReference is local (root node is LOCAL_CS)."
return bool(capi.islocal(self.ptr))
@property
def projected(self):
"""
Returns True if this SpatialReference is a projected coordinate system
(root node is PROJCS).
"""
return bool(capi.isprojected(self.ptr))
#### Import Routines ####
def import_epsg(self, epsg):
"Imports the Spatial Reference from the EPSG code (an integer)."
capi.from_epsg(self.ptr, epsg)
def import_proj(self, proj):
"Imports the Spatial Reference from a PROJ.4 string."
capi.from_proj(self.ptr, proj)
def import_user_input(self, user_input):
"Imports the Spatial Reference from the given user input string."
capi.from_user_input(self.ptr, user_input)
def import_wkt(self, wkt):
"Imports the Spatial Reference from OGC WKT (string)"
capi.from_wkt(self.ptr, byref(c_char_p(wkt)))
def import_xml(self, xml):
"Imports the Spatial Reference from an XML string."
capi.from_xml(self.ptr, xml)
#### Export Properties ####
@property
def wkt(self):
"Returns the WKT representation of this Spatial Reference."
return capi.to_wkt(self.ptr, byref(c_char_p()))
@property
def pretty_wkt(self, simplify=0):
"Returns the 'pretty' representation of the WKT."
return capi.to_pretty_wkt(self.ptr, byref(c_char_p()), simplify)
@property
def proj(self):
"Returns the PROJ.4 representation for this Spatial Reference."
return capi.to_proj(self.ptr, byref(c_char_p()))
@property
def proj4(self):
"Alias for proj()."
return self.proj
@property
def xml(self, dialect=''):
"Returns the XML representation of this Spatial Reference."
return capi.to_xml(self.ptr, byref(c_char_p()), dialect)
class CoordTransform(GDALBase):
"The coordinate system transformation object."
def __init__(self, source, target):
"Initializes on a source and target SpatialReference objects."
if not isinstance(source, SpatialReference) or not isinstance(target, SpatialReference):
raise TypeError('source and target must be of type SpatialReference')
self.ptr = capi.new_ct(source._ptr, target._ptr)
self._srs1_name = source.name
self._srs2_name = target.name
def __del__(self):
"Deletes this Coordinate Transformation object."
if self._ptr: capi.destroy_ct(self._ptr)
def __str__(self):
return 'Transform from "%s" to "%s"' % (self._srs1_name, self._srs2_name)
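# --- Editor's illustrative sketch (hedged): not part of the original module.
# Shows typical SpatialReference/CoordTransform usage, assuming GDAL is
# installed and usable from this module; the EPSG codes are just examples.
if __name__ == '__main__':
    wgs84 = SpatialReference(4326)        # geographic CRS from an EPSG code
    mercator = SpatialReference(3857)     # projected CRS
    ct = CoordTransform(wgs84, mercator)  # TypeError unless both are SRS objects
    print(ct)                             # Transform from "WGS 84" to "..."
    print(wgs84.srid, wgs84.geographic)   # 4326 True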
|
Argon-Zhou/django
|
refs/heads/master
|
tests/m2m_and_m2o/models.py
|
128
|
"""
Many-to-many and many-to-one relationships to the same table
Make sure to set ``related_name`` if you use relationships to the same table.
"""
from __future__ import unicode_literals
from django.db import models
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
class User(models.Model):
username = models.CharField(max_length=20)
@python_2_unicode_compatible
class Issue(models.Model):
num = models.IntegerField()
cc = models.ManyToManyField(User, blank=True, related_name='test_issue_cc')
client = models.ForeignKey(User, related_name='test_issue_client')
def __str__(self):
return six.text_type(self.num)
class Meta:
ordering = ('num',)
class UnicodeReferenceModel(models.Model):
others = models.ManyToManyField("UnicodeReferenceModel")
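# --- Editor's illustrative sketch (hedged): shown as comments because the
# ORM calls need a configured database at runtime. It demonstrates why the
# distinct related_name values above are required; the data is hypothetical.
#
#   user = User.objects.create(username='alice')
#   issue = Issue.objects.create(num=1, client=user)
#   issue.cc.add(user)
#   user.test_issue_client.all()   # issues where alice is the FK client
#   user.test_issue_cc.all()       # issues where alice appears on the M2M cc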
|
philippeowagner/django-background-tasks-example
|
refs/heads/master
|
project/manage.py
|
404
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
rapilabs/django
|
refs/heads/master
|
django/forms/widgets.py
|
106
|
"""
HTML Widget classes
"""
from __future__ import unicode_literals
import copy
import datetime
import re
from itertools import chain
from django.conf import settings
from django.forms.utils import flatatt, to_current_timezone
from django.utils import datetime_safe, formats, six
from django.utils.datastructures import MultiValueDict
from django.utils.dates import MONTHS
from django.utils.encoding import (
force_str, force_text, python_2_unicode_compatible,
)
from django.utils.formats import get_format
from django.utils.html import conditional_escape, format_html, html_safe
from django.utils.safestring import mark_safe
from django.utils.six.moves import range
from django.utils.six.moves.urllib.parse import urljoin
from django.utils.translation import ugettext_lazy
__all__ = (
'Media', 'MediaDefiningClass', 'Widget', 'TextInput', 'NumberInput',
'EmailInput', 'URLInput', 'PasswordInput', 'HiddenInput',
'MultipleHiddenInput', 'FileInput', 'ClearableFileInput', 'Textarea',
'DateInput', 'DateTimeInput', 'TimeInput', 'CheckboxInput', 'Select',
'NullBooleanSelect', 'SelectMultiple', 'RadioSelect',
'CheckboxSelectMultiple', 'MultiWidget', 'SplitDateTimeWidget',
'SplitHiddenDateTimeWidget', 'SelectDateWidget',
)
MEDIA_TYPES = ('css', 'js')
@html_safe
@python_2_unicode_compatible
class Media(object):
def __init__(self, media=None, **kwargs):
if media:
media_attrs = media.__dict__
else:
media_attrs = kwargs
self._css = {}
self._js = []
for name in MEDIA_TYPES:
getattr(self, 'add_' + name)(media_attrs.get(name))
def __str__(self):
return self.render()
def render(self):
return mark_safe('\n'.join(chain(*[getattr(self, 'render_' + name)() for name in MEDIA_TYPES])))
def render_js(self):
return [
format_html(
'<script type="text/javascript" src="{}"></script>',
self.absolute_path(path)
) for path in self._js
]
def render_css(self):
# To keep rendering order consistent, we can't just iterate over items().
# We need to sort the keys, and iterate over the sorted list.
media = sorted(self._css.keys())
return chain(*[[
format_html(
'<link href="{}" type="text/css" media="{}" rel="stylesheet" />',
self.absolute_path(path), medium
) for path in self._css[medium]
] for medium in media])
def absolute_path(self, path, prefix=None):
if path.startswith(('http://', 'https://', '/')):
return path
if prefix is None:
if settings.STATIC_URL is None:
# backwards compatibility
prefix = settings.MEDIA_URL
else:
prefix = settings.STATIC_URL
return urljoin(prefix, path)
def __getitem__(self, name):
"Returns a Media object that only contains media of the given type"
if name in MEDIA_TYPES:
return Media(**{str(name): getattr(self, '_' + name)})
raise KeyError('Unknown media type "%s"' % name)
def add_js(self, data):
if data:
for path in data:
if path not in self._js:
self._js.append(path)
def add_css(self, data):
if data:
for medium, paths in data.items():
for path in paths:
if not self._css.get(medium) or path not in self._css[medium]:
self._css.setdefault(medium, []).append(path)
def __add__(self, other):
combined = Media()
for name in MEDIA_TYPES:
getattr(combined, 'add_' + name)(getattr(self, '_' + name, None))
getattr(combined, 'add_' + name)(getattr(other, '_' + name, None))
return combined
def media_property(cls):
def _media(self):
# Get the media property of the superclass, if it exists
sup_cls = super(cls, self)
try:
base = sup_cls.media
except AttributeError:
base = Media()
# Get the media definition for this class
definition = getattr(cls, 'Media', None)
if definition:
extend = getattr(definition, 'extend', True)
if extend:
if extend is True:
m = base
else:
m = Media()
for medium in extend:
m = m + base[medium]
return m + Media(definition)
else:
return Media(definition)
else:
return base
return property(_media)
class MediaDefiningClass(type):
"""
Metaclass for classes that can have media definitions.
"""
def __new__(mcs, name, bases, attrs):
new_class = (super(MediaDefiningClass, mcs)
.__new__(mcs, name, bases, attrs))
if 'media' not in attrs:
new_class.media = media_property(new_class)
return new_class
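# --- Editor's illustrative sketch (hedged): not part of the original module.
# A class using MediaDefiningClass directly, as the metaclass docstring
# describes; the asset paths are hypothetical, and relative paths are joined
# onto settings.STATIC_URL only when the media is rendered.
class _ExampleMediaHolder(six.with_metaclass(MediaDefiningClass)):
    class Media:
        css = {'all': ('pretty.css',)}
        js = ('animations.js', 'actions.js')
# str(_ExampleMediaHolder().media) would emit one <link> tag and two
# <script> tags for the assets listed above.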
@html_safe
@python_2_unicode_compatible
class SubWidget(object):
"""
Some widgets are made of multiple HTML elements -- namely, RadioSelect.
This is a class that represents the "inner" HTML element of a widget.
"""
def __init__(self, parent_widget, name, value, attrs, choices):
self.parent_widget = parent_widget
self.name, self.value = name, value
self.attrs, self.choices = attrs, choices
def __str__(self):
args = [self.name, self.value, self.attrs]
if self.choices:
args.append(self.choices)
return self.parent_widget.render(*args)
class Widget(six.with_metaclass(MediaDefiningClass)):
needs_multipart_form = False  # Determines whether this widget needs a multipart form.
is_localized = False
is_required = False
supports_microseconds = True
def __init__(self, attrs=None):
if attrs is not None:
self.attrs = attrs.copy()
else:
self.attrs = {}
def __deepcopy__(self, memo):
obj = copy.copy(self)
obj.attrs = self.attrs.copy()
memo[id(self)] = obj
return obj
@property
def is_hidden(self):
return self.input_type == 'hidden' if hasattr(self, 'input_type') else False
def subwidgets(self, name, value, attrs=None, choices=()):
"""
Yields all "subwidgets" of this widget. Used only by RadioSelect to
allow template access to individual <input type="radio"> buttons.
Arguments are the same as for render().
"""
yield SubWidget(self, name, value, attrs, choices)
def render(self, name, value, attrs=None):
"""
Returns this Widget rendered as HTML, as a Unicode string.
The 'value' given is not guaranteed to be valid input, so subclass
implementations should program defensively.
"""
raise NotImplementedError('subclasses of Widget must provide a render() method')
def build_attrs(self, extra_attrs=None, **kwargs):
"Helper function for building an attribute dictionary."
attrs = dict(self.attrs, **kwargs)
if extra_attrs:
attrs.update(extra_attrs)
return attrs
def value_from_datadict(self, data, files, name):
"""
Given a dictionary of data and this widget's name, returns the value
of this widget. Returns None if it's not provided.
"""
return data.get(name)
def id_for_label(self, id_):
"""
Returns the HTML ID attribute of this Widget for use by a <label>,
given the ID of the field. Returns None if no ID is available.
This hook is necessary because some widgets have multiple HTML
elements and, thus, multiple IDs. In that case, this method should
return an ID value that corresponds to the first ID in the widget's
tags.
"""
return id_
class Input(Widget):
"""
Base class for all <input> widgets (except type='checkbox' and
type='radio', which are special).
"""
input_type = None # Subclasses must define this.
def _format_value(self, value):
if self.is_localized:
return formats.localize_input(value)
return value
def render(self, name, value, attrs=None):
if value is None:
value = ''
final_attrs = self.build_attrs(attrs, type=self.input_type, name=name)
if value != '':
# Only add the 'value' attribute if a value is non-empty.
final_attrs['value'] = force_text(self._format_value(value))
return format_html('<input{} />', flatatt(final_attrs))
class TextInput(Input):
input_type = 'text'
def __init__(self, attrs=None):
if attrs is not None:
self.input_type = attrs.pop('type', self.input_type)
super(TextInput, self).__init__(attrs)
class NumberInput(TextInput):
input_type = 'number'
class EmailInput(TextInput):
input_type = 'email'
class URLInput(TextInput):
input_type = 'url'
class PasswordInput(TextInput):
input_type = 'password'
def __init__(self, attrs=None, render_value=False):
super(PasswordInput, self).__init__(attrs)
self.render_value = render_value
def render(self, name, value, attrs=None):
if not self.render_value:
value = None
return super(PasswordInput, self).render(name, value, attrs)
class HiddenInput(Input):
input_type = 'hidden'
class MultipleHiddenInput(HiddenInput):
"""
A widget that handles <input type="hidden"> for fields that have a list
of values.
"""
def __init__(self, attrs=None, choices=()):
super(MultipleHiddenInput, self).__init__(attrs)
# choices can be any iterable
self.choices = choices
def render(self, name, value, attrs=None, choices=()):
if value is None:
value = []
final_attrs = self.build_attrs(attrs, type=self.input_type, name=name)
id_ = final_attrs.get('id')
inputs = []
for i, v in enumerate(value):
input_attrs = dict(value=force_text(v), **final_attrs)
if id_:
# An ID attribute was given. Add a numeric index as a suffix
# so that the inputs don't all have the same ID attribute.
input_attrs['id'] = '%s_%s' % (id_, i)
inputs.append(format_html('<input{} />', flatatt(input_attrs)))
return mark_safe('\n'.join(inputs))
def value_from_datadict(self, data, files, name):
if isinstance(data, MultiValueDict):
return data.getlist(name)
return data.get(name)
class FileInput(Input):
input_type = 'file'
needs_multipart_form = True
def render(self, name, value, attrs=None):
return super(FileInput, self).render(name, None, attrs=attrs)
def value_from_datadict(self, data, files, name):
"File widgets take data from FILES, not POST"
return files.get(name)
FILE_INPUT_CONTRADICTION = object()
class ClearableFileInput(FileInput):
initial_text = ugettext_lazy('Currently')
input_text = ugettext_lazy('Change')
clear_checkbox_label = ugettext_lazy('Clear')
template_with_initial = (
'%(initial_text)s: <a href="%(initial_url)s">%(initial)s</a> '
'%(clear_template)s<br />%(input_text)s: %(input)s'
)
template_with_clear = '%(clear)s <label for="%(clear_checkbox_id)s">%(clear_checkbox_label)s</label>'
def clear_checkbox_name(self, name):
"""
Given the name of the file input, return the name of the clear checkbox
input.
"""
return name + '-clear'
def clear_checkbox_id(self, name):
"""
Given the name of the clear checkbox input, return the HTML id for it.
"""
return name + '_id'
def is_initial(self, value):
"""
Return whether value is considered to be initial value.
"""
return bool(value and hasattr(value, 'url'))
def get_template_substitution_values(self, value):
"""
Return value-related substitutions.
"""
return {
'initial': conditional_escape(value),
'initial_url': conditional_escape(value.url),
}
def render(self, name, value, attrs=None):
substitutions = {
'initial_text': self.initial_text,
'input_text': self.input_text,
'clear_template': '',
'clear_checkbox_label': self.clear_checkbox_label,
}
template = '%(input)s'
substitutions['input'] = super(ClearableFileInput, self).render(name, value, attrs)
if self.is_initial(value):
template = self.template_with_initial
substitutions.update(self.get_template_substitution_values(value))
if not self.is_required:
checkbox_name = self.clear_checkbox_name(name)
checkbox_id = self.clear_checkbox_id(checkbox_name)
substitutions['clear_checkbox_name'] = conditional_escape(checkbox_name)
substitutions['clear_checkbox_id'] = conditional_escape(checkbox_id)
substitutions['clear'] = CheckboxInput().render(checkbox_name, False, attrs={'id': checkbox_id})
substitutions['clear_template'] = self.template_with_clear % substitutions
return mark_safe(template % substitutions)
def value_from_datadict(self, data, files, name):
upload = super(ClearableFileInput, self).value_from_datadict(data, files, name)
if not self.is_required and CheckboxInput().value_from_datadict(
data, files, self.clear_checkbox_name(name)):
if upload:
# If the user contradicts themselves (uploads a new file AND
# checks the "clear" checkbox), we return a unique marker
# object that FileField will turn into a ValidationError.
return FILE_INPUT_CONTRADICTION
# False signals to clear any existing value, as opposed to just None
return False
return upload
class Textarea(Widget):
def __init__(self, attrs=None):
# Use slightly better defaults than HTML's 20x2 box
default_attrs = {'cols': '40', 'rows': '10'}
if attrs:
default_attrs.update(attrs)
super(Textarea, self).__init__(default_attrs)
def render(self, name, value, attrs=None):
if value is None:
value = ''
final_attrs = self.build_attrs(attrs, name=name)
return format_html('<textarea{}>\r\n{}</textarea>',
flatatt(final_attrs),
force_text(value))
class DateTimeBaseInput(TextInput):
format_key = ''
supports_microseconds = False
def __init__(self, attrs=None, format=None):
super(DateTimeBaseInput, self).__init__(attrs)
self.format = format if format else None
def _format_value(self, value):
return formats.localize_input(value,
self.format or formats.get_format(self.format_key)[0])
class DateInput(DateTimeBaseInput):
format_key = 'DATE_INPUT_FORMATS'
class DateTimeInput(DateTimeBaseInput):
format_key = 'DATETIME_INPUT_FORMATS'
class TimeInput(DateTimeBaseInput):
format_key = 'TIME_INPUT_FORMATS'
# Defined at module level so that CheckboxInput is picklable (#17976)
def boolean_check(v):
return not (v is False or v is None or v == '')
class CheckboxInput(Widget):
def __init__(self, attrs=None, check_test=None):
super(CheckboxInput, self).__init__(attrs)
# check_test is a callable that takes a value and returns True
# if the checkbox should be checked for that value.
self.check_test = boolean_check if check_test is None else check_test
def render(self, name, value, attrs=None):
final_attrs = self.build_attrs(attrs, type='checkbox', name=name)
if self.check_test(value):
final_attrs['checked'] = 'checked'
if not (value is True or value is False or value is None or value == ''):
# Only add the 'value' attribute if a value is non-empty.
final_attrs['value'] = force_text(value)
return format_html('<input{} />', flatatt(final_attrs))
def value_from_datadict(self, data, files, name):
if name not in data:
# A missing value means False because HTML form submission does not
# send results for unselected checkboxes.
return False
value = data.get(name)
# Translate true and false strings to boolean values.
values = {'true': True, 'false': False}
if isinstance(value, six.string_types):
value = values.get(value.lower(), value)
return bool(value)
class Select(Widget):
allow_multiple_selected = False
def __init__(self, attrs=None, choices=()):
super(Select, self).__init__(attrs)
# choices can be any iterable, but we may need to render this widget
# multiple times. Thus, collapse it into a list so it can be consumed
# more than once.
self.choices = list(choices)
def __deepcopy__(self, memo):
obj = copy.copy(self)
obj.attrs = self.attrs.copy()
obj.choices = copy.copy(self.choices)
memo[id(self)] = obj
return obj
def render(self, name, value, attrs=None, choices=()):
if value is None:
value = ''
final_attrs = self.build_attrs(attrs, name=name)
output = [format_html('<select{}>', flatatt(final_attrs))]
options = self.render_options(choices, [value])
if options:
output.append(options)
output.append('</select>')
return mark_safe('\n'.join(output))
def render_option(self, selected_choices, option_value, option_label):
if option_value is None:
option_value = ''
option_value = force_text(option_value)
if option_value in selected_choices:
selected_html = mark_safe(' selected="selected"')
if not self.allow_multiple_selected:
# Only allow for a single selection.
selected_choices.remove(option_value)
else:
selected_html = ''
return format_html('<option value="{}"{}>{}</option>',
option_value,
selected_html,
force_text(option_label))
def render_options(self, choices, selected_choices):
# Normalize to strings.
selected_choices = set(force_text(v) for v in selected_choices)
output = []
for option_value, option_label in chain(self.choices, choices):
if isinstance(option_label, (list, tuple)):
output.append(format_html('<optgroup label="{}">', force_text(option_value)))
for option in option_label:
output.append(self.render_option(selected_choices, *option))
output.append('</optgroup>')
else:
output.append(self.render_option(selected_choices, option_value, option_label))
return '\n'.join(output)
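# --- Editor's illustrative sketch (hedged): not part of the original module.
# Demonstrates the (group label, sub-choices) convention that render_options()
# turns into an <optgroup>; the choice data is hypothetical.
if __name__ == '__main__':
    media_select = Select(choices=[
        ('audio', [('vinyl', 'Vinyl'), ('cd', 'CD')]),  # becomes an <optgroup>
        ('unknown', 'Unknown'),                          # plain <option>
    ])
    # The 'cd' option is emitted with selected="selected".
    print(media_select.render('media', 'cd'))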
class NullBooleanSelect(Select):
"""
A Select Widget intended to be used with NullBooleanField.
"""
def __init__(self, attrs=None):
choices = (('1', ugettext_lazy('Unknown')),
('2', ugettext_lazy('Yes')),
('3', ugettext_lazy('No')))
super(NullBooleanSelect, self).__init__(attrs, choices)
def render(self, name, value, attrs=None, choices=()):
try:
value = {True: '2', False: '3', '2': '2', '3': '3'}[value]
except KeyError:
value = '1'
return super(NullBooleanSelect, self).render(name, value, attrs, choices)
def value_from_datadict(self, data, files, name):
value = data.get(name)
return {'2': True,
True: True,
'True': True,
'3': False,
'False': False,
False: False}.get(value)
class SelectMultiple(Select):
allow_multiple_selected = True
def render(self, name, value, attrs=None, choices=()):
if value is None:
value = []
final_attrs = self.build_attrs(attrs, name=name)
output = [format_html('<select multiple="multiple"{}>', flatatt(final_attrs))]
options = self.render_options(choices, value)
if options:
output.append(options)
output.append('</select>')
return mark_safe('\n'.join(output))
def value_from_datadict(self, data, files, name):
if isinstance(data, MultiValueDict):
return data.getlist(name)
return data.get(name)
@html_safe
@python_2_unicode_compatible
class ChoiceInput(SubWidget):
"""
An object used by ChoiceFieldRenderer that represents a single
<input type='$input_type'>.
"""
input_type = None # Subclasses must define this
def __init__(self, name, value, attrs, choice, index):
self.name = name
self.value = value
self.attrs = attrs
self.choice_value = force_text(choice[0])
self.choice_label = force_text(choice[1])
self.index = index
if 'id' in self.attrs:
self.attrs['id'] += "_%d" % self.index
def __str__(self):
return self.render()
def render(self, name=None, value=None, attrs=None, choices=()):
if self.id_for_label:
label_for = format_html(' for="{}"', self.id_for_label)
else:
label_for = ''
attrs = dict(self.attrs, **attrs) if attrs else self.attrs
return format_html(
'<label{}>{} {}</label>', label_for, self.tag(attrs), self.choice_label
)
def is_checked(self):
return self.value == self.choice_value
def tag(self, attrs=None):
attrs = attrs or self.attrs
final_attrs = dict(attrs, type=self.input_type, name=self.name, value=self.choice_value)
if self.is_checked():
final_attrs['checked'] = 'checked'
return format_html('<input{} />', flatatt(final_attrs))
@property
def id_for_label(self):
return self.attrs.get('id', '')
class RadioChoiceInput(ChoiceInput):
input_type = 'radio'
def __init__(self, *args, **kwargs):
super(RadioChoiceInput, self).__init__(*args, **kwargs)
self.value = force_text(self.value)
class CheckboxChoiceInput(ChoiceInput):
input_type = 'checkbox'
def __init__(self, *args, **kwargs):
super(CheckboxChoiceInput, self).__init__(*args, **kwargs)
self.value = set(force_text(v) for v in self.value)
def is_checked(self):
return self.choice_value in self.value
@html_safe
@python_2_unicode_compatible
class ChoiceFieldRenderer(object):
"""
An object used by RadioSelect to enable customization of radio widgets.
"""
choice_input_class = None
outer_html = '<ul{id_attr}>{content}</ul>'
inner_html = '<li>{choice_value}{sub_widgets}</li>'
def __init__(self, name, value, attrs, choices):
self.name = name
self.value = value
self.attrs = attrs
self.choices = choices
def __getitem__(self, idx):
choice = self.choices[idx] # Let the IndexError propagate
return self.choice_input_class(self.name, self.value, self.attrs.copy(), choice, idx)
def __str__(self):
return self.render()
def render(self):
"""
Outputs a <ul> for this set of choice fields.
If an id was given to the field, it is applied to the <ul> (each
item in the list will get an id of `$id_$i`).
"""
id_ = self.attrs.get('id')
output = []
for i, choice in enumerate(self.choices):
choice_value, choice_label = choice
if isinstance(choice_label, (tuple, list)):
attrs_plus = self.attrs.copy()
if id_:
attrs_plus['id'] += '_{}'.format(i)
sub_ul_renderer = self.__class__(
name=self.name,
value=self.value,
attrs=attrs_plus,
choices=choice_label,
)
sub_ul_renderer.choice_input_class = self.choice_input_class
output.append(format_html(self.inner_html, choice_value=choice_value,
sub_widgets=sub_ul_renderer.render()))
else:
w = self.choice_input_class(self.name, self.value,
self.attrs.copy(), choice, i)
output.append(format_html(self.inner_html,
choice_value=force_text(w), sub_widgets=''))
return format_html(self.outer_html,
id_attr=format_html(' id="{}"', id_) if id_ else '',
content=mark_safe('\n'.join(output)))
class RadioFieldRenderer(ChoiceFieldRenderer):
choice_input_class = RadioChoiceInput
class CheckboxFieldRenderer(ChoiceFieldRenderer):
choice_input_class = CheckboxChoiceInput
class RendererMixin(object):
renderer = None # subclasses must define this
_empty_value = None
def __init__(self, *args, **kwargs):
# Override the default renderer if we were passed one.
renderer = kwargs.pop('renderer', None)
if renderer:
self.renderer = renderer
super(RendererMixin, self).__init__(*args, **kwargs)
def subwidgets(self, name, value, attrs=None, choices=()):
for widget in self.get_renderer(name, value, attrs, choices):
yield widget
def get_renderer(self, name, value, attrs=None, choices=()):
"""Returns an instance of the renderer."""
if value is None:
value = self._empty_value
final_attrs = self.build_attrs(attrs)
choices = list(chain(self.choices, choices))
return self.renderer(name, value, final_attrs, choices)
def render(self, name, value, attrs=None, choices=()):
return self.get_renderer(name, value, attrs, choices).render()
def id_for_label(self, id_):
# Widgets using this RendererMixin are made of a collection of
# subwidgets, each with their own <label>, and distinct ID.
# The IDs are made distinct by a "_X" suffix, where X is the zero-based
# index of the choice field. Thus, the label for the main widget should
# reference the first subwidget, hence the "_0" suffix.
if id_:
id_ += '_0'
return id_
class RadioSelect(RendererMixin, Select):
renderer = RadioFieldRenderer
_empty_value = ''
class CheckboxSelectMultiple(RendererMixin, SelectMultiple):
renderer = CheckboxFieldRenderer
_empty_value = []
class MultiWidget(Widget):
"""
A widget that is composed of multiple widgets.
Its render() method is different from other widgets', because it has to
figure out how to split a single value for display in multiple widgets.
The ``value`` argument can be one of two things:
* A list.
* A normal value (e.g., a string) that has been "compressed" from
a list of values.
In the second case -- i.e., if the value is NOT a list -- render() will
first "decompress" the value into a list before rendering it. It does so by
calling the decompress() method, which MultiWidget subclasses must
implement. This method takes a single "compressed" value and returns a
list.
When render() does its HTML rendering, each value in the list is rendered
with the corresponding widget -- the first value is rendered in the first
widget, the second value is rendered in the second widget, etc.
Subclasses may implement format_output(), which takes the list of rendered
widgets and returns a string of HTML that formats them any way you'd like.
You'll probably want to use this class with MultiValueField.
"""
def __init__(self, widgets, attrs=None):
self.widgets = [w() if isinstance(w, type) else w for w in widgets]
super(MultiWidget, self).__init__(attrs)
@property
def is_hidden(self):
return all(w.is_hidden for w in self.widgets)
def render(self, name, value, attrs=None):
if self.is_localized:
for widget in self.widgets:
widget.is_localized = self.is_localized
# value is a list of values, each corresponding to a widget
# in self.widgets.
if not isinstance(value, list):
value = self.decompress(value)
output = []
final_attrs = self.build_attrs(attrs)
id_ = final_attrs.get('id')
for i, widget in enumerate(self.widgets):
try:
widget_value = value[i]
except IndexError:
widget_value = None
if id_:
final_attrs = dict(final_attrs, id='%s_%s' % (id_, i))
output.append(widget.render(name + '_%s' % i, widget_value, final_attrs))
return mark_safe(self.format_output(output))
def id_for_label(self, id_):
# See the comment for RadioSelect.id_for_label()
if id_:
id_ += '_0'
return id_
def value_from_datadict(self, data, files, name):
return [widget.value_from_datadict(data, files, name + '_%s' % i) for i, widget in enumerate(self.widgets)]
def format_output(self, rendered_widgets):
"""
Given a list of rendered widgets (as strings), returns a Unicode string
representing the HTML for the whole lot.
This hook allows you to format the HTML design of the widgets, if
needed.
"""
return ''.join(rendered_widgets)
def decompress(self, value):
"""
Returns a list of decompressed values for the given compressed value.
The given value can be assumed to be valid, but not necessarily
non-empty.
"""
raise NotImplementedError('Subclasses must implement this method.')
def _get_media(self):
"Media for a multiwidget is the combination of all media of the subwidgets"
media = Media()
for w in self.widgets:
media = media + w.media
return media
media = property(_get_media)
def __deepcopy__(self, memo):
obj = super(MultiWidget, self).__deepcopy__(memo)
obj.widgets = copy.deepcopy(self.widgets)
return obj
@property
def needs_multipart_form(self):
return any(w.needs_multipart_form for w in self.widgets)
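# --- Editor's illustrative sketch (hedged): not part of the original module.
# A minimal MultiWidget subclass implementing the mandatory decompress()
# hook; the two-part 'area-number' phone scheme is hypothetical.
class _ExamplePhoneWidget(MultiWidget):
    def __init__(self, attrs=None):
        widgets = (TextInput(attrs=attrs), TextInput(attrs=attrs))
        super(_ExamplePhoneWidget, self).__init__(widgets, attrs)
    def decompress(self, value):
        # Split a compressed 'area-number' string back into a list of parts.
        if value:
            area, _, number = value.partition('-')
            return [area, number]
        return [None, None]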
class SplitDateTimeWidget(MultiWidget):
"""
A Widget that splits datetime input into two <input type="text"> boxes.
"""
supports_microseconds = False
def __init__(self, attrs=None, date_format=None, time_format=None):
widgets = (DateInput(attrs=attrs, format=date_format),
TimeInput(attrs=attrs, format=time_format))
super(SplitDateTimeWidget, self).__init__(widgets, attrs)
def decompress(self, value):
if value:
value = to_current_timezone(value)
return [value.date(), value.time().replace(microsecond=0)]
return [None, None]
class SplitHiddenDateTimeWidget(SplitDateTimeWidget):
"""
A Widget that splits datetime input into two <input type="hidden"> inputs.
"""
def __init__(self, attrs=None, date_format=None, time_format=None):
super(SplitHiddenDateTimeWidget, self).__init__(attrs, date_format, time_format)
for widget in self.widgets:
widget.input_type = 'hidden'
class SelectDateWidget(Widget):
"""
A Widget that splits date input into three <select> boxes.
This also serves as an example of a Widget that has more than one HTML
element and hence implements value_from_datadict.
"""
none_value = (0, '---')
month_field = '%s_month'
day_field = '%s_day'
year_field = '%s_year'
select_widget = Select
date_re = re.compile(r'(\d{4})-(\d\d?)-(\d\d?)$')
def __init__(self, attrs=None, years=None, months=None, empty_label=None):
self.attrs = attrs or {}
# Optional list or tuple of years to use in the "year" select box.
if years:
self.years = years
else:
this_year = datetime.date.today().year
self.years = range(this_year, this_year + 10)
# Optional dict of months to use in the "month" select box.
if months:
self.months = months
else:
self.months = MONTHS
# Optional string, list, or tuple to use as empty_label.
if isinstance(empty_label, (list, tuple)):
if not len(empty_label) == 3:
raise ValueError('empty_label list/tuple must have 3 elements.')
self.year_none_value = (0, empty_label[0])
self.month_none_value = (0, empty_label[1])
self.day_none_value = (0, empty_label[2])
else:
if empty_label is not None:
self.none_value = (0, empty_label)
self.year_none_value = self.none_value
self.month_none_value = self.none_value
self.day_none_value = self.none_value
@staticmethod
def _parse_date_fmt():
fmt = get_format('DATE_FORMAT')
escaped = False
for char in fmt:
if escaped:
escaped = False
elif char == '\\':
escaped = True
elif char in 'Yy':
yield 'year'
elif char in 'bEFMmNn':
yield 'month'
elif char in 'dj':
yield 'day'
def render(self, name, value, attrs=None):
try:
year_val, month_val, day_val = value.year, value.month, value.day
except AttributeError:
year_val = month_val = day_val = None
if isinstance(value, six.string_types):
if settings.USE_L10N:
try:
input_format = get_format('DATE_INPUT_FORMATS')[0]
v = datetime.datetime.strptime(force_str(value), input_format)
year_val, month_val, day_val = v.year, v.month, v.day
except ValueError:
pass
if year_val is None:
match = self.date_re.match(value)
if match:
year_val, month_val, day_val = [int(val) for val in match.groups()]
html = {}
choices = [(i, i) for i in self.years]
html['year'] = self.create_select(name, self.year_field, value, year_val, choices, self.year_none_value)
choices = list(self.months.items())
html['month'] = self.create_select(name, self.month_field, value, month_val, choices, self.month_none_value)
choices = [(i, i) for i in range(1, 32)]
html['day'] = self.create_select(name, self.day_field, value, day_val, choices, self.day_none_value)
output = []
for field in self._parse_date_fmt():
output.append(html[field])
return mark_safe('\n'.join(output))
def id_for_label(self, id_):
for first_select in self._parse_date_fmt():
return '%s_%s' % (id_, first_select)
else:
return '%s_month' % id_
def value_from_datadict(self, data, files, name):
y = data.get(self.year_field % name)
m = data.get(self.month_field % name)
d = data.get(self.day_field % name)
if y == m == d == "0":
return None
if y and m and d:
if settings.USE_L10N:
input_format = get_format('DATE_INPUT_FORMATS')[0]
try:
date_value = datetime.date(int(y), int(m), int(d))
except ValueError:
return '%s-%s-%s' % (y, m, d)
else:
date_value = datetime_safe.new_date(date_value)
return date_value.strftime(input_format)
else:
return '%s-%s-%s' % (y, m, d)
return data.get(name)
def create_select(self, name, field, value, val, choices, none_value):
if 'id' in self.attrs:
id_ = self.attrs['id']
else:
id_ = 'id_%s' % name
if not self.is_required:
choices.insert(0, none_value)
local_attrs = self.build_attrs(id=field % id_)
s = self.select_widget(choices=choices)
select_html = s.render(field % name, val, local_attrs)
return select_html
|
ForcerKing/ShaoqunXu-mysql5.7
|
refs/heads/master
|
storage/ndb/mcc/tst/unittest2/suite.py
|
166
|
"""TestSuite"""
import sys
import unittest
from unittest2 import case, util
__unittest = True
class BaseTestSuite(unittest.TestSuite):
"""A simple test suite that doesn't provide class or module shared fixtures.
"""
def __init__(self, tests=()):
self._tests = []
self.addTests(tests)
def __repr__(self):
return "<%s tests=%s>" % (util.strclass(self.__class__), list(self))
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return list(self) == list(other)
def __ne__(self, other):
return not self == other
# Can't guarantee hash invariant, so flag as unhashable
__hash__ = None
def __iter__(self):
return iter(self._tests)
def countTestCases(self):
cases = 0
for test in self:
cases += test.countTestCases()
return cases
def addTest(self, test):
# sanity checks
if not hasattr(test, '__call__'):
raise TypeError("%r is not callable" % (repr(test),))
if isinstance(test, type) and issubclass(test,
(case.TestCase, TestSuite)):
raise TypeError("TestCases and TestSuites must be instantiated "
"before passing them to addTest()")
self._tests.append(test)
def addTests(self, tests):
if isinstance(tests, basestring):
raise TypeError("tests must be an iterable of tests, not a string")
for test in tests:
self.addTest(test)
def run(self, result):
for test in self:
if result.shouldStop:
break
test(result)
return result
def __call__(self, *args, **kwds):
return self.run(*args, **kwds)
def debug(self):
"""Run the tests without collecting errors in a TestResult"""
for test in self:
test.debug()
class TestSuite(BaseTestSuite):
"""A test suite is a composite test consisting of a number of TestCases.
For use, create an instance of TestSuite, then add test case instances.
When all tests have been added, the suite can be passed to a test
runner, such as TextTestRunner. It will run the individual test cases
in the order in which they were added, aggregating the results. When
subclassing, do not forget to call the base class constructor.
"""
def run(self, result):
self._wrapped_run(result)
self._tearDownPreviousClass(None, result)
self._handleModuleTearDown(result)
return result
def debug(self):
"""Run the tests without collecting errors in a TestResult"""
debug = _DebugResult()
self._wrapped_run(debug, True)
self._tearDownPreviousClass(None, debug)
self._handleModuleTearDown(debug)
################################
# private methods
def _wrapped_run(self, result, debug=False):
for test in self:
if result.shouldStop:
break
if _isnotsuite(test):
self._tearDownPreviousClass(test, result)
self._handleModuleFixture(test, result)
self._handleClassSetUp(test, result)
result._previousTestClass = test.__class__
if (getattr(test.__class__, '_classSetupFailed', False) or
getattr(result, '_moduleSetUpFailed', False)):
continue
if hasattr(test, '_wrapped_run'):
test._wrapped_run(result, debug)
elif not debug:
test(result)
else:
test.debug()
def _handleClassSetUp(self, test, result):
previousClass = getattr(result, '_previousTestClass', None)
currentClass = test.__class__
if currentClass == previousClass:
return
if result._moduleSetUpFailed:
return
if getattr(currentClass, "__unittest_skip__", False):
return
try:
currentClass._classSetupFailed = False
except TypeError:
# test may actually be a function
# so its class will be a builtin type
pass
setUpClass = getattr(currentClass, 'setUpClass', None)
if setUpClass is not None:
try:
setUpClass()
except Exception, e:
if isinstance(result, _DebugResult):
raise
currentClass._classSetupFailed = True
className = util.strclass(currentClass)
errorName = 'setUpClass (%s)' % className
self._addClassOrModuleLevelException(result, e, errorName)
def _get_previous_module(self, result):
previousModule = None
previousClass = getattr(result, '_previousTestClass', None)
if previousClass is not None:
previousModule = previousClass.__module__
return previousModule
def _handleModuleFixture(self, test, result):
previousModule = self._get_previous_module(result)
currentModule = test.__class__.__module__
if currentModule == previousModule:
return
self._handleModuleTearDown(result)
result._moduleSetUpFailed = False
try:
module = sys.modules[currentModule]
except KeyError:
return
setUpModule = getattr(module, 'setUpModule', None)
if setUpModule is not None:
try:
setUpModule()
except Exception, e:
if isinstance(result, _DebugResult):
raise
result._moduleSetUpFailed = True
errorName = 'setUpModule (%s)' % currentModule
self._addClassOrModuleLevelException(result, e, errorName)
def _addClassOrModuleLevelException(self, result, exception, errorName):
error = _ErrorHolder(errorName)
addSkip = getattr(result, 'addSkip', None)
if addSkip is not None and isinstance(exception, case.SkipTest):
addSkip(error, str(exception))
else:
result.addError(error, sys.exc_info())
def _handleModuleTearDown(self, result):
previousModule = self._get_previous_module(result)
if previousModule is None:
return
if result._moduleSetUpFailed:
return
try:
module = sys.modules[previousModule]
except KeyError:
return
tearDownModule = getattr(module, 'tearDownModule', None)
if tearDownModule is not None:
try:
tearDownModule()
except Exception, e:
if isinstance(result, _DebugResult):
raise
errorName = 'tearDownModule (%s)' % previousModule
self._addClassOrModuleLevelException(result, e, errorName)
def _tearDownPreviousClass(self, test, result):
previousClass = getattr(result, '_previousTestClass', None)
currentClass = test.__class__
if currentClass == previousClass:
return
if getattr(previousClass, '_classSetupFailed', False):
return
if getattr(result, '_moduleSetUpFailed', False):
return
if getattr(previousClass, "__unittest_skip__", False):
return
tearDownClass = getattr(previousClass, 'tearDownClass', None)
if tearDownClass is not None:
try:
tearDownClass()
except Exception, e:
if isinstance(result, _DebugResult):
raise
className = util.strclass(previousClass)
errorName = 'tearDownClass (%s)' % className
self._addClassOrModuleLevelException(result, e, errorName)
class _ErrorHolder(object):
"""
Placeholder for a TestCase inside a result. As far as a TestResult
is concerned, this looks exactly like a unit test. Used to insert
arbitrary errors into a test suite run.
"""
# Inspired by the ErrorHolder from Twisted:
# http://twistedmatrix.com/trac/browser/trunk/twisted/trial/runner.py
# attribute used by TestResult._exc_info_to_string
failureException = None
def __init__(self, description):
self.description = description
def id(self):
return self.description
def shortDescription(self):
return None
def __repr__(self):
return "<ErrorHolder description=%r>" % (self.description,)
def __str__(self):
return self.id()
def run(self, result):
# could call result.addError(...) - but this test-like object
# shouldn't be run anyway
pass
def __call__(self, result):
return self.run(result)
def countTestCases(self):
return 0
def _isnotsuite(test):
"A crude way to tell apart testcases and suites with duck-typing"
try:
iter(test)
except TypeError:
return True
return False
class _DebugResult(object):
"Used by the TestSuite to hold previous class when running in debug."
_previousTestClass = None
_moduleSetUpFailed = False
shouldStop = False
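# --- Editor's illustrative sketch (hedged): not part of the original module.
# Minimal TestSuite usage as described in the class docstring; the test case
# below is hypothetical.
if __name__ == '__main__':
    class SanityTest(case.TestCase):
        def test_truth(self):
            self.assertTrue(True)
    suite = TestSuite()
    suite.addTest(SanityTest('test_truth'))
    unittest.TextTestRunner(verbosity=2).run(suite)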
|
40223125/40223125-2
|
refs/heads/master
|
static/Brython3.1.0-20150301-090019/Lib/antigravity.py
|
917
|
import webbrowser
import hashlib
webbrowser.open("http://xkcd.com/353/")
def geohash(latitude, longitude, datedow):
'''Compute geohash() using the Munroe algorithm.
>>> geohash(37.421542, -122.085589, b'2005-05-26-10458.68')
37.857713 -122.544543
'''
# http://xkcd.com/426/
h = hashlib.md5(datedow).hexdigest()
p, q = [('%f' % float.fromhex('0.' + x)) for x in (h[:16], h[16:32])]
print('%d%s %d%s' % (latitude, p[1:], longitude, q[1:]))
|