| repo_name (stringlengths 5-100) | ref (stringlengths 12-67) | path (stringlengths 4-244) | copies (stringlengths 1-8) | content (stringlengths 0-1.05M, nullable) |
|---|---|---|---|---|
| nck0405/MyOwn | refs/heads/master | controllers/scenario.py | 18 |
# -*- coding: utf-8 -*-
"""
Scenario Module - Controllers
http://eden.sahanafoundation.org/wiki/BluePrintScenario
"""
module = request.controller
resourcename = request.function
if not settings.has_module(module):
raise HTTP(404, body="Module disabled: %s" % module)
# -----------------------------------------------------------------------------
def index():
""" Module's Home Page """
module_name = settings.modules[module].name_nice
response.title = module_name
return dict(module_name=module_name)
# -----------------------------------------------------------------------------
def create():
""" Redirect to scenario/create """
redirect(URL(f="scenario", args="create"))
# -----------------------------------------------------------------------------
def scenario():
""" RESTful CRUD controller """
s3db.configure("scenario_config",
deletable=False)
# Pre-process
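# (In this framework's convention, a prep callback that returns True lets
# the REST request proceed, while returning False aborts it; person() below
# shows the refusing case.)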
def prep(r):
if r.interactive and r.component:
if r.component.name != "config":
s3.crud.submit_button = T("Assign")
s3.crud_labels["DELETE"] = T("Remove")
if r.component_name == "site":
field = db.scenario_site.site_id
field.readable = field.writable = True
return True
s3.prep = prep
output = s3_rest_controller(rheader = s3db.scenario_rheader)
return output
# -----------------------------------------------------------------------------
def person():
""" Person controller for AddPersonWidget """
def prep(r):
if r.representation != "s3json":
# Do not serve other representations here
return False
else:
current.xml.show_ids = True
return True
s3.prep = prep
return s3_rest_controller("pr", "person")
# END =========================================================================
| polymorphm/LiRuReferer | refs/heads/master | _liru_referer_lib__api_2655426353/task_widget.py | 1 |
#-*- coding: utf-8 -*-
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import sys, os.path, random
import gtk
from .short_history_buffer import ShortHistoryBuffer
from .task_ctrl import TaskCtrl
from .cyclic_list_file import open_cyclic_list
from .task import Task
TASK_WIDGET_GLADE = os.path.join(
os.path.dirname(__file__).decode(sys.getfilesystemencoding()),
'task_widget.glade',
)
LOG_BUFFER_SIZE = 100
class TaskWidget:
def __init__(
self, main_window_ref,
general_task_ctrl,
source_site,
target_sites_list,
limit,
workers,
worker_sleep,
):
self.main_window_ref = main_window_ref
self.general_task_ctrl = general_task_ctrl
self.builder = gtk.Builder()
self.builder.add_from_file(TASK_WIDGET_GLADE)
self.widget = self.builder.get_object('widget_alignment')
self._source_site = source_site
self._target_sites_list = target_sites_list
self._limit = limit
self.log_buffer = ShortHistoryBuffer(LOG_BUFFER_SIZE)
self.builder.connect_signals(self)
self.builder.get_object('source_site_entry').\
set_text(source_site.encode('utf-8'))
self.builder.get_object('target_sites_list_entry').\
set_text(target_sites_list.encode('utf-8'))
limit_msg = 'Да, %s' % limit \
if limit is not None else 'Нет (бесконечно)'  # 'Yes, %s' / 'No (unlimited)'
self.builder.get_object('limit_entry').\
set_text(limit_msg.encode('utf-8'))
self.builder.get_object('workers_spinbutton').\
set_value(workers)
self.builder.get_object('worker_sleep_spinbutton').\
set_value(worker_sleep)
def get_source_site(self):
return self._source_site
def get_target_sites_list(self):
return self._target_sites_list
def get_limit(self):
return self._limit
def get_workers(self):
widget = self.builder.get_object('workers_spinbutton')
value = widget.get_value_as_int()
return value
def get_worker_sleep(self):
widget = self.builder.get_object('worker_sleep_spinbutton')
value = widget.get_value()
if value:
# add some randomness
value *= (0.5 + random.random())
return value
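# Illustrative arithmetic: with worker_sleep == 2.0 the returned value
# falls in [1.0, 3.0), since (0.5 + random.random()) spans [0.5, 1.5).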
def on_log_checkbutton_toggled(self, widget):
visible = widget.get_active()
low_window = self.builder.get_object('log_scrolledwindow')
if visible:
log_widget = self.builder.get_object('log_textbuffer')
log_widget.set_text(self.log_buffer.getvalue().encode('utf-8'))
low_window.show()
else:
low_window.hide()
def clean_log(self):
self.log_buffer.clean()
widget = self.builder.get_object('log_textbuffer')
widget.set_text(b'')
def set_log(self, value):
decor_value = '%s\n%s\n' % (value, '-' * 50)
self.log_buffer.append(decor_value)
visible = self.builder.get_object('log_checkbutton').get_active()
if visible:
widget = self.builder.get_object('log_textbuffer')
if len(self.log_buffer) > 1:
# normal appending to the log widget (it is faster than constantly re-syncing with <self.log_buffer>)
end_iter = widget.get_end_iter()
widget.insert(end_iter, decor_value.encode('utf-8'))
else:
# clearing the log widget (done by re-syncing with <self.log_buffer>)
widget.set_text(self.log_buffer.getvalue().encode('utf-8'))
def set_successes(self, value):
assert isinstance(value, int)
widget = self.builder.get_object('successes_entry')
widget.set_text(unicode(value).encode('utf-8'))
def set_errors(self, value):
assert isinstance(value, int)
widget = self.builder.get_object('errors_entry')
widget.set_text(unicode(value).encode('utf-8'))
def set_progress(self, value):
if value < 0:
value = 0
elif value > 1:
value = 1
widget = self.builder.get_object('progressbar')
widget.set_fraction(value)
def clean(self):
self.set_successes(0)
self.set_errors(0)
self.set_progress(0.0)
self.clean_log()
def on_abort_button_clicked(self, widget):
self.task_ctrl.stop()
self.set_log('Остановлено аварийно')  # 'Stopped abnormally'
def _stop_handler(self, sender):
if sender == self.task_ctrl:
self.builder.get_object('abort_button').set_sensitive(False)
def run(self):
self.clean()
self.task_ctrl = TaskCtrl(self.general_task_ctrl)
self.builder.get_object('abort_button').set_sensitive(True)
self.task_ctrl.connect('stop', self._stop_handler)
self.task = Task(
self.task_ctrl,
self.get_source_site,
open_cyclic_list(self.get_target_sites_list()),
self.get_limit,
self.get_workers,
self.get_worker_sleep,
set_successes=self.set_successes,
set_errors=self.set_errors,
set_log=self.set_log,
set_progress=self.set_progress,
)
self.task.run()
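# Construction sketch (hedged: the collaborator names come from the imports
# above, but the argument values here are purely illustrative):
#
#   widget = TaskWidget(main_window_ref, general_task_ctrl,
#                       source_site=u'http://example.org',
#                       target_sites_list=u'targets.txt',
#                       limit=100, workers=4, worker_sleep=1.5)
#   widget.run()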
| kalahbrown/HueBigSQL | refs/heads/master | desktop/core/ext-py/requests-2.6.0/requests/packages/urllib3/packages/ssl_match_hostname/__init__.py | 2057 |
try:
# Python 3.2+
from ssl import CertificateError, match_hostname
except ImportError:
try:
# Backport of the function from a pypi module
from backports.ssl_match_hostname import CertificateError, match_hostname
except ImportError:
# Our vendored copy
from ._implementation import CertificateError, match_hostname
# Not needed, but documenting what we provide.
__all__ = ('CertificateError', 'match_hostname')
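# Usage sketch (an assumption-laden example, not part of this module):
# match_hostname() takes the dict returned by SSLSocket.getpeercert() and
# raises CertificateError on a mismatch. The host name and CA-bundle path
# below are illustrative.
#
#   import socket, ssl
#   sock = ssl.wrap_socket(socket.create_connection(("example.org", 443)),
#                          cert_reqs=ssl.CERT_REQUIRED,
#                          ca_certs="/path/to/ca-bundle.crt")
#   try:
#       match_hostname(sock.getpeercert(), "example.org")
#   except CertificateError:
#       sock.close()
#       raise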
| kbdick/RecycleTracker | refs/heads/gh-pages | recyclecollector/scrap/gdata-2.0.18/upload-diffs.py | 23 |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for uploading diffs from a version control system to the codereview app.
Usage summary: upload.py [options] [-- diff_options]
Diff options are passed to the diff command of the underlying system.
Supported version control systems:
Git
Mercurial
Subversion
It is important for Git/Mercurial users to specify a tree/node/branch to diff
against by using the '--rev' option.
"""
# This code is derived from appcfg.py in the App Engine SDK (open source),
# and from ASPN recipe #146306.
import ConfigParser
import cookielib
import fnmatch
import getpass
import logging
import mimetypes
import optparse
import os
import re
import socket
import subprocess
import sys
import urllib
import urllib2
import urlparse
# The md5 module was deprecated in Python 2.5.
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
import readline
except ImportError:
pass
# The logging verbosity:
# 0: Errors only.
# 1: Status messages.
# 2: Info logs.
# 3: Debug logs.
verbosity = 1
# Max size of patch or base file.
MAX_UPLOAD_SIZE = 900 * 1024
# Constants for version control names. Used by GuessVCSName.
VCS_GIT = "Git"
VCS_MERCURIAL = "Mercurial"
VCS_SUBVERSION = "Subversion"
VCS_UNKNOWN = "Unknown"
# whitelist for non-binary filetypes which do not start with "text/"
# .mm (Objective-C) shows up as application/x-freemind on my Linux box.
TEXT_MIMETYPES = ['application/javascript', 'application/x-javascript',
'application/x-freemind']
VCS_ABBREVIATIONS = {
VCS_MERCURIAL.lower(): VCS_MERCURIAL,
"hg": VCS_MERCURIAL,
VCS_SUBVERSION.lower(): VCS_SUBVERSION,
"svn": VCS_SUBVERSION,
VCS_GIT.lower(): VCS_GIT,
}
# The result of parsing Subversion's [auto-props] setting.
svn_auto_props_map = None
def GetEmail(prompt):
"""Prompts the user for their email address and returns it.
The last used email address is saved to a file and offered up as a suggestion
to the user. If the user presses enter without typing in anything the last
used email address is used. If the user enters a new address, it is saved
for next time we prompt.
"""
last_email_file_name = os.path.expanduser("~/.last_codereview_email_address")
last_email = ""
if os.path.exists(last_email_file_name):
try:
last_email_file = open(last_email_file_name, "r")
last_email = last_email_file.readline().strip("\n")
last_email_file.close()
prompt += " [%s]" % last_email
except IOError, e:
pass
email = raw_input(prompt + ": ").strip()
if email:
try:
last_email_file = open(last_email_file_name, "w")
last_email_file.write(email)
last_email_file.close()
except IOError, e:
pass
else:
email = last_email
return email
def StatusUpdate(msg):
"""Print a status message to stdout.
If 'verbosity' is greater than 0, print the message.
Args:
msg: The string to print.
"""
if verbosity > 0:
print msg
def ErrorExit(msg):
"""Print an error message to stderr and exit."""
print >>sys.stderr, msg
sys.exit(1)
class ClientLoginError(urllib2.HTTPError):
"""Raised to indicate there was an error authenticating with ClientLogin."""
def __init__(self, url, code, msg, headers, args):
urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
self.args = args
self.reason = args["Error"]
class AbstractRpcServer(object):
"""Provides a common interface for a simple RPC server."""
def __init__(self, host, auth_function, host_override=None, extra_headers={},
save_cookies=False):
"""Creates a new HttpRpcServer.
Args:
host: The host to send requests to.
auth_function: A function that takes no arguments and returns an
(email, password) tuple when called. Will be called if authentication
is required.
host_override: The host header to send to the server (defaults to host).
extra_headers: A dict of extra headers to append to every request.
save_cookies: If True, save the authentication cookies to local disk.
If False, use an in-memory cookiejar instead. Subclasses must
implement this functionality. Defaults to False.
"""
self.host = host
self.host_override = host_override
self.auth_function = auth_function
self.authenticated = False
self.extra_headers = extra_headers
self.save_cookies = save_cookies
self.opener = self._GetOpener()
if self.host_override:
logging.info("Server: %s; Host: %s", self.host, self.host_override)
else:
logging.info("Server: %s", self.host)
def _GetOpener(self):
"""Returns an OpenerDirector for making HTTP requests.
Returns:
A urllib2.OpenerDirector object.
"""
raise NotImplementedError()
def _CreateRequest(self, url, data=None):
"""Creates a new urllib request."""
logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
req = urllib2.Request(url, data=data)
if self.host_override:
req.add_header("Host", self.host_override)
for key, value in self.extra_headers.iteritems():
req.add_header(key, value)
return req
def _GetAuthToken(self, email, password):
"""Uses ClientLogin to authenticate the user, returning an auth token.
Args:
email: The user's email address
password: The user's password
Raises:
ClientLoginError: If there was an error authenticating with ClientLogin.
HTTPError: If there was some other form of HTTP error.
Returns:
The authentication token returned by ClientLogin.
"""
account_type = "GOOGLE"
if self.host.endswith(".google.com"):
# Needed for use inside Google.
account_type = "HOSTED"
req = self._CreateRequest(
url="https://www.google.com/accounts/ClientLogin",
data=urllib.urlencode({
"Email": email,
"Passwd": password,
"service": "ah",
"source": "rietveld-codereview-upload",
"accountType": account_type,
}),
)
try:
response = self.opener.open(req)
response_body = response.read()
response_dict = dict(x.split("=")
for x in response_body.split("\n") if x)
return response_dict["Auth"]
except urllib2.HTTPError, e:
if e.code == 403:
body = e.read()
response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
raise ClientLoginError(req.get_full_url(), e.code, e.msg,
e.headers, response_dict)
else:
raise
def _GetAuthCookie(self, auth_token):
"""Fetches authentication cookies for an authentication token.
Args:
auth_token: The authentication token returned by ClientLogin.
Raises:
HTTPError: If there was an error fetching the authentication cookies.
"""
# This is a dummy value to allow us to identify when we're successful.
continue_location = "http://localhost/"
args = {"continue": continue_location, "auth": auth_token}
req = self._CreateRequest("http://%s/_ah/login?%s" %
(self.host, urllib.urlencode(args)))
try:
response = self.opener.open(req)
except urllib2.HTTPError, e:
response = e
if (response.code != 302 or
response.info()["location"] != continue_location):
raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg,
response.headers, response.fp)
self.authenticated = True
def _Authenticate(self):
"""Authenticates the user.
The authentication process works as follows:
1) We get a username and password from the user
2) We use ClientLogin to obtain an AUTH token for the user
(see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
3) We pass the auth token to /_ah/login on the server to obtain an
authentication cookie. If login was successful, it tries to redirect
us to the URL we provided.
If we attempt to access the upload API without first obtaining an
authentication cookie, it returns a 401 response (or a 302) and
directs us to authenticate ourselves with ClientLogin.
"""
for i in range(3):
credentials = self.auth_function()
try:
auth_token = self._GetAuthToken(credentials[0], credentials[1])
except ClientLoginError, e:
if e.reason == "BadAuthentication":
print >>sys.stderr, "Invalid username or password."
continue
if e.reason == "CaptchaRequired":
print >>sys.stderr, (
"Please go to\n"
"https://www.google.com/accounts/DisplayUnlockCaptcha\n"
"and verify you are a human. Then try again.")
break
if e.reason == "NotVerified":
print >>sys.stderr, "Account not verified."
break
if e.reason == "TermsNotAgreed":
print >>sys.stderr, "User has not agreed to TOS."
break
if e.reason == "AccountDeleted":
print >>sys.stderr, "The user account has been deleted."
break
if e.reason == "AccountDisabled":
print >>sys.stderr, "The user account has been disabled."
break
if e.reason == "ServiceDisabled":
print >>sys.stderr, ("The user's access to the service has been "
"disabled.")
break
if e.reason == "ServiceUnavailable":
print >>sys.stderr, "The service is not available; try again later."
break
raise
self._GetAuthCookie(auth_token)
return
def Send(self, request_path, payload=None,
content_type="application/octet-stream",
timeout=None,
**kwargs):
"""Sends an RPC and returns the response.
Args:
request_path: The path to send the request to, eg /api/appversion/create.
payload: The body of the request, or None to send an empty request.
content_type: The Content-Type header to use.
timeout: timeout in seconds; default None i.e. no timeout.
(Note: for large requests on OS X, the timeout doesn't work right.)
kwargs: Any keyword arguments are converted into query string parameters.
Returns:
The response body, as a string.
"""
# TODO: Don't require authentication. Let the server say
# whether it is necessary.
if not self.authenticated:
self._Authenticate()
old_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
try:
tries = 0
while True:
tries += 1
args = dict(kwargs)
url = "http://%s%s" % (self.host, request_path)
if args:
url += "?" + urllib.urlencode(args)
req = self._CreateRequest(url=url, data=payload)
req.add_header("Content-Type", content_type)
try:
f = self.opener.open(req)
response = f.read()
f.close()
return response
except urllib2.HTTPError, e:
if tries > 3:
raise
elif e.code == 401 or e.code == 302:
self._Authenticate()
## elif e.code >= 500 and e.code < 600:
## # Server Error - try again.
## continue
else:
raise
finally:
socket.setdefaulttimeout(old_timeout)
class HttpRpcServer(AbstractRpcServer):
"""Provides a simplified RPC-style interface for HTTP requests."""
def _Authenticate(self):
"""Save the cookie jar after authentication."""
super(HttpRpcServer, self)._Authenticate()
if self.save_cookies:
StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
self.cookie_jar.save()
def _GetOpener(self):
"""Returns an OpenerDirector that supports cookies and ignores redirects.
Returns:
A urllib2.OpenerDirector object.
"""
opener = urllib2.OpenerDirector()
opener.add_handler(urllib2.ProxyHandler())
opener.add_handler(urllib2.UnknownHandler())
opener.add_handler(urllib2.HTTPHandler())
opener.add_handler(urllib2.HTTPDefaultErrorHandler())
opener.add_handler(urllib2.HTTPSHandler())
opener.add_handler(urllib2.HTTPErrorProcessor())
if self.save_cookies:
self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies")
self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
if os.path.exists(self.cookie_file):
try:
self.cookie_jar.load()
self.authenticated = True
StatusUpdate("Loaded authentication cookies from %s" %
self.cookie_file)
except (cookielib.LoadError, IOError):
# Failed to load cookies - just ignore them.
pass
else:
# Create an empty cookie file with mode 600
fd = os.open(self.cookie_file, os.O_CREAT, 0600)
os.close(fd)
# Always chmod the cookie file
os.chmod(self.cookie_file, 0600)
else:
# Don't save cookies across runs of update.py.
self.cookie_jar = cookielib.CookieJar()
opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
return opener
parser = optparse.OptionParser(usage="%prog [options] [-- diff_options]")
parser.add_option("-y", "--assume_yes", action="store_true",
dest="assume_yes", default=False,
help="Assume that the answer to yes/no questions is 'yes'.")
# Logging
group = parser.add_option_group("Logging options")
group.add_option("-q", "--quiet", action="store_const", const=0,
dest="verbose", help="Print errors only.")
group.add_option("-v", "--verbose", action="store_const", const=2,
dest="verbose", default=1,
help="Print info level logs (default).")
group.add_option("--noisy", action="store_const", const=3,
dest="verbose", help="Print all logs.")
# Review server
group = parser.add_option_group("Review server options")
group.add_option("-s", "--server", action="store", dest="server",
default="codereview.appspot.com",
metavar="SERVER",
help=("The server to upload to. The format is host[:port]. "
"Defaults to '%default'."))
group.add_option("-e", "--email", action="store", dest="email",
metavar="EMAIL", default=None,
help="The username to use. Will prompt if omitted.")
group.add_option("-H", "--host", action="store", dest="host",
metavar="HOST", default=None,
help="Overrides the Host header sent with all RPCs.")
group.add_option("--no_cookies", action="store_false",
dest="save_cookies", default=True,
help="Do not save authentication cookies to local disk.")
# Issue
group = parser.add_option_group("Issue options")
group.add_option("-d", "--description", action="store", dest="description",
metavar="DESCRIPTION", default=None,
help="Optional description when creating an issue.")
group.add_option("-f", "--description_file", action="store",
dest="description_file", metavar="DESCRIPTION_FILE",
default=None,
help="Optional path of a file that contains "
"the description when creating an issue.")
group.add_option("-r", "--reviewers", action="store", dest="reviewers",
metavar="REVIEWERS", default=",afshar@google.com",
help="Add reviewers (comma separated email addresses).")
group.add_option("--cc", action="store", dest="cc",
metavar="CC", default="gdata-python-client-library-contributors@googlegroups.com",
help="Add CC (comma separated email addresses).")
group.add_option("--private", action="store_true", dest="private",
default=False,
help="Make the issue restricted to reviewers and those CCed")
# Upload options
group = parser.add_option_group("Patch options")
group.add_option("-m", "--message", action="store", dest="message",
metavar="MESSAGE", default=None,
help="A message to identify the patch. "
"Will prompt if omitted.")
group.add_option("-i", "--issue", type="int", action="store",
metavar="ISSUE", default=None,
help="Issue number to which to add. Defaults to new issue.")
group.add_option("--base_url", action="store", dest="base_url", default=None,
help="Base repository URL (listed as \"Base URL\" when "
"viewing issue). If omitted, will be guessed automatically "
"for SVN repos and left blank for others.")
group.add_option("--download_base", action="store_true",
dest="download_base", default=False,
help="Base files will be downloaded by the server "
"(side-by-side diffs may not work on files with CRs).")
group.add_option("--rev", action="store", dest="revision",
metavar="REV", default=None,
help="Base revision/branch/tree to diff against. Use "
"rev1:rev2 range to review already committed changeset.")
group.add_option("--send_mail", action="store_true",
dest="send_mail", default=True,
help="Send notification email to reviewers.")
group.add_option("--vcs", action="store", dest="vcs",
metavar="VCS", default=None,
help=("Version control system (optional, usually upload.py "
"already guesses the right VCS)."))
group.add_option("--emulate_svn_auto_props", action="store_true",
dest="emulate_svn_auto_props", default=False,
help=("Emulate Subversion's auto properties feature."))
def GetRpcServer(options):
"""Returns an instance of an AbstractRpcServer.
Returns:
A new AbstractRpcServer, on which RPC calls can be made.
"""
rpc_server_class = HttpRpcServer
def GetUserCredentials():
"""Prompts the user for a username and password."""
email = options.email
if email is None:
email = GetEmail("Email (login for uploading to %s)" % options.server)
password = getpass.getpass("Password for %s: " % email)
return (email, password)
# If this is the dev_appserver, use fake authentication.
host = (options.host or options.server).lower()
if host == "localhost" or host.startswith("localhost:"):
email = options.email
if email is None:
email = "test@example.com"
logging.info("Using debug user %s. Override with --email" % email)
server = rpc_server_class(
options.server,
lambda: (email, "password"),
host_override=options.host,
extra_headers={"Cookie":
'dev_appserver_login="%s:False"' % email},
save_cookies=options.save_cookies)
# Don't try to talk to ClientLogin.
server.authenticated = True
return server
return rpc_server_class(options.server, GetUserCredentials,
host_override=options.host,
save_cookies=options.save_cookies)
def EncodeMultipartFormData(fields, files):
"""Encode form fields for multipart/form-data.
Args:
fields: A sequence of (name, value) elements for regular form fields.
files: A sequence of (name, filename, value) elements for data to be
uploaded as files.
Returns:
(content_type, body) ready for httplib.HTTP instance.
Source:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
"""
BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
CRLF = '\r\n'
lines = []
for (key, value) in fields:
lines.append('--' + BOUNDARY)
lines.append('Content-Disposition: form-data; name="%s"' % key)
lines.append('')
lines.append(value)
for (key, filename, value) in files:
lines.append('--' + BOUNDARY)
lines.append('Content-Disposition: form-data; name="%s"; filename="%s"' %
(key, filename))
lines.append('Content-Type: %s' % GetContentType(filename))
lines.append('')
lines.append(value)
lines.append('--' + BOUNDARY + '--')
lines.append('')
body = CRLF.join(lines)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, body
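# Illustrative call (values are made up):
#   ctype, body = EncodeMultipartFormData(
#       [("subject", "Fix typo")],
#       [("data", "data.diff", "Index: README\n...")])
# ctype embeds the fixed BOUNDARY above; body is CRLF-joined and ready to
# send as the payload of an urllib2 request.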
def GetContentType(filename):
"""Helper to guess the content-type from the filename."""
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
# Use a shell for subcommands on Windows to get a PATH search.
use_shell = sys.platform.startswith("win")
def RunShellWithReturnCode(command, print_output=False,
universal_newlines=True,
env=os.environ):
"""Executes a command and returns the output from stdout and the return code.
Args:
command: Command to execute.
print_output: If True, the output is printed to stdout.
If False, both stdout and stderr are ignored.
universal_newlines: Use universal_newlines flag (default: True).
Returns:
Tuple (output, return code)
"""
logging.info("Running %s", command)
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=use_shell, universal_newlines=universal_newlines,
env=env)
if print_output:
output_array = []
while True:
line = p.stdout.readline()
if not line:
break
print line.strip("\n")
output_array.append(line)
output = "".join(output_array)
else:
output = p.stdout.read()
p.wait()
errout = p.stderr.read()
if print_output and errout:
print >>sys.stderr, errout
p.stdout.close()
p.stderr.close()
return output, p.returncode
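# Typical call, mirroring the uses further below (e.g. in GetStatus):
#   out, returncode = RunShellWithReturnCode(["svn", "info"])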
def RunShell(command, silent_ok=False, universal_newlines=True,
print_output=False, env=os.environ):
data, retcode = RunShellWithReturnCode(command, print_output,
universal_newlines, env)
if retcode:
ErrorExit("Got error status from %s:\n%s" % (command, data))
if not silent_ok and not data:
ErrorExit("No output from %s" % command)
return data
class VersionControlSystem(object):
"""Abstract base class providing an interface to the VCS."""
def __init__(self, options):
"""Constructor.
Args:
options: Command line options.
"""
self.options = options
def GenerateDiff(self, args):
"""Return the current diff as a string.
Args:
args: Extra arguments to pass to the diff command.
"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def GetUnknownFiles(self):
"""Return a list of files unknown to the VCS."""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def CheckForUnknownFiles(self):
"""Show an "are you sure?" prompt if there are unknown files."""
unknown_files = self.GetUnknownFiles()
if unknown_files:
print "The following files are not added to version control:"
for line in unknown_files:
print line
prompt = "Are you sure to continue?(y/N) "
answer = raw_input(prompt).strip()
if answer != "y":
ErrorExit("User aborted")
def GetBaseFile(self, filename):
"""Get the content of the upstream version of a file.
Returns:
A tuple (base_content, new_content, is_binary, status)
base_content: The contents of the base file.
new_content: For text files, this is empty. For binary files, this is
the contents of the new file, since the diff output won't contain
information to reconstruct the current file.
is_binary: True iff the file is binary.
status: The status of the file.
"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def GetBaseFiles(self, diff):
"""Helper that calls GetBase file for each file in the patch.
Returns:
A dictionary that maps from filename to GetBaseFile's tuple. Filenames
are retrieved based on lines that start with "Index:" or
"Property changes on:".
"""
files = {}
for line in diff.splitlines(True):
if line.startswith('Index:') or line.startswith('Property changes on:'):
unused, filename = line.split(':', 1)
# On Windows if a file has property changes its filename uses '\'
# instead of '/'.
filename = filename.strip().replace('\\', '/')
files[filename] = self.GetBaseFile(filename)
return files
def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,
files):
"""Uploads the base files (and if necessary, the current ones as well)."""
def UploadFile(filename, file_id, content, is_binary, status, is_base):
"""Uploads a file to the server."""
file_too_large = False
if is_base:
type = "base"
else:
type = "current"
if len(content) > MAX_UPLOAD_SIZE:
print ("Not uploading the %s file for %s because it's too large." %
(type, filename))
file_too_large = True
content = ""
checksum = md5(content).hexdigest()
if options.verbose > 0 and not file_too_large:
print "Uploading %s file for %s" % (type, filename)
url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
form_fields = [("filename", filename),
("status", status),
("checksum", checksum),
("is_binary", str(is_binary)),
("is_current", str(not is_base)),
]
if file_too_large:
form_fields.append(("file_too_large", "1"))
if options.email:
form_fields.append(("user", options.email))
ctype, body = EncodeMultipartFormData(form_fields,
[("data", filename, content)])
response_body = rpc_server.Send(url, body,
content_type=ctype)
if not response_body.startswith("OK"):
StatusUpdate(" --> %s" % response_body)
sys.exit(1)
patches = dict()
[patches.setdefault(v, k) for k, v in patch_list]
for filename in patches.keys():
base_content, new_content, is_binary, status = files[filename]
file_id_str = patches.get(filename)
if file_id_str.find("nobase") != -1:
base_content = None
file_id_str = file_id_str[file_id_str.rfind("_") + 1:]
file_id = int(file_id_str)
if base_content != None:
UploadFile(filename, file_id, base_content, is_binary, status, True)
if new_content != None:
UploadFile(filename, file_id, new_content, is_binary, status, False)
def IsImage(self, filename):
"""Returns true if the filename has an image extension."""
mimetype = mimetypes.guess_type(filename)[0]
if not mimetype:
return False
return mimetype.startswith("image/")
def IsBinary(self, filename):
"""Returns true if the guessed mimetyped isnt't in text group."""
mimetype = mimetypes.guess_type(filename)[0]
if not mimetype:
return False # e.g. README, "real" binaries usually have an extension
# special case for text files which don't start with text/
if mimetype in TEXT_MIMETYPES:
return False
return not mimetype.startswith("text/")
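# Illustrative, given the standard mimetypes tables: "notes.txt" guesses as
# text/plain -> False; "logo.png" guesses as image/png -> True; "README"
# yields no guess at all -> False.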
class SubversionVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Subversion."""
def __init__(self, options):
super(SubversionVCS, self).__init__(options)
if self.options.revision:
match = re.match(r"(\d+)(:(\d+))?", self.options.revision)
if not match:
ErrorExit("Invalid Subversion revision %s." % self.options.revision)
self.rev_start = match.group(1)
self.rev_end = match.group(3)
else:
self.rev_start = self.rev_end = None
# Cache output from "svn list -r REVNO dirname".
# Keys: dirname, Values: 2-tuple (output for start rev and end rev).
self.svnls_cache = {}
# Base URL is required to fetch files deleted in an older revision.
# Result is cached to not guess it over and over again in GetBaseFile().
required = self.options.download_base or self.options.revision is not None
self.svn_base = self._GuessBase(required)
def GuessBase(self, required):
"""Wrapper for _GuessBase."""
return self.svn_base
def _GuessBase(self, required):
"""Returns the SVN base URL.
Args:
required: If true, exits if the url can't be guessed, otherwise None is
returned.
"""
info = RunShell(["svn", "info"])
for line in info.splitlines():
words = line.split()
if len(words) == 2 and words[0] == "URL:":
url = words[1]
scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
username, netloc = urllib.splituser(netloc)
if username:
logging.info("Removed username from base URL")
if netloc.endswith("svn.python.org"):
if netloc == "svn.python.org":
if path.startswith("/projects/"):
path = path[9:]
elif netloc != "pythondev@svn.python.org":
ErrorExit("Unrecognized Python URL: %s" % url)
base = "http://svn.python.org/view/*checkout*%s/" % path
logging.info("Guessed Python base = %s", base)
elif netloc.endswith("svn.collab.net"):
if path.startswith("/repos/"):
path = path[6:]
base = "http://svn.collab.net/viewvc/*checkout*%s/" % path
logging.info("Guessed CollabNet base = %s", base)
elif netloc.endswith(".googlecode.com"):
path = path + "/"
base = urlparse.urlunparse(("http", netloc, path, params,
query, fragment))
logging.info("Guessed Google Code base = %s", base)
else:
path = path + "/"
base = urlparse.urlunparse((scheme, netloc, path, params,
query, fragment))
logging.info("Guessed base = %s", base)
return base
if required:
ErrorExit("Can't find URL in output from svn info")
return None
def GenerateDiff(self, args):
cmd = ["svn", "diff"]
if self.options.revision:
cmd += ["-r", self.options.revision]
cmd.extend(args)
data = RunShell(cmd)
count = 0
for line in data.splitlines():
if line.startswith("Index:") or line.startswith("Property changes on:"):
count += 1
logging.info(line)
if not count:
ErrorExit("No valid patches found in output from svn diff")
return data
def _CollapseKeywords(self, content, keyword_str):
"""Collapses SVN keywords."""
# svn cat translates keywords but svn diff doesn't. As a result of this
# behavior patching.PatchChunks() fails with a chunk mismatch error.
# This part was originally written by the Review Board development team
# who had the same problem (http://reviews.review-board.org/r/276/).
# Mapping of keywords to known aliases
svn_keywords = {
# Standard keywords
'Date': ['Date', 'LastChangedDate'],
'Revision': ['Revision', 'LastChangedRevision', 'Rev'],
'Author': ['Author', 'LastChangedBy'],
'HeadURL': ['HeadURL', 'URL'],
'Id': ['Id'],
# Aliases
'LastChangedDate': ['LastChangedDate', 'Date'],
'LastChangedRevision': ['LastChangedRevision', 'Rev', 'Revision'],
'LastChangedBy': ['LastChangedBy', 'Author'],
'URL': ['URL', 'HeadURL'],
}
def repl(m):
if m.group(2):
return "$%s::%s$" % (m.group(1), " " * len(m.group(3)))
return "$%s$" % m.group(1)
keywords = [keyword
for name in keyword_str.split(" ")
for keyword in svn_keywords.get(name, [])]
return re.sub(r"\$(%s):(:?)([^\$]+)\$" % '|'.join(keywords), repl, content)
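# Illustrative: with keyword_str "Revision" (expanded via svn_keywords to
# Revision, LastChangedRevision and Rev), "$Rev: 1234 $" collapses to
# "$Rev$", while the fixed-width form "$Rev:: 1234 $" keeps its width by
# replacing the value with spaces.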
def GetUnknownFiles(self):
status = RunShell(["svn", "status", "--ignore-externals"], silent_ok=True)
unknown_files = []
for line in status.split("\n"):
if line and line[0] == "?":
unknown_files.append(line)
return unknown_files
def ReadFile(self, filename):
"""Returns the contents of a file."""
file = open(filename, 'rb')
result = ""
try:
result = file.read()
finally:
file.close()
return result
def GetStatus(self, filename):
"""Returns the status of a file."""
if not self.options.revision:
status = RunShell(["svn", "status", "--ignore-externals", filename])
if not status:
ErrorExit("svn status returned no output for %s" % filename)
status_lines = status.splitlines()
# If file is in a cl, the output will begin with
# "\n--- Changelist 'cl_name':\n". See
# http://svn.collab.net/repos/svn/trunk/notes/changelist-design.txt
if (len(status_lines) == 3 and
not status_lines[0] and
status_lines[1].startswith("--- Changelist")):
status = status_lines[2]
else:
status = status_lines[0]
# If we have a revision to diff against we need to run "svn list"
# for the old and the new revision and compare the results to get
# the correct status for a file.
else:
dirname, relfilename = os.path.split(filename)
if dirname not in self.svnls_cache:
cmd = ["svn", "list", "-r", self.rev_start, dirname or "."]
out, returncode = RunShellWithReturnCode(cmd)
if returncode:
ErrorExit("Failed to get status for %s." % filename)
old_files = out.splitlines()
args = ["svn", "list"]
if self.rev_end:
args += ["-r", self.rev_end]
cmd = args + [dirname or "."]
out, returncode = RunShellWithReturnCode(cmd)
if returncode:
ErrorExit("Failed to run command %s" % cmd)
self.svnls_cache[dirname] = (old_files, out.splitlines())
old_files, new_files = self.svnls_cache[dirname]
if relfilename in old_files and relfilename not in new_files:
status = "D "
elif relfilename in old_files and relfilename in new_files:
status = "M "
else:
status = "A "
return status
def GetBaseFile(self, filename):
status = self.GetStatus(filename)
base_content = None
new_content = None
# If a file is copied its status will be "A +", which signifies
# "addition-with-history". See "svn st" for more information. We need to
# upload the original file or else diff parsing will fail if the file was
# edited.
if status[0] == "A" and status[3] != "+":
# We'll need to upload the new content if we're adding a binary file
# since diff's output won't contain it.
mimetype = RunShell(["svn", "propget", "svn:mime-type", filename],
silent_ok=True)
base_content = ""
is_binary = bool(mimetype) and not mimetype.startswith("text/")
if is_binary and self.IsImage(filename):
new_content = self.ReadFile(filename)
elif (status[0] in ("M", "D", "R") or
(status[0] == "A" and status[3] == "+") or # Copied file.
(status[0] == " " and status[1] == "M")): # Property change.
args = []
if self.options.revision:
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
else:
# Don't change filename, it's needed later.
url = filename
args += ["-r", "BASE"]
cmd = ["svn"] + args + ["propget", "svn:mime-type", url]
mimetype, returncode = RunShellWithReturnCode(cmd)
if returncode:
# File does not exist in the requested revision.
# Reset mimetype, it contains an error message.
mimetype = ""
get_base = False
is_binary = bool(mimetype) and not mimetype.startswith("text/")
if status[0] == " ":
# Empty base content just to force an upload.
base_content = ""
elif is_binary:
if self.IsImage(filename):
get_base = True
if status[0] == "M":
if not self.rev_end:
new_content = self.ReadFile(filename)
else:
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_end)
new_content = RunShell(["svn", "cat", url],
universal_newlines=True, silent_ok=True)
else:
base_content = ""
else:
get_base = True
if get_base:
if is_binary:
universal_newlines = False
else:
universal_newlines = True
if self.rev_start:
# "svn cat -r REV delete_file.txt" doesn't work. cat requires
# the full URL with "@REV" appended instead of using "-r" option.
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
base_content = RunShell(["svn", "cat", url],
universal_newlines=universal_newlines,
silent_ok=True)
else:
base_content = RunShell(["svn", "cat", filename],
universal_newlines=universal_newlines,
silent_ok=True)
if not is_binary:
args = []
if self.rev_start:
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
else:
url = filename
args += ["-r", "BASE"]
cmd = ["svn"] + args + ["propget", "svn:keywords", url]
keywords, returncode = RunShellWithReturnCode(cmd)
if keywords and not returncode:
base_content = self._CollapseKeywords(base_content, keywords)
else:
StatusUpdate("svn status returned unexpected output: %s" % status)
sys.exit(1)
return base_content, new_content, is_binary, status[0:5]
class GitVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Git."""
def __init__(self, options):
super(GitVCS, self).__init__(options)
# Map of filename -> (hash before, hash after) of base file.
# Hashes for "no such file" are represented as None.
self.hashes = {}
# Map of new filename -> old filename for renames.
self.renames = {}
def GenerateDiff(self, extra_args):
# This is more complicated than svn's GenerateDiff because we must convert
# the diff output to include an svn-style "Index:" line as well as record
# the hashes of the files, so we can upload them along with our diff.
# Special hash used by git to indicate "no such content".
NULL_HASH = "0"*40
extra_args = extra_args[:]
if self.options.revision:
extra_args = [self.options.revision] + extra_args
# --no-ext-diff is broken in some versions of Git, so try to work around
# this by overriding the environment (but there is still a problem if the
# git config key "diff.external" is used).
env = os.environ.copy()
if 'GIT_EXTERNAL_DIFF' in env: del env['GIT_EXTERNAL_DIFF']
gitdiff = RunShell(["git", "diff", "--no-ext-diff", "--full-index", "-M"]
+ extra_args, env=env)
def IsFileNew(filename):
return filename in self.hashes and self.hashes[filename][0] is None
def AddSubversionPropertyChange(filename):
"""Add svn's property change information into the patch if given file is
new file.
We use Subversion's auto-props setting to retrieve its property.
See http://svnbook.red-bean.com/en/1.1/ch07.html#svn-ch-7-sect-1.3.2 for
Subversion's [auto-props] setting.
"""
if self.options.emulate_svn_auto_props and IsFileNew(filename):
svnprops = GetSubversionPropertyChanges(filename)
if svnprops:
svndiff.append("\n" + svnprops + "\n")
svndiff = []
filecount = 0
filename = None
for line in gitdiff.splitlines():
match = re.match(r"diff --git a/(.*) b/(.*)$", line)
if match:
# Add auto property here for previously seen file.
if filename is not None:
AddSubversionPropertyChange(filename)
filecount += 1
# Intentionally use the "after" filename so we can show renames.
filename = match.group(2)
svndiff.append("Index: %s\n" % filename)
if match.group(1) != match.group(2):
self.renames[match.group(2)] = match.group(1)
else:
# The "index" line in a git diff looks like this (long hashes elided):
# index 82c0d44..b2cee3f 100755
# We want to save the left hash, as that identifies the base file.
match = re.match(r"index (\w+)\.\.(\w+)", line)
if match:
before, after = (match.group(1), match.group(2))
if before == NULL_HASH:
before = None
if after == NULL_HASH:
after = None
self.hashes[filename] = (before, after)
svndiff.append(line + "\n")
if not filecount:
ErrorExit("No valid patches found in output from git diff")
# Add auto property for the last seen file.
assert filename is not None
AddSubversionPropertyChange(filename)
return "".join(svndiff)
def GetUnknownFiles(self):
status = RunShell(["git", "ls-files", "--exclude-standard", "--others"],
silent_ok=True)
return status.splitlines()
def GetFileContent(self, file_hash, is_binary):
"""Returns the content of a file identified by its git hash."""
data, retcode = RunShellWithReturnCode(["git", "show", file_hash],
universal_newlines=not is_binary)
if retcode:
ErrorExit("Got error status from 'git show %s'" % file_hash)
return data
def GetBaseFile(self, filename):
hash_before, hash_after = self.hashes.get(filename, (None,None))
base_content = None
new_content = None
is_binary = self.IsBinary(filename)
status = None
if filename in self.renames:
status = "A +" # Match svn attribute name for renames.
if filename not in self.hashes:
# If a rename doesn't change the content, we never get a hash.
base_content = RunShell(["git", "show", "HEAD:" + filename])
elif not hash_before:
status = "A"
base_content = ""
elif not hash_after:
status = "D"
else:
status = "M"
is_image = self.IsImage(filename)
# Grab the before/after content if we need it.
# We should include file contents if it's text or it's an image.
if not is_binary or is_image:
# Grab the base content if we don't have it already.
if base_content is None and hash_before:
base_content = self.GetFileContent(hash_before, is_binary)
# Only include the "after" file if it's an image; otherwise it
# it is reconstructed from the diff.
if is_image and hash_after:
new_content = self.GetFileContent(hash_after, is_binary)
return (base_content, new_content, is_binary, status)
class MercurialVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Mercurial."""
def __init__(self, options, repo_dir):
super(MercurialVCS, self).__init__(options)
# Absolute path to repository (we can be in a subdir)
self.repo_dir = os.path.normpath(repo_dir)
# Compute the subdir
cwd = os.path.normpath(os.getcwd())
assert cwd.startswith(self.repo_dir)
self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/")
if self.options.revision:
self.base_rev = self.options.revision
else:
self.base_rev = RunShell(["hg", "parent", "-q"]).split(':')[1].strip()
def _GetRelPath(self, filename):
"""Get relative path of a file according to the current directory,
given its logical path in the repo."""
assert filename.startswith(self.subdir), (filename, self.subdir)
return filename[len(self.subdir):].lstrip(r"\/")
def GenerateDiff(self, extra_args):
# If no file specified, restrict to the current subdir
extra_args = extra_args or ["."]
cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args
data = RunShell(cmd, silent_ok=True)
svndiff = []
filecount = 0
for line in data.splitlines():
m = re.match("diff --git a/(\S+) b/(\S+)", line)
if m:
# Modify the line to make it look as if it comes from svn diff.
# With this modification no changes on the server side are required
# to make upload.py work with Mercurial repos.
# NOTE: for proper handling of moved/copied files, we have to use
# the second filename.
filename = m.group(2)
svndiff.append("Index: %s" % filename)
svndiff.append("=" * 67)
filecount += 1
logging.info(line)
else:
svndiff.append(line)
if not filecount:
ErrorExit("No valid patches found in output from hg diff")
return "\n".join(svndiff) + "\n"
def GetUnknownFiles(self):
"""Return a list of files unknown to the VCS."""
args = []
status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."],
silent_ok=True)
unknown_files = []
for line in status.splitlines():
st, fn = line.split(" ", 1)
if st == "?":
unknown_files.append(fn)
return unknown_files
def GetBaseFile(self, filename):
# "hg status" and "hg cat" both take a path relative to the current subdir
# rather than to the repo root, but "hg diff" has given us the full path
# to the repo root.
base_content = ""
new_content = None
is_binary = False
oldrelpath = relpath = self._GetRelPath(filename)
# "hg status -C" returns two lines for moved/copied files, one otherwise
out = RunShell(["hg", "status", "-C", "--rev", self.base_rev, relpath])
out = out.splitlines()
# HACK: strip error message about missing file/directory if it isn't in
# the working copy
if out[0].startswith('%s: ' % relpath):
out = out[1:]
if len(out) > 1:
# Moved/copied => considered as modified, use old filename to
# retrieve base contents
oldrelpath = out[1].strip()
status = "M"
else:
status, _ = out[0].split(' ', 1)
if ":" in self.base_rev:
base_rev = self.base_rev.split(":", 1)[0]
else:
base_rev = self.base_rev
if status != "A":
base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath],
silent_ok=True)
is_binary = "\0" in base_content # Mercurial's heuristic
if status != "R":
new_content = open(relpath, "rb").read()
is_binary = is_binary or "\0" in new_content
if is_binary and base_content:
# Fetch again without converting newlines
base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath],
silent_ok=True, universal_newlines=False)
if not is_binary or not self.IsImage(relpath):
new_content = None
return base_content, new_content, is_binary, status
# NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync.
def SplitPatch(data):
"""Splits a patch into separate pieces for each file.
Args:
data: A string containing the output of svn diff.
Returns:
A list of 2-tuple (filename, text) where text is the svn diff output
pertaining to filename.
"""
patches = []
filename = None
diff = []
for line in data.splitlines(True):
new_filename = None
if line.startswith('Index:'):
unused, new_filename = line.split(':', 1)
new_filename = new_filename.strip()
elif line.startswith('Property changes on:'):
unused, temp_filename = line.split(':', 1)
# When a file is modified, paths use '/' between directories, however
# when a property is modified '\' is used on Windows. Make them the same
# otherwise the file shows up twice.
temp_filename = temp_filename.strip().replace('\\', '/')
if temp_filename != filename:
# File has property changes but no modifications, create a new diff.
new_filename = temp_filename
if new_filename:
if filename and diff:
patches.append((filename, ''.join(diff)))
filename = new_filename
diff = [line]
continue
if diff is not None:
diff.append(line)
if filename and diff:
patches.append((filename, ''.join(diff)))
return patches
def UploadSeparatePatches(issue, rpc_server, patchset, data, options):
"""Uploads a separate patch for each file in the diff output.
Returns a list of [patch_key, filename] for each file.
"""
patches = SplitPatch(data)
rv = []
for patch in patches:
if len(patch[1]) > MAX_UPLOAD_SIZE:
print ("Not uploading the patch for " + patch[0] +
" because the file is too large.")
continue
form_fields = [("filename", patch[0])]
if not options.download_base:
form_fields.append(("content_upload", "1"))
files = [("data", "data.diff", patch[1])]
ctype, body = EncodeMultipartFormData(form_fields, files)
url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
print "Uploading patch for " + patch[0]
response_body = rpc_server.Send(url, body, content_type=ctype)
lines = response_body.splitlines()
if not lines or lines[0] != "OK":
StatusUpdate(" --> %s" % response_body)
sys.exit(1)
rv.append([lines[1], patch[0]])
return rv
def GuessVCSName():
"""Helper to guess the version control system.
This examines the current directory, guesses which VersionControlSystem
we're using, and returns a string indicating which VCS is detected.
Returns:
A pair (vcs, output). vcs is a string indicating which VCS was detected
and is one of VCS_GIT, VCS_MERCURIAL, VCS_SUBVERSION, or VCS_UNKNOWN.
output is a string containing any interesting output from the vcs
detection routine, or None if there is nothing interesting.
"""
# Mercurial has a command to get the base directory of a repository
# Try running it, but don't die if we don't have hg installed.
# NOTE: we try Mercurial first as it can sit on top of an SVN working copy.
try:
out, returncode = RunShellWithReturnCode(["hg", "root"])
if returncode == 0:
return (VCS_MERCURIAL, out.strip())
except OSError, (errno, message):
if errno != 2: # ENOENT -- they don't have hg installed.
raise
# Subversion has a .svn in all working directories.
if os.path.isdir('.svn'):
logging.info("Guessed VCS = Subversion")
return (VCS_SUBVERSION, None)
# Git has a command to test if you're in a git tree.
# Try running it, but don't die if we don't have git installed.
try:
out, returncode = RunShellWithReturnCode(["git", "rev-parse",
"--is-inside-work-tree"])
if returncode == 0:
return (VCS_GIT, None)
except OSError, (errno, message):
if errno != 2: # ENOENT -- they don't have git installed.
raise
return (VCS_UNKNOWN, None)
def GuessVCS(options):
"""Helper to guess the version control system.
This verifies any user-specified VersionControlSystem (by command line
or environment variable). If the user didn't specify one, this examines
the current directory, guesses which VersionControlSystem we're using,
and returns an instance of the appropriate class. Exit with an error
if we can't figure it out.
Returns:
A VersionControlSystem instance. Exits if the VCS can't be guessed.
"""
vcs = options.vcs
if not vcs:
vcs = os.environ.get("CODEREVIEW_VCS")
if vcs:
v = VCS_ABBREVIATIONS.get(vcs.lower())
if v is None:
ErrorExit("Unknown version control system %r specified." % vcs)
(vcs, extra_output) = (v, None)
else:
(vcs, extra_output) = GuessVCSName()
if vcs == VCS_MERCURIAL:
if extra_output is None:
extra_output = RunShell(["hg", "root"]).strip()
return MercurialVCS(options, extra_output)
elif vcs == VCS_SUBVERSION:
return SubversionVCS(options)
elif vcs == VCS_GIT:
return GitVCS(options)
ErrorExit(("Could not guess version control system. "
"Are you in a working copy directory?"))
def CheckReviewer(reviewer):
"""Validate a reviewer -- either a nickname or an email addres.
Args:
reviewer: A nickname or an email address.
Calls ErrorExit() if it is an invalid email address.
"""
if "@" not in reviewer:
return # Assume nickname
parts = reviewer.split("@")
if len(parts) > 2:
ErrorExit("Invalid email address: %r" % reviewer)
assert len(parts) == 2
if "." not in parts[1]:
ErrorExit("Invalid email address: %r" % reviewer)
def LoadSubversionAutoProperties():
"""Returns the content of [auto-props] section of Subversion's config file as
a dictionary.
Returns:
A dictionary whose key-value pairs correspond to the [auto-props] section's
key-value pairs.
An empty dictionary is returned in the following cases:
- the config file doesn't exist, or
- 'enable-auto-props' is not set to a true-like value in [miscellany].
"""
# TODO(hayato): Windows users might use a different path for the configuration file.
subversion_config = os.path.expanduser("~/.subversion/config")
if not os.path.exists(subversion_config):
return {}
config = ConfigParser.ConfigParser()
config.read(subversion_config)
if (config.has_section("miscellany") and
config.has_option("miscellany", "enable-auto-props") and
config.getboolean("miscellany", "enable-auto-props") and
config.has_section("auto-props")):
props = {}
for file_pattern in config.options("auto-props"):
props[file_pattern] = ParseSubversionPropertyValues(
config.get("auto-props", file_pattern))
return props
else:
return {}
def ParseSubversionPropertyValues(props):
"""Parse the given property value which comes from [auto-props] section and
returns a list whose element is a (svn_prop_key, svn_prop_value) pair.
See the following doctest for example.
>>> ParseSubversionPropertyValues('svn:eol-style=LF')
[('svn:eol-style', 'LF')]
>>> ParseSubversionPropertyValues('svn:mime-type=image/jpeg')
[('svn:mime-type', 'image/jpeg')]
>>> ParseSubversionPropertyValues('svn:eol-style=LF;svn:executable')
[('svn:eol-style', 'LF'), ('svn:executable', '*')]
"""
key_value_pairs = []
for prop in props.split(";"):
key_value = prop.split("=")
assert len(key_value) <= 2
if len(key_value) == 1:
# If value is not given, use '*', following Subversion's convention.
key_value_pairs.append((key_value[0], "*"))
else:
key_value_pairs.append((key_value[0], key_value[1]))
return key_value_pairs
def GetSubversionPropertyChanges(filename):
"""Return a Subversion's 'Property changes on ...' string, which is used in
the patch file.
Args:
filename: filename whose property might be set by [auto-props] config.
Returns:
A string like 'Property changes on |filename| ...' if given |filename|
matches any entries in [auto-props] section. None, otherwise.
"""
global svn_auto_props_map
if svn_auto_props_map is None:
svn_auto_props_map = LoadSubversionAutoProperties()
all_props = []
for file_pattern, props in svn_auto_props_map.items():
if fnmatch.fnmatch(filename, file_pattern):
all_props.extend(props)
if all_props:
return FormatSubversionPropertyChanges(filename, all_props)
return None
def FormatSubversionPropertyChanges(filename, props):
"""Returns Subversion's 'Property changes on ...' strings using given filename
and properties.
Args:
filename: filename
props: A list whose element is a (svn_prop_key, svn_prop_value) pair.
Returns:
A string which can be used in the patch file for Subversion.
See the following doctest for example.
>>> print FormatSubversionPropertyChanges('foo.cc', [('svn:eol-style', 'LF')])
Property changes on: foo.cc
___________________________________________________________________
Added: svn:eol-style
+ LF
<BLANKLINE>
"""
prop_changes_lines = [
"Property changes on: %s" % filename,
"___________________________________________________________________"]
for key, value in props:
prop_changes_lines.append("Added: " + key)
prop_changes_lines.append(" + " + value)
return "\n".join(prop_changes_lines) + "\n"
def RealMain(argv, data=None):
"""The real main function.
Args:
argv: Command line arguments.
data: Diff contents. If None (default) the diff is generated by
the VersionControlSystem implementation returned by GuessVCS().
Returns:
A 2-tuple (issue id, patchset id).
The patchset id is None if the base files are not uploaded by this
script (applies only to SVN checkouts).
"""
logging.basicConfig(format=("%(asctime).19s %(levelname)s %(filename)s:"
"%(lineno)s %(message)s "))
os.environ['LC_ALL'] = 'C'
options, args = parser.parse_args(argv[1:])
global verbosity
verbosity = options.verbose
if verbosity >= 3:
logging.getLogger().setLevel(logging.DEBUG)
elif verbosity >= 2:
logging.getLogger().setLevel(logging.INFO)
vcs = GuessVCS(options)
base = options.base_url
if isinstance(vcs, SubversionVCS):
# Guessing the base field is only supported for Subversion.
# Note: Fetching base files may become deprecated in future releases.
guessed_base = vcs.GuessBase(options.download_base)
if base:
if guessed_base and base != guessed_base:
print "Using base URL \"%s\" from --base_url instead of \"%s\"" % \
(base, guessed_base)
else:
base = guessed_base
  if not base and not options.download_base:
options.download_base = True
logging.info("Enabled upload of base file")
if not options.assume_yes:
vcs.CheckForUnknownFiles()
if data is None:
data = vcs.GenerateDiff(args)
files = vcs.GetBaseFiles(data)
if verbosity >= 1:
print "Upload server:", options.server, "(change with -s/--server)"
if options.issue:
prompt = "Message describing this patch set: "
else:
prompt = "New issue subject: "
message = options.message or raw_input(prompt).strip()
if not message:
ErrorExit("A non-empty message is required")
rpc_server = GetRpcServer(options)
form_fields = [("subject", message)]
if base:
form_fields.append(("base", base))
if options.issue:
form_fields.append(("issue", str(options.issue)))
if options.email:
form_fields.append(("user", options.email))
if options.reviewers:
for reviewer in options.reviewers.split(','):
CheckReviewer(reviewer)
form_fields.append(("reviewers", options.reviewers))
if options.cc:
for cc in options.cc.split(','):
CheckReviewer(cc)
form_fields.append(("cc", options.cc))
description = options.description
if options.description_file:
if options.description:
ErrorExit("Can't specify description and description_file")
    with open(options.description_file, 'r') as f:
      description = f.read()
if description:
form_fields.append(("description", description))
  # Send a hash of all the base files so the server can determine if a copy
# already exists in an earlier patchset.
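  # The hashes below are packed as "<md5hex>:<filename>" pairs joined by "|".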
base_hashes = ""
for file, info in files.iteritems():
    if info[0] is not None:
checksum = md5(info[0]).hexdigest()
if base_hashes:
base_hashes += "|"
base_hashes += checksum + ":" + file
form_fields.append(("base_hashes", base_hashes))
if options.private:
if options.issue:
print "Warning: Private flag ignored when updating an existing issue."
else:
form_fields.append(("private", "1"))
# If we're uploading base files, don't send the email before the uploads, so
# that it contains the file status.
if options.send_mail and options.download_base:
form_fields.append(("send_mail", "1"))
if not options.download_base:
form_fields.append(("content_upload", "1"))
if len(data) > MAX_UPLOAD_SIZE:
print "Patch is large, so uploading file patches separately."
uploaded_diff_file = []
form_fields.append(("separate_patches", "1"))
else:
uploaded_diff_file = [("data", "data.diff", data)]
ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
response_body = rpc_server.Send("/upload", body, content_type=ctype)
patchset = None
if not options.download_base or not uploaded_diff_file:
lines = response_body.splitlines()
if len(lines) >= 2:
msg = lines[0]
patchset = lines[1].strip()
patches = [x.split(" ", 1) for x in lines[2:]]
else:
msg = response_body
else:
msg = response_body
StatusUpdate(msg)
if not response_body.startswith("Issue created.") and \
not response_body.startswith("Issue updated."):
sys.exit(0)
issue = msg[msg.rfind("/")+1:]
if not uploaded_diff_file:
result = UploadSeparatePatches(issue, rpc_server, patchset, data, options)
if not options.download_base:
patches = result
if not options.download_base:
vcs.UploadBaseFiles(issue, rpc_server, patches, patchset, options, files)
if options.send_mail:
rpc_server.Send("/" + issue + "/mail", payload="")
return issue, patchset
def main():
try:
RealMain(sys.argv)
except KeyboardInterrupt:
print
StatusUpdate("Interrupted.")
sys.exit(1)
if __name__ == "__main__":
main()
|
michaelgallacher/intellij-community
|
refs/heads/master
|
python/testData/completion/keywordArgumentsForImplicitCall.after.py
|
83
|
class C:
def xyzzy(self, shazam):
pass
def foo(param):
param.xyzzy(shazam=)
|
jn7163/django
|
refs/heads/master
|
django/db/backends/base/schema.py
|
339
|
import hashlib
import logging
from django.db.backends.utils import truncate_name
from django.db.transaction import atomic
from django.utils import six
from django.utils.encoding import force_bytes
logger = logging.getLogger('django.db.backends.schema')
def _related_non_m2m_objects(old_field, new_field):
# Filters out m2m objects from reverse relations.
# Returns (old_relation, new_relation) tuples.
return zip(
(obj for obj in old_field.model._meta.related_objects if not obj.field.many_to_many),
(obj for obj in new_field.model._meta.related_objects if not obj.field.many_to_many)
)
class BaseDatabaseSchemaEditor(object):
"""
This class (and its subclasses) are responsible for emitting schema-changing
statements to the databases - model creation/removal/alteration, field
renaming, index fiddling, and so on.
It is intended to eventually completely replace DatabaseCreation.
    This class should be used by creating an instance for each set of schema
    changes (e.g. a migration file) and using it as a context manager; the
    relevant actions are performed inside the block, and deferred statements
    are executed when it exits. This is necessary to allow things like
    circular foreign key references - FKs will only be created once
    __exit__() is called.
"""
# Overrideable SQL templates
sql_create_table = "CREATE TABLE %(table)s (%(definition)s)"
sql_rename_table = "ALTER TABLE %(old_table)s RENAME TO %(new_table)s"
sql_retablespace_table = "ALTER TABLE %(table)s SET TABLESPACE %(new_tablespace)s"
sql_delete_table = "DROP TABLE %(table)s CASCADE"
sql_create_column = "ALTER TABLE %(table)s ADD COLUMN %(column)s %(definition)s"
sql_alter_column = "ALTER TABLE %(table)s %(changes)s"
sql_alter_column_type = "ALTER COLUMN %(column)s TYPE %(type)s"
sql_alter_column_null = "ALTER COLUMN %(column)s DROP NOT NULL"
sql_alter_column_not_null = "ALTER COLUMN %(column)s SET NOT NULL"
sql_alter_column_default = "ALTER COLUMN %(column)s SET DEFAULT %(default)s"
sql_alter_column_no_default = "ALTER COLUMN %(column)s DROP DEFAULT"
sql_delete_column = "ALTER TABLE %(table)s DROP COLUMN %(column)s CASCADE"
sql_rename_column = "ALTER TABLE %(table)s RENAME COLUMN %(old_column)s TO %(new_column)s"
sql_update_with_default = "UPDATE %(table)s SET %(column)s = %(default)s WHERE %(column)s IS NULL"
sql_create_check = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s CHECK (%(check)s)"
sql_delete_check = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
sql_create_unique = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s UNIQUE (%(columns)s)"
sql_delete_unique = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
sql_create_fk = (
"ALTER TABLE %(table)s ADD CONSTRAINT %(name)s FOREIGN KEY (%(column)s) "
"REFERENCES %(to_table)s (%(to_column)s) DEFERRABLE INITIALLY DEFERRED"
)
sql_create_inline_fk = None
sql_delete_fk = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
sql_create_index = "CREATE INDEX %(name)s ON %(table)s (%(columns)s)%(extra)s"
sql_delete_index = "DROP INDEX %(name)s"
sql_create_pk = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s PRIMARY KEY (%(columns)s)"
sql_delete_pk = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
def __init__(self, connection, collect_sql=False):
self.connection = connection
self.collect_sql = collect_sql
if self.collect_sql:
self.collected_sql = []
# State-managing methods
def __enter__(self):
self.deferred_sql = []
if self.connection.features.can_rollback_ddl:
self.atomic = atomic(self.connection.alias)
self.atomic.__enter__()
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
for sql in self.deferred_sql:
self.execute(sql)
if self.connection.features.can_rollback_ddl:
self.atomic.__exit__(exc_type, exc_value, traceback)
# Core utility functions
def execute(self, sql, params=[]):
"""
Executes the given SQL statement, with optional parameters.
"""
# Log the command we're running, then run it
logger.debug("%s; (params %r)" % (sql, params))
if self.collect_sql:
ending = "" if sql.endswith(";") else ";"
if params is not None:
self.collected_sql.append((sql % tuple(map(self.quote_value, params))) + ending)
else:
self.collected_sql.append(sql + ending)
else:
with self.connection.cursor() as cursor:
cursor.execute(sql, params)
def quote_name(self, name):
return self.connection.ops.quote_name(name)
@classmethod
def _digest(cls, *args):
"""
Generates a 32-bit digest of a set of arguments that can be used to
shorten identifying names.
"""
h = hashlib.md5()
for arg in args:
h.update(force_bytes(arg))
return h.hexdigest()[:8]
# Field <-> database mapping functions
def column_sql(self, model, field, include_default=False):
"""
Takes a field and returns its column definition.
The field must already have had set_attributes_from_name called.
"""
# Get the column's type and use that as the basis of the SQL
db_params = field.db_parameters(connection=self.connection)
sql = db_params['type']
params = []
# Check for fields that aren't actually columns (e.g. M2M)
if sql is None:
return None, None
# Work out nullability
null = field.null
# If we were told to include a default value, do so
include_default = include_default and not self.skip_default(field)
if include_default:
default_value = self.effective_default(field)
if default_value is not None:
if self.connection.features.requires_literal_defaults:
# Some databases can't take defaults as a parameter (oracle)
# If this is the case, the individual schema backend should
# implement prepare_default
sql += " DEFAULT %s" % self.prepare_default(default_value)
else:
sql += " DEFAULT %s"
params += [default_value]
# Oracle treats the empty string ('') as null, so coerce the null
# option whenever '' is a possible value.
if (field.empty_strings_allowed and not field.primary_key and
self.connection.features.interprets_empty_strings_as_nulls):
null = True
if null and not self.connection.features.implied_column_null:
sql += " NULL"
elif not null:
sql += " NOT NULL"
# Primary key/unique outputs
if field.primary_key:
sql += " PRIMARY KEY"
elif field.unique:
sql += " UNIQUE"
# Optionally add the tablespace if it's an implicitly indexed column
tablespace = field.db_tablespace or model._meta.db_tablespace
if tablespace and self.connection.features.supports_tablespaces and field.unique:
sql += " %s" % self.connection.ops.tablespace_sql(tablespace, inline=True)
# Return the sql
return sql, params
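    # Illustratively (backend-dependent), column_sql() for a nullable
    # CharField(max_length=100) might return ("varchar(100) NULL", []),
    # with any default value handed back through the params list.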
def skip_default(self, field):
"""
Some backends don't accept default values for certain columns types
(i.e. MySQL longtext and longblob).
"""
return False
def prepare_default(self, value):
"""
Only used for backends which have requires_literal_defaults feature
"""
raise NotImplementedError(
'subclasses of BaseDatabaseSchemaEditor for backends which have '
'requires_literal_defaults must provide a prepare_default() method'
)
def effective_default(self, field):
"""
Returns a field's effective database default value
"""
if field.has_default():
default = field.get_default()
elif not field.null and field.blank and field.empty_strings_allowed:
if field.get_internal_type() == "BinaryField":
default = six.binary_type()
else:
default = six.text_type()
else:
default = None
# If it's a callable, call it
if six.callable(default):
default = default()
# Run it through the field's get_db_prep_save method so we can send it
# to the database.
default = field.get_db_prep_save(default, self.connection)
return default
def quote_value(self, value):
"""
Returns a quoted version of the value so it's safe to use in an SQL
string. This is not safe against injection from user code; it is
intended only for use in making SQL scripts or preparing default values
for particularly tricky backends (defaults are not user-defined, though,
so this is safe).
"""
raise NotImplementedError()
# Actions
def create_model(self, model):
"""
Takes a model and creates a table for it in the database.
Will also create any accompanying indexes or unique constraints.
"""
# Create column SQL, add FK deferreds if needed
column_sqls = []
params = []
for field in model._meta.local_fields:
# SQL
definition, extra_params = self.column_sql(model, field)
if definition is None:
continue
# Check constraints can go on the column SQL here
db_params = field.db_parameters(connection=self.connection)
if db_params['check']:
definition += " CHECK (%s)" % db_params['check']
# Autoincrement SQL (for backends with inline variant)
col_type_suffix = field.db_type_suffix(connection=self.connection)
if col_type_suffix:
definition += " %s" % col_type_suffix
params.extend(extra_params)
# FK
if field.remote_field and field.db_constraint:
to_table = field.remote_field.model._meta.db_table
to_column = field.remote_field.model._meta.get_field(field.remote_field.field_name).column
if self.connection.features.supports_foreign_keys:
self.deferred_sql.append(self._create_fk_sql(model, field, "_fk_%(to_table)s_%(to_column)s"))
elif self.sql_create_inline_fk:
definition += " " + self.sql_create_inline_fk % {
"to_table": self.quote_name(to_table),
"to_column": self.quote_name(to_column),
}
# Add the SQL to our big list
column_sqls.append("%s %s" % (
self.quote_name(field.column),
definition,
))
# Autoincrement SQL (for backends with post table definition variant)
if field.get_internal_type() == "AutoField":
autoinc_sql = self.connection.ops.autoinc_sql(model._meta.db_table, field.column)
if autoinc_sql:
self.deferred_sql.extend(autoinc_sql)
# Add any unique_togethers (always deferred, as some fields might be
# created afterwards, like geometry fields with some backends)
for fields in model._meta.unique_together:
columns = [model._meta.get_field(field).column for field in fields]
self.deferred_sql.append(self._create_unique_sql(model, columns))
# Make the table
sql = self.sql_create_table % {
"table": self.quote_name(model._meta.db_table),
"definition": ", ".join(column_sqls)
}
if model._meta.db_tablespace:
tablespace_sql = self.connection.ops.tablespace_sql(model._meta.db_tablespace)
if tablespace_sql:
sql += ' ' + tablespace_sql
        # Prevent using [] as params, in case a literal '%' is used in the definition
self.execute(sql, params or None)
# Add any field index and index_together's (deferred as SQLite3 _remake_table needs it)
self.deferred_sql.extend(self._model_indexes_sql(model))
# Make M2M tables
for field in model._meta.local_many_to_many:
if field.remote_field.through._meta.auto_created:
self.create_model(field.remote_field.through)
def delete_model(self, model):
"""
Deletes a model from the database.
"""
# Handle auto-created intermediary models
for field in model._meta.local_many_to_many:
if field.remote_field.through._meta.auto_created:
self.delete_model(field.remote_field.through)
# Delete the table
self.execute(self.sql_delete_table % {
"table": self.quote_name(model._meta.db_table),
})
def alter_unique_together(self, model, old_unique_together, new_unique_together):
"""
Deals with a model changing its unique_together.
Note: The input unique_togethers must be doubly-nested, not the single-
nested ["foo", "bar"] format.
"""
olds = set(tuple(fields) for fields in old_unique_together)
news = set(tuple(fields) for fields in new_unique_together)
# Deleted uniques
for fields in olds.difference(news):
self._delete_composed_index(model, fields, {'unique': True}, self.sql_delete_unique)
# Created uniques
for fields in news.difference(olds):
columns = [model._meta.get_field(field).column for field in fields]
self.execute(self._create_unique_sql(model, columns))
def alter_index_together(self, model, old_index_together, new_index_together):
"""
Deals with a model changing its index_together.
Note: The input index_togethers must be doubly-nested, not the single-
nested ["foo", "bar"] format.
"""
olds = set(tuple(fields) for fields in old_index_together)
news = set(tuple(fields) for fields in new_index_together)
# Deleted indexes
for fields in olds.difference(news):
self._delete_composed_index(model, fields, {'index': True}, self.sql_delete_index)
# Created indexes
for field_names in news.difference(olds):
fields = [model._meta.get_field(field) for field in field_names]
self.execute(self._create_index_sql(model, fields, suffix="_idx"))
def _delete_composed_index(self, model, fields, constraint_kwargs, sql):
columns = [model._meta.get_field(field).column for field in fields]
constraint_names = self._constraint_names(model, columns, **constraint_kwargs)
if len(constraint_names) != 1:
raise ValueError("Found wrong number (%s) of constraints for %s(%s)" % (
len(constraint_names),
model._meta.db_table,
", ".join(columns),
))
self.execute(self._delete_constraint_sql(sql, model, constraint_names[0]))
def alter_db_table(self, model, old_db_table, new_db_table):
"""
Renames the table a model points to.
"""
if old_db_table == new_db_table:
return
self.execute(self.sql_rename_table % {
"old_table": self.quote_name(old_db_table),
"new_table": self.quote_name(new_db_table),
})
def alter_db_tablespace(self, model, old_db_tablespace, new_db_tablespace):
"""
Moves a model's table between tablespaces
"""
self.execute(self.sql_retablespace_table % {
"table": self.quote_name(model._meta.db_table),
"old_tablespace": self.quote_name(old_db_tablespace),
"new_tablespace": self.quote_name(new_db_tablespace),
})
def add_field(self, model, field):
"""
Creates a field on a model.
Usually involves adding a column, but may involve adding a
table instead (for M2M fields)
"""
# Special-case implicit M2M tables
if field.many_to_many and field.remote_field.through._meta.auto_created:
return self.create_model(field.remote_field.through)
# Get the column's definition
definition, params = self.column_sql(model, field, include_default=True)
# It might not actually have a column behind it
if definition is None:
return
# Check constraints can go on the column SQL here
db_params = field.db_parameters(connection=self.connection)
if db_params['check']:
definition += " CHECK (%s)" % db_params['check']
# Build the SQL and run it
sql = self.sql_create_column % {
"table": self.quote_name(model._meta.db_table),
"column": self.quote_name(field.column),
"definition": definition,
}
self.execute(sql, params)
# Drop the default if we need to
# (Django usually does not use in-database defaults)
if not self.skip_default(field) and field.default is not None:
sql = self.sql_alter_column % {
"table": self.quote_name(model._meta.db_table),
"changes": self.sql_alter_column_no_default % {
"column": self.quote_name(field.column),
}
}
self.execute(sql)
# Add an index, if required
if field.db_index and not field.unique:
self.deferred_sql.append(self._create_index_sql(model, [field]))
# Add any FK constraints later
if field.remote_field and self.connection.features.supports_foreign_keys and field.db_constraint:
self.deferred_sql.append(self._create_fk_sql(model, field, "_fk_%(to_table)s_%(to_column)s"))
# Reset connection if required
if self.connection.features.connection_persists_old_columns:
self.connection.close()
def remove_field(self, model, field):
"""
Removes a field from a model. Usually involves deleting a column,
but for M2Ms may involve deleting a table.
"""
# Special-case implicit M2M tables
if field.many_to_many and field.remote_field.through._meta.auto_created:
return self.delete_model(field.remote_field.through)
# It might not actually have a column behind it
if field.db_parameters(connection=self.connection)['type'] is None:
return
# Drop any FK constraints, MySQL requires explicit deletion
if field.remote_field:
fk_names = self._constraint_names(model, [field.column], foreign_key=True)
for fk_name in fk_names:
self.execute(self._delete_constraint_sql(self.sql_delete_fk, model, fk_name))
# Delete the column
sql = self.sql_delete_column % {
"table": self.quote_name(model._meta.db_table),
"column": self.quote_name(field.column),
}
self.execute(sql)
# Reset connection if required
if self.connection.features.connection_persists_old_columns:
self.connection.close()
def alter_field(self, model, old_field, new_field, strict=False):
"""
Allows a field's type, uniqueness, nullability, default, column,
constraints etc. to be modified.
Requires a copy of the old field as well so we can only perform
changes that are required.
If strict is true, raises errors if the old column does not match old_field precisely.
"""
# Ensure this field is even column-based
old_db_params = old_field.db_parameters(connection=self.connection)
old_type = old_db_params['type']
new_db_params = new_field.db_parameters(connection=self.connection)
new_type = new_db_params['type']
if ((old_type is None and old_field.remote_field is None) or
(new_type is None and new_field.remote_field is None)):
raise ValueError(
"Cannot alter field %s into %s - they do not properly define "
"db_type (are you using a badly-written custom field?)" %
(old_field, new_field),
)
elif old_type is None and new_type is None and (
old_field.remote_field.through and new_field.remote_field.through and
old_field.remote_field.through._meta.auto_created and
new_field.remote_field.through._meta.auto_created):
return self._alter_many_to_many(model, old_field, new_field, strict)
elif old_type is None and new_type is None and (
old_field.remote_field.through and new_field.remote_field.through and
not old_field.remote_field.through._meta.auto_created and
not new_field.remote_field.through._meta.auto_created):
# Both sides have through models; this is a no-op.
return
elif old_type is None or new_type is None:
raise ValueError(
"Cannot alter field %s into %s - they are not compatible types "
"(you cannot alter to or from M2M fields, or add or remove "
"through= on M2M fields)" % (old_field, new_field)
)
self._alter_field(model, old_field, new_field, old_type, new_type,
old_db_params, new_db_params, strict)
def _alter_field(self, model, old_field, new_field, old_type, new_type,
old_db_params, new_db_params, strict=False):
"""Actually perform a "physical" (non-ManyToMany) field update."""
# Drop any FK constraints, we'll remake them later
fks_dropped = set()
if old_field.remote_field and old_field.db_constraint:
fk_names = self._constraint_names(model, [old_field.column], foreign_key=True)
if strict and len(fk_names) != 1:
raise ValueError("Found wrong number (%s) of foreign key constraints for %s.%s" % (
len(fk_names),
model._meta.db_table,
old_field.column,
))
for fk_name in fk_names:
fks_dropped.add((old_field.column,))
self.execute(self._delete_constraint_sql(self.sql_delete_fk, model, fk_name))
# Has unique been removed?
if old_field.unique and (not new_field.unique or (not old_field.primary_key and new_field.primary_key)):
# Find the unique constraint for this field
constraint_names = self._constraint_names(model, [old_field.column], unique=True)
if strict and len(constraint_names) != 1:
raise ValueError("Found wrong number (%s) of unique constraints for %s.%s" % (
len(constraint_names),
model._meta.db_table,
old_field.column,
))
for constraint_name in constraint_names:
self.execute(self._delete_constraint_sql(self.sql_delete_unique, model, constraint_name))
# Drop incoming FK constraints if we're a primary key and things are going
# to change.
if old_field.primary_key and new_field.primary_key and old_type != new_type:
            # '_meta.related_objects' also contains M2M reverse fields; these
            # are filtered out by _related_non_m2m_objects()
for _old_rel, new_rel in _related_non_m2m_objects(old_field, new_field):
rel_fk_names = self._constraint_names(
new_rel.related_model, [new_rel.field.column], foreign_key=True
)
for fk_name in rel_fk_names:
self.execute(self._delete_constraint_sql(self.sql_delete_fk, new_rel.related_model, fk_name))
# Removed an index? (no strict check, as multiple indexes are possible)
if (old_field.db_index and not new_field.db_index and
not old_field.unique and not
(not new_field.unique and old_field.unique)):
# Find the index for this field
index_names = self._constraint_names(model, [old_field.column], index=True)
for index_name in index_names:
self.execute(self._delete_constraint_sql(self.sql_delete_index, model, index_name))
# Change check constraints?
if old_db_params['check'] != new_db_params['check'] and old_db_params['check']:
constraint_names = self._constraint_names(model, [old_field.column], check=True)
if strict and len(constraint_names) != 1:
raise ValueError("Found wrong number (%s) of check constraints for %s.%s" % (
len(constraint_names),
model._meta.db_table,
old_field.column,
))
for constraint_name in constraint_names:
self.execute(self._delete_constraint_sql(self.sql_delete_check, model, constraint_name))
# Have they renamed the column?
if old_field.column != new_field.column:
self.execute(self._rename_field_sql(model._meta.db_table, old_field, new_field, new_type))
# Next, start accumulating actions to do
actions = []
null_actions = []
post_actions = []
# Type change?
if old_type != new_type:
fragment, other_actions = self._alter_column_type_sql(
model._meta.db_table, old_field, new_field, new_type
)
actions.append(fragment)
post_actions.extend(other_actions)
# When changing a column NULL constraint to NOT NULL with a given
# default value, we need to perform 4 steps:
# 1. Add a default for new incoming writes
# 2. Update existing NULL rows with new default
# 3. Replace NULL constraint with NOT NULL
# 4. Drop the default again.
# Default change?
old_default = self.effective_default(old_field)
new_default = self.effective_default(new_field)
needs_database_default = (
old_default != new_default and
new_default is not None and
not self.skip_default(new_field)
)
if needs_database_default:
if self.connection.features.requires_literal_defaults:
# Some databases can't take defaults as a parameter (oracle)
# If this is the case, the individual schema backend should
# implement prepare_default
actions.append((
self.sql_alter_column_default % {
"column": self.quote_name(new_field.column),
"default": self.prepare_default(new_default),
},
[],
))
else:
actions.append((
self.sql_alter_column_default % {
"column": self.quote_name(new_field.column),
"default": "%s",
},
[new_default],
))
# Nullability change?
if old_field.null != new_field.null:
if (self.connection.features.interprets_empty_strings_as_nulls and
new_field.get_internal_type() in ("CharField", "TextField")):
# The field is nullable in the database anyway, leave it alone
pass
elif new_field.null:
null_actions.append((
self.sql_alter_column_null % {
"column": self.quote_name(new_field.column),
"type": new_type,
},
[],
))
else:
null_actions.append((
self.sql_alter_column_not_null % {
"column": self.quote_name(new_field.column),
"type": new_type,
},
[],
))
# Only if we have a default and there is a change from NULL to NOT NULL
four_way_default_alteration = (
new_field.has_default() and
(old_field.null and not new_field.null)
)
if actions or null_actions:
if not four_way_default_alteration:
# If we don't have to do a 4-way default alteration we can
# directly run a (NOT) NULL alteration
actions = actions + null_actions
# Combine actions together if we can (e.g. postgres)
if self.connection.features.supports_combined_alters and actions:
sql, params = tuple(zip(*actions))
actions = [(", ".join(sql), sum(params, []))]
# Apply those actions
for sql, params in actions:
self.execute(
self.sql_alter_column % {
"table": self.quote_name(model._meta.db_table),
"changes": sql,
},
params,
)
if four_way_default_alteration:
# Update existing rows with default value
self.execute(
self.sql_update_with_default % {
"table": self.quote_name(model._meta.db_table),
"column": self.quote_name(new_field.column),
"default": "%s",
},
[new_default],
)
# Since we didn't run a NOT NULL change before we need to do it
# now
for sql, params in null_actions:
self.execute(
self.sql_alter_column % {
"table": self.quote_name(model._meta.db_table),
"changes": sql,
},
params,
)
if post_actions:
for sql, params in post_actions:
self.execute(sql, params)
# Added a unique?
if (not old_field.unique and new_field.unique) or (
old_field.primary_key and not new_field.primary_key and new_field.unique
):
self.execute(self._create_unique_sql(model, [new_field.column]))
# Added an index?
if (not old_field.db_index and new_field.db_index and
not new_field.unique and not
(not old_field.unique and new_field.unique)):
self.execute(self._create_index_sql(model, [new_field], suffix="_uniq"))
# Type alteration on primary key? Then we need to alter the column
# referring to us.
rels_to_update = []
if old_field.primary_key and new_field.primary_key and old_type != new_type:
rels_to_update.extend(_related_non_m2m_objects(old_field, new_field))
# Changed to become primary key?
# Note that we don't detect unsetting of a PK, as we assume another field
# will always come along and replace it.
if not old_field.primary_key and new_field.primary_key:
# First, drop the old PK
constraint_names = self._constraint_names(model, primary_key=True)
if strict and len(constraint_names) != 1:
raise ValueError("Found wrong number (%s) of PK constraints for %s" % (
len(constraint_names),
model._meta.db_table,
))
for constraint_name in constraint_names:
self.execute(self._delete_constraint_sql(self.sql_delete_pk, model, constraint_name))
# Make the new one
self.execute(
self.sql_create_pk % {
"table": self.quote_name(model._meta.db_table),
"name": self.quote_name(self._create_index_name(model, [new_field.column], suffix="_pk")),
"columns": self.quote_name(new_field.column),
}
)
# Update all referencing columns
rels_to_update.extend(_related_non_m2m_objects(old_field, new_field))
# Handle our type alters on the other end of rels from the PK stuff above
for old_rel, new_rel in rels_to_update:
rel_db_params = new_rel.field.db_parameters(connection=self.connection)
rel_type = rel_db_params['type']
fragment, other_actions = self._alter_column_type_sql(
new_rel.related_model._meta.db_table, old_rel.field, new_rel.field, rel_type
)
self.execute(
self.sql_alter_column % {
"table": self.quote_name(new_rel.related_model._meta.db_table),
"changes": fragment[0],
},
fragment[1],
)
for sql, params in other_actions:
self.execute(sql, params)
# Does it have a foreign key?
if (new_field.remote_field and
(fks_dropped or not old_field.remote_field or not old_field.db_constraint) and
new_field.db_constraint):
self.execute(self._create_fk_sql(model, new_field, "_fk_%(to_table)s_%(to_column)s"))
# Rebuild FKs that pointed to us if we previously had to drop them
if old_field.primary_key and new_field.primary_key and old_type != new_type:
for rel in new_field.model._meta.related_objects:
if not rel.many_to_many:
self.execute(self._create_fk_sql(rel.related_model, rel.field, "_fk"))
# Does it have check constraints we need to add?
if old_db_params['check'] != new_db_params['check'] and new_db_params['check']:
self.execute(
self.sql_create_check % {
"table": self.quote_name(model._meta.db_table),
"name": self.quote_name(self._create_index_name(model, [new_field.column], suffix="_check")),
"column": self.quote_name(new_field.column),
"check": new_db_params['check'],
}
)
# Drop the default if we need to
# (Django usually does not use in-database defaults)
if needs_database_default:
sql = self.sql_alter_column % {
"table": self.quote_name(model._meta.db_table),
"changes": self.sql_alter_column_no_default % {
"column": self.quote_name(new_field.column),
}
}
self.execute(sql)
# Reset connection if required
if self.connection.features.connection_persists_old_columns:
self.connection.close()
def _alter_column_type_sql(self, table, old_field, new_field, new_type):
"""
Hook to specialize column type alteration for different backends,
for cases when a creation type is different to an alteration type
(e.g. SERIAL in PostgreSQL, PostGIS fields).
Should return two things; an SQL fragment of (sql, params) to insert
into an ALTER TABLE statement, and a list of extra (sql, params) tuples
to run once the field is altered.
"""
return (
(
self.sql_alter_column_type % {
"column": self.quote_name(new_field.column),
"type": new_type,
},
[],
),
[],
)
def _alter_many_to_many(self, model, old_field, new_field, strict):
"""
Alters M2Ms to repoint their to= endpoints.
"""
# Rename the through table
if old_field.remote_field.through._meta.db_table != new_field.remote_field.through._meta.db_table:
self.alter_db_table(old_field.remote_field.through, old_field.remote_field.through._meta.db_table,
new_field.remote_field.through._meta.db_table)
# Repoint the FK to the other side
self.alter_field(
new_field.remote_field.through,
# We need the field that points to the target model, so we can tell alter_field to change it -
# this is m2m_reverse_field_name() (as opposed to m2m_field_name, which points to our model)
old_field.remote_field.through._meta.get_field(old_field.m2m_reverse_field_name()),
new_field.remote_field.through._meta.get_field(new_field.m2m_reverse_field_name()),
)
self.alter_field(
new_field.remote_field.through,
# for self-referential models we need to alter field from the other end too
old_field.remote_field.through._meta.get_field(old_field.m2m_field_name()),
new_field.remote_field.through._meta.get_field(new_field.m2m_field_name()),
)
def _create_index_name(self, model, column_names, suffix=""):
"""
Generates a unique name for an index/unique constraint.
"""
# If there is just one column in the index, use a default algorithm from Django
if len(column_names) == 1 and not suffix:
return truncate_name(
'%s_%s' % (model._meta.db_table, self._digest(column_names[0])),
self.connection.ops.max_name_length()
)
# Else generate the name for the index using a different algorithm
table_name = model._meta.db_table.replace('"', '').replace('.', '_')
index_unique_name = '_%s' % self._digest(table_name, *column_names)
max_length = self.connection.ops.max_name_length() or 200
# If the index name is too long, truncate it
index_name = ('%s_%s%s%s' % (
table_name, column_names[0], index_unique_name, suffix,
)).replace('"', '').replace('.', '_')
if len(index_name) > max_length:
part = ('_%s%s%s' % (column_names[0], index_unique_name, suffix))
index_name = '%s%s' % (table_name[:(max_length - len(part))], part)
# It shouldn't start with an underscore (Oracle hates this)
if index_name[0] == "_":
index_name = index_name[1:]
# If it's STILL too long, just hash it down
if len(index_name) > max_length:
index_name = hashlib.md5(force_bytes(index_name)).hexdigest()[:max_length]
# It can't start with a number on Oracle, so prepend D if we need to
if index_name[0].isdigit():
index_name = "D%s" % index_name[:-1]
return index_name
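    # Illustrative (assumed digest) result: column ("author_id",) on table
    # "app_book" with suffix "_fk" would come out roughly as
    # "app_book_author_id_1a2b3c4d_fk", truncated or hashed further if it
    # exceeds max_name_length().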
def _create_index_sql(self, model, fields, suffix="", sql=None):
"""
Return the SQL statement to create the index for one or several fields.
`sql` can be specified if the syntax differs from the standard (GIS
indexes, ...).
"""
if len(fields) == 1 and fields[0].db_tablespace:
tablespace_sql = self.connection.ops.tablespace_sql(fields[0].db_tablespace)
elif model._meta.db_tablespace:
tablespace_sql = self.connection.ops.tablespace_sql(model._meta.db_tablespace)
else:
tablespace_sql = ""
if tablespace_sql:
tablespace_sql = " " + tablespace_sql
columns = [field.column for field in fields]
sql_create_index = sql or self.sql_create_index
return sql_create_index % {
"table": self.quote_name(model._meta.db_table),
"name": self.quote_name(self._create_index_name(model, columns, suffix=suffix)),
"columns": ", ".join(self.quote_name(column) for column in columns),
"extra": tablespace_sql,
}
def _model_indexes_sql(self, model):
"""
Return all index SQL statements (field indexes, index_together) for the
specified model, as a list.
"""
if not model._meta.managed or model._meta.proxy or model._meta.swapped:
return []
output = []
for field in model._meta.local_fields:
if field.db_index and not field.unique:
output.append(self._create_index_sql(model, [field], suffix=""))
for field_names in model._meta.index_together:
fields = [model._meta.get_field(field) for field in field_names]
output.append(self._create_index_sql(model, fields, suffix="_idx"))
return output
def _rename_field_sql(self, table, old_field, new_field, new_type):
return self.sql_rename_column % {
"table": self.quote_name(table),
"old_column": self.quote_name(old_field.column),
"new_column": self.quote_name(new_field.column),
"type": new_type,
}
def _create_fk_sql(self, model, field, suffix):
from_table = model._meta.db_table
from_column = field.column
to_table = field.target_field.model._meta.db_table
to_column = field.target_field.column
suffix = suffix % {
"to_table": to_table,
"to_column": to_column,
}
return self.sql_create_fk % {
"table": self.quote_name(from_table),
"name": self.quote_name(self._create_index_name(model, [from_column], suffix=suffix)),
"column": self.quote_name(from_column),
"to_table": self.quote_name(to_table),
"to_column": self.quote_name(to_column),
}
def _create_unique_sql(self, model, columns):
return self.sql_create_unique % {
"table": self.quote_name(model._meta.db_table),
"name": self.quote_name(self._create_index_name(model, columns, suffix="_uniq")),
"columns": ", ".join(self.quote_name(column) for column in columns),
}
def _delete_constraint_sql(self, template, model, name):
return template % {
"table": self.quote_name(model._meta.db_table),
"name": self.quote_name(name),
}
def _constraint_names(self, model, column_names=None, unique=None,
primary_key=None, index=None, foreign_key=None,
check=None):
"""
Returns all constraint names matching the columns and conditions
"""
column_names = list(column_names) if column_names else None
with self.connection.cursor() as cursor:
constraints = self.connection.introspection.get_constraints(cursor, model._meta.db_table)
result = []
for name, infodict in constraints.items():
if column_names is None or column_names == infodict['columns']:
if unique is not None and infodict['unique'] != unique:
continue
if primary_key is not None and infodict['primary_key'] != primary_key:
continue
if index is not None and infodict['index'] != index:
continue
if check is not None and infodict['check'] != check:
continue
if foreign_key is not None and not infodict['foreign_key']:
continue
result.append(name)
return result
|
yousafsyed/casperjs
|
refs/heads/master
|
bin/Lib/lib2to3/fixes/fix_exitfunc.py
|
140
|
"""
Convert use of sys.exitfunc to use the atexit module.
"""
# Author: Benjamin Peterson
from lib2to3 import pytree, fixer_base
from lib2to3.fixer_util import Name, Attr, Call, Comma, Newline, syms
class FixExitfunc(fixer_base.BaseFix):
keep_line_order = True
BM_compatible = True
PATTERN = """
(
sys_import=import_name<'import'
('sys'
|
dotted_as_names< (any ',')* 'sys' (',' any)* >
)
>
|
expr_stmt<
power< 'sys' trailer< '.' 'exitfunc' > >
'=' func=any >
)
"""
def __init__(self, *args):
super(FixExitfunc, self).__init__(*args)
def start_tree(self, tree, filename):
super(FixExitfunc, self).start_tree(tree, filename)
self.sys_import = None
def transform(self, node, results):
        # First, find the sys import. We'll just hope it's global scope.
if "sys_import" in results:
if self.sys_import is None:
self.sys_import = results["sys_import"]
return
func = results["func"].clone()
func.prefix = ""
register = pytree.Node(syms.power,
Attr(Name("atexit"), Name("register"))
)
call = Call(register, [func], node.prefix)
node.replace(call)
if self.sys_import is None:
# That's interesting.
self.warning(node, "Can't find sys import; Please add an atexit "
"import at the top of your file.")
return
# Now add an atexit import after the sys import.
names = self.sys_import.children[1]
if names.type == syms.dotted_as_names:
names.append_child(Comma())
names.append_child(Name("atexit", " "))
else:
containing_stmt = self.sys_import.parent
position = containing_stmt.children.index(self.sys_import)
stmt_container = containing_stmt.parent
new_import = pytree.Node(syms.import_name,
[Name("import"), Name("atexit", " ")]
)
new = pytree.Node(syms.simple_stmt, [new_import])
containing_stmt.insert_child(position + 1, Newline())
containing_stmt.insert_child(position + 2, new)
|
ffissore/badger
|
refs/heads/master
|
yun/root/oauth2client/__init__.py
|
36
|
"""Client library for using OAuth2, especially with Google APIs."""
__version__ = '1.4.11'
GOOGLE_AUTH_URI = 'https://accounts.google.com/o/oauth2/auth'
GOOGLE_DEVICE_URI = 'https://accounts.google.com/o/oauth2/device/code'
GOOGLE_REVOKE_URI = 'https://accounts.google.com/o/oauth2/revoke'
GOOGLE_TOKEN_URI = 'https://accounts.google.com/o/oauth2/token'
|
Alwnikrotikz/dpkt
|
refs/heads/master
|
dpkt/dpkt.py
|
16
|
# $Id$
"""Simple packet creation and parsing."""
import copy, itertools, socket, struct
class Error(Exception): pass
class UnpackError(Error): pass
class NeedData(UnpackError): pass
class PackError(Error): pass
class _MetaPacket(type):
def __new__(cls, clsname, clsbases, clsdict):
t = type.__new__(cls, clsname, clsbases, clsdict)
st = getattr(t, '__hdr__', None)
if st is not None:
# XXX - __slots__ only created in __new__()
clsdict['__slots__'] = [ x[0] for x in st ] + [ 'data' ]
t = type.__new__(cls, clsname, clsbases, clsdict)
t.__hdr_fields__ = [ x[0] for x in st ]
t.__hdr_fmt__ = getattr(t, '__byte_order__', '>') + \
''.join([ x[1] for x in st ])
t.__hdr_len__ = struct.calcsize(t.__hdr_fmt__)
t.__hdr_defaults__ = dict(zip(
t.__hdr_fields__, [ x[2] for x in st ]))
return t
class Packet(object):
"""Base packet class, with metaclass magic to generate members from
self.__hdr__.
__hdr__ should be defined as a list of (name, structfmt, default) tuples
__byte_order__ can be set to override the default ('>')
Example::
>>> class Foo(Packet):
... __hdr__ = (('foo', 'I', 1), ('bar', 'H', 2), ('baz', '4s', 'quux'))
...
>>> foo = Foo(bar=3)
>>> foo
Foo(bar=3)
>>> str(foo)
'\x00\x00\x00\x01\x00\x03quux'
>>> foo.bar
3
>>> foo.baz
'quux'
>>> foo.foo = 7
>>> foo.baz = 'whee'
>>> foo
Foo(baz='whee', foo=7, bar=3)
>>> Foo('hello, world!')
Foo(baz=' wor', foo=1751477356L, bar=28460, data='ld!')
"""
__metaclass__ = _MetaPacket
def __init__(self, *args, **kwargs):
"""Packet constructor with ([buf], [field=val,...]) prototype.
Arguments:
buf -- optional packet buffer to unpack
Optional keyword arguments correspond to members to set
(matching fields in self.__hdr__, or 'data').
"""
self.data = ''
if args:
try:
self.unpack(args[0])
except struct.error:
if len(args[0]) < self.__hdr_len__:
raise NeedData
raise UnpackError('invalid %s: %r' %
(self.__class__.__name__, args[0]))
else:
for k in self.__hdr_fields__:
setattr(self, k, copy.copy(self.__hdr_defaults__[k]))
for k, v in kwargs.iteritems():
setattr(self, k, v)
def __len__(self):
return self.__hdr_len__ + len(self.data)
def __getitem__(self, k):
try: return getattr(self, k)
except AttributeError: raise KeyError
def __repr__(self):
l = [ '%s=%r' % (k, getattr(self, k))
for k in self.__hdr_defaults__
if getattr(self, k) != self.__hdr_defaults__[k] ]
if self.data:
l.append('data=%r' % self.data)
return '%s(%s)' % (self.__class__.__name__, ', '.join(l))
def __str__(self):
return self.pack_hdr() + str(self.data)
def pack_hdr(self):
"""Return packed header string."""
try:
return struct.pack(self.__hdr_fmt__,
*[ getattr(self, k) for k in self.__hdr_fields__ ])
except struct.error:
vals = []
for k in self.__hdr_fields__:
v = getattr(self, k)
if isinstance(v, tuple):
vals.extend(v)
else:
vals.append(v)
try:
return struct.pack(self.__hdr_fmt__, *vals)
except struct.error, e:
raise PackError(str(e))
def pack(self):
"""Return packed header + self.data string."""
return str(self)
def unpack(self, buf):
"""Unpack packet header fields from buf, and set self.data."""
for k, v in itertools.izip(self.__hdr_fields__,
struct.unpack(self.__hdr_fmt__, buf[:self.__hdr_len__])):
setattr(self, k, v)
self.data = buf[self.__hdr_len__:]
# XXX - ''.join([(len(`chr(x)`)==3) and chr(x) or '.' for x in range(256)])
__vis_filter = """................................ !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[.]^_`abcdefghijklmnopqrstuvwxyz{|}~................................................................................................................................."""
def hexdump(buf, length=16):
"""Return a hexdump output string of the given buffer."""
n = 0
res = []
while buf:
line, buf = buf[:length], buf[length:]
hexa = ' '.join(['%02x' % ord(x) for x in line])
line = line.translate(__vis_filter)
res.append(' %04d: %-*s %s' % (n, length * 3, hexa, line))
n += length
return '\n'.join(res)
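# Illustrative output (spacing approximate):
#   >>> print hexdump('hello')
#    0000: 68 65 6c 6c 6f                                   hello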
try:
import dnet
def in_cksum_add(s, buf):
return dnet.ip_cksum_add(buf, s)
def in_cksum_done(s):
return socket.ntohs(dnet.ip_cksum_carry(s))
except ImportError:
import array
def in_cksum_add(s, buf):
n = len(buf)
cnt = (n / 2) * 2
a = array.array('H', buf[:cnt])
if cnt != n:
a.append(struct.unpack('H', buf[-1] + '\x00')[0])
return s + sum(a)
def in_cksum_done(s):
s = (s >> 16) + (s & 0xffff)
s += (s >> 16)
return socket.ntohs(~s & 0xffff)
def in_cksum(buf):
"""Return computed Internet checksum."""
return in_cksum_done(in_cksum_add(0, buf))
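# in_cksum() implements the RFC 1071 ones'-complement checksum used by
# IP/ICMP/TCP/UDP; illustratively, summing an intact header over its full
# length, checksum field included, yields 0.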
|
mtivadar/qiew
|
refs/heads/master
|
HexViewMode.py
|
1
|
from ViewMode import *
from cemu import *
import TextSelection
from TextDecorators import *
import string
import PyQt5
from PyQt5 import QtGui, QtCore, QtWidgets
class HexViewMode(ViewMode):
def __init__(self, width, height, data, cursor, widget=None, plugin=None):
super(HexViewMode, self).__init__()
self.dataModel = data
self.width = width
self.height = height
self.refresh = True
self.selector = TextSelection.HexSelection(self)
self.widget = widget
self.addHandler(self.dataModel)
# background brush
self.backgroundBrush = QtGui.QBrush(QtGui.QColor(0, 0, 128))
# text font
self.font = QtGui.QFont('Terminus', 11, QtGui.QFont.Light)
# font metrics. assume font is monospaced
self.font.setKerning(False)
self.font.setFixedPitch(True)
fm = QtGui.QFontMetrics(self.font)
self._fontWidth = fm.width('a')
self._fontHeight = fm.height()
self.Special = string.ascii_letters + string.digits + ' .;\':;=\"?-!()/\\_'
self.textPen = QtGui.QPen(QtGui.QColor(192, 192, 192), 1, QtCore.Qt.SolidLine)
self.cursor = cursor
self.HexColumns = [1, 4, 8, 16, 32, 36, 40]
        self.idxHexColumns = 3 # 16 columns
self.newPix = None
self.Ops = []
self.gap = 5
self.plugin = plugin
self.highpart = True
self.resize(width, height)
self.ann_w = Annotation(self.widget, self)
@property
def fontWidth(self):
return self._fontWidth
@property
def fontHeight(self):
return self._fontHeight
def setTransformationEngine(self, engine):
self.transformationEngine = engine
self.original_textdecorator = engine
def _getNewPixmap(self, width, height):
return QtGui.QPixmap(width, height)
def getPixmap(self):
#return self.qpix
for t in self.Ops:
if len(t) == 1:
t[0]()
else:
t[0](*t[1:])
self.Ops = []
if not self.newPix:
self.draw()
return self.newPix
def getGeometry(self):
return self.COLUMNS, self.ROWS
def getColumnsbyRow(self, row):
return self.COLUMNS
def getDataModel(self):
return self.dataModel
def startSelection(self):
self.selector.startSelection()
def stopSelection(self):
self.selector.stopSelection()
def getPageOffset(self):
return self.dataModel.getOffset()
def getCursorAbsolutePosition(self):
x, y = self.cursor.getPosition()
return self.dataModel.getOffset() + y*self.COLUMNS + x
def computeTextArea(self):
self.COLUMNS = self.HexColumns[self.idxHexColumns]
self.CON_COLUMNS = self.width//self.fontWidth
self.ROWS = self.height//self.fontHeight
self.notify(self.ROWS, self.COLUMNS)
def resize(self, width, height):
self.width = width - width%self.fontWidth
self.height = height - height%self.fontHeight
self.computeTextArea()
self.qpix = self._getNewPixmap(self.width, self.height + self.SPACER)
self.refresh = True
def changeHexColumns(self):
if self.idxHexColumns == len(self.HexColumns) - 1:
self.idxHexColumns = 0
else:
self.idxHexColumns += 1
        # if the screen is not big enough, retry
if self.HexColumns[self.idxHexColumns]*(3+1) + self.gap >= self.CON_COLUMNS:
self.changeHexColumns()
return
self.resize(self.width, self.height)
def scroll(self, dx, dy):
if dx != 0:
if self.dataModel.inLimits((self.dataModel.getOffset() - dx)):
self.dataModel.slide(-dx)
self.scroll_h(dx)
if dy != 0:
if self.dataModel.inLimits((self.dataModel.getOffset() - dy*self.COLUMNS)):
self.dataModel.slide(-dy*self.COLUMNS)
self.scroll_v(dy)
else:
if dy <= 0:
pass
#self.dataModel.slideToLastPage()
else:
self.dataModel.slideToFirstPage()
self.draw(refresh=True)
self.draw()
def scrollPages(self, number):
self.scroll(0, -number*self.ROWS)
def drawAdditionals(self):
self.newPix = self._getNewPixmap(self.width, self.height + self.SPACER)
qp = QtGui.QPainter()
qp.begin(self.newPix)
qp.drawPixmap(0, 0, self.qpix)
#self.transformationEngine.decorateText()
# highlight selected text
self.selector.highlightText()
# draw other selections
self.selector.drawSelections(qp)
# draw our cursor
self.drawCursor(qp)
# draw dword lines
for i in range(self.COLUMNS//4)[1:]:
xw = i*4*3*self.fontWidth - 4
qp.setPen(QtGui.QColor(0, 255, 0))
qp.drawLine(xw, 0, xw, self.ROWS*self.fontHeight)
qp.end()
def scroll_h(self, dx):
gap = self.gap
# hex part
self.qpix.scroll(dx*3*self.fontWidth, 0, QtCore.QRect(0, 0, self.COLUMNS*3*self.fontWidth, self.ROWS*self.fontHeight + self.SPACER))
# text part
self.qpix.scroll(dx*self.fontWidth, 0, QtCore.QRect((self.COLUMNS*3 + gap)*self.fontWidth , 0, self.COLUMNS*self.fontWidth, self.ROWS*self.fontHeight + self.SPACER))
qp = QtGui.QPainter()
qp.begin(self.qpix)
qp.setFont(self.font)
qp.setPen(self.textPen)
factor = abs(dx)
        # Characters leave some trails when scrolling; trail == number of
        # pixels to erase around each character
trail = 5
textBegining = self.COLUMNS*3 + gap
if dx < 0:
# hex
qp.fillRect((self.COLUMNS - 1*factor)*3*self.fontWidth, 0, factor * self.fontWidth * 3, self.ROWS*self.fontHeight + self.SPACER, self.backgroundBrush)
# text
qp.fillRect((textBegining + self.COLUMNS - 1*factor)*self.fontWidth, 0, factor * self.fontWidth+trail, self.ROWS*self.fontHeight + self.SPACER, self.backgroundBrush)
if dx > 0:
# hex
qp.fillRect(0, 0, factor * 3 * self.fontWidth, self.ROWS*self.fontHeight + self.SPACER, self.backgroundBrush)
# text
qp.fillRect(textBegining*self.fontWidth - trail, 0, factor * self.fontWidth + trail, self.ROWS*self.fontHeight + self.SPACER, self.backgroundBrush)
cemu = ConsoleEmulator(qp, self.ROWS, self.CON_COLUMNS)
page = self.transformationEngine.decorate()
        # write each column in turn
for column in range(factor):
            # every character in that column
for i in range(self.ROWS):
if dx < 0:
                    # (column) selects which column we are writing
idx = (i+1)*(self.COLUMNS) - (column + 1)
if dx > 0:
idx = (i)*(self.COLUMNS) + (column)
if len(self.getDisplayablePage()) > idx:
qp.setPen(self.transformationEngine.choosePen(idx))
else:
break
if self.transformationEngine.chooseBrush(idx) != None:
qp.setBackgroundMode(1)
qp.setBackground(self.transformationEngine.chooseBrush(idx))
c = self.getDisplayablePage()[idx]
hex_s = str(hex(c)[2:]).zfill(2) + ' '
if dx < 0:
cemu.writeAt((self.COLUMNS - (column + 1))*3, i, hex_s, noBackgroudOnSpaces=True)
cemu.writeAt(textBegining + self.COLUMNS - (column + 1), i, self.cp437(c))
if dx > 0:
cemu.writeAt((column)*3, i, hex_s, noBackgroudOnSpaces=True)
cemu.writeAt(textBegining + column, i, self.cp437(c))
qp.setBackgroundMode(0)
qp.end()
def scroll_v(self, dy):
self.qpix.scroll(0, dy*self.fontHeight, self.qpix.rect())
qp = QtGui.QPainter()
qp.begin(self.qpix)
qp.setFont(self.font)
qp.setPen(self.textPen)
factor = abs(dy)
cemu = ConsoleEmulator(qp, self.ROWS, self.CON_COLUMNS)
if dy < 0:
cemu.gotoXY(0, self.ROWS - factor)
qp.fillRect(0, (self.ROWS-factor)*self.fontHeight, self.fontWidth*self.CON_COLUMNS, factor * self.fontHeight + self.SPACER, self.backgroundBrush)
if dy > 0:
cemu.gotoXY(0, 0)
qp.fillRect(0, 0, self.fontWidth*self.CON_COLUMNS, factor * self.fontHeight, self.backgroundBrush)
page = self.transformationEngine.decorate()
# how many rows
for row in range(factor):
# for every column
for i in range(self.COLUMNS):
if dy < 0:
# we write from top-down, so get index of the first row that will be displayed
# this is why we have factor - row
idx = i + (self.ROWS - (factor - row))*self.COLUMNS
if dy > 0:
idx = i + (self.COLUMNS*row)
qp.setPen(self.transformationEngine.choosePen(idx))
if self.transformationEngine.chooseBrush(idx) != None:
qp.setBackgroundMode(1)
qp.setBackground(self.transformationEngine.chooseBrush(idx))
if len(self.getDisplayablePage()) > idx:
c = self.getDisplayablePage()[idx]
else:
break
if i == self.COLUMNS - 1:
hex_s = str(hex(c)[2:]).zfill(2)
else:
hex_s = str(hex(c)[2:]).zfill(2) + ' '
# write hex representation
cemu.write(hex_s, noBackgroudOnSpaces=True)
# save hex position
x, y = cemu.getXY()
# write text
cemu.writeAt(self.COLUMNS*3 + self.gap + (i%self.COLUMNS), y, self.cp437(c))
# go back to hex chars
cemu.gotoXY(x, y)
qp.setBackgroundMode(0)
cemu.writeLn()
qp.end()
def draw(self, refresh=False, row=0, howMany=0):
if self.refresh or refresh:
qp = QtGui.QPainter()
qp.begin(self.qpix)
if not howMany:
howMany = self.ROWS
self.drawTextMode(qp, row=row, howMany=howMany)
self.refresh = False
qp.end()
self.drawAdditionals()
def drawTextMode(self, qp, row=0, howMany=1):
# draw background
qp.fillRect(0, row * self.fontHeight, self.CON_COLUMNS * self.fontWidth, howMany * self.fontHeight + self.SPACER, self.backgroundBrush)
# set text pen&font
qp.setFont(self.font)
qp.setPen(self.textPen)
cemu = ConsoleEmulator(qp, self.ROWS, self.CON_COLUMNS)
page = self.transformationEngine.decorate()
cemu.gotoXY(0, row)
for i, c in enumerate(self.getDisplayablePage()[row*self.COLUMNS:(row + howMany)*self.COLUMNS]): #TODO: does not apply all decorators
w = i + row*self.COLUMNS
if (w+1)%self.COLUMNS == 0:
hex_s = str(hex(c)[2:]).zfill(2)
else:
hex_s = str(hex(c)[2:]).zfill(2) + ' '
qp.setPen(self.transformationEngine.choosePen(w))
if self.transformationEngine.chooseBrush(w) != None:
qp.setBackgroundMode(1)
qp.setBackground(self.transformationEngine.chooseBrush(w))
# write hex representation
cemu.write(hex_s, noBackgroudOnSpaces=True)
# save hex position
x, y = cemu.getXY()
# write text
cemu.writeAt(self.COLUMNS*3 + self.gap + (w%self.COLUMNS), y, self.cp437(c))
# go back to hex chars
cemu.gotoXY(x, y)
if (w+1)%self.COLUMNS == 0:
cemu.writeLn()
qp.setBackgroundMode(0)
def moveCursor(self, direction):
#TODO: have to move this, don't like it
if self.isInEditMode():
if self.highpart == False:
self.highpart = True
cursorX, cursorY = self.cursor.getPosition()
if direction == Directions.Left:
if cursorX == 0:
if cursorY == 0:
self.scroll(1, 0)
else:
self.cursor.moveAbsolute(self.COLUMNS-1, cursorY - 1)
else:
self.cursor.move(-1, 0)
if direction == Directions.Right:
if self.getCursorAbsolutePosition() + 1 >= self.dataModel.getDataSize():
return
if cursorX == self.COLUMNS-1:
if cursorY == self.ROWS-1:
self.scroll(-1, 0)
else:
self.cursor.moveAbsolute(0, cursorY + 1)
else:
self.cursor.move(1, 0)
if direction == Directions.Down:
if self.getCursorAbsolutePosition() + self.COLUMNS >= self.dataModel.getDataSize():
y, x = self.dataModel.getXYInPage(self.dataModel.getDataSize()-1)
self.cursor.moveAbsolute(x, y)
return
if cursorY == self.ROWS-1:
self.scroll(0, -1)
else:
self.cursor.move(0, 1)
if direction == Directions.Up:
if cursorY == 0:
self.scroll(0, 1)
else:
self.cursor.move(0, -1)
if direction == Directions.End:
if self.dataModel.getDataSize() < self.getCursorAbsolutePosition() + self.ROWS * self.COLUMNS:
y, x = self.dataModel.getXYInPage(self.dataModel.getDataSize()-1)
self.cursor.moveAbsolute(x, y)
else:
self.cursor.moveAbsolute(self.COLUMNS-1, self.ROWS-1)
if direction == Directions.Home:
self.cursor.moveAbsolute(0, 0)
if direction == Directions.CtrlHome:
self.dataModel.slideToFirstPage()
self.draw(refresh=True)
self.cursor.moveAbsolute(0, 0)
if direction == Directions.CtrlEnd:
self.dataModel.slideToLastPage()
self.draw(refresh=True)
self.moveCursor(Directions.End)
def drawCursor(self, qp):
qp.setBrush(QtGui.QColor(255, 255, 0))
if self.isInEditMode():
qp.setBrush(QtGui.QColor(255, 102, 179))
cursorX, cursorY = self.cursor.getPosition()
columns = self.HexColumns[self.idxHexColumns]
if cursorX > columns:
self.cursor.moveAbsolute(columns-1, cursorY)
# get cursor position again, maybe it was moved
cursorX, cursorY = self.cursor.getPosition()
qp.setOpacity(0.8)
if self.isInEditMode():
qp.setOpacity(0.5)
# cursor on text
qp.drawRect((self.COLUMNS*3 + self.gap + cursorX)*self.fontWidth, cursorY*self.fontHeight+2, self.fontWidth, self.fontHeight)
# cursor on hex
if not self.isInEditMode():
qp.drawRect(cursorX*3*self.fontWidth, cursorY*self.fontHeight+2, 2*self.fontWidth, self.fontHeight)
else:
if self.highpart:
qp.drawRect(cursorX*3*self.fontWidth, cursorY*self.fontHeight+2, 1*self.fontWidth, self.fontHeight)
else:
qp.drawRect(cursorX*3*self.fontWidth + self.fontWidth, cursorY*self.fontHeight+2, 1*self.fontWidth, self.fontHeight)
qp.setOpacity(1)
def keyFilter(self):
return [
(QtCore.Qt.ControlModifier, QtCore.Qt.Key_Right),
(QtCore.Qt.ControlModifier, QtCore.Qt.Key_Left),
(QtCore.Qt.ControlModifier, QtCore.Qt.Key_Up),
(QtCore.Qt.ControlModifier, QtCore.Qt.Key_Down),
(QtCore.Qt.ControlModifier, QtCore.Qt.Key_End),
(QtCore.Qt.ControlModifier, QtCore.Qt.Key_Home),
(QtCore.Qt.NoModifier, QtCore.Qt.Key_Right),
(QtCore.Qt.NoModifier, QtCore.Qt.Key_Left),
(QtCore.Qt.NoModifier, QtCore.Qt.Key_Up),
(QtCore.Qt.NoModifier, QtCore.Qt.Key_Down),
(QtCore.Qt.NoModifier, QtCore.Qt.Key_End),
(QtCore.Qt.NoModifier, QtCore.Qt.Key_Home),
(QtCore.Qt.NoModifier, QtCore.Qt.Key_PageDown),
(QtCore.Qt.NoModifier, QtCore.Qt.Key_PageUp)
]
def anon(self, dx, dy):
self.scroll(dx, dy)
# scroll() modifies the datamodel offset, so scroll and cursor
# operations must be done together
if self.getCursorAbsolutePosition() >= self.dataModel.getDataSize():
    y, x = self.dataModel.getXYInPage(self.dataModel.getDataSize() - 1)
    self.cursor.moveAbsolute(x, y)
# call draw() again: scroll() already drew, but the cursor was painted
# at its old position, so one more pass puts it right.
# kind of a hack, admittedly
self.draw()
def handleEditMode(self, modifiers, key, event):
if str(event.text()).lower() in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f']:
offs = self.getCursorOffsetInPage()
b = self.dataModel.getBYTE(self.dataModel.getOffset() + offs)
if b is None:
return
z = int(str(event.text()), 16)
# compute nibble
if self.highpart:
b = ((z << 4) | (b & 0x0F)) & 0xFF
else:
b = ((b & 0xF0) | (z & 0x0F)) & 0xFF
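# Worked example (values illustrative): with b = 0x3C and typed digit 'a'
# (z = 0xA), editing the high nibble gives
# ((0xA << 4) | (0x3C & 0x0F)) & 0xFF = 0xAC, while editing the low nibble
# gives ((0x3C & 0xF0) | (0xA & 0x0F)) & 0xFF = 0x3A.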
block = modifiers == QtCore.Qt.AltModifier and self.selector.getCurrentSelection()
# change block or single byte
if block:
# multiple, with ALT key
if self.selector.getCurrentSelection():
u, v = self.selector.getCurrentSelection()
for x in range(u, v):
b = self.dataModel.getBYTE(x)
if self.highpart:
b = ((z << 4) | (b & 0x0F)) & 0xFF
else:
b = ((b & 0xF0) | (z & 0x0F)) & 0xFF
self.dataModel.setData_b(x, b)
else:
self.dataModel.setData_b(self.dataModel.getOffset() + offs, b)
if block:
self.transformationEngine = RangePen(self.original_textdecorator, u, v, QtGui.QPen(QtGui.QColor(218, 94, 242), 0, QtCore.Qt.SolidLine), ignoreHighlights=True)
else:
z = self.dataModel.getOffset() + offs
#TODO: should not be repeated; the original transformation engine should be reused
self.transformationEngine = RangePen(self.original_textdecorator, z, z + 0, QtGui.QPen(QtGui.QColor(218, 94, 242), 0, QtCore.Qt.SolidLine), ignoreHighlights=True)
# so if we are at the end of a row, we must also redraw the previous line
highpart = self.highpart
# for block mode, move cursor
if not block:
x, old_y = self.cursor.getPosition()
if not self.highpart:
self.moveCursor(Directions.Right)
x, y = self.cursor.getPosition()
self.highpart = not highpart
if block:
self.draw(refresh=True)
else:
self.draw(refresh=True, row=y, howMany=1)
if y > old_y:
self.draw(refresh=True, row=y-1, howMany=1)
def handleKeyEvent(self, modifiers, key, event=None):
if event.type() == QtCore.QEvent.KeyRelease:
if key == QtCore.Qt.Key_Shift:
self.stopSelection()
return True
if event.type() == QtCore.QEvent.KeyPress:
if modifiers == QtCore.Qt.ShiftModifier:
keys = [QtCore.Qt.Key_Right, QtCore.Qt.Key_Left, QtCore.Qt.Key_Down, QtCore.Qt.Key_Up, QtCore.Qt.Key_End, QtCore.Qt.Key_Home]
if key in keys:
self.startSelection()
if key == QtCore.Qt.Key_Question:
self.annotationWindow()
if modifiers == QtCore.Qt.AltModifier:
if key == QtCore.Qt.Key_A:
self.add_annotation(1)
return True
if modifiers == QtCore.Qt.ControlModifier:
if key == QtCore.Qt.Key_A:
self.add_annotation(2)
if key == QtCore.Qt.Key_Right:
self.addop((self.anon, -1, 0))
if key == QtCore.Qt.Key_Left:
self.addop((self.scroll, 1, 0))
if key == QtCore.Qt.Key_Down:
self.addop((self.anon, 0, -1))
if key == QtCore.Qt.Key_Up:
self.addop((self.scroll, 0, 1))
if key == QtCore.Qt.Key_End:
self.moveCursor(Directions.CtrlEnd)
self.addop((self.draw,))
if key == QtCore.Qt.Key_Home:
self.moveCursor(Directions.CtrlHome)
self.addop((self.draw,))
return True
else:  # modifiers == QtCore.Qt.NoModifier
if key == QtCore.Qt.Key_Escape:
self.selector.resetSelections()
self.addop((self.draw,))
if key == QtCore.Qt.Key_Left:
self.moveCursor(Directions.Left)
self.addop((self.draw,))
if key == QtCore.Qt.Key_Right:
self.moveCursor(Directions.Right)
self.addop((self.draw,))
if key == QtCore.Qt.Key_Down:
self.moveCursor(Directions.Down)
self.addop((self.draw,))
if key == QtCore.Qt.Key_End:
self.moveCursor(Directions.End)
self.addop((self.draw,))
if key == QtCore.Qt.Key_Home:
self.moveCursor(Directions.Home)
self.addop((self.draw,))
if key == QtCore.Qt.Key_Up:
self.moveCursor(Directions.Up)
self.addop((self.draw,))
if key == QtCore.Qt.Key_PageDown:
self.addop((self.scrollPages, 1))
if key == QtCore.Qt.Key_PageUp:
self.addop((self.scrollPages, -1))
if key == QtCore.Qt.Key_F6:
self.changeHexColumns()
x, y = self.cursor.getPosition()
columns = self.HexColumns[self.idxHexColumns]
if x > columns:
self.cursor.moveAbsolute(columns-1, y)
self.addop((self.draw,))
if self.isInEditMode():
self.handleEditMode(modifiers, key, event)
return True
return False
def isEditable(self):
return True
def setEditMode(self, mode):
super(HexViewMode, self).setEditMode(mode)
if not mode:
self.highpart = True
self.transformationEngine = self.original_textdecorator
self.transformationEngine.reset()
self.draw(refresh=True)
for shortcut in self.plugin.getShortcuts():
if shortcut.key().toString() in list('0123456789abcdefABCDEF') + ['Alt+A', 'Alt+B', 'Alt+C', 'Alt+D', 'Alt+E', 'Alt+F']:
shortcut.setEnabled(True)
if mode:
for shortcut in self.plugin.getShortcuts():
if shortcut.key().toString() in list('0123456789abcdefABCDEF') + ['Alt+A', 'Alt+B', 'Alt+C', 'Alt+D', 'Alt+E', 'Alt+F']:
shortcut.setEnabled(False)
def addop(self, t):
self.Ops.append(t)
def getHeaderInfo(self):
s = ''
for i in range(self.HexColumns[self.idxHexColumns]):
s += '{0:02x} '.format(i)
s += self.gap*' ' + 'Text'
return s
def annotationWindow(self):
w = self.ann_w.treeWidget
w.setDragEnabled(True)
w.viewport().setAcceptDrops(True)
w.setDropIndicatorShown(True)
self.ann_w.show()
@QtCore.pyqtSlot("QItemSelection, QItemSelection")
def selectionChanged(self, selected, deselected):
item = self.ann_w.treeWidget.currentItem()
if item:
offset = item.getOffset()
size = item.getSize()
u = offset
v = offset + size
self.selector.addSelection((u, v, QtGui.QBrush(QtGui.QColor(125, 255, 0)), 0.2), type=TextSelection.SelectionType.NORMAL)
self.goTo(u)
@QtCore.pyqtSlot("QTreeWidgetItem*, int")
def itemChanged(self, item, column):
ID_NAME = 0
ID_DESCRIPTION = 4
s = str(item.text(column))
if column == ID_NAME:
item.setName(s)
if column == ID_DESCRIPTION:
item.setDescription(s)
def add_annotation(self, mode):
# New-style PyQt5 signal connections (replacing the old QtCore.QObject.connect
# calls). Note these run on every add_annotation() call, so the slots can end
# up connected, and therefore fired, more than once.
self.ann_w.treeWidget.selectionModel().selectionChanged.connect(self.selectionChanged)
self.ann_w.treeWidget.itemChanged.connect(self.itemChanged)
ID_NAME = 0
ID_OFFSET = 1
ID_SIZE = 2
ID_VALUE = 3
ID_DESCRIPTION = 4
ID_COLOR = 5
if self.selector.getCurrentSelection():
u, v = self.selector.getCurrentSelection()
else:
return
import random
r = random.randint(0, 255)
g = random.randint(0, 255)
b = random.randint(0, 255)
opacity = 0.4
if mode == 2:
opacity = 0.25
qcolor = QtGui.QColor(r, g, b)
added = self.selector.addSelection((u, v, QtGui.QBrush(qcolor), opacity), type=TextSelection.SelectionType.PERMANENT)
# if not added:
# return
t = self.ann_w.treeWidget
row = AnnonItem(None, self.ann_w.treeWidget, qcolor.name())
row.setFlags(QtCore.Qt.ItemIsSelectable |
QtCore.Qt.ItemIsEnabled |
QtCore.Qt.ItemIsEditable |
QtCore.Qt.ItemIsDropEnabled |
QtCore.Qt.ItemIsDragEnabled)
t.setAcceptDrops(True)
t.setDragEnabled(True)
t.setDragDropMode(QtWidgets.QAbstractItemView.InternalMove)
delegate = NoEditDelegate()
t.setItemDelegateForColumn(1, delegate)
t.setItemDelegateForColumn(2, delegate)
t.setItemDelegateForColumn(3, delegate)
t.setItemDelegateForColumn(5, delegate)
row.setName(self.ann_w.newFieldName())
row.setOffset(u)
#row.setText(ID_NAME, 'field_0')
#row.setText(ID_OFFSET, hex(u))
size = v - u
#row.setText(ID_SIZE, hex(size))
row.setSize(size)
value = ''
if size == 1:
value = self.dataModel.getBYTE(u, asString=True)
elif size == 2:
value = self.dataModel.getWORD(u, asString=True)
elif size == 4:
value = self.dataModel.getDWORD(u, asString=True)
else:
value = repr(str(self.dataModel.getStream(u, v)))
#row.setText(ID_VALUE, value)
row.setValue(value)
#cmb.setCurrentIndex(cmb.findData(w))
if mode == 2:
self.ann_w.treeWidget.addTopLevelItem(row)
if mode == 1:
selected = t.selectedItems()
if len(selected) == 1:
selected = selected[0]
else:
selected = t.topLevelItem(0)
if selected:
selected.addChild(row)
t.expandItem(row)
#cmb = QColorButton()
#cmb.setColor(qcolor.name())
#self.ann_w.treeWidget.setItemWidget(row, ID_COLOR, cmb)
self.ann_w.treeWidget.setItemWidget(row, ID_COLOR, row.cmb)
#self.ann_w.treeWidget.openPersistentEditor(row, 0)
#self.ann_w.treeWidget.editItem(row, 0)
#self.ann_w.treeWidget.editItem(row, 3)
class NoEditDelegate(QtWidgets.QStyledItemDelegate):
def __init__(self, parent=None):
super(NoEditDelegate, self).__init__(parent)
def createEditor(self, parent, option, index):
return None
class AnnonItem(QtWidgets.QTreeWidgetItem):
ID_NAME = 0
ID_OFFSET = 1
ID_SIZE = 2
ID_VALUE = 3
ID_DESCRIPTION = 4
ID_COLOR = 5
def __init__(self, x, parent, color):
super(AnnonItem, self).__init__(x)
self._color = color
self._t_parent = parent
self.cmb = QColorButton()
self.cmb.setColor(self._color)
#self._t_parent.setItemWidget(self, self.ID_COLOR, self.cmb)
def setName(self, name):
self._name = name
self.setText(self.ID_NAME, name)
def getName(self):
return self._name
def setOffset(self, offset):
self._offset = offset
self.setText(self.ID_OFFSET, hex(offset))
def getOffset(self):
return self._offset
def setSize(self, size):
self._size = size
self.setText(self.ID_SIZE, hex(size))
def getSize(self):
return self._size
def setValue(self, value):
self._value = value
self.setText(self.ID_VALUE, value)
def getValue(self):
return self._value
def setDescription(self, description):
self._description = description
self.setText(self.ID_DESCRIPTION, description)
def getDescription(self):
return self._description
class QColorButton(QtWidgets.QPushButton):
'''
Custom Qt widget to show a chosen color.
Left-clicking the button shows the color chooser, while
right-clicking resets the color to None (no color).
Based on http://martinfitzpatrick.name/article/qcolorbutton-a-color-selector-tool-for-pyqt/
'''
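# Minimal usage sketch (assumes a running QApplication):
#     btn = QColorButton()
#     btn.colorChanged.connect(lambda: print(btn.color()))
#     btn.setColor('#ff8800')  # restyles the button and emits colorChanged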
colorChanged = QtCore.pyqtSignal()
def __init__(self, *args, **kwargs):
super(QColorButton, self).__init__(*args, **kwargs)
self._color = None
self.setMaximumWidth(32)
self.pressed.connect(self.onColorPicker)
def setColor(self, color):
if color != self._color:
self._color = color
self.colorChanged.emit()
if self._color:
self.setStyleSheet("background-color: %s;" % self._color)
else:
self.setStyleSheet("")
def color(self):
return self._color
def onColorPicker(self):
'''
Show color-picker dialog to select color.
Qt will use the native dialog by default.
'''
dlg = QtWidgets.QColorDialog(QtGui.QColor(self._color), None)
#if self._color:
# dlg.setCurrentColor(QtGui.QColor(self._color))
if dlg.exec_():
self.setColor(dlg.currentColor().name())
def mousePressEvent(self, e):
if e.button() == QtCore.Qt.RightButton:
self.setColor(None)
return super(QColorButton, self).mousePressEvent(e)
class ComboBoxItem(QtWidgets.QComboBox):
def __init__(self, item, column):
super(ComboBoxItem, self).__init__()
self.item = item
self.column = column
class Annotation(QtWidgets.QDialog):
_fieldIdx = 0
def __init__(self, parent, view):
super(Annotation, self).__init__(parent)
self.parent = parent
self.view = view
self.oshow = super(Annotation, self).show
import os
root = os.path.dirname(sys.argv[0])
self.ui = PyQt5.uic.loadUi(os.path.join(root, 'annotation.ui'), baseinstance=self)
# self.ei = ImportsEventFilter(plugin, self.ui.treeWidgetImports)
self.ei = treeEventFilter(view, self.ui.treeWidget)
self.ui.treeWidget.installEventFilter(self.ei)
self.initUI()
def newFieldName(self):
name = 'field_{}'.format(self._fieldIdx)
self._fieldIdx += 1
return name
def show(self):
# TODO: remember position? resize plugin windows when parent resize?
pwidth = self.parent.parent.size().width()
pheight = self.parent.parent.size().height()
width = self.ui.treeWidget.size().width()+15
height = self.ui.treeWidget.size().height()+15
self.setGeometry(pwidth - width - 15, pheight - height, width, height)
self.setFixedSize(width, height)
self.oshow()
def initUI(self):
self.setWindowTitle('Annotations')
self.setSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
shortcut = QtWidgets.QShortcut(QtGui.QKeySequence("Shift+/"), self, self.close, self.close)
class treeEventFilter(QtCore.QObject):
def __init__(self, view, widget):
super(treeEventFilter, self).__init__()
self.widget = widget
self.view = view
def eventFilter(self, watched, event):
if event.type() == QtCore.QEvent.KeyPress:
if event.key() == QtCore.Qt.Key_Delete:
# get RVA column from treeView
item = self.widget.currentItem()
offset = item.getOffset()#int(str(item.text(1)),0)
size = item.getSize()#int(str(item.text(2)),0)
u = offset
v = offset + size
self.view.selector.removeSelection(u, v, TextSelection.SelectionType.PERMANENT)
# TODO: remove tree!
item.parent().removeChild(item)
#self.widget.takeTopLevelItem(self.widget.indexOfTopLevelItem(item))
#print item
#rva = self.widget.indexFromItem(item, 1).data().toString()
return False
|
szilveszter/django
|
refs/heads/master
|
tests/invalid_models_tests/test_relative_fields.py
|
12
|
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from django.core.checks import Error, Warning as DjangoWarning
from django.db import models
from django.test.utils import override_settings
from django.test.testcases import skipIfDBFeature
from .base import IsolatedModelsTestCase
class RelativeFieldTests(IsolatedModelsTestCase):
def test_valid_foreign_key_without_accessor(self):
class Target(models.Model):
# There would be a clash if Model.field installed an accessor.
model = models.IntegerField()
class Model(models.Model):
field = models.ForeignKey(Target, related_name='+')
field = Model._meta.get_field('field')
errors = field.check()
self.assertEqual(errors, [])
def test_foreign_key_to_missing_model(self):
# Model names are resolved when a model is being created, so we cannot
# test relative fields in isolation and we need to attach them to a
# model.
class Model(models.Model):
foreign_key = models.ForeignKey('Rel1')
field = Model._meta.get_field('foreign_key')
errors = field.check()
expected = [
Error(
("Field defines a relation with model 'Rel1', "
"which is either not installed, or is abstract."),
hint=None,
obj=field,
id='fields.E300',
),
]
self.assertEqual(errors, expected)
def test_many_to_many_to_missing_model(self):
class Model(models.Model):
m2m = models.ManyToManyField("Rel2")
field = Model._meta.get_field('m2m')
errors = field.check(from_model=Model)
expected = [
Error(
("Field defines a relation with model 'Rel2', "
"which is either not installed, or is abstract."),
hint=None,
obj=field,
id='fields.E300',
),
]
self.assertEqual(errors, expected)
def test_many_to_many_with_useless_options(self):
class Model(models.Model):
name = models.CharField(max_length=20)
class ModelM2M(models.Model):
m2m = models.ManyToManyField(Model, null=True, validators=[''])
errors = ModelM2M.check()
field = ModelM2M._meta.get_field('m2m')
expected = [
DjangoWarning(
'null has no effect on ManyToManyField.',
hint=None,
obj=field,
id='fields.W340',
)
]
expected.append(
DjangoWarning(
'ManyToManyField does not support validators.',
hint=None,
obj=field,
id='fields.W341',
)
)
self.assertEqual(errors, expected)
def test_ambiguous_relationship_model(self):
class Person(models.Model):
pass
class Group(models.Model):
field = models.ManyToManyField('Person',
through="AmbiguousRelationship", related_name='tertiary')
class AmbiguousRelationship(models.Model):
# Too many foreign keys to Person.
first_person = models.ForeignKey(Person, related_name="first")
second_person = models.ForeignKey(Person, related_name="second")
second_model = models.ForeignKey(Group)
field = Group._meta.get_field('field')
errors = field.check(from_model=Group)
expected = [
Error(
("The model is used as an intermediate model by "
"'invalid_models_tests.Group.field', but it has more than one "
"foreign key to 'Person', which is ambiguous. You must specify "
"which foreign key Django should use via the through_fields "
"keyword argument."),
hint=('If you want to create a recursive relationship, use '
'ForeignKey("self", symmetrical=False, '
'through="AmbiguousRelationship").'),
obj=field,
id='fields.E335',
),
]
self.assertEqual(errors, expected)
def test_relationship_model_with_foreign_key_to_wrong_model(self):
class WrongModel(models.Model):
pass
class Person(models.Model):
pass
class Group(models.Model):
members = models.ManyToManyField('Person',
through="InvalidRelationship")
class InvalidRelationship(models.Model):
person = models.ForeignKey(Person)
wrong_foreign_key = models.ForeignKey(WrongModel)
# The last foreign key should point to the Group model.
field = Group._meta.get_field('members')
errors = field.check(from_model=Group)
expected = [
Error(
("The model is used as an intermediate model by "
"'invalid_models_tests.Group.members', but it does not "
"have a foreign key to 'Group' or 'Person'."),
hint=None,
obj=InvalidRelationship,
id='fields.E336',
),
]
self.assertEqual(errors, expected)
def test_relationship_model_missing_foreign_key(self):
class Person(models.Model):
pass
class Group(models.Model):
members = models.ManyToManyField('Person',
through="InvalidRelationship")
class InvalidRelationship(models.Model):
group = models.ForeignKey(Group)
# No foreign key to Person
field = Group._meta.get_field('members')
errors = field.check(from_model=Group)
expected = [
Error(
("The model is used as an intermediate model by "
"'invalid_models_tests.Group.members', but it does not have "
"a foreign key to 'Group' or 'Person'."),
hint=None,
obj=InvalidRelationship,
id='fields.E336',
),
]
self.assertEqual(errors, expected)
def test_missing_relationship_model(self):
class Person(models.Model):
pass
class Group(models.Model):
members = models.ManyToManyField('Person',
through="MissingM2MModel")
field = Group._meta.get_field('members')
errors = field.check(from_model=Group)
expected = [
Error(
("Field specifies a many-to-many relation through model "
"'MissingM2MModel', which has not been installed."),
hint=None,
obj=field,
id='fields.E331',
),
]
self.assertEqual(errors, expected)
def test_symmetrical_self_referential_field(self):
class Person(models.Model):
# Implicit symmetrical=False.
friends = models.ManyToManyField('self', through="Relationship")
class Relationship(models.Model):
first = models.ForeignKey(Person, related_name="rel_from_set")
second = models.ForeignKey(Person, related_name="rel_to_set")
field = Person._meta.get_field('friends')
errors = field.check(from_model=Person)
expected = [
Error(
'Many-to-many fields with intermediate tables must not be symmetrical.',
hint=None,
obj=field,
id='fields.E332',
),
]
self.assertEqual(errors, expected)
def test_too_many_foreign_keys_in_self_referential_model(self):
class Person(models.Model):
friends = models.ManyToManyField('self',
through="InvalidRelationship", symmetrical=False)
class InvalidRelationship(models.Model):
first = models.ForeignKey(Person, related_name="rel_from_set_2")
second = models.ForeignKey(Person, related_name="rel_to_set_2")
third = models.ForeignKey(Person, related_name="too_many_by_far")
field = Person._meta.get_field('friends')
errors = field.check(from_model=Person)
expected = [
Error(
("The model is used as an intermediate model by "
"'invalid_models_tests.Person.friends', but it has more than two "
"foreign keys to 'Person', which is ambiguous. You must specify "
"which two foreign keys Django should use via the through_fields "
"keyword argument."),
hint='Use through_fields to specify which two foreign keys Django should use.',
obj=InvalidRelationship,
id='fields.E333',
),
]
self.assertEqual(errors, expected)
def test_symmetric_self_reference_with_intermediate_table(self):
class Person(models.Model):
# Explicit symmetrical=True.
friends = models.ManyToManyField('self',
through="Relationship", symmetrical=True)
class Relationship(models.Model):
first = models.ForeignKey(Person, related_name="rel_from_set")
second = models.ForeignKey(Person, related_name="rel_to_set")
field = Person._meta.get_field('friends')
errors = field.check(from_model=Person)
expected = [
Error(
'Many-to-many fields with intermediate tables must not be symmetrical.',
hint=None,
obj=field,
id='fields.E332',
),
]
self.assertEqual(errors, expected)
def test_symmetric_self_reference_with_intermediate_table_and_through_fields(self):
"""Using through_fields in a m2m with an intermediate model shouldn't mask its incompatibility with symmetry."""
class Person(models.Model):
# Explicit symmetrical=True.
friends = models.ManyToManyField('self',
symmetrical=True,
through="Relationship",
through_fields=('first', 'second'))
class Relationship(models.Model):
first = models.ForeignKey(Person, related_name="rel_from_set")
second = models.ForeignKey(Person, related_name="rel_to_set")
referee = models.ForeignKey(Person, related_name="referred")
field = Person._meta.get_field('friends')
errors = field.check(from_model=Person)
expected = [
Error(
'Many-to-many fields with intermediate tables must not be symmetrical.',
hint=None,
obj=field,
id='fields.E332',
),
]
self.assertEqual(errors, expected)
def test_foreign_key_to_abstract_model(self):
class Model(models.Model):
foreign_key = models.ForeignKey('AbstractModel')
class AbstractModel(models.Model):
class Meta:
abstract = True
field = Model._meta.get_field('foreign_key')
errors = field.check()
expected = [
Error(
("Field defines a relation with model 'AbstractModel', "
"which is either not installed, or is abstract."),
hint=None,
obj=field,
id='fields.E300',
),
]
self.assertEqual(errors, expected)
def test_m2m_to_abstract_model(self):
class AbstractModel(models.Model):
class Meta:
abstract = True
class Model(models.Model):
m2m = models.ManyToManyField('AbstractModel')
field = Model._meta.get_field('m2m')
errors = field.check(from_model=Model)
expected = [
Error(
("Field defines a relation with model 'AbstractModel', "
"which is either not installed, or is abstract."),
hint=None,
obj=field,
id='fields.E300',
),
]
self.assertEqual(errors, expected)
def test_unique_m2m(self):
class Person(models.Model):
name = models.CharField(max_length=5)
class Group(models.Model):
members = models.ManyToManyField('Person', unique=True)
field = Group._meta.get_field('members')
errors = field.check(from_model=Group)
expected = [
Error(
'ManyToManyFields cannot be unique.',
hint=None,
obj=field,
id='fields.E330',
),
]
self.assertEqual(errors, expected)
def test_foreign_key_to_non_unique_field(self):
class Target(models.Model):
bad = models.IntegerField() # No unique=True
class Model(models.Model):
foreign_key = models.ForeignKey('Target', to_field='bad')
field = Model._meta.get_field('foreign_key')
errors = field.check()
expected = [
Error(
"'Target.bad' must set unique=True because it is referenced by a foreign key.",
hint=None,
obj=field,
id='fields.E311',
),
]
self.assertEqual(errors, expected)
def test_foreign_key_to_non_unique_field_under_explicit_model(self):
class Target(models.Model):
bad = models.IntegerField()
class Model(models.Model):
field = models.ForeignKey(Target, to_field='bad')
field = Model._meta.get_field('field')
errors = field.check()
expected = [
Error(
"'Target.bad' must set unique=True because it is referenced by a foreign key.",
hint=None,
obj=field,
id='fields.E311',
),
]
self.assertEqual(errors, expected)
def test_foreign_object_to_non_unique_fields(self):
class Person(models.Model):
# Note that neither field is unique.
country_id = models.IntegerField()
city_id = models.IntegerField()
class MMembership(models.Model):
person_country_id = models.IntegerField()
person_city_id = models.IntegerField()
person = models.ForeignObject(Person,
from_fields=['person_country_id', 'person_city_id'],
to_fields=['country_id', 'city_id'])
field = MMembership._meta.get_field('person')
errors = field.check()
expected = [
Error(
("None of the fields 'country_id', 'city_id' on model 'Person' "
"have a unique=True constraint."),
hint=None,
obj=field,
id='fields.E310',
)
]
self.assertEqual(errors, expected)
def test_on_delete_set_null_on_non_nullable_field(self):
class Person(models.Model):
pass
class Model(models.Model):
foreign_key = models.ForeignKey('Person',
on_delete=models.SET_NULL)
field = Model._meta.get_field('foreign_key')
errors = field.check()
expected = [
Error(
'Field specifies on_delete=SET_NULL, but cannot be null.',
hint='Set null=True argument on the field, or change the on_delete rule.',
obj=field,
id='fields.E320',
),
]
self.assertEqual(errors, expected)
def test_on_delete_set_default_without_default_value(self):
class Person(models.Model):
pass
class Model(models.Model):
foreign_key = models.ForeignKey('Person',
on_delete=models.SET_DEFAULT)
field = Model._meta.get_field('foreign_key')
errors = field.check()
expected = [
Error(
'Field specifies on_delete=SET_DEFAULT, but has no default value.',
hint='Set a default value, or change the on_delete rule.',
obj=field,
id='fields.E321',
),
]
self.assertEqual(errors, expected)
@skipIfDBFeature('interprets_empty_strings_as_nulls')
def test_nullable_primary_key(self):
class Model(models.Model):
field = models.IntegerField(primary_key=True, null=True)
field = Model._meta.get_field('field')
errors = field.check()
expected = [
Error(
'Primary keys must not have null=True.',
hint='Set null=False on the field, or remove primary_key=True argument.',
obj=field,
id='fields.E007',
),
]
self.assertEqual(errors, expected)
def test_not_swapped_model(self):
class SwappableModel(models.Model):
# A model that can be, but isn't swapped out. References to this
# model should *not* raise any validation error.
class Meta:
swappable = 'TEST_SWAPPABLE_MODEL'
class Model(models.Model):
explicit_fk = models.ForeignKey(SwappableModel,
related_name='explicit_fk')
implicit_fk = models.ForeignKey('invalid_models_tests.SwappableModel',
related_name='implicit_fk')
explicit_m2m = models.ManyToManyField(SwappableModel,
related_name='explicit_m2m')
implicit_m2m = models.ManyToManyField(
'invalid_models_tests.SwappableModel',
related_name='implicit_m2m')
explicit_fk = Model._meta.get_field('explicit_fk')
self.assertEqual(explicit_fk.check(), [])
implicit_fk = Model._meta.get_field('implicit_fk')
self.assertEqual(implicit_fk.check(), [])
explicit_m2m = Model._meta.get_field('explicit_m2m')
self.assertEqual(explicit_m2m.check(from_model=Model), [])
implicit_m2m = Model._meta.get_field('implicit_m2m')
self.assertEqual(implicit_m2m.check(from_model=Model), [])
@override_settings(TEST_SWAPPED_MODEL='invalid_models_tests.Replacement')
def test_referencing_to_swapped_model(self):
class Replacement(models.Model):
pass
class SwappedModel(models.Model):
class Meta:
swappable = 'TEST_SWAPPED_MODEL'
class Model(models.Model):
explicit_fk = models.ForeignKey(SwappedModel,
related_name='explicit_fk')
implicit_fk = models.ForeignKey('invalid_models_tests.SwappedModel',
related_name='implicit_fk')
explicit_m2m = models.ManyToManyField(SwappedModel,
related_name='explicit_m2m')
implicit_m2m = models.ManyToManyField(
'invalid_models_tests.SwappedModel',
related_name='implicit_m2m')
fields = [
Model._meta.get_field('explicit_fk'),
Model._meta.get_field('implicit_fk'),
Model._meta.get_field('explicit_m2m'),
Model._meta.get_field('implicit_m2m'),
]
expected_error = Error(
("Field defines a relation with the model "
"'invalid_models_tests.SwappedModel', which has been swapped out."),
hint="Update the relation to point at 'settings.TEST_SWAPPED_MODEL'.",
id='fields.E301',
)
for field in fields:
expected_error.obj = field
errors = field.check(from_model=Model)
self.assertEqual(errors, [expected_error])
class AccessorClashTests(IsolatedModelsTestCase):
def test_fk_to_integer(self):
self._test_accessor_clash(
target=models.IntegerField(),
relative=models.ForeignKey('Target'))
def test_fk_to_fk(self):
self._test_accessor_clash(
target=models.ForeignKey('Another'),
relative=models.ForeignKey('Target'))
def test_fk_to_m2m(self):
self._test_accessor_clash(
target=models.ManyToManyField('Another'),
relative=models.ForeignKey('Target'))
def test_m2m_to_integer(self):
self._test_accessor_clash(
target=models.IntegerField(),
relative=models.ManyToManyField('Target'))
def test_m2m_to_fk(self):
self._test_accessor_clash(
target=models.ForeignKey('Another'),
relative=models.ManyToManyField('Target'))
def test_m2m_to_m2m(self):
self._test_accessor_clash(
target=models.ManyToManyField('Another'),
relative=models.ManyToManyField('Target'))
def _test_accessor_clash(self, target, relative):
class Another(models.Model):
pass
class Target(models.Model):
model_set = target
class Model(models.Model):
rel = relative
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.rel' clashes with field name 'Target.model_set'.",
hint=("Rename field 'Target.model_set', or add/change "
"a related_name argument to the definition "
"for field 'Model.rel'."),
obj=Model._meta.get_field('rel'),
id='fields.E302',
),
]
self.assertEqual(errors, expected)
def test_clash_between_accessors(self):
class Target(models.Model):
pass
class Model(models.Model):
foreign = models.ForeignKey(Target)
m2m = models.ManyToManyField(Target)
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.foreign' clashes with reverse accessor for 'Model.m2m'.",
hint=("Add or change a related_name argument to the definition "
"for 'Model.foreign' or 'Model.m2m'."),
obj=Model._meta.get_field('foreign'),
id='fields.E304',
),
Error(
"Reverse accessor for 'Model.m2m' clashes with reverse accessor for 'Model.foreign'.",
hint=("Add or change a related_name argument to the definition "
"for 'Model.m2m' or 'Model.foreign'."),
obj=Model._meta.get_field('m2m'),
id='fields.E304',
),
]
self.assertEqual(errors, expected)
def test_m2m_to_m2m_with_inheritance(self):
""" Ref #22047. """
class Target(models.Model):
pass
class Model(models.Model):
children = models.ManyToManyField('Child',
related_name="m2m_clash", related_query_name="no_clash")
class Parent(models.Model):
m2m_clash = models.ManyToManyField('Target')
class Child(Parent):
pass
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.children' clashes with field name 'Child.m2m_clash'.",
hint=("Rename field 'Child.m2m_clash', or add/change "
"a related_name argument to the definition "
"for field 'Model.children'."),
obj=Model._meta.get_field('children'),
id='fields.E302',
)
]
self.assertEqual(errors, expected)
class ReverseQueryNameClashTests(IsolatedModelsTestCase):
def test_fk_to_integer(self):
self._test_reverse_query_name_clash(
target=models.IntegerField(),
relative=models.ForeignKey('Target'))
def test_fk_to_fk(self):
self._test_reverse_query_name_clash(
target=models.ForeignKey('Another'),
relative=models.ForeignKey('Target'))
def test_fk_to_m2m(self):
self._test_reverse_query_name_clash(
target=models.ManyToManyField('Another'),
relative=models.ForeignKey('Target'))
def test_m2m_to_integer(self):
self._test_reverse_query_name_clash(
target=models.IntegerField(),
relative=models.ManyToManyField('Target'))
def test_m2m_to_fk(self):
self._test_reverse_query_name_clash(
target=models.ForeignKey('Another'),
relative=models.ManyToManyField('Target'))
def test_m2m_to_m2m(self):
self._test_reverse_query_name_clash(
target=models.ManyToManyField('Another'),
relative=models.ManyToManyField('Target'))
def _test_reverse_query_name_clash(self, target, relative):
class Another(models.Model):
pass
class Target(models.Model):
model = target
class Model(models.Model):
rel = relative
errors = Model.check()
expected = [
Error(
"Reverse query name for 'Model.rel' clashes with field name 'Target.model'.",
hint=("Rename field 'Target.model', or add/change "
"a related_name argument to the definition "
"for field 'Model.rel'."),
obj=Model._meta.get_field('rel'),
id='fields.E303',
),
]
self.assertEqual(errors, expected)
class ExplicitRelatedNameClashTests(IsolatedModelsTestCase):
def test_fk_to_integer(self):
self._test_explicit_related_name_clash(
target=models.IntegerField(),
relative=models.ForeignKey('Target', related_name='clash'))
def test_fk_to_fk(self):
self._test_explicit_related_name_clash(
target=models.ForeignKey('Another'),
relative=models.ForeignKey('Target', related_name='clash'))
def test_fk_to_m2m(self):
self._test_explicit_related_name_clash(
target=models.ManyToManyField('Another'),
relative=models.ForeignKey('Target', related_name='clash'))
def test_m2m_to_integer(self):
self._test_explicit_related_name_clash(
target=models.IntegerField(),
relative=models.ManyToManyField('Target', related_name='clash'))
def test_m2m_to_fk(self):
self._test_explicit_related_name_clash(
target=models.ForeignKey('Another'),
relative=models.ManyToManyField('Target', related_name='clash'))
def test_m2m_to_m2m(self):
self._test_explicit_related_name_clash(
target=models.ManyToManyField('Another'),
relative=models.ManyToManyField('Target', related_name='clash'))
def _test_explicit_related_name_clash(self, target, relative):
class Another(models.Model):
pass
class Target(models.Model):
clash = target
class Model(models.Model):
rel = relative
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.rel' clashes with field name 'Target.clash'.",
hint=("Rename field 'Target.clash', or add/change "
"a related_name argument to the definition "
"for field 'Model.rel'."),
obj=Model._meta.get_field('rel'),
id='fields.E302',
),
Error(
"Reverse query name for 'Model.rel' clashes with field name 'Target.clash'.",
hint=("Rename field 'Target.clash', or add/change "
"a related_name argument to the definition "
"for field 'Model.rel'."),
obj=Model._meta.get_field('rel'),
id='fields.E303',
),
]
self.assertEqual(errors, expected)
class ExplicitRelatedQueryNameClashTests(IsolatedModelsTestCase):
def test_fk_to_integer(self):
self._test_explicit_related_query_name_clash(
target=models.IntegerField(),
relative=models.ForeignKey('Target',
related_query_name='clash'))
def test_fk_to_fk(self):
self._test_explicit_related_query_name_clash(
target=models.ForeignKey('Another'),
relative=models.ForeignKey('Target',
related_query_name='clash'))
def test_fk_to_m2m(self):
self._test_explicit_related_query_name_clash(
target=models.ManyToManyField('Another'),
relative=models.ForeignKey('Target',
related_query_name='clash'))
def test_m2m_to_integer(self):
self._test_explicit_related_query_name_clash(
target=models.IntegerField(),
relative=models.ManyToManyField('Target',
related_query_name='clash'))
def test_m2m_to_fk(self):
self._test_explicit_related_query_name_clash(
target=models.ForeignKey('Another'),
relative=models.ManyToManyField('Target',
related_query_name='clash'))
def test_m2m_to_m2m(self):
self._test_explicit_related_query_name_clash(
target=models.ManyToManyField('Another'),
relative=models.ManyToManyField('Target',
related_query_name='clash'))
def _test_explicit_related_query_name_clash(self, target, relative):
class Another(models.Model):
pass
class Target(models.Model):
clash = target
class Model(models.Model):
rel = relative
errors = Model.check()
expected = [
Error(
"Reverse query name for 'Model.rel' clashes with field name 'Target.clash'.",
hint=("Rename field 'Target.clash', or add/change a related_name "
"argument to the definition for field 'Model.rel'."),
obj=Model._meta.get_field('rel'),
id='fields.E303',
),
]
self.assertEqual(errors, expected)
class SelfReferentialM2MClashTests(IsolatedModelsTestCase):
def test_clash_between_accessors(self):
class Model(models.Model):
first_m2m = models.ManyToManyField('self', symmetrical=False)
second_m2m = models.ManyToManyField('self', symmetrical=False)
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.first_m2m' clashes with reverse accessor for 'Model.second_m2m'.",
hint=("Add or change a related_name argument to the definition "
"for 'Model.first_m2m' or 'Model.second_m2m'."),
obj=Model._meta.get_field('first_m2m'),
id='fields.E304',
),
Error(
"Reverse accessor for 'Model.second_m2m' clashes with reverse accessor for 'Model.first_m2m'.",
hint=("Add or change a related_name argument to the definition "
"for 'Model.second_m2m' or 'Model.first_m2m'."),
obj=Model._meta.get_field('second_m2m'),
id='fields.E304',
),
]
self.assertEqual(errors, expected)
def test_accessor_clash(self):
class Model(models.Model):
model_set = models.ManyToManyField("self", symmetrical=False)
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.model_set' clashes with field name 'Model.model_set'.",
hint=("Rename field 'Model.model_set', or add/change "
"a related_name argument to the definition "
"for field 'Model.model_set'."),
obj=Model._meta.get_field('model_set'),
id='fields.E302',
),
]
self.assertEqual(errors, expected)
def test_reverse_query_name_clash(self):
class Model(models.Model):
model = models.ManyToManyField("self", symmetrical=False)
errors = Model.check()
expected = [
Error(
"Reverse query name for 'Model.model' clashes with field name 'Model.model'.",
hint=("Rename field 'Model.model', or add/change a related_name "
"argument to the definition for field 'Model.model'."),
obj=Model._meta.get_field('model'),
id='fields.E303',
),
]
self.assertEqual(errors, expected)
def test_clash_under_explicit_related_name(self):
class Model(models.Model):
clash = models.IntegerField()
m2m = models.ManyToManyField("self",
symmetrical=False, related_name='clash')
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.m2m' clashes with field name 'Model.clash'.",
hint=("Rename field 'Model.clash', or add/change a related_name "
"argument to the definition for field 'Model.m2m'."),
obj=Model._meta.get_field('m2m'),
id='fields.E302',
),
Error(
"Reverse query name for 'Model.m2m' clashes with field name 'Model.clash'.",
hint=("Rename field 'Model.clash', or add/change a related_name "
"argument to the definition for field 'Model.m2m'."),
obj=Model._meta.get_field('m2m'),
id='fields.E303',
),
]
self.assertEqual(errors, expected)
def test_valid_model(self):
class Model(models.Model):
first = models.ManyToManyField("self",
symmetrical=False, related_name='first_accessor')
second = models.ManyToManyField("self",
symmetrical=False, related_name='second_accessor')
errors = Model.check()
self.assertEqual(errors, [])
class SelfReferentialFKClashTests(IsolatedModelsTestCase):
def test_accessor_clash(self):
class Model(models.Model):
model_set = models.ForeignKey("Model")
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.model_set' clashes with field name 'Model.model_set'.",
hint=("Rename field 'Model.model_set', or add/change "
"a related_name argument to the definition "
"for field 'Model.model_set'."),
obj=Model._meta.get_field('model_set'),
id='fields.E302',
),
]
self.assertEqual(errors, expected)
def test_reverse_query_name_clash(self):
class Model(models.Model):
model = models.ForeignKey("Model")
errors = Model.check()
expected = [
Error(
"Reverse query name for 'Model.model' clashes with field name 'Model.model'.",
hint=("Rename field 'Model.model', or add/change "
"a related_name argument to the definition "
"for field 'Model.model'."),
obj=Model._meta.get_field('model'),
id='fields.E303',
),
]
self.assertEqual(errors, expected)
def test_clash_under_explicit_related_name(self):
class Model(models.Model):
clash = models.CharField(max_length=10)
foreign = models.ForeignKey("Model", related_name='clash')
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.foreign' clashes with field name 'Model.clash'.",
hint=("Rename field 'Model.clash', or add/change "
"a related_name argument to the definition "
"for field 'Model.foreign'."),
obj=Model._meta.get_field('foreign'),
id='fields.E302',
),
Error(
"Reverse query name for 'Model.foreign' clashes with field name 'Model.clash'.",
hint=("Rename field 'Model.clash', or add/change "
"a related_name argument to the definition "
"for field 'Model.foreign'."),
obj=Model._meta.get_field('foreign'),
id='fields.E303',
),
]
self.assertEqual(errors, expected)
class ComplexClashTests(IsolatedModelsTestCase):
# New tests should not be included here, because this is a single,
# self-contained sanity check, not a test of everything.
def test_complex_clash(self):
class Target(models.Model):
tgt_safe = models.CharField(max_length=10)
clash = models.CharField(max_length=10)
model = models.CharField(max_length=10)
clash1_set = models.CharField(max_length=10)
class Model(models.Model):
src_safe = models.CharField(max_length=10)
foreign_1 = models.ForeignKey(Target, related_name='id')
foreign_2 = models.ForeignKey(Target, related_name='src_safe')
m2m_1 = models.ManyToManyField(Target, related_name='id')
m2m_2 = models.ManyToManyField(Target, related_name='src_safe')
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.foreign_1' clashes with field name 'Target.id'.",
hint=("Rename field 'Target.id', or add/change a related_name "
"argument to the definition for field 'Model.foreign_1'."),
obj=Model._meta.get_field('foreign_1'),
id='fields.E302',
),
Error(
"Reverse query name for 'Model.foreign_1' clashes with field name 'Target.id'.",
hint=("Rename field 'Target.id', or add/change a related_name "
"argument to the definition for field 'Model.foreign_1'."),
obj=Model._meta.get_field('foreign_1'),
id='fields.E303',
),
Error(
"Reverse accessor for 'Model.foreign_1' clashes with reverse accessor for 'Model.m2m_1'.",
hint=("Add or change a related_name argument to "
"the definition for 'Model.foreign_1' or 'Model.m2m_1'."),
obj=Model._meta.get_field('foreign_1'),
id='fields.E304',
),
Error(
"Reverse query name for 'Model.foreign_1' clashes with reverse query name for 'Model.m2m_1'.",
hint=("Add or change a related_name argument to "
"the definition for 'Model.foreign_1' or 'Model.m2m_1'."),
obj=Model._meta.get_field('foreign_1'),
id='fields.E305',
),
Error(
"Reverse accessor for 'Model.foreign_2' clashes with reverse accessor for 'Model.m2m_2'.",
hint=("Add or change a related_name argument "
"to the definition for 'Model.foreign_2' or 'Model.m2m_2'."),
obj=Model._meta.get_field('foreign_2'),
id='fields.E304',
),
Error(
"Reverse query name for 'Model.foreign_2' clashes with reverse query name for 'Model.m2m_2'.",
hint=("Add or change a related_name argument to "
"the definition for 'Model.foreign_2' or 'Model.m2m_2'."),
obj=Model._meta.get_field('foreign_2'),
id='fields.E305',
),
Error(
"Reverse accessor for 'Model.m2m_1' clashes with field name 'Target.id'.",
hint=("Rename field 'Target.id', or add/change a related_name "
"argument to the definition for field 'Model.m2m_1'."),
obj=Model._meta.get_field('m2m_1'),
id='fields.E302',
),
Error(
"Reverse query name for 'Model.m2m_1' clashes with field name 'Target.id'.",
hint=("Rename field 'Target.id', or add/change a related_name "
"argument to the definition for field 'Model.m2m_1'."),
obj=Model._meta.get_field('m2m_1'),
id='fields.E303',
),
Error(
"Reverse accessor for 'Model.m2m_1' clashes with reverse accessor for 'Model.foreign_1'.",
hint=("Add or change a related_name argument to the definition "
"for 'Model.m2m_1' or 'Model.foreign_1'."),
obj=Model._meta.get_field('m2m_1'),
id='fields.E304',
),
Error(
"Reverse query name for 'Model.m2m_1' clashes with reverse query name for 'Model.foreign_1'.",
hint=("Add or change a related_name argument to "
"the definition for 'Model.m2m_1' or 'Model.foreign_1'."),
obj=Model._meta.get_field('m2m_1'),
id='fields.E305',
),
Error(
"Reverse accessor for 'Model.m2m_2' clashes with reverse accessor for 'Model.foreign_2'.",
hint=("Add or change a related_name argument to the definition "
"for 'Model.m2m_2' or 'Model.foreign_2'."),
obj=Model._meta.get_field('m2m_2'),
id='fields.E304',
),
Error(
"Reverse query name for 'Model.m2m_2' clashes with reverse query name for 'Model.foreign_2'.",
hint=("Add or change a related_name argument to the definition "
"for 'Model.m2m_2' or 'Model.foreign_2'."),
obj=Model._meta.get_field('m2m_2'),
id='fields.E305',
),
]
self.assertEqual(errors, expected)
class M2mThroughFieldsTests(IsolatedModelsTestCase):
def test_m2m_field_argument_validation(self):
"""
Tests that ManyToManyField accepts the ``through_fields`` kwarg
only if an intermediary table is specified.
"""
class Fan(models.Model):
pass
self.assertRaisesMessage(
ValueError, 'Cannot specify through_fields without a through model',
models.ManyToManyField, Fan, through_fields=('f1', 'f2'))
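# For contrast, a configuration that satisfies this check pairs
# through_fields with an explicit intermediary model (sketch):
#
#     invitees = models.ManyToManyField(Fan, through='Invitation',
#                                       through_fields=('event', 'invitee'))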
def test_invalid_order(self):
"""
Tests that mixing up the order of link fields to ManyToManyField.through_fields
triggers validation errors.
"""
class Fan(models.Model):
pass
class Event(models.Model):
invitees = models.ManyToManyField(Fan, through='Invitation', through_fields=('invitee', 'event'))
class Invitation(models.Model):
event = models.ForeignKey(Event)
invitee = models.ForeignKey(Fan)
inviter = models.ForeignKey(Fan, related_name='+')
field = Event._meta.get_field('invitees')
errors = field.check(from_model=Event)
expected = [
Error(
("'Invitation.invitee' is not a foreign key to 'Event'."),
hint="Did you mean one of the following foreign keys to 'Event': event?",
obj=field,
id='fields.E339'),
Error(
("'Invitation.event' is not a foreign key to 'Fan'."),
hint="Did you mean one of the following foreign keys to 'Fan': invitee, inviter?",
obj=field,
id='fields.E339'),
]
self.assertEqual(expected, errors)
def test_invalid_field(self):
"""
Tests that providing invalid field names to ManyToManyField.through_fields
triggers validation errors.
"""
class Fan(models.Model):
pass
class Event(models.Model):
invitees = models.ManyToManyField(Fan, through='Invitation', through_fields=('invalid_field_1', 'invalid_field_2'))
class Invitation(models.Model):
event = models.ForeignKey(Event)
invitee = models.ForeignKey(Fan)
inviter = models.ForeignKey(Fan, related_name='+')
field = Event._meta.get_field('invitees')
errors = field.check(from_model=Event)
expected = [
Error(
("The intermediary model 'invalid_models_tests.Invitation' has no field 'invalid_field_1'."),
hint="Did you mean one of the following foreign keys to 'Event': event?",
obj=field,
id='fields.E338'),
Error(
("The intermediary model 'invalid_models_tests.Invitation' has no field 'invalid_field_2'."),
hint="Did you mean one of the following foreign keys to 'Fan': invitee, inviter?",
obj=field,
id='fields.E338'),
]
self.assertEqual(expected, errors)
def test_explicit_field_names(self):
"""
Tests that if ``through_fields`` kwarg is given, it must specify both
link fields of the intermediary table.
"""
class Fan(models.Model):
pass
class Event(models.Model):
invitees = models.ManyToManyField(Fan, through='Invitation', through_fields=(None, 'invitee'))
class Invitation(models.Model):
event = models.ForeignKey(Event)
invitee = models.ForeignKey(Fan)
inviter = models.ForeignKey(Fan, related_name='+')
field = Event._meta.get_field('invitees')
errors = field.check(from_model=Event)
expected = [
Error(
("Field specifies 'through_fields' but does not provide the names "
"of the two link fields that should be used for the relation "
"through model 'invalid_models_tests.Invitation'."),
hint=("Make sure you specify 'through_fields' as "
"through_fields=('field1', 'field2')"),
obj=field,
id='fields.E337')]
self.assertEqual(expected, errors)
|
nivertech/duktape
|
refs/heads/master
|
website/buildimages.py
|
15
|
#!/usr/bin/env python
#
# Build data URIs for images. Results are manually embedded into CSS.
#
# https://en.wikipedia.org/wiki/Data_URI_scheme
#
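# For reference, an embedded result looks roughly like this in CSS (payload
# truncated for illustration; 'iVBORw0KGgo' is just the base64 PNG signature):
#
#   .logo { background-image: url("data:image/png;base64,iVBORw0KGgo..."); }
#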
import base64
if __name__ == '__main__':
for image in [
'bg-c.png', 'bg-ecmascript.png',
'bg-c-2.png', 'bg-ecmascript-2.png'
]:
with open(image, 'rb') as f:
    data = f.read()
# b64encode returns bytes on Python 3; decode so the string concatenation works there too
data_uri = 'data:image/png;base64,' + base64.b64encode(data).decode('ascii')
print('')
print('%s -> %s' % (image, data_uri))
print('')
|
CWatM/CWatM
|
refs/heads/version091
|
cwatm/hydrological_modules/soil.py
|
1
|
# -------------------------------------------------------------------------
# Name: Soil module
# Purpose:
#
# Author: PB
#
# Created: 15/07/2016
# Copyright: (c) PB 2016 based on PCRGLOBE, LISFLOOD, HBV
# -------------------------------------------------------------------------
from cwatm.management_modules.data_handling import *
class soil(object):
"""
**SOIL**
Calculation of the vertical transfer of water based on the Arno scheme
**Global variables**
==================== ================================================================================ =========
Variable [self.var] Description Unit
==================== ================================================================================ =========
capRiseFrac fraction of a grid cell where capillary rise may happen m
cropKC crop coefficient for each of the 4 different land cover types (forest, irrigated --
storGroundwater simulated groundwater storage m
modflow Flag: True if modflow_coupling = True in settings file --
availWaterInfiltrati quantity of water reaching the soil after interception, plus snowmelt m
interceptEvap simulated evaporation from water intercepted by vegetation m
potTranspiration Potential transpiration (after removing of evaporation) m
snowEvap total evaporation from snow for a snow layers m
fracVegCover Fraction of area covered by the corresponding landcover type
rootDepth
KSat1
KSat2
KSat3
genuM1
genuM2
genuM3
genuInvM1
genuInvM2
genuInvM3
ws1 Maximum storage capacity in layer 1 m
ws2 Maximum storage capacity in layer 2 m
ws3 Maximum storage capacity in layer 3 m
wres1 Residual storage capacity in layer 1 m
wres2 Residual storage capacity in layer 2 m
wres3 Residual storage capacity in layer 3 m
wrange1
wrange2
wrange3
wfc1 Soil moisture at field capacity in layer 1
wfc2 Soil moisture at field capacity in layer 2
wfc3 Soil moisture at field capacity in layer 3
wwp1 Soil moisture at wilting point in layer 1
wwp2 Soil moisture at wilting point in layer 2
wwp3 Soil moisture at wilting point in layer 3
kunSatFC12
kunSatFC23
arnoBeta
adjRoot
maxtopwater maximum height of topwater m
capillar Simulated flow from groundwater to the third CWATM soil layer m
EWRef potential evaporation rate from water surface m
FrostIndexThreshold Degree Days Frost Threshold (stops infiltration, percolation and capillary rise) --
FrostIndex FrostIndex - Molnau and Bissel (1983), A Continuous Frozen Ground Index for Floo --
actualET simulated evapotranspiration from soil, flooded area and vegetation m
soilLayers Number of soil layers --
soildepth Thickness of the first soil layer m
soildepth12 Total thickness of layer 2 and 3 m
w1 Simulated water storage in the layer 1 m
w2 Simulated water storage in the layer 2 m
w3 Simulated water storage in the layer 3 m
topwater quantity of water above the soil (flooding) m
directRunoff Simulated surface runoff m
interflow Simulated flow reaching runoff instead of groundwater m
openWaterEvap Simulated evaporation from open areas m
actTransTotal Total actual transpiration from the three soil layers m
actBareSoilEvap Simulated evaporation from the first soil layer m
percolationImp Fraction of area covered by the corresponding landcover type m
cropGroupNumber soil water depletion fraction, Van Diepen et al., 1988: WOFOST 6.0, p.86, Dooren --
cPrefFlow Factor influencing preferential flow (flow from surface to GW) --
act_irrConsumption actual irrigation water consumption m
potBareSoilEvap potential bare soil evaporation (calculated with minus snow evaporation) m
totalPotET Potential evaporation per land use class m
rws Transpiration reduction factor (in case of water stress) --
prefFlow Flow going directly from rainfall to groundwater m
infiltration Water actually infiltrating the soil m
capRiseFromGW Simulated capillary rise from groundwater m
NoSubSteps Number of sub steps to calculate soil percolation --
perc1to2 Simulated water flow from soil layer 1 to soil layer 2 m
perc2to3 Simulated water flow from soil layer 2 to soil layer 3 m
perc3toGW Simulated water flow from soil layer 3 to groundwater m
theta1 fraction of water in soil compartment 1 for each land use class --
theta2 fraction of water in soil compartment 2 for each land use class --
theta3 fraction of water in soil compartment 3 for each land use class --
actTransTotal_forest
actTransTotal_grassl
actTransTotal_paddy
actTransTotal_nonpad
before
gwRecharge groundwater recharge m
==================== ================================================================================ =========
**Functions**
"""
def __init__(self, model):
self.var = model.var
self.model = model
def initial(self):
"""
Initial part of the soil module
* Initialize all the hydraulic properties of soil
* Set soil depth
"""
self.var.soilLayers = 3
# --- Topography -----------------------------------------------------
# maps of relative elevation above flood plains
dzRel = ['dzRel0001','dzRel0005',
'dzRel0010','dzRel0020','dzRel0030','dzRel0040','dzRel0050',
'dzRel0060','dzRel0070','dzRel0080','dzRel0090','dzRel0100']
for i in dzRel:
vars(self.var)[i] = readnetcdfWithoutTime(cbinding('relativeElevation'),i)
# Fraction of area where percolation to groundwater is impeded [dimensionless]
self.var.percolationImp = np.maximum(0,np.minimum(1,loadmap('percolationImp') * loadmap('factor_interflow')))
# ------------ Preferential Flow constant ------------------------------------------
self.var.cropGroupNumber = loadmap('cropgroupnumber')
# soil water depletion fraction, Van Diepen et al., 1988: WOFOST 6.0, p.86, Doorenbos et. al 1978
# crop groups for formular in van Diepen et al, 1988
# ------------ Preferential Flow constant ------------------------------------------
self.var.cPrefFlow = loadmap('preferentialFlowConstant')
# ------------ SOIL DEPTH ----------------------------------------------------------
# soil thickness and storage
#soilDepthLayer = [('soildepth', 'SoilDepth'),('storCap','soilWaterStorageCap')]
soilDepthLayer = [('soildepth', 'SoilDepth')]
for layer,property in soilDepthLayer:
vars(self.var)[layer] = np.tile(globals.inZero, (self.var.soilLayers, 1))
# first soil layer = 5 cm
self.var.soildepth[0] = 0.05 + globals.inZero
# second soil layer: minimum 5 cm
self.var.soildepth[1] = np.maximum(0.05, loadmap('StorDepth1') - self.var.soildepth[0])
# soil depth[1] is inc/decr by a calibration factor
#self.var.soildepth[1] = self.var.soildepth[1] * loadmap('soildepth_factor')
#self.var.soildepth[1] = np.maximum(0.05, self.var.soildepth[1])
# corrected by the calibration factor, total soil depth stays the same
#self.var.soildepth[2] = loadmap('StorDepth2') + (1. - loadmap('soildepth_factor') * self.var.soildepth[1])
#self.var.soildepth[2] = loadmap('StorDepth2') * loadmap('soildepth_factor')
self.var.soildepth[2] = loadmap('StorDepth2')
self.var.soildepth[2] = np.maximum(0.05, self.var.soildepth[2])
# Calibration
soildepth_factor = loadmap('soildepth_factor')
self.var.soildepth[1] = self.var.soildepth[1] * soildepth_factor
self.var.soildepth[2] = self.var.soildepth[2] * soildepth_factor
self.var.soildepth12 = self.var.soildepth[1] + self.var.soildepth[2]
ii= 0
# report("C:/work/output2/soil.map", self.var.soildepth12)
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
def dynamic(self, coverType, No):
"""
Dynamic part of the soil module
For each of the land cover classes the vertical water transport is simulated
Distribution of water holding capacity in 3 soil layers based on saturation excess overland flow, preferential flow
Dependent on soil depth and soil hydraulic parameters
"""
# ---------------------------------------------------------
if checkOption('calcWaterBalance'):
preStor1 = self.var.w1[No].copy()
preStor2 = self.var.w2[No].copy()
preStor3 = self.var.w3[No].copy()
pretopwater = self.var.topwater
# -----------------------------------------------------------
# from evaporation
# calculate potential bare soil evaporation and transpiration
# self.var.potBareSoilEvap = self.var.cropCorrect * self.var.minCropKC[No] * self.var.ETRef
# potTranspiration: Transpiration for each land cover class
# self.var.potTranspiration[No] = self.var.cropCorrect * self.var.cropKC * self.var.ETRef - self.var.potBareSoilEvap
# from interception module
# self.var.potTranspiration[No] = np.maximum(0, self.var.potTranspiration[No] - self.var.interceptEvap[No])
# # interceptEvap is the first flux in ET, soil evapo and transpiration are added later
# self.var.actualET[No] = self.var.interceptEvap[No].copy()
#if (dateVar['curr'] == 130) and (No==2):
# ii=1
availWaterInfiltration = self.var.availWaterInfiltration[No].copy()
availWaterInfiltration = availWaterInfiltration + self.var.act_irrConsumption[No]
# availWaterInfiltration = net water from precipitation (- soil - interception - snow + snow melt) + water for irrigation
if coverType == 'irrPaddy':
# depending on the crop calendar -> here if cropKC > 0.75 paddies are flooded to 50mm (as set in the settings file)
#if self.var.cropKC[No]>0.75:
# ii = 1
self.var.topwater = np.where(self.var.cropKC[No] > 0.75, self.var.topwater + availWaterInfiltration, self.var.topwater)
# open water evaporation from the paddy field - using potential evaporation from open water
self.var.openWaterEvap[No] = np.minimum(np.maximum(0., self.var.topwater), self.var.EWRef)
self.var.topwater = self.var.topwater - self.var.openWaterEvap[No]
# if paddies are flooded, avail water is calculated before: top + avail, otherwise it is calculated here
availWaterInfiltration = np.where(self.var.cropKC[No] > 0.75, self.var.topwater, self.var.topwater + availWaterInfiltration)
# open water can evaporate more than maximum bare soil + transpiration because it is calculated from open water pot evaporation
#h = self.var.potBareSoilEvap - self.var.openWaterEvap[No]
self.var.potBareSoilEvap = np.maximum(0.,self.var.potBareSoilEvap - self.var.openWaterEvap[No])
# if open water evaporation is bigger than bare soil evaporation, the transpiration rate is reduced
# self.var.potTranspiration[No] = np.where( h > 0, self.var.potTranspiration[No], np.maximum(0.,self.var.potTranspiration[No] + h))
else:
self.var.openWaterEvap[No] = 0.
#if (dateVar['curr'] >= 0) and (No==3):
# ii=1
# add capillary rise from groundwater if modflow is used
if self.var.modflow:
### if GW capillary rise saturates soil layers, water is sent to the above layer, then to runoff
self.var.w3[No] = self.var.w3[No] + self.var.capillar
# capillary rise from GW to soil layer 3; if this layer is full, the excess is sent to soil layer 2
self.var.w2[No] = self.var.w2[No] + np.where(self.var.w3[No] > self.var.ws3[No], self.var.w3[No] - self.var.ws3[No], 0)
self.var.w3[No] = np.minimum(self.var.ws3[No], self.var.w3[No])
# capillary rise from GW to soil layer 2; if this layer is full, the excess is sent to soil layer 1
self.var.w1[No] = self.var.w1[No] + np.where(self.var.w2[No] > self.var.ws2[No], self.var.w2[No] - self.var.ws2[No], 0)
self.var.w2[No] = np.minimum(self.var.ws2[No], self.var.w2[No])
# capillary rise from GW to soil layer 1; if this layer is full, the excess is sent to runoff
saverunofffromGW = + np.where(self.var.w1[No] > self.var.ws1[No], self.var.w1[No] - self.var.ws1[No], 0)
self.var.w1[No]= np.minimum(self.var.ws1[No], self.var.w1[No])
# ---------------------------------------------------------
# calculate transpiration
# ***** SOIL WATER STRESS ************************************
etpotMax = np.minimum(0.1 * (self.var.totalPotET[No] * 1000.), 1.0)
# to avoid strange behaviour of the p-formula, ETRef is set to a maximum of 10 mm/day.
if coverType == 'irrPaddy' or coverType == 'irrNonPaddy':
p = 1 / (0.76 + 1.5 * etpotMax) - 0.4
# soil water depletion fraction (easily available soil water) # Van Diepen et al., 1988: WOFOST 6.0, p.87.
p = p + (etpotMax - 0.6) / 4
# correction for crop group 1 (Van Diepen et al, 1988) -> p between 0.14 - 0.77
# The crop group number is an indicator of adaptation to dry climate,
# e.g. olive groves are adapted to dry climate, therefore they can extract more water from drying out soil than e.g. rice.
# The crop group number of olive groves is 4 and of rice fields is 1
# for irrigation it is expected that the crop has a low adaptation to dry climate
else:
p = 1 / (0.76 + 1.5 * etpotMax) - 0.10 * (5 - self.var.cropGroupNumber)
# soil water depletion fraction (easily available soil water)
# Van Diepen et al., 1988: WOFOST 6.0, p.87
# to avoid strange behaviour of the p-formula, ETRef is set to a maximum of
# 10 mm/day. Thus, p will range from 0.15 to 0.45 at ETRef = 10 and
# CropGroupNumber 1-5
p = np.where(self.var.cropGroupNumber <= 2.5, p + (etpotMax - 0.6) / (self.var.cropGroupNumber * (self.var.cropGroupNumber + 3)), p)
# correction for crop groups 1 and 2 (Van Diepen et al, 1988)
p = np.maximum(np.minimum(p, 1.0), 0.)
# p is between 0 and 1 => if p =1 wcrit = wwp, if p= 0 wcrit = wfc
# p is closer to 0 if evapo is bigger and cropgroup is smaller
wCrit1 = ((1 - p) * (self.var.wfc1[No] - self.var.wwp1[No])) + self.var.wwp1[No]
wCrit2 = ((1 - p) * (self.var.wfc2[No] - self.var.wwp2[No])) + self.var.wwp2[No]
wCrit3 = ((1 - p) * (self.var.wfc3[No] - self.var.wwp3[No])) + self.var.wwp3[No]
# Transpiration reduction factor (in case of water stress)
rws1 = divideValues((self.var.w1[No] - self.var.wwp1[No]),(wCrit1 - self.var.wwp1[No]), default = 1.)
rws2 = divideValues((self.var.w2[No] - self.var.wwp2[No]), (wCrit2 - self.var.wwp2[No]), default=1.)
rws3 = divideValues((self.var.w3[No] - self.var.wwp3[No]), (wCrit3 - self.var.wwp3[No]), default=1.)
#with np.errstate(invalid='ignore', divide='ignore'):
#rws1 = np.where((wCrit1 - self.var.wwp1[No]) > 0, (self.var.w1[No] - self.var.wwp1[No]) / (wCrit1 - self.var.wwp1[No]), 1.0)
#rws2 = np.where((wCrit2 - self.var.wwp2[No]) > 0, (self.var.w2[No] - self.var.wwp2[No]) / (wCrit2 - self.var.wwp2[No]), 1.0)
#rws3 = np.where((wCrit3 - self.var.wwp3[No]) > 0, (self.var.w3[No] - self.var.wwp3[No]) / (wCrit3 - self.var.wwp3[No]), 1.0)
rws1 = np.maximum(np.minimum(1., rws1), 0.) * self.var.adjRoot[0][No]
rws2 = np.maximum(np.minimum(1., rws2), 0.) * self.var.adjRoot[1][No]
rws3 = np.maximum(np.minimum(1., rws3), 0.) * self.var.adjRoot[2][No]
self.var.rws = rws1 + rws2 + rws3
TaMax = self.var.potTranspiration[No] * self.var.rws
# transpiration is 0 when soil is frozen
TaMax = np.where(self.var.FrostIndex > self.var.FrostIndexThreshold, 0., TaMax)
ta1 = np.maximum(np.minimum(TaMax * self.var.adjRoot[0][No], self.var.w1[No] - self.var.wwp1[No]), 0.0)
ta2 = np.maximum(np.minimum(TaMax * self.var.adjRoot[1][No], self.var.w2[No] - self.var.wwp2[No]), 0.0)
ta3 = np.maximum(np.minimum(TaMax * self.var.adjRoot[2][No], self.var.w3[No] - self.var.wwp3[No]), 0.0)
#if (dateVar['curr'] == 23) and (No==1):
# ii=1
# #print ('t', self.var.w1[No][0:3])
self.var.w1[No] = self.var.w1[No] - ta1
self.var.w2[No] = self.var.w2[No] - ta2
self.var.w3[No] = self.var.w3[No] - ta3
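# --- Illustrative sketch (editor's addition, hypothetical names): the water
# stress reduction above is a linear rescaling of soil moisture between the
# wilting point and the critical moisture content, clipped to [0, 1].
def _sketch_reduction_factor(w, wwp, wcrit):
    import numpy as np
    with np.errstate(invalid='ignore', divide='ignore'):
        rws = np.where((wcrit - wwp) > 0, (w - wwp) / (wcrit - wwp), 1.0)
    return np.clip(rws, 0.0, 1.0)
# e.g. _sketch_reduction_factor(0.12, 0.10, 0.20) -> 0.2 (strong water stress)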
# -------------------------------------------------------------
# Actual potential bare soil evaporation - upper layer
self.var.actBareSoilEvap[No] = np.minimum(self.var.potBareSoilEvap,np.maximum(0.,self.var.w1[No] - self.var.wres1[No]))
self.var.actBareSoilEvap[No] = np.where(self.var.FrostIndex > self.var.FrostIndexThreshold, 0., self.var.actBareSoilEvap[No])
# no bare soil evaporation in the inundated paddy field
if coverType == 'irrPaddy':
self.var.actBareSoilEvap[No] = np.where(self.var.topwater > 0., 0., self.var.actBareSoilEvap[No])
self.var.w1[No] = self.var.w1[No] - self.var.actBareSoilEvap[No]
# -------------------------------------------------------------
# Infiltration capacity
# ========================================
# first 2 soil layers to estimate distribution between runoff and infiltration
soilWaterStorage = self.var.w1[No] + self.var.w2[No]
soilWaterStorageCap = self.var.ws1[No] + self.var.ws2[No]
relSat = soilWaterStorage / soilWaterStorageCap
relSat = np.minimum(relSat, 1.0)
#if np.min(self.var.w1[No])< 0.:
# ii =1
#if (dateVar['curr'] == 23) and (No==1):
# ii=1
# print (No, self.var.w1[No][0:3])
satAreaFrac = 1 - (1 - relSat) ** self.var.arnoBeta[No]
# Fraction of pixel that is at saturation as a function of
# the ratio Theta1/ThetaS1. Distribution function taken from
# Zhao,1977, as cited in Todini, 1996 (JoH 175, 339-382)
satAreaFrac = np.maximum(np.minimum(satAreaFrac, 1.0), 0.0)
store = soilWaterStorageCap / (self.var.arnoBeta[No] + 1)
potBeta = (self.var.arnoBeta[No] + 1) / self.var.arnoBeta[No]
potInf = store - store * (1 - (1 - satAreaFrac) ** potBeta)
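# --- Illustrative sketch (editor's addition, hypothetical names): standalone
# version of the improved Arno/Xinanjiang scheme above (Zhao 1977; Todini 1996):
# the saturated area fraction grows with relative saturation, which in turn
# caps the potential infiltration.
def _sketch_arno_potential_infiltration(soil_storage, storage_cap, beta):
    import numpy as np
    rel_sat = np.minimum(soil_storage / storage_cap, 1.0)
    sat_area_frac = np.clip(1 - (1 - rel_sat) ** beta, 0.0, 1.0)
    store = storage_cap / (beta + 1)
    pot_beta = (beta + 1) / beta
    return store - store * (1 - (1 - sat_area_frac) ** pot_beta)
# e.g. a fully saturated pixel (rel_sat = 1) yields zero potential infiltration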
# ------------------------------------------------------------------
# calculate preferential flow
if coverType == 'irrPaddy' or not(checkOption('preferentialFlow')):
self.var.prefFlow[No] = 0.
else:
self.var.prefFlow[No] = availWaterInfiltration * relSat ** self.var.cPrefFlow
self.var.prefFlow[No] = np.where(self.var.FrostIndex > self.var.FrostIndexThreshold, 0.0, self.var.prefFlow[No])
# ---------------------------------------------------------
# calculate infiltration
# infiltration, limited with KSat1 and available water in topWaterLayer
self.var.infiltration[No] = np.minimum(potInf, availWaterInfiltration - self.var.prefFlow[No])
self.var.infiltration[No] = np.where(self.var.FrostIndex > self.var.FrostIndexThreshold, 0.0, self.var.infiltration[No])
self.var.directRunoff[No] = np.maximum(0.,availWaterInfiltration - self.var.infiltration[No] - self.var.prefFlow[No])
if coverType == 'irrPaddy':
self.var.topwater = np.maximum(0., self.var.topwater - self.var.infiltration[No])
# if paddy fields are flooded, runoff occurs only where topwater exceeds maxtopwater (0.05 m)
h = np.maximum(0., self.var.topwater - self.var.maxtopwater)
self.var.directRunoff[No] = np.where(self.var.cropKC[No] > 0.75, h, self.var.directRunoff[No])
self.var.topwater = np.maximum(0., self.var.topwater - self.var.directRunoff[No])
### ModFlow
if self.var.modflow:
self.var.directRunoff[No]=self.var.directRunoff[No] + saverunofffromGW
# ADDING EXCESS WATER FROM GW CAPILLARY RISE
# infiltration to soil layer 1; if this layer is full, the excess is sent to soil layer 2
self.var.w1[No] = self.var.w1[No] + self.var.infiltration[No]
self.var.w2[No] = self.var.w2[No] + np.where(self.var.w1[No] > self.var.ws1[No], self.var.w1[No] - self.var.ws1[No], 0)
self.var.w1[No] = np.minimum(self.var.ws1[No], self.var.w1[No])
## ----------------------------------------------------------
# to the water demand module # could not be done earlier in landcoverType_module because readAvlWater is needed
# water available for plants
#availWaterPlant1 = np.maximum(0., self.var.w1[No] - self.var.wwp1[No]) * self.var.rootDepth[0][No]
#availWaterPlant2 = np.maximum(0., self.var.w2[No] - self.var.wwp2[No]) * self.var.rootDepth[1][No]
#availWaterPlant3 = np.maximum(0., self.var.w3[No] - self.var.wwp3[No]) * self.var.rootDepth[2][No]
#readAvlWater = availWaterPlant1 + availWaterPlant2 + availWaterPlant3
# Percolation -----------------------------------------------
if No == 0:
NoSoil = 0
else:
NoSoil = 1
# Available water in both soil layers [m]
availWater1 = np.maximum(0.,self.var.w1[No] - self.var.wres1[No])
availWater2 = np.maximum(0.,self.var.w2[No] - self.var.wres2[No])
availWater3 = np.maximum(0.,self.var.w3[No] - self.var.wres3[No])
satTerm2 = availWater2 / self.var.wrange2[No]
satTerm3 = availWater3 / self.var.wrange3[No]
# Saturation term in Van Genuchten equation (always between 0 and 1)
satTerm2 = np.maximum(np.minimum(satTerm2, 1.0), 0)
satTerm3 = np.maximum(np.minimum(satTerm3, 1.0), 0)
# Unsaturated conductivity
kUnSat2 = self.var.KSat2[NoSoil] * np.sqrt(satTerm2) * np.square(1 - (1 - satTerm2 ** self.var.genuInvM2[NoSoil]) ** self.var.genuM2[NoSoil])
kUnSat3 = self.var.KSat3[NoSoil] * np.sqrt(satTerm3) * np.square(1 - (1 - satTerm3 ** self.var.genuInvM3[NoSoil]) ** self.var.genuM3[NoSoil])
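# --- Illustrative sketch (editor's addition, hypothetical names; assumes
# genuInvM = 1/genuM): the unsaturated conductivity above follows the
# Mualem-van Genuchten form K(Se) = Ksat * sqrt(Se) * (1 - (1 - Se**(1/m))**m)**2.
def _sketch_kunsat(ksat, sat_term, genu_m):
    import numpy as np
    se = np.clip(sat_term, 0.0, 1.0)
    return ksat * np.sqrt(se) * np.square(1 - (1 - se ** (1.0 / genu_m)) ** genu_m)
# e.g. se = 1 gives K = Ksat, se = 0 gives K = 0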
## ----------------------------------------------------------
# Capillar Rise
satTermFC1 = np.maximum(0., self.var.w1[No] - self.var.wres1[No]) / (self.var.wfc1[No] - self.var.wres1[No])
satTermFC2 = np.maximum(0., self.var.w2[No] - self.var.wres2[No]) / (self.var.wfc2[No] - self.var.wres2[No])
satTermFC3 = np.maximum(0., self.var.w3[No] - self.var.wres3[No]) / (self.var.wfc3[No] - self.var.wres3[No])
capRise1 = np.minimum(np.maximum(0., (1 - satTermFC1) * kUnSat2), self.var.kunSatFC12[No])
capRise2 = np.minimum(np.maximum(0., (1 - satTermFC2) * kUnSat3), self.var.kunSatFC23[No])
if self.var.modflow:
# from Modflow
self.var.capRiseFromGW[No] = self.var.capillar
else:
self.var.capRiseFromGW[No] = np.maximum(0., (1 - satTermFC3) * np.sqrt(self.var.KSat3[NoSoil] * kUnSat3))
self.var.capRiseFromGW[No] = 0.5 * self.var.capRiseFrac * self.var.capRiseFromGW[No]
self.var.capRiseFromGW[No] = np.minimum(np.maximum(0., self.var.storGroundwater), self.var.capRiseFromGW[No])
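# --- Illustrative sketch (editor's addition, hypothetical names): capillary
# rise between layers (capRise1/capRise2 above) is driven by the upper layer's
# deficit below field capacity and capped by the unsaturated conductivity at
# field capacity.
def _sketch_capillary_rise(sat_term_fc, kunsat_below, kunsat_fc_cap):
    import numpy as np
    return np.minimum(np.maximum(0., (1 - sat_term_fc) * kunsat_below), kunsat_fc_cap)
# e.g. a layer already at field capacity (sat_term_fc = 1) receives no capillary rise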
self.var.w1[No] = self.var.w1[No] + capRise1
self.var.w2[No] = self.var.w2[No] - capRise1 + capRise2
if self.var.modflow:
self.var.w3[No] = self.var.w3[No] - capRise2
# GW capillary rise has already been added to the soil
else:
self.var.w3[No] = self.var.w3[No] - capRise2 + self.var.capRiseFromGW[No]
# Percolation -----------------------------------------------
# Available water in both soil layers [m]
availWater1 = np.maximum(0.,self.var.w1[No] - self.var.wres1[No])
availWater2 = np.maximum(0.,self.var.w2[No] - self.var.wres2[No])
availWater3 = np.maximum(0.,self.var.w3[No] - self.var.wres3[No])
# Available storage capacity in subsoil
capLayer2 = self.var.ws2[No] - self.var.w2[No]
capLayer3 = self.var.ws3[No] - self.var.w3[No]
satTerm1 = availWater1 / self.var.wrange1[No]
satTerm2 = availWater2 / self.var.wrange2[No]
satTerm3 = availWater3 / self.var.wrange3[No]
# Saturation term in Van Genuchten equation (always between 0 and 1)
satTerm1 = np.maximum(np.minimum(satTerm1, 1.0), 0)
satTerm2 = np.maximum(np.minimum(satTerm2, 1.0), 0)
satTerm3 = np.maximum(np.minimum(satTerm3, 1.0), 0)
# Unsaturated conductivity
kUnSat1 = self.var.KSat1[NoSoil] * np.sqrt(satTerm1) * np.square(1 - (1 - satTerm1 ** self.var.genuInvM1[NoSoil]) ** self.var.genuM1[NoSoil])
kUnSat2 = self.var.KSat2[NoSoil] * np.sqrt(satTerm2) * np.square(1 - (1 - satTerm2 ** self.var.genuInvM2[NoSoil]) ** self.var.genuM2[NoSoil])
kUnSat3 = self.var.KSat3[NoSoil] * np.sqrt(satTerm3) * np.square(1 - (1 - satTerm3 ** self.var.genuInvM3[NoSoil]) ** self.var.genuM3[NoSoil])
"""
# Courant condition for computed soil moisture fluxes:
# if Courant gt CourantCrit: sub-steps needed for required numerical accuracy
with np.errstate(invalid='ignore', divide='ignore'):
courant1to2 = np.where(availWater1 == 0, 0, kUnSat1 / availWater1)
courant2to3 = np.where(availWater2 == 0, 0, kUnSat2 / availWater2)
courant3toGW = np.where(availWater3 == 0, 0, kUnSat3 / availWater3)
# Flow between soil layers and flow to GW
# need to be numerically stable, so number of sub-steps is
# based on process with largest Courant number
# note: np.maximum takes only two input arrays (a third positional argument
# is 'out'), so the three Courant numbers must be combined pairwise
courantSoil = np.maximum(np.maximum(courant1to2, courant2to3), courant3toGW)
# Number of sub-steps needed for required numerical
# accuracy. Always greater than or equal to 1
# Do not change, default value of 2.5. Generally combines sufficient numerical accuracy within a limited number of sub - steps
NoSubS = np.maximum(1, np.ceil(courantSoil * 2.5))
self.var.NoSubSteps = int(np.nanmax(NoSubS))
"""
self.var.NoSubSteps = 3
DtSub = 1. / self.var.NoSubSteps
# Copy current values of w1, w2 and w3 to temporary variables,
# because computed fluxes may need correction for the storage
# capacity of the subsoil and in case the soil is frozen (after the loop)
wtemp1 = self.var.w1[No].copy()
wtemp2 = self.var.w2[No].copy()
wtemp3 = self.var.w3[No].copy()
# Initialize top- to subsoil flux (accumulated value for all sub-steps)
# Initialize fluxes out of subsoil (accumulated value for all sub-steps)
self.var.perc1to2[No] = 0
self.var.perc2to3[No] = 0
self.var.perc3toGW[No] = 0
# Start iterating
for i in range(self.var.NoSubSteps):
if i > 0:
# Saturation term in Van Genuchten equation
satTerm1 = np.maximum(0., wtemp1 - self.var.wres1[No])/ self.var.wrange1[No]
satTerm2 = np.maximum(0., wtemp2 - self.var.wres2[No]) / self.var.wrange2[No]
satTerm3 = np.maximum(0., wtemp3 - self.var.wres3[No]) / self.var.wrange3[No]
satTerm1 = np.maximum(np.minimum(satTerm1, 1.0), 0)
satTerm2 = np.maximum(np.minimum(satTerm2, 1.0), 0)
satTerm3 = np.maximum(np.minimum(satTerm3, 1.0), 0)
# Unsaturated hydraulic conductivities
kUnSat1 = self.var.KSat1[NoSoil] * np.sqrt(satTerm1) * np.square(1 - (1 - satTerm1 ** self.var.genuInvM1[NoSoil]) ** self.var.genuM1[NoSoil])
kUnSat2 = self.var.KSat2[NoSoil] * np.sqrt(satTerm2) * np.square(1 - (1 - satTerm2 ** self.var.genuInvM2[NoSoil]) ** self.var.genuM2[NoSoil])
kUnSat3 = self.var.KSat3[NoSoil] * np.sqrt(satTerm3) * np.square(1 - (1 - satTerm3 ** self.var.genuInvM3[NoSoil]) ** self.var.genuM3[NoSoil])
# Flux from top- to subsoil
subperc1to2 = np.minimum(availWater1,np.minimum(kUnSat1 * DtSub, capLayer2))
subperc2to3 = np.minimum(availWater2,np.minimum(kUnSat2 * DtSub, capLayer3))
subperc3toGW = np.minimum(availWater3,np.minimum(kUnSat3 * DtSub, availWater3))
# Update water balance for all layers
availWater1 = availWater1 - subperc1to2
availWater2 = availWater2 + subperc1to2 - subperc2to3
availWater3 = availWater3 + subperc2to3 - subperc3toGW
# Update WTemp1 and WTemp2
wtemp1 = availWater1 + self.var.wres1[No]
wtemp2 = availWater2 + self.var.wres2[No]
wtemp3 = availWater3 + self.var.wres3[No]
# Update available storage capacity in layer 2,3
capLayer2 = self.var.ws2[No] - wtemp2
capLayer3 = self.var.ws3[No] - wtemp3
self.var.perc1to2[No] += subperc1to2
self.var.perc2to3[No] += subperc2to3
self.var.perc3toGW[No] += subperc3toGW
# When the soil is frozen (FrostIndex above the threshold), no percolation from layers 1 and 2
self.var.perc1to2[No] = np.where(self.var.FrostIndex > self.var.FrostIndexThreshold, 0,self.var.perc1to2[No])
self.var.perc2to3[No] = np.where(self.var.FrostIndex > self.var.FrostIndexThreshold, 0,self.var.perc2to3[No])
# Update soil moisture
self.var.w1[No] = self.var.w1[No] - self.var.perc1to2[No]
self.var.w2[No] = self.var.w2[No] + self.var.perc1to2[No] - self.var.perc2to3[No]
self.var.w3[No] = self.var.w3[No] + self.var.perc2to3[No] - self.var.perc3toGW[No]
# volumetric soil moisture per layer: storage divided by the layer's root depth
self.var.theta1[No] = self.var.w1[No] / self.var.rootDepth[0][No]
self.var.theta2[No] = self.var.w2[No] / self.var.rootDepth[1][No]
self.var.theta3[No] = self.var.w3[No] / self.var.rootDepth[2][No]
# ---------------------------------------------------------------------------------------------
# total actual transpiration
self.var.actTransTotal[No] = ta1 + ta2 + ta3
self.var.actTransTotal_forest = self.var.actTransTotal[0] * self.var.fracVegCover[0]
self.var.actTransTotal_grasslands = self.var.actTransTotal[1] * self.var.fracVegCover[1]
self.var.actTransTotal_paddy = self.var.actTransTotal[2]*self.var.fracVegCover[2]
self.var.actTransTotal_nonpaddy = self.var.actTransTotal[3]*self.var.fracVegCover[3]
self.var.before = self.var.actualET[No].copy()
# total actual evaporation + transpiration
self.var.actualET[No] = self.var.actualET[No] + self.var.actBareSoilEvap[No] + self.var.openWaterEvap[No] + self.var.actTransTotal[No]
# actual evapotranspiration can be bigger than pot, because openWater is taken from pot open water evaporation, therefore self.var.totalPotET[No] is adjusted
self.var.totalPotET[No] = np.maximum(self.var.totalPotET[No], self.var.actualET[No])
# groundwater recharge
toGWorInterflow = self.var.perc3toGW[No] + self.var.prefFlow[No]
self.var.interflow[No] = self.var.percolationImp * toGWorInterflow
if self.var.modflow:
self.var.gwRecharge[No] = (1 - self.var.percolationImp) * toGWorInterflow
else:
self.var.gwRecharge[No] = (1 - self.var.percolationImp) * toGWorInterflow - self.var.capRiseFromGW[No]
if checkOption('calcWaterBalance'):
self.model.waterbalance_module.waterBalanceCheck(
[self.var.availWaterInfiltration[No], self.var.capRiseFromGW[No], self.var.act_irrConsumption[No]],  # In; water demand is already included in availWater
[self.var.directRunoff[No],self.var.perc3toGW[No], self.var.prefFlow[No] ,
self.var.actTransTotal[No], self.var.actBareSoilEvap[No], self.var.openWaterEvap[No]], # Out
[ preStor1, preStor2, preStor3,pretopwater], # prev storage
[self.var.w1[No], self.var.w2[No], self.var.w3[No],self.var.topwater],
"Soil_1_"+str(No), False)
if checkOption('calcWaterBalance'):
self.model.waterbalance_module.waterBalanceCheck(
[self.var.availWaterInfiltration[No], self.var.act_irrConsumption[No]], # In
[self.var.directRunoff[No], self.var.interflow[No],self.var.gwRecharge[No],
self.var.actTransTotal[No], self.var.actBareSoilEvap[No], self.var.openWaterEvap[No]], # Out
[ preStor1, preStor2, preStor3,pretopwater], # prev storage
[self.var.w1[No], self.var.w2[No], self.var.w3[No],self.var.topwater],
"Soil_2", False)
# openWaterEvap is 'in' because it is taken from availWater directly, and 'out' because it is removed immediately; it is not really a soil process
if checkOption('calcWaterBalance'):
self.model.waterbalance_module.waterBalanceCheck(
[self.var.availWaterInfiltration[No], self.var.act_irrConsumption[No],self.var.snowEvap,self.var.interceptEvap[No]], # In
[self.var.directRunoff[No], self.var.interflow[No],self.var.gwRecharge[No],
self.var.actualET[No]], # Out
[preStor1, preStor2, preStor3,pretopwater], # prev storage
[self.var.w1[No], self.var.w2[No], self.var.w3[No],self.var.topwater],
"Soil_AllSoil", False)
|
pauloschilling/sentry
|
refs/heads/master
|
src/sentry/rules/actions/notify_event.py
|
25
|
"""
sentry.rules.actions.notify_event
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from sentry.plugins import plugins
from sentry.rules.actions.base import EventAction
from sentry.utils import metrics
from sentry.utils.safe import safe_execute
class NotifyEventAction(EventAction):
label = 'Send a notification (for all enabled services)'
def get_plugins(self):
from sentry.plugins.bases.notify import NotificationPlugin
results = []
for plugin in plugins.for_project(self.project, version=1):
if not isinstance(plugin, NotificationPlugin):
continue
results.append(plugin)
for plugin in plugins.for_project(self.project, version=2):
for notifier in (safe_execute(plugin.get_notifiers) or ()):
results.append(notifier)
return results
def after(self, event, state):
group = event.group
for plugin in self.get_plugins():
if not safe_execute(plugin.should_notify, group=group, event=event):
continue
metrics.incr('notifications.sent.{}'.format(plugin.slug))
yield self.future(plugin.rule_notify)
|
clinicalml/theanomodels
|
refs/heads/master
|
utils/checkin.py
|
1
|
import smtplib
from email.mime.text import MIMEText
# NOTE: textfile, me and you were undefined in the original snippet; the
# values below are hypothetical placeholders so the example can run.
textfile = 'checkin.txt'  # path to the plain text file to send
me = 'sender@example.com'  # the sender's email address
you = 'recipient@example.com'  # the recipient's email address
# Open a plain text file for reading. For this example, assume that
# the text file contains only ASCII characters.
fp = open(textfile, 'rb')
# Create a text/plain message
msg = MIMEText(fp.read())
fp.close()
msg['Subject'] = 'The contents of %s' % textfile
msg['From'] = me
msg['To'] = you
# Send the message via our own SMTP server, but don't include the
# envelope header.
s = smtplib.SMTP('localhost')
s.sendmail(me, [you], msg.as_string())
s.quit()
|
JasonCC/stm32radio
|
refs/heads/master
|
rt-thread/components/rtgui/common/freetype/builds/mac/ascii2mpw.py
|
830
|
#!/usr/bin/env python
import sys
import string
if len( sys.argv ) == 1 :
for asc_line in sys.stdin.readlines():
mpw_line = string.replace(asc_line, "\\xA5", "\245")
mpw_line = string.replace(mpw_line, "\\xB6", "\266")
mpw_line = string.replace(mpw_line, "\\xC4", "\304")
mpw_line = string.replace(mpw_line, "\\xC5", "\305")
mpw_line = string.replace(mpw_line, "\\xFF", "\377")
mpw_line = string.replace(mpw_line, "\n", "\r")
mpw_line = string.replace(mpw_line, "\\n", "\n")
sys.stdout.write(mpw_line)
elif sys.argv[1] == "-r" :
for mpw_line in sys.stdin.readlines():
asc_line = string.replace(mpw_line, "\n", "\\n")
asc_line = string.replace(asc_line, "\r", "\n")
asc_line = string.replace(asc_line, "\245", "\\xA5")
asc_line = string.replace(asc_line, "\266", "\\xB6")
asc_line = string.replace(asc_line, "\304", "\\xC4")
asc_line = string.replace(asc_line, "\305", "\\xC5")
asc_line = string.replace(asc_line, "\377", "\\xFF")
sys.stdout.write(asc_line)
|
PlayUAV/MissionPlanner
|
refs/heads/master
|
Lib/uu.py
|
62
|
#! /usr/bin/env python
# Copyright 1994 by Lance Ellinghouse
# Cathedral City, California Republic, United States of America.
# All Rights Reserved
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Lance Ellinghouse
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# LANCE ELLINGHOUSE DISCLAIMS ALL WARRANTIES WITH REGARD TO
# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS, IN NO EVENT SHALL LANCE ELLINGHOUSE CENTRUM BE LIABLE
# FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# Modified by Jack Jansen, CWI, July 1995:
# - Use binascii module to do the actual line-by-line conversion
# between ascii and binary. This results in a 1000-fold speedup. The C
# version is still 5 times faster, though.
# - Arguments more compliant with python standard
"""Implementation of the UUencode and UUdecode functions.
encode(in_file, out_file [,name, mode])
decode(in_file [, out_file, mode])
"""
import binascii
import os
import sys
__all__ = ["Error", "encode", "decode"]
class Error(Exception):
pass
def encode(in_file, out_file, name=None, mode=None):
"""Uuencode file"""
#
# If in_file is a pathname open it and change defaults
#
opened_files = []
try:
if in_file == '-':
in_file = sys.stdin
elif isinstance(in_file, basestring):
if name is None:
name = os.path.basename(in_file)
if mode is None:
try:
mode = os.stat(in_file).st_mode
except AttributeError:
pass
in_file = open(in_file, 'rb')
opened_files.append(in_file)
#
# Open out_file if it is a pathname
#
if out_file == '-':
out_file = sys.stdout
elif isinstance(out_file, basestring):
out_file = open(out_file, 'wb')
opened_files.append(out_file)
#
# Set defaults for name and mode
#
if name is None:
name = '-'
if mode is None:
mode = 0666
#
# Write the data
#
out_file.write('begin %o %s\n' % ((mode&0777),name))
data = in_file.read(45)
while len(data) > 0:
out_file.write(binascii.b2a_uu(data))
data = in_file.read(45)
out_file.write(' \nend\n')
finally:
for f in opened_files:
f.close()
def decode(in_file, out_file=None, mode=None, quiet=0):
"""Decode uuencoded file"""
#
# Open the input file, if needed.
#
opened_files = []
if in_file == '-':
in_file = sys.stdin
elif isinstance(in_file, basestring):
in_file = open(in_file)
opened_files.append(in_file)
try:
#
# Read until a begin is encountered or we've exhausted the file
#
while True:
hdr = in_file.readline()
if not hdr:
raise Error('No valid begin line found in input file')
if not hdr.startswith('begin'):
continue
hdrfields = hdr.split(' ', 2)
if len(hdrfields) == 3 and hdrfields[0] == 'begin':
try:
int(hdrfields[1], 8)
break
except ValueError:
pass
if out_file is None:
out_file = hdrfields[2].rstrip()
if os.path.exists(out_file):
raise Error('Cannot overwrite existing file: %s' % out_file)
if mode is None:
mode = int(hdrfields[1], 8)
#
# Open the output file
#
if out_file == '-':
out_file = sys.stdout
elif isinstance(out_file, basestring):
fp = open(out_file, 'wb')
try:
# os.path.chmod does not exist (the original always raised
# AttributeError here); os.chmod is the intended call
os.chmod(out_file, mode)
except (AttributeError, OSError):
pass
out_file = fp
opened_files.append(out_file)
#
# Main decoding loop
#
s = in_file.readline()
while s and s.strip() != 'end':
try:
data = binascii.a2b_uu(s)
except binascii.Error, v:
# Workaround for broken uuencoders by /Fredrik Lundh
nbytes = (((ord(s[0])-32) & 63) * 4 + 5) // 3
data = binascii.a2b_uu(s[:nbytes])
if not quiet:
sys.stderr.write("Warning: %s\n" % v)
out_file.write(data)
s = in_file.readline()
if not s:
raise Error('Truncated input file')
finally:
for f in opened_files:
f.close()
def test():
"""uuencode/uudecode main program"""
import optparse
parser = optparse.OptionParser(usage='usage: %prog [-d] [-t] [input [output]]')
parser.add_option('-d', '--decode', dest='decode', help='Decode (instead of encode)?', default=False, action='store_true')
parser.add_option('-t', '--text', dest='text', help='data is text, encoded format unix-compatible text?', default=False, action='store_true')
(options, args) = parser.parse_args()
if len(args) > 2:
parser.error('incorrect number of arguments')
sys.exit(1)
input = sys.stdin
output = sys.stdout
if len(args) > 0:
input = args[0]
if len(args) > 1:
output = args[1]
if options.decode:
if options.text:
if isinstance(output, basestring):
output = open(output, 'w')
else:
print sys.argv[0], ': cannot do -t to stdout'
sys.exit(1)
decode(input, output)
else:
if options.text:
if isinstance(input, basestring):
input = open(input, 'r')
else:
print sys.argv[0], ': cannot do -t from stdin'
sys.exit(1)
encode(input, output)
if __name__ == '__main__':
test()
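# --- Illustrative sketch (editor's addition): a simple round-trip using the
# module's public API; the file names are hypothetical placeholders.
def _roundtrip_example():
    encode('plain.txt', 'encoded.uu')  # uuencode plain.txt into encoded.uu
    decode('encoded.uu', 'restored.txt')  # decode it back into a new file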
|
matthiasdiener/spack
|
refs/heads/develop
|
var/spack/repos/builtin/packages/bertini/package.py
|
5
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Bertini(AutotoolsPackage):
"""Bertini is a general-purpose solver, written in C, that was created
for research about polynomial continuation. It solves for the numerical
solution of systems of polynomial equations using homotopy continuation."""
homepage = "https://bertini.nd.edu/"
url = "https://bertini.nd.edu/BertiniSource_v1.5.tar.gz"
version('1.5', 'e3f6cc6e7f9a0cf1d73185e8671af707')
variant('mpi', default=True, description='Compile in parallel')
depends_on('flex', type='build')
depends_on('bison', type='build')
depends_on('gmp')
depends_on('mpfr')
depends_on('mpi', when='+mpi')
|
sheqi/TVpgGLM
|
refs/heads/master
|
test/practice6_pystan_hmc_Qi_loop_test.py
|
1
|
import pickle
import numpy as np
import numpy.random as npr
import matplotlib.pyplot as plt
from pyglm.utils.utils import expand_scalar, compute_optimal_rotation
dim = 2
N = 20
r = 1 + np.arange(N) // (N/2.)
th = np.linspace(0, 4 * np.pi, N, endpoint=False)
x = r * np.cos(th)
y = r * np.sin(th)
L = np.hstack((x[:, None], y[:, None]))
L1 = np.random.randn(N, dim)
W = np.zeros((N, N))
# Distance matrix
D = ((L[:, None, :] - L[None, :, :]) ** 2).sum(2)
sig = np.exp(-D/2)
Sig = np.tile(sig[:, :, None, None], (1, 1, 1, 1))
Mu = expand_scalar(0, (N, N, 1))
for n in range(N):
for m in range(N):
W[n, m] = npr.multivariate_normal(Mu[n, m], Sig[n, m])
aa = 1.0
bb = 1.0
cc = 1.0
sm = pickle.load(open('/Users/pillowlab/Dropbox/pyglm-master/Practices/model.pkl', 'rb'))
new_data = dict(N=N, W=W, B=dim)
for i in range(100):
fit = sm.sampling(data=new_data, iter=100, warmup=50, chains=1, init=[dict(l=L1, sigma=aa)],
control=dict(stepsize=0.001))
samples = fit.extract(permuted=True)
aa = np.mean(samples['sigma'])
#aa = samples['sigma'][-1]
#bb = np.mean(samples['eta'])
#cc = np.mean(samples['rho'])
L1 = np.mean(samples['l'], 0)
#L1 = samples['l'][-1]
R = compute_optimal_rotation(L1, L)
L1 = np.dot(L1, R)
plt.scatter(L1[:,0],L1[:,1])
plt.scatter(L[:,0],L[:,1])
|
mindbody/API-Examples
|
refs/heads/master
|
SDKs/Python/test/test_checkout_appointment_booking_request.py
|
1
|
# coding: utf-8
"""
MINDBODY Public API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.checkout_appointment_booking_request import CheckoutAppointmentBookingRequest # noqa: E501
from swagger_client.rest import ApiException
class TestCheckoutAppointmentBookingRequest(unittest.TestCase):
"""CheckoutAppointmentBookingRequest unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testCheckoutAppointmentBookingRequest(self):
"""Test CheckoutAppointmentBookingRequest"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.checkout_appointment_booking_request.CheckoutAppointmentBookingRequest() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
Emergya/icm-openedx-educamadrid-platform-basic
|
refs/heads/ci
|
lms/djangoapps/certificates/tests/test_cert_management.py
|
70
|
"""Tests for the resubmit_error_certificates management command. """
import ddt
from contextlib import contextmanager
from django.core.management.base import CommandError
from nose.plugins.attrib import attr
from django.test.utils import override_settings
from mock import patch
from opaque_keys.edx.locator import CourseLocator
from certificates.tests.factories import BadgeAssertionFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, check_mongo_calls
from student.tests.factories import UserFactory, CourseEnrollmentFactory
from certificates.management.commands import resubmit_error_certificates, regenerate_user, ungenerated_certs
from certificates.models import GeneratedCertificate, CertificateStatuses, BadgeAssertion
class CertificateManagementTest(ModuleStoreTestCase):
"""
Base test class for Certificate Management command tests.
"""
# Override with the command module you wish to test.
command = resubmit_error_certificates
def setUp(self):
super(CertificateManagementTest, self).setUp()
self.user = UserFactory.create()
self.courses = [
CourseFactory.create()
for __ in range(3)
]
def _create_cert(self, course_key, user, status):
"""Create a certificate entry. """
# Enroll the user in the course
CourseEnrollmentFactory.create(
user=user,
course_id=course_key
)
# Create the certificate
GeneratedCertificate.objects.create(
user=user,
course_id=course_key,
status=status
)
def _run_command(self, *args, **kwargs):
"""Run the management command to generate a fake cert. """
command = self.command.Command()
return command.handle(*args, **kwargs)
def _assert_cert_status(self, course_key, user, expected_status):
"""Check the status of a certificate. """
cert = GeneratedCertificate.objects.get(user=user, course_id=course_key)
self.assertEqual(cert.status, expected_status)
@attr('shard_1')
@ddt.ddt
class ResubmitErrorCertificatesTest(CertificateManagementTest):
"""Tests for the resubmit_error_certificates management command. """
def test_resubmit_error_certificate(self):
# Create a certificate with status 'error'
self._create_cert(self.courses[0].id, self.user, CertificateStatuses.error)
# Re-submit all certificates with status 'error'
with check_mongo_calls(1):
self._run_command()
# Expect that the certificate was re-submitted
self._assert_cert_status(self.courses[0].id, self.user, CertificateStatuses.notpassing)
def test_resubmit_error_certificate_in_a_course(self):
# Create a certificate with status 'error'
# in three courses.
for idx in range(3):
self._create_cert(self.courses[idx].id, self.user, CertificateStatuses.error)
# Re-submit certificates for two of the courses
self._run_command(course_key_list=[
unicode(self.courses[0].id),
unicode(self.courses[1].id)
])
# Expect that the first two courses have been re-submitted,
# but not the third course.
self._assert_cert_status(self.courses[0].id, self.user, CertificateStatuses.notpassing)
self._assert_cert_status(self.courses[1].id, self.user, CertificateStatuses.notpassing)
self._assert_cert_status(self.courses[2].id, self.user, CertificateStatuses.error)
@ddt.data(
CertificateStatuses.deleted,
CertificateStatuses.deleting,
CertificateStatuses.downloadable,
CertificateStatuses.generating,
CertificateStatuses.notpassing,
CertificateStatuses.regenerating,
CertificateStatuses.restricted,
CertificateStatuses.unavailable,
)
def test_resubmit_error_certificate_skips_non_error_certificates(self, other_status):
# Create certificates with an error status and some other status
self._create_cert(self.courses[0].id, self.user, CertificateStatuses.error)
self._create_cert(self.courses[1].id, self.user, other_status)
# Re-submit certificates for all courses
self._run_command()
# Only the certificate with status "error" should have been re-submitted
self._assert_cert_status(self.courses[0].id, self.user, CertificateStatuses.notpassing)
self._assert_cert_status(self.courses[1].id, self.user, other_status)
def test_resubmit_error_certificate_none_found(self):
self._create_cert(self.courses[0].id, self.user, CertificateStatuses.downloadable)
self._run_command()
self._assert_cert_status(self.courses[0].id, self.user, CertificateStatuses.downloadable)
def test_course_caching(self):
# Create multiple certificates for the same course
self._create_cert(self.courses[0].id, UserFactory.create(), CertificateStatuses.error)
self._create_cert(self.courses[0].id, UserFactory.create(), CertificateStatuses.error)
self._create_cert(self.courses[0].id, UserFactory.create(), CertificateStatuses.error)
# Verify that we make only one Mongo query
# because the course is cached.
with check_mongo_calls(1):
self._run_command()
def test_invalid_course_key(self):
invalid_key = u"invalid/"
with self.assertRaisesRegexp(CommandError, invalid_key):
self._run_command(course_key_list=[invalid_key])
def test_course_does_not_exist(self):
phantom_course = CourseLocator(org='phantom', course='phantom', run='phantom')
self._create_cert(phantom_course, self.user, 'error')
self._run_command()
# Expect that the certificate was NOT resubmitted
# since the course doesn't actually exist.
self._assert_cert_status(phantom_course, self.user, CertificateStatuses.error)
@attr('shard_1')
class RegenerateCertificatesTest(CertificateManagementTest):
"""
Tests for regenerating certificates.
"""
command = regenerate_user
def setUp(self):
"""
We just need one course here.
"""
super(RegenerateCertificatesTest, self).setUp()
self.course = self.courses[0]
@override_settings(CERT_QUEUE='test-queue')
@patch('certificates.api.XQueueCertInterface', spec=True)
def test_clear_badge(self, xqueue):
"""
Given that I have a user with a badge
If I run regeneration for a user
Then certificate generation will be requested
And the badge will be deleted
"""
key = self.course.location.course_key
BadgeAssertionFactory(user=self.user, course_id=key, data={})
self._create_cert(key, self.user, CertificateStatuses.downloadable)
self.assertTrue(BadgeAssertion.objects.filter(user=self.user, course_id=key))
self._run_command(
username=self.user.email, course=unicode(key), noop=False, insecure=False, template_file=None,
grade_value=None
)
xqueue.return_value.regen_cert.assert_called_with(
self.user,
key,
course=self.course,
forced_grade=None,
template_file=None,
generate_pdf=True
)
self.assertFalse(BadgeAssertion.objects.filter(user=self.user, course_id=key))
@override_settings(CERT_QUEUE='test-queue')
@patch('capa.xqueue_interface.XQueueInterface.send_to_queue', spec=True)
def test_regenerating_certificate(self, mock_send_to_queue):
"""
Given that I have a user who has not passed course
If I run regeneration for that user
Then certificate generation will be not be requested
"""
key = self.course.location.course_key
self._create_cert(key, self.user, CertificateStatuses.downloadable)
self._run_command(
username=self.user.email, course=unicode(key), noop=False, insecure=True, template_file=None,
grade_value=None
)
certificate = GeneratedCertificate.objects.get(
user=self.user,
course_id=key
)
self.assertEqual(certificate.status, CertificateStatuses.notpassing)
self.assertFalse(mock_send_to_queue.called)
@attr('shard_1')
class UngenerateCertificatesTest(CertificateManagementTest):
"""
Tests for generating certificates.
"""
command = ungenerated_certs
def setUp(self):
"""
We just need one course here.
"""
super(UngenerateCertificatesTest, self).setUp()
self.course = self.courses[0]
@override_settings(CERT_QUEUE='test-queue')
@patch('capa.xqueue_interface.XQueueInterface.send_to_queue', spec=True)
def test_ungenerated_certificate(self, mock_send_to_queue):
"""
Given that I have ended course
If I run ungenerated certs command
Then certificates should be generated for all users who passed course
"""
mock_send_to_queue.return_value = (0, "Successfully queued")
key = self.course.location.course_key
self._create_cert(key, self.user, CertificateStatuses.unavailable)
with self._mock_passing_grade():
self._run_command(
course=unicode(key), noop=False, insecure=True, force=False
)
self.assertTrue(mock_send_to_queue.called)
certificate = GeneratedCertificate.objects.get(
user=self.user,
course_id=key
)
self.assertEqual(certificate.status, CertificateStatuses.generating)
@contextmanager
def _mock_passing_grade(self):
"""Mock the grading function to always return a passing grade. """
symbol = 'courseware.grades.grade'
with patch(symbol) as mock_grade:
mock_grade.return_value = {'grade': 'Pass', 'percent': 0.75}
yield
|
andmos/ansible
|
refs/heads/devel
|
lib/ansible/module_utils/gitlab.py
|
37
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
# Copyright: (c) 2018, Marcus Watkins <marwatk@marcuswatkins.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import
import json
from ansible.module_utils.urls import fetch_url
try:
from urllib import quote_plus # Python 2.X
except ImportError:
from urllib.parse import quote_plus # Python 3+
def request(module, api_url, project, path, access_token, private_token, rawdata='', method='GET'):
url = "%s/v4/projects/%s%s" % (api_url, quote_plus(project), path)
headers = {}
if access_token:
headers['Authorization'] = "Bearer %s" % access_token
else:
headers['Private-Token'] = private_token
headers['Accept'] = "application/json"
headers['Content-Type'] = "application/json"
response, info = fetch_url(module=module, url=url, headers=headers, data=rawdata, method=method)
status = info['status']
content = ""
if response:
content = response.read()
if status == 204:
return True, content
elif status == 200 or status == 201:
return True, json.loads(content)
else:
return False, str(status) + ": " + content
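# --- Illustrative sketch (editor's addition): a hypothetical caller of
# request(); the API URL, project path and token below are placeholder values.
def example_list_hooks(module):
    ok, data = request(module, 'https://gitlab.example.com/api', 'group/project',
                       '/hooks', access_token=None, private_token='PLACEHOLDER')
    return data if ok else []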
def findProject(gitlab_instance, identifier):
try:
project = gitlab_instance.projects.get(identifier)
except Exception as e:
current_user = gitlab_instance.user
try:
project = gitlab_instance.projects.get(current_user.username + '/' + identifier)
except Exception as e:
return None
return project
def findGroup(gitlab_instance, identifier):
try:
project = gitlab_instance.groups.get(identifier)
except Exception as e:
return None
return project
|
currychou/1
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/pickle.py
|
1265
|
from json import *
|
apahim/avocado-misc-tests
|
refs/heads/master
|
io/net/infiniband/ucmatose.py
|
1
|
#!/usr/bin/env python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: 2016 IBM
# Author: Narasimhan V <sim@linux.vnet.ibm.com>
# Author: Manvanthara B Puttashankar <manvanth@linux.vnet.ibm.com>
"""
Ucmatose test
"""
import time
import netifaces
from netifaces import AF_INET
from avocado import Test
from avocado import main
from avocado.utils.software_manager import SoftwareManager
from avocado.utils import process, distro
class Ucmatose(Test):
"""
Ucmatose Test.
"""
def setUp(self):
"""
Setup and install dependencies for the test.
"""
self.test_name = "ucmatose"
self.basic = self.params.get("basic_option", default="None")
self.ext = self.params.get("ext_option", default="None")
self.flag = self.params.get("ext_flag", default="0")
if self.basic == "None" and self.ext == "None":
self.skip("No option given")
if self.flag == "1" and self.ext != "None":
self.option = self.ext
else:
self.option = self.basic
if process.system("ibstat", shell=True, ignore_status=True) != 0:
self.skip("MOFED is not installed. Skipping")
detected_distro = distro.detect()
pkgs = []
smm = SoftwareManager()
if detected_distro.name == "Ubuntu":
pkgs.extend(["openssh-client", "iputils-ping"])
elif detected_distro.name == "SuSE":
pkgs.extend(["openssh", "iputils"])
else:
pkgs.extend(["openssh-clients", "iputils"])
for pkg in pkgs:
if not smm.check_installed(pkg) and not smm.install(pkg):
self.skip("Not able to install %s" % pkg)
interfaces = netifaces.interfaces()
self.flag = self.params.get("ext_flag", default="0")
self.iface = self.params.get("interface", default="")
self.peer_ip = self.params.get("peer_ip", default="")
if self.iface not in interfaces:
self.skip("%s interface is not available" % self.iface)
if self.peer_ip == "":
self.skip("%s peer machine is not available" % self.peer_ip)
self.timeout = "2m"
self.local_ip = netifaces.ifaddresses(self.iface)[AF_INET][0]['addr']
if detected_distro.name == "Ubuntu":
cmd = "service ufw stop"
# FIXME: "redhat" as the distro name for RHEL is deprecated
# on Avocado versions >= 50.0. This is a temporary compatibility
# enabler for older runners, but should be removed soon
elif detected_distro.name in ['rhel', 'fedora', 'redhat']:
cmd = "systemctl stop firewalld"
elif detected_distro.name == "SuSE":
cmd = "rcSuSEfirewall2 stop"
elif detected_distro.name == "centos":
cmd = "service iptables stop"
else:
self.skip("Distro not supported")
if process.system("%s && ssh %s %s" % (cmd, self.peer_ip, cmd),
ignore_status=True, shell=True) != 0:
self.skip("Unable to disable firewall")
def test(self):
"""
Test ucmatose
"""
self.log.info(self.test_name)
logs = "> /tmp/ib_log 2>&1 &"
cmd = "ssh %s \" timeout %s %s -b %s %s %s\" " \
% (self.peer_ip, self.timeout, self.test_name,
self.peer_ip, self.option, logs)
if process.system(cmd, shell=True, ignore_status=True) != 0:
self.fail("SSH connection (or) Server command failed")
time.sleep(5)
self.log.info("Client data - %s(%s)" % (self.test_name, self.option))
cmd = "timeout %s %s -s %s -b %s %s" \
% (self.timeout, self.test_name, self.peer_ip,
self.local_ip, self.option)
if process.system(cmd, shell=True, ignore_status=True) != 0:
self.fail("Client command failed")
time.sleep(5)
self.log.info("Server data - %s(%s)" % (self.test_name, self.option))
cmd = " ssh %s \"timeout %s cat /tmp/ib_log && rm -rf /tmp/ib_log\" " \
% (self.peer_ip, self.timeout)
if process.system(cmd, shell=True, ignore_status=True) != 0:
self.fail("Server output retrieval failed")
if __name__ == "__main__":
main()
|
frappe/erpnext
|
refs/heads/develop
|
erpnext/hotels/doctype/hotel_room_type/hotel_room_type.py
|
19
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class HotelRoomType(Document):
pass
|
ambimorph/malaprop
|
refs/heads/master
|
malaprop/test/test_corrector.py
|
1
|
# test_corrector.py
from malaprop.correction.corrector import *
from recluse.nltk_based_segmenter_tokeniser import *
from malaprop.correction.HMM import *
from DamerauLevenshteinDerivor.cderivor import Derivor
from BackOffTrigramModel.BackOffTrigramModelPipe import BackOffTMPipe
import unittest, StringIO, subprocess
class MatchCaseTest(unittest.TestCase):
def test_match_case(self):
result = match_case('This', 'that')
self.assertEqual(result, 'That'), result
result = match_case('this', 'that')
self.assertEqual(result, 'that'), result
result = match_case('THIS', 'that')
self.assertEqual(result, 'THAT'), result
result = match_case('MacGregor', 'macgregor')
self.assertEqual(result, 'MacGregor'), result
result = match_case('MacGregor', 'macdregor')
self.assertEqual(result, 'MacDregor'), result
result = match_case('McGregor', 'macgregor')
self.assertEqual(result, 'MacGregor'), result
result = match_case('MacGregor', 'mcgregor')
self.assertEqual(result, 'McGregor'), result
result = match_case('OrC', 'or')
self.assertEqual(result, 'Or'), result
result = match_case('OrC', 'orca')
self.assertEqual(result, 'OrCa'), result
class CorrectorTest(unittest.TestCase):
def setUp(self):
training_text_file = open('malaprop/test/data/segmenter_training', 'r')
segmenter_tokeniser = NLTKBasedSegmenterTokeniser(training_text_file)
path_to_botmp = subprocess.check_output(['which', 'BackOffTrigramModelPipe']).strip()
arpa_file_name = 'malaprop/test/data/trigram_model_2K.arpa'
botmp = BackOffTMPipe(path_to_botmp, arpa_file_name)
error_rate = 0.3
d = Derivor('malaprop/test/data/1K_test_real_word_vocab')
hmm = HMM(d.variations, botmp, error_rate, 2)
self.c = Corrector(segmenter_tokeniser, hmm)
def test_correct(self):
# Regression tests: these results are consistent with the
# probabilities of their input, but their passing is not a
# guarantee of correctness.
sentence = 'It is therefore a more specific from of the term reflectivity.'
result = self.c.correct(sentence)
expected_result = [[6, 0, 'from', 'form'], [9, 0, 'term', 'team']]
self.assertListEqual(result, expected_result), result
result = self.c.correct(sentence, output='sentence')
expected_result = 'It is therefore a more specific form of the team reflectivity.'
self.assertEqual(result, expected_result), result
sentence = 'Most land areas are in in albedo range of 0.1 to 0.4.'
result = self.c.correct(sentence)
expected_result = [[2,0, 'areas', 'area'], [4,0, 'in', 'win']]
self.assertListEqual(result, expected_result), result
result = self.c.correct(sentence, output='sentence')
expected_result = 'Most land area are win in albedo range of 0.1 to 0.4.'
self.assertEqual(result, expected_result), result
if __name__ == '__main__':
unittest.main()
|
was4444/chromium.src
|
refs/heads/nw15
|
chrome/common/extensions/docs/server2/compiled_file_system_test.py
|
79
|
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import functools
import os
from compiled_file_system import Cache, CompiledFileSystem
from copy import deepcopy
from environment import GetAppVersion
from file_system import FileNotFoundError
from mock_file_system import MockFileSystem
from object_store_creator import ObjectStoreCreator
from test_file_system import TestFileSystem
from test_object_store import TestObjectStore
import unittest
_TEST_DATA = {
'404.html': '404.html contents',
'apps': {
'a11y.html': 'a11y.html contents',
'about_apps.html': 'about_apps.html contents',
'fakedir': {
'file.html': 'file.html contents'
},
'deepdir': {
'deepfile.html': 'deepfile.html contents',
'deeper': {
'deepest.html': 'deepest.html contents',
},
}
},
'extensions': {
'activeTab.html': 'activeTab.html contents',
'alarms.html': 'alarms.html contents'
}
}
identity = lambda _, x: x
def _GetTestCompiledFsCreator():
'''Returns a function which creates CompiledFileSystem views of
TestFileSystems backed by _TEST_DATA.
'''
return functools.partial(
CompiledFileSystem.Factory(
ObjectStoreCreator(start_empty=False,
store_type=TestObjectStore,
disable_wrappers=True),
).Create,
TestFileSystem(deepcopy(_TEST_DATA)))
class CompiledFileSystemTest(unittest.TestCase):
def testPopulateNamespace(self):
def CheckNamespace(expected_file, expected_list, fs):
self.assertEqual(expected_file, fs._file_object_store.namespace)
self.assertEqual(expected_list, fs._list_object_store.namespace)
compiled_fs_creator = _GetTestCompiledFsCreator()
f = lambda x: x
CheckNamespace(
'class=CompiledFileSystem&'
'category=CompiledFileSystemTest/TestFileSystem/file&'
'app_version=%s' % GetAppVersion(),
'class=CompiledFileSystem&'
'category=CompiledFileSystemTest/TestFileSystem/list&'
'app_version=%s' % GetAppVersion(),
compiled_fs_creator(f, CompiledFileSystemTest))
CheckNamespace(
'class=CompiledFileSystem&'
'category=CompiledFileSystemTest/TestFileSystem/foo/file&'
'app_version=%s' % GetAppVersion(),
'class=CompiledFileSystem&'
'category=CompiledFileSystemTest/TestFileSystem/foo/list&'
'app_version=%s' % GetAppVersion(),
compiled_fs_creator(f, CompiledFileSystemTest, category='foo'))
def testPopulateFromFile(self):
def Sleepy(key, val):
return '%s%s' % ('Z' * len(key), 'z' * len(val))
compiled_fs = _GetTestCompiledFsCreator()(Sleepy, CompiledFileSystemTest)
self.assertEqual('ZZZZZZZZzzzzzzzzzzzzzzzzz',
compiled_fs.GetFromFile('404.html').Get())
self.assertEqual('ZZZZZZZZZZZZZZzzzzzzzzzzzzzzzzzz',
compiled_fs.GetFromFile('apps/a11y.html').Get())
self.assertEqual('ZZZZZZZZZZZZZZZZZZZZZZzzzzzzzzzzzzzzzzzz',
compiled_fs.GetFromFile('apps/fakedir/file.html').Get())
def testPopulateFromFileListing(self):
def strip_ext(_, files):
return [os.path.splitext(f)[0] for f in files]
compiled_fs = _GetTestCompiledFsCreator()(strip_ext, CompiledFileSystemTest)
expected_top_listing = [
'404',
'apps/a11y',
'apps/about_apps',
'apps/deepdir/deeper/deepest',
'apps/deepdir/deepfile',
'apps/fakedir/file',
'extensions/activeTab',
'extensions/alarms'
]
self.assertEqual(expected_top_listing,
sorted(compiled_fs.GetFromFileListing('').Get()))
expected_apps_listing = [
'a11y',
'about_apps',
'deepdir/deeper/deepest',
'deepdir/deepfile',
'fakedir/file',
]
self.assertEqual(expected_apps_listing,
sorted(compiled_fs.GetFromFileListing('apps/').Get()))
self.assertEqual(['file',],
compiled_fs.GetFromFileListing('apps/fakedir/').Get())
self.assertEqual(['deeper/deepest', 'deepfile'],
sorted(compiled_fs.GetFromFileListing(
'apps/deepdir/').Get()))
self.assertEqual(['deepest'],
compiled_fs.GetFromFileListing(
'apps/deepdir/deeper/').Get())
def testCaching(self):
compiled_fs = _GetTestCompiledFsCreator()(Cache(identity),
CompiledFileSystemTest)
self.assertEqual('404.html contents',
compiled_fs.GetFromFile('404.html').Get())
self.assertEqual(set(('file.html',)),
set(compiled_fs.GetFromFileListing('apps/fakedir/').Get()))
compiled_fs._file_system._path_values['404.html'] = 'boom'
compiled_fs._file_system._path_values['apps/fakedir/'] = [
'file.html', 'boom.html']
self.assertEqual('404.html contents',
compiled_fs.GetFromFile('404.html').Get())
self.assertEqual(set(('file.html',)),
set(compiled_fs.GetFromFileListing('apps/fakedir/').Get()))
compiled_fs._file_system.IncrementStat()
self.assertEqual('boom', compiled_fs.GetFromFile('404.html').Get())
self.assertEqual(set(('file.html', 'boom.html')),
set(compiled_fs.GetFromFileListing('apps/fakedir/').Get()))
def testFailures(self):
compiled_fs = _GetTestCompiledFsCreator()(identity, CompiledFileSystemTest)
self.assertRaises(FileNotFoundError,
compiled_fs.GetFromFile('405.html').Get)
# TODO(kalman): would be nice to test this fails since apps/ is a dir.
compiled_fs.GetFromFile('apps')
#self.assertRaises(SomeError, compiled_fs.GetFromFile, 'apps/')
self.assertRaises(FileNotFoundError,
compiled_fs.GetFromFileListing('nodir/').Get)
# TODO(kalman): likewise, not a FileNotFoundError.
self.assertRaises(FileNotFoundError,
compiled_fs.GetFromFileListing('404.html/').Get)
def testCorrectFutureBehaviour(self):
# Tests that the underlying FileSystem's Read Future has had Get() called
# on it before the Future is resolved, but the underlying Future isn't
# resolved until Get is.
mock_fs = MockFileSystem(TestFileSystem(_TEST_DATA))
compiled_fs = CompiledFileSystem.Factory(
ObjectStoreCreator.ForTest()).Create(
mock_fs, lambda path, contents: contents, type(self))
self.assertTrue(*mock_fs.CheckAndReset())
future = compiled_fs.GetFromFile('404.html')
self.assertTrue(*mock_fs.CheckAndReset(stat_count=1, read_count=1))
future.Get()
self.assertTrue(*mock_fs.CheckAndReset(read_resolve_count=1))
future = compiled_fs.GetFromFileListing('apps/')
# Current behaviour is to have read=2 and read_resolve=1 because the first
# level is read eagerly, then all of the second is read (in parallel). If
# it weren't eager (and it may be worth experimenting with that) then it'd
# be read=1 and read_resolve=0.
self.assertTrue(*mock_fs.CheckAndReset(stat_count=1,
read_count=2,
read_resolve_count=1))
future.Get()
# It's doing 1 more level 'deeper' (already read 'fakedir' and 'deepdir'
# though not resolved), so that's 1 more read/resolve + the resolve from
# the first read.
self.assertTrue(*mock_fs.CheckAndReset(read_count=1, read_resolve_count=2))
# Even though the directory is 1 layer deep the caller has no way of
# determining that ahead of time (though perhaps the API could give some
# kind of clue, if we really cared).
future = compiled_fs.GetFromFileListing('extensions/')
self.assertTrue(*mock_fs.CheckAndReset(stat_count=1,
read_count=1,
read_resolve_count=1))
future.Get()
self.assertTrue(*mock_fs.CheckAndReset())
# Similar configuration to the 'apps/' case but deeper.
future = compiled_fs.GetFromFileListing('')
self.assertTrue(*mock_fs.CheckAndReset(stat_count=1,
read_count=2,
read_resolve_count=1))
future.Get()
self.assertTrue(*mock_fs.CheckAndReset(read_count=2, read_resolve_count=3))
def testSkipNotFound(self):
mock_fs = MockFileSystem(TestFileSystem(_TEST_DATA))
compiled_fs = CompiledFileSystem.Factory(
ObjectStoreCreator.ForTest()).Create(
mock_fs, Cache(lambda path, contents: contents), type(self))
future = compiled_fs.GetFromFile('no_file', skip_not_found=True)
    # The read is still issued eagerly (read_count=1); with skip_not_found the
    # missing file resolves to None when the Future is resolved, not an error.
self.assertTrue(*mock_fs.CheckAndReset(read_count=1, stat_count=1))
self.assertEqual(None, future.Get())
self.assertTrue(*mock_fs.CheckAndReset(read_resolve_count=1))
future = compiled_fs.GetFromFile('no_file', skip_not_found=True)
self.assertTrue(*mock_fs.CheckAndReset(stat_count=1))
self.assertEqual(None, future.Get())
# The result for a non-existent file should still be cached.
self.assertTrue(*mock_fs.CheckAndReset())
future = compiled_fs.GetFromFile('no_file')
self.assertRaises(FileNotFoundError, future.Get)
if __name__ == '__main__':
unittest.main()
|
jswope00/griffinx
|
refs/heads/master
|
common/lib/xmodule/xmodule/open_ended_grading_classes/open_ended_module.py
|
7
|
"""
A Self Assessment module that allows students to write open-ended responses,
submit, then see a rubric and rate themselves. Persists student supplied
hints, answers, and assessment judgment (currently only correct/incorrect).
Parses xml definition file--see below for exact format.
"""
import json
import logging
from lxml import etree
import capa.xqueue_interface as xqueue_interface
from xmodule.capa_module import ComplexEncoder
from xmodule.progress import Progress
from xmodule.stringify import stringify_children
from capa.util import *
import openendedchild
from numpy import median
from datetime import datetime
from pytz import UTC
from .combined_open_ended_rubric import CombinedOpenEndedRubric
log = logging.getLogger("edx.courseware")
class OpenEndedModule(openendedchild.OpenEndedChild):
"""
The open ended module supports all external open ended grader problems.
Sample XML file:
<openended min_score_to_attempt="1" max_score_to_attempt="1">
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>{"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}</grader_payload>
</openendedparam>
</openended>
"""
TEMPLATE_DIR = "combinedopenended/openended"
def setup_response(self, system, location, definition, descriptor):
"""
Sets up the response type.
@param system: Modulesystem object
@param location: The location of the problem
@param definition: The xml definition of the problem
@param descriptor: The OpenEndedDescriptor associated with this
@return: None
"""
oeparam = definition['oeparam']
self.url = definition.get('url', None)
self.queue_name = definition.get('queuename', self.DEFAULT_QUEUE)
self.message_queue_name = definition.get('message-queuename', self.DEFAULT_MESSAGE_QUEUE)
# This is needed to attach feedback to specific responses later
self.submission_id = None
self.grader_id = None
error_message = "No {0} found in problem xml for open ended problem. Contact the learning sciences group for assistance."
if oeparam is None:
# This is a staff_facing_error
raise ValueError(error_message.format('oeparam'))
if self.child_prompt is None:
raise ValueError(error_message.format('prompt'))
if self.child_rubric is None:
raise ValueError(error_message.format('rubric'))
self._parse(oeparam, self.child_prompt, self.child_rubric, system)
# If there are multiple tasks (like self-assessment followed by ai), once
        # the status of the first task is set to DONE, setup_next_task() will
# create the OpenEndedChild with parameter child_created=True so that the
# submission can be sent to the grader. Keep trying each time this module
# is loaded until it succeeds.
if self.child_created is True and self.child_state == self.ASSESSING:
success, message = self.send_to_grader(self.latest_answer(), system)
if success:
self.child_created = False
def _parse(self, oeparam, prompt, rubric, system):
'''
Parse OpenEndedResponse XML:
self.initial_display
self.payload - dict containing keys --
'grader' : path to grader settings file, 'problem_id' : id of the problem
self.answer - What to display when show answer is clicked
'''
# Note that OpenEndedResponse is agnostic to the specific contents of grader_payload
prompt_string = stringify_children(prompt)
rubric_string = stringify_children(rubric)
self.child_prompt = prompt_string
self.child_rubric = rubric_string
grader_payload = oeparam.find('grader_payload')
grader_payload = grader_payload.text if grader_payload is not None else ''
# Update grader payload with student id. If grader payload not json, error.
try:
parsed_grader_payload = json.loads(grader_payload)
# NOTE: self.system.location is valid because the capa_module
# __init__ adds it (easiest way to get problem location into
# response types)
        except (TypeError, ValueError):
            # This is a dev_facing_error
            log.exception(
                "Grader payload from external open ended grading server is not a json object! Object: {0}".format(
                    grader_payload))
            # Fall back to an empty payload so the .update() call below does
            # not raise a NameError when the payload is not valid JSON.
            parsed_grader_payload = {}
self.initial_display = find_with_default(oeparam, 'initial_display', '')
self.answer = find_with_default(oeparam, 'answer_display', 'No answer given.')
parsed_grader_payload.update({
'location': self.location_string,
'course_id': system.course_id.to_deprecated_string(),
'prompt': prompt_string,
'rubric': rubric_string,
'initial_display': self.initial_display,
'answer': self.answer,
'problem_id': self.display_name,
'skip_basic_checks': self.skip_basic_checks,
'control': json.dumps(self.control),
})
updated_grader_payload = json.dumps(parsed_grader_payload)
self.payload = {'grader_payload': updated_grader_payload}
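    # For illustration (field values hypothetical): after _parse, self.payload
    # is a single-key dict whose value is the JSON-serialized payload, e.g.
    #   {'grader_payload': '{"grader_settings": "ml_grading.conf",
    #     "problem_id": "...", "location": "...", "course_id": "...", ...}'}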
def skip_post_assessment(self, _data, system):
"""
Ajax function that allows one to skip the post assessment phase
        @param _data: AJAX dictionary (unused)
@param system: ModuleSystem
@return: Success indicator
"""
self.child_state = self.DONE
return {'success': True}
def message_post(self, data, system):
"""
Handles a student message post (a reaction to the grade they received from an open ended grader type)
Returns a boolean success/fail and an error message
"""
event_info = dict()
event_info['problem_id'] = self.location_string
event_info['student_id'] = system.anonymous_student_id
event_info['survey_responses'] = data
_ = self.system.service(self, "i18n").ugettext
survey_responses = event_info['survey_responses']
for tag in ['feedback', 'submission_id', 'grader_id', 'score']:
if tag not in survey_responses:
# This is a student_facing_error
return {
'success': False,
# Translators: 'tag' is one of 'feedback', 'submission_id',
# 'grader_id', or 'score'. They are categories that a student
# responds to when filling out a post-assessment survey
# of his or her grade from an openended problem.
'msg': _("Could not find needed tag {tag_name} in the "
"survey responses. Please try submitting "
"again.").format(tag_name=tag)
}
try:
submission_id = int(survey_responses['submission_id'])
grader_id = int(survey_responses['grader_id'])
feedback = str(survey_responses['feedback'].encode('ascii', 'ignore'))
score = int(survey_responses['score'])
        except Exception:
# This is a dev_facing_error
error_message = (
"Could not parse submission id, grader id, "
"or feedback from message_post ajax call. "
"Here is the message data: {0}".format(survey_responses)
)
log.exception(error_message)
# This is a student_facing_error
return {
'success': False,
'msg': _(
"There was an error saving your feedback. Please "
"contact course staff."
)
}
xqueue = system.get('xqueue')
if xqueue is None:
return {'success': False, 'msg': _("Couldn't submit feedback.")}
qinterface = xqueue['interface']
qtime = datetime.strftime(datetime.now(UTC), xqueue_interface.dateformat)
anonymous_student_id = system.anonymous_student_id
queuekey = xqueue_interface.make_hashkey(str(system.seed) + qtime +
anonymous_student_id +
str(len(self.child_history)))
xheader = xqueue_interface.make_xheader(
lms_callback_url=xqueue['construct_callback'](),
lms_key=queuekey,
queue_name=self.message_queue_name
)
student_info = {
'anonymous_student_id': anonymous_student_id,
'submission_time': qtime,
}
contents = {
'feedback': feedback,
'submission_id': submission_id,
'grader_id': grader_id,
'score': score,
'student_info': json.dumps(student_info),
}
error, error_message = qinterface.send_to_queue(
header=xheader,
body=json.dumps(contents)
)
# Convert error to a success value
success = True
message = _("Successfully saved your feedback.")
if error:
success = False
message = _("Unable to save your feedback. Please try again later.")
log.error("Unable to send feedback to grader. location: {0}, error_message: {1}".format(
self.location_string, error_message
))
else:
self.child_state = self.DONE
# This is a student_facing_message
return {'success': success, 'msg': message}
def send_to_grader(self, submission, system):
"""
Send a given submission to the grader, via the xqueue
@param submission: The student submission to send to the grader
@param system: Modulesystem
        @return: (success, message) tuple
"""
# Prepare xqueue request
#------------------------------------------------------------
xqueue = system.get('xqueue')
        if xqueue is None:
            # Keep the (success, message) contract expected by callers.
            return (False, "Couldn't submit your submission. Please try again later.")
qinterface = xqueue['interface']
qtime = datetime.strftime(datetime.now(UTC), xqueue_interface.dateformat)
anonymous_student_id = system.anonymous_student_id
# Generate header
queuekey = xqueue_interface.make_hashkey(str(system.seed) + qtime +
anonymous_student_id +
str(len(self.child_history)))
xheader = xqueue_interface.make_xheader(
lms_callback_url=xqueue['construct_callback'](),
lms_key=queuekey,
queue_name=self.queue_name
)
contents = self.payload.copy()
# Metadata related to the student submission revealed to the external grader
student_info = {
'anonymous_student_id': anonymous_student_id,
'submission_time': qtime,
}
# Update contents with student response and student info
contents.update({
'student_info': json.dumps(student_info),
'student_response': submission,
'max_score': self.max_score(),
})
# Submit request. When successful, 'msg' is the prior length of the queue
error, error_message = qinterface.send_to_queue(
header=xheader,
body=json.dumps(contents)
)
# State associated with the queueing request
queuestate = {
'key': queuekey,
'time': qtime,
}
_ = self.system.service(self, "i18n").ugettext
success = True
message = _("Successfully saved your submission.")
if error:
success = False
# Translators: the `grader` refers to the grading service open response problems
# are sent to, either to be machine-graded, peer-graded, or instructor-graded.
message = _('Unable to submit your submission to the grader. Please try again later.')
log.error("Unable to submit to grader. location: {0}, error_message: {1}".format(
self.location_string, error_message
))
return (success, message)
def _update_score(self, score_msg, queuekey, system):
"""
Called by xqueue to update the score
@param score_msg: The message from xqueue
@param queuekey: The key sent by xqueue
@param system: Modulesystem
@return: Boolean True (not useful currently)
"""
_ = self.system.service(self, "i18n").ugettext
new_score_msg = self._parse_score_msg(score_msg, system)
if not new_score_msg['valid']:
# Translators: the `grader` refers to the grading service open response problems
# are sent to, either to be machine-graded, peer-graded, or instructor-graded.
new_score_msg['feedback'] = _('Invalid grader reply. Please contact the course staff.')
# self.child_history is initialized as []. record_latest_score() and record_latest_post_assessment()
# operate on self.child_history[-1]. Thus we have to make sure child_history is not [].
# Handle at this level instead of in record_*() because this is a good place to reduce the number of conditions
# and also keep the persistent state from changing.
if self.child_history:
self.record_latest_score(new_score_msg['score'])
self.record_latest_post_assessment(score_msg)
self.child_state = self.POST_ASSESSMENT
else:
log.error(
"Trying to update score without existing studentmodule child_history:\n"
" location: {location}\n"
" score: {score}\n"
" grader_ids: {grader_ids}\n"
" submission_ids: {submission_ids}".format(
location=self.location_string,
score=new_score_msg['score'],
grader_ids=new_score_msg['grader_ids'],
submission_ids=new_score_msg['submission_ids'],
)
)
return True
def get_answers(self):
"""
Gets and shows the answer for this problem.
@return: Answer html
"""
anshtml = '<span class="openended-answer"><pre><code>{0}</code></pre></span>'.format(self.answer)
return {self.answer_id: anshtml}
def get_initial_display(self):
"""
Gets and shows the initial display for the input box.
@return: Initial display html
"""
return {self.answer_id: self.initial_display}
def _convert_longform_feedback_to_html(self, response_items):
"""
Take in a dictionary, and return html strings for display to student.
Input:
response_items: Dictionary with keys success, feedback.
if success is True, feedback should be a dictionary, with keys for
types of feedback, and the corresponding feedback values.
if success is False, feedback is actually an error string.
NOTE: this will need to change when we integrate peer grading, because
that will have more complex feedback.
Output:
            String -- html that can be displayed to the student.
"""
# We want to display available feedback in a particular order.
# This dictionary specifies which goes first--lower first.
priorities = {
# These go at the start of the feedback
'spelling': 0,
'grammar': 1,
# needs to be after all the other feedback
'markup_text': 3
}
do_not_render = ['topicality', 'prompt-overlap']
default_priority = 2
def get_priority(elt):
"""
Args:
elt: a tuple of feedback-type, feedback
Returns:
the priority for this feedback type
"""
return priorities.get(elt[0], default_priority)
def encode_values(feedback_type, value):
feedback_type = str(feedback_type).encode('ascii', 'ignore')
if not isinstance(value, basestring):
value = str(value)
value = value.encode('ascii', 'ignore')
return feedback_type, value
def format_feedback(feedback_type, value):
feedback_type, value = encode_values(feedback_type, value)
feedback = u"""
<div class="{feedback_type}">
{value}
</div>
""".format(feedback_type=feedback_type, value=value)
return feedback
def format_feedback_hidden(feedback_type, value):
feedback_type, value = encode_values(feedback_type, value)
feedback = """
<input class="{feedback_type}" type="hidden" value="{value}" />
""".format(feedback_type=feedback_type, value=value)
return feedback
# TODO (vshnayder): design and document the details of this format so
# that we can do proper escaping here (e.g. are the graders allowed to
# include HTML?)
_ = self.system.service(self, "i18n").ugettext
for tag in ['success', 'feedback', 'submission_id', 'grader_id']:
if tag not in response_items:
# This is a student_facing_error
return format_feedback(
# Translators: the `grader` refers to the grading service open response problems
# are sent to, either to be machine-graded, peer-graded, or instructor-graded.
'errors', _('Error getting feedback from grader.')
)
feedback_items = response_items['feedback']
try:
feedback = json.loads(feedback_items)
except (TypeError, ValueError):
# This is a dev_facing_error
log.exception("feedback_items from external open ended grader have invalid json {0}".format(feedback_items))
# This is a student_facing_error
return format_feedback(
# Translators: the `grader` refers to the grading service open response problems
# are sent to, either to be machine-graded, peer-graded, or instructor-graded.
'errors', _('Error getting feedback from grader.')
)
if response_items['success']:
if len(feedback) == 0:
# This is a student_facing_error
return format_feedback(
# Translators: the `grader` refers to the grading service open response problems
# are sent to, either to be machine-graded, peer-graded, or instructor-graded.
'errors', _('No feedback available from grader.')
)
for tag in do_not_render:
if tag in feedback:
feedback.pop(tag)
feedback_lst = sorted(feedback.items(), key=get_priority)
feedback_list_part1 = u"\n".join(format_feedback(k, v) for k, v in feedback_lst)
else:
# This is a student_facing_error
feedback_list_part1 = format_feedback('errors', response_items['feedback'])
feedback_list_part2 = (u"\n".join([format_feedback_hidden(feedback_type, value)
for feedback_type, value in response_items.items()
if feedback_type in ['submission_id', 'grader_id']]))
return u"\n".join([feedback_list_part1, feedback_list_part2])
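    # For illustration: with the priorities above, rendered feedback is ordered
    # spelling (0), grammar (1), everything else (2), then markup_text (3),
    # while 'topicality' and 'prompt-overlap' are dropped before rendering.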
def _format_feedback(self, response_items, system):
"""
Input:
Dictionary called feedback. Must contain keys seen below.
Output:
Return error message or feedback template
"""
rubric_feedback = ""
feedback = self._convert_longform_feedback_to_html(response_items)
rubric_scores = []
if response_items['rubric_scores_complete'] is True:
rubric_renderer = CombinedOpenEndedRubric(system.render_template, True)
rubric_dict = rubric_renderer.render_rubric(response_items['rubric_xml'])
success = rubric_dict['success']
rubric_feedback = rubric_dict['html']
rubric_scores = rubric_dict['rubric_scores']
if not response_items['success']:
return system.render_template(
"{0}/open_ended_error.html".format(self.TEMPLATE_DIR),
{'errors': feedback}
)
feedback_template = system.render_template("{0}/open_ended_feedback.html".format(self.TEMPLATE_DIR), {
'grader_type': response_items['grader_type'],
'score': "{0} / {1}".format(response_items['score'], self.max_score()),
'feedback': feedback,
'rubric_feedback': rubric_feedback
})
return feedback_template, rubric_scores
def _parse_score_msg(self, score_msg, system, join_feedback=True):
"""
Grader reply is a JSON-dump of the following dict
{ 'correct': True/False,
'score': Numeric value (floating point is okay) to assign to answer
'msg': grader_msg
'feedback' : feedback from grader
'grader_type': what type of grader resulted in this score
'grader_id': id of the grader
'submission_id' : id of the submission
'success': whether or not this submission was successful
'rubric_scores': a list of rubric scores
'rubric_scores_complete': boolean if rubric scores are complete
'rubric_xml': the xml of the rubric in string format
}
Returns (valid_score_msg, correct, score, msg):
valid_score_msg: Flag indicating valid score_msg format (Boolean)
correct: Correctness of submission (Boolean)
score: Points to be assigned (numeric, can be float)
"""
fail = {
'valid': False,
'score': 0,
'feedback': '',
'rubric_scores': [[0]],
'grader_types': [''],
'feedback_items': [''],
'feedback_dicts': [{}],
'grader_ids': [0],
'submission_ids': [0],
}
try:
score_result = json.loads(score_msg)
except (TypeError, ValueError):
# This is a dev_facing_error
error_message = ("External open ended grader message should be a JSON-serialized dict."
" Received score_msg = {0}".format(score_msg))
log.error(error_message)
fail['feedback'] = error_message
return fail
if not isinstance(score_result, dict):
# This is a dev_facing_error
error_message = ("External open ended grader message should be a JSON-serialized dict."
" Received score_result = {0}".format(score_result))
log.error(error_message)
fail['feedback'] = error_message
return fail
if not score_result:
return fail
for tag in ['score', 'feedback', 'grader_type', 'success', 'grader_id', 'submission_id']:
if tag not in score_result:
# This is a dev_facing_error
error_message = ("External open ended grader message is missing required tag: {0}"
.format(tag))
log.error(error_message)
fail['feedback'] = error_message
return fail
# This is to support peer grading
if isinstance(score_result['score'], list):
feedback_items = []
rubric_scores = []
grader_types = []
feedback_dicts = []
grader_ids = []
submission_ids = []
for i in xrange(0, len(score_result['score'])):
new_score_result = {
'score': score_result['score'][i],
'feedback': score_result['feedback'][i],
'grader_type': score_result['grader_type'],
'success': score_result['success'],
'grader_id': score_result['grader_id'][i],
'submission_id': score_result['submission_id'],
'rubric_scores_complete': score_result['rubric_scores_complete'][i],
'rubric_xml': score_result['rubric_xml'][i],
}
feedback_template, rubric_score = self._format_feedback(new_score_result, system)
feedback_items.append(feedback_template)
rubric_scores.append(rubric_score)
grader_types.append(score_result['grader_type'])
try:
feedback_dict = json.loads(score_result['feedback'][i])
except Exception:
feedback_dict = score_result['feedback'][i]
feedback_dicts.append(feedback_dict)
grader_ids.append(score_result['grader_id'][i])
submission_ids.append(score_result['submission_id'])
if join_feedback:
feedback = "".join(feedback_items)
else:
feedback = feedback_items
score = int(round(median(score_result['score'])))
else:
# This is for instructor and ML grading
feedback, rubric_score = self._format_feedback(score_result, system)
score = score_result['score']
rubric_scores = [rubric_score]
grader_types = [score_result['grader_type']]
feedback_items = [feedback]
try:
feedback_dict = json.loads(score_result['feedback'])
except Exception:
feedback_dict = score_result.get('feedback', '')
feedback_dicts = [feedback_dict]
grader_ids = [score_result['grader_id']]
submission_ids = [score_result['submission_id']]
self.submission_id = score_result['submission_id']
self.grader_id = score_result['grader_id']
return {
'valid': True,
'score': score,
'feedback': feedback,
'rubric_scores': rubric_scores,
'grader_types': grader_types,
'feedback_items': feedback_items,
'feedback_dicts': feedback_dicts,
'grader_ids': grader_ids,
'submission_ids': submission_ids,
}
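    # For illustration, a hypothetical minimal instructor-graded reply that
    # this parser accepts:
    #   {"score": 2, "feedback": "{\"feedback\": \"Good work\"}",
    #    "grader_type": "IN", "success": true, "grader_id": 1,
    #    "submission_id": 1, "rubric_scores_complete": false,
    #    "rubric_xml": ""}
    # which yields {'valid': True, 'score': 2, 'grader_types': ['IN'], ...}.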
def latest_post_assessment(self, system, short_feedback=False, join_feedback=True):
"""
Gets the latest feedback, parses, and returns
@param short_feedback: If the long feedback is wanted or not
@return: Returns formatted feedback
"""
if not self.child_history:
return ""
feedback_dict = self._parse_score_msg(
self.child_history[-1].get('post_assessment', "{}"),
system,
join_feedback=join_feedback
)
if not short_feedback:
return feedback_dict['feedback'] if feedback_dict['valid'] else ''
if feedback_dict['valid']:
short_feedback = self._convert_longform_feedback_to_html(
json.loads(self.child_history[-1].get('post_assessment', "")))
return short_feedback if feedback_dict['valid'] else ''
def format_feedback_with_evaluation(self, system, feedback):
"""
Renders a given html feedback into an evaluation template
@param feedback: HTML feedback
@return: Rendered html
"""
context = {'msg': feedback, 'id': "1", 'rows': 50, 'cols': 50}
html = system.render_template('{0}/open_ended_evaluation.html'.format(self.TEMPLATE_DIR), context)
return html
def handle_ajax(self, dispatch, data, system):
'''
This is called by courseware.module_render, to handle an AJAX call.
"data" is request.POST.
Returns a json dictionary:
{ 'progress_changed' : True/False,
'progress' : 'none'/'in_progress'/'done',
<other request-specific values here > }
'''
handlers = {
'save_answer': self.save_answer,
'score_update': self.update_score,
'save_post_assessment': self.message_post,
'skip_post_assessment': self.skip_post_assessment,
'check_for_score': self.check_for_score,
'store_answer': self.store_answer,
}
_ = self.system.service(self, "i18n").ugettext
if dispatch not in handlers:
# This is a dev_facing_error
log.error("Cannot find {0} in handlers in handle_ajax function for open_ended_module.py".format(dispatch))
# This is a dev_facing_error
return json.dumps(
{'error': _('Error handling action. Please try again.'), 'success': False}
)
before = self.get_progress()
d = handlers[dispatch](data, system)
after = self.get_progress()
d.update({
'progress_changed': after != before,
'progress_status': Progress.to_js_status_str(after),
})
return json.dumps(d, cls=ComplexEncoder)
def check_for_score(self, _data, system):
"""
Checks to see if a score has been received yet.
        @param _data: AJAX dictionary (unused)
@param system: Modulesystem (needed to align with other ajax functions)
@return: Returns the current state
"""
state = self.child_state
return {'state': state}
def save_answer(self, data, system):
"""
Saves a student answer
@param data: AJAX dictionary
@param system: modulesystem
@return: Success indicator
"""
# Once we close the problem, we should not allow students
# to save answers
error_message = ""
closed, msg = self.check_if_closed()
if closed:
return msg
if self.child_state != self.INITIAL:
return self.out_of_sync_error(data)
message = "Successfully saved your submission."
# add new history element with answer and empty score and hint.
success, error_message, data = self.append_file_link_to_student_answer(data)
if not success:
message = error_message
else:
data['student_answer'] = OpenEndedModule.sanitize_html(data['student_answer'])
success, error_message = self.send_to_grader(data['student_answer'], system)
if not success:
message = error_message
# Store the answer instead
self.store_answer(data, system)
else:
self.new_history_entry(data['student_answer'])
self.change_state(self.ASSESSING)
return {
'success': success,
'error': message,
'student_response': data['student_answer'].replace("\n", "<br/>")
}
def update_score(self, data, system):
"""
Updates the current score via ajax. Called by xqueue.
Input: AJAX data dictionary, modulesystem
Output: None
"""
queuekey = data['queuekey']
score_msg = data['xqueue_body']
# TODO: Remove need for cmap
self._update_score(score_msg, queuekey, system)
return dict() # No AJAX return is needed
def get_html(self, system):
"""
Gets the HTML for this problem and renders it
Input: Modulesystem object
Output: Rendered HTML
"""
_ = self.system.service(self, "i18n").ugettext
# set context variables and render template
eta_string = None
if self.child_state != self.INITIAL:
post_assessment = self.latest_post_assessment(system)
score = self.latest_score()
correct = 'correct' if self.is_submission_correct(score) else 'incorrect'
if self.child_state == self.ASSESSING:
# Translators: this string appears once an openended response
# is submitted but before it has been graded
eta_string = _("Your response has been submitted. Please check back later for your grade.")
else:
post_assessment = ""
correct = ""
previous_answer = self.get_display_answer()
# Use the module name as a unique id to pass to the template.
try:
module_id = self.system.location.name
except AttributeError:
# In cases where we don't have a system or a location, use a fallback.
module_id = "open_ended"
context = {
'prompt': self.child_prompt,
'previous_answer': previous_answer,
'state': self.child_state,
'allow_reset': self._allow_reset(),
'rows': 30,
'cols': 80,
'module_id': module_id,
'msg': post_assessment,
'child_type': 'openended',
'correct': correct,
'accept_file_upload': self.accept_file_upload,
'eta_message': eta_string,
}
html = system.render_template('{0}/open_ended.html'.format(self.TEMPLATE_DIR), context)
return html
def latest_score(self):
"""None if not available"""
if not self.child_history:
return None
return self.score_for_attempt(-1)
def all_scores(self):
"""None if not available"""
if not self.child_history:
return None
return [self.score_for_attempt(index) for index in xrange(0, len(self.child_history))]
def score_for_attempt(self, index):
"""
Return sum of rubric scores for ML grading otherwise return attempt["score"].
"""
attempt = self.child_history[index]
score = attempt.get('score')
post_assessment_data = self._parse_score_msg(attempt.get('post_assessment', "{}"), self.system)
grader_types = post_assessment_data.get('grader_types')
# According to _parse_score_msg in ML grading there should be only one grader type.
if len(grader_types) == 1 and grader_types[0] == 'ML':
rubric_scores = post_assessment_data.get("rubric_scores")
# Similarly there should be only one list of rubric scores.
if len(rubric_scores) == 1:
rubric_scores_sum = sum(rubric_scores[0])
log.debug("""Score normalized for location={loc}, old_score={old_score},
new_score={new_score}, rubric_score={rubric_score}""".format(
loc=self.location_string,
old_score=score,
new_score=rubric_scores_sum,
rubric_score=rubric_scores
))
return rubric_scores_sum
return score
class OpenEndedDescriptor(object):
"""
Module for adding open ended response questions to courses
"""
mako_template = "widgets/html-edit.html"
module_class = OpenEndedModule
filename_extension = "xml"
has_score = True
def __init__(self, system):
self.system = system
@classmethod
def definition_from_xml(cls, xml_object, system):
"""
Pull out the open ended parameters into a dictionary.
Returns:
{
'oeparam': 'some-html'
}
"""
for child in ['openendedparam']:
if len(xml_object.xpath(child)) != 1:
# This is a staff_facing_error
raise ValueError(
u"Open Ended definition must include exactly one '{0}' tag. Contact the learning sciences group for assistance.".format(
child))
def parse(k):
"""Assumes that xml_object has child k"""
return xml_object.xpath(k)[0]
return {
'oeparam': parse('openendedparam')
}
def definition_to_xml(self, resource_fs):
'''Return an xml element representing this definition.'''
elt = etree.Element('openended')
def add_child(k):
child_str = u'<{tag}>{body}</{tag}>'.format(tag=k, body=self.definition[k])
child_node = etree.fromstring(child_str)
elt.append(child_node)
for child in ['openendedparam']:
add_child(child)
return elt
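# --- Illustrative sketch (not part of the original module) ---
# A minimal round trip through the descriptor's XML helpers, using a bare
# <openended> block shaped like the sample in OpenEndedModule's docstring.
def _demo_definition_round_trip():
    xml_object = etree.fromstring(
        '<openended><openendedparam>'
        '<grader_payload>{}</grader_payload>'
        '</openendedparam></openended>'
    )
    definition = OpenEndedDescriptor.definition_from_xml(xml_object, system=None)
    # 'oeparam' is the <openendedparam> element itself.
    assert definition['oeparam'].tag == 'openendedparam'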
|
eranroz/revscoring
|
refs/heads/master
|
revscoring/languages/vietnamese.py
|
1
|
import re
import sys
import enchant
from .language import Language, LanguageUtility
# https://vi.wiktionary.org/wiki/Th%C3%A0nh_vi%C3%AAn:Laurent_Bouvier/Free_Vietnamese_Dictionary_Project_Vietnamese-Vietnamese#Allwiki_.28closed.29
STOPWORDS = set([
"ai", "bằng", "bị", "bộ", "cho", "chưa", "chỉ", "cuối", "cuộc",
"các", "cách", "cái", "có", "cùng", "cũng", "cạnh", "cả", "cục",
"của", "dùng", "dưới", "dừng", "giữa", "gì", "hay", "hoặc",
"khi", "khác", "không", "luôn", "là", "làm", "lại", "mà", "mọi",
"mỗi", "một", "nhiều", "như", "nhưng", "nào", "này", "nữa",
"phải", "qua", "quanh", "quá", "ra", "rất", "sau", "sẽ", "sự",
"theo", "thành", "thêm", "thì", "thứ", "trong", "trên", "trước",
"trừ", "tuy", "tìm", "từng", "và", "vài", "vào", "vì", "vẫn",
"về", "với", "xuống", "đang", "đã", "được", "đấy", "đầu", "đủ"
])
BAD_REGEXES = [
"[ck]ặ[tc]", "[ck]u", "cứt", "(dz?|gi)âm", "đái", "đéo", "đ[ụù].", "đĩ",
"đ[íị]t", "ỉa", "l[ôồ]n",
"dick", "cunt", "fag", "bitch", "shit", "fuck.*", "ass", "gay", "ghey",
"slut",
]
INFORMAL_REGEXES = [
"bợn", "bro", "chẳng", "ch[ớứ]", "cú", "đừng", "fải", "(he){2,}", "(hi)+",
"khỉ", "mày", "nghịch", "ngu", "ngụy", "nguỵ", "ok", "ơi", "quái", "thằng",
"thôi", "tui", "ừ", "vời", "wái?", "zì",
"moron", "retard", "stupid",
]
BAD_REGEX = re.compile("|".join(BAD_REGEXES))
INFORMAL_REGEX = re.compile("|".join(INFORMAL_REGEXES))
try:
DICTIONARY = enchant.Dict("vi")
except enchant.errors.DictNotFoundError:
raise ImportError("No enchant-compatible dictionary found for 'vi'. " +
"Consider installing 'hunspell-vi'.")
def stem_word_process():
def stem_word(word):
return word.lower()
return stem_word
stem_word = LanguageUtility("stem_word", stem_word_process)
def is_badword_process():
def is_badword(word):
return bool(BAD_REGEX.match(word.lower()))
return is_badword
is_badword = LanguageUtility("is_badword", is_badword_process)
def is_informal_word_process():
def is_informal_word(word):
return bool(INFORMAL_REGEX.match(word.lower()))
return is_informal_word
is_informal_word = LanguageUtility("is_informal_word",
is_informal_word_process, depends_on=[])
def is_misspelled_process():
def is_misspelled(word):
return not DICTIONARY.check(word)
return is_misspelled
is_misspelled = LanguageUtility("is_misspelled", is_misspelled_process)
def is_stopword_process():
def is_stopword(word):
return word.lower() in STOPWORDS
return is_stopword
is_stopword = LanguageUtility("is_stopword", is_stopword_process)
sys.modules[__name__] = Language(
__name__,
[stem_word, is_badword, is_informal_word, is_misspelled, is_stopword]
)
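# For illustration: the detectors are plain regex matches against the
# lower-cased word, e.g. BAD_REGEX.match("cặc") and INFORMAL_REGEX.match("ok")
# are both truthy; the stopword check likewise lower-cases the word before
# testing membership in STOPWORDS.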
|
specify/specify7
|
refs/heads/next-release
|
specifyweb/workbench/upload/column_options.py
|
1
|
from typing import List, Dict, Any, NamedTuple, Union, Optional, Set
from typing_extensions import Literal
MatchBehavior = Literal["ignoreWhenBlank", "ignoreAlways", "ignoreNever"]
class ColumnOptions(NamedTuple):
column: str
matchBehavior: MatchBehavior
nullAllowed: bool
default: Optional[str]
def to_json(self) -> Union[Dict, str]:
if self.matchBehavior == "ignoreNever" and self.nullAllowed and self.default is None:
return self.column
return dict(self._asdict())
class ExtendedColumnOptions(NamedTuple):
column: str
matchBehavior: MatchBehavior
nullAllowed: bool
default: Optional[str]
uiformatter: Any
schemaitem: Any
picklist: Any
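# --- Illustrative sketch (not part of the original module) ---
# to_json collapses a ColumnOptions with all-default behaviour to the bare
# column name; any non-default field forces the full dict form.
if __name__ == "__main__":
    plain = ColumnOptions(column="Catalog #", matchBehavior="ignoreNever",
                          nullAllowed=True, default=None)
    assert plain.to_json() == "Catalog #"
    custom = plain._replace(matchBehavior="ignoreAlways")
    assert custom.to_json() == {"column": "Catalog #",
                                "matchBehavior": "ignoreAlways",
                                "nullAllowed": True, "default": None}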
|
drawks/ansible
|
refs/heads/devel
|
test/units/modules/storage/netapp/test_na_ontap_lun_copy.py
|
43
|
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
''' unit test template for ONTAP Ansible module '''
from __future__ import print_function
import json
import pytest
from units.compat import unittest
from units.compat.mock import patch, Mock
from ansible.module_utils import basic
from ansible.module_utils._text import to_bytes
import ansible.module_utils.netapp as netapp_utils
from ansible.modules.storage.netapp.na_ontap_lun_copy \
import NetAppOntapLUNCopy as my_module # module under test
if not netapp_utils.has_netapp_lib():
pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
def set_module_args(args):
"""prepare arguments so that they will be picked up during module creation"""
args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
class AnsibleExitJson(Exception):
"""Exception class to be raised by module.exit_json and caught by the test case"""
pass
class AnsibleFailJson(Exception):
"""Exception class to be raised by module.fail_json and caught by the test case"""
pass
def exit_json(*args, **kwargs): # pylint: disable=unused-argument
"""function to patch over exit_json; package return data into an exception"""
if 'changed' not in kwargs:
kwargs['changed'] = False
raise AnsibleExitJson(kwargs)
def fail_json(*args, **kwargs): # pylint: disable=unused-argument
"""function to patch over fail_json; package return data into an exception"""
kwargs['failed'] = True
raise AnsibleFailJson(kwargs)
class MockONTAPConnection(object):
''' mock server connection to ONTAP host '''
def __init__(self, kind=None, parm1=None):
''' save arguments '''
self.type = kind
self.parm1 = parm1
self.xml_in = None
self.xml_out = None
def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
''' mock invoke_successfully returning xml data '''
self.xml_in = xml
if self.type == 'destination_vserver':
xml = self.build_lun_info(self.parm1)
self.xml_out = xml
return xml
@staticmethod
def build_lun_info(data):
''' build xml data for lun-info '''
xml = netapp_utils.zapi.NaElement('xml')
attributes = {
'num-records': 1,
}
xml.translate_struct(attributes)
return xml
class TestMyModule(unittest.TestCase):
''' a group of related Unit Tests '''
def setUp(self):
self.mock_module_helper = patch.multiple(basic.AnsibleModule,
exit_json=exit_json,
fail_json=fail_json)
self.mock_module_helper.start()
self.addCleanup(self.mock_module_helper.stop)
self.mock_lun_copy = {
'source_vserver': 'ansible',
'destination_path': '/vol/test/test_copy_dest_dest_new_reviewd_new',
'source_path': '/vol/test/test_copy_1',
'destination_vserver': 'ansible',
'state': 'present'
}
def mock_args(self):
return {
'source_vserver': self.mock_lun_copy['source_vserver'],
'destination_path': self.mock_lun_copy['destination_path'],
'source_path': self.mock_lun_copy['source_path'],
'destination_vserver': self.mock_lun_copy['destination_vserver'],
'state': self.mock_lun_copy['state'],
'hostname': 'hostname',
'username': 'username',
'password': 'password',
}
# self.server = MockONTAPConnection()
def get_lun_copy_mock_object(self, kind=None):
"""
Helper method to return an na_ontap_lun_copy object
:param kind: passes this param to MockONTAPConnection()
:return: na_ontap_interface object
"""
lun_copy_obj = my_module()
lun_copy_obj.autosupport_log = Mock(return_value=None)
if kind is None:
lun_copy_obj.server = MockONTAPConnection()
else:
lun_copy_obj.server = MockONTAPConnection(kind=kind)
return lun_copy_obj
def test_module_fail_when_required_args_missing(self):
''' required arguments are reported as errors '''
with pytest.raises(AnsibleFailJson) as exc:
set_module_args({})
my_module()
print('Info: %s' % exc.value.args[0]['msg'])
def test_create_error_missing_param(self):
''' Test if create throws an error if required param 'destination_vserver' is not specified'''
data = self.mock_args()
del data['destination_vserver']
set_module_args(data)
with pytest.raises(AnsibleFailJson) as exc:
self.get_lun_copy_mock_object('lun_copy').copy_lun()
msg = 'Error: Missing one or more required parameters for copying lun: ' \
'destination_path, source_path, destination_path'
        expected = sorted(msg.split(','))
        received = sorted(exc.value.args[0]['msg'].split(','))
assert expected == received
def test_successful_copy(self):
''' Test successful create '''
# data = self.mock_args()
set_module_args(self.mock_args())
with pytest.raises(AnsibleExitJson) as exc:
self.get_lun_copy_mock_object().apply()
assert exc.value.args[0]['changed']
def test_copy_idempotency(self):
''' Test create idempotency '''
set_module_args(self.mock_args())
with pytest.raises(AnsibleExitJson) as exc:
self.get_lun_copy_mock_object('destination_vserver').apply()
assert not exc.value.args[0]['changed']
|
ritashugisha/MoonMovie
|
refs/heads/master
|
requests/compat.py
|
571
|
# -*- coding: utf-8 -*-
"""
pythoncompat
"""
from .packages import chardet
import sys
# -------
# Pythons
# -------
# Syntax sugar.
_ver = sys.version_info
#: Python 2.x?
is_py2 = (_ver[0] == 2)
#: Python 3.x?
is_py3 = (_ver[0] == 3)
#: Python 3.0.x
is_py30 = (is_py3 and _ver[1] == 0)
#: Python 3.1.x
is_py31 = (is_py3 and _ver[1] == 1)
#: Python 3.2.x
is_py32 = (is_py3 and _ver[1] == 2)
#: Python 3.3.x
is_py33 = (is_py3 and _ver[1] == 3)
#: Python 3.4.x
is_py34 = (is_py3 and _ver[1] == 4)
#: Python 2.7.x
is_py27 = (is_py2 and _ver[1] == 7)
#: Python 2.6.x
is_py26 = (is_py2 and _ver[1] == 6)
#: Python 2.5.x
is_py25 = (is_py2 and _ver[1] == 5)
#: Python 2.4.x
is_py24 = (is_py2 and _ver[1] == 4) # I'm assuming this is not by choice.
# ---------
# Platforms
# ---------
# Syntax sugar.
_ver = sys.version.lower()
is_pypy = ('pypy' in _ver)
is_jython = ('jython' in _ver)
is_ironpython = ('iron' in _ver)
# Assume CPython, if nothing else.
is_cpython = not any((is_pypy, is_jython, is_ironpython))
# Windows-based system.
is_windows = 'win32' in str(sys.platform).lower()
# Standard Linux 2+ system.
is_linux = ('linux' in str(sys.platform).lower())
is_osx = ('darwin' in str(sys.platform).lower())
is_hpux = ('hpux' in str(sys.platform).lower()) # Complete guess.
is_solaris = ('sunos' in str(sys.platform).lower())  # sys.platform is 'sunos5' on Solaris.
try:
import simplejson as json
except ImportError:
import json
# ---------
# Specifics
# ---------
if is_py2:
from urllib import quote, unquote, quote_plus, unquote_plus, urlencode, getproxies, proxy_bypass
from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag
from urllib2 import parse_http_list
import cookielib
from Cookie import Morsel
from StringIO import StringIO
from .packages.urllib3.packages.ordered_dict import OrderedDict
from httplib import IncompleteRead
builtin_str = str
bytes = str
str = unicode
basestring = basestring
numeric_types = (int, long, float)
elif is_py3:
from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag
from urllib.request import parse_http_list, getproxies, proxy_bypass
from http import cookiejar as cookielib
from http.cookies import Morsel
from io import StringIO
from collections import OrderedDict
from http.client import IncompleteRead
builtin_str = str
str = str
bytes = bytes
basestring = (str, bytes)
numeric_types = (int, float)
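# For illustration: downstream code can rely on these aliases uniformly, e.g.
# isinstance(u'text', basestring) and isinstance(b'raw', bytes) both hold on
# Python 2 and Python 3 with the definitions above.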
|
Linkinzoo/H5Exercise
|
refs/heads/master
|
Task11_SASS学习/JiKe/node_modules/node-gyp/gyp/pylib/gyp/common_test.py
|
2542
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the common.py file."""
import gyp.common
import unittest
import sys
class TestTopologicallySorted(unittest.TestCase):
def test_Valid(self):
"""Test that sorting works on a valid graph with one possible order."""
graph = {
'a': ['b', 'c'],
'b': [],
'c': ['d'],
'd': ['b'],
}
def GetEdge(node):
return tuple(graph[node])
self.assertEqual(
gyp.common.TopologicallySorted(graph.keys(), GetEdge),
['a', 'c', 'd', 'b'])
def test_Cycle(self):
"""Test that an exception is thrown on a cyclic graph."""
graph = {
'a': ['b'],
'b': ['c'],
'c': ['d'],
'd': ['a'],
}
def GetEdge(node):
return tuple(graph[node])
self.assertRaises(
gyp.common.CycleError, gyp.common.TopologicallySorted,
graph.keys(), GetEdge)
class TestGetFlavor(unittest.TestCase):
"""Test that gyp.common.GetFlavor works as intended"""
original_platform = ''
def setUp(self):
self.original_platform = sys.platform
def tearDown(self):
sys.platform = self.original_platform
def assertFlavor(self, expected, argument, param):
sys.platform = argument
self.assertEqual(expected, gyp.common.GetFlavor(param))
def test_platform_default(self):
self.assertFlavor('freebsd', 'freebsd9' , {})
self.assertFlavor('freebsd', 'freebsd10', {})
self.assertFlavor('openbsd', 'openbsd5' , {})
    self.assertFlavor('solaris', 'sunos5' , {})
    self.assertFlavor('solaris', 'sunos' , {})
    self.assertFlavor('linux' , 'linux2' , {})
    self.assertFlavor('linux' , 'linux3' , {})
def test_param(self):
self.assertFlavor('foobar', 'linux2' , {'flavor': 'foobar'})
if __name__ == '__main__':
unittest.main()
|
olegv142/stm32tivc_usb_cdc
|
refs/heads/master
|
tools/cdc_detect.py
|
1
|
# Enumerate registered USB devices on windows host
import _winreg as reg
def cdc_enum(vid, pid):
"""Returns the list of port names for the giver VID/PID"""
k, i = None, 0
ports = []
try:
        k = reg.OpenKey(reg.HKEY_LOCAL_MACHINE, r"SYSTEM\CurrentControlSet\Enum\USB\Vid_%04x&Pid_%04x" % (vid, pid))
while True:
sk = None
dev = reg.EnumKey(k, i)
i += 1
try:
                sk = reg.OpenKey(k, dev + r'\Device Parameters')
v = reg.QueryValueEx(sk, 'PortName')
if v[1] == reg.REG_SZ:
ports.append(v[0].encode('ascii'))
except WindowsError:
pass
finally:
if sk is not None: reg.CloseKey(sk)
except WindowsError:
pass
finally:
if k is not None: reg.CloseKey(k)
return ports
def stm32_cdc_enum():
return cdc_enum(0x0483, 0x5740)
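# --- Illustrative sketch (not part of the original script) ---
# Print the virtual COM port names for the STM32 CDC device, if any.
if __name__ == '__main__':
    for port in stm32_cdc_enum():
        print port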
|
ESSolutions/ESSArch_Core
|
refs/heads/master
|
ESSArch_Core/essxml/ProfileMaker/migrations/0003_auto_20160830_0701.py
|
1
|
"""
ESSArch is an open source archiving and digital preservation system
ESSArch
Copyright (C) 2005-2019 ES Solutions AB
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Contact information:
Web - http://www.essolutions.se
Email - essarch@essolutions.se
"""
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-30 07:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ProfileMaker', '0002_finishedtemplate'),
]
operations = [
migrations.AddField(
model_name='templatepackage',
name='tempates',
field=models.JSONField(null=True),
),
]
|
odubno/microblog
|
refs/heads/master
|
venv/lib/python2.7/site-packages/babel/messages/catalog.py
|
86
|
# -*- coding: utf-8 -*-
"""
babel.messages.catalog
~~~~~~~~~~~~~~~~~~~~~~
Data structures for message catalogs.
:copyright: (c) 2013 by the Babel Team.
:license: BSD, see LICENSE for more details.
"""
import re
import time
from cgi import parse_header
from datetime import datetime, time as time_
from difflib import get_close_matches
from email import message_from_string
from copy import copy
from babel import __version__ as VERSION
from babel.core import Locale
from babel.dates import format_datetime
from babel.messages.plurals import get_plural
from babel.util import odict, distinct, LOCALTZ, FixedOffsetTimezone
from babel._compat import string_types, number_types, PY2, cmp
__all__ = ['Message', 'Catalog', 'TranslationError']
PYTHON_FORMAT = re.compile(r'''(?x)
\%
(?:\(([\w]*)\))?
(
[-#0\ +]?(?:\*|[\d]+)?
(?:\.(?:\*|[\d]+))?
[hlL]?
)
([diouxXeEfFgGcrs%])
''')
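# For illustration: PYTHON_FORMAT.search('%(name)s') matches with groups
# ('name', '', 's'); bare specifiers such as '%d' match with groups
# (None, '', 'd').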
class Message(object):
"""Representation of a single message in a catalog."""
def __init__(self, id, string=u'', locations=(), flags=(), auto_comments=(),
user_comments=(), previous_id=(), lineno=None, context=None):
"""Create the message object.
:param id: the message ID, or a ``(singular, plural)`` tuple for
pluralizable messages
:param string: the translated message string, or a
``(singular, plural)`` tuple for pluralizable messages
        :param locations: a sequence of ``(filename, lineno)`` tuples
:param flags: a set or sequence of flags
:param auto_comments: a sequence of automatic comments for the message
:param user_comments: a sequence of user comments for the message
:param previous_id: the previous message ID, or a ``(singular, plural)``
tuple for pluralizable messages
:param lineno: the line number on which the msgid line was found in the
PO file, if any
:param context: the message context
"""
self.id = id #: The message ID
if not string and self.pluralizable:
string = (u'', u'')
self.string = string #: The message translation
self.locations = list(distinct(locations))
self.flags = set(flags)
if id and self.python_format:
self.flags.add('python-format')
else:
self.flags.discard('python-format')
self.auto_comments = list(distinct(auto_comments))
self.user_comments = list(distinct(user_comments))
if isinstance(previous_id, string_types):
self.previous_id = [previous_id]
else:
self.previous_id = list(previous_id)
self.lineno = lineno
self.context = context
def __repr__(self):
return '<%s %r (flags: %r)>' % (type(self).__name__, self.id,
list(self.flags))
def __cmp__(self, obj):
"""Compare Messages, taking into account plural ids"""
def values_to_compare():
if isinstance(obj, Message):
plural = self.pluralizable
obj_plural = obj.pluralizable
if plural and obj_plural:
return self.id[0], obj.id[0]
elif plural:
return self.id[0], obj.id
elif obj_plural:
return self.id, obj.id[0]
return self.id, obj.id
this, other = values_to_compare()
return cmp(this, other)
def __gt__(self, other):
return self.__cmp__(other) > 0
def __lt__(self, other):
return self.__cmp__(other) < 0
def __ge__(self, other):
return self.__cmp__(other) >= 0
def __le__(self, other):
return self.__cmp__(other) <= 0
def __eq__(self, other):
return self.__cmp__(other) == 0
def __ne__(self, other):
return self.__cmp__(other) != 0
def clone(self):
return Message(*map(copy, (self.id, self.string, self.locations,
self.flags, self.auto_comments,
self.user_comments, self.previous_id,
self.lineno, self.context)))
def check(self, catalog=None):
"""Run various validation checks on the message. Some validations
are only performed if the catalog is provided. This method returns
a sequence of `TranslationError` objects.
:rtype: ``iterator``
:param catalog: A catalog instance that is passed to the checkers
:see: `Catalog.check` for a way to perform checks for all messages
in a catalog.
"""
from babel.messages.checkers import checkers
errors = []
for checker in checkers:
try:
checker(catalog, self)
except TranslationError as e:
errors.append(e)
return errors
@property
def fuzzy(self):
"""Whether the translation is fuzzy.
>>> Message('foo').fuzzy
False
>>> msg = Message('foo', 'foo', flags=['fuzzy'])
>>> msg.fuzzy
True
>>> msg
<Message 'foo' (flags: ['fuzzy'])>
:type: `bool`"""
return 'fuzzy' in self.flags
@property
def pluralizable(self):
"""Whether the message is plurizable.
>>> Message('foo').pluralizable
False
>>> Message(('foo', 'bar')).pluralizable
True
:type: `bool`"""
return isinstance(self.id, (list, tuple))
@property
def python_format(self):
"""Whether the message contains Python-style parameters.
>>> Message('foo %(name)s bar').python_format
True
>>> Message(('foo %(name)s', 'foo %(name)s')).python_format
True
:type: `bool`"""
ids = self.id
if not isinstance(ids, (list, tuple)):
ids = [ids]
return any(PYTHON_FORMAT.search(id) for id in ids)
class TranslationError(Exception):
"""Exception thrown by translation checkers when invalid message
translations are encountered."""
DEFAULT_HEADER = u"""\
# Translations template for PROJECT.
# Copyright (C) YEAR ORGANIZATION
# This file is distributed under the same license as the PROJECT project.
# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
#"""
if PY2:
def _parse_header(header_string):
# message_from_string only works for str, not for unicode
headers = message_from_string(header_string.encode('utf8'))
decoded_headers = {}
for name, value in headers.items():
name = name.decode('utf8')
value = value.decode('utf8')
decoded_headers[name] = value
return decoded_headers
else:
_parse_header = message_from_string
class Catalog(object):
"""Representation of a message catalog."""
def __init__(self, locale=None, domain=None, header_comment=DEFAULT_HEADER,
project=None, version=None, copyright_holder=None,
msgid_bugs_address=None, creation_date=None,
revision_date=None, last_translator=None, language_team=None,
charset=None, fuzzy=True):
"""Initialize the catalog object.
:param locale: the locale identifier or `Locale` object, or `None`
if the catalog is not bound to a locale (which basically
means it's a template)
:param domain: the message domain
:param header_comment: the header comment as string, or `None` for the
default header
:param project: the project's name
:param version: the project's version
:param copyright_holder: the copyright holder of the catalog
:param msgid_bugs_address: the email address or URL to submit bug
reports to
:param creation_date: the date the catalog was created
:param revision_date: the date the catalog was revised
:param last_translator: the name and email of the last translator
:param language_team: the name and email of the language team
:param charset: the encoding to use in the output (defaults to utf-8)
:param fuzzy: the fuzzy bit on the catalog header
"""
self.domain = domain #: The message domain
if locale:
locale = Locale.parse(locale)
self.locale = locale #: The locale or `None`
self._header_comment = header_comment
self._messages = odict()
self.project = project or 'PROJECT' #: The project name
self.version = version or 'VERSION' #: The project version
self.copyright_holder = copyright_holder or 'ORGANIZATION'
self.msgid_bugs_address = msgid_bugs_address or 'EMAIL@ADDRESS'
self.last_translator = last_translator or 'FULL NAME <EMAIL@ADDRESS>'
"""Name and email address of the last translator."""
self.language_team = language_team or 'LANGUAGE <LL@li.org>'
"""Name and email address of the language team."""
self.charset = charset or 'utf-8'
if creation_date is None:
creation_date = datetime.now(LOCALTZ)
elif isinstance(creation_date, datetime) and not creation_date.tzinfo:
creation_date = creation_date.replace(tzinfo=LOCALTZ)
self.creation_date = creation_date #: Creation date of the template
if revision_date is None:
revision_date = 'YEAR-MO-DA HO:MI+ZONE'
elif isinstance(revision_date, datetime) and not revision_date.tzinfo:
revision_date = revision_date.replace(tzinfo=LOCALTZ)
self.revision_date = revision_date #: Last revision date of the catalog
self.fuzzy = fuzzy #: Catalog header fuzzy bit (`True` or `False`)
self.obsolete = odict() #: Dictionary of obsolete messages
self._num_plurals = None
self._plural_expr = None
def _get_header_comment(self):
comment = self._header_comment
year = datetime.now(LOCALTZ).strftime('%Y')
if hasattr(self.revision_date, 'strftime'):
year = self.revision_date.strftime('%Y')
comment = comment.replace('PROJECT', self.project) \
.replace('VERSION', self.version) \
.replace('YEAR', year) \
.replace('ORGANIZATION', self.copyright_holder)
if self.locale:
comment = comment.replace('Translations template', '%s translations'
% self.locale.english_name)
return comment
def _set_header_comment(self, string):
self._header_comment = string
header_comment = property(_get_header_comment, _set_header_comment, doc="""\
The header comment for the catalog.
>>> catalog = Catalog(project='Foobar', version='1.0',
... copyright_holder='Foo Company')
>>> print catalog.header_comment #doctest: +ELLIPSIS
# Translations template for Foobar.
# Copyright (C) ... Foo Company
# This file is distributed under the same license as the Foobar project.
# FIRST AUTHOR <EMAIL@ADDRESS>, ....
#
The header can also be set from a string. Any known upper-case variables
will be replaced when the header is retrieved again:
>>> catalog = Catalog(project='Foobar', version='1.0',
... copyright_holder='Foo Company')
>>> catalog.header_comment = '''\\
... # The POT for my really cool PROJECT project.
... # Copyright (C) 1990-2003 ORGANIZATION
... # This file is distributed under the same license as the PROJECT
... # project.
... #'''
>>> print catalog.header_comment
# The POT for my really cool Foobar project.
# Copyright (C) 1990-2003 Foo Company
# This file is distributed under the same license as the Foobar
# project.
#
:type: `unicode`
""")
def _get_mime_headers(self):
headers = []
headers.append(('Project-Id-Version',
'%s %s' % (self.project, self.version)))
headers.append(('Report-Msgid-Bugs-To', self.msgid_bugs_address))
headers.append(('POT-Creation-Date',
format_datetime(self.creation_date, 'yyyy-MM-dd HH:mmZ',
locale='en')))
if isinstance(self.revision_date, (datetime, time_) + number_types):
headers.append(('PO-Revision-Date',
format_datetime(self.revision_date,
'yyyy-MM-dd HH:mmZ', locale='en')))
else:
headers.append(('PO-Revision-Date', self.revision_date))
headers.append(('Last-Translator', self.last_translator))
if (self.locale is not None) and ('LANGUAGE' in self.language_team):
headers.append(('Language-Team',
self.language_team.replace('LANGUAGE',
str(self.locale))))
else:
headers.append(('Language-Team', self.language_team))
if self.locale is not None:
headers.append(('Plural-Forms', self.plural_forms))
headers.append(('MIME-Version', '1.0'))
headers.append(('Content-Type',
'text/plain; charset=%s' % self.charset))
headers.append(('Content-Transfer-Encoding', '8bit'))
headers.append(('Generated-By', 'Babel %s\n' % VERSION))
return headers
def _set_mime_headers(self, headers):
for name, value in headers:
name = name.lower()
if name == 'project-id-version':
parts = value.split(' ')
self.project = u' '.join(parts[:-1])
self.version = parts[-1]
elif name == 'report-msgid-bugs-to':
self.msgid_bugs_address = value
elif name == 'last-translator':
self.last_translator = value
elif name == 'language-team':
self.language_team = value
elif name == 'content-type':
mimetype, params = parse_header(value)
if 'charset' in params:
self.charset = params['charset'].lower()
elif name == 'plural-forms':
_, params = parse_header(' ;' + value)
self._num_plurals = int(params.get('nplurals', 2))
self._plural_expr = params.get('plural', '(n != 1)')
elif name == 'pot-creation-date':
# FIXME: this should use dates.parse_datetime as soon as that
# is ready
                value, tzoffset, _ = re.split(r'([+-]\d{4})$', value, 1)
tt = time.strptime(value, '%Y-%m-%d %H:%M')
ts = time.mktime(tt)
# Separate the offset into a sign component, hours, and minutes
plus_minus_s, rest = tzoffset[0], tzoffset[1:]
hours_offset_s, mins_offset_s = rest[:2], rest[2:]
# Make them all integers
plus_minus = int(plus_minus_s + '1')
hours_offset = int(hours_offset_s)
mins_offset = int(mins_offset_s)
# Calculate net offset
net_mins_offset = hours_offset * 60
net_mins_offset += mins_offset
net_mins_offset *= plus_minus
# Create an offset object
tzoffset = FixedOffsetTimezone(net_mins_offset)
# Store the offset in a datetime object
dt = datetime.fromtimestamp(ts)
self.creation_date = dt.replace(tzinfo=tzoffset)
elif name == 'po-revision-date':
# Keep the value if it's not the default one
if 'YEAR' not in value:
# FIXME: this should use dates.parse_datetime as soon as
# that is ready
                    value, tzoffset, _ = re.split(r'([+-]\d{4})$', value, 1)
tt = time.strptime(value, '%Y-%m-%d %H:%M')
ts = time.mktime(tt)
# Separate the offset into a sign component, hours, and
# minutes
plus_minus_s, rest = tzoffset[0], tzoffset[1:]
hours_offset_s, mins_offset_s = rest[:2], rest[2:]
# Make them all integers
plus_minus = int(plus_minus_s + '1')
hours_offset = int(hours_offset_s)
mins_offset = int(mins_offset_s)
# Calculate net offset
net_mins_offset = hours_offset * 60
net_mins_offset += mins_offset
net_mins_offset *= plus_minus
# Create an offset object
tzoffset = FixedOffsetTimezone(net_mins_offset)
# Store the offset in a datetime object
dt = datetime.fromtimestamp(ts)
self.revision_date = dt.replace(tzinfo=tzoffset)
mime_headers = property(_get_mime_headers, _set_mime_headers, doc="""\
The MIME headers of the catalog, used for the special ``msgid ""`` entry.
    The behavior of this property changes slightly depending on whether a
    locale is set or not: when no locale is set, the catalog is treated as a
    template for actual translations.
Here's an example of the output for such a catalog template:
>>> from babel.dates import UTC
>>> created = datetime(1990, 4, 1, 15, 30, tzinfo=UTC)
>>> catalog = Catalog(project='Foobar', version='1.0',
... creation_date=created)
>>> for name, value in catalog.mime_headers:
... print '%s: %s' % (name, value)
Project-Id-Version: Foobar 1.0
Report-Msgid-Bugs-To: EMAIL@ADDRESS
POT-Creation-Date: 1990-04-01 15:30+0000
PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE
Last-Translator: FULL NAME <EMAIL@ADDRESS>
Language-Team: LANGUAGE <LL@li.org>
MIME-Version: 1.0
Content-Type: text/plain; charset=utf-8
Content-Transfer-Encoding: 8bit
Generated-By: Babel ...
And here's an example of the output when the locale is set:
>>> revised = datetime(1990, 8, 3, 12, 0, tzinfo=UTC)
>>> catalog = Catalog(locale='de_DE', project='Foobar', version='1.0',
... creation_date=created, revision_date=revised,
... last_translator='John Doe <jd@example.com>',
... language_team='de_DE <de@example.com>')
>>> for name, value in catalog.mime_headers:
... print '%s: %s' % (name, value)
Project-Id-Version: Foobar 1.0
Report-Msgid-Bugs-To: EMAIL@ADDRESS
POT-Creation-Date: 1990-04-01 15:30+0000
PO-Revision-Date: 1990-08-03 12:00+0000
Last-Translator: John Doe <jd@example.com>
Language-Team: de_DE <de@example.com>
Plural-Forms: nplurals=2; plural=(n != 1)
MIME-Version: 1.0
Content-Type: text/plain; charset=utf-8
Content-Transfer-Encoding: 8bit
Generated-By: Babel ...
:type: `list`
""")
@property
def num_plurals(self):
"""The number of plurals used by the catalog or locale.
>>> Catalog(locale='en').num_plurals
2
>>> Catalog(locale='ga').num_plurals
3
:type: `int`"""
if self._num_plurals is None:
num = 2
if self.locale:
num = get_plural(self.locale)[0]
self._num_plurals = num
return self._num_plurals
@property
def plural_expr(self):
"""The plural expression used by the catalog or locale.
>>> Catalog(locale='en').plural_expr
'(n != 1)'
>>> Catalog(locale='ga').plural_expr
'(n==1 ? 0 : n==2 ? 1 : 2)'
:type: `string_types`"""
if self._plural_expr is None:
expr = '(n != 1)'
if self.locale:
expr = get_plural(self.locale)[1]
self._plural_expr = expr
return self._plural_expr
@property
def plural_forms(self):
"""Return the plural forms declaration for the locale.
>>> Catalog(locale='en').plural_forms
'nplurals=2; plural=(n != 1)'
>>> Catalog(locale='pt_BR').plural_forms
'nplurals=2; plural=(n > 1)'
:type: `str`"""
return 'nplurals=%s; plural=%s' % (self.num_plurals, self.plural_expr)
def __contains__(self, id):
"""Return whether the catalog has a message with the specified ID."""
return self._key_for(id) in self._messages
def __len__(self):
"""The number of messages in the catalog.
This does not include the special ``msgid ""`` entry."""
return len(self._messages)
def __iter__(self):
"""Iterates through all the entries in the catalog, in the order they
were added, yielding a `Message` object for every entry.
:rtype: ``iterator``"""
buf = []
for name, value in self.mime_headers:
buf.append('%s: %s' % (name, value))
flags = set()
if self.fuzzy:
flags |= set(['fuzzy'])
yield Message(u'', '\n'.join(buf), flags=flags)
for key in self._messages:
yield self._messages[key]
def __repr__(self):
locale = ''
if self.locale:
locale = ' %s' % self.locale
return '<%s %r%s>' % (type(self).__name__, self.domain, locale)
def __delitem__(self, id):
"""Delete the message with the specified ID."""
self.delete(id)
def __getitem__(self, id):
"""Return the message with the specified ID.
:param id: the message ID
"""
return self.get(id)
def __setitem__(self, id, message):
"""Add or update the message with the specified ID.
>>> catalog = Catalog()
>>> catalog[u'foo'] = Message(u'foo')
>>> catalog[u'foo']
<Message u'foo' (flags: [])>
If a message with that ID is already in the catalog, it is updated
to include the locations and flags of the new message.
>>> catalog = Catalog()
>>> catalog[u'foo'] = Message(u'foo', locations=[('main.py', 1)])
>>> catalog[u'foo'].locations
[('main.py', 1)]
>>> catalog[u'foo'] = Message(u'foo', locations=[('utils.py', 5)])
>>> catalog[u'foo'].locations
[('main.py', 1), ('utils.py', 5)]
:param id: the message ID
:param message: the `Message` object
"""
assert isinstance(message, Message), 'expected a Message object'
key = self._key_for(id, message.context)
current = self._messages.get(key)
if current:
if message.pluralizable and not current.pluralizable:
# The new message adds pluralization
current.id = message.id
current.string = message.string
current.locations = list(distinct(current.locations +
message.locations))
current.auto_comments = list(distinct(current.auto_comments +
message.auto_comments))
current.user_comments = list(distinct(current.user_comments +
message.user_comments))
current.flags |= message.flags
message = current
elif id == '':
# special treatment for the header message
self.mime_headers = _parse_header(message.string).items()
self.header_comment = '\n'.join([('# %s' % c).rstrip() for c
in message.user_comments])
self.fuzzy = message.fuzzy
else:
if isinstance(id, (list, tuple)):
assert isinstance(message.string, (list, tuple)), \
'Expected sequence but got %s' % type(message.string)
self._messages[key] = message
def add(self, id, string=None, locations=(), flags=(), auto_comments=(),
user_comments=(), previous_id=(), lineno=None, context=None):
"""Add or update the message with the specified ID.
>>> catalog = Catalog()
>>> catalog.add(u'foo')
<Message ...>
>>> catalog[u'foo']
<Message u'foo' (flags: [])>
This method simply constructs a `Message` object with the given
arguments and invokes `__setitem__` with that object.
:param id: the message ID, or a ``(singular, plural)`` tuple for
pluralizable messages
:param string: the translated message string, or a
``(singular, plural)`` tuple for pluralizable messages
        :param locations: a sequence of ``(filename, lineno)`` tuples
:param flags: a set or sequence of flags
:param auto_comments: a sequence of automatic comments
:param user_comments: a sequence of user comments
:param previous_id: the previous message ID, or a ``(singular, plural)``
tuple for pluralizable messages
:param lineno: the line number on which the msgid line was found in the
PO file, if any
:param context: the message context
"""
message = Message(id, string, list(locations), flags, auto_comments,
user_comments, previous_id, lineno=lineno,
context=context)
self[id] = message
return message
def check(self):
"""Run various validation checks on the translations in the catalog.
        For every message that fails validation, this method yields a
``(message, errors)`` tuple, where ``message`` is the `Message` object
and ``errors`` is a sequence of `TranslationError` objects.
:rtype: ``iterator``
"""
for message in self._messages.values():
errors = message.check(catalog=self)
if errors:
yield message, errors
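    # Illustrative sketch (not part of the original API): consuming the
    # ``check`` results, e.g. to report translation problems.  Assumes a
    # populated ``catalog`` built with this class:
    #
    #     for message, errors in catalog.check():
    #         for error in errors:
    #             print 'error in %r: %s' % (message.id, error)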
def get(self, id, context=None):
"""Return the message with the specified ID and context.
:param id: the message ID
:param context: the message context, or ``None`` for no context
"""
return self._messages.get(self._key_for(id, context))
def delete(self, id, context=None):
"""Delete the message with the specified ID and context.
:param id: the message ID
:param context: the message context, or ``None`` for no context
"""
key = self._key_for(id, context)
if key in self._messages:
del self._messages[key]
def update(self, template, no_fuzzy_matching=False):
"""Update the catalog based on the given template catalog.
>>> from babel.messages import Catalog
>>> template = Catalog()
>>> template.add('green', locations=[('main.py', 99)])
<Message ...>
>>> template.add('blue', locations=[('main.py', 100)])
<Message ...>
>>> template.add(('salad', 'salads'), locations=[('util.py', 42)])
<Message ...>
>>> catalog = Catalog(locale='de_DE')
>>> catalog.add('blue', u'blau', locations=[('main.py', 98)])
<Message ...>
>>> catalog.add('head', u'Kopf', locations=[('util.py', 33)])
<Message ...>
>>> catalog.add(('salad', 'salads'), (u'Salat', u'Salate'),
... locations=[('util.py', 38)])
<Message ...>
>>> catalog.update(template)
>>> len(catalog)
3
>>> msg1 = catalog['green']
>>> msg1.string
>>> msg1.locations
[('main.py', 99)]
>>> msg2 = catalog['blue']
>>> msg2.string
u'blau'
>>> msg2.locations
[('main.py', 100)]
>>> msg3 = catalog['salad']
>>> msg3.string
(u'Salat', u'Salate')
>>> msg3.locations
[('util.py', 42)]
Messages that are in the catalog but not in the template are removed
from the main collection, but can still be accessed via the `obsolete`
member:
>>> 'head' in catalog
False
>>> catalog.obsolete.values()
[<Message 'head' (flags: [])>]
:param template: the reference catalog, usually read from a POT file
:param no_fuzzy_matching: whether to use fuzzy matching of message IDs
"""
messages = self._messages
remaining = messages.copy()
self._messages = odict()
# Prepare for fuzzy matching
fuzzy_candidates = []
if not no_fuzzy_matching:
fuzzy_candidates = dict([
(self._key_for(msgid), messages[msgid].context)
for msgid in messages if msgid and messages[msgid].string
])
fuzzy_matches = set()
def _merge(message, oldkey, newkey):
message = message.clone()
fuzzy = False
if oldkey != newkey:
fuzzy = True
fuzzy_matches.add(oldkey)
oldmsg = messages.get(oldkey)
if isinstance(oldmsg.id, string_types):
message.previous_id = [oldmsg.id]
else:
message.previous_id = list(oldmsg.id)
else:
oldmsg = remaining.pop(oldkey, None)
message.string = oldmsg.string
if isinstance(message.id, (list, tuple)):
if not isinstance(message.string, (list, tuple)):
fuzzy = True
message.string = tuple(
[message.string] + ([u''] * (len(message.id) - 1))
)
elif len(message.string) != self.num_plurals:
fuzzy = True
message.string = tuple(message.string[:len(oldmsg.string)])
elif isinstance(message.string, (list, tuple)):
fuzzy = True
message.string = message.string[0]
message.flags |= oldmsg.flags
if fuzzy:
message.flags |= set([u'fuzzy'])
self[message.id] = message
for message in template:
if message.id:
key = self._key_for(message.id, message.context)
if key in messages:
_merge(message, key, key)
else:
if no_fuzzy_matching is False:
# do some fuzzy matching with difflib
if isinstance(key, tuple):
matchkey = key[0] # just the msgid, no context
else:
matchkey = key
matches = get_close_matches(matchkey.lower().strip(),
fuzzy_candidates.keys(), 1)
if matches:
newkey = matches[0]
newctxt = fuzzy_candidates[newkey]
if newctxt is not None:
newkey = newkey, newctxt
_merge(message, newkey, key)
continue
self[message.id] = message
for msgid in remaining:
if no_fuzzy_matching or msgid not in fuzzy_matches:
self.obsolete[msgid] = remaining[msgid]
# Make updated catalog's POT-Creation-Date equal to the template
# used to update the catalog
self.creation_date = template.creation_date
def _key_for(self, id, context=None):
"""The key for a message is just the singular ID even for pluralizable
messages, but is a ``(msgid, msgctxt)`` tuple for context-specific
messages.
"""
key = id
if isinstance(key, (list, tuple)):
key = id[0]
if context is not None:
key = (key, context)
return key
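    # Minimal sketch (illustrative only) of how message keys behave, using
    # the ``Catalog`` API defined above:
    #
    #     catalog = Catalog()
    #     catalog.add((u'apple', u'apples'))           # keyed by u'apple'
    #     assert u'apple' in catalog                   # singular ID is the key
    #     catalog.add(u'May', context=u'month name')   # keyed by (id, context)
    #     assert catalog.get(u'May', context=u'month name') is not None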
|
jeanlinux/calibre
|
refs/heads/master
|
src/calibre/utils/chm/chm.py
|
22
|
## Copyright (C) 2003-2006 Rubens Ramos <rubensr@users.sourceforge.net>
## Based on code by:
## Copyright (C) 2003 Razvan Cojocaru <razvanco@gmx.net>
## pychm is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
## $Id: chm.py,v 1.12 2006/08/07 12:31:51 rubensr Exp $
'''
chm - A high-level front end for the chmlib python module.
The chm module provides high level access to the functionality
included in chmlib. It encapsulates functions in the CHMFile class, and
provides some additional features, such as the ability to obtain
the contents tree of a CHM archive.
'''
import array
import sys
import codecs
import calibre.utils.chm.chmlib as chmlib
from calibre.constants import plugins
extra, extra_err = plugins['chm_extra']
if extra_err:
raise RuntimeError('Failed to load chm.extra: '+extra_err)
charset_table = {
0 : 'iso8859_1', # ANSI_CHARSET
238 : 'iso8859_2', # EASTEUROPE_CHARSET
178 : 'iso8859_6', # ARABIC_CHARSET
161 : 'iso8859_7', # GREEK_CHARSET
177 : 'iso8859_8', # HEBREW_CHARSET
162 : 'iso8859_9', # TURKISH_CHARSET
222 : 'iso8859_11', # THAI_CHARSET - hmm not in python 2.2...
186 : 'iso8859_13', # BALTIC_CHARSET
204 : 'cp1251', # RUSSIAN_CHARSET
255 : 'cp437', # OEM_CHARSET
128 : 'cp932', # SHIFTJIS_CHARSET
134 : 'cp936', # GB2312_CHARSET
129 : 'cp949', # HANGUL_CHARSET
136 : 'cp950', # CHINESEBIG5_CHARSET
1 : None, # DEFAULT_CHARSET
2 : None, # SYMBOL_CHARSET
130 : None, # JOHAB_CHARSET
163 : None, # VIETNAMESE_CHARSET
77 : None, # MAC_CHARSET
}
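# Illustrative lookup (not part of the original module): the #SYSTEM default
# font entry ends in a Windows charset byte, which maps to a Python codec:
#
#     charset_table.get(204)   # -> 'cp1251' (RUSSIAN_CHARSET)
#     charset_table.get(77)    # -> None (no codec known for MAC_CHARSET)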
locale_table = {
0x0436 : ('iso8859_1', "Afrikaans", "Western Europe & US"),
0x041c : ('iso8859_2', "Albanian", "Central Europe"),
0x0401 : ('iso8859_6', "Arabic_Saudi_Arabia", "Arabic"),
0x0801 : ('iso8859_6', "Arabic_Iraq", "Arabic"),
0x0c01 : ('iso8859_6', "Arabic_Egypt", "Arabic"),
0x1001 : ('iso8859_6', "Arabic_Libya", "Arabic"),
0x1401 : ('iso8859_6', "Arabic_Algeria", "Arabic"),
0x1801 : ('iso8859_6', "Arabic_Morocco", "Arabic"),
0x1c01 : ('iso8859_6', "Arabic_Tunisia", "Arabic"),
0x2001 : ('iso8859_6', "Arabic_Oman", "Arabic"),
0x2401 : ('iso8859_6', "Arabic_Yemen", "Arabic"),
0x2801 : ('iso8859_6', "Arabic_Syria", "Arabic"),
0x2c01 : ('iso8859_6', "Arabic_Jordan", "Arabic"),
0x3001 : ('iso8859_6', "Arabic_Lebanon", "Arabic"),
0x3401 : ('iso8859_6', "Arabic_Kuwait", "Arabic"),
0x3801 : ('iso8859_6', "Arabic_UAE", "Arabic"),
0x3c01 : ('iso8859_6', "Arabic_Bahrain", "Arabic"),
0x4001 : ('iso8859_6', "Arabic_Qatar", "Arabic"),
0x042b : (None, "Armenian","Armenian"),
0x042c : ('iso8859_9', "Azeri_Latin", "Turkish"),
0x082c : ('cp1251', "Azeri_Cyrillic", "Cyrillic"),
0x042d : ('iso8859_1', "Basque", "Western Europe & US"),
0x0423 : ('cp1251', "Belarusian", "Cyrillic"),
0x0402 : ('cp1251', "Bulgarian", "Cyrillic"),
0x0403 : ('iso8859_1', "Catalan", "Western Europe & US"),
0x0404 : ('cp950', "Chinese_Taiwan", "Traditional Chinese"),
0x0804 : ('cp936', "Chinese_PRC", "Simplified Chinese"),
0x0c04 : ('cp950', "Chinese_Hong_Kong", "Traditional Chinese"),
0x1004 : ('cp936', "Chinese_Singapore", "Simplified Chinese"),
0x1404 : ('cp950', "Chinese_Macau", "Traditional Chinese"),
0x041a : ('iso8859_2', "Croatian", "Central Europe"),
0x0405 : ('iso8859_2', "Czech", "Central Europe"),
0x0406 : ('iso8859_1', "Danish", "Western Europe & US"),
0x0413 : ('iso8859_1', "Dutch_Standard", "Western Europe & US"),
0x0813 : ('iso8859_1', "Dutch_Belgian", "Western Europe & US"),
0x0409 : ('iso8859_1', "English_United_States", "Western Europe & US"),
0x0809 : ('iso8859_1', "English_United_Kingdom", "Western Europe & US"),
0x0c09 : ('iso8859_1', "English_Australian", "Western Europe & US"),
0x1009 : ('iso8859_1', "English_Canadian", "Western Europe & US"),
0x1409 : ('iso8859_1', "English_New_Zealand", "Western Europe & US"),
0x1809 : ('iso8859_1', "English_Irish", "Western Europe & US"),
0x1c09 : ('iso8859_1', "English_South_Africa", "Western Europe & US"),
0x2009 : ('iso8859_1', "English_Jamaica", "Western Europe & US"),
0x2409 : ('iso8859_1', "English_Caribbean", "Western Europe & US"),
0x2809 : ('iso8859_1', "English_Belize", "Western Europe & US"),
0x2c09 : ('iso8859_1', "English_Trinidad", "Western Europe & US"),
0x3009 : ('iso8859_1', "English_Zimbabwe", "Western Europe & US"),
0x3409 : ('iso8859_1', "English_Philippines", "Western Europe & US"),
0x0425 : ('iso8859_13',"Estonian", "Baltic",),
0x0438 : ('iso8859_1', "Faeroese", "Western Europe & US"),
0x0429 : ('iso8859_6', "Farsi", "Arabic"),
0x040b : ('iso8859_1', "Finnish", "Western Europe & US"),
0x040c : ('iso8859_1', "French_Standard", "Western Europe & US"),
0x080c : ('iso8859_1', "French_Belgian", "Western Europe & US"),
0x0c0c : ('iso8859_1', "French_Canadian", "Western Europe & US"),
0x100c : ('iso8859_1', "French_Swiss", "Western Europe & US"),
0x140c : ('iso8859_1', "French_Luxembourg", "Western Europe & US"),
0x180c : ('iso8859_1', "French_Monaco", "Western Europe & US"),
0x0437 : (None, "Georgian", "Georgian"),
0x0407 : ('iso8859_1', "German_Standard", "Western Europe & US"),
0x0807 : ('iso8859_1', "German_Swiss", "Western Europe & US"),
0x0c07 : ('iso8859_1', "German_Austrian", "Western Europe & US"),
0x1007 : ('iso8859_1', "German_Luxembourg", "Western Europe & US"),
0x1407 : ('iso8859_1', "German_Liechtenstein", "Western Europe & US"),
0x0408 : ('iso8859_7', "Greek", "Greek"),
0x040d : ('iso8859_8', "Hebrew", "Hebrew"),
0x0439 : (None, "Hindi", "Indic"),
0x040e : ('iso8859_2', "Hungarian", "Central Europe"),
0x040f : ('iso8859_1', "Icelandic", "Western Europe & US"),
0x0421 : ('iso8859_1', "Indonesian", "Western Europe & US"),
0x0410 : ('iso8859_1', "Italian_Standard", "Western Europe & US"),
0x0810 : ('iso8859_1', "Italian_Swiss", "Western Europe & US"),
0x0411 : ('cp932', "Japanese", "Japanese"),
0x043f : ('cp1251', "Kazakh", "Cyrillic"),
0x0457 : (None, "Konkani", "Indic"),
0x0412 : ('cp949', "Korean", "Korean"),
0x0426 : ('iso8859_13',"Latvian", "Baltic",),
0x0427 : ('iso8859_13',"Lithuanian", "Baltic",),
0x042f : ('cp1251', "Macedonian", "Cyrillic"),
0x043e : ('iso8859_1', "Malay_Malaysia", "Western Europe & US"),
0x083e : ('iso8859_1', "Malay_Brunei_Darussalam", "Western Europe & US"),
0x044e : (None, "Marathi", "Indic"),
0x0414 : ('iso8859_1', "Norwegian_Bokmal", "Western Europe & US"),
0x0814 : ('iso8859_1', "Norwegian_Nynorsk", "Western Europe & US"),
0x0415 : ('iso8859_2', "Polish", "Central Europe"),
0x0416 : ('iso8859_1', "Portuguese_Brazilian", "Western Europe & US"),
0x0816 : ('iso8859_1', "Portuguese_Standard", "Western Europe & US"),
0x0418 : ('iso8859_2', "Romanian", "Central Europe"),
0x0419 : ('cp1251', "Russian", "Cyrillic"),
0x044f : (None, "Sanskrit", "Indic"),
0x081a : ('iso8859_2', "Serbian_Latin", "Central Europe"),
0x0c1a : ('cp1251', "Serbian_Cyrillic", "Cyrillic"),
0x041b : ('iso8859_2', "Slovak", "Central Europe"),
0x0424 : ('iso8859_2', "Slovenian", "Central Europe"),
0x040a : ('iso8859_1', "Spanish_Trad_Sort", "Western Europe & US"),
0x080a : ('iso8859_1', "Spanish_Mexican", "Western Europe & US"),
0x0c0a : ('iso8859_1', "Spanish_Modern_Sort", "Western Europe & US"),
0x100a : ('iso8859_1', "Spanish_Guatemala", "Western Europe & US"),
0x140a : ('iso8859_1', "Spanish_Costa_Rica", "Western Europe & US"),
0x180a : ('iso8859_1', "Spanish_Panama", "Western Europe & US"),
0x1c0a : ('iso8859_1', "Spanish_Dominican_Repub", "Western Europe & US"),
0x200a : ('iso8859_1', "Spanish_Venezuela", "Western Europe & US"),
0x240a : ('iso8859_1', "Spanish_Colombia", "Western Europe & US"),
0x280a : ('iso8859_1', "Spanish_Peru", "Western Europe & US"),
0x2c0a : ('iso8859_1', "Spanish_Argentina", "Western Europe & US"),
0x300a : ('iso8859_1', "Spanish_Ecuador", "Western Europe & US"),
0x340a : ('iso8859_1', "Spanish_Chile", "Western Europe & US"),
0x380a : ('iso8859_1', "Spanish_Uruguay", "Western Europe & US"),
0x3c0a : ('iso8859_1', "Spanish_Paraguay", "Western Europe & US"),
0x400a : ('iso8859_1', "Spanish_Bolivia", "Western Europe & US"),
0x440a : ('iso8859_1', "Spanish_El_Salvador", "Western Europe & US"),
0x480a : ('iso8859_1', "Spanish_Honduras", "Western Europe & US"),
0x4c0a : ('iso8859_1', "Spanish_Nicaragua", "Western Europe & US"),
0x500a : ('iso8859_1', "Spanish_Puerto_Rico", "Western Europe & US"),
0x0441 : ('iso8859_1', "Swahili", "Western Europe & US"),
0x041d : ('iso8859_1', "Swedish", "Western Europe & US"),
0x081d : ('iso8859_1', "Swedish_Finland", "Western Europe & US"),
0x0449 : (None, "Tamil", "Indic"),
0x0444 : ('cp1251', "Tatar", "Cyrillic"),
0x041e : ('iso8859_11',"Thai", "Thai"),
0x041f : ('iso8859_9', "Turkish", "Turkish"),
0x0422 : ('cp1251', "Ukrainian", "Cyrillic"),
0x0420 : ('iso8859_6', "Urdu", "Arabic"),
0x0443 : ('iso8859_9', "Uzbek_Latin", "Turkish"),
0x0843 : ('cp1251', "Uzbek_Cyrillic", "Cyrillic"),
0x042a : ('cp1258', "Vietnamese", "Vietnamese")
}
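# Illustrative lookup (not part of the original module): a Windows locale ID
# resolves to a (codec, language, region) triple, or nothing when unknown:
#
#     locale_table.get(0x0419)   # -> ('cp1251', 'Russian', 'Cyrillic')
#     locale_table.get(0xFFFF)   # -> None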
class CHMFile:
"A class to manage access to CHM files."
filename = ""
file = None
title = ""
home = "/"
index = None
topics = None
encoding = None
lcid = None
binaryindex = None
def __init__(self):
self.searchable = 0
def LoadCHM(self, archiveName):
'''Loads a CHM archive.
This function will also call GetArchiveInfo to obtain information
such as the index file name and the topics file. It returns 1 on
success, and 0 if it fails.
'''
if (self.filename != None):
self.CloseCHM()
self.file = chmlib.chm_open(archiveName)
if (self.file == None):
return 0
self.filename = archiveName
self.GetArchiveInfo()
return 1
def CloseCHM(self):
'''Closes the CHM archive.
This function will close the CHM file, if it is open. All variables
are also reset.
'''
if (self.filename != None):
chmlib.chm_close(self.file)
self.file = None
self.filename = ''
self.title = ""
self.home = "/"
self.index = None
self.topics = None
self.encoding = None
def GetArchiveInfo(self):
'''Obtains information on CHM archive.
This function checks the /#SYSTEM file inside the CHM archive to
obtain the index, home page, topics, encoding and title. It is called
from LoadCHM.
'''
        # extra.is_searchable crashed here, so full-text search is left
        # disabled rather than calling it:
        # self.searchable = extra.is_searchable(self.file)
        self.searchable = False
self.lcid = None
result, ui = chmlib.chm_resolve_object(self.file, '/#SYSTEM')
if (result != chmlib.CHM_RESOLVE_SUCCESS):
sys.stderr.write('GetArchiveInfo: #SYSTEM does not exist\n')
return 0
        size, text = chmlib.chm_retrieve_object(self.file, ui, 4L, ui.length)
if (size == 0):
sys.stderr.write('GetArchiveInfo: file size = 0\n')
return 0
buff = array.array('B', text)
index = 0
        # #SYSTEM is a sequence of records: a 16-bit entry code, a 16-bit
        # data length, then that many bytes of data (all little-endian).
        while (index < size):
cursor = buff[index] + (buff[index+1] * 256)
            if (cursor == 0):  # entry 0: contents (.hhc) file
index += 2
cursor = buff[index] + (buff[index+1] * 256)
index += 2
self.topics = '/' + text[index:index+cursor-1]
            elif (cursor == 1):  # entry 1: index (.hhk) file
index += 2
cursor = buff[index] + (buff[index+1] * 256)
index += 2
self.index = '/' + text[index:index+cursor-1]
            elif (cursor == 2):  # entry 2: default topic (home page)
index += 2
cursor = buff[index] + (buff[index+1] * 256)
index += 2
self.home = '/' + text[index:index+cursor-1]
            elif (cursor == 3):  # entry 3: title
index += 2
cursor = buff[index] + (buff[index+1] * 256)
index += 2
self.title = text[index:index+cursor-1]
            elif (cursor == 4):  # entry 4: locale ID (LCID)
index += 2
cursor = buff[index] + (buff[index+1] * 256)
index += 2
self.lcid = buff[index] + (buff[index+1] * 256)
            elif (cursor == 6):  # entry 6: compiled file base name
index += 2
cursor = buff[index] + (buff[index+1] * 256)
index += 2
tmp = text[index:index+cursor-1]
if not self.topics:
tmp1 = '/' + tmp + '.hhc'
tmp2 = '/' + tmp + '.hhk'
res1, ui1 = chmlib.chm_resolve_object(self.file, tmp1)
res2, ui2 = chmlib.chm_resolve_object(self.file, tmp2)
if (not self.topics) and \
(res1 == chmlib.CHM_RESOLVE_SUCCESS):
self.topics = '/' + tmp + '.hhc'
if (not self.index) and \
(res2 == chmlib.CHM_RESOLVE_SUCCESS):
self.index = '/' + tmp + '.hhk'
            elif (cursor == 16):  # entry 16: default font ("face,size,charset")
index += 2
cursor = buff[index] + (buff[index+1] * 256)
index += 2
self.encoding = text[index:index+cursor-1]
            else:  # unknown entry: skip its payload
index += 2
cursor = buff[index] + (buff[index+1] * 256)
index += 2
index += cursor
self.GetWindowsInfo()
if not self.lcid:
self.lcid = extra.get_lcid (self.file)
return 1
def GetTopicsTree(self):
'''Reads and returns the topics tree.
This auxiliary function reads and returns the topics tree file
contents for the CHM archive.
'''
if (self.topics == None):
return None
if self.topics:
res, ui = chmlib.chm_resolve_object(self.file, self.topics)
if (res != chmlib.CHM_RESOLVE_SUCCESS):
return None
        size, text = chmlib.chm_retrieve_object(self.file, ui, 0L, ui.length)
if (size == 0):
sys.stderr.write('GetTopicsTree: file size = 0\n')
return None
return text
def GetIndex(self):
'''Reads and returns the index tree.
This auxiliary function reads and returns the index tree file
contents for the CHM archive.
'''
if (self.index == None):
return None
if self.index:
res, ui = chmlib.chm_resolve_object(self.file, self.index)
if (res != chmlib.CHM_RESOLVE_SUCCESS):
return None
        size, text = chmlib.chm_retrieve_object(self.file, ui, 0L, ui.length)
if (size == 0):
sys.stderr.write('GetIndex: file size = 0\n')
return None
return text
def ResolveObject(self, document):
'''Tries to locate a document in the archive.
This function tries to locate the document inside the archive. It
returns a tuple where the first element is zero if the function
was successful, and the second is the UnitInfo for that document.
The UnitInfo is used to retrieve the document contents
'''
if self.file:
#path = os.path.abspath(document)
path = document
return chmlib.chm_resolve_object(self.file, path)
else:
return (1, None)
def RetrieveObject(self, ui, start = -1, length = -1):
'''Retrieves the contents of a document.
This function takes a UnitInfo and two optional arguments, the first
being the start address and the second is the length. These define
the amount of data to be read from the archive.
'''
if self.file and ui:
            # avoid shadowing the builtin ``len``
            nbytes = ui.length if length == -1 else length
            st = 0L if start == -1 else long(start)
            return chmlib.chm_retrieve_object(self.file, ui, st, nbytes)
else:
return (0, '')
def Search(self, text, wholewords=0, titleonly=0):
'''Performs full-text search on the archive.
The first parameter is the word to look for, the second
indicates if the search should be for whole words only, and
the third parameter indicates if the search should be
restricted to page titles.
This method will return a tuple, the first item
indicating if the search results were partial, and the second
item being a dictionary containing the results.'''
if text and text != '' and self.file:
return extra.search (self.file, text, wholewords,
titleonly)
else:
return None
def IsSearchable(self):
'''Indicates if the full-text search is available for this
archive - this flag is updated when GetArchiveInfo is called'''
return self.searchable
def GetEncoding(self):
'''Returns a string that can be used with the codecs python package
to encode or decode the files in the chm archive. If an error is
found, or if it is not possible to find the encoding, None is
returned.'''
if self.encoding:
            vals = self.encoding.split(',')
if len(vals) > 2:
try:
return charset_table[int(vals[2])]
except KeyError:
pass
return None
def GetLCID(self):
'''Returns the archive Locale ID'''
if self.lcid in locale_table:
return locale_table[self.lcid]
else:
return None
def get_encoding(self):
ans = self.GetEncoding()
if ans is None:
lcid = self.GetLCID()
if lcid is not None:
ans = lcid[0]
if ans:
try:
codecs.lookup(ans)
except:
ans = None
return ans
def GetDWORD(self, buff, idx=0):
'''Internal method.
Reads a double word (4 bytes) from a buffer.
'''
result = buff[idx] + (buff[idx+1]<<8) + (buff[idx+2]<<16) + \
(buff[idx+3]<<24)
if result == 0xFFFFFFFF:
result = 0
return result
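    # Equivalent sketch (illustrative): the arithmetic above is a
    # little-endian unsigned 32-bit read, apart from the 0xFFFFFFFF -> 0
    # special case, i.e. roughly:
    #
    #     import struct
    #     value = struct.unpack('<I', buff[idx:idx + 4].tostring())[0]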
def GetString(self, text, idx):
'''Internal method.
Retrieves a string from the #STRINGS buffer.
'''
        end = text.find('\x00', idx)
        return text[idx:end]
def GetWindowsInfo(self):
'''Gets information from the #WINDOWS file.
Checks the #WINDOWS file to see if it has any info that was
        not found in #SYSTEM (topics, index or default page).
'''
result, ui = chmlib.chm_resolve_object(self.file, '/#WINDOWS')
if (result != chmlib.CHM_RESOLVE_SUCCESS):
return -1
        size, text = chmlib.chm_retrieve_object(self.file, ui, 0L, 8)
if (size < 8):
return -2
buff = array.array('B', text)
num_entries = self.GetDWORD(buff, 0)
entry_size = self.GetDWORD(buff, 4)
if num_entries < 1:
return -3
        size, text = chmlib.chm_retrieve_object(self.file, ui, 8L, entry_size)
if (size < entry_size):
return -4
buff = array.array('B', text)
toc_index = self.GetDWORD(buff, 0x60)
idx_index = self.GetDWORD(buff, 0x64)
dft_index = self.GetDWORD(buff, 0x68)
result, ui = chmlib.chm_resolve_object(self.file, '/#STRINGS')
if (result != chmlib.CHM_RESOLVE_SUCCESS):
return -5
        size, text = chmlib.chm_retrieve_object(self.file, ui, 0L, ui.length)
if (size == 0):
return -6
if (not self.topics):
self.topics = self.GetString(text, toc_index)
if not self.topics.startswith("/"):
self.topics = "/" + self.topics
if (not self.index):
self.index = self.GetString(text, idx_index)
if not self.index.startswith("/"):
self.index = "/" + self.index
if (dft_index != 0):
self.home = self.GetString(text, dft_index)
if not self.home.startswith("/"):
self.home = "/" + self.home
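if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module): open the CHM
    # file named on the command line and print its title and home page.
    chm = CHMFile()
    if len(sys.argv) > 1 and chm.LoadCHM(sys.argv[1]):
        print 'title:', chm.title
        print 'home :', chm.home
        res, ui = chm.ResolveObject(chm.home)
        if res == chmlib.CHM_RESOLVE_SUCCESS:
            size, data = chm.RetrieveObject(ui)
            print 'home page is %d bytes' % size
        chm.CloseCHM()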
|
Neitsch/ASE4156
|
refs/heads/master
|
stocks/migrations/0015_auto_20171014_1627.py
|
2
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-14 16:27
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('stocks', '0014_auto_20171014_1355'),
]
operations = [
migrations.AlterField(
model_name='investmentbucketdescription',
name='text',
field=models.CharField(max_length=255, validators=[django.core.validators.MinLengthValidator(3, message='The description should at least be 3 characters long.')]),
),
]
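# Illustrative note (not generated by Django): MinLengthValidator is a
# callable that raises ValidationError for too-short values, e.g.:
#
#     from django.core.validators import MinLengthValidator
#     check = MinLengthValidator(3, message='too short')
#     check('abc')   # passes silently
#     check('ab')    # raises ValidationError('too short')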
|
jabez1314/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/kaltura.py
|
63
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_urllib_parse
from ..utils import (
ExtractorError,
int_or_none,
)
class KalturaIE(InfoExtractor):
_VALID_URL = r'''(?x)
(?:kaltura:|
           https?://(?:(?:www|cdnapisec)\.)?kaltura\.com/index\.php/kwidget/(?:[^/]+/)*?wid/_
)(?P<partner_id>\d+)
(?::|
/(?:[^/]+/)*?entry_id/
)(?P<id>[0-9a-z_]+)'''
_API_BASE = 'http://cdnapi.kaltura.com/api_v3/index.php?'
_TESTS = [
{
'url': 'kaltura:269692:1_1jc2y3e4',
'md5': '3adcbdb3dcc02d647539e53f284ba171',
'info_dict': {
'id': '1_1jc2y3e4',
'ext': 'mp4',
'title': 'Track 4',
'upload_date': '20131219',
'uploader_id': 'mlundberg@wolfgangsvault.com',
'description': 'The Allman Brothers Band, 12/16/1981',
'thumbnail': 're:^https?://.*/thumbnail/.*',
'timestamp': int,
},
},
{
'url': 'http://www.kaltura.com/index.php/kwidget/cache_st/1300318621/wid/_269692/uiconf_id/3873291/entry_id/1_1jc2y3e4',
'only_matching': True,
},
{
'url': 'https://cdnapisec.kaltura.com/index.php/kwidget/wid/_557781/uiconf_id/22845202/entry_id/1_plr1syf3',
'only_matching': True,
},
]
def _kaltura_api_call(self, video_id, actions, *args, **kwargs):
params = actions[0]
if len(actions) > 1:
for i, a in enumerate(actions[1:], start=1):
for k, v in a.items():
params['%d:%s' % (i, k)] = v
query = compat_urllib_parse.urlencode(params)
url = self._API_BASE + query
data = self._download_json(url, video_id, *args, **kwargs)
status = data if len(actions) == 1 else data[0]
if status.get('objectType') == 'KalturaAPIException':
raise ExtractorError(
'%s said: %s' % (self.IE_NAME, status['message']))
return data
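    # Illustrative sketch of the multirequest flattening above: the first
    # action supplies the base parameters, and every later action's keys are
    # prefixed with their 1-based index, so e.g.
    #
    #     actions = [{'service': 'multirequest', 'format': 1},
    #                {'action': 'get', 'service': 'baseentry'}]
    #
    # is sent as ``service=multirequest&format=1&1:action=get&1:service=baseentry``
    # (parameter order depends on dict iteration).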
def _get_kaltura_signature(self, video_id, partner_id):
actions = [{
'apiVersion': '3.1',
'expiry': 86400,
'format': 1,
'service': 'session',
'action': 'startWidgetSession',
'widgetId': '_%s' % partner_id,
}]
return self._kaltura_api_call(
video_id, actions, note='Downloading Kaltura signature')['ks']
def _get_video_info(self, video_id, partner_id):
signature = self._get_kaltura_signature(video_id, partner_id)
actions = [
{
'action': 'null',
'apiVersion': '3.1.5',
'clientTag': 'kdp:v3.8.5',
'format': 1, # JSON, 2 = XML, 3 = PHP
'service': 'multirequest',
'ks': signature,
},
{
'action': 'get',
'entryId': video_id,
'service': 'baseentry',
'version': '-1',
},
{
'action': 'getContextData',
'contextDataParams:objectType': 'KalturaEntryContextDataParams',
'contextDataParams:referrer': 'http://www.kaltura.com/',
'contextDataParams:streamerType': 'http',
'entryId': video_id,
'service': 'baseentry',
},
]
return self._kaltura_api_call(
video_id, actions, note='Downloading video info JSON')
def _real_extract(self, url):
video_id = self._match_id(url)
mobj = re.match(self._VALID_URL, url)
partner_id, entry_id = mobj.group('partner_id'), mobj.group('id')
info, source_data = self._get_video_info(entry_id, partner_id)
formats = [{
'format_id': '%(fileExt)s-%(bitrate)s' % f,
'ext': f['fileExt'],
'tbr': f['bitrate'],
'fps': f.get('frameRate'),
'filesize_approx': int_or_none(f.get('size'), invscale=1024),
'container': f.get('containerFormat'),
'vcodec': f.get('videoCodecId'),
'height': f.get('height'),
'width': f.get('width'),
'url': '%s/flavorId/%s' % (info['dataUrl'], f['id']),
} for f in source_data['flavorAssets']]
self._sort_formats(formats)
return {
'id': video_id,
'title': info['name'],
'formats': formats,
'description': info.get('description'),
'thumbnail': info.get('thumbnailUrl'),
'duration': info.get('duration'),
'timestamp': info.get('createdAt'),
'uploader_id': info.get('userId'),
'view_count': info.get('plays'),
}
|
cbrepo/celery
|
refs/heads/django16
|
celery/tests/test_task/__init__.py
|
14
|
from __future__ import absolute_import
from __future__ import with_statement
from datetime import datetime, timedelta
from functools import wraps
from celery import task
from celery.app import app_or_default
from celery.task import task as task_dec
from celery.exceptions import RetryTaskError
from celery.execute import send_task
from celery.result import EagerResult
from celery.schedules import crontab, crontab_parser, ParseException
from celery.utils import uuid
from celery.utils.timeutils import parse_iso8601
from celery.tests.utils import Case, with_eager_tasks, WhateverIO
def return_True(*args, **kwargs):
# Task run functions can't be closures/lambdas, as they're pickled.
return True
return_True_task = task_dec()(return_True)
def raise_exception(self, **kwargs):
raise Exception("%s error" % self.__class__)
class MockApplyTask(task.Task):
def run(self, x, y):
return x * y
@classmethod
def apply_async(self, *args, **kwargs):
pass
class IncrementCounterTask(task.Task):
name = "c.unittest.increment_counter_task"
count = 0
def run(self, increment_by=1, **kwargs):
increment_by = increment_by or 1
self.__class__.count += increment_by
return self.__class__.count
class RaisingTask(task.Task):
name = "c.unittest.raising_task"
def run(self, **kwargs):
raise KeyError("foo")
class RetryTask(task.Task):
max_retries = 3
iterations = 0
def run(self, arg1, arg2, kwarg=1, max_retries=None, care=True):
self.__class__.iterations += 1
rmax = self.max_retries if max_retries is None else max_retries
retries = self.request.retries
if care and retries >= rmax:
return arg1
else:
return self.retry(countdown=0, max_retries=max_retries)
class RetryTaskNoArgs(task.Task):
max_retries = 3
iterations = 0
def run(self, **kwargs):
self.__class__.iterations += 1
retries = kwargs["task_retries"]
if retries >= 3:
return 42
else:
return self.retry(kwargs=kwargs, countdown=0)
class RetryTaskMockApply(task.Task):
max_retries = 3
iterations = 0
applied = 0
def run(self, arg1, arg2, kwarg=1, **kwargs):
self.__class__.iterations += 1
retries = kwargs["task_retries"]
if retries >= 3:
return arg1
else:
kwargs.update({"kwarg": kwarg})
return self.retry(args=[arg1, arg2], kwargs=kwargs, countdown=0)
@classmethod
def apply_async(self, *args, **kwargs):
self.applied = 1
class MyCustomException(Exception):
"""Random custom exception."""
class RetryTaskCustomExc(task.Task):
max_retries = 3
iterations = 0
def run(self, arg1, arg2, kwarg=1, **kwargs):
self.__class__.iterations += 1
retries = kwargs["task_retries"]
if retries >= 3:
return arg1 + kwarg
else:
try:
raise MyCustomException("Elaine Marie Benes")
except MyCustomException, exc:
kwargs.update({"kwarg": kwarg})
return self.retry(args=[arg1, arg2], kwargs=kwargs,
countdown=0, exc=exc)
class TestTaskRetries(Case):
def test_retry(self):
RetryTask.max_retries = 3
RetryTask.iterations = 0
result = RetryTask.apply([0xFF, 0xFFFF])
self.assertEqual(result.get(), 0xFF)
self.assertEqual(RetryTask.iterations, 4)
RetryTask.max_retries = 3
RetryTask.iterations = 0
result = RetryTask.apply([0xFF, 0xFFFF], {"max_retries": 10})
self.assertEqual(result.get(), 0xFF)
self.assertEqual(RetryTask.iterations, 11)
def test_retry_no_args(self):
RetryTaskNoArgs.max_retries = 3
RetryTaskNoArgs.iterations = 0
result = RetryTaskNoArgs.apply()
self.assertEqual(result.get(), 42)
self.assertEqual(RetryTaskNoArgs.iterations, 4)
def test_retry_kwargs_can_be_empty(self):
with self.assertRaises(RetryTaskError):
RetryTaskMockApply.retry(args=[4, 4], kwargs=None)
def test_retry_not_eager(self):
RetryTaskMockApply.request.called_directly = False
exc = Exception("baz")
try:
RetryTaskMockApply.retry(args=[4, 4], kwargs={"task_retries": 0},
exc=exc, throw=False)
self.assertTrue(RetryTaskMockApply.applied)
finally:
RetryTaskMockApply.applied = 0
try:
with self.assertRaises(RetryTaskError):
RetryTaskMockApply.retry(
args=[4, 4], kwargs={"task_retries": 0},
exc=exc, throw=True)
self.assertTrue(RetryTaskMockApply.applied)
finally:
RetryTaskMockApply.applied = 0
def test_retry_with_kwargs(self):
RetryTaskCustomExc.max_retries = 3
RetryTaskCustomExc.iterations = 0
result = RetryTaskCustomExc.apply([0xFF, 0xFFFF], {"kwarg": 0xF})
self.assertEqual(result.get(), 0xFF + 0xF)
self.assertEqual(RetryTaskCustomExc.iterations, 4)
def test_retry_with_custom_exception(self):
RetryTaskCustomExc.max_retries = 2
RetryTaskCustomExc.iterations = 0
result = RetryTaskCustomExc.apply([0xFF, 0xFFFF], {"kwarg": 0xF})
with self.assertRaises(MyCustomException):
result.get()
self.assertEqual(RetryTaskCustomExc.iterations, 3)
def test_max_retries_exceeded(self):
RetryTask.max_retries = 2
RetryTask.iterations = 0
result = RetryTask.apply([0xFF, 0xFFFF], {"care": False})
with self.assertRaises(RetryTask.MaxRetriesExceededError):
result.get()
self.assertEqual(RetryTask.iterations, 3)
RetryTask.max_retries = 1
RetryTask.iterations = 0
result = RetryTask.apply([0xFF, 0xFFFF], {"care": False})
with self.assertRaises(RetryTask.MaxRetriesExceededError):
result.get()
self.assertEqual(RetryTask.iterations, 2)
class TestCeleryTasks(Case):
def test_unpickle_task(self):
import pickle
@task_dec
def xxx():
pass
self.assertIs(pickle.loads(pickle.dumps(xxx)), xxx)
def createTaskCls(self, cls_name, task_name=None):
attrs = {"__module__": self.__module__}
if task_name:
attrs["name"] = task_name
cls = type(cls_name, (task.Task, ), attrs)
cls.run = return_True
return cls
def test_AsyncResult(self):
task_id = uuid()
result = RetryTask.AsyncResult(task_id)
self.assertEqual(result.backend, RetryTask.backend)
self.assertEqual(result.task_id, task_id)
def assertNextTaskDataEqual(self, consumer, presult, task_name,
test_eta=False, test_expires=False, **kwargs):
next_task = consumer.fetch()
task_data = next_task.decode()
self.assertEqual(task_data["id"], presult.task_id)
self.assertEqual(task_data["task"], task_name)
task_kwargs = task_data.get("kwargs", {})
if test_eta:
self.assertIsInstance(task_data.get("eta"), basestring)
to_datetime = parse_iso8601(task_data.get("eta"))
self.assertIsInstance(to_datetime, datetime)
if test_expires:
self.assertIsInstance(task_data.get("expires"), basestring)
to_datetime = parse_iso8601(task_data.get("expires"))
self.assertIsInstance(to_datetime, datetime)
for arg_name, arg_value in kwargs.items():
self.assertEqual(task_kwargs.get(arg_name), arg_value)
def test_incomplete_task_cls(self):
class IncompleteTask(task.Task):
name = "c.unittest.t.itask"
with self.assertRaises(NotImplementedError):
IncompleteTask().run()
def test_task_kwargs_must_be_dictionary(self):
with self.assertRaises(ValueError):
IncrementCounterTask.apply_async([], "str")
def test_task_args_must_be_list(self):
with self.assertRaises(ValueError):
IncrementCounterTask.apply_async("str", {})
def test_regular_task(self):
T1 = self.createTaskCls("T1", "c.unittest.t.t1")
self.assertIsInstance(T1(), T1)
self.assertTrue(T1().run())
self.assertTrue(callable(T1()),
"Task class is callable()")
self.assertTrue(T1()(),
"Task class runs run() when called")
# task name generated out of class module + name.
T2 = self.createTaskCls("T2")
self.assertTrue(T2().name.endswith("test_task.T2"))
t1 = T1()
consumer = t1.get_consumer()
with self.assertRaises(NotImplementedError):
consumer.receive("foo", "foo")
consumer.discard_all()
self.assertIsNone(consumer.fetch())
# Without arguments.
presult = t1.delay()
self.assertNextTaskDataEqual(consumer, presult, t1.name)
# With arguments.
presult2 = t1.apply_async(kwargs=dict(name="George Costanza"))
self.assertNextTaskDataEqual(consumer, presult2, t1.name,
name="George Costanza")
# send_task
sresult = send_task(t1.name, kwargs=dict(name="Elaine M. Benes"))
self.assertNextTaskDataEqual(consumer, sresult, t1.name,
name="Elaine M. Benes")
# With eta.
presult2 = t1.apply_async(kwargs=dict(name="George Costanza"),
eta=datetime.utcnow() + timedelta(days=1),
expires=datetime.utcnow() + timedelta(days=2))
self.assertNextTaskDataEqual(consumer, presult2, t1.name,
name="George Costanza", test_eta=True, test_expires=True)
# With countdown.
presult2 = t1.apply_async(kwargs=dict(name="George Costanza"),
countdown=10, expires=12)
self.assertNextTaskDataEqual(consumer, presult2, t1.name,
name="George Costanza", test_eta=True, test_expires=True)
# Discarding all tasks.
consumer.discard_all()
t1.apply_async()
self.assertEqual(consumer.discard_all(), 1)
self.assertIsNone(consumer.fetch())
self.assertFalse(presult.successful())
t1.backend.mark_as_done(presult.task_id, result=None)
self.assertTrue(presult.successful())
publisher = t1.get_publisher()
self.assertTrue(publisher.exchange)
def test_context_get(self):
request = self.createTaskCls("T1", "c.unittest.t.c.g").request
request.foo = 32
self.assertEqual(request.get("foo"), 32)
self.assertEqual(request.get("bar", 36), 36)
request.clear()
def test_task_class_repr(self):
task = self.createTaskCls("T1", "c.unittest.t.repr")
self.assertIn("class Task of", repr(task.app.Task))
def test_after_return(self):
task = self.createTaskCls("T1", "c.unittest.t.after_return")()
task.request.chord = return_True_task.subtask()
task.after_return("SUCCESS", 1.0, "foobar", (), {}, None)
task.request.clear()
def test_send_task_sent_event(self):
T1 = self.createTaskCls("T1", "c.unittest.t.t1")
conn = T1.app.broker_connection()
chan = conn.channel()
T1.app.conf.CELERY_SEND_TASK_SENT_EVENT = True
dispatcher = [None]
class Pub(object):
channel = chan
def delay_task(self, *args, **kwargs):
dispatcher[0] = kwargs.get("event_dispatcher")
try:
T1.apply_async(publisher=Pub())
finally:
T1.app.conf.CELERY_SEND_TASK_SENT_EVENT = False
chan.close()
conn.close()
self.assertTrue(dispatcher[0])
def test_get_publisher(self):
connection = app_or_default().broker_connection()
p = IncrementCounterTask.get_publisher(connection, auto_declare=False,
exchange="foo")
self.assertEqual(p.exchange.name, "foo")
p = IncrementCounterTask.get_publisher(connection, auto_declare=False,
exchange_type="fanout")
self.assertEqual(p.exchange.type, "fanout")
def test_update_state(self):
@task_dec
def yyy():
pass
tid = uuid()
yyy.update_state(tid, "FROBULATING", {"fooz": "baaz"})
self.assertEqual(yyy.AsyncResult(tid).status, "FROBULATING")
self.assertDictEqual(yyy.AsyncResult(tid).result, {"fooz": "baaz"})
yyy.request.id = tid
yyy.update_state(state="FROBUZATING", meta={"fooz": "baaz"})
self.assertEqual(yyy.AsyncResult(tid).status, "FROBUZATING")
self.assertDictEqual(yyy.AsyncResult(tid).result, {"fooz": "baaz"})
def test_repr(self):
@task_dec
def task_test_repr():
pass
self.assertIn("task_test_repr", repr(task_test_repr))
def test_has___name__(self):
@task_dec
def yyy2():
pass
self.assertTrue(yyy2.__name__)
def test_get_logger(self):
T1 = self.createTaskCls("T1", "c.unittest.t.t1")
t1 = T1()
logfh = WhateverIO()
logger = t1.get_logger(logfile=logfh, loglevel=0)
self.assertTrue(logger)
T1.request.loglevel = 3
logger = t1.get_logger(logfile=logfh, loglevel=None)
self.assertTrue(logger)
class TestTaskSet(Case):
@with_eager_tasks
def test_function_taskset(self):
subtasks = [return_True_task.subtask([i]) for i in range(1, 6)]
ts = task.TaskSet(subtasks)
res = ts.apply_async()
self.assertListEqual(res.join(), [True, True, True, True, True])
def test_counter_taskset(self):
IncrementCounterTask.count = 0
ts = task.TaskSet(tasks=[
IncrementCounterTask.subtask((), {}),
IncrementCounterTask.subtask((), {"increment_by": 2}),
IncrementCounterTask.subtask((), {"increment_by": 3}),
IncrementCounterTask.subtask((), {"increment_by": 4}),
IncrementCounterTask.subtask((), {"increment_by": 5}),
IncrementCounterTask.subtask((), {"increment_by": 6}),
IncrementCounterTask.subtask((), {"increment_by": 7}),
IncrementCounterTask.subtask((), {"increment_by": 8}),
IncrementCounterTask.subtask((), {"increment_by": 9}),
])
self.assertEqual(ts.total, 9)
consumer = IncrementCounterTask().get_consumer()
consumer.purge()
consumer.close()
taskset_res = ts.apply_async()
subtasks = taskset_res.subtasks
taskset_id = taskset_res.taskset_id
consumer = IncrementCounterTask().get_consumer()
for subtask in subtasks:
m = consumer.fetch().payload
self.assertDictContainsSubset({"taskset": taskset_id,
"task": IncrementCounterTask.name,
"id": subtask.task_id}, m)
IncrementCounterTask().run(
increment_by=m.get("kwargs", {}).get("increment_by"))
self.assertEqual(IncrementCounterTask.count, sum(xrange(1, 10)))
def test_named_taskset(self):
prefix = "test_named_taskset-"
ts = task.TaskSet([return_True_task.subtask([1])])
res = ts.apply(taskset_id=prefix + uuid())
self.assertTrue(res.taskset_id.startswith(prefix))
class TestTaskApply(Case):
def test_apply_throw(self):
with self.assertRaises(KeyError):
RaisingTask.apply(throw=True)
def test_apply_with_CELERY_EAGER_PROPAGATES_EXCEPTIONS(self):
RaisingTask.app.conf.CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
try:
with self.assertRaises(KeyError):
RaisingTask.apply()
finally:
RaisingTask.app.conf.CELERY_EAGER_PROPAGATES_EXCEPTIONS = False
def test_apply(self):
IncrementCounterTask.count = 0
e = IncrementCounterTask.apply()
self.assertIsInstance(e, EagerResult)
self.assertEqual(e.get(), 1)
e = IncrementCounterTask.apply(args=[1])
self.assertEqual(e.get(), 2)
e = IncrementCounterTask.apply(kwargs={"increment_by": 4})
self.assertEqual(e.get(), 6)
self.assertTrue(e.successful())
self.assertTrue(e.ready())
self.assertTrue(repr(e).startswith("<EagerResult:"))
f = RaisingTask.apply()
self.assertTrue(f.ready())
self.assertFalse(f.successful())
self.assertTrue(f.traceback)
with self.assertRaises(KeyError):
f.get()
class MyPeriodic(task.PeriodicTask):
run_every = timedelta(hours=1)
class TestPeriodicTask(Case):
def test_must_have_run_every(self):
with self.assertRaises(NotImplementedError):
type("Foo", (task.PeriodicTask, ), {"__module__": __name__})
def test_remaining_estimate(self):
self.assertIsInstance(
MyPeriodic().remaining_estimate(datetime.utcnow()),
timedelta)
def test_is_due_not_due(self):
due, remaining = MyPeriodic().is_due(datetime.utcnow())
self.assertFalse(due)
# This assertion may fail if executed in the
# first minute of an hour, thus 59 instead of 60
self.assertGreater(remaining, 59)
def test_is_due(self):
p = MyPeriodic()
due, remaining = p.is_due(datetime.utcnow() - p.run_every.run_every)
self.assertTrue(due)
self.assertEqual(remaining,
p.timedelta_seconds(p.run_every.run_every))
def test_schedule_repr(self):
p = MyPeriodic()
self.assertTrue(repr(p.run_every))
class EveryMinutePeriodic(task.PeriodicTask):
run_every = crontab()
class QuarterlyPeriodic(task.PeriodicTask):
run_every = crontab(minute="*/15")
class HourlyPeriodic(task.PeriodicTask):
run_every = crontab(minute=30)
class DailyPeriodic(task.PeriodicTask):
run_every = crontab(hour=7, minute=30)
class WeeklyPeriodic(task.PeriodicTask):
run_every = crontab(hour=7, minute=30, day_of_week="thursday")
def patch_crontab_nowfun(cls, retval):
def create_patcher(fun):
@wraps(fun)
def __inner(*args, **kwargs):
prev_nowfun = cls.run_every.nowfun
cls.run_every.nowfun = lambda: retval
try:
return fun(*args, **kwargs)
finally:
cls.run_every.nowfun = prev_nowfun
return __inner
return create_patcher
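# Illustrative note: ``patch_crontab_nowfun`` pins the schedule's notion of
# "now" for the duration of one test method, e.g.
#
#     @patch_crontab_nowfun(HourlyPeriodic, datetime(2010, 5, 10, 10, 30))
#     def test_something(self):
#         ...  # HourlyPeriodic.run_every.nowfun() returns the fixed datetime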
class test_crontab_parser(Case):
def test_parse_star(self):
self.assertEqual(crontab_parser(24).parse('*'), set(range(24)))
self.assertEqual(crontab_parser(60).parse('*'), set(range(60)))
self.assertEqual(crontab_parser(7).parse('*'), set(range(7)))
def test_parse_range(self):
self.assertEqual(crontab_parser(60).parse('1-10'),
set(range(1, 10 + 1)))
self.assertEqual(crontab_parser(24).parse('0-20'),
set(range(0, 20 + 1)))
self.assertEqual(crontab_parser().parse('2-10'),
set(range(2, 10 + 1)))
def test_parse_groups(self):
self.assertEqual(crontab_parser().parse('1,2,3,4'),
set([1, 2, 3, 4]))
self.assertEqual(crontab_parser().parse('0,15,30,45'),
set([0, 15, 30, 45]))
def test_parse_steps(self):
self.assertEqual(crontab_parser(8).parse('*/2'),
set([0, 2, 4, 6]))
self.assertEqual(crontab_parser().parse('*/2'),
set(i * 2 for i in xrange(30)))
self.assertEqual(crontab_parser().parse('*/3'),
set(i * 3 for i in xrange(20)))
def test_parse_composite(self):
self.assertEqual(crontab_parser(8).parse('*/2'), set([0, 2, 4, 6]))
self.assertEqual(crontab_parser().parse('2-9/5'), set([2, 7]))
self.assertEqual(crontab_parser().parse('2-10/5'), set([2, 7]))
self.assertEqual(crontab_parser().parse('2-11/5,3'), set([2, 3, 7]))
self.assertEqual(crontab_parser().parse('2-4/3,*/5,0-21/4'),
set([0, 2, 4, 5, 8, 10, 12, 15, 16,
20, 25, 30, 35, 40, 45, 50, 55]))
self.assertEqual(crontab_parser().parse('1-9/2'),
set([1, 3, 5, 7, 9]))
def test_parse_errors_on_empty_string(self):
with self.assertRaises(ParseException):
crontab_parser(60).parse('')
def test_parse_errors_on_empty_group(self):
with self.assertRaises(ParseException):
crontab_parser(60).parse('1,,2')
def test_parse_errors_on_empty_steps(self):
with self.assertRaises(ParseException):
crontab_parser(60).parse('*/')
def test_parse_errors_on_negative_number(self):
with self.assertRaises(ParseException):
crontab_parser(60).parse('-20')
def test_expand_cronspec_eats_iterables(self):
self.assertEqual(crontab._expand_cronspec(iter([1, 2, 3]), 100),
set([1, 2, 3]))
def test_expand_cronspec_invalid_type(self):
with self.assertRaises(TypeError):
crontab._expand_cronspec(object(), 100)
def test_repr(self):
self.assertIn("*", repr(crontab("*")))
def test_eq(self):
self.assertEqual(crontab(day_of_week="1, 2"),
crontab(day_of_week="1-2"))
self.assertEqual(crontab(minute="1", hour="2", day_of_week="5"),
crontab(minute="1", hour="2", day_of_week="5"))
self.assertNotEqual(crontab(minute="1"), crontab(minute="2"))
self.assertFalse(object() == crontab(minute="1"))
self.assertFalse(crontab(minute="1") == object())
class test_crontab_remaining_estimate(Case):
    def next_occurrence(self, crontab, now):
crontab.nowfun = lambda: now
return now + crontab.remaining_estimate(now)
def test_next_minute(self):
        next = self.next_occurrence(crontab(),
                                    datetime(2010, 9, 11, 14, 30, 15))
self.assertEqual(next, datetime(2010, 9, 11, 14, 31))
def test_not_next_minute(self):
        next = self.next_occurrence(crontab(),
                                    datetime(2010, 9, 11, 14, 59, 15))
self.assertEqual(next, datetime(2010, 9, 11, 15, 0))
def test_this_hour(self):
        next = self.next_occurrence(crontab(minute=[5, 42]),
                                    datetime(2010, 9, 11, 14, 30, 15))
self.assertEqual(next, datetime(2010, 9, 11, 14, 42))
def test_not_this_hour(self):
        next = self.next_occurrence(crontab(minute=[5, 10, 15]),
                                    datetime(2010, 9, 11, 14, 30, 15))
self.assertEqual(next, datetime(2010, 9, 11, 15, 5))
def test_today(self):
        next = self.next_occurrence(crontab(minute=[5, 42], hour=[12, 17]),
                                    datetime(2010, 9, 11, 14, 30, 15))
self.assertEqual(next, datetime(2010, 9, 11, 17, 5))
def test_not_today(self):
        next = self.next_occurrence(crontab(minute=[5, 42], hour=[12]),
                                    datetime(2010, 9, 11, 14, 30, 15))
self.assertEqual(next, datetime(2010, 9, 12, 12, 5))
def test_weekday(self):
        next = self.next_occurrence(crontab(minute=30,
                                            hour=14,
                                            day_of_week="sat"),
                                    datetime(2010, 9, 11, 14, 30, 15))
self.assertEqual(next, datetime(2010, 9, 18, 14, 30))
def test_not_weekday(self):
        next = self.next_occurrence(crontab(minute=[5, 42],
                                            day_of_week="mon-fri"),
                                    datetime(2010, 9, 11, 14, 30, 15))
self.assertEqual(next, datetime(2010, 9, 13, 0, 5))
class test_crontab_is_due(Case):
def setUp(self):
self.now = datetime.utcnow()
        # seconds until the next whole minute
        self.next_minute = 60 - self.now.second - 1e-6 * self.now.microsecond
def test_default_crontab_spec(self):
c = crontab()
self.assertEqual(c.minute, set(range(60)))
self.assertEqual(c.hour, set(range(24)))
self.assertEqual(c.day_of_week, set(range(7)))
def test_simple_crontab_spec(self):
c = crontab(minute=30)
self.assertEqual(c.minute, set([30]))
self.assertEqual(c.hour, set(range(24)))
self.assertEqual(c.day_of_week, set(range(7)))
def test_crontab_spec_minute_formats(self):
c = crontab(minute=30)
self.assertEqual(c.minute, set([30]))
c = crontab(minute='30')
self.assertEqual(c.minute, set([30]))
c = crontab(minute=(30, 40, 50))
self.assertEqual(c.minute, set([30, 40, 50]))
c = crontab(minute=set([30, 40, 50]))
self.assertEqual(c.minute, set([30, 40, 50]))
def test_crontab_spec_invalid_minute(self):
with self.assertRaises(ValueError):
crontab(minute=60)
with self.assertRaises(ValueError):
crontab(minute='0-100')
def test_crontab_spec_hour_formats(self):
c = crontab(hour=6)
self.assertEqual(c.hour, set([6]))
c = crontab(hour='5')
self.assertEqual(c.hour, set([5]))
c = crontab(hour=(4, 8, 12))
self.assertEqual(c.hour, set([4, 8, 12]))
def test_crontab_spec_invalid_hour(self):
with self.assertRaises(ValueError):
crontab(hour=24)
with self.assertRaises(ValueError):
crontab(hour='0-30')
def test_crontab_spec_dow_formats(self):
c = crontab(day_of_week=5)
self.assertEqual(c.day_of_week, set([5]))
c = crontab(day_of_week='5')
self.assertEqual(c.day_of_week, set([5]))
c = crontab(day_of_week='fri')
self.assertEqual(c.day_of_week, set([5]))
c = crontab(day_of_week='tuesday,sunday,fri')
self.assertEqual(c.day_of_week, set([0, 2, 5]))
c = crontab(day_of_week='mon-fri')
self.assertEqual(c.day_of_week, set([1, 2, 3, 4, 5]))
c = crontab(day_of_week='*/2')
self.assertEqual(c.day_of_week, set([0, 2, 4, 6]))
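    # ``seconds_almost_equal`` below tolerates a little clock skew: the wall
    # clock advances between computing ``self.now`` in setUp and calling
    # ``is_due``, so the comparison is retried with +/-0.1s offsets.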
def seconds_almost_equal(self, a, b, precision):
for index, skew in enumerate((+0.1, 0, -0.1)):
try:
self.assertAlmostEqual(a, b + skew, precision)
except AssertionError:
if index + 1 >= 3:
raise
else:
break
def test_crontab_spec_invalid_dow(self):
with self.assertRaises(ValueError):
crontab(day_of_week='fooday-barday')
with self.assertRaises(ValueError):
crontab(day_of_week='1,4,foo')
with self.assertRaises(ValueError):
crontab(day_of_week='7')
with self.assertRaises(ValueError):
crontab(day_of_week='12')
def test_every_minute_execution_is_due(self):
last_ran = self.now - timedelta(seconds=61)
due, remaining = EveryMinutePeriodic().is_due(last_ran)
self.assertTrue(due)
self.seconds_almost_equal(remaining, self.next_minute, 1)
def test_every_minute_execution_is_not_due(self):
last_ran = self.now - timedelta(seconds=self.now.second)
due, remaining = EveryMinutePeriodic().is_due(last_ran)
self.assertFalse(due)
self.seconds_almost_equal(remaining, self.next_minute, 1)
# 29th of May 2010 is a saturday
@patch_crontab_nowfun(HourlyPeriodic, datetime(2010, 5, 29, 10, 30))
def test_execution_is_due_on_saturday(self):
last_ran = self.now - timedelta(seconds=61)
due, remaining = EveryMinutePeriodic().is_due(last_ran)
self.assertTrue(due)
self.seconds_almost_equal(remaining, self.next_minute, 1)
# 30th of May 2010 is a sunday
@patch_crontab_nowfun(HourlyPeriodic, datetime(2010, 5, 30, 10, 30))
def test_execution_is_due_on_sunday(self):
last_ran = self.now - timedelta(seconds=61)
due, remaining = EveryMinutePeriodic().is_due(last_ran)
self.assertTrue(due)
self.seconds_almost_equal(remaining, self.next_minute, 1)
# 31st of May 2010 is a monday
@patch_crontab_nowfun(HourlyPeriodic, datetime(2010, 5, 31, 10, 30))
def test_execution_is_due_on_monday(self):
last_ran = self.now - timedelta(seconds=61)
due, remaining = EveryMinutePeriodic().is_due(last_ran)
self.assertTrue(due)
self.seconds_almost_equal(remaining, self.next_minute, 1)
@patch_crontab_nowfun(HourlyPeriodic, datetime(2010, 5, 10, 10, 30))
def test_every_hour_execution_is_due(self):
due, remaining = HourlyPeriodic().is_due(datetime(2010, 5, 10, 6, 30))
self.assertTrue(due)
self.assertEqual(remaining, 60 * 60)
@patch_crontab_nowfun(HourlyPeriodic, datetime(2010, 5, 10, 10, 29))
def test_every_hour_execution_is_not_due(self):
due, remaining = HourlyPeriodic().is_due(datetime(2010, 5, 10, 9, 30))
self.assertFalse(due)
self.assertEqual(remaining, 60)
@patch_crontab_nowfun(QuarterlyPeriodic, datetime(2010, 5, 10, 10, 15))
def test_first_quarter_execution_is_due(self):
due, remaining = QuarterlyPeriodic().is_due(
datetime(2010, 5, 10, 6, 30))
self.assertTrue(due)
self.assertEqual(remaining, 15 * 60)
@patch_crontab_nowfun(QuarterlyPeriodic, datetime(2010, 5, 10, 10, 30))
def test_second_quarter_execution_is_due(self):
due, remaining = QuarterlyPeriodic().is_due(
datetime(2010, 5, 10, 6, 30))
self.assertTrue(due)
self.assertEqual(remaining, 15 * 60)
@patch_crontab_nowfun(QuarterlyPeriodic, datetime(2010, 5, 10, 10, 14))
def test_first_quarter_execution_is_not_due(self):
due, remaining = QuarterlyPeriodic().is_due(
datetime(2010, 5, 10, 10, 0))
self.assertFalse(due)
self.assertEqual(remaining, 60)
@patch_crontab_nowfun(QuarterlyPeriodic, datetime(2010, 5, 10, 10, 29))
def test_second_quarter_execution_is_not_due(self):
due, remaining = QuarterlyPeriodic().is_due(
datetime(2010, 5, 10, 10, 15))
self.assertFalse(due)
self.assertEqual(remaining, 60)
@patch_crontab_nowfun(DailyPeriodic, datetime(2010, 5, 10, 7, 30))
def test_daily_execution_is_due(self):
due, remaining = DailyPeriodic().is_due(datetime(2010, 5, 9, 7, 30))
self.assertTrue(due)
self.assertEqual(remaining, 24 * 60 * 60)
@patch_crontab_nowfun(DailyPeriodic, datetime(2010, 5, 10, 10, 30))
def test_daily_execution_is_not_due(self):
due, remaining = DailyPeriodic().is_due(datetime(2010, 5, 10, 7, 30))
self.assertFalse(due)
self.assertEqual(remaining, 21 * 60 * 60)
@patch_crontab_nowfun(WeeklyPeriodic, datetime(2010, 5, 6, 7, 30))
def test_weekly_execution_is_due(self):
due, remaining = WeeklyPeriodic().is_due(datetime(2010, 4, 30, 7, 30))
self.assertTrue(due)
self.assertEqual(remaining, 7 * 24 * 60 * 60)
@patch_crontab_nowfun(WeeklyPeriodic, datetime(2010, 5, 7, 10, 30))
def test_weekly_execution_is_not_due(self):
due, remaining = WeeklyPeriodic().is_due(datetime(2010, 5, 6, 7, 30))
self.assertFalse(due)
self.assertEqual(remaining, 6 * 24 * 60 * 60 - 3 * 60 * 60)
|
jallohm/django
|
refs/heads/master
|
tests/str/tests.py
|
149
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from unittest import skipIf
from django.test import TestCase
from django.utils import six
from .models import Article, InternationalArticle
class SimpleTests(TestCase):
@skipIf(six.PY3, "tests a __str__ method returning unicode under Python 2")
def test_basic(self):
a = Article.objects.create(
headline=b'Parrot programs in Python',
pub_date=datetime.datetime(2005, 7, 28)
)
self.assertEqual(str(a), str('Parrot programs in Python'))
self.assertEqual(repr(a), str('<Article: Parrot programs in Python>'))
def test_international(self):
a = InternationalArticle.objects.create(
headline='Girl wins €12.500 in lottery',
pub_date=datetime.datetime(2005, 7, 28)
)
if six.PY3:
self.assertEqual(str(a), 'Girl wins €12.500 in lottery')
else:
# On Python 2, the default str() output will be the UTF-8 encoded
# output of __unicode__() -- or __str__() when the
# python_2_unicode_compatible decorator is used.
self.assertEqual(str(a), b'Girl wins \xe2\x82\xac12.500 in lottery')
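# A sketch of the mechanism described in the comment above (illustration only,
# not part of this test module): python_2_unicode_compatible renames __str__
# to __unicode__ on Python 2 and installs a __str__ that returns UTF-8 bytes.
#
#   from django.utils.encoding import python_2_unicode_compatible
#
#   @python_2_unicode_compatible
#   class Greeting(object):
#       def __str__(self):
#           return 'héllo'  # text type; str() yields its UTF-8 bytes on Python 2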
|
sekikn/ambari
|
refs/heads/trunk
|
ambari-common/src/main/python/ambari_jinja2/ambari_jinja2/testsuite/api.py
|
5
|
# -*- coding: utf-8 -*-
"""
ambari_jinja2.testsuite.api
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests the public API and related stuff.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import os
import time
import tempfile
import unittest
from ambari_jinja2.testsuite import JinjaTestCase
from ambari_jinja2 import Environment, Undefined, DebugUndefined, \
     StrictUndefined, UndefinedError, Template, meta, \
     is_undefined, DictLoader
from ambari_jinja2.utils import Cycler
env = Environment()
class ExtendedAPITestCase(JinjaTestCase):
def test_item_and_attribute(self):
from ambari_jinja2.sandbox import SandboxedEnvironment
for env in Environment(), SandboxedEnvironment():
# the |list is necessary for python3
tmpl = env.from_string('{{ foo.items()|list }}')
assert tmpl.render(foo={'items': 42}) == "[('items', 42)]"
tmpl = env.from_string('{{ foo|attr("items")()|list }}')
assert tmpl.render(foo={'items': 42}) == "[('items', 42)]"
tmpl = env.from_string('{{ foo["items"] }}')
assert tmpl.render(foo={'items': 42}) == '42'
def test_finalizer(self):
def finalize_none_empty(value):
if value is None:
value = u''
return value
env = Environment(finalize=finalize_none_empty)
tmpl = env.from_string('{% for item in seq %}|{{ item }}{% endfor %}')
assert tmpl.render(seq=(None, 1, "foo")) == '||1|foo'
tmpl = env.from_string('<{{ none }}>')
assert tmpl.render() == '<>'
def test_cycler(self):
items = 1, 2, 3
c = Cycler(*items)
for item in items + items:
assert c.current == item
assert c.next() == item
c.next()
assert c.current == 2
c.reset()
assert c.current == 1
def test_expressions(self):
expr = env.compile_expression("foo")
assert expr() is None
assert expr(foo=42) == 42
expr2 = env.compile_expression("foo", undefined_to_none=False)
assert is_undefined(expr2())
expr = env.compile_expression("42 + foo")
assert expr(foo=42) == 84
def test_template_passthrough(self):
t = Template('Content')
assert env.get_template(t) is t
assert env.select_template([t]) is t
assert env.get_or_select_template([t]) is t
assert env.get_or_select_template(t) is t
def test_autoescape_autoselect(self):
def select_autoescape(name):
if name is None or '.' not in name:
return False
return name.endswith('.html')
env = Environment(autoescape=select_autoescape,
loader=DictLoader({
'test.txt': '{{ foo }}',
'test.html': '{{ foo }}'
}))
t = env.get_template('test.txt')
assert t.render(foo='<foo>') == '<foo>'
t = env.get_template('test.html')
assert t.render(foo='<foo>') == '<foo>'
t = env.from_string('{{ foo }}')
assert t.render(foo='<foo>') == '<foo>'
class MetaTestCase(JinjaTestCase):
def test_find_undeclared_variables(self):
ast = env.parse('{% set foo = 42 %}{{ bar + foo }}')
x = meta.find_undeclared_variables(ast)
assert x == set(['bar'])
ast = env.parse('{% set foo = 42 %}{{ bar + foo }}'
'{% macro meh(x) %}{{ x }}{% endmacro %}'
'{% for item in seq %}{{ muh(item) + meh(seq) }}{% endfor %}')
x = meta.find_undeclared_variables(ast)
assert x == set(['bar', 'seq', 'muh'])
    def test_find_referenced_templates(self):
ast = env.parse('{% extends "layout.html" %}{% include helper %}')
i = meta.find_referenced_templates(ast)
assert i.next() == 'layout.html'
assert i.next() is None
assert list(i) == []
ast = env.parse('{% extends "layout.html" %}'
'{% from "test.html" import a, b as c %}'
'{% import "meh.html" as meh %}'
'{% include "muh.html" %}')
i = meta.find_referenced_templates(ast)
assert list(i) == ['layout.html', 'test.html', 'meh.html', 'muh.html']
def test_find_included_templates(self):
ast = env.parse('{% include ["foo.html", "bar.html"] %}')
i = meta.find_referenced_templates(ast)
assert list(i) == ['foo.html', 'bar.html']
ast = env.parse('{% include ("foo.html", "bar.html") %}')
i = meta.find_referenced_templates(ast)
assert list(i) == ['foo.html', 'bar.html']
ast = env.parse('{% include ["foo.html", "bar.html", foo] %}')
i = meta.find_referenced_templates(ast)
assert list(i) == ['foo.html', 'bar.html', None]
ast = env.parse('{% include ("foo.html", "bar.html", foo) %}')
i = meta.find_referenced_templates(ast)
assert list(i) == ['foo.html', 'bar.html', None]
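# An illustrative helper built on the API exercised above (an assumption, not
# part of ambari_jinja2): collect the statically-known template references of
# a source string, skipping dynamic (None) entries.
def _static_references(source):
    return [name for name in
            meta.find_referenced_templates(env.parse(source))
            if name is not None]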
class StreamingTestCase(JinjaTestCase):
def test_basic_streaming(self):
tmpl = env.from_string("<ul>{% for item in seq %}<li>{{ loop.index "
"}} - {{ item }}</li>{%- endfor %}</ul>")
stream = tmpl.stream(seq=range(4))
self.assert_equal(stream.next(), '<ul>')
self.assert_equal(stream.next(), '<li>1 - 0</li>')
self.assert_equal(stream.next(), '<li>2 - 1</li>')
self.assert_equal(stream.next(), '<li>3 - 2</li>')
self.assert_equal(stream.next(), '<li>4 - 3</li>')
self.assert_equal(stream.next(), '</ul>')
def test_buffered_streaming(self):
tmpl = env.from_string("<ul>{% for item in seq %}<li>{{ loop.index "
"}} - {{ item }}</li>{%- endfor %}</ul>")
stream = tmpl.stream(seq=range(4))
stream.enable_buffering(size=3)
self.assert_equal(stream.next(), u'<ul><li>1 - 0</li><li>2 - 1</li>')
self.assert_equal(stream.next(), u'<li>3 - 2</li><li>4 - 3</li></ul>')
def test_streaming_behavior(self):
tmpl = env.from_string("")
stream = tmpl.stream()
assert not stream.buffered
stream.enable_buffering(20)
assert stream.buffered
stream.disable_buffering()
assert not stream.buffered
class UndefinedTestCase(JinjaTestCase):
def test_stopiteration_is_undefined(self):
def test():
raise StopIteration()
t = Template('A{{ test() }}B')
assert t.render(test=test) == 'AB'
t = Template('A{{ test().missingattribute }}B')
self.assert_raises(UndefinedError, t.render, test=test)
def test_default_undefined(self):
env = Environment(undefined=Undefined)
self.assert_equal(env.from_string('{{ missing }}').render(), u'')
self.assert_raises(UndefinedError,
env.from_string('{{ missing.attribute }}').render)
self.assert_equal(env.from_string('{{ missing|list }}').render(), '[]')
self.assert_equal(env.from_string('{{ missing is not defined }}').render(), 'True')
self.assert_equal(env.from_string('{{ foo.missing }}').render(foo=42), '')
self.assert_equal(env.from_string('{{ not missing }}').render(), 'True')
def test_debug_undefined(self):
env = Environment(undefined=DebugUndefined)
self.assert_equal(env.from_string('{{ missing }}').render(), '{{ missing }}')
self.assert_raises(UndefinedError,
env.from_string('{{ missing.attribute }}').render)
self.assert_equal(env.from_string('{{ missing|list }}').render(), '[]')
self.assert_equal(env.from_string('{{ missing is not defined }}').render(), 'True')
self.assert_equal(env.from_string('{{ foo.missing }}').render(foo=42),
u"{{ no such element: int object['missing'] }}")
self.assert_equal(env.from_string('{{ not missing }}').render(), 'True')
def test_strict_undefined(self):
env = Environment(undefined=StrictUndefined)
self.assert_raises(UndefinedError, env.from_string('{{ missing }}').render)
self.assert_raises(UndefinedError, env.from_string('{{ missing.attribute }}').render)
self.assert_raises(UndefinedError, env.from_string('{{ missing|list }}').render)
self.assert_equal(env.from_string('{{ missing is not defined }}').render(), 'True')
self.assert_raises(UndefinedError, env.from_string('{{ foo.missing }}').render, foo=42)
self.assert_raises(UndefinedError, env.from_string('{{ not missing }}').render)
def test_indexing_gives_undefined(self):
t = Template("{{ var[42].foo }}")
self.assert_raises(UndefinedError, t.render, var=0)
def test_none_gives_proper_error(self):
try:
Environment().getattr(None, 'split')()
except UndefinedError, e:
assert e.message == "'None' has no attribute 'split'"
else:
assert False, 'expected exception'
def test_object_repr(self):
try:
Undefined(obj=42, name='upper')()
except UndefinedError, e:
assert e.message == "'int object' has no attribute 'upper'"
else:
assert False, 'expected exception'
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(ExtendedAPITestCase))
suite.addTest(unittest.makeSuite(MetaTestCase))
suite.addTest(unittest.makeSuite(StreamingTestCase))
suite.addTest(unittest.makeSuite(UndefinedTestCase))
return suite
|
bocaaust/FreshLife
|
refs/heads/master
|
django_project/env/lib/python2.7/site-packages/django/contrib/admin/actions.py
|
101
|
"""
Built-in, globally-available admin actions.
"""
from django.core.exceptions import PermissionDenied
from django.contrib.admin import helpers
from django.contrib.admin.util import get_deleted_objects, model_ngettext
from django.db import router
from django.template.response import TemplateResponse
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy, ugettext as _
def delete_selected(modeladmin, request, queryset):
"""
Default action which deletes the selected objects.
This action first displays a confirmation page whichs shows all the
deleteable objects, or, if the user has no permission one of the related
childs (foreignkeys), a "permission denied" message.
Next, it delets all selected objects and redirects back to the change list.
"""
opts = modeladmin.model._meta
app_label = opts.app_label
# Check that the user has delete permission for the actual model
if not modeladmin.has_delete_permission(request):
raise PermissionDenied
using = router.db_for_write(modeladmin.model)
# Populate deletable_objects, a data structure of all related objects that
# will also be deleted.
deletable_objects, perms_needed, protected = get_deleted_objects(
queryset, opts, request.user, modeladmin.admin_site, using)
# The user has already confirmed the deletion.
# Do the deletion and return a None to display the change list view again.
if request.POST.get('post'):
if perms_needed:
raise PermissionDenied
n = queryset.count()
if n:
for obj in queryset:
obj_display = force_text(obj)
modeladmin.log_deletion(request, obj, obj_display)
queryset.delete()
modeladmin.message_user(request, _("Successfully deleted %(count)d %(items)s.") % {
"count": n, "items": model_ngettext(modeladmin.opts, n)
})
# Return None to display the change list page again.
return None
if len(queryset) == 1:
objects_name = force_text(opts.verbose_name)
else:
objects_name = force_text(opts.verbose_name_plural)
if perms_needed or protected:
title = _("Cannot delete %(name)s") % {"name": objects_name}
else:
title = _("Are you sure?")
context = {
"title": title,
"objects_name": objects_name,
"deletable_objects": [deletable_objects],
'queryset': queryset,
"perms_lacking": perms_needed,
"protected": protected,
"opts": opts,
"app_label": app_label,
'action_checkbox_name': helpers.ACTION_CHECKBOX_NAME,
}
# Display the confirmation page
return TemplateResponse(request, modeladmin.delete_selected_confirmation_template or [
"admin/%s/%s/delete_selected_confirmation.html" % (app_label, opts.object_name.lower()),
"admin/%s/delete_selected_confirmation.html" % app_label,
"admin/delete_selected_confirmation.html"
], context, current_app=modeladmin.admin_site.name)
delete_selected.short_description = ugettext_lazy("Delete selected %(verbose_name_plural)s")
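# Usage sketch (an assumption, not part of this module): delete_selected is
# installed site-wide by default, and an AdminSite can also register it
# explicitly:
#     from django.contrib import admin
#     admin.site.add_action(delete_selected, 'delete_selected')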
|
abhishek-ch/hue
|
refs/heads/master
|
desktop/core/ext-py/pysaml2-2.4.0/example/sp-wsgi/service_conf.py
|
31
|
from saml2.assertion import Policy
HOST = '127.0.0.1'
PORT = 8087
HTTPS = False
# Which groups of entity categories to use
POLICY = Policy(
{
"default": {"entity_categories": ["swamid", "edugain"]}
}
)
# HTTPS cert information
SERVER_CERT = "pki/mycert.pem"
SERVER_KEY = "pki/mykey.pem"
CERT_CHAIN = ""
|
bSr43/WinObjC
|
refs/heads/master
|
deps/3rdparty/icu/icu/as_is/bomlist.py
|
348
|
#!/usr/bin/python
# Copyright (C) 2011 IBM Corporation and Others. All Rights Reserved.
#
# run in icu/
# will create file icu/as_is/bomlist.txt
#
# Usage:
# ( python as_is/bomlist.py > as_is/bomlist.txt ) || rm -f as_is/bomlist.txt
import os
import codecs
tree = os.walk(".")
nots=0
notutf8=0
noprops=0
utf8=0
fixed=0
tfiles=0
bom=codecs.BOM_UTF8
for ent in tree:
(path,dirs,files) = ent
if(path.find("/.svn") != -1):
continue
for file in files:
tfiles=tfiles+1
fp = (path + "/" + file)
if not os.path.isfile(fp):
continue
f = open(fp, 'rb')
bytes=f.read(3)
if bytes and (bytes == bom):
print 'icu/'+fp[2::]
f.close()
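# A possible follow-up step (an assumption, not part of this script): strip
# the UTF-8 BOM from a file reported above.
def strip_bom(fp):
    with open(fp, 'rb') as f:
        data = f.read()
    if data.startswith(codecs.BOM_UTF8):
        with open(fp, 'wb') as f:
            f.write(data[len(codecs.BOM_UTF8):])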
|
xen0l/ansible
|
refs/heads/devel
|
lib/ansible/plugins/action/package.py
|
48
|
# (c) 2015, Ansible Inc,
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleAction, AnsibleActionFail
from ansible.plugins.action import ActionBase
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class ActionModule(ActionBase):
TRANSFERS_FILES = False
def run(self, tmp=None, task_vars=None):
''' handler for package operations '''
self._supports_check_mode = True
self._supports_async = True
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
module = self._task.args.get('use', 'auto')
if module == 'auto':
try:
if self._task.delegate_to: # if we delegate, we should use delegated host's facts
module = self._templar.template("{{hostvars['%s']['ansible_facts']['pkg_mgr']}}" % self._task.delegate_to)
else:
module = self._templar.template('{{ansible_facts.pkg_mgr}}')
except Exception:
pass # could not get it from template!
try:
if module == 'auto':
facts = self._execute_module(module_name='setup', module_args=dict(filter='ansible_pkg_mgr', gather_subset='!all'), task_vars=task_vars)
display.debug("Facts %s" % facts)
module = facts.get('ansible_facts', {}).get('ansible_pkg_mgr', 'auto')
if module != 'auto':
if module not in self._shared_loader_obj.module_loader:
raise AnsibleActionFail('Could not find a module for %s.' % module)
else:
# run the 'package' module
new_module_args = self._task.args.copy()
if 'use' in new_module_args:
del new_module_args['use']
display.vvvv("Running %s" % module)
result.update(self._execute_module(module_name=module, module_args=new_module_args, task_vars=task_vars, wrap_async=self._task.async_val))
else:
raise AnsibleActionFail('Could not detect which package manager to use. Try gathering facts or setting the "use" option.')
except AnsibleAction as e:
result.update(e.result)
finally:
if not self._task.async_val:
# remove a temporary path we created
self._remove_tmp_path(self._connection._shell.tmpdir)
return result
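# Task-level usage sketch (an assumption): the auto-detected package manager
# can be overridden per task with the `use` option, e.g.
#
#   - package:
#       name: htop
#       state: present
#       use: apt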
|
strets123/rdkit
|
refs/heads/master
|
rdkit/VLib/NodeLib/__init__.py
|
6
|
__all__ = ['SmartsMolFilter', 'SDSupply', 'SmartsRemover', 'SmilesDupeFilter',
           'SmilesOutput']
|
yangxiaohua1977/sound-linux-4.5.7
|
refs/heads/master
|
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
|
12527
|
# Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000
def avg(total, n):
return total / n
def nsecs(secs, nsecs):
return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
    # No trailing comma here: it would turn the formatted string into a tuple.
    return "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs))
def add_stats(dict, key, value):
if not dict.has_key(key):
dict[key] = (value, value, value, 1)
else:
min, max, avg, count = dict[key]
if value < min:
min = value
if value > max:
max = value
        # Exponentially smoothed running average, not an exact mean.
        avg = (avg + value) / 2
dict[key] = (min, max, avg, count + 1)
def clear_term():
print("\x1b[H\x1b[2J")
audit_package_warned = False
try:
import audit
machine_to_id = {
'x86_64': audit.MACH_86_64,
'alpha' : audit.MACH_ALPHA,
'ia64' : audit.MACH_IA64,
'ppc' : audit.MACH_PPC,
'ppc64' : audit.MACH_PPC64,
's390' : audit.MACH_S390,
's390x' : audit.MACH_S390X,
'i386' : audit.MACH_X86,
'i586' : audit.MACH_X86,
'i686' : audit.MACH_X86,
}
try:
machine_to_id['armeb'] = audit.MACH_ARMEB
except:
pass
machine_id = machine_to_id[os.uname()[4]]
except:
if not audit_package_warned:
audit_package_warned = True
print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
try:
return audit.audit_syscall_to_name(id, machine_id)
except:
return str(id)
def strerror(nr):
try:
return errno.errorcode[abs(nr)]
except:
return "Unknown %d errno" % nr
|
felipenaselva/felipe.repository
|
refs/heads/master
|
plugin.video.mrpiracy/resources/lib/Downloader.py
|
8
|
import os
import xbmcgui
import xbmc
import time
import urllib
class Downloader:
def __init__(self,):
pass
    def download(self, path, url, name):
        if os.path.isfile(path):
            xbmc.log("File already exists; removing it before re-downloading")
            while os.path.exists(path):
                try:
                    os.remove(path)
                    break
                except:
                    pass
dp = xbmcgui.DialogProgress()
dp.create('MrPiracy Downloader')
dp.update(0,name)
xbmc.sleep(500)
start_time = time.time()
urllib.URLopener.version = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:43.0) Gecko/20100101 Firefox/43.0'
try:
urllib.urlretrieve(url, path, lambda nb, bs, fs: self.dialogdown(name,nb, bs, fs, dp, start_time))
dp.close()
return True
except:
while os.path.exists(path):
try: os.remove(path); break
except: pass
dp.close()
return False
def dialogdown(self,name,numblocks, blocksize, filesize, dp, start_time):
try:
percent = min(numblocks * blocksize * 100 / filesize, 100)
currently_downloaded = float(numblocks) * blocksize / (1024 * 1024)
kbps_speed = numblocks * blocksize / (time.time() - start_time)
if kbps_speed > 0: eta = (filesize - numblocks * blocksize) / kbps_speed
else: eta = 0
kbps_speed = kbps_speed / 1024
total = float(filesize) / (1024 * 1024)
mbs = '%.02f MB %s %.02f MB' % (currently_downloaded,'downloaded', total)
e = ' (%.0f Kb/s) ' % kbps_speed
            eta_str = 'Time: %02d:%02d' % divmod(eta, 60)
            dp.update(percent, name + ' - ' + mbs + e, eta_str)
except:
percent = 100
dp.update(percent)
if dp.iscanceled():
dp.close()
raise StopDownloading('Stopped Downloading')
class StopDownloading(Exception):
def __init__(self, value): self.value = value
def __str__(self): return repr(self.value)
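# The progress arithmetic from dialogdown in isolation (a sketch for clarity,
# not used by the class above):
def _eta_seconds(numblocks, blocksize, filesize, elapsed):
    downloaded = float(numblocks) * blocksize
    speed = downloaded / elapsed if elapsed > 0 else 0
    return (filesize - downloaded) / speed if speed > 0 else 0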
|
amarzavery/AutoRest
|
refs/heads/master
|
src/generator/AutoRest.Python.Tests/Expected/AcceptanceTests/Validation/auto_rest_validation_test/__init__.py
|
25
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .auto_rest_validation_test import AutoRestValidationTest
from .version import VERSION
__all__ = ['AutoRestValidationTest']
__version__ = VERSION
|
youdonghai/intellij-community
|
refs/heads/master
|
python/testData/create_tests/create_tst.expected.py
|
39
|
from unittest import TestCase
class Spam(TestCase):
def eggs(self):
self.fail()
def eggs_and_ham(self):
self.fail()
|
Big-B702/python-for-android
|
refs/heads/master
|
python3-alpha/extra_modules/gdata/tlslite/constants.py
|
48
|
"""Constants used in various places."""
class CertificateType:
x509 = 0
openpgp = 1
cryptoID = 2
class HandshakeType:
hello_request = 0
client_hello = 1
server_hello = 2
certificate = 11
server_key_exchange = 12
certificate_request = 13
server_hello_done = 14
certificate_verify = 15
client_key_exchange = 16
finished = 20
class ContentType:
change_cipher_spec = 20
alert = 21
handshake = 22
application_data = 23
all = (20,21,22,23)
class AlertLevel:
warning = 1
fatal = 2
class AlertDescription:
"""
@cvar bad_record_mac: A TLS record failed to decrypt properly.
If this occurs during a shared-key or SRP handshake it most likely
indicates a bad password. It may also indicate an implementation
error, or some tampering with the data in transit.
This alert will be signalled by the server if the SRP password is bad. It
may also be signalled by the server if the SRP username is unknown to the
server, but it doesn't wish to reveal that fact.
This alert will be signalled by the client if the shared-key username is
bad.
@cvar handshake_failure: A problem occurred while handshaking.
This typically indicates a lack of common ciphersuites between client and
server, or some other disagreement (about SRP parameters or key sizes,
for example).
@cvar protocol_version: The other party's SSL/TLS version was unacceptable.
This indicates that the client and server couldn't agree on which version
of SSL or TLS to use.
@cvar user_canceled: The handshake is being cancelled for some reason.
"""
close_notify = 0
unexpected_message = 10
bad_record_mac = 20
decryption_failed = 21
record_overflow = 22
decompression_failure = 30
handshake_failure = 40
no_certificate = 41 #SSLv3
bad_certificate = 42
unsupported_certificate = 43
certificate_revoked = 44
certificate_expired = 45
certificate_unknown = 46
illegal_parameter = 47
unknown_ca = 48
access_denied = 49
decode_error = 50
decrypt_error = 51
export_restriction = 60
protocol_version = 70
insufficient_security = 71
internal_error = 80
user_canceled = 90
no_renegotiation = 100
unknown_srp_username = 120
missing_srp_username = 121
untrusted_srp_parameters = 122
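# A lookup sketch (an assumption, not part of tlslite): map a numeric alert
# code received on the wire back to its symbolic name for logging.
def _alert_name(code):
    for name, value in vars(AlertDescription).items():
        if not name.startswith('_') and value == code:
            return name
    return "unknown(%d)" % code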
class CipherSuite:
TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA = 0x0050
TLS_SRP_SHA_WITH_AES_128_CBC_SHA = 0x0053
TLS_SRP_SHA_WITH_AES_256_CBC_SHA = 0x0056
TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA = 0x0051
TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA = 0x0054
TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA = 0x0057
TLS_RSA_WITH_3DES_EDE_CBC_SHA = 0x000A
TLS_RSA_WITH_AES_128_CBC_SHA = 0x002F
TLS_RSA_WITH_AES_256_CBC_SHA = 0x0035
TLS_RSA_WITH_RC4_128_SHA = 0x0005
srpSuites = []
srpSuites.append(TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA)
srpSuites.append(TLS_SRP_SHA_WITH_AES_128_CBC_SHA)
srpSuites.append(TLS_SRP_SHA_WITH_AES_256_CBC_SHA)
def getSrpSuites(ciphers):
suites = []
for cipher in ciphers:
if cipher == "aes128":
suites.append(CipherSuite.TLS_SRP_SHA_WITH_AES_128_CBC_SHA)
elif cipher == "aes256":
suites.append(CipherSuite.TLS_SRP_SHA_WITH_AES_256_CBC_SHA)
elif cipher == "3des":
suites.append(CipherSuite.TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA)
return suites
getSrpSuites = staticmethod(getSrpSuites)
srpRsaSuites = []
srpRsaSuites.append(TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA)
srpRsaSuites.append(TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA)
srpRsaSuites.append(TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA)
def getSrpRsaSuites(ciphers):
suites = []
for cipher in ciphers:
if cipher == "aes128":
suites.append(CipherSuite.TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA)
elif cipher == "aes256":
suites.append(CipherSuite.TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA)
elif cipher == "3des":
suites.append(CipherSuite.TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA)
return suites
getSrpRsaSuites = staticmethod(getSrpRsaSuites)
rsaSuites = []
rsaSuites.append(TLS_RSA_WITH_3DES_EDE_CBC_SHA)
rsaSuites.append(TLS_RSA_WITH_AES_128_CBC_SHA)
rsaSuites.append(TLS_RSA_WITH_AES_256_CBC_SHA)
rsaSuites.append(TLS_RSA_WITH_RC4_128_SHA)
def getRsaSuites(ciphers):
suites = []
for cipher in ciphers:
if cipher == "aes128":
suites.append(CipherSuite.TLS_RSA_WITH_AES_128_CBC_SHA)
elif cipher == "aes256":
suites.append(CipherSuite.TLS_RSA_WITH_AES_256_CBC_SHA)
elif cipher == "rc4":
suites.append(CipherSuite.TLS_RSA_WITH_RC4_128_SHA)
elif cipher == "3des":
suites.append(CipherSuite.TLS_RSA_WITH_3DES_EDE_CBC_SHA)
return suites
getRsaSuites = staticmethod(getRsaSuites)
tripleDESSuites = []
tripleDESSuites.append(TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA)
tripleDESSuites.append(TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA)
tripleDESSuites.append(TLS_RSA_WITH_3DES_EDE_CBC_SHA)
aes128Suites = []
aes128Suites.append(TLS_SRP_SHA_WITH_AES_128_CBC_SHA)
aes128Suites.append(TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA)
aes128Suites.append(TLS_RSA_WITH_AES_128_CBC_SHA)
aes256Suites = []
aes256Suites.append(TLS_SRP_SHA_WITH_AES_256_CBC_SHA)
aes256Suites.append(TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA)
aes256Suites.append(TLS_RSA_WITH_AES_256_CBC_SHA)
rc4Suites = []
rc4Suites.append(TLS_RSA_WITH_RC4_128_SHA)
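# A compact equivalent of the three get*Suites helpers above (a sketch, not
# part of tlslite): filter a cipher-keyword list through an explicit mapping,
# e.g. {"aes128": CipherSuite.TLS_RSA_WITH_AES_128_CBC_SHA}.
def _suites_for(ciphers, mapping):
    return [mapping[cipher] for cipher in ciphers if cipher in mapping]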
class Fault:
badUsername = 101
badPassword = 102
badA = 103
clientSrpFaults = list(range(101,104))
badVerifyMessage = 601
clientCertFaults = list(range(601,602))
badPremasterPadding = 501
shortPremasterSecret = 502
clientNoAuthFaults = list(range(501,503))
badIdentifier = 401
badSharedKey = 402
clientSharedKeyFaults = list(range(401,403))
badB = 201
serverFaults = list(range(201,202))
badFinished = 300
badMAC = 301
badPadding = 302
genericFaults = list(range(300,303))
    faultAlerts = {
        badUsername: (AlertDescription.unknown_srp_username,
                      AlertDescription.bad_record_mac),
        badPassword: (AlertDescription.bad_record_mac,),
        badA: (AlertDescription.illegal_parameter,),
        badIdentifier: (AlertDescription.handshake_failure,),
        badSharedKey: (AlertDescription.bad_record_mac,),
        badPremasterPadding: (AlertDescription.bad_record_mac,),
        shortPremasterSecret: (AlertDescription.bad_record_mac,),
        badVerifyMessage: (AlertDescription.decrypt_error,),
        badFinished: (AlertDescription.decrypt_error,),
        badMAC: (AlertDescription.bad_record_mac,),
        badPadding: (AlertDescription.bad_record_mac,)
    }
    faultNames = {
        badUsername: "bad username",
        badPassword: "bad password",
        badA: "bad A",
        badIdentifier: "bad identifier",
        badSharedKey: "bad sharedkey",
        badPremasterPadding: "bad premaster padding",
        shortPremasterSecret: "short premaster secret",
        badVerifyMessage: "bad verify message",
        badFinished: "bad finished message",
        badMAC: "bad MAC",
        badPadding: "bad padding"
    }
|
maohongyuan/kbengine
|
refs/heads/master
|
kbe/src/lib/python/Lib/test/test_argparse.py
|
60
|
# Author: Steven J. Bethard <steven.bethard@gmail.com>.
import codecs
import inspect
import os
import shutil
import stat
import sys
import textwrap
import tempfile
import unittest
import argparse
from io import StringIO
from test import support
from unittest import mock
class StdIOBuffer(StringIO):
pass
class TestCase(unittest.TestCase):
def assertEqual(self, obj1, obj2):
if obj1 != obj2:
print('')
print(repr(obj1))
print(repr(obj2))
print(obj1)
print(obj2)
super(TestCase, self).assertEqual(obj1, obj2)
def setUp(self):
# The tests assume that line wrapping occurs at 80 columns, but this
# behaviour can be overridden by setting the COLUMNS environment
# variable. To ensure that this assumption is true, unset COLUMNS.
env = support.EnvironmentVarGuard()
env.unset("COLUMNS")
self.addCleanup(env.__exit__)
class TempDirMixin(object):
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.old_dir = os.getcwd()
os.chdir(self.temp_dir)
def tearDown(self):
os.chdir(self.old_dir)
for root, dirs, files in os.walk(self.temp_dir, topdown=False):
for name in files:
os.chmod(os.path.join(self.temp_dir, name), stat.S_IWRITE)
shutil.rmtree(self.temp_dir, True)
def create_readonly_file(self, filename):
file_path = os.path.join(self.temp_dir, filename)
with open(file_path, 'w') as file:
file.write(filename)
os.chmod(file_path, stat.S_IREAD)
class Sig(object):
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
class NS(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def __repr__(self):
sorted_items = sorted(self.__dict__.items())
kwarg_str = ', '.join(['%s=%r' % tup for tup in sorted_items])
return '%s(%s)' % (type(self).__name__, kwarg_str)
def __eq__(self, other):
return vars(self) == vars(other)
def __ne__(self, other):
return not (self == other)
class ArgumentParserError(Exception):
def __init__(self, message, stdout=None, stderr=None, error_code=None):
Exception.__init__(self, message, stdout, stderr)
self.message = message
self.stdout = stdout
self.stderr = stderr
self.error_code = error_code
def stderr_to_parser_error(parse_args, *args, **kwargs):
# if this is being called recursively and stderr or stdout is already being
# redirected, simply call the function and let the enclosing function
# catch the exception
if isinstance(sys.stderr, StdIOBuffer) or isinstance(sys.stdout, StdIOBuffer):
return parse_args(*args, **kwargs)
    # if this is not being called recursively, redirect stdout and stderr and
    # use their captured contents in the ArgumentParserError message
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = StdIOBuffer()
sys.stderr = StdIOBuffer()
try:
try:
result = parse_args(*args, **kwargs)
for key in list(vars(result)):
if getattr(result, key) is sys.stdout:
setattr(result, key, old_stdout)
if getattr(result, key) is sys.stderr:
setattr(result, key, old_stderr)
return result
except SystemExit:
code = sys.exc_info()[1].code
stdout = sys.stdout.getvalue()
stderr = sys.stderr.getvalue()
raise ArgumentParserError("SystemExit", stdout, stderr, code)
finally:
sys.stdout = old_stdout
sys.stderr = old_stderr
class ErrorRaisingArgumentParser(argparse.ArgumentParser):
def parse_args(self, *args, **kwargs):
parse_args = super(ErrorRaisingArgumentParser, self).parse_args
return stderr_to_parser_error(parse_args, *args, **kwargs)
def exit(self, *args, **kwargs):
exit = super(ErrorRaisingArgumentParser, self).exit
return stderr_to_parser_error(exit, *args, **kwargs)
def error(self, *args, **kwargs):
error = super(ErrorRaisingArgumentParser, self).error
return stderr_to_parser_error(error, *args, **kwargs)
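# Usage sketch: ErrorRaisingArgumentParser converts parser exits into
# ArgumentParserError instances, so tests can assert on the exit code and the
# captured usage text instead of catching SystemExit.
def _demo_error_raising_parser():
    parser = ErrorRaisingArgumentParser()
    parser.add_argument('-x', required=True)
    try:
        parser.parse_args([])
    except ArgumentParserError as e:
        return e.error_code, e.stderr  # 2 and the usage/error message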
class ParserTesterMetaclass(type):
"""Adds parser tests using the class attributes.
Classes of this type should specify the following attributes:
argument_signatures -- a list of Sig objects which specify
the signatures of Argument objects to be created
failures -- a list of args lists that should cause the parser
to fail
successes -- a list of (initial_args, options, remaining_args) tuples
where initial_args specifies the string args to be parsed,
options is a dict that should match the vars() of the options
parsed out of initial_args, and remaining_args should be any
remaining unparsed arguments
"""
def __init__(cls, name, bases, bodydict):
if name == 'ParserTestCase':
return
# default parser signature is empty
if not hasattr(cls, 'parser_signature'):
cls.parser_signature = Sig()
if not hasattr(cls, 'parser_class'):
cls.parser_class = ErrorRaisingArgumentParser
# ---------------------------------------
# functions for adding optional arguments
# ---------------------------------------
def no_groups(parser, argument_signatures):
"""Add all arguments directly to the parser"""
for sig in argument_signatures:
parser.add_argument(*sig.args, **sig.kwargs)
def one_group(parser, argument_signatures):
"""Add all arguments under a single group in the parser"""
group = parser.add_argument_group('foo')
for sig in argument_signatures:
group.add_argument(*sig.args, **sig.kwargs)
def many_groups(parser, argument_signatures):
"""Add each argument in its own group to the parser"""
for i, sig in enumerate(argument_signatures):
group = parser.add_argument_group('foo:%i' % i)
group.add_argument(*sig.args, **sig.kwargs)
# --------------------------
# functions for parsing args
# --------------------------
def listargs(parser, args):
"""Parse the args by passing in a list"""
return parser.parse_args(args)
def sysargs(parser, args):
"""Parse the args by defaulting to sys.argv"""
old_sys_argv = sys.argv
sys.argv = [old_sys_argv[0]] + args
try:
return parser.parse_args()
finally:
sys.argv = old_sys_argv
# class that holds the combination of one optional argument
# addition method and one arg parsing method
class AddTests(object):
def __init__(self, tester_cls, add_arguments, parse_args):
self._add_arguments = add_arguments
self._parse_args = parse_args
add_arguments_name = self._add_arguments.__name__
parse_args_name = self._parse_args.__name__
for test_func in [self.test_failures, self.test_successes]:
func_name = test_func.__name__
names = func_name, add_arguments_name, parse_args_name
test_name = '_'.join(names)
def wrapper(self, test_func=test_func):
test_func(self)
try:
wrapper.__name__ = test_name
except TypeError:
pass
setattr(tester_cls, test_name, wrapper)
def _get_parser(self, tester):
args = tester.parser_signature.args
kwargs = tester.parser_signature.kwargs
parser = tester.parser_class(*args, **kwargs)
self._add_arguments(parser, tester.argument_signatures)
return parser
def test_failures(self, tester):
parser = self._get_parser(tester)
for args_str in tester.failures:
args = args_str.split()
with tester.assertRaises(ArgumentParserError, msg=args):
parser.parse_args(args)
def test_successes(self, tester):
parser = self._get_parser(tester)
for args, expected_ns in tester.successes:
if isinstance(args, str):
args = args.split()
result_ns = self._parse_args(parser, args)
tester.assertEqual(expected_ns, result_ns)
# add tests for each combination of an optionals adding method
# and an arg parsing method
for add_arguments in [no_groups, one_group, many_groups]:
for parse_args in [listargs, sysargs]:
AddTests(cls, add_arguments, parse_args)
bases = TestCase,
ParserTestCase = ParserTesterMetaclass('ParserTestCase', bases, {})
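# Shape of a concrete test case under ParserTesterMetaclass (illustration
# only; kept commented out so it does not add tests to the suite):
# class TestExample(ParserTestCase):
#     argument_signatures = [Sig('-x')]
#     failures = ['-x', '-x 1 2']
#     successes = [('', NS(x=None)), ('-x 1', NS(x='1'))]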
# ===============
# Optionals tests
# ===============
class TestOptionalsSingleDash(ParserTestCase):
"""Test an Optional with a single-dash option string"""
argument_signatures = [Sig('-x')]
failures = ['-x', 'a', '--foo', '-x --foo', '-x -y']
successes = [
('', NS(x=None)),
('-x a', NS(x='a')),
('-xa', NS(x='a')),
('-x -1', NS(x='-1')),
('-x-1', NS(x='-1')),
]
class TestOptionalsSingleDashCombined(ParserTestCase):
"""Test an Optional with a single-dash option string"""
argument_signatures = [
Sig('-x', action='store_true'),
Sig('-yyy', action='store_const', const=42),
Sig('-z'),
]
failures = ['a', '--foo', '-xa', '-x --foo', '-x -z', '-z -x',
'-yx', '-yz a', '-yyyx', '-yyyza', '-xyza']
successes = [
('', NS(x=False, yyy=None, z=None)),
('-x', NS(x=True, yyy=None, z=None)),
('-za', NS(x=False, yyy=None, z='a')),
('-z a', NS(x=False, yyy=None, z='a')),
('-xza', NS(x=True, yyy=None, z='a')),
('-xz a', NS(x=True, yyy=None, z='a')),
('-x -za', NS(x=True, yyy=None, z='a')),
('-x -z a', NS(x=True, yyy=None, z='a')),
('-y', NS(x=False, yyy=42, z=None)),
('-yyy', NS(x=False, yyy=42, z=None)),
('-x -yyy -za', NS(x=True, yyy=42, z='a')),
('-x -yyy -z a', NS(x=True, yyy=42, z='a')),
]
class TestOptionalsSingleDashLong(ParserTestCase):
"""Test an Optional with a multi-character single-dash option string"""
argument_signatures = [Sig('-foo')]
failures = ['-foo', 'a', '--foo', '-foo --foo', '-foo -y', '-fooa']
successes = [
('', NS(foo=None)),
('-foo a', NS(foo='a')),
('-foo -1', NS(foo='-1')),
('-fo a', NS(foo='a')),
('-f a', NS(foo='a')),
]
class TestOptionalsSingleDashSubsetAmbiguous(ParserTestCase):
"""Test Optionals where option strings are subsets of each other"""
argument_signatures = [Sig('-f'), Sig('-foobar'), Sig('-foorab')]
failures = ['-f', '-foo', '-fo', '-foo b', '-foob', '-fooba', '-foora']
successes = [
('', NS(f=None, foobar=None, foorab=None)),
('-f a', NS(f='a', foobar=None, foorab=None)),
('-fa', NS(f='a', foobar=None, foorab=None)),
('-foa', NS(f='oa', foobar=None, foorab=None)),
('-fooa', NS(f='ooa', foobar=None, foorab=None)),
('-foobar a', NS(f=None, foobar='a', foorab=None)),
('-foorab a', NS(f=None, foobar=None, foorab='a')),
]
class TestOptionalsSingleDashAmbiguous(ParserTestCase):
"""Test Optionals that partially match but are not subsets"""
argument_signatures = [Sig('-foobar'), Sig('-foorab')]
failures = ['-f', '-f a', '-fa', '-foa', '-foo', '-fo', '-foo b']
successes = [
('', NS(foobar=None, foorab=None)),
('-foob a', NS(foobar='a', foorab=None)),
('-foor a', NS(foobar=None, foorab='a')),
('-fooba a', NS(foobar='a', foorab=None)),
('-foora a', NS(foobar=None, foorab='a')),
('-foobar a', NS(foobar='a', foorab=None)),
('-foorab a', NS(foobar=None, foorab='a')),
]
class TestOptionalsNumeric(ParserTestCase):
"""Test an Optional with a short opt string"""
argument_signatures = [Sig('-1', dest='one')]
failures = ['-1', 'a', '-1 --foo', '-1 -y', '-1 -1', '-1 -2']
successes = [
('', NS(one=None)),
('-1 a', NS(one='a')),
('-1a', NS(one='a')),
('-1-2', NS(one='-2')),
]
class TestOptionalsDoubleDash(ParserTestCase):
"""Test an Optional with a double-dash option string"""
argument_signatures = [Sig('--foo')]
failures = ['--foo', '-f', '-f a', 'a', '--foo -x', '--foo --bar']
successes = [
('', NS(foo=None)),
('--foo a', NS(foo='a')),
('--foo=a', NS(foo='a')),
('--foo -2.5', NS(foo='-2.5')),
('--foo=-2.5', NS(foo='-2.5')),
]
class TestOptionalsDoubleDashPartialMatch(ParserTestCase):
"""Tests partial matching with a double-dash option string"""
argument_signatures = [
Sig('--badger', action='store_true'),
Sig('--bat'),
]
failures = ['--bar', '--b', '--ba', '--b=2', '--ba=4', '--badge 5']
successes = [
('', NS(badger=False, bat=None)),
('--bat X', NS(badger=False, bat='X')),
('--bad', NS(badger=True, bat=None)),
('--badg', NS(badger=True, bat=None)),
('--badge', NS(badger=True, bat=None)),
('--badger', NS(badger=True, bat=None)),
]
class TestOptionalsDoubleDashPrefixMatch(ParserTestCase):
"""Tests when one double-dash option string is a prefix of another"""
argument_signatures = [
Sig('--badger', action='store_true'),
Sig('--ba'),
]
failures = ['--bar', '--b', '--ba', '--b=2', '--badge 5']
successes = [
('', NS(badger=False, ba=None)),
('--ba X', NS(badger=False, ba='X')),
('--ba=X', NS(badger=False, ba='X')),
('--bad', NS(badger=True, ba=None)),
('--badg', NS(badger=True, ba=None)),
('--badge', NS(badger=True, ba=None)),
('--badger', NS(badger=True, ba=None)),
]
class TestOptionalsSingleDoubleDash(ParserTestCase):
"""Test an Optional with single- and double-dash option strings"""
argument_signatures = [
Sig('-f', action='store_true'),
Sig('--bar'),
Sig('-baz', action='store_const', const=42),
]
failures = ['--bar', '-fbar', '-fbaz', '-bazf', '-b B', 'B']
successes = [
('', NS(f=False, bar=None, baz=None)),
('-f', NS(f=True, bar=None, baz=None)),
('--ba B', NS(f=False, bar='B', baz=None)),
('-f --bar B', NS(f=True, bar='B', baz=None)),
('-f -b', NS(f=True, bar=None, baz=42)),
('-ba -f', NS(f=True, bar=None, baz=42)),
]
class TestOptionalsAlternatePrefixChars(ParserTestCase):
"""Test an Optional with option strings with custom prefixes"""
parser_signature = Sig(prefix_chars='+:/', add_help=False)
argument_signatures = [
Sig('+f', action='store_true'),
Sig('::bar'),
Sig('/baz', action='store_const', const=42),
]
failures = ['--bar', '-fbar', '-b B', 'B', '-f', '--bar B', '-baz', '-h', '--help', '+h', '::help', '/help']
successes = [
('', NS(f=False, bar=None, baz=None)),
('+f', NS(f=True, bar=None, baz=None)),
('::ba B', NS(f=False, bar='B', baz=None)),
('+f ::bar B', NS(f=True, bar='B', baz=None)),
('+f /b', NS(f=True, bar=None, baz=42)),
('/ba +f', NS(f=True, bar=None, baz=42)),
]
class TestOptionalsAlternatePrefixCharsAddedHelp(ParserTestCase):
"""When ``-`` not in prefix_chars, default operators created for help
should use the prefix_chars in use rather than - or --
http://bugs.python.org/issue9444"""
parser_signature = Sig(prefix_chars='+:/', add_help=True)
argument_signatures = [
Sig('+f', action='store_true'),
Sig('::bar'),
Sig('/baz', action='store_const', const=42),
]
failures = ['--bar', '-fbar', '-b B', 'B', '-f', '--bar B', '-baz']
successes = [
('', NS(f=False, bar=None, baz=None)),
('+f', NS(f=True, bar=None, baz=None)),
('::ba B', NS(f=False, bar='B', baz=None)),
('+f ::bar B', NS(f=True, bar='B', baz=None)),
('+f /b', NS(f=True, bar=None, baz=42)),
('/ba +f', NS(f=True, bar=None, baz=42))
]
class TestOptionalsAlternatePrefixCharsMultipleShortArgs(ParserTestCase):
"""Verify that Optionals must be called with their defined prefixes"""
parser_signature = Sig(prefix_chars='+-', add_help=False)
argument_signatures = [
Sig('-x', action='store_true'),
Sig('+y', action='store_true'),
Sig('+z', action='store_true'),
]
failures = ['-w',
'-xyz',
'+x',
'-y',
'+xyz',
]
successes = [
('', NS(x=False, y=False, z=False)),
('-x', NS(x=True, y=False, z=False)),
('+y -x', NS(x=True, y=True, z=False)),
('+yz -x', NS(x=True, y=True, z=True)),
]
class TestOptionalsShortLong(ParserTestCase):
"""Test a combination of single- and double-dash option strings"""
argument_signatures = [
Sig('-v', '--verbose', '-n', '--noisy', action='store_true'),
]
failures = ['--x --verbose', '-N', 'a', '-v x']
successes = [
('', NS(verbose=False)),
('-v', NS(verbose=True)),
('--verbose', NS(verbose=True)),
('-n', NS(verbose=True)),
('--noisy', NS(verbose=True)),
]
class TestOptionalsDest(ParserTestCase):
"""Tests various means of setting destination"""
argument_signatures = [Sig('--foo-bar'), Sig('--baz', dest='zabbaz')]
failures = ['a']
successes = [
('--foo-bar f', NS(foo_bar='f', zabbaz=None)),
('--baz g', NS(foo_bar=None, zabbaz='g')),
('--foo-bar h --baz i', NS(foo_bar='h', zabbaz='i')),
('--baz j --foo-bar k', NS(foo_bar='k', zabbaz='j')),
]
class TestOptionalsDefault(ParserTestCase):
"""Tests specifying a default for an Optional"""
argument_signatures = [Sig('-x'), Sig('-y', default=42)]
failures = ['a']
successes = [
('', NS(x=None, y=42)),
('-xx', NS(x='x', y=42)),
('-yy', NS(x=None, y='y')),
]
class TestOptionalsNargsDefault(ParserTestCase):
"""Tests not specifying the number of args for an Optional"""
argument_signatures = [Sig('-x')]
failures = ['a', '-x']
successes = [
('', NS(x=None)),
('-x a', NS(x='a')),
]
class TestOptionalsNargs1(ParserTestCase):
"""Tests specifying the 1 arg for an Optional"""
argument_signatures = [Sig('-x', nargs=1)]
failures = ['a', '-x']
successes = [
('', NS(x=None)),
('-x a', NS(x=['a'])),
]
class TestOptionalsNargs3(ParserTestCase):
"""Tests specifying the 3 args for an Optional"""
argument_signatures = [Sig('-x', nargs=3)]
failures = ['a', '-x', '-x a', '-x a b', 'a -x', 'a -x b']
successes = [
('', NS(x=None)),
('-x a b c', NS(x=['a', 'b', 'c'])),
]
class TestOptionalsNargsOptional(ParserTestCase):
"""Tests specifying an Optional arg for an Optional"""
argument_signatures = [
Sig('-w', nargs='?'),
Sig('-x', nargs='?', const=42),
Sig('-y', nargs='?', default='spam'),
Sig('-z', nargs='?', type=int, const='42', default='84'),
]
failures = ['2']
successes = [
('', NS(w=None, x=None, y='spam', z=84)),
('-w', NS(w=None, x=None, y='spam', z=84)),
('-w 2', NS(w='2', x=None, y='spam', z=84)),
('-x', NS(w=None, x=42, y='spam', z=84)),
('-x 2', NS(w=None, x='2', y='spam', z=84)),
('-y', NS(w=None, x=None, y=None, z=84)),
('-y 2', NS(w=None, x=None, y='2', z=84)),
('-z', NS(w=None, x=None, y='spam', z=42)),
('-z 2', NS(w=None, x=None, y='spam', z=2)),
]
class TestOptionalsNargsZeroOrMore(ParserTestCase):
"""Tests specifying an args for an Optional that accepts zero or more"""
argument_signatures = [
Sig('-x', nargs='*'),
Sig('-y', nargs='*', default='spam'),
]
failures = ['a']
successes = [
('', NS(x=None, y='spam')),
('-x', NS(x=[], y='spam')),
('-x a', NS(x=['a'], y='spam')),
('-x a b', NS(x=['a', 'b'], y='spam')),
('-y', NS(x=None, y=[])),
('-y a', NS(x=None, y=['a'])),
('-y a b', NS(x=None, y=['a', 'b'])),
]
class TestOptionalsNargsOneOrMore(ParserTestCase):
"""Tests specifying an args for an Optional that accepts one or more"""
argument_signatures = [
Sig('-x', nargs='+'),
Sig('-y', nargs='+', default='spam'),
]
failures = ['a', '-x', '-y', 'a -x', 'a -y b']
successes = [
('', NS(x=None, y='spam')),
('-x a', NS(x=['a'], y='spam')),
('-x a b', NS(x=['a', 'b'], y='spam')),
('-y a', NS(x=None, y=['a'])),
('-y a b', NS(x=None, y=['a', 'b'])),
]
class TestOptionalsChoices(ParserTestCase):
"""Tests specifying the choices for an Optional"""
argument_signatures = [
Sig('-f', choices='abc'),
Sig('-g', type=int, choices=range(5))]
failures = ['a', '-f d', '-fad', '-ga', '-g 6']
successes = [
('', NS(f=None, g=None)),
('-f a', NS(f='a', g=None)),
('-f c', NS(f='c', g=None)),
('-g 0', NS(f=None, g=0)),
('-g 03', NS(f=None, g=3)),
('-fb -g4', NS(f='b', g=4)),
]
class TestOptionalsRequired(ParserTestCase):
"""Tests the an optional action that is required"""
argument_signatures = [
Sig('-x', type=int, required=True),
]
failures = ['a', '']
successes = [
('-x 1', NS(x=1)),
('-x42', NS(x=42)),
]
class TestOptionalsActionStore(ParserTestCase):
"""Tests the store action for an Optional"""
argument_signatures = [Sig('-x', action='store')]
failures = ['a', 'a -x']
successes = [
('', NS(x=None)),
('-xfoo', NS(x='foo')),
]
class TestOptionalsActionStoreConst(ParserTestCase):
"""Tests the store_const action for an Optional"""
argument_signatures = [Sig('-y', action='store_const', const=object)]
failures = ['a']
successes = [
('', NS(y=None)),
('-y', NS(y=object)),
]
class TestOptionalsActionStoreFalse(ParserTestCase):
"""Tests the store_false action for an Optional"""
argument_signatures = [Sig('-z', action='store_false')]
failures = ['a', '-za', '-z a']
successes = [
('', NS(z=True)),
('-z', NS(z=False)),
]
class TestOptionalsActionStoreTrue(ParserTestCase):
"""Tests the store_true action for an Optional"""
argument_signatures = [Sig('--apple', action='store_true')]
failures = ['a', '--apple=b', '--apple b']
successes = [
('', NS(apple=False)),
('--apple', NS(apple=True)),
]
class TestOptionalsActionAppend(ParserTestCase):
"""Tests the append action for an Optional"""
argument_signatures = [Sig('--baz', action='append')]
failures = ['a', '--baz', 'a --baz', '--baz a b']
successes = [
('', NS(baz=None)),
('--baz a', NS(baz=['a'])),
('--baz a --baz b', NS(baz=['a', 'b'])),
]
class TestOptionalsActionAppendWithDefault(ParserTestCase):
"""Tests the append action for an Optional"""
argument_signatures = [Sig('--baz', action='append', default=['X'])]
failures = ['a', '--baz', 'a --baz', '--baz a b']
successes = [
('', NS(baz=['X'])),
('--baz a', NS(baz=['X', 'a'])),
('--baz a --baz b', NS(baz=['X', 'a', 'b'])),
]
class TestOptionalsActionAppendConst(ParserTestCase):
"""Tests the append_const action for an Optional"""
argument_signatures = [
Sig('-b', action='append_const', const=Exception),
Sig('-c', action='append', dest='b'),
]
failures = ['a', '-c', 'a -c', '-bx', '-b x']
successes = [
('', NS(b=None)),
('-b', NS(b=[Exception])),
('-b -cx -b -cyz', NS(b=[Exception, 'x', Exception, 'yz'])),
]
class TestOptionalsActionAppendConstWithDefault(ParserTestCase):
"""Tests the append_const action for an Optional"""
argument_signatures = [
Sig('-b', action='append_const', const=Exception, default=['X']),
Sig('-c', action='append', dest='b'),
]
failures = ['a', '-c', 'a -c', '-bx', '-b x']
successes = [
('', NS(b=['X'])),
('-b', NS(b=['X', Exception])),
('-b -cx -b -cyz', NS(b=['X', Exception, 'x', Exception, 'yz'])),
]
class TestOptionalsActionCount(ParserTestCase):
"""Tests the count action for an Optional"""
argument_signatures = [Sig('-x', action='count')]
failures = ['a', '-x a', '-x b', '-x a -x b']
successes = [
('', NS(x=None)),
('-x', NS(x=1)),
]
# ================
# Positional tests
# ================
class TestPositionalsNargsNone(ParserTestCase):
"""Test a Positional that doesn't specify nargs"""
argument_signatures = [Sig('foo')]
failures = ['', '-x', 'a b']
successes = [
('a', NS(foo='a')),
]
class TestPositionalsNargs1(ParserTestCase):
"""Test a Positional that specifies an nargs of 1"""
argument_signatures = [Sig('foo', nargs=1)]
failures = ['', '-x', 'a b']
successes = [
('a', NS(foo=['a'])),
]
class TestPositionalsNargs2(ParserTestCase):
"""Test a Positional that specifies an nargs of 2"""
argument_signatures = [Sig('foo', nargs=2)]
failures = ['', 'a', '-x', 'a b c']
successes = [
('a b', NS(foo=['a', 'b'])),
]
class TestPositionalsNargsZeroOrMore(ParserTestCase):
"""Test a Positional that specifies unlimited nargs"""
argument_signatures = [Sig('foo', nargs='*')]
failures = ['-x']
successes = [
('', NS(foo=[])),
('a', NS(foo=['a'])),
('a b', NS(foo=['a', 'b'])),
]
class TestPositionalsNargsZeroOrMoreDefault(ParserTestCase):
"""Test a Positional that specifies unlimited nargs and a default"""
argument_signatures = [Sig('foo', nargs='*', default='bar')]
failures = ['-x']
successes = [
('', NS(foo='bar')),
('a', NS(foo=['a'])),
('a b', NS(foo=['a', 'b'])),
]
class TestPositionalsNargsOneOrMore(ParserTestCase):
"""Test a Positional that specifies one or more nargs"""
argument_signatures = [Sig('foo', nargs='+')]
failures = ['', '-x']
successes = [
('a', NS(foo=['a'])),
('a b', NS(foo=['a', 'b'])),
]
class TestPositionalsNargsOptional(ParserTestCase):
"""Tests an Optional Positional"""
argument_signatures = [Sig('foo', nargs='?')]
failures = ['-x', 'a b']
successes = [
('', NS(foo=None)),
('a', NS(foo='a')),
]
class TestPositionalsNargsOptionalDefault(ParserTestCase):
"""Tests an Optional Positional with a default value"""
argument_signatures = [Sig('foo', nargs='?', default=42)]
failures = ['-x', 'a b']
successes = [
('', NS(foo=42)),
('a', NS(foo='a')),
]
class TestPositionalsNargsOptionalConvertedDefault(ParserTestCase):
"""Tests an Optional Positional with a default value
that needs to be converted to the appropriate type.
"""
argument_signatures = [
Sig('foo', nargs='?', type=int, default='42'),
]
failures = ['-x', 'a b', '1 2']
successes = [
('', NS(foo=42)),
('1', NS(foo=1)),
]
class TestPositionalsNargsNoneNone(ParserTestCase):
"""Test two Positionals that don't specify nargs"""
argument_signatures = [Sig('foo'), Sig('bar')]
failures = ['', '-x', 'a', 'a b c']
successes = [
('a b', NS(foo='a', bar='b')),
]
class TestPositionalsNargsNone1(ParserTestCase):
"""Test a Positional with no nargs followed by one with 1"""
argument_signatures = [Sig('foo'), Sig('bar', nargs=1)]
failures = ['', '--foo', 'a', 'a b c']
successes = [
('a b', NS(foo='a', bar=['b'])),
]
class TestPositionalsNargs2None(ParserTestCase):
"""Test a Positional with 2 nargs followed by one with none"""
argument_signatures = [Sig('foo', nargs=2), Sig('bar')]
failures = ['', '--foo', 'a', 'a b', 'a b c d']
successes = [
('a b c', NS(foo=['a', 'b'], bar='c')),
]
class TestPositionalsNargsNoneZeroOrMore(ParserTestCase):
"""Test a Positional with no nargs followed by one with unlimited"""
argument_signatures = [Sig('foo'), Sig('bar', nargs='*')]
failures = ['', '--foo']
successes = [
('a', NS(foo='a', bar=[])),
('a b', NS(foo='a', bar=['b'])),
('a b c', NS(foo='a', bar=['b', 'c'])),
]
class TestPositionalsNargsNoneOneOrMore(ParserTestCase):
"""Test a Positional with no nargs followed by one with one or more"""
argument_signatures = [Sig('foo'), Sig('bar', nargs='+')]
failures = ['', '--foo', 'a']
successes = [
('a b', NS(foo='a', bar=['b'])),
('a b c', NS(foo='a', bar=['b', 'c'])),
]
class TestPositionalsNargsNoneOptional(ParserTestCase):
"""Test a Positional with no nargs followed by one with an Optional"""
argument_signatures = [Sig('foo'), Sig('bar', nargs='?')]
failures = ['', '--foo', 'a b c']
successes = [
('a', NS(foo='a', bar=None)),
('a b', NS(foo='a', bar='b')),
]
class TestPositionalsNargsZeroOrMoreNone(ParserTestCase):
"""Test a Positional with unlimited nargs followed by one with none"""
argument_signatures = [Sig('foo', nargs='*'), Sig('bar')]
failures = ['', '--foo']
successes = [
('a', NS(foo=[], bar='a')),
('a b', NS(foo=['a'], bar='b')),
('a b c', NS(foo=['a', 'b'], bar='c')),
]
class TestPositionalsNargsOneOrMoreNone(ParserTestCase):
"""Test a Positional with one or more nargs followed by one with none"""
argument_signatures = [Sig('foo', nargs='+'), Sig('bar')]
failures = ['', '--foo', 'a']
successes = [
('a b', NS(foo=['a'], bar='b')),
('a b c', NS(foo=['a', 'b'], bar='c')),
]
class TestPositionalsNargsOptionalNone(ParserTestCase):
"""Test a Positional with an Optional nargs followed by one with none"""
argument_signatures = [Sig('foo', nargs='?', default=42), Sig('bar')]
failures = ['', '--foo', 'a b c']
successes = [
('a', NS(foo=42, bar='a')),
('a b', NS(foo='a', bar='b')),
]
class TestPositionalsNargs2ZeroOrMore(ParserTestCase):
"""Test a Positional with 2 nargs followed by one with unlimited"""
argument_signatures = [Sig('foo', nargs=2), Sig('bar', nargs='*')]
failures = ['', '--foo', 'a']
successes = [
('a b', NS(foo=['a', 'b'], bar=[])),
('a b c', NS(foo=['a', 'b'], bar=['c'])),
]
class TestPositionalsNargs2OneOrMore(ParserTestCase):
"""Test a Positional with 2 nargs followed by one with one or more"""
argument_signatures = [Sig('foo', nargs=2), Sig('bar', nargs='+')]
failures = ['', '--foo', 'a', 'a b']
successes = [
('a b c', NS(foo=['a', 'b'], bar=['c'])),
]
class TestPositionalsNargs2Optional(ParserTestCase):
"""Test a Positional with 2 nargs followed by one optional"""
argument_signatures = [Sig('foo', nargs=2), Sig('bar', nargs='?')]
failures = ['', '--foo', 'a', 'a b c d']
successes = [
('a b', NS(foo=['a', 'b'], bar=None)),
('a b c', NS(foo=['a', 'b'], bar='c')),
]
class TestPositionalsNargsZeroOrMore1(ParserTestCase):
"""Test a Positional with unlimited nargs followed by one with 1"""
argument_signatures = [Sig('foo', nargs='*'), Sig('bar', nargs=1)]
failures = ['', '--foo', ]
successes = [
('a', NS(foo=[], bar=['a'])),
('a b', NS(foo=['a'], bar=['b'])),
('a b c', NS(foo=['a', 'b'], bar=['c'])),
]
class TestPositionalsNargsOneOrMore1(ParserTestCase):
"""Test a Positional with one or more nargs followed by one with 1"""
argument_signatures = [Sig('foo', nargs='+'), Sig('bar', nargs=1)]
failures = ['', '--foo', 'a']
successes = [
('a b', NS(foo=['a'], bar=['b'])),
('a b c', NS(foo=['a', 'b'], bar=['c'])),
]
class TestPositionalsNargsOptional1(ParserTestCase):
"""Test a Positional with an Optional nargs followed by one with 1"""
argument_signatures = [Sig('foo', nargs='?'), Sig('bar', nargs=1)]
failures = ['', '--foo', 'a b c']
successes = [
('a', NS(foo=None, bar=['a'])),
('a b', NS(foo='a', bar=['b'])),
]
class TestPositionalsNargsNoneZeroOrMore1(ParserTestCase):
"""Test three Positionals: no nargs, unlimited nargs and 1 nargs"""
argument_signatures = [
Sig('foo'),
Sig('bar', nargs='*'),
Sig('baz', nargs=1),
]
failures = ['', '--foo', 'a']
successes = [
('a b', NS(foo='a', bar=[], baz=['b'])),
('a b c', NS(foo='a', bar=['b'], baz=['c'])),
]
class TestPositionalsNargsNoneOneOrMore1(ParserTestCase):
"""Test three Positionals: no nargs, one or more nargs and 1 nargs"""
argument_signatures = [
Sig('foo'),
Sig('bar', nargs='+'),
Sig('baz', nargs=1),
]
failures = ['', '--foo', 'a', 'b']
successes = [
('a b c', NS(foo='a', bar=['b'], baz=['c'])),
('a b c d', NS(foo='a', bar=['b', 'c'], baz=['d'])),
]
class TestPositionalsNargsNoneOptional1(ParserTestCase):
"""Test three Positionals: no nargs, optional narg and 1 nargs"""
argument_signatures = [
Sig('foo'),
Sig('bar', nargs='?', default=0.625),
Sig('baz', nargs=1),
]
failures = ['', '--foo', 'a']
successes = [
('a b', NS(foo='a', bar=0.625, baz=['b'])),
('a b c', NS(foo='a', bar='b', baz=['c'])),
]
class TestPositionalsNargsOptionalOptional(ParserTestCase):
"""Test two optional nargs"""
argument_signatures = [
Sig('foo', nargs='?'),
Sig('bar', nargs='?', default=42),
]
failures = ['--foo', 'a b c']
successes = [
('', NS(foo=None, bar=42)),
('a', NS(foo='a', bar=42)),
('a b', NS(foo='a', bar='b')),
]
class TestPositionalsNargsOptionalZeroOrMore(ParserTestCase):
"""Test an Optional narg followed by unlimited nargs"""
argument_signatures = [Sig('foo', nargs='?'), Sig('bar', nargs='*')]
failures = ['--foo']
successes = [
('', NS(foo=None, bar=[])),
('a', NS(foo='a', bar=[])),
('a b', NS(foo='a', bar=['b'])),
('a b c', NS(foo='a', bar=['b', 'c'])),
]
class TestPositionalsNargsOptionalOneOrMore(ParserTestCase):
"""Test an Optional narg followed by one or more nargs"""
argument_signatures = [Sig('foo', nargs='?'), Sig('bar', nargs='+')]
failures = ['', '--foo']
successes = [
('a', NS(foo=None, bar=['a'])),
('a b', NS(foo='a', bar=['b'])),
('a b c', NS(foo='a', bar=['b', 'c'])),
]
class TestPositionalsChoicesString(ParserTestCase):
"""Test a set of single-character choices"""
argument_signatures = [Sig('spam', choices=set('abcdefg'))]
failures = ['', '--foo', 'h', '42', 'ef']
successes = [
('a', NS(spam='a')),
('g', NS(spam='g')),
]
class TestPositionalsChoicesInt(ParserTestCase):
"""Test a set of integer choices"""
argument_signatures = [Sig('spam', type=int, choices=range(20))]
failures = ['', '--foo', 'h', '42', 'ef']
successes = [
('4', NS(spam=4)),
('15', NS(spam=15)),
]
class TestPositionalsActionAppend(ParserTestCase):
"""Test the 'append' action"""
argument_signatures = [
Sig('spam', action='append'),
Sig('spam', action='append', nargs=2),
]
failures = ['', '--foo', 'a', 'a b', 'a b c d']
successes = [
('a b c', NS(spam=['a', ['b', 'c']])),
]
# ========================================
# Combined optionals and positionals tests
# ========================================
class TestOptionalsNumericAndPositionals(ParserTestCase):
"""Tests negative number args when numeric options are present"""
argument_signatures = [
Sig('x', nargs='?'),
Sig('-4', dest='y', action='store_true'),
]
failures = ['-2', '-315']
successes = [
('', NS(x=None, y=False)),
('a', NS(x='a', y=False)),
('-4', NS(x=None, y=True)),
('-4 a', NS(x='a', y=True)),
]
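# Outside the harness, the behaviour above comes from argparse's
# negative-number heuristic: once any option string looks like a negative
# number (here '-4'), tokens such as '-2' are no longer eligible as
# positional values.  A minimal sketch (assumed plain ArgumentParser):
#
#     parser = argparse.ArgumentParser()
#     parser.add_argument('x', nargs='?')
#     parser.add_argument('-4', dest='y', action='store_true')
#     parser.parse_args(['-4'])   # Namespace(x=None, y=True)
#     parser.parse_args(['-2'])   # error: '-2' looks like an option here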
class TestOptionalsAlmostNumericAndPositionals(ParserTestCase):
"""Tests negative number args when almost numeric options are present"""
argument_signatures = [
Sig('x', nargs='?'),
Sig('-k4', dest='y', action='store_true'),
]
failures = ['-k3']
successes = [
('', NS(x=None, y=False)),
('-2', NS(x='-2', y=False)),
('a', NS(x='a', y=False)),
('-k4', NS(x=None, y=True)),
('-k4 a', NS(x='a', y=True)),
]
class TestEmptyAndSpaceContainingArguments(ParserTestCase):
argument_signatures = [
Sig('x', nargs='?'),
Sig('-y', '--yyy', dest='y'),
]
failures = ['-y']
successes = [
([''], NS(x='', y=None)),
(['a badger'], NS(x='a badger', y=None)),
(['-a badger'], NS(x='-a badger', y=None)),
(['-y', ''], NS(x=None, y='')),
(['-y', 'a badger'], NS(x=None, y='a badger')),
(['-y', '-a badger'], NS(x=None, y='-a badger')),
(['--yyy=a badger'], NS(x=None, y='a badger')),
(['--yyy=-a badger'], NS(x=None, y='-a badger')),
]
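# Note the list-form successes above: the harness normally str.split()s a
# string of args, so arguments containing spaces or empty strings have to
# be supplied pre-split as lists.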
class TestPrefixCharacterOnlyArguments(ParserTestCase):
parser_signature = Sig(prefix_chars='-+')
argument_signatures = [
Sig('-', dest='x', nargs='?', const='badger'),
Sig('+', dest='y', type=int, default=42),
Sig('-+-', dest='z', action='store_true'),
]
failures = ['-y', '+ -']
successes = [
('', NS(x=None, y=42, z=False)),
('-', NS(x='badger', y=42, z=False)),
('- X', NS(x='X', y=42, z=False)),
('+ -3', NS(x=None, y=-3, z=False)),
('-+-', NS(x=None, y=42, z=True)),
('- ===', NS(x='===', y=42, z=False)),
]
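# With prefix_chars='-+', option strings made up entirely of prefix
# characters ('-', '+', '-+-') are legal, so each needs an explicit dest.
# A lone '-' still consumes a following value when one is expected, as the
# ('- X', ...) and ('- ===', ...) cases show.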
class TestNargsZeroOrMore(ParserTestCase):
"""Tests specifying an args for an Optional that accepts zero or more"""
argument_signatures = [Sig('-x', nargs='*'), Sig('y', nargs='*')]
failures = []
successes = [
('', NS(x=None, y=[])),
('-x', NS(x=[], y=[])),
('-x a', NS(x=['a'], y=[])),
('-x a -- b', NS(x=['a'], y=['b'])),
('a', NS(x=None, y=['a'])),
('a -x', NS(x=[], y=['a'])),
('a -x b', NS(x=['b'], y=['a'])),
]
class TestNargsRemainder(ParserTestCase):
"""Tests specifying a positional with nargs=REMAINDER"""
argument_signatures = [Sig('x'), Sig('y', nargs='...'), Sig('-z')]
failures = ['', '-z', '-z Z']
successes = [
('X', NS(x='X', y=[], z=None)),
('-z Z X', NS(x='X', y=[], z='Z')),
('X A B -z Z', NS(x='X', y=['A', 'B', '-z', 'Z'], z=None)),
('X Y --foo', NS(x='X', y=['Y', '--foo'], z=None)),
]
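# Sketch of nargs=REMAINDER ('...') outside the harness: everything after
# the first matched positional is collected verbatim, option-like strings
# included, which is why '-z' above is only recognised before 'x':
#
#     parser = argparse.ArgumentParser()
#     parser.add_argument('x')
#     parser.add_argument('y', nargs=argparse.REMAINDER)
#     parser.parse_args(['X', 'A', '-z', 'Z'])
#     # Namespace(x='X', y=['A', '-z', 'Z'])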
class TestOptionLike(ParserTestCase):
"""Tests options that may or may not be arguments"""
argument_signatures = [
Sig('-x', type=float),
Sig('-3', type=float, dest='y'),
Sig('z', nargs='*'),
]
failures = ['-x', '-y2.5', '-xa', '-x -a',
'-x -3', '-x -3.5', '-3 -3.5',
'-x -2.5', '-x -2.5 a', '-3 -.5',
'a x -1', '-x -1 a', '-3 -1 a']
successes = [
('', NS(x=None, y=None, z=[])),
('-x 2.5', NS(x=2.5, y=None, z=[])),
('-x 2.5 a', NS(x=2.5, y=None, z=['a'])),
('-3.5', NS(x=None, y=0.5, z=[])),
('-3-.5', NS(x=None, y=-0.5, z=[])),
('-3 .5', NS(x=None, y=0.5, z=[])),
('a -3.5', NS(x=None, y=0.5, z=['a'])),
('a', NS(x=None, y=None, z=['a'])),
('a -x 1', NS(x=1.0, y=None, z=['a'])),
('-x 1 a', NS(x=1.0, y=None, z=['a'])),
('-3 1 a', NS(x=None, y=1.0, z=['a'])),
]
class TestDefaultSuppress(ParserTestCase):
"""Test actions with suppressed defaults"""
argument_signatures = [
Sig('foo', nargs='?', default=argparse.SUPPRESS),
Sig('bar', nargs='*', default=argparse.SUPPRESS),
Sig('--baz', action='store_true', default=argparse.SUPPRESS),
]
failures = ['-x']
successes = [
('', NS()),
('a', NS(foo='a')),
('a b', NS(foo='a', bar=['b'])),
('--baz', NS(baz=True)),
('a --baz', NS(foo='a', baz=True)),
('--baz a b', NS(foo='a', bar=['b'], baz=True)),
]
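# argparse.SUPPRESS as a default means "set no attribute at all when the
# argument is absent", which is why the empty command line above yields a
# bare NS().  Minimal sketch:
#
#     parser = argparse.ArgumentParser()
#     parser.add_argument('--baz', action='store_true',
#                         default=argparse.SUPPRESS)
#     parser.parse_args([])         # Namespace()
#     parser.parse_args(['--baz'])  # Namespace(baz=True)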
class TestParserDefaultSuppress(ParserTestCase):
"""Test actions with a parser-level default of SUPPRESS"""
parser_signature = Sig(argument_default=argparse.SUPPRESS)
argument_signatures = [
Sig('foo', nargs='?'),
Sig('bar', nargs='*'),
Sig('--baz', action='store_true'),
]
failures = ['-x']
successes = [
('', NS()),
('a', NS(foo='a')),
('a b', NS(foo='a', bar=['b'])),
('--baz', NS(baz=True)),
('a --baz', NS(foo='a', baz=True)),
('--baz a b', NS(foo='a', bar=['b'], baz=True)),
]
class TestParserDefault42(ParserTestCase):
"""Test actions with a parser-level default of 42"""
parser_signature = Sig(argument_default=42)
argument_signatures = [
Sig('--version', action='version', version='1.0'),
Sig('foo', nargs='?'),
Sig('bar', nargs='*'),
Sig('--baz', action='store_true'),
]
failures = ['-x']
successes = [
('', NS(foo=42, bar=42, baz=42, version=42)),
('a', NS(foo='a', bar=42, baz=42, version=42)),
('a b', NS(foo='a', bar=['b'], baz=42, version=42)),
('--baz', NS(foo=42, bar=42, baz=True, version=42)),
('a --baz', NS(foo='a', bar=42, baz=True, version=42)),
('--baz a b', NS(foo='a', bar=['b'], baz=True, version=42)),
]
class TestArgumentsFromFile(TempDirMixin, ParserTestCase):
"""Test reading arguments from a file"""
def setUp(self):
super(TestArgumentsFromFile, self).setUp()
file_texts = [
('hello', 'hello world!\n'),
('recursive', '-a\n'
'A\n'
'@hello'),
('invalid', '@no-such-path\n'),
]
        for path, text in file_texts:
            with open(path, 'w') as file:
                file.write(text)
parser_signature = Sig(fromfile_prefix_chars='@')
argument_signatures = [
Sig('-a'),
Sig('x'),
Sig('y', nargs='+'),
]
failures = ['', '-b', 'X', '@invalid', '@missing']
successes = [
('X Y', NS(a=None, x='X', y=['Y'])),
('X -a A Y Z', NS(a='A', x='X', y=['Y', 'Z'])),
('@hello X', NS(a=None, x='hello world!', y=['X'])),
('X @hello', NS(a=None, x='X', y=['hello world!'])),
('-a B @recursive Y Z', NS(a='A', x='hello world!', y=['Y', 'Z'])),
('X @recursive Z -a B', NS(a='B', x='X', y=['hello world!', 'Z'])),
(["-a", "", "X", "Y"], NS(a='', x='X', y=['Y'])),
]
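# Hedged sketch of fromfile_prefix_chars: each '@file' token is replaced by
# that file's lines (one argument per line by default), recursively, which
# is what the 'recursive' fixture above exercises:
#
#     parser = argparse.ArgumentParser(fromfile_prefix_chars='@')
#     parser.add_argument('words', nargs='+')
#     # with args.txt containing "hello\nworld\n":
#     parser.parse_args(['@args.txt'])  # Namespace(words=['hello', 'world'])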
class TestArgumentsFromFileConverter(TempDirMixin, ParserTestCase):
"""Test reading arguments from a file"""
def setUp(self):
super(TestArgumentsFromFileConverter, self).setUp()
file_texts = [
('hello', 'hello world!\n'),
]
        for path, text in file_texts:
            with open(path, 'w') as file:
                file.write(text)
class FromFileConverterArgumentParser(ErrorRaisingArgumentParser):
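        # The base implementation yields one argument per physical line;
        # this override splits each line on whitespace instead.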
def convert_arg_line_to_args(self, arg_line):
for arg in arg_line.split():
if not arg.strip():
continue
yield arg
parser_class = FromFileConverterArgumentParser
parser_signature = Sig(fromfile_prefix_chars='@')
argument_signatures = [
Sig('y', nargs='+'),
]
failures = []
successes = [
('@hello X', NS(y=['hello', 'world!', 'X'])),
]
# =====================
# Type conversion tests
# =====================
class TestFileTypeRepr(TestCase):
def test_r(self):
type = argparse.FileType('r')
self.assertEqual("FileType('r')", repr(type))
def test_wb_1(self):
type = argparse.FileType('wb', 1)
self.assertEqual("FileType('wb', 1)", repr(type))
def test_r_latin(self):
type = argparse.FileType('r', encoding='latin_1')
self.assertEqual("FileType('r', encoding='latin_1')", repr(type))
def test_w_big5_ignore(self):
type = argparse.FileType('w', encoding='big5', errors='ignore')
self.assertEqual("FileType('w', encoding='big5', errors='ignore')",
repr(type))
def test_r_1_replace(self):
type = argparse.FileType('r', 1, errors='replace')
self.assertEqual("FileType('r', 1, errors='replace')", repr(type))
class RFile(object):
seen = {}
def __init__(self, name):
self.name = name
def __eq__(self, other):
if other in self.seen:
text = self.seen[other]
else:
text = self.seen[other] = other.read()
other.close()
if not isinstance(text, str):
text = text.decode('ascii')
return self.name == other.name == text
class TestFileTypeR(TempDirMixin, ParserTestCase):
"""Test the FileType option/argument type for reading files"""
def setUp(self):
super(TestFileTypeR, self).setUp()
        for file_name in ['foo', 'bar']:
            with open(os.path.join(self.temp_dir, file_name), 'w') as file:
                file.write(file_name)
self.create_readonly_file('readonly')
argument_signatures = [
Sig('-x', type=argparse.FileType()),
Sig('spam', type=argparse.FileType('r')),
]
failures = ['-x', '', 'non-existent-file.txt']
successes = [
('foo', NS(x=None, spam=RFile('foo'))),
('-x foo bar', NS(x=RFile('foo'), spam=RFile('bar'))),
('bar -x foo', NS(x=RFile('foo'), spam=RFile('bar'))),
('-x - -', NS(x=sys.stdin, spam=sys.stdin)),
('readonly', NS(x=None, spam=RFile('readonly'))),
]
class TestFileTypeDefaults(TempDirMixin, ParserTestCase):
"""Test that a file is not created unless the default is needed"""
def setUp(self):
super(TestFileTypeDefaults, self).setUp()
        with open(os.path.join(self.temp_dir, 'good'), 'w') as file:
            file.write('good')
argument_signatures = [
Sig('-c', type=argparse.FileType('r'), default='no-file.txt'),
]
    # the empty command line makes argparse open the default, 'no-file.txt',
    # which should provoke a no-such-file error
    failures = ['']
    # no error expected: the default is never opened when -c is given
successes = [('-c good', NS(c=RFile('good')))]
class TestFileTypeRB(TempDirMixin, ParserTestCase):
"""Test the FileType option/argument type for reading files"""
def setUp(self):
super(TestFileTypeRB, self).setUp()
        for file_name in ['foo', 'bar']:
            with open(os.path.join(self.temp_dir, file_name), 'w') as file:
                file.write(file_name)
argument_signatures = [
Sig('-x', type=argparse.FileType('rb')),
Sig('spam', type=argparse.FileType('rb')),
]
failures = ['-x', '']
successes = [
('foo', NS(x=None, spam=RFile('foo'))),
('-x foo bar', NS(x=RFile('foo'), spam=RFile('bar'))),
('bar -x foo', NS(x=RFile('foo'), spam=RFile('bar'))),
('-x - -', NS(x=sys.stdin, spam=sys.stdin)),
]
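# Writable counterpart of RFile: equality writes a probe string to the file
# (bytes when the mode is binary), closes it, and then compares names.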
class WFile(object):
seen = set()
def __init__(self, name):
self.name = name
def __eq__(self, other):
if other not in self.seen:
text = 'Check that file is writable.'
if 'b' in other.mode:
text = text.encode('ascii')
other.write(text)
other.close()
self.seen.add(other)
return self.name == other.name
@unittest.skipIf(hasattr(os, 'geteuid') and os.geteuid() == 0,
"non-root user required")
class TestFileTypeW(TempDirMixin, ParserTestCase):
"""Test the FileType option/argument type for writing files"""
def setUp(self):
super(TestFileTypeW, self).setUp()
self.create_readonly_file('readonly')
argument_signatures = [
Sig('-x', type=argparse.FileType('w')),
Sig('spam', type=argparse.FileType('w')),
]
failures = ['-x', '', 'readonly']
successes = [
('foo', NS(x=None, spam=WFile('foo'))),
('-x foo bar', NS(x=WFile('foo'), spam=WFile('bar'))),
('bar -x foo', NS(x=WFile('foo'), spam=WFile('bar'))),
('-x - -', NS(x=sys.stdout, spam=sys.stdout)),
]
class TestFileTypeWB(TempDirMixin, ParserTestCase):
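    """Test the FileType option/argument type for writing binary files"""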
argument_signatures = [
Sig('-x', type=argparse.FileType('wb')),
Sig('spam', type=argparse.FileType('wb')),
]
failures = ['-x', '']
successes = [
('foo', NS(x=None, spam=WFile('foo'))),
('-x foo bar', NS(x=WFile('foo'), spam=WFile('bar'))),
('bar -x foo', NS(x=WFile('foo'), spam=WFile('bar'))),
('-x - -', NS(x=sys.stdout, spam=sys.stdout)),
]
class TestFileTypeOpenArgs(TestCase):
"""Test that open (the builtin) is correctly called"""
def test_open_args(self):
FT = argparse.FileType
cases = [
(FT('rb'), ('rb', -1, None, None)),
(FT('w', 1), ('w', 1, None, None)),
(FT('w', errors='replace'), ('w', -1, None, 'replace')),
(FT('wb', encoding='big5'), ('wb', -1, 'big5', None)),
(FT('w', 0, 'l1', 'strict'), ('w', 0, 'l1', 'strict')),
]
with mock.patch('builtins.open') as m:
for type, args in cases:
type('foo')
m.assert_called_with('foo', *args)
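# FileType instances essentially forward to the open() builtin (with '-'
# special-cased to stdin/stdout), so FT('w', 0, 'l1', 'strict')('foo') is,
# in effect, open('foo', 'w', 0, 'l1', 'strict') -- which is exactly what
# the mock above asserts.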
class TestTypeCallable(ParserTestCase):
"""Test some callables as option/argument types"""
argument_signatures = [
Sig('--eggs', type=complex),
Sig('spam', type=float),
]
failures = ['a', '42j', '--eggs a', '--eggs 2i']
successes = [
('--eggs=42 42', NS(eggs=42, spam=42.0)),
('--eggs 2j -- -1.5', NS(eggs=2j, spam=-1.5)),
('1024.675', NS(eggs=None, spam=1024.675)),
]
class TestTypeUserDefined(ParserTestCase):
"""Test a user-defined option/argument type"""
class MyType(TestCase):
def __init__(self, value):
self.value = value
def __eq__(self, other):
return (type(self), self.value) == (type(other), other.value)
argument_signatures = [
Sig('-x', type=MyType),
Sig('spam', type=MyType),
]
failures = []
successes = [
('a -x b', NS(x=MyType('b'), spam=MyType('a'))),
('-xf g', NS(x=MyType('f'), spam=MyType('g'))),
]
class TestTypeClassicClass(ParserTestCase):
"""Test a classic class type"""
class C:
def __init__(self, value):
self.value = value
def __eq__(self, other):
return (type(self), self.value) == (type(other), other.value)
argument_signatures = [
Sig('-x', type=C),
Sig('spam', type=C),
]
failures = []
successes = [
('a -x b', NS(x=C('b'), spam=C('a'))),
('-xf g', NS(x=C('f'), spam=C('g'))),
]
class TestTypeRegistration(TestCase):
"""Test a user-defined type by registering it"""
def test(self):
def get_my_type(string):
return 'my_type{%s}' % string
parser = argparse.ArgumentParser()
parser.register('type', 'my_type', get_my_type)
parser.add_argument('-x', type='my_type')
parser.add_argument('y', type='my_type')
self.assertEqual(parser.parse_args('1'.split()),
NS(x=None, y='my_type{1}'))
self.assertEqual(parser.parse_args('-x 1 42'.split()),
NS(x='my_type{1}', y='my_type{42}'))
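# Sketch of the registry used above: parser.register('type', key, callable)
# lets add_argument() accept type=key as a plain string and resolve the
# converter at parse time.  The same registry maps action names such as
# 'store_true' to their Action classes.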
# ============
# Action tests
# ============
class TestActionUserDefined(ParserTestCase):
"""Test a user-defined option/argument action"""
class OptionalAction(argparse.Action):
def __call__(self, parser, namespace, value, option_string=None):
try:
# check destination and option string
assert self.dest == 'spam', 'dest: %s' % self.dest
assert option_string == '-s', 'flag: %s' % option_string
# when option is before argument, badger=2, and when
# option is after argument, badger=<whatever was set>
expected_ns = NS(spam=0.25)
if value in [0.125, 0.625]:
expected_ns.badger = 2
elif value in [2.0]:
expected_ns.badger = 84
else:
raise AssertionError('value: %s' % value)
assert expected_ns == namespace, ('expected %s, got %s' %
(expected_ns, namespace))
except AssertionError:
e = sys.exc_info()[1]
raise ArgumentParserError('opt_action failed: %s' % e)
setattr(namespace, 'spam', value)
class PositionalAction(argparse.Action):
def __call__(self, parser, namespace, value, option_string=None):
try:
assert option_string is None, ('option_string: %s' %
option_string)
# check destination
assert self.dest == 'badger', 'dest: %s' % self.dest
                # when the argument comes before the option, spam is still the
                # 0.25 default; when it comes after, spam=<whatever was set>
expected_ns = NS(badger=2)
if value in [42, 84]:
expected_ns.spam = 0.25
elif value in [1]:
expected_ns.spam = 0.625
elif value in [2]:
expected_ns.spam = 0.125
else:
raise AssertionError('value: %s' % value)
assert expected_ns == namespace, ('expected %s, got %s' %
(expected_ns, namespace))
except AssertionError:
e = sys.exc_info()[1]
raise ArgumentParserError('arg_action failed: %s' % e)
setattr(namespace, 'badger', value)
argument_signatures = [
Sig('-s', dest='spam', action=OptionalAction,
type=float, default=0.25),
Sig('badger', action=PositionalAction,
type=int, nargs='?', default=2),
]
failures = []
successes = [
('-s0.125', NS(spam=0.125, badger=2)),
('42', NS(spam=0.25, badger=42)),
('-s 0.625 1', NS(spam=0.625, badger=1)),
('84 -s2', NS(spam=2.0, badger=84)),
]
class TestActionRegistration(TestCase):
"""Test a user-defined action supplied by registering it"""
class MyAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, 'foo[%s]' % values)
def test(self):
parser = argparse.ArgumentParser()
parser.register('action', 'my_action', self.MyAction)
parser.add_argument('badger', action='my_action')
self.assertEqual(parser.parse_args(['1']), NS(badger='foo[1]'))
self.assertEqual(parser.parse_args(['42']), NS(badger='foo[42]'))
# ================
# Subparsers tests
# ================
class TestAddSubparsers(TestCase):
"""Test the add_subparsers method"""
def assertArgumentParserError(self, *args, **kwargs):
self.assertRaises(ArgumentParserError, *args, **kwargs)
def _get_parser(self, subparser_help=False, prefix_chars=None,
aliases=False):
# create a parser with a subparsers argument
if prefix_chars:
parser = ErrorRaisingArgumentParser(
prog='PROG', description='main description', prefix_chars=prefix_chars)
parser.add_argument(
prefix_chars[0] * 2 + 'foo', action='store_true', help='foo help')
else:
parser = ErrorRaisingArgumentParser(
prog='PROG', description='main description')
parser.add_argument(
'--foo', action='store_true', help='foo help')
parser.add_argument(
'bar', type=float, help='bar help')
# check that only one subparsers argument can be added
subparsers_kwargs = {}
if aliases:
subparsers_kwargs['metavar'] = 'COMMAND'
subparsers_kwargs['title'] = 'commands'
else:
subparsers_kwargs['help'] = 'command help'
subparsers = parser.add_subparsers(**subparsers_kwargs)
self.assertArgumentParserError(parser.add_subparsers)
# add first sub-parser
parser1_kwargs = dict(description='1 description')
if subparser_help:
parser1_kwargs['help'] = '1 help'
if aliases:
parser1_kwargs['aliases'] = ['1alias1', '1alias2']
parser1 = subparsers.add_parser('1', **parser1_kwargs)
parser1.add_argument('-w', type=int, help='w help')
parser1.add_argument('x', choices='abc', help='x help')
# add second sub-parser
parser2_kwargs = dict(description='2 description')
if subparser_help:
parser2_kwargs['help'] = '2 help'
parser2 = subparsers.add_parser('2', **parser2_kwargs)
parser2.add_argument('-y', choices='123', help='y help')
parser2.add_argument('z', type=complex, nargs='*', help='z help')
# add third sub-parser
parser3_kwargs = dict(description='3 description')
if subparser_help:
parser3_kwargs['help'] = '3 help'
parser3 = subparsers.add_parser('3', **parser3_kwargs)
parser3.add_argument('t', type=int, help='t help')
parser3.add_argument('u', nargs='...', help='u help')
# return the main parser
return parser
def setUp(self):
super().setUp()
self.parser = self._get_parser()
self.command_help_parser = self._get_parser(subparser_help=True)
def test_parse_args_failures(self):
# check some failure cases:
for args_str in ['', 'a', 'a a', '0.5 a', '0.5 1',
'0.5 1 -y', '0.5 2 -w']:
args = args_str.split()
self.assertArgumentParserError(self.parser.parse_args, args)
def test_parse_args(self):
# check some non-failure cases:
self.assertEqual(
self.parser.parse_args('0.5 1 b -w 7'.split()),
NS(foo=False, bar=0.5, w=7, x='b'),
)
self.assertEqual(
self.parser.parse_args('0.25 --foo 2 -y 2 3j -- -1j'.split()),
NS(foo=True, bar=0.25, y='2', z=[3j, -1j]),
)
self.assertEqual(
self.parser.parse_args('--foo 0.125 1 c'.split()),
NS(foo=True, bar=0.125, w=None, x='c'),
)
self.assertEqual(
self.parser.parse_args('-1.5 3 11 -- a --foo 7 -- b'.split()),
NS(foo=False, bar=-1.5, t=11, u=['a', '--foo', '7', '--', 'b']),
)
def test_parse_known_args(self):
self.assertEqual(
self.parser.parse_known_args('0.5 1 b -w 7'.split()),
(NS(foo=False, bar=0.5, w=7, x='b'), []),
)
self.assertEqual(
self.parser.parse_known_args('0.5 -p 1 b -w 7'.split()),
(NS(foo=False, bar=0.5, w=7, x='b'), ['-p']),
)
self.assertEqual(
self.parser.parse_known_args('0.5 1 b -w 7 -p'.split()),
(NS(foo=False, bar=0.5, w=7, x='b'), ['-p']),
)
self.assertEqual(
self.parser.parse_known_args('0.5 1 b -q -rs -w 7'.split()),
(NS(foo=False, bar=0.5, w=7, x='b'), ['-q', '-rs']),
)
self.assertEqual(
self.parser.parse_known_args('0.5 -W 1 b -X Y -w 7 Z'.split()),
(NS(foo=False, bar=0.5, w=7, x='b'), ['-W', '-X', 'Y', 'Z']),
)
def test_dest(self):
parser = ErrorRaisingArgumentParser()
parser.add_argument('--foo', action='store_true')
subparsers = parser.add_subparsers(dest='bar')
parser1 = subparsers.add_parser('1')
parser1.add_argument('baz')
self.assertEqual(NS(foo=False, bar='1', baz='2'),
parser.parse_args('1 2'.split()))
def test_help(self):
self.assertEqual(self.parser.format_usage(),
'usage: PROG [-h] [--foo] bar {1,2,3} ...\n')
        self.assertEqual(self.parser.format_help(), textwrap.dedent('''\
            usage: PROG [-h] [--foo] bar {1,2,3} ...

            main description

            positional arguments:
              bar         bar help
              {1,2,3}     command help

            optional arguments:
              -h, --help  show this help message and exit
              --foo       foo help
            '''))
def test_help_extra_prefix_chars(self):
# Make sure - is still used for help if it is a non-first prefix char
parser = self._get_parser(prefix_chars='+:-')
self.assertEqual(parser.format_usage(),
'usage: PROG [-h] [++foo] bar {1,2,3} ...\n')
        self.assertEqual(parser.format_help(), textwrap.dedent('''\
            usage: PROG [-h] [++foo] bar {1,2,3} ...

            main description

            positional arguments:
              bar         bar help
              {1,2,3}     command help

            optional arguments:
              -h, --help  show this help message and exit
              ++foo       foo help
            '''))
def test_help_alternate_prefix_chars(self):
parser = self._get_parser(prefix_chars='+:/')
self.assertEqual(parser.format_usage(),
'usage: PROG [+h] [++foo] bar {1,2,3} ...\n')
        self.assertEqual(parser.format_help(), textwrap.dedent('''\
            usage: PROG [+h] [++foo] bar {1,2,3} ...

            main description

            positional arguments:
              bar         bar help
              {1,2,3}     command help

            optional arguments:
              +h, ++help  show this help message and exit
              ++foo       foo help
            '''))
def test_parser_command_help(self):
self.assertEqual(self.command_help_parser.format_usage(),
'usage: PROG [-h] [--foo] bar {1,2,3} ...\n')
        self.assertEqual(self.command_help_parser.format_help(),
                         textwrap.dedent('''\
            usage: PROG [-h] [--foo] bar {1,2,3} ...

            main description

            positional arguments:
              bar         bar help
              {1,2,3}     command help
                1         1 help
                2         2 help
                3         3 help

            optional arguments:
              -h, --help  show this help message and exit
              --foo       foo help
            '''))
def test_subparser_title_help(self):
parser = ErrorRaisingArgumentParser(prog='PROG',
description='main description')
parser.add_argument('--foo', action='store_true', help='foo help')
parser.add_argument('bar', help='bar help')
subparsers = parser.add_subparsers(title='subcommands',
description='command help',
help='additional text')
parser1 = subparsers.add_parser('1')
parser2 = subparsers.add_parser('2')
self.assertEqual(parser.format_usage(),
'usage: PROG [-h] [--foo] bar {1,2} ...\n')
        self.assertEqual(parser.format_help(), textwrap.dedent('''\
            usage: PROG [-h] [--foo] bar {1,2} ...

            main description

            positional arguments:
              bar         bar help

            optional arguments:
              -h, --help  show this help message and exit
              --foo       foo help

            subcommands:
              command help

              {1,2}       additional text
            '''))
def _test_subparser_help(self, args_str, expected_help):
try:
self.parser.parse_args(args_str.split())
except ArgumentParserError:
err = sys.exc_info()[1]
if err.stdout != expected_help:
print(repr(expected_help))
print(repr(err.stdout))
self.assertEqual(err.stdout, expected_help)
def test_subparser1_help(self):
        self._test_subparser_help('5.0 1 -h', textwrap.dedent('''\
            usage: PROG bar 1 [-h] [-w W] {a,b,c}

            1 description

            positional arguments:
              {a,b,c}     x help

            optional arguments:
              -h, --help  show this help message and exit
              -w W        w help
            '''))
def test_subparser2_help(self):
        self._test_subparser_help('5.0 2 -h', textwrap.dedent('''\
            usage: PROG bar 2 [-h] [-y {1,2,3}] [z [z ...]]

            2 description

            positional arguments:
              z           z help

            optional arguments:
              -h, --help  show this help message and exit
              -y {1,2,3}  y help
            '''))
def test_alias_invocation(self):
parser = self._get_parser(aliases=True)
self.assertEqual(
parser.parse_known_args('0.5 1alias1 b'.split()),
(NS(foo=False, bar=0.5, w=None, x='b'), []),
)
self.assertEqual(
parser.parse_known_args('0.5 1alias2 b'.split()),
(NS(foo=False, bar=0.5, w=None, x='b'), []),
)
def test_error_alias_invocation(self):
parser = self._get_parser(aliases=True)
self.assertArgumentParserError(parser.parse_args,
'0.5 1alias3 b'.split())
def test_alias_help(self):
parser = self._get_parser(aliases=True, subparser_help=True)
self.maxDiff = None
        self.assertEqual(parser.format_help(), textwrap.dedent("""\
            usage: PROG [-h] [--foo] bar COMMAND ...

            main description

            positional arguments:
              bar                   bar help

            optional arguments:
              -h, --help            show this help message and exit
              --foo                 foo help

            commands:
              COMMAND
                1 (1alias1, 1alias2)
                                    1 help
                2                   2 help
                3                   3 help
            """))
# ============
# Groups tests
# ============
class TestPositionalsGroups(TestCase):
"""Tests that order of group positionals matches construction order"""
def test_nongroup_first(self):
parser = ErrorRaisingArgumentParser()
parser.add_argument('foo')
group = parser.add_argument_group('g')
group.add_argument('bar')
parser.add_argument('baz')
expected = NS(foo='1', bar='2', baz='3')
result = parser.parse_args('1 2 3'.split())
self.assertEqual(expected, result)
def test_group_first(self):
parser = ErrorRaisingArgumentParser()
group = parser.add_argument_group('xxx')
group.add_argument('foo')
parser.add_argument('bar')
parser.add_argument('baz')
expected = NS(foo='1', bar='2', baz='3')
result = parser.parse_args('1 2 3'.split())
self.assertEqual(expected, result)
def test_interleaved_groups(self):
parser = ErrorRaisingArgumentParser()
group = parser.add_argument_group('xxx')
parser.add_argument('foo')
group.add_argument('bar')
parser.add_argument('baz')
group = parser.add_argument_group('yyy')
group.add_argument('frell')
expected = NS(foo='1', bar='2', baz='3', frell='4')
result = parser.parse_args('1 2 3 4'.split())
self.assertEqual(expected, result)
# ===================
# Parent parser tests
# ===================
class TestParentParsers(TestCase):
"""Tests that parsers can be created with parent parsers"""
def assertArgumentParserError(self, *args, **kwargs):
self.assertRaises(ArgumentParserError, *args, **kwargs)
def setUp(self):
super().setUp()
self.wxyz_parent = ErrorRaisingArgumentParser(add_help=False)
self.wxyz_parent.add_argument('--w')
x_group = self.wxyz_parent.add_argument_group('x')
x_group.add_argument('-y')
self.wxyz_parent.add_argument('z')
self.abcd_parent = ErrorRaisingArgumentParser(add_help=False)
self.abcd_parent.add_argument('a')
self.abcd_parent.add_argument('-b')
c_group = self.abcd_parent.add_argument_group('c')
c_group.add_argument('--d')
self.w_parent = ErrorRaisingArgumentParser(add_help=False)
self.w_parent.add_argument('--w')
self.z_parent = ErrorRaisingArgumentParser(add_help=False)
self.z_parent.add_argument('z')
# parents with mutually exclusive groups
self.ab_mutex_parent = ErrorRaisingArgumentParser(add_help=False)
group = self.ab_mutex_parent.add_mutually_exclusive_group()
group.add_argument('-a', action='store_true')
group.add_argument('-b', action='store_true')
self.main_program = os.path.basename(sys.argv[0])
def test_single_parent(self):
parser = ErrorRaisingArgumentParser(parents=[self.wxyz_parent])
self.assertEqual(parser.parse_args('-y 1 2 --w 3'.split()),
NS(w='3', y='1', z='2'))
def test_single_parent_mutex(self):
self._test_mutex_ab(self.ab_mutex_parent.parse_args)
parser = ErrorRaisingArgumentParser(parents=[self.ab_mutex_parent])
self._test_mutex_ab(parser.parse_args)
    def test_single_grandparent_mutex(self):
parents = [self.ab_mutex_parent]
parser = ErrorRaisingArgumentParser(add_help=False, parents=parents)
parser = ErrorRaisingArgumentParser(parents=[parser])
self._test_mutex_ab(parser.parse_args)
def _test_mutex_ab(self, parse_args):
self.assertEqual(parse_args([]), NS(a=False, b=False))
self.assertEqual(parse_args(['-a']), NS(a=True, b=False))
self.assertEqual(parse_args(['-b']), NS(a=False, b=True))
self.assertArgumentParserError(parse_args, ['-a', '-b'])
self.assertArgumentParserError(parse_args, ['-b', '-a'])
self.assertArgumentParserError(parse_args, ['-c'])
self.assertArgumentParserError(parse_args, ['-a', '-c'])
self.assertArgumentParserError(parse_args, ['-b', '-c'])
def test_multiple_parents(self):
parents = [self.abcd_parent, self.wxyz_parent]
parser = ErrorRaisingArgumentParser(parents=parents)
self.assertEqual(parser.parse_args('--d 1 --w 2 3 4'.split()),
NS(a='3', b=None, d='1', w='2', y=None, z='4'))
def test_multiple_parents_mutex(self):
parents = [self.ab_mutex_parent, self.wxyz_parent]
parser = ErrorRaisingArgumentParser(parents=parents)
self.assertEqual(parser.parse_args('-a --w 2 3'.split()),
NS(a=True, b=False, w='2', y=None, z='3'))
self.assertArgumentParserError(
parser.parse_args, '-a --w 2 3 -b'.split())
self.assertArgumentParserError(
parser.parse_args, '-a -b --w 2 3'.split())
def test_conflicting_parents(self):
self.assertRaises(
argparse.ArgumentError,
argparse.ArgumentParser,
parents=[self.w_parent, self.wxyz_parent])
def test_conflicting_parents_mutex(self):
self.assertRaises(
argparse.ArgumentError,
argparse.ArgumentParser,
parents=[self.abcd_parent, self.ab_mutex_parent])
def test_same_argument_name_parents(self):
parents = [self.wxyz_parent, self.z_parent]
parser = ErrorRaisingArgumentParser(parents=parents)
self.assertEqual(parser.parse_args('1 2'.split()),
NS(w=None, y=None, z='2'))
def test_subparser_parents(self):
parser = ErrorRaisingArgumentParser()
subparsers = parser.add_subparsers()
abcde_parser = subparsers.add_parser('bar', parents=[self.abcd_parent])
abcde_parser.add_argument('e')
self.assertEqual(parser.parse_args('bar -b 1 --d 2 3 4'.split()),
NS(a='3', b='1', d='2', e='4'))
def test_subparser_parents_mutex(self):
parser = ErrorRaisingArgumentParser()
subparsers = parser.add_subparsers()
parents = [self.ab_mutex_parent]
abc_parser = subparsers.add_parser('foo', parents=parents)
c_group = abc_parser.add_argument_group('c_group')
c_group.add_argument('c')
parents = [self.wxyz_parent, self.ab_mutex_parent]
wxyzabe_parser = subparsers.add_parser('bar', parents=parents)
wxyzabe_parser.add_argument('e')
self.assertEqual(parser.parse_args('foo -a 4'.split()),
NS(a=True, b=False, c='4'))
self.assertEqual(parser.parse_args('bar -b --w 2 3 4'.split()),
NS(a=False, b=True, w='2', y=None, z='3', e='4'))
self.assertArgumentParserError(
parser.parse_args, 'foo -a -b 4'.split())
self.assertArgumentParserError(
parser.parse_args, 'bar -b -a 4'.split())
def test_parent_help(self):
parents = [self.abcd_parent, self.wxyz_parent]
parser = ErrorRaisingArgumentParser(parents=parents)
parser_help = parser.format_help()
progname = self.main_program
        self.assertEqual(parser_help, textwrap.dedent('''\
            usage: {}{}[-h] [-b B] [--d D] [--w W] [-y Y] a z

            positional arguments:
              a
              z

            optional arguments:
              -h, --help  show this help message and exit
              -b B
              --w W

            c:
              --d D

            x:
              -y Y
        '''.format(progname, ' ' if progname else '' )))
def test_groups_parents(self):
parent = ErrorRaisingArgumentParser(add_help=False)
g = parent.add_argument_group(title='g', description='gd')
g.add_argument('-w')
g.add_argument('-x')
m = parent.add_mutually_exclusive_group()
m.add_argument('-y')
m.add_argument('-z')
parser = ErrorRaisingArgumentParser(parents=[parent])
self.assertRaises(ArgumentParserError, parser.parse_args,
['-y', 'Y', '-z', 'Z'])
parser_help = parser.format_help()
progname = self.main_program
        self.assertEqual(parser_help, textwrap.dedent('''\
            usage: {}{}[-h] [-w W] [-x X] [-y Y | -z Z]

            optional arguments:
              -h, --help  show this help message and exit
              -y Y
              -z Z

            g:
              gd

              -w W
              -x X
        '''.format(progname, ' ' if progname else '' )))
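# The pattern under test, reduced to a sketch: parents=[...] copies the
# parent's actions (and group structure) into the child, so parents must be
# built with add_help=False or the inherited -h/--help would conflict:
#
#     parent = argparse.ArgumentParser(add_help=False)
#     parent.add_argument('--w')
#     child = argparse.ArgumentParser(parents=[parent])
#     child.parse_args(['--w', '3'])  # Namespace(w='3')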
# ==============================
# Mutually exclusive group tests
# ==============================
class TestMutuallyExclusiveGroupErrors(TestCase):
def test_invalid_add_argument_group(self):
parser = ErrorRaisingArgumentParser()
raises = self.assertRaises
raises(TypeError, parser.add_mutually_exclusive_group, title='foo')
def test_invalid_add_argument(self):
parser = ErrorRaisingArgumentParser()
group = parser.add_mutually_exclusive_group()
add_argument = group.add_argument
raises = self.assertRaises
raises(ValueError, add_argument, '--foo', required=True)
raises(ValueError, add_argument, 'bar')
raises(ValueError, add_argument, 'bar', nargs='+')
raises(ValueError, add_argument, 'bar', nargs=1)
raises(ValueError, add_argument, 'bar', nargs=argparse.PARSER)
def test_help(self):
parser = ErrorRaisingArgumentParser(prog='PROG')
group1 = parser.add_mutually_exclusive_group()
group1.add_argument('--foo', action='store_true')
group1.add_argument('--bar', action='store_false')
group2 = parser.add_mutually_exclusive_group()
group2.add_argument('--soup', action='store_true')
group2.add_argument('--nuts', action='store_false')
        expected = '''\
            usage: PROG [-h] [--foo | --bar] [--soup | --nuts]

            optional arguments:
              -h, --help  show this help message and exit
              --foo
              --bar
              --soup
              --nuts
            '''
self.assertEqual(parser.format_help(), textwrap.dedent(expected))
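# Shared driver for the concrete cases below: each one supplies
# get_parser(required), plus `failures`, `successes`,
# `successes_when_not_required`, `usage_when_required`,
# `usage_when_not_required` and `help` attributes, and MEMixin turns them
# into parse/usage/help assertions for both required=True and False.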
class MEMixin(object):
def test_failures_when_not_required(self):
parse_args = self.get_parser(required=False).parse_args
error = ArgumentParserError
for args_string in self.failures:
self.assertRaises(error, parse_args, args_string.split())
def test_failures_when_required(self):
parse_args = self.get_parser(required=True).parse_args
error = ArgumentParserError
for args_string in self.failures + ['']:
self.assertRaises(error, parse_args, args_string.split())
def test_successes_when_not_required(self):
parse_args = self.get_parser(required=False).parse_args
successes = self.successes + self.successes_when_not_required
for args_string, expected_ns in successes:
actual_ns = parse_args(args_string.split())
self.assertEqual(actual_ns, expected_ns)
def test_successes_when_required(self):
parse_args = self.get_parser(required=True).parse_args
for args_string, expected_ns in self.successes:
actual_ns = parse_args(args_string.split())
self.assertEqual(actual_ns, expected_ns)
def test_usage_when_not_required(self):
format_usage = self.get_parser(required=False).format_usage
expected_usage = self.usage_when_not_required
self.assertEqual(format_usage(), textwrap.dedent(expected_usage))
def test_usage_when_required(self):
format_usage = self.get_parser(required=True).format_usage
expected_usage = self.usage_when_required
self.assertEqual(format_usage(), textwrap.dedent(expected_usage))
def test_help_when_not_required(self):
format_help = self.get_parser(required=False).format_help
help = self.usage_when_not_required + self.help
self.assertEqual(format_help(), textwrap.dedent(help))
def test_help_when_required(self):
format_help = self.get_parser(required=True).format_help
help = self.usage_when_required + self.help
self.assertEqual(format_help(), textwrap.dedent(help))
class TestMutuallyExclusiveSimple(MEMixin, TestCase):
def get_parser(self, required=None):
parser = ErrorRaisingArgumentParser(prog='PROG')
group = parser.add_mutually_exclusive_group(required=required)
group.add_argument('--bar', help='bar help')
group.add_argument('--baz', nargs='?', const='Z', help='baz help')
return parser
failures = ['--bar X --baz Y', '--bar X --baz']
successes = [
('--bar X', NS(bar='X', baz=None)),
('--bar X --bar Z', NS(bar='Z', baz=None)),
('--baz Y', NS(bar=None, baz='Y')),
('--baz', NS(bar=None, baz='Z')),
]
successes_when_not_required = [
('', NS(bar=None, baz=None)),
]
usage_when_not_required = '''\
usage: PROG [-h] [--bar BAR | --baz [BAZ]]
'''
usage_when_required = '''\
usage: PROG [-h] (--bar BAR | --baz [BAZ])
'''
    help = '''\

    optional arguments:
      -h, --help   show this help message and exit
      --bar BAR    bar help
      --baz [BAZ]  baz help
    '''
class TestMutuallyExclusiveLong(MEMixin, TestCase):
def get_parser(self, required=None):
parser = ErrorRaisingArgumentParser(prog='PROG')
parser.add_argument('--abcde', help='abcde help')
parser.add_argument('--fghij', help='fghij help')
group = parser.add_mutually_exclusive_group(required=required)
group.add_argument('--klmno', help='klmno help')
group.add_argument('--pqrst', help='pqrst help')
return parser
failures = ['--klmno X --pqrst Y']
successes = [
('--klmno X', NS(abcde=None, fghij=None, klmno='X', pqrst=None)),
('--abcde Y --klmno X',
NS(abcde='Y', fghij=None, klmno='X', pqrst=None)),
('--pqrst X', NS(abcde=None, fghij=None, klmno=None, pqrst='X')),
('--pqrst X --fghij Y',
NS(abcde=None, fghij='Y', klmno=None, pqrst='X')),
]
successes_when_not_required = [
('', NS(abcde=None, fghij=None, klmno=None, pqrst=None)),
]
    usage_when_not_required = '''\
    usage: PROG [-h] [--abcde ABCDE] [--fghij FGHIJ]
                [--klmno KLMNO | --pqrst PQRST]
    '''
    usage_when_required = '''\
    usage: PROG [-h] [--abcde ABCDE] [--fghij FGHIJ]
                (--klmno KLMNO | --pqrst PQRST)
    '''
    help = '''\

    optional arguments:
      -h, --help     show this help message and exit
      --abcde ABCDE  abcde help
      --fghij FGHIJ  fghij help
      --klmno KLMNO  klmno help
      --pqrst PQRST  pqrst help
    '''
class TestMutuallyExclusiveFirstSuppressed(MEMixin, TestCase):
def get_parser(self, required):
parser = ErrorRaisingArgumentParser(prog='PROG')
group = parser.add_mutually_exclusive_group(required=required)
group.add_argument('-x', help=argparse.SUPPRESS)
group.add_argument('-y', action='store_false', help='y help')
return parser
failures = ['-x X -y']
successes = [
('-x X', NS(x='X', y=True)),
('-x X -x Y', NS(x='Y', y=True)),
('-y', NS(x=None, y=False)),
]
successes_when_not_required = [
('', NS(x=None, y=True)),
]
usage_when_not_required = '''\
usage: PROG [-h] [-y]
'''
usage_when_required = '''\
usage: PROG [-h] -y
'''
    help = '''\

    optional arguments:
      -h, --help  show this help message and exit
      -y          y help
    '''
class TestMutuallyExclusiveManySuppressed(MEMixin, TestCase):
def get_parser(self, required):
parser = ErrorRaisingArgumentParser(prog='PROG')
group = parser.add_mutually_exclusive_group(required=required)
add = group.add_argument
add('--spam', action='store_true', help=argparse.SUPPRESS)
add('--badger', action='store_false', help=argparse.SUPPRESS)
add('--bladder', help=argparse.SUPPRESS)
return parser
failures = [
'--spam --badger',
'--badger --bladder B',
'--bladder B --spam',
]
successes = [
('--spam', NS(spam=True, badger=True, bladder=None)),
('--badger', NS(spam=False, badger=False, bladder=None)),
('--bladder B', NS(spam=False, badger=True, bladder='B')),
('--spam --spam', NS(spam=True, badger=True, bladder=None)),
]
successes_when_not_required = [
('', NS(spam=False, badger=True, bladder=None)),
]
usage_when_required = usage_when_not_required = '''\
usage: PROG [-h]
'''
    help = '''\

    optional arguments:
      -h, --help  show this help message and exit
    '''
class TestMutuallyExclusiveOptionalAndPositional(MEMixin, TestCase):
def get_parser(self, required):
parser = ErrorRaisingArgumentParser(prog='PROG')
group = parser.add_mutually_exclusive_group(required=required)
group.add_argument('--foo', action='store_true', help='FOO')
group.add_argument('--spam', help='SPAM')
group.add_argument('badger', nargs='*', default='X', help='BADGER')
return parser
failures = [
'--foo --spam S',
'--spam S X',
'X --foo',
'X Y Z --spam S',
'--foo X Y',
]
successes = [
('--foo', NS(foo=True, spam=None, badger='X')),
('--spam S', NS(foo=False, spam='S', badger='X')),
('X', NS(foo=False, spam=None, badger=['X'])),
('X Y Z', NS(foo=False, spam=None, badger=['X', 'Y', 'Z'])),
]
successes_when_not_required = [
('', NS(foo=False, spam=None, badger='X')),
]
usage_when_not_required = '''\
usage: PROG [-h] [--foo | --spam SPAM | badger [badger ...]]
'''
usage_when_required = '''\
usage: PROG [-h] (--foo | --spam SPAM | badger [badger ...])
'''
    help = '''\

    positional arguments:
      badger       BADGER

    optional arguments:
      -h, --help   show this help message and exit
      --foo        FOO
      --spam SPAM  SPAM
    '''
class TestMutuallyExclusiveOptionalsMixed(MEMixin, TestCase):
def get_parser(self, required):
parser = ErrorRaisingArgumentParser(prog='PROG')
parser.add_argument('-x', action='store_true', help='x help')
group = parser.add_mutually_exclusive_group(required=required)
group.add_argument('-a', action='store_true', help='a help')
group.add_argument('-b', action='store_true', help='b help')
parser.add_argument('-y', action='store_true', help='y help')
group.add_argument('-c', action='store_true', help='c help')
return parser
failures = ['-a -b', '-b -c', '-a -c', '-a -b -c']
successes = [
('-a', NS(a=True, b=False, c=False, x=False, y=False)),
('-b', NS(a=False, b=True, c=False, x=False, y=False)),
('-c', NS(a=False, b=False, c=True, x=False, y=False)),
('-a -x', NS(a=True, b=False, c=False, x=True, y=False)),
('-y -b', NS(a=False, b=True, c=False, x=False, y=True)),
('-x -y -c', NS(a=False, b=False, c=True, x=True, y=True)),
]
successes_when_not_required = [
('', NS(a=False, b=False, c=False, x=False, y=False)),
('-x', NS(a=False, b=False, c=False, x=True, y=False)),
('-y', NS(a=False, b=False, c=False, x=False, y=True)),
]
usage_when_required = usage_when_not_required = '''\
usage: PROG [-h] [-x] [-a] [-b] [-y] [-c]
'''
    help = '''\

    optional arguments:
      -h, --help  show this help message and exit
      -x          x help
      -a          a help
      -b          b help
      -y          y help
      -c          c help
    '''
class TestMutuallyExclusiveInGroup(MEMixin, TestCase):
def get_parser(self, required=None):
parser = ErrorRaisingArgumentParser(prog='PROG')
titled_group = parser.add_argument_group(
title='Titled group', description='Group description')
mutex_group = \
titled_group.add_mutually_exclusive_group(required=required)
mutex_group.add_argument('--bar', help='bar help')
mutex_group.add_argument('--baz', help='baz help')
return parser
failures = ['--bar X --baz Y', '--baz X --bar Y']
successes = [
('--bar X', NS(bar='X', baz=None)),
('--baz Y', NS(bar=None, baz='Y')),
]
successes_when_not_required = [
('', NS(bar=None, baz=None)),
]
usage_when_not_required = '''\
usage: PROG [-h] [--bar BAR | --baz BAZ]
'''
usage_when_required = '''\
usage: PROG [-h] (--bar BAR | --baz BAZ)
'''
    help = '''\

    optional arguments:
      -h, --help  show this help message and exit

    Titled group:
      Group description

      --bar BAR   bar help
      --baz BAZ   baz help
    '''
class TestMutuallyExclusiveOptionalsAndPositionalsMixed(MEMixin, TestCase):
def get_parser(self, required):
parser = ErrorRaisingArgumentParser(prog='PROG')
parser.add_argument('x', help='x help')
parser.add_argument('-y', action='store_true', help='y help')
group = parser.add_mutually_exclusive_group(required=required)
group.add_argument('a', nargs='?', help='a help')
group.add_argument('-b', action='store_true', help='b help')
group.add_argument('-c', action='store_true', help='c help')
return parser
failures = ['X A -b', '-b -c', '-c X A']
successes = [
('X A', NS(a='A', b=False, c=False, x='X', y=False)),
('X -b', NS(a=None, b=True, c=False, x='X', y=False)),
('X -c', NS(a=None, b=False, c=True, x='X', y=False)),
('X A -y', NS(a='A', b=False, c=False, x='X', y=True)),
('X -y -b', NS(a=None, b=True, c=False, x='X', y=True)),
]
successes_when_not_required = [
('X', NS(a=None, b=False, c=False, x='X', y=False)),
('X -y', NS(a=None, b=False, c=False, x='X', y=True)),
]
usage_when_required = usage_when_not_required = '''\
usage: PROG [-h] [-y] [-b] [-c] x [a]
'''
    help = '''\

    positional arguments:
      x           x help
      a           a help

    optional arguments:
      -h, --help  show this help message and exit
      -y          y help
      -b          b help
      -c          c help
    '''
# =================================================
# Mutually exclusive group in parent parser tests
# =================================================
class MEPBase(object):
def get_parser(self, required=None):
parent = super(MEPBase, self).get_parser(required=required)
parser = ErrorRaisingArgumentParser(
prog=parent.prog, add_help=False, parents=[parent])
return parser
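# MEPBase reruns every mutually-exclusive case above through a parser that
# inherits the group via parents=[...], checking that group semantics
# survive the parent-parser copy.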
class TestMutuallyExclusiveGroupErrorsParent(
MEPBase, TestMutuallyExclusiveGroupErrors):
pass
class TestMutuallyExclusiveSimpleParent(
MEPBase, TestMutuallyExclusiveSimple):
pass
class TestMutuallyExclusiveLongParent(
MEPBase, TestMutuallyExclusiveLong):
pass
class TestMutuallyExclusiveFirstSuppressedParent(
MEPBase, TestMutuallyExclusiveFirstSuppressed):
pass
class TestMutuallyExclusiveManySuppressedParent(
MEPBase, TestMutuallyExclusiveManySuppressed):
pass
class TestMutuallyExclusiveOptionalAndPositionalParent(
MEPBase, TestMutuallyExclusiveOptionalAndPositional):
pass
class TestMutuallyExclusiveOptionalsMixedParent(
MEPBase, TestMutuallyExclusiveOptionalsMixed):
pass
class TestMutuallyExclusiveOptionalsAndPositionalsMixedParent(
MEPBase, TestMutuallyExclusiveOptionalsAndPositionalsMixed):
pass
# =================
# Set default tests
# =================
class TestSetDefaults(TestCase):
def test_set_defaults_no_args(self):
parser = ErrorRaisingArgumentParser()
parser.set_defaults(x='foo')
parser.set_defaults(y='bar', z=1)
self.assertEqual(NS(x='foo', y='bar', z=1),
parser.parse_args([]))
self.assertEqual(NS(x='foo', y='bar', z=1),
parser.parse_args([], NS()))
self.assertEqual(NS(x='baz', y='bar', z=1),
parser.parse_args([], NS(x='baz')))
self.assertEqual(NS(x='baz', y='bar', z=2),
parser.parse_args([], NS(x='baz', z=2)))
def test_set_defaults_with_args(self):
parser = ErrorRaisingArgumentParser()
parser.set_defaults(x='foo', y='bar')
parser.add_argument('-x', default='xfoox')
self.assertEqual(NS(x='xfoox', y='bar'),
parser.parse_args([]))
self.assertEqual(NS(x='xfoox', y='bar'),
parser.parse_args([], NS()))
self.assertEqual(NS(x='baz', y='bar'),
parser.parse_args([], NS(x='baz')))
self.assertEqual(NS(x='1', y='bar'),
parser.parse_args('-x 1'.split()))
self.assertEqual(NS(x='1', y='bar'),
parser.parse_args('-x 1'.split(), NS()))
self.assertEqual(NS(x='1', y='bar'),
parser.parse_args('-x 1'.split(), NS(x='baz')))
def test_set_defaults_subparsers(self):
parser = ErrorRaisingArgumentParser()
parser.set_defaults(x='foo')
subparsers = parser.add_subparsers()
parser_a = subparsers.add_parser('a')
parser_a.set_defaults(y='bar')
self.assertEqual(NS(x='foo', y='bar'),
parser.parse_args('a'.split()))
def test_set_defaults_parents(self):
parent = ErrorRaisingArgumentParser(add_help=False)
parent.set_defaults(x='foo')
parser = ErrorRaisingArgumentParser(parents=[parent])
self.assertEqual(NS(x='foo'), parser.parse_args([]))
def test_set_defaults_same_as_add_argument(self):
parser = ErrorRaisingArgumentParser()
parser.set_defaults(w='W', x='X', y='Y', z='Z')
parser.add_argument('-w')
parser.add_argument('-x', default='XX')
parser.add_argument('y', nargs='?')
parser.add_argument('z', nargs='?', default='ZZ')
# defaults set previously
self.assertEqual(NS(w='W', x='XX', y='Y', z='ZZ'),
parser.parse_args([]))
# reset defaults
parser.set_defaults(w='WW', x='X', y='YY', z='Z')
self.assertEqual(NS(w='WW', x='X', y='YY', z='Z'),
parser.parse_args([]))
def test_set_defaults_same_as_add_argument_group(self):
parser = ErrorRaisingArgumentParser()
parser.set_defaults(w='W', x='X', y='Y', z='Z')
group = parser.add_argument_group('foo')
group.add_argument('-w')
group.add_argument('-x', default='XX')
group.add_argument('y', nargs='?')
group.add_argument('z', nargs='?', default='ZZ')
# defaults set previously
self.assertEqual(NS(w='W', x='XX', y='Y', z='ZZ'),
parser.parse_args([]))
# reset defaults
parser.set_defaults(w='WW', x='X', y='YY', z='Z')
self.assertEqual(NS(w='WW', x='X', y='YY', z='Z'),
parser.parse_args([]))
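# What the assertions above pin down, in order of precedence: an attribute
# already present on the namespace passed to parse_args() always wins;
# otherwise the most recent of set_defaults(...) and
# add_argument(..., default=...) supplies the value.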
# =================
# Get default tests
# =================
class TestGetDefault(TestCase):
def test_get_default(self):
parser = ErrorRaisingArgumentParser()
self.assertEqual(None, parser.get_default("foo"))
self.assertEqual(None, parser.get_default("bar"))
parser.add_argument("--foo")
self.assertEqual(None, parser.get_default("foo"))
self.assertEqual(None, parser.get_default("bar"))
parser.add_argument("--bar", type=int, default=42)
self.assertEqual(None, parser.get_default("foo"))
self.assertEqual(42, parser.get_default("bar"))
parser.set_defaults(foo="badger")
self.assertEqual("badger", parser.get_default("foo"))
self.assertEqual(42, parser.get_default("bar"))
# ==========================
# Namespace 'contains' tests
# ==========================
class TestNamespaceContainsSimple(TestCase):
def test_empty(self):
ns = argparse.Namespace()
self.assertEqual('' in ns, False)
self.assertEqual('' not in ns, True)
self.assertEqual('x' in ns, False)
def test_non_empty(self):
ns = argparse.Namespace(x=1, y=2)
self.assertEqual('x' in ns, True)
self.assertEqual('x' not in ns, False)
self.assertEqual('y' in ns, True)
self.assertEqual('' in ns, False)
self.assertEqual('xx' in ns, False)
self.assertEqual('z' in ns, False)
# =====================
# Help formatting tests
# =====================
class TestHelpFormattingMetaclass(type):
def __init__(cls, name, bases, bodydict):
if name == 'HelpTestCase':
return
class AddTests(object):
def __init__(self, test_class, func_suffix, std_name):
self.func_suffix = func_suffix
self.std_name = std_name
for test_func in [self.test_format,
self.test_print,
self.test_print_file]:
test_name = '%s_%s' % (test_func.__name__, func_suffix)
def test_wrapper(self, test_func=test_func):
test_func(self)
try:
test_wrapper.__name__ = test_name
except TypeError:
pass
setattr(test_class, test_name, test_wrapper)
def _get_parser(self, tester):
parser = argparse.ArgumentParser(
*tester.parser_signature.args,
**tester.parser_signature.kwargs)
for argument_sig in getattr(tester, 'argument_signatures', []):
parser.add_argument(*argument_sig.args,
**argument_sig.kwargs)
group_sigs = getattr(tester, 'argument_group_signatures', [])
for group_sig, argument_sigs in group_sigs:
group = parser.add_argument_group(*group_sig.args,
**group_sig.kwargs)
for argument_sig in argument_sigs:
group.add_argument(*argument_sig.args,
**argument_sig.kwargs)
subparsers_sigs = getattr(tester, 'subparsers_signatures', [])
if subparsers_sigs:
subparsers = parser.add_subparsers()
for subparser_sig in subparsers_sigs:
subparsers.add_parser(*subparser_sig.args,
**subparser_sig.kwargs)
return parser
def _test(self, tester, parser_text):
expected_text = getattr(tester, self.func_suffix)
expected_text = textwrap.dedent(expected_text)
if expected_text != parser_text:
print(repr(expected_text))
print(repr(parser_text))
for char1, char2 in zip(expected_text, parser_text):
if char1 != char2:
print('first diff: %r %r' % (char1, char2))
break
tester.assertEqual(expected_text, parser_text)
def test_format(self, tester):
parser = self._get_parser(tester)
format = getattr(parser, 'format_%s' % self.func_suffix)
self._test(tester, format())
def test_print(self, tester):
parser = self._get_parser(tester)
print_ = getattr(parser, 'print_%s' % self.func_suffix)
old_stream = getattr(sys, self.std_name)
setattr(sys, self.std_name, StdIOBuffer())
try:
print_()
parser_text = getattr(sys, self.std_name).getvalue()
finally:
setattr(sys, self.std_name, old_stream)
self._test(tester, parser_text)
def test_print_file(self, tester):
parser = self._get_parser(tester)
print_ = getattr(parser, 'print_%s' % self.func_suffix)
sfile = StdIOBuffer()
print_(sfile)
parser_text = sfile.getvalue()
self._test(tester, parser_text)
# add tests for {format,print}_{usage,help}
for func_suffix, std_name in [('usage', 'stdout'),
('help', 'stdout')]:
AddTests(cls, func_suffix, std_name)
    bases = (TestCase,)
HelpTestCase = TestHelpFormattingMetaclass('HelpTestCase', bases, {})
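# Each HelpTestCase subclass gets six generated tests,
# test_{format,print,print_file}_{usage,help}, comparing a parser built from
# its *_signature(s) attributes against the dedented `usage`/`help` strings.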
class TestHelpBiggerOptionals(HelpTestCase):
"""Make sure that argument help aligns when options are longer"""
parser_signature = Sig(prog='PROG', description='DESCRIPTION',
epilog='EPILOG')
argument_signatures = [
Sig('-v', '--version', action='version', version='0.1'),
Sig('-x', action='store_true', help='X HELP'),
Sig('--y', help='Y HELP'),
Sig('foo', help='FOO HELP'),
Sig('bar', help='BAR HELP'),
]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [-v] [-x] [--y Y] foo bar
'''
    help = usage + '''\

        DESCRIPTION

        positional arguments:
          foo            FOO HELP
          bar            BAR HELP

        optional arguments:
          -h, --help     show this help message and exit
          -v, --version  show program's version number and exit
          -x             X HELP
          --y Y          Y HELP

        EPILOG
        '''
version = '''\
0.1
'''
class TestShortColumns(HelpTestCase):
    '''Test an extremely small number of columns.

    TestCase prevents "COLUMNS" from being too small in the tests themselves,
    but no exception should be raised in such a case -- only an ugly rendering.
    '''
def setUp(self):
env = support.EnvironmentVarGuard()
env.set("COLUMNS", '15')
self.addCleanup(env.__exit__)
parser_signature = TestHelpBiggerOptionals.parser_signature
argument_signatures = TestHelpBiggerOptionals.argument_signatures
argument_group_signatures = TestHelpBiggerOptionals.argument_group_signatures
    usage = '''\
        usage: PROG
               [-h]
               [-v]
               [-x]
               [--y Y]
               foo
               bar
        '''
    help = usage + '''\

        DESCRIPTION

        positional arguments:
          foo
            FOO HELP
          bar
            BAR HELP

        optional arguments:
          -h, --help
            show this
            help
            message and
            exit
          -v, --version
            show
            program's
            version
            number and
            exit
          -x
            X HELP
          --y Y
            Y HELP

        EPILOG
        '''
version = TestHelpBiggerOptionals.version
class TestHelpBiggerOptionalGroups(HelpTestCase):
"""Make sure that argument help aligns when options are longer"""
parser_signature = Sig(prog='PROG', description='DESCRIPTION',
epilog='EPILOG')
argument_signatures = [
Sig('-v', '--version', action='version', version='0.1'),
Sig('-x', action='store_true', help='X HELP'),
Sig('--y', help='Y HELP'),
Sig('foo', help='FOO HELP'),
Sig('bar', help='BAR HELP'),
]
argument_group_signatures = [
(Sig('GROUP TITLE', description='GROUP DESCRIPTION'), [
Sig('baz', help='BAZ HELP'),
Sig('-z', nargs='+', help='Z HELP')]),
]
usage = '''\
usage: PROG [-h] [-v] [-x] [--y Y] [-z Z [Z ...]] foo bar baz
'''
help = usage + '''\
DESCRIPTION
positional arguments:
foo FOO HELP
bar BAR HELP
optional arguments:
-h, --help show this help message and exit
-v, --version show program's version number and exit
-x X HELP
--y Y Y HELP
GROUP TITLE:
GROUP DESCRIPTION
baz BAZ HELP
-z Z [Z ...] Z HELP
EPILOG
'''
version = '''\
0.1
'''
class TestHelpBiggerPositionals(HelpTestCase):
"""Make sure that help aligns when arguments are longer"""
parser_signature = Sig(usage='USAGE', description='DESCRIPTION')
argument_signatures = [
Sig('-x', action='store_true', help='X HELP'),
Sig('--y', help='Y HELP'),
Sig('ekiekiekifekang', help='EKI HELP'),
Sig('bar', help='BAR HELP'),
]
argument_group_signatures = []
usage = '''\
usage: USAGE
'''
help = usage + '''\
DESCRIPTION
positional arguments:
ekiekiekifekang EKI HELP
bar BAR HELP
optional arguments:
-h, --help show this help message and exit
-x X HELP
--y Y Y HELP
'''
version = ''
class TestHelpReformatting(HelpTestCase):
"""Make sure that text after short names starts on the first line"""
parser_signature = Sig(
prog='PROG',
description=' oddly formatted\n'
'description\n'
'\n'
'that is so long that it should go onto multiple '
'lines when wrapped')
argument_signatures = [
Sig('-x', metavar='XX', help='oddly\n'
' formatted -x help'),
Sig('y', metavar='yyy', help='normal y help'),
]
argument_group_signatures = [
(Sig('title', description='\n'
' oddly formatted group\n'
'\n'
'description'),
[Sig('-a', action='store_true',
help=' oddly \n'
'formatted -a help \n'
' again, so long that it should be wrapped over '
'multiple lines')]),
]
usage = '''\
usage: PROG [-h] [-x XX] [-a] yyy
'''
help = usage + '''\
oddly formatted description that is so long that it should go onto \
multiple
lines when wrapped
positional arguments:
yyy normal y help
optional arguments:
-h, --help show this help message and exit
-x XX oddly formatted -x help
title:
oddly formatted group description
-a oddly formatted -a help again, so long that it should \
be wrapped
over multiple lines
'''
version = ''
class TestHelpWrappingShortNames(HelpTestCase):
"""Make sure that text after short names starts on the first line"""
    parser_signature = Sig(prog='PROG', description='D\nD' * 30)
argument_signatures = [
Sig('-x', metavar='XX', help='XHH HX' * 20),
Sig('y', metavar='yyy', help='YH YH' * 20),
]
argument_group_signatures = [
(Sig('ALPHAS'), [
Sig('-a', action='store_true', help='AHHH HHA' * 10)]),
]
usage = '''\
usage: PROG [-h] [-x XX] [-a] yyy
'''
help = usage + '''\
D DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD \
DD DD DD
DD DD DD DD D
positional arguments:
yyy YH YHYH YHYH YHYH YHYH YHYH YHYH YHYH YHYH YHYH YHYH \
YHYH YHYH
YHYH YHYH YHYH YHYH YHYH YHYH YHYH YH
optional arguments:
-h, --help show this help message and exit
-x XX XHH HXXHH HXXHH HXXHH HXXHH HXXHH HXXHH HXXHH HXXHH \
HXXHH HXXHH
HXXHH HXXHH HXXHH HXXHH HXXHH HXXHH HXXHH HXXHH HXXHH HX
ALPHAS:
-a AHHH HHAAHHH HHAAHHH HHAAHHH HHAAHHH HHAAHHH HHAAHHH \
HHAAHHH
HHAAHHH HHAAHHH HHA
'''
version = ''
class TestHelpWrappingLongNames(HelpTestCase):
"""Make sure that text after long names starts on the next line"""
    parser_signature = Sig(usage='USAGE', description='D D' * 30)
argument_signatures = [
Sig('-v', '--version', action='version', version='V V' * 30),
Sig('-x', metavar='X' * 25, help='XH XH' * 20),
Sig('y', metavar='y' * 25, help='YH YH' * 20),
]
argument_group_signatures = [
(Sig('ALPHAS'), [
Sig('-a', metavar='A' * 25, help='AH AH' * 20),
Sig('z', metavar='z' * 25, help='ZH ZH' * 20)]),
]
usage = '''\
usage: USAGE
'''
help = usage + '''\
D DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD \
DD DD DD
DD DD DD DD D
positional arguments:
yyyyyyyyyyyyyyyyyyyyyyyyy
YH YHYH YHYH YHYH YHYH YHYH YHYH YHYH YHYH \
YHYH YHYH
YHYH YHYH YHYH YHYH YHYH YHYH YHYH YHYH YHYH YH
optional arguments:
-h, --help show this help message and exit
-v, --version show program's version number and exit
-x XXXXXXXXXXXXXXXXXXXXXXXXX
XH XHXH XHXH XHXH XHXH XHXH XHXH XHXH XHXH \
XHXH XHXH
XHXH XHXH XHXH XHXH XHXH XHXH XHXH XHXH XHXH XH
ALPHAS:
-a AAAAAAAAAAAAAAAAAAAAAAAAA
AH AHAH AHAH AHAH AHAH AHAH AHAH AHAH AHAH \
AHAH AHAH
AHAH AHAH AHAH AHAH AHAH AHAH AHAH AHAH AHAH AH
zzzzzzzzzzzzzzzzzzzzzzzzz
ZH ZHZH ZHZH ZHZH ZHZH ZHZH ZHZH ZHZH ZHZH \
ZHZH ZHZH
ZHZH ZHZH ZHZH ZHZH ZHZH ZHZH ZHZH ZHZH ZHZH ZH
'''
version = '''\
V VV VV VV VV VV VV VV VV VV VV VV VV VV VV VV VV VV VV VV VV VV VV \
VV VV VV
VV VV VV VV V
'''
class TestHelpUsage(HelpTestCase):
"""Test basic usage messages"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('-w', nargs='+', help='w'),
Sig('-x', nargs='*', help='x'),
Sig('a', help='a'),
Sig('b', help='b', nargs=2),
Sig('c', help='c', nargs='?'),
]
argument_group_signatures = [
(Sig('group'), [
Sig('-y', nargs='?', help='y'),
Sig('-z', nargs=3, help='z'),
Sig('d', help='d', nargs='*'),
Sig('e', help='e', nargs='+'),
])
]
usage = '''\
usage: PROG [-h] [-w W [W ...]] [-x [X [X ...]]] [-y [Y]] [-z Z Z Z]
a b b [c] [d [d ...]] e [e ...]
'''
help = usage + '''\
positional arguments:
a a
b b
c c
optional arguments:
-h, --help show this help message and exit
-w W [W ...] w
-x [X [X ...]] x
group:
-y [Y] y
-z Z Z Z z
d d
e e
'''
version = ''
class TestHelpOnlyUserGroups(HelpTestCase):
"""Test basic usage messages"""
parser_signature = Sig(prog='PROG', add_help=False)
argument_signatures = []
argument_group_signatures = [
(Sig('xxxx'), [
Sig('-x', help='x'),
Sig('a', help='a'),
]),
(Sig('yyyy'), [
Sig('b', help='b'),
Sig('-y', help='y'),
]),
]
usage = '''\
usage: PROG [-x X] [-y Y] a b
'''
help = usage + '''\
xxxx:
-x X x
a a
yyyy:
b b
-y Y y
'''
version = ''
class TestHelpUsageLongProg(HelpTestCase):
"""Test usage messages where the prog is long"""
parser_signature = Sig(prog='P' * 60)
argument_signatures = [
Sig('-w', metavar='W'),
Sig('-x', metavar='X'),
Sig('a'),
Sig('b'),
]
argument_group_signatures = []
usage = '''\
usage: PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP
[-h] [-w W] [-x X] a b
'''
help = usage + '''\
positional arguments:
a
b
optional arguments:
-h, --help show this help message and exit
-w W
-x X
'''
version = ''
class TestHelpUsageLongProgOptionsWrap(HelpTestCase):
"""Test usage messages where the prog is long and the optionals wrap"""
parser_signature = Sig(prog='P' * 60)
argument_signatures = [
Sig('-w', metavar='W' * 25),
Sig('-x', metavar='X' * 25),
Sig('-y', metavar='Y' * 25),
Sig('-z', metavar='Z' * 25),
Sig('a'),
Sig('b'),
]
argument_group_signatures = []
usage = '''\
usage: PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP
[-h] [-w WWWWWWWWWWWWWWWWWWWWWWWWW] \
[-x XXXXXXXXXXXXXXXXXXXXXXXXX]
[-y YYYYYYYYYYYYYYYYYYYYYYYYY] [-z ZZZZZZZZZZZZZZZZZZZZZZZZZ]
a b
'''
help = usage + '''\
positional arguments:
a
b
optional arguments:
-h, --help show this help message and exit
-w WWWWWWWWWWWWWWWWWWWWWWWWW
-x XXXXXXXXXXXXXXXXXXXXXXXXX
-y YYYYYYYYYYYYYYYYYYYYYYYYY
-z ZZZZZZZZZZZZZZZZZZZZZZZZZ
'''
version = ''
class TestHelpUsageLongProgPositionalsWrap(HelpTestCase):
"""Test usage messages where the prog is long and the positionals wrap"""
parser_signature = Sig(prog='P' * 60, add_help=False)
argument_signatures = [
Sig('a' * 25),
Sig('b' * 25),
Sig('c' * 25),
]
argument_group_signatures = []
usage = '''\
usage: PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP
aaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbb
ccccccccccccccccccccccccc
'''
help = usage + '''\
positional arguments:
aaaaaaaaaaaaaaaaaaaaaaaaa
bbbbbbbbbbbbbbbbbbbbbbbbb
ccccccccccccccccccccccccc
'''
version = ''
class TestHelpUsageOptionalsWrap(HelpTestCase):
"""Test usage messages where the optionals wrap"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('-w', metavar='W' * 25),
Sig('-x', metavar='X' * 25),
Sig('-y', metavar='Y' * 25),
Sig('-z', metavar='Z' * 25),
Sig('a'),
Sig('b'),
Sig('c'),
]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [-w WWWWWWWWWWWWWWWWWWWWWWWWW] \
[-x XXXXXXXXXXXXXXXXXXXXXXXXX]
[-y YYYYYYYYYYYYYYYYYYYYYYYYY] \
[-z ZZZZZZZZZZZZZZZZZZZZZZZZZ]
a b c
'''
help = usage + '''\
positional arguments:
a
b
c
optional arguments:
-h, --help show this help message and exit
-w WWWWWWWWWWWWWWWWWWWWWWWWW
-x XXXXXXXXXXXXXXXXXXXXXXXXX
-y YYYYYYYYYYYYYYYYYYYYYYYYY
-z ZZZZZZZZZZZZZZZZZZZZZZZZZ
'''
version = ''
class TestHelpUsagePositionalsWrap(HelpTestCase):
"""Test usage messages where the positionals wrap"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('-x'),
Sig('-y'),
Sig('-z'),
Sig('a' * 25),
Sig('b' * 25),
Sig('c' * 25),
]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [-x X] [-y Y] [-z Z]
aaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbb
ccccccccccccccccccccccccc
'''
help = usage + '''\
positional arguments:
aaaaaaaaaaaaaaaaaaaaaaaaa
bbbbbbbbbbbbbbbbbbbbbbbbb
ccccccccccccccccccccccccc
optional arguments:
-h, --help show this help message and exit
-x X
-y Y
-z Z
'''
version = ''
class TestHelpUsageOptionalsPositionalsWrap(HelpTestCase):
"""Test usage messages where the optionals and positionals wrap"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('-x', metavar='X' * 25),
Sig('-y', metavar='Y' * 25),
Sig('-z', metavar='Z' * 25),
Sig('a' * 25),
Sig('b' * 25),
Sig('c' * 25),
]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [-x XXXXXXXXXXXXXXXXXXXXXXXXX] \
[-y YYYYYYYYYYYYYYYYYYYYYYYYY]
[-z ZZZZZZZZZZZZZZZZZZZZZZZZZ]
aaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbb
ccccccccccccccccccccccccc
'''
help = usage + '''\
positional arguments:
aaaaaaaaaaaaaaaaaaaaaaaaa
bbbbbbbbbbbbbbbbbbbbbbbbb
ccccccccccccccccccccccccc
optional arguments:
-h, --help show this help message and exit
-x XXXXXXXXXXXXXXXXXXXXXXXXX
-y YYYYYYYYYYYYYYYYYYYYYYYYY
-z ZZZZZZZZZZZZZZZZZZZZZZZZZ
'''
version = ''
class TestHelpUsageOptionalsOnlyWrap(HelpTestCase):
"""Test usage messages where there are only optionals and they wrap"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('-x', metavar='X' * 25),
Sig('-y', metavar='Y' * 25),
Sig('-z', metavar='Z' * 25),
]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [-x XXXXXXXXXXXXXXXXXXXXXXXXX] \
[-y YYYYYYYYYYYYYYYYYYYYYYYYY]
[-z ZZZZZZZZZZZZZZZZZZZZZZZZZ]
'''
help = usage + '''\
optional arguments:
-h, --help show this help message and exit
-x XXXXXXXXXXXXXXXXXXXXXXXXX
-y YYYYYYYYYYYYYYYYYYYYYYYYY
-z ZZZZZZZZZZZZZZZZZZZZZZZZZ
'''
version = ''
class TestHelpUsagePositionalsOnlyWrap(HelpTestCase):
"""Test usage messages where there are only positionals and they wrap"""
parser_signature = Sig(prog='PROG', add_help=False)
argument_signatures = [
Sig('a' * 25),
Sig('b' * 25),
Sig('c' * 25),
]
argument_group_signatures = []
usage = '''\
usage: PROG aaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbb
ccccccccccccccccccccccccc
'''
help = usage + '''\
positional arguments:
aaaaaaaaaaaaaaaaaaaaaaaaa
bbbbbbbbbbbbbbbbbbbbbbbbb
ccccccccccccccccccccccccc
'''
version = ''
class TestHelpVariableExpansion(HelpTestCase):
"""Test that variables are expanded properly in help messages"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('-x', type=int,
help='x %(prog)s %(default)s %(type)s %%'),
Sig('-y', action='store_const', default=42, const='XXX',
help='y %(prog)s %(default)s %(const)s'),
Sig('--foo', choices='abc',
help='foo %(prog)s %(default)s %(choices)s'),
Sig('--bar', default='baz', choices=[1, 2], metavar='BBB',
help='bar %(prog)s %(default)s %(dest)s'),
Sig('spam', help='spam %(prog)s %(default)s'),
Sig('badger', default=0.5, help='badger %(prog)s %(default)s'),
]
argument_group_signatures = [
(Sig('group'), [
Sig('-a', help='a %(prog)s %(default)s'),
Sig('-b', default=-1, help='b %(prog)s %(default)s'),
])
]
usage = ('''\
usage: PROG [-h] [-x X] [-y] [--foo {a,b,c}] [--bar BBB] [-a A] [-b B]
spam badger
''')
help = usage + '''\
positional arguments:
spam spam PROG None
badger badger PROG 0.5
optional arguments:
-h, --help show this help message and exit
-x X x PROG None int %
-y y PROG 42 XXX
--foo {a,b,c} foo PROG None a, b, c
--bar BBB bar PROG baz bar
group:
-a A a PROG None
-b B b PROG -1
'''
version = ''
class TestHelpVariableExpansionUsageSupplied(HelpTestCase):
"""Test that variables are expanded properly when usage= is present"""
parser_signature = Sig(prog='PROG', usage='%(prog)s FOO')
argument_signatures = []
argument_group_signatures = []
usage = ('''\
usage: PROG FOO
''')
help = usage + '''\
optional arguments:
-h, --help show this help message and exit
'''
version = ''
class TestHelpVariableExpansionNoArguments(HelpTestCase):
"""Test that variables are expanded properly with no arguments"""
parser_signature = Sig(prog='PROG', add_help=False)
argument_signatures = []
argument_group_signatures = []
usage = ('''\
usage: PROG
''')
help = usage
version = ''
class TestHelpSuppressUsage(HelpTestCase):
"""Test that items can be suppressed in usage messages"""
parser_signature = Sig(prog='PROG', usage=argparse.SUPPRESS)
argument_signatures = [
Sig('--foo', help='foo help'),
Sig('spam', help='spam help'),
]
argument_group_signatures = []
help = '''\
positional arguments:
spam spam help
optional arguments:
-h, --help show this help message and exit
--foo FOO foo help
'''
usage = ''
version = ''
class TestHelpSuppressOptional(HelpTestCase):
"""Test that optional arguments can be suppressed in help messages"""
parser_signature = Sig(prog='PROG', add_help=False)
argument_signatures = [
Sig('--foo', help=argparse.SUPPRESS),
Sig('spam', help='spam help'),
]
argument_group_signatures = []
usage = '''\
usage: PROG spam
'''
help = usage + '''\
positional arguments:
spam spam help
'''
version = ''
class TestHelpSuppressOptionalGroup(HelpTestCase):
"""Test that optional groups can be suppressed in help messages"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('--foo', help='foo help'),
Sig('spam', help='spam help'),
]
argument_group_signatures = [
(Sig('group'), [Sig('--bar', help=argparse.SUPPRESS)]),
]
usage = '''\
usage: PROG [-h] [--foo FOO] spam
'''
help = usage + '''\
positional arguments:
spam spam help
optional arguments:
-h, --help show this help message and exit
--foo FOO foo help
'''
version = ''
class TestHelpSuppressPositional(HelpTestCase):
"""Test that positional arguments can be suppressed in help messages"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('--foo', help='foo help'),
Sig('spam', help=argparse.SUPPRESS),
]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [--foo FOO]
'''
help = usage + '''\
optional arguments:
-h, --help show this help message and exit
--foo FOO foo help
'''
version = ''
class TestHelpRequiredOptional(HelpTestCase):
"""Test that required options don't look optional"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('--foo', required=True, help='foo help'),
]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] --foo FOO
'''
help = usage + '''\
optional arguments:
-h, --help show this help message and exit
--foo FOO foo help
'''
version = ''
class TestHelpAlternatePrefixChars(HelpTestCase):
"""Test that options display with different prefix characters"""
parser_signature = Sig(prog='PROG', prefix_chars='^;', add_help=False)
argument_signatures = [
Sig('^^foo', action='store_true', help='foo help'),
Sig(';b', ';;bar', help='bar help'),
]
argument_group_signatures = []
usage = '''\
usage: PROG [^^foo] [;b BAR]
'''
help = usage + '''\
optional arguments:
^^foo foo help
;b BAR, ;;bar BAR bar help
'''
version = ''
class TestHelpNoHelpOptional(HelpTestCase):
"""Test that the --help argument can be suppressed help messages"""
parser_signature = Sig(prog='PROG', add_help=False)
argument_signatures = [
Sig('--foo', help='foo help'),
Sig('spam', help='spam help'),
]
argument_group_signatures = []
usage = '''\
usage: PROG [--foo FOO] spam
'''
help = usage + '''\
positional arguments:
spam spam help
optional arguments:
--foo FOO foo help
'''
version = ''
class TestHelpVersionOptional(HelpTestCase):
"""Test that the --version argument can be suppressed help messages"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('-v', '--version', action='version', version='1.0'),
Sig('--foo', help='foo help'),
Sig('spam', help='spam help'),
]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [-v] [--foo FOO] spam
'''
help = usage + '''\
positional arguments:
spam spam help
optional arguments:
-h, --help show this help message and exit
-v, --version show program's version number and exit
--foo FOO foo help
'''
version = '''\
1.0
'''
class TestHelpNone(HelpTestCase):
"""Test that no errors occur if no help is specified"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('--foo'),
Sig('spam'),
]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [--foo FOO] spam
'''
help = usage + '''\
positional arguments:
spam
optional arguments:
-h, --help show this help message and exit
--foo FOO
'''
version = ''
class TestHelpTupleMetavar(HelpTestCase):
"""Test specifying metavar as a tuple"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('-w', help='w', nargs='+', metavar=('W1', 'W2')),
Sig('-x', help='x', nargs='*', metavar=('X1', 'X2')),
Sig('-y', help='y', nargs=3, metavar=('Y1', 'Y2', 'Y3')),
Sig('-z', help='z', nargs='?', metavar=('Z1', )),
]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [-w W1 [W2 ...]] [-x [X1 [X2 ...]]] [-y Y1 Y2 Y3] \
[-z [Z1]]
'''
help = usage + '''\
optional arguments:
-h, --help show this help message and exit
-w W1 [W2 ...] w
-x [X1 [X2 ...]] x
-y Y1 Y2 Y3 y
-z [Z1] z
'''
version = ''
class TestHelpRawText(HelpTestCase):
"""Test the RawTextHelpFormatter"""
parser_signature = Sig(
prog='PROG', formatter_class=argparse.RawTextHelpFormatter,
description='Keep the formatting\n'
' exactly as it is written\n'
'\n'
'here\n')
argument_signatures = [
Sig('--foo', help=' foo help should also\n'
'appear as given here'),
Sig('spam', help='spam help'),
]
argument_group_signatures = [
(Sig('title', description=' This text\n'
' should be indented\n'
' exactly like it is here\n'),
[Sig('--bar', help='bar help')]),
]
usage = '''\
usage: PROG [-h] [--foo FOO] [--bar BAR] spam
'''
help = usage + '''\
Keep the formatting
exactly as it is written
here
positional arguments:
spam spam help
optional arguments:
-h, --help show this help message and exit
--foo FOO foo help should also
appear as given here
title:
This text
should be indented
exactly like it is here
--bar BAR bar help
'''
version = ''
class TestHelpRawDescription(HelpTestCase):
"""Test the RawTextHelpFormatter"""
parser_signature = Sig(
prog='PROG', formatter_class=argparse.RawDescriptionHelpFormatter,
description='Keep the formatting\n'
' exactly as it is written\n'
'\n'
'here\n')
argument_signatures = [
Sig('--foo', help=' foo help should not\n'
' retain this odd formatting'),
Sig('spam', help='spam help'),
]
argument_group_signatures = [
(Sig('title', description=' This text\n'
' should be indented\n'
' exactly like it is here\n'),
[Sig('--bar', help='bar help')]),
]
usage = '''\
usage: PROG [-h] [--foo FOO] [--bar BAR] spam
'''
help = usage + '''\
Keep the formatting
exactly as it is written
here
positional arguments:
spam spam help
optional arguments:
-h, --help show this help message and exit
--foo FOO foo help should not retain this odd formatting
title:
This text
should be indented
exactly like it is here
--bar BAR bar help
'''
version = ''
class TestHelpArgumentDefaults(HelpTestCase):
"""Test the ArgumentDefaultsHelpFormatter"""
parser_signature = Sig(
prog='PROG', formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='description')
argument_signatures = [
Sig('--foo', help='foo help - oh and by the way, %(default)s'),
Sig('--bar', action='store_true', help='bar help'),
Sig('spam', help='spam help'),
Sig('badger', nargs='?', default='wooden', help='badger help'),
]
argument_group_signatures = [
(Sig('title', description='description'),
[Sig('--baz', type=int, default=42, help='baz help')]),
]
usage = '''\
usage: PROG [-h] [--foo FOO] [--bar] [--baz BAZ] spam [badger]
'''
help = usage + '''\
description
positional arguments:
spam spam help
badger badger help (default: wooden)
optional arguments:
-h, --help show this help message and exit
--foo FOO foo help - oh and by the way, None
--bar bar help (default: False)
title:
description
--baz BAZ baz help (default: 42)
'''
version = ''
class TestHelpVersionAction(HelpTestCase):
"""Test the default help for the version action"""
parser_signature = Sig(prog='PROG', description='description')
argument_signatures = [Sig('-V', '--version', action='version', version='3.6')]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [-V]
'''
help = usage + '''\
description
optional arguments:
-h, --help show this help message and exit
-V, --version show program's version number and exit
'''
version = ''
class TestHelpSubparsersOrdering(HelpTestCase):
"""Test ordering of subcommands in help matches the code"""
parser_signature = Sig(prog='PROG',
description='display some subcommands')
argument_signatures = [Sig('-v', '--version', action='version', version='0.1')]
subparsers_signatures = [Sig(name=name)
for name in ('a', 'b', 'c', 'd', 'e')]
usage = '''\
usage: PROG [-h] [-v] {a,b,c,d,e} ...
'''
help = usage + '''\
display some subcommands
positional arguments:
{a,b,c,d,e}
optional arguments:
-h, --help show this help message and exit
-v, --version show program's version number and exit
'''
version = '''\
0.1
'''
class TestHelpSubparsersWithHelpOrdering(HelpTestCase):
"""Test ordering of subcommands in help matches the code"""
parser_signature = Sig(prog='PROG',
description='display some subcommands')
argument_signatures = [Sig('-v', '--version', action='version', version='0.1')]
subcommand_data = (('a', 'a subcommand help'),
('b', 'b subcommand help'),
('c', 'c subcommand help'),
('d', 'd subcommand help'),
('e', 'e subcommand help'),
)
subparsers_signatures = [Sig(name=name, help=help)
for name, help in subcommand_data]
usage = '''\
usage: PROG [-h] [-v] {a,b,c,d,e} ...
'''
help = usage + '''\
display some subcommands
positional arguments:
{a,b,c,d,e}
a a subcommand help
b b subcommand help
c c subcommand help
d d subcommand help
e e subcommand help
optional arguments:
-h, --help show this help message and exit
-v, --version show program's version number and exit
'''
version = '''\
0.1
'''
class TestHelpMetavarTypeFormatter(HelpTestCase):
""""""
def custom_type(string):
return string
parser_signature = Sig(prog='PROG', description='description',
formatter_class=argparse.MetavarTypeHelpFormatter)
argument_signatures = [Sig('a', type=int),
Sig('-b', type=custom_type),
Sig('-c', type=float, metavar='SOME FLOAT')]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [-b custom_type] [-c SOME FLOAT] int
'''
help = usage + '''\
description
positional arguments:
int
optional arguments:
-h, --help show this help message and exit
-b custom_type
-c SOME FLOAT
'''
version = ''
# =====================================
# Optional/Positional constructor tests
# =====================================
class TestInvalidArgumentConstructors(TestCase):
"""Test a bunch of invalid Argument constructors"""
def assertTypeError(self, *args, **kwargs):
parser = argparse.ArgumentParser()
self.assertRaises(TypeError, parser.add_argument,
*args, **kwargs)
def assertValueError(self, *args, **kwargs):
parser = argparse.ArgumentParser()
self.assertRaises(ValueError, parser.add_argument,
*args, **kwargs)
def test_invalid_keyword_arguments(self):
self.assertTypeError('-x', bar=None)
self.assertTypeError('-y', callback='foo')
self.assertTypeError('-y', callback_args=())
self.assertTypeError('-y', callback_kwargs={})
def test_missing_destination(self):
self.assertTypeError()
for action in ['append', 'store']:
self.assertTypeError(action=action)
def test_invalid_option_strings(self):
self.assertValueError('--')
self.assertValueError('---')
def test_invalid_type(self):
self.assertValueError('--foo', type='int')
self.assertValueError('--foo', type=(int, float))
def test_invalid_action(self):
self.assertValueError('-x', action='foo')
self.assertValueError('foo', action='baz')
self.assertValueError('--foo', action=('store', 'append'))
parser = argparse.ArgumentParser()
try:
parser.add_argument("--foo", action="store-true")
except ValueError:
e = sys.exc_info()[1]
expected = 'unknown action'
msg = 'expected %r, found %r' % (expected, e)
self.assertTrue(expected in str(e), msg)
def test_multiple_dest(self):
parser = argparse.ArgumentParser()
parser.add_argument(dest='foo')
try:
parser.add_argument('bar', dest='baz')
except ValueError:
e = sys.exc_info()[1]
expected = 'dest supplied twice for positional argument'
msg = 'expected %r, found %r' % (expected, e)
self.assertTrue(expected in str(e), msg)
def test_no_argument_actions(self):
for action in ['store_const', 'store_true', 'store_false',
'append_const', 'count']:
for attrs in [dict(type=int), dict(nargs='+'),
dict(choices='ab')]:
self.assertTypeError('-x', action=action, **attrs)
def test_no_argument_no_const_actions(self):
# options with zero arguments
for action in ['store_true', 'store_false', 'count']:
# const is always disallowed
self.assertTypeError('-x', const='foo', action=action)
# nargs is always disallowed
self.assertTypeError('-x', nargs='*', action=action)
def test_more_than_one_argument_actions(self):
for action in ['store', 'append']:
# nargs=0 is disallowed
self.assertValueError('-x', nargs=0, action=action)
self.assertValueError('spam', nargs=0, action=action)
# const is disallowed with non-optional arguments
for nargs in [1, '*', '+']:
self.assertValueError('-x', const='foo',
nargs=nargs, action=action)
self.assertValueError('spam', const='foo',
nargs=nargs, action=action)
def test_required_const_actions(self):
for action in ['store_const', 'append_const']:
# nargs is always disallowed
self.assertTypeError('-x', nargs='+', action=action)
def test_parsers_action_missing_params(self):
self.assertTypeError('command', action='parsers')
self.assertTypeError('command', action='parsers', prog='PROG')
self.assertTypeError('command', action='parsers',
parser_class=argparse.ArgumentParser)
def test_required_positional(self):
self.assertTypeError('foo', required=True)
def test_user_defined_action(self):
class Success(Exception):
pass
class Action(object):
def __init__(self,
option_strings,
dest,
const,
default,
required=False):
if dest == 'spam':
if const is Success:
if default is Success:
raise Success()
def __call__(self, *args, **kwargs):
pass
parser = argparse.ArgumentParser()
self.assertRaises(Success, parser.add_argument, '--spam',
action=Action, default=Success, const=Success)
self.assertRaises(Success, parser.add_argument, 'spam',
action=Action, default=Success, const=Success)
# ================================
# Actions returned by add_argument
# ================================
class TestActionsReturned(TestCase):
def test_dest(self):
parser = argparse.ArgumentParser()
action = parser.add_argument('--foo')
self.assertEqual(action.dest, 'foo')
action = parser.add_argument('-b', '--bar')
self.assertEqual(action.dest, 'bar')
action = parser.add_argument('-x', '-y')
self.assertEqual(action.dest, 'x')
def test_misc(self):
parser = argparse.ArgumentParser()
action = parser.add_argument('--foo', nargs='?', const=42,
default=84, type=int, choices=[1, 2],
help='FOO', metavar='BAR', dest='baz')
self.assertEqual(action.nargs, '?')
self.assertEqual(action.const, 42)
self.assertEqual(action.default, 84)
self.assertEqual(action.type, int)
self.assertEqual(action.choices, [1, 2])
self.assertEqual(action.help, 'FOO')
self.assertEqual(action.metavar, 'BAR')
self.assertEqual(action.dest, 'baz')
# ================================
# Argument conflict handling tests
# ================================
class TestConflictHandling(TestCase):
def test_bad_type(self):
self.assertRaises(ValueError, argparse.ArgumentParser,
conflict_handler='foo')
def test_conflict_error(self):
parser = argparse.ArgumentParser()
parser.add_argument('-x')
self.assertRaises(argparse.ArgumentError,
parser.add_argument, '-x')
parser.add_argument('--spam')
self.assertRaises(argparse.ArgumentError,
parser.add_argument, '--spam')
def test_resolve_error(self):
get_parser = argparse.ArgumentParser
parser = get_parser(prog='PROG', conflict_handler='resolve')
parser.add_argument('-x', help='OLD X')
parser.add_argument('-x', help='NEW X')
self.assertEqual(parser.format_help(), textwrap.dedent('''\
usage: PROG [-h] [-x X]
optional arguments:
-h, --help show this help message and exit
-x X NEW X
'''))
parser.add_argument('--spam', metavar='OLD_SPAM')
parser.add_argument('--spam', metavar='NEW_SPAM')
self.assertEqual(parser.format_help(), textwrap.dedent('''\
usage: PROG [-h] [-x X] [--spam NEW_SPAM]
optional arguments:
-h, --help show this help message and exit
-x X NEW X
--spam NEW_SPAM
'''))
# =============================
# Help and Version option tests
# =============================
class TestOptionalsHelpVersionActions(TestCase):
"""Test the help and version actions"""
def _get_error(self, func, *args, **kwargs):
try:
func(*args, **kwargs)
except ArgumentParserError:
return sys.exc_info()[1]
else:
self.assertRaises(ArgumentParserError, func, *args, **kwargs)
def assertPrintHelpExit(self, parser, args_str):
self.assertEqual(
parser.format_help(),
self._get_error(parser.parse_args, args_str.split()).stdout)
def assertArgumentParserError(self, parser, *args):
self.assertRaises(ArgumentParserError, parser.parse_args, args)
def test_version(self):
parser = ErrorRaisingArgumentParser()
parser.add_argument('-v', '--version', action='version', version='1.0')
self.assertPrintHelpExit(parser, '-h')
self.assertPrintHelpExit(parser, '--help')
self.assertRaises(AttributeError, getattr, parser, 'format_version')
def test_version_format(self):
parser = ErrorRaisingArgumentParser(prog='PPP')
parser.add_argument('-v', '--version', action='version', version='%(prog)s 3.5')
msg = self._get_error(parser.parse_args, ['-v']).stdout
self.assertEqual('PPP 3.5\n', msg)
def test_version_no_help(self):
parser = ErrorRaisingArgumentParser(add_help=False)
parser.add_argument('-v', '--version', action='version', version='1.0')
self.assertArgumentParserError(parser, '-h')
self.assertArgumentParserError(parser, '--help')
self.assertRaises(AttributeError, getattr, parser, 'format_version')
def test_version_action(self):
parser = ErrorRaisingArgumentParser(prog='XXX')
parser.add_argument('-V', action='version', version='%(prog)s 3.7')
msg = self._get_error(parser.parse_args, ['-V']).stdout
self.assertEqual('XXX 3.7\n', msg)
def test_no_help(self):
parser = ErrorRaisingArgumentParser(add_help=False)
self.assertArgumentParserError(parser, '-h')
self.assertArgumentParserError(parser, '--help')
self.assertArgumentParserError(parser, '-v')
self.assertArgumentParserError(parser, '--version')
def test_alternate_help_version(self):
parser = ErrorRaisingArgumentParser()
parser.add_argument('-x', action='help')
parser.add_argument('-y', action='version')
self.assertPrintHelpExit(parser, '-x')
self.assertArgumentParserError(parser, '-v')
self.assertArgumentParserError(parser, '--version')
self.assertRaises(AttributeError, getattr, parser, 'format_version')
def test_help_version_extra_arguments(self):
parser = ErrorRaisingArgumentParser()
parser.add_argument('--version', action='version', version='1.0')
parser.add_argument('-x', action='store_true')
parser.add_argument('y')
# try all combinations of valid prefixes and suffixes
valid_prefixes = ['', '-x', 'foo', '-x bar', 'baz -x']
valid_suffixes = valid_prefixes + ['--bad-option', 'foo bar baz']
for prefix in valid_prefixes:
for suffix in valid_suffixes:
format = '%s %%s %s' % (prefix, suffix)
self.assertPrintHelpExit(parser, format % '-h')
self.assertPrintHelpExit(parser, format % '--help')
self.assertRaises(AttributeError, getattr, parser, 'format_version')
# ======================
# str() and repr() tests
# ======================
class TestStrings(TestCase):
"""Test str() and repr() on Optionals and Positionals"""
def assertStringEqual(self, obj, result_string):
for func in [str, repr]:
self.assertEqual(func(obj), result_string)
def test_optional(self):
option = argparse.Action(
option_strings=['--foo', '-a', '-b'],
dest='b',
type='int',
nargs='+',
default=42,
choices=[1, 2, 3],
help='HELP',
metavar='METAVAR')
string = (
"Action(option_strings=['--foo', '-a', '-b'], dest='b', "
"nargs='+', const=None, default=42, type='int', "
"choices=[1, 2, 3], help='HELP', metavar='METAVAR')")
self.assertStringEqual(option, string)
def test_argument(self):
argument = argparse.Action(
option_strings=[],
dest='x',
type=float,
nargs='?',
default=2.5,
choices=[0.5, 1.5, 2.5],
help='H HH H',
metavar='MV MV MV')
string = (
"Action(option_strings=[], dest='x', nargs='?', "
"const=None, default=2.5, type=%r, choices=[0.5, 1.5, 2.5], "
"help='H HH H', metavar='MV MV MV')" % float)
self.assertStringEqual(argument, string)
def test_namespace(self):
ns = argparse.Namespace(foo=42, bar='spam')
string = "Namespace(bar='spam', foo=42)"
self.assertStringEqual(ns, string)
def test_parser(self):
parser = argparse.ArgumentParser(prog='PROG')
string = (
"ArgumentParser(prog='PROG', usage=None, description=None, "
"formatter_class=%r, conflict_handler='error', "
"add_help=True)" % argparse.HelpFormatter)
self.assertStringEqual(parser, string)
# ===============
# Namespace tests
# ===============
class TestNamespace(TestCase):
def test_constructor(self):
ns = argparse.Namespace()
self.assertRaises(AttributeError, getattr, ns, 'x')
ns = argparse.Namespace(a=42, b='spam')
self.assertEqual(ns.a, 42)
self.assertEqual(ns.b, 'spam')
def test_equality(self):
ns1 = argparse.Namespace(a=1, b=2)
ns2 = argparse.Namespace(b=2, a=1)
ns3 = argparse.Namespace(a=1)
ns4 = argparse.Namespace(b=2)
self.assertEqual(ns1, ns2)
self.assertNotEqual(ns1, ns3)
self.assertNotEqual(ns1, ns4)
self.assertNotEqual(ns2, ns3)
self.assertNotEqual(ns2, ns4)
self.assertTrue(ns1 != ns3)
self.assertTrue(ns1 != ns4)
self.assertTrue(ns2 != ns3)
self.assertTrue(ns2 != ns4)
    def test_equality_returns_notimplemented(self):
# See issue 21481
ns = argparse.Namespace(a=1, b=2)
self.assertIs(ns.__eq__(None), NotImplemented)
self.assertIs(ns.__ne__(None), NotImplemented)
# ===================
# File encoding tests
# ===================
class TestEncoding(TestCase):
def _test_module_encoding(self, path):
path, _ = os.path.splitext(path)
path += ".py"
with codecs.open(path, 'r', 'utf-8') as f:
f.read()
def test_argparse_module_encoding(self):
self._test_module_encoding(argparse.__file__)
def test_test_argparse_module_encoding(self):
self._test_module_encoding(__file__)
# ===================
# ArgumentError tests
# ===================
class TestArgumentError(TestCase):
def test_argument_error(self):
msg = "my error here"
error = argparse.ArgumentError(None, msg)
self.assertEqual(str(error), msg)
# =======================
# ArgumentTypeError tests
# =======================
class TestArgumentTypeError(TestCase):
def test_argument_type_error(self):
def spam(string):
raise argparse.ArgumentTypeError('spam!')
parser = ErrorRaisingArgumentParser(prog='PROG', add_help=False)
parser.add_argument('x', type=spam)
try:
parser.parse_args(['XXX'])
except ArgumentParserError:
expected = 'usage: PROG x\nPROG: error: argument x: spam!\n'
msg = sys.exc_info()[1].stderr
self.assertEqual(expected, msg)
else:
self.fail()
# =========================
# MessageContentError tests
# =========================
class TestMessageContentError(TestCase):
def test_missing_argument_name_in_message(self):
parser = ErrorRaisingArgumentParser(prog='PROG', usage='')
parser.add_argument('req_pos', type=str)
parser.add_argument('-req_opt', type=int, required=True)
parser.add_argument('need_one', type=str, nargs='+')
with self.assertRaises(ArgumentParserError) as cm:
parser.parse_args([])
msg = str(cm.exception)
self.assertRegex(msg, 'req_pos')
self.assertRegex(msg, 'req_opt')
self.assertRegex(msg, 'need_one')
with self.assertRaises(ArgumentParserError) as cm:
parser.parse_args(['myXargument'])
msg = str(cm.exception)
        self.assertNotIn('req_pos', msg)
self.assertRegex(msg, 'req_opt')
self.assertRegex(msg, 'need_one')
with self.assertRaises(ArgumentParserError) as cm:
parser.parse_args(['myXargument', '-req_opt=1'])
msg = str(cm.exception)
        self.assertNotIn('req_pos', msg)
        self.assertNotIn('req_opt', msg)
self.assertRegex(msg, 'need_one')
def test_optional_optional_not_in_message(self):
parser = ErrorRaisingArgumentParser(prog='PROG', usage='')
parser.add_argument('req_pos', type=str)
parser.add_argument('--req_opt', type=int, required=True)
parser.add_argument('--opt_opt', type=bool, nargs='?',
default=True)
with self.assertRaises(ArgumentParserError) as cm:
parser.parse_args([])
msg = str(cm.exception)
self.assertRegex(msg, 'req_pos')
self.assertRegex(msg, 'req_opt')
        self.assertNotIn('opt_opt', msg)
with self.assertRaises(ArgumentParserError) as cm:
parser.parse_args(['--req_opt=1'])
msg = str(cm.exception)
self.assertRegex(msg, 'req_pos')
        self.assertNotIn('req_opt', msg)
        self.assertNotIn('opt_opt', msg)
def test_optional_positional_not_in_message(self):
parser = ErrorRaisingArgumentParser(prog='PROG', usage='')
parser.add_argument('req_pos')
parser.add_argument('optional_positional', nargs='?', default='eggs')
with self.assertRaises(ArgumentParserError) as cm:
parser.parse_args([])
msg = str(cm.exception)
self.assertRegex(msg, 'req_pos')
        self.assertNotIn('optional_positional', msg)
# ================================================
# Check that the type function is called only once
# ================================================
class TestTypeFunctionCallOnlyOnce(TestCase):
def test_type_function_call_only_once(self):
def spam(string_to_convert):
self.assertEqual(string_to_convert, 'spam!')
return 'foo_converted'
parser = argparse.ArgumentParser()
parser.add_argument('--foo', type=spam, default='bar')
args = parser.parse_args('--foo spam!'.split())
self.assertEqual(NS(foo='foo_converted'), args)
# ==================================================================
# Check semantics regarding the default argument and type conversion
# ==================================================================
class TestTypeFunctionCalledOnDefault(TestCase):
def test_type_function_call_with_non_string_default(self):
def spam(int_to_convert):
self.assertEqual(int_to_convert, 0)
return 'foo_converted'
parser = argparse.ArgumentParser()
parser.add_argument('--foo', type=spam, default=0)
args = parser.parse_args([])
# foo should *not* be converted because its default is not a string.
self.assertEqual(NS(foo=0), args)
def test_type_function_call_with_string_default(self):
def spam(int_to_convert):
return 'foo_converted'
parser = argparse.ArgumentParser()
parser.add_argument('--foo', type=spam, default='0')
args = parser.parse_args([])
# foo is converted because its default is a string.
self.assertEqual(NS(foo='foo_converted'), args)
def test_no_double_type_conversion_of_default(self):
def extend(str_to_convert):
return str_to_convert + '*'
parser = argparse.ArgumentParser()
parser.add_argument('--test', type=extend, default='*')
args = parser.parse_args([])
# The test argument will be two stars, one coming from the default
# value and one coming from the type conversion being called exactly
# once.
self.assertEqual(NS(test='**'), args)
def test_issue_15906(self):
        # Issue #15906: When action='append', type=str, default=[] are
        # provided, the dest value was the string representation "[]" when it
        # should have been an empty list.
parser = argparse.ArgumentParser()
parser.add_argument('--test', dest='test', type=str,
default=[], action='append')
args = parser.parse_args([])
self.assertEqual(args.test, [])
# ======================
# parse_known_args tests
# ======================
class TestParseKnownArgs(TestCase):
def test_arguments_tuple(self):
parser = argparse.ArgumentParser()
parser.parse_args(())
def test_arguments_list(self):
parser = argparse.ArgumentParser()
parser.parse_args([])
def test_arguments_tuple_positional(self):
parser = argparse.ArgumentParser()
parser.add_argument('x')
parser.parse_args(('x',))
def test_arguments_list_positional(self):
parser = argparse.ArgumentParser()
parser.add_argument('x')
parser.parse_args(['x'])
def test_optionals(self):
parser = argparse.ArgumentParser()
parser.add_argument('--foo')
args, extras = parser.parse_known_args('--foo F --bar --baz'.split())
self.assertEqual(NS(foo='F'), args)
self.assertEqual(['--bar', '--baz'], extras)
def test_mixed(self):
parser = argparse.ArgumentParser()
parser.add_argument('-v', nargs='?', const=1, type=int)
parser.add_argument('--spam', action='store_false')
parser.add_argument('badger')
argv = ["B", "C", "--foo", "-v", "3", "4"]
args, extras = parser.parse_known_args(argv)
self.assertEqual(NS(v=3, spam=True, badger="B"), args)
self.assertEqual(["C", "--foo", "4"], extras)
# ==========================
# add_argument metavar tests
# ==========================
class TestAddArgumentMetavar(TestCase):
EXPECTED_MESSAGE = "length of metavar tuple does not match nargs"
def do_test_no_exception(self, nargs, metavar):
parser = argparse.ArgumentParser()
parser.add_argument("--foo", nargs=nargs, metavar=metavar)
def do_test_exception(self, nargs, metavar):
parser = argparse.ArgumentParser()
with self.assertRaises(ValueError) as cm:
parser.add_argument("--foo", nargs=nargs, metavar=metavar)
self.assertEqual(cm.exception.args[0], self.EXPECTED_MESSAGE)
# Unit tests for different values of metavar when nargs=None
def test_nargs_None_metavar_string(self):
self.do_test_no_exception(nargs=None, metavar="1")
def test_nargs_None_metavar_length0(self):
self.do_test_exception(nargs=None, metavar=tuple())
def test_nargs_None_metavar_length1(self):
self.do_test_no_exception(nargs=None, metavar=("1"))
def test_nargs_None_metavar_length2(self):
self.do_test_exception(nargs=None, metavar=("1", "2"))
def test_nargs_None_metavar_length3(self):
self.do_test_exception(nargs=None, metavar=("1", "2", "3"))
# Unit tests for different values of metavar when nargs=?
def test_nargs_optional_metavar_string(self):
self.do_test_no_exception(nargs="?", metavar="1")
def test_nargs_optional_metavar_length0(self):
self.do_test_exception(nargs="?", metavar=tuple())
def test_nargs_optional_metavar_length1(self):
self.do_test_no_exception(nargs="?", metavar=("1"))
def test_nargs_optional_metavar_length2(self):
self.do_test_exception(nargs="?", metavar=("1", "2"))
def test_nargs_optional_metavar_length3(self):
self.do_test_exception(nargs="?", metavar=("1", "2", "3"))
# Unit tests for different values of metavar when nargs=*
def test_nargs_zeroormore_metavar_string(self):
self.do_test_no_exception(nargs="*", metavar="1")
def test_nargs_zeroormore_metavar_length0(self):
self.do_test_exception(nargs="*", metavar=tuple())
def test_nargs_zeroormore_metavar_length1(self):
self.do_test_no_exception(nargs="*", metavar=("1"))
def test_nargs_zeroormore_metavar_length2(self):
self.do_test_no_exception(nargs="*", metavar=("1", "2"))
def test_nargs_zeroormore_metavar_length3(self):
self.do_test_exception(nargs="*", metavar=("1", "2", "3"))
# Unit tests for different values of metavar when nargs=+
def test_nargs_oneormore_metavar_string(self):
self.do_test_no_exception(nargs="+", metavar="1")
def test_nargs_oneormore_metavar_length0(self):
self.do_test_exception(nargs="+", metavar=tuple())
def test_nargs_oneormore_metavar_length1(self):
self.do_test_no_exception(nargs="+", metavar=("1"))
def test_nargs_oneormore_metavar_length2(self):
self.do_test_no_exception(nargs="+", metavar=("1", "2"))
def test_nargs_oneormore_metavar_length3(self):
self.do_test_exception(nargs="+", metavar=("1", "2", "3"))
# Unit tests for different values of metavar when nargs=...
def test_nargs_remainder_metavar_string(self):
self.do_test_no_exception(nargs="...", metavar="1")
def test_nargs_remainder_metavar_length0(self):
self.do_test_no_exception(nargs="...", metavar=tuple())
def test_nargs_remainder_metavar_length1(self):
self.do_test_no_exception(nargs="...", metavar=("1"))
def test_nargs_remainder_metavar_length2(self):
self.do_test_no_exception(nargs="...", metavar=("1", "2"))
def test_nargs_remainder_metavar_length3(self):
self.do_test_no_exception(nargs="...", metavar=("1", "2", "3"))
# Unit tests for different values of metavar when nargs=A...
def test_nargs_parser_metavar_string(self):
self.do_test_no_exception(nargs="A...", metavar="1")
def test_nargs_parser_metavar_length0(self):
self.do_test_exception(nargs="A...", metavar=tuple())
def test_nargs_parser_metavar_length1(self):
self.do_test_no_exception(nargs="A...", metavar=("1"))
def test_nargs_parser_metavar_length2(self):
self.do_test_exception(nargs="A...", metavar=("1", "2"))
def test_nargs_parser_metavar_length3(self):
self.do_test_exception(nargs="A...", metavar=("1", "2", "3"))
# Unit tests for different values of metavar when nargs=1
def test_nargs_1_metavar_string(self):
self.do_test_no_exception(nargs=1, metavar="1")
def test_nargs_1_metavar_length0(self):
self.do_test_exception(nargs=1, metavar=tuple())
def test_nargs_1_metavar_length1(self):
self.do_test_no_exception(nargs=1, metavar=("1"))
def test_nargs_1_metavar_length2(self):
self.do_test_exception(nargs=1, metavar=("1", "2"))
def test_nargs_1_metavar_length3(self):
self.do_test_exception(nargs=1, metavar=("1", "2", "3"))
# Unit tests for different values of metavar when nargs=2
def test_nargs_2_metavar_string(self):
self.do_test_no_exception(nargs=2, metavar="1")
def test_nargs_2_metavar_length0(self):
self.do_test_exception(nargs=2, metavar=tuple())
def test_nargs_2_metavar_length1(self):
self.do_test_no_exception(nargs=2, metavar=("1"))
def test_nargs_2_metavar_length2(self):
self.do_test_no_exception(nargs=2, metavar=("1", "2"))
def test_nargs_2_metavar_length3(self):
self.do_test_exception(nargs=2, metavar=("1", "2", "3"))
# Unit tests for different values of metavar when nargs=3
def test_nargs_3_metavar_string(self):
self.do_test_no_exception(nargs=3, metavar="1")
def test_nargs_3_metavar_length0(self):
self.do_test_exception(nargs=3, metavar=tuple())
def test_nargs_3_metavar_length1(self):
self.do_test_no_exception(nargs=3, metavar=("1"))
def test_nargs_3_metavar_length2(self):
self.do_test_exception(nargs=3, metavar=("1", "2"))
def test_nargs_3_metavar_length3(self):
self.do_test_no_exception(nargs=3, metavar=("1", "2", "3"))
# ============================
# from argparse import * tests
# ============================
class TestImportStar(TestCase):
def test(self):
for name in argparse.__all__:
self.assertTrue(hasattr(argparse, name))
def test_all_exports_everything_but_modules(self):
items = [
name
for name, value in vars(argparse).items()
if not (name.startswith("_") or name == 'ngettext')
if not inspect.ismodule(value)
]
self.assertEqual(sorted(items), sorted(argparse.__all__))
def test_main():
support.run_unittest(__name__)
# Remove global references to avoid looking like we have refleaks.
RFile.seen = {}
WFile.seen = set()
if __name__ == '__main__':
test_main()
|
onecloud/neutron
|
refs/heads/master
|
neutron/openstack/common/db/sqlalchemy/utils.py
|
5
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2010-2011 OpenStack Foundation.
# Copyright 2012 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import re
import sqlalchemy
from sqlalchemy import Boolean
from sqlalchemy import CheckConstraint
from sqlalchemy import Column
from sqlalchemy.engine import reflection
from sqlalchemy.ext.compiler import compiles
from sqlalchemy import func
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import or_
from sqlalchemy.sql.expression import literal_column
from sqlalchemy.sql.expression import UpdateBase
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy.types import NullType
from neutron.openstack.common import context as request_context
from neutron.openstack.common.db.sqlalchemy import models
from neutron.openstack.common.gettextutils import _, _LI, _LW
from neutron.openstack.common import timeutils
LOG = logging.getLogger(__name__)
_DBURL_REGEX = re.compile(r"[^:]+://([^:]+):([^@]+)@.+")
def sanitize_db_url(url):
match = _DBURL_REGEX.match(url)
if match:
return '%s****:****%s' % (url[:match.start(1)], url[match.end(2):])
return url
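# For example: sanitize_db_url('mysql://user:secret@host/db') returns
# 'mysql://****:****@host/db', masking the credentials but keeping the rest.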
class InvalidSortKey(Exception):
message = _("Sort key supplied was not valid.")
# copy from glance/db/sqlalchemy/api.py
def paginate_query(query, model, limit, sort_keys, marker=None,
sort_dir=None, sort_dirs=None):
"""Returns a query with sorting / pagination criteria added.
Pagination works by requiring a unique sort_key, specified by sort_keys.
(If sort_keys is not unique, then we risk looping through values.)
We use the last row in the previous page as the 'marker' for pagination.
    So we must return values that follow the passed marker in the sort order.
With a single-valued sort_key, this would be easy: sort_key > X.
    With a compound-valued sort_key (k1, k2, k3), we must do this to repeat
the lexicographical ordering:
(k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3)
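    For example, with sort_keys (k1, k2), marker values (X1, X2) and ascending
    directions, the filter added is (k1 > X1) or (k1 == X1 && k2 > X2).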
We also have to cope with different sort_directions.
Typically, the id of the last row is used as the client-facing pagination
marker, then the actual marker object must be fetched from the db and
passed in to us as marker.
:param query: the query object to which we should add paging/sorting
:param model: the ORM model class
:param limit: maximum number of items to return
:param sort_keys: array of attributes by which results should be sorted
    :param marker: the last item of the previous page; we return the next
results after this value.
:param sort_dir: direction in which results should be sorted (asc, desc)
:param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys
:rtype: sqlalchemy.orm.query.Query
:return: The query with sorting/pagination added.
"""
if 'id' not in sort_keys:
# TODO(justinsb): If this ever gives a false-positive, check
        # the actual primary key, rather than assuming it is 'id'
LOG.warning(_LW('Id not in sort_keys; is sort_keys unique?'))
assert(not (sort_dir and sort_dirs))
# Default the sort direction to ascending
if sort_dirs is None and sort_dir is None:
sort_dir = 'asc'
# Ensure a per-column sort direction
if sort_dirs is None:
sort_dirs = [sort_dir for _sort_key in sort_keys]
assert(len(sort_dirs) == len(sort_keys))
# Add sorting
for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs):
try:
sort_dir_func = {
'asc': sqlalchemy.asc,
'desc': sqlalchemy.desc,
}[current_sort_dir]
except KeyError:
raise ValueError(_("Unknown sort direction, "
"must be 'desc' or 'asc'"))
try:
sort_key_attr = getattr(model, current_sort_key)
except AttributeError:
raise InvalidSortKey()
query = query.order_by(sort_dir_func(sort_key_attr))
# Add pagination
if marker is not None:
marker_values = []
for sort_key in sort_keys:
v = getattr(marker, sort_key)
marker_values.append(v)
# Build up an array of sort criteria as in the docstring
criteria_list = []
for i in range(len(sort_keys)):
crit_attrs = []
for j in range(i):
model_attr = getattr(model, sort_keys[j])
crit_attrs.append((model_attr == marker_values[j]))
model_attr = getattr(model, sort_keys[i])
if sort_dirs[i] == 'desc':
crit_attrs.append((model_attr < marker_values[i]))
else:
crit_attrs.append((model_attr > marker_values[i]))
criteria = sqlalchemy.sql.and_(*crit_attrs)
criteria_list.append(criteria)
f = sqlalchemy.sql.or_(*criteria_list)
query = query.filter(f)
if limit is not None:
query = query.limit(limit)
return query
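# Illustrative usage sketch; `Instance` and `marker_obj` are hypothetical:
#
#     query = paginate_query(session.query(Instance), Instance, limit=50,
#                            sort_keys=['created_at', 'id'],
#                            marker=marker_obj, sort_dir='asc')
#
# Including 'id' as the final sort key makes the ordering total, so pages
# never skip or repeat rows.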
def _read_deleted_filter(query, db_model, read_deleted):
if 'deleted' not in db_model.__table__.columns:
raise ValueError(_("There is no `deleted` column in `%s` table. "
"Project doesn't use soft-deleted feature.")
% db_model.__name__)
default_deleted_value = db_model.__table__.c.deleted.default.arg
if read_deleted == 'no':
query = query.filter(db_model.deleted == default_deleted_value)
elif read_deleted == 'yes':
pass # omit the filter to include deleted and active
elif read_deleted == 'only':
query = query.filter(db_model.deleted != default_deleted_value)
else:
raise ValueError(_("Unrecognized read_deleted value '%s'")
% read_deleted)
return query
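# In short: read_deleted='no' keeps only live rows, 'yes' keeps both live and
# soft-deleted rows, and 'only' keeps just the soft-deleted ones.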
def _project_filter(query, db_model, context, project_only):
if project_only and 'project_id' not in db_model.__table__.columns:
raise ValueError(_("There is no `project_id` column in `%s` table.")
% db_model.__name__)
if request_context.is_user_context(context) and project_only:
if project_only == 'allow_none':
is_none = None
query = query.filter(or_(db_model.project_id == context.project_id,
db_model.project_id == is_none))
else:
query = query.filter(db_model.project_id == context.project_id)
return query
def model_query(context, model, session, args=None, project_only=False,
read_deleted=None):
"""Query helper that accounts for context's `read_deleted` field.
:param context: context to query under
:param model: Model to query. Must be a subclass of ModelBase.
:type model: models.ModelBase
:param session: The session to use.
:type session: sqlalchemy.orm.session.Session
:param args: Arguments to query. If None - model is used.
:type args: tuple
:param project_only: If present and context is user-type, then restrict
query to match the context's project_id. If set to
'allow_none', restriction includes project_id = None.
:type project_only: bool
:param read_deleted: If present, overrides context's read_deleted field.
    :type read_deleted: str
Usage:
    .. code:: python
result = (utils.model_query(context, models.Instance, session=session)
.filter_by(uuid=instance_uuid)
.all())
query = utils.model_query(
context, Node,
session=session,
args=(func.count(Node.id), func.sum(Node.ram))
).filter_by(project_id=project_id)
"""
if not read_deleted:
if hasattr(context, 'read_deleted'):
# NOTE(viktors): some projects use `read_deleted` attribute in
# their contexts instead of `show_deleted`.
read_deleted = context.read_deleted
else:
read_deleted = context.show_deleted
if not issubclass(model, models.ModelBase):
raise TypeError(_("model should be a subclass of ModelBase"))
query = session.query(model) if not args else session.query(*args)
query = _read_deleted_filter(query, model, read_deleted)
query = _project_filter(query, model, context, project_only)
return query
def get_table(engine, name):
"""Returns an sqlalchemy table dynamically from db.
Needed because the models don't work for us in migrations
as models will be far out of sync with the current data.
"""
metadata = MetaData()
metadata.bind = engine
return Table(name, metadata, autoload=True)
class InsertFromSelect(UpdateBase):
"""Form the base for `INSERT INTO table (SELECT ... )` statement."""
def __init__(self, table, select):
self.table = table
self.select = select
@compiles(InsertFromSelect)
def visit_insert_from_select(element, compiler, **kw):
"""Form the `INSERT INTO table (SELECT ... )` statement."""
return "INSERT INTO %s %s" % (
compiler.process(element.table, asfrom=True),
compiler.process(element.select))
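# Illustrative example; the `users` and `shadow_users` tables are
# hypothetical: InsertFromSelect(shadow_users, users.select()) would compile
# to roughly "INSERT INTO shadow_users SELECT users.* FROM users".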
class ColumnError(Exception):
"""Error raised when no column or an invalid column is found."""
def _get_not_supported_column(col_name_col_instance, column_name):
try:
column = col_name_col_instance[column_name]
except KeyError:
msg = _("Please specify column %s in col_name_col_instance "
"param. It is required because column has unsupported "
"type by sqlite).")
raise ColumnError(msg % column_name)
if not isinstance(column, Column):
msg = _("col_name_col_instance param has wrong type of "
"column instance for column %s It should be instance "
"of sqlalchemy.Column.")
raise ColumnError(msg % column_name)
return column
def drop_unique_constraint(migrate_engine, table_name, uc_name, *columns,
**col_name_col_instance):
"""Drop unique constraint from table.
DEPRECATED: this function is deprecated and will be removed from neutron.db
in a few releases. Please use UniqueConstraint.drop() method directly for
sqlalchemy-migrate migration scripts.
This method drops UC from table and works for mysql, postgresql and sqlite.
In mysql and postgresql we are able to use "alter table" construction.
Sqlalchemy doesn't support some sqlite column types and replaces their
type with NullType in metadata. We process these columns and replace
NullType with the correct column type.
:param migrate_engine: sqlalchemy engine
    :param table_name: name of the table that contains the unique constraint.
    :param uc_name: name of the unique constraint that will be dropped.
:param columns: columns that are in uniq constraint.
    :param col_name_col_instance: contains pairs column_name=column_instance.
                         column_instance is an instance of Column. These
                         params are required only for columns that have
                         types unsupported by sqlite. For example BigInteger.
"""
from migrate.changeset import UniqueConstraint
meta = MetaData()
meta.bind = migrate_engine
t = Table(table_name, meta, autoload=True)
if migrate_engine.name == "sqlite":
override_cols = [
_get_not_supported_column(col_name_col_instance, col.name)
for col in t.columns
if isinstance(col.type, NullType)
]
for col in override_cols:
t.columns.replace(col)
uc = UniqueConstraint(*columns, table=t, name=uc_name)
uc.drop()
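# A hedged example call, assuming a table "instances" with a unique
# constraint "uniq_instances0uuid" over its "uuid" column (both names are
# illustrative):
#
#     drop_unique_constraint(migrate_engine, "instances",
#                            "uniq_instances0uuid", "uuid")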
def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
use_soft_delete, *uc_column_names):
"""Drop all old rows having the same values for columns in uc_columns.
    This method drops (or marks as `deleted`, if use_soft_delete is True)
    old duplicate rows from the table with name `table_name`.
:param migrate_engine: Sqlalchemy engine
:param table_name: Table with duplicates
:param use_soft_delete: If True - values will be marked as `deleted`,
if False - values will be removed from table
:param uc_column_names: Unique constraint columns
"""
meta = MetaData()
meta.bind = migrate_engine
table = Table(table_name, meta, autoload=True)
columns_for_group_by = [table.c[name] for name in uc_column_names]
columns_for_select = [func.max(table.c.id)]
columns_for_select.extend(columns_for_group_by)
duplicated_rows_select = sqlalchemy.sql.select(
columns_for_select, group_by=columns_for_group_by,
having=func.count(table.c.id) > 1)
for row in migrate_engine.execute(duplicated_rows_select):
# NOTE(boris-42): Do not remove row that has the biggest ID.
delete_condition = table.c.id != row[0]
is_none = None # workaround for pyflakes
delete_condition &= table.c.deleted_at == is_none
for name in uc_column_names:
delete_condition &= table.c[name] == row[name]
rows_to_delete_select = sqlalchemy.sql.select(
[table.c.id]).where(delete_condition)
for row in migrate_engine.execute(rows_to_delete_select).fetchall():
LOG.info(_LI("Deleting duplicated row with id: %(id)s from table: "
"%(table)s") % dict(id=row[0], table=table_name))
if use_soft_delete:
delete_statement = table.update().\
where(delete_condition).\
values({
'deleted': literal_column('id'),
'updated_at': literal_column('updated_at'),
'deleted_at': timeutils.utcnow()
})
else:
delete_statement = table.delete().where(delete_condition)
migrate_engine.execute(delete_statement)
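# A hedged example: soft-delete all but the newest row sharing the same
# (project_id, hostname) pair; the table and column names are illustrative.
#
#     drop_old_duplicate_entries_from_table(migrate_engine, "instances",
#                                           True, "project_id", "hostname")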
def _get_default_deleted_value(table):
if isinstance(table.c.id.type, Integer):
return 0
if isinstance(table.c.id.type, String):
return ""
raise ColumnError(_("Unsupported id columns type"))
def _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes):
table = get_table(migrate_engine, table_name)
insp = reflection.Inspector.from_engine(migrate_engine)
real_indexes = insp.get_indexes(table_name)
existing_index_names = dict(
[(index['name'], index['column_names']) for index in real_indexes])
# NOTE(boris-42): Restore indexes on `deleted` column
for index in indexes:
if 'deleted' not in index['column_names']:
continue
name = index['name']
if name in existing_index_names:
column_names = [table.c[c] for c in existing_index_names[name]]
old_index = Index(name, *column_names, unique=index["unique"])
old_index.drop(migrate_engine)
column_names = [table.c[c] for c in index['column_names']]
new_index = Index(index["name"], *column_names, unique=index["unique"])
new_index.create(migrate_engine)
def change_deleted_column_type_to_boolean(migrate_engine, table_name,
**col_name_col_instance):
if migrate_engine.name == "sqlite":
return _change_deleted_column_type_to_boolean_sqlite(
migrate_engine, table_name, **col_name_col_instance)
insp = reflection.Inspector.from_engine(migrate_engine)
indexes = insp.get_indexes(table_name)
table = get_table(migrate_engine, table_name)
old_deleted = Column('old_deleted', Boolean, default=False)
old_deleted.create(table, populate_default=False)
table.update().\
where(table.c.deleted == table.c.id).\
values(old_deleted=True).\
execute()
table.c.deleted.drop()
table.c.old_deleted.alter(name="deleted")
_restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes)
def _change_deleted_column_type_to_boolean_sqlite(migrate_engine, table_name,
**col_name_col_instance):
insp = reflection.Inspector.from_engine(migrate_engine)
table = get_table(migrate_engine, table_name)
columns = []
for column in table.columns:
column_copy = None
if column.name != "deleted":
if isinstance(column.type, NullType):
column_copy = _get_not_supported_column(col_name_col_instance,
column.name)
else:
column_copy = column.copy()
else:
column_copy = Column('deleted', Boolean, default=0)
columns.append(column_copy)
constraints = [constraint.copy() for constraint in table.constraints]
meta = table.metadata
new_table = Table(table_name + "__tmp__", meta,
*(columns + constraints))
new_table.create()
indexes = []
for index in insp.get_indexes(table_name):
column_names = [new_table.c[c] for c in index['column_names']]
indexes.append(Index(index["name"], *column_names,
unique=index["unique"]))
c_select = []
for c in table.c:
if c.name != "deleted":
c_select.append(c)
else:
c_select.append(table.c.deleted == table.c.id)
ins = InsertFromSelect(new_table, sqlalchemy.sql.select(c_select))
migrate_engine.execute(ins)
table.drop()
[index.create(migrate_engine) for index in indexes]
new_table.rename(table_name)
new_table.update().\
where(new_table.c.deleted == new_table.c.id).\
values(deleted=True).\
execute()
def change_deleted_column_type_to_id_type(migrate_engine, table_name,
**col_name_col_instance):
if migrate_engine.name == "sqlite":
return _change_deleted_column_type_to_id_type_sqlite(
migrate_engine, table_name, **col_name_col_instance)
insp = reflection.Inspector.from_engine(migrate_engine)
indexes = insp.get_indexes(table_name)
table = get_table(migrate_engine, table_name)
new_deleted = Column('new_deleted', table.c.id.type,
default=_get_default_deleted_value(table))
new_deleted.create(table, populate_default=True)
deleted = True # workaround for pyflakes
table.update().\
where(table.c.deleted == deleted).\
values(new_deleted=table.c.id).\
execute()
table.c.deleted.drop()
table.c.new_deleted.alter(name="deleted")
_restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes)
def _change_deleted_column_type_to_id_type_sqlite(migrate_engine, table_name,
**col_name_col_instance):
    # NOTE(boris-42): sqlalchemy-migrate can't drop a column with check
    #                 constraints in a sqlite DB, and our `deleted` column
    #                 has 2 check constraints. So the only way to remove
    #                 these constraints is to:
# 1) Create new table with the same columns, constraints
# and indexes. (except deleted column).
# 2) Copy all data from old to new table.
# 3) Drop old table.
# 4) Rename new table to old table name.
insp = reflection.Inspector.from_engine(migrate_engine)
meta = MetaData(bind=migrate_engine)
table = Table(table_name, meta, autoload=True)
default_deleted_value = _get_default_deleted_value(table)
columns = []
for column in table.columns:
column_copy = None
if column.name != "deleted":
if isinstance(column.type, NullType):
column_copy = _get_not_supported_column(col_name_col_instance,
column.name)
else:
column_copy = column.copy()
else:
column_copy = Column('deleted', table.c.id.type,
default=default_deleted_value)
columns.append(column_copy)
def is_deleted_column_constraint(constraint):
        # NOTE(boris-42): There is no other way to check whether a
        #                 CheckConstraint is associated with the deleted
        #                 column.
if not isinstance(constraint, CheckConstraint):
return False
sqltext = str(constraint.sqltext)
return (sqltext.endswith("deleted in (0, 1)") or
sqltext.endswith("deleted IN (:deleted_1, :deleted_2)"))
constraints = []
for constraint in table.constraints:
if not is_deleted_column_constraint(constraint):
constraints.append(constraint.copy())
new_table = Table(table_name + "__tmp__", meta,
*(columns + constraints))
new_table.create()
indexes = []
for index in insp.get_indexes(table_name):
column_names = [new_table.c[c] for c in index['column_names']]
indexes.append(Index(index["name"], *column_names,
unique=index["unique"]))
ins = InsertFromSelect(new_table, table.select())
migrate_engine.execute(ins)
table.drop()
[index.create(migrate_engine) for index in indexes]
new_table.rename(table_name)
deleted = True # workaround for pyflakes
new_table.update().\
where(new_table.c.deleted == deleted).\
values(deleted=new_table.c.id).\
execute()
# NOTE(boris-42): Fix value of deleted column: False -> "" or 0.
deleted = False # workaround for pyflakes
new_table.update().\
where(new_table.c.deleted == deleted).\
values(deleted=default_deleted_value).\
execute()
def get_connect_string(backend, database, user=None, passwd=None):
"""Get database connection
Try to get a connection with a very specific set of values, if we get
these then we'll run the tests, otherwise they are skipped
"""
args = {'backend': backend,
'user': user,
'passwd': passwd,
'database': database}
if backend == 'sqlite':
template = '%(backend)s:///%(database)s'
else:
template = "%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s"
return template % args
def is_backend_avail(backend, database, user=None, passwd=None):
try:
connect_uri = get_connect_string(backend=backend,
database=database,
user=user,
passwd=passwd)
engine = sqlalchemy.create_engine(connect_uri)
connection = engine.connect()
except Exception:
# intentionally catch all to handle exceptions even if we don't
# have any backend code loaded.
return False
else:
connection.close()
engine.dispose()
return True
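# A hedged sketch of the intended use: guard backend-specific tests on
# availability. The *_citest credentials are an assumption, not a given.
#
#     if not is_backend_avail('postgresql', 'openstack_citest',
#                             user='openstack_citest',
#                             passwd='openstack_citest'):
#         raise unittest.SkipTest("PostgreSQL is not available")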
def get_db_connection_info(conn_pieces):
database = conn_pieces.path.strip('/')
loc_pieces = conn_pieces.netloc.split('@')
host = loc_pieces[1]
auth_pieces = loc_pieces[0].split(':')
user = auth_pieces[0]
password = ""
if len(auth_pieces) > 1:
password = auth_pieces[1].strip()
return (user, password, database, host)
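# A hedged usage sketch: `conn_pieces` is expected to be the result of
# urlparse on a full connection URL (urlparse on Python 2, urllib.parse on
# Python 3); the URL below is illustrative.
#
#     import urlparse
#     pieces = urlparse.urlparse("mysql://user:secret@localhost/testdb")
#     user, password, database, host = get_db_connection_info(pieces)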
|
DemocracyClub/UK-Polling-Stations
|
refs/heads/master
|
polling_stations/apps/data_importers/management/commands/import_worcester.py
|
1
|
from data_importers.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = "WOC"
addresses_name = (
"2021-03-26T11:04:20.460985/Worcester Democracy_Club__06May2021 (1).CSV"
)
stations_name = (
"2021-03-26T11:04:20.460985/Worcester Democracy_Club__06May2021 (1).CSV"
)
elections = ["2021-05-06"]
csv_delimiter = ","
def address_record_to_dict(self, record):
if record.addressline6 in [
"WR1 1HT",
"WR1 1DF",
"WR5 3EP",
"WR5 3ES",
"WR1 2AH",
"WR3 8SB",
"WR4 9BG",
"WR3 8ET",
]:
return None
return super().address_record_to_dict(record)
def station_record_to_dict(self, record):
# Polling station B3 Marquee Blessed Edward College rear car park Access via Evendine Close Worcester
# set to same location as polling station B2 Marquee Blessed Edward College rear car park Access via Evendine Close Worcester
if record.polling_place_id == "6014":
record = record._replace(polling_place_easting="385795")
record = record._replace(polling_place_northing="253608")
# Polling Station 2 Hall 1 Lyppard Hub Ankerage Green, Off Millwood Drive
# set to same postcode as Polling Station 3 Hall 1 Lyppard Hub Ankerage Green, Off Millwood Drive Worcester
if record.polling_place_id == "6016":
record = record._replace(polling_place_postcode="WR4 0DZ")
return super().station_record_to_dict(record)
|
dak/webview
|
refs/heads/master
|
test/selenium/python/Delete_Page.py
|
3
|
import unittest
from selenium import webdriver
class DeletePage(unittest.TestCase):
'''
This test will create a Page and delete it.
It makes one assumption in order for the test to work:
* Assumes the new Page is the first item in the Workspace Page list
'''
authkey = ""
pw = ""
url = ""
def setUp(self):
self.driver = webdriver.Firefox()
propfile = open('properties.ini')
items = [line.rstrip('\n') for line in propfile]
self.authkey = items[0]
self.pw = items[1]
self.url = items[2]
def tearDown(self):
self.driver.quit()
def test_page_deletion(self):
self.driver.get(self.url)
self.driver.implicitly_wait(300)
#login
authKey = self.driver.find_element_by_name('auth_key')
authKey.send_keys(self.authkey)
pw = self.driver.find_element_by_name('password')
pw.send_keys(self.pw)
signin = self.driver.find_element_by_css_selector('button.standard')
signin.click()
self.driver.implicitly_wait(300)
#add page
addbutton = self.driver.find_element_by_xpath("(//button[@type='button'])[3]")
addbutton.click()
listItem = self.driver.find_element_by_css_selector('li.menuitem.new-page')
listItem.click()
#add title to modal
modal = self.driver.find_element_by_id('new-media-modal')
titlefield = modal.find_element_by_name('title')
titlefield.send_keys('Selenium Test Page')
createbutton = self.driver.find_element_by_xpath("//button[@type='submit']")
createbutton.click()
self.driver.set_page_load_timeout(10)
#click back button
self.driver.find_element_by_css_selector('span.tab-title')
#Webdriver does something funny with the history, so have to call back() multiple times
self.driver.back()
self.driver.back()
self.driver.back()
#delete page
delete = self.driver.find_element_by_xpath('//table[2]/tbody/tr[1]/td[5]')
delete.click()
okbutton = self.driver.find_element_by_xpath("(//button[@type='button'])[6]")
okbutton.click()
if __name__ == "__main__":
unittest.main()
|
tafaRU/odoo
|
refs/heads/8.0
|
addons/hr_attendance/__openerp__.py
|
52
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Attendances',
'version': '1.1',
'category': 'Human Resources',
'description': """
This module aims to manage employees' attendances.
==================================================
Keeps account of the attendances of the employees on the basis of the
actions (Sign in/Sign out) performed by them.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/employees',
'images': ['images/hr_attendances.jpeg'],
'depends': ['hr', 'report'],
'data': [
'security/ir_rule.xml',
'security/ir.model.access.csv',
'hr_attendance_view.xml',
'hr_attendance_report.xml',
'wizard/hr_attendance_error_view.xml',
'res_config_view.xml',
'views/report_attendanceerrors.xml',
'views/hr_attendance.xml',
],
'demo': ['hr_attendance_demo.xml'],
'test': [
'test/attendance_process.yml',
'test/hr_attendance_report.yml',
],
'installable': True,
'auto_install': False,
#web
'qweb': ["static/src/xml/attendance.xml"],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
eleonrk/SickRage
|
refs/heads/master
|
lib/bs4/tests/test_htmlparser.py
|
31
|
"""Tests to ensure that the html.parser tree builder generates good
trees."""
from pdb import set_trace
import pickle
from bs4.testing import SoupTest, HTMLTreeBuilderSmokeTest
from bs4.builder import HTMLParserTreeBuilder
class HTMLParserTreeBuilderSmokeTest(SoupTest, HTMLTreeBuilderSmokeTest):
@property
def default_builder(self):
return HTMLParserTreeBuilder()
def test_namespaced_system_doctype(self):
# html.parser can't handle namespaced doctypes, so skip this one.
pass
def test_namespaced_public_doctype(self):
# html.parser can't handle namespaced doctypes, so skip this one.
pass
def test_builder_is_pickled(self):
"""Unlike most tree builders, HTMLParserTreeBuilder and will
be restored after pickling.
"""
tree = self.soup("<a><b>foo</a>")
dumped = pickle.dumps(tree, 2)
loaded = pickle.loads(dumped)
self.assertTrue(isinstance(loaded.builder, type(tree.builder)))
def test_redundant_empty_element_closing_tags(self):
self.assertSoupEquals('<br></br><br></br><br></br>', "<br/><br/><br/>")
self.assertSoupEquals('</br></br></br>', "")
|
fredkingham/blog-of-fred
|
refs/heads/master
|
django/core/serializers/pyyaml.py
|
81
|
"""
YAML serializer.
Requires PyYaml (http://pyyaml.org/), but that's checked for in __init__.
"""
from StringIO import StringIO
import decimal
import yaml
from django.db import models
from django.core.serializers.base import DeserializationError
from django.core.serializers.python import Serializer as PythonSerializer
from django.core.serializers.python import Deserializer as PythonDeserializer
class DjangoSafeDumper(yaml.SafeDumper):
def represent_decimal(self, data):
return self.represent_scalar('tag:yaml.org,2002:str', str(data))
DjangoSafeDumper.add_representer(decimal.Decimal, DjangoSafeDumper.represent_decimal)
class Serializer(PythonSerializer):
"""
Convert a queryset to YAML.
"""
internal_use_only = False
def handle_field(self, obj, field):
# A nasty special case: base YAML doesn't support serialization of time
# types (as opposed to dates or datetimes, which it does support). Since
# we want to use the "safe" serializer for better interoperability, we
# need to do something with those pesky times. Converting 'em to strings
# isn't perfect, but it's better than a "!!python/time" type which would
# halt deserialization under any other language.
if isinstance(field, models.TimeField) and getattr(obj, field.name) is not None:
self._current[field.name] = str(getattr(obj, field.name))
else:
super(Serializer, self).handle_field(obj, field)
def end_serialization(self):
yaml.dump(self.objects, self.stream, Dumper=DjangoSafeDumper, **self.options)
def getvalue(self):
return self.stream.getvalue()
def Deserializer(stream_or_string, **options):
"""
Deserialize a stream or string of YAML data.
"""
if isinstance(stream_or_string, basestring):
stream = StringIO(stream_or_string)
else:
stream = stream_or_string
try:
for obj in PythonDeserializer(yaml.safe_load(stream), **options):
yield obj
except GeneratorExit:
raise
except Exception, e:
# Map to deserializer error
raise DeserializationError(e)
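# A hedged usage sketch via Django's serializer registry, which is how this
# module is normally reached (`SomeModel` is an illustrative placeholder):
#
#     from django.core import serializers
#     text = serializers.serialize('yaml', SomeModel.objects.all())
#     for deserialized in serializers.deserialize('yaml', text):
#         deserialized.save()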
|
foursquare/commons-old
|
refs/heads/master
|
src/python/twitter/pants/tasks/python/__init__.py
|
2
|
# ==================================================================================================
# Copyright 2012 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
|
rotofly/odoo
|
refs/heads/master
|
addons/hr_attendance/__init__.py
|
434
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_attendance
import wizard
import report
import res_config
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
vanstoner/xlr-sonatype-nexus-iq-plugin
|
refs/heads/master
|
src/main/resources/nexusiq/evaluateBinary.py
|
1
|
#
# THIS CODE AND INFORMATION ARE PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF MERCHANTABILITY AND/OR FITNESS
# FOR A PARTICULAR PURPOSE. THIS CODE AND INFORMATION ARE NOT SUPPORTED BY XEBIALABS.
#
import sys
import json
import urllib2
import base64
import os.path
import shutil
import uuid
from nexusiq.LocalCLI import Localcliscript
directoryCreated = False
baseDirectory = "work"
def createSafeDirectory():
global directoryCreated
try:
        # Place the download in a unique subdirectory of our own work
        # directory for safety
safeDirectory = os.path.join(baseDirectory,'nexusiq',str(uuid.uuid1()))
print safeDirectory
os.makedirs(safeDirectory)
print "Made directory [%s]" % safeDirectory
directoryCreated = safeDirectory
return safeDirectory
except OSError, e:
if e.errno != 17:
raise
# time.sleep might help here
print "Directory always exists [%s]" % e
pass
def cleanupDirectory():
print "Cleaning up [%s]" % directoryCreated
shutil.rmtree(directoryCreated)
def retrieveRemoteFile():
request = urllib2.Request(binaryLocation)
artifactName = binaryLocation.rsplit('/', 1)[-1]
if ( locationUsername ):
base64string = base64.b64encode('%s:%s' % (locationUsername, locationPassword))
request.add_header("Authorization", "Basic %s" % base64string)
dir = createSafeDirectory()
print "downloading with urllib2"
f = urllib2.urlopen(request)
print f.code
with open(os.path.join(dir, artifactName), "wb") as code:
code.write(f.read())
print "Artifact written to [%s/%s]" % (dir, artifactName)
return os.path.join(dir, artifactName)
# See if the input file is local or from a URL
if binaryLocation.startswith('http'):
# Grab it from destination
print "Retrieving file and temporarily storing it in [%s]" % outputPath
binaryLocalPath = retrieveRemoteFile()
else:
# File is already local
binaryLocalPath = binaryLocation
# Now we have the file run it into NexusIQ for evaluation
print "Running [Localcliscript(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)]" % (cli['cliJar'], cli['url'], cli['username'], cli['password'], cli['proxyHost'], cli['proxyUsername'], cli['proxyPassword'], nexusiqApp, nexusiqStage, binaryLocalPath)
cliScript = Localcliscript(cli['cliJar'], cli['url'], cli['username'], cli['password'], cli['proxyHost'], cli['proxyUsername'], cli['proxyPassword'], nexusiqApp, nexusiqStage, binaryLocalPath)
exitCode = 1
try:
exitCode = cliScript.execute()
output = cliScript.getStdout()
err = cliScript.getStderr()
if (exitCode == 0 ):
print "Finished normally"
print output
else:
print
print "### Exit code "
print exitCode
print
print "### Output:"
print output
print "### Error stream:"
print err
print
print "----"
finally:
if (directoryCreated) :
print "Cleaning up my mess"
cleanupDirectory()
sys.exit(exitCode)
|
mne-tools/mne-tools.github.io
|
refs/heads/main
|
0.21/_downloads/36e789cbf04a4ef0f75e81dbcd49ca2a/plot_20_events_from_raw.py
|
3
|
# -*- coding: utf-8 -*-
"""
.. _tut-events-vs-annotations:
Parsing events from raw data
============================
This tutorial describes how to read experimental events from raw recordings,
and how to convert between the two different representations of events within
MNE-Python (Events arrays and Annotations objects).
.. contents:: Page contents
:local:
:depth: 1
In the :ref:`introductory tutorial <overview-tut-events-section>` we saw an
example of reading experimental events from a :term:`"STIM" channel <stim
channel>`; here we'll discuss :term:`events` and :term:`annotations` more
broadly, give more detailed information about reading from STIM channels, and
give an example of reading events that are in a marker file or included in the
data file as an embedded array. The tutorials :ref:`tut-event-arrays` and
:ref:`tut-annotate-raw` discuss how to plot, combine, load, save, and
export :term:`events` and :class:`~mne.Annotations` (respectively), and the
latter tutorial also covers interactive annotation of :class:`~mne.io.Raw`
objects.
We'll begin by loading the Python modules we need, and loading the same
:ref:`example data <sample-dataset>` we used in the :ref:`introductory tutorial
<tut-overview>`, but to save memory we'll crop the :class:`~mne.io.Raw` object
to just 60 seconds before loading it into RAM:
"""
import os
import numpy as np
import mne
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file)
raw.crop(tmax=60).load_data()
###############################################################################
# The Events and Annotations data structures
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Generally speaking, both the Events and :class:`~mne.Annotations` data
# structures serve the same purpose: they provide a mapping between times
# during an EEG/MEG recording and a description of what happened at those
# times. In other words, they associate a *when* with a *what*. The main
# differences are:
#
# 1. **Units**: the Events data structure represents the *when* in terms of
# samples, whereas the :class:`~mne.Annotations` data structure represents
# the *when* in seconds.
# 2. **Limits on the description**: the Events data structure represents the
# *what* as an integer "Event ID" code, whereas the
# :class:`~mne.Annotations` data structure represents the *what* as a
# string.
# 3. **How duration is encoded**: Events in an Event array do not have a
# duration (though it is possible to represent duration with pairs of
# onset/offset events within an Events array), whereas each element of an
# :class:`~mne.Annotations` object necessarily includes a duration (though
# the duration can be zero if an instantaneous event is desired).
# 4. **Internal representation**: Events are stored as an ordinary
# :class:`NumPy array <numpy.ndarray>`, whereas :class:`~mne.Annotations` is
# a :class:`list`-like class defined in MNE-Python.
#
#
# .. _stim-channel-defined:
#
# What is a STIM channel?
# ^^^^^^^^^^^^^^^^^^^^^^^
#
# A :term:`stim channel` (short for "stimulus channel") is a channel that does
# not receive signals from an EEG, MEG, or other sensor. Instead, STIM channels
# record voltages (usually short, rectangular DC pulses of fixed magnitudes
# sent from the experiment-controlling computer) that are time-locked to
# experimental events, such as the onset of a stimulus or a button-press
# response by the subject (those pulses are sometimes called `TTL`_ pulses,
# event pulses, trigger signals, or just "triggers"). In other cases, these
# pulses may not be strictly time-locked to an experimental event, but instead
# may occur in between trials to indicate the type of stimulus (or experimental
# condition) that is about to occur on the upcoming trial.
#
# The DC pulses may be all on one STIM channel (in which case different
# experimental events or trial types are encoded as different voltage
# magnitudes), or they may be spread across several channels, in which case the
# channel(s) on which the pulse(s) occur can be used to encode different events
# or conditions. Even on systems with multiple STIM channels, there is often
# one channel that records a weighted sum of the other STIM channels, in such a
# way that voltage levels on that channel can be unambiguously decoded as
# particular event types. On older Neuromag systems (such as that used to
# record the sample data) this "summation channel" was typically ``STI 014``;
# on newer systems it is more commonly ``STI101``. You can see the STIM
# channels in the raw data file here:
raw.copy().pick_types(meg=False, stim=True).plot(start=3, duration=6)
###############################################################################
# You can see that ``STI 014`` (the summation channel) contains pulses of
# different magnitudes whereas pulses on other channels have consistent
# magnitudes. You can also see that every time there is a pulse on one of the
# other STIM channels, there is a corresponding pulse on ``STI 014``.
#
# .. TODO: somewhere in prev. section, link out to a table of which systems
# have STIM channels vs. which have marker files or embedded event arrays
# (once such a table has been created).
#
#
# Converting a STIM channel signal to an Events array
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# If your data has events recorded on a STIM channel, you can convert them into
# an events array using :func:`mne.find_events`. The sample number of the onset
# (or offset) of each pulse is recorded as the event time, the pulse magnitudes
# are converted into integers, and these pairs of sample numbers plus integer
# codes are stored in :class:`NumPy arrays <numpy.ndarray>` (usually called
# "the events array" or just "the events"). In its simplest form, the function
# requires only the :class:`~mne.io.Raw` object, and the name of the channel(s)
# from which to read events:
events = mne.find_events(raw, stim_channel='STI 014')
print(events[:5]) # show the first 5
###############################################################################
# .. sidebar:: The middle column of the Events array
#
# MNE-Python events are actually *three* values: in between the sample
# number and the integer event code is a value indicating what the event
# code was on the immediately preceding sample. In practice, that value is
# almost always ``0``, but it can be used to detect the *endpoint* of an
# event whose duration is longer than one sample. See the documentation of
# :func:`mne.find_events` for more details.
#
# If you don't provide the name of a STIM channel, :func:`~mne.find_events`
# will first look for MNE-Python :ref:`config variables <tut-configure-mne>`
# for variables ``MNE_STIM_CHANNEL``, ``MNE_STIM_CHANNEL_1``, etc. If those are
# not found, channels ``STI 014`` and ``STI101`` are tried, followed by the
# first channel with type "STIM" present in ``raw.ch_names``. If you regularly
# work with data from several different MEG systems with different STIM channel
# names, setting the ``MNE_STIM_CHANNEL`` config variable may not be very
# useful, but for researchers whose data is all from a single system it can be
# a time-saver to configure that variable once and then forget about it.
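# (A hedged aside: that one-time setup is just, e.g.,
# ``mne.set_config('MNE_STIM_CHANNEL', 'STI 014')``; the channel name here
# is illustrative.)
#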
#
# :func:`~mne.find_events` has several options, including options for aligning
# events to the onset or offset of the STIM channel pulses, setting the minimum
# pulse duration, and handling of consecutive pulses (with no return to zero
# between them). For example, you can effectively encode event duration by
# passing ``output='step'`` to :func:`mne.find_events`; see the documentation
# of :func:`~mne.find_events` for details. More information on working with
# events arrays (including how to plot, combine, load, and save event arrays)
# can be found in the tutorial :ref:`tut-event-arrays`.
#
#
# Reading embedded events as Annotations
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Some EEG/MEG systems generate files where events are stored in a separate
# data array rather than as pulses on one or more STIM channels. For example,
# the EEGLAB format stores events as a collection of arrays in the :file:`.set`
# file. When reading those files, MNE-Python will automatically convert the
# stored events into an :class:`~mne.Annotations` object and store it as the
# :attr:`~mne.io.Raw.annotations` attribute of the :class:`~mne.io.Raw` object:
testing_data_folder = mne.datasets.testing.data_path()
eeglab_raw_file = os.path.join(testing_data_folder, 'EEGLAB', 'test_raw.set')
eeglab_raw = mne.io.read_raw_eeglab(eeglab_raw_file)
print(eeglab_raw.annotations)
###############################################################################
# The core data within an :class:`~mne.Annotations` object is accessible
# through three of its attributes: ``onset``, ``duration``, and
# ``description``. Here we can see that there were 154 events stored in the
# EEGLAB file, they all had a duration of zero seconds, there were two
# different types of events, and the first event occurred about 1 second after
# the recording began:
print(len(eeglab_raw.annotations))
print(set(eeglab_raw.annotations.duration))
print(set(eeglab_raw.annotations.description))
print(eeglab_raw.annotations.onset[0])
###############################################################################
# More information on working with :class:`~mne.Annotations` objects, including
# how to add annotations to :class:`~mne.io.Raw` objects interactively, and how
# to plot, concatenate, load, save, and export :class:`~mne.Annotations`
# objects can be found in the tutorial :ref:`tut-annotate-raw`.
#
#
# Converting between Events arrays and Annotations objects
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Once your experimental events are read into MNE-Python (as either an Events
# array or an :class:`~mne.Annotations` object), you can easily convert between
# the two formats as needed. You might do this because, e.g., an Events array
# is needed for epoching continuous data, or because you want to take advantage
# of the "annotation-aware" capability of some functions, which automatically
# omit spans of data if they overlap with certain annotations.
#
# To convert an :class:`~mne.Annotations` object to an Events array, use the
# function :func:`mne.events_from_annotations` on the :class:`~mne.io.Raw` file
# containing the annotations. This function will assign an integer Event ID to
# each unique element of ``raw.annotations.description``, and will return the
# mapping of descriptions to integer Event IDs along with the derived Event
# array. By default, one event will be created at the onset of each annotation;
# this can be modified via the ``chunk_duration`` parameter of
# :func:`~mne.events_from_annotations` to create equally spaced events within
# each annotation span (see :ref:`chunk-duration`, below, or see
# :ref:`fixed-length-events` for direct creation of an Events array of
# equally-spaced events).
events_from_annot, event_dict = mne.events_from_annotations(eeglab_raw)
print(event_dict)
print(events_from_annot[:5])
###############################################################################
# If you want to control which integers are mapped to each unique description
# value, you can pass a :class:`dict` specifying the mapping as the
# ``event_id`` parameter of :func:`~mne.events_from_annotations`; this
# :class:`dict` will be returned unmodified as the ``event_dict``.
#
# .. TODO add this when the other tutorial is nailed down:
# Note that this ``event_dict`` can be used when creating
# :class:`~mne.Epochs` from :class:`~mne.io.Raw` objects, as demonstrated
# in :doc:`epoching_tutorial_whatever_its_name_is`.
custom_mapping = {'rt': 77, 'square': 42}
(events_from_annot,
event_dict) = mne.events_from_annotations(eeglab_raw, event_id=custom_mapping)
print(event_dict)
print(events_from_annot[:5])
###############################################################################
# To make the opposite conversion (from Events array to
# :class:`~mne.Annotations` object), you can create a mapping from integer
# Event ID to string descriptions, and use the :class:`~mne.Annotations`
# constructor to create the :class:`~mne.Annotations` object, and use the
# :meth:`~mne.io.Raw.set_annotations` method to add the annotations to the
# :class:`~mne.io.Raw` object. Because the :ref:`sample data <sample-dataset>`
# was recorded on a Neuromag system (where sample numbering starts when the
# acquisition system is initiated, not when the *recording* is initiated), we
# also need to pass in the ``orig_time`` parameter so that the onsets are
# properly aligned relative to the start of recording:
mapping = {1: 'auditory/left', 2: 'auditory/right', 3: 'visual/left',
4: 'visual/right', 5: 'smiley', 32: 'buttonpress'}
onsets = events[:, 0] / raw.info['sfreq']
durations = np.zeros_like(onsets) # assumes instantaneous events
descriptions = [mapping[event_id] for event_id in events[:, 2]]
annot_from_events = mne.Annotations(onset=onsets, duration=durations,
description=descriptions,
orig_time=raw.info['meas_date'])
raw.set_annotations(annot_from_events)
###############################################################################
# Now, the annotations will appear automatically when plotting the raw data,
# and will be color-coded by their label value:
raw.plot(start=5, duration=5)
###############################################################################
# .. _`chunk-duration`:
#
# Making multiple events per annotation
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# As mentioned above, you can generate equally-spaced events from an
# :class:`~mne.Annotations` object using the ``chunk_duration`` parameter of
# :func:`~mne.events_from_annotations`. For example, suppose we have an
# annotation in our :class:`~mne.io.Raw` object indicating when the subject was
# in REM sleep, and we want to perform a resting-state analysis on those spans
# of data. We can create an Events array with a series of equally-spaced events
# within each "REM" span, and then use those events to generate (potentially
# overlapping) epochs that we can analyze further.
# create the REM annotations
rem_annot = mne.Annotations(onset=[5, 41],
duration=[16, 11],
description=['REM'] * 2)
raw.set_annotations(rem_annot)
(rem_events,
rem_event_dict) = mne.events_from_annotations(raw, chunk_duration=1.5)
###############################################################################
# Now we can check that our events indeed fall in the ranges 5-21 seconds and
# 41-52 seconds, and are ~1.5 seconds apart (modulo some jitter due to the
# sampling frequency). Here are the event times rounded to the nearest
# millisecond:
print(np.round((rem_events[:, 0] - raw.first_samp) / raw.info['sfreq'], 3))
###############################################################################
# Other examples of resting-state analysis can be found in the online
# documentation for :func:`mne.make_fixed_length_events`, such as
# :doc:`../../auto_examples/connectivity/plot_mne_inverse_envelope_correlation`.
#
# .. LINKS
#
# .. _`TTL`: https://en.wikipedia.org/wiki/Transistor%E2%80%93transistor_logic
|
gitreset/Data-Science-45min-Intros
|
refs/heads/master
|
python-logging-201/dog.py
|
26
|
import logging
from pet import Pet
logger = logging.getLogger("pet_world." + __name__)
class Dog(Pet):
def __init__(self, **kwargs):
# default values
self.name = "Fido"
self.word = "Arf!"
self.legs = 4
super(Dog,self).__init__(**kwargs)
self.logger = logger
self.logger.info("{} is ready for play!".format(self.name))
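# A hedged usage sketch, assuming the sibling `pet.py` module defines Pet
# and that the caller configures the "pet_world" logger hierarchy:
#
#     import logging
#     logging.basicConfig(level=logging.INFO)
#     Dog(name="Rex", word="Woof!")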
|
hughperkins/kgsgo-dataset-preprocessor
|
refs/heads/master
|
thirdparty/future/src/past/types/__init__.py
|
62
|
"""
Forward-ports of types from Python 2 for use with Python 3:
- ``basestring``: equivalent to ``(str, bytes)`` in ``isinstance`` checks
- ``dict``: with list-producing .keys() etc. methods
- ``str``: bytes-like, but iterating over them doesn't produce integers
- ``long``: alias of Py3 int with ``L`` suffix in the ``repr``
- ``unicode``: alias of Py3 str with ``u`` prefix in the ``repr``
"""
from past import utils
if utils.PY2:
import __builtin__
basestring = __builtin__.basestring
dict = __builtin__.dict
str = __builtin__.str
long = __builtin__.long
unicode = __builtin__.unicode
__all__ = []
else:
from .basestring import basestring
from .olddict import olddict
from .oldstr import oldstr
long = int
unicode = str
# from .unicode import unicode
__all__ = ['basestring', 'olddict', 'oldstr', 'long', 'unicode']
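# A hedged example of the 2/3-compatible check these types enable; grounded
# in the module docstring above, which says ``basestring`` is equivalent to
# ``(str, bytes)`` in ``isinstance`` checks:
#
#     from past.types import basestring
#     assert isinstance('text', basestring)
#     assert isinstance(b'bytes', basestring)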
|
jonathon-love/snapcraft
|
refs/heads/master
|
snapcraft/tests/test_plugin_make.py
|
2
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from unittest import mock
from testtools.matchers import HasLength
import snapcraft
from snapcraft import tests
from snapcraft.plugins import make
class MakePluginTestCase(tests.TestCase):
def setUp(self):
super().setUp()
class Options:
makefile = None
make_parameters = []
make_install_var = 'DESTDIR'
disable_parallel = False
artifacts = []
self.options = Options()
self.project_options = snapcraft.ProjectOptions()
patcher = mock.patch('snapcraft.repo.Ubuntu')
self.ubuntu_mock = patcher.start()
self.addCleanup(patcher.stop)
def test_schema(self):
schema = make.MakePlugin.schema()
properties = schema['properties']
self.assertTrue('makefile' in properties,
'Expected "makefile" to be included in properties')
self.assertTrue(
'make-parameters' in properties,
'Expected "make-parameters" to be included in properties')
self.assertTrue(
'make-install-var' in properties,
'Expected "make-install-var" to be included in properties')
makefile = properties['makefile']
self.assertTrue('type' in makefile,
'Expected "type" to be included in "makefile"')
makefile_type = makefile['type']
self.assertEqual(makefile_type, 'string',
'Expected "makefile" "type" to be "string", but it '
'was "{}"'.format(makefile_type))
make_parameters = properties['make-parameters']
self.assertTrue('type' in make_parameters,
'Expected "type" to be included in "make-parameters"')
make_parameters_type = make_parameters['type']
self.assertEqual(
make_parameters_type, 'array',
'Expected "make-parameters" "type" to be "array", but it '
'was "{}"'.format(make_parameters_type))
make_install_var = properties['make-install-var']
self.assertTrue('type' in make_install_var,
'Expected "type" to be included in "make-install-var"')
make_install_var_type = make_install_var['type']
self.assertEqual(
make_install_var_type, 'string',
'Expected "make-install-var" "type" to be "string", but it '
'was "{}"'.format(make_install_var_type))
make_install_var_default = make_install_var['default']
self.assertEqual(
make_install_var_default, 'DESTDIR',
'Expected "make-install-var" "default" to be "DESTDIR", but it '
'was "{}"'.format(make_install_var_default))
def test_get_build_properties(self):
expected_build_properties = ['makefile', 'make-parameters',
'make-install-var', 'artifacts']
resulting_build_properties = make.MakePlugin.get_build_properties()
self.assertThat(resulting_build_properties,
HasLength(len(expected_build_properties)))
for property in expected_build_properties:
self.assertIn(property, resulting_build_properties)
@mock.patch.object(make.MakePlugin, 'run')
def test_build(self, run_mock):
plugin = make.MakePlugin('test-part', self.options,
self.project_options)
os.makedirs(plugin.sourcedir)
plugin.build()
self.assertEqual(2, run_mock.call_count)
run_mock.assert_has_calls([
mock.call(['make', '-j2'], env=None),
mock.call(['make', 'install',
'DESTDIR={}'.format(plugin.installdir)], env=None)
])
@mock.patch.object(make.MakePlugin, 'run')
def test_build_disable_parallel(self, run_mock):
self.options.disable_parallel = True
plugin = make.MakePlugin('test-part', self.options,
self.project_options)
os.makedirs(plugin.sourcedir)
plugin.build()
self.assertEqual(2, run_mock.call_count)
run_mock.assert_has_calls([
mock.call(['make', '-j1'], env=None),
mock.call(['make', 'install',
'DESTDIR={}'.format(plugin.installdir)], env=None)
])
@mock.patch.object(make.MakePlugin, 'run')
def test_build_makefile(self, run_mock):
self.options.makefile = 'makefile.linux'
plugin = make.MakePlugin('test-part', self.options,
self.project_options)
os.makedirs(plugin.sourcedir)
plugin.build()
self.assertEqual(2, run_mock.call_count)
run_mock.assert_has_calls([
mock.call(['make', '-f', 'makefile.linux', '-j2'], env=None),
mock.call(['make', '-f', 'makefile.linux', 'install',
'DESTDIR={}'.format(plugin.installdir)], env=None)
])
@mock.patch.object(make.MakePlugin, 'run')
def test_build_install_var(self, run_mock):
self.options.make_install_var = 'PREFIX'
plugin = make.MakePlugin('test-part', self.options,
self.project_options)
os.makedirs(plugin.sourcedir)
plugin.build()
self.assertEqual(2, run_mock.call_count)
run_mock.assert_has_calls([
mock.call(['make', '-j2'], env=None),
mock.call(['make', 'install',
'PREFIX={}'.format(plugin.installdir)], env=None)
])
@mock.patch.object(make.MakePlugin, 'run')
@mock.patch('snapcraft.file_utils.link_or_copy_tree')
@mock.patch('snapcraft.file_utils.link_or_copy')
def test_build_artifacts(self, link_or_copy_mock,
link_or_copy_tree_mock, run_mock):
self.options.artifacts = ['dir_artifact', 'file_artifact']
plugin = make.MakePlugin('test-part', self.options,
self.project_options)
os.makedirs(os.path.join(plugin.builddir, 'dir_artifact'))
plugin.build()
self.assertEqual(1, run_mock.call_count)
run_mock.assert_has_calls([
mock.call(['make', '-j2'], env=None),
])
self.assertEqual(1, link_or_copy_mock.call_count)
link_or_copy_mock.assert_has_calls([
mock.call(
os.path.join(plugin.builddir, 'file_artifact'),
os.path.join(plugin.installdir, 'file_artifact'),
)])
self.assertEqual(1, link_or_copy_tree_mock.call_count)
link_or_copy_tree_mock.assert_has_calls([
mock.call(
os.path.join(plugin.builddir, 'dir_artifact'),
os.path.join(plugin.installdir, 'dir_artifact'),
)])
@mock.patch.object(make.MakePlugin, 'run')
def test_make_with_env(self, run_mock):
plugin = make.MakePlugin('test-part', self.options,
self.project_options)
os.makedirs(plugin.sourcedir)
env = {'foo': 'bar'}
plugin.make(env=env)
self.assertEqual(2, run_mock.call_count)
run_mock.assert_has_calls([
mock.call(['make', '-j2'], env=env),
mock.call(['make', 'install',
'DESTDIR={}'.format(plugin.installdir)], env=env)
])
|
kaedroho/django
|
refs/heads/master
|
tests/nested_foreign_keys/tests.py
|
62
|
from django.test import TestCase
from .models import (
Event, Movie, Package, PackageNullFK, Person, Screening, ScreeningNullFK,
)
# These are tests for #16715. The basic scheme is always the same: 3 models with
# 2 relations. The first relation may be null, while the second is non-nullable.
# In some cases, Django would pick the wrong join type for the second relation,
# resulting in missing objects in the queryset.
#
# Model A
# | (Relation A/B : nullable)
# Model B
# | (Relation B/C : non-nullable)
# Model C
#
# Because of the possibility of NULL rows resulting from the LEFT OUTER JOIN
# between Model A and Model B (i.e. instances of A without reference to B),
# the second join must also be LEFT OUTER JOIN, so that we do not ignore
# instances of A that do not reference B.
#
# Relation A/B can either be an explicit foreign key or an implicit reverse
# relation such as introduced by one-to-one relations (through multi-table
# inheritance).
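# A hedged sketch of the join shape the ORM must emit for the
# Event -> Screening -> Movie traversal (SQL and column names illustrative):
#
#   SELECT ... FROM event
#   LEFT OUTER JOIN screening ON screening.event_ptr_id = event.id
#   LEFT OUTER JOIN movie ON movie.id = screening.movie_id
#
# An INNER JOIN on the second hop would silently drop events that have no
# screening, which is exactly the regression #16715 guards against.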
class NestedForeignKeysTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.director = Person.objects.create(name='Terry Gilliam / Terry Jones')
cls.movie = Movie.objects.create(title='Monty Python and the Holy Grail', director=cls.director)
# This test failed in #16715 because in some cases INNER JOIN was selected
# for the second foreign key relation instead of LEFT OUTER JOIN.
def test_inheritance(self):
Event.objects.create()
Screening.objects.create(movie=self.movie)
self.assertEqual(len(Event.objects.all()), 2)
self.assertEqual(len(Event.objects.select_related('screening')), 2)
# This failed.
self.assertEqual(len(Event.objects.select_related('screening__movie')), 2)
self.assertEqual(len(Event.objects.values()), 2)
self.assertEqual(len(Event.objects.values('screening__pk')), 2)
self.assertEqual(len(Event.objects.values('screening__movie__pk')), 2)
self.assertEqual(len(Event.objects.values('screening__movie__title')), 2)
# This failed.
self.assertEqual(len(Event.objects.values('screening__movie__pk', 'screening__movie__title')), 2)
# Simple filter/exclude queries for good measure.
self.assertEqual(Event.objects.filter(screening__movie=self.movie).count(), 1)
self.assertEqual(Event.objects.exclude(screening__movie=self.movie).count(), 1)
# These all work because the second foreign key in the chain has null=True.
def test_inheritance_null_FK(self):
Event.objects.create()
ScreeningNullFK.objects.create(movie=None)
ScreeningNullFK.objects.create(movie=self.movie)
self.assertEqual(len(Event.objects.all()), 3)
self.assertEqual(len(Event.objects.select_related('screeningnullfk')), 3)
self.assertEqual(len(Event.objects.select_related('screeningnullfk__movie')), 3)
self.assertEqual(len(Event.objects.values()), 3)
self.assertEqual(len(Event.objects.values('screeningnullfk__pk')), 3)
self.assertEqual(len(Event.objects.values('screeningnullfk__movie__pk')), 3)
self.assertEqual(len(Event.objects.values('screeningnullfk__movie__title')), 3)
self.assertEqual(len(Event.objects.values('screeningnullfk__movie__pk', 'screeningnullfk__movie__title')), 3)
self.assertEqual(Event.objects.filter(screeningnullfk__movie=self.movie).count(), 1)
self.assertEqual(Event.objects.exclude(screeningnullfk__movie=self.movie).count(), 2)
def test_null_exclude(self):
screening = ScreeningNullFK.objects.create(movie=None)
ScreeningNullFK.objects.create(movie=self.movie)
self.assertEqual(
list(ScreeningNullFK.objects.exclude(movie__id=self.movie.pk)),
[screening])
# This test failed in #16715 because in some cases INNER JOIN was selected
# for the second foreign key relation instead of LEFT OUTER JOIN.
def test_explicit_ForeignKey(self):
Package.objects.create()
screening = Screening.objects.create(movie=self.movie)
Package.objects.create(screening=screening)
self.assertEqual(len(Package.objects.all()), 2)
self.assertEqual(len(Package.objects.select_related('screening')), 2)
self.assertEqual(len(Package.objects.select_related('screening__movie')), 2)
self.assertEqual(len(Package.objects.values()), 2)
self.assertEqual(len(Package.objects.values('screening__pk')), 2)
self.assertEqual(len(Package.objects.values('screening__movie__pk')), 2)
self.assertEqual(len(Package.objects.values('screening__movie__title')), 2)
# This failed.
self.assertEqual(len(Package.objects.values('screening__movie__pk', 'screening__movie__title')), 2)
self.assertEqual(Package.objects.filter(screening__movie=self.movie).count(), 1)
self.assertEqual(Package.objects.exclude(screening__movie=self.movie).count(), 1)
# These all work because the second foreign key in the chain has null=True.
def test_explicit_ForeignKey_NullFK(self):
PackageNullFK.objects.create()
screening = ScreeningNullFK.objects.create(movie=None)
screening_with_movie = ScreeningNullFK.objects.create(movie=self.movie)
PackageNullFK.objects.create(screening=screening)
PackageNullFK.objects.create(screening=screening_with_movie)
self.assertEqual(len(PackageNullFK.objects.all()), 3)
self.assertEqual(len(PackageNullFK.objects.select_related('screening')), 3)
self.assertEqual(len(PackageNullFK.objects.select_related('screening__movie')), 3)
self.assertEqual(len(PackageNullFK.objects.values()), 3)
self.assertEqual(len(PackageNullFK.objects.values('screening__pk')), 3)
self.assertEqual(len(PackageNullFK.objects.values('screening__movie__pk')), 3)
self.assertEqual(len(PackageNullFK.objects.values('screening__movie__title')), 3)
self.assertEqual(len(PackageNullFK.objects.values('screening__movie__pk', 'screening__movie__title')), 3)
self.assertEqual(PackageNullFK.objects.filter(screening__movie=self.movie).count(), 1)
self.assertEqual(PackageNullFK.objects.exclude(screening__movie=self.movie).count(), 2)
# Some additional tests for #16715. The only difference is the depth of the
# nesting as we now use 4 models instead of 3 (and thus 3 relations). This
# checks if promotion of join types works for deeper nesting too.
class DeeplyNestedForeignKeysTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.director = Person.objects.create(name='Terry Gilliam / Terry Jones')
cls.movie = Movie.objects.create(title='Monty Python and the Holy Grail', director=cls.director)
def test_inheritance(self):
Event.objects.create()
Screening.objects.create(movie=self.movie)
self.assertEqual(len(Event.objects.all()), 2)
self.assertEqual(len(Event.objects.select_related('screening__movie__director')), 2)
self.assertEqual(len(Event.objects.values()), 2)
self.assertEqual(len(Event.objects.values('screening__movie__director__pk')), 2)
self.assertEqual(len(Event.objects.values('screening__movie__director__name')), 2)
self.assertEqual(
len(Event.objects.values('screening__movie__director__pk', 'screening__movie__director__name')),
2
)
self.assertEqual(len(Event.objects.values('screening__movie__pk', 'screening__movie__director__pk')), 2)
self.assertEqual(len(Event.objects.values('screening__movie__pk', 'screening__movie__director__name')), 2)
self.assertEqual(len(Event.objects.values('screening__movie__title', 'screening__movie__director__pk')), 2)
self.assertEqual(len(Event.objects.values('screening__movie__title', 'screening__movie__director__name')), 2)
self.assertEqual(Event.objects.filter(screening__movie__director=self.director).count(), 1)
self.assertEqual(Event.objects.exclude(screening__movie__director=self.director).count(), 1)
def test_explicit_ForeignKey(self):
Package.objects.create()
screening = Screening.objects.create(movie=self.movie)
Package.objects.create(screening=screening)
self.assertEqual(len(Package.objects.all()), 2)
self.assertEqual(len(Package.objects.select_related('screening__movie__director')), 2)
self.assertEqual(len(Package.objects.values()), 2)
self.assertEqual(len(Package.objects.values('screening__movie__director__pk')), 2)
self.assertEqual(len(Package.objects.values('screening__movie__director__name')), 2)
self.assertEqual(
len(Package.objects.values('screening__movie__director__pk', 'screening__movie__director__name')),
2
)
self.assertEqual(len(Package.objects.values('screening__movie__pk', 'screening__movie__director__pk')), 2)
self.assertEqual(len(Package.objects.values('screening__movie__pk', 'screening__movie__director__name')), 2)
self.assertEqual(len(Package.objects.values('screening__movie__title', 'screening__movie__director__pk')), 2)
self.assertEqual(len(Package.objects.values('screening__movie__title', 'screening__movie__director__name')), 2)
self.assertEqual(Package.objects.filter(screening__movie__director=self.director).count(), 1)
self.assertEqual(Package.objects.exclude(screening__movie__director=self.director).count(), 1)
|
google/picatrix
|
refs/heads/main
|
picatrix/notebook_init.py
|
1
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Initialization module for picatrix magics.
When starting a new notebook using picatrix it is enough to do
from picatrix import notebook_init
notebook_init.init()
And that would register magics and initialize the notebook to be able
to take advantage of picatrix magics and helper functions.
"""
# pylint: disable=unused-import
from picatrix import helpers
from picatrix import magics
from picatrix.lib import state
def init():
"""Initialize the notebook."""
# Initialize the state object.
_ = state.state()
|
nubark/odoo
|
refs/heads/9.0
|
addons/l10n_at/migrations/9.0.2.0/post-migrate_tags_on_taxes.py
|
536
|
from openerp.modules.registry import RegistryManager
def migrate(cr, version):
registry = RegistryManager.get(cr.dbname)
from openerp.addons.account.models.chart_template import migrate_tags_on_taxes
migrate_tags_on_taxes(cr, registry)
|
dreamsxin/kbengine
|
refs/heads/master
|
kbe/src/lib/python/Lib/test/test_setcomps.py
|
201
|
doctests = """
########### Tests mostly copied from test_listcomps.py ############
Test simple loop with conditional
>>> sum({i*i for i in range(100) if i&1 == 1})
166650
Test simple case
>>> {2*y + x + 1 for x in (0,) for y in (1,)}
{3}
Test simple nesting
>>> list(sorted({(i,j) for i in range(3) for j in range(4)}))
[(0, 0), (0, 1), (0, 2), (0, 3), (1, 0), (1, 1), (1, 2), (1, 3), (2, 0), (2, 1), (2, 2), (2, 3)]
Test nesting with the inner expression dependent on the outer
>>> list(sorted({(i,j) for i in range(4) for j in range(i)}))
[(1, 0), (2, 0), (2, 1), (3, 0), (3, 1), (3, 2)]
Make sure the induction variable is not exposed
>>> i = 20
>>> sum({i*i for i in range(100)})
328350
>>> i
20
Verify that syntax errors are raised for setcomps used as lvalues
>>> {y for y in (1,2)} = 10 # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
SyntaxError: ...
>>> {y for y in (1,2)} += 10 # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
SyntaxError: ...
Make a nested set comprehension that acts like set(range())
>>> def srange(n):
... return {i for i in range(n)}
>>> list(sorted(srange(10)))
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
Same again, only as a lambda expression instead of a function definition
>>> lrange = lambda n: {i for i in range(n)}
>>> list(sorted(lrange(10)))
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
Generators can call other generators:
>>> def grange(n):
... for x in {i for i in range(n)}:
... yield x
>>> list(sorted(grange(5)))
[0, 1, 2, 3, 4]
Make sure that None is a valid return value
>>> {None for i in range(10)}
{None}
########### Tests for various scoping corner cases ############
Return lambdas that use the iteration variable as a default argument
>>> items = {(lambda i=i: i) for i in range(5)}
>>> {x() for x in items} == set(range(5))
True
Same again, only this time as a closure variable
>>> items = {(lambda: i) for i in range(5)}
>>> {x() for x in items}
{4}
Another way to test that the iteration variable is local to the set comp
>>> items = {(lambda: i) for i in range(5)}
>>> i = 20
>>> {x() for x in items}
{4}
And confirm that a closure can jump over the set comp scope
>>> items = {(lambda: y) for i in range(5)}
>>> y = 2
>>> {x() for x in items}
{2}
We also repeat each of the above scoping tests inside a function
>>> def test_func():
... items = {(lambda i=i: i) for i in range(5)}
... return {x() for x in items}
>>> test_func() == set(range(5))
True
>>> def test_func():
... items = {(lambda: i) for i in range(5)}
... return {x() for x in items}
>>> test_func()
{4}
>>> def test_func():
... items = {(lambda: i) for i in range(5)}
... i = 20
... return {x() for x in items}
>>> test_func()
{4}
>>> def test_func():
... items = {(lambda: y) for i in range(5)}
... y = 2
... return {x() for x in items}
>>> test_func()
{2}
"""
__test__ = {'doctests' : doctests}
def test_main(verbose=None):
import sys
from test import support
from test import test_setcomps
support.run_doctest(test_setcomps, verbose)
# verify reference counting
if verbose and hasattr(sys, "gettotalrefcount"):
import gc
counts = [None] * 5
for i in range(len(counts)):
support.run_doctest(test_setcomps, verbose)
gc.collect()
counts[i] = sys.gettotalrefcount()
print(counts)
if __name__ == "__main__":
test_main(verbose=True)
|
perezg/infoxchange
|
refs/heads/master
|
BASE/lib/python2.7/site-packages/django/contrib/gis/db/backends/postgis/operations.py
|
100
|
import re
from decimal import Decimal
from django.conf import settings
from django.contrib.gis.db.backends.base import BaseSpatialOperations
from django.contrib.gis.db.backends.util import SpatialOperation, SpatialFunction
from django.contrib.gis.db.backends.postgis.adapter import PostGISAdapter
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Distance
from django.core.exceptions import ImproperlyConfigured
from django.db.backends.postgresql_psycopg2.base import DatabaseOperations
from django.db.utils import DatabaseError
from django.utils import six
#### Classes used in constructing PostGIS spatial SQL ####
class PostGISOperator(SpatialOperation):
"For PostGIS operators (e.g. `&&`, `~`)."
def __init__(self, operator):
super(PostGISOperator, self).__init__(operator=operator)
class PostGISFunction(SpatialFunction):
"For PostGIS function calls (e.g., `ST_Contains(table, geom)`)."
def __init__(self, prefix, function, **kwargs):
super(PostGISFunction, self).__init__(prefix + function, **kwargs)
class PostGISFunctionParam(PostGISFunction):
"For PostGIS functions that take another parameter (e.g. DWithin, Relate)."
sql_template = '%(function)s(%(geo_col)s, %(geometry)s, %%s)'
class PostGISDistance(PostGISFunction):
"For PostGIS distance operations."
dist_func = 'Distance'
sql_template = '%(function)s(%(geo_col)s, %(geometry)s) %(operator)s %%s'
def __init__(self, prefix, operator):
super(PostGISDistance, self).__init__(prefix, self.dist_func,
operator=operator)
class PostGISSpheroidDistance(PostGISFunction):
"For PostGIS spherical distance operations (using the spheroid)."
dist_func = 'distance_spheroid'
sql_template = '%(function)s(%(geo_col)s, %(geometry)s, %%s) %(operator)s %%s'
def __init__(self, prefix, operator):
        # An extra placeholder is needed in the SQL template for the spheroid string.
super(PostGISSpheroidDistance, self).__init__(prefix, self.dist_func,
operator=operator)
class PostGISSphereDistance(PostGISDistance):
"For PostGIS spherical distance operations."
dist_func = 'distance_sphere'
class PostGISRelate(PostGISFunctionParam):
"For PostGIS Relate(<geom>, <pattern>) calls."
pattern_regex = re.compile(r'^[012TF\*]{9}$')
def __init__(self, prefix, pattern):
if not self.pattern_regex.match(pattern):
raise ValueError('Invalid intersection matrix pattern "%s".' % pattern)
super(PostGISRelate, self).__init__(prefix, 'Relate')
class PostGISOperations(DatabaseOperations, BaseSpatialOperations):
compiler_module = 'django.contrib.gis.db.models.sql.compiler'
name = 'postgis'
postgis = True
version_regex = re.compile(r'^(?P<major>\d)\.(?P<minor1>\d)\.(?P<minor2>\d+)')
valid_aggregates = dict([(k, None) for k in
('Collect', 'Extent', 'Extent3D', 'MakeLine', 'Union')])
Adapter = PostGISAdapter
Adaptor = Adapter # Backwards-compatibility alias.
def __init__(self, connection):
super(PostGISOperations, self).__init__(connection)
# Trying to get the PostGIS version because the function
# signatures will depend on the version used. The cost
# here is a database query to determine the version, which
# can be mitigated by setting `POSTGIS_VERSION` with a 3-tuple
# comprising user-supplied values for the major, minor, and
# subminor revision of PostGIS.
try:
if hasattr(settings, 'POSTGIS_VERSION'):
vtup = settings.POSTGIS_VERSION
if len(vtup) == 3:
# The user-supplied PostGIS version.
version = vtup
else:
# This was the old documented way, but it's stupid to
# include the string.
version = vtup[1:4]
else:
vtup = self.postgis_version_tuple()
version = vtup[1:]
# Getting the prefix -- even though we don't officially support
# PostGIS 1.2 anymore, keeping it anyway in case a prefix change
# for something else is necessary.
if version >= (1, 2, 2):
prefix = 'ST_'
else:
prefix = ''
self.geom_func_prefix = prefix
self.spatial_version = version
except DatabaseError:
raise ImproperlyConfigured(
'Cannot determine PostGIS version for database "%s". '
'GeoDjango requires at least PostGIS version 1.3. '
'Was the database created from a spatial database '
'template?' % self.connection.settings_dict['NAME']
)
# TODO: Raise helpful exceptions as they become known.
# PostGIS-specific operators. The commented descriptions of these
# operators come from Section 7.6 of the PostGIS 1.4 documentation.
self.geometry_operators = {
# The "&<" operator returns true if A's bounding box overlaps or
# is to the left of B's bounding box.
'overlaps_left' : PostGISOperator('&<'),
# The "&>" operator returns true if A's bounding box overlaps or
# is to the right of B's bounding box.
'overlaps_right' : PostGISOperator('&>'),
# The "<<" operator returns true if A's bounding box is strictly
# to the left of B's bounding box.
'left' : PostGISOperator('<<'),
# The ">>" operator returns true if A's bounding box is strictly
# to the right of B's bounding box.
'right' : PostGISOperator('>>'),
# The "&<|" operator returns true if A's bounding box overlaps or
# is below B's bounding box.
'overlaps_below' : PostGISOperator('&<|'),
# The "|&>" operator returns true if A's bounding box overlaps or
# is above B's bounding box.
'overlaps_above' : PostGISOperator('|&>'),
# The "<<|" operator returns true if A's bounding box is strictly
# below B's bounding box.
'strictly_below' : PostGISOperator('<<|'),
# The "|>>" operator returns true if A's bounding box is strictly
# above B's bounding box.
'strictly_above' : PostGISOperator('|>>'),
# The "~=" operator is the "same as" operator. It tests actual
# geometric equality of two features. So if A and B are the same feature,
# vertex-by-vertex, the operator returns true.
'same_as' : PostGISOperator('~='),
'exact' : PostGISOperator('~='),
# The "@" operator returns true if A's bounding box is completely contained
# by B's bounding box.
'contained' : PostGISOperator('@'),
# The "~" operator returns true if A's bounding box completely contains
# by B's bounding box.
'bbcontains' : PostGISOperator('~'),
# The "&&" operator returns true if A's bounding box overlaps
# B's bounding box.
'bboverlaps' : PostGISOperator('&&'),
}
self.geometry_functions = {
'equals' : PostGISFunction(prefix, 'Equals'),
'disjoint' : PostGISFunction(prefix, 'Disjoint'),
'touches' : PostGISFunction(prefix, 'Touches'),
'crosses' : PostGISFunction(prefix, 'Crosses'),
'within' : PostGISFunction(prefix, 'Within'),
'overlaps' : PostGISFunction(prefix, 'Overlaps'),
'contains' : PostGISFunction(prefix, 'Contains'),
'intersects' : PostGISFunction(prefix, 'Intersects'),
'relate' : (PostGISRelate, six.string_types),
'coveredby' : PostGISFunction(prefix, 'CoveredBy'),
'covers' : PostGISFunction(prefix, 'Covers'),
}
# Valid distance types and substitutions
dtypes = (Decimal, Distance, float) + six.integer_types
def get_dist_ops(operator):
"Returns operations for both regular and spherical distances."
return {'cartesian' : PostGISDistance(prefix, operator),
'sphere' : PostGISSphereDistance(prefix, operator),
'spheroid' : PostGISSpheroidDistance(prefix, operator),
}
self.distance_functions = {
'distance_gt' : (get_dist_ops('>'), dtypes),
'distance_gte' : (get_dist_ops('>='), dtypes),
'distance_lt' : (get_dist_ops('<'), dtypes),
'distance_lte' : (get_dist_ops('<='), dtypes),
'dwithin' : (PostGISFunctionParam(prefix, 'DWithin'), dtypes)
}
# Adding the distance functions to the geometries lookup.
self.geometry_functions.update(self.distance_functions)
# Only PostGIS versions 1.3.4+ have GeoJSON serialization support.
if version < (1, 3, 4):
GEOJSON = False
else:
GEOJSON = prefix + 'AsGeoJson'
        # ST_ContainsProperly, ST_MakeLine, and ST_GeoHash added in 1.4.
if version >= (1, 4, 0):
GEOHASH = 'ST_GeoHash'
BOUNDINGCIRCLE = 'ST_MinimumBoundingCircle'
self.geometry_functions['contains_properly'] = PostGISFunction(prefix, 'ContainsProperly')
else:
GEOHASH, BOUNDINGCIRCLE = False, False
# Geography type support added in 1.5.
if version >= (1, 5, 0):
self.geography = True
# Only a subset of the operators and functions are available
# for the geography type.
self.geography_functions = self.distance_functions.copy()
self.geography_functions.update({
'coveredby' : self.geometry_functions['coveredby'],
'covers' : self.geometry_functions['covers'],
'intersects' : self.geometry_functions['intersects'],
})
self.geography_operators = {
'bboverlaps' : PostGISOperator('&&'),
}
# Native geometry type support added in PostGIS 2.0.
if version >= (2, 0, 0):
self.geometry = True
# Creating a dictionary lookup of all GIS terms for PostGIS.
gis_terms = ['isnull']
gis_terms += list(self.geometry_operators)
gis_terms += list(self.geometry_functions)
self.gis_terms = dict([(term, None) for term in gis_terms])
self.area = prefix + 'Area'
self.bounding_circle = BOUNDINGCIRCLE
self.centroid = prefix + 'Centroid'
self.collect = prefix + 'Collect'
self.difference = prefix + 'Difference'
self.distance = prefix + 'Distance'
self.distance_sphere = prefix + 'distance_sphere'
self.distance_spheroid = prefix + 'distance_spheroid'
self.envelope = prefix + 'Envelope'
self.extent = prefix + 'Extent'
self.force_rhr = prefix + 'ForceRHR'
self.geohash = GEOHASH
self.geojson = GEOJSON
self.gml = prefix + 'AsGML'
self.intersection = prefix + 'Intersection'
self.kml = prefix + 'AsKML'
self.length = prefix + 'Length'
self.length_spheroid = prefix + 'length_spheroid'
self.makeline = prefix + 'MakeLine'
self.mem_size = prefix + 'mem_size'
self.num_geom = prefix + 'NumGeometries'
        self.num_points = prefix + 'npoints'
self.perimeter = prefix + 'Perimeter'
self.point_on_surface = prefix + 'PointOnSurface'
self.polygonize = prefix + 'Polygonize'
self.reverse = prefix + 'Reverse'
self.scale = prefix + 'Scale'
self.snap_to_grid = prefix + 'SnapToGrid'
self.svg = prefix + 'AsSVG'
self.sym_difference = prefix + 'SymDifference'
self.transform = prefix + 'Transform'
self.translate = prefix + 'Translate'
self.union = prefix + 'Union'
self.unionagg = prefix + 'Union'
if version >= (2, 0, 0):
self.extent3d = prefix + '3DExtent'
self.length3d = prefix + '3DLength'
self.perimeter3d = prefix + '3DPerimeter'
else:
self.extent3d = prefix + 'Extent3D'
self.length3d = prefix + 'Length3D'
self.perimeter3d = prefix + 'Perimeter3D'
def check_aggregate_support(self, aggregate):
"""
Checks if the given aggregate name is supported (that is, if it's
in `self.valid_aggregates`).
"""
agg_name = aggregate.__class__.__name__
return agg_name in self.valid_aggregates
def convert_extent(self, box):
"""
Returns a 4-tuple extent for the `Extent` aggregate by converting
the bounding box text returned by PostGIS (`box` argument), for
example: "BOX(-90.0 30.0, -85.0 40.0)".
"""
ll, ur = box[4:-1].split(',')
xmin, ymin = map(float, ll.split())
xmax, ymax = map(float, ur.split())
return (xmin, ymin, xmax, ymax)
def convert_extent3d(self, box3d):
"""
Returns a 6-tuple extent for the `Extent3D` aggregate by converting
        the 3d bounding-box text returned by PostGIS (`box3d` argument), for
example: "BOX3D(-90.0 30.0 1, -85.0 40.0 2)".
"""
ll, ur = box3d[6:-1].split(',')
xmin, ymin, zmin = map(float, ll.split())
xmax, ymax, zmax = map(float, ur.split())
return (xmin, ymin, zmin, xmax, ymax, zmax)
def convert_geom(self, hex, geo_field):
"""
        Converts the geometry returned from PostGIS aggregates.
"""
if hex:
return Geometry(hex)
else:
return None
def geo_db_type(self, f):
"""
Return the database field type for the given geometry field.
Typically this is `None` because geometry columns are added via
the `AddGeometryColumn` stored procedure, unless the field
has been specified to be of geography type instead.
"""
if f.geography:
if not self.geography:
raise NotImplementedError('PostGIS 1.5 required for geography column support.')
if f.srid != 4326:
raise NotImplementedError('PostGIS 1.5 supports geography columns '
'only with an SRID of 4326.')
return 'geography(%s,%d)'% (f.geom_type, f.srid)
elif self.geometry:
# Postgis 2.0 supports type-based geometries.
# TODO: Support 'M' extension.
if f.dim == 3:
geom_type = f.geom_type + 'Z'
else:
geom_type = f.geom_type
return 'geometry(%s,%d)' % (geom_type, f.srid)
else:
return None
def get_distance(self, f, dist_val, lookup_type):
"""
Retrieve the distance parameters for the given geometry field,
distance lookup value, and the distance lookup type.
This is the most complex implementation of the spatial backends due to
what is supported on geodetic geometry columns vs. what's available on
projected geometry columns. In addition, it has to take into account
        the geography column type introduced in PostGIS 1.5.
"""
# Getting the distance parameter and any options.
if len(dist_val) == 1:
value, option = dist_val[0], None
else:
value, option = dist_val
# Shorthand boolean flags.
geodetic = f.geodetic(self.connection)
geography = f.geography and self.geography
if isinstance(value, Distance):
if geography:
dist_param = value.m
elif geodetic:
if lookup_type == 'dwithin':
raise ValueError('Only numeric values of degree units are '
'allowed on geographic DWithin queries.')
dist_param = value.m
else:
dist_param = getattr(value, Distance.unit_attname(f.units_name(self.connection)))
else:
# Assuming the distance is in the units of the field.
dist_param = value
if (not geography and geodetic and lookup_type != 'dwithin'
and option == 'spheroid'):
# using distance_spheroid requires the spheroid of the field as
# a parameter.
return [f._spheroid, dist_param]
else:
return [dist_param]
def get_geom_placeholder(self, f, value):
"""
Provides a proper substitution value for Geometries that are not in the
SRID of the field. Specifically, this routine will substitute in the
ST_Transform() function call.
"""
if value is None or value.srid == f.srid:
placeholder = '%s'
else:
# Adding Transform() to the SQL placeholder.
placeholder = '%s(%%s, %s)' % (self.transform, f.srid)
if hasattr(value, 'expression'):
# If this is an F expression, then we don't really want
# a placeholder and instead substitute in the column
# of the expression.
placeholder = placeholder % self.get_expression_column(value)
return placeholder
def _get_postgis_func(self, func):
"""
Helper routine for calling PostGIS functions and returning their result.
"""
cursor = self.connection._cursor()
try:
try:
cursor.execute('SELECT %s()' % func)
row = cursor.fetchone()
except:
# Responsibility of callers to perform error handling.
raise
finally:
# Close out the connection. See #9437.
self.connection.close()
return row[0]
def postgis_geos_version(self):
"Returns the version of the GEOS library used with PostGIS."
return self._get_postgis_func('postgis_geos_version')
def postgis_lib_version(self):
"Returns the version number of the PostGIS library used with PostgreSQL."
return self._get_postgis_func('postgis_lib_version')
def postgis_proj_version(self):
"Returns the version of the PROJ.4 library used with PostGIS."
return self._get_postgis_func('postgis_proj_version')
def postgis_version(self):
"Returns PostGIS version number and compile-time options."
return self._get_postgis_func('postgis_version')
def postgis_full_version(self):
"Returns PostGIS version number and compile-time options."
return self._get_postgis_func('postgis_full_version')
def postgis_version_tuple(self):
"""
Returns the PostGIS version as a tuple (version string, major,
minor, subminor).
"""
# Getting the PostGIS version
version = self.postgis_lib_version()
m = self.version_regex.match(version)
if m:
major = int(m.group('major'))
minor1 = int(m.group('minor1'))
minor2 = int(m.group('minor2'))
else:
raise Exception('Could not parse PostGIS version string: %s' % version)
return (version, major, minor1, minor2)
def proj_version_tuple(self):
"""
Return the version of PROJ.4 used by PostGIS as a tuple of the
major, minor, and subminor release numbers.
"""
proj_regex = re.compile(r'(\d+)\.(\d+)\.(\d+)')
proj_ver_str = self.postgis_proj_version()
m = proj_regex.search(proj_ver_str)
if m:
return tuple(map(int, [m.group(1), m.group(2), m.group(3)]))
else:
raise Exception('Could not determine PROJ.4 version from PostGIS.')
def num_params(self, lookup_type, num_param):
"""
Helper routine that returns a boolean indicating whether the number of
parameters is correct for the lookup type.
"""
def exactly_two(np): return np == 2
        def two_to_three(np): return np >= 2 and np <= 3
if (lookup_type in self.distance_functions and
lookup_type != 'dwithin'):
return two_to_three(num_param)
else:
return exactly_two(num_param)
def spatial_lookup_sql(self, lvalue, lookup_type, value, field, qn):
"""
        Constructs spatial SQL from the given lookup value tuple
        (alias, col, db_type), the lookup type string, the lookup value, and
        the geometry field.
"""
alias, col, db_type = lvalue
# Getting the quoted geometry column.
geo_col = '%s.%s' % (qn(alias), qn(col))
if lookup_type in self.geometry_operators:
if field.geography and not lookup_type in self.geography_operators:
raise ValueError('PostGIS geography does not support the '
'"%s" lookup.' % lookup_type)
# Handling a PostGIS operator.
op = self.geometry_operators[lookup_type]
return op.as_sql(geo_col, self.get_geom_placeholder(field, value))
elif lookup_type in self.geometry_functions:
if field.geography and not lookup_type in self.geography_functions:
raise ValueError('PostGIS geography type does not support the '
'"%s" lookup.' % lookup_type)
# See if a PostGIS geometry function matches the lookup type.
tmp = self.geometry_functions[lookup_type]
# Lookup types that are tuples take tuple arguments, e.g., 'relate' and
# distance lookups.
if isinstance(tmp, tuple):
# First element of tuple is the PostGISOperation instance, and the
# second element is either the type or a tuple of acceptable types
                # that may be passed in as further parameters for the lookup type.
op, arg_type = tmp
# Ensuring that a tuple _value_ was passed in from the user
if not isinstance(value, (tuple, list)):
raise ValueError('Tuple required for `%s` lookup type.' % lookup_type)
# Geometry is first element of lookup tuple.
geom = value[0]
# Number of valid tuple parameters depends on the lookup type.
nparams = len(value)
if not self.num_params(lookup_type, nparams):
raise ValueError('Incorrect number of parameters given for `%s` lookup type.' % lookup_type)
# Ensuring the argument type matches what we expect.
if not isinstance(value[1], arg_type):
raise ValueError('Argument type should be %s, got %s instead.' % (arg_type, type(value[1])))
# For lookup type `relate`, the op instance is not yet created (has
# to be instantiated here to check the pattern parameter).
if lookup_type == 'relate':
op = op(self.geom_func_prefix, value[1])
elif lookup_type in self.distance_functions and lookup_type != 'dwithin':
if not field.geography and field.geodetic(self.connection):
# Geodetic distances are only available from Points to
# PointFields on PostGIS 1.4 and below.
if not self.connection.ops.geography:
if field.geom_type != 'POINT':
raise ValueError('PostGIS spherical operations are only valid on PointFields.')
if str(geom.geom_type) != 'Point':
raise ValueError('PostGIS geometry distance parameter is required to be of type Point.')
# Setting up the geodetic operation appropriately.
if nparams == 3 and value[2] == 'spheroid':
op = op['spheroid']
else:
op = op['sphere']
else:
op = op['cartesian']
else:
op = tmp
geom = value
# Calling the `as_sql` function on the operation instance.
return op.as_sql(geo_col, self.get_geom_placeholder(field, geom))
elif lookup_type == 'isnull':
# Handling 'isnull' lookup type
return "%s IS %sNULL" % (geo_col, (not value and 'NOT ' or ''))
raise TypeError("Got invalid lookup_type: %s" % repr(lookup_type))
def spatial_aggregate_sql(self, agg):
"""
Returns the spatial aggregate SQL template and function for the
given Aggregate instance.
"""
agg_name = agg.__class__.__name__
if not self.check_aggregate_support(agg):
            raise NotImplementedError('%s spatial aggregate is not implemented for this backend.' % agg_name)
agg_name = agg_name.lower()
if agg_name == 'union': agg_name += 'agg'
sql_template = '%(function)s(%(field)s)'
sql_function = getattr(self, agg_name)
return sql_template, sql_function
# Routines for getting the OGC-compliant models.
def geometry_columns(self):
from django.contrib.gis.db.backends.postgis.models import GeometryColumns
return GeometryColumns
def spatial_ref_sys(self):
from django.contrib.gis.db.backends.postgis.models import SpatialRefSys
return SpatialRefSys
|
opinkerfi/adagios
|
refs/heads/master
|
adagios/objectbrowser/urls.py
|
3
|
# Adagios is a web based Nagios configuration interface
#
# Copyright (C) 2014, Pall Sigurdsson <palli@opensource.is>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls import url, patterns
urlpatterns = patterns('adagios',
url(r'^/$', 'objectbrowser.views.list_object_types', name="objectbrowser"),
url(r'^/edit_all/(?P<object_type>.+)/(?P<attribute_name>.+)/?$', 'objectbrowser.views.edit_all'),
url(r'^/search/?$', 'objectbrowser.views.search_objects', name="search"),
url(r'^/edit/(?P<object_id>.+?)?$', 'objectbrowser.views.edit_object', name="edit_object"),
url(r'^/import/?$', 'objectbrowser.views.import_objects'),
url(r'^/edit/?$', 'objectbrowser.views.edit_object'),
url(r'^/copy_and_edit/(?P<object_id>.+?)?$', 'objectbrowser.views.copy_and_edit_object'),
url(r'^/copy/(?P<object_id>.+)$', 'objectbrowser.views.copy_object', name="copy_object"),
url(r'^/delete/(?P<object_id>.+)$', 'objectbrowser.views.delete_object', name="delete_object"),
url(r'^/delete/(?P<object_type>.+?)/(?P<shortname>.+)/?$', 'objectbrowser.views.delete_object_by_shortname', name="delete_by_shortname"),
url(r'^/add/(?P<object_type>.+)$', 'objectbrowser.views.add_object', name="addobject"),
url(r'^/bulk_edit/?$', 'objectbrowser.views.bulk_edit', name='bulk_edit'),
url(r'^/bulk_delete/?$', 'objectbrowser.views.bulk_delete', name='bulk_delete'),
url(r'^/bulk_copy/?$', 'objectbrowser.views.bulk_copy', name='bulk_copy'),
url(r'^/add_to_group/(?P<group_type>.+)/(?P<group_name>.+)/?$', 'objectbrowser.views.add_to_group'),
url(r'^/add_to_group/(?P<group_type>.+)/?$', 'objectbrowser.views.add_to_group'),
url(r'^/add_to_group', 'objectbrowser.views.add_to_group'),
url(r'^/plugins/?$', 'objectbrowser.views.show_plugins'),
url(r'^/nagios.cfg/?$', 'objectbrowser.views.edit_nagios_cfg'),
url(r'^/nagios.cfg/edit/?$', 'misc.views.edit_nagios_cfg'),
url(r'^/geek_edit/id=(?P<object_id>.+)$', 'objectbrowser.views.geek_edit'),
url(r'^/advanced_edit/id=(?P<object_id>.+)$', 'objectbrowser.views.advanced_edit'),
# Here for backwards compatibility.
url(r'^/edit/id=(?P<object_id>.+)$', 'objectbrowser.views.edit_object', ),
url(r'^/id=(?P<object_id>.+)$', 'objectbrowser.views.edit_object', ),
# These should be deprecated as of 2012-08-27
url(r'^/copy_object/id=(?P<object_id>.+)$', 'objectbrowser.views.copy_object'),
url(r'^/delete_object/id=(?P<object_id>.+)$', 'objectbrowser.views.delete_object'),
)
|
TxBlackWolf/My-Gray-Hacker-Resources
|
refs/heads/master
|
Network_and_802.11/802.11/wifiReader.py
|
4
|
#!/usr/bin/env python
# simple Python script to boost txpower and spoof the
# MAC address of your wireless interface
# copied from the internet, I lost the reference
import sys
import os
import time
class Colors:
GREEN = '\033[92m'
    YELLOW = '\033[93m'
ENDC = '\033[0m'
def cls():
os.system(['clear', 'cls'][os.name == 'nt'])
def show_ifaces():
cls()
print('<-------------------Available Interfaces------------------->')
os.system('airmon-ng > /var/tmp/wifi.txt')
with open('/var/tmp/wifi.txt', 'r') as f:
for line in f:
if line.startswith('wl') or line.startswith('mo'):
print(line, end=' ')
def change_mac(option2):
if option2 == '1':
os.system('ifconfig ' + iface + ' down')
os.system('macchanger -m 00:11:22:33:44:55 ' + iface)
os.system('ifconfig ' + iface + ' up')
time.sleep(2.5)
elif option2 == '2':
os.system('ifconfig ' + iface + ' down')
os.system('macchanger -r ' + iface)
os.system('ifconfig ' + iface + ' up')
time.sleep(2.5)
elif option2 == '3':
os.system('ifconfig ' + iface + ' down')
os.system('macchanger -p ' + iface)
os.system('ifconfig ' + iface + ' up')
time.sleep(2.5)
elif option2 == '4':
print('')
newmac = input('Address to use: ')
os.system('ifconfig ' + iface + ' down')
os.system('macchanger -m ' + newmac + ' ' + iface)
os.system('ifconfig ' + iface + ' up')
time.sleep(2.5)
else:
print('')
print('Invalid option')
time.sleep(0.75)
dbm = ''
iface = ''
while True:
if len(iface) > 1:
os.system('iwconfig ' + iface + ' > /var/tmp/wifi2.txt')
with open('/var/tmp/wifi2.txt') as f:
for line in f:
if '=' in line:
temp = line.partition('=')
temp2 = temp[2]
dbm = temp2[0:3]
show_ifaces()
print('')
    print('1) select IFACE ' + Colors.YELLOW + 'IFACE:' + iface + ' ' + 'dBm:' + dbm + Colors.ENDC)
print('2) start monitor mode')
print('3) boost txpower(30dBm)')
print('4) spoof mac address')
print('5) exit')
option = input('Please choose a menu number: ')
if option == '1':
cls()
show_ifaces()
print('')
iface = input('which interface would you like to use? ')
elif option == '2':
cls()
os.system('airmon-ng start ' + iface + ' > /var/tmp/wifi1.txt')
with open('/var/tmp/wifi1.txt', 'r') as f:
for line in f:
if 'monitor' in line:
temp = line.partition(' on')
temp2 = temp[2]
temp3 = temp2[1:5]
iface = temp3
time.sleep(0.5)
elif option == '3':
cls()
time.sleep(1)
os.system('iw reg set BO')
time.sleep(2)
os.system('iwconfig ' + iface + ' txpower 30')
elif option == '4':
cls()
print('1) use 00:11:22:33:44:55')
print('2) use random')
print('3) revert to permanent')
print('4) pick an address')
print('')
option2 = input('Please choose a menu number: ')
cls()
change_mac(option2)
elif option == '5':
os.system('rm /var/tmp/wifi*.txt')
sys.exit()
else:
cls()
print('Invalid option')
time.sleep(1)
|
birdonwheels5/p2pool-saffronSha
|
refs/heads/master
|
p2pool/util/switchprotocol.py
|
280
|
from twisted.internet import protocol
class FirstByteSwitchProtocol(protocol.Protocol):
p = None
def dataReceived(self, data):
if self.p is None:
if not data: return
serverfactory = self.factory.first_byte_to_serverfactory.get(data[0], self.factory.default_serverfactory)
self.p = serverfactory.buildProtocol(self.transport.getPeer())
self.p.makeConnection(self.transport)
self.p.dataReceived(data)
def connectionLost(self, reason):
if self.p is not None:
self.p.connectionLost(reason)
class FirstByteSwitchFactory(protocol.ServerFactory):
protocol = FirstByteSwitchProtocol
def __init__(self, first_byte_to_serverfactory, default_serverfactory):
self.first_byte_to_serverfactory = first_byte_to_serverfactory
self.default_serverfactory = default_serverfactory
def startFactory(self):
for f in list(self.first_byte_to_serverfactory.values()) + [self.default_serverfactory]:
f.doStart()
def stopFactory(self):
for f in list(self.first_byte_to_serverfactory.values()) + [self.default_serverfactory]:
f.doStop()
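# Example usage (a sketch; json_rpc_factory and web_factory are hypothetical
# server factories, not part of this module):
#
#     from twisted.internet import reactor
#     switch = FirstByteSwitchFactory({'{': json_rpc_factory}, web_factory)
#     reactor.listenTCP(8080, switch)
#
# A connection whose first byte is '{' is handed to json_rpc_factory;
# anything else falls through to web_factory.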
|
mkolar/pyblish-ftrack
|
refs/heads/master
|
pyblish_ftrack/plugins/collect_context_version.py
|
1
|
import os
import sys
import re
# sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
# import pyblish_ftrack_utils
import pyblish.api
@pyblish.api.log
class CollectContextVersion(pyblish.api.ContextPlugin):
"""Finds version in the filename or passes the one found in the context
Arguments:
version (int, optional): version number of the publish
"""
order = pyblish.api.CollectorOrder + 0.1
hosts = ['*']
def process(self, context):
# Get version number
if 'version' not in context.data:
directory, filename = os.path.split(context.data['currentFile'])
try:
prefix, version = self.version_get(filename, 'v')
context.data['version'] = int(version)
except ValueError:
self.log.warning('Cannot find version string in filename.')
return None
else:
context.data['version'] = int(context.data['version'])
self.log.info('Publish Version: {}'.format(context.data['version']))
def version_get(self, string, prefix):
"""Extract version information from filenames. Code from Foundry's
nukescripts.version_get()"""
if string is None:
raise ValueError("Empty version string - no match")
regex = "[/_.]"+prefix+"\d+"
matches = re.findall(regex, string, re.IGNORECASE)
if not len(matches):
msg = "No \"_"+prefix+"#\" found in \""+string+"\""
raise ValueError(msg)
        return matches[-1:][0][1], re.search(r"\d+", matches[-1:][0]).group()
|
shl198/Pipeline
|
refs/heads/master
|
DreamChallenge/06_human_provean.py
|
2
|
import pandas as pd
import gffutils
import re
from natsort import natsorted
import os
import numpy as np
import math
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.style.use('ggplot')
def tr2pr(tr,ensembl_trpr,ncbi_trpr):
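    # Map a transcript accession to its protein accession. Ensembl ids may
    # carry a trailing '_N' suffix and NCBI ids a '.N' version, both of which
    # are stripped before the lookup.
    # For example (a sketch): 'NM_000546.5' is looked up as 'NM_000546'.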
if tr.startswith('ENS'):
if '_' in tr:
tr = '_'.join(tr.split('_')[:-1])
if tr in ensembl_trpr:
return ensembl_trpr[tr]
else:
return tr
else:
if '.' in tr:
tr = tr.split('.')[0]
if tr in ncbi_trpr:
return ncbi_trpr[tr]
else:
return tr
def parse_mutation(mutation):
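    # Parse an HGVS-style protein change into [position, ref, alt].
    # For example (a sketch): parse_mutation('p.A123T') -> ['123', 'A', 'T'].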
trim = mutation[2:]
if trim == '?':
return ['?']*3
res = re.findall('[^\d_]+|\d+',trim)
ref = res[0]
pos = res[1]
alt = res[2]
if 'del' in res[-1]:
ref = res[-1][3:]
alt = '.'
return [pos,ref,alt]
def remove_version(access):
access= str(access)
if '.' in access:
res = access.split('.')[0]
else:
res = access
return res
gene2ref = '/data/shangzhong/Database/human/141026gene2refseq.human.txt'
mutFile = '/data/shangzhong/Dream/mutations.csv'
"""
# 1. build transcript to protein dictionary
# ensembl annotation dictionary
gffFile = '/data/shangzhong/Dream/Homo_sapiens.GRCh37.82.gff3'
handle = open(gffFile,'r')
tr_pr = {} # {tr:pr}
for line in handle:
if line.startswith('#'):
continue
item = line[:-1].split('\t')
if item[2]=='CDS':
anno = item[-1]
index = anno.index(';')
pr = anno[7:index] # len('ID=CDS:')
anno = anno[index+1:]
index = anno.index(';')
tr = anno[len('Parent=transcript:'):index]
if tr not in tr_pr:
tr_pr[tr] = pr
# ncbi dictionary
gene2ref = '/data/shangzhong/Database/human/141026gene2refseq.human.txt'
g_ref_df = pd.read_csv(gene2ref,sep='\t',header=None,names=['tr','pr'])
g_ref_df = g_ref_df.drop_duplicates()
g_ref_df['new_tr'] = g_ref_df['tr'].map(lambda x: remove_version(x))
g_ref_df['new_pr'] = g_ref_df['pr'].map(lambda x: remove_version(x))
ncbi_trpr = g_ref_df.set_index('new_tr')['new_pr'].to_dict()
# 2. get the input for provean
mutFile = '/data/shangzhong/Dream/mutations.csv'
mut_df = pd.read_csv(mutFile,header=0,usecols=[1,14],names=['transcript','mut'])
mut_df['protein'] = mut_df['transcript'].map(lambda x: tr2pr(x,tr_pr,ncbi_trpr))
mut_df['mutation'] = mut_df['mut'].map(lambda row: parse_mutation(row))
out = open('/data/shangzhong/Dream/protein_mutation.csv','w')
for row in mut_df.itertuples(index=False):
out.write(row[2]+','+','.join(row[3])+'\n')
out.close()
"""
# # 3. merge provean with the original
# path = '/data/shangzhong/Dream'
# os.chdir(path)
# files = [f for f in os.listdir(path) if f.endswith('.tsv')]
# files = natsorted(files)
# dfs = []
# for f in files:
# df = pd.read_csv(f,header=None,sep='\t',usecols=[2,6,7],names=['protein','provean_score','effect'],skiprows=[0])
# dfs.append(df)
# merge_df = pd.concat(dfs,ignore_index=True)
#
# mut_df = pd.read_csv(mutFile,header=0)
# res = pd.concat([mut_df,merge_df],axis=1)
# res.to_csv('/data/shangzhong/Dream/anno_mut.csv',index=False)
# # Score = 3 * ln(X * e^(M-u))
# exp_file = '/data/shangzhong/Dream/Molecular_Exp.csv'
# mut_file = '/data/shangzhong/Dream/Molecular_Mut.csv'
# exp_df = pd.read_csv(exp_file,header=0,index_col=0)
# print exp_df
# gene1 = exp_df.loc[:,'22RV1']
# gene1 = gene1[gene1>0]
# gene1.hist()
# print gene1
# plt.show()
# #mut_file = pd.read_csv(mut_file,header=0,index_col=0)
# #score = 3 * ((exp_df.loc[:,:] * (mut_file['22RV1']+2.5)).apply(np.exp)).apply(np.log2)
# print 'done'
#===============================================================================
# plot correlation between cnv data and expression data
#===============================================================================
# sample = '22RV1'
# f1 = '/data/shangzhong/Dream/gex.csv'
# df1 = pd.read_csv(f1,header=0)
# df1 = df1[['symbol',sample]]
# dic1 = df1.set_index('symbol')[sample].to_dict()
# df1 = pd.DataFrame.from_dict(dic1,orient='index')
# df1.columns = [sample+'_1']
#
# f2 = '/data/shangzhong/Dream/CNV_all.csv'
# df2 = pd.read_csv(f2,header=0)
# df2 = df2[['symbol',sample]]
# dic2 = df2.set_index('symbol')[sample].to_dict()
# df2 = pd.DataFrame.from_dict(dic2,orient='index')
# df2.columns = [sample + '_2']
#
# df = pd.concat([df1,df2],axis=1)
# df = df.dropna()
# plt.scatter(df['22RV1_1'],df['22RV1_2'])
# plt.show()
# print 'done'
#===============================================================================
# run codra
#===============================================================================
import subprocess
path = '/data/shangzhong/Dream/CORDA'
os.chdir(path)
num = range(72,83,2)
num = num[:-1]
cmd = ''
for n in num:
start=n+1
if n ==80:
end=83
else:
end = n+2
cmd = cmd + ('matlab -r \"createDREAMmodels({start},{end})\" & ').format(start=start,end=end)
subprocess.call(cmd,shell=True)
# # # merge the results
# path = '/data/shangzhong/Dream/CORDA'
# os.chdir(path)
# files = [f for f in os.listdir(path) if f.startswith('MetabolicCapabilities')]
# files = natsorted(files)
# res_df = pd.read_csv(files[0],header=0)
# for f in files[1:]:
# df = pd.read_csv(f,header=0)
# res_df = res_df + df
# res_df.to_csv(path + '/merge_metaCap.csv',index=False)
#===============================================================================
# merge CNV and mutation
#===============================================================================
# cnvFile = '/data/shangzhong/Dream/Molecular_CNV.csv'
# cnv_df = pd.read_csv(cnvFile,header=0,index_col=0)
# cnv_df[cnv_df<0] = 0
#
# mutFile = '/data/shangzhong/Dream/Molecular_Mut.csv'
# mut_df = pd.read_csv(mutFile,header=0,index_col=0)
# mut_df[mut_df==-100] = 3
#
# res_df = cnv_df + mut_df
# res_df.to_csv('/data/shangzhong/Dream/cnv_mut.csv')
#===============================================================================
# imputate the lipinski score
#===============================================================================
# fn = '/data/shangzhong/Dream/Drug_info_release.csv'
# df = pd.read_csv(fn,header=0)
# lipinski = []
# for row in df.itertuples():
# res = 0
# if row[3] > 10: res = res + 1
# if row[4] > 5: res = res + 1
# if row[5] > 5: res = res + 1
# if row[-1] > 500: res = res + 1
# lipinski.append(res)
# df['Lipinski'] = lipinski
# df.to_csv('/data/shangzhong/Dream/Drug_info.csv',index=False)
|
openqt/algorithms
|
refs/heads/master
|
leetcode/python/lc409-longest-palindrome.py
|
1
|
# coding=utf-8
import unittest
"""409. Longest Palindrome
https://leetcode.com/problems/longest-palindrome/description/
Given a string which consists of lowercase or uppercase letters, find the
length of the longest palindromes that can be built with those letters.
This is case sensitive, for example `"Aa"` is not considered a palindrome
here.
**Note:**
Assume the length of given string will not exceed 1,010.
**Example:**
Input:
"abccccdd"
Output:
7
Explanation:
One longest palindrome that can be built is "dccaccd", whose length is 7.
Similar Questions:
Palindrome Permutation (palindrome-permutation)
"""
class Solution(object):
    def longestPalindrome(self, s):
        """
        :type s: str
        :rtype: int
        """
        # Pairs of equal letters sit symmetrically in the palindrome; at most
        # one letter with an odd count can occupy the center.
        from collections import Counter
        counts = Counter(s).values()
        length = sum(c // 2 * 2 for c in counts)
        return length + 1 if any(c % 2 for c in counts) else length

    def test(self):
        # Sanity check with the example from the problem statement.
        assert self.longestPalindrome("abccccdd") == 7
if __name__ == "__main__":
unittest.main()
|
he7d3r/ores
|
refs/heads/master
|
ores/lock_manager/__init__.py
|
2
|
from .ip_range_list import IpRangeList
from .lock_manager import LockManager
from .poolcounter import PoolCounter
# __all__ entries must be strings for `from ... import *` to work.
__all__ = ["IpRangeList", "LockManager", "PoolCounter"]
|
kemalakyol48/python-for-android
|
refs/heads/master
|
python-modules/twisted/twisted/words/toctap.py
|
54
|
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Support module for making TOC servers with twistd.
"""
from twisted.words.protocols import toc
from twisted.python import usage
from twisted.application import strports
class Options(usage.Options):
synopsis = "[-p <port>]"
optParameters = [["port", "p", "5190"]]
longdesc = "Makes a TOC server."
def makeService(config):
return strports.service(config['port'], toc.TOCFactory())
|
qizdyyt/CPLogProgram
|
refs/heads/master
|
HebcaLog/Pods/AVOSCloudCrashReporting/Breakpad/src/tools/gyp/test/mac/gyptest-xctest.py
|
221
|
#!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that xctest targets are correctly configured.
"""
import TestGyp
import sys
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['xcode'])
# Ignore this test if Xcode 5 is not installed
import subprocess
job = subprocess.Popen(['xcodebuild', '-version'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, err = job.communicate()
if job.returncode != 0:
raise Exception('Error %d running xcodebuild' % job.returncode)
xcode_version, build_number = out.splitlines()
# Convert the version string from 'Xcode 5.0' to ['5','0'].
xcode_version = xcode_version.split()[-1].split('.')
if xcode_version < ['5']:
test.pass_test()
CHDIR = 'xctest'
test.run_gyp('test.gyp', chdir=CHDIR)
test.build('test.gyp', chdir=CHDIR, arguments=['-scheme', 'classes', 'test'])
test.built_file_must_match('tests.xctest/Contents/Resources/resource.txt',
'foo\n', chdir=CHDIR)
test.pass_test()
|
jayhetee/coveragepy
|
refs/heads/master
|
tests/try_execfile.py
|
3
|
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""Test file for run_python_file.
This file is executed two ways::
$ coverage run try_execfile.py
and::
$ python try_execfile.py
The output is compared to see that the program execution context is the same
under coverage and under Python.
It is not crucial that the execution be identical, there are some differences
that are OK. This program canonicalizes the output to gloss over those
differences and get a clean diff.
"""
import json, os, sys
# sys.path varies by execution environments. Coverage.py uses setuptools to
# make console scripts, which means pkg_resources is imported. pkg_resources
# removes duplicate entries from sys.path. So we do that too, since the extra
# entries don't affect the running of the program.
def same_file(p1, p2):
"""Determine if `p1` and `p2` refer to the same existing file."""
if not p1:
return not p2
if not os.path.exists(p1):
return False
if not os.path.exists(p2):
return False
if hasattr(os.path, "samefile"):
return os.path.samefile(p1, p2)
else:
norm1 = os.path.normcase(os.path.normpath(p1))
norm2 = os.path.normcase(os.path.normpath(p2))
return norm1 == norm2
def without_same_files(filenames):
"""Return the list `filenames` with duplicates (by same_file) removed."""
reduced = []
for filename in filenames:
if not any(same_file(filename, other) for other in reduced):
reduced.append(filename)
return reduced
cleaned_sys_path = [os.path.normcase(p) for p in without_same_files(sys.path)]
DATA = "xyzzy"
import __main__
def my_function(a):
"""A function to force execution of module-level values."""
return "my_fn(%r)" % a
FN_VAL = my_function("fooey")
loader = globals().get('__loader__')
fullname = getattr(loader, 'fullname', None) or getattr(loader, 'name', None)
globals_to_check = {
'__name__': __name__,
'__file__': __file__,
'__doc__': __doc__,
'__builtins__.has_open': hasattr(__builtins__, 'open'),
'__builtins__.dir': dir(__builtins__),
'__loader__ exists': loader is not None,
'__loader__.fullname': fullname,
'__package__': __package__,
'DATA': DATA,
'FN_VAL': FN_VAL,
'__main__.DATA': getattr(__main__, "DATA", "nothing"),
'argv': sys.argv,
'path': cleaned_sys_path,
}
print(json.dumps(globals_to_check, indent=4, sort_keys=True))
|
sperez4mba/pyNES
|
refs/heads/0.1.x
|
pynes/tests/ldx_test.py
|
28
|
# -*- coding: utf-8 -*-
'''
LDX, Load Register X
This is one of the memory operations on the 6502
'''
import unittest
from pynes.compiler import lexical, syntax, semantic
class LdxTest(unittest.TestCase):
def test_ldx_imm(self):
tokens = list(lexical('LDX #$10'))
self.assertEquals(2, len(tokens))
self.assertEquals('T_INSTRUCTION', tokens[0]['type'])
self.assertEquals('T_HEX_NUMBER', tokens[1]['type'])
ast = syntax(tokens)
self.assertEquals(1, len(ast))
self.assertEquals('S_IMMEDIATE', ast[0]['type'])
code = semantic(ast)
self.assertEquals(code, [0xa2, 0x10])
def test_ldx_imm_with_decimal(self):
tokens = list(lexical('LDX #10'))
self.assertEquals(2, len(tokens))
self.assertEquals('T_INSTRUCTION', tokens[0]['type'])
self.assertEquals('T_DECIMAL_NUMBER', tokens[1]['type'])
ast = syntax(tokens)
self.assertEquals(1, len(ast))
self.assertEquals('S_IMMEDIATE', ast[0]['type'])
code = semantic(ast)
self.assertEquals(code, [0xa2, 0x0a])
def test_ldx_imm_with_binary(self):
tokens = list(lexical('LDX #%00000100'))
self.assertEquals(2, len(tokens))
self.assertEquals('T_INSTRUCTION', tokens[0]['type'])
self.assertEquals('T_BINARY_NUMBER', tokens[1]['type'])
ast = syntax(tokens)
self.assertEquals(1, len(ast))
self.assertEquals('S_IMMEDIATE', ast[0]['type'])
code = semantic(ast)
self.assertEquals(code, [0xa2, 0x04])
def test_ldx_zp(self):
tokens = list(lexical('LDX $00'))
self.assertEquals(2, len(tokens))
self.assertEquals('T_INSTRUCTION', tokens[0]['type'])
self.assertEquals('T_ADDRESS', tokens[1]['type'])
ast = syntax(tokens)
self.assertEquals(1, len(ast))
self.assertEquals('S_ZEROPAGE', ast[0]['type'])
code = semantic(ast)
self.assertEquals(code, [0xa6, 0x00])
def test_ldx_zpy(self):
tokens = list(lexical('LDX $10,Y'))
self.assertEquals(4, len(tokens))
self.assertEquals('T_INSTRUCTION', tokens[0]['type'])
self.assertEquals('T_ADDRESS', tokens[1]['type'])
self.assertEquals('T_SEPARATOR', tokens[2]['type'])
self.assertEquals('T_REGISTER', tokens[3]['type'])
ast = syntax(tokens)
self.assertEquals(1, len(ast))
self.assertEquals('S_ZEROPAGE_Y', ast[0]['type'])
code = semantic(ast)
self.assertEquals(code, [0xb6, 0x10])
def test_ldx_abs(self):
tokens = list(lexical('LDX $1234'))
self.assertEquals(2, len(tokens))
self.assertEquals('T_INSTRUCTION', tokens[0]['type'])
self.assertEquals('T_ADDRESS', tokens[1]['type'])
ast = syntax(tokens)
self.assertEquals(1, len(ast))
self.assertEquals('S_ABSOLUTE', ast[0]['type'])
code = semantic(ast)
self.assertEquals(code, [0xae, 0x34, 0x12])
def test_ldx_absy(self):
tokens = list(lexical('LDX $1234,Y'))
self.assertEquals(4, len(tokens))
self.assertEquals('T_INSTRUCTION', tokens[0]['type'])
self.assertEquals('T_ADDRESS', tokens[1]['type'])
self.assertEquals('T_SEPARATOR', tokens[2]['type'])
self.assertEquals('T_REGISTER', tokens[3]['type'])
ast = syntax(tokens)
self.assertEquals(1, len(ast))
self.assertEquals('S_ABSOLUTE_Y', ast[0]['type'])
code = semantic(ast)
self.assertEquals(code, [0xbe, 0x34, 0x12])
|
w1ll1am23/home-assistant
|
refs/heads/dev
|
tests/components/dynalite/conftest.py
|
6
|
"""dynalite conftest."""
from tests.components.light.conftest import mock_light_profiles # noqa: F401
|
libracore/erpnext
|
refs/heads/v12
|
erpnext/healthcare/doctype/patient_appointment/patient_appointment.py
|
6
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, ESS LLP and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
import json
from frappe.utils import getdate, add_days, get_time
from frappe import _
import datetime
from frappe.core.doctype.sms_settings.sms_settings import send_sms
from erpnext.hr.doctype.employee.employee import is_holiday
from erpnext.healthcare.doctype.healthcare_settings.healthcare_settings import get_receivable_account,get_income_account
from erpnext.healthcare.utils import validity_exists, service_item_and_practitioner_charge
class PatientAppointment(Document):
def on_update(self):
today = datetime.date.today()
appointment_date = getdate(self.appointment_date)
# If appointment created for today set as open
if today == appointment_date:
frappe.db.set_value("Patient Appointment", self.name, "status", "Open")
self.reload()
def validate(self):
end_time = datetime.datetime.combine(getdate(self.appointment_date), get_time(self.appointment_time)) + datetime.timedelta(minutes=float(self.duration))
overlaps = frappe.db.sql("""
select
name, practitioner, patient, appointment_time, duration
from
`tabPatient Appointment`
where
appointment_date=%s and name!=%s and status NOT IN ("Closed", "Cancelled")
and (practitioner=%s or patient=%s) and
((appointment_time<%s and appointment_time + INTERVAL duration MINUTE>%s) or
(appointment_time>%s and appointment_time<%s) or
(appointment_time=%s))
""", (self.appointment_date, self.name, self.practitioner, self.patient,
self.appointment_time, end_time.time(), self.appointment_time, end_time.time(), self.appointment_time))
if overlaps:
frappe.throw(_("""Appointment overlaps with {0}.<br> {1} has appointment scheduled
with {2} at {3} having {4} minute(s) duration.""").format(overlaps[0][0], overlaps[0][1], overlaps[0][2], overlaps[0][3], overlaps[0][4]))
def after_insert(self):
if self.procedure_prescription:
frappe.db.set_value("Procedure Prescription", self.procedure_prescription, "appointment_booked", True)
if self.procedure_template:
comments = frappe.db.get_value("Procedure Prescription", self.procedure_prescription, "comments")
if comments:
frappe.db.set_value("Patient Appointment", self.name, "notes", comments)
# Check fee validity exists
appointment = self
validity_exist = validity_exists(appointment.practitioner, appointment.patient)
if validity_exist:
fee_validity = frappe.get_doc("Fee Validity", validity_exist[0][0])
# Check if the validity is valid
appointment_date = getdate(appointment.appointment_date)
if (fee_validity.valid_till >= appointment_date) and (fee_validity.visited < fee_validity.max_visit):
visited = fee_validity.visited + 1
frappe.db.set_value("Fee Validity", fee_validity.name, "visited", visited)
if fee_validity.ref_invoice:
frappe.db.set_value("Patient Appointment", appointment.name, "invoiced", True)
frappe.msgprint(_("{0} has fee validity till {1}").format(appointment.patient, fee_validity.valid_till))
confirm_sms(self)
if frappe.db.get_value("Healthcare Settings", None, "manage_appointment_invoice_automatically") == '1' and \
frappe.db.get_value("Patient Appointment", self.name, "invoiced") != 1:
invoice_appointment(self)
@frappe.whitelist()
def invoice_appointment(appointment_doc):
if not appointment_doc.name:
return False
sales_invoice = frappe.new_doc("Sales Invoice")
sales_invoice.customer = frappe.get_value("Patient", appointment_doc.patient, "customer")
sales_invoice.appointment = appointment_doc.name
sales_invoice.due_date = getdate()
sales_invoice.is_pos = True
sales_invoice.company = appointment_doc.company
sales_invoice.debit_to = get_receivable_account(appointment_doc.company)
item_line = sales_invoice.append("items")
service_item, practitioner_charge = service_item_and_practitioner_charge(appointment_doc)
item_line.item_code = service_item
item_line.description = "Consulting Charges: " + appointment_doc.practitioner
item_line.income_account = get_income_account(appointment_doc.practitioner, appointment_doc.company)
item_line.rate = practitioner_charge
item_line.amount = practitioner_charge
item_line.qty = 1
item_line.reference_dt = "Patient Appointment"
item_line.reference_dn = appointment_doc.name
payments_line = sales_invoice.append("payments")
payments_line.mode_of_payment = appointment_doc.mode_of_payment
payments_line.amount = appointment_doc.paid_amount
sales_invoice.set_missing_values(for_validate = True)
sales_invoice.save(ignore_permissions=True)
sales_invoice.submit()
frappe.msgprint(_("Sales Invoice {0} created as paid".format(sales_invoice.name)), alert=True)
def appointment_cancel(appointment_id):
appointment = frappe.get_doc("Patient Appointment", appointment_id)
# If invoiced --> fee_validity update with -1 visit
if appointment.invoiced:
sales_invoice = exists_sales_invoice(appointment)
if sales_invoice and cancel_sales_invoice(sales_invoice):
frappe.msgprint(
_("Appointment {0} and Sales Invoice {1} cancelled".format(appointment.name, sales_invoice.name))
)
else:
validity = validity_exists(appointment.practitioner, appointment.patient)
if validity:
fee_validity = frappe.get_doc("Fee Validity", validity[0][0])
if appointment_valid_in_fee_validity(appointment, fee_validity.valid_till, True, fee_validity.ref_invoice):
visited = fee_validity.visited - 1
frappe.db.set_value("Fee Validity", fee_validity.name, "visited", visited)
frappe.msgprint(
_("Appointment cancelled, Please review and cancel the invoice {0}".format(fee_validity.ref_invoice))
)
else:
frappe.msgprint(_("Appointment cancelled"))
else:
frappe.msgprint(_("Appointment cancelled"))
else:
frappe.msgprint(_("Appointment cancelled"))
def appointment_valid_in_fee_validity(appointment, valid_end_date, invoiced, ref_invoice):
valid_days = frappe.db.get_value("Healthcare Settings", None, "valid_days")
max_visit = frappe.db.get_value("Healthcare Settings", None, "max_visit")
valid_start_date = add_days(getdate(valid_end_date), -int(valid_days))
	# Appointments which fall in the same fee validity range as the appointment.
	# Both date bounds must apply, so list-style filters are used (a dict
	# cannot repeat the 'appointment_date' key).
	appointments = frappe.get_list("Patient Appointment", filters=[
		["patient", "=", appointment.patient],
		["invoiced", "=", invoiced],
		["appointment_date", ">=", getdate(valid_start_date)],
		["appointment_date", "<=", getdate(valid_end_date)],
		["practitioner", "=", appointment.practitioner]
	], order_by="appointment_date desc", limit=int(max_visit))
if appointments and len(appointments) > 0:
appointment_obj = appointments[len(appointments)-1]
sales_invoice = exists_sales_invoice(appointment_obj)
		if sales_invoice and sales_invoice.name == ref_invoice:
return True
return False
def cancel_sales_invoice(sales_invoice):
if frappe.db.get_value("Healthcare Settings", None, "manage_appointment_invoice_automatically") == '1':
if len(sales_invoice.items) == 1:
sales_invoice.cancel()
return True
return False
def exists_sales_invoice_item(appointment):
return frappe.db.exists(
"Sales Invoice Item",
{
"reference_dt": "Patient Appointment",
"reference_dn": appointment.name
}
)
def exists_sales_invoice(appointment):
sales_item_exist = exists_sales_invoice_item(appointment)
if sales_item_exist:
sales_invoice = frappe.get_doc("Sales Invoice", frappe.db.get_value("Sales Invoice Item", sales_item_exist, "parent"))
return sales_invoice
return False
@frappe.whitelist()
def get_availability_data(date, practitioner):
"""
Get availability data of 'practitioner' on 'date'
:param date: Date to check in schedule
:param practitioner: Name of the practitioner
:return: dict containing a list of available slots, list of appointments and time of appointments
"""
date = getdate(date)
weekday = date.strftime("%A")
available_slots = []
slot_details = []
practitioner_schedule = None
employee = None
practitioner_obj = frappe.get_doc("Healthcare Practitioner", practitioner)
# Get practitioner employee relation
if practitioner_obj.employee:
employee = practitioner_obj.employee
elif practitioner_obj.user_id:
if frappe.db.exists({
"doctype": "Employee",
"user_id": practitioner_obj.user_id
}):
employee = frappe.get_doc("Employee", {"user_id": practitioner_obj.user_id}).name
if employee:
# Check if it is Holiday
if is_holiday(employee, date):
frappe.throw(_("{0} is a company holiday".format(date)))
# Check if He/She on Leave
leave_record = frappe.db.sql("""select half_day from `tabLeave Application`
where employee = %s and %s between from_date and to_date
and docstatus = 1""", (employee, date), as_dict=True)
if leave_record:
if leave_record[0].half_day:
frappe.throw(_("{0} on Half day Leave on {1}").format(practitioner, date))
else:
frappe.throw(_("{0} on Leave on {1}").format(practitioner, date))
# get practitioners schedule
if practitioner_obj.practitioner_schedules:
for schedule in practitioner_obj.practitioner_schedules:
if schedule.schedule:
practitioner_schedule = frappe.get_doc("Practitioner Schedule", schedule.schedule)
else:
frappe.throw(_("{0} does not have a Healthcare Practitioner Schedule. Add it in Healthcare Practitioner master".format(practitioner)))
if practitioner_schedule:
available_slots = []
for t in practitioner_schedule.time_slots:
if weekday == t.day:
available_slots.append(t)
if available_slots:
appointments = []
if schedule.service_unit:
slot_name = schedule.schedule+" - "+schedule.service_unit
allow_overlap = frappe.get_value('Healthcare Service Unit', schedule.service_unit, 'overlap_appointments')
if allow_overlap:
# fetch all appointments to practitioner by service unit
appointments = frappe.get_all(
"Patient Appointment",
filters={"practitioner": practitioner, "service_unit": schedule.service_unit, "appointment_date": date, "status": ["not in",["Cancelled"]]},
fields=["name", "appointment_time", "duration", "status"])
else:
# fetch all appointments to service unit
appointments = frappe.get_all(
"Patient Appointment",
filters={"service_unit": schedule.service_unit, "appointment_date": date, "status": ["not in",["Cancelled"]]},
fields=["name", "appointment_time", "duration", "status"])
else:
slot_name = schedule.schedule
# fetch all appointments to practitioner without service unit
appointments = frappe.get_all(
"Patient Appointment",
filters={"practitioner": practitioner, "service_unit": '', "appointment_date": date, "status": ["not in",["Cancelled"]]},
fields=["name", "appointment_time", "duration", "status"])
slot_details.append({"slot_name":slot_name, "service_unit":schedule.service_unit,
"avail_slot":available_slots, 'appointments': appointments})
else:
frappe.throw(_("{0} does not have a Healthcare Practitioner Schedule. Add it in Healthcare Practitioner master".format(practitioner)))
if not available_slots and not slot_details:
# TODO: return available slots in nearby dates
frappe.throw(_("Healthcare Practitioner not available on {0}").format(weekday))
return {
"slot_details": slot_details
}
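# Minimal consumption sketch (illustrative; the practitioner docname below is
# hypothetical). The whitelisted method above is normally called from the
# client, but the returned structure can be walked server side too:
#   data = get_availability_data("2019-06-10", "HLC-PRAC-00001")
#   for detail in data["slot_details"]:
#       print(detail["slot_name"], len(detail["avail_slot"]), "slots,",
#           len(detail["appointments"]), "booked")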
@frappe.whitelist()
def update_status(appointment_id, status):
frappe.db.set_value("Patient Appointment", appointment_id, "status", status)
appointment_booked = True
if status == "Cancelled":
appointment_booked = False
appointment_cancel(appointment_id)
procedure_prescription = frappe.db.get_value("Patient Appointment", appointment_id, "procedure_prescription")
if procedure_prescription:
frappe.db.set_value("Procedure Prescription", procedure_prescription, "appointment_booked", appointment_booked)
@frappe.whitelist()
def set_open_appointments():
today = getdate()
frappe.db.sql(
"update `tabPatient Appointment` set status='Open' where status = 'Scheduled'"
" and appointment_date = %s", today)
@frappe.whitelist()
def set_pending_appointments():
today = getdate()
frappe.db.sql(
"update `tabPatient Appointment` set status='Pending' where status in "
"('Scheduled','Open') and appointment_date < %s", today)
def confirm_sms(doc):
if frappe.db.get_value("Healthcare Settings", None, "app_con") == '1':
message = frappe.db.get_value("Healthcare Settings", None, "app_con_msg")
send_message(doc, message)
@frappe.whitelist()
def create_encounter(appointment):
appointment = frappe.get_doc("Patient Appointment", appointment)
encounter = frappe.new_doc("Patient Encounter")
encounter.appointment = appointment.name
encounter.patient = appointment.patient
encounter.practitioner = appointment.practitioner
encounter.visit_department = appointment.department
encounter.patient_sex = appointment.patient_sex
encounter.encounter_date = appointment.appointment_date
if appointment.invoiced:
encounter.invoiced = True
return encounter.as_dict()
def remind_appointment():
if frappe.db.get_value("Healthcare Settings", None, "app_rem") == '1':
rem_before = datetime.datetime.strptime(frappe.get_value("Healthcare Settings", None, "rem_before"), "%H:%M:%S")
rem_dt = datetime.datetime.now() + datetime.timedelta(
hours=rem_before.hour, minutes=rem_before.minute, seconds=rem_before.second)
appointment_list = frappe.db.sql(
"select name from `tabPatient Appointment` where start_dt between %s and %s and reminded = 0 ",
(datetime.datetime.now(), rem_dt)
)
		for row in appointment_list:
			doc = frappe.get_doc("Patient Appointment", row[0])
message = frappe.db.get_value("Healthcare Settings", None, "app_rem_msg")
send_message(doc, message)
frappe.db.set_value("Patient Appointment", doc.name, "reminded",1)
def send_message(doc, message):
patient = frappe.get_doc("Patient", doc.patient)
if patient.mobile:
context = {"doc": doc, "alert": doc, "comments": None}
if doc.get("_comments"):
context["comments"] = json.loads(doc.get("_comments"))
		# jinja-to-string conversion happens here
message = frappe.render_template(message, context)
number = [patient.mobile]
send_sms(number, message)
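# Sketch of the Jinja rendering performed above (illustrative; the template
# text is whatever is configured in Healthcare Settings):
#   message = "Hello {{ doc.patient }}, your appointment is on {{ doc.appointment_date }}"
#   rendered = frappe.render_template(message, {"doc": doc, "alert": doc, "comments": None})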
@frappe.whitelist()
def get_events(start, end, filters=None):
"""Returns events for Gantt / Calendar view rendering.
:param start: Start date-time.
:param end: End date-time.
:param filters: Filters (JSON).
"""
from frappe.desk.calendar import get_event_conditions
conditions = get_event_conditions("Patient Appointment", filters)
data = frappe.db.sql("""
select
`tabPatient Appointment`.name, `tabPatient Appointment`.patient,
`tabPatient Appointment`.practitioner, `tabPatient Appointment`.status,
`tabPatient Appointment`.duration,
timestamp(`tabPatient Appointment`.appointment_date, `tabPatient Appointment`.appointment_time) as 'start',
`tabAppointment Type`.color
from
`tabPatient Appointment`
left join `tabAppointment Type` on `tabPatient Appointment`.appointment_type=`tabAppointment Type`.name
where
(`tabPatient Appointment`.appointment_date between %(start)s and %(end)s)
and `tabPatient Appointment`.status != 'Cancelled' and `tabPatient Appointment`.docstatus < 2 {conditions}""".format(conditions=conditions),
{"start": start, "end": end}, as_dict=True, update={"allDay": 0})
for item in data:
item.end = item.start + datetime.timedelta(minutes = item.duration)
return data
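# Illustrative call (assumes a site with Patient Appointment records):
#   events = get_events("2019-06-01", "2019-06-30")
#   for e in events:
#       print(e.name, e.start, e.end, e.status)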
@frappe.whitelist()
def get_procedure_prescribed(patient):
return frappe.db.sql("""select pp.name, pp.procedure, pp.parent, ct.practitioner,
ct.encounter_date, pp.practitioner, pp.date, pp.department
from `tabPatient Encounter` ct, `tabProcedure Prescription` pp
where ct.patient='{0}' and pp.parent=ct.name and pp.appointment_booked=0
order by ct.creation desc""".format(patient))
|
bhamza/ntu-dsi-dcn
|
refs/heads/master
|
src/flow-monitor/examples/flowmon-parse-results.py
|
75
|
from __future__ import division
import sys
import os
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
from xml.etree import ElementTree
def parse_time_ns(tm):
if tm.endswith('ns'):
return long(tm[:-2])
raise ValueError(tm)
class FiveTuple(object):
__slots__ = ['sourceAddress', 'destinationAddress', 'protocol', 'sourcePort', 'destinationPort']
def __init__(self, el):
self.sourceAddress = el.get('sourceAddress')
self.destinationAddress = el.get('destinationAddress')
self.sourcePort = int(el.get('sourcePort'))
self.destinationPort = int(el.get('destinationPort'))
self.protocol = int(el.get('protocol'))
class Histogram(object):
__slots__ = 'bins', 'nbins', 'number_of_flows'
def __init__(self, el=None):
self.bins = []
if el is not None:
#self.nbins = int(el.get('nBins'))
for bin in el.findall('bin'):
self.bins.append( (float(bin.get("start")), float(bin.get("width")), int(bin.get("count"))) )
class Flow(object):
__slots__ = ['flowId', 'delayMean', 'packetLossRatio', 'rxBitrate', 'txBitrate',
'fiveTuple', 'packetSizeMean', 'probe_stats_unsorted',
'hopCount', 'flowInterruptionsHistogram', 'rx_duration']
def __init__(self, flow_el):
self.flowId = int(flow_el.get('flowId'))
rxPackets = long(flow_el.get('rxPackets'))
txPackets = long(flow_el.get('txPackets'))
tx_duration = float(long(flow_el.get('timeLastTxPacket')[:-2]) - long(flow_el.get('timeFirstTxPacket')[:-2]))*1e-9
rx_duration = float(long(flow_el.get('timeLastRxPacket')[:-2]) - long(flow_el.get('timeFirstRxPacket')[:-2]))*1e-9
self.rx_duration = rx_duration
self.probe_stats_unsorted = []
if rxPackets:
self.hopCount = float(flow_el.get('timesForwarded')) / rxPackets + 1
else:
self.hopCount = -1000
if rxPackets:
self.delayMean = float(flow_el.get('delaySum')[:-2]) / rxPackets * 1e-9
self.packetSizeMean = float(flow_el.get('rxBytes')) / rxPackets
else:
self.delayMean = None
self.packetSizeMean = None
if rx_duration > 0:
self.rxBitrate = long(flow_el.get('rxBytes'))*8 / rx_duration
else:
self.rxBitrate = None
if tx_duration > 0:
self.txBitrate = long(flow_el.get('txBytes'))*8 / tx_duration
else:
self.txBitrate = None
lost = float(flow_el.get('lostPackets'))
#print "rxBytes: %s; txPackets: %s; rxPackets: %s; lostPackets: %s" % (flow_el.get('rxBytes'), txPackets, rxPackets, lost)
if rxPackets == 0:
self.packetLossRatio = None
else:
self.packetLossRatio = (lost / (rxPackets + lost))
interrupt_hist_elem = flow_el.find("flowInterruptionsHistogram")
if interrupt_hist_elem is None:
self.flowInterruptionsHistogram = None
else:
self.flowInterruptionsHistogram = Histogram(interrupt_hist_elem)
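# Worked example of the derived metrics above (illustrative): a flow with
# rxBytes=1500000 received over rx_duration=1.0 s gives
# rxBitrate = 1500000*8/1.0 = 12 Mbit/s; with rxPackets=1000 and
# lostPackets=5, packetLossRatio = 5/(1000+5) ~= 0.00498.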
class ProbeFlowStats(object):
__slots__ = ['probeId', 'packets', 'bytes', 'delayFromFirstProbe']
class Simulation(object):
def __init__(self, simulation_el):
self.flows = []
FlowClassifier_el, = simulation_el.findall("Ipv4FlowClassifier")
flow_map = {}
for flow_el in simulation_el.findall("FlowStats/Flow"):
flow = Flow(flow_el)
flow_map[flow.flowId] = flow
self.flows.append(flow)
for flow_cls in FlowClassifier_el.findall("Flow"):
flowId = int(flow_cls.get('flowId'))
flow_map[flowId].fiveTuple = FiveTuple(flow_cls)
for probe_elem in simulation_el.findall("FlowProbes/FlowProbe"):
probeId = int(probe_elem.get('index'))
for stats in probe_elem.findall("FlowStats"):
flowId = int(stats.get('flowId'))
s = ProbeFlowStats()
s.packets = int(stats.get('packets'))
s.bytes = long(stats.get('bytes'))
s.probeId = probeId
if s.packets > 0:
s.delayFromFirstProbe = parse_time_ns(stats.get('delayFromFirstProbeSum')) / float(s.packets)
else:
s.delayFromFirstProbe = 0
flow_map[flowId].probe_stats_unsorted.append(s)
def main(argv):
file_obj = open(argv[1])
print "Reading XML file ",
sys.stdout.flush()
level = 0
sim_list = []
for event, elem in ElementTree.iterparse(file_obj, events=("start", "end")):
if event == "start":
level += 1
if event == "end":
level -= 1
if level == 0 and elem.tag == 'FlowMonitor':
sim = Simulation(elem)
sim_list.append(sim)
elem.clear() # won't need this any more
sys.stdout.write(".")
sys.stdout.flush()
print " done."
for sim in sim_list:
for flow in sim.flows:
t = flow.fiveTuple
            proto = {6: 'TCP', 17: 'UDP'}[t.protocol]
print "FlowID: %i (%s %s/%s --> %s/%i)" % \
(flow.flowId, proto, t.sourceAddress, t.sourcePort, t.destinationAddress, t.destinationPort)
print "\tTX bitrate: %.2f kbit/s" % (flow.txBitrate*1e-3,)
print "\tRX bitrate: %.2f kbit/s" % (flow.rxBitrate*1e-3,)
print "\tMean Delay: %.2f ms" % (flow.delayMean*1e3,)
print "\tPacket Loss Ratio: %.2f %%" % (flow.packetLossRatio*100)
if __name__ == '__main__':
main(sys.argv)
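# Usage (illustrative): produce the XML with ns-3's FlowMonitor, then run
#   python flowmon-parse-results.py flowmon-results.xml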
|
gangadharkadam/vlinkerp
|
refs/heads/master
|
erpnext/tests/test_client.py
|
7
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import unittest, frappe
from frappe.utils import sel
from frappe.utils import formatdate
selenium_tests = True
class TestLogin(unittest.TestCase):
def setUp(self):
sel.login()
def test_material_request(self):
sel.new_doc("Stock", "Material Request")
sel.set_field("company", "_Test Company")
sel.add_child("items")
sel.set_field("item_code", "_Test Item")
sel.set_field("qty", "1")
sel.set_field("warehouse", "_Test Warehouse - _TC")
sel.set_field("schedule_date", formatdate())
sel.done_add_child("items")
sel.primary_action()
sel.wait_for_state("clean")
|
varunnaganathan/django
|
refs/heads/master
|
tests/migrations/migrations_test_apps/unspecified_app_with_conflict/migrations/0001_initial.py
|
2995
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
operations = [
migrations.CreateModel(
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=255)),
("slug", models.SlugField(null=True)),
("age", models.IntegerField(default=0)),
("silly_field", models.BooleanField(default=False)),
],
),
migrations.CreateModel(
"Tribble",
[
("id", models.AutoField(primary_key=True)),
("fluffy", models.BooleanField(default=True)),
],
)
]
|
kadhikari/navitia
|
refs/heads/dev
|
source/jormungandr/tests/routing_tests_experimental.py
|
1
|
# Copyright (c) 2001-2015, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
from .tests_mechanism import config, NewDefaultScenarioAbstractTestFixture
from .journey_common_tests import *
from unittest import skip
from .routing_tests import OnBasicRouting
"""
This unit runs all the common tests in journey_common_tests.py along with local tests added in this
unit for the experimental scenario
"""
@config({'scenario': 'distributed'})
class TestJourneysDistributed(JourneyCommon, DirectPath, JourneyMinBikeMinCar, NewDefaultScenarioAbstractTestFixture):
"""
    Test the experimental scenario
    All the tests are defined in the "TestJourneys" class; we only change the scenario
NOTE: for the moment we cannot import all routing tests, so we only get 2, but we need to add some more
"""
def test_journey_with_different_fallback_modes(self):
"""
Test when departure/arrival fallback modes are different
"""
query = journey_basic_query + "&first_section_mode[]=walking&last_section_mode[]=car&debug=true"
response = self.query_region(query)
check_best(response)
jrnys = response['journeys']
assert jrnys
assert jrnys[0]['sections'][0]['mode'] == 'walking'
assert jrnys[0]['sections'][-1]['mode'] == 'car'
context = response['context']
assert 'car_direct_path' in context
assert 'co2_emission' in context['car_direct_path']
def test_best_filtering(self):
"""
This feature is no longer supported"""
pass
def test_journeys_wheelchair_profile(self):
"""
This feature is no longer supported
"""
pass
def test_not_existent_filtering(self):
"""
This feature is no longer supported
"""
pass
def test_other_filtering(self):
"""
This feature is no longer supported
"""
pass
def test_street_network_routing_matrix(self):
from jormungandr import i_manager
from navitiacommon import response_pb2
instance = i_manager.instances['main_routing_test']
origin = instance.georef.place("stopB")
assert origin
destination = instance.georef.place("stopA")
assert destination
max_duration = 18000
mode = 'walking'
kwargs = {
"walking": instance.walking_speed,
"bike": instance.bike_speed,
"car": instance.car_speed,
"bss": instance.bss_speed,
"ridesharing": instance.car_no_park_speed,
}
request = {
"walking_speed": instance.walking_speed,
"bike_speed": instance.bike_speed,
"car_speed": instance.car_speed,
"bss_speed": instance.bss_speed,
"car_no_park_speed": instance.car_no_park_speed,
}
resp = instance.get_street_network_routing_matrix([origin], [destination],
mode, max_duration, request, **kwargs)
assert len(resp.rows[0].routing_response) == 1
assert resp.rows[0].routing_response[0].duration == 107
assert resp.rows[0].routing_response[0].routing_status == response_pb2.reached
max_duration = 106
resp = instance.get_street_network_routing_matrix([origin], [destination],
mode, max_duration, request, **kwargs)
assert len(resp.rows[0].routing_response) == 1
assert resp.rows[0].routing_response[0].duration == 0
assert resp.rows[0].routing_response[0].routing_status == response_pb2.unreached
def test_intersection_objects(self):
# The coordinates of arrival and the stop point are separated by 20m
r = self.query('/v1/coverage/main_routing_test/journeys?from=stopA&to=coord%3A8.98311981954709e-05%3A8.98311981954709e-05&datetime=20120614080000&')
assert len(r['journeys'][0]['sections']) == 3
# destination of crow_fly section and origin of next pt section should be the same object.
assert(r['journeys'][0]['sections'][0]['type'] == 'crow_fly')
assert(r['journeys'][0]['sections'][1]['type'] == 'public_transport')
assert(r['journeys'][0]['sections'][0]['to'] == r['journeys'][0]['sections'][1]['from'])
# destination of pt section and origin of next street_network section should be the same object.
assert(r['journeys'][0]['sections'][-1]['type'] == 'street_network')
assert(r['journeys'][0]['sections'][1]['to'] == r['journeys'][0]['sections'][-1]['from'])
r = self.query('/v1/coverage/main_routing_test/journeys?from=coord%3A8.98311981954709e-05%3A8.98311981954709e-05&to=stopA&datetime=20120614080000')
assert len(r['journeys'][0]['sections']) == 3
        # destination of street_network section and origin of next pt section should be the same object.
assert(r['journeys'][0]['sections'][0]['type'] == 'street_network')
assert(r['journeys'][0]['sections'][1]['type'] == 'public_transport')
assert(r['journeys'][0]['sections'][0]['to'] == r['journeys'][0]['sections'][1]['from'])
        # destination of pt section and origin of next crow_fly section should be the same object.
assert(r['journeys'][0]['sections'][-1]['type'] == 'crow_fly')
assert(r['journeys'][0]['sections'][1]['to'] == r['journeys'][0]['sections'][-1]['from'])
@config({"scenario": "distributed"})
class TestDistributedJourneysWithPtref(JourneysWithPtref, NewDefaultScenarioAbstractTestFixture):
pass
@config({"scenario": "distributed"})
class TestDistributedOnBasicRouting(OnBasicRouting, NewDefaultScenarioAbstractTestFixture):
@skip("temporarily disabled")
def test_isochrone(self):
super(TestDistributedOnBasicRouting, self).test_isochrone()
@config({"scenario": "distributed"})
class TestDistributedMinNbJourneys(JourneysMinNbJourneys, NewDefaultScenarioAbstractTestFixture):
pass
@config({"scenario": "distributed"})
class TestDistributedWithNightBusFilter(JourneysWithNightBusFilter, NewDefaultScenarioAbstractTestFixture):
pass
@config({"scenario": "distributed"})
class TestDistributedTimeFrameDuration(JourneysTimeFrameDuration, NewDefaultScenarioAbstractTestFixture):
pass
@config({"scenario": "distributed",
'instance_config': {
"ridesharing": [
{
"class": "jormungandr.scenarios.ridesharing.instant_system.InstantSystem",
"args": {
"service_url": "http://distributed_ridesharing.wtf",
"api_key": "key",
"network": "Super Covoit 3000",
"rating_scale_min": 0,
"rating_scale_max": 5
}
}
]}})
class TestJourneysRidesharingDistributed(JourneysRidesharing, JourneyCommon, DirectPath, JourneyMinBikeMinCar,
NewDefaultScenarioAbstractTestFixture):
def test_best_filtering(self):
"""
This feature is not supported
"""
pass
def test_journeys_wheelchair_profile(self):
"""
This feature is not supported
"""
pass
def test_not_existent_filtering(self):
"""
This feature is not supported
"""
pass
def test_other_filtering(self):
"""
This feature is not supported
"""
pass
|
liamgh/liamgreenhughes-sl4a-tf101
|
refs/heads/master
|
python/gdata/src/gdata/tlslite/utils/PyCrypto_TripleDES.py
|
359
|
"""PyCrypto 3DES implementation."""
from cryptomath import *
from TripleDES import *
if pycryptoLoaded:
import Crypto.Cipher.DES3
def new(key, mode, IV):
return PyCrypto_TripleDES(key, mode, IV)
class PyCrypto_TripleDES(TripleDES):
def __init__(self, key, mode, IV):
TripleDES.__init__(self, key, mode, IV, "pycrypto")
self.context = Crypto.Cipher.DES3.new(key, mode, IV)
def encrypt(self, plaintext):
return self.context.encrypt(plaintext)
def decrypt(self, ciphertext):
return self.context.decrypt(ciphertext)
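# Illustrative usage (requires PyCrypto; 3DES keys are 16 or 24 bytes, the IV
# is 8 bytes, and tlslite's CBC mode constant is 2):
#   key, iv = b'0123456789abcdef01234567', b'\x00' * 8
#   enc, dec = new(key, 2, iv), new(key, 2, iv)
#   assert dec.decrypt(enc.encrypt(b'16 byte message.')) == b'16 byte message.'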
|
alexlo03/ansible
|
refs/heads/devel
|
lib/ansible/module_utils/network/eos/eos.py
|
21
|
#
# This code is part of Ansible, but is an independent component.
#
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2017 Red Hat, Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os
import time
from ansible.module_utils._text import to_text, to_native
from ansible.module_utils.basic import env_fallback, return_values
from ansible.module_utils.connection import Connection, ConnectionError
from ansible.module_utils.network.common.config import NetworkConfig, dumps
from ansible.module_utils.network.common.utils import to_list, ComplexList
from ansible.module_utils.six import iteritems
from ansible.module_utils.urls import fetch_url
_DEVICE_CONNECTION = None
eos_provider_spec = {
'host': dict(),
'port': dict(type='int'),
'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
'authorize': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTHORIZE']), type='bool'),
'auth_pass': dict(no_log=True, fallback=(env_fallback, ['ANSIBLE_NET_AUTH_PASS'])),
'use_ssl': dict(default=True, type='bool'),
'use_proxy': dict(default=True, type='bool'),
'validate_certs': dict(default=True, type='bool'),
'timeout': dict(type='int'),
'transport': dict(default='cli', choices=['cli', 'eapi'])
}
eos_argument_spec = {
'provider': dict(type='dict', options=eos_provider_spec),
}
eos_top_spec = {
'host': dict(removed_in_version=2.9),
'port': dict(removed_in_version=2.9, type='int'),
'username': dict(removed_in_version=2.9),
'password': dict(removed_in_version=2.9, no_log=True),
'ssh_keyfile': dict(removed_in_version=2.9, type='path'),
'authorize': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTHORIZE']), type='bool'),
'auth_pass': dict(removed_in_version=2.9, no_log=True),
'use_ssl': dict(removed_in_version=2.9, type='bool'),
'validate_certs': dict(removed_in_version=2.9, type='bool'),
'timeout': dict(removed_in_version=2.9, type='int'),
'transport': dict(removed_in_version=2.9, choices=['cli', 'eapi'])
}
eos_argument_spec.update(eos_top_spec)
def get_provider_argspec():
return eos_provider_spec
def check_args(module, warnings):
pass
def load_params(module):
provider = module.params.get('provider') or dict()
for key, value in iteritems(provider):
if key in eos_argument_spec:
if module.params.get(key) is None and value is not None:
module.params[key] = value
def get_connection(module):
global _DEVICE_CONNECTION
if not _DEVICE_CONNECTION:
load_params(module)
if is_eapi(module):
conn = Eapi(module)
else:
conn = Cli(module)
_DEVICE_CONNECTION = conn
return _DEVICE_CONNECTION
class Cli:
def __init__(self, module):
self._module = module
self._device_configs = {}
self._session_support = None
self._connection = None
def _get_connection(self):
if self._connection:
return self._connection
self._connection = Connection(self._module._socket_path)
return self._connection
def get_config(self, flags=None):
"""Retrieves the current config from the device or cache
"""
flags = [] if flags is None else flags
cmd = 'show running-config '
cmd += ' '.join(flags)
cmd = cmd.strip()
try:
return self._device_configs[cmd]
except KeyError:
conn = self._get_connection()
try:
out = conn.get_config(flags=flags)
except ConnectionError as exc:
self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
cfg = to_text(out, errors='surrogate_then_replace').strip()
self._device_configs[cmd] = cfg
return cfg
def run_commands(self, commands, check_rc=True):
"""Run list of commands on remote device and return results
"""
connection = self._get_connection()
try:
response = connection.run_commands(commands=commands, check_rc=check_rc)
except ConnectionError as exc:
self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
return response
def load_config(self, commands, commit=False, replace=False):
"""Loads the config commands onto the remote device
"""
conn = self._get_connection()
try:
response = conn.edit_config(commands, commit, replace)
except ConnectionError as exc:
message = getattr(exc, 'err', to_text(exc))
if "check mode is not supported without configuration session" in message:
self._module.warn("EOS can not check config without config session")
response = {'changed': True}
else:
self._module.fail_json(msg="%s" % message, data=to_text(message, errors='surrogate_then_replace'))
return response
def get_diff(self, candidate=None, running=None, diff_match='line', diff_ignore_lines=None, path=None, diff_replace='line'):
conn = self._get_connection()
try:
diff = conn.get_diff(candidate=candidate, running=running, diff_match=diff_match, diff_ignore_lines=diff_ignore_lines, path=path,
diff_replace=diff_replace)
except ConnectionError as exc:
self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
return diff
class Eapi:
def __init__(self, module):
self._module = module
self._enable = None
self._session_support = None
self._device_configs = {}
host = module.params['provider']['host']
port = module.params['provider']['port']
self._module.params['url_username'] = self._module.params['username']
self._module.params['url_password'] = self._module.params['password']
if module.params['provider']['use_ssl']:
proto = 'https'
else:
proto = 'http'
module.params['validate_certs'] = module.params['provider']['validate_certs']
self._url = '%s://%s:%s/command-api' % (proto, host, port)
if module.params['auth_pass']:
self._enable = {'cmd': 'enable', 'input': module.params['auth_pass']}
else:
self._enable = 'enable'
@property
def supports_sessions(self):
if self._session_support:
return self._session_support
response = self.send_request(['show configuration sessions'])
self._session_support = 'error' not in response
return self._session_support
def _request_builder(self, commands, output, reqid=None):
params = dict(version=1, cmds=commands, format=output)
return dict(jsonrpc='2.0', id=reqid, method='runCmds', params=params)
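    # Example of the JSON-RPC body built above (illustrative):
    #   self._request_builder(['show version'], 'json')
    #   -> {'jsonrpc': '2.0', 'id': None, 'method': 'runCmds',
    #       'params': {'version': 1, 'cmds': ['show version'], 'format': 'json'}}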
def send_request(self, commands, output='text'):
commands = to_list(commands)
if self._enable:
commands.insert(0, self._enable)
body = self._request_builder(commands, output)
data = self._module.jsonify(body)
headers = {'Content-Type': 'application/json-rpc'}
timeout = self._module.params['timeout']
use_proxy = self._module.params['provider']['use_proxy']
response, headers = fetch_url(
self._module, self._url, data=data, headers=headers,
method='POST', timeout=timeout, use_proxy=use_proxy
)
if headers['status'] != 200:
self._module.fail_json(**headers)
try:
data = response.read()
response = self._module.from_json(to_text(data, errors='surrogate_then_replace'))
except ValueError:
self._module.fail_json(msg='unable to load response from device', data=data)
if self._enable and 'result' in response:
response['result'].pop(0)
return response
def run_commands(self, commands, check_rc=True):
"""Runs list of commands on remote device and returns results
"""
output = None
queue = list()
responses = list()
def _send(commands, output):
response = self.send_request(commands, output=output)
if 'error' in response:
err = response['error']
self._module.fail_json(msg=err['message'], code=err['code'])
return response['result']
for item in to_list(commands):
if is_json(item['command']):
item['command'] = str(item['command']).replace('| json', '')
item['output'] = 'json'
if output and output != item['output']:
responses.extend(_send(queue, output))
queue = list()
output = item['output'] or 'json'
queue.append(item['command'])
if queue:
responses.extend(_send(queue, output))
for index, item in enumerate(commands):
try:
responses[index] = responses[index]['output'].strip()
except KeyError:
pass
return responses
def get_config(self, flags=None):
"""Retrieves the current config from the device or cache
"""
flags = [] if flags is None else flags
cmd = 'show running-config '
cmd += ' '.join(flags)
cmd = cmd.strip()
try:
return self._device_configs[cmd]
except KeyError:
out = self.send_request(cmd)
cfg = str(out['result'][0]['output']).strip()
self._device_configs[cmd] = cfg
return cfg
def configure(self, commands):
"""Sends the ordered set of commands to the device
"""
cmds = ['configure terminal']
cmds.extend(commands)
        responses = self.send_request(cmds)
if 'error' in responses:
err = responses['error']
self._module.fail_json(msg=err['message'], code=err['code'])
return responses[1:]
def load_config(self, config, commit=False, replace=False):
"""Loads the configuration onto the remote devices
If the device doesn't support configuration sessions, this will
fallback to using configure() to load the commands. If that happens,
there will be no returned diff or session values
"""
use_session = os.getenv('ANSIBLE_EOS_USE_SESSIONS', True)
try:
use_session = int(use_session)
except ValueError:
pass
if not all((bool(use_session), self.supports_sessions)):
if commit:
return self.configure(config)
else:
self._module.warn("EOS can not check config without config session")
result = {'changed': True}
return result
session = 'ansible_%s' % int(time.time())
result = {'session': session}
commands = ['configure session %s' % session]
if replace:
commands.append('rollback clean-config')
commands.extend(config)
response = self.send_request(commands)
if 'error' in response:
commands = ['configure session %s' % session, 'abort']
self.send_request(commands)
err = response['error']
error_text = []
for data in err['data']:
error_text.extend(data.get('errors', []))
error_text = '\n'.join(error_text) or err['message']
self._module.fail_json(msg=error_text, code=err['code'])
commands = ['configure session %s' % session, 'show session-config diffs']
if commit:
commands.append('commit')
else:
commands.append('abort')
response = self.send_request(commands, output='text')
diff = response['result'][1]['output']
if len(diff) > 0:
result['diff'] = diff
return result
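    # Illustrative usage (the 'eapi' instance name is hypothetical; assumes an
    # eapi-backed module with session support):
    #   result = eapi.load_config(['hostname veos01'], commit=True)
    #   -> {'session': 'ansible_1514764800', 'diff': '...'} when a diff is produced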
# get_diff added here to support connection=local and transport=eapi scenario
def get_diff(self, candidate, running=None, diff_match='line', diff_ignore_lines=None, path=None, diff_replace='line'):
diff = {}
# prepare candidate configuration
candidate_obj = NetworkConfig(indent=3)
candidate_obj.load(candidate)
if running and diff_match != 'none' and diff_replace != 'config':
# running configuration
running_obj = NetworkConfig(indent=3, contents=running, ignore_lines=diff_ignore_lines)
configdiffobjs = candidate_obj.difference(running_obj, path=path, match=diff_match, replace=diff_replace)
else:
configdiffobjs = candidate_obj.items
configdiff = dumps(configdiffobjs, 'commands') if configdiffobjs else ''
diff['config_diff'] = configdiff if configdiffobjs else {}
return diff
def is_json(cmd):
return to_native(cmd, errors='surrogate_then_replace').endswith('| json')
def is_eapi(module):
transport = module.params['transport']
provider_transport = (module.params['provider'] or {}).get('transport')
return 'eapi' in (transport, provider_transport)
def to_command(module, commands):
if is_eapi(module):
default_output = 'json'
else:
default_output = 'text'
transform = ComplexList(dict(
command=dict(key=True),
output=dict(default=default_output),
prompt=dict(type='list'),
answer=dict(type='list'),
sendonly=dict(type='bool', default=False),
check_all=dict(type='bool', default=False),
), module)
return transform(to_list(commands))
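# Example of the transform above (illustrative): plain strings are promoted to
# dicts keyed on 'command', with the remaining fields defaulted, e.g. for cli:
#   to_command(module, ['show version'])
#   -> [{'command': 'show version', 'output': 'text', 'prompt': None,
#        'answer': None, 'sendonly': False, 'check_all': False}]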
def get_config(module, flags=None):
    flags = [] if flags is None else flags
conn = get_connection(module)
return conn.get_config(flags)
def run_commands(module, commands, check_rc=True):
conn = get_connection(module)
return conn.run_commands(to_command(module, commands), check_rc=check_rc)
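# Minimal usage sketch from a module's main() (assumes eos_argument_spec was
# merged into the AnsibleModule argument spec):
#   responses = run_commands(module, ['show version'])
#   responses = run_commands(module, [{'command': 'show interfaces', 'output': 'json'}])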
def load_config(module, config, commit=False, replace=False):
conn = get_connection(module)
return conn.load_config(config, commit, replace)
def get_diff(module, candidate=None, running=None, diff_match='line', diff_ignore_lines=None, path=None, diff_replace='line'):
    conn = get_connection(module)
    return conn.get_diff(candidate=candidate, running=running, diff_match=diff_match, diff_ignore_lines=diff_ignore_lines, path=path, diff_replace=diff_replace)
|
robwarm/gpaw-symm
|
refs/heads/master
|
doc/documentation/tddft/Be_8bands_lrtddft_dE.py
|
1
|
from gpaw import GPAW
from gpaw.lrtddft import LrTDDFT
c = GPAW('Be_gs_8bands.gpw')
dE = 10 # maximal Kohn-Sham transition energy to consider in eV
lr = LrTDDFT(c, xc='LDA', energy_range=dE)
lr.write('lr_dE.dat.gz')
|
aforalee/RRally
|
refs/heads/master
|
tests/unit/plugins/openstack/scenarios/ironic/test_utils.py
|
6
|
# Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally.plugins.openstack.scenarios.ironic import utils
from tests.unit import test
IRONIC_UTILS = "rally.plugins.openstack.scenarios.ironic.utils"
class IronicScenarioTestCase(test.ScenarioTestCase):
@mock.patch("rally.common.utils.generate_random_name")
def test__create_node(self, mock_generate_random_name):
mock_generate_random_name.return_value = "rally_fake_random_string"
self.admin_clients("ironic").node.create.return_value = "fake_node"
scenario = utils.IronicScenario(self.context)
create_node = scenario._create_node(fake_param="foo")
self.assertEqual("fake_node", create_node)
self.admin_clients("ironic").node.create.assert_called_once_with(
fake_param="foo", name="rally_fake_random_string")
self._test_atomic_action_timer(scenario.atomic_actions(),
"ironic.create_node")
def test__delete_node(self):
mock_node_delete = mock.Mock()
self.admin_clients("ironic").node.delete = mock_node_delete
scenario = utils.IronicScenario(self.context)
scenario._delete_node("fake_id")
self.admin_clients("ironic").node.delete.assert_called_once_with(
"fake_id")
self._test_atomic_action_timer(scenario.atomic_actions(),
"ironic.delete_node")
def test__list_nodes(self):
self.admin_clients("ironic").node.list.return_value = ["fake"]
scenario = utils.IronicScenario(self.context)
fake_params = {
"sort_dir": "foo1",
"associated": "foo2",
"sort_key": "foo3",
"detail": True,
"limit": "foo4",
"maintenance": "foo5",
"marker": "foo6"
}
return_nodes_list = scenario._list_nodes(**fake_params)
self.assertEqual(["fake"], return_nodes_list)
self.admin_clients("ironic").node.list.assert_called_once_with(
sort_dir="foo1", associated="foo2", sort_key="foo3", detail=True,
limit="foo4", maintenance="foo5", marker="foo6")
self._test_atomic_action_timer(scenario.atomic_actions(),
"ironic.list_nodes")
|
tschijnmo/osmABTS
|
refs/heads/master
|
osmABTS/readosm.py
|
1
|
"""
OpenStreetMap XML file parsing
==============================
This module defines classes for nodes and ways, a shallow data structure for
all of this raw information from an OSM XML file, and a parser based on
the standard expat library.
It contains classes for holding information about the raw map
.. autosummary::
:toctree: generated
:template: classtempl.rstt
Node
Way
RawOSM
and one function for parsing the raw OSM XML file in the data structure
.. autosummary::
:toctree: generated
read_osm
"""
# pylint: disable=too-few-public-methods
# pylint: disable=too-many-branches
import xml.parsers.expat as expat
import numpy as np
#
# Class definitions
# -----------------
#
class Node(object):
"""Nodes in the OSM XML file
It mainly consists of the following attributes,
.. py:attribute:: coord
The coordinates for the node, in latitudes and longitudes.
.. py:attribute:: tags
A dictionary for the tags, using the same key and value as in the XML
file.
"""
__slots__ = [
"coord",
"tags",
]
def __init__(self, attrs):
"""Initializes a node based on the attributes in XML
The coordinates are going to be set, with the tags initialized to an
empty dictionary.
"""
self.coord = np.array(
(attrs['lat'], attrs['lon']),
dtype=np.float64
)
self.tags = {}
class Way(object):
"""Ways from the OSM XML file
It primarily consists of the following attributes
.. py:attribute:: nodes
A list of identity references for the nodes
.. py:attribute:: tags
The tags dictionary, the same as that for the nodes
"""
__slots__ = [
"nodes",
"tags",
]
def __init__(self):
"""Initializes an instance
The initialization is going to be trivial, just the lists and
dictionaries set to empty ones.
"""
self.nodes = []
self.tags = {}
class RawOSM(object):
"""Raw GIS data from OpenStreetMap
The XML input is going to be parsed into an instance of this project. The
primary fields are
.. py:attribute:: nodes
A dictionary of nodes, with the identity integer as the key as the
:py:class:`Node` instances as values.
.. py:attribute:: ways
A similar dictionary of ways, with identity as the key and the
:py:class:`Way` instances as the values.
"""
__slots__ = [
"nodes",
"ways",
]
def __init__(self):
"""Initializes the instance
It just sets the two dictionaries into empty ones.
"""
self.nodes = {}
self.ways = {}
#
# The parser function
# -------------------
#
def read_osm(file_name):
"""Reads the OSM XML file with given name
:param file_name: The name of the OSM XML file
:returns: A :py:class:`RawOSM` instance for the data
:raises: :py:exc:`ValueError` if something went wrong
"""
raw_osm = RawOSM()
# The current state, to be used as a stack
# Its entries should be pair of element id and element for nodes and ways
current_state = []
# The closures for the call-backs
def start_element(name, attrs):
"""Call-back at the start of element"""
# At top level
if name == 'osm':
pass
# For a node
elif name == 'node':
current_state.append(
(int(attrs['id']), Node(attrs))
)
# For a way
elif name == 'way':
current_state.append(
(int(attrs['id']), Way())
)
# For a node in a way
elif name == 'nd':
parent = current_state[-1]
if isinstance(parent, tuple) and isinstance(parent[1], Way):
parent[1].nodes.append(int(attrs['ref']))
else:
pass
# if a tag
elif name == 'tag':
parent = current_state[-1]
if isinstance(parent, tuple) and (
isinstance(parent[1], Node) or isinstance(parent[1], Way)
):
(parent[1].tags)[attrs['k']] = attrs['v']
else:
pass
# For unused relation
elif name == 'relation':
current_state.append(None)
elif name == 'member':
pass
elif name == 'bounds':
pass
else:
raise ValueError('Unrecognized XML node %s' % name)
def end_element(name):
"""Call back at the end of elements"""
if name in ['osm', 'tag', 'member', 'nd', 'bounds']:
pass
elif name == 'node':
new_node = current_state.pop()
raw_osm.nodes[new_node[0]] = new_node[1]
elif name == 'way':
new_way = current_state.pop()
raw_osm.ways[new_way[0]] = new_way[1]
elif name == 'relation':
current_state.pop()
else:
raise ValueError('Unrecognized XML node %s' % name)
parser = expat.ParserCreate()
parser.StartElementHandler = start_element
parser.EndElementHandler = end_element
try:
input_file = open(file_name, 'r')
parser.ParseFile(input_file)
except IOError:
raise ValueError('Input file %s unable to be opened' % file_name)
except expat.ExpatError as err:
raise ValueError(
'Expat parsing failure at line %d column %d of file %s' % (
                err.lineno, err.offset, file_name
)
)
return raw_osm
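# Illustrative usage (assumes a local extract such as map.osm):
#   raw = read_osm('map.osm')
#   print('%d nodes, %d ways' % (len(raw.nodes), len(raw.ways)))
#   highways = dict((i, w) for i, w in raw.ways.items() if 'highway' in w.tags)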
|
aidan-fitz/instant-press
|
refs/heads/master
|
modules/users.py
|
7
|
# -*- coding: utf-8 -*-
#
# Instant Press. Instant sites. CMS developed in Web2py Framework
# Site: http://www.instant2press.com
#
# Copyright (c) 2010 Mulone, Pablo Martín
#
# License Code: GPL, General Public License v. 2.0
# License Content: Creative Commons Attribution 3.0
#
# Also visit: www.web2py.com
# or Groups: http://groups.google.com/group/web2py
# http://groups.google.com/group/web2py-usuarios
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
import math
import datetime
from gluon.html import *
from gluon.http import *
from gluon.validators import *
from gluon.sqlhtml import *
from gluon.sql import *
import gluon.contrib.simplejson as sj
#local
from utils import *
ADMIN_USERS_LIST_PER_PAGE = 5
ADMIN_MAX_LIST_PAGES = 10
class Users(object):
def __init__(self, i2p):
self.i2p = i2p
def get_user_title(self, id):
db = self.i2p.db
T = self.i2p.environment.T
request = self.i2p.environment.request
if request.env.web2py_runtime_gae:
#in gae the user administrator is the only author.
user_title = T("Site administrator")
else:
user_title = T("Anonymous")
users = db(db.auth_user.id == id).select()
if users:
user = users[0]
user_title = user.last_name + ", " + user.first_name
return user_title
def is_user_an_admin(self):
request = self.i2p.environment.request
auth = self.i2p.environment.auth
if request.env.web2py_runtime_gae:
from google.appengine.api import users
if users.is_current_user_admin():
return True
else:
if auth:
if auth.is_logged_in():
is_admin = auth.has_membership(auth.id_group(self.i2p.config.group_admin))
if is_admin:
return True
return False
    # added support for GAE admin users
    def check_credentials_is_admin(self):
        request = self.i2p.environment.request
        auth = self.i2p.environment.auth
        T = self.i2p.environment.T
        is_an_admin = self.is_user_an_admin()
if not is_an_admin:
if request.env.web2py_runtime_gae:
from google.appengine.api import users
login_html = '<a href="%s">%s</a>.' \
% (users.create_login_url(request.env.path_info), \
T('Sign in with your google account'))
raise HTTP(200, '<html><body>%s</body></html>' % login_html)
else:
next = auth.settings.on_failed_authorization
redirect(next)
return is_an_admin
#The user is logged in?
def is_user_logged_in(self):
logged_in=False
auth = self.i2p.environment.auth
if auth:
if auth.is_logged_in():
logged_in=True
return logged_in
class admUsers(object):
def __init__(self, i2p):
self.i2p = i2p
#ADMIN
def list(self, currentpage=1, search_text=""):
db = self.i2p.db
T = self.i2p.environment.T
request = self.i2p.environment.request
auth = self.i2p.environment.auth
if not isinstance(search_text, (unicode, str) ):
search_text = ""
max_users=ADMIN_USERS_LIST_PER_PAGE
max_display_pages=ADMIN_MAX_LIST_PAGES
limit_inf = (max_users * currentpage) - max_users
limit_sup = limit_inf + max_users
query = (db.auth_user.id>0)
if search_text!="":
query = query & (db.auth_user.last_name.like(search_text+"%"))
count_users = db(query).count()
last_users = db(query).select(db.auth_user.ALL,\
orderby=~db.auth_user.created_on,\
limitby=(limit_inf, limit_sup))
link_register = A(T('Register'), \
_href=URL(request.application,\
self.i2p.config.controller_default,\
'user/register'), \
_style="padding-left: 5px;")
icon_register_url = URL(request.application,'static','images/toolbar_add.png')
toolbar_register_style = 'padding-left: 20px; background-image: url(%s); background-repeat: no-repeat;' \
% icon_register_url
refresh_list = A(T("Refresh"), _href='javascript: void(0);', \
_onclick="UserList(1);" ,\
_title="%s"%T('Reload the list'))
icon_refresh_url = URL(request.application,'static','images/toolbar_refresh.png')
toolbar_refresh_style = 'padding-left: 20px; background-image: url(%s); background-repeat: no-repeat;' \
% icon_refresh_url
input_search = '<input type="text" id="input-search" style="width: 200px; height: 20px; margin: 0px;" />'
icon_search_url = URL(request.application,'static','images/search.png')
icon_search = IMG(_src=icon_search_url, _alt="search")
do_search = A(icon_search, _href='javascript: void(0);', \
_onclick="UserSearch();" ,\
_title="%s"%T('Search in last name'))
toolbar_register = '<li style="%s">%s</li>' % (toolbar_register_style,link_register.xml())
toolbar_refresh = '<li style="%s">%s</li>' % (toolbar_refresh_style,refresh_list.xml())
toolbar_input_search = '<li>%s %s</li>' % (input_search, do_search.xml())
toolbar = '<ul>%s %s %s</ul>' % (toolbar_register,toolbar_refresh,toolbar_input_search)
list = '<div class="toolbar" style="height: 40px; width: 500px;">%s</div>' % toolbar
if last_users:
#create the header column
checkbox_all = '<input type="checkbox" id="checkboxall" />'
caption_column1 = checkbox_all
caption_column2 = T('Avatar')
caption_column3 = T('Last name')
caption_column4 = T('First name')
caption_column5 = T('Email')
caption_column6 = T('Status')
caption_column7 = T('Actions')
caption_column8 = T('Created')
row_column1 = '<div class="column1">%s</div>' % caption_column1
row_column2 = '<div class="column2">%s</div>' % caption_column2
row_column3 = '<div class="column3">%s</div>' % caption_column3
row_column4 = '<div class="column4">%s</div>' % caption_column4
row_column5 = '<div class="column5">%s</div>' % caption_column5
row_column6 = '<div class="column6">%s</div>' % caption_column6
row_column7 = '<div class="column7">%s</div>' % caption_column7
row_column8 = '<div class="column8">%s</div>' % caption_column8
row_clear = '<div style="clear: both;"></div>'
row_user_xml = '<div class="row-user-headers"> %s %s %s %s %s %s %s %s %s </div>' \
% (row_column1,row_column2,row_column3,row_column4,\
row_column5,row_column6,row_column7,row_column8,row_clear)
list += row_user_xml
#titles are hints
title_edit_firstname = T('Click to change the first name of this user')
title_edit_lastname = T('Click to change the last name of this user')
title_edit_email = T('Click to change the email of this user')
title_delete = T('Click to delete this user')
title_activate = T('Click to activate this user, this will delete the disabled, blocked and pending status of the current user')
title_disable = T('Click to disable this user')
title_changepass = T('Click to change pass')
title_setasadmin = T('Click to set this user as an admin. In AppEngine make an admin in your Appspot account')
title_block = T('Click to block this user')
#id group admin
id_group_admin = auth.id_group(self.i2p.config.group_admin)
for user in last_users:
title_avatar = T('User ID: %s'%user.id)
if user.registration_key == 'pending':
caption_status = '<span style="color: orange;">' + str(T('Pending')) + '</span>'
elif user.registration_key == 'disabled':
caption_status = '<span style="color: orange;">' + str(T('Disabled'))+ '</span>'
elif user.registration_key == 'blocked':
caption_status = '<span style="color: orange;">' + str(T('Blocked')) + '</span>'
else:
caption_status = '<span style="color: green;">' + str(T('Active')) + '</span>'
if auth.has_membership(id_group_admin, user.id, self.i2p.config.group_admin):
caption_status += ', <span style="color: red;">' + str(T('Admin'))+ '</span>'
checkbox_user = '<input type="checkbox" id="checkbox-%s" />'%user.id
icon_avatar = IMG(_src=URL(request.application,'static','images/avatar.png'), \
_alt="avatar", _width="24px", _height="24px", \
_title="%s"%title_avatar)
link_edit_firstname = A(user.first_name, _href='javascript: void(0);', \
_onclick="UserFirstName(%s);"%(user.id), \
_title="%s"%title_edit_firstname)
link_edit_lastname = A(user.last_name, _href='javascript: void(0);', \
_onclick="UserLastName(%s);"%(user.id), \
_title="%s"%title_edit_lastname)
link_edit_email = A(user.email , _href='javascript: void(0);', \
_onclick="UserEmail(%s);"%(user.id), \
_title="%s"%title_edit_email)
icon_remove = IMG(_src=URL(request.application,'static','images/remove.png'), \
_alt="remove")
link_delete = A(icon_remove , _href='javascript: void(0);', \
_onclick="UserDelete(%s);"%(user.id), \
_title="%s"%title_delete)
icon_activate = IMG(_src=URL(request.application,'static','images/activate.png'), \
_alt="activate")
link_activate = A(icon_activate , _href='javascript: void(0);', \
_onclick="UserActivate(%s);"%(user.id), \
_title="%s"%title_activate)
icon_disable = IMG(_src=URL(request.application,'static','images/disable.png'), \
_alt="disable")
link_desactivate = A(icon_disable , _href='javascript: void(0);', \
_onclick="UserDisable(%s);"%(user.id), \
_title="%s"%title_disable)
icon_change = IMG(_src=URL(request.application,'static','images/pass.gif'), \
_alt="change pass")
link_change = A(icon_change, _href='javascript: void(0);', \
_onclick="UserPassword(%s);"%(user.id), \
_title="%s"%title_changepass)
icon_setadmin = IMG(_src=URL(request.application,'static','images/setadmin.png'), \
_alt="set admin")
link_setadmin = A(icon_setadmin , _href='javascript: void(0);', \
_onclick="UserSetAdmin(%s);"%(user.id), \
_title="%s"%title_setasadmin)
link_block = A(icon_disable , _href='javascript: void(0);', \
_onclick="UserBlock(%s);"%(user.id), \
_title="%s"%title_block)
link_actions = link_delete.xml() + ' ' + link_activate.xml() + ' ' + \
link_desactivate.xml() + ' ' + link_change.xml() + ' ' + \
link_setadmin.xml() + ' ' + link_block.xml()
created_on = user.created_on.strftime("%Y-%m-%d:%I:%M:%p")
row_column1 = '<div class="column1">%s</div>' % checkbox_user
row_column2 = '<div class="column2">%s</div>' % icon_avatar.xml()
row_column3 = '<div class="column3">%s</div>' % link_edit_lastname.xml()
row_column4 = '<div class="column4">%s</div>' % link_edit_firstname.xml()
row_column5 = '<div class="column5">%s</div>' % link_edit_email.xml()
row_column6 = '<div class="column6">%s</div>' % caption_status
row_column7 = '<div class="column7">%s</div>' % link_actions
row_column8 = '<div class="column8">%s</div>' % created_on
row_clear = '<div style="clear: both;"></div>'
row_user_xml = '<div class="row-user" id="row-%s"> %s %s %s %s %s %s %s %s %s</div>' \
% (user.id,row_column1,row_column2,row_column3,row_column4,\
row_column5,row_column6,row_column7,row_column8,row_clear)
list += row_user_xml
if count_users>max_users:
total_pages = count_users // max_users
if (count_users % max_users)>0:
total_pages += 1
first_page = int(math.ceil(currentpage / max_display_pages)) * max_display_pages
if first_page<1:
first_page=1
if total_pages < max_display_pages:
last_page = total_pages
else:
last_page=max_display_pages
else:
last_page=first_page + max_display_pages
backward = A(T("Prior"), _href='javascript: void(0);', \
_onclick="UsersList(%s,'%s');"%(currentpage-1,search_text))
forward = A(T("Next"), _href='javascript: void(0);', \
_onclick="UsersList(%s,'%s');"%(currentpage+1,search_text))
listpages=""
if currentpage>1:
listpages += "<li>%s</li>" % backward.xml()
for page in range(first_page, last_page+1):
page_a = A(unicode(page), _href='javascript: void(0);', \
_onclick="UsersList(%s,'%s');"%(page,search_text))
if page<=total_pages:
if page==currentpage:
class_current = ' class="current"'
else:
class_current = ''
listpages += "<li%s>%s</li>" % (class_current, page_a.xml())
if total_pages>currentpage:
listpages += "<li>%s</li>" % forward.xml()
if listpages!="":
list+='<div class="pages"><ul>%s</ul></div>' % listpages
page_content=list
else:
page_content=list + "%s"%T("No users")
html_content = '<h2>%s</h2>'%T("Users")
html_content += "%s"%page_content
info={}
info['html']=sanitate_string(html_content)
return sj.dumps(info)
def delete(self, id):
db = self.i2p.db
T = self.i2p.environment.T
request = self.i2p.environment.request
session = self.i2p.environment.session
auth = self.i2p.environment.auth
users = db(db.auth_user.id == id).select()
if users:
user = users[0]
if user.email!=self.i2p.config.email or request.env.web2py_runtime_gae:
if not request.env.web2py_runtime_gae:
if user.email==auth.user.email:
return json_response(message=T("You cannot delete yourself!"),\
alert=2,value="")
id_user = user.id
db(db.auth_group.role=='user_%s'%id_user).delete()
db(db.auth_membership.user_id==id_user).delete()
db(db.auth_user.id == id_user).delete()
return json_response(message=T("User deleted"),\
alert=0,value="")
else:
return json_response(message=T("You cannot delete default user!"),\
alert=2,value="")
else:
return json_response(message=T("The user doesn't exist!"),\
alert=2,value="")
def disable(self, id):
db = self.i2p.db
T = self.i2p.environment.T
request = self.i2p.environment.request
session = self.i2p.environment.session
auth = self.i2p.environment.auth
users = db(db.auth_user.id == id).select()
if users:
user = users[0]
if not request.env.web2py_runtime_gae:
if user.email==auth.user.email:
return json_response(message=T("You cannot disable yourself!"),\
alert=2,value="")
user.update_record(registration_key = 'disabled')
return json_response(message=T("User disabled"),\
alert=0,value="")
else:
return json_response(message=T("The user doesn't exist!"),\
alert=2,value="")
def activate(self, id):
db = self.i2p.db
T = self.i2p.environment.T
request = self.i2p.environment.request
session = self.i2p.environment.session
auth = self.i2p.environment.auth
users = db(db.auth_user.id == id).select()
if users:
user = users[0]
if user.email!=self.i2p.config.email or request.env.web2py_runtime_gae:
if not request.env.web2py_runtime_gae:
if user.email==auth.user.email:
return json_response(message=T("You cannot activate yourself!"), \
alert=2,value="")
user.update_record(registration_key = '')
return json_response(message= T("User activated"),\
alert=0,value="")
else:
return json_response(message=T("You cannot activate default user!"),\
alert=2,value="")
else:
return json_response(message=T("The user doesn't exist!"),\
alert=2,value="")
def setadmin(self, id):
db = self.i2p.db
T = self.i2p.environment.T
request = self.i2p.environment.request
session = self.i2p.environment.session
auth = self.i2p.environment.auth
users = db(db.auth_user.id == id).select()
if users:
user = users[0]
if user.email!=self.i2p.config.email or request.env.web2py_runtime_gae:
if not request.env.web2py_runtime_gae:
if user.email==auth.user.email:
return json_response(message=T("You cannot set as admin yourself!"),\
alert=2,value="")
id_group_admin = auth.id_group(self.i2p.config.group_admin)
if auth.has_membership(id_group_admin, user.id, self.i2p.config.group_admin):
auth.del_membership(id_group_admin, user.id)
return json_response(message=T("User has been removed from admin list") ,\
alert=0,value="")
else:
auth.add_membership(id_group_admin, user.id)
return json_response(message=T("User has been added to admin list"),\
alert=0,value="")
else:
return json_response(message=T("You cannot edit default user!"),\
alert=2,value="")
else:
return json_response(message=T("The user doesn't exist!"),\
alert=2,value="")
def save_firstname(self, id, value):
db = self.i2p.db
T = self.i2p.environment.T
request = self.i2p.environment.request
session = self.i2p.environment.session
auth = self.i2p.environment.auth
value = value.strip()
users = db(db.auth_user.id == id).select()
if users:
user = users[0]
if user.email!=self.i2p.config.email or request.env.web2py_runtime_gae:
user.update_record(first_name = value)
return json_response(message=T('Firstname updated'),\
alert=0,value="")
else:
return json_response(message=T("You cannot edit default user!"),\
alert=2,value="")
else:
return json_response(message=T("The user doesn't exist!"),\
alert=2,value="")
def save_lastname(self, id, value):
db = self.i2p.db
T = self.i2p.environment.T
request = self.i2p.environment.request
session = self.i2p.environment.session
auth = self.i2p.environment.auth
value = value.strip()
users = db(db.auth_user.id == id).select()
if users:
user = users[0]
if user.email!=self.i2p.config.email or request.env.web2py_runtime_gae:
user.update_record(last_name = value)
return json_response(message=T('Lastname updated'),\
alert=0,value="")
else:
return json_response(message=T("You cannot edit default user!"),\
alert=2,value="")
else:
return json_response(message=T("The user doesn't exist!"),\
alert=2,value="")
# Need to check whether the email already exists in the db
def save_email(self, id, value):
db = self.i2p.db
T = self.i2p.environment.T
request = self.i2p.environment.request
session = self.i2p.environment.session
auth = self.i2p.environment.auth
value = value.strip()
notvalid = (IS_EMAIL()(value))[1]
if notvalid:
return json_response(message=T("The email is not valid"),\
alert=2,value="")
users = db(db.auth_user.id == id).select()
if users:
user = users[0]
if user.email!=self.i2p.config.email or request.env.web2py_runtime_gae:
user.update_record(email = value)
return json_response(message=T('Email updated'),\
alert=0,value="")
else:
return json_response(message=T("You cannot edit default user!"),\
alert=2,value="")
else:
return json_response(message=T("The user doesn't exist!"),\
alert=2,value="")
def get_email(self, id):
db = self.i2p.db
T = self.i2p.environment.T
users = db(db.auth_user.id == id).select()
if users:
user = users[0]
value = user.email
return json_response(message="",alert=0,value=value)
else:
return json_response(message=T("The user doesn't exist!"),\
alert=2,value="")
def get_firstname(self, id):
db = self.i2p.db
T = self.i2p.environment.T
users = db(db.auth_user.id == id).select()
if users:
user = users[0]
value = user.first_name
return json_response(message="",alert=0,value=value)
else:
return json_response(message=T("The user doesn't exist!"),\
alert=2,value="")
def get_lastname(self, id):
db = self.i2p.db
T = self.i2p.environment.T
users = db(db.auth_user.id == id).select()
if users:
user = users[0]
value = user.last_name
return json_response(message="",alert=0,value=value)
else:
return json_response(message=T("The user doesn't exist!"),\
alert=2,value="")
def block(self, id):
db = self.i2p.db
T = self.i2p.environment.T
request = self.i2p.environment.request
session = self.i2p.environment.session
auth = self.i2p.environment.auth
users = db(db.auth_user.id == id).select()
if users:
user = users[0]
if not request.env.web2py_runtime_gae:
if user.email==auth.user.email:
return json_response(message= T("You cannot block yourself!"),\
alert=2,value="")
user.update_record(registration_key = 'blocked')
return json_response(message=T("User blocked"),\
alert=0,value="")
else:
return json_response(message=T("The user doesn't exist!"),\
alert=2,value="")
def change_password(self, id, value):
db = self.i2p.db
T = self.i2p.environment.T
request = self.i2p.environment.request
session = self.i2p.environment.session
auth = self.i2p.environment.auth
value = value.strip()
notvalid = (IS_LENGTH(minsize=6)(value))[1]
if notvalid:
return json_response(message=T("The password is not valid, the minsize of a password is 6 character"),\
alert=2,value="")
users = db(db.auth_user.id == id).select()
if users:
user = users[0]
if user.email!=self.i2p.config.email or request.env.web2py_runtime_gae:
if not request.env.web2py_runtime_gae:
if user.email==auth.user.email:
return json_response(message=T("You cannot change your password in this panel, use your profile"),\
alert=2,value="")
my_crypt = CRYPT(key=auth.settings.hmac_key)
crypt_pass = my_crypt(value)[0]
user.update_record(password = crypt_pass)
return json_response(message= T("User password changed"),\
alert=0,value="")
else:
return json_response(message=T("You cannot change passwor of the default user!"),\
alert=2,value="")
else:
return json_response(message=T("The user doesn't exist!"),\
alert=2,value="")
#this only returns a randomly generated password;
#the passwords stored in the db are hashed.
def get_password(self, id):
db = self.i2p.db
T = self.i2p.environment.T
def random_password():
import string
import random
password = ''
specials=r'!#$*'
for i in range(0,3):
password += random.choice(string.lowercase)
password += random.choice(string.uppercase)
password += random.choice(string.digits)
password += random.choice(specials)
return ''.join(random.sample(password,len(password)))
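# Illustrative output only: random_password() returns a shuffled
# 12-character string such as 'aB3!xY7$mN9#' (three lowercase letters,
# three uppercase letters, three digits and three specials).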
users = db(db.auth_user.id == id).select()
if users:
user = users[0]
value = random_password()
return json_response(message="",alert=0,value=value)
else:
return json_response(message=T("The user doesn't exist!"),\
alert=2,value="")
|
bspink/django
|
refs/heads/master
|
django/contrib/gis/geos/coordseq.py
|
129
|
"""
This module houses the GEOSCoordSeq object, which is used internally
by GEOSGeometry to house the actual coordinates of the Point,
LineString, and LinearRing geometries.
"""
from ctypes import byref, c_double, c_uint
from django.contrib.gis.geos import prototypes as capi
from django.contrib.gis.geos.base import GEOSBase
from django.contrib.gis.geos.error import GEOSException, GEOSIndexError
from django.contrib.gis.geos.libgeos import CS_PTR
from django.contrib.gis.shortcuts import numpy
from django.utils.six.moves import range
class GEOSCoordSeq(GEOSBase):
"The internal representation of a list of coordinates inside a Geometry."
ptr_type = CS_PTR
def __init__(self, ptr, z=False):
"Initializes from a GEOS pointer."
if not isinstance(ptr, CS_PTR):
raise TypeError('Coordinate sequence should initialize with a CS_PTR.')
self._ptr = ptr
self._z = z
def __iter__(self):
"Iterates over each point in the coordinate sequence."
for i in range(self.size):
yield self[i]
def __len__(self):
"Returns the number of points in the coordinate sequence."
return int(self.size)
def __str__(self):
"Returns the string representation of the coordinate sequence."
return str(self.tuple)
def __getitem__(self, index):
"Returns the coordinate sequence value at the given index."
coords = [self.getX(index), self.getY(index)]
if self.dims == 3 and self._z:
coords.append(self.getZ(index))
return tuple(coords)
def __setitem__(self, index, value):
"Sets the coordinate sequence value at the given index."
# Checking the input value
if isinstance(value, (list, tuple)):
pass
elif numpy and isinstance(value, numpy.ndarray):
pass
else:
raise TypeError('Must set coordinate with a sequence (list, tuple, or numpy array).')
# Checking the dims of the input
if self.dims == 3 and self._z:
n_args = 3
set_3d = True
else:
n_args = 2
set_3d = False
if len(value) != n_args:
raise TypeError('Dimension of value does not match.')
# Setting the X, Y, Z
self.setX(index, value[0])
self.setY(index, value[1])
if set_3d:
self.setZ(index, value[2])
# #### Internal Routines ####
def _checkindex(self, index):
"Checks the given index."
sz = self.size
if (sz < 1) or (index < 0) or (index >= sz):
raise GEOSIndexError('invalid GEOS Geometry index: %s' % str(index))
def _checkdim(self, dim):
"Checks the given dimension."
if dim < 0 or dim > 2:
raise GEOSException('invalid ordinate dimension "%d"' % dim)
# #### Ordinate getting and setting routines ####
def getOrdinate(self, dimension, index):
"Returns the value for the given dimension and index."
self._checkindex(index)
self._checkdim(dimension)
return capi.cs_getordinate(self.ptr, index, dimension, byref(c_double()))
def setOrdinate(self, dimension, index, value):
"Sets the value for the given dimension and index."
self._checkindex(index)
self._checkdim(dimension)
capi.cs_setordinate(self.ptr, index, dimension, value)
def getX(self, index):
"Get the X value at the index."
return self.getOrdinate(0, index)
def setX(self, index, value):
"Set X with the value at the given index."
self.setOrdinate(0, index, value)
def getY(self, index):
"Get the Y value at the given index."
return self.getOrdinate(1, index)
def setY(self, index, value):
"Set Y with the value at the given index."
self.setOrdinate(1, index, value)
def getZ(self, index):
"Get Z with the value at the given index."
return self.getOrdinate(2, index)
def setZ(self, index, value):
"Set Z with the value at the given index."
self.setOrdinate(2, index, value)
# ### Dimensions ###
@property
def size(self):
"Returns the size of this coordinate sequence."
return capi.cs_getsize(self.ptr, byref(c_uint()))
@property
def dims(self):
"Returns the dimensions of this coordinate sequence."
return capi.cs_getdims(self.ptr, byref(c_uint()))
@property
def hasz(self):
"""
Returns whether this coordinate sequence is 3D. This property value is
inherited from the parent Geometry.
"""
return self._z
# ### Other Methods ###
def clone(self):
"Clones this coordinate sequence."
return GEOSCoordSeq(capi.cs_clone(self.ptr), self.hasz)
@property
def kml(self):
"Returns the KML representation for the coordinates."
# Getting the substitution string depending on whether the coordinates have
# a Z dimension.
if self.hasz:
substr = '%s,%s,%s '
else:
substr = '%s,%s,0 '
return '<coordinates>%s</coordinates>' % \
''.join(substr % self[i] for i in range(len(self))).strip()
@property
def tuple(self):
"Returns a tuple version of this coordinate sequence."
n = self.size
if n == 1:
return self[0]
else:
return tuple(self[i] for i in range(n))
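# Usage sketch (illustrative; assumes GEOS is installed). The coord_seq
# property of a geometry returns a clone of its GEOSCoordSeq:
#   from django.contrib.gis.geos import LineString
#   cs = LineString((0, 0), (1, 1)).coord_seq
#   cs.size              # 2
#   cs[0]                # (0.0, 0.0)
#   cs[0] = (5.0, 5.0)   # set a coordinate via __setitem__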
|
fxfitz/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/nxos/nxos_bgp_neighbor.py
|
62
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_bgp_neighbor
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages BGP neighbor configurations.
description:
- Manages BGP neighbor configurations on NX-OS switches.
author: Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- C(state=absent) removes the whole BGP neighbor configuration.
- Passing the keyword 'default', where supported, restores a parameter's default value.
options:
asn:
description:
- BGP autonomous system number. Valid values are a string or
an integer in ASPLAIN or ASDOT notation.
required: true
vrf:
description:
- Name of the VRF. The name 'default' is a valid VRF representing
the global bgp.
default: default
neighbor:
description:
- Neighbor identifier. Valid value is a string. Neighbors may use
IPv4 or IPv6 notation, with or without prefix length.
required: true
description:
description:
- Description of the neighbor.
connected_check:
description:
- Configure whether or not to check for directly connected peer.
type: bool
capability_negotiation:
description:
- Configure whether or not to negotiate capability with
this neighbor.
type: bool
dynamic_capability:
description:
- Configure whether or not to enable dynamic capability.
type: bool
ebgp_multihop:
description:
- Specify multihop TTL for a remote peer. Valid values are
integers between 2 and 255, or keyword 'default' to disable
this property.
local_as:
description:
- Specify the local-as number for the eBGP neighbor.
Valid values are String or Integer in ASPLAIN or ASDOT notation,
or 'default', which means not to configure it.
log_neighbor_changes:
description:
- Specify whether or not to enable log messages for neighbor
up/down events.
choices: ['enable', 'disable', 'inherit']
low_memory_exempt:
description:
- Specify whether or not to shut down this neighbor under
memory pressure.
type: bool
maximum_peers:
description:
- Specify the maximum number of peers for this neighbor prefix.
Valid values are between 1 and 1000, or 'default', which does
not impose a limit. Note that this parameter is accepted
only on neighbors with address/prefix.
pwd:
description:
- Specify the password for the neighbor. Valid value is a string.
pwd_type:
description:
- Specify the encryption type the password will use. Valid values
are '3des' or 'cisco_type_7' encryption or keyword 'default'.
choices: ['3des', 'cisco_type_7', 'default']
remote_as:
description:
- Specify the autonomous system number of the neighbor.
Valid values are String or Integer in ASPLAIN or ASDOT notation,
or 'default', which means not to configure it.
remove_private_as:
description:
- Specify the config to remove private AS numbers from outbound
updates. Valid values are 'enable' to enable this config,
'disable' to disable this config, 'all' to remove all
private AS numbers, or 'replace-as' to replace the private
AS numbers.
choices: ['enable', 'disable', 'all', 'replace-as']
shutdown:
description:
- Configure to administratively shutdown this neighbor.
type: bool
suppress_4_byte_as:
description:
- Configure to suppress 4-byte AS Capability.
type: bool
timers_keepalive:
description:
- Specify keepalive timer value. Valid values are integers
between 0 and 3600 in seconds, or 'default',
which is 60.
timers_holdtime:
description:
- Specify holdtime timer value. Valid values are integers between
0 and 3600 in seconds, or 'default', which is 180.
transport_passive_only:
description:
- Specify whether or not to only allow passive connection setup.
Valid values are 'true', 'false', and 'default', which defaults
to 'false'. This property can only be configured when the
neighbor is in 'ip' address format without prefix length.
type: bool
update_source:
description:
- Specify source interface of BGP session and updates.
state:
description:
- Determines whether the config should be present or not
on the device.
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
# create a new neighbor
- nxos_bgp_neighbor:
asn: 65535
neighbor: 192.0.2.3
local_as: 20
remote_as: 30
description: "just a description"
update_source: Ethernet1/3
state: present
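# remove the same neighbor (a sketch; asn and neighbor values are illustrative)
- nxos_bgp_neighbor:
    asn: 65535
    neighbor: 192.0.2.3
    state: absent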
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["router bgp 65535", "neighbor 192.0.2.3",
"remote-as 30", "update-source Ethernet1/3",
"description just a description", "local-as 20"]
'''
import re
from ansible.module_utils.network.nxos.nxos import get_config, load_config
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.config import CustomNetworkConfig
BOOL_PARAMS = [
'capability_negotiation',
'shutdown',
'connected_check',
'dynamic_capability',
'low_memory_exempt',
'suppress_4_byte_as',
'transport_passive_only',
]
PARAM_TO_COMMAND_KEYMAP = {
'asn': 'router bgp',
'capability_negotiation': 'dont-capability-negotiate',
'connected_check': 'disable-connected-check',
'description': 'description',
'dynamic_capability': 'dynamic-capability',
'ebgp_multihop': 'ebgp-multihop',
'local_as': 'local-as',
'log_neighbor_changes': 'log-neighbor-changes',
'low_memory_exempt': 'low-memory exempt',
'maximum_peers': 'maximum-peers',
'neighbor': 'neighbor',
'pwd': 'password',
'pwd_type': 'password',
'remote_as': 'remote-as',
'remove_private_as': 'remove-private-as',
'shutdown': 'shutdown',
'suppress_4_byte_as': 'capability suppress 4-byte-as',
'timers_keepalive': 'timers',
'timers_holdtime': 'timers',
'transport_passive_only': 'transport connection-mode passive',
'update_source': 'update-source',
'vrf': 'vrf'
}
PARAM_TO_DEFAULT_KEYMAP = {
'shutdown': False,
'dynamic_capability': True,
'timers_keepalive': 60,
'timers_holdtime': 180
}
def get_value(arg, config):
command = PARAM_TO_COMMAND_KEYMAP[arg]
has_command = re.search(r'^\s+{0}$'.format(command), config, re.M)
has_command_val = re.search(r'(?:\s+{0}\s*)(?P<value>.*)$'.format(command), config, re.M)
if arg == 'dynamic_capability':
has_no_command = re.search(r'\s+no\s{0}\s*$'.format(command), config, re.M)
value = True
if has_no_command:
value = False
elif arg in BOOL_PARAMS:
value = False
if has_command:
value = True
elif arg == 'log_neighbor_changes':
value = ''
if has_command:
value = 'enable'
elif has_command_val:
value = 'disable'
elif arg == 'remove_private_as':
value = 'disable'
if has_command:
value = 'enable'
elif has_command_val:
value = has_command_val.group('value')
else:
value = ''
if has_command_val:
value = has_command_val.group('value')
if command in ['timers', 'password']:
split_value = value.split()
value = ''
if arg in ['timers_keepalive', 'pwd_type']:
value = split_value[0]
elif arg in ['timers_holdtime', 'pwd'] and len(split_value) == 2:
value = split_value[1]
return value
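# For example (illustrative config): if the neighbor section contains
# '  timers 30 90', get_value('timers_keepalive', config) returns '30'
# and get_value('timers_holdtime', config) returns '90'.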
def get_existing(module, args, warnings):
existing = {}
netcfg = CustomNetworkConfig(indent=2, contents=get_config(module))
asn_regex = re.compile(r'.*router\sbgp\s(?P<existing_asn>\d+(\.\d+)?).*', re.S)
match_asn = asn_regex.match(str(netcfg))
if match_asn:
existing_asn = match_asn.group('existing_asn')
parents = ["router bgp {0}".format(existing_asn)]
if module.params['vrf'] != 'default':
parents.append('vrf {0}'.format(module.params['vrf']))
parents.append('neighbor {0}'.format(module.params['neighbor']))
config = netcfg.get_section(parents)
if config:
for arg in args:
if arg not in ['asn', 'vrf', 'neighbor']:
existing[arg] = get_value(arg, config)
existing['asn'] = existing_asn
existing['neighbor'] = module.params['neighbor']
existing['vrf'] = module.params['vrf']
else:
warnings.append("The BGP process didn't exist but the task"
" just created it.")
return existing
def apply_key_map(key_map, table):
new_dict = {}
for key in table:
new_key = key_map.get(key)
if new_key:
new_dict[new_key] = table.get(key)
return new_dict
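# For example (illustrative input):
# apply_key_map({'asn': 'router bgp'}, {'asn': '65535', 'other': 1})
# returns {'router bgp': '65535'}; keys missing from the map are dropped.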
def state_present(module, existing, proposed, candidate):
commands = list()
proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)
existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)
for key, value in proposed_commands.items():
if value is True:
commands.append(key)
elif value is False:
commands.append('no {0}'.format(key))
elif value == 'default':
if existing_commands.get(key):
existing_value = existing_commands.get(key)
commands.append('no {0} {1}'.format(key, existing_value))
else:
if key == 'log-neighbor-changes':
if value == 'enable':
commands.append('{0}'.format(key))
elif value == 'disable':
commands.append('{0} {1}'.format(key, value))
elif value == 'inherit':
if existing_commands.get(key):
commands.append('no {0}'.format(key))
elif key == 'password':
pwd_type = module.params['pwd_type']
if pwd_type == '3des':
pwd_type = 3
else:
pwd_type = 7
command = '{0} {1} {2}'.format(key, pwd_type, value)
if command not in commands:
commands.append(command)
elif key == 'remove-private-as':
if value == 'enable':
command = '{0}'.format(key)
commands.append(command)
elif value == 'disable':
if existing_commands.get(key) != 'disable':
command = 'no {0}'.format(key)
commands.append(command)
else:
command = '{0} {1}'.format(key, value)
commands.append(command)
elif key == 'timers':
if (proposed['timers_keepalive'] != PARAM_TO_DEFAULT_KEYMAP.get('timers_keepalive') or
proposed['timers_holdtime'] != PARAM_TO_DEFAULT_KEYMAP.get('timers_holdtime')):
command = 'timers {0} {1}'.format(
proposed['timers_keepalive'],
proposed['timers_holdtime'])
if command not in commands:
commands.append(command)
else:
command = '{0} {1}'.format(key, value)
commands.append(command)
if commands:
parents = ['router bgp {0}'.format(module.params['asn'])]
if module.params['vrf'] != 'default':
parents.append('vrf {0}'.format(module.params['vrf']))
parents.append('neighbor {0}'.format(module.params['neighbor']))
# make sure that local-as is the last command in the list.
local_as_command = 'local-as {0}'.format(module.params['local_as'])
if local_as_command in commands:
commands.remove(local_as_command)
commands.append(local_as_command)
candidate.add(commands, parents=parents)
def state_absent(module, existing, proposed, candidate):
commands = []
parents = ["router bgp {0}".format(module.params['asn'])]
if module.params['vrf'] != 'default':
parents.append('vrf {0}'.format(module.params['vrf']))
commands.append('no neighbor {0}'.format(module.params['neighbor']))
candidate.add(commands, parents=parents)
def main():
argument_spec = dict(
asn=dict(required=True, type='str'),
vrf=dict(required=False, type='str', default='default'),
neighbor=dict(required=True, type='str'),
description=dict(required=False, type='str'),
capability_negotiation=dict(required=False, type='bool'),
connected_check=dict(required=False, type='bool'),
dynamic_capability=dict(required=False, type='bool'),
ebgp_multihop=dict(required=False, type='str'),
local_as=dict(required=False, type='str'),
log_neighbor_changes=dict(required=False, type='str', choices=['enable', 'disable', 'inherit']),
low_memory_exempt=dict(required=False, type='bool'),
maximum_peers=dict(required=False, type='str'),
pwd=dict(required=False, type='str'),
pwd_type=dict(required=False, type='str', choices=['3des', 'cisco_type_7', 'default']),
remote_as=dict(required=False, type='str'),
remove_private_as=dict(required=False, type='str', choices=['enable', 'disable', 'all', 'replace-as']),
shutdown=dict(required=False, type='bool'),
suppress_4_byte_as=dict(required=False, type='bool'),
timers_keepalive=dict(required=False, type='str'),
timers_holdtime=dict(required=False, type='str'),
transport_passive_only=dict(required=False, type='bool'),
update_source=dict(required=False, type='str'),
state=dict(choices=['present', 'absent'], default='present', required=False)
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=[['timers_holdtime', 'timers_keepalive'], ['pwd', 'pwd_type']],
supports_check_mode=True,
)
warnings = list()
check_args(module, warnings)
result = dict(changed=False, warnings=warnings)
state = module.params['state']
if module.params['pwd_type'] == 'default':
module.params['pwd_type'] = '0'
args = PARAM_TO_COMMAND_KEYMAP.keys()
existing = get_existing(module, args, warnings)
if existing.get('asn') and state == 'present':
if existing['asn'] != module.params['asn']:
module.fail_json(msg='Another BGP ASN already exists.',
proposed_asn=module.params['asn'],
existing_asn=existing.get('asn'))
proposed_args = dict((k, v) for k, v in module.params.items()
if v is not None and k in args)
proposed = {}
for key, value in proposed_args.items():
if key not in ['asn', 'vrf', 'neighbor', 'pwd_type']:
if str(value).lower() == 'default':
value = PARAM_TO_DEFAULT_KEYMAP.get(key, 'default')
if existing.get(key) != value:
proposed[key] = value
candidate = CustomNetworkConfig(indent=3)
if state == 'present':
state_present(module, existing, proposed, candidate)
elif state == 'absent' and existing:
state_absent(module, existing, proposed, candidate)
if candidate:
candidate = candidate.items_text()
load_config(module, candidate)
result['changed'] = True
result['commands'] = candidate
else:
result['commands'] = []
module.exit_json(**result)
if __name__ == '__main__':
main()
|
jiobert/python
|
refs/heads/master
|
Horan_Colby/email_validation-db/emailval.py
|
2
|
from flask import Flask
from flask import render_template
from flask import session
from flask import request
from flask import redirect
from flask import flash
from mysqlconnection import MySQLConnector
import re
email_regex = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
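# e.g. email_regex.match('user@example.com') matches, while
# email_regex.match('user@invalid') returns None (illustrative addresses)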
app = Flask(__name__)
app.secret_key = 'SSSH'
mysql = MySQLConnector(app, 'emaildb')
@app.route('/')
def index():
return render_template('emailval.html')
@app.route('/email', methods=['POST'])
def process():
if not email_regex.match(request.form['email']):
flash("Invalid Email Address!")
return redirect('/')
query = "INSERT INTO email (email, created_at) VALUES (:email, NOW())"
data = {
'email' : request.form['email']
}
mysql.query_db(query, data)
query = "SELECT * FROM email"
email = mysql.query_db(query)
return render_template('/success.html', all_email=email)
@app.route('/success')
def show_user():
return render_template('success.html')
if __name__ == "__main__":
    app.run(debug=True)
|
jaimegildesagredo/expects_docs_mamba_formatter
|
refs/heads/master
|
mamba/example_group.py
|
6
|
# -*- coding: utf-8 -*-
import sys
from datetime import datetime, timedelta
import inspect
from mamba import error
from mamba.example import Example, PendingExample
class ExecutionContext(object):
pass
class ExampleGroup(object):
def __init__(self, subject, parent=None, execution_context=None):
self.subject = subject
self.examples = []
self.parent = parent
self.hooks = {'before_each': [], 'after_each': [], 'before_all': [], 'after_all': []}
self._elapsed_time = timedelta(0)
self.execution_context = ExecutionContext() if execution_context is None else execution_context
def run(self, reporter):
self._start(reporter)
try:
self._run_inner_examples(reporter)
except Exception as exception:
self._set_failed()
finally:
self._finish(reporter)
def _start(self, reporter):
self._register_subject_creation_in_before_each_hook()
self._begin = datetime.utcnow()
reporter.example_group_started(self)
def _register_subject_creation_in_before_each_hook(self):
if self._can_create_subject():
self.hooks['before_each'].insert(0, lambda execution_context: self._create_subject(execution_context))
def _can_create_subject(self):
return self._subject_is_class()
def _subject_is_class(self):
return inspect.isclass(self.subject)
# TODO: This runs for every example instead of only once;
# it should be optimized.
def _create_subject(self, execution_context):
try:
execution_context.subject = self.subject()
except Exception as exc:
if hasattr(execution_context, 'subject'):
del execution_context.subject
def _run_inner_examples(self, reporter):
self.run_hook('before_all')
for example in self.examples:
example.run(reporter)
self.run_hook('after_all')
def run_hook(self, hook):
for registered in self.hooks.get(hook, []):
try:
if hasattr(registered, 'im_func'):
registered.im_func(self.execution_context)
elif callable(registered):
registered(self.execution_context)
except Exception as exception:
self._set_failed()
def _set_failed(self):
type_, value, traceback = sys.exc_info()
self.error = error.Error(value, traceback)
def _finish(self, reporter):
self._elapsed_time = datetime.utcnow() - self._begin
reporter.example_group_finished(self)
@property
def elapsed_time(self):
return self._elapsed_time
@property
def name(self):
if self._subject_is_class():
return self.subject.__name__
return self.subject
def append(self, example):
self.examples.append(example)
example.parent = self
@property
def failed(self):
return any(example.failed for example in self.examples)
@property
def error(self):
return self._error
@error.setter
def error(self, value):
self._error = value
for example in self.examples:
example.error = value
class PendingExampleGroup(ExampleGroup):
def run(self, reporter):
reporter.example_group_pending(self)
self._run_inner_examples(reporter)
def _run_inner_examples(self, reporter):
for example in self.examples:
example.run(reporter)
def append(self, example):
if not type(example) in [PendingExample, PendingExampleGroup]:
raise TypeError('A pending example or example group expected')
super(PendingExampleGroup, self).append(example)
|
miscbrah/misccoin
|
refs/heads/master
|
share/qt/make_spinner.py
|
4415
|
#!/usr/bin/env python
# W.J. van der Laan, 2011
# Make spinning .mng animation from a .png
# Requires imagemagick 6.7+
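# Usage sketch (assumes the script is run from share/qt so the relative
# paths below resolve): python make_spinner.py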
from __future__ import division
from os import path
from PIL import Image
from subprocess import Popen
SRC='img/reload_scaled.png'
DST='../../src/qt/res/movies/update_spinner.mng'
TMPDIR='/tmp'
TMPNAME='tmp-%03i.png'
NUMFRAMES=35
FRAMERATE=10.0
CONVERT='convert'
CLOCKWISE=True
DSIZE=(16,16)
im_src = Image.open(SRC)
if CLOCKWISE:
im_src = im_src.transpose(Image.FLIP_LEFT_RIGHT)
def frame_to_filename(frame):
return path.join(TMPDIR, TMPNAME % frame)
frame_files = []
for frame in xrange(NUMFRAMES):
rotation = (frame + 0.5) / NUMFRAMES * 360.0
if CLOCKWISE:
rotation = -rotation
im_new = im_src.rotate(rotation, Image.BICUBIC)
im_new.thumbnail(DSIZE, Image.ANTIALIAS)
outfile = frame_to_filename(frame)
im_new.save(outfile, 'png')
frame_files.append(outfile)
p = Popen([CONVERT, "-delay", str(FRAMERATE), "-dispose", "2"] + frame_files + [DST])
p.communicate()
|
JingJunYin/tensorflow
|
refs/heads/master
|
tensorflow/contrib/gan/python/estimator/python/head_test.py
|
27
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TFGAN's head.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.gan.python import namedtuples as tfgan_tuples
from tensorflow.contrib.gan.python.estimator.python import head
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
from tensorflow.python.training import training
def dummy_loss(gan_model, add_summaries=True): # pylint:disable=unused-argument
return math_ops.reduce_sum(gan_model.discriminator_real_outputs -
gan_model.discriminator_gen_outputs)
def get_gan_model():
# TODO(joelshor): Find a better way of creating a variable scope.
with variable_scope.variable_scope('generator') as gen_scope:
gen_var = variable_scope.get_variable('dummy_var', initializer=0.0)
with variable_scope.variable_scope('discriminator') as dis_scope:
dis_var = variable_scope.get_variable('dummy_var', initializer=0.0)
return tfgan_tuples.GANModel(
generator_inputs=None,
generated_data=array_ops.ones([3, 4]),
generator_variables=[gen_var],
generator_scope=gen_scope,
generator_fn=None,
real_data=None,
discriminator_real_outputs=array_ops.ones([1, 2, 3]) * dis_var,
discriminator_gen_outputs=array_ops.ones([1, 2, 3]) * gen_var * dis_var,
discriminator_variables=[dis_var],
discriminator_scope=dis_scope,
discriminator_fn=None)
class GANHeadTest(test.TestCase):
def setUp(self):
super(GANHeadTest, self).setUp()
self.gan_head = head.gan_head(
generator_loss_fn=dummy_loss,
discriminator_loss_fn=dummy_loss,
generator_optimizer=training.GradientDescentOptimizer(1.0),
discriminator_optimizer=training.GradientDescentOptimizer(1.0))
self.assertTrue(isinstance(self.gan_head, head.GANHead))
def _test_modes_helper(self, mode):
self.gan_head.create_estimator_spec(
features=None,
mode=mode,
logits=get_gan_model())
def test_modes_predict(self):
self._test_modes_helper(model_fn_lib.ModeKeys.PREDICT)
def test_modes_eval(self):
self._test_modes_helper(model_fn_lib.ModeKeys.EVAL)
def test_modes_train(self):
self._test_modes_helper(model_fn_lib.ModeKeys.TRAIN)
if __name__ == '__main__':
test.main()
|