repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
venth/aws-adfs | refs/heads/master | test/test_version.py | 1 | from aws_adfs import commands
class TestVersion:
    def test_returns_version(self):
        """--version should terminate the CLI with exit status 0."""
        # given / when: invoking the cli with --version must raise SystemExit
        try:
            commands.cli(['--version'])
            # reached only if the cli returned instead of exiting
            assert False
        except SystemExit as exc:
            # then: a clean exit
            assert exc.code == 0
sharkykh/SickRage | refs/heads/develop | lib/validators/slug.py | 32 | import re
from .utils import validator
# Pre-compiled pattern: one or more ASCII letters, digits, hyphens or
# underscores, anchored to the whole string.
slug_regex = re.compile(r'^[-a-zA-Z0-9_]+$')


@validator
def slug(value):
    """
    Validate whether or not given value is valid slug.

    Valid slug can contain only alphanumeric characters, hyphens and
    underscores.

    Examples::

        >>> slug('my.slug')
        ValidationFailure(func=slug, args={'value': 'my.slug'})

        >>> slug('my-slug-2134')
        True

    .. versionadded:: 0.6

    :param value: value to validate
    """
    # Truthy match object on success, None on failure; the @validator
    # decorator converts a falsy result into a ValidationFailure.
    return slug_regex.match(value)
|
pravisankar/origin | refs/heads/master | vendor/github.com/getsentry/raven-go/docs/_sentryext/verify-docs.py | 37 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This can be run as a commit hook to ensure that the rst files do not contain
some of the more frustrating issues for federated doc support.
"""
import os
import re
import sys
import errno
import subprocess
# Matches reST reference targets such as ".. _my-label:" (captures the label).
_ref_target_re = re.compile(r'^\.\.\s+_([^:]+):')
# Matches ":doc:`target`" roles (captures the target expression).
_doc_ref_re = re.compile(r':doc:`([^`]+)`')
# Matches "Some title <explicit-target>" forms (captures the explicit target).
_explicit_target_re = re.compile(r'.+?\s+\<(.*?)\>')
def find_git_root():
    """Walk upward from the current directory until a directory that
    contains a .git folder is found; return None if there is none."""
    current = os.getcwd()
    while True:
        if os.path.isdir(os.path.join(current, '.git')):
            return current
        parent = os.path.dirname(current)
        if parent == current:
            # Reached the filesystem root without finding a repo.
            return None
        current = parent
def get_ref_target(target):
    """Return the explicit target from a "Title <target>" style reference,
    or *target* unchanged when no explicit form is present."""
    match = re.search(r'.+?\s+\<(.*?)\>', target)
    if match is None:
        return target
    return match.group(1)
def find_mistakes(iterable, valid_ref_prefixes=()):
    """Yield (lineno, line, message) tuples (1-based line numbers) for each
    problem found in the lines of *iterable*."""
    def mistake(message):
        # Captures the current line/index from the enclosing loop.
        return idx + 1, line, message
    for idx, line in enumerate(iterable):
        # Make sure all lines are prefixed appropriately
        match = _ref_target_re.match(line)
        if match is not None:
            ref_target = match.group(1)
            if not ref_target.startswith(valid_ref_prefixes):
                yield mistake('Reference is not prefixed with a valid prefix '
                              '(valid prefixes: %s)' %
                              ', '.join('"%s"' % x for x in valid_ref_prefixes))
        # Disallow absolute doc links except /index
        # NOTE(review): .match() only catches :doc: roles at the start of a
        # line -- confirm this is intended rather than .search().
        match = _doc_ref_re.match(line)
        if match is not None:
            target = get_ref_target(match.group(1))
            if target != '/index' and target[:1] == '/':
                yield mistake('Absolute doc link found. This seems like a '
                              'terrible idea.')
def get_valid_ref_prefixes():
    """Derive the allowed reference prefixes from the git remote's repo
    name, e.g. repo "raven-go" -> ("raven-go-", "go-")."""
    # NOTE: written for Python 2 -- communicate() returns bytes, which this
    # script treats as str throughout.
    url = subprocess.Popen(['git', 'ls-remote', '--get-url'],
                           stdout=subprocess.PIPE).communicate()[0].strip()
    if not url:
        # No remote configured; nothing to validate against.
        return ()
    repo_name = url.split('/')[-1]
    if repo_name.endswith('.git'):
        repo_name = repo_name[:-4]  # strip the ".git" suffix
    rv = [repo_name + '-']
    if repo_name.startswith('raven-'):
        # Also allow the short form, e.g. "go-" for "raven-go".
        rv.append(repo_name[6:] + '-')
    return tuple(rv)
def warn(msg):
    # Write the warning to stderr (Python 2 print-chevron syntax).
    print >> sys.stderr, 'WARNING: %s' % msg
def find_modified_docs():
    """Yield the paths of staged (--cached) .rst files per git diff-index."""
    stdout = subprocess.Popen(['git', 'diff-index', '--cached',
                               '--name-only', 'HEAD'],
                              stdout=subprocess.PIPE).communicate()[0]
    for line in stdout.splitlines():
        if line.endswith('.rst'):
            yield line
def main():
    """Check every staged .rst file and exit(1) if any mistake was found."""
    valid_ref_prefixes = None
    warnings = 0
    for filename in find_modified_docs():
        if valid_ref_prefixes is None:
            # Computed lazily: only hit git when at least one doc changed.
            valid_ref_prefixes = get_valid_ref_prefixes()
        try:
            with open(filename) as f:
                mistakes = find_mistakes(f, valid_ref_prefixes)
                for lineno, line, msg in mistakes:
                    warn('%s (%s:%s)' % (
                        msg,
                        filename,
                        lineno,
                    ))
                    warnings += 1
        except IOError as e:
            # A staged file may have been deleted since; ignore missing files.
            if e.errno != errno.ENOENT:
                raise
    if warnings > 0:
        sys.exit(1)


if __name__ == '__main__':
    main()
|
ryano144/intellij-community | refs/heads/master | python/lib/Lib/site-packages/django/middleware/common.py | 86 | import re
from django.conf import settings
from django import http
from django.core.mail import mail_managers
from django.utils.http import urlquote
from django.core import urlresolvers
from django.utils.hashcompat import md5_constructor
from django.utils.log import getLogger
logger = getLogger('django.request')
class CommonMiddleware(object):
    """
    "Common" middleware for taking care of some basic operations:

        - Forbids access to User-Agents in settings.DISALLOWED_USER_AGENTS

        - URL rewriting: Based on the APPEND_SLASH and PREPEND_WWW settings,
          this middleware appends missing slashes and/or prepends missing
          "www."s.

            - If APPEND_SLASH is set and the initial URL doesn't end with a
              slash, and it is not found in urlpatterns, a new URL is formed
              by appending a slash at the end. If this new URL is found in
              urlpatterns, then an HTTP-redirect is returned to this new URL;
              otherwise the initial URL is processed as usual.

        - ETags: If the USE_ETAGS setting is set, ETags will be calculated
          from the entire page content and Not Modified responses will be
          returned appropriately.
    """

    def process_request(self, request):
        """
        Check for denied User-Agents and rewrite the URL based on
        settings.APPEND_SLASH and settings.PREPEND_WWW
        """

        # Check for denied User-Agents
        if 'HTTP_USER_AGENT' in request.META:
            for user_agent_regex in settings.DISALLOWED_USER_AGENTS:
                if user_agent_regex.search(request.META['HTTP_USER_AGENT']):
                    logger.warning('Forbidden (User agent): %s' % request.path,
                        extra={
                            'status_code': 403,
                            'request': request
                        }
                    )
                    return http.HttpResponseForbidden('<h1>Forbidden</h1>')

        # Check for a redirect based on settings.APPEND_SLASH
        # and settings.PREPEND_WWW
        host = request.get_host()
        old_url = [host, request.path]
        new_url = old_url[:]

        if (settings.PREPEND_WWW and old_url[0] and
                not old_url[0].startswith('www.')):
            new_url[0] = 'www.' + old_url[0]

        # Append a slash if APPEND_SLASH is set and the URL doesn't have a
        # trailing slash and there is no pattern for the current path
        if settings.APPEND_SLASH and (not old_url[1].endswith('/')):
            urlconf = getattr(request, 'urlconf', None)
            # Redirect only when the slashless path does NOT resolve but the
            # slashed variant does.
            if (not _is_valid_path(request.path_info, urlconf) and
                    _is_valid_path("%s/" % request.path_info, urlconf)):
                new_url[1] = new_url[1] + '/'
                if settings.DEBUG and request.method == 'POST':
                    # A redirect would silently drop the POST body, so fail
                    # loudly in DEBUG instead (Python 2 raise syntax).
                    raise RuntimeError, (""
                    "You called this URL via POST, but the URL doesn't end "
                    "in a slash and you have APPEND_SLASH set. Django can't "
                    "redirect to the slash URL while maintaining POST data. "
                    "Change your form to point to %s%s (note the trailing "
                    "slash), or set APPEND_SLASH=False in your Django "
                    "settings.") % (new_url[0], new_url[1])

        if new_url == old_url:
            # No redirects required.
            return
        if new_url[0]:
            newurl = "%s://%s%s" % (
                request.is_secure() and 'https' or 'http',
                new_url[0], urlquote(new_url[1]))
        else:
            newurl = urlquote(new_url[1])
        if request.GET:
            # Preserve the raw query string on the redirect.
            newurl += '?' + request.META['QUERY_STRING']
        return http.HttpResponsePermanentRedirect(newurl)

    def process_response(self, request, response):
        "Send broken link emails and calculate the Etag, if needed."
        if response.status_code == 404:
            if settings.SEND_BROKEN_LINK_EMAILS:
                # If the referrer was from an internal link or a non-search-engine site,
                # send a note to the managers.
                domain = request.get_host()
                referer = request.META.get('HTTP_REFERER', None)
                is_internal = _is_internal_request(domain, referer)
                path = request.get_full_path()
                if referer and not _is_ignorable_404(path) and (is_internal or '?' not in referer):
                    ua = request.META.get('HTTP_USER_AGENT', '<none>')
                    ip = request.META.get('REMOTE_ADDR', '<none>')
                    mail_managers("Broken %slink on %s" % ((is_internal and 'INTERNAL ' or ''), domain),
                                  "Referrer: %s\nRequested URL: %s\nUser agent: %s\nIP address: %s\n" \
                                      % (referer, request.get_full_path(), ua, ip))
                return response

        # Use ETags, if requested.
        if settings.USE_ETAGS:
            if response.has_header('ETag'):
                etag = response['ETag']
            else:
                # Derive the ETag from the md5 of the full response body.
                etag = '"%s"' % md5_constructor(response.content).hexdigest()
            if response.status_code >= 200 and response.status_code < 300 and request.META.get('HTTP_IF_NONE_MATCH') == etag:
                # Preserve cookies across the swap to a 304 response.
                cookies = response.cookies
                response = http.HttpResponseNotModified()
                response.cookies = cookies
            else:
                response['ETag'] = etag

        return response
def _is_ignorable_404(uri):
    """
    Returns True if a 404 at the given URL *shouldn't* notify the site managers.
    """
    if any(uri.startswith(prefix) for prefix in settings.IGNORABLE_404_STARTS):
        return True
    return any(uri.endswith(suffix) for suffix in settings.IGNORABLE_404_ENDS)
def _is_internal_request(domain, referer):
    """
    Returns true if the referring URL is the same domain as the current request.
    """
    # No referrer at all cannot be internal.
    if referer is None:
        return False
    # Different subdomains are treated as different domains.
    return re.match("^https?://%s/" % re.escape(domain), referer)
def _is_valid_path(path, urlconf=None):
    """
    Returns True if the given path resolves against the default URL resolver,
    False otherwise.

    This is a convenience method to make working with "is this a match?" cases
    easier, avoiding unnecessarily indented try...except blocks.
    """
    try:
        urlresolvers.resolve(path, urlconf)
    except urlresolvers.Resolver404:
        return False
    return True
|
ActiveState/code | refs/heads/master | recipes/Python/578789_Sticky_window/recipe-578789.py | 1 | from PyQt4 import QtGui
from PyQt4.QtGui import QApplication
import sys, ctypes
class WINDOWPOS(ctypes.Structure):
    # Mirrors the Win32 WINDOWPOS struct delivered with WM_WINDOWPOSCHANGING.
    _fields_ = [
        ('hwnd', ctypes.c_ulong),             # window being moved/sized
        ('hwndInsertAfter', ctypes.c_ulong),  # Z-order insertion target
        ('x', ctypes.c_int),                  # new left position
        ('y', ctypes.c_int),                  # new top position
        ('cx', ctypes.c_int),                 # new width
        ('cy', ctypes.c_int),                 # new height
        ('flags', ctypes.c_ulong)             # SWP_* positioning flags
    ]


WM_WINDOWPOSCHANGING = 0x46 #Sent to a window whose size, position, or place in the Z order is about to change
class AuMainWindow(QtGui.QMainWindow):
    """Main window that "sticks" to nearby screen edges while being moved
    or resized, by rewriting the pending window position in-place."""

    def winEvent(self, message):
        # PyQt4 native Windows event hook.
        if message.message == WM_WINDOWPOSCHANGING:
            stickAt = 10 #px near screen edge
            # lParam points at the in-flight WINDOWPOS struct; mutating it
            # changes where the window will actually be placed.
            pos = WINDOWPOS.from_address(message.lParam)
            mon = QApplication.desktop().availableGeometry(self)
            # Snap horizontally to the left/right edge of the monitor.
            if abs(pos.x - mon.left()) <= stickAt:
                pos.x = mon.left()
            elif abs(pos.x + pos.cx - mon.right()) <= stickAt:
                pos.x = mon.right() - pos.cx
            # Snap vertically to the top/bottom edge.
            if abs(pos.y - mon.top()) <= stickAt:
                pos.y = mon.top()
            elif abs(pos.y + pos.cy - mon.bottom()) <= stickAt:
                pos.y = mon.bottom() - pos.cy
        # (False, 0): let Qt continue normal processing of the event.
        return False, 0
# Script entry point: create the application, show the sticky window, and
# hand control to the Qt event loop (its exit code becomes ours).
app = QtGui.QApplication(sys.argv)
mainwnd = AuMainWindow()
mainwnd.show()
sys.exit(app.exec_())
|
almeidapaulopt/erpnext | refs/heads/develop | erpnext/accounts/doctype/pos_profile/test_pos_profile.py | 2 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
from erpnext.stock.get_item_details import get_pos_profile
from erpnext.accounts.doctype.sales_invoice.pos import get_items_list, get_customers_list
class TestPOSProfile(unittest.TestCase):
    def test_pos_profile(self):
        """Restrict a POS Profile to one item group and one customer group,
        then verify the filtered item/customer lists match the database
        counts for those groups."""
        make_pos_profile()

        pos_profile = get_pos_profile("_Test Company") or {}
        if pos_profile:
            doc = frappe.get_doc("POS Profile", pos_profile.get("name"))
            # Restrict the profile to a single item group / customer group.
            doc.append('item_groups', {'item_group': '_Test Item Group'})
            doc.append('customer_groups', {'customer_group': '_Test Customer Group'})
            doc.save()

            items = get_items_list(doc)
            customers = get_customers_list(doc)

            products_count = frappe.db.sql(""" select count(name) from tabItem where item_group = '_Test Item Group'""", as_list=1)
            customers_count = frappe.db.sql(""" select count(name) from tabCustomer where customer_group = '_Test Customer Group'""")

            self.assertEqual(len(items), products_count[0][0])
            self.assertEqual(len(customers), customers_count[0][0])

        # Clean up all POS Profiles created by this test.
        frappe.db.sql("delete from `tabPOS Profile`")
def make_pos_profile():
    """Create the "_Test POS Profile" fixture, clearing any existing POS
    Profiles first."""
    frappe.db.sql("delete from `tabPOS Profile`")

    pos_profile = frappe.get_doc({
        "company": "_Test Company",
        "cost_center": "_Test Cost Center - _TC",
        "currency": "INR",
        "doctype": "POS Profile",
        "expense_account": "_Test Account Cost for Goods Sold - _TC",
        "income_account": "Sales - _TC",
        "name": "_Test POS Profile",
        "pos_profile_name": "_Test POS Profile",
        "naming_series": "_T-POS Profile-",
        "selling_price_list": "_Test Price List",
        "territory": "_Test Territory",
        "customer_group": frappe.db.get_value('Customer Group', {'is_group': 0}, 'name'),
        "warehouse": "_Test Warehouse - _TC",
        "write_off_account": "_Test Write Off - _TC",
        "write_off_cost_center": "_Test Write Off Cost Center - _TC"
    })
    # NOTE(review): all POS Profiles were deleted above, so this exists()
    # check should always be False -- presumably defensive; confirm.
    if not frappe.db.exists("POS Profile", "_Test POS Profile"):
        pos_profile.insert()
windyuuy/opera | refs/heads/master | chromium/src/tools/grit/grit/exception.py | 54 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Exception types for GRIT.
'''
class Base(Exception):
  '''A base exception that uses the class's docstring in addition to any
  user-provided message as the body of the Base.
  '''
  def __init__(self, msg=''):
    # The class docstring is the default message; a user-supplied message
    # is appended to it after ': '.
    doc = self.__doc__
    if not msg:
      msg = doc
    elif doc:
      msg = doc + ': ' + msg
    super(Base, self).__init__(msg)
# Concrete exception types. NOTE: each class docstring doubles as the
# default error message (Base folds __doc__ into the message), so the
# docstring text is behaviorally significant and must not be reworded.
class Parsing(Base):
  '''An error occurred parsing a GRD or XTB file.'''
  pass


class UnknownElement(Parsing):
  '''An unknown node type was encountered.'''
  pass


class MissingElement(Parsing):
  '''An expected element was missing.'''
  pass


class UnexpectedChild(Parsing):
  '''An unexpected child element was encountered (on a leaf node).'''
  pass


class UnexpectedAttribute(Parsing):
  '''The attribute was not expected'''
  pass


class UnexpectedContent(Parsing):
  '''This element should not have content'''
  pass


class MissingMandatoryAttribute(Parsing):
  '''This element is missing a mandatory attribute'''
  pass


class MutuallyExclusiveMandatoryAttribute(Parsing):
  '''This element has 2 mutually exclusive mandatory attributes'''
  pass


class DuplicateKey(Parsing):
  '''A duplicate key attribute was found.'''
  pass


class TooManyExamples(Parsing):
  '''Only one <ex> element is allowed for each <ph> element.'''
  pass


class GotPathExpectedFilenameOnly(Parsing):
  '''The 'filename' attribute of <output> and the 'file' attribute of <part>
  must be bare filenames, not paths.
  '''
  pass


class FileNotFound(Parsing):
  '''The resource file was not found.
  '''
  pass


class InvalidMessage(Base):
  '''The specified message failed validation.'''
  pass


class InvalidTranslation(Base):
  '''Attempt to add an invalid translation to a clique.'''
  pass


class NoSuchTranslation(Base):
  '''Requested translation not available'''
  pass


class NotReady(Base):
  '''Attempt to use an object before it is ready, or attempt to translate
  an empty document.'''
  pass


class TooManyPlaceholders(Base):
  '''Too many placeholders for elements of the same type.'''
  pass


class MismatchingPlaceholders(Base):
  '''Placeholders do not match.'''
  pass


class InvalidPlaceholderName(Base):
  '''Placeholder name can only contain A-Z, a-z, 0-9 and underscore.'''
  pass


class BlockTagInTranslateableChunk(Base):
  '''A block tag was encountered where it wasn't expected.'''
  pass


class SectionNotFound(Base):
  '''The section you requested was not found in the RC file. Make
  sure the section ID is correct (matches the section's ID in the RC file).
  Also note that you may need to specify the RC file's encoding (using the
  encoding="" attribute) if it is not in the default Windows-1252 encoding.
  '''
  pass


class IdRangeOverlap(Base):
  '''ID range overlap.'''
  pass
|
stack-of-tasks/rbdlpy | refs/heads/master | tutorial/lib/python2.7/site-packages/OpenGL/raw/GL/NV/texture_expand_normal.py | 9 | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_NV_texture_expand_normal'


def _f(function):
    # Bind *function* against the GL platform for this extension, with the
    # standard GL error checker attached.
    return _p.createFunction(function, _p.PLATFORM.GL, 'GL_NV_texture_expand_normal', error_checker=_errors._error_checker)


# Extension enum: texture unsigned remap mode (value 0x888F).
GL_TEXTURE_UNSIGNED_REMAP_MODE_NV = _C('GL_TEXTURE_UNSIGNED_REMAP_MODE_NV', 0x888F)
|
skyostil/tracy | refs/heads/master | src/generator/Cheetah/Macros/I18n.py | 1 | import gettext
_ = gettext.gettext
class I18n(object):
    """Cheetah macro providing gettext-based message translation (stub)."""

    def __init__(self, parser):
        # The parser instance is currently unused by this stub.
        pass

    ## junk I'm playing with to test the macro framework
    # def parseArgs(self, parser, startPos):
    #     parser.getWhiteSpace()
    #     args = parser.getExpression(useNameMapper=False,
    #                                 pyTokensToBreakAt=[':']).strip()
    #     return args
    #
    # def convertArgStrToDict(self, args, parser=None, startPos=None):
    #     def getArgs(*pargs, **kws):
    #         return pargs, kws
    #     exec 'positionalArgs, kwArgs = getArgs(%(args)s)'%locals()
    #     return kwArgs

    def __call__(self,
                 src, # aka message,
                 plural=None,
                 n=None, # should be a string representing the name of the
                         # '$var' rather than $var itself
                 id=None,
                 domain=None,
                 source=None,
                 target=None,
                 comment=None,
                 # args that are automatically supplied by the parser when the
                 # macro is called:
                 parser=None,
                 macros=None,
                 isShortForm=False,
                 EOLCharsInShortForm=None,
                 startPos=None,
                 endPos=None,
                 ):
        """This is just a stub at this time.

        plural = the plural form of the message
        n = a sized argument to distinguish between single and plural forms
        id = msgid in the translation catalog
        domain = translation domain
        source = source lang
        target = a specific target lang
        comment = a comment to the translation team

        See the following for some ideas
        http://www.zope.org/DevHome/Wikis/DevSite/Projects/ComponentArchitecture/ZPTInternationalizationSupport

        Other notes:
        - There is no need to replicate the i18n:name attribute from plone / PTL,
          as cheetah placeholders serve the same purpose
        """
        #print macros['i18n']
        # Translate via gettext; only the message itself is handled so far.
        src = _(src)
        if isShortForm and endPos<len(parser):
            # Short-form macro calls must re-append the consumed EOL chars.
            return src+EOLCharsInShortForm
        else:
            return src
|
remybaranx/qtaste | refs/heads/master | tools/jython/lib/Lib/xml/sax/saxutils.py | 9 | """
A library of useful helper classes to the saxlib classes, for the
convenience of application and driver writers.
$Id: saxutils.py,v 1.37 2005/04/13 14:02:08 syt Exp $
"""
import os, urlparse, urllib2, types
import handler
import xmlreader
import sys, _exceptions, saxlib
from xml.Uri import Absolutize, MakeUrllibSafe,IsAbsolute
# String types accepted as a bare system id by prepare_input_source
# (Python 2 era: str and, when available, unicode).
try:
    _StringTypes = [types.StringType, types.UnicodeType]
except AttributeError: # 1.5 compatibility:UnicodeType not defined
    _StringTypes = [types.StringType]
def __dict_replace(s, d):
    """Replace substrings of a string using a dictionary."""
    for old, new in d.items():
        s = s.replace(old, new)
    return s
def escape(data, entities={}):
    """Escape &, <, and > in a string of data.

    You can escape other strings of data by passing a dictionary as
    the optional entities parameter. The keys and values must all be
    strings; each key will be replaced with its corresponding value.
    """
    # '&' must be escaped first so the other entities' ampersands are not
    # double-escaped. (The entity strings had been corrupted into no-op
    # self-replacements; restored here.)
    data = data.replace("&", "&amp;")
    data = data.replace("<", "&lt;")
    data = data.replace(">", "&gt;")
    if entities:
        data = __dict_replace(data, entities)
    return data
def unescape(data, entities={}):
    """Unescape &amp;, &lt;, and &gt; in a string of data.

    You can unescape other strings of data by passing a dictionary as
    the optional entities parameter. The keys and values must all be
    strings; each key will be replaced with its corresponding value.
    """
    # (The entity strings had been corrupted into no-op self-replacements;
    # restored here.)
    data = data.replace("&lt;", "<")
    data = data.replace("&gt;", ">")
    if entities:
        data = __dict_replace(data, entities)
    # must do ampersand last
    return data.replace("&amp;", "&")
def quoteattr(data, entities={}):
    """Escape and quote an attribute value.

    Escape &, <, and > in a string of data, then quote it for use as
    an attribute value. The \" character will be escaped as well, if
    necessary.

    You can escape other strings of data by passing a dictionary as
    the optional entities parameter. The keys and values must all be
    strings; each key will be replaced with its corresponding value.
    """
    data = escape(data, entities)
    if '"' in data:
        if "'" in data:
            # Both quote kinds present: double-quote the value and escape
            # embedded double quotes. (The entity string had been corrupted
            # into a no-op self-replacement; restored to "&quot;".)
            data = '"%s"' % data.replace('"', "&quot;")
        else:
            # Only double quotes present: single-quote the value instead.
            data = "'%s'" % data
    else:
        data = '"%s"' % data
    return data
# --- DefaultHandler
class DefaultHandler(handler.EntityResolver, handler.DTDHandler,
                     handler.ContentHandler, handler.ErrorHandler):
    """Default base class for SAX2 event handlers. Implements empty
    methods for all callback methods, which can be overridden by
    application implementors. Replaces the deprecated SAX1 HandlerBase
    class."""
# --- Location
class Location:
    """Represents a location in an XML entity. Initialized by being passed
    a locator, from which it reads off the current location, which is then
    stored internally."""

    def __init__(self, locator):
        # Snapshot the locator's state immediately; locators are typically
        # only valid for the duration of a parser callback.
        self.__col = locator.getColumnNumber()
        self.__line = locator.getLineNumber()
        self.__pubid = locator.getPublicId()
        self.__sysid = locator.getSystemId()

    def getColumnNumber(self):
        return self.__col

    def getLineNumber(self):
        return self.__line

    def getPublicId(self):
        return self.__pubid

    def getSystemId(self):
        return self.__sysid

    def __str__(self):
        # Unknown positions render as "?", unknown sources as "<unknown>".
        line = "?" if self.__line is None else self.__line
        col = "?" if self.__col is None else self.__col
        source = self.__sysid or self.__pubid or "<unknown>"
        return "%s:%s:%s" % (source, line, col)
# --- ErrorPrinter
class ErrorPrinter:
    "A simple class that just prints error messages to standard out."

    def __init__(self, level=0, outfile=sys.stderr):
        # level: minimum severity to report (0=warnings, 1=errors, 2=fatal).
        self._level = level
        self._outfile = outfile

    def warning(self, exception):
        if self._level <= 0:
            self._outfile.write("WARNING in %s: %s\n" %
                                (self.__getpos(exception),
                                 exception.getMessage()))

    def error(self, exception):
        if self._level <= 1:
            self._outfile.write("ERROR in %s: %s\n" %
                                (self.__getpos(exception),
                                 exception.getMessage()))

    def fatalError(self, exception):
        if self._level <= 2:
            self._outfile.write("FATAL ERROR in %s: %s\n" %
                                (self.__getpos(exception),
                                 exception.getMessage()))

    def __getpos(self, exception):
        # Format "sysid:line:col" for parse errors; other exception types
        # carry no position information.
        if isinstance(exception, _exceptions.SAXParseException):
            return "%s:%s:%s" % (exception.getSystemId(),
                                 exception.getLineNumber(),
                                 exception.getColumnNumber())
        else:
            return "<unknown>"
# --- ErrorRaiser
class ErrorRaiser:
    "A simple class that just raises the exceptions it is passed."

    def __init__(self, level=0):
        # level acts as a severity threshold: 0 raises everything,
        # 1 suppresses warnings, 2 suppresses warnings and errors.
        self._level = level

    def warning(self, exception):
        if self._level <= 0:
            raise exception

    def error(self, exception):
        if self._level <= 1:
            raise exception

    def fatalError(self, exception):
        if self._level <= 2:
            raise exception
# --- AttributesImpl now lives in xmlreader
from xmlreader import AttributesImpl
# --- XMLGenerator is the SAX2 ContentHandler for writing back XML
import codecs
def _outputwrapper(stream, encoding):
    """Wrap *stream* in a codecs StreamWriter for *encoding*."""
    # codecs.lookup(...)[3] is the StreamWriter class for the codec.
    return codecs.lookup(encoding)[3](stream)
# Two implementations of writetext, chosen by codecs capability at import
# time; both write *text* escaped, replacing unencodable characters with
# numeric character references.
if hasattr(codecs, "register_error"):
    def writetext(stream, text, entities={}):
        # Temporarily switch the stream's error mode so characters outside
        # the target encoding become numeric character references.
        stream.errors = "xmlcharrefreplace"
        stream.write(escape(text, entities))
        stream.errors = "strict"
else:
    def writetext(stream, text, entities={}):
        # Fallback: try the whole string, then retry character by character,
        # emitting "&#NNN;" for anything the encoding cannot represent.
        text = escape(text, entities)
        try:
            stream.write(text)
        except UnicodeError:
            for c in text:
                try:
                    stream.write(c)
                except UnicodeError:
                    stream.write("&#%d;" % ord(c))
def writeattr(stream, text):
    """Write *text* to *stream* as a quoted XML attribute value, choosing
    the quote character that requires the least escaping."""
    countdouble = text.count('"')
    if countdouble:
        countsingle = text.count("'")
        if countdouble <= countsingle:
            # Double-quote and escape embedded double quotes. (The entity
            # strings had been corrupted into no-op self-replacements;
            # restored to the proper XML entities.)
            entities = {'"': "&quot;"}
            quote = '"'
        else:
            # Fewer double quotes than single: single-quote the value and
            # escape the single quotes instead.
            entities = {"'": "&apos;"}
            quote = "'"
    else:
        entities = {}
        quote = '"'
    stream.write(quote)
    writetext(stream, text, entities)
    stream.write(quote)
class XMLGenerator(handler.ContentHandler):
    """SAX2 ContentHandler that serializes the events it receives back out
    as XML on an output stream."""

    # Template for prefixes invented for attributes that arrive with a
    # namespace URI but no declared prefix.
    GENERATED_PREFIX = "xml.sax.saxutils.prefix%s"

    def __init__(self, out=None, encoding="iso-8859-1"):
        if out is None:
            import sys
            out = sys.stdout
        handler.ContentHandler.__init__(self)
        # Wrap the raw stream in an encoding-aware writer.
        self._out = _outputwrapper(out,encoding)
        self._ns_contexts = [{}] # contains uri -> prefix dicts
        self._current_context = self._ns_contexts[-1]
        # Prefix mappings received but not yet written as xmlns attributes.
        self._undeclared_ns_maps = []
        self._encoding = encoding
        self._generated_prefix_ctr = 0
        return

    # ContentHandler methods

    def startDocument(self):
        self._out.write('<?xml version="1.0" encoding="%s"?>\n' %
                        self._encoding)

    def startPrefixMapping(self, prefix, uri):
        # Push a copy of the current mapping so endPrefixMapping can pop it.
        self._ns_contexts.append(self._current_context.copy())
        self._current_context[uri] = prefix
        self._undeclared_ns_maps.append((prefix, uri))

    def endPrefixMapping(self, prefix):
        self._current_context = self._ns_contexts[-1]
        del self._ns_contexts[-1]

    def startElement(self, name, attrs):
        self._out.write('<' + name)
        for (name, value) in attrs.items():
            self._out.write(' %s=' % name)
            writeattr(self._out, value)
        self._out.write('>')

    def endElement(self, name):
        self._out.write('</%s>' % name)

    def startElementNS(self, name, qname, attrs):
        # name is a (uri, localname) tuple; map the uri to a known prefix.
        if name[0] is None:
            name = name[1]
        elif self._current_context[name[0]] is None:
            # default namespace
            name = name[1]
        else:
            name = self._current_context[name[0]] + ":" + name[1]
        self._out.write('<' + name)

        # Flush namespace declarations accumulated since the last element.
        for k,v in self._undeclared_ns_maps:
            if k is None:
                self._out.write(' xmlns="%s"' % (v or ''))
            else:
                self._out.write(' xmlns:%s="%s"' % (k,v))
        self._undeclared_ns_maps = []

        for (name, value) in attrs.items():
            if name[0] is None:
                name = name[1]
            elif self._current_context[name[0]] is None:
                # default namespace
                #If an attribute has a nsuri but not a prefix, we must
                #create a prefix and add a nsdecl
                prefix = self.GENERATED_PREFIX % self._generated_prefix_ctr
                self._generated_prefix_ctr = self._generated_prefix_ctr + 1
                # NOTE(review): 'name' is rebuilt on the next line before
                # name[0] is used below, so name[0] there is the first
                # *character* of the new qname, not the namespace URI --
                # looks like a bug; the URI should be captured before this
                # reassignment. Confirm against upstream PyXML.
                name = prefix + ':' + name[1]
                self._out.write(' xmlns:%s=%s' % (prefix, quoteattr(name[0])))
                self._current_context[name[0]] = prefix
            else:
                name = self._current_context[name[0]] + ":" + name[1]
            self._out.write(' %s=' % name)
            writeattr(self._out, value)
        self._out.write('>')

    def endElementNS(self, name, qname):
        # XXX: if qname is not None, we better use it.
        # Python 2.0b2 requires us to use the recorded prefix for
        # name[0], though
        if name[0] is None:
            qname = name[1]
        elif self._current_context[name[0]] is None:
            qname = name[1]
        else:
            qname = self._current_context[name[0]] + ":" + name[1]
        self._out.write('</%s>' % qname)

    def characters(self, content):
        writetext(self._out, content)

    def ignorableWhitespace(self, content):
        self._out.write(content)

    def processingInstruction(self, target, data):
        self._out.write('<?%s %s?>' % (target, data))
class LexicalXMLGenerator(XMLGenerator, saxlib.LexicalHandler):
    """A XMLGenerator that also supports the LexicalHandler interface"""

    def __init__(self, out=None, encoding="iso-8859-1"):
        XMLGenerator.__init__(self, out, encoding)
        # Tracks whether we are currently inside a CDATA section.
        self._in_cdata = 0

    def characters(self, content):
        if self._in_cdata:
            # ']]>' cannot appear literally inside a CDATA section: close
            # the section, emit the sequence with '>' escaped, and reopen a
            # new section. (The replacement string had been corrupted so the
            # '>' was emitted raw, producing ill-formed output; restored.)
            self._out.write(content.replace(']]>', ']]>]]&gt;<![CDATA['))
        else:
            self._out.write(escape(content))

    # LexicalHandler methods
    # (we only support the most important ones and inherit the rest)

    def startDTD(self, name, public_id, system_id):
        self._out.write('<!DOCTYPE %s' % name)
        if public_id:
            self._out.write(' PUBLIC %s %s' % (
                quoteattr(public_id or ""), quoteattr(system_id or "")
            ))
        elif system_id:
            self._out.write(' SYSTEM %s' % quoteattr(system_id or ""))

    def endDTD(self):
        self._out.write('>')

    def comment(self, content):
        self._out.write('<!--')
        self._out.write(content)
        self._out.write('-->')

    def startCDATA(self):
        self._in_cdata = 1
        self._out.write('<![CDATA[')

    def endCDATA(self):
        self._in_cdata = 0
        self._out.write(']]>')
# --- ContentGenerator is the SAX1 DocumentHandler for writing back XML
class ContentGenerator(XMLGenerator):
    """SAX1 DocumentHandler adapter over XMLGenerator."""

    def characters(self, str, start, end):
        # In SAX1, characters receives start and end; in SAX2, it receives
        # a string. For plain strings, we may want to use a buffer object.
        # NOTE(review): despite its name, 'end' is used as a *length* here
        # (slice start:start+end), matching the SAX1 (ch, start, length)
        # convention.
        return XMLGenerator.characters(self, str[start:start+end])
# --- XMLFilterImpl
class XMLFilterBase(saxlib.XMLFilter):
    """This class is designed to sit between an XMLReader and the
    client application's event handlers. By default, it does nothing
    but pass requests up to the reader and events on to the handlers
    unmodified, but subclasses can override specific methods to modify
    the event stream or the configuration requests as they pass
    through."""

    # ErrorHandler methods

    def error(self, exception):
        self._err_handler.error(exception)

    def fatalError(self, exception):
        self._err_handler.fatalError(exception)

    def warning(self, exception):
        self._err_handler.warning(exception)

    # ContentHandler methods

    def setDocumentLocator(self, locator):
        self._cont_handler.setDocumentLocator(locator)

    def startDocument(self):
        self._cont_handler.startDocument()

    def endDocument(self):
        self._cont_handler.endDocument()

    def startPrefixMapping(self, prefix, uri):
        self._cont_handler.startPrefixMapping(prefix, uri)

    def endPrefixMapping(self, prefix):
        self._cont_handler.endPrefixMapping(prefix)

    def startElement(self, name, attrs):
        self._cont_handler.startElement(name, attrs)

    def endElement(self, name):
        self._cont_handler.endElement(name)

    def startElementNS(self, name, qname, attrs):
        self._cont_handler.startElementNS(name, qname, attrs)

    def endElementNS(self, name, qname):
        self._cont_handler.endElementNS(name, qname)

    def characters(self, content):
        self._cont_handler.characters(content)

    def ignorableWhitespace(self, chars):
        self._cont_handler.ignorableWhitespace(chars)

    def processingInstruction(self, target, data):
        self._cont_handler.processingInstruction(target, data)

    def skippedEntity(self, name):
        self._cont_handler.skippedEntity(name)

    # DTDHandler methods

    def notationDecl(self, name, publicId, systemId):
        self._dtd_handler.notationDecl(name, publicId, systemId)

    def unparsedEntityDecl(self, name, publicId, systemId, ndata):
        self._dtd_handler.unparsedEntityDecl(name, publicId, systemId, ndata)

    # EntityResolver methods

    def resolveEntity(self, publicId, systemId):
        return self._ent_handler.resolveEntity(publicId, systemId)

    # XMLReader methods

    def parse(self, source):
        # Interpose this filter as every handler of the parent reader, then
        # delegate the actual parsing to it.
        self._parent.setContentHandler(self)
        self._parent.setErrorHandler(self)
        self._parent.setEntityResolver(self)
        self._parent.setDTDHandler(self)
        self._parent.parse(source)

    def setLocale(self, locale):
        self._parent.setLocale(locale)

    def getFeature(self, name):
        return self._parent.getFeature(name)

    def setFeature(self, name, state):
        self._parent.setFeature(name, state)

    def getProperty(self, name):
        return self._parent.getProperty(name)

    def setProperty(self, name, value):
        self._parent.setProperty(name, value)

# FIXME: remove this backward compatibility hack when not needed anymore
XMLFilterImpl = XMLFilterBase
# --- BaseIncrementalParser
class BaseIncrementalParser(xmlreader.IncrementalParser):
    """This class implements the parse method of the XMLReader
    interface using the feed, close and reset methods of the
    IncrementalParser interface as a convenience to SAX 2.0 driver
    writers."""

    def parse(self, source):
        source = prepare_input_source(source)
        self.prepareParser(source)
        self._cont_handler.startDocument()

        # FIXME: what about char-stream?
        inf = source.getByteStream()
        # Feed the parser in 16 KB chunks until EOF.
        buffer = inf.read(16384)
        while buffer != "":
            self.feed(buffer)
            buffer = inf.read(16384)
        self.close()
        self.reset()
        self._cont_handler.endDocument()

    def prepareParser(self, source):
        """This method is called by the parse implementation to allow
        the SAX 2.0 driver to prepare itself for parsing."""
        raise NotImplementedError("prepareParser must be overridden!")
# --- Utility functions
def prepare_input_source(source, base = ""):
    """This function takes an InputSource and an optional base URL and
    returns a fully resolved InputSource object ready for reading."""
    if type(source) in _StringTypes:
        # A bare system id string: wrap it in an InputSource.
        source = xmlreader.InputSource(source)
    elif hasattr(source, "read"):
        # A file-like object: use it directly as the byte stream.
        f = source
        source = xmlreader.InputSource()
        source.setByteStream(f)
        if hasattr(f, "name"):
            source.setSystemId(absolute_system_id(f.name, base))
    if source.getByteStream() is None:
        # No stream attached yet: resolve the system id and open it
        # (urllib2 handles both file: URLs and remote URLs; Python 2 only).
        sysid = absolute_system_id(source.getSystemId(), base)
        source.setSystemId(sysid)
        f = urllib2.urlopen(sysid)
        source.setByteStream(f)
    return source
def absolute_system_id(sysid, base=''):
    """Return *sysid* as an absolute, urllib-safe URI, turning existing
    local paths into file: URLs and resolving relative ids against *base*."""
    if os.path.exists(sysid):
        # Local file: build a file: URL from the absolute path.
        sysid = 'file:%s' % os.path.abspath(sysid)
    elif base:
        sysid = Absolutize(sysid, base)
    assert IsAbsolute(sysid)
    return MakeUrllibSafe(sysid)
# ===========================================================================
#
# DEPRECATED SAX 1.0 CLASSES
#
# ===========================================================================
# --- AttributeMap
class AttributeMap:
    """An implementation of AttributeList that takes an (attr,val) hash
    and uses it to implement the AttributeList interface."""

    def __init__(self, map):
        self.map=map

    def getLength(self):
        return len(self.map.keys())

    def getName(self, i):
        # Index-based access; relies on dict key ordering being stable
        # between calls (Python 2 behavior this code was written for).
        try:
            return self.map.keys()[i]
        except IndexError,e:
            return None

    def getType(self, i):
        # All attributes are reported as CDATA.
        return "CDATA"

    def getValue(self, i):
        # Accepts either a positional index or an attribute name.
        try:
            if type(i)==types.IntType:
                return self.map[self.getName(i)]
            else:
                return self.map[i]
        except KeyError,e:
            return None

    def __len__(self):
        return len(self.map)

    def __getitem__(self, key):
        # Integer subscripts return the key at that position; any other key
        # type is a plain mapping lookup.
        if type(key)==types.IntType:
            return self.map.keys()[key]
        else:
            return self.map[key]

    def items(self):
        return self.map.items()

    def keys(self):
        return self.map.keys()

    def has_key(self,key):
        return self.map.has_key(key)

    def get(self, key, alternative=None):
        return self.map.get(key, alternative)

    def copy(self):
        # Shallow copy: a new AttributeMap over a copied dict.
        return AttributeMap(self.map.copy())

    def values(self):
        return self.map.values()
# --- Event broadcasting object
class EventBroadcaster:
    """Takes a list of objects and forwards any method calls received
    to all objects in the list. The attribute list holds the list and
    can freely be modified by clients."""

    class Event:
        "Helper objects that represent event methods."

        def __init__(self, list, name):
            self.list = list
            self.name = name

        def __call__(self, *rest):
            # `apply(f, rest)` was removed in Python 3; direct argument
            # unpacking is the exact equivalent and works on 2.6+ too.
            for obj in self.list:
                getattr(obj, self.name)(*rest)

    def __init__(self, list):
        self.list = list

    def __getattr__(self, name):
        # Any attribute not found normally becomes a broadcasting
        # callable bound to the current listener list.
        return self.Event(self.list, name)

    def __repr__(self):
        return "<EventBroadcaster instance at %d>" % id(self)
# --- ESIS document handler
import saxlib
class ESISDocHandler(saxlib.HandlerBase):
    "A SAX document handler that produces naive ESIS output."

    def __init__(self, writer=sys.stdout):
        self.writer = writer

    def processingInstruction(self, target, remainder):
        """Receive an event signalling that a processing instruction
        has been found."""
        # ESIS: "?" introduces a processing instruction line.
        self.writer.write("?%s %s\n" % (target, remainder))

    def startElement(self, name, amap):
        "Receive an event signalling the start of an element."
        # "(" opens an element; each attribute follows on an "A" line.
        self.writer.write("(%s\n" % name)
        for a_name in amap.keys():
            self.writer.write("A%s %s\n" % (a_name, amap[a_name]))

    def endElement(self, name):
        "Receive an event signalling the end of an element."
        self.writer.write(")%s\n" % name)

    def characters(self, data, start_ix, length):
        "Receive an event signalling that character data has been found."
        # "-" prefixes character data lines.
        self.writer.write("-%s\n" % data[start_ix:start_ix + length])
# --- XML canonizer
class Canonizer(saxlib.HandlerBase):
    "A SAX document handler that produces canonized XML output."

    def __init__(self, writer=sys.stdout):
        # Depth inside the document element; character data outside it
        # (level 0) is dropped from canonical output.
        self.elem_level = 0
        self.writer = writer

    def processingInstruction(self, target, remainder):
        # The XML declaration itself is not part of canonical output.
        if target != "xml":
            self.writer.write("<?%s %s?>" % (target, remainder))

    def startElement(self, name, amap):
        self.writer.write("<" + name)
        # Canonical form requires attributes in sorted name order.
        for a_name in sorted(amap.keys()):
            self.writer.write(" " + a_name + "=\"")
            self.write_data(amap[a_name])
            self.writer.write("\"")
        self.writer.write(">")
        self.elem_level = self.elem_level + 1

    def endElement(self, name):
        self.writer.write("</" + name + ">")
        self.elem_level = self.elem_level - 1

    def ignorableWhitespace(self, data, start_ix, length):
        # Whitespace inside the document element is kept verbatim.
        self.characters(data, start_ix, length)

    def characters(self, data, start_ix, length):
        if self.elem_level > 0:
            self.write_data(data[start_ix:start_ix + length])

    def write_data(self, data):
        "Writes datachars to writer."
        # '&' must be escaped first so the entities inserted below are
        # not themselves re-escaped.
        for raw, entity in (("&", "&amp;"), ("<", "&lt;"),
                            ("\"", "&quot;"), (">", "&gt;"),
                            (chr(9), "&#9;"), (chr(10), "&#10;"),
                            (chr(13), "&#13;")):
            data = data.replace(raw, entity)
        self.writer.write(data)
# --- mllib
class mllib:
    """A re-implementation of the htmllib, sgmllib and xmllib interfaces as a
    SAX DocumentHandler."""

    # Unsupported:
    # - setnomoretags
    # - setliteral
    # - translate_references
    # - handle_xml
    # - handle_doctype
    # - handle_charref
    # - handle_entityref
    # - handle_comment
    # - handle_cdata
    # - tag_attributes

    def __init__(self):
        self.reset()

    def reset(self):
        # Recreate the underlying SAX driver and the adapter that
        # translates SAX events into mllib-style handler calls.
        import saxexts # only used here
        self.parser=saxexts.XMLParserFactory.make_parser()
        self.handler=mllib.Handler(self.parser,self)
        self.handler.reset()

    def feed(self,data):
        # Push a chunk of document text into the parser.
        self.parser.feed(data)

    def close(self):
        # Signal end of input; the driver flushes any pending events.
        self.parser.close()

    def get_stack(self):
        # Names of currently open elements, outermost first.
        return self.handler.get_stack()

    # --- Handler methods (to be overridden)

    def handle_starttag(self,name,method,atts):
        # Invoked for start tags that have a matching start_<name> method.
        method(atts)

    def handle_endtag(self,name,method):
        # Invoked for end tags that have a matching end_<name> method.
        method()

    def handle_data(self,data):
        # Character-data hook; subclasses override to consume text.
        pass

    def handle_proc(self,target,data):
        # Processing-instruction hook.
        pass

    def unknown_starttag(self,name,atts):
        # Start tag with no start_<name> method defined.
        pass

    def unknown_endtag(self,name):
        # End tag with no end_<name> method defined.
        pass

    def syntax_error(self,message):
        # Non-fatal parse error hook (fatal errors raise RuntimeError).
        pass

    # --- The internal handler class

    class Handler(saxlib.DocumentHandler,saxlib.ErrorHandler):
        """An internal class to handle SAX events and translate them to mllib
        events."""

        def __init__(self,driver,handler):
            self.driver=driver
            self.driver.setDocumentHandler(self)
            self.driver.setErrorHandler(self)
            self.handler=handler
            self.reset()

        def get_stack(self):
            return self.stack

        def reset(self):
            # Stack of open element names, innermost last.
            self.stack=[]

        # --- DocumentHandler methods

        def characters(self, ch, start, length):
            self.handler.handle_data(ch[start:start+length])

        def endElement(self, name):
            # Dispatch to end_<name> when defined, else unknown_endtag,
            # then pop the element from the open-tag stack.
            if hasattr(self.handler,"end_"+name):
                self.handler.handle_endtag(name,
                                           getattr(self.handler,"end_"+name))
            else:
                self.handler.unknown_endtag(name)
            del self.stack[-1]

        def ignorableWhitespace(self, ch, start, length):
            # Ignorable whitespace is forwarded as ordinary data.
            self.handler.handle_data(ch[start:start+length])

        def processingInstruction(self, target, data):
            self.handler.handle_proc(target,data)

        def startElement(self, name, atts):
            self.stack.append(name)
            # Dispatch to start_<name> when defined, else unknown_starttag.
            if hasattr(self.handler,"start_"+name):
                self.handler.handle_starttag(name,
                                             getattr(self.handler,
                                                     "start_"+name),
                                             atts)
            else:
                self.handler.unknown_starttag(name,atts)

        # --- ErrorHandler methods

        def error(self, exception):
            # Recoverable errors are routed to the syntax_error hook.
            self.handler.syntax_error(str(exception))

        def fatalError(self, exception):
            # Fatal errors abort parsing outright.
            raise RuntimeError(str(exception))
|
abdoosh00/edraak | refs/heads/master | lms/envs/cms/microsite_test.py | 1 | """
This is a localdev test for the Microsite processing pipeline
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=W0401, W0614
from .dev import *
from ..dev import ENV_ROOT, FEATURES
# One microsite definition keyed by microsite name; the inner keys override
# the matching global settings for requests served under this microsite's
# domain prefix.
MICROSITE_CONFIGURATION = {
    "openedx": {
        "domain_prefix": "openedx",
        "university": "openedx",
        "platform_name": "Open edX",
        "logo_image_url": "openedx/images/edraak-logo.jpg",
        "email_from_address": "openedx@edx.org",
        "payment_support_email": "openedx@edx.org",
        "ENABLE_MKTG_SITE": False,
        "SITE_NAME": "openedx.localhost",
        # Only courses from this org are shown on the microsite.
        "course_org_filter": "CDX",
        "course_about_show_social_links": False,
        "css_overrides_file": "openedx/css/openedx.css",
        "show_partners": False,
        "show_homepage_promo_video": False,
        "course_index_overlay_text": "Explore free courses from leading universities.",
        "course_index_overlay_logo_file": "openedx/images/header-logo.png",
        "homepage_overlay_html": "<h1>Take an Open edX Course</h1>"
    }
}

# Directory holding the microsite assets (css, images) referenced above.
MICROSITE_ROOT_DIR = ENV_ROOT / 'edx-microsite'

# pretend we are behind some marketing site, we want to be able to assert that the Microsite config values override
# this global setting
FEATURES['ENABLE_MKTG_SITE'] = True

FEATURES['USE_MICROSITES'] = True
|
azjps/bokeh | refs/heads/master | examples/charts/file/stacked_bar.py | 7 | from bokeh.charts import Bar, output_file, show
from bokeh.charts.attributes import cat, color
from bokeh.charts.operations import blend
from bokeh.charts.utils import df_from_json
from bokeh.sampledata.olympics2014 import data
# utilize utility to make it easy to get json/dict data converted to a dataframe
df = df_from_json(data)

# filter by countries with at least one medal and sort by total medals
df = df[df['total'] > 0]
# DataFrame.sort() was deprecated in pandas 0.17 and removed in 0.20;
# sort_values() is the supported, behavior-identical replacement.
df = df.sort_values("total", ascending=False)

# Stacked bar per country: one stack segment per medal colour, in
# bronze/silver/gold order with matching palette colours.
bar = Bar(df,
          values=blend('bronze', 'silver', 'gold', name='medals', labels_name='medal'),
          label=cat(columns='abbr', sort=False),
          stack=cat(columns='medal', sort=False),
          color=color(columns='medal', palette=['SaddleBrown', 'Silver', 'Goldenrod'],
                      sort=False),
          legend='top_right',
          title="Medals per Country, Sorted by Total Medals",
          tooltips=[('medal', '@medal'), ('country', '@abbr')])

output_file("stacked_bar.html", title="stacked_bar.py example")

show(bar)
|
TathagataChakraborti/resource-conflicts | refs/heads/master | PLANROB-2015/py2.5/lib/python2.5/idlelib/testcode.py | 312 | import string
# NOTE(review): this module appears to be a fixture for exercising IDLE's
# stack viewer/debugger — a chain of nested calls with distinct locals that
# ends in a failing call, producing a deep traceback. TODO confirm intent.
def f():
    # Several locals so each frame has something to inspect.
    a = 0
    b = 1
    c = 2
    d = 3
    e = 4
    g()

def g():
    h()

def h():
    i()

def i():
    j()

def j():
    k()

def k():
    l()

# A lambda frame in the middle of the chain.
l = lambda: test()

def test():
    # string.capwords expects a string; passing an int raises at the
    # bottom of the call chain.
    string.capwords(1)

# Kick off the chain at import time.
f()
|
iulian787/spack | refs/heads/develop | var/spack/repos/builtin/packages/heputils/package.py | 2 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Heputils(MakefilePackage):
    """Generic tools for high energy physics, e.g. vectors, event records,
    math and other util functions."""

    homepage = "https://bitbucket.org/andybuckley/heputils/src/default/"
    url      = "https://bitbucket.org/andybuckley/heputils/get/heputils-1.3.2.tar.gz"

    tags = ['hep']

    version('1.3.2', sha256='be43586979ab1a81a55348d795c2f63a5da19fc6367d5f66f354217c76c809c0')
    version('1.3.1', sha256='7f33ef44364a3d3a39cc65005fb6aa9dfd06bd1b18b41151c0e5e3d28d6ba15b')
    version('1.3.0', sha256='1ec9d9d71d409ce6b2e668e4927b1090ddf2ee9acf25457f767925cf89b24852')
    version('1.2.1', sha256='99f0b27cddffb98977d37418d53f3386e5defda547aeb4c4fda00ab6fcf2cc31')
    version('1.2.0', sha256='0f9f96bd7589f9aec8f1271524b8622291216fe2294ffed772b84d010759eaef')
    version('1.1.0', sha256='671374641cdb6dc093327b69da2d2854df805b6eb8e90f0efefb0788ee4a2edd')
    version('1.0.8', sha256='9b9a45ebff1367cd2ab1ec4ee8c0e124a9b7ed66c75d8961412163ade1962d91')
    version('1.0.7', sha256='481a26755d4e2836563d1f8fcdad663bfa7e21b9878c01bd8a73a67876726b81')
    version('1.0.6', sha256='1ecd8597ef7921a63606b21136900a05a818c9342da7994a42aae768ecca507f')
    version('1.0.5', sha256='efff3d7d6973822f1dced903017f86661e2d054ff3f0d4fe926de2347160e329')
    version('1.0.4', sha256='aeca00c1012bce469c6fe6393edbf4f33043ab671c97a8283a21861caee8b1b4')
    version('1.0.3', sha256='8e7ebe0ad5e87a97cbbff7097092ed8afe5a2d1ecae0f4d4f9a7bf694e221d40')
    version('1.0.2', sha256='83ba7876d884406463cc8ae42214038b7d6c40ead77a1532d64bc96887173f75')
    version('1.0.1', sha256='4bfccc4f4380becb776343e546deb2474deeae79f053ba8ca22287827b8bd4b1')
    version('1.0.0', sha256='4f71c2bee6736ed87d0151e62546d2fc9ff639db58172c26dcf033e5bb1ea04c')

    def build(self, spec, prefix):
        # Header-only project: override MakefilePackage's default build
        # stage with a no-op; there is nothing to compile.
        return

    def install(self, spec, prefix):
        # The install target copies the headers under PREFIX.
        make('install', 'PREFIX={0}'.format(prefix))
|
kronicz/ecommerce-2 | refs/heads/master | lib/python2.7/site-packages/registration/utils.py | 7 | import sys
try:
from importlib import import_module
except ImportError:
from django.utils.importlib import import_module
from django.utils import six
def import_string(dotted_path):
    """
    COPIED FROM DJANGO 1.7 (django.utils.module_loading.import_string)
    Import a dotted module path and return the attribute/class designated by the
    last name in the path. Raise ImportError if the import failed.
    """
    try:
        module_path, class_name = dotted_path.rsplit('.', 1)
    except ValueError:
        # No dot in the path: rsplit yields one element and unpacking fails.
        msg = "%s doesn't look like a module path" % dotted_path
        # six.reraise raises ImportError while keeping the original
        # traceback attached (py2/py3-portable `raise ... from` analogue).
        six.reraise(ImportError, ImportError(msg), sys.exc_info()[2])
    module = import_module(module_path)
    try:
        return getattr(module, class_name)
    except AttributeError:
        # Module imported fine but the named attribute is missing.
        msg = 'Module "%s" does not define a "%s" attribute/class' % (
            dotted_path, class_name)
        six.reraise(ImportError, ImportError(msg), sys.exc_info()[2])
|
oculusstorystudio/kraken | refs/heads/develop_OSS | Python/kraken_components/biped/hand_component.py | 2 |
from collections import OrderedDict
from kraken.core.maths import Vec3
from kraken.core.maths.xfo import Xfo
from kraken.core.objects.components.base_example_component import BaseExampleComponent
from kraken.core.objects.attributes.attribute_group import AttributeGroup
from kraken.core.objects.attributes.scalar_attribute import ScalarAttribute
from kraken.core.objects.attributes.integer_attribute import IntegerAttribute
from kraken.core.objects.attributes.string_attribute import StringAttribute
from kraken.core.objects.attributes.bool_attribute import BoolAttribute
from kraken.core.objects.constraints.pose_constraint import PoseConstraint
from kraken.core.objects.component_group import ComponentGroup
from kraken.core.objects.hierarchy_group import HierarchyGroup
from kraken.core.objects.joint import Joint
from kraken.core.objects.ctrlSpace import CtrlSpace
from kraken.core.objects.control import Control
from kraken.core.objects.operators.canvas_operator import CanvasOperator
from kraken.core.objects.operators.kl_operator import KLOperator
from kraken.core.profiler import Profiler
class HandComponent(BaseExampleComponent):
    """Hand Component Base

    Declares the component I/O shared by the guide and rig variants:
    input/output Xfo targets plus the drawDebug/rigScale attributes.
    """

    def __init__(self, name='hand', parent=None, *args, **kwargs):
        super(HandComponent, self).__init__(name, parent, *args, **kwargs)

        # ===========
        # Declare IO
        # ===========
        # Declare Inputs Xfos
        self.globalSRTInputTgt = self.createInput('globalSRT', dataType='Xfo', parent=self.inputHrcGrp).getTarget()
        self.armEndInputTgt = self.createInput('armEnd', dataType='Xfo', parent=self.inputHrcGrp).getTarget()

        # Declare Output Xfos
        self.handOutputTgt = self.createOutput('hand', dataType='Xfo', parent=self.outputHrcGrp).getTarget()

        # Declare Input Attrs
        self.drawDebugInputAttr = self.createInput('drawDebug', dataType='Boolean', value=False, parent=self.cmpInputAttrGrp).getTarget()
        self.rigScaleInputAttr = self.createInput('rigScale', dataType='Float', value=1.0, parent=self.cmpInputAttrGrp).getTarget()

        # Declare Output Attrs
class HandComponentGuide(HandComponent):
    """Hand Component Guide

    Interactive guide variant: lets a rigger place the hand and a
    configurable set of fingers, and serializes that layout for the
    rig variant via getRigBuildData().
    """

    def __init__(self, name='hand', parent=None, *args, **kwargs):

        Profiler.getInstance().push("Construct Hand Guide Component:" + name)
        super(HandComponentGuide, self).__init__(name, parent, *args, **kwargs)

        # =========
        # Controls
        # =========

        # Guide Controls
        self.guideSettingsAttrGrp = AttributeGroup("GuideSettings", parent=self)
        # Editing these attributes rebuilds the finger guide controls live.
        self.digitNamesAttr = StringAttribute('digitNames', value="thumb,index,middle,ring,pinky", parent=self.guideSettingsAttrGrp)
        self.digitNamesAttr.setValueChangeCallback(self.updateFingers)
        self.numJointsAttr = IntegerAttribute('numJoints', value=4, minValue=2, maxValue=20, parent=self.guideSettingsAttrGrp)
        self.numJointsAttr.setValueChangeCallback(self.resizeDigits)

        # Maps finger name -> ordered list of guide controls (base to tip).
        self.fingers = OrderedDict()

        self.handCtrl = Control('hand', parent=self.ctrlCmpGrp, shape="square")
        self.handCtrl.rotatePoints(0.0, 0.0, 90.0)
        self.handCtrl.scalePoints(Vec3(1.0, 0.75, 1.0))
        self.handCtrl.setColor('yellow')

        self.handGuideSettingsAttrGrp = AttributeGroup("Settings", parent=self.handCtrl)
        # Toggles visibility of the per-digit shape controls.
        self.ctrlShapeToggle = BoolAttribute('ctrlShape_vis', value=False, parent=self.handGuideSettingsAttrGrp)
        self.handDebugInputAttr = BoolAttribute('drawDebug', value=False, parent=self.handGuideSettingsAttrGrp)

        self.drawDebugInputAttr.connect(self.handDebugInputAttr)

        self.guideCtrlHrcGrp = HierarchyGroup('controlShapes', parent=self.ctrlCmpGrp)

        self.default_data = {
            "name": name,
            "location": "L",
            "handXfo": Xfo(Vec3(7.1886, 12.2819, 0.4906)),
            "digitNames": self.digitNamesAttr.getValue(),
            "numJoints": self.numJointsAttr.getValue(),
            "fingers": self.fingers
        }

        self.loadData(self.default_data)

        Profiler.getInstance().pop()

    # =============
    # Data Methods
    # =============
    def saveData(self):
        """Save the data for the component to be persisted.

        Return:
        The JSON data object
        """
        data = super(HandComponentGuide, self).saveData()

        data['handXfo'] = self.handCtrl.xfo
        data['digitNames'] = self.digitNamesAttr.getValue()
        data['numJoints'] = self.numJointsAttr.getValue()

        fingerXfos = {}
        fingerShapeCtrlData = {}
        for finger in self.fingers.keys():
            fingerXfos[finger] = [x.xfo for x in self.fingers[finger]]
            fingerShapeCtrlData[finger] = []
            for i, digit in enumerate(self.fingers[finger]):
                # The tip control has no shape control, so skip the last one.
                if i != len(self.fingers[finger]) - 1:
                    fingerShapeCtrlData[finger].append(digit.shapeCtrl.getCurveData())

        data['fingersGuideXfos'] = fingerXfos
        data['fingerShapeCtrlData'] = fingerShapeCtrlData

        return data

    def loadData(self, data):
        """Load a saved guide representation from persisted data.

        Arguments:
        data -- object, The JSON data object.

        Return:
        True if successful.
        """
        super(HandComponentGuide, self).loadData(data)

        self.handCtrl.xfo = data.get('handXfo')
        # Set numJoints before digitNames: both callbacks rebuild fingers.
        self.numJointsAttr.setValue(data.get('numJoints'))
        self.digitNamesAttr.setValue(data.get('digitNames'))

        fingersGuideXfos = data.get('fingersGuideXfos')
        fingerShapeCtrlData = data.get('fingerShapeCtrlData')

        if fingersGuideXfos is not None:
            for finger in self.fingers.keys():
                for i in xrange(len(self.fingers[finger])):
                    self.fingers[finger][i].xfo = fingersGuideXfos[finger][i]

                    if hasattr(self.fingers[finger][i], 'shapeCtrl'):
                        # Older saves may lack shape-control curve data.
                        if fingerShapeCtrlData is not None:
                            if finger in fingerShapeCtrlData:
                                self.fingers[finger][i].shapeCtrl.setCurveData(fingerShapeCtrlData[finger][i])

        # Rename the guide operators to carry the component's location.
        for op in self.getOperators():
            guideOpName = ''.join([op.getName().split('FingerGuideOp')[0], self.getLocation(), 'FingerGuideOp'])
            op.setName(guideOpName)

        return True

    def getRigBuildData(self):
        """Returns the Guide data used by the Rig Component to define the layout of the final rig..

        Return:
        The JSON rig data object.
        """
        data = super(HandComponentGuide, self).getRigBuildData()

        data['handXfo'] = self.handCtrl.xfo

        fingerData = {}
        for finger in self.fingers.keys():

            fingerData[finger] = []
            for i, joint in enumerate(self.fingers[finger]):
                # The tip control only defines the end of the last bone.
                if i == len(self.fingers[finger]) - 1:
                    continue

                # Calculate Xfo: aim each bone at the next control, using
                # the guide's Z axis to stabilize the up-vector.
                boneVec = self.fingers[finger][i + 1].xfo.tr - self.fingers[finger][i].xfo.tr
                bone1Normal = self.fingers[finger][i].xfo.ori.getZaxis().cross(boneVec).unit()
                bone1ZAxis = boneVec.cross(bone1Normal).unit()

                jointXfo = Xfo()
                jointXfo.setFromVectors(boneVec.unit(), bone1Normal, bone1ZAxis, self.fingers[finger][i].xfo.tr)

                jointData = {
                    'curveData': self.fingers[finger][i].shapeCtrl.getCurveData(),
                    'length': self.fingers[finger][i].xfo.tr.distanceTo(self.fingers[finger][i + 1].xfo.tr),
                    'xfo': jointXfo
                }

                fingerData[finger].append(jointData)

        data['fingerData'] = fingerData

        return data

    # ==========
    # Callbacks
    # ==========
    def addFinger(self, name):
        """Build the guide control chain (plus shape controls and guide
        operator) for one finger named *name* and register it in
        self.fingers."""
        digitSizeAttributes = []
        fingerGuideCtrls = []

        firstDigitCtrl = Control(name + "01", parent=self.handCtrl, shape='sphere')
        firstDigitCtrl.scalePoints(Vec3(0.125, 0.125, 0.125))

        firstDigitShapeCtrl = Control(name + "Shp01", parent=self.guideCtrlHrcGrp, shape='square')
        firstDigitShapeCtrl.setColor('yellow')
        firstDigitShapeCtrl.scalePoints(Vec3(0.175, 0.175, 0.175))
        firstDigitShapeCtrl.translatePoints(Vec3(0.0, 0.125, 0.0))
        fingerGuideCtrls.append(firstDigitShapeCtrl)
        firstDigitCtrl.shapeCtrl = firstDigitShapeCtrl

        firstDigitVisAttr = firstDigitShapeCtrl.getVisibilityAttr()
        firstDigitVisAttr.connect(self.ctrlShapeToggle)

        # Template triangle appended to each digit control's curve.
        triangleCtrl = Control('tempCtrl', parent=None, shape='triangle')
        triangleCtrl.rotatePoints(90.0, 0.0, 0.0)
        triangleCtrl.scalePoints(Vec3(0.025, 0.025, 0.025))
        triangleCtrl.translatePoints(Vec3(0.0, 0.0875, 0.0))

        firstDigitCtrl.appendCurveData(triangleCtrl.getCurveData())
        firstDigitCtrl.lockScale(True, True, True)

        digitSettingsAttrGrp = AttributeGroup("DigitSettings", parent=firstDigitCtrl)
        digitSizeAttr = ScalarAttribute('size', value=0.25, parent=digitSettingsAttrGrp)
        digitSizeAttributes.append(digitSizeAttr)

        # Set Finger
        self.fingers[name] = []
        self.fingers[name].append(firstDigitCtrl)

        parent = firstDigitCtrl
        numJoints = self.numJointsAttr.getValue()
        # Thumbs always use 3 joints regardless of the numJoints setting.
        if name == "thumb":
            numJoints = 3
        for i in xrange(2, numJoints + 2):
            digitCtrl = Control(name + str(i).zfill(2), parent=parent, shape='sphere')

            if i != numJoints + 1:
                digitCtrl.scalePoints(Vec3(0.125, 0.125, 0.125))
                digitCtrl.appendCurveData(triangleCtrl.getCurveData())

                digitShapeCtrl = Control(name + 'Shp' + str(i).zfill(2), parent=self.guideCtrlHrcGrp, shape='circle')
                digitShapeCtrl.setColor('yellow')
                digitShapeCtrl.scalePoints(Vec3(0.175, 0.175, 0.175))
                digitShapeCtrl.getVisibilityAttr().connect(self.ctrlShapeToggle)

                digitCtrl.shapeCtrl = digitShapeCtrl

                if i == 2:
                    digitShapeCtrl.translatePoints(Vec3(0.0, 0.125, 0.0))
                else:
                    digitShapeCtrl.rotatePoints(0.0, 0.0, 90.0)

                fingerGuideCtrls.append(digitShapeCtrl)

                # Add size attr to all but last guide control
                digitSettingsAttrGrp = AttributeGroup("DigitSettings", parent=digitCtrl)
                digitSizeAttr = ScalarAttribute('size', value=0.25, parent=digitSettingsAttrGrp)
                digitSizeAttributes.append(digitSizeAttr)
            else:
                # Tip control: smaller, no shape control or size attribute.
                digitCtrl.scalePoints(Vec3(0.0875, 0.0875, 0.0875))

            digitCtrl.lockScale(True, True, True)

            self.fingers[name].append(digitCtrl)

            parent = digitCtrl

        # ===========================
        # Create Canvas Operators
        # ===========================
        # Add Finger Guide Canvas Op
        fingerGuideCanvasOp = CanvasOperator(name + 'FingerGuide', 'Kraken.Solvers.Biped.BipedFingerGuideSolver')
        self.addOperator(fingerGuideCanvasOp)

        # Add Att Inputs
        fingerGuideCanvasOp.setInput('drawDebug', self.drawDebugInputAttr)
        fingerGuideCanvasOp.setInput('rigScale', self.rigScaleInputAttr)

        # Add Xfo Inputs
        fingerGuideCanvasOp.setInput('controls', self.fingers[name])
        fingerGuideCanvasOp.setInput('planeSizes', digitSizeAttributes)

        # Add Xfo Outputs
        fingerGuideCanvasOp.setOutput('result', fingerGuideCtrls)
        fingerGuideCanvasOp.setOutput('forceEval', firstDigitCtrl.getVisibilityAttr())

        return firstDigitCtrl

    def removeFinger(self, name):
        """Delete the named finger's control chain from the guide."""
        self.handCtrl.removeChild(self.fingers[name][0])
        del self.fingers[name]

    def placeFingers(self):
        """Lay out all finger chains, evenly spaced across the palm and
        centred on the hand control."""

        spacing = 0.25
        length = spacing * (len(self.fingers.keys()) - 1)
        mid = length / 2.0
        startOffset = length - mid

        for i, finger in enumerate(self.fingers.keys()):

            parentCtrl = self.handCtrl
            numJoints = self.numJointsAttr.getValue()
            if finger == "thumb":
                numJoints = 3
            for y in xrange(numJoints + 1):
                if y == 1:
                    xOffset = 0.375
                else:
                    xOffset = 0.25

                if y == 0:
                    # Base digit: offset across the palm for this finger.
                    offsetVec = Vec3(xOffset, 0, startOffset - (i * spacing))
                else:
                    offsetVec = Vec3(xOffset, 0, 0)

                fingerPos = parentCtrl.xfo.transformVector(offsetVec)
                fingerXfo = Xfo(tr=fingerPos, ori=self.handCtrl.xfo.ori)

                self.fingers[finger][y].xfo = fingerXfo
                parentCtrl = self.fingers[finger][y]

    def updateFingers(self, fingers):
        """Callback for digitNames edits: reconcile self.fingers with the
        comma-separated name list, then re-place all chains."""

        if " " in fingers:
            # Normalize away spaces; setValue re-triggers this callback.
            self.digitNamesAttr.setValue(fingers.replace(" ", ""))
            return

        fingerNames = fingers.split(',')

        # Delete fingers that don't exist any more
        for finger in list(set(self.fingers.keys()) - set(fingerNames)):
            self.removeFinger(finger)

        # Add Fingers
        for finger in fingerNames:
            if finger in self.fingers.keys():
                continue

            self.addFinger(finger)

        self.placeFingers()

    def resizeDigits(self, numJoints):
        """Callback for numJoints edits: grow or shrink each finger's
        control chain to numJoints + 1 controls (thumb stays at 3 + 1)."""

        initNumJoints = numJoints
        for finger in self.fingers.keys():

            if finger == "thumb":
                numJoints = 3
            else:
                numJoints = initNumJoints

            if numJoints + 1 == len(self.fingers[finger]):
                continue

            elif numJoints + 1 > len(self.fingers[finger]):
                # Append extra tip-side controls.
                for i in xrange(len(self.fingers[finger]), numJoints + 1):
                    prevDigit = self.fingers[finger][i - 1]
                    digitCtrl = Control(finger + str(i + 1).zfill(2), parent=prevDigit, shape='sphere')
                    digitCtrl.setColor('orange')
                    digitCtrl.scalePoints(Vec3(0.25, 0.25, 0.25))
                    digitCtrl.lockScale(True, True, True)

                    self.fingers[finger].append(digitCtrl)

            elif numJoints + 1 < len(self.fingers[finger]):
                # Pop surplus controls from the tip end.
                numExtraCtrls = len(self.fingers[finger]) - (numJoints + 1)
                for i in xrange(numExtraCtrls):
                    removedJoint = self.fingers[finger].pop()
                    removedJoint.getParent().removeChild(removedJoint)

        self.placeFingers()

    # ==============
    # Class Methods
    # ==============
    @classmethod
    def getComponentType(cls):
        """Enables introspection of the class prior to construction to determine if it is a guide component.

        Return:
        The true if this component is a guide component.
        """
        return 'Guide'

    @classmethod
    def getRigComponentClass(cls):
        """Returns the corresponding rig component class for this guide component class

        Return:
        The rig component class.
        """
        return HandComponentRig
class HandComponentRig(HandComponent):
    """Hand Component

    Final rig variant: builds the hand control, deformer joints and
    per-finger pose-constraint operators from guide build data.
    """

    def __init__(self, name='Hand', parent=None):

        Profiler.getInstance().push("Construct Hand Rig Component:" + name)
        super(HandComponentRig, self).__init__(name, parent)

        # =========
        # Controls
        # =========
        # Hand
        self.handCtrlSpace = CtrlSpace('hand', parent=self.ctrlCmpGrp)
        self.handCtrl = Control('hand', parent=self.handCtrlSpace, shape="square")
        self.handCtrl.rotatePoints(0, 0, 90.0)
        # Animators may only rotate the hand control.
        self.handCtrl.lockScale(True, True, True)
        self.handCtrl.lockTranslation(True, True, True)

        # ==========
        # Deformers
        # ==========
        self.deformersLayer = self.getOrCreateLayer('deformers')
        self.defCmpGrp = ComponentGroup(self.getName(), self, parent=self.deformersLayer)
        self.addItem('defCmpGrp', self.defCmpGrp)

        self.handDef = Joint('hand', parent=self.defCmpGrp)
        self.handDef.setComponent(self)

        # ==============
        # Constrain I/O
        # ==============
        # Constraint inputs: hand space follows the arm end input.
        self.armEndInputConstraint = PoseConstraint('_'.join([self.handCtrlSpace.getName(), 'To', self.armEndInputTgt.getName()]))
        self.armEndInputConstraint.setMaintainOffset(True)
        self.armEndInputConstraint.addConstrainer(self.armEndInputTgt)
        self.handCtrlSpace.addConstraint(self.armEndInputConstraint)

        # Constraint outputs
        self.handOutputConstraint = PoseConstraint('_'.join([self.handOutputTgt.getName(), 'To', self.handCtrl.getName()]))
        self.handOutputConstraint.addConstrainer(self.handCtrl)
        self.handOutputTgt.addConstraint(self.handOutputConstraint)

        # Constraint deformers
        self.handDefConstraint = PoseConstraint('_'.join([self.handDef.getName(), 'To', self.handCtrl.getName()]))
        self.handDefConstraint.addConstrainer(self.handCtrl)
        self.handDef.addConstraint(self.handDefConstraint)

        Profiler.getInstance().pop()

    def addFinger(self, name, data):
        """Build controls, deformer joints and the pose-constraint
        operator for one finger from its guide joint *data* list;
        returns the created KL operator."""

        fingerCtrls = []
        fingerJoints = []

        parentCtrl = self.handCtrl
        for i, joint in enumerate(data):
            if i == 0:
                jointName = name + 'Meta'
            else:
                jointName = name + str(i).zfill(2)

            jointXfo = joint.get('xfo', Xfo())
            jointCrvData = joint.get('curveData')

            # Create Controls
            newJointCtrlSpace = CtrlSpace(jointName, parent=parentCtrl)
            newJointCtrl = Control(jointName, parent=newJointCtrlSpace, shape='square')
            newJointCtrl.lockScale(True, True, True)
            newJointCtrl.lockTranslation(True, True, True)

            if jointCrvData is not None:
                # Reuse the guide's authored control shape.
                newJointCtrl.setCurveData(jointCrvData)

            fingerCtrls.append(newJointCtrl)

            # Create Deformers
            jointDef = Joint(jointName, parent=self.defCmpGrp)
            fingerJoints.append(jointDef)

            # Create Constraints

            # Set Xfos
            newJointCtrlSpace.xfo = jointXfo
            newJointCtrl.xfo = jointXfo

            parentCtrl = newJointCtrl


        # =================
        # Create Operators
        # =================
        # Add Deformer KL Op: drives each joint from its control.
        deformersToCtrlsKLOp = KLOperator(name + 'DefConstraint', 'MultiPoseConstraintSolver', 'Kraken')
        self.addOperator(deformersToCtrlsKLOp)

        # Add Att Inputs
        deformersToCtrlsKLOp.setInput('drawDebug', self.drawDebugInputAttr)
        deformersToCtrlsKLOp.setInput('rigScale', self.rigScaleInputAttr)

        # Add Xfo Inputs
        deformersToCtrlsKLOp.setInput('constrainers', fingerCtrls)

        # Add Xfo Outputs
        deformersToCtrlsKLOp.setOutput('constrainees', fingerJoints)

        return deformersToCtrlsKLOp

    def loadData(self, data=None):
        """Load a saved guide representation from persisted data.

        Arguments:
        data -- object, The JSON data object.

        Return:
        True if successful.
        """
        super(HandComponentRig, self).loadData(data)

        # Data
        fingerData = data.get('fingerData')
        handXfo = data.get('handXfo', Xfo())

        self.handCtrlSpace.xfo = handXfo
        self.handCtrl.xfo = handXfo

        fingerOps = []
        for finger in fingerData.keys():
            fingerOp = self.addFinger(finger, fingerData[finger])
            fingerOps.append(fingerOp)

        # ============
        # Set IO Xfos
        # ============
        self.armEndInputTgt.xfo = handXfo
        self.handOutputTgt.xfo = handXfo

        # Eval Constraints
        self.armEndInputConstraint.evaluate()
        self.handOutputConstraint.evaluate()
        self.handDefConstraint.evaluate()

        # Eval Operators: bake the finger joints to their controls.
        for op in fingerOps:
            op.evaluate()
# Register both component variants with the Kraken system so the
# component library / rig builder can discover them.
from kraken.core.kraken_system import KrakenSystem
ks = KrakenSystem.getInstance()
ks.registerComponent(HandComponentGuide)
ks.registerComponent(HandComponentRig)
|
jsma/django-cms | refs/heads/develop | cms/tests/test_docs.py | 6 | # -*- coding: utf-8 -*-
from contextlib import contextmanager
from unittest import skipIf, skipUnless
import os
import socket
import sys
import django
from django.utils.six.moves import StringIO
from sphinx.application import Sphinx, SphinxWarning
try:
import enchant
except ImportError:
enchant = None
import cms
from cms.test_utils.testcases import CMSTestCase
from cms.test_utils.util.context_managers import TemporaryDirectory
ROOT_DIR = os.path.dirname(cms.__file__)
DOCS_DIR = os.path.abspath(os.path.join(ROOT_DIR, u'..', u'docs'))
def has_no_internet():
    """Return True when a quick UDP probe fails, i.e. there appears to
    be no internet connectivity.

    Sends one datagram to a public address with a 5 second timeout; any
    socket-level failure is taken to mean "offline".
    """
    s = None
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.settimeout(5)
        s.connect(('4.4.4.2', 80))
        s.send(b"hello")
    except socket.error:  # no internet
        return True
    finally:
        # The original leaked the socket on every call; always close it.
        if s is not None:
            s.close()
    return False
@contextmanager
def tmp_list_append(l, x):
    """Append *x* to list *l* for the duration of the with-block and
    remove it again on exit (tolerating the block removing it itself)."""
    l.append(x)
    try:
        yield
    finally:
        # EAFP removal: the body may already have taken x back out.
        try:
            l.remove(x)
        except ValueError:
            pass
class DocsTestCase(CMSTestCase):
    """
    Test docs building correctly for HTML
    """
    # NOTE(review): has_no_internet() inside the skipIf decorators runs at
    # class-creation time, so importing this module performs a network
    # probe — confirm this is intended.

    @skipIf(has_no_internet(), "No internet")
    def test_html(self):
        # Build the docs with warnings promoted to errors; the captured
        # sphinx status output is printed only when the build fails.
        status = StringIO()
        with TemporaryDirectory() as OUT_DIR:
            app = Sphinx(
                srcdir=DOCS_DIR,
                confdir=DOCS_DIR,
                outdir=OUT_DIR,
                doctreedir=OUT_DIR,
                buildername="html",
                warningiserror=True,
                status=status,
            )
            try:
                app.build()
            except:
                print(status.getvalue())
                raise

    @skipIf(has_no_internet(), "No internet")
    @skipIf(enchant is None, "Enchant not installed")
    @skipUnless(django.VERSION[:2] == (1, 8)
                and sys.version_info[:2] == (3, 4)
                and os.environ.get('DATABASE_URL') == 'sqlite://localhost/:memory:',
                'Skipping for simplicity')
    def test_spelling(self):
        # Spell-check the docs on exactly one CI configuration
        # (Django 1.8 / Python 3.4 / sqlite) to keep run time down.
        status = StringIO()
        with TemporaryDirectory() as OUT_DIR:
            # The spelling builder inspects sys.argv for its own name.
            with tmp_list_append(sys.argv, 'spelling'):
                try:
                    app = Sphinx(
                        srcdir=DOCS_DIR,
                        confdir=DOCS_DIR,
                        outdir=OUT_DIR,
                        doctreedir=OUT_DIR,
                        buildername="spelling",
                        warningiserror=True,
                        status=status,
                        confoverrides={
                            'extensions': [
                                'djangocms',
                                'sphinx.ext.intersphinx',
                                'sphinxcontrib.spelling'
                            ]
                        }
                    )
                    app.build()
                    self.assertEqual(app.statuscode, 0, status.getvalue())
                except SphinxWarning:
                    # while normally harmless, causes a test failure
                    pass
                except:
                    print(status.getvalue())
                    raise
|
ehashman/oh-mainline | refs/heads/master | vendor/packages/twisted/twisted/news/test/test_database.py | 17 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.news.database}.
"""
__metaclass__ = type
from email.Parser import Parser
from socket import gethostname
from twisted.trial.unittest import TestCase
from twisted.internet.defer import succeed
from twisted.mail.smtp import messageid
from twisted.news.database import Article, PickleStorage, NewsShelf
class ModerationTestsMixin:
"""
Tests for the moderation features of L{INewsStorage} implementations.
"""
def setUp(self):
self._email = []
def sendmail(self, smtphost, from_addr, to_addrs, msg,
senderDomainName=None, port=25):
"""
Fake of L{twisted.mail.smtp.sendmail} which records attempts to send
email and immediately pretends success.
Subclasses should arrange for their storage implementation to call this
instead of the real C{sendmail} function.
"""
self._email.append((
smtphost, from_addr, to_addrs, msg, senderDomainName, port))
return succeed(None)
_messageTemplate = """\
From: some dude
To: another person
Subject: activities etc
Message-ID: %(articleID)s
Newsgroups: %(newsgroup)s
%(approved)s
Body of the message is such.
""".replace('\n', '\r\n')
def getApprovedMessage(self, articleID, group):
"""
Return a C{str} containing an RFC 2822 formatted message including an
I{Approved} header indicating it has passed through moderation.
"""
return self._messageTemplate % {
'articleID': articleID,
'newsgroup': group,
'approved': 'Approved: yup\r\n'}
def getUnapprovedMessage(self, articleID, group):
"""
Return a C{str} containing an RFC 2822 formatted message with no
I{Approved} header indicating it may require moderation.
"""
return self._messageTemplate % {
'articleID': articleID,
'newsgroup': group,
'approved': '\r\n'}
def getStorage(self, groups, moderators, mailhost, sender):
"""
Override in a subclass to return a L{INewsStorage} provider to test for
correct moderation behavior.
@param groups: A C{list} of C{str} naming the groups which should exist
in the resulting storage object.
@param moderators: A C{dict} mapping C{str} each group name to a C{list}
of C{str} giving moderator email (RFC 2821) addresses.
"""
raise NotImplementedError()
    def test_postApproved(self):
        """
        L{INewsStorage.postRequest} posts the message if it includes an
        I{Approved} header.
        """
        group = "example.group"
        moderator = "alice@example.com"
        mailhost = "127.0.0.1"
        sender = "bob@example.org"
        articleID = messageid()
        storage = self.getStorage(
            [group], {group: [moderator]}, mailhost, sender)
        message = self.getApprovedMessage(articleID, group)
        result = storage.postRequest(message)
        def cbPosted(ignored):
            # No moderation mail may be sent for an already-approved post.
            self.assertEquals(self._email, [])
            # The article must have been stored directly.
            exists = storage.articleExistsRequest(articleID)
            exists.addCallback(self.assertTrue)
            return exists
        result.addCallback(cbPosted)
        return result
    def test_postModerated(self):
        """
        L{INewsStorage.postRequest} forwards a message to the moderator if it
        does not include an I{Approved} header.
        """
        group = "example.group"
        moderator = "alice@example.com"
        mailhost = "127.0.0.1"
        sender = "bob@example.org"
        articleID = messageid()
        storage = self.getStorage(
            [group], {group: [moderator]}, mailhost, sender)
        message = self.getUnapprovedMessage(articleID, group)
        result = storage.postRequest(message)
        def cbModerated(ignored):
            # Exactly one moderation request was mailed, using the configured
            # smarthost, envelope sender, and moderator address.
            self.assertEquals(len(self._email), 1)
            self.assertEquals(self._email[0][0], mailhost)
            self.assertEquals(self._email[0][1], sender)
            self.assertEquals(self._email[0][2], [moderator])
            self._checkModeratorMessage(
                self._email[0][3], sender, moderator, group, message)
            # Default sender domain (None) and SMTP port (25) were used.
            self.assertEquals(self._email[0][4], None)
            self.assertEquals(self._email[0][5], 25)
            # The unapproved article must not have been stored.
            exists = storage.articleExistsRequest(articleID)
            exists.addCallback(self.assertFalse)
            return exists
        result.addCallback(cbModerated)
        return result
    def _checkModeratorMessage(self, messageText, sender, moderator, group, postingText):
        """
        Assert that C{messageText} is a moderation request from C{sender} to
        C{moderator} wrapping the original posting C{postingText} as a
        message/rfc822 attachment.
        """
        p = Parser()
        msg = p.parsestr(messageText)

        # Compare all headers except Message-ID, which is freshly generated
        # for the moderation request.
        headers = dict(msg.items())
        del headers['Message-ID']
        self.assertEquals(
            headers,
            {'From': sender,
             'To': moderator,
             'Subject': 'Moderate new %s message: activities etc' % (group,),
             'Content-Type': 'message/rfc822'})

        # The attachment must preserve the significant headers and the body
        # of the original posting.
        posting = p.parsestr(postingText)
        attachment = msg.get_payload()[0]

        for header in ['from', 'to', 'subject', 'message-id', 'newsgroups']:
            self.assertEquals(posting[header], attachment[header])

        self.assertEquals(posting.get_payload(), attachment.get_payload())
class PickleStorageTests(ModerationTestsMixin, TestCase):
    """
    Tests for L{PickleStorage}.
    """
    def getStorage(self, groups, moderators, mailhost, sender):
        """
        Create and return a L{PickleStorage} instance configured to require
        moderation.
        """
        filename = self.mktemp()
        storage = PickleStorage(filename, groups, moderators, mailhost, sender)
        storage.sendmail = self.sendmail
        # Remove the shared database entry so tests stay isolated.
        self.addCleanup(PickleStorage.sharedDBs.pop, filename)
        return storage
class NewsShelfTests(ModerationTestsMixin, TestCase):
    """
    Tests for L{NewsShelf}.
    """
    def getStorage(self, groups, moderators, mailhost, sender):
        """
        Create and return a L{NewsShelf} instance configured to require
        moderation.
        """
        shelf = NewsShelf(mailhost, self.mktemp(), sender)
        for groupName in groups:
            # The 'm' flag marks the group as moderated.
            shelf.addGroup(groupName, 'm')
            for moderatorAddress in moderators.get(groupName, []):
                shelf.addModerator(groupName, moderatorAddress)
        shelf.sendmail = self.sendmail
        return shelf

    def test_notifyModerator(self):
        """
        L{NewsShelf.notifyModerator} sends a moderation email to a single
        moderator.
        """
        shelf = NewsShelf('example.com', self.mktemp(), 'alice@example.com')
        shelf.sendmail = self.sendmail
        article = Article('Foo: bar', 'Some text')
        shelf.notifyModerator('bob@example.org', article)
        self.assertEquals(len(self._email), 1)

    def test_defaultSender(self):
        """
        If no sender is specified to L{NewsShelf.notifyModerators}, a default
        address based on the system hostname is used for both the envelope and
        RFC 2822 sender addresses.
        """
        shelf = NewsShelf('example.com', self.mktemp())
        shelf.sendmail = self.sendmail
        shelf.notifyModerators(
            ['bob@example.org'], Article('Foo: bar', 'Some text'))
        expected = 'twisted-news@' + gethostname()
        self.assertEquals(self._email[0][1], expected)
        self.assertIn('From: ' + expected, self._email[0][3])
|
codeaudit/gp-structure-search | refs/heads/master | experiments/multi-d-250-16Feb.py | 3 | # Runs all 1d datasets.
# Configuration for running all multi-dimensional k-fold datasets.
Experiment(
    description='Run all multi D datasets',
    data_dir='../data/kfold_data/',
    results_dir='../results/16-Feb/',
    # Search settings.
    max_depth=12,
    k=1,
    n_rand=2,
    sd=4,
    iters=250,
    max_jobs=250,
    # Behaviour flags.
    random_order=False,
    debug=False,
    local_computation=False,
    verbose=False,
    make_predictions=True,
    skip_complete=True,
)
|
Distrotech/intellij-community | refs/heads/master | python/lib/Lib/site-packages/django/core/management/commands/cleanup.py | 350 | import datetime
from django.core.management.base import NoArgsCommand
class Command(NoArgsCommand):
    """Management command that purges expired data from the database."""
    help = "Can be run as a cronjob or directly to clean out old data from the database (only expired sessions at the moment)."

    def handle_noargs(self, **options):
        # Imports are deferred so the command module can be loaded before
        # Django's database layer is fully configured.
        from django.db import transaction
        from django.contrib.sessions.models import Session
        now = datetime.datetime.now()
        Session.objects.filter(expire_date__lt=now).delete()
        transaction.commit_unless_managed()
|
Joergen/zamboni | refs/heads/uge43 | apps/amo/urls.py | 5 | import csp.views
from waffle.views import wafflejs
from django.conf.urls import include, patterns, url
from django.views.decorators.cache import never_cache
from . import views, install
# Operational/service endpoints.  Monitoring views are wrapped in
# never_cache so every request reflects the current state.
services_patterns = patterns('',
    url('^monitor(.json)?$', never_cache(views.monitor),
        name='amo.monitor'),
    url('^loaded$', never_cache(views.loaded), name='amo.loaded'),
    # Content-Security-Policy: policy delivery and violation reports.
    url('^csp/policy$', csp.views.policy, name='amo.csp.policy'),
    url('^csp/report$', views.cspreport, name='amo.csp.report'),
    url('^builder-pingback', views.builder_pingback,
        name='amo.builder-pingback'),
    url('^timing/record$', views.record, name='amo.timing.record'),
    # Legacy .php paths kept for plugin-check and install compatibility.
    url('^pfs.php$', views.plugin_check_redirect, name='api.plugincheck'),
    url('^install.php$', install.install, name='api.install'),
)

urlpatterns = patterns('',
    url('^robots.txt$', views.robots, name='robots.txt'),
    url(r'^wafflejs$', wafflejs, name='wafflejs'),
    ('^services/', include(services_patterns)),
    url('^opensearch.xml$', 'api.views.render_xml',
        {'template': 'amo/opensearch.xml'},
        name='amo.opensearch'),
)
|
dattatreya303/zulip | refs/heads/master | zerver/webhooks/jira/view.py | 3 | # Webhooks for external integrations.
from __future__ import absolute_import
from typing import Any, Dict, List, Optional, Text, Tuple
from django.utils.translation import ugettext as _
from django.db.models import Q
from django.conf import settings
from django.http import HttpRequest, HttpResponse
from zerver.models import Client, UserProfile, get_user_profile_by_email, Realm
from zerver.lib.actions import check_send_message
from zerver.lib.response import json_success, json_error
from zerver.decorator import api_key_only_webhook_view, has_request_variables, REQ
import logging
import re
import ujson
IGNORED_EVENTS = [
'comment_created', # we handle issue_update event instead
'comment_updated', # we handle issue_update event instead
'comment_deleted', # we handle issue_update event instead
]
def guess_zulip_user_from_jira(jira_username, realm):
    # type: (str, Realm) -> Optional[UserProfile]
    """Best-effort mapping from a JIRA username to a Zulip user.

    Matches the JIRA username against a Zulip user's full name, short name,
    or the beginning of the email address within the given realm.  Returns
    the lowest-id active match, or None when nothing matches.
    """
    candidates = UserProfile.objects.filter(
        Q(full_name__iexact=jira_username) |
        Q(short_name__iexact=jira_username) |
        Q(email__istartswith=jira_username),
        is_active=True,
        realm=realm).order_by("id")
    try:
        return candidates[0]
    except IndexError:
        return None
def convert_jira_markup(content, realm):
    # type: (str, Realm) -> str
    """Best-effort translation of JIRA wiki markup into Zulip Markdown."""
    # Bold: *word* -> **word**
    content = re.sub(r'\*([^\*]+)\*', r'**\1**', content)

    # Monospace: {{word}} -> `word`
    content = re.sub(r'{{([^\*]+?)}}', r'`\1`', content)

    # A line starting with "bq. " becomes a Markdown block quote.
    content = re.sub(r'bq\. (.*)', r'> \1', content)

    # {quote}...{quote} blocks become fenced quote blocks.
    content = re.sub(r'{quote}(.*?){quote}', r'~~~ quote\n\1\n~~~',
                     content, flags=re.DOTALL)

    # {noformat}...{noformat} becomes a plain fenced code block.
    content = re.sub(r'{noformat}(.*?){noformat}', r'~~~\n\1\n~~~',
                     content, flags=re.DOTALL)

    # {code[: lang]}...{code} also becomes a fenced code block; any language
    # annotation on the opening tag is discarded.
    content = re.sub(r'{code[^\n]*}(.*?){code}', r'~~~\n\1\n~~~',
                     content, flags=re.DOTALL)

    # Bare links: [https://example.com] -> [url](url).  '|' is excluded from
    # the match so titled links fall through to the next substitution.
    content = re.sub(r'\[([^\|~]+?)\]', r'[\1](\1)', content)

    # Titled links: [Title|https://example.com] -> [Title](url)
    content = re.sub(r'\[(?:(?P<title>[^|~]+)\|)(?P<url>.*)\]',
                     r'[\g<title>](\g<url>)', content)

    # JIRA user mentions ([~username]) only carry the JIRA username, so we
    # naively guess at the matching Zulip account; fall back to simply
    # bolding the raw username.
    if realm:
        for username in re.findall(r'\[~(.*?)\]', content):
            user_profile = guess_zulip_user_from_jira(username, realm)
            if user_profile:
                replacement = "**{}**".format(user_profile.full_name)
            else:
                replacement = "**{}**".format(username)
            content = content.replace("[~{}]".format(username,), replacement)

    return content
def get_in(payload, keys, default=''):
    # type: (Dict[str, Any], List[str], str) -> Any
    """Walk nested dicts along the given key path.

    Returns `default` as soon as any step of the path is missing or the
    current value is not indexable.
    """
    current = payload
    for key in keys:
        try:
            current = current[key]
        except (AttributeError, KeyError, TypeError):
            return default
    return current
def get_issue_string(payload, issue_id=None):
    # type: (Dict[str, Any], Text) -> Text
    """Return a Markdown link to the issue's /browse/ page, or the bare id.

    The webhook payload does not include a browse URL, so the base URL is
    derived from the issue's REST self-link, assuming a /browse/BUG-### page
    exists at the same root.
    """
    if issue_id is None:
        issue_id = get_issue_id(payload)
    # Raw string fixes the previous pattern's invalid '\/' escape sequence
    # (a DeprecationWarning in Python 3.6+); the match is unchanged.
    base_url = re.match(r"(.*)/rest/api/.*", get_in(payload, ['issue', 'self']))
    if base_url and len(base_url.groups()):
        return "[{}]({}/browse/{})".format(issue_id, base_url.group(1), issue_id)
    else:
        return issue_id
def get_assignee_mention(assignee_email):
    # type: (Text) -> Text
    """Return '**Full Name**' for the assignee's email, or '' when unassigned."""
    if assignee_email == '':
        return ''
    try:
        assignee_name = get_user_profile_by_email(assignee_email).full_name
    except UserProfile.DoesNotExist:
        # Not a Zulip user; fall back to bolding the raw email address.
        assignee_name = assignee_email
    return "**{}**".format(assignee_name)
def get_issue_author(payload):
    # type: (Dict[str, Any]) -> Text
    """Display name of the JIRA user who triggered the event ('' if absent)."""
    return get_in(payload, ['user', 'displayName'])
def get_issue_id(payload):
    # type: (Dict[str, Any]) -> Text
    """Issue key such as 'BUG-123' ('' if absent)."""
    return get_in(payload, ['issue', 'key'])
def get_issue_title(payload):
    # type: (Dict[str, Any]) -> Text
    """Issue summary line ('' if absent)."""
    return get_in(payload, ['issue', 'fields', 'summary'])
def get_issue_subject(payload):
    # type: (Dict[str, Any]) -> Text
    """Stream topic for the issue: '<key>: <summary>'."""
    return "{}: {}".format(get_issue_id(payload), get_issue_title(payload))
def get_sub_event_for_update_issue(payload):
    # type: (Dict[str, Any]) -> Text
    """Classify an issue_updated event when JIRA omits the explicit name.

    Falls back to inspecting the payload for a comment or a workflow
    transition; returns '' when neither is present.
    """
    named_event = payload.get('issue_event_type_name', '')
    if named_event != '':
        return named_event
    if payload.get('comment'):
        return 'issue_commented'
    if payload.get('transition'):
        return 'issue_transited'
    return ''
def get_event_type(payload):
    # type: (Dict[str, Any]) -> Text
    """Return the webhook event name, inferring issue_updated from a bare transition."""
    event = payload.get('webhookEvent')
    if event is not None:
        return event
    if payload.get('transition'):
        # Some transition payloads omit webhookEvent entirely.
        return 'jira:issue_updated'
    return None
def add_change_info(content, field, from_field, to_field):
    # type: (Text, Text, Text, Text) -> Text
    """Append a bulleted 'Changed <field> from X to Y' line to content.

    Either side of the change may be empty and is then omitted.  to_field
    arrives pre-formatted (it may already contain a **mention**).
    """
    pieces = ["* Changed {}".format(field)]
    if from_field:
        pieces.append(" from **{}**".format(from_field))
    if to_field:
        pieces.append(" to {}\n".format(to_field))
    return content + ''.join(pieces)
def handle_updated_issue_event(payload, user_profile):
    # Reassigned, commented, reopened, and resolved events are all bundled
    # into this one 'updated' event type, so we try to extract the meaningful
    # event that happened
    # type: (Dict[str, Any], UserProfile) -> Text
    issue_id = get_in(payload, ['issue', 'key'])
    issue = get_issue_string(payload, issue_id)

    assignee_email = get_in(payload, ['issue', 'fields', 'assignee', 'emailAddress'], '')
    assignee_mention = get_assignee_mention(assignee_email)

    if assignee_mention != '':
        assignee_blurb = " (assigned to {})".format(assignee_mention)
    else:
        assignee_blurb = ''

    sub_event = get_sub_event_for_update_issue(payload)
    if 'comment' in sub_event:
        # Comment added / edited / deleted: quote the comment body if the
        # payload still carries one (deletions may not).
        if sub_event == 'issue_commented':
            verb = 'added comment to'
        elif sub_event == 'issue_comment_edited':
            verb = 'edited comment on'
        else:
            verb = 'deleted comment from'
        content = u"{} **{}** {}{}".format(get_issue_author(payload), verb, issue, assignee_blurb)
        comment = get_in(payload, ['comment', 'body'])
        if comment:
            comment = convert_jira_markup(comment, user_profile.realm)
            content = u"{}:\n\n\n{}\n".format(content, comment)
    else:
        # Any other update: list the individual field changes.
        content = u"{} **updated** {}{}:\n\n".format(get_issue_author(payload), issue, assignee_blurb)
        changelog = get_in(payload, ['changelog'])

        if changelog != '':
            # Use the changelog to display the changes, whitelist types we accept
            items = changelog.get('items')
            for item in items:
                field = item.get('field')

                if field == 'assignee' and assignee_mention != '':
                    target_field_string = assignee_mention
                else:
                    # Convert a user's target to a @-mention if possible
                    target_field_string = "**{}**".format(item.get('toString'))

                from_field_string = item.get('fromString')
                if target_field_string or from_field_string:
                    content = add_change_info(content, field, from_field_string, target_field_string)

        elif sub_event == 'issue_transited':
            # No changelog; fall back to the transition's from/to status.
            from_field_string = get_in(payload, ['transition', 'from_status'])
            target_field_string = '**{}**'.format(get_in(payload, ['transition', 'to_status']))
            if target_field_string or from_field_string:
                content = add_change_info(content, 'status', from_field_string, target_field_string)

    return content
def handle_created_issue_event(payload):
    # type: (Dict[str, Any]) -> Text
    """Build the stream message announcing a newly created issue."""
    author = get_issue_author(payload)
    issue = get_issue_string(payload)
    priority = get_in(payload, ['issue', 'fields', 'priority', 'name'])
    assignee = get_in(
        payload, ['issue', 'fields', 'assignee', 'displayName'], 'no one')
    return "{} **created** {} priority {}, assigned to **{}**:\n\n> {}".format(
        author, issue, priority, assignee, get_issue_title(payload))
def handle_deleted_issue_event(payload):
    # type: (Dict[str, Any]) -> Text
    """Build the stream message announcing a deleted issue."""
    return "{} **deleted** {}!".format(get_issue_author(payload), get_issue_string(payload))
@api_key_only_webhook_view("JIRA")
@has_request_variables
def api_jira_webhook(request, user_profile, client,
                     payload=REQ(argument_type='body'),
                     stream=REQ(default='jira')):
    # type: (HttpRequest, UserProfile, Client, Dict[str, Any], Text) -> HttpResponse
    """Webhook endpoint: turn a JIRA event payload into a stream message."""
    event = get_event_type(payload)
    if event == 'jira:issue_created':
        subject = get_issue_subject(payload)
        content = handle_created_issue_event(payload)
    elif event == 'jira:issue_deleted':
        subject = get_issue_subject(payload)
        content = handle_deleted_issue_event(payload)
    elif event == 'jira:issue_updated':
        subject = get_issue_subject(payload)
        content = handle_updated_issue_event(payload, user_profile)
    elif event in IGNORED_EVENTS:
        # Comment events are folded into issue_updated; silently drop them.
        return json_success()
    else:
        if event is None:
            if not settings.TEST_SUITE:
                message = "Got JIRA event with None event type: {}".format(payload)
                logging.warning(message)
            return json_error(_("Event is not given by JIRA"))
        else:
            # Unknown but named event type: log it so new types get noticed.
            if not settings.TEST_SUITE:
                logging.warning("Got JIRA event type we don't support: {}".format(event))
            return json_error(_("Got JIRA event type we don't support: {}".format(event)))

    check_send_message(user_profile, client, "stream", [stream], subject, content)
    return json_success()
|
ESS-LLP/erpnext-healthcare | refs/heads/master | erpnext/accounts/report/non_billed_report.py | 41 | # Copyright (c) 2013, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from erpnext import get_default_currency
from frappe.model.meta import get_field_precision
def get_ordered_to_be_billed_data(args):
    """Return rows of submitted, still-open order items not yet fully billed.

    args must carry: doctype (parent doctype, e.g. "Sales Order"), party
    ("customer" or "supplier"), date (date fieldname on the parent), order
    and order_by (sorting column and direction).
    """
    doctype, party = args.get('doctype'), args.get('party')
    child_tab = doctype + " Item"
    # Round billed amounts at the currency's precision so floating-point
    # near-misses do not show fully billed rows.
    precision = get_field_precision(frappe.get_meta(child_tab).get_field("billed_amt"),
        currency=get_default_currency()) or 2

    project_field = get_project_field(doctype, party)

    # NOTE(review): table/field names are interpolated via str.format rather
    # than passed as query parameters; callers must only supply trusted,
    # internally-generated values for doctype/order/order_by — confirm.
    return frappe.db.sql("""
        Select
            `{parent_tab}`.name, `{parent_tab}`.{date_field}, `{parent_tab}`.{party}, `{parent_tab}`.{party}_name,
            {project_field}, `{child_tab}`.item_code, `{child_tab}`.base_amount,
            (`{child_tab}`.billed_amt * ifnull(`{parent_tab}`.conversion_rate, 1)),
            (`{child_tab}`.base_amount - (`{child_tab}`.billed_amt * ifnull(`{parent_tab}`.conversion_rate, 1))),
            `{child_tab}`.item_name, `{child_tab}`.description, `{parent_tab}`.company
        from
            `{parent_tab}`, `{child_tab}`
        where
            `{parent_tab}`.name = `{child_tab}`.parent and `{parent_tab}`.docstatus = 1 and `{parent_tab}`.status != 'Closed'
            and `{child_tab}`.amount > 0 and round(`{child_tab}`.billed_amt *
            ifnull(`{parent_tab}`.conversion_rate, 1), {precision}) < `{child_tab}`.base_amount
        order by
            `{parent_tab}`.{order} {order_by}
        """.format(parent_tab = 'tab' + doctype, child_tab = 'tab' + child_tab, precision= precision, party = party,
            date_field = args.get('date'), project_field = project_field, order= args.get('order'), order_by = args.get('order_by')))
def get_project_field(doctype, party):
    """Return the SQL column expression holding the project for this doctype.

    For supplier-side documents the project lives on the child (Item) table
    rather than on the parent document.
    """
    if party == "supplier":
        doctype = doctype + ' Item'
    return "`tab%s`.project" % (doctype,)
coreynicholson/youtube-dl | refs/heads/master | youtube_dl/extractor/tagesschau.py | 58 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
determine_ext,
js_to_json,
parse_iso8601,
parse_filesize,
)
class TagesschauPlayerIE(InfoExtractor):
    """Extractor for standalone tagesschau.de ~player audio/video pages."""
    IE_NAME = 'tagesschau:player'
    _VALID_URL = r'https?://(?:www\.)?tagesschau\.de/multimedia/(?P<kind>audio|video)/(?P=kind)-(?P<id>\d+)~player(?:_[^/?#&]+)?\.html'

    _TESTS = [{
        'url': 'http://www.tagesschau.de/multimedia/video/video-179517~player.html',
        'md5': '8d09548d5c15debad38bee3a4d15ca21',
        'info_dict': {
            'id': '179517',
            'ext': 'mp4',
            'title': 'Marie Kristin Boese, ARD Berlin, über den zukünftigen Kurs der AfD',
            'thumbnail': r're:^https?:.*\.jpg$',
            'formats': 'mincount:6',
        },
    }, {
        'url': 'https://www.tagesschau.de/multimedia/audio/audio-29417~player.html',
        'md5': '76e6eec6ebd40740671cf0a2c88617e5',
        'info_dict': {
            'id': '29417',
            'ext': 'mp3',
            'title': 'Trabi - Bye, bye Rennpappe',
            'thumbnail': r're:^https?:.*\.jpg$',
            'formats': 'mincount:2',
        },
    }, {
        'url': 'http://www.tagesschau.de/multimedia/audio/audio-29417~player_autoplay-true.html',
        'only_matching': True,
    }]

    # Known quality labels and their nominal dimensions; 'quality' ranks
    # formats from worst (0) to best (5) for _sort_formats.
    _FORMATS = {
        'xs': {'quality': 0},
        's': {'width': 320, 'height': 180, 'quality': 1},
        'm': {'width': 512, 'height': 288, 'quality': 2},
        'l': {'width': 960, 'height': 540, 'quality': 3},
        'xl': {'width': 1280, 'height': 720, 'quality': 4},
        'xxl': {'quality': 5},
    }

    def _extract_via_api(self, kind, video_id):
        """Build an info dict from the JSON API.

        Currently not called by _real_extract (see the comment there): the
        API omits some audio formats (e.g. ogg), so the page is scraped
        instead.
        """
        info = self._download_json(
            'https://www.tagesschau.de/api/multimedia/{0}/{0}-{1}.json'.format(kind, video_id),
            video_id)
        title = info['headline']
        formats = []
        for media in info['mediadata']:
            for format_id, format_url in media.items():
                if determine_ext(format_url) == 'm3u8':
                    formats.extend(self._extract_m3u8_formats(
                        format_url, video_id, 'mp4',
                        entry_protocol='m3u8_native', m3u8_id='hls'))
                else:
                    formats.append({
                        'url': format_url,
                        'format_id': format_id,
                        'vcodec': 'none' if kind == 'audio' else None,
                    })
        self._sort_formats(formats)
        timestamp = parse_iso8601(info.get('date'))
        return {
            'id': video_id,
            'title': title,
            'timestamp': timestamp,
            'formats': formats,
        }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        # kind = mobj.group('kind').lower()
        # if kind == 'video':
        #     return self._extract_via_api(kind, video_id)

        # JSON api does not provide some audio formats (e.g. ogg) thus
        # extracting audio via the webpage
        webpage = self._download_webpage(url, video_id)

        title = self._og_search_title(webpage).strip()
        formats = []

        # Media entries appear as JS object literals embedded in the page.
        for media_json in re.findall(r'({src\s*:\s*["\']http[^}]+type\s*:[^}]+})', webpage):
            media = self._parse_json(js_to_json(media_json), video_id, fatal=False)
            if not media:
                continue
            src = media.get('src')
            if not src:
                # NOTE(review): this aborts the whole extraction when a single
                # entry lacks 'src'; a 'continue' (as above) may be the
                # intent — confirm against upstream.
                return
            quality = media.get('quality')
            kind = media.get('type', '').split('/')[0]
            ext = determine_ext(src)
            f = {
                'url': src,
                'format_id': '%s_%s' % (quality, ext) if quality else ext,
                'ext': ext,
                'vcodec': 'none' if kind == 'audio' else None,
            }
            # Fill in width/height/quality for recognized quality labels.
            f.update(self._FORMATS.get(quality, {}))
            formats.append(f)

        self._sort_formats(formats)

        thumbnail = self._og_search_thumbnail(webpage)

        return {
            'id': video_id,
            'title': title,
            'thumbnail': thumbnail,
            'formats': formats,
        }
class TagesschauIE(InfoExtractor):
    """Extractor for tagesschau.de article and broadcast pages offering downloads."""
    _VALID_URL = r'https?://(?:www\.)?tagesschau\.de/(?P<path>[^/]+/(?:[^/]+/)*?(?P<id>[^/#?]+?(?:-?[0-9]+)?))(?:~_?[^/#?]+?)?\.html'

    _TESTS = [{
        'url': 'http://www.tagesschau.de/multimedia/video/video-102143.html',
        'md5': 'f7c27a0eff3bfe8c7727e65f8fe1b1e6',
        'info_dict': {
            'id': 'video-102143',
            'ext': 'mp4',
            'title': 'Regierungsumbildung in Athen: Neue Minister in Griechenland vereidigt',
            'description': '18.07.2015 20:10 Uhr',
            'thumbnail': r're:^https?:.*\.jpg$',
        },
    }, {
        'url': 'http://www.tagesschau.de/multimedia/sendung/ts-5727.html',
        'md5': '3c54c1f6243d279b706bde660ceec633',
        'info_dict': {
            'id': 'ts-5727',
            'ext': 'mp4',
            'title': 'Sendung: tagesschau \t04.12.2014 20:00 Uhr',
            'description': 'md5:695c01bfd98b7e313c501386327aea59',
            'thumbnail': r're:^https?:.*\.jpg$',
        },
    }, {
        # exclusive audio
        'url': 'http://www.tagesschau.de/multimedia/audio/audio-29417.html',
        'md5': '76e6eec6ebd40740671cf0a2c88617e5',
        'info_dict': {
            'id': 'audio-29417',
            'ext': 'mp3',
            'title': 'Trabi - Bye, bye Rennpappe',
            'description': 'md5:8687dda862cbbe2cfb2df09b56341317',
            'thumbnail': r're:^https?:.*\.jpg$',
        },
    }, {
        # audio in article
        'url': 'http://www.tagesschau.de/inland/bnd-303.html',
        'md5': 'e0916c623e85fc1d2b26b78f299d3958',
        'info_dict': {
            'id': 'bnd-303',
            'ext': 'mp3',
            'title': 'Viele Baustellen für neuen BND-Chef',
            'description': 'md5:1e69a54be3e1255b2b07cdbce5bcd8b4',
            'thumbnail': r're:^https?:.*\.jpg$',
        },
    }, {
        'url': 'http://www.tagesschau.de/inland/afd-parteitag-135.html',
        'info_dict': {
            'id': 'afd-parteitag-135',
            'title': 'Möchtegern-Underdog mit Machtanspruch',
        },
        'playlist_count': 2,
    }, {
        'url': 'http://www.tagesschau.de/multimedia/sendung/tsg-3771.html',
        'only_matching': True,
    }, {
        'url': 'http://www.tagesschau.de/multimedia/sendung/tt-3827.html',
        'only_matching': True,
    }, {
        'url': 'http://www.tagesschau.de/multimedia/sendung/nm-3475.html',
        'only_matching': True,
    }, {
        'url': 'http://www.tagesschau.de/multimedia/sendung/weltspiegel-3167.html',
        'only_matching': True,
    }, {
        'url': 'http://www.tagesschau.de/multimedia/tsvorzwanzig-959.html',
        'only_matching': True,
    }, {
        'url': 'http://www.tagesschau.de/multimedia/sendung/bab/bab-3299~_bab-sendung-209.html',
        'only_matching': True,
    }, {
        'url': 'http://www.tagesschau.de/multimedia/video/video-102303~_bab-sendung-211.html',
        'only_matching': True,
    }, {
        'url': 'http://www.tagesschau.de/100sekunden/index.html',
        'only_matching': True,
    }, {
        # playlist article with collapsing sections
        'url': 'http://www.tagesschau.de/wirtschaft/faq-freihandelszone-eu-usa-101.html',
        'only_matching': True,
    }]

    @classmethod
    def suitable(cls, url):
        # Defer ~player URLs to the more specific TagesschauPlayerIE.
        return False if TagesschauPlayerIE.suitable(url) else super(TagesschauIE, cls).suitable(url)

    def _extract_formats(self, download_text, media_kind):
        """Parse the "download" button HTML into a sorted formats list.

        Each button's title attribute encodes codec / resolution / bitrate
        details for video, or bitrate / description for audio.
        """
        links = re.finditer(
            r'<div class="button" title="(?P<title>[^"]*)"><a href="(?P<url>[^"]+)">(?P<name>.+?)</a></div>',
            download_text)
        formats = []
        for l in links:
            link_url = l.group('url')
            if not link_url:
                continue
            # Format id is derived from the filename's quality infix, e.g.
            # foo.websm.h264.mp4 -> "websm.h264"; fall back to the extension.
            format_id = self._search_regex(
                r'.*/[^/.]+\.([^/]+)\.[^/.]+$', link_url, 'format ID',
                default=determine_ext(link_url))
            format = {
                'format_id': format_id,
                'url': l.group('url'),
                'format_name': l.group('name'),
            }
            title = l.group('title')
            if title:
                if media_kind.lower() == 'video':
                    # The title encodes e.g.:
                    #   Video: h264 960x540px 1500kbps Audio: 128kbps AAC Größe: 50 MB
                    # with &#10; (newline entities) between the parts.
                    m = re.match(
                        r'''(?x)
                            Video:\s*(?P<vcodec>[a-zA-Z0-9/._-]+)\s*&\#10;
                            (?P<width>[0-9]+)x(?P<height>[0-9]+)px&\#10;
                            (?P<vbr>[0-9]+)kbps&\#10;
                            Audio:\s*(?P<abr>[0-9]+)kbps,\s*(?P<audio_desc>[A-Za-z\.0-9]+)&\#10;
                            Größe:\s*(?P<filesize_approx>[0-9.,]+\s+[a-zA-Z]*B)''',
                        title)
                    if m:
                        format.update({
                            'format_note': m.group('audio_desc'),
                            'vcodec': m.group('vcodec'),
                            'width': int(m.group('width')),
                            'height': int(m.group('height')),
                            'abr': int(m.group('abr')),
                            'vbr': int(m.group('vbr')),
                            'filesize_approx': parse_filesize(m.group('filesize_approx')),
                        })
                    else:
                        # Audio title, e.g. "MP3-Format: 128kbps, stereo".
                        m = re.match(
                            r'(?P<format>.+?)-Format\s*:\s*(?P<abr>\d+)kbps\s*,\s*(?P<note>.+)',
                            title)
                        if m:
                            format.update({
                                'format_note': '%s, %s' % (m.group('format'), m.group('note')),
                                'vcodec': 'none',
                                'abr': int(m.group('abr')),
                            })
            formats.append(format)
        self._sort_formats(formats)
        return formats

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id') or mobj.group('path')
        display_id = video_id.lstrip('-')

        webpage = self._download_webpage(url, display_id)

        title = self._html_search_regex(
            r'<span[^>]*class="headline"[^>]*>(.+?)</span>',
            webpage, 'title', default=None) or self._og_search_title(webpage)

        DOWNLOAD_REGEX = r'(?s)<p>Wir bieten dieses (?P<kind>Video|Audio) in folgenden Formaten zum Download an:</p>\s*<div class="controls">(?P<links>.*?)</div>\s*<p>'

        webpage_type = self._og_search_property('type', webpage, default=None)
        if webpage_type == 'website':  # Article
            # An article can embed several media items; collect each
            # download section into its own playlist entry.
            entries = []
            for num, (entry_title, media_kind, download_text) in enumerate(re.findall(
                    r'(?s)<p[^>]+class="infotext"[^>]*>\s*(?:<a[^>]+>)?\s*<strong>(.+?)</strong>.*?</p>.*?%s' % DOWNLOAD_REGEX,
                    webpage), 1):
                entries.append({
                    'id': '%s-%d' % (display_id, num),
                    'title': '%s' % entry_title,
                    'formats': self._extract_formats(download_text, media_kind),
                })
            if len(entries) > 1:
                return self.playlist_result(entries, display_id, title)
            formats = entries[0]['formats']
        else:  # Assume single video
            download_text = self._search_regex(
                DOWNLOAD_REGEX, webpage, 'download links', group='links')
            media_kind = self._search_regex(
                DOWNLOAD_REGEX, webpage, 'media kind', default='Video', group='kind')
            formats = self._extract_formats(download_text, media_kind)
        thumbnail = self._og_search_thumbnail(webpage)
        description = self._html_search_regex(
            r'(?s)<p class="teasertext">(.*?)</p>',
            webpage, 'description', default=None)

        self._sort_formats(formats)

        return {
            'id': display_id,
            'title': title,
            'thumbnail': thumbnail,
            'formats': formats,
            'description': description,
        }
|
xcLtw/small-blog | refs/heads/master | app/models.py | 1 | from werkzeug.security import generate_password_hash, check_password_hash
from app import db, login_manager
from flask_login import UserMixin, AnonymousUserMixin
from itsdangerous import TimedJSONWebSignatureSerializer as Serialzer
from flask import current_app, request
from datetime import datetime
import hashlib
class Permission:
    """Bit-flag permission constants; a role's permissions field is their OR."""
    FOLLOW = 0x01
    COMMENT = 0x02
    WRITE_ARTICLES = 0x04
    MODERATE_COMMENTS = 0x08
    # 0x80 rather than 0x10 so 0xff unambiguously means "all permissions".
    ADMINISTER = 0x80
class Role(db.Model):
    """User role; permissions is a bitmask built from Permission flags."""
    __tablename__ = 'roles'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), unique=True)
    users = db.relationship('User', backref='role', lazy='dynamic')
    # True only for the role assigned to newly registered users.
    default = db.Column(db.Boolean, default=False, index=True)
    permissions = db.Column(db.Integer)

    @staticmethod
    def insert_roles():
        """Create or update the built-in roles; safe to run repeatedly."""
        roles = {
            'User': (Permission.FOLLOW |
                     Permission.COMMENT |
                     Permission.WRITE_ARTICLES, True),
            'Moderator': (Permission.FOLLOW |
                          Permission.COMMENT |
                          Permission.WRITE_ARTICLES |
                          Permission.MODERATE_COMMENTS, False),
            'Administrator': (0xff, False)
        }
        for r in roles:
            # Update an existing role in place so permission changes in the
            # table above propagate to already-created databases.
            role = Role.query.filter_by(name=r).first()
            if role is None:
                role = Role(name=r)
            role.permissions = roles[r][0]
            role.default = roles[r][1]
            db.session.add(role)
        db.session.commit()

    def __repr__(self):
        return '<Role %r>' % self.name
class User(UserMixin, db.Model):
    """Registered account with role-based permissions and profile details."""
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(64), unique=True, index=True)
    role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
    password_hash = db.Column(db.String(128))
    email = db.Column(db.String(64), unique=True, index=True)
    # True once the user has followed the emailed confirmation link.
    confirmed = db.Column(db.Boolean, default=False)
    # Profile details.
    name = db.Column(db.String(64))
    location = db.Column(db.String(64))
    about_me = db.Column(db.Text())
    member_since = db.Column(db.DateTime(), default=datetime.utcnow)
    last_seen = db.Column(db.DateTime(), default=datetime.utcnow)
    # Cached MD5 of the email address, used to build Gravatar URLs.
    avatar_hash = db.Column(db.String(32))

    def __init__(self, **kwargs):
        super(User, self).__init__(**kwargs)
        # Assign a role on creation: the configured admin email gets the
        # full-permission role, everyone else the default role.
        if self.role is None:
            if self.email == current_app.config['FLASKY_ADMIN']:
                self.role = Role.query.filter_by(permissions=0xff).first()
            if self.role is None:
                self.role = Role.query.filter_by(default=True).first()

    @property
    def password(self):
        """Plaintext passwords are write-only; only the hash is stored."""
        raise AttributeError('password is not a readable attribute')

    @password.setter
    def password(self, password):
        self.password_hash = generate_password_hash(password)

    def verify_password(self, password):
        """Return True if *password* matches the stored hash."""
        return check_password_hash(self.password_hash, password)

    def generate_confirmation_token(self, expiration=1200):
        """Return a signed account-confirmation token, valid for
        *expiration* seconds."""
        s = Serialzer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'confirm': self.id})

    def confirm(self, token):
        """Validate a confirmation token and mark the account confirmed."""
        s = Serialzer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except Exception:
            # Invalid or expired token.  (Was a bare except, which also
            # swallowed KeyboardInterrupt/SystemExit.)
            return False
        if data.get('confirm') != self.id:
            return False
        self.confirmed = True
        db.session.add(self)
        return True

    def generate_reset_token(self, expiration=1200):
        """Return a signed password-reset token, valid for *expiration*
        seconds."""
        s = Serialzer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'reset': self.id})

    def reset_password(self, token, new_password):
        """Set *new_password* if *token* is a valid reset token for this user."""
        s = Serialzer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except Exception:
            return False
        if data.get('reset') != self.id:
            return False
        self.password = new_password
        db.session.add(self)
        return True

    def can(self, permissions):
        """Return True if this user's role grants every bit in *permissions*."""
        return self.role is not None and \
            (self.role.permissions & permissions) == permissions

    def is_administrator(self):
        return self.can(Permission.ADMINISTER)

    def ping(self):
        """Refresh last_seen; intended to be called on each authenticated request."""
        self.last_seen = datetime.utcnow()
        db.session.add(self)

    def gravatar(self, size=100, default='identicon', rating='g'):
        """Return a Gravatar URL for this user's email address."""
        if request.is_secure:
            # Bug fix: was 'secure.gravator.com' (typo), which broke avatars
            # on HTTPS requests.
            url = 'https://secure.gravatar.com/avatar'
        else:
            url = 'http://www.gravatar.com/avatar'
        # Compute and cache the email hash lazily.
        if self.avatar_hash is None:
            self.avatar_hash = hashlib.md5(
                self.email.encode('utf-8')).hexdigest()
            db.session.add(self)
        return '{url}/{hash}?s={size}&d={default}&r={rating}'.format(
            url=url, hash=self.avatar_hash, size=size, default=default,
            rating=rating)

    def __repr__(self):
        return '<User %r>' % self.username
class AnonymousUser(AnonymousUserMixin):
    """Stand-in for unauthenticated visitors so permission checks never fail.

    Mirrors the User permission API.  Bug fix: can() now accepts the
    permission bitmask — the original took no argument, so
    current_user.can(Permission.X) raised TypeError for anonymous users.
    The argument defaults to None to stay backward compatible with any
    zero-argument callers.
    """
    def can(self, permissions=None):
        return False

    def is_administrator(self):
        return False
# Tell Flask-Login to use our AnonymousUser for logged-out visitors.
login_manager.anonymous_user = AnonymousUser


@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: reload a user from the session-stored id."""
    return User.query.get(int(user_id))
|
betrisey/home-assistant | refs/heads/dev | homeassistant/components/netatmo.py | 2 | """
Support for the Netatmo devices.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/netatmo/
"""
import logging
from datetime import timedelta
from urllib.error import HTTPError
import voluptuous as vol
from homeassistant.const import (
CONF_API_KEY, CONF_PASSWORD, CONF_USERNAME, CONF_DISCOVERY)
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
REQUIREMENTS = [
'https://github.com/jabesq/netatmo-api-python/archive/'
'v0.6.0.zip#lnetatmo==0.6.0']
_LOGGER = logging.getLogger(__name__)
CONF_SECRET_KEY = 'secret_key'
DOMAIN = 'netatmo'
NETATMO_AUTH = None
DEFAULT_DISCOVERY = True
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=10)
# Validates the netatmo: section of configuration.yaml.
CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        vol.Required(CONF_API_KEY): cv.string,
        vol.Required(CONF_PASSWORD): cv.string,
        vol.Required(CONF_SECRET_KEY): cv.string,
        vol.Required(CONF_USERNAME): cv.string,
        # Set discovery: false to configure platforms manually.
        vol.Optional(CONF_DISCOVERY, default=DEFAULT_DISCOVERY): cv.boolean,
    })
}, extra=vol.ALLOW_EXTRA)
def setup(hass, config):
    """Set up the Netatmo devices.

    Authenticates once against the Netatmo API and, when discovery is
    enabled, hands off to the individual platforms.  Returns False when
    authentication fails so Home Assistant marks the component as not set
    up.
    """
    import lnetatmo

    global NETATMO_AUTH
    try:
        # Request scopes for weather stations, Welcome cameras and
        # thermostats in one shared auth object.
        NETATMO_AUTH = lnetatmo.ClientAuth(
            config[DOMAIN][CONF_API_KEY], config[DOMAIN][CONF_SECRET_KEY],
            config[DOMAIN][CONF_USERNAME], config[DOMAIN][CONF_PASSWORD],
            'read_station read_camera access_camera '
            'read_thermostat write_thermostat')
    except HTTPError:
        _LOGGER.error("Unable to connect to Netatmo API")
        return False

    if config[DOMAIN][CONF_DISCOVERY]:
        # Let each platform discover its own devices using the shared auth.
        for component in 'camera', 'sensor', 'binary_sensor', 'climate':
            discovery.load_platform(hass, component, DOMAIN, {}, config)

    return True
class WelcomeData(object):
    """Get the latest data from Netatmo.

    Wraps lnetatmo.WelcomeData with throttled refreshes and camera-name
    listing, optionally filtered to a single home.
    """
    def __init__(self, auth, home=None):
        """Initialize the data object."""
        self.auth = auth
        # Populated lazily by update(); an lnetatmo.WelcomeData instance.
        self.welcomedata = None
        self.camera_names = []
        # Optional home name; when set, only that home's cameras are listed.
        self.home = home
    def get_camera_names(self):
        """Return all module available on the API as a list."""
        self.camera_names = []
        self.update()
        if not self.home:
            # No home filter: collect camera names across every home.
            for home in self.welcomedata.cameras:
                for camera in self.welcomedata.cameras[home].values():
                    self.camera_names.append(camera['name'])
        else:
            for camera in self.welcomedata.cameras[self.home].values():
                self.camera_names.append(camera['name'])
        return self.camera_names
    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self):
        """Call the Netatmo API to update the data."""
        import lnetatmo
        self.welcomedata = lnetatmo.WelcomeData(self.auth)
|
kopach/git-cola | refs/heads/master | cola/resources.py | 2 | """Provides the prefix() function for finding cola resources"""
from __future__ import division, absolute_import, unicode_literals
import os
import webbrowser
from os.path import dirname
from . import core
# Default git-cola icon theme
_default_icon_theme = 'light'
_modpath = core.abspath(__file__)
if os.path.join('share', 'git-cola', 'lib') in _modpath:
# this is the release tree
# __file__ = '$prefix/share/git-cola/lib/cola/__file__.py'
_lib_dir = dirname(dirname(_modpath))
_prefix = dirname(dirname(dirname(_lib_dir)))
elif os.path.join('pkgs', 'cola') in _modpath:
# Windows release tree
# __file__ = $installdir/pkgs/cola/resources.py
_prefix = dirname(dirname(dirname(_modpath)))
else:
# this is the source tree
# __file__ = '$prefix/cola/__file__.py'
_prefix = dirname(dirname(_modpath))
def prefix(*args):
    """Return a path relative to cola's installation prefix.

    Joins *args onto the module-level ``_prefix`` computed at import time.
    """
    return os.path.join(_prefix, *args)
def doc(*args):
    """Return a path relative to cola's /usr/share/doc/ directory."""
    return os.path.join(_prefix, 'share', 'doc', 'git-cola', *args)
def html_docs():
    """Return the path to the cola html documentation.

    Falls back to the Sphinx build tree and finally the raw git-cola.rst
    when the installed HTML docs are absent.
    """
    # html/index.html only exists after the install-docs target is run.
    # Fallback to the source tree and lastly git-cola.rst.
    paths_to_try = (('html', 'index.html'),
                    ('_build', 'html', 'index.html'))
    for paths in paths_to_try:
        docdir = doc(*paths)
        if core.exists(docdir):
            return docdir
    return doc('git-cola.rst')
def show_html_docs():
    """Open the HTML documentation in a new browser tab."""
    url = html_docs()
    webbrowser.open_new_tab('file://' + url)
def share(*args):
    """Return a path relative to cola's /usr/share/ directory"""
    # Shared resources (icons etc.) live under <prefix>/share/git-cola/.
    return prefix('share', 'git-cola', *args)
def icon_dir(theme):
    """Return the directory containing git-cola's icons.

    With no theme, or the default "light" theme, this is the stock
    share/git-cola/icons directory.  A theme name selects the
    icons/<theme> subdirectory when it exists, and an absolute path to
    an existing directory is used verbatim, which lets users supply
    their own icon sets.  Anything unrecognized falls back to the
    stock icon directory.
    """
    if not theme or theme == _default_icon_theme:
        return share('icons')
    if os.path.isabs(theme) and os.path.isdir(theme):
        return theme
    themed = share('icons', theme)
    if os.path.isdir(themed):
        return themed
    return share('icons')
def config_home(*args):
    """Return a path under git-cola's XDG config directory (~/.config/git-cola)."""
    config = core.getenv('XDG_CONFIG_HOME',
                         os.path.join(core.expanduser('~'), '.config'))
    return os.path.join(config, 'git-cola', *args)
|
jk1/intellij-community | refs/heads/master | python/lib/Lib/site-packages/django/contrib/admindocs/models.py | 634 | # Empty models.py to allow for specifying admindocs as a test label.
|
hsharsha/depot_tools | refs/heads/master | fetch_configs/ios.py | 12 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import config_util # pylint: disable=F0401
# This class doesn't need an __init__ method, so we disable the warning
# pylint: disable=W0232
class IOS(config_util.Config):
    """Basic Config alias for iOS -> Chromium."""
    @staticmethod
    def fetch_spec(props):
        # iOS is the chromium checkout fetched with iOS-only gclient vars.
        return {
            'alias': {
                'config': 'chromium',
                'props': ['--target_os=ios', '--target_os_only=True'],
            },
        }
    @staticmethod
    def expected_root(_props):
        # Empty: the checkout root comes from the aliased 'chromium' config.
        return ''
def main(argv=None):
    """Command-line entry point; returns the fetch exit status."""
    return IOS().handle_args(argv)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
terrycain/razer-drivers | refs/heads/master | pylib/openrazer/client/__init__.py | 2 | import json
import dbus as _dbus
from openrazer.client.device import RazerDeviceFactory as _RazerDeviceFactory
from openrazer.client import constants
__version__ = '2.5.0'
class DaemonNotFound(Exception):
    """Raised when the OpenRazer daemon cannot be reached over D-Bus."""
    pass
class DeviceManager(object):
    """
    DeviceManager Class

    Connects to the OpenRazer daemon over the D-Bus session bus, enumerates
    the attached Razer devices and exposes daemon-wide settings.
    """
    def __init__(self):
        # Load up the DBus
        session_bus = _dbus.SessionBus()
        try:
            self._dbus = session_bus.get_object("org.razer", "/org/razer")
        except _dbus.DBusException:
            raise DaemonNotFound("Could not connect to daemon")
        # Get interface for daemon methods
        self._dbus_daemon = _dbus.Interface(self._dbus, "razer.daemon")
        # Get interface for devices methods
        self._dbus_devices = _dbus.Interface(self._dbus, "razer.devices")
        self._device_serials = self._dbus_devices.getDevices()
        self._devices = []
        self._daemon_version = self._dbus_daemon.version()
        # Wrap each serial number in a high-level device object.
        for serial in self._device_serials:
            device = _RazerDeviceFactory.get_device(serial)
            self._devices.append(device)
    def stop_daemon(self):
        """
        Stops the Daemon via a DBus call
        """
        self._dbus_daemon.stop()
    @property
    def turn_off_on_screensaver(self):
        # Whether devices are dimmed while the screensaver is active.
        return self._dbus_devices.getOffOnScreensaver()
    @turn_off_on_screensaver.setter
    def turn_off_on_screensaver(self, enable):
        """
        Enable or Disable the logic to turn off the devices whilst the screensaver is active
        If True, when the screensaver is active the devices' brightness will be set to 0.
        When the screensaver is inactive the devices' brightness will be restored
        :param enable: True to enable screensaver disable
        :type enable: bool
        :raises ValueError: If enable isn't a bool
        """
        if not isinstance(enable, bool):
            raise ValueError("Enable must be a boolean")
        self._dbus_devices.enableTurnOffOnScreensaver(enable)
    @property
    def sync_effects(self):
        # Whether effect changes are propagated to all devices.
        return self._dbus_devices.getSyncEffects()
    @sync_effects.setter
    def sync_effects(self, sync):
        """
        Enable or disable the syncing of effects between devices
        If sync is enabled, whenever an effect is set then it will be set on all other devices if the effect is available or a similar effect if it is not.
        :param sync: Sync effects
        :type sync: bool
        :raises ValueError: If sync isn't a bool
        """
        if not isinstance(sync, bool):
            raise ValueError("Sync must be a boolean")
        self._dbus_devices.syncEffects(sync)
    @property
    def supported_devices(self):
        # JSON-decoded map of every device model the daemon supports.
        json_data = self._dbus_daemon.supportedDevices()
        return json.loads(json_data)
    @property
    def devices(self):
        """
        A list of Razer devices
        :return: List of devices
        :rtype: list[razer.client.devices.RazerDevice]
        """
        return self._devices
    @property
    def version(self):
        """
        Python library version
        :return: Version
        :rtype: str
        """
        return __version__
    @property
    def daemon_version(self):
        """
        Daemon version
        :return: Daemon version
        :rtype: str
        """
        return str(self._daemon_version)
if __name__ == '__main__':
a = DeviceManager()
b = a.devices[0]
print()
|
PhilLidar-DAD/geonode | refs/heads/master | geonode/documents/urls.py | 1 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.conf.urls import patterns, url
from django.contrib.auth.decorators import login_required
from django.views.generic import TemplateView
from .views import DocumentUploadView, DocumentUpdateView
js_info_dict = {
'packages': ('geonode.documents',),
}
# URL routes for the documents app: browsing, detail/download, upload,
# replace/remove, CSV export, search and metadata editing.
urlpatterns = patterns('geonode.documents.views',
                       url(r'^$', TemplateView.as_view(template_name='documents/document_list.html'),
                           name='document_browse'),
                       url(r'^(?P<docid>\d+)/?$', 'document_detail', name='document_detail'),
                       url(r'^(?P<docid>\d+)/download/?$', 'document_download', name='document_download'),
                       url(r'^(?P<docid>\d+)/replace$', login_required(DocumentUpdateView.as_view()),
                           name="document_replace"),
                       url(r'^(?P<docid>\d+)/remove$', 'document_remove', name="document_remove"),
                       url(r'^upload/?$', login_required(DocumentUploadView.as_view()), name='document_upload'),
                       url(r'^document_csv_download/$', 'document_csv_download', name='document_csv_download'),
                       url(r'^search/?$', 'document_search_page', name='document_search_page'),
                       url(r'^(?P<docid>\d+)/metadata$', 'document_metadata', name='document_metadata'),
                       )
|
bvorjohan/2048_AI | refs/heads/master | logic.py | 1 | #
from random import *
import numpy as np
def new_game(n):
    """Return a fresh n x n board (list of lists) filled with zeros."""
    return [[0] * n for _ in range(n)]
def num_zeros(mat):
    """Return the number of empty (zero) tiles on the board.

    The original hard-coded ``16 -`` and therefore only worked for 4x4
    boards; deriving the cell count from the matrix itself gives the same
    answer for 4x4 while also being correct for any other size.
    """
    arr = np.asarray(mat)
    return arr.size - np.count_nonzero(arr)
def add_two(mat):
    """Place a new tile on a uniformly random empty cell (in place).

    The tile is 2 with probability 0.9, otherwise 4, matching the original
    game.  The original rejection-sampled random coordinates, which loops
    forever when the board is full; picking directly from the empty cells
    keeps the distribution uniform and terminates always (a full board is
    returned unchanged).
    """
    empty = [(r, c)
             for r in range(len(mat))
             for c in range(len(mat[r]))
             if mat[r][c] == 0]
    if not empty:
        # Full board: nothing to place (the original would spin forever).
        return mat
    a, b = choice(empty)
    mat[a][b] = (2 if random() < .9 else 4)
    return mat
def game_score(mat):
    """Evaluate a board by the value of its greatest tile (never below 0)."""
    return max([0] + [tile for row in mat for tile in row])
def game_state(mat):
    """Return 'not over' while a move is still possible, otherwise 'lose'.

    A move exists when any cell is empty or any two orthogonally adjacent
    tiles are equal (mergeable).  Unlike the original, this does not call
    the 4x4-only ``num_zeros`` helper, so it is correct for any board size
    while giving identical answers on 4x4 boards.
    """
    n_rows = len(mat)
    for i in range(n_rows):
        n_cols = len(mat[i])
        for j in range(n_cols):
            if mat[i][j] == 0:
                return 'not over'
            if j + 1 < n_cols and mat[i][j] == mat[i][j + 1]:
                return 'not over'
            if i + 1 < n_rows and mat[i][j] == mat[i + 1][j]:
                return 'not over'
    return 'lose'
def reverse(mat):
    """Mirror the board left-to-right; returns a NumPy array."""
    return np.fliplr(mat)
def transpose(mat):
    """Transpose the board (swap rows and columns); returns a NumPy array."""
    return np.asarray(mat).T
def cover_up(mat):
    """Slide every tile of a 4x4 board to the left without merging.

    Returns (new_board, moved) where ``moved`` is True iff any tile
    changed column.  The input board is not modified.
    """
    compacted = [[0] * 4 for _ in range(4)]
    moved = False
    for r in range(4):
        dest = 0
        for c in range(4):
            if mat[r][c] != 0:
                compacted[r][dest] = mat[r][c]
                if c != dest:
                    moved = True
                dest += 1
    return compacted, moved
def merge(mat):
    """Merge equal horizontally-adjacent tiles leftward, in place.

    Returns (mat, merged_any).  Merged cells leave a zero gap, so
    cover_up() must be applied afterwards to close the gaps.
    """
    merged = False
    for row in mat:
        for c in range(3):
            if row[c] != 0 and row[c] == row[c + 1]:
                row[c] *= 2
                row[c + 1] = 0
                merged = True
    return mat, merged
def merge_score(mat):
    """Merge equal adjacent tiles leftward and accumulate the score gained.

    Returns (mat, merged_any, score), where ``score`` is the sum of the
    tile values produced by the merges — the classic 2048 scoring rule.
    The original computed a positional ``mult`` bonus that was incremented
    but never applied to the score (``mat[i][j]*1``); that dead code is
    removed here without changing any returned value.
    """
    score = 0
    merged = False
    for i in range(4):
        for j in range(3):
            if mat[i][j] != 0 and mat[i][j] == mat[i][j + 1]:
                mat[i][j] *= 2
                mat[i][j + 1] = 0
                # The merged cell now holds the doubled value; score it.
                score += mat[i][j]
                merged = True
    return mat, merged, score
def up(game):
    """Shift the board up; returns (new_board, moved_or_merged)."""
    # Transpose so an up move becomes a left move, then transpose back.
    game = transpose(game)
    game, done = cover_up(game)
    temp = merge(game)
    game = temp[0]
    done = done or temp[1]
    game = cover_up(game)[0]
    game = transpose(game)
    return game, done
def down(game):
    """Shift the board down; returns (new_board, moved_or_merged)."""
    # reverse(transpose(...)) maps a down move onto a left move.
    game = reverse(transpose(game))
    game, done=cover_up(game)
    temp = merge(game)
    game = temp[0]
    done = done or temp[1]
    game = cover_up(game)[0]
    game = transpose(reverse(game))
    return game, done
def left(game):
    """Shift the board left; returns (new_board, moved_or_merged)."""
    # Left is the native direction: slide, merge, slide again.
    game, done = cover_up(game)
    temp = merge(game)
    game = temp[0]
    done = done or temp[1]
    game = cover_up(game)[0]
    return game, done
def right(game):
    """Shift the board right; returns (new_board, moved_or_merged)."""
    # Mirror so a right move becomes a left move, then mirror back.
    game = reverse(game)
    game, done = cover_up(game)
    temp = merge(game)
    game = temp[0]
    done = done or temp[1]
    game = cover_up(game)[0]
    game = reverse(game)
    return game, done
def up_score(game):
    """Shift up; returns (new_board, moved_or_merged, score_gained)."""
    game = transpose(game)
    game, done = cover_up(game)
    temp = merge_score(game)
    game = temp[0]
    done = done or temp[1]
    game = cover_up(game)[0]
    game = transpose(game)
    score = temp[2]
    return game, done, score
def down_score(game):
    """Shift down; returns (new_board, moved_or_merged, score_gained)."""
    game = reverse(transpose(game))
    game, done = cover_up(game)
    temp = merge_score(game)
    game = temp[0]
    done = done or temp[1]
    game = cover_up(game)[0]
    score = temp[2]
    game = transpose(reverse(game))
    return game, done, score
def left_score(game):
    """Shift left; returns (new_board, moved_or_merged, score_gained)."""
    game, done = cover_up(game)
    temp = merge_score(game)
    game = temp[0]
    done = done or temp[1]
    game = cover_up(game)[0]
    score = temp[2]
    return game, done, score
def right_score(game):
    """Shift right; returns (new_board, moved_or_merged, score_gained)."""
    game = reverse(game)
    game, done = cover_up(game)
    temp = merge_score(game)
    game = temp[0]
    done = done or temp[1]
    game = cover_up(game)[0]
    score = temp[2]
    game = reverse(game)
    return game, done, score
def brad_state_score(mat):
#####
# An alternative scoring function, one which tries to imitate an intuative, human style
#####
coord_list = [(0,0),(0,1),(0,2),(0,3),
(1, 3), (1, 2), (1, 1), (1, 0),
(2, 0), (2, 1), (2, 2), (2, 3),
(3, 3), (3, 2), (3, 1), (3, 0),]
weight_mat = np.array([32768,16384,8192,4096, 2048, 1024, 512, 256, 128,64,32,16, 8, 4, 2, 1])
biggest_tile = np.argmax([mat[0][0], mat[0][3], mat[3][0], mat[3][3]])
score_0 = 0
score_1 = 0
tile_max_0 = [mat[0][0], mat[0][3], mat[3][0], mat[3][3]][biggest_tile]
tile_max_1 = tile_max_0
# biggest_tile = 0
if biggest_tile == 0:
for i, coord in enumerate(coord_list):
# print(coord)
# x=coord[0]
# y=coord[1]
# print(x)
# print(y)
tile_0 = mat[coord[0]][coord[1]]
tile_1 = mat[coord[1]][coord[0]]
if tile_0 <= tile_max_0:
score_0 += tile_0 * weight_mat[i]
tile_max_0 = tile_0
if tile_1 <= tile_max_1:
score_1 += tile_1 * weight_mat[i]
tile_max_1 = tile_1
return np.amax([score_0,score_1])
elif biggest_tile == 1:
for i, coord in enumerate(coord_list):
tile_0 = mat[coord[0]][3 - coord[1]]
tile_1 = mat[3 - coord[1]][coord[0]]
if tile_0 <= tile_max_0:
score_0 += tile_0 * weight_mat[i]
tile_max_0 = tile_0
if tile_1 <= tile_max_1:
score_1 += tile_1 * weight_mat[i]
tile_max_1 = tile_1
return np.amax([score_0,score_1])
elif biggest_tile == 2:
for i, coord in enumerate(coord_list):
tile_0 = mat[3 - coord[0]][coord[1]]
tile_1 = mat[coord[1]][3 - coord[0]]
if tile_0 <= tile_max_0:
score_0 += tile_0 * weight_mat[i]
tile_max_0 = tile_0
if tile_1 <= tile_max_1:
score_1 += tile_1 * weight_mat[i]
tile_max_1 = tile_1
return np.amax([score_0,score_1])
elif biggest_tile == 3:
for i, coord in enumerate(coord_list):
tile_0 = mat[3 - coord[0]][3 - coord[1]]
tile_1 = mat[3 - coord[1]][3 - coord[0]]
if tile_0 <= tile_max_0:
score_0 += tile_0 * weight_mat[i]
tile_max_0 = tile_0
if tile_1 <= tile_max_1:
score_1 += tile_1 * weight_mat[i]
tile_max_1 = tile_1
return np.amax([score_0, score_1])
def zeros_to_steps(zeros):
    """Map the number of empty cells to a search depth (emptier -> shallower)."""
    if zeros >= 6:
        return 2
    if zeros >= 2:
        return 3
    return 4
|
virajs/selenium-1 | refs/heads/trunk | py/test/selenium/common/utils.py | 20 | import os
import socket
import time
import urllib
import subprocess
import signal
SERVER_ADDR = "localhost"
DEFAULT_PORT = 4444
SERVER_PATH = "build/java/server/src/org/openqa/grid/selenium/selenium-standalone.jar"
def start_server(module):
    """Launch a standalone selenium server unless one is already listening.

    Stores the spawned process on `module.server_proc` and asserts that the
    hub URL becomes reachable within 10 seconds.
    """
    _socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    url = "http://%s:%d/wd/hub" % (SERVER_ADDR, DEFAULT_PORT)
    try:
        # If we can connect, something is already bound to the port.
        _socket.connect((SERVER_ADDR, DEFAULT_PORT))
        print ("The remote driver server is already running or something else"
               "is using port %d, continuing..." % DEFAULT_PORT)
    except:
        print ("Starting the remote driver server")
        module.server_proc = subprocess.Popen(
            "java -jar %s" % SERVER_PATH,
            shell=True)
        assert wait_for_server(url, 10), "can't connect"
    print "Server should be online"
def wait_for_server(url, timeout):
    """Poll `url` until it answers or `timeout` seconds elapse.

    Returns 1 on success, 0 on timeout.
    """
    start = time.time()
    while time.time() - start < timeout:
        try:
            urllib.urlopen(url)
            return 1
        except IOError:
            # Not up yet; back off briefly before retrying.
            time.sleep(0.2)
    return 0
def stop_server(module):
    # FIXME: This does not seem to work, the server process lingers
    """Best-effort SIGTERM of the server spawned by start_server()."""
    try:
        os.kill(module.server_proc.pid, signal.SIGTERM)
        time.sleep(5)
    except:
        # Best-effort: the process may already be gone or was never started.
        pass
|
arjay2539/Hacktoberfest-2k17 | refs/heads/master | Python/pankaj.py | 3 | import random
# Print one random integer in the inclusive range [0, 99].
print(random.randint(0,99))
|
ziozzang/kernel-rhel6 | refs/heads/master | tools/perf/python/twatch.py | 3213 | #! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
    """Open a perf event stream on every cpu and print task samples forever."""
    cpus = perf.cpu_map()
    threads = perf.thread_map()
    # Sample every event (period=1) and include tid/cpu/period in records.
    evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
                       wakeup_events = 1, sample_period = 1,
                       sample_id_all = 1,
                       sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID)
    evsel.open(cpus = cpus, threads = threads);
    evlist = perf.evlist(cpus, threads)
    evlist.add(evsel)
    evlist.mmap()
    # Block until events arrive, then drain each cpu's ring buffer.
    while True:
        evlist.poll(timeout = -1)
        for cpu in cpus:
            event = evlist.read_on_cpu(cpu)
            if not event:
                continue
            print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
                                                    event.sample_pid,
                                                    event.sample_tid),
            print event
|
alanljj/oca_hr | refs/heads/8.0 | hr_infraction/wizard/__init__.py | 28 | # -*- coding:utf-8 -*-
#
#
# Copyright (C) 2013 Michael Telahun Makonnen <mmakonnen@gmail.com>.
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from . import action
|
osgn/python-topojson | refs/heads/master | src/topojson/clockwise.py | 1 | class Clock:
def __init__(self, area):
self.area = area
def clock(self, feature):
if 'geometries' in feature:
feature['geometries'] = map(
self.clock_geometry, feature['geometries'])
elif 'geometry' in feature:
feature['geometry'] = self.clock_geometry(feature['geometry'])
return feature
def clock_geometry(self, geo):
if 'type' in geo:
if geo['type'] == 'Polygon' or geo['type'] == 'MultiLineString':
geo['coordinates'] = self.clockwise_polygon(geo['coordinates'])
elif geo['type'] == 'MultiPolygon':
geo['coordinates'] = map(
lambda x: self.clockwise_polygon(x), geo['coordinates'])
elif geo['type'] == 'LineString':
geo['coordinates'] = self.clockwise_ring(geo['coordinates'])
return geo
def clockwise_polygon(self, rings):
return map(lambda x: self.clockwise_ring(x), rings)
def clockwise_ring(self, ring):
if self.area(ring) > 0:
return list(reversed(ring))
else:
return ring
|
VioletRed/script.module.urlresolver | refs/heads/master | lib/urlresolver/plugins/cloudy.py | 1 | """
urlresolver XBMC Addon
Copyright (C) 2011 t0mm0
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
from t0mm0.common.net import Net
import urllib2, os
from urlresolver import common
from urlresolver.plugnplay.interfaces import UrlResolver
from urlresolver.plugnplay.interfaces import PluginSettings
from urlresolver.plugnplay import Plugin
import xbmcgui
from lib import unwise
import urllib
#SET ERROR_LOGO# THANKS TO VOINAGE, BSTRDMKR, ELDORADO
error_logo = os.path.join(common.addon_path, 'resources', 'images', 'redx.png')
class CloudyResolver(Plugin, UrlResolver, PluginSettings):
    """Resolve cloudy.ec (and mirror-domain) embed links into stream URLs."""
    implements = [UrlResolver, PluginSettings]
    name = "cloudy.ec"
    domains = [ "cloudy.ec", "cloudy.eu", "cloudy.sx", "cloudy.ch", "cloudy.com" ]
    def __init__(self):
        p = self.get_setting('priority') or 100
        self.priority = int(p)
        self.net = Net()
    def __get_stream_url(self, media_id, filekey, error_num=0, error_url=None):
        '''
        Get stream url.
        If previously found stream url is a dead link, add error params and try again
        '''
        if error_num > 0 and error_url:
            _error_params = '&numOfErrors={0}&errorCode=404&errorUrl={1}'.format(
                error_num,
                urllib.quote_plus(error_url).replace('.', '%2E')
            )
        else:
            _error_params = ''
        #use api to find stream address
        api_call = 'http://www.cloudy.ec/api/player.api.php?{0}&file={1}&key={2}{3}'.format(
            'user=undefined&pass=undefined',
            media_id,
            urllib.quote_plus(filekey).replace('.', '%2E'),
            _error_params
        )
        api_html = self.net.http_GET(api_call).content
        rapi = re.search('url=(.+?)&title=', api_html)
        if rapi:
            return urllib.unquote(rapi.group(1))
        return None
    def __is_stream_url_active(self, web_url):
        # A HEAD request that yields headers means the link is still alive.
        try:
            header = self.net.http_HEAD(web_url)
            if header.get_headers():
                return True
            return False
        except:
            return False
    def get_media_url(self, host, media_id):
        """Return a playable stream URL for the embed page, retrying dead links."""
        web_url = self.get_url(host, media_id)
        dialog = xbmcgui.Dialog()
        #grab stream details
        try:
            html = self.net.http_GET(web_url).content
            html = unwise.unwise_process(html)
            filekey = unwise.resolve_var(html, "flashvars.filekey")
            error_url = None
            stream_url = None
            # try to resolve 3 times then give up
            for x in range(0, 2):
                link = self.__get_stream_url(media_id, filekey,
                                             error_num=x,
                                             error_url=error_url)
                if link:
                    active = self.__is_stream_url_active(link)
                    if active:
                        stream_url = urllib.unquote(link)
                        break;
                    else:
                        # link inactive
                        error_url = link
                else:
                    # no link found
                    raise Exception ('File Not Found or removed')
            if stream_url:
                return stream_url
            else:
                raise Exception ('File Not Found or removed')
        except urllib2.URLError, e:
            common.addon.log_error(self.name + ': got http error %d fetching %s' %
                                   (e.code, web_url))
            common.addon.show_small_popup('Error','Http error: '+str(e), 8000, error_logo)
            return self.unresolvable(code=3, msg=e)
        except Exception, e:
            common.addon.log('**** Cloudy Error occured: %s' % e)
            common.addon.show_small_popup(title='[B][COLOR white]CLOUDY[/COLOR][/B]', msg='[COLOR red]%s[/COLOR]' % e, delay=5000, image=error_logo)
            return self.unresolvable(code=0, msg=e)
    def get_url(self, host, media_id):
        """Build the embed-page URL for a media id."""
        return 'http://www.cloudy.ec/embed.php?id=%s' % media_id
    def get_host_and_id(self, url):
        # Split a cloudy URL into (host, media_id); False when it's no match.
        r = re.search('(https?://(?:www\.|embed\.)cloudy\.(?:ec|eu|sx|ch|com))/(?:video/|embed\.php\?id=)([0-9a-z]+)', url)
        if r:
            return r.groups()
        else:
            return False
    def valid_url(self, url, host):
        # The plugin can be disabled via its settings.
        if self.get_setting('enabled') == 'false': return False
        return re.match('https?://(?:www\.|embed\.)cloudy\.(?:ec|eu|sx|ch|com)/(?:video/|embed\.php\?id=)([0-9a-z]+)', url) or 'cloudy.' in host
|
apaleyes/mxnet | refs/heads/master | example/recommenders/matrix_fact.py | 15 | import math
import mxnet as mx
import numpy as np
import mxnet.notebook.callback
import logging
logging.basicConfig(level=logging.DEBUG)
def RMSE(label, pred):
    """Root-mean-squared error between `label` and the flattened `pred`."""
    pred = pred.flatten()
    squared_error = sum((label[i] - pred[i]) ** 2 for i in range(len(label)))
    return math.sqrt(squared_error / float(len(label)))
def train(network, data_pair, num_epoch, learning_rate, optimizer='sgd', opt_args=None, ctx=[mx.gpu(0)]):
    """Train `network` on (train, test) iterators; returns the live learning curve.

    NOTE(review): the mutable default ``ctx=[mx.gpu(0)]`` is evaluated once at
    import time and requires a GPU — confirm intended for this demo.
    """
    np.random.seed(123) # Fix random seed for consistent demos
    mx.random.seed(123) # Fix random seed for consistent demos
    if not opt_args:
        opt_args = {}
    # Default momentum for plain SGD when the caller supplied no options.
    if optimizer=='sgd' and (not opt_args):
        opt_args['momentum'] = 0.9
    model = mx.model.FeedForward(
        ctx = ctx,
        symbol = network,
        num_epoch = num_epoch,
        optimizer = optimizer,
        learning_rate = learning_rate,
        wd = 1e-4,
        **opt_args
    )
    train, test = (data_pair)
    lc = mxnet.notebook.callback.LiveLearningCurve('RMSE', 1)
    model.fit(X = train,
              eval_data = test,
              eval_metric = RMSE,
              **mxnet.notebook.callback.args_wrapper(lc)
    )
    return lc
|
swails/chemlab | refs/heads/master | chemlab/db/utils.py | 6 | # Utilities for data handling
class InsensitiveDict:
"""Dictionary, that has case-insensitive keys.
Normally keys are retained in their original form when queried with
.keys() or .items(). If initialized with preserveCase=0, keys are both
looked up in lowercase and returned in lowercase by .keys() and .items().
"""
"""
Modified recipe at
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66315 originally
contributed by Sami Hangaslammi.
"""
def __init__(self, dict=None, preserve=1):
"""Create an empty dictionary, or update from 'dict'."""
self.data = {}
self.preserve=preserve
if dict:
self.update(dict)
def __delitem__(self, key):
k=self._lowerOrReturn(key)
del self.data[k]
def _lowerOrReturn(self, key):
if isinstance(key, str) or isinstance(key, unicode):
return key.lower()
else:
return key
def __getitem__(self, key):
"""Retrieve the value associated with 'key' (in any case)."""
k = self._lowerOrReturn(key)
return self.data[k][1]
def __setitem__(self, key, value):
"""Associate 'value' with 'key'. If 'key' already exists, but
in different case, it will be replaced."""
k = self._lowerOrReturn(key)
self.data[k] = (key, value)
def has_key(self, key):
"""Case insensitive test whether 'key' exists."""
k = self._lowerOrReturn(key)
return k in self.data
__contains__=has_key
def _doPreserve(self, key):
if not self.preserve and (isinstance(key, str)
or isinstance(key, unicode)):
return key.lower()
else:
return key
def keys(self):
"""List of keys in their original case."""
return list(self.iterkeys())
def values(self):
"""List of values."""
return list(self.itervalues())
def items(self):
"""List of (key,value) pairs."""
return list(self.iteritems())
def get(self, key, default=None):
"""Retrieve value associated with 'key' or return default value
if 'key' doesn't exist."""
try:
return self[key]
except KeyError:
return default
def setdefault(self, key, default):
"""If 'key' doesn't exists, associate it with the 'default' value.
Return value associated with 'key'."""
if not self.has_key(key):
self[key] = default
return self[key]
def update(self, dict):
"""Copy (key,value) pairs from 'dict'."""
for k,v in dict.items():
self[k] = v
def __repr__(self):
"""String representation of the dictionary."""
items = ", ".join([("%r: %r" % (k,v)) for k,v in self.items()])
return "InsensitiveDict({%s})" % items
def iterkeys(self):
for v in self.data.itervalues():
yield self._doPreserve(v[0])
def itervalues(self):
for v in self.data.itervalues():
yield v[1]
def iteritems(self):
for (k, v) in self.data.itervalues():
yield self._doPreserve(k), v
def popitem(self):
i=self.items()[0]
del self[i[0]]
return i
def clear(self):
for k in self.keys():
del self[k]
def copy(self):
return InsensitiveDict(self, self.preserve)
def __len__(self):
return len(self.data)
def __eq__(self, other):
for k,v in self.items():
if not (k in other) or not (other[k]==v):
return 0
return len(self)==len(other)
|
jjo31/ATHAM-Fluidity | refs/heads/ToMerge | tests/gls-Kato_Phillips-mixed_layer_depth/mld_calc.py | 4 | from numpy import arange,concatenate,array,argsort,zeros
import os
import sys
import vtktools
import math
import re
from math import sqrt
from scipy.interpolate import UnivariateSpline
#### taken from http://www.codinghorror.com/blog/archives/001018.html #######
def sort_nicely( l ):
    """Sort `l` in place in natural (human) order, e.g. 'a2' before 'a10'."""
    def natural_key(key):
        # Split into digit/non-digit chunks and compare digits numerically.
        return [int(chunk) if chunk.isdigit() else chunk
                for chunk in re.split('([0-9]+)', key)]
    l.sort(key=natural_key)
##############################################################################
# compute the mixed layer depth over time
def MLD(filelist):
    """Compute the mixed layer depth over time from a list of .vtu files.

    Returns (times, depths, Dm): simulation times in hours, mixed-layer
    depths (sign-flipped), and the Kato-Phillips analytic estimate Dm.
    The MLD is located where turbulent kinetic energy drops below tke0
    in the water column sampled at x = x0.
    """
    x0 = 0.
    tke0 = 1.0e-5
    last_mld = 0
    times = []
    depths = []
    Dm = []
    for file in filelist:
        try:
            os.stat(file)
        except:
            print "No such file: %s" % file
            sys.exit(1)
        u=vtktools.vtu(file)
        time = u.GetScalarField('Time')
        tt = time[0]
        kk = u.GetScalarField('GLSTurbulentKineticEnergy')
        pos = u.GetLocations()
        # ignore first 4 hours of simulaiton
        if (tt < 14400):
            continue
        # Collect (x, -y, z, tke) for nodes on the x = x0 column.
        xyzkk = []
        for i in range(0,len(kk)):
            if( abs(pos[i,0] - x0) < 0.1 ):
                xyzkk.append((pos[i,0],-pos[i,1],pos[i,2],(kk[i])))
        xyzkkarr = vtktools.arr(xyzkk)
        III = argsort(xyzkkarr[:,1])
        xyzkkarrsort = xyzkkarr[III,:]
        # march down the column, grabbing the last value above tk0 and the first
        # one less than tke0. Interpolate between to get the MLD
        kea = 1000
        keb = 0
        zza = 0
        zzb = 0
        for values in xyzkkarrsort:
            if (values[3] > tke0):
                kea = values[3]
                zza = -values[1]
            if (values[3] < tke0):
                keb = values[3]
                zzb = -values[1]
                break
        # the MLD is somewhere between these two values - let's estimate half way!
        mld = (zzb+zza)/2.
        if (last_mld == mld):
            continue
        times.append(tt/3600)
        depths.append(-1.0*mld)
        last_mld = mld
        Dm.append(1.05*0.00988211768*(1.0/sqrt(0.01))*sqrt(tt))
    return times, depths, Dm
|
edisonlz/fruit | refs/heads/master | web_project/base/site-packages/sorl/thumbnail/images.py | 2 | # encoding=utf-8
from __future__ import unicode_literals, division
import os
import re
from django.core.files.base import File, ContentFile
from django.core.files.storage import Storage, default_storage
from django.utils.functional import LazyObject, empty
from sorl.thumbnail import default
from sorl.thumbnail.conf import settings
from sorl.thumbnail.compat import (json, urlopen, urlparse, urlsplit,
quote, quote_plus,
URLError, force_unicode, encode)
from sorl.thumbnail.helpers import ThumbnailError, tokey, get_module_class, deserialize
from sorl.thumbnail.parsers import parse_geometry
url_pat = re.compile(r'^(https?|ftp):\/\/')
def serialize_image_file(image_file):
    """Serialize an ImageFile to a JSON string of name, storage path and size.

    Raises ThumbnailError when the size has not been set, since the size is
    required for later deserialization.
    """
    if image_file.size is None:
        raise ThumbnailError('Trying to serialize an ``ImageFile`` with a '
                             '``None`` size.')
    data = {
        'name': image_file.name,
        'storage': image_file.serialize_storage(),
        'size': image_file.size,
    }
    return json.dumps(data)
def deserialize_image_file(s):
    """Rebuild an ImageFile from a serialize_image_file() JSON string.

    The storage backend is wrapped in a LazyObject so it is only imported
    and instantiated on first access.
    """
    data = deserialize(s)
    class LazyStorage(LazyObject):
        def _setup(self):
            self._wrapped = get_module_class(data['storage'])()
    image_file = ImageFile(data['name'], LazyStorage())
    image_file.set_size(data['size'])
    return image_file
class BaseImageFile(object):
    """Read-only geometry interface shared by thumbnail image files.

    Subclasses must set ``size`` to ``[width, height]`` and override
    ``exists()`` and ``url``.
    """
    # [width, height]; populated by concrete subclasses.
    size = []

    def exists(self):
        """Return whether the underlying file exists; subclasses must override.

        Fix: the original did ``raise NotImplemented()``, which fails with a
        ``TypeError: 'NotImplementedType' object is not callable`` instead of
        signalling an abstract method — ``NotImplementedError`` is correct.
        """
        raise NotImplementedError()

    @property
    def width(self):
        return self.size[0]
    x = width

    @property
    def height(self):
        return self.size[1]
    y = height

    def is_portrait(self):
        """True when the image is taller than it is wide."""
        return self.y > self.x

    @property
    def ratio(self):
        """Width / height aspect ratio as a float."""
        return float(self.x) / float(self.y)

    @property
    def url(self):
        """Public URL of the image; subclasses must override (same fix as exists)."""
        raise NotImplementedError()
    src = url
class ImageFile(BaseImageFile):
    """Wrap a file (name + storage backend) and lazily track its image size.

    ``file_`` may be a Django ``File``-like object (anything with ``name``),
    a plain path string, or an absolute URL (served via ``UrlStorage``).
    """
    # Cached [width, height]; populated lazily by ``set_size``.
    _size = None
    def __init__(self, file_, storage=None):
        if not file_:
            raise ThumbnailError('File is empty.')
        # figure out name
        if hasattr(file_, 'name'):
            self.name = file_.name
        else:
            self.name = force_unicode(file_)
        # figure out storage: explicit arg > the file's own storage >
        # UrlStorage for absolute URLs > Django's default storage.
        if storage is not None:
            self.storage = storage
        elif hasattr(file_, 'storage'):
            self.storage = file_.storage
        elif url_pat.match(self.name):
            self.storage = UrlStorage()
        else:
            self.storage = default_storage
        # Strip the storage's filesystem location prefix so ``name`` is
        # always relative to the storage root.
        if hasattr(self.storage, 'location'):
            location = self.storage.location
            if not self.storage.location.endswith("/"):
                location += "/"
            if self.name.startswith(location):
                self.name = self.name[len(location):]
    def __unicode__(self):
        return self.name
    def exists(self):
        return self.storage.exists(self.name)
    def set_size(self, size=None):
        """Cache the image size, computing it from storage/engine if needed."""
        # set the size if given
        if size is not None:
            pass
        # Don't try to set the size the expensive way if it already has a
        # value.
        elif self._size is not None:
            return
        elif hasattr(self.storage, 'image_size'):
            # Storage backends can implement ``image_size`` method that
            # optimizes this.
            size = self.storage.image_size(self.name)
        else:
            # This is the worst case scenario
            image = default.engine.get_image(self)
            size = default.engine.get_image_size(image)
        self._size = list(size)
    @property
    def size(self):
        return self._size
    @property
    def url(self):
        return self.storage.url(self.name)
    def read(self):
        return self.storage.open(self.name).read()
    def write(self, content):
        """Save ``content`` through the storage; invalidates the cached size."""
        if not isinstance(content, File):
            content = ContentFile(content)
        self._size = None
        self.name = self.storage.save(self.name, content)
        return self.name
    def delete(self):
        return self.storage.delete(self.name)
    def serialize_storage(self):
        """Return the dotted import path of the (unwrapped) storage class."""
        if isinstance(self.storage, LazyObject):
            # if storage is wrapped in a lazy object we need to get the real
            # thing.
            if self.storage._wrapped is empty:
                self.storage._setup()
            cls = self.storage._wrapped.__class__
        else:
            cls = self.storage.__class__
        return '%s.%s' % (cls.__module__, cls.__name__)
    @property
    def key(self):
        # Cache key derived from (name, storage class path).
        return tokey(self.name, self.serialize_storage())
    def serialize(self):
        return serialize_image_file(self)
class DummyImageFile(BaseImageFile):
    """Placeholder image with no backing file.

    ``exists`` is always True and ``url`` points at the configured dummy
    image service (``THUMBNAIL_DUMMY_SOURCE``).
    """
    def __init__(self, geometry_string):
        # Derive a concrete [width, height] from the geometry string using
        # the configured dummy aspect ratio.
        self.size = parse_geometry(
            geometry_string,
            settings.THUMBNAIL_DUMMY_RATIO,
        )
    def exists(self):
        return True
    @property
    def url(self):
        # THUMBNAIL_DUMMY_SOURCE is a %-format string taking width/height.
        return settings.THUMBNAIL_DUMMY_SOURCE % (
            {'width': self.x, 'height': self.y}
        )
class UrlStorage(Storage):
    """Read-only pseudo-storage that fetches files over HTTP/FTP."""
    def normalize_url(self, url, charset='utf-8'):
        """Percent-encode path and query so urllib accepts non-ASCII URLs."""
        url = encode(url, charset, 'ignore')
        scheme, netloc, path, qs, anchor = urlsplit(url)
        # Encode to utf8 to prevent urllib KeyError
        path = encode(path, charset, 'ignore')
        path = quote(path, '/%')
        qs = quote_plus(qs, ':&%=')
        return urlparse.urlunsplit((scheme, netloc, path, qs, anchor))
    def open(self, name, mode='rb'):
        # ``mode`` is accepted for Storage API compatibility but ignored.
        return urlopen(
            self.normalize_url(name),
            None,
            settings.THUMBNAIL_URL_TIMEOUT
        )
    def exists(self, name):
        # Existence check == successful fetch; URLError means "missing".
        try:
            self.open(name)
        except URLError:
            return False
        return True
    def url(self, name):
        # The name *is* the URL.
        return name
    def delete(self, name):
        # Remote URLs cannot be deleted; deliberate no-op.
        pass
def delete_all_thumbnails():
    """Remove every generated thumbnail below THUMBNAIL_PREFIX.

    Recursively walks the thumbnail storage, deleting files first and then
    removing the (now empty) directories where the backend supports paths.
    """
    storage = default.storage
    path = os.path.join(storage.location, settings.THUMBNAIL_PREFIX)
    def walk(path):
        dirs, files = storage.listdir(path)
        for f in files:
            storage.delete(os.path.join(path, f))
        for d in dirs:
            directory = os.path.join(path, d)
            walk(directory)
            try:
                full_path = storage.path(directory)
            except Exception:
                # Storage backends without a local filesystem path raise
                # here; leave the empty remote "directory" in place.
                continue
            os.rmdir(full_path)
    walk(path)
|
ryfeus/lambda-packs | refs/heads/master | Tensorflow/source/tensorflow/contrib/tensor_forest/client/eval_metrics.py | 52 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A collection of functions to be used as evaluation metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import losses
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
# Keys under which the estimator exposes its inference tensors.
INFERENCE_PROB_NAME = prediction_key.PredictionKey.PROBABILITIES
INFERENCE_PRED_NAME = prediction_key.PredictionKey.CLASSES
# Key for the forest's global per-feature importance vector.
FEATURE_IMPORTANCE_NAME = 'global_feature_importance'
def _top_k_generator(k):
  """Return a metric function computing streaming top-``k`` accuracy."""
  def _top_k(probabilities, targets):
    targets = math_ops.to_int32(targets)
    # Drop a trailing unit dimension so in_top_k sees a rank-1 target.
    if targets.get_shape().ndims > 1:
      targets = array_ops.squeeze(targets, squeeze_dims=[1])
    return metric_ops.streaming_mean(nn.in_top_k(probabilities, targets, k))
  return _top_k
def _accuracy(predictions, targets, weights=None):
  """Streaming accuracy of class predictions against targets."""
  return metric_ops.streaming_accuracy(predictions, targets, weights=weights)
def _r2(probabilities, targets, weights=None):
  """Streaming R^2 (coefficient of determination) for regression output."""
  targets = math_ops.to_float(targets)
  y_mean = math_ops.reduce_mean(targets, 0)
  squares_total = math_ops.reduce_sum(math_ops.square(targets - y_mean), 0)
  squares_residuals = math_ops.reduce_sum(
      math_ops.square(targets - probabilities), 0)
  # 1 - SS_res / SS_tot, summed over output columns.
  score = 1 - math_ops.reduce_sum(squares_residuals / squares_total)
  return metric_ops.streaming_mean(score, weights=weights)
def _squeeze_and_onehot(targets, depth):
  """Drop the trailing unit dim from ``targets`` and one-hot encode them."""
  targets = array_ops.squeeze(targets, squeeze_dims=[1])
  return array_ops.one_hot(math_ops.to_int32(targets), depth)
def _sigmoid_entropy(probabilities, targets, weights=None):
  """Streaming mean sigmoid cross-entropy against one-hot targets."""
  return metric_ops.streaming_mean(
      losses.sigmoid_cross_entropy(probabilities,
                                   _squeeze_and_onehot(
                                       targets,
                                       array_ops.shape(probabilities)[1])),
      weights=weights)
def _softmax_entropy(probabilities, targets, weights=None):
  """Streaming mean sparse softmax cross-entropy (integer targets)."""
  return metric_ops.streaming_mean(
      losses.sparse_softmax_cross_entropy(probabilities,
                                          math_ops.to_int32(targets)),
      weights=weights)
def _predictions(predictions, unused_targets, **unused_kwargs):
  """Identity "metric": pass the prediction tensor straight through."""
  return predictions
def _class_log_loss(probabilities, targets, weights=None):
  """Streaming mean log loss against one-hot encoded targets."""
  return metric_ops.streaming_mean(
      losses.log_loss(probabilities,
                      _squeeze_and_onehot(targets,
                                          array_ops.shape(probabilities)[1])),
      weights=weights)
def _precision(predictions, targets, weights=None):
  """Streaming precision of binary class predictions."""
  return metric_ops.streaming_precision(predictions, targets, weights=weights)
def _precision_at_thresholds(predictions, targets, weights=None):
  """Streaming precision of the positive-class column at 100 thresholds."""
  return metric_ops.streaming_precision_at_thresholds(
      # Column 1 of the probability matrix: the positive-class score.
      array_ops.slice(predictions, [0, 1], [-1, 1]),
      targets,
      np.arange(
          0, 1, 0.01, dtype=np.float32),
      weights=weights)
def _recall(predictions, targets, weights=None):
  """Streaming recall of binary class predictions."""
  return metric_ops.streaming_recall(predictions, targets, weights=weights)
def _recall_at_thresholds(predictions, targets, weights=None):
  """Streaming recall of the positive-class column at 100 thresholds."""
  return metric_ops.streaming_recall_at_thresholds(
      # Column 1 of the probability matrix: the positive-class score.
      array_ops.slice(predictions, [0, 1], [-1, 1]),
      targets,
      np.arange(
          0, 1, 0.01, dtype=np.float32),
      weights=weights)
def _auc(probs, targets, weights=None):
  """Streaming AUC over the positive-class probability column."""
  return metric_ops.streaming_auc(array_ops.slice(probs, [0, 1], [-1, 1]),
                                  targets, weights=weights)
# Registry mapping metric names to the metric functions defined above.
_EVAL_METRICS = {
    'auc': _auc,
    'sigmoid_entropy': _sigmoid_entropy,
    'softmax_entropy': _softmax_entropy,
    'accuracy': _accuracy,
    'r2': _r2,
    'predictions': _predictions,
    'classification_log_loss': _class_log_loss,
    'precision': _precision,
    'precision_at_thresholds': _precision_at_thresholds,
    'recall': _recall,
    'recall_at_thresholds': _recall_at_thresholds,
    'top_5': _top_k_generator(5)
}
# Which prediction tensor (probabilities, classes or scores) each metric
# consumes; keep the keys in sync with _EVAL_METRICS.
_PREDICTION_KEYS = {
    'auc': INFERENCE_PROB_NAME,
    'sigmoid_entropy': INFERENCE_PROB_NAME,
    'softmax_entropy': INFERENCE_PROB_NAME,
    'accuracy': INFERENCE_PRED_NAME,
    'r2': prediction_key.PredictionKey.SCORES,
    'predictions': INFERENCE_PRED_NAME,
    'classification_log_loss': INFERENCE_PROB_NAME,
    'precision': INFERENCE_PRED_NAME,
    'precision_at_thresholds': INFERENCE_PROB_NAME,
    'recall': INFERENCE_PRED_NAME,
    'recall_at_thresholds': INFERENCE_PROB_NAME,
    'top_5': INFERENCE_PROB_NAME
}
def get_metric(metric_name):
  """Given a metric name, return the corresponding metric function."""
  # Raises KeyError for unknown names.
  return _EVAL_METRICS[metric_name]
def get_prediction_key(metric_name):
  """Return the prediction key naming the tensor the metric consumes."""
  return _PREDICTION_KEYS[metric_name]
|
privacyidea/privacyidea | refs/heads/master | privacyidea/webui/__init__.py | 7 | __author__ = 'cornelius koelbel <cornelius@privacyidea.org>'
|
neerajvashistha/pa-dude | refs/heads/master | lib/python2.7/site-packages/django/contrib/contenttypes/fields.py | 58 | from __future__ import unicode_literals
from collections import defaultdict
from django.contrib.contenttypes.models import ContentType
from django.core import checks
from django.core.exceptions import FieldDoesNotExist, ObjectDoesNotExist
from django.db import DEFAULT_DB_ALIAS, models, router, transaction
from django.db.models import DO_NOTHING, signals
from django.db.models.base import ModelBase, make_foreign_order_accessors
from django.db.models.fields.related import (
ForeignObject, ForeignObjectRel, ReverseManyToOneDescriptor,
lazy_related_operation,
)
from django.db.models.query_utils import PathInfo
from django.utils.encoding import python_2_unicode_compatible, smart_text
from django.utils.functional import cached_property
@python_2_unicode_compatible
class GenericForeignKey(object):
    """
    Provide a generic many-to-one relation through the ``content_type`` and
    ``object_id`` fields.
    This class also doubles as an accessor to the related object (similar to
    ForwardManyToOneDescriptor) by adding itself as a model attribute.
    """
    # Field flags
    auto_created = False
    concrete = False
    editable = False
    hidden = False
    is_relation = True
    many_to_many = False
    many_to_one = True
    one_to_many = False
    one_to_one = False
    related_model = None
    remote_field = None
    def __init__(self, ct_field='content_type', fk_field='object_id', for_concrete_model=True):
        # ct_field/fk_field name the concrete model fields backing this
        # virtual relation.
        self.ct_field = ct_field
        self.fk_field = fk_field
        self.for_concrete_model = for_concrete_model
        self.editable = False
        self.rel = None
        self.column = None
    def contribute_to_class(self, cls, name, **kwargs):
        """Register as a virtual field and install self as a descriptor."""
        self.name = name
        self.model = cls
        self.cache_attr = "_%s_cache" % name
        cls._meta.add_field(self, virtual=True)
        # Only run pre-initialization field assignment on non-abstract models
        if not cls._meta.abstract:
            signals.pre_init.connect(self.instance_pre_init, sender=cls)
        setattr(cls, name, self)
    def get_filter_kwargs_for_object(self, obj):
        """See corresponding method on Field"""
        return {
            self.fk_field: getattr(obj, self.fk_field),
            self.ct_field: getattr(obj, self.ct_field),
        }
    def get_forward_related_filter(self, obj):
        """See corresponding method on RelatedField"""
        return {
            self.fk_field: obj.pk,
            self.ct_field: ContentType.objects.get_for_model(obj).pk,
        }
    def __str__(self):
        model = self.model
        app = model._meta.app_label
        return '%s.%s.%s' % (app, model._meta.object_name, self.name)
    def check(self, **kwargs):
        """System-check entry point: aggregate all field-level errors."""
        errors = []
        errors.extend(self._check_field_name())
        errors.extend(self._check_object_id_field())
        errors.extend(self._check_content_type_field())
        return errors
    def _check_field_name(self):
        # Trailing underscores clash with Django's query-lookup syntax.
        if self.name.endswith("_"):
            return [
                checks.Error(
                    'Field names must not end with an underscore.',
                    hint=None,
                    obj=self,
                    id='fields.E001',
                )
            ]
        else:
            return []
    def _check_object_id_field(self):
        # The object-id field must exist on the model.
        try:
            self.model._meta.get_field(self.fk_field)
        except FieldDoesNotExist:
            return [
                checks.Error(
                    "The GenericForeignKey object ID references the non-existent field '%s'." % self.fk_field,
                    hint=None,
                    obj=self,
                    id='contenttypes.E001',
                )
            ]
        else:
            return []
    def _check_content_type_field(self):
        """
        Check if field named `field_name` in model `model` exists and is a
        valid content_type field (is a ForeignKey to ContentType).
        """
        try:
            field = self.model._meta.get_field(self.ct_field)
        except FieldDoesNotExist:
            return [
                checks.Error(
                    "The GenericForeignKey content type references the non-existent field '%s.%s'." % (
                        self.model._meta.object_name, self.ct_field
                    ),
                    hint=None,
                    obj=self,
                    id='contenttypes.E002',
                )
            ]
        else:
            if not isinstance(field, models.ForeignKey):
                return [
                    checks.Error(
                        "'%s.%s' is not a ForeignKey." % (
                            self.model._meta.object_name, self.ct_field
                        ),
                        hint=(
                            "GenericForeignKeys must use a ForeignKey to "
                            "'contenttypes.ContentType' as the 'content_type' field."
                        ),
                        obj=self,
                        id='contenttypes.E003',
                    )
                ]
            elif field.remote_field.model != ContentType:
                return [
                    checks.Error(
                        "'%s.%s' is not a ForeignKey to 'contenttypes.ContentType'." % (
                            self.model._meta.object_name, self.ct_field
                        ),
                        hint=(
                            "GenericForeignKeys must use a ForeignKey to "
                            "'contenttypes.ContentType' as the 'content_type' field."
                        ),
                        obj=self,
                        id='contenttypes.E004',
                    )
                ]
            else:
                return []
    def instance_pre_init(self, signal, sender, args, kwargs, **_kwargs):
        """
        Handle initializing an object with the generic FK instead of
        content_type and object_id fields.
        """
        if self.name in kwargs:
            value = kwargs.pop(self.name)
            if value is not None:
                kwargs[self.ct_field] = self.get_content_type(obj=value)
                kwargs[self.fk_field] = value._get_pk_val()
            else:
                kwargs[self.ct_field] = None
                kwargs[self.fk_field] = None
    def get_content_type(self, obj=None, id=None, using=None):
        """Resolve the ContentType for an object or a raw content-type id."""
        if obj is not None:
            return ContentType.objects.db_manager(obj._state.db).get_for_model(
                obj, for_concrete_model=self.for_concrete_model)
        elif id is not None:
            return ContentType.objects.db_manager(using).get_for_id(id)
        else:
            # This should never happen. I love comments like this, don't you?
            raise Exception("Impossible arguments to GFK.get_content_type!")
    def get_prefetch_queryset(self, instances, queryset=None):
        """Fetch related objects for ``instances`` with one query per model."""
        if queryset is not None:
            raise ValueError("Custom queryset can't be used for this lookup.")
        # For efficiency, group the instances by content type and then do one
        # query per model
        fk_dict = defaultdict(set)
        # We need one instance for each group in order to get the right db:
        instance_dict = {}
        ct_attname = self.model._meta.get_field(self.ct_field).get_attname()
        for instance in instances:
            # We avoid looking for values if either ct_id or fkey value is None
            ct_id = getattr(instance, ct_attname)
            if ct_id is not None:
                fk_val = getattr(instance, self.fk_field)
                if fk_val is not None:
                    fk_dict[ct_id].add(fk_val)
                    instance_dict[ct_id] = instance
        ret_val = []
        for ct_id, fkeys in fk_dict.items():
            instance = instance_dict[ct_id]
            ct = self.get_content_type(id=ct_id, using=instance._state.db)
            ret_val.extend(ct.get_all_objects_for_this_type(pk__in=fkeys))
        # For doing the join in Python, we have to match both the FK val and the
        # content type, so we use a callable that returns a (fk, class) pair.
        def gfk_key(obj):
            ct_id = getattr(obj, ct_attname)
            if ct_id is None:
                return None
            else:
                model = self.get_content_type(id=ct_id,
                                              using=obj._state.db).model_class()
                return (model._meta.pk.get_prep_value(getattr(obj, self.fk_field)),
                        model)
        return (ret_val,
                lambda obj: (obj._get_pk_val(), obj.__class__),
                gfk_key,
                True,
                self.cache_attr)
    def is_cached(self, instance):
        # The cache attribute only exists once __get__/__set__ has run.
        return hasattr(instance, self.cache_attr)
    def __get__(self, instance, instance_type=None):
        """Descriptor read: return the cached related object, fetching once."""
        if instance is None:
            return self
        try:
            return getattr(instance, self.cache_attr)
        except AttributeError:
            rel_obj = None
            # Make sure to use ContentType.objects.get_for_id() to ensure that
            # lookups are cached (see ticket #5570). This takes more code than
            # the naive ``getattr(instance, self.ct_field)``, but has better
            # performance when dealing with GFKs in loops and such.
            f = self.model._meta.get_field(self.ct_field)
            ct_id = getattr(instance, f.get_attname(), None)
            if ct_id is not None:
                ct = self.get_content_type(id=ct_id, using=instance._state.db)
                try:
                    rel_obj = ct.get_object_for_this_type(pk=getattr(instance, self.fk_field))
                except ObjectDoesNotExist:
                    pass
            setattr(instance, self.cache_attr, rel_obj)
            return rel_obj
    def __set__(self, instance, value):
        """Descriptor write: sync content type, object id and the cache."""
        ct = None
        fk = None
        if value is not None:
            ct = self.get_content_type(obj=value)
            fk = value._get_pk_val()
        setattr(instance, self.ct_field, ct)
        setattr(instance, self.fk_field, fk)
        setattr(instance, self.cache_attr, value)
class GenericRel(ForeignObjectRel):
    """
    Used by GenericRelation to store information about the relation.
    """
    def __init__(self, field, to, related_name=None, related_query_name=None, limit_choices_to=None):
        # Reverse accessor is hidden ('+') unless an explicit query name is
        # given; deletion is handled by GenericRelation, hence DO_NOTHING.
        super(GenericRel, self).__init__(
            field, to,
            related_name=related_query_name or '+',
            related_query_name=related_query_name,
            limit_choices_to=limit_choices_to,
            on_delete=DO_NOTHING,
        )
class GenericRelation(ForeignObject):
    """
    Provide a reverse to a relation created by a GenericForeignKey.
    """
    # Field flags
    auto_created = False
    many_to_many = False
    many_to_one = False
    one_to_many = True
    one_to_one = False
    rel_class = GenericRel
    def __init__(self, to, object_id_field='object_id', content_type_field='content_type',
                 for_concrete_model=True, related_query_name=None, limit_choices_to=None, **kwargs):
        kwargs['rel'] = self.rel_class(
            self, to,
            related_query_name=related_query_name,
            limit_choices_to=limit_choices_to,
        )
        kwargs['blank'] = True
        kwargs['on_delete'] = models.CASCADE
        kwargs['editable'] = False
        kwargs['serialize'] = False
        # This construct is somewhat of an abuse of ForeignObject. This field
        # represents a relation from pk to object_id field. But, this relation
        # isn't direct, the join is generated reverse along foreign key. So,
        # the from_field is object_id field, to_field is pk because of the
        # reverse join.
        super(GenericRelation, self).__init__(
            to, from_fields=[object_id_field], to_fields=[], **kwargs)
        self.object_id_field_name = object_id_field
        self.content_type_field_name = content_type_field
        self.for_concrete_model = for_concrete_model
    def check(self, **kwargs):
        # Extend the standard field checks with a GFK-existence check.
        errors = super(GenericRelation, self).check(**kwargs)
        errors.extend(self._check_generic_foreign_key_existence())
        return errors
    def _check_generic_foreign_key_existence(self):
        # The target model must carry a matching GenericForeignKey.
        target = self.remote_field.model
        if isinstance(target, ModelBase):
            fields = target._meta.virtual_fields
            if any(isinstance(field, GenericForeignKey) and
                    field.ct_field == self.content_type_field_name and
                    field.fk_field == self.object_id_field_name
                    for field in fields):
                return []
            else:
                return [
                    checks.Error(
                        ("The GenericRelation defines a relation with the model "
                         "'%s.%s', but that model does not have a GenericForeignKey.") % (
                            target._meta.app_label, target._meta.object_name
                        ),
                        hint=None,
                        obj=self,
                        id='contenttypes.E004',
                    )
                ]
        else:
            return []
    def resolve_related_fields(self):
        # Join the target's object_id column to this model's primary key.
        self.to_fields = [self.model._meta.pk.name]
        return [(self.remote_field.model._meta.get_field(self.object_id_field_name), self.model._meta.pk)]
    def get_path_info(self):
        opts = self.remote_field.model._meta
        target = opts.pk
        return [PathInfo(self.model._meta, opts, (target,), self.remote_field, True, False)]
    def get_reverse_path_info(self):
        opts = self.model._meta
        from_opts = self.remote_field.model._meta
        return [PathInfo(from_opts, opts, (opts.pk,), self, not self.unique, False)]
    def get_choices_default(self):
        return super(GenericRelation, self).get_choices(include_blank=False)
    def value_to_string(self, obj):
        # Serialize as the list of related objects' primary keys.
        qs = getattr(obj, self.name).all()
        return smart_text([instance._get_pk_val() for instance in qs])
    def contribute_to_class(self, cls, name, **kwargs):
        kwargs['virtual_only'] = True
        super(GenericRelation, self).contribute_to_class(cls, name, **kwargs)
        self.model = cls
        setattr(cls, self.name, ReverseGenericManyToOneDescriptor(self.remote_field))
        # Add get_RELATED_order() and set_RELATED_order() methods if the model
        # on the other end of this relation is ordered with respect to this.
        def matching_gfk(field):
            return (
                isinstance(field, GenericForeignKey) and
                self.content_type_field_name == field.ct_field and
                self.object_id_field_name == field.fk_field
            )
        def make_generic_foreign_order_accessors(related_model, model):
            if matching_gfk(model._meta.order_with_respect_to):
                make_foreign_order_accessors(model, related_model)
        lazy_related_operation(make_generic_foreign_order_accessors, self.model, self.remote_field.model)
    def set_attributes_from_rel(self):
        # Nothing to copy from the rel; deliberate no-op override.
        pass
    def get_internal_type(self):
        return "ManyToManyField"
    def get_content_type(self):
        """
        Return the content type associated with this field's model.
        """
        return ContentType.objects.get_for_model(self.model,
                                                 for_concrete_model=self.for_concrete_model)
    def get_extra_restriction(self, where_class, alias, remote_alias):
        # Restrict joins to rows whose content type matches this model.
        field = self.remote_field.model._meta.get_field(self.content_type_field_name)
        contenttype_pk = self.get_content_type().pk
        cond = where_class()
        lookup = field.get_lookup('exact')(field.get_col(remote_alias), contenttype_pk)
        cond.add(lookup, 'AND')
        return cond
    def bulk_related_objects(self, objs, using=DEFAULT_DB_ALIAS):
        """
        Return all objects related to ``objs`` via this ``GenericRelation``.
        """
        return self.remote_field.model._base_manager.db_manager(using).filter(**{
            "%s__pk" % self.content_type_field_name: ContentType.objects.db_manager(using).get_for_model(
                self.model, for_concrete_model=self.for_concrete_model).pk,
            "%s__in" % self.object_id_field_name: [obj.pk for obj in objs]
        })
class ReverseGenericManyToOneDescriptor(ReverseManyToOneDescriptor):
    """
    Accessor to the related objects manager on the one-to-many relation created
    by GenericRelation.
    In the example::
        class Post(Model):
            comments = GenericRelation(Comment)
    ``post.comments`` is a ReverseGenericManyToOneDescriptor instance.
    """
    @cached_property
    def related_manager_cls(self):
        # Build (and cache per descriptor) a manager subclass specialized
        # for this generic relation.
        return create_generic_related_manager(
            self.rel.model._default_manager.__class__,
            self.rel,
        )
def create_generic_related_manager(superclass, rel):
    """
    Factory function to create a manager that subclasses another manager
    (generally the default manager of a given model) and adds behaviors
    specific to generic relations.
    """
    class GenericRelatedObjectManager(superclass):
        def __init__(self, instance=None):
            super(GenericRelatedObjectManager, self).__init__()
            self.instance = instance
            self.model = rel.model
            # Resolve the content type on the instance's database so
            # multi-db setups look it up in the right place.
            content_type = ContentType.objects.db_manager(instance._state.db).get_for_model(
                instance, for_concrete_model=rel.field.for_concrete_model)
            self.content_type = content_type
            self.content_type_field_name = rel.field.content_type_field_name
            self.object_id_field_name = rel.field.object_id_field_name
            self.prefetch_cache_name = rel.field.attname
            self.pk_val = instance._get_pk_val()
            # Base filter selecting rows that point back at ``instance``.
            self.core_filters = {
                '%s__pk' % self.content_type_field_name: content_type.id,
                self.object_id_field_name: self.pk_val,
            }
        def __call__(self, **kwargs):
            # We use **kwargs rather than a kwarg argument to enforce the
            # `manager='manager_name'` syntax.
            manager = getattr(self.model, kwargs.pop('manager'))
            manager_class = create_generic_related_manager(manager.__class__, rel)
            return manager_class(instance=self.instance)
        do_not_call_in_templates = True
        def __str__(self):
            return repr(self)
        def get_queryset(self):
            # Serve from the prefetch cache when populated; otherwise filter
            # the superclass queryset down to this instance's related rows.
            try:
                return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
            except (AttributeError, KeyError):
                db = self._db or router.db_for_read(self.model, instance=self.instance)
                return super(GenericRelatedObjectManager, self).get_queryset().using(db).filter(**self.core_filters)
        def get_prefetch_queryset(self, instances, queryset=None):
            if queryset is None:
                queryset = super(GenericRelatedObjectManager, self).get_queryset()
            queryset._add_hints(instance=instances[0])
            queryset = queryset.using(queryset._db or self._db)
            query = {
                '%s__pk' % self.content_type_field_name: self.content_type.id,
                '%s__in' % self.object_id_field_name: set(obj._get_pk_val() for obj in instances)
            }
            # We (possibly) need to convert object IDs to the type of the
            # instances' PK in order to match up instances:
            object_id_converter = instances[0]._meta.pk.to_python
            return (queryset.filter(**query),
                    lambda relobj: object_id_converter(getattr(relobj, self.object_id_field_name)),
                    lambda obj: obj._get_pk_val(),
                    False,
                    self.prefetch_cache_name)
        def add(self, *objs, **kwargs):
            bulk = kwargs.pop('bulk', True)
            db = router.db_for_write(self.model, instance=self.instance)
            def check_and_update_obj(obj):
                # Point ``obj``'s generic FK at ``self.instance``.
                if not isinstance(obj, self.model):
                    raise TypeError("'%s' instance expected, got %r" % (
                        self.model._meta.object_name, obj
                    ))
                setattr(obj, self.content_type_field_name, self.content_type)
                setattr(obj, self.object_id_field_name, self.pk_val)
            if bulk:
                pks = []
                for obj in objs:
                    if obj._state.adding or obj._state.db != db:
                        # Fixed garbled error message (trailing "but must
                        # be." fragment removed).
                        raise ValueError(
                            "%r instance isn't saved. Use bulk=False or save "
                            "the object first." % obj
                        )
                    check_and_update_obj(obj)
                    pks.append(obj.pk)
                self.model._base_manager.using(db).filter(pk__in=pks).update(**{
                    self.content_type_field_name: self.content_type,
                    self.object_id_field_name: self.pk_val,
                })
            else:
                with transaction.atomic(using=db, savepoint=False):
                    for obj in objs:
                        check_and_update_obj(obj)
                        obj.save()
        add.alters_data = True
        def remove(self, *objs, **kwargs):
            if not objs:
                return
            bulk = kwargs.pop('bulk', True)
            self._clear(self.filter(pk__in=[o.pk for o in objs]), bulk)
        remove.alters_data = True
        def clear(self, **kwargs):
            bulk = kwargs.pop('bulk', True)
            self._clear(self, bulk)
        clear.alters_data = True
        def _clear(self, queryset, bulk):
            db = router.db_for_write(self.model, instance=self.instance)
            queryset = queryset.using(db)
            if bulk:
                # `QuerySet.delete()` creates its own atomic block which
                # contains the `pre_delete` and `post_delete` signal handlers.
                queryset.delete()
            else:
                with transaction.atomic(using=db, savepoint=False):
                    for obj in queryset:
                        obj.delete()
        _clear.alters_data = True
        def set(self, objs, **kwargs):
            # Force evaluation of `objs` in case it's a queryset whose value
            # could be affected by `manager.clear()`. Refs #19816.
            objs = tuple(objs)
            bulk = kwargs.pop('bulk', True)
            clear = kwargs.pop('clear', False)
            db = router.db_for_write(self.model, instance=self.instance)
            with transaction.atomic(using=db, savepoint=False):
                if clear:
                    self.clear()
                    self.add(*objs, bulk=bulk)
                else:
                    # Diff against the current set: remove stale, add new.
                    old_objs = set(self.using(db).all())
                    new_objs = []
                    for obj in objs:
                        if obj in old_objs:
                            old_objs.remove(obj)
                        else:
                            new_objs.append(obj)
                    self.remove(*old_objs)
                    self.add(*new_objs, bulk=bulk)
        set.alters_data = True
        def create(self, **kwargs):
            kwargs[self.content_type_field_name] = self.content_type
            kwargs[self.object_id_field_name] = self.pk_val
            db = router.db_for_write(self.model, instance=self.instance)
            return super(GenericRelatedObjectManager, self).using(db).create(**kwargs)
        create.alters_data = True
        def get_or_create(self, **kwargs):
            kwargs[self.content_type_field_name] = self.content_type
            kwargs[self.object_id_field_name] = self.pk_val
            db = router.db_for_write(self.model, instance=self.instance)
            return super(GenericRelatedObjectManager, self).using(db).get_or_create(**kwargs)
        get_or_create.alters_data = True
        def update_or_create(self, **kwargs):
            kwargs[self.content_type_field_name] = self.content_type
            kwargs[self.object_id_field_name] = self.pk_val
            db = router.db_for_write(self.model, instance=self.instance)
            return super(GenericRelatedObjectManager, self).using(db).update_or_create(**kwargs)
        update_or_create.alters_data = True
    return GenericRelatedObjectManager
|
sahilshekhawat/sympy | refs/heads/master | sympy/series/tests/test_residues.py | 5 | from sympy import residue, Symbol, Function, sin, S, I, exp, log, pi, factorial
from sympy.utilities.pytest import XFAIL, raises
from sympy.abc import x, z, a, s
def test_basic1():
    """Residues at the origin for simple powers of x."""
    cases = [
        (1/x, 1),
        (-2/x, -2),
        (81/x, 81),
        (1/x**2, 0),
        (0, 0),
        (5, 0),
        (x, 0),
        (x**2, 0),
    ]
    for expr, expected in cases:
        assert residue(expr, x, 0) == expected
def test_basic2():
    """Residues evaluated away from the pole are all zero."""
    cases = [
        (1/x, 1),
        (-2/x, 1),
        (81/x, -1),
        (1/x**2, 1),
        (0, 1),
        (5, 1),
        (x, 1),
        (x**2, 5),
    ]
    for expr, point in cases:
        assert residue(expr, x, point) == 0
def _test_f():
    # Disabled (leading underscore keeps it out of the test runner):
    # residue of f(x)/x**5 at 0 should equal f''''(0) / 4!.
    f = Function("f")
    assert residue(f(x)/x**5, x, 0) == f(x).diff(x, 4).subs(x, 0)/24
def test_functions():
    # Poles of 1/sin(x)**n at the origin.
    assert residue(1/sin(x), x, 0) == 1
    assert residue(2/sin(x), x, 0) == 2
    assert residue(1/sin(x)**2, x, 0) == 0
    assert residue(1/sin(x)**5, x, 0) == S(3)/8
def test_expressions():
    # Simple rational functions with poles away from the origin.
    assert residue(1/(x + 1), x, 0) == 0
    assert residue(1/(x + 1), x, -1) == 1
    assert residue(1/(x**2 + 1), x, -1) == 0
    assert residue(1/(x**2 + 1), x, I) == -I/2
    assert residue(1/(x**2 + 1), x, -I) == I/2
    assert residue(1/(x**4 + 1), x, 0) == 0
@XFAIL
def test_expressions_failing():
    # NOTE(review): ``sqrt`` is not imported at the top of this file, so the
    # first assertion raises NameError; the XFAIL marker hides that.
    assert residue(1/(x**4 + 1), x, exp(I*pi/4)) == -(S(1)/4 + I/4)/sqrt(2)
    n = Symbol('n', integer=True, positive=True)
    assert residue(exp(z)/(z - pi*I/4*a)**n, z, I*pi*a) == \
        exp(I*pi*a/4)/factorial(n - 1)
    assert residue(1/(x**2 + a**2)**2, x, a*I) == -I/4/a**3
def test_NotImplemented():
    # Essential singularity at 0 is unsupported and must raise.
    raises(NotImplementedError, lambda: residue(exp(1/z), z, 0))
def test_bug():
    # Regression: residue of a 2**z expression at a double pole at z = 0.
    assert residue(2**(z)*(s + z)*(1 - s - z)/z**2, z, 0) == \
        1 + s*log(2) - s**2*log(2) - 2*s
def test_issue_5654():
    # Second-order pole at x = a*I.
    assert residue(1/(x**2 + a**2)**2, x, a*I) == -I/(4*a**3)
def test_issue_6499():
    # Simple pole of 1/(e**z - 1) at z = 0 has residue 1.
    assert residue(1/(exp(z) - 1), z, 0) == 1
|
HwisooSo/gemV-update | refs/heads/gemV | src/python/m5/simulate.py | 3 | # Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2005 The Regents of The University of Michigan
# Copyright (c) 2010 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
# Steve Reinhardt
import atexit
import os
import sys
# import the SWIG-wrapped main C++ functions
import _m5.drain
import _m5.core
from _m5.stats import updateEvents as updateStatEvents
import stats
import SimObject
import ticks
import objects
from m5.util.dot_writer import do_dot, do_dvfs_dot
from util import fatal
from util import attrdict
# define a MaxTick parameter, unsigned 64 bit
MaxTick = 2**64 - 1
# Map memory-mode names to the parameter enum values used by the C++ side.
_memory_modes = {
    "atomic" : objects.params.atomic,
    "timing" : objects.params.timing,
    "atomic_noncaching" : objects.params.atomic_noncaching,
}
# Singleton C++ drain manager coordinating simulator drain/resume cycles.
_drain_manager = _m5.drain.DrainManager.instance()
# The final hook to generate .ini files. Called from the user script
# once the config is built.
def instantiate(ckpt_dir=None):
    """Instantiate the configured simulation.

    Builds the C++ SimObject hierarchy from the Python configuration
    rooted at Root.getInstance(), wires up ports, registers statistics
    and probes, and optionally restores simulator state.

    Arguments:
        ckpt_dir -- checkpoint directory to restore from, or None to
                    start from a fresh initial state.
    """
    from m5 import options

    root = objects.Root.getInstance()

    if not root:
        fatal("Need to instantiate Root() before calling instantiate()")

    # we need to fix the global frequency
    ticks.fixGlobalFrequency()

    # Make sure SimObject-valued params are in the configuration
    # hierarchy so we catch them with future descendants() walks
    for obj in root.descendants(): obj.adoptOrphanParams()

    # Unproxy in sorted order for determinism
    for obj in root.descendants(): obj.unproxyParams()

    if options.dump_config:
        # open() replaces the Python-2-only file() builtin, and the
        # with-block guarantees the handle is closed even on error.
        with open(os.path.join(options.outdir, options.dump_config),
                  'w') as ini_file:
            # Print ini sections in sorted order for easier diffing
            for obj in sorted(root.descendants(), key=lambda o: o.path()):
                obj.print_ini(ini_file)

    if options.json_config:
        try:
            import json
            with open(os.path.join(options.outdir, options.json_config),
                      'w') as json_file:
                json.dump(root.get_config_as_dict(), json_file, indent=4)
        except ImportError:
            # Very old Pythons lack the json module; skip the dump quietly.
            pass

    do_dot(root, options.outdir, options.dot_config)

    # Initialize the global statistics
    stats.initSimStats()

    # Create the C++ sim objects and connect ports
    for obj in root.descendants(): obj.createCCObject()
    for obj in root.descendants(): obj.connectPorts()

    # Do a second pass to finish initializing the sim objects
    for obj in root.descendants(): obj.init()

    # Do a third pass to initialize statistics
    for obj in root.descendants(): obj.regStats()

    # Do a fourth pass to initialize probe points
    for obj in root.descendants(): obj.regProbePoints()

    # Do a fifth pass to connect probe listeners
    for obj in root.descendants(): obj.regProbeListeners()

    # We want to generate the DVFS diagram for the system. This can only be
    # done once all of the CPP objects have been created and initialised so
    # that we are able to figure out which object belongs to which domain.
    if options.dot_dvfs_config:
        do_dvfs_dot(root, options.outdir, options.dot_dvfs_config)

    # We're done registering statistics. Enable the stats package now.
    stats.enable()

    # Restore checkpoint (if any)
    if ckpt_dir:
        _drain_manager.preCheckpointRestore()
        ckpt = _m5.core.getCheckpoint(ckpt_dir)
        _m5.core.unserializeGlobals(ckpt)
        for obj in root.descendants(): obj.loadState(ckpt)
    else:
        for obj in root.descendants(): obj.initState()

    # Check to see if any of the stat events are in the past after resuming
    # from a checkpoint. If so, this call will shift them to be at a valid
    # time.
    updateStatEvents()
# True until the first call to simulate(); guards one-time startup work.
need_startup = True
def simulate(*args, **kwargs):
    """Run the event loop, forwarding *args/**kwargs to _m5.event.simulate().
    On the first call only, each SimObject's startup() hook is invoked and
    the stats-dump and C++ cleanup handlers are registered with atexit.
    NOTE(review): relies on _m5.event being reachable as an attribute of the
    _m5 package; it is not imported explicitly at the top of this module --
    confirm the package exposes it.
    """
    global need_startup
    if need_startup:
        root = objects.Root.getInstance()
        for obj in root.descendants(): obj.startup()
        need_startup = False
        # Python exit handlers happen in reverse order.
        # We want to dump stats last.
        atexit.register(stats.dump)
        # register our C++ exit callback function with Python
        atexit.register(_m5.core.doExitCleanup)
        # Reset to put the stats in a consistent state.
        stats.reset()
    # Resume from a drained state (e.g. after a checkpoint or CPU switch)
    # before re-entering the event loop.
    if _drain_manager.isDrained():
        _drain_manager.resume()
    return _m5.event.simulate(*args, **kwargs)
def drain():
    """Drain the simulator in preparation of a checkpoint or memory mode
    switch.
    This operation is a no-op if the simulator is already in the
    Drained state.
    """
    # Try to drain all objects. Draining might not be completed unless
    # all objects return that they are drained on the first call. This
    # is because as objects drain they may cause other objects to no
    # longer be drained.
    def _drain():
        # Try to drain the system. The drain is successful if all
        # objects are done without simulation. We need to simulate
        # more if not.
        if _drain_manager.tryDrain():
            return True
        # WARNING: if a valid exit event occurs while draining, it
        # will not get returned to the user script
        exit_event = _m5.event.simulate()
        while exit_event.getCause() != 'Finished drain':
            exit_event = simulate()
        # The event loop ran, so object state may have changed; report
        # not-yet-drained and let the caller retry.
        return False
    # Don't try to drain a system that is already drained
    is_drained = _drain_manager.isDrained()
    while not is_drained:
        is_drained = _drain()
    assert _drain_manager.isDrained(), "Drain state inconsistent"
def memWriteback(root):
    """Invoke memWriteback() on every object in the hierarchy under *root*."""
    for descendant in root.descendants():
        descendant.memWriteback()
def memInvalidate(root):
    """Invoke memInvalidate() on every object in the hierarchy under *root*."""
    for descendant in root.descendants():
        descendant.memInvalidate()
def checkpoint(dir):
    """Serialize the state of the whole simulated system into *dir*.

    The simulator is drained and the memory system written back first so
    the on-disk image is consistent.

    Raises:
        TypeError -- if no Root object has been instantiated.
    """
    root = objects.Root.getInstance()
    if not isinstance(root, objects.Root):
        # Call-style raise (and print-as-function below) behave identically
        # on Python 2.6+ and remain valid under Python 3, unlike the old
        # `raise TypeError, "..."` / `print "..."` statement forms.
        raise TypeError("Checkpoint must be called on a root object.")

    drain()
    memWriteback(root)
    print("Writing checkpoint")

    _m5.core.serializeAll(dir)
def _changeMemoryMode(system, mode):
    """Switch *system* to memory *mode* if it is not already there.

    Arguments:
        system -- objects.Root or objects.System instance.
        mode -- one of the values in _memory_modes.

    Raises:
        TypeError -- if *system* is neither a Root nor a System.
    """
    if not isinstance(system, (objects.Root, objects.System)):
        # Cross-version raise form (the old `raise TypeError, "..."`
        # statement is a syntax error on Python 3).
        raise TypeError("Parameter of type '%s'. Must be type %s or %s." %
                        (type(system), objects.Root, objects.System))
    if system.getMemoryMode() != mode:
        system.setMemoryMode(mode)
    else:
        print("System already in target mode. Memory mode unchanged.")
def switchCpus(system, cpuList, verbose=True):
    """Switch CPUs in a system.

    Note: This method may switch the memory mode of the system if that
    is required by the CPUs. It may also flush all caches in the
    system.

    Arguments:
      system -- Simulated system.
      cpuList -- (old_cpu, new_cpu) tuples
    """
    if verbose:
        print("switching cpus")

    if not isinstance(cpuList, list):
        raise RuntimeError("Must pass a list to this function")
    for item in cpuList:
        if not isinstance(item, tuple) or len(item) != 2:
            raise RuntimeError("List must have tuples of (oldCPU,newCPU)")

    old_cpus = [old_cpu for old_cpu, new_cpu in cpuList]
    new_cpus = [new_cpu for old_cpu, new_cpu in cpuList]
    old_cpu_set = set(old_cpus)
    # All new CPUs must require the same memory mode; use the first one
    # as the reference.
    memory_mode_name = new_cpus[0].memory_mode()
    for old_cpu, new_cpu in cpuList:
        if not isinstance(old_cpu, objects.BaseCPU):
            raise TypeError("%s is not of type BaseCPU" % old_cpu)
        if not isinstance(new_cpu, objects.BaseCPU):
            raise TypeError("%s is not of type BaseCPU" % new_cpu)
        if new_cpu in old_cpu_set:
            # BUG FIX: this message previously interpolated old_cpu.
            raise RuntimeError(
                "New CPU (%s) is in the list of old CPUs." % (new_cpu,))
        if not new_cpu.switchedOut():
            raise RuntimeError(
                "New CPU (%s) is already active." % (new_cpu,))
        if not new_cpu.support_take_over():
            # BUG FIX: this message previously interpolated old_cpu.
            raise RuntimeError(
                "New CPU (%s) does not support CPU handover." % (new_cpu,))
        if new_cpu.memory_mode() != memory_mode_name:
            raise RuntimeError(
                "%s and %s require different memory modes." % (new_cpu,
                                                               new_cpus[0]))
        if old_cpu.switchedOut():
            # BUG FIX: this message previously interpolated new_cpu.
            raise RuntimeError(
                "Old CPU (%s) is inactive." % (old_cpu,))
        if not old_cpu.support_take_over():
            raise RuntimeError(
                "Old CPU (%s) does not support CPU handover." % (old_cpu,))

    try:
        memory_mode = _memory_modes[memory_mode_name]
    except KeyError:
        raise RuntimeError("Invalid memory mode (%s)" % memory_mode_name)

    drain()

    # Now all of the CPUs are ready to be switched out
    for old_cpu, new_cpu in cpuList:
        old_cpu.switchOut()

    # Change the memory mode if required. We check if this is needed
    # to avoid printing a warning if no switch was performed.
    if system.getMemoryMode() != memory_mode:
        # Flush the memory system if we are switching to a memory mode
        # that disables caches. This typically happens when switching to a
        # hardware virtualized CPU.
        if memory_mode == objects.params.atomic_noncaching:
            memWriteback(system)
            memInvalidate(system)
        _changeMemoryMode(system, memory_mode)

    for old_cpu, new_cpu in cpuList:
        new_cpu.takeOverFrom(old_cpu)
def notifyFork(root):
    """Invoke notifyFork() on every object in the hierarchy under *root*."""
    for descendant in root.descendants():
        descendant.notifyFork()
# Number of successful fork() calls made by this (parent) process; used to
# build the default child output-directory suffix.
fork_count = 0
def fork(simout="%(parent)s.f%(fork_seq)i"):
    """Fork the simulator.

    This function forks the simulator. After forking the simulator,
    the child process gets its output files redirected to a new output
    directory. The default name of the output directory is the same as
    the parent with the suffix ".fN" added where N is the fork
    sequence number. The name of the output directory can be
    overridden using the simout keyword argument.

    Output file formatting dictionary:
        parent -- Path to the parent process's output directory.
        fork_seq -- Fork sequence number.
        pid -- PID of the child process.

    Keyword Arguments:
      simout -- New simulation output directory.

    Return Value:
      pid of the child process or 0 if running in the child.
    """
    from m5 import options
    global fork_count

    if not _m5.core.listenersDisabled():
        raise RuntimeError("Can not fork a simulator with listeners enabled")

    drain()

    # The previous `try: ... except OSError, e: raise e` wrapper only
    # re-raised the same exception (and is a syntax error on Python 3);
    # let any OSError propagate directly.
    pid = os.fork()
    if pid == 0:
        # In child, notify objects of the fork
        root = objects.Root.getInstance()
        notifyFork(root)
        # Setup a new output directory
        parent = options.outdir
        options.outdir = simout % {
            "parent" : parent,
            "fork_seq" : fork_count,
            "pid" : os.getpid(),
            }
        _m5.core.setOutputDir(options.outdir)
    else:
        fork_count += 1

    return pid
from _m5.core import disableAllListeners, listenersDisabled
from _m5.core import curTick
|
c0defreak/python-for-android | refs/heads/master | python-modules/twisted/twisted/plugins/twisted_conch.py | 61 | # Copyright (c) 2001-2008 Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.application.service import ServiceMaker
# twistd plugin descriptors: each ServiceMaker advertises a tap module's
# makeService() implementation to the `twistd` command line without
# importing that module until the service is actually used.
# `twistd conch` -- standalone Conch SSH server.
TwistedSSH = ServiceMaker(
    "Twisted Conch Server",
    "twisted.conch.tap",
    "A Conch SSH service.",
    "conch")
# `twistd manhole` -- remote interactive Python console over ssh/telnet.
TwistedManhole = ServiceMaker(
    "Twisted Manhole (new)",
    "twisted.conch.manhole_tap",
    ("An interactive remote debugger service accessible via telnet "
     "and ssh and providing syntax coloring and basic line editing "
     "functionality."),
    "manhole")
|
akiyoko/oscar_project | refs/heads/master | src/oscar/views/__init__.py | 68 | from django.shortcuts import render
def handler403(request):
    """Render the project-wide 403 (forbidden) error page."""
    response = render(request, '403.html', status=403)
    return response
def handler404(request):
    """Render the project-wide 404 (not found) error page."""
    response = render(request, '404.html', status=404)
    return response
def handler500(request):
    """Render the project-wide 500 (server error) page."""
    response = render(request, '500.html', status=500)
    return response
def sort_queryset(queryset, request, allowed_sorts, default=None):
    """Order *queryset* from the request's 'sort' and 'dir' GET parameters.

    Only fields listed in *allowed_sorts* are honoured; 'dir=desc' reverses
    the ordering.  When no valid sort is requested, *default* (if given) is
    used; otherwise the queryset is returned unchanged.
    """
    requested = request.GET.get('sort', None)
    if requested in allowed_sorts:
        prefix = '-' if request.GET.get('dir', 'asc') == 'desc' else ''
        return queryset.order_by(prefix + requested)
    if default:
        return queryset.order_by(default)
    return queryset
|
rationalAgent/edx-platform-custom | refs/heads/master | common/djangoapps/external_auth/tests/test_shib.py | 2 | """
Tests for Shibboleth Authentication
@jbau
"""
import unittest
from django.conf import settings
from django.http import HttpResponseRedirect
from django.test.client import RequestFactory, Client as DjangoTestClient
from django.test.utils import override_settings
from django.core.urlresolvers import reverse
from django.contrib.auth.models import AnonymousUser, User
from django.contrib.sessions.backends.base import SessionBase
from django.utils.importlib import import_module
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.inheritance import own_metadata
from xmodule.modulestore.django import modulestore
from courseware.tests.tests import TEST_DATA_MONGO_MODULESTORE
from external_auth.models import ExternalAuthMap
from external_auth.views import shib_login, course_specific_login, course_specific_register
from student.views import create_account, change_enrollment
from student.models import UserProfile, Registration, CourseEnrollment
from student.tests.factories import UserFactory
#Shib is supposed to provide 'REMOTE_USER', 'givenName', 'sn', 'mail', 'Shib-Identity-Provider'
#attributes via request.META. We can count on 'Shib-Identity-Provider', and 'REMOTE_USER' being present
#b/c of how mod_shib works but should test the behavior with the rest of the attributes present/missing
#For the sake of python convention we'll make all of these variable names ALL_CAPS
IDP = 'https://idp.stanford.edu/'
REMOTE_USER = 'test_user@stanford.edu'
MAILS = [None, '', 'test_user@stanford.edu']
GIVENNAMES = [None, '', 'Jason', 'jas\xc3\xb6n; John; bob']  # At Stanford, the givenNames can be a list delimited by ';'
SNS = [None, '', 'Bau', '\xe5\x8c\x85; smith']  # At Stanford, the sns can be a list delimited by ';'


def gen_all_identities():
    """
    A generator for all combinations of test inputs.
    Each generated item is a dict that represents what a shib IDP
    could potentially pass to django via request.META, i.e.
    setting (or not) request.META['givenName'], etc.
    """
    def _make_meta(mail, given_name, surname):
        """Assemble one test identity dict, omitting any None attribute."""
        meta = {'Shib-Identity-Provider': IDP,
                'REMOTE_USER': REMOTE_USER}
        for key, value in (('mail', mail),
                           ('givenName', given_name),
                           ('sn', surname)):
            if value is not None:
                meta[key] = value
        return meta

    for mail in MAILS:
        for given_name in GIVENNAMES:
            for surname in SNS:
                yield _make_meta(mail, given_name, surname)
@override_settings(MODULESTORE=TEST_DATA_MONGO_MODULESTORE, SESSION_ENGINE='django.contrib.sessions.backends.cache')
class ShibSPTest(ModuleStoreTestCase):
    """
    Tests for the Shibboleth SP, which communicates via request.META
    (Apache environment variables set by mod_shib)
    """
    request_factory = RequestFactory()

    def setUp(self):
        self.store = modulestore()

    @unittest.skipUnless(settings.MITX_FEATURES.get('AUTH_USE_SHIB'), True)
    def test_exception_shib_login(self):
        """
        Tests that we get the error page when there is no REMOTE_USER
        or Shib-Identity-Provider in request.META
        """
        no_remote_user_request = self.request_factory.get('/shib-login')
        no_remote_user_request.META.update({'Shib-Identity-Provider': IDP})
        no_remote_user_response = shib_login(no_remote_user_request)
        self.assertEqual(no_remote_user_response.status_code, 403)
        self.assertIn("identity server did not return your ID information", no_remote_user_response.content)

        no_idp_request = self.request_factory.get('/shib-login')
        no_idp_request.META.update({'REMOTE_USER': REMOTE_USER})
        no_idp_response = shib_login(no_idp_request)
        self.assertEqual(no_idp_response.status_code, 403)
        self.assertIn("identity server did not return your ID information", no_idp_response.content)

    @unittest.skipUnless(settings.MITX_FEATURES.get('AUTH_USE_SHIB'), True)
    def test_shib_login(self):
        """
        Tests that:
          * shib credentials that match an existing ExternalAuthMap with a linked active user logs the user in
          * shib credentials that match an existing ExternalAuthMap with a linked inactive user shows error page
          * shib credentials that match an existing ExternalAuthMap without a linked user and also match the email
            of an existing user without an existing ExternalAuthMap links the two and log the user in
          * shib credentials that match an existing ExternalAuthMap without a linked user and also match the email
            of an existing user that already has an ExternalAuthMap causes an error (403)
          * shib credentials that do not match an existing ExternalAuthMap causes the registration form to appear
        """
        user_w_map = UserFactory.create(email='withmap@stanford.edu')
        extauth = ExternalAuthMap(external_id='withmap@stanford.edu',
                                  external_email='',
                                  external_domain='shib:https://idp.stanford.edu/',
                                  external_credentials="",
                                  user=user_w_map)
        user_wo_map = UserFactory.create(email='womap@stanford.edu')
        user_w_map.save()
        user_wo_map.save()
        extauth.save()

        inactive_user = UserFactory.create(email='inactive@stanford.edu')
        inactive_user.is_active = False
        inactive_extauth = ExternalAuthMap(external_id='inactive@stanford.edu',
                                           external_email='',
                                           external_domain='shib:https://idp.stanford.edu/',
                                           external_credentials="",
                                           user=inactive_user)
        inactive_user.save()
        inactive_extauth.save()

        idps = ['https://idp.stanford.edu/', 'https://someother.idp.com/']
        remote_users = ['withmap@stanford.edu', 'womap@stanford.edu',
                        'testuser2@someother_idp.com', 'inactive@stanford.edu']

        for idp in idps:
            for remote_user in remote_users:
                request = self.request_factory.get('/shib-login')
                request.session = import_module(settings.SESSION_ENGINE).SessionStore()  # empty session
                request.META.update({'Shib-Identity-Provider': idp,
                                     'REMOTE_USER': remote_user,
                                     'mail': remote_user})
                request.user = AnonymousUser()
                response = shib_login(request)
                if idp == "https://idp.stanford.edu/" and remote_user == 'withmap@stanford.edu':
                    self.assertIsInstance(response, HttpResponseRedirect)
                    self.assertEqual(request.user, user_w_map)
                    self.assertEqual(response['Location'], '/')
                elif idp == "https://idp.stanford.edu/" and remote_user == 'inactive@stanford.edu':
                    self.assertEqual(response.status_code, 403)
                    self.assertIn("Account not yet activated: please look for link in your email", response.content)
                elif idp == "https://idp.stanford.edu/" and remote_user == 'womap@stanford.edu':
                    self.assertIsNotNone(ExternalAuthMap.objects.get(user=user_wo_map))
                    self.assertIsInstance(response, HttpResponseRedirect)
                    self.assertEqual(request.user, user_wo_map)
                    self.assertEqual(response['Location'], '/')
                elif idp == "https://someother.idp.com/" and remote_user in \
                            ['withmap@stanford.edu', 'womap@stanford.edu', 'inactive@stanford.edu']:
                    self.assertEqual(response.status_code, 403)
                    self.assertIn("You have already created an account using an external login", response.content)
                else:
                    self.assertEqual(response.status_code, 200)
                    self.assertContains(response, "<title>Register for")

    @unittest.skipUnless(settings.MITX_FEATURES.get('AUTH_USE_SHIB'), True)
    def test_registration_form(self):
        """
        Tests the registration form showing up with the proper parameters.

        Uses django test client for its session support
        """
        for identity in gen_all_identities():
            client = DjangoTestClient()
            # identity k/v pairs will show up in request.META
            response = client.get(path='/shib-login/', data={}, follow=False, **identity)
            # BUG FIX: assertEquals is a deprecated alias of assertEqual.
            self.assertEqual(response.status_code, 200)
            mail_input_HTML = '<input class="" id="email" type="email" name="email"'
            if not identity.get('mail'):
                self.assertContains(response, mail_input_HTML)
            else:
                self.assertNotContains(response, mail_input_HTML)
            sn_empty = not identity.get('sn')
            given_name_empty = not identity.get('givenName')
            fullname_input_HTML = '<input id="name" type="text" name="name"'
            if sn_empty and given_name_empty:
                self.assertContains(response, fullname_input_HTML)
            else:
                self.assertNotContains(response, fullname_input_HTML)
            #clean up b/c we don't want existing ExternalAuthMap for the next run
            client.session['ExternalAuthMap'].delete()

    @unittest.skipUnless(settings.MITX_FEATURES.get('AUTH_USE_SHIB'), True)
    def test_registration_formSubmit(self):
        """
        Tests user creation after the registration form that pops is submitted.  If there is no shib
        ExternalAuthMap in the session, then the created user should take the username and email from the
        request.

        Uses django test client for its session support
        """
        for identity in gen_all_identities():
            #First we pop the registration form
            client = DjangoTestClient()
            response1 = client.get(path='/shib-login/', data={}, follow=False, **identity)
            #Then we have the user answer the registration form
            postvars = {'email': 'post_email@stanford.edu',
                        'username': 'post_username',
                        'password': 'post_password',
                        'name': 'post_name',
                        'terms_of_service': 'true',
                        'honor_code': 'true'}
            #use RequestFactory instead of TestClient here because we want access to request.user
            request2 = self.request_factory.post('/create_account', data=postvars)
            request2.session = client.session
            request2.user = AnonymousUser()
            response2 = create_account(request2)

            user = request2.user
            mail = identity.get('mail')

            #check that the created user has the right email, either taken from shib or user input
            if mail:
                self.assertEqual(user.email, mail)
                self.assertEqual(list(User.objects.filter(email=postvars['email'])), [])
                self.assertIsNotNone(User.objects.get(email=mail))  # get enforces only 1 such user
            else:
                self.assertEqual(user.email, postvars['email'])
                self.assertEqual(list(User.objects.filter(email=mail)), [])
                self.assertIsNotNone(User.objects.get(email=postvars['email']))  # get enforces only 1 such user

            #check that the created user profile has the right name, either taken from shib or user input
            profile = UserProfile.objects.get(user=user)
            sn_empty = not identity.get('sn')
            given_name_empty = not identity.get('givenName')
            if sn_empty and given_name_empty:
                self.assertEqual(profile.name, postvars['name'])
            else:
                self.assertEqual(profile.name, request2.session['ExternalAuthMap'].external_name)

            #clean up for next loop
            request2.session['ExternalAuthMap'].delete()
            UserProfile.objects.filter(user=user).delete()
            Registration.objects.filter(user=user).delete()
            user.delete()

    @unittest.skipUnless(settings.MITX_FEATURES.get('AUTH_USE_SHIB'), True)
    def test_course_specificLoginAndReg(self):
        """
        Tests that the correct course specific login and registration urls work for shib
        """
        course = CourseFactory.create(org='MITx', number='999', display_name='Robot Super Course')

        # Test for cases where course is found
        for domain in ["", "shib:https://idp.stanford.edu/"]:
            #set domains
            course.enrollment_domain = domain
            metadata = own_metadata(course)
            metadata['enrollment_domain'] = domain
            self.store.update_metadata(course.location.url(), metadata)

            #setting location to test that GET params get passed through
            login_request = self.request_factory.get('/course_specific_login/MITx/999/Robot_Super_Course' +
                                                     '?course_id=MITx/999/Robot_Super_Course' +
                                                     '&enrollment_action=enroll')
            # BUG FIX: the course_id query parameter previously carried a
            # stray 'course/' path segment.
            reg_request = self.request_factory.get('/course_specific_register/MITx/999/Robot_Super_Course' +
                                                   '?course_id=MITx/999/Robot_Super_Course' +
                                                   '&enrollment_action=enroll')
            login_response = course_specific_login(login_request, 'MITx/999/Robot_Super_Course')
            # BUG FIX: the register view was previously exercised with
            # login_request, leaving reg_request unused.
            reg_response = course_specific_register(reg_request, 'MITx/999/Robot_Super_Course')
            if "shib" in domain:
                self.assertIsInstance(login_response, HttpResponseRedirect)
                self.assertEqual(login_response['Location'],
                                 reverse('shib-login') +
                                 '?course_id=MITx/999/Robot_Super_Course' +
                                 '&enrollment_action=enroll')
                # BUG FIX: this previously re-checked login_response.
                self.assertIsInstance(reg_response, HttpResponseRedirect)
                self.assertEqual(reg_response['Location'],
                                 reverse('shib-login') +
                                 '?course_id=MITx/999/Robot_Super_Course' +
                                 '&enrollment_action=enroll')
            else:
                self.assertIsInstance(login_response, HttpResponseRedirect)
                self.assertEqual(login_response['Location'],
                                 reverse('signin_user') +
                                 '?course_id=MITx/999/Robot_Super_Course' +
                                 '&enrollment_action=enroll')
                # BUG FIX: this previously re-checked login_response.
                self.assertIsInstance(reg_response, HttpResponseRedirect)
                self.assertEqual(reg_response['Location'],
                                 reverse('register_user') +
                                 '?course_id=MITx/999/Robot_Super_Course' +
                                 '&enrollment_action=enroll')

        # Now test for non-existent course
        #setting location to test that GET params get passed through
        login_request = self.request_factory.get('/course_specific_login/DNE/DNE/DNE' +
                                                 '?course_id=DNE/DNE/DNE' +
                                                 '&enrollment_action=enroll')
        # BUG FIX: course_id previously carried a stray '/Robot_Super_Course'.
        reg_request = self.request_factory.get('/course_specific_register/DNE/DNE/DNE' +
                                               '?course_id=DNE/DNE/DNE' +
                                               '&enrollment_action=enroll')
        login_response = course_specific_login(login_request, 'DNE/DNE/DNE')
        # BUG FIX: previously called with login_request.
        reg_response = course_specific_register(reg_request, 'DNE/DNE/DNE')

        self.assertIsInstance(login_response, HttpResponseRedirect)
        self.assertEqual(login_response['Location'],
                         reverse('signin_user') +
                         '?course_id=DNE/DNE/DNE' +
                         '&enrollment_action=enroll')
        # BUG FIX: this previously re-checked login_response.
        self.assertIsInstance(reg_response, HttpResponseRedirect)
        self.assertEqual(reg_response['Location'],
                         reverse('register_user') +
                         '?course_id=DNE/DNE/DNE' +
                         '&enrollment_action=enroll')

    @unittest.skipUnless(settings.MITX_FEATURES.get('AUTH_USE_SHIB'), True)
    def test_enrollment_limit_by_domain(self):
        """
        Tests that the enrollmentDomain setting is properly limiting enrollment to those who have
        the proper external auth
        """
        #create 2 course, one with limited enrollment one without
        shib_course = CourseFactory.create(org='Stanford', number='123', display_name='Shib Only')
        shib_course.enrollment_domain = 'shib:https://idp.stanford.edu/'
        metadata = own_metadata(shib_course)
        metadata['enrollment_domain'] = shib_course.enrollment_domain
        self.store.update_metadata(shib_course.location.url(), metadata)

        open_enroll_course = CourseFactory.create(org='MITx', number='999', display_name='Robot Super Course')
        open_enroll_course.enrollment_domain = ''
        metadata = own_metadata(open_enroll_course)
        metadata['enrollment_domain'] = open_enroll_course.enrollment_domain
        self.store.update_metadata(open_enroll_course.location.url(), metadata)

        # create 3 kinds of students, external_auth matching shib_course, external_auth not matching, no external auth
        shib_student = UserFactory.create()
        shib_student.save()
        extauth = ExternalAuthMap(external_id='testuser@stanford.edu',
                                  external_email='',
                                  external_domain='shib:https://idp.stanford.edu/',
                                  external_credentials="",
                                  user=shib_student)
        extauth.save()

        other_ext_student = UserFactory.create()
        other_ext_student.username = "teststudent2"
        other_ext_student.email = "teststudent2@other.edu"
        other_ext_student.save()
        extauth = ExternalAuthMap(external_id='testuser1@other.edu',
                                  external_email='',
                                  external_domain='shib:https://other.edu/',
                                  external_credentials="",
                                  user=other_ext_student)
        extauth.save()

        int_student = UserFactory.create()
        int_student.username = "teststudent3"
        int_student.email = "teststudent3@gmail.com"
        int_student.save()

        #Tests the two case for courses, limited and not
        for course in [shib_course, open_enroll_course]:
            for student in [shib_student, other_ext_student, int_student]:
                request = self.request_factory.post('/change_enrollment')
                request.POST.update({'enrollment_action': 'enroll',
                                     'course_id': course.id})
                request.user = student
                response = change_enrollment(request)
                #if course is not limited or student has correct shib extauth then enrollment should be allowed
                if course is open_enroll_course or student is shib_student:
                    self.assertEqual(response.status_code, 200)
                    self.assertEqual(CourseEnrollment.objects.filter(user=student, course_id=course.id).count(), 1)
                    #clean up
                    CourseEnrollment.objects.filter(user=student, course_id=course.id).delete()
                else:
                    self.assertEqual(response.status_code, 400)
                    self.assertEqual(CourseEnrollment.objects.filter(user=student, course_id=course.id).count(), 0)

    @unittest.skipUnless(settings.MITX_FEATURES.get('AUTH_USE_SHIB'), True)
    def test_shib_login_enrollment(self):
        """
        A functionality test that a student with an existing shib login can auto-enroll in a class with GET params
        """
        # NOTE: the early-return guard on MITX_FEATURES that used to live
        # here was dead code -- the skipUnless decorator above already
        # skips this test when shib auth is disabled.
        student = UserFactory.create()
        extauth = ExternalAuthMap(external_id='testuser@stanford.edu',
                                  external_email='',
                                  external_domain='shib:https://idp.stanford.edu/',
                                  external_credentials="",
                                  internal_password="password",
                                  user=student)
        student.set_password("password")
        student.save()
        extauth.save()

        course = CourseFactory.create(org='Stanford', number='123', display_name='Shib Only')
        course.enrollment_domain = 'shib:https://idp.stanford.edu/'
        metadata = own_metadata(course)
        metadata['enrollment_domain'] = course.enrollment_domain
        self.store.update_metadata(course.location.url(), metadata)

        #use django test client for sessions and url processing
        #no enrollment before trying
        self.assertEqual(CourseEnrollment.objects.filter(user=student, course_id=course.id).count(), 0)
        self.client.logout()
        request_kwargs = {'path': '/shib-login/',
                          'data': {'enrollment_action': 'enroll', 'course_id': course.id},
                          'follow': False,
                          'REMOTE_USER': 'testuser@stanford.edu',
                          'Shib-Identity-Provider': 'https://idp.stanford.edu/'}
        response = self.client.get(**request_kwargs)
        #successful login is a redirect to "/"
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response['location'], 'http://testserver/')
        #now there is enrollment
        self.assertEqual(CourseEnrollment.objects.filter(user=student, course_id=course.id).count(), 1)
|
lucafavatella/intellij-community | refs/heads/cli-wip | python/testData/deprecation/deprecatedModule.py | 83 | import warnings
# Emit a module-level deprecation warning as soon as this module is imported;
# stacklevel 2 attributes the warning to the importing module, not this line.
warnings.warn("the deprecated module is deprecated; use a non-deprecated module instead",
              DeprecationWarning, 2)
|
Baumelbi/IntroPython2016 | refs/heads/master | students/sheree/lightning_talk/holy_shazbot.py | 3 | '''
This is my handcrafted, artisanal, locally sourced, gluten-free, vegan docstring.
'''
#I stripped the content because it's private |
andyzsf/Cactus- | refs/heads/master | cactus/tests/deployment/test_engine_api.py | 14 | #coding:utf-8
from cactus.tests.deployment import DummyUI, DummySite, DummyDeploymentEngine, BaseDeploymentTestCase
class BucketCreateTestCase(BaseDeploymentTestCase):
    """Exercise the dummy deployment engine's bucket-creation flow:
    after configure(), the bucket attributes, site configuration entries,
    and credentials must all have been persisted."""

    def setUp(self):
        super(BucketCreateTestCase, self).setUp()

        # Build a site backed by the dummy engine and run the full
        # configuration step that the assertions below inspect.
        self.ui = DummyUI()
        self.site = DummySite(self.test_dir, self.ui)
        self.engine = DummyDeploymentEngine(self.site)
        self.engine.configure()

    def test_bucket_attrs(self):
        """
        Test that the bucket name is provided
        """
        self.assertEqual("test-bucket", self.engine.bucket_name)
        self.assertEqual("test-bucket-obj", self.engine.bucket)

    def test_config_saved(self):
        """
        Test that the configuration is saved
        """
        self.assertEqual("test-bucket", self.site.config.get("test-conf-entry"))
        self.assertEqual("http://test-bucket.com", self.site.config.get("test-conf-entry-website"))

    def test_credentials_saved(self):
        """
        Test that the credentials are saved
        """
        self.assertTrue(self.engine.credentials_manager.saved)
|
haxoza/django | refs/heads/master | tests/servers/views.py | 384 | from django.http import HttpResponse
from .models import Person
def example_view(request):
    """Return a fixed plain-text 'example view' response."""
    body = 'example view'
    return HttpResponse(body)
def model_view(request):
    """Respond with the name of every Person row, one per line."""
    names = [person.name for person in Person.objects.all()]
    return HttpResponse('\n'.join(names))
def create_model_instance(request):
    """Persist a new Person named 'emily' and return an empty response."""
    Person(name='emily').save()
    return HttpResponse('')
def environ_view(request):
    """Dump the request's WSGI environ as 'key: repr(value)' lines."""
    lines = ["%s: %r" % (key, value) for key, value in request.environ.items()]
    return HttpResponse("\n".join(lines))
|
aospx-kitkat/platform_external_chromium_org | refs/heads/kitkat | tools/deep_memory_profiler/lib/subcommand.py | 24 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import optparse
import os
import re
from lib.bucket import BucketSet
from lib.dump import Dump, DumpList
from lib.symbol import SymbolDataSources, SymbolMappingCache, SymbolFinder
from lib.symbol import proc_maps
from lib.symbol import FUNCTION_SYMBOLS, SOURCEFILE_SYMBOLS, TYPEINFO_SYMBOLS
# Shared logger for all dmprof subcommands.
LOGGER = logging.getLogger('dmprof')
# Directory two levels above this file (the deep_memory_profiler root).
BASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Estimated Chromium src/ checkout root, two levels above BASE_PATH.
CHROME_SRC_PATH = os.path.join(BASE_PATH, os.pardir, os.pardir)
class SubCommand(object):
"""Subclasses are a subcommand for this executable.
See COMMANDS in main() in dmprof.py.
"""
_DEVICE_BINDIRS = ['/data/data/', '/data/app-lib/', '/data/local/tmp']
  def __init__(self, usage):
    """Create the option parser shared by all subcommands.

    Args:
        usage: usage string forwarded to optparse.OptionParser.
    """
    self._parser = optparse.OptionParser(usage)
  @staticmethod
  def load_basic_files(
      dump_path, multiple, no_dump=False, alternative_dirs=None):
    """Load dump(s), the symbolized bucket set and symbol caches.

    Args:
        dump_path: path to a '<prefix>.NNNN.heap' dump file.
        multiple: if True, load every sequentially numbered dump that
            shares the prefix of dump_path.
        no_dump: if True, load and symbolize only the bucket set.
        alternative_dirs: optional device-path -> host-path mapping used
            when the dumps were taken on an Android device.

    Returns:
        bucket_set when no_dump; (bucket_set, dump_list) when multiple;
        otherwise (bucket_set, dump).
    """
    prefix = SubCommand._find_prefix(dump_path)
    # If the target process is estimated to be working on Android, converts
    # a path in the Android device to a path estimated to be corresponding in
    # the host.  Use --alternative-dirs to specify the conversion manually.
    if not alternative_dirs:
      alternative_dirs = SubCommand._estimate_alternative_dirs(prefix)
    if alternative_dirs:
      for device, host in alternative_dirs.iteritems():
        LOGGER.info('Assuming %s on device as %s on host' % (device, host))
    symbol_data_sources = SymbolDataSources(prefix, alternative_dirs)
    symbol_data_sources.prepare()
    bucket_set = BucketSet()
    bucket_set.load(prefix)
    if not no_dump:
      if multiple:
        dump_list = DumpList.load(SubCommand._find_all_dumps(dump_path))
      else:
        dump = Dump.load(dump_path)
    # The 'a+' mode lets each cache file be created on first use and then
    # both read (existing mappings) and appended (new mappings).
    symbol_mapping_cache = SymbolMappingCache()
    with open(prefix + '.cache.function', 'a+') as cache_f:
      symbol_mapping_cache.update(
          FUNCTION_SYMBOLS, bucket_set,
          SymbolFinder(FUNCTION_SYMBOLS, symbol_data_sources), cache_f)
    with open(prefix + '.cache.typeinfo', 'a+') as cache_f:
      symbol_mapping_cache.update(
          TYPEINFO_SYMBOLS, bucket_set,
          SymbolFinder(TYPEINFO_SYMBOLS, symbol_data_sources), cache_f)
    with open(prefix + '.cache.sourcefile', 'a+') as cache_f:
      symbol_mapping_cache.update(
          SOURCEFILE_SYMBOLS, bucket_set,
          SymbolFinder(SOURCEFILE_SYMBOLS, symbol_data_sources), cache_f)
    bucket_set.symbolize(symbol_mapping_cache)
    if no_dump:
      return bucket_set
    elif multiple:
      return (bucket_set, dump_list)
    else:
      return (bucket_set, dump)
@staticmethod
def _find_prefix(path):
return re.sub('\.[0-9][0-9][0-9][0-9]\.heap', '', path)
@staticmethod
def _estimate_alternative_dirs(prefix):
"""Estimates a path in host from a corresponding path in target device.
For Android, dmprof.py should find symbol information from binaries in
the host instead of the Android device because dmprof.py doesn't run on
the Android device. This method estimates a path in the host
corresponding to a path in the Android device.
Returns:
A dict that maps a path in the Android device to a path in the host.
If a file in SubCommand._DEVICE_BINDIRS is found in /proc/maps, it
assumes the process was running on Android and maps the path to
"out/Debug/lib" in the Chromium directory. An empty dict is returned
unless Android.
"""
device_lib_path_candidates = set()
with open(prefix + '.maps') as maps_f:
maps = proc_maps.ProcMaps.load(maps_f)
for entry in maps:
name = entry.as_dict()['name']
if any([base_dir in name for base_dir in SubCommand._DEVICE_BINDIRS]):
device_lib_path_candidates.add(os.path.dirname(name))
if len(device_lib_path_candidates) == 1:
return {device_lib_path_candidates.pop(): os.path.join(
CHROME_SRC_PATH, 'out', 'Debug', 'lib')}
else:
return {}
@staticmethod
def _find_all_dumps(dump_path):
prefix = SubCommand._find_prefix(dump_path)
dump_path_list = [dump_path]
n = int(dump_path[len(dump_path) - 9 : len(dump_path) - 5])
n += 1
skipped = 0
while True:
p = '%s.%04d.heap' % (prefix, n)
if os.path.exists(p) and os.stat(p).st_size:
dump_path_list.append(p)
else:
if skipped > 10:
break
skipped += 1
n += 1
return dump_path_list
@staticmethod
def _find_all_buckets(dump_path):
prefix = SubCommand._find_prefix(dump_path)
bucket_path_list = []
n = 0
while True:
path = '%s.%04d.buckets' % (prefix, n)
if not os.path.exists(path):
if n > 10:
break
n += 1
continue
bucket_path_list.append(path)
n += 1
return bucket_path_list
def _parse_args(self, sys_argv, required):
options, args = self._parser.parse_args(sys_argv)
if len(args) < required + 1:
self._parser.error('needs %d argument(s).\n' % required)
return None
return (options, args)
@staticmethod
def _parse_policy_list(options_policy):
if options_policy:
return options_policy.split(',')
else:
return None
|
eduNEXT/edunext-platform | refs/heads/master | common/djangoapps/entitlements/api/v1/tests/test_serializers.py | 4 |
import unittest
from django.conf import settings
from django.test import RequestFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
# Entitlements is not in CMS' INSTALLED_APPS so these imports will error during test collection
if settings.ROOT_URLCONF == 'lms.urls':
from entitlements.api.v1.serializers import CourseEntitlementSerializer
from entitlements.tests.factories import CourseEntitlementFactory
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class EntitlementsSerializerTests(ModuleStoreTestCase):
    """Tests for the CourseEntitlementSerializer output format."""

    def setUp(self):
        super(EntitlementsSerializerTests, self).setUp()

    def test_data(self):
        """The serializer emits every expected field for an entitlement."""
        entitlement = CourseEntitlementFactory()
        http_request = RequestFactory().get('')
        serialized = CourseEntitlementSerializer(
            entitlement, context={'request': http_request}
        ).data
        timestamp_format = '%Y-%m-%dT%H:%M:%S.%fZ'
        expected = {
            'user': entitlement.user.username,
            'uuid': str(entitlement.uuid),
            'expired_at': entitlement.expired_at,
            'course_uuid': str(entitlement.course_uuid),
            'mode': entitlement.mode,
            'refund_locked': False,
            'order_number': entitlement.order_number,
            'created': entitlement.created.strftime(timestamp_format),
            'modified': entitlement.modified.strftime(timestamp_format),
            'support_details': [],
        }
        assert serialized == expected
|
annegabrielle/secure_adhoc_network_ns-3 | refs/heads/master | ns3_source_code/ns-3.10/.waf-1.5.16-e6d03192b5ddfa5ef2c8d65308e48e42/wafadmin/Tools/winres.py | 6 | #! /usr/bin/env python
# encoding: utf-8
import os,sys,re
import TaskGen,Task
from Utils import quote_whitespace
from TaskGen import extension
# File extensions handled by this waf tool.
EXT_WINRC=['.rc']
# Command template expanded by waf when running the resource compiler.
winrc_str='${WINRC} ${_CPPDEFFLAGS} ${_CCDEFFLAGS} ${WINRCFLAGS} ${_CPPINCFLAGS} ${_CCINCFLAGS} ${WINRC_TGT_F} ${TGT} ${WINRC_SRC_F} ${SRC}'
def rc_file(self, node):
    """Waf extension hook: schedule a 'winrc' task for a .rc source node."""
    # MSVC's RC tool ('/fo') emits .res files; other toolchains get .rc.o.
    ext = '.res' if self.env['WINRC_TGT_F'] == '/fo' else '.rc.o'
    task = self.create_task('winrc', node, node.change_ext(ext))
    self.compiled_tasks.append(task)
# Register the resource-compile task; it must run before C/C++ compilation.
Task.simple_task_type('winrc',winrc_str,color='BLUE',before='cc cxx',shell=False)
def detect(conf):
    """Configure-time check: locate a Windows resource compiler.

    Sets WINRC plus the target/source flag conventions in conf.env, and
    aborts configuration if no resource compiler can be found.
    """
    env = conf.env
    winrc = env['WINRC']
    # GNU-style defaults; overridden below for MSVC.
    env['WINRC_TGT_F'] = '-o'
    env['WINRC_SRC_F'] = '-i'
    if not winrc:
        compiler = env['CC_NAME']
        if compiler in ['gcc', 'cc', 'g++', 'c++']:
            # GNU toolchains ship 'windres'.
            winrc = conf.find_program('windres', var='WINRC', path_list=env['PATH'])
        elif compiler == 'msvc':
            # MSVC uses RC with '/fo <out>' and a bare input argument.
            winrc = conf.find_program('RC', var='WINRC', path_list=env['PATH'])
            env['WINRC_TGT_F'] = '/fo'
            env['WINRC_SRC_F'] = ''
    if not winrc:
        conf.fatal('winrc was not found!')
    env['WINRCFLAGS'] = ''
# Hook rc_file as the handler for .rc sources in task generators.
extension(EXT_WINRC)(rc_file)
|
nathanielparke/huxley | refs/heads/master | huxley/logging/migrations/0001_initial.py | 3 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the LogEntry model."""
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='LogEntry',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('level', models.CharField(max_length=200)),
                ('message', models.TextField()),
                ('timestamp', models.DateTimeField(null=True, verbose_name=b'timestamp', blank=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
|
coreynicholson/youtube-dl | refs/heads/master | youtube_dl/extractor/patreon.py | 41 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import js_to_json
class PatreonIE(InfoExtractor):
    """Extractor for Patreon posts (www.patreon.com/creation?hid=...)."""
    _VALID_URL = r'https?://(?:www\.)?patreon\.com/creation\?hid=(?P<id>[^&#]+)'
    _TESTS = [
        {
            'url': 'http://www.patreon.com/creation?hid=743933',
            'md5': 'e25505eec1053a6e6813b8ed369875cc',
            'info_dict': {
                'id': '743933',
                'ext': 'mp3',
                'title': 'Episode 166: David Smalley of Dogma Debate',
                'uploader': 'Cognitive Dissonance Podcast',
                'thumbnail': 're:^https?://.*$',
            },
        },
        {
            'url': 'http://www.patreon.com/creation?hid=754133',
            'md5': '3eb09345bf44bf60451b8b0b81759d0a',
            'info_dict': {
                'id': '754133',
                'ext': 'mp3',
                'title': 'CD 167 Extra',
                'uploader': 'Cognitive Dissonance Podcast',
                'thumbnail': 're:^https?://.*$',
            },
        },
        {
            'url': 'https://www.patreon.com/creation?hid=1682498',
            'info_dict': {
                'id': 'SU4fj_aEMVw',
                'ext': 'mp4',
                'title': 'I\'m on Patreon!',
                'uploader': 'TraciJHines',
                'thumbnail': 're:^https?://.*$',
                'upload_date': '20150211',
                'description': 'md5:c5a706b1f687817a3de09db1eb93acd4',
                'uploader_id': 'TraciJHines',
            },
            'params': {
                'noplaylist': True,
                'skip_download': True,
            }
        }
    ]
    # Currently Patreon exposes download URL via hidden CSS, so login is not
    # needed. Keeping this commented for when this inevitably changes.
    '''
    def _login(self):
        (username, password) = self._get_login_info()
        if username is None:
            return
        login_form = {
            'redirectUrl': 'http://www.patreon.com/',
            'email': username,
            'password': password,
        }
        request = sanitized_Request(
            'https://www.patreon.com/processLogin',
            compat_urllib_parse_urlencode(login_form).encode('utf-8')
        )
        login_page = self._download_webpage(request, None, note='Logging in as %s' % username)
        if re.search(r'onLoginFailed', login_page):
            raise ExtractorError('Unable to login, incorrect username and/or password', expected=True)
    def _real_initialize(self):
        self._login()
    '''

    def _real_extract(self, url):
        """Resolve a Patreon post into an info dict, or delegate to an
        embedded player URL when the post only wraps one."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        title = self._og_search_title(webpage).strip()
        # Direct attachment download link (e.g. an mp3), if present.
        attach_fn = self._html_search_regex(
            r'<div class="attach"><a target="_blank" href="([^"]+)">',
            webpage, 'attachment URL', default=None)
        # Embedded third-party player iframe (e.g. YouTube), if present.
        embed = self._html_search_regex(
            r'<div[^>]+id="watchCreation"[^>]*>\s*<iframe[^>]+src="([^"]+)"',
            webpage, 'embedded URL', default=None)
        if attach_fn is not None:
            video_url = 'http://www.patreon.com' + attach_fn
            thumbnail = self._og_search_thumbnail(webpage)
            uploader = self._html_search_regex(
                r'<strong>(.*?)</strong> is creating', webpage, 'uploader')
        elif embed is not None:
            # Hand the embedded URL off to whichever extractor matches it.
            return self.url_result(embed)
        else:
            # Fall back to the jPlayer playlist object embedded in the page.
            playlist = self._parse_json(self._search_regex(
                r'(?s)new\s+jPlayerPlaylist\(\s*\{\s*[^}]*},\s*(\[.*?,?\s*\])',
                webpage, 'playlist JSON'),
                video_id, transform_source=js_to_json)
            data = playlist[0]
            video_url = self._proto_relative_url(data['mp3'])
            thumbnail = self._proto_relative_url(data.get('cover'))
            uploader = data.get('artist')
        return {
            'id': video_id,
            'url': video_url,
            'ext': 'mp3',
            'title': title,
            'uploader': uploader,
            'thumbnail': thumbnail,
        }
|
norn/bustime | refs/heads/master | utils/nbusstops-export.py | 1 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from devinclude import *
from bustime.models import *
import time
import urllib
import socket
import random
from django.contrib.sessions.models import Session
import csv
from django.contrib.gis.geos import Point
import codecs
import json
import pickle
#city=City.objects.get(name="Красноярск")
#city=City.objects.get(name="Калининград")
#city=City.objects.get(name="Санкт-Петербург")
#city=City.objects.get(name="Томск")
#city=City.objects.get(name="Кемерово")
#city=City.objects.get(name="Пермь")
city=City.objects.get(name="Казань")
names=[]
names_done={}
BEG="var stops=["
END="];"
NBusStop.objects.filter(city=city)
for nb in NBusStop.objects.filter(city=city).order_by('name'):#.distinct('name')
if not names_done.get(nb.name):
ids = NBusStop.objects.filter(city=city, name=nb.name).order_by('id').values_list("id", flat=True)
names.append("{value:'%s',ids:%s}"%(nb.name, ids))
names_done[nb.name]=1
print "%s%s%s"%(BEG, ",".join(names).encode('utf8'), END)
|
steebchen/youtube-dl | refs/heads/master | youtube_dl/extractor/tvnoe.py | 59 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
clean_html,
get_element_by_class,
js_to_json,
)
class TVNoeIE(InfoExtractor):
    """Extractor for videos hosted on tvnoe.cz."""
    _VALID_URL = r'https?://(?:www\.)?tvnoe\.cz/video/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://www.tvnoe.cz/video/10362',
        'md5': 'aee983f279aab96ec45ab6e2abb3c2ca',
        'info_dict': {
            'id': '10362',
            'ext': 'mp4',
            'series': 'Noční univerzita',
            'title': 'prof. Tomáš Halík, Th.D. - Návrat náboženství a střet civilizací',
            'description': 'md5:f337bae384e1a531a52c55ebc50fff41',
        }
    }

    def _real_extract(self, url):
        """Follow the page's player iframe to a JWPlayer config and build
        the info dict from it plus metadata scraped from the outer page."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # The actual player is served from an iframe.
        iframe_url = self._search_regex(
            r'<iframe[^>]+src="([^"]+)"', webpage, 'iframe URL')
        ifs_page = self._download_webpage(iframe_url, video_id)
        jwplayer_data = self._find_jwplayer_data(
            ifs_page, video_id, transform_source=js_to_json)
        info_dict = self._parse_jwplayer_data(
            jwplayer_data, video_id, require_title=False, base_url=iframe_url)
        # Title/description/series come from the outer page, not the player.
        info_dict.update({
            'id': video_id,
            'title': clean_html(get_element_by_class(
                'field-name-field-podnazev', webpage)),
            'description': clean_html(get_element_by_class(
                'field-name-body', webpage)),
            'series': clean_html(get_element_by_class('title', webpage))
        })
        return info_dict
|
sandvine/horizon | refs/heads/master | horizon/contrib/bootstrap_datepicker.py | 87 | # Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Map Horizon languages to datepicker locales.
# Keys are Horizon/Django language codes; values are bootstrap-datepicker
# locale file names. They mostly coincide; exceptions are called out inline.
LOCALE_MAPPING = {
    'ar': 'ar',
    'az': 'az',
    'bg': 'bg',
    'ca': 'ca',
    'cs': 'cs',
    'cy': 'cy',
    'da': 'da',
    'de': 'de',
    'el': 'el',
    'es': 'es',
    'et': 'et',
    'fa': 'fa',
    'fi': 'fi',
    'fr': 'fr',
    'gl': 'gl',
    'he': 'he',
    'hr': 'hr',
    'hu': 'hu',
    'id': 'id',
    'is': 'is',
    'it': 'it',
    'ja': 'ja',
    'ka': 'ka',
    'kk': 'kk',
    'ko': 'kr',  # difference between horizon and datepicker
    'lt': 'lt',
    'lv': 'lv',
    'mk': 'mk',
    'ms': 'ms',
    'nb': 'nb',
    'nl-be': 'nl-BE',
    'nl': 'nl',
    'no': 'no',
    'pl': 'pl',
    'pt-br': 'pt-BR',
    'pt': 'pt',
    'ro': 'ro',
    'rs-latin': 'rs-latin',
    'sr': 'rs',  # difference between horizon and datepicker
    'ru': 'ru',
    'sk': 'sk',
    'sl': 'sl',
    'sq': 'sq',
    'sv': 'sv',
    'sw': 'sw',
    'th': 'th',
    'tr': 'tr',
    'ua': 'ua',
    'vi': 'vi',
    'zh-cn': 'zh-CN',
    'zh-tw': 'zh-TW',
}
|
ronaldahmed/SLAM-for-ugv | refs/heads/master | neural-navigation-with-lstm/MARCO/Utility/logger.py | 2 | import gzip, logging, os, os.path, sys, time
# Single-character separators keyed by log level name -- presumably for
# callers building visual dividers in log output; nothing in this module
# reads Sep, so confirm its users before removing. -- NOTE(review)
Sep ={
    'DEBUG': '~',
    'INFO': '.',
    'STAGE': '=',
    'RUN': '!',
    'WARNING': '.',
    'ERROR': '_',
    'CRITICAL': '#',
}
def initLogger(loggerName,consoleLevel=logging.INFO,doTrace=True,LogDir='Logs'):
    """Set up the module-global logger and timestamp.

    Registers custom levels STAGE (24) and RUN (26); when doTrace is true,
    writes a 'Summary' and a 'Trace' file under LogDir (named with the
    current timestamp) in addition to console output. With doTrace false,
    only RUN-and-above messages reach the console and no files are written.
    """
    global timeStamp,logger
    logger = logging.getLogger(loggerName)
    logger.setLevel(consoleLevel)
    timeStamp = time.strftime("%Y-%m-%d-%H-%M")
    logging.addLevelName(24, 'STAGE') # Completion of a stage
    logging.addLevelName(26, 'RUN') # Completion of a run
    # (name, threshold level, format) per log file.
    Summary = ('Summary', 26, '%(message)s')
    Trace = ('Trace', logging.DEBUG, '%(asctime)s %(levelname)-8s %(message)s')
    if doTrace: Logs = (Summary,Trace)
    else: Logs = tuple()
    for logname,level,fmt in Logs:
        LogFile = '-'.join((loggerName,logname,timeStamp))+'.log'
        logHandler = logging.FileHandler(os.path.join(os.getcwd(), LogDir,LogFile), 'w')
        logHandler.setLevel(level)
        logHandler.setFormatter(logging.Formatter(fmt, '%m-%d %H:%M:%S'))
        logger.addHandler(logHandler)
    # Log info and above to console without timestamps
    console = logging.StreamHandler(sys.stdout)
    if doTrace: console.setLevel(logging.INFO)
    else: console.setLevel(24)
    console.setFormatter(logging.Formatter('%(message)s'))
    logger.addHandler(console)
def shutdownLogger():
    """Flush and close all logging handlers."""
    # NOTE: compressing the Summary/Trace log files on shutdown was once
    # planned here but never implemented.
    logging.shutdown()


def debug(*msg):
    """Forward to the module logger at DEBUG level."""
    logger.debug(*msg)


def info(*msg):
    """Forward to the module logger at INFO level."""
    logger.info(*msg)


def stageComplete(*msg):
    """Log at the custom STAGE level (24)."""
    logger.log(24, *msg)


def runComplete(*msg):
    """Log at the custom RUN level (26)."""
    logger.log(26, *msg)


def warning(*msg):
    """Forward to the module logger at WARNING level."""
    logger.warning(*msg)


def error(*msg):
    """Forward to the module logger at ERROR level."""
    logger.error(*msg)


def critical(*msg):
    """Forward to the module logger at CRITICAL level."""
    logger.critical(*msg)


def flush():
    """No-op placeholder."""
    pass
nmaswood/tv_scraping | refs/heads/master | lib/python2.7/site-packages/pip/commands/wheel.py | 239 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
import os
import warnings
from pip.basecommand import RequirementCommand
from pip.index import PackageFinder
from pip.exceptions import CommandError, PreviousBuildDirError
from pip.req import RequirementSet
from pip.utils import import_or_raise, normalize_path
from pip.utils.build import BuildDirectory
from pip.utils.deprecation import RemovedInPip8Warning
from pip.wheel import WheelCache, WheelBuilder
from pip import cmdoptions
# Fallback output directory for built wheels: '<cwd>/wheelhouse'.
DEFAULT_WHEEL_DIR = os.path.join(normalize_path(os.curdir), 'wheelhouse')
logger = logging.getLogger(__name__)
class WheelCommand(RequirementCommand):
    """
    Build Wheel archives for your requirements and dependencies.
    Wheel is a built-package format, and offers the advantage of not
    recompiling your software during every install. For more details, see the
    wheel docs: http://wheel.readthedocs.org/en/latest.
    Requirements: setuptools>=0.8, and wheel.
    'pip wheel' uses the bdist_wheel setuptools extension from the wheel
    package to build individual wheels.
    """
    name = 'wheel'
    usage = """
%prog [options] <requirement specifier> ...
%prog [options] -r <requirements file> ...
%prog [options] [-e] <vcs project url> ...
%prog [options] [-e] <local project path> ...
%prog [options] <archive url/path> ..."""
    summary = 'Build wheels from your requirements.'

    def __init__(self, *args, **kw):
        """Register every command-line option for 'pip wheel'."""
        super(WheelCommand, self).__init__(*args, **kw)
        cmd_opts = self.cmd_opts
        cmd_opts.add_option(
            '-w', '--wheel-dir',
            dest='wheel_dir',
            metavar='dir',
            default=DEFAULT_WHEEL_DIR,
            help=("Build wheels into <dir>, where the default is "
                  "'<cwd>/wheelhouse'."),
        )
        cmd_opts.add_option(cmdoptions.use_wheel())
        cmd_opts.add_option(cmdoptions.no_use_wheel())
        cmd_opts.add_option(cmdoptions.no_binary())
        cmd_opts.add_option(cmdoptions.only_binary())
        cmd_opts.add_option(
            '--build-option',
            dest='build_options',
            metavar='options',
            action='append',
            help="Extra arguments to be supplied to 'setup.py bdist_wheel'.")
        cmd_opts.add_option(cmdoptions.constraints())
        cmd_opts.add_option(cmdoptions.editable())
        cmd_opts.add_option(cmdoptions.requirements())
        cmd_opts.add_option(cmdoptions.download_cache())
        cmd_opts.add_option(cmdoptions.src())
        cmd_opts.add_option(cmdoptions.no_deps())
        cmd_opts.add_option(cmdoptions.build_dir())
        cmd_opts.add_option(
            '--global-option',
            dest='global_options',
            action='append',
            metavar='options',
            help="Extra global options to be supplied to the setup.py "
                 "call before the 'bdist_wheel' command.")
        cmd_opts.add_option(
            '--pre',
            action='store_true',
            default=False,
            help=("Include pre-release and development versions. By default, "
                  "pip only finds stable versions."),
        )
        cmd_opts.add_option(cmdoptions.no_clean())
        index_opts = cmdoptions.make_option_group(
            cmdoptions.index_group,
            self.parser,
        )
        self.parser.insert_option_group(0, index_opts)
        self.parser.insert_option_group(0, cmd_opts)

    def check_required_packages(self):
        """Raise CommandError unless 'wheel' and a dist-info-capable
        setuptools (>= 0.8) are importable."""
        import_or_raise(
            'wheel.bdist_wheel',
            CommandError,
            "'pip wheel' requires the 'wheel' package. To fix this, run: "
            "pip install wheel"
        )
        pkg_resources = import_or_raise(
            'pkg_resources',
            CommandError,
            "'pip wheel' requires setuptools >= 0.8 for dist-info support."
            " To fix this, run: pip install --upgrade setuptools"
        )
        # DistInfoDistribution only exists in setuptools >= 0.8.
        if not hasattr(pkg_resources, 'DistInfoDistribution'):
            raise CommandError(
                "'pip wheel' requires setuptools >= 0.8 for dist-info "
                "support. To fix this, run: pip install --upgrade "
                "setuptools"
            )

    def run(self, options, args):
        """Resolve the requested requirements and build wheels for them
        into options.wheel_dir."""
        self.check_required_packages()
        cmdoptions.resolve_wheel_no_use_binary(options)
        cmdoptions.check_install_build_global(options)
        index_urls = [options.index_url] + options.extra_index_urls
        if options.no_index:
            logger.info('Ignoring indexes: %s', ','.join(index_urls))
            index_urls = []
        if options.download_cache:
            warnings.warn(
                "--download-cache has been deprecated and will be removed in "
                "the future. Pip now automatically uses and configures its "
                "cache.",
                RemovedInPip8Warning,
            )
        if options.build_dir:
            options.build_dir = os.path.abspath(options.build_dir)
        with self._build_session(options) as session:
            finder = PackageFinder(
                find_links=options.find_links,
                format_control=options.format_control,
                index_urls=index_urls,
                allow_external=options.allow_external,
                allow_unverified=options.allow_unverified,
                allow_all_external=options.allow_all_external,
                allow_all_prereleases=options.pre,
                trusted_hosts=options.trusted_hosts,
                process_dependency_links=options.process_dependency_links,
                session=session,
            )
            # Only auto-delete the build dir when the user did not pick one.
            build_delete = (not (options.no_clean or options.build_dir))
            wheel_cache = WheelCache(options.cache_dir, options.format_control)
            with BuildDirectory(options.build_dir,
                                delete=build_delete) as build_dir:
                requirement_set = RequirementSet(
                    build_dir=build_dir,
                    src_dir=options.src_dir,
                    download_dir=None,
                    ignore_dependencies=options.ignore_dependencies,
                    ignore_installed=True,
                    isolated=options.isolated_mode,
                    session=session,
                    wheel_cache=wheel_cache,
                    wheel_download_dir=options.wheel_dir
                )
                self.populate_requirement_set(
                    requirement_set, args, options, finder, session, self.name,
                    wheel_cache
                )
                if not requirement_set.has_requirements:
                    return
                try:
                    # build wheels
                    wb = WheelBuilder(
                        requirement_set,
                        finder,
                        build_options=options.build_options or [],
                        global_options=options.global_options or [],
                    )
                    if not wb.build():
                        raise CommandError(
                            "Failed to build one or more wheels"
                        )
                except PreviousBuildDirError:
                    # Keep the build directory so the user can inspect it.
                    options.no_clean = True
                    raise
                finally:
                    if not options.no_clean:
                        requirement_set.cleanup_files()
|
snak3ater/kernel_msm | refs/heads/Sema_M | tools/perf/util/setup.py | 4998 | #!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
    # build_ext variant that redirects output into the directories supplied
    # by the perf Makefile (PYTHON_EXTBUILD_LIB / PYTHON_EXTBUILD_TMP).
    def finalize_options(self):
        _build_ext.finalize_options(self)
        self.build_lib = build_lib
        self.build_temp = build_tmp
class install_lib(_install_lib):
    # install_lib variant installing from the Makefile-provided build dir.
    def finalize_options(self):
        _install_lib.finalize_options(self)
        self.build_dir = build_lib
# Compiler flags for the extension; extended with $CFLAGS from the Makefile.
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()
# Build/output locations are dictated by the perf Makefile via environment.
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
# One C source path per non-empty, non-comment line of the manifest
# (uses the Python 2 builtin file()).
ext_sources = [f.strip() for f in file('util/python-ext-sources')
               if len(f.strip()) > 0 and f[0] != '#']
perf = Extension('perf',
                 sources = ext_sources,
                 include_dirs = ['util/include'],
                 extra_compile_args = cflags,
                 )
setup(name='perf',
      version='0.1',
      description='Interface with the Linux profiling infrastructure',
      author='Arnaldo Carvalho de Melo',
      author_email='acme@redhat.com',
      license='GPLv2',
      url='http://perf.wiki.kernel.org',
      ext_modules=[perf],
      cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
|
JFriel/honours_project | refs/heads/master | venv/lib/python2.7/site-packages/pip/commands/zip.py | 84 | import sys
import re
import fnmatch
import os
import shutil
import zipfile
from pip.util import display_path, backup_dir, rmtree
from pip.log import logger
from pip.exceptions import InstallationError
from pip.basecommand import Command
class ZipCommand(Command):
    """Zip individual packages."""
    name = 'zip'
    usage = """
%prog [options] <package> ..."""
    summary = 'Zip individual packages.'

    def __init__(self, *args, **kw):
        """Register command-line options.

        The same class backs both 'zip' and 'unzip'; which toggle option is
        added depends on self.name.
        """
        super(ZipCommand, self).__init__(*args, **kw)
        if self.name == 'zip':
            self.cmd_opts.add_option(
                '--unzip',
                action='store_true',
                dest='unzip',
                help='Unzip (rather than zip) a package.')
        else:
            self.cmd_opts.add_option(
                '--zip',
                action='store_false',
                dest='unzip',
                default=True,
                help='Zip (rather than unzip) a package.')
        self.cmd_opts.add_option(
            '--no-pyc',
            action='store_true',
            dest='no_pyc',
            help='Do not include .pyc files in zip files (useful on Google App Engine).')
        self.cmd_opts.add_option(
            '-l', '--list',
            action='store_true',
            dest='list',
            help='List the packages available, and their zip status.')
        self.cmd_opts.add_option(
            '--sort-files',
            action='store_true',
            dest='sort_files',
            help='With --list, sort packages according to how many files they contain.')
        self.cmd_opts.add_option(
            '--path',
            action='append',
            dest='paths',
            help='Restrict operations to the given paths (may include wildcards).')
        self.cmd_opts.add_option(
            '-n', '--simulate',
            action='store_true',
            help='Do not actually perform the zip/unzip operation.')
        self.parser.insert_option_group(0, self.cmd_opts)

    def paths(self):
        """All the entries of sys.path, possibly restricted by --path"""
        if not self.select_paths:
            return sys.path
        result = []
        match_any = set()
        for path in sys.path:
            path = os.path.normcase(os.path.abspath(path))
            for match in self.select_paths:
                match = os.path.normcase(os.path.abspath(match))
                if '*' in match:
                    # Wildcard pattern: translate to a regex and test.
                    if re.search(fnmatch.translate(match + '*'), path):
                        result.append(path)
                        match_any.add(match)
                        break
                else:
                    # Plain prefix match.
                    if path.startswith(match):
                        result.append(path)
                        match_any.add(match)
                        break
            else:
                logger.debug("Skipping path %s because it doesn't match %s"
                             % (path, ', '.join(self.select_paths)))
        # Non-wildcard --path entries that matched nothing on sys.path are
        # still searched.
        for match in self.select_paths:
            if match not in match_any and '*' not in match:
                result.append(match)
                logger.debug("Adding path %s because it doesn't match "
                             "anything already on sys.path" % match)
        return result

    def run(self, options, args):
        """Dispatch to --list, or zip/unzip each named package."""
        self.select_paths = options.paths
        self.simulate = options.simulate
        if options.list:
            return self.list(options, args)
        if not args:
            raise InstallationError(
                'You must give at least one package to zip or unzip')
        # Validate all packages up front before touching anything.
        packages = []
        for arg in args:
            module_name, filename = self.find_package(arg)
            if options.unzip and os.path.isdir(filename):
                raise InstallationError(
                    'The module %s (in %s) is not a zip file; cannot be unzipped'
                    % (module_name, filename))
            elif not options.unzip and not os.path.isdir(filename):
                raise InstallationError(
                    'The module %s (in %s) is not a directory; cannot be zipped'
                    % (module_name, filename))
            packages.append((module_name, filename))
        last_status = None
        for module_name, filename in packages:
            if options.unzip:
                last_status = self.unzip_package(module_name, filename)
            else:
                last_status = self.zip_package(module_name, filename, options.no_pyc)
        return last_status

    def unzip_package(self, module_name, filename):
        """Extract module_name out of its enclosing zip back onto disk,
        rewriting or removing the archive as appropriate."""
        zip_filename = os.path.dirname(filename)
        # NOTE(review): this condition looks inverted -- it raises only when
        # the path is simultaneously not a file AND a valid zip; 'or not'
        # was probably intended. Left as-is; verify before changing.
        if not os.path.isfile(zip_filename) and zipfile.is_zipfile(zip_filename):
            raise InstallationError(
                'Module %s (in %s) isn\'t located in a zip file in %s'
                % (module_name, filename, zip_filename))
        package_path = os.path.dirname(zip_filename)
        if not package_path in self.paths():
            logger.warn(
                'Unpacking %s into %s, but %s is not on sys.path'
                % (display_path(zip_filename), display_path(package_path),
                   display_path(package_path)))
        logger.notify('Unzipping %s (in %s)' % (module_name, display_path(zip_filename)))
        if self.simulate:
            logger.notify('Skipping remaining operations because of --simulate')
            return
        logger.indent += 2
        try:
            ## FIXME: this should be undoable:
            zip = zipfile.ZipFile(zip_filename)
            to_save = []
            for info in zip.infolist():
                name = info.filename
                if name.startswith(module_name + os.path.sep):
                    # Entry belongs to the target module: write it to disk.
                    content = zip.read(name)
                    dest = os.path.join(package_path, name)
                    if not os.path.exists(os.path.dirname(dest)):
                        os.makedirs(os.path.dirname(dest))
                    if not content and dest.endswith(os.path.sep):
                        if not os.path.exists(dest):
                            os.makedirs(dest)
                    else:
                        f = open(dest, 'wb')
                        f.write(content)
                        f.close()
                else:
                    # Keep other modules' entries to rewrite the archive.
                    to_save.append((name, zip.read(name)))
            zip.close()
            if not to_save:
                logger.info('Removing now-empty zip file %s' % display_path(zip_filename))
                os.unlink(zip_filename)
                self.remove_filename_from_pth(zip_filename)
            else:
                logger.info('Removing entries in %s/ from zip file %s' % (module_name, display_path(zip_filename)))
                zip = zipfile.ZipFile(zip_filename, 'w')
                for name, content in to_save:
                    zip.writestr(name, content)
                zip.close()
        finally:
            logger.indent -= 2

    def zip_package(self, module_name, filename, no_pyc):
        """Pack the directory at filename into '<filename>.zip' (in place
        for .egg dirs) and register the archive on a .pth file."""
        orig_filename = filename
        logger.notify('Zip %s (in %s)' % (module_name, display_path(filename)))
        logger.indent += 2
        if filename.endswith('.egg'):
            dest_filename = filename
        else:
            dest_filename = filename + '.zip'
        try:
            ## FIXME: I think this needs to be undoable:
            if filename == dest_filename:
                # In-place (.egg) case: move the directory aside first.
                filename = backup_dir(orig_filename)
                logger.notify('Moving %s aside to %s' % (orig_filename, filename))
                if not self.simulate:
                    shutil.move(orig_filename, filename)
            try:
                logger.info('Creating zip file in %s' % display_path(dest_filename))
                if not self.simulate:
                    zip = zipfile.ZipFile(dest_filename, 'w')
                    zip.writestr(module_name + '/', '')
                    for dirpath, dirnames, filenames in os.walk(filename):
                        if no_pyc:
                            filenames = [f for f in filenames
                                         if not f.lower().endswith('.pyc')]
                        for fns, is_dir in [(dirnames, True), (filenames, False)]:
                            for fn in fns:
                                full = os.path.join(dirpath, fn)
                                dest = os.path.join(module_name, dirpath[len(filename):].lstrip(os.path.sep), fn)
                                if is_dir:
                                    zip.writestr(dest + '/', '')
                                else:
                                    zip.write(full, dest)
                    zip.close()
                logger.info('Removing old directory %s' % display_path(filename))
                if not self.simulate:
                    rmtree(filename)
            except:
                ## FIXME: need to do an undo here
                raise
            ## FIXME: should also be undone:
            self.add_filename_to_pth(dest_filename)
        finally:
            logger.indent -= 2

    def remove_filename_from_pth(self, filename):
        """Delete any .pth line referring to filename; remove the .pth file
        entirely if that leaves it empty."""
        for pth in self.pth_files():
            f = open(pth, 'r')
            lines = f.readlines()
            f.close()
            new_lines = [
                l for l in lines if l.strip() != filename]
            if lines != new_lines:
                logger.info('Removing reference to %s from .pth file %s'
                            % (display_path(filename), display_path(pth)))
                if not [line for line in new_lines if line]:
                    logger.info('%s file would be empty: deleting' % display_path(pth))
                    if not self.simulate:
                        os.unlink(pth)
                else:
                    if not self.simulate:
                        f = open(pth, 'wb')
                        f.writelines(new_lines)
                        f.close()
                return
        logger.warn('Cannot find a reference to %s in any .pth file' % display_path(filename))

    def add_filename_to_pth(self, filename):
        """Append filename to '<filename>.pth' so the zip lands on sys.path."""
        path = os.path.dirname(filename)
        dest = filename + '.pth'
        if path not in self.paths():
            logger.warn('Adding .pth file %s, but it is not on sys.path' % display_path(dest))
        if not self.simulate:
            if os.path.exists(dest):
                f = open(dest)
                lines = f.readlines()
                f.close()
                if lines and not lines[-1].endswith('\n'):
                    lines[-1] += '\n'
                lines.append(filename + '\n')
            else:
                lines = [filename + '\n']
            f = open(dest, 'wb')
            f.writelines(lines)
            f.close()

    def pth_files(self):
        """Yield every .pth file found in the selected paths."""
        for path in self.paths():
            if not os.path.exists(path) or not os.path.isdir(path):
                continue
            for filename in os.listdir(path):
                if filename.endswith('.pth'):
                    yield os.path.join(path, filename)

    def find_package(self, package):
        """Locate `package` on the selected paths, either as a directory or
        inside a zip; return (package, full_path) or raise."""
        for path in self.paths():
            full = os.path.join(path, package)
            if os.path.exists(full):
                return package, full
            if not os.path.isdir(path) and zipfile.is_zipfile(path):
                # A zipped path: the package exists if its __init__.py does.
                zip = zipfile.ZipFile(path, 'r')
                try:
                    zip.read(os.path.join(package, '__init__.py'))
                except KeyError:
                    pass
                else:
                    zip.close()
                    return package, full
                zip.close()
        ## FIXME: need special error for package.py case:
        raise InstallationError(
            'No package with the name %s found' % package)

    def list(self, options, args):
        """Print zipped and unzipped packages for each site directory."""
        if args:
            raise InstallationError(
                'You cannot give an argument with --list')
        for path in sorted(self.paths()):
            if not os.path.exists(path):
                continue
            basename = os.path.basename(path.rstrip(os.path.sep))
            if os.path.isfile(path) and zipfile.is_zipfile(path):
                if os.path.dirname(path) not in self.paths():
                    logger.notify('Zipped egg: %s' % display_path(path))
                continue
            # Only report conventional site directories.
            if (basename != 'site-packages' and basename != 'dist-packages'
                    and not path.replace('\\', '/').endswith('lib/python')):
                continue
            logger.notify('In %s:' % display_path(path))
            logger.indent += 2
            zipped = []
            unzipped = []
            try:
                for filename in sorted(os.listdir(path)):
                    ext = os.path.splitext(filename)[1].lower()
                    if ext in ('.pth', '.egg-info', '.egg-link'):
                        continue
                    if ext == '.py':
                        logger.info('Not displaying %s: not a package' % display_path(filename))
                        continue
                    full = os.path.join(path, filename)
                    if os.path.isdir(full):
                        unzipped.append((filename, self.count_package(full)))
                    elif zipfile.is_zipfile(full):
                        zipped.append(filename)
                    else:
                        logger.info('Unknown file: %s' % display_path(filename))
                if zipped:
                    logger.notify('Zipped packages:')
                    logger.indent += 2
                    try:
                        for filename in zipped:
                            logger.notify(filename)
                    finally:
                        logger.indent -= 2
                else:
                    logger.notify('No zipped packages.')
                if unzipped:
                    if options.sort_files:
                        unzipped.sort(key=lambda x: -x[1])
                    logger.notify('Unzipped packages:')
                    logger.indent += 2
                    try:
                        for filename, count in unzipped:
                            logger.notify('%s (%i files)' % (filename, count))
                    finally:
                        logger.indent -= 2
                else:
                    logger.notify('No unzipped packages.')
            finally:
                logger.indent -= 2

    def count_package(self, path):
        """Count non-.pyc files under path (used for --sort-files)."""
        total = 0
        for dirpath, dirnames, filenames in os.walk(path):
            filenames = [f for f in filenames
                         if not f.lower().endswith('.pyc')]
            total += len(filenames)
        return total
|
hyperized/ansible | refs/heads/devel | lib/ansible/modules/cloud/openstack/os_router.py | 52 | #!/usr/bin/python
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_router
short_description: Create or delete routers from OpenStack
extends_documentation_fragment: openstack
version_added: "2.0"
author: "David Shrewsbury (@Shrews)"
description:
- Create or Delete routers from OpenStack. Although Neutron allows
routers to share the same name, this module enforces name uniqueness
to be more user friendly.
options:
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
name:
description:
- Name to be give to the router
required: true
admin_state_up:
description:
- Desired admin state of the created or existing router.
type: bool
default: 'yes'
enable_snat:
description:
- Enable Source NAT (SNAT) attribute.
type: bool
network:
description:
- Unique name or ID of the external gateway network.
       - Required if I(interfaces) or I(enable_snat) are provided.
project:
description:
- Unique name or ID of the project.
version_added: "2.2"
external_fixed_ips:
description:
- The IP address parameters for the external gateway network. Each
is a dictionary with the subnet name or ID (subnet) and the IP
address to assign on the subnet (ip). If no IP is specified,
one is automatically assigned from that subnet.
interfaces:
description:
- List of subnets to attach to the router internal interface. Default
gateway associated with the subnet will be automatically attached
with the router's internal interface.
In order to provide an ip address different from the default
gateway,parameters are passed as dictionary with keys as network
name or ID(net), subnet name or ID (subnet) and the IP of
port (portip) from the network.
User defined portip is often required when a multiple router need
to be connected to a single subnet for which the default gateway has
been already used.
availability_zone:
description:
- Ignored. Present for backwards compatibility
requirements: ["openstacksdk"]
'''
EXAMPLES = '''
# Create a simple router, not attached to a gateway or subnets.
- os_router:
cloud: mycloud
state: present
name: simple_router
# Create a simple router, not attached to a gateway or subnets for a given project.
- os_router:
cloud: mycloud
state: present
name: simple_router
project: myproj
# Creates a router attached to ext_network1 on an IPv4 subnet and one
# internal subnet interface.
- os_router:
cloud: mycloud
state: present
name: router1
network: ext_network1
external_fixed_ips:
- subnet: public-subnet
ip: 172.24.4.2
interfaces:
- private-subnet
# Create another router with two internal subnet interfaces. One with a user-defined port
# ip and another with default gateway.
- os_router:
cloud: mycloud
state: present
name: router2
network: ext_network1
interfaces:
- net: private-net
subnet: private-subnet
portip: 10.1.1.10
- project-subnet
# Create another router with two internal subnet interfaces. One with a user-defined port
# ip and another with the default gateway.
- os_router:
cloud: mycloud
state: present
name: router2
network: ext_network1
interfaces:
- net: private-net
subnet: private-subnet
portip: 10.1.1.10
- project-subnet
# Create another router with two internal subnet interfaces. One with a user-defined port
# ip and another with the default gateway.
- os_router:
cloud: mycloud
state: present
name: router2
network: ext_network1
interfaces:
- net: private-net
subnet: private-subnet
portip: 10.1.1.10
- project-subnet
# Update existing router1 external gateway to include the IPv6 subnet.
# Note that since 'interfaces' is not provided, any existing internal
# interfaces on an existing router will be left intact.
- os_router:
cloud: mycloud
state: present
name: router1
network: ext_network1
external_fixed_ips:
- subnet: public-subnet
ip: 172.24.4.2
- subnet: ipv6-public-subnet
ip: 2001:db8::3
# Delete router1
- os_router:
cloud: mycloud
state: absent
name: router1
'''
RETURN = '''
router:
description: Dictionary describing the router.
returned: On success when I(state) is 'present'
type: complex
contains:
id:
description: Router ID.
type: str
sample: "474acfe5-be34-494c-b339-50f06aa143e4"
name:
description: Router name.
type: str
sample: "router1"
admin_state_up:
description: Administrative state of the router.
type: bool
sample: true
status:
description: The router status.
type: str
sample: "ACTIVE"
tenant_id:
description: The tenant ID.
type: str
sample: "861174b82b43463c9edc5202aadc60ef"
external_gateway_info:
description: The external gateway parameters.
type: dict
sample: {
"enable_snat": true,
"external_fixed_ips": [
{
"ip_address": "10.6.6.99",
"subnet_id": "4272cb52-a456-4c20-8f3c-c26024ecfa81"
}
]
}
routes:
description: The extra routes configuration for L3 router.
type: list
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module
# device_owner values that mark a port as one of the router's own internal
# interfaces (plain, DVR-distributed, and HA-replicated variants).
ROUTER_INTERFACE_OWNERS = {
    'network:router_interface',
    'network:router_interface_distributed',
    'network:ha_router_replicated_interface'
}


def _router_internal_interfaces(cloud, router):
    """Yield each internal interface port of *router* whose device_owner
    identifies it as a genuine router interface."""
    for candidate in cloud.list_router_interfaces(router, 'internal'):
        if candidate['device_owner'] not in ROUTER_INTERFACE_OWNERS:
            continue
        yield candidate
def _needs_update(cloud, module, router, network, internal_subnet_ids, internal_port_ids, filters=None):
    """Decide if the given router needs an update.

    Compares the existing ``router`` against the module parameters:
    admin state, SNAT flag, external gateway network, external fixed IPs,
    and internal interfaces.  Returns True at the first difference found,
    False when the router already matches the request.

    NOTE(review): ``internal_subnet_ids`` is mutated in place below (the
    subnet ids of requested ports are appended) before being compared.
    """
    # Requested admin state differs from the router's current one.
    if router['admin_state_up'] != module.params['admin_state_up']:
        return True
    if router['external_gateway_info']:
        # check if enable_snat is set in module params
        if module.params['enable_snat'] is not None:
            # An existing gateway without an explicit flag is treated as SNAT-enabled.
            if router['external_gateway_info'].get('enable_snat', True) != module.params['enable_snat']:
                return True
    if network:
        # A gateway network was requested but none is attached, or a
        # different network is attached.
        if not router['external_gateway_info']:
            return True
        elif router['external_gateway_info']['network_id'] != network['id']:
            return True
    # check external interfaces
    if module.params['external_fixed_ips']:
        for new_iface in module.params['external_fixed_ips']:
            subnet = cloud.get_subnet(new_iface['subnet'], filters)
            exists = False
            # compare the requested interface with existing, looking for an existing match
            for existing_iface in router['external_gateway_info']['external_fixed_ips']:
                if existing_iface['subnet_id'] == subnet['id']:
                    if 'ip' in new_iface:
                        if existing_iface['ip_address'] == new_iface['ip']:
                            # both subnet id and ip address match
                            exists = True
                            break
                    else:
                        # only the subnet was given, so ip doesn't matter
                        exists = True
                        break
            # this interface isn't present on the existing router
            if not exists:
                return True
    # check internal interfaces
    if module.params['interfaces']:
        existing_subnet_ids = []
        # Subnet ids already attached through the router's internal ports.
        for port in _router_internal_interfaces(cloud, router):
            if 'fixed_ips' in port:
                for fixed_ip in port['fixed_ips']:
                    existing_subnet_ids.append(fixed_ip['subnet_id'])
        for iface in module.params['interfaces']:
            if isinstance(iface, dict):
                # Resolve each requested port into the subnet(s) it sits on.
                # NOTE(review): this inner loop runs once per dict-style
                # interface, so port subnet ids can be appended repeatedly;
                # the set() comparison below deduplicates -- confirm intended.
                for p_id in internal_port_ids:
                    p = cloud.get_port(name_or_id=p_id)
                    if 'fixed_ips' in p:
                        for fip in p['fixed_ips']:
                            internal_subnet_ids.append(fip['subnet_id'])
        if set(internal_subnet_ids) != set(existing_subnet_ids):
            # NOTE(review): rebinding the local name does not clear the
            # caller's list; it only drops the local reference.
            internal_subnet_ids = []
            return True
    return False
def _system_state_change(cloud, module, router, network, internal_ids, internal_portids, filters=None):
    """Report whether applying the requested state would change anything."""
    desired = module.params['state']
    if desired == 'absent':
        # Deleting is only a change when the router actually exists.
        return bool(router)
    if desired != 'present':
        return False
    if not router:
        # A router would have to be created.
        return True
    # Router exists: a change happens only if its settings differ.
    return _needs_update(cloud, module, router, network, internal_ids, internal_portids, filters)
def _build_kwargs(cloud, module, router, network):
    """Assemble the keyword arguments for create_router()/update_router().

    When *router* exists the kwargs target it by id (update); otherwise the
    requested name is used (create).  Gateway-related keys are only added
    when *network* was resolved.
    """
    params = module.params
    result = {'admin_state_up': params['admin_state_up']}
    if router:
        # Updating: identify the existing router by its id.
        result['name_or_id'] = router['id']
    else:
        # Creating: the new router takes the requested name.
        result['name'] = params['name']
    if network:
        result['ext_gateway_net_id'] = network['id']
        # can't send enable_snat unless we have a network
        if params.get('enable_snat') is not None:
            result['enable_snat'] = params['enable_snat']
    if params['external_fixed_ips']:
        fixed_ips = []
        for entry in params['external_fixed_ips']:
            subnet = cloud.get_subnet(entry['subnet'])
            fixed = {'subnet_id': subnet['id']}
            if 'ip' in entry:
                fixed['ip_address'] = entry['ip']
            fixed_ips.append(fixed)
        result['ext_fixed_ips'] = fixed_ips
    return result
def _validate_subnets(module, cloud, filters=None):
    """Resolve the module's interface parameters into OpenStack ids.

    Returns the tuple ``(external_subnet_ids, internal_subnet_ids,
    internal_port_ids)``:

    * external_subnet_ids -- ids of the subnets named in external_fixed_ips
    * internal_subnet_ids -- ids of subnets to attach via their default port
    * internal_port_ids   -- ids of existing (or freshly created) ports for
      dict-style interfaces that carry a ``portip``

    Any unresolvable name aborts the module via ``module.fail_json``.
    """
    external_subnet_ids = []
    internal_subnet_ids = []
    internal_port_ids = []
    existing_port_ips = []
    # NOTE(review): existing_port_ids is never used below -- dead variable.
    existing_port_ids = []
    if module.params['external_fixed_ips']:
        for iface in module.params['external_fixed_ips']:
            subnet = cloud.get_subnet(iface['subnet'])
            if not subnet:
                module.fail_json(msg='subnet %s not found' % iface['subnet'])
            external_subnet_ids.append(subnet['id'])
    if module.params['interfaces']:
        for iface in module.params['interfaces']:
            # Plain string entries name a subnet attached with its default port.
            # NOTE(review): isinstance(iface, str) misses py2 ``unicode``
            # values -- confirm against how Ansible delivers list items.
            if isinstance(iface, str):
                subnet = cloud.get_subnet(iface, filters)
                if not subnet:
                    module.fail_json(msg='subnet %s not found' % iface)
                internal_subnet_ids.append(subnet['id'])
            elif isinstance(iface, dict):
                # Dict entries carry net/subnet names plus an optional portip.
                subnet = cloud.get_subnet(iface['subnet'], filters)
                if not subnet:
                    module.fail_json(msg='subnet %s not found' % iface['subnet'])
                net = cloud.get_network(iface['net'])
                if not net:
                    module.fail_json(msg='net %s not found' % iface['net'])
                if "portip" not in iface:
                    # No explicit port ip: fall back to the subnet's default port.
                    internal_subnet_ids.append(subnet['id'])
                elif not iface['portip']:
                    module.fail_json(msg='put an ip in portip or remove it from list to assign default port to router')
                else:
                    # Reuse an existing port already holding the requested ip...
                    for existing_port in cloud.list_ports(filters={'network_id': net.id}):
                        for fixed_ip in existing_port['fixed_ips']:
                            if iface['portip'] == fixed_ip['ip_address']:
                                internal_port_ids.append(existing_port.id)
                                existing_port_ips.append(fixed_ip['ip_address'])
                    # ...or create a new port carrying that ip on the subnet.
                    if iface['portip'] not in existing_port_ips:
                        p = cloud.create_port(network_id=net.id, fixed_ips=[{'ip_address': iface['portip'], 'subnet_id': subnet.id}])
                        if p:
                            internal_port_ids.append(p.id)
    return external_subnet_ids, internal_subnet_ids, internal_port_ids
def main():
    """Ansible entry point: create, update, or delete an OpenStack router.

    Parses the module parameters, resolves names to OpenStack ids, then
    converges the router toward the requested state.  Always exits through
    ``module.exit_json``/``module.fail_json``.
    """
    argument_spec = openstack_full_argument_spec(
        state=dict(default='present', choices=['absent', 'present']),
        name=dict(required=True),
        admin_state_up=dict(type='bool', default=True),
        enable_snat=dict(type='bool'),
        network=dict(default=None),
        interfaces=dict(type='list', default=None),
        external_fixed_ips=dict(type='list', default=None),
        project=dict(default=None)
    )
    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec,
                           supports_check_mode=True,
                           **module_kwargs)
    state = module.params['state']
    name = module.params['name']
    network = module.params['network']
    project = module.params['project']
    # external_fixed_ips describe addresses on the gateway network, so the
    # gateway network itself must accompany them.
    if module.params['external_fixed_ips'] and not network:
        module.fail_json(msg='network is required when supplying external_fixed_ips')
    sdk, cloud = openstack_cloud_from_module(module)
    try:
        # Scope all lookups to the requested project when one was given.
        if project is not None:
            proj = cloud.get_project(project)
            if proj is None:
                module.fail_json(msg='Project %s could not be found' % project)
            project_id = proj['id']
            filters = {'tenant_id': project_id}
        else:
            project_id = None
            filters = None
        router = cloud.get_router(name, filters=filters)
        net = None
        if network:
            net = cloud.get_network(network)
            if not net:
                module.fail_json(msg='network %s not found' % network)
        # Validate and cache the subnet IDs so we can avoid duplicate checks
        # and expensive API calls.
        external_ids, subnet_internal_ids, internal_portids = _validate_subnets(module, cloud, filters)
        if module.check_mode:
            module.exit_json(
                changed=_system_state_change(cloud, module, router, net, subnet_internal_ids, internal_portids, filters)
            )
        if state == 'present':
            changed = False
            if not router:
                # No router yet: create it and attach the requested interfaces.
                kwargs = _build_kwargs(cloud, module, router, net)
                if project_id:
                    kwargs['project_id'] = project_id
                router = cloud.create_router(**kwargs)
                for int_s_id in subnet_internal_ids:
                    cloud.add_router_interface(router, subnet_id=int_s_id)
                changed = True
                # add interface by port id as well
                for int_p_id in internal_portids:
                    cloud.add_router_interface(router, port_id=int_p_id)
                changed = True
            else:
                if _needs_update(cloud, module, router, net, subnet_internal_ids, internal_portids, filters):
                    kwargs = _build_kwargs(cloud, module, router, net)
                    updated_router = cloud.update_router(**kwargs)
                    # Protect against update_router() not actually
                    # updating the router.
                    if not updated_router:
                        changed = False
                    # NOTE(review): a successful gateway/admin-state update with
                    # no interface changes leaves `changed` False -- confirm
                    # whether that is intended.
                    # On a router update, if any internal interfaces were supplied,
                    # just detach all existing internal interfaces and attach the new.
                    if internal_portids or subnet_internal_ids:
                        router = updated_router
                        ports = _router_internal_interfaces(cloud, router)
                        for port in ports:
                            cloud.remove_router_interface(router, port_id=port['id'])
                        if internal_portids:
                            # NOTE(review): the port list is re-resolved after the
                            # detach above, presumably because removal can free or
                            # recreate ports -- confirm.
                            external_ids, subnet_internal_ids, internal_portids = _validate_subnets(module, cloud, filters)
                            for int_p_id in internal_portids:
                                cloud.add_router_interface(router, port_id=int_p_id)
                            changed = True
                        if subnet_internal_ids:
                            for s_id in subnet_internal_ids:
                                cloud.add_router_interface(router, subnet_id=s_id)
                            changed = True
            module.exit_json(changed=changed,
                             router=router,
                             id=router['id'])
        elif state == 'absent':
            if not router:
                module.exit_json(changed=False)
            else:
                # We need to detach all internal interfaces on a router before
                # we will be allowed to delete it.
                ports = _router_internal_interfaces(cloud, router)
                router_id = router['id']
                for port in ports:
                    cloud.remove_router_interface(router, port_id=port['id'])
                cloud.delete_router(router_id)
                module.exit_json(changed=True)
    except sdk.exceptions.OpenStackCloudException as e:
        module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
|
Unknowncmbk/HiddenMarkovModel | refs/heads/master | test.py | 1 | #
# Provides test cases for various test cases.
#
# Compiled against Python 2.7
# Author: Stephen Bahr (sbahr@bu.edu)
import copy
import math
import unittest
import markov
class Test(unittest.TestCase):
    '''Unit tests for assignment 5.

    FIX(review): this file imports the model module as ``markov`` (see the
    import at the top of the file), but the original test body referenced
    it as ``hw5``, which raised NameError as soon as the class body was
    evaluated.  All references now use ``markov``.  ``iteritems()`` was
    also replaced with ``items()`` so the tests run on Python 2.7 and 3.
    '''

    # (part-of-speech, word) training pairs; '<s>' marks sentence boundaries.
    _TRAINING_DATA = (
        ('<s>', '<s>'),
        ('N', 'spot'),
        ('V', 'runs'),
        ('<s>', '<s>'),
        ('N', 'spot'),
        ('V', 'runs'),
        ('<s>', '<s>'),
        ('N', 'spot'),
        ('N', 'runs'),
        ('<s>', '<s>'),
        ('V', 'runs'),
        ('V', 'spot'),
        ('<s>', '<s>'),
        ('V', 'runs'),
        ('N', 'spot'),
        ('<s>', '<s>'))

    # Log-probabilities a correct trainer should learn from _TRAINING_DATA.
    _TRANSITION_PROBABILITIES = {
        # Transitions from <s>
        ('<s>', 'N'): math.log(0.6),
        ('<s>', 'V'): math.log(0.4),
        # Transitions from N
        ('N', '<s>'): math.log(0.4),
        ('N', 'N'): math.log(0.2),
        ('N', 'V'): math.log(0.4),
        # Transitions from V
        ('V', '<s>'): math.log(0.6),
        ('V', 'N'): math.log(0.2),
        ('V', 'V'): math.log(0.2),
    }

    _EMISSION_PROBABILITIES = {
        # Emission probabilities from <s>
        ('<s>', '<s>'): math.log(1),
        # Emission probabilities from N
        ('N', 'runs'): math.log(0.2),
        ('N', 'spot'): math.log(0.8),
        # Emission probabilities from V
        ('V', 'spot'): math.log(0.2),
        ('V', 'runs'): math.log(0.8),
    }

    # A first-order model built directly from the tables above.
    _MODEL = markov.HiddenMarkovModel(1, copy.deepcopy(_EMISSION_PROBABILITIES),
        copy.deepcopy(_TRANSITION_PROBABILITIES), ['N', 'V', '<s>'],
        set(['runs', 'spot']))

    # Expected Viterbi lattice for the observation ['spot', 'runs']:
    # each entry maps state -> (log score, best predecessor).
    _LATTICE = [
        {'<s>': (math.log(1), None)},
        {'N': (math.log(0.48), '<s>'), 'V': (math.log(0.08), '<s>')},
        {'N': (math.log(0.0192), 'N'), 'V': (math.log(0.1536), 'N')},
        {'<s>': (math.log(0.09216), 'V')}]

    def test_train(self):
        """Training recovers the hand-computed probability tables."""
        model = markov.HiddenMarkovModel.train(self._TRAINING_DATA,
                                               markov.NO_SMOOTHING,
                                               markov.PREDICT_MOST_COMMON_PART_OF_SPEECH,
                                               order=1)
        self.assertEqual(1, model.order)
        for (p0, p1), log_expected in self._TRANSITION_PROBABILITIES.items():
            # Missing entries default to a very small log probability.
            found = math.exp(model.transition.get((p0, p1), -50))
            expected = math.exp(log_expected)
            self.assertAlmostEqual(expected, found, places=2, msg=(
                'Pr(%s=>%s): should be %s, is %s' % (p0, p1, expected, found)))
        for (p, w), log_expected in self._EMISSION_PROBABILITIES.items():
            found = math.exp(model.emission.get((p, w), -50))
            expected = math.exp(log_expected)
            self.assertAlmostEqual(expected, found, places=2, msg=(
                'Pr(%s|%s): should be %s, is %s' % (w, p, expected, found)))

    def test_compute_lattice(self):
        """The Viterbi lattice matches the hand-computed reference."""
        lattice = self._MODEL.compute_lattice(['spot', 'runs'])
        self.assertEqual(len(lattice), len(self._LATTICE))
        for expected, found in zip(self._LATTICE, lattice):
            for pos in expected:
                log_expected_score, expected_previous = expected[pos]
                found_score, found_previous = found.get(pos, (0, None))
                expected_score = math.exp(log_expected_score)
                self.assertEqual(expected_previous, found_previous)
                self.assertAlmostEqual(expected_score, math.exp(found_score), places=4)

    def test_find_best_path(self):
        """Backtracking through the reference lattice yields N, V."""
        self.assertEqual(['N', 'V'],
                         markov.HiddenMarkovModel.find_best_path(self._LATTICE))

    def test_baseline(self):
        """The baseline model picks the most common tag per word."""
        model = markov.BaselineModel(
            self._TRAINING_DATA + (('N', 'spot'), ('N', 'spot')))
        self.assertEqual('N', model.dictionary['spot'])
        self.assertEqual('V', model.dictionary['runs'])
        self.assertEqual('N', model.default)

    def test_add_one_smoothing(self):
        """Add-one smoothing shifts mass toward unseen pairs."""
        model = markov.HiddenMarkovModel.train(
            (('N', 'spot'),
             ('V', 'runs'),
             ('N', 'spot'),
             ('N', 'spot')),
            markov.ADD_ONE_SMOOTHING,
            markov.PREDICT_MOST_COMMON_PART_OF_SPEECH,
            order=1)
        self.assertAlmostEqual(0.8, math.exp(model.emission['N', 'spot']), places=2)
        self.assertAlmostEqual(0.67, math.exp(model.emission['V', 'runs']),
                               places=2)
        self.assertAlmostEqual(0.2, math.exp(model.emission['N', 'runs']), places=2)
        self.assertAlmostEqual(0.33, math.exp(model.emission['V', 'spot']),
                               places=2)
        self.assertAlmostEqual(0.5, math.exp(model.transition['N', 'N']), places=2)
        self.assertAlmostEqual(0.5, math.exp(model.transition['N', 'V']), places=2)
        self.assertAlmostEqual(0.33, math.exp(model.transition['V', 'V']), places=2)
        self.assertAlmostEqual(0.67, math.exp(model.transition['V', 'N']), places=2)

    def test_hmm_train_has_default_parameters(self):
        # Make sure that later parameters to HMM.train are optional
        model = markov.HiddenMarkovModel.train(self._TRAINING_DATA)
if __name__ == '__main__':
unittest.main()
|
CospanDesign/nysa-tx1-pcie-platform | refs/heads/master | tx1_pcie/slave/wb_tx1_pcie/cocotb/dut_driver.py | 1 | #PUT LICENCE HERE!
"""
wb_tx1_pcie Driver
"""
import sys
import os
import time
from array import array as Array
sys.path.append(os.path.join(os.path.dirname(__file__),
os.pardir))
from nysa.host.driver import driver
#Sub Module ID
#Use 'nysa devices' to get a list of different available devices
DEVICE_TYPE = "Experiment"
SDB_ABI_VERSION_MINOR = 0
SDB_VENDOR_ID = 0
try:
SDB_ABI_VERSION_MINOR = 0
SDB_VENDOR_ID = 0x800000000000C594
except SyntaxError:
pass
#Register Constants
CONTROL_ADDR = 0x00000000
STATUS_ADDR = 0x00000001
CONFIG_COMMAND = 0x00000002
CONFIG_STATUS = 0x00000003
CONFIG_DCOMMAND = 0x00000004
CONFIG_DCOMMAND2 = 0x00000005
CONFIG_DSTATUS = 0x00000006
CONFIG_LCOMMAND = 0x00000007
CONFIG_LSTATUS = 0x00000008
CONFIG_LINK_STATE = 0x00000009
RX_ELEC_IDLE = 0x0000000A
LTSSM_STATE = 0x0000000B
GTX_PLL_LOCK = 0x0000000C
TX_DIFF_CTR = 0x0000000D
STS_BIT_LINKUP = 0
STS_BIT_USR_RST = 1
STS_BIT_PCIE_RST_N = 2
STS_BIT_PHY_RDY_N = 3
STS_PLL_LOCKED = 4
STS_CLK_IN_STOPPED = 5
class wb_tx1_pcieDriver(driver.Driver):
    """Communication with a DutDriver wb_tx1_pcie core.

    Thin register-access wrapper around the Nysa ``driver.Driver`` base:
    each method reads or writes one of the register addresses defined at
    module level.
    """

    # LTSSM (PCIe Link Training and Status State Machine) code -> name.
    # Replaces the original ~50-branch if/elif chain with a single lookup.
    _LTSSM_STATE_NAMES = {
        0x000: "Detect.Quiet",
        0x001: "Detect.Quiet.Gen2",
        0x002: "Detect.Active",
        0x003: "Detect.ActiveSecond",
        0x004: "Polling.Active",
        0x005: "Polling.Config",
        0x006: "Polling.Comp.Pre.Send.Eios",
        0x007: "Polling.Comp.Pre.Timeout",
        0x008: "Polling.Comp.Send.Pattern",
        0x009: "Polling.Comp.Post.Send.Eior",
        0x00A: "Polling.Comp.Post.Timeout",
        0x00B: "Cfg.Lwidth.St0",
        0x00C: "Cfg.Lwidth.St1",
        0x00D: "Cfg.LWidth.Ac0",
        0x00E: "Cfg.Lwidth.Ac1",
        0x00F: "Cfg.Lnum.Wait",
        0x010: "Cfg.Lnum.Acpt",
        0x011: "Cfg.Complete.1",
        0x012: "Cfg.Complete.2",
        0x013: "Cfg.Complete.4",
        0x014: "Cfg.Complete.8",
        0x015: "Cfg.Idle",
        0x016: "L0",
        0x017: "L1.Entry.0",
        0x018: "L1.Entry.1",
        0x019: "L1.Entry.2",
        0x01A: "L1.Idle",
        0x01B: "L1.Exit",
        0x01C: "Rec.RcvLock",
        0x01D: "Rec.RcvCfg",
        0x01E: "Rec.Speed.0",
        0x01F: "Rec.Speed.1",
        0x020: "Rec.Idle",
        0x021: "Hot.Rst",
        0x022: "Disabled.Entry.0",
        0x023: "Disabled.Entry.1",
        0x024: "Disabled.Entry.2",
        0x025: "Disabled.Idle",
        0x026: "Dp.Cfg.Lwidth.St0",
        0x027: "Dp.Cfg.Lwidth.St1",
        0x028: "Dp.Cfg.Lwidth.St2",
        0x029: "Dp.Cfg.Lwidth.Ac0",
        0x02A: "Dp.Cfg.Lwidth.Ac1",
        0x02B: "Dp.Cfg.Lwidth.Wait",
        0x02C: "Dp.Cfg.Lwidth.Acpt",
        0x02D: "To.2.Detect",
        0x02E: "Lpbk.Entry.0",
        0x02F: "Lpbk.Entry.1",
        0x030: "Lpbk.Active.0",
        0x031: "Lpbk.Exit0",
        0x032: "Lpbk.Exit1",
        0x033: "Lpbkm.Entry0",
    }

    @staticmethod
    def get_abi_class():
        """Return the SDB ABI class (always 0 for this core)."""
        return 0

    @staticmethod
    def get_abi_major():
        """Return the ABI major number derived from the device type name."""
        return driver.get_device_id_from_name(DEVICE_TYPE)

    @staticmethod
    def get_abi_minor():
        """Return the ABI minor version."""
        return SDB_ABI_VERSION_MINOR

    @staticmethod
    def get_vendor_id():
        """Return the SDB vendor identifier."""
        return SDB_VENDOR_ID

    def __init__(self, nysa, urn, debug=False):
        super(wb_tx1_pcieDriver, self).__init__(nysa, urn, debug)

    # -- Control register --------------------------------------------------

    def set_control(self, control):
        """Write the full control register."""
        self.write_register(CONTROL_ADDR, control)

    def get_control(self):
        """Read back the full control register."""
        return self.read_register(CONTROL_ADDR)

    def enable_control_0_bit(self, enable):
        """Set or clear bit 0 of the control register.

        FIX(review): the original referenced an undefined ``ZERO_BIT``
        name, raising NameError whenever it was called; bit index 0 (as
        the method name states) is used now.
        """
        self.enable_register_bit(CONTROL_ADDR, 0, enable)

    def is_control_0_bit_set(self):
        """Return True when bit 0 of the control register is set."""
        return self.is_register_bit_set(CONTROL_ADDR, 0)

    # -- TX differential swing control ------------------------------------

    def set_tx_diff(self, value):
        """Write the TX differential control register."""
        self.write_register(TX_DIFF_CTR, value)

    def get_tx_diff(self):
        """Read the TX differential control register."""
        return self.read_register(TX_DIFF_CTR)

    # -- Status register bit queries ---------------------------------------

    def is_linkup(self):
        """Return True when the PCIe link-up status bit is set."""
        return self.is_register_bit_set(STATUS_ADDR, STS_BIT_LINKUP)

    def is_pcie_usr_rst(self):
        """Return True when the user-reset status bit is set."""
        return self.is_register_bit_set(STATUS_ADDR, STS_BIT_USR_RST)

    def is_pcie_phy_rst(self):
        """Return True when the PCIe reset status bit is set."""
        return self.is_register_bit_set(STATUS_ADDR, STS_BIT_PCIE_RST_N)

    def is_pll_locked(self):
        """Return True when the PLL-locked status bit is set."""
        return self.is_register_bit_set(STATUS_ADDR, STS_PLL_LOCKED)

    def is_clk_in_stopped(self):
        """Return True when the input-clock-stopped status bit is set."""
        return self.is_register_bit_set(STATUS_ADDR, STS_CLK_IN_STOPPED)

    def is_pcie_phy_ready(self):
        """Return True when the PHY is ready (the ready_N bit is clear)."""
        return not self.is_register_bit_set(STATUS_ADDR, STS_BIT_PHY_RDY_N)

    # -- Link training / PLL diagnostics -----------------------------------

    def get_ltssm_state(self):
        """Return the human-readable name of the current LTSSM state."""
        state = self.read_register(LTSSM_STATE)
        return self._LTSSM_STATE_NAMES.get(state, "Unknown State: 0x%02X" % state)

    def get_gtx_pll_lock_reg(self):
        """Read the raw GTX PLL lock register."""
        return self.read_register(GTX_PLL_LOCK)

    # -- PCIe configuration-space shadow registers -------------------------

    def get_cfg_command(self):
        """Read the shadowed configuration Command register."""
        return self.read_register(CONFIG_COMMAND)

    def get_cfg_status(self):
        """Read the shadowed configuration Status register."""
        return self.read_register(CONFIG_STATUS)

    def get_cfg_dcommand(self):
        """Read the shadowed Device Command register."""
        return self.read_register(CONFIG_DCOMMAND)

    def get_cfg_dcommand2(self):
        """Read the shadowed Device Command 2 register."""
        return self.read_register(CONFIG_DCOMMAND2)

    def get_cfg_dstatus(self):
        """Read the shadowed Device Status register."""
        return self.read_register(CONFIG_DSTATUS)

    def get_cfg_lcommand(self):
        """Read the shadowed Link Command register."""
        return self.read_register(CONFIG_LCOMMAND)

    def get_cfg_lstatus(self):
        """Read the shadowed Link Status register."""
        return self.read_register(CONFIG_LSTATUS)

    def get_link_state(self):
        """Read the link-state register."""
        return self.read_register(CONFIG_LINK_STATE)

    def get_elec_idle(self):
        """Read the receiver electrical-idle register."""
        return self.read_register(RX_ELEC_IDLE)
|
danlrobertson/servo | refs/heads/master | tests/wpt/web-platform-tests/service-workers/service-worker/resources/update-max-aged-worker-imported-script.py | 46 | import time
def main(request, response):
    """wptserve handler: serve a cache-friendly JavaScript snippet that
    embeds the time at which this response body was generated."""
    last_modified = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
    headers = [
        ('Content-Type', 'application/javascript'),
        ('Cache-Control', 'max-age=86400'),
        ('Last-Modified', last_modified),
    ]
    body = '''
    const importTime = {time:8f};
    '''.format(time=time.time())
    return headers, body
|
TangXT/edx-platform | refs/heads/master | common/djangoapps/user_api/middleware.py | 9 | """
Middleware for user api.
Adds user's tags to tracking event context.
"""
from track.contexts import COURSE_REGEX
from eventtracking import tracker
from user_api.models import UserCourseTag
from opaque_keys.edx.locations import SlashSeparatedCourseKey
class UserTagsEventContextMiddleware(object):
    """Middleware that adds a user's tags to tracking event context."""
    CONTEXT_NAME = 'user_tags_context'

    def process_request(self, request):
        """
        Enter a tracking context carrying the request's course id and, for
        authenticated users, their course tags as a key -> value mapping.
        """
        uri_match = COURSE_REGEX.match(request.build_absolute_uri())
        course_id = None
        if uri_match:
            course_id = uri_match.group('course_id')
            course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
        context = {}
        if course_id:
            context['course_id'] = course_id
            if request.user.is_authenticated():
                tag_pairs = UserCourseTag.objects.filter(
                    user=request.user.pk,
                    course_id=course_key,
                ).values_list('key', 'value')
                context['course_user_tags'] = dict(tag_pairs)
            else:
                # Anonymous users have no tags, but the key is still present.
                context['course_user_tags'] = {}
        tracker.get_tracker().enter_context(self.CONTEXT_NAME, context)

    def process_response(self, request, response):  # pylint: disable=unused-argument
        """Exit the tracking context, ignoring the case where it was never
        entered."""
        try:
            tracker.get_tracker().exit_context(self.CONTEXT_NAME)
        except:  # pylint: disable=bare-except
            pass
        return response
|
atsolakid/edx-platform | refs/heads/master | common/djangoapps/student/migrations/0048_add_profile_image_version.py | 84 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: add a nullable ``profile_image_uploaded_at``
        DateTime column to the ``auth_userprofile`` table."""
        # Adding field 'UserProfile.profile_image_uploaded_at'
        db.add_column('auth_userprofile', 'profile_image_uploaded_at',
                      self.gf('django.db.models.fields.DateTimeField')(null=True),
                      keep_default=False)
def backwards(self, orm):
# Deleting field 'UserProfile.profile_image_uploaded_at'
db.delete_column('auth_userprofile', 'profile_image_uploaded_at')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'student.anonymoususerid': {
'Meta': {'object_name': 'AnonymousUserId'},
'anonymous_user_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.courseaccessrole': {
'Meta': {'unique_together': "(('user', 'org', 'course_id', 'role'),)", 'object_name': 'CourseAccessRole'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'org': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'blank': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.courseenrollment': {
'Meta': {'ordering': "('user', 'course_id')", 'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.courseenrollmentallowed': {
'Meta': {'unique_together': "(('email', 'course_id'),)", 'object_name': 'CourseEnrollmentAllowed'},
'auto_enroll': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'student.dashboardconfiguration': {
'Meta': {'object_name': 'DashboardConfiguration'},
'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'recent_enrollment_time_delta': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'student.entranceexamconfiguration': {
'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'EntranceExamConfiguration'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'skip_entrance_exam': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.linkedinaddtoprofileconfiguration': {
'Meta': {'object_name': 'LinkedInAddToProfileConfiguration'},
'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'company_identifier': ('django.db.models.fields.TextField', [], {}),
'dashboard_tracking_code': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'trk_partner_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '10', 'blank': 'True'})
},
'student.loginfailures': {
'Meta': {'object_name': 'LoginFailures'},
'failure_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lockout_until': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.passwordhistory': {
'Meta': {'object_name': 'PasswordHistory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'time_set': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.pendingemailchange': {
'Meta': {'object_name': 'PendingEmailChange'},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_email': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.pendingnamechange': {
'Meta': {'object_name': 'PendingNameChange'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'rationale': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.registration': {
'Meta': {'object_name': 'Registration', 'db_table': "'auth_registration'"},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.userprofile': {
'Meta': {'object_name': 'UserProfile', 'db_table': "'auth_userprofile'"},
'allow_certificate': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'bio': ('django.db.models.fields.CharField', [], {'max_length': '3000', 'db_index': 'False', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'courseware': ('django.db.models.fields.CharField', [], {'default': "'course.xml'", 'max_length': '255', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
'goals': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'has_profile_image': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'level_of_education': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'mailing_address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'meta': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"}),
'year_of_birth': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'student.usersignupsource': {
'Meta': {'object_name': 'UserSignupSource'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.userstanding': {
'Meta': {'object_name': 'UserStanding'},
'account_status': ('django.db.models.fields.CharField', [], {'max_length': '31', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'standing_last_changed_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'standing'", 'unique': 'True', 'to': "orm['auth.User']"})
},
'student.usertestgroup': {
'Meta': {'object_name': 'UserTestGroup'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'db_index': 'True', 'symmetrical': 'False'})
}
}
complete_apps = ['student']
|
gunthercox/ChatterBot | refs/heads/master | chatterbot/storage/__init__.py | 2 | from chatterbot.storage.storage_adapter import StorageAdapter
from chatterbot.storage.django_storage import DjangoStorageAdapter
from chatterbot.storage.mongodb import MongoDatabaseAdapter
from chatterbot.storage.sql_storage import SQLStorageAdapter
__all__ = (
'StorageAdapter',
'DjangoStorageAdapter',
'MongoDatabaseAdapter',
'SQLStorageAdapter',
)
|
denisff/python-for-android | refs/heads/master | python-modules/twisted/twisted/trial/test/test_output.py | 61 | # Copyright (c) 2008 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for the output generated by trial.
"""
import os, StringIO
from twisted.scripts import trial
from twisted.trial import runner
from twisted.trial.test import packages
def runTrial(*args):
    """
    Run trial with the given command-line arguments and return everything the
    runner wrote to its output stream.
    """
    from twisted.trial import reporter

    options = trial.Options()
    options.parseOptions(args)
    stream = StringIO.StringIO()
    trialRunner = runner.TrialRunner(
        reporter.VerboseTextReporter,
        stream=stream,
        workingDirectory=options['temp-directory'])
    # Run for the side effect of writing to the stream; the result object is
    # not needed.
    trialRunner.run(trial._getSuite(options))
    return stream.getvalue()
class TestImportErrors(packages.SysPathManglingTest):
    """Actually run trial as if on the command line and check that the output
    is what we expect.
    """
    debug = False
    # Directory (set up by SysPathManglingTest) containing the test packages.
    parent = "_testImportErrors"

    def runTrial(self, *args):
        # Run trial in a fresh temporary working directory.
        return runTrial('--temp-directory', self.mktemp(), *args)

    def _print(self, stuff):
        # Debugging helper: echo a value while passing it through a callback
        # chain unchanged.
        print stuff
        return stuff

    def failUnlessIn(self, container, containee, *args, **kwargs):
        # redefined to be useful in callbacks
        # NOTE: argument order is (container, containee) -- the reverse of the
        # superclass -- and the container is returned so calls chain.
        super(TestImportErrors, self).failUnlessIn(
            containee, container, *args, **kwargs)
        return container

    def failIfIn(self, container, containee, *args, **kwargs):
        # redefined to be useful in callbacks
        # Same reversed (container, containee) order as failUnlessIn above.
        super(TestImportErrors, self).failIfIn(
            containee, container, *args, **kwargs)
        return container

    def test_trialRun(self):
        # Smoke test: trial runs with no test names given.
        self.runTrial()

    def test_nonexistentModule(self):
        # A missing module inside an existing package is reported as an error.
        d = self.runTrial('twisted.doesntexist')
        self.failUnlessIn(d, '[ERROR]')
        self.failUnlessIn(d, 'twisted.doesntexist')
        return d

    def test_nonexistentPackage(self):
        # A missing top-level package is reported as ModuleNotFound.
        d = self.runTrial('doesntexist')
        self.failUnlessIn(d, 'doesntexist')
        self.failUnlessIn(d, 'ModuleNotFound')
        self.failUnlessIn(d, '[ERROR]')
        return d

    def test_nonexistentPackageWithModule(self):
        # A module inside a missing package is reported as ObjectNotFound.
        d = self.runTrial('doesntexist.barney')
        self.failUnlessIn(d, 'doesntexist.barney')
        self.failUnlessIn(d, 'ObjectNotFound')
        self.failUnlessIn(d, '[ERROR]')
        return d

    def test_badpackage(self):
        # A package that fails to import is an error, without a spurious
        # IOError in the output.
        d = self.runTrial('badpackage')
        self.failUnlessIn(d, '[ERROR]')
        self.failUnlessIn(d, 'badpackage')
        self.failIfIn(d, 'IOError')
        return d

    def test_moduleInBadpackage(self):
        # Asking for a module inside a broken package reports the full name.
        d = self.runTrial('badpackage.test_module')
        self.failUnlessIn(d, "[ERROR]")
        self.failUnlessIn(d, "badpackage.test_module")
        self.failIfIn(d, 'IOError')
        return d

    def test_badmodule(self):
        # A module with a syntax/import-time error is reported by name, not by
        # its repr ('<module ...').
        d = self.runTrial('package.test_bad_module')
        self.failUnlessIn(d, '[ERROR]')
        self.failUnlessIn(d, 'package.test_bad_module')
        self.failIfIn(d, 'IOError')
        self.failIfIn(d, '<module ')
        return d

    def test_badimport(self):
        # A module that imports something nonexistent is an error.
        d = self.runTrial('package.test_import_module')
        self.failUnlessIn(d, '[ERROR]')
        self.failUnlessIn(d, 'package.test_import_module')
        self.failIfIn(d, 'IOError')
        self.failIfIn(d, '<module ')
        return d

    def test_recurseImport(self):
        # Recursing into a package surfaces both broken modules.
        d = self.runTrial('package')
        self.failUnlessIn(d, '[ERROR]')
        self.failUnlessIn(d, 'test_bad_module')
        self.failUnlessIn(d, 'test_import_module')
        self.failIfIn(d, '<module ')
        self.failIfIn(d, 'IOError')
        return d

    def test_recurseImportErrors(self):
        # Recursing reports the underlying ImportError message.
        d = self.runTrial('package2')
        self.failUnlessIn(d, '[ERROR]')
        self.failUnlessIn(d, 'package2')
        self.failUnlessIn(d, 'test_module')
        self.failUnlessIn(d, "No module named frotz")
        self.failIfIn(d, '<module ')
        self.failIfIn(d, 'IOError')
        return d

    def test_nonRecurseImportErrors(self):
        # Same as above with recursion disabled (-N).
        d = self.runTrial('-N', 'package2')
        self.failUnlessIn(d, '[ERROR]')
        self.failUnlessIn(d, "No module named frotz")
        self.failIfIn(d, '<module ')
        return d

    def test_regularRun(self):
        # A healthy module runs cleanly and reports success.
        d = self.runTrial('package.test_module')
        self.failIfIn(d, '[ERROR]')
        self.failIfIn(d, 'IOError')
        self.failUnlessIn(d, 'OK')
        self.failUnlessIn(d, 'PASSED (successes=1)')
        return d

    def test_filename(self):
        # Tests can also be named by file path instead of module name.
        self.mangleSysPath(self.oldPath)
        d = self.runTrial(
            os.path.join(self.parent, 'package', 'test_module.py'))
        self.failIfIn(d, '[ERROR]')
        self.failIfIn(d, 'IOError')
        self.failUnlessIn(d, 'OK')
        self.failUnlessIn(d, 'PASSED (successes=1)')
        return d

    def test_dosFile(self):
        ## XXX -- not really an output test, more of a script test
        # A module with DOS (CRLF) line endings still runs successfully.
        self.mangleSysPath(self.oldPath)
        d = self.runTrial(
            os.path.join(self.parent,
                         'package', 'test_dos_module.py'))
        self.failIfIn(d, '[ERROR]')
        self.failIfIn(d, 'IOError')
        self.failUnlessIn(d, 'OK')
        self.failUnlessIn(d, 'PASSED (successes=1)')
        return d
|
msebire/intellij-community | refs/heads/master | python/testData/refactoring/rename/googleDocStringAttribute.py | 53 | class C:
"""
Attributes:
foo : ignored
"""
def __init__(self):
self.fo<caret>o = 42
|
MotorolaMobilityLLC/external-chromium_org | refs/heads/kitkat-mr1-release-falcon-gpe | chrome/browser/safe_browsing/safe_browsing_testserver.py | 74 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Wraps the upstream safebrowsing_test_server.py to run in Chrome tests."""
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(BASE_DIR, '..', '..', '..', 'net',
'tools', 'testserver'))
import testserver_base
class ServerRunner(testserver_base.TestServerRunner):
    """TestServerRunner for safebrowsing_test_server.py."""

    def create_server(self, server_data):
        # The upstream safebrowsing test server lives under third_party; make
        # it importable before importing it.
        sys.path.append(os.path.join(BASE_DIR, '..', '..', '..', 'third_party',
                                     'safe_browsing', 'testing'))
        import safebrowsing_test_server
        server = safebrowsing_test_server.SetupServer(
            self.options.data_file, self.options.host, self.options.port,
            opt_enforce_caching=False, opt_validate_database=True)
        print 'Safebrowsing HTTP server started on port %d...' % server.server_port
        # Report the bound port back to the harness via server_data.
        server_data['port'] = server.server_port
        return server

    def add_options(self):
        # Extend the base runner's options with the test-data file path.
        testserver_base.TestServerRunner.add_options(self)
        self.option_parser.add_option('--data-file', dest='data_file',
                                      help='File containing safebrowsing test '
                                           'data and expectations')
# Script entry point: run the server and exit with the runner's status code.
if __name__ == '__main__':
  sys.exit(ServerRunner().main())
|
tjsavage/rototutor_djangononrel | refs/heads/master | dbindexes.py | 228 | from dbindexer import autodiscover
# Trigger dbindexer's autodiscovery at import time so that index definitions
# are registered -- presumably required before any queries run; confirm
# against the dbindexer documentation.
autodiscover()
|
EduPepperPD/pepper2013 | refs/heads/master | lms/djangoapps/bulk_email/models.py | 8 | """
Models for bulk email
WE'RE USING MIGRATIONS!
If you make changes to this model, be sure to create an appropriate migration
file and check it in at the same time as your model changes. To do that,
1. Go to the edx-platform dir
2. ./manage.py lms schemamigration bulk_email --auto description_of_your_change
3. Add the migration file created in edx-platform/lms/djangoapps/bulk_email/migrations/
"""
import logging
from django.db import models
from django.contrib.auth.models import User
log = logging.getLogger(__name__)
class Email(models.Model):
    """
    Abstract base class for common information for an email.
    """
    # User who composed the email; defaults to the user with pk 1.
    sender = models.ForeignKey(User, default=1, blank=True, null=True)
    # Identifier used to reference this email (indexed for lookup).
    slug = models.CharField(max_length=128, db_index=True)
    subject = models.CharField(max_length=128, blank=True)
    # The message body is stored in both HTML and plain-text forms.
    html_message = models.TextField(null=True, blank=True)
    text_message = models.TextField(null=True, blank=True)
    # Timestamps maintained automatically by Django.
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)

    class Meta:  # pylint: disable=C0111
        abstract = True
# Recipient-group keys stored in CourseEmail.to_option (see
# CourseEmail.TO_OPTIONS for the human-readable labels).
SEND_TO_MYSELF = 'myself'
SEND_TO_STAFF = 'staff'
SEND_TO_ALL = 'all'
class CourseEmail(Email, models.Model):
    """
    Stores information for an email to a course.
    """
    # Three options for sending that we provide from the instructor dashboard:
    # * Myself: This sends an email to the staff member that is composing the email.
    #
    # * Staff and instructors: This sends an email to anyone in the staff group and
    #   anyone in the instructor group
    #
    # * All: This sends an email to anyone enrolled in the course, with any role
    #   (student, staff, or instructor)
    #
    TO_OPTIONS = (
        (SEND_TO_MYSELF, 'Myself'),
        (SEND_TO_STAFF, 'Staff and instructors'),
        (SEND_TO_ALL, 'All')
    )
    # Course this email belongs to (indexed for per-course queries).
    course_id = models.CharField(max_length=255, db_index=True)
    # Which recipient group was chosen; one of the TO_OPTIONS keys.
    to_option = models.CharField(max_length=64, choices=TO_OPTIONS, default=SEND_TO_MYSELF)

    def __unicode__(self):
        # Display emails by their subject line.
        return self.subject
class Optout(models.Model):
    """
    Stores users that have opted out of receiving emails from a course.
    """
    # Allowing null=True to support data migration from email->user.
    # We need to first create the 'user' column with some sort of default in order to run the data migration,
    # and given the unique index, 'null' is the best default value.
    user = models.ForeignKey(User, db_index=True, null=True)
    course_id = models.CharField(max_length=255, db_index=True)

    class Meta:  # pylint: disable=C0111
        # A user can opt out of a given course at most once.
        unique_together = ('user', 'course_id')
# Defines the tag that must appear in a template, to indicate
# the location where the email message body is to be inserted.
# The braces are doubled so that str.format() reduces the tag to
# '{message_body}' (see CourseEmailTemplate._render).
COURSE_EMAIL_MESSAGE_BODY_TAG = '{{message_body}}'
class CourseEmailTemplate(models.Model):
    """
    Stores templates for all emails to a course to use.

    Expected to be a singleton shared across all courses: it is initialized by
    a migration that loads a fixture, the admin console disables add and
    delete, and validation is handled in the CourseEmailTemplateForm class.
    """
    html_template = models.TextField(null=True, blank=True)
    plain_template = models.TextField(null=True, blank=True)

    @staticmethod
    def get_template():
        """
        Return the single stored template; raises if none is stored.
        """
        return CourseEmailTemplate.objects.get()

    @staticmethod
    def _render(format_string, message_body, context):
        """
        Render `format_string` with `context`, then splice `message_body` in
        at the body tag.

        The user-supplied body is inserted only *after* format() has run, so
        curly braces (or anything else format-like) in the body come back
        verbatim instead of breaking the substitution. The result is a
        unicode string; encoding (per settings.DEFAULT_CHARSET) is left to
        the email-sending code.
        """
        rendered = format_string.format(**context)
        # The body tag in the template was "formatted" along with everything
        # else, so format the tag constant the same way before searching.
        rendered_tag = COURSE_EMAIL_MESSAGE_BODY_TAG.format()
        return rendered.replace(rendered_tag, message_body, 1)

    def render_plaintext(self, plaintext, context):
        """
        Render `plaintext` into a plain-text email message using the stored
        plain template and the provided `context` dict.
        """
        return CourseEmailTemplate._render(self.plain_template, plaintext, context)

    def render_htmltext(self, htmltext, context):
        """
        Render `htmltext` into an HTML email message using the stored HTML
        template and the provided `context` dict.
        """
        return CourseEmailTemplate._render(self.html_template, htmltext, context)
|
zBMNForks/graphite-web | refs/heads/master | webapp/tests/test_browser.py | 35 | # -*- coding: utf-8 -*-
import json
import os
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.utils import override_settings
from . import DATA_DIR
class BrowserTest(TestCase):
    """End-to-end view tests for the graphite-web browser app."""

    def test_browser(self):
        # The main browser page renders.
        url = reverse('browser')
        response = self.client.get(url)
        self.assertContains(response, 'Graphite Browser')

    def test_header(self):
        # Rendering the header creates the default user on first access.
        self.assertEqual(User.objects.count(), 0)
        url = reverse('browser_header')
        response = self.client.get(url)
        self.assertContains(response, 'Graphite Browser Header')

        # Graphite has created a default user
        self.assertEqual(User.objects.get().username, 'default')

    def test_url_prefix(self):
        # URLs are reversed under the configured /graphite/ prefix.
        self.assertEqual(reverse('browser'), '/graphite/')

    @override_settings(INDEX_FILE=os.path.join(DATA_DIR, 'index'))
    def test_search(self):
        url = reverse('browser_search')
        # No query at all yields an empty response body.
        response = self.client.post(url)
        self.assertEqual(response.content, '')

        # simple query
        response = self.client.post(url, {'query': 'collectd'})
        self.assertEqual(response.content.split(',')[0],
                         'collectd.test.df-root.df_complex-free')

        # No match
        response = self.client.post(url, {'query': 'other'})
        self.assertEqual(response.content, '')

        # Multiple terms (OR)
        response = self.client.post(url, {'query': 'midterm shortterm'})
        self.assertEqual(response.content.split(','),
                         ['collectd.test.load.load.midterm',
                          'collectd.test.load.load.shortterm'])

    def test_unicode_graph_name(self):
        # Saved graphs with non-ASCII names render without encoding errors.
        url = reverse('browser_my_graph')
        user = User.objects.create_user('test', 'test@example.com', 'pass')
        self.client.login(username='test', password='pass')
        response = self.client.get(url, {'path': ''})
        self.assertEqual(response.status_code, 200)
        user.profile.mygraph_set.create(name=u'fòo', url='bar')
        response = self.client.get(url, {'path': ''})
        self.assertEqual(response.status_code, 200)
        [leaf] = json.loads(response.content)
        self.assertEqual(leaf['text'], u'fòo')
vikingMei/mxnet | refs/heads/master | python/mxnet/contrib/text/embedding.py | 2 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=consider-iterating-dictionary
"""Text token embeddings."""
from __future__ import absolute_import
from __future__ import print_function
import io
import logging
import os
import tarfile
import warnings
import zipfile
from . import _constants as C
from . import indexer
from ... import ndarray as nd
from ... import registry
class TokenEmbedding(indexer.TokenIndexer):
"""Token embedding base class.
To load token embeddings from an externally hosted pre-trained token embedding file, such as
those of GloVe and FastText, use `TokenEmbedding.create(embedding_name, pretrained_file_name)`.
To get all the available `embedding_name` and `pretrained_file_name`, use
`TokenEmbedding.get_embedding_and_pretrained_file_names()`.
Alternatively, to load embedding vectors from a custom pre-trained token embedding file, use
:class:`~mxnet.contrib.text.embedding.CustomEmbedding`.
For every unknown token, if its representation `self.unknown_token` is encountered in the
pre-trained token embedding file, index 0 of `self.idx_to_vec` maps to the pre-trained token
embedding vector loaded from the file; otherwise, index 0 of `self.idx_to_vec` maps to the
token embedding vector initialized by `init_unknown_vec`.
If a token is encountered multiple times in the pre-trained token embedding file, only the
first-encountered token embedding vector will be loaded and the rest will be skipped.
For the same token, its index and embedding vector may vary across different instances of
:class:`~mxnet.contrib.text.embedding.TokenEmbedding`.
Properties
----------
token_to_idx : dict mapping str to int
A dict mapping each token to its index integer.
idx_to_token : list of strs
A list of indexed tokens where the list indices and the token indices are aligned.
unknown_token : hashable object
The representation for any unknown token. In other words, any unknown token will be indexed
as the same representation.
reserved_tokens : list of strs or None
A list of reserved tokens that will always be indexed.
vec_len : int
The length of the embedding vector for each token.
idx_to_vec : mxnet.ndarray.NDArray
For all the indexed tokens in this embedding, this NDArray maps each token's index to an
embedding vector. The largest valid index maps to the initialized embedding vector for every
reserved token, such as an unknown_token token and a padding token.
"""
    def __init__(self, **kwargs):
        """Initialize by forwarding all keyword arguments to TokenIndexer."""
        super(TokenEmbedding, self).__init__(**kwargs)
    @classmethod
    def _get_download_file_name(cls, pretrained_file_name):
        # By default the remote file to download has the same name as the
        # pre-trained embedding file. Presumably overridden by embeddings
        # whose files ship inside archives (see the
        # pretrained_archive_name_sha1 handling in _get_pretrained_file).
        return pretrained_file_name
@classmethod
def _get_pretrained_file_url(cls, pretrained_file_name):
repo_url = os.environ.get('MXNET_GLUON_REPO', C.APACHE_REPO_URL)
embedding_cls = cls.__name__.lower()
url_format = '{repo_url}gluon/embeddings/{cls}/{file_name}'
return url_format.format(repo_url=repo_url, cls=embedding_cls,
file_name=cls._get_download_file_name(pretrained_file_name))
    @classmethod
    def _get_pretrained_file(cls, embedding_root, pretrained_file_name):
        """Return the local path to `pretrained_file_name`, downloading if needed.

        The file is cached under `embedding_root/<embedding class name>/`.
        When it is missing or fails its SHA-1 check, it is (re)downloaded and,
        if the download is a .zip or .gz archive, extracted in place.
        """
        from ...gluon.utils import check_sha1, download

        embedding_cls = cls.__name__.lower()
        embedding_root = os.path.expanduser(embedding_root)
        url = cls._get_pretrained_file_url(pretrained_file_name)

        embedding_dir = os.path.join(embedding_root, embedding_cls)
        pretrained_file_path = os.path.join(embedding_dir, pretrained_file_name)
        downloaded_file = os.path.basename(url)
        downloaded_file_path = os.path.join(embedding_dir, downloaded_file)

        expected_file_hash = cls.pretrained_file_name_sha1[pretrained_file_name]

        # When the download is an archive, its checksum differs from that of
        # the embedding file it contains.
        if hasattr(cls, 'pretrained_archive_name_sha1'):
            expected_downloaded_hash = \
                cls.pretrained_archive_name_sha1[downloaded_file]
        else:
            expected_downloaded_hash = expected_file_hash

        if not os.path.exists(pretrained_file_path) \
                or not check_sha1(pretrained_file_path, expected_file_hash):
            download(url, downloaded_file_path, sha1_hash=expected_downloaded_hash)

            ext = os.path.splitext(downloaded_file)[1]
            if ext == '.zip':
                with zipfile.ZipFile(downloaded_file_path, 'r') as zf:
                    zf.extractall(embedding_dir)
            elif ext == '.gz':
                # NOTE(review): extractall trusts member paths inside the
                # archive. The archives come from the configured MXNet repo,
                # but confirm upstream that paths cannot escape embedding_dir.
                with tarfile.open(downloaded_file_path, 'r:gz') as tar:
                    tar.extractall(path=embedding_dir)
        return pretrained_file_path
    def _load_embedding(self, pretrained_file_path, elem_delim, init_unknown_vec, encoding='utf8'):
        """Load embedding vectors from the pre-trained token embedding file.

        For every unknown token, if its representation `self.unknown_token` is encountered in the
        pre-trained token embedding file, index 0 of `self.idx_to_vec` maps to the pre-trained token
        embedding vector loaded from the file; otherwise, index 0 of `self.idx_to_vec` maps to the
        text embedding vector initialized by `init_unknown_vec`.

        If a token is encountered multiple times in the pre-trained text embedding file, only the
        first-encountered token embedding vector will be loaded and the rest will be skipped.

        Parameters
        ----------
        pretrained_file_path : str
            Path to the pre-trained embedding text file; '~' is expanded.
        elem_delim : str
            Delimiter separating the token and the vector elements on each line.
        init_unknown_vec : callback
            Initializer for the unknown-token vector when the file does not provide one.
        encoding : str, default 'utf8'
            Text encoding used to read the file.
        """
        pretrained_file_path = os.path.expanduser(pretrained_file_path)
        if not os.path.isfile(pretrained_file_path):
            raise ValueError('`pretrained_file_path` must be a valid path to '
                             'the pre-trained token embedding file.')
        logging.info('Loading pre-trained token embedding vectors from %s', pretrained_file_path)
        vec_len = None
        all_elems = []
        tokens = set()
        loaded_unknown_vec = None
        line_num = 0
        with io.open(pretrained_file_path, 'r', encoding=encoding) as f:
            for line in f:
                line_num += 1
                elems = line.rstrip().split(elem_delim)
                assert len(elems) > 1, 'At line %d of the pre-trained text embedding file: the ' \
                                       'data format of the pre-trained token embedding file %s ' \
                                       'is unexpected.' % (line_num, pretrained_file_path)
                token, elems = elems[0], [float(i) for i in elems[1:]]
                if token == self.unknown_token and loaded_unknown_vec is None:
                    # First occurrence of the unknown-token representation:
                    # remember its vector for slot 0 instead of appending it.
                    loaded_unknown_vec = elems
                    tokens.add(self.unknown_token)
                elif token in tokens:
                    warnings.warn('At line %d of the pre-trained token embedding file: the '
                                  'embedding vector for token %s has been loaded and a duplicate '
                                  'embedding for the same  token is seen and skipped.' %
                                  (line_num, token))
                elif len(elems) == 1:
                    # A single trailing number is almost certainly a
                    # "<vocab size> <dim>" header line, not a real token.
                    warnings.warn('At line %d of the pre-trained text embedding file: token %s '
                                  'with 1-dimensional vector %s is likely a header and is '
                                  'skipped.' % (line_num, token, elems))
                else:
                    if vec_len is None:
                        vec_len = len(elems)
                        # Reserve a vector slot for the unknown token at the
                        # very beginning because the unknown index is 0.
                        all_elems.extend([0] * vec_len)
                    else:
                        assert len(elems) == vec_len, \
                            'At line %d of the pre-trained token embedding file: the dimension ' \
                            'of token %s is %d but the dimension of previous tokens is %d. ' \
                            'Dimensions of all the tokens must be the same.' \
                            % (line_num, token, len(elems), vec_len)
                    all_elems.extend(elems)
                    self._idx_to_token.append(token)
                    self._token_to_idx[token] = len(self._idx_to_token) - 1
                    tokens.add(token)
        self._vec_len = vec_len
        self._idx_to_vec = nd.array(all_elems).reshape((-1, self.vec_len))
        if loaded_unknown_vec is None:
            self._idx_to_vec[C.UNKNOWN_IDX] = init_unknown_vec(shape=self.vec_len)
        else:
            self._idx_to_vec[C.UNKNOWN_IDX] = nd.array(loaded_unknown_vec)
    @property
    def vec_len(self):
        """int: The length of the embedding vector for each indexed token."""
        return self._vec_len
    @property
    def idx_to_vec(self):
        """NDArray: Mapping from each token index (row) to its embedding vector."""
        return self._idx_to_vec
def get_vecs_by_tokens(self, tokens, lower_case_backup=False):
"""Look up embedding vectors of tokens.
Parameters
----------
tokens : str or list of strs
A token or a list of tokens.
lower_case_backup : bool, default False
If False, each token in the original case will be looked up; if True, each token in the
original case will be looked up first, if not found in the keys of the property
`token_to_idx`, the token in the lower case will be looked up.
Returns
-------
mxnet.ndarray.NDArray:
The embedding vector(s) of the token(s). According to numpy conventions, if `tokens` is
a string, returns a 1-D NDArray of shape `self.vec_len`; if `tokens` is a list of
strings, returns a 2-D NDArray of shape=(len(tokens), self.vec_len).
"""
to_reduce = False
if not isinstance(tokens, list):
tokens = [tokens]
to_reduce = True
if not lower_case_backup:
indices = [self.token_to_idx.get(token, C.UNKNOWN_IDX) for token in tokens]
else:
indices = [self.token_to_idx[token] if token in self.token_to_idx
else self.token_to_idx.get(token.lower(), C.UNKNOWN_IDX)
for token in tokens]
vecs = nd.Embedding(nd.array(indices), self.idx_to_vec, self.idx_to_vec.shape[0],
self.idx_to_vec.shape[1])
return vecs[0] if to_reduce else vecs
    def update_token_vectors(self, tokens, new_vectors):
        """Updates embedding vectors for tokens.

        Parameters
        ----------
        tokens : str or a list of strs
            A token or a list of tokens whose embedding vector are to be updated.
        new_vectors : mxnet.ndarray.NDArray
            An NDArray to be assigned to the embedding vectors of `tokens`. Its length must be equal
            to the number of `tokens` and its width must be equal to the dimension of embeddings of
            the glossary. If `tokens` is a singleton, it must be 1-D or 2-D. If `tokens` is a list
            of multiple strings, it must be 2-D.

        Raises
        ------
        ValueError
            If any token in `tokens` is not already indexed (updating the
            unknown token must be requested explicitly via its
            `unknown_token` representation).
        """
        assert self.idx_to_vec is not None, 'The property `idx_to_vec` has not been properly set.'
        if not isinstance(tokens, list) or len(tokens) == 1:
            assert isinstance(new_vectors, nd.NDArray) and len(new_vectors.shape) in [1, 2], \
                '`new_vectors` must be a 1-D or 2-D NDArray if `tokens` is a singleton.'
            if not isinstance(tokens, list):
                tokens = [tokens]
            if len(new_vectors.shape) == 1:
                # Normalize to 2-D so the scatter-assignment below is uniform.
                new_vectors = new_vectors.expand_dims(0)
        else:
            assert isinstance(new_vectors, nd.NDArray) and len(new_vectors.shape) == 2, \
                '`new_vectors` must be a 2-D NDArray if `tokens` is a list of multiple strings.'
        assert new_vectors.shape == (len(tokens), self.vec_len), \
            'The length of new_vectors must be equal to the number of tokens ' \
            'and the width of new_vectors must be equal to the dimension of ' \
            'embeddings of the glossary.'
        indices = []
        for token in tokens:
            if token in self.token_to_idx:
                indices.append(self.token_to_idx[token])
            else:
                raise ValueError('Token %s is unknown. To update the embedding vector for an '
                                 'unknown token, please specify it explicitly as the '
                                 '`unknown_token` %s in `tokens`. This is to avoid unintended '
                                 'updates.' % (token, self.idx_to_token[C.UNKNOWN_IDX]))
        # Scatter-assign all new rows at the looked-up indices in one NDArray op.
        self._idx_to_vec[nd.array(indices)] = new_vectors
    @staticmethod
    def register(embedding_cls):
        """Registers a new token embedding.

        Once an embedding is registered, we can create an instance of this embedding with
        :func:`~mxnet.contrib.text.embedding.TokenEmbedding.create`.

        Examples
        --------
        >>> @mxnet.contrib.text.embedding.TokenEmbedding.register
        ... class MyTokenEmbed(mxnet.contrib.text.embedding.TokenEmbedding):
        ...     def __init__(self, pretrained_file_name='my_pretrain_file'):
        ...         pass
        >>> embed = mxnet.contrib.text.embedding.TokenEmbedding.create('MyTokenEmbed')
        >>> print(type(embed))
        <class '__main__.MyTokenEmbed'>
        """
        register_text_embedding = registry.get_register_func(TokenEmbedding, 'token embedding')
        return register_text_embedding(embedding_cls)
    @staticmethod
    def create(embedding_name, **kwargs):
        """Creates an instance of :class:`~mxnet.contrib.text.embedding.TokenEmbedding`.

        Creates a token embedding instance by loading embedding vectors from an externally hosted
        pre-trained token embedding file, such as those of GloVe and FastText. To get all the valid
        `embedding_name` and `pretrained_file_name`, use
        `mxnet.contrib.text.embedding.TokenEmbedding.get_embedding_and_pretrained_file_names()`.

        Parameters
        ----------
        embedding_name : str
            The token embedding name (case-insensitive).
        **kwargs
            Keyword arguments forwarded to the embedding class constructor,
            e.g. `pretrained_file_name`.

        Returns
        -------
        :class:`~mxnet.contrib.text.embedding.TokenEmbedding`:
            A token embedding instance that loads embedding vectors from an externally hosted
            pre-trained token embedding file.
        """
        create_text_embedding = registry.get_create_func(TokenEmbedding, 'token embedding')
        return create_text_embedding(embedding_name, **kwargs)
@classmethod
def _check_pretrained_file_names(cls, pretrained_file_name):
"""Checks if a pre-trained token embedding file name is valid.
Parameters
----------
pretrained_file_name : str
The pre-trained token embedding file.
"""
embedding_name = cls.__name__.lower()
if pretrained_file_name not in cls.pretrained_file_name_sha1:
raise KeyError('Cannot find pretrained file %s for token embedding %s. Valid '
'pretrained files for embedding %s: %s' %
(pretrained_file_name, embedding_name, embedding_name,
', '.join(cls.pretrained_file_name_sha1.keys())))
    @staticmethod
    def get_embedding_and_pretrained_file_names(embedding_name=None):
        """Get valid token embedding names and their pre-trained file names.

        To load token embedding vectors from an externally hosted pre-trained token embedding file,
        such as those of GloVe and FastText, one should use
        `mxnet.contrib.text.embedding.TokenEmbedding.create(embedding_name, pretrained_file_name)`.
        This method returns all the valid names of `pretrained_file_name` for the specified
        `embedding_name`. If `embedding_name` is set to None, this method returns all the valid
        names of `embedding_name` with associated `pretrained_file_name`.

        Parameters
        ----------
        embedding_name : str or None, default None
            The pre-trained token embedding name.

        Returns
        -------
        dict or list:
            A list of all the valid pre-trained token embedding file names (`pretrained_file_name`)
            for the specified token embedding name (`embedding_name`). If the text embedding name is
            set to None, returns a dict mapping each valid token embedding name to a list of valid
            pre-trained files (`pretrained_file_name`). They can be plugged into
            `mxnet.contrib.text.embedding.TokenEmbedding.create(embedding_name,
            pretrained_file_name)`.
        """
        text_embedding_reg = registry.get_registry(TokenEmbedding)
        if embedding_name is not None:
            if embedding_name not in text_embedding_reg:
                raise KeyError('Cannot find `embedding_name` %s. Use '
                               '`get_embedding_and_pretrained_file_names('
                               'embedding_name=None).keys()` to get all the valid embedding '
                               'names.' % embedding_name)
            return list(text_embedding_reg[
                embedding_name].pretrained_file_name_sha1.keys())
        else:
            return {embedding_name: list(
                embedding_cls.pretrained_file_name_sha1.keys())
                    for embedding_name, embedding_cls in
                    registry.get_registry(TokenEmbedding).items()}
@TokenEmbedding.register
class GloVe(TokenEmbedding):
    """The GloVe word embedding.

    GloVe is an unsupervised learning algorithm for obtaining vector representations for words.
    Training is performed on aggregated global word-word co-occurrence statistics from a corpus, and
    the resulting representations showcase interesting linear substructures of the word vector
    space. (Source from https://nlp.stanford.edu/projects/glove/)

    Reference:
    GloVe: Global Vectors for Word Representation.
    Jeffrey Pennington, Richard Socher, and Christopher D. Manning.
    https://nlp.stanford.edu/pubs/glove.pdf

    Website:
    https://nlp.stanford.edu/projects/glove/

    To get the updated URLs to the externally hosted pre-trained token embedding
    files, visit https://nlp.stanford.edu/projects/glove/

    License for pre-trained embeddings:
    https://opendatacommons.org/licenses/pddl/

    Parameters
    ----------
    pretrained_file_name : str, default 'glove.840B.300d.txt'
        The name of the pre-trained token embedding file.
    embedding_root : str, default os.path.join('~', '.mxnet', 'embeddings')
        The root directory for storing embedding-related files.
    init_unknown_vec : callback, default mxnet.nd.zeros
        The callback used to initialize the embedding vector for the unknown token.

    Properties
    ----------
    token_to_idx : dict mapping str to int
        A dict mapping each token to its index integer.
    idx_to_token : list of strs
        A list of indexed tokens where the list indices and the token indices are aligned.
    unknown_token : hashable object
        The representation for any unknown token. In other words, any unknown token will be indexed
        as the same representation.
    reserved_tokens : list of strs or None
        A list of reserved tokens that will always be indexed.
    vec_len : int
        The length of the embedding vector for each token.
    idx_to_vec : mxnet.ndarray.NDArray
        For all the indexed tokens in this embedding, this NDArray maps each token's index to an
        embedding vector. The largest valid index maps to the initialized embedding vector for every
        reserved token, such as an unknown_token token and a padding token.
    """

    # Map a pre-trained token embedding archive file and its SHA-1 hash.
    # NOTE(review): the constant names appear swapped relative to the
    # attribute names here and below (FILE_SHA1 feeding *archive*_name_sha1
    # and ARCHIVE_SHA1 feeding *file*_name_sha1) — verify against
    # _constants.py before relying on either mapping.
    pretrained_archive_name_sha1 = C.GLOVE_PRETRAINED_FILE_SHA1

    # Map a pre-trained token embedding file and its SHA-1 hash.
    pretrained_file_name_sha1 = C.GLOVE_PRETRAINED_ARCHIVE_SHA1

    @classmethod
    def _get_download_file_name(cls, pretrained_file_name):
        """Map a pre-trained embedding file name to the archive that contains it."""
        # Map a pretrained embedding file to its archive to download.
        # Keyed by the corpus segment of the name, e.g. '840B' in
        # 'glove.840B.300d.txt' -> 'glove.840B.zip'.
        src_archive = {archive.split('.')[1]: archive for archive in
                       GloVe.pretrained_archive_name_sha1.keys()}
        archive = src_archive[pretrained_file_name.split('.')[1]]
        return archive

    def __init__(self, pretrained_file_name='glove.840B.300d.txt',
                 embedding_root=os.path.join('~', '.mxnet', 'embeddings'),
                 init_unknown_vec=nd.zeros, **kwargs):
        GloVe._check_pretrained_file_names(pretrained_file_name)
        super(GloVe, self).__init__(**kwargs)
        pretrained_file_path = GloVe._get_pretrained_file(embedding_root, pretrained_file_name)
        # GloVe files separate the token and vector elements with spaces.
        self._load_embedding(pretrained_file_path, ' ', init_unknown_vec)
@TokenEmbedding.register
class FastText(TokenEmbedding):
    """The fastText word embedding.

    FastText is an open-source, free, lightweight library that allows users to learn text
    representations and text classifiers. It works on standard, generic hardware. Models can later
    be reduced in size to even fit on mobile devices. (Source from https://fasttext.cc/)

    References:
    Enriching Word Vectors with Subword Information.
    Piotr Bojanowski, Edouard Grave, Armand Joulin, and Tomas Mikolov.
    https://arxiv.org/abs/1607.04606

    Bag of Tricks for Efficient Text Classification.
    Armand Joulin, Edouard Grave, Piotr Bojanowski, and Tomas Mikolov.
    https://arxiv.org/abs/1607.01759

    FastText.zip: Compressing text classification models.
    Armand Joulin, Edouard Grave, Piotr Bojanowski, Matthijs Douze, Herve Jegou,
    and Tomas Mikolov.
    https://arxiv.org/abs/1612.03651

    Website:
    https://fasttext.cc/

    To get the updated URLs to the externally hosted pre-trained token embedding files, visit
    https://github.com/facebookresearch/fastText/blob/master/pretrained-vectors.md

    License for pre-trained embeddings:
    https://creativecommons.org/licenses/by-sa/3.0/

    Parameters
    ----------
    pretrained_file_name : str, default 'wiki.simple.vec'
        The name of the pre-trained token embedding file.
    embedding_root : str, default os.path.join('~', '.mxnet', 'embeddings')
        The root directory for storing embedding-related files.
    init_unknown_vec : callback, default mxnet.nd.zeros
        The callback used to initialize the embedding vector for the unknown token.

    Properties
    ----------
    token_to_idx : dict mapping str to int
        A dict mapping each token to its index integer.
    idx_to_token : list of strs
        A list of indexed tokens where the list indices and the token indices are aligned.
    unknown_token : hashable object
        The representation for any unknown token. In other words, any unknown token will be indexed
        as the same representation.
    reserved_tokens : list of strs or None
        A list of reserved tokens that will always be indexed.
    vec_len : int
        The length of the embedding vector for each token.
    idx_to_vec : mxnet.ndarray.NDArray
        For all the indexed tokens in this embedding, this NDArray maps each token's index to an
        embedding vector. The largest valid index maps to the initialized embedding vector for every
        reserved token, such as an unknown_token token and a padding token.
    """

    # Map a pre-trained token embedding file and its SHA-1 hash.
    pretrained_file_name_sha1 = C.FAST_TEXT_FILE_SHA1

    def __init__(self, pretrained_file_name='wiki.simple.vec',
                 embedding_root=os.path.join('~', '.mxnet', 'embeddings'),
                 init_unknown_vec=nd.zeros, **kwargs):
        FastText._check_pretrained_file_names(pretrained_file_name)
        super(FastText, self).__init__(**kwargs)
        pretrained_file_path = FastText._get_pretrained_file(embedding_root, pretrained_file_name)
        # fastText .vec files separate the token and vector elements with spaces.
        self._load_embedding(pretrained_file_path, ' ', init_unknown_vec)
class CustomEmbedding(TokenEmbedding):
    """User-defined token embedding.

    This is to load embedding vectors from a user-defined pre-trained text embedding file.

    Denote by '<ed>' the argument `elem_delim`. Denote by <v_ij> the j-th element of the token
    embedding vector for <token_i>, the expected format of a custom pre-trained token embedding file
    is:

    '<token_1><ed><v_11><ed><v_12><ed>...<ed><v_1k>\\\\n<token_2><ed><v_21><ed><v_22><ed>...<ed>
    <v_2k>\\\\n...'

    where k is the length of the embedding vector `vec_len`.

    Parameters
    ----------
    pretrained_file_path : str
        The path to the custom pre-trained token embedding file.
    elem_delim : str, default ' '
        The delimiter for splitting a token and every embedding vector element value on the same
        line of the custom pre-trained token embedding file.
    encoding : str, default 'utf8'
        The text encoding used to read the pre-trained token embedding file.
    init_unknown_vec : callback, default mxnet.nd.zeros
        The callback used to initialize the embedding vector for the unknown token.

    Properties
    ----------
    token_to_idx : dict mapping str to int
        A dict mapping each token to its index integer.
    idx_to_token : list of strs
        A list of indexed tokens where the list indices and the token indices are aligned.
    unknown_token : hashable object
        The representation for any unknown token. In other words, any unknown token will be indexed
        as the same representation.
    reserved_tokens : list of strs or None
        A list of reserved tokens that will always be indexed.
    vec_len : int
        The length of the embedding vector for each token.
    idx_to_vec : mxnet.ndarray.NDArray
        For all the indexed tokens in this embedding, this NDArray maps each token's index to an
        embedding vector. The largest valid index maps to the initialized embedding vector for every
        reserved token, such as an unknown_token token and a padding token.
    """

    def __init__(self, pretrained_file_path, elem_delim=' ', encoding='utf8',
                 init_unknown_vec=nd.zeros, **kwargs):
        super(CustomEmbedding, self).__init__(**kwargs)
        self._load_embedding(pretrained_file_path, elem_delim, init_unknown_vec, encoding)
|
tinkhaven-organization/odoo | refs/heads/8.0 | addons/payment_paypal/tests/test_paypal.py | 378 | # -*- coding: utf-8 -*-
from openerp.addons.payment.models.payment_acquirer import ValidationError
from openerp.addons.payment.tests.common import PaymentAcquirerCommon
from openerp.addons.payment_paypal.controllers.main import PaypalController
from openerp.tools import mute_logger
from lxml import objectify
import urlparse
class PaypalCommon(PaymentAcquirerCommon):
    """Shared fixtures for the Paypal acquirer tests.

    Resolves the demo Paypal acquirer record and provides sample credit-card
    numbers (standard industry test numbers) for the server-to-server tests.
    """

    def setUp(self):
        super(PaypalCommon, self).setUp()
        cr, uid = self.cr, self.uid
        self.base_url = self.registry('ir.config_parameter').get_param(cr, uid, 'web.base.url')
        # get the paypal account
        model, self.paypal_id = self.registry('ir.model.data').get_object_reference(cr, uid, 'payment_paypal', 'payment_acquirer_paypal')
        # tde+seller@openerp.com - tde+buyer@openerp.com - tde+buyer-it@openerp.com
        # some CC
        # NOTE(review): most attributes are tuples of (number, cvc) pairs, but
        # amex_corporate, autralian_bankcard and switch_polo are single pairs
        # (the extra parentheses do not nest) — confirm intended before use.
        self.amex = (('378282246310005', '123'), ('371449635398431', '123'))
        self.amex_corporate = (('378734493671000', '123'))
        self.autralian_bankcard = (('5610591081018250', '123'))
        self.dinersclub = (('30569309025904', '123'), ('38520000023237', '123'))
        self.discover = (('6011111111111117', '123'), ('6011000990139424', '123'))
        self.jcb = (('3530111333300000', '123'), ('3566002020360505', '123'))
        self.mastercard = (('5555555555554444', '123'), ('5105105105105100', '123'))
        self.visa = (('4111111111111111', '123'), ('4012888888881881', '123'), ('4222222222222', '123'))
        self.dankord_pbs = (('76009244561', '123'), ('5019717010103742', '123'))
        self.switch_polo = (('6331101999990016', '123'))
class PaypalServer2Server(PaypalCommon):
    """Server-to-server Paypal tests (require network access to the sandbox)."""

    def test_00_tx_management(self):
        """Create a server-to-server transaction and check the txn_id is set."""
        cr, uid, context = self.cr, self.uid, {}

        # be sure not to do stupid things
        paypal = self.payment_acquirer.browse(self.cr, self.uid, self.paypal_id, None)
        self.assertEqual(paypal.environment, 'test', 'test without test environment')

        # Obtaining an access token exercises the live sandbox API.
        res = self.payment_acquirer._paypal_s2s_get_access_token(cr, uid, [self.paypal_id], context=context)
        self.assertTrue(res[self.paypal_id] is not False, 'paypal: did not generate access token')

        # NOTE(review): hard-coded card expiry (09/2015) is in the past;
        # the sandbox may reject it — confirm the test still passes.
        tx_id = self.payment_transaction.s2s_create(
            cr, uid, {
                'amount': 0.01,
                'acquirer_id': self.paypal_id,
                'currency_id': self.currency_euro_id,
                'reference': 'test_reference',
                'partner_id': self.buyer_id,
            }, {
                'number': self.visa[0][0],
                'cvc': self.visa[0][1],
                'brand': 'visa',
                'expiry_mm': 9,
                'expiry_yy': 2015,
            }, context=context
        )

        tx = self.payment_transaction.browse(cr, uid, tx_id, context=context)
        self.assertTrue(tx.paypal_txn_id is not False, 'paypal: txn_id should have been set after s2s request')
        self.payment_transaction.write(cr, uid, tx_id, {'paypal_txn_id': False}, context=context)
class PaypalForm(PaypalCommon):
    """Tests for the Paypal redirect-form rendering and IPN feedback handling."""

    def test_10_paypal_form_render(self):
        """Render the Paypal button and check every generated form input."""
        cr, uid, context = self.cr, self.uid, {}
        # be sure not to do stupid things
        self.payment_acquirer.write(cr, uid, self.paypal_id, {'fees_active': False}, context)
        paypal = self.payment_acquirer.browse(cr, uid, self.paypal_id, context)
        self.assertEqual(paypal.environment, 'test', 'test without test environment')

        # ----------------------------------------
        # Test: button direct rendering
        # ----------------------------------------

        # render the button
        res = self.payment_acquirer.render(
            cr, uid, self.paypal_id,
            'test_ref0', 0.01, self.currency_euro_id,
            partner_id=None,
            partner_values=self.buyer_values,
            context=context)

        # Expected hidden-input values of the rendered Paypal form.
        form_values = {
            'cmd': '_xclick',
            'business': 'tde+paypal-facilitator@openerp.com',
            'item_name': 'test_ref0',
            'item_number': 'test_ref0',
            'first_name': 'Buyer',
            'last_name': 'Norbert',
            'amount': '0.01',
            'currency_code': 'EUR',
            'address1': 'Huge Street 2/543',
            'city': 'Sin City',
            'zip': '1000',
            'country': 'Belgium',
            'email': 'norbert.buyer@example.com',
            'return': '%s' % urlparse.urljoin(self.base_url, PaypalController._return_url),
            'notify_url': '%s' % urlparse.urljoin(self.base_url, PaypalController._notify_url),
            'cancel_return': '%s' % urlparse.urljoin(self.base_url, PaypalController._cancel_url),
        }

        # check form result
        tree = objectify.fromstring(res)
        self.assertEqual(tree.get('action'), 'https://www.sandbox.paypal.com/cgi-bin/webscr', 'paypal: wrong form POST url')
        for form_input in tree.input:
            if form_input.get('name') in ['submit']:
                continue
            self.assertEqual(
                form_input.get('value'),
                form_values[form_input.get('name')],
                'paypal: wrong value for input %s: received %s instead of %s' % (form_input.get('name'), form_input.get('value'), form_values[form_input.get('name')])
            )

    def test_11_paypal_form_with_fees(self):
        """Render the button with fees enabled and check the 'handling' input."""
        cr, uid, context = self.cr, self.uid, {}
        # be sure not to do stupid things
        paypal = self.payment_acquirer.browse(self.cr, self.uid, self.paypal_id, None)
        self.assertEqual(paypal.environment, 'test', 'test without test environment')

        # update acquirer: compute fees
        self.payment_acquirer.write(cr, uid, self.paypal_id, {
            'fees_active': True,
            'fees_dom_fixed': 1.0,
            'fees_dom_var': 0.35,
            'fees_int_fixed': 1.5,
            'fees_int_var': 0.50,
        }, context)

        # render the button
        # NOTE(review): passes self.currency_euro here while test_10 passes
        # self.currency_euro_id — confirm render() accepts both forms.
        res = self.payment_acquirer.render(
            cr, uid, self.paypal_id,
            'test_ref0', 12.50, self.currency_euro,
            partner_id=None,
            partner_values=self.buyer_values,
            context=context)

        # check form result
        handling_found = False
        tree = objectify.fromstring(res)
        self.assertEqual(tree.get('action'), 'https://www.sandbox.paypal.com/cgi-bin/webscr', 'paypal: wrong form POST url')
        for form_input in tree.input:
            if form_input.get('name') in ['handling']:
                handling_found = True
                self.assertEqual(form_input.get('value'), '1.57', 'paypal: wrong computed fees')
        self.assertTrue(handling_found, 'paypal: fees_active did not add handling input in rendered form')

    @mute_logger('openerp.addons.payment_paypal.models.paypal', 'ValidationError')
    def test_20_paypal_form_management(self):
        """Feed typical IPN payloads through form_feedback and check tx state transitions."""
        cr, uid, context = self.cr, self.uid, {}
        # be sure not to do stupid things
        paypal = self.payment_acquirer.browse(cr, uid, self.paypal_id, context)
        self.assertEqual(paypal.environment, 'test', 'test without test environment')

        # typical data posted by paypal after client has successfully paid
        paypal_post_data = {
            'protection_eligibility': u'Ineligible',
            'last_name': u'Poilu',
            'txn_id': u'08D73520KX778924N',
            'receiver_email': u'tde+paypal-facilitator@openerp.com',
            'payment_status': u'Pending',
            'payment_gross': u'',
            'tax': u'0.00',
            'residence_country': u'FR',
            'address_state': u'Alsace',
            'payer_status': u'verified',
            'txn_type': u'web_accept',
            'address_street': u'Av. de la Pelouse, 87648672 Mayet',
            'handling_amount': u'0.00',
            'payment_date': u'03:21:19 Nov 18, 2013 PST',
            'first_name': u'Norbert',
            'item_name': u'test_ref_2',
            'address_country': u'France',
            'charset': u'windows-1252',
            'custom': u'',
            'notify_version': u'3.7',
            'address_name': u'Norbert Poilu',
            'pending_reason': u'multi_currency',
            'item_number': u'test_ref_2',
            'receiver_id': u'DEG7Z7MYGT6QA',
            'transaction_subject': u'',
            'business': u'tde+paypal-facilitator@openerp.com',
            'test_ipn': u'1',
            'payer_id': u'VTDKRZQSAHYPS',
            'verify_sign': u'An5ns1Kso7MWUdW4ErQKJJJ4qi4-AVoiUf-3478q3vrSmqh08IouiYpM',
            'address_zip': u'75002',
            'address_country_code': u'FR',
            'address_city': u'Paris',
            'address_status': u'unconfirmed',
            'mc_currency': u'EUR',
            'shipping': u'0.00',
            'payer_email': u'tde+buyer@openerp.com',
            'payment_type': u'instant',
            'mc_gross': u'1.95',
            'ipn_track_id': u'866df2ccd444b',
            'quantity': u'1'
        }

        # should raise error about unknown tx
        with self.assertRaises(ValidationError):
            self.payment_transaction.form_feedback(cr, uid, paypal_post_data, 'paypal', context=context)

        # create tx
        tx_id = self.payment_transaction.create(
            cr, uid, {
                'amount': 1.95,
                'acquirer_id': self.paypal_id,
                'currency_id': self.currency_euro_id,
                'reference': 'test_ref_2',
                'partner_name': 'Norbert Buyer',
                'partner_country_id': self.country_france_id,
            }, context=context
        )
        # validate it
        self.payment_transaction.form_feedback(cr, uid, paypal_post_data, 'paypal', context=context)
        # check: a 'Pending' IPN must leave the tx pending, not validated
        tx = self.payment_transaction.browse(cr, uid, tx_id, context=context)
        self.assertEqual(tx.state, 'pending', 'paypal: wrong state after receiving a valid pending notification')
        self.assertEqual(tx.state_message, 'multi_currency', 'paypal: wrong state message after receiving a valid pending notification')
        self.assertEqual(tx.paypal_txn_id, '08D73520KX778924N', 'paypal: wrong txn_id after receiving a valid pending notification')
        self.assertFalse(tx.date_validate, 'paypal: validation date should not be updated whenr receiving pending notification')

        # update tx: reset to draft so the 'Completed' IPN can be replayed
        self.payment_transaction.write(cr, uid, [tx_id], {
            'state': 'draft',
            'paypal_txn_id': False,
        }, context=context)
        # update notification from paypal
        paypal_post_data['payment_status'] = 'Completed'
        # validate it
        self.payment_transaction.form_feedback(cr, uid, paypal_post_data, 'paypal', context=context)
        # check: a 'Completed' IPN validates the tx and stamps the date
        tx = self.payment_transaction.browse(cr, uid, tx_id, context=context)
        self.assertEqual(tx.state, 'done', 'paypal: wrong state after receiving a valid pending notification')
        self.assertEqual(tx.paypal_txn_id, '08D73520KX778924N', 'paypal: wrong txn_id after receiving a valid pending notification')
        self.assertEqual(tx.date_validate, '2013-11-18 03:21:19', 'paypal: wrong validation date')
|
stefanbo92/maleChildren | refs/heads/master | Raspi/MotorControl.py | 1 | import time
import RPi.GPIO as GPIO
import matplotlib.pyplot as plt
# Use BCM GPIO references
# instead of physical pin numbers
# (module-level side effect: configures RPi.GPIO numbering at import time)
GPIO.setmode(GPIO.BCM)
class MotorControl:
def __init__(self):
#specify params
self.forwardSpeed=30
self.turnSpeed=40
self.maxSpeed=1.7*self.forwardSpeed
self.turnTime=0.25#0.32 1.71
self.wallDist=5
self.errorOld=0
self.errorIntegrated=0
'''
#working PD control!!!
self.P=0.1
self.D=3.5
'''
self.P=0.15
self.I=0.00
self.D=15
#evaluation
self.errorVec=[]
self.urVec=[]
self.totalErr=0
self.count=0
#init all GPIO pins
# vel=forward (A), dir=backward (B)
'''
self.velLeftPin=9 #pin21
self.dirLeftPin=25 #pin22
self.velRightPin=11 #pin23
self.dirRightPin=8 #pin24
'''
self.velRightPin=25 #pin22
self.dirRightPin=9 #pin21
self.velLeftPin=8 #pin24
self.dirLeftPin=11 #pin23
GPIO.setup(self.velLeftPin,GPIO.OUT)
GPIO.setup(self.dirLeftPin,GPIO.OUT)
GPIO.setup(self.velRightPin,GPIO.OUT)
GPIO.setup(self.dirRightPin,GPIO.OUT)
# init PWM pins
self.pwmLeftA=GPIO.PWM(self.velLeftPin, 500) # 500Hz PWM
self.pwmRightA=GPIO.PWM(self.velRightPin, 500)
self.pwmLeftA.start(0)
self.pwmRightA.start(0)
self.pwmLeftB=GPIO.PWM(self.dirLeftPin, 500) # 500Hz PWM
self.pwmRightB=GPIO.PWM(self.dirRightPin, 500)
self.pwmLeftB.start(0)
self.pwmRightB.start(0)
#stop all wheels
def stop(self):
self.pwmLeftA.ChangeDutyCycle(0)
self.pwmRightA.ChangeDutyCycle(0)
self.pwmLeftB.ChangeDutyCycle(0)
self.pwmRightB.ChangeDutyCycle(0)
#make a hard stop on all wheels
def stopHard(self):
self.pwmLeftA.ChangeDutyCycle(100)
self.pwmRightA.ChangeDutyCycle(100)
self.pwmLeftB.ChangeDutyCycle(100)
self.pwmRightB.ChangeDutyCycle(100)
time.sleep(0.5)
self.stop()
'''
def moveForward(self,ul,ur,uf):
self.pwmLeftA.ChangeDutyCycle(self.forwardSpeed)
self.pwmRightA.ChangeDutyCycle(self.forwardSpeed)
if ul<(self.wallDist-0.5):
#turn left wheel more
self.pwmLeftA.ChangeDutyCycle(self.forwardSpeed*1.3)
print "going right!"
elif ul>(self.wallDist+0.5) and ul<50:
#turn right wheel more
self.pwmRightA.ChangeDutyCycle(self.forwardSpeed*1.3)
print "going left!"
def moveForwardControlled(self,ul,ur,uf):
self.pwmLeftA.ChangeDutyCycle(self.forwardSpeed)
self.pwmRightA.ChangeDutyCycle(self.forwardSpeed)
#control loop if distance to wall is not appropriate
if ul<20:
error=ul-self.wallDist
if error<=0:
print ("going right with "+str(1+self.P*-error))
self.pwmLeftA.ChangeDutyCycle(min([self.forwardSpeed*(1+self.P*-error),self.maxSpeed,100]))
else:
print ("going left with "+str(1+self.P*error))
self.pwmRightA.ChangeDutyCycle(min([self.forwardSpeed*(1+self.P*error),self.maxSpeed,100]))
def moveForwardControlledPID(self,ul,ur,uf):
self.pwmLeftA.ChangeDutyCycle(self.forwardSpeed)
self.pwmRightA.ChangeDutyCycle(self.forwardSpeed)
#control loop if distance to wall is not appropriate
if ul<20:
error=ul-self.wallDist
self.errorIntegrated+=error
u=self.P*error+self.D*(error-self.errorOld)+self.I*self.errorIntegrated
if u<=0:
print ("going right with "+str((1-u)))
self.pwmLeftA.ChangeDutyCycle(min([self.forwardSpeed*(1-u),self.maxSpeed,100]))
else:
print ("going left with "+str((1+u)))
self.pwmRightA.ChangeDutyCycle(min([self.forwardSpeed*(1+u),self.maxSpeed,100]))
self.errorOld=error
'''
def moveForwardControlledPIDboth(self,ul,ur,uf):
self.pwmLeftA.ChangeDutyCycle(self.forwardSpeed)
self.pwmRightA.ChangeDutyCycle(self.forwardSpeed+3)
#control loop if distance to left wall is not appropriate
if ul<20 and ul >3:
error=ul-self.wallDist
### Evaluation
self.totalErr+=abs(error)
self.errorVec.append(error)
self.urVec.append(ur)
self.count+=1
###
self.errorIntegrated+=error
u=self.P*error+self.D*(error-self.errorOld)+self.I*self.errorIntegrated
if u<=0:
print ("going right with "+str((1-u)))
self.pwmLeftA.ChangeDutyCycle(min([self.forwardSpeed*(1-u),self.maxSpeed,100]))
else:
print ("going left with "+str((1+u)))
self.pwmRightA.ChangeDutyCycle(min([self.forwardSpeed*(1+u),self.maxSpeed,100]))
self.errorOld=error
time.sleep(0.07)
elif ur<20:
error=ur-self.wallDist
### Evaluation
self.totalErr+=abs(error)
self.errorVec.append(error)
self.count+=1
###
self.errorIntegrated+=error
u=self.P*error+self.D*(error-self.errorOld)+self.I*self.errorIntegrated
if u<=0:
print ("going left with "+str((1-u)))
self.pwmRightA.ChangeDutyCycle(min([self.forwardSpeed*(1-u),self.forwardSpeed*9.4,100]))
else:
print ("going right with "+str((1+u)))
self.pwmLeftA.ChangeDutyCycle(min([self.forwardSpeed*(1+u),self.forwardSpeed*9.4,100]))
self.errorOld=error
time.sleep(0.07)
else:
time.sleep(0.07)
#time.sleep(0.08)#time.sleep(0.05)
self.stop()
time.sleep(0.02)
# move both wheels backward
def moveBack(self):
self.pwmLeftB.ChangeDutyCycle(self.forwardSpeed)
self.pwmRightB.ChangeDutyCycle(self.forwardSpeed)
#move both wheels forward
def moveFront(self):
self.pwmLeftA.ChangeDutyCycle(self.forwardSpeed)
self.pwmRightA.ChangeDutyCycle(self.forwardSpeed+3)
#make a turn to the left
def turnLeft(self):
#stop both wheels
self.stop()
time.sleep(0.3)
#turn right wheel forward
self.pwmRightA.ChangeDutyCycle(self.turnSpeed)
#turn left wheel backward
self.pwmLeftB.ChangeDutyCycle(self.turnSpeed)
#wait
time.sleep(self.turnTime)
# stop both wheels
self.stop()
time.sleep(0.3)
#make a turn to the right
def turnRight(self):
    """Rotate the robot in place to the right.

    Mirror image of ``turnLeft``: right wheel backward, left wheel
    forward for ``turnTime`` seconds.
    """
    # stop both wheels before changing direction
    self.stop()
    time.sleep(0.3)
    # turn right wheel backward
    self.pwmRightB.ChangeDutyCycle(self.turnSpeed)
    # turn left wheel forward
    self.pwmLeftA.ChangeDutyCycle(self.turnSpeed)
    # wait for the rotation to complete
    time.sleep(self.turnTime)
    # stop both wheels
    self.stop()
    time.sleep(0.3)
#make a 180 degree turn
def turnBack(self):
    """Make an (approximately) 180 degree turn.

    Same wheel configuration as ``turnRight`` but rotates 1.8x as long;
    the factor is an empirically tuned constant, not exactly 2x.
    """
    # stop both wheels before changing direction
    self.stop()
    time.sleep(0.3)
    # turn right wheel backward
    self.pwmRightB.ChangeDutyCycle(self.turnSpeed)
    # turn left wheel forward
    self.pwmLeftA.ChangeDutyCycle(self.turnSpeed)
    # wait long enough for roughly half a revolution
    time.sleep(1.8*self.turnTime)
    # stop both wheels
    self.stop()
    time.sleep(0.3)
# function for plotting the distance of the robot to the wall
# this can be used for evaluation and parameter tuning
def plotError(self, errorVec, urVec):
    """Plot the wall-distance error per timestep.

    Used for evaluation and PID parameter tuning: shows the recorded
    error against a zero baseline.

    Args:
        errorVec: list of per-timestep distance errors.
        urVec: right-sensor readings; currently unused by the plot but
            kept so existing callers (e.g. ``kill``) keep working.
    """
    # Build x axis and zero baseline directly instead of the old manual
    # loop, which also constructed an unused wall-distance series.
    timesteps = list(range(len(errorVec)))
    zero_line = [0] * len(errorVec)
    plt.plot(timesteps, errorVec, timesteps, zero_line)
    plt.ylabel('error')
    plt.xlabel('timestep')
    plt.show()
def kill(self):
    """Stop the motors, release the GPIO pins and report tracking quality."""
    # Reset GPIO settings
    self.stop()
    GPIO.cleanup()
    # Guard the report: count is 0 if the control loop never sampled an
    # error, and the original unconditional division crashed on shutdown.
    if self.count:
        print ("Total average error is: "+str(self.totalErr/self.count))
    else:
        print ("Total average error is: undefined (no samples recorded)")
    #self.plotError(self.errorVec,self.urVec)
|
HeyItsAlan/flask | refs/heads/master | tests/test_helpers.py | 142 | # -*- coding: utf-8 -*-
"""
tests.helpers
~~~~~~~~~~~~~~~~~~~~~~~
Various helpers.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import pytest
import os
import datetime
import flask
from logging import StreamHandler
from werkzeug.http import parse_cache_control_header, parse_options_header
from werkzeug.http import http_date
from flask._compat import StringIO, text_type
def has_encoding(name):
    """Return True if the Python codec registry knows codec *name*."""
    import codecs
    try:
        codecs.lookup(name)
    except LookupError:
        return False
    return True
class TestJSON(object):
    """Behavioural tests for flask.json, flask.jsonify and request JSON parsing."""

    def test_jsonify_date_types(self):
        """Test jsonify with datetime.date and datetime.datetime types."""
        test_dates = (
            datetime.datetime(1973, 3, 11, 6, 30, 45),
            datetime.date(1975, 1, 5)
        )
        app = flask.Flask(__name__)
        c = app.test_client()
        for i, d in enumerate(test_dates):
            url = '/datetest{0}'.format(i)
            # Bind d as a default argument so each view keeps its own date
            # (avoids the late-binding closure pitfall).
            app.add_url_rule(url, str(i), lambda val=d: flask.jsonify(x=val))
            rv = c.get(url)
            assert rv.mimetype == 'application/json'
            assert flask.json.loads(rv.data)['x'] == http_date(d.timetuple())

    def test_post_empty_json_adds_exception_to_response_content_in_debug(self):
        """In debug mode the JSON decode error text is exposed to the client."""
        app = flask.Flask(__name__)
        app.config['DEBUG'] = True

        @app.route('/json', methods=['POST'])
        def post_json():
            flask.request.get_json()
            return None

        c = app.test_client()
        rv = c.post('/json', data=None, content_type='application/json')
        assert rv.status_code == 400
        assert b'Failed to decode JSON object' in rv.data

    def test_post_empty_json_wont_add_exception_to_response_if_no_debug(self):
        """Without debug mode the decode error text stays out of the response."""
        app = flask.Flask(__name__)
        app.config['DEBUG'] = False

        @app.route('/json', methods=['POST'])
        def post_json():
            flask.request.get_json()
            return None

        c = app.test_client()
        rv = c.post('/json', data=None, content_type='application/json')
        assert rv.status_code == 400
        assert b'Failed to decode JSON object' not in rv.data

    def test_json_bad_requests(self):
        """A malformed JSON body produces a 400 response."""
        app = flask.Flask(__name__)

        @app.route('/json', methods=['POST'])
        def return_json():
            return flask.jsonify(foo=text_type(flask.request.get_json()))

        c = app.test_client()
        rv = c.post('/json', data='malformed', content_type='application/json')
        assert rv.status_code == 400

    def test_json_custom_mimetypes(self):
        """Vendor-style ``application/x+json`` mimetypes are parsed as JSON."""
        app = flask.Flask(__name__)

        @app.route('/json', methods=['POST'])
        def return_json():
            return flask.request.get_json()

        c = app.test_client()
        rv = c.post('/json', data='"foo"', content_type='application/x+json')
        assert rv.data == b'foo'

    def test_json_body_encoding(self):
        """The charset parameter of the content type is honoured when decoding."""
        app = flask.Flask(__name__)
        app.testing = True

        @app.route('/')
        def index():
            return flask.request.get_json()

        c = app.test_client()
        resp = c.get('/', data=u'"Hällo Wörld"'.encode('iso-8859-15'),
                     content_type='application/json; charset=iso-8859-15')
        assert resp.data == u'Hällo Wörld'.encode('utf-8')

    def test_jsonify(self):
        """jsonify accepts both keyword arguments and a single dict argument."""
        d = dict(a=23, b=42, c=[1, 2, 3])
        app = flask.Flask(__name__)

        @app.route('/kw')
        def return_kwargs():
            return flask.jsonify(**d)

        @app.route('/dict')
        def return_dict():
            return flask.jsonify(d)

        c = app.test_client()
        for url in '/kw', '/dict':
            rv = c.get(url)
            assert rv.mimetype == 'application/json'
            assert flask.json.loads(rv.data) == d

    def test_json_as_unicode(self):
        """JSON_AS_ASCII toggles ASCII-escaping of non-ASCII characters."""
        app = flask.Flask(__name__)

        app.config['JSON_AS_ASCII'] = True
        with app.app_context():
            rv = flask.json.dumps(u'\N{SNOWMAN}')
            assert rv == '"\\u2603"'

        app.config['JSON_AS_ASCII'] = False
        with app.app_context():
            rv = flask.json.dumps(u'\N{SNOWMAN}')
            assert rv == u'"\u2603"'

    def test_json_attr(self):
        """Parsed JSON attributes are regular Python values."""
        app = flask.Flask(__name__)

        @app.route('/add', methods=['POST'])
        def add():
            json = flask.request.get_json()
            return text_type(json['a'] + json['b'])

        c = app.test_client()
        rv = c.post('/add', data=flask.json.dumps({'a': 1, 'b': 2}),
                    content_type='application/json')
        assert rv.data == b'3'

    def test_template_escaping(self):
        """The |tojson filter escapes characters that are dangerous in HTML."""
        app = flask.Flask(__name__)
        render = flask.render_template_string
        with app.test_request_context():
            rv = flask.json.htmlsafe_dumps('</script>')
            assert rv == u'"\\u003c/script\\u003e"'
            assert type(rv) == text_type
            rv = render('{{ "</script>"|tojson }}')
            assert rv == '"\\u003c/script\\u003e"'
            rv = render('{{ "<\0/script>"|tojson }}')
            assert rv == '"\\u003c\\u0000/script\\u003e"'
            rv = render('{{ "<!--<script>"|tojson }}')
            assert rv == '"\\u003c!--\\u003cscript\\u003e"'
            rv = render('{{ "&"|tojson }}')
            assert rv == '"\\u0026"'
            rv = render('{{ "\'"|tojson }}')
            assert rv == '"\\u0027"'
            rv = render("<a ng-data='{{ data|tojson }}'></a>",
                        data={'x': ["foo", "bar", "baz'"]})
            assert rv == '<a ng-data=\'{"x": ["foo", "bar", "baz\\u0027"]}\'></a>'

    def test_json_customization(self):
        """Custom encoder/decoder classes set on the app are actually used."""
        class X(object):
            def __init__(self, val):
                self.val = val

        class MyEncoder(flask.json.JSONEncoder):
            def default(self, o):
                if isinstance(o, X):
                    return '<%d>' % o.val
                return flask.json.JSONEncoder.default(self, o)

        class MyDecoder(flask.json.JSONDecoder):
            def __init__(self, *args, **kwargs):
                kwargs.setdefault('object_hook', self.object_hook)
                flask.json.JSONDecoder.__init__(self, *args, **kwargs)

            def object_hook(self, obj):
                if len(obj) == 1 and '_foo' in obj:
                    return X(obj['_foo'])
                return obj

        app = flask.Flask(__name__)
        app.testing = True
        app.json_encoder = MyEncoder
        app.json_decoder = MyDecoder

        @app.route('/', methods=['POST'])
        def index():
            return flask.json.dumps(flask.request.get_json()['x'])

        c = app.test_client()
        rv = c.post('/', data=flask.json.dumps({
            'x': {'_foo': 42}
        }), content_type='application/json')
        assert rv.data == b'"<42>"'

    def test_modified_url_encoding(self):
        """A custom Request subclass can change the URL charset."""
        class ModifiedRequest(flask.Request):
            url_charset = 'euc-kr'

        app = flask.Flask(__name__)
        app.testing = True
        app.request_class = ModifiedRequest
        app.url_map.charset = 'euc-kr'

        @app.route('/')
        def index():
            return flask.request.args['foo']

        rv = app.test_client().get(u'/?foo=정상처리'.encode('euc-kr'))
        assert rv.status_code == 200
        assert rv.data == u'정상처리'.encode('utf-8')

    # Disable the test above entirely when the interpreter lacks the
    # euc-kr codec (setting the attribute to None skips collection).
    if not has_encoding('euc-kr'):
        test_modified_url_encoding = None

    def test_json_key_sorting(self):
        """JSON_SORT_KEYS sorts dict keys; accept str- or int-ordered output."""
        app = flask.Flask(__name__)
        app.testing = True
        assert app.config['JSON_SORT_KEYS'] == True
        d = dict.fromkeys(range(20), 'foo')

        @app.route('/')
        def index():
            return flask.jsonify(values=d)

        c = app.test_client()
        rv = c.get('/')
        lines = [x.strip() for x in rv.data.strip().decode('utf-8').splitlines()]
        sorted_by_str = [
            '{',
            '"values": {',
            '"0": "foo",',
            '"1": "foo",',
            '"10": "foo",',
            '"11": "foo",',
            '"12": "foo",',
            '"13": "foo",',
            '"14": "foo",',
            '"15": "foo",',
            '"16": "foo",',
            '"17": "foo",',
            '"18": "foo",',
            '"19": "foo",',
            '"2": "foo",',
            '"3": "foo",',
            '"4": "foo",',
            '"5": "foo",',
            '"6": "foo",',
            '"7": "foo",',
            '"8": "foo",',
            '"9": "foo"',
            '}',
            '}'
        ]
        sorted_by_int = [
            '{',
            '"values": {',
            '"0": "foo",',
            '"1": "foo",',
            '"2": "foo",',
            '"3": "foo",',
            '"4": "foo",',
            '"5": "foo",',
            '"6": "foo",',
            '"7": "foo",',
            '"8": "foo",',
            '"9": "foo",',
            '"10": "foo",',
            '"11": "foo",',
            '"12": "foo",',
            '"13": "foo",',
            '"14": "foo",',
            '"15": "foo",',
            '"16": "foo",',
            '"17": "foo",',
            '"18": "foo",',
            '"19": "foo"',
            '}',
            '}'
        ]
        # Depending on the underlying JSON implementation numeric string
        # keys may sort lexicographically or numerically; both are fine.
        try:
            assert lines == sorted_by_int
        except AssertionError:
            assert lines == sorted_by_str
class TestSendfile(object):
    """Tests for flask.send_file / send_from_directory behaviour."""

    def test_send_file_regular(self):
        """send_file on a path streams the file with the right mimetype."""
        app = flask.Flask(__name__)
        with app.test_request_context():
            rv = flask.send_file('static/index.html')
            assert rv.direct_passthrough
            assert rv.mimetype == 'text/html'
            with app.open_resource('static/index.html') as f:
                rv.direct_passthrough = False
                assert rv.data == f.read()
            rv.close()

    def test_send_file_xsendfile(self):
        """With use_x_sendfile the file is delegated via the X-Sendfile header."""
        app = flask.Flask(__name__)
        app.use_x_sendfile = True
        with app.test_request_context():
            rv = flask.send_file('static/index.html')
            assert rv.direct_passthrough
            assert 'x-sendfile' in rv.headers
            assert rv.headers['x-sendfile'] == \
                os.path.join(app.root_path, 'static/index.html')
            assert rv.mimetype == 'text/html'
            rv.close()

    def test_send_file_object(self, catch_deprecation_warnings):
        """send_file accepts open file objects; some uses raise deprecations."""
        app = flask.Flask(__name__)
        with catch_deprecation_warnings() as captured:
            with app.test_request_context():
                f = open(os.path.join(app.root_path, 'static/index.html'), mode='rb')
                rv = flask.send_file(f)
                rv.direct_passthrough = False
                with app.open_resource('static/index.html') as f:
                    assert rv.data == f.read()
                assert rv.mimetype == 'text/html'
                rv.close()
            # mimetypes + etag
            assert len(captured) == 2

        app.use_x_sendfile = True
        with catch_deprecation_warnings() as captured:
            with app.test_request_context():
                f = open(os.path.join(app.root_path, 'static/index.html'))
                rv = flask.send_file(f)
                assert rv.mimetype == 'text/html'
                assert 'x-sendfile' in rv.headers
                assert rv.headers['x-sendfile'] == \
                    os.path.join(app.root_path, 'static/index.html')
                rv.close()
            # mimetypes + etag
            assert len(captured) == 2

        app.use_x_sendfile = False
        with app.test_request_context():
            with catch_deprecation_warnings() as captured:
                f = StringIO('Test')
                rv = flask.send_file(f)
                rv.direct_passthrough = False
                assert rv.data == b'Test'
                assert rv.mimetype == 'application/octet-stream'
                rv.close()
            # etags
            assert len(captured) == 1
            with catch_deprecation_warnings() as captured:
                # File-like object exposing a .name attribute: mimetype is
                # guessed from the name.
                class PyStringIO(object):
                    def __init__(self, *args, **kwargs):
                        self._io = StringIO(*args, **kwargs)

                    def __getattr__(self, name):
                        return getattr(self._io, name)
                f = PyStringIO('Test')
                f.name = 'test.txt'
                rv = flask.send_file(f)
                rv.direct_passthrough = False
                assert rv.data == b'Test'
                assert rv.mimetype == 'text/plain'
                rv.close()
            # attachment_filename and etags
            assert len(captured) == 3
            with catch_deprecation_warnings() as captured:
                f = StringIO('Test')
                rv = flask.send_file(f, mimetype='text/plain')
                rv.direct_passthrough = False
                assert rv.data == b'Test'
                assert rv.mimetype == 'text/plain'
                rv.close()
            # etags
            assert len(captured) == 1

        app.use_x_sendfile = True
        with catch_deprecation_warnings() as captured:
            with app.test_request_context():
                # X-Sendfile cannot be used for in-memory objects.
                f = StringIO('Test')
                rv = flask.send_file(f)
                assert 'x-sendfile' not in rv.headers
                rv.close()
            # etags
            assert len(captured) == 1

    def test_attachment(self, catch_deprecation_warnings):
        """as_attachment=True sets a Content-Disposition: attachment header."""
        app = flask.Flask(__name__)
        with catch_deprecation_warnings() as captured:
            with app.test_request_context():
                f = open(os.path.join(app.root_path, 'static/index.html'))
                rv = flask.send_file(f, as_attachment=True)
                value, options = parse_options_header(rv.headers['Content-Disposition'])
                assert value == 'attachment'
                rv.close()
            # mimetypes + etag
            assert len(captured) == 2

        with app.test_request_context():
            assert options['filename'] == 'index.html'
            rv = flask.send_file('static/index.html', as_attachment=True)
            value, options = parse_options_header(rv.headers['Content-Disposition'])
            assert value == 'attachment'
            assert options['filename'] == 'index.html'
            rv.close()

        with app.test_request_context():
            rv = flask.send_file(StringIO('Test'), as_attachment=True,
                                 attachment_filename='index.txt',
                                 add_etags=False)
            assert rv.mimetype == 'text/plain'
            value, options = parse_options_header(rv.headers['Content-Disposition'])
            assert value == 'attachment'
            assert options['filename'] == 'index.txt'
            rv.close()

    def test_static_file(self):
        """Cache-Control max-age follows config and get_send_file_max_age."""
        app = flask.Flask(__name__)
        # default cache timeout is 12 hours
        with app.test_request_context():
            # Test with static file handler.
            rv = app.send_static_file('index.html')
            cc = parse_cache_control_header(rv.headers['Cache-Control'])
            assert cc.max_age == 12 * 60 * 60
            rv.close()
            # Test again with direct use of send_file utility.
            rv = flask.send_file('static/index.html')
            cc = parse_cache_control_header(rv.headers['Cache-Control'])
            assert cc.max_age == 12 * 60 * 60
            rv.close()
        app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 3600
        with app.test_request_context():
            # Test with static file handler.
            rv = app.send_static_file('index.html')
            cc = parse_cache_control_header(rv.headers['Cache-Control'])
            assert cc.max_age == 3600
            rv.close()
            # Test again with direct use of send_file utility.
            rv = flask.send_file('static/index.html')
            cc = parse_cache_control_header(rv.headers['Cache-Control'])
            assert cc.max_age == 3600
            rv.close()

        # Subclasses may override the max age per file.
        class StaticFileApp(flask.Flask):
            def get_send_file_max_age(self, filename):
                return 10

        app = StaticFileApp(__name__)
        with app.test_request_context():
            # Test with static file handler.
            rv = app.send_static_file('index.html')
            cc = parse_cache_control_header(rv.headers['Cache-Control'])
            assert cc.max_age == 10
            rv.close()
            # Test again with direct use of send_file utility.
            rv = flask.send_file('static/index.html')
            cc = parse_cache_control_header(rv.headers['Cache-Control'])
            assert cc.max_age == 10
            rv.close()

    def test_send_from_directory(self):
        """send_from_directory resolves files relative to the app root."""
        app = flask.Flask(__name__)
        app.testing = True
        app.root_path = os.path.join(os.path.dirname(__file__),
                                     'test_apps', 'subdomaintestmodule')
        with app.test_request_context():
            rv = flask.send_from_directory('static', 'hello.txt')
            rv.direct_passthrough = False
            assert rv.data.strip() == b'Hello Subdomain'
            rv.close()
class TestLogging(object):
    """Tests for app.logger behaviour and a few url_for helpers."""

    def test_logger_cache(self):
        """Changing logger_name invalidates the cached logger object."""
        app = flask.Flask(__name__)
        logger1 = app.logger
        assert app.logger is logger1
        assert logger1.name == __name__
        app.logger_name = __name__ + '/test_logger_cache'
        assert app.logger is not logger1

    def test_debug_log(self, capsys):
        """In debug mode warning and debug records go to stderr."""
        app = flask.Flask(__name__)
        app.debug = True

        @app.route('/')
        def index():
            app.logger.warning('the standard library is dead')
            app.logger.debug('this is a debug statement')
            return ''

        @app.route('/exc')
        def exc():
            1 // 0

        with app.test_client() as c:
            c.get('/')
            out, err = capsys.readouterr()
            assert 'WARNING in test_helpers [' in err
            assert os.path.basename(__file__.rsplit('.', 1)[0] + '.py') in err
            assert 'the standard library is dead' in err
            assert 'this is a debug statement' in err

            # In debug mode the exception propagates to the test client.
            with pytest.raises(ZeroDivisionError):
                c.get('/exc')

    def test_debug_log_override(self):
        """An explicitly set logger level is not overridden in debug mode."""
        app = flask.Flask(__name__)
        app.debug = True
        app.logger_name = 'flask_tests/test_debug_log_override'
        app.logger.level = 10
        assert app.logger.level == 10

    def test_exception_logging(self):
        """Unhandled view exceptions are logged with a full traceback."""
        out = StringIO()
        app = flask.Flask(__name__)
        app.config['LOGGER_HANDLER_POLICY'] = 'never'
        app.logger_name = 'flask_tests/test_exception_logging'
        app.logger.addHandler(StreamHandler(out))

        @app.route('/')
        def index():
            1 // 0

        rv = app.test_client().get('/')
        assert rv.status_code == 500
        assert b'Internal Server Error' in rv.data

        err = out.getvalue()
        assert 'Exception on / [GET]' in err
        assert 'Traceback (most recent call last):' in err
        assert '1 // 0' in err
        assert 'ZeroDivisionError:' in err

    def test_processor_exceptions(self):
        """Errors raised in before/after request hooks reach the 500 handler."""
        app = flask.Flask(__name__)
        app.config['LOGGER_HANDLER_POLICY'] = 'never'

        # Both hooks read 'trigger' from the enclosing function scope.
        @app.before_request
        def before_request():
            if trigger == 'before':
                1 // 0

        @app.after_request
        def after_request(response):
            if trigger == 'after':
                1 // 0
            return response

        @app.route('/')
        def index():
            return 'Foo'

        @app.errorhandler(500)
        def internal_server_error(e):
            return 'Hello Server Error', 500

        for trigger in 'before', 'after':
            rv = app.test_client().get('/')
            assert rv.status_code == 500
            assert rv.data == b'Hello Server Error'

    def test_url_for_with_anchor(self):
        """url_for URL-encodes the _anchor fragment."""
        app = flask.Flask(__name__)

        @app.route('/')
        def index():
            return '42'

        with app.test_request_context():
            assert flask.url_for('index', _anchor='x y') == '/#x%20y'

    def test_url_for_with_scheme(self):
        """_scheme with _external=True produces an absolute URL."""
        app = flask.Flask(__name__)

        @app.route('/')
        def index():
            return '42'

        with app.test_request_context():
            assert flask.url_for('index', _external=True, _scheme='https') == 'https://localhost/'

    def test_url_for_with_scheme_not_external(self):
        """_scheme without _external=True is rejected."""
        app = flask.Flask(__name__)

        @app.route('/')
        def index():
            return '42'

        with app.test_request_context():
            pytest.raises(ValueError,
                          flask.url_for,
                          'index',
                          _scheme='https')

    def test_url_with_method(self):
        """url_for honours _method when one view maps multiple HTTP methods."""
        from flask.views import MethodView
        app = flask.Flask(__name__)

        class MyView(MethodView):
            def get(self, id=None):
                if id is None:
                    return 'List'
                return 'Get %d' % id

            def post(self):
                return 'Create'

        myview = MyView.as_view('myview')
        app.add_url_rule('/myview/', methods=['GET'],
                         view_func=myview)
        app.add_url_rule('/myview/<int:id>', methods=['GET'],
                         view_func=myview)
        app.add_url_rule('/myview/create', methods=['POST'],
                         view_func=myview)

        with app.test_request_context():
            assert flask.url_for('myview', _method='GET') == '/myview/'
            assert flask.url_for('myview', id=42, _method='GET') == '/myview/42'
            assert flask.url_for('myview', _method='POST') == '/myview/create'
class TestNoImports(object):
    """Test Flasks are created without import.

    Avoiding ``__import__`` helps create Flask instances where there are
    errors at import time.  Those runtime errors will be apparent to the
    user soon enough, but tools which build Flask instances
    meta-programmatically benefit from a Flask which does not
    ``__import__``.  Instead of importing to retrieve file paths or
    metadata on a module or package, use the pkgutil and imp modules in
    the Python standard library.
    """

    def test_name_with_import_error(self, modules_tmpdir):
        # Plant a module whose import would blow up immediately; creating
        # the app must not trigger that import.
        broken_module = modules_tmpdir.join('importerror.py')
        broken_module.write('raise NotImplementedError()')
        try:
            flask.Flask('importerror')
        except NotImplementedError:
            assert False, 'Flask(import_name) is importing import_name.'
class TestStreaming(object):
    """Tests for streaming responses bound to the request context."""

    def test_streaming_with_context(self):
        app = flask.Flask(__name__)
        app.testing = True

        @app.route('/')
        def index():
            def generate():
                yield 'Hello '
                yield flask.request.args['name']
                yield '!'
            # Wrapping the generator keeps the request context alive
            # while the response body is being iterated.
            return flask.Response(flask.stream_with_context(generate()))

        client = app.test_client()
        resp = client.get('/?name=World')
        assert resp.data == b'Hello World!'

    def test_streaming_with_context_as_decorator(self):
        app = flask.Flask(__name__)
        app.testing = True

        @app.route('/')
        def index():
            # stream_with_context also works as a decorator on the
            # generator function itself.
            @flask.stream_with_context
            def generate():
                yield 'Hello '
                yield flask.request.args['name']
                yield '!'
            return flask.Response(generate())

        client = app.test_client()
        resp = client.get('/?name=World')
        assert resp.data == b'Hello World!'

    def test_streaming_with_context_and_custom_close(self):
        app = flask.Flask(__name__)
        app.testing = True
        close_calls = []

        class Wrapper(object):
            """Iterator proxy that records when the response is closed."""

            def __init__(self, gen):
                self._gen = gen

            def __iter__(self):
                return self

            def close(self):
                close_calls.append(42)

            def __next__(self):
                return next(self._gen)
            next = __next__  # Python 2 iterator protocol

        @app.route('/')
        def index():
            def generate():
                yield 'Hello '
                yield flask.request.args['name']
                yield '!'
            return flask.Response(flask.stream_with_context(
                Wrapper(generate())))

        client = app.test_client()
        resp = client.get('/?name=World')
        assert resp.data == b'Hello World!'
        # The custom close() hook must have been invoked exactly once.
        assert close_calls == [42]
|
gsnedders/presto-testo | refs/heads/master | css/image-fit/reftests/svg-no-viewBox/build.py | 4 | #!/usr/bin/python
# Generates one -o-object-fit reftest per (preserveAspectRatio, object-fit)
# combination, shared reference files, and a reftest.list manifest.

pARs = ['none', 'xMidYMid meet', 'xMidYMid slice']
fits = ['fill', 'contain', 'cover', 'auto', 'none']

# Placeholders: (fit, short pAR, fit, full pAR).
test_template = """<!DOCTYPE html>
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>-o-object-fit:%s; pAR:%s</title>
<link rel="stylesheet" href="../../support/reftests.css"/>
<style>
#test > * { -o-object-fit:%s; overflow:hidden; height:100px }
</style>
</head>
<body>
<div id="test">
<svg xmlns="http://www.w3.org/2000/svg" width="220" height="220" preserveAspectRatio="%s">
<polygon points="65,1 155,1 219,65 219,155 155,219 65,219 1,155 1,65" fill="#d9bb7a" stroke="black"/>
</svg>
</div>
</body>
</html>
"""

# Placeholders: (fit, short pAR, positioning style).
ref_template = """<!DOCTYPE html>
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>Reference for -o-object-fit:%s; pAR:%s</title>
<link rel="stylesheet" href="../../support/reftests.css"/>
<style>
.helper { overflow:hidden; height:100px }
.helper > * { %s; -o-object-fit:fill }
</style>
</head>
<body>
<div id="ref">
<span class="helper"><img src="../../support/simple-shape.svg"/></span>
</div>
</body>
</html>
"""

reftest_list = ''
# Maps a reference positioning style to the reference file already written
# for it, so tests with identical expected rendering share one reference.
ref_hashes = {}
for pAR in pARs:
    # Short token used in file names.
    xpAR = pAR
    if xpAR == 'xMidYMid meet':
        xpAR = 'meet'
    if xpAR == 'xMidYMid slice':
        xpAR = 'slice'
    for fit in fits:
        # Expected rendered size of the 220x220 image in a 200x100 box.
        if fit == 'fill':
            refw = 200
            refh = 100
        elif fit == 'contain':
            refw = 100
            refh = 100
        elif fit == 'cover':
            refw = 200
            refh = 200
        else:  # auto, none
            refw = 220
            refh = 220
        if fit == 'auto':
            refx = refy = 0
        else:
            # Centre the image in the 200x100 content box.  Floor division
            # keeps the pixel offsets integral on Python 3 as well
            # (plain '/' would yield e.g. 'left:0.0px' there).
            refx = 200 // 2 - refw // 2
            refy = 100 // 2 - refh // 2
        test_filename = "%s_%s.xhtml" % (xpAR, fit)
        with open(test_filename, 'w') as test_file:
            test_file.write(test_template % (fit, xpAR, fit, pAR))
        refstyle = 'left:%spx; top: %spx; width:%spx; height:%spx' % (refx, refy, refw, refh)
        # Direct membership test instead of the old linear scan over
        # iteritems() (which was also Python-2-only).
        if refstyle not in ref_hashes:
            ref_filename = "%s_%s-ref.xhtml" % (xpAR, fit)
            ref_hashes[refstyle] = ref_filename
            with open(ref_filename, 'w') as ref_file:
                ref_file.write(ref_template % (fit, xpAR, refstyle))
        else:
            ref_filename = ref_hashes[refstyle]
        reftest_list += '== ' + test_filename + ' ' + ref_filename + '\n'

with open('reftest.list', 'w') as list_file:
    list_file.write(reftest_list)
|
magic0704/neutron | refs/heads/master | neutron/plugins/nec/config.py | 26 | # Copyright 2012 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from neutron.agent.common import config
# Open vSwitch options, registered under the [OVS] group.
ovs_opts = [
    cfg.StrOpt('integration_bridge', default='br-int',
               help=_("Integration bridge to use.")),
]

# L2 agent options, registered under the [AGENT] group.
agent_opts = [
    cfg.IntOpt('polling_interval', default=2,
               help=_("The number of seconds the agent will wait between "
                      "polling for local device changes.")),
]

# OpenFlow Controller (OFC) REST API options, registered under [OFC].
ofc_opts = [
    cfg.StrOpt('host', default='127.0.0.1',
               help=_("Host to connect to.")),
    cfg.StrOpt('path_prefix', default='',
               help=_("Base URL of OFC REST API. "
                      "It is prepended to each API request.")),
    cfg.StrOpt('port', default='8888',
               help=_("Port to connect to.")),
    cfg.StrOpt('driver', default='trema',
               help=_("Driver to use.")),
    cfg.BoolOpt('enable_packet_filter', default=True,
                help=_("Enable packet filter.")),
    cfg.BoolOpt('support_packet_filter_on_ofc_router', default=True,
                help=_("Support packet filter on OFC router interface.")),
    cfg.BoolOpt('use_ssl', default=False,
                help=_("Use SSL to connect.")),
    cfg.StrOpt('key_file',
               help=_("Location of key file.")),
    cfg.StrOpt('cert_file',
               help=_("Location of certificate file.")),
    cfg.BoolOpt('insecure_ssl', default=False,
                help=_("Disable SSL certificate verification.")),
    cfg.IntOpt('api_max_attempts', default=3,
               help=_("Maximum attempts per OFC API request. "
                      "NEC plugin retries API request to OFC "
                      "when OFC returns ServiceUnavailable (503). "
                      "The value must be greater than 0.")),
]

# Router provider options, registered under the [PROVIDER] group.
provider_opts = [
    cfg.StrOpt('default_router_provider',
               default='l3-agent',
               help=_('Default router provider to use.')),
    cfg.ListOpt('router_providers',
                default=['l3-agent', 'openflow'],
                help=_('List of enabled router providers.'))
]
def register_plugin_opts():
    """Register the NEC plugin's server-side option groups with oslo.config."""
    for opts, group in ((ofc_opts, "OFC"), (provider_opts, "PROVIDER")):
        cfg.CONF.register_opts(opts, group)
def register_agent_opts():
    """Register the NEC agent option groups plus shared agent-state options."""
    for opts, group in ((agent_opts, "AGENT"), (ovs_opts, "OVS")):
        cfg.CONF.register_opts(opts, group)
    config.register_agent_state_opts_helper(cfg.CONF)
|
google/ehr-predictions | refs/heads/main | ehr_prediction_modeling/tasks/labs_task.py | 1 | # coding=utf-8
# Copyright 2020 Google Health Research.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Labs task implementation to be used in experiments."""
from typing import Dict, List, Mapping, Optional, Tuple, Union
from ehr_prediction_modeling import types
from ehr_prediction_modeling.tasks import base_task
from ehr_prediction_modeling.tasks import task_data
from ehr_prediction_modeling.tasks import task_masks
from ehr_prediction_modeling.utils import batches
from ehr_prediction_modeling.utils import label_utils
from ehr_prediction_modeling.utils import loss_utils
from ehr_prediction_modeling.utils import mask_utils
import tensorflow.compat.v1 as tf
from ehr_prediction_modeling import configdict
class LabsRegression(base_task.Task):
  """Task implementation for prediction of future lab or vitals value."""

  task_type = types.TaskTypes.LAB_REGRESSION

  # Only max is illustrated in the fake dataset.
  _aggregate_types = ("max", "min", "mean", "std")

  @property
  def default_masks(self) -> List[str]:
    # Ignore padded timesteps and timesteps whose lookahead window runs
    # past the end of the record.
    return [mask_utils.IGNORE_MASK, mask_utils.UNKNOWN_LOOKAHEAD_MASK]

  @property
  def _unformatted_supported_eval_masks(self) -> Dict[str, List[str]]:
    """See base class."""
    return {
        task_masks.Eval.BASE:
            self.default_masks,
    }

  def __init__(self, config: configdict.ConfigDict):
    # One regression target is created per (lab, time window, aggregation)
    # triple, in that nesting order.
    label_keys = []
    self._target_names = []
    self._num_labs = len(config.labs)
    self._accumulate_logits = config.accumulate_logits
    self._window_times = []  # one value per (lab, aggregation, time horizon)
    for lab_id, lab_name in config.labs:
      for time_window in config.window_times:
        for aggregation in config.aggregations:
          if aggregation not in LabsRegression._aggregate_types:
            raise ValueError(
                "LabsRegression aggregation {} is invalid. Must be one of {}"
                .format(aggregation, LabsRegression._aggregate_types))
          label_keys.append(
              label_utils.get_lab_label_lookahead_key(
                  lab_id,
                  time_window,
                  # In the fake data, max aggregation has no suffix.
                  suffix=None if aggregation == "max" else aggregation))
          # Normalize the display name, e.g. "Lab 1" -> "lab_1".
          lab_name = lab_name.replace(" ", "_").lower()
          target_name = "{aggregation}_{lab_name}_in_{hours}h".format(
              aggregation=aggregation, lab_name=lab_name, hours=time_window)
          self._window_times.append(time_window)
          self._target_names.append(target_name)
    super().__init__(config, label_keys=label_keys)

  def get_label_dicts(
      self
  ) -> Tuple[Dict[Optional[str], Union[
      tf.FixedLenSequenceFeature, tf.FixedLenFeature]], Dict[Union[
          str, None], Union[tf.FixedLenSequenceFeature, tf.FixedLenFeature]]]:
    """Gets the feature dictionaries to parse a tf.SequenceExample.

    These dicts are used in the parse op (either batched or non-batched):
    https://www.tensorflow.org/api_docs/python/tf/io/parse_single_sequence_example#args
    They should include any feature that is used by this task.

    Returns:
      context_d: Dictionaries of TF features to read in context.
      sequence_d: Dictionaries of TF features to read in sequence.
    """
    context_d = {}
    # Each label is a scalar float per timestep.
    sequence_d = {
        label_key: tf.FixedLenSequenceFeature([1], tf.float32)
        for label_key in self._label_keys
    }
    return context_d, sequence_d

  @property
  def num_targets(self) -> int:
    # Total regression outputs: labs x windows x aggregations.
    return len(self._label_keys)

  @property
  def target_names(self) -> List[str]:
    return self._target_names

  @property
  def window_hours(self) -> List[int]:
    return self._window_times

  @property
  def prediction_task_type(self) -> str:
    return types.TaskType.REGRESSION

  def _accumulate_per_lab(self, logits: tf.Tensor) -> tf.Tensor:
    """Accumulates logits across time windows for each lab separately."""
    # Split the last axis into one chunk per lab, take a running sum
    # within each chunk so predictions are monotone over the (sorted)
    # time windows, then re-concatenate in the original order.
    logits_split_by_lab = tf.split(
        logits, num_or_size_splits=self._num_labs, axis=-1)
    accumulated_logits_list = [
        tf.cumsum(lab_logits, axis=-1) for lab_logits in logits_split_by_lab
    ]
    return tf.concat(accumulated_logits_list, axis=-1)

  def _get_all_task_variables(
      self, batch: batches.TFBatch,
      model_output: tf.Tensor) -> task_data.TaskVariables:
    """Computes variables for LabsRegression task.

    Args:
      batch: tf.NextQueuedSequenceBatch, containing a batch of data.
      model_output: Tensor, the output from the model, shape wnt [num_unroll,
        batch_size, dim_model_output].

    Returns:
      task_data.TaskVariables with all the variables from this task.
    """
    logits = self.layer.get_logits(model_output)
    if self._accumulate_logits:
      logits = self._accumulate_per_lab(logits)
    targets = self.get_targets(batch)
    train_loss_mask = self.get_train_mask(batch)
    loss = loss_utils.loss_fn(logits, targets, train_loss_mask,
                              self._config.loss_type)
    eval_mask_dict = self.get_eval_mask_dict(batch)
    return task_data.TaskVariables(
        loss=loss,
        targets=targets,
        predictions=logits,
        train_mask=train_loss_mask,
        eval_mask_dict=eval_mask_dict,
    )

  @classmethod
  def config(
      cls,
      window_times: List[int],
      aggregations: List[str],
      labs: Optional[List[Tuple[str, str]]] = None,
      train_mask: str = task_masks.Train.BASE,
      eval_masks: Optional[List[str]] = None,
      loss_type: str = types.TaskLossType.L2,
      loss_weight: float = 5.0,
      accumulate_logits: bool = False,
      task_layer_sizes: Optional[List[int]] = None,
      regularization_type: str = types.RegularizationType.NONE,
      regularization_weight: float = 0.,
      name: str = types.TaskNames.LAB_REGRESSION,
  ) -> configdict.ConfigDict:
    """Generates a config object for LabsRegression.

    Args:
      window_times: list of int, prediction windows for the labs regression.
      aggregations: list of string, aggregations to use per lab. Should be one
        LabsRegression._aggregate_types.
      labs: list of tuples, (lab_id, lab_name). If not given, a default
        list will be used.
      train_mask: str, name of the mask to be used in train.
      eval_masks: list of str, names of the masks to be used in eval.
      loss_type: str, type of loss to be used.
      loss_weight: float, weight of this task loss.
      accumulate_logits: bool, whether to create a CDF over logits for each lab
        task to encourage monotonicity for increasing time windows. Should only
        be imposed if we are predicting the maximum lab value.
      task_layer_sizes: array of int, the size of the task-specific layers to
        pass the model output through before a final logistic layer. If None,
        there is just the final logistic layer.
      regularization_type: one of types.RegularizationType, the regularization
        to be applied to the task layer(s).
      regularization_weight: float, the weight of the regularization penalty to
        apply to logistic layers associated with this task.
      name: str, name of this task for visualization and debugging.

    Returns:
      A ConfigDict to be used to instantiate a LabsRegression task.
    """
    config = configdict.ConfigDict()
    config.task_type = LabsRegression.task_type
    config.labs = labs or [("42", "Lab 1"), ("43", "Lab 2"),
                           ("44", "Lab 3"), ("45", "Lab 4"),
                           ("46", "Lab 5")]
    config.aggregations = aggregations
    config.hours_after_admission = []  # unused, here for consistency
    # Windows are sorted so accumulated logits follow increasing horizons.
    config.window_times = sorted(window_times)
    config.train_mask = train_mask
    config.eval_masks = eval_masks or []
    config.loss_type = loss_type
    config.loss_weight = loss_weight
    config.accumulate_logits = accumulate_logits
    config.name = name
    config.task_layer_sizes = task_layer_sizes or []
    config.regularization_type = regularization_type
    config.regularization_weight = regularization_weight
    return config

  @classmethod
  def default_configs(cls) -> List[configdict.ConfigDict]:
    """Generates a default config object for LabsRegression."""
    return [
        LabsRegression.config(
            window_times=label_utils.DEFAULT_LOOKAHEAD_WINDOWS,
            aggregations=["max"],
            eval_masks=None,
            loss_type=types.TaskLossType.L2,
            loss_weight=5.0)
    ]
|
cpaulik/scipy | refs/heads/master | tools/validate_runtests_log.py | 75 | #!/usr/bin/env python
"""
Take the test runner log output from the stdin, looking for
the magic line nose runner prints when the test run was successful.
In an ideal world, this should be done directly in runtests.py using the
nose API, but some failure modes fool nose into terminating the python
process with a zero exit code; see, e.g., https://github.com/scipy/scipy/issues/4736
In short, lapack's xerbla can terminate the process with a fortran level STOP
command, which (i) aborts the py process so that runtests.py does not finish,
and (ii) the exit code is implementation-defined.
Also check that the number of tests run is larger than some baseline number
(taken from the state of the master branch at some random point in time.)
This probably could/should be made less brittle.
"""
from __future__ import print_function
import sys
import re
if __name__ == "__main__":
    # Which test suite was run: 'fast' or 'full'?
    try:
        testmode = sys.argv[1]
        if testmode not in ('fast', 'full'):
            raise IndexError
    except IndexError:
        raise ValueError("Usage: validate.py {full|fast} < logfile.")

    # Expected number of tests for each mode; these numbers are for 6abad09.
    # XXX: this should probably track the commit hash or commit date
    expected_size = {'full': 19055,
                     'fast': 17738}

    # Read in the log, parse for the nose printout:
    #   Ran NNN tests in MMMs
    #   <blank line>
    #   OK (SKIP=X, KNOWNFAIL=Y)  or  FAILED (errors=X, failures=Y)
    # NB: raw string, so \d and \S are regex classes rather than
    # (deprecated) string escape sequences.
    r = re.compile(r"Ran (?P<num_tests>\d+) tests in (?P<time>\d+\S+)")
    status, found_it = False, False
    while True:
        line = sys.stdin.readline()
        if not line:
            break
        m = r.search(line)
        if m:
            found_it = True
            sys.stdin.readline()   # skip the blank line after the summary
            line = sys.stdin.readline()
            if "OK" in line:
                status = True
            break

    if found_it:
        # did the run error or fail?
        if not status:
            print("*** Looks like some tests failed.")
            sys.exit(-1)

        # now check that the number of tests run is reasonable
        expected = expected_size[testmode]
        actual = int(m.group('num_tests'))
        if actual < expected:
            print("*** Too few tests: expected %s, run %s" % (expected, actual))
            sys.exit(1)
        else:
            sys.exit(0)
    else:
        print('*** Test runner validation errored: did the run really finish?')
        sys.exit(-1)
|
Distrotech/intellij-community | refs/heads/master | python/testData/completion/parameterFromUsages.after.py | 83 | def foo(bar):
bar.append()
def baz():
    # NOTE(review): this file appears to be IDE completion test data
    # (path: testData/completion); the list-literal argument lets the IDE
    # infer list methods for foo's `bar` parameter -- confirm before
    # changing anything here.
    foo(['hello', 'world'])
|
rahul-ramadas/leetcode | refs/heads/master | search-a-2d-matrix-ii/Solution.36544532.py | 1 | class Solution:
# @param {integer[][]} matrix
# @param {integer} target
# @return {boolean}
def searchMatrix(self, matrix, target):
for row in matrix:
if target in row:
return True
return False
|
Yannig/ansible | refs/heads/devel | lib/ansible/modules/cloud/cloudstack/cs_zone_facts.py | 47 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2016, René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_zone_facts
short_description: Gathering facts of zones from Apache CloudStack based clouds.
description:
- Gathering facts from the API of a zone.
version_added: "2.1"
author: "René Moser (@resmo)"
options:
name:
description:
- Name of the zone.
required: true
aliases: [ zone ]
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
- cs_zone_facts:
name: ch-gva-1
delegate_to: localhost
- debug:
var: cloudstack_zone
'''
RETURN = '''
---
cloudstack_zone.id:
description: UUID of the zone.
returned: success
type: string
sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
cloudstack_zone.name:
description: Name of the zone.
returned: success
type: string
sample: zone01
cloudstack_zone.dns1:
description: First DNS for the zone.
returned: success
type: string
sample: 8.8.8.8
cloudstack_zone.dns2:
description: Second DNS for the zone.
returned: success
type: string
sample: 8.8.4.4
cloudstack_zone.internal_dns1:
description: First internal DNS for the zone.
returned: success
type: string
sample: 8.8.8.8
cloudstack_zone.internal_dns2:
description: Second internal DNS for the zone.
returned: success
type: string
sample: 8.8.4.4
cloudstack_zone.dns1_ipv6:
description: First IPv6 DNS for the zone.
returned: success
type: string
sample: "2001:4860:4860::8888"
cloudstack_zone.dns2_ipv6:
description: Second IPv6 DNS for the zone.
returned: success
type: string
sample: "2001:4860:4860::8844"
cloudstack_zone.allocation_state:
description: State of the zone.
returned: success
type: string
sample: Enabled
cloudstack_zone.domain:
description: Domain the zone is related to.
returned: success
type: string
sample: ROOT
cloudstack_zone.network_domain:
description: Network domain for the zone.
returned: success
type: string
sample: example.com
cloudstack_zone.network_type:
description: Network type for the zone.
returned: success
type: string
sample: basic
cloudstack_zone.local_storage_enabled:
description: Local storage offering enabled.
returned: success
type: bool
sample: false
cloudstack_zone.securitygroups_enabled:
description: Security groups support is enabled.
returned: success
type: bool
sample: false
cloudstack_zone.guest_cidr_address:
description: Guest CIDR address for the zone
returned: success
type: string
sample: 10.1.1.0/24
cloudstack_zone.dhcp_provider:
description: DHCP provider for the zone
returned: success
type: string
sample: VirtualRouter
cloudstack_zone.zone_token:
description: Zone token
returned: success
type: string
sample: ccb0a60c-79c8-3230-ab8b-8bdbe8c45bb7
cloudstack_zone.tags:
description: List of resource tags associated with the zone.
returned: success
type: dict
sample: [ { "key": "foo", "value": "bar" } ]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
cs_argument_spec,
)
class AnsibleCloudStackZoneFacts(AnsibleCloudStack):
    """Gathers facts for a single CloudStack zone."""

    def __init__(self, module):
        super(AnsibleCloudStackZoneFacts, self).__init__(module)
        # Map CloudStack API field names to the fact keys we expose.
        self.returns = {
            'allocationstate': 'allocation_state',
            'dhcpprovider': 'dhcp_provider',
            'dns1': 'dns1',
            'dns2': 'dns2',
            'domain': 'network_domain',
            'guestcidraddress': 'guest_cidr_address',
            'internaldns1': 'internal_dns1',
            'internaldns2': 'internal_dns2',
            'ipv6dns1': 'dns1_ipv6',
            'ipv6dns2': 'dns2_ipv6',
            'localstorageenabled': 'local_storage_enabled',
            'networktype': 'network_type',
            'securitygroupsenabled': 'securitygroups_enabled',
            'zonetoken': 'zone_token',
        }
        self.facts = {
            'cloudstack_zone': None,
        }

    def get_zone(self):
        # Resolve the zone lazily; the parent implementation caches the
        # looked-up zone on self.zone.
        if not self.zone:
            super(AnsibleCloudStackZoneFacts, self).get_zone()
        return self.zone

    def run(self):
        """Populate and return the facts dict for the requested zone."""
        zone_facts = self.get_result(self.get_zone())
        self.facts['cloudstack_zone'] = zone_facts
        return self.facts
def main():
    """Module entry point: collect zone facts and report them to Ansible."""
    spec = cs_argument_spec()
    spec['zone'] = dict(required=True, aliases=['name'])

    module = AnsibleModule(
        argument_spec=spec,
        supports_check_mode=True,
    )

    facts = AnsibleCloudStackZoneFacts(module=module).run()
    module.exit_json(changed=False, ansible_facts=facts)


if __name__ == '__main__':
    main()
|
huran2014/huran.github.io | refs/heads/master | wot_gateway/usr/lib/python2.7/urllib2.py | 33 | """An extensible library for opening URLs using a variety of protocols
The simplest way to use this module is to call the urlopen function,
which accepts a string containing a URL or a Request object (described
below). It opens the URL and returns the results as file-like
object; the returned object has some extra methods described below.
The OpenerDirector manages a collection of Handler objects that do
all the actual work. Each Handler implements a particular protocol or
option. The OpenerDirector is a composite object that invokes the
Handlers needed to open the requested URL. For example, the
HTTPHandler performs HTTP GET and POST requests and deals with
non-error returns. The HTTPRedirectHandler automatically deals with
HTTP 301, 302, 303 and 307 redirect errors, and the HTTPDigestAuthHandler
deals with digest authentication.
urlopen(url, data=None) -- Basic usage is the same as original
urllib. pass the url and optionally data to post to an HTTP URL, and
get a file-like object back. One difference is that you can also pass
a Request instance instead of URL. Raises a URLError (subclass of
IOError); for HTTP errors, raises an HTTPError, which can also be
treated as a valid response.
build_opener -- Function that creates a new OpenerDirector instance.
Will install the default handlers. Accepts one or more Handlers as
arguments, either instances or Handler classes that it will
instantiate. If one of the argument is a subclass of the default
handler, the argument will be installed instead of the default.
install_opener -- Installs a new opener as the default opener.
objects of interest:
OpenerDirector -- Sets up the User Agent as the Python-urllib client and manages
the Handler classes, while dealing with requests and responses.
Request -- An object that encapsulates the state of a request. The
state can be as simple as the URL. It can also include extra HTTP
headers, e.g. a User-Agent.
BaseHandler --
exceptions:
URLError -- A subclass of IOError, individual protocols have their own
specific subclass.
HTTPError -- Also a valid HTTP response, so you can treat an HTTP error
as an exceptional event or valid response.
internals:
BaseHandler and parent
_call_chain conventions
Example usage:
import urllib2
# set up authentication info
authinfo = urllib2.HTTPBasicAuthHandler()
authinfo.add_password(realm='PDQ Application',
uri='https://mahler:8092/site-updates.py',
user='klem',
passwd='geheim$parole')
proxy_support = urllib2.ProxyHandler({"http" : "http://ahad-haam:3128"})
# build a new opener that adds authentication and caching FTP handlers
opener = urllib2.build_opener(proxy_support, authinfo, urllib2.CacheFTPHandler)
# install it
urllib2.install_opener(opener)
f = urllib2.urlopen('http://www.python.org/')
"""
# XXX issues:
# If an authentication error handler that tries to perform
# authentication for some reason but fails, how should the error be
# signalled? The client needs to know the HTTP error code. But if
# the handler knows what the problem was, e.g., that it didn't know
# the hash algorithm requested in the challenge, it would be good to
# pass that information along to the client, too.
# ftp errors aren't handled cleanly
# check digest against correct (i.e. non-apache) implementation
# Possible extensions:
# complex proxies XXX not sure what exactly was meant by this
# abstract factory for opener
import base64
import hashlib
import httplib
import mimetools
import os
import posixpath
import random
import re
import socket
import sys
import time
import urlparse
import bisect
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from urllib import (unwrap, unquote, splittype, splithost, quote,
addinfourl, splitport, splittag,
splitattr, ftpwrapper, splituser, splitpasswd, splitvalue)
# support for FileHandler, proxies via environment variables
from urllib import localhost, url2pathname, getproxies, proxy_bypass
# used in User-Agent header sent
__version__ = sys.version[:3]
_opener = None
def urlopen(url, data=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
    """Open *url* (a URL string or Request) via the shared default opener.

    The module-wide OpenerDirector is built lazily on first use and
    reused for all subsequent calls.
    """
    global _opener
    opener = _opener
    if opener is None:
        opener = _opener = build_opener()
    return opener.open(url, data, timeout)
def install_opener(opener):
    """Install *opener* as the process-wide default used by urlopen()."""
    global _opener
    _opener = opener
# do these error classes make sense?
# make sure all of the IOError stuff is overridden. we just want to be
# subtypes.
class URLError(IOError):
    """Base error raised by handlers when a URL cannot be opened.

    Subclasses IOError for backward compatibility only; none of the
    IOError implementation is reused.  ``args`` is populated for
    EnvironmentError-style consumers, although it does not follow the
    usual (errno, strerror) layout -- ``reason`` carries the cause.
    """

    def __init__(self, reason):
        self.reason = reason
        self.args = (reason,)

    def __str__(self):
        return '<urlopen error %s>' % self.reason
class HTTPError(URLError, addinfourl):
    """Raised when HTTP error occurs, but also acts like non-error return"""
    # Alias kept private so subclasses cannot accidentally shadow it.
    __super_init = addinfourl.__init__
    def __init__(self, url, code, msg, hdrs, fp):
        self.code = code
        self.msg = msg
        self.hdrs = hdrs
        self.fp = fp
        self.filename = url
        # The addinfourl classes depend on fp being a valid file
        # object.  In some cases, the HTTPError may not have a valid
        # file object.  If this happens, the simplest workaround is to
        # not initialize the base classes.
        if fp is not None:
            self.__super_init(fp, hdrs, url, code)
    def __str__(self):
        return 'HTTP Error %s: %s' % (self.code, self.msg)
    # since URLError specifies a .reason attribute, HTTPError should also
    # provide this attribute.  See issue13211 for discussion.
    @property
    def reason(self):
        return self.msg
# copied from cookielib.py
_cut_port_re = re.compile(r":\d+$")


def request_host(request):
    """Return request-host, as defined by RFC 2965.

    Variation from RFC: the returned value is lowercased, for convenient
    comparison, and any :port suffix is stripped.
    """
    host = urlparse.urlparse(request.get_full_url())[1]
    if not host:
        host = request.get_header("Host", "")
    # remove port, if present
    return _cut_port_re.sub("", host, 1).lower()
class Request:
    """Holds the state of a single request: URL, body, headers, origin.
    The name-mangled ``_Request__r_*`` attributes (e.g. __r_type,
    __r_host) are computed lazily by __getattr__ via the matching
    ``get_*`` accessor below.
    """
    def __init__(self, url, data=None, headers={},
                 origin_req_host=None, unverifiable=False):
        # unwrap('<URL:type://host/path>') --> 'type://host/path'
        self.__original = unwrap(url)
        self.__original, self.__fragment = splittag(self.__original)
        self.type = None
        # self.__r_type is what's left after doing the splittype
        self.host = None
        self.port = None
        self._tunnel_host = None
        self.data = data
        self.headers = {}
        for key, value in headers.items():
            self.add_header(key, value)
        self.unredirected_hdrs = {}
        if origin_req_host is None:
            origin_req_host = request_host(self)
        self.origin_req_host = origin_req_host
        self.unverifiable = unverifiable
    def __getattr__(self, attr):
        # XXX this is a fallback mechanism to guard against these
        # methods getting called in a non-standard order.  this may be
        # too complicated and/or unnecessary.
        # XXX should the __r_XXX attributes be public?
        if attr[:12] == '_Request__r_':
            name = attr[12:]
            if hasattr(Request, 'get_' + name):
                getattr(self, 'get_' + name)()
                return getattr(self, attr)
        raise AttributeError, attr
    def get_method(self):
        # POST iff a body is present (pre-method-API urllib2 convention).
        if self.has_data():
            return "POST"
        else:
            return "GET"
    # XXX these helper methods are lame
    def add_data(self, data):
        self.data = data
    def has_data(self):
        return self.data is not None
    def get_data(self):
        return self.data
    def get_full_url(self):
        # re-attach the fragment stripped off in __init__
        if self.__fragment:
            return '%s#%s' % (self.__original, self.__fragment)
        else:
            return self.__original
    def get_type(self):
        if self.type is None:
            self.type, self.__r_type = splittype(self.__original)
            if self.type is None:
                raise ValueError, "unknown url type: %s" % self.__original
        return self.type
    def get_host(self):
        if self.host is None:
            self.host, self.__r_host = splithost(self.__r_type)
            if self.host:
                self.host = unquote(self.host)
        return self.host
    def get_selector(self):
        return self.__r_host
    def set_proxy(self, host, type):
        # For https, remember the real host so CONNECT tunnelling works.
        if self.type == 'https' and not self._tunnel_host:
            self._tunnel_host = self.host
        else:
            self.type = type
            self.__r_host = self.__original
        self.host = host
    def has_proxy(self):
        return self.__r_host == self.__original
    def get_origin_req_host(self):
        return self.origin_req_host
    def is_unverifiable(self):
        return self.unverifiable
    def add_header(self, key, val):
        # useful for something like authentication
        self.headers[key.capitalize()] = val
    def add_unredirected_header(self, key, val):
        # will not be added to a redirected request
        self.unredirected_hdrs[key.capitalize()] = val
    def has_header(self, header_name):
        return (header_name in self.headers or
                header_name in self.unredirected_hdrs)
    def get_header(self, header_name, default=None):
        return self.headers.get(
            header_name,
            self.unredirected_hdrs.get(header_name, default))
    def header_items(self):
        # normal headers take precedence over unredirected ones
        hdrs = self.unredirected_hdrs.copy()
        hdrs.update(self.headers)
        return hdrs.items()
class OpenerDirector:
    """Manages a chain of handlers and dispatches requests through them."""
    def __init__(self):
        client_version = "Python-urllib/%s" % __version__
        self.addheaders = [('User-agent', client_version)]
        # self.handlers is retained only for backward compatibility
        self.handlers = []
        # manage the individual handlers
        self.handle_open = {}
        self.handle_error = {}
        self.process_response = {}
        self.process_request = {}
    def add_handler(self, handler):
        # Register *handler* under every protocol/condition its method
        # names advertise (e.g. http_open, http_error_302, http_request).
        if not hasattr(handler, "add_parent"):
            raise TypeError("expected BaseHandler instance, got %r" %
                            type(handler))
        added = False
        for meth in dir(handler):
            if meth in ["redirect_request", "do_open", "proxy_open"]:
                # oops, coincidental match
                continue
            i = meth.find("_")
            protocol = meth[:i]
            condition = meth[i+1:]
            if condition.startswith("error"):
                j = condition.find("_") + i + 1
                kind = meth[j+1:]
                try:
                    kind = int(kind)
                except ValueError:
                    pass
                lookup = self.handle_error.get(protocol, {})
                self.handle_error[protocol] = lookup
            elif condition == "open":
                kind = protocol
                lookup = self.handle_open
            elif condition == "response":
                kind = protocol
                lookup = self.process_response
            elif condition == "request":
                kind = protocol
                lookup = self.process_request
            else:
                continue
            # handlers with the same kind are kept sorted by handler_order
            handlers = lookup.setdefault(kind, [])
            if handlers:
                bisect.insort(handlers, handler)
            else:
                handlers.append(handler)
            added = True
        if added:
            bisect.insort(self.handlers, handler)
            handler.add_parent(self)
    def close(self):
        # Only exists for backwards compatibility.
        pass
    def _call_chain(self, chain, kind, meth_name, *args):
        # Handlers raise an exception if no one else should try to handle
        # the request, or return None if they can't but another handler
        # could.  Otherwise, they return the response.
        handlers = chain.get(kind, ())
        for handler in handlers:
            func = getattr(handler, meth_name)
            result = func(*args)
            if result is not None:
                return result
    def open(self, fullurl, data=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
        """Open *fullurl*: pre-process the request, open it, then
        post-process the response through the registered processors."""
        # accept a URL or a Request object
        if isinstance(fullurl, basestring):
            req = Request(fullurl, data)
        else:
            req = fullurl
            if data is not None:
                req.add_data(data)
        req.timeout = timeout
        protocol = req.get_type()
        # pre-process request
        meth_name = protocol+"_request"
        for processor in self.process_request.get(protocol, []):
            meth = getattr(processor, meth_name)
            req = meth(req)
        response = self._open(req, data)
        # post-process response
        meth_name = protocol+"_response"
        for processor in self.process_response.get(protocol, []):
            meth = getattr(processor, meth_name)
            response = meth(req, response)
        return response
    def _open(self, req, data=None):
        # Try default_open, then <protocol>_open, then unknown_open.
        result = self._call_chain(self.handle_open, 'default',
                                  'default_open', req)
        if result:
            return result
        protocol = req.get_type()
        result = self._call_chain(self.handle_open, protocol, protocol +
                                  '_open', req)
        if result:
            return result
        return self._call_chain(self.handle_open, 'unknown',
                                'unknown_open', req)
    def error(self, proto, *args):
        # Dispatch to http_error_<code> / <proto>_error handlers, falling
        # back to http_error_default for HTTP(S).
        if proto in ('http', 'https'):
            # XXX http[s] protocols are special-cased
            dict = self.handle_error['http'] # https is not different than http
            proto = args[2]  # YUCK!
            meth_name = 'http_error_%s' % proto
            http_err = 1
            orig_args = args
        else:
            dict = self.handle_error
            meth_name = proto + '_error'
            http_err = 0
        args = (dict, proto, meth_name) + args
        result = self._call_chain(*args)
        if result:
            return result
        if http_err:
            args = (dict, 'default', 'http_error_default') + orig_args
            return self._call_chain(*args)
# XXX probably also want an abstract factory that knows when it makes
# sense to skip a superclass in favor of a subclass and when it might
# make sense to include both
def build_opener(*handlers):
    """Create an opener object from a list of handlers.

    The opener will use several default handlers, including support
    for HTTP, FTP and when applicable, HTTPS.

    If any of the handlers passed as arguments are subclasses of the
    default handlers, the default handlers will not be used.
    """
    import types

    def isclass(obj):
        return isinstance(obj, (types.ClassType, type))

    opener = OpenerDirector()
    default_classes = [ProxyHandler, UnknownHandler, HTTPHandler,
                       HTTPDefaultErrorHandler, HTTPRedirectHandler,
                       FTPHandler, FileHandler, HTTPErrorProcessor]
    if hasattr(httplib, 'HTTPS'):
        default_classes.append(HTTPSHandler)

    # Drop every default that the caller overrides with a subclass (or
    # an instance of a subclass).
    skip = set()
    for klass in default_classes:
        for check in handlers:
            if isclass(check):
                if issubclass(check, klass):
                    skip.add(klass)
            elif isinstance(check, klass):
                skip.add(klass)
    default_classes = [k for k in default_classes if k not in skip]

    for klass in default_classes:
        opener.add_handler(klass())
    for h in handlers:
        opener.add_handler(h() if isclass(h) else h)
    return opener
class BaseHandler:
    """Common base for OpenerDirector handlers.

    Handlers sort by ``handler_order`` (lower runs earlier); instances
    compare with ``<`` so OpenerDirector can bisect.insort them into
    its chains.
    """

    handler_order = 500

    def add_parent(self, parent):
        # Back-reference to the OpenerDirector that owns this handler.
        self.parent = parent

    def close(self):
        # Retained only for backwards compatibility.
        pass

    def __lt__(self, other):
        try:
            other_order = other.handler_order
        except AttributeError:
            # Preserve the old behavior of sorting custom classes that
            # are unaware of handler_order after the default ones.
            return True
        return self.handler_order < other_order
class HTTPErrorProcessor(BaseHandler):
    """Turns non-2xx HTTP responses into errors via parent.error()."""

    handler_order = 1000  # after all other processing

    def http_response(self, request, response):
        code, msg, hdrs = response.code, response.msg, response.info()
        # RFC 2616: any "2xx" code means the request was successfully
        # received, understood, and accepted -- pass those straight through.
        if 200 <= code < 300:
            return response
        return self.parent.error('http', request, response, code, msg, hdrs)

    https_response = http_response
class HTTPDefaultErrorHandler(BaseHandler):
    """Fallback: turn any otherwise-unhandled HTTP error into HTTPError."""
    def http_error_default(self, req, fp, code, msg, hdrs):
        raise HTTPError(req.get_full_url(), code, msg, hdrs, fp)
class HTTPRedirectHandler(BaseHandler):
    """Follows HTTP 301/302/303/307 redirects, with loop detection."""
    # maximum number of redirections to any single URL
    # this is needed because of the state that cookies introduce
    max_repeats = 4
    # maximum total number of redirections (regardless of URL) before
    # assuming we're in a loop
    max_redirections = 10
    def redirect_request(self, req, fp, code, msg, headers, newurl):
        """Return a Request or None in response to a redirect.
        This is called by the http_error_30x methods when a
        redirection response is received.  If a redirection should
        take place, return a new Request to allow http_error_30x to
        perform the redirect.  Otherwise, raise HTTPError if no-one
        else should try to handle this url.  Return None if you can't
        but another Handler might.
        """
        m = req.get_method()
        if (code in (301, 302, 303, 307) and m in ("GET", "HEAD")
            or code in (301, 302, 303) and m == "POST"):
            # Strictly (according to RFC 2616), 301 or 302 in response
            # to a POST MUST NOT cause a redirection without confirmation
            # from the user (of urllib2, in this case).  In practice,
            # essentially all clients do redirect in this case, so we
            # do the same.
            # be conciliant with URIs containing a space
            newurl = newurl.replace(' ', '%20')
            newheaders = dict((k,v) for k,v in req.headers.items()
                              if k.lower() not in ("content-length", "content-type")
                             )
            return Request(newurl,
                           headers=newheaders,
                           origin_req_host=req.get_origin_req_host(),
                           unverifiable=True)
        else:
            raise HTTPError(req.get_full_url(), code, msg, headers, fp)
    # Implementation note: To avoid the server sending us into an
    # infinite loop, the request object needs to track what URLs we
    # have already seen.  Do this by adding a handler-specific
    # attribute to the Request object.
    def http_error_302(self, req, fp, code, msg, headers):
        # Some servers (incorrectly) return multiple Location headers
        # (so probably same goes for URI).  Use first header.
        if 'location' in headers:
            newurl = headers.getheaders('location')[0]
        elif 'uri' in headers:
            newurl = headers.getheaders('uri')[0]
        else:
            return
        # fix a possible malformed URL
        urlparts = urlparse.urlparse(newurl)
        if not urlparts.path:
            urlparts = list(urlparts)
            urlparts[2] = "/"
            newurl = urlparse.urlunparse(urlparts)
        newurl = urlparse.urljoin(req.get_full_url(), newurl)
        # For security reasons we do not allow redirects to protocols
        # other than HTTP, HTTPS or FTP.
        newurl_lower = newurl.lower()
        if not (newurl_lower.startswith('http://') or
                newurl_lower.startswith('https://') or
                newurl_lower.startswith('ftp://')):
            raise HTTPError(newurl, code,
                            msg + " - Redirection to url '%s' is not allowed" %
                            newurl,
                            headers, fp)
        # XXX Probably want to forget about the state of the current
        # request, although that might interact poorly with other
        # handlers that also use handler-specific request attributes
        new = self.redirect_request(req, fp, code, msg, headers, newurl)
        if new is None:
            return
        # loop detection
        # .redirect_dict has a key url if url was previously visited.
        if hasattr(req, 'redirect_dict'):
            visited = new.redirect_dict = req.redirect_dict
            if (visited.get(newurl, 0) >= self.max_repeats or
                len(visited) >= self.max_redirections):
                raise HTTPError(req.get_full_url(), code,
                                self.inf_msg + msg, headers, fp)
        else:
            visited = new.redirect_dict = req.redirect_dict = {}
        visited[newurl] = visited.get(newurl, 0) + 1
        # Don't close the fp until we are sure that we won't use it
        # with HTTPError.
        fp.read()
        fp.close()
        return self.parent.open(new, timeout=req.timeout)
    http_error_301 = http_error_303 = http_error_307 = http_error_302
    inf_msg = "The HTTP server returned a redirect error that would " \
              "lead to an infinite loop.\n" \
              "The last 30x error message was:\n"
def _parse_proxy(proxy):
    """Return (scheme, user, password, host/port) given a URL or an authority.
    If a URL is supplied, it must have an authority (host:port) component.
    According to RFC 3986, having an authority component means the URL must
    have two slashes after the scheme:
    >>> _parse_proxy('file:/ftp.example.com/')
    Traceback (most recent call last):
    ValueError: proxy URL with no authority: 'file:/ftp.example.com/'
    The first three items of the returned tuple may be None.
    Examples of authority parsing:
    >>> _parse_proxy('proxy.example.com')
    (None, None, None, 'proxy.example.com')
    >>> _parse_proxy('proxy.example.com:3128')
    (None, None, None, 'proxy.example.com:3128')
    The authority component may optionally include userinfo (assumed to be
    username:password):
    >>> _parse_proxy('joe:password@proxy.example.com')
    (None, 'joe', 'password', 'proxy.example.com')
    >>> _parse_proxy('joe:password@proxy.example.com:3128')
    (None, 'joe', 'password', 'proxy.example.com:3128')
    Same examples, but with URLs instead:
    >>> _parse_proxy('http://proxy.example.com/')
    ('http', None, None, 'proxy.example.com')
    >>> _parse_proxy('http://proxy.example.com:3128/')
    ('http', None, None, 'proxy.example.com:3128')
    >>> _parse_proxy('http://joe:password@proxy.example.com/')
    ('http', 'joe', 'password', 'proxy.example.com')
    >>> _parse_proxy('http://joe:password@proxy.example.com:3128')
    ('http', 'joe', 'password', 'proxy.example.com:3128')
    Everything after the authority is ignored:
    >>> _parse_proxy('ftp://joe:password@proxy.example.com/rubbish:3128')
    ('ftp', 'joe', 'password', 'proxy.example.com')
    Test for no trailing '/' case:
    >>> _parse_proxy('http://joe:password@proxy.example.com')
    ('http', 'joe', 'password', 'proxy.example.com')
    """
    scheme, r_scheme = splittype(proxy)
    if not r_scheme.startswith("/"):
        # authority
        scheme = None
        authority = proxy
    else:
        # URL
        if not r_scheme.startswith("//"):
            raise ValueError("proxy URL with no authority: %r" % proxy)
        # We have an authority, so for RFC 3986-compliant URLs (by ss 3.
        # and 3.3.), path is empty or starts with '/'
        end = r_scheme.find("/", 2)
        if end == -1:
            end = None
        authority = r_scheme[2:end]
    # split an optional user:password@ prefix from host:port
    userinfo, hostport = splituser(authority)
    if userinfo is not None:
        user, password = splitpasswd(userinfo)
    else:
        user = password = None
    return scheme, user, password, hostport
class ProxyHandler(BaseHandler):
    """Routes requests through the proxies configured per scheme."""
    # Proxies must be in front
    handler_order = 100
    def __init__(self, proxies=None):
        if proxies is None:
            proxies = getproxies()
        assert hasattr(proxies, 'has_key'), "proxies must be a mapping"
        self.proxies = proxies
        for type, url in proxies.items():
            # NOTE: default arguments bind proxy/type/meth at definition
            # time, so each generated <scheme>_open method keeps its own
            # proxy URL rather than sharing the loop variable.
            setattr(self, '%s_open' % type,
                    lambda r, proxy=url, type=type, meth=self.proxy_open: \
                        meth(r, proxy, type))
    def proxy_open(self, req, proxy, type):
        orig_type = req.get_type()
        proxy_type, user, password, hostport = _parse_proxy(proxy)
        if proxy_type is None:
            proxy_type = orig_type
        # honor no_proxy-style bypass lists
        if req.host and proxy_bypass(req.host):
            return None
        if user and password:
            user_pass = '%s:%s' % (unquote(user), unquote(password))
            creds = base64.b64encode(user_pass).strip()
            req.add_header('Proxy-authorization', 'Basic ' + creds)
        hostport = unquote(hostport)
        req.set_proxy(hostport, proxy_type)
        if orig_type == proxy_type or orig_type == 'https':
            # let other handlers take care of it
            return None
        else:
            # need to start over, because the other handlers don't
            # grok the proxy's URL type
            # e.g. if we have a constructor arg proxies like so:
            # {'http': 'ftp://proxy.example.com'}, we may end up turning
            # a request for http://acme.example.com/a into one for
            # ftp://proxy.example.com/a
            return self.parent.open(req, timeout=req.timeout)
class HTTPPasswordMgr:
    """Maps (realm, URI) -> (user, password), with URI-prefix matching."""
    def __init__(self):
        self.passwd = {}
    def add_password(self, realm, uri, user, passwd):
        # uri could be a single URI or a sequence
        if isinstance(uri, basestring):
            uri = [uri]
        if not realm in self.passwd:
            self.passwd[realm] = {}
        # store both with and without the scheme's default port so
        # lookups succeed either way
        for default_port in True, False:
            reduced_uri = tuple(
                [self.reduce_uri(u, default_port) for u in uri])
            self.passwd[realm][reduced_uri] = (user, passwd)
    def find_user_password(self, realm, authuri):
        # Return the first (user, password) whose stored URI is a prefix
        # of *authuri*, or (None, None) when nothing matches.
        domains = self.passwd.get(realm, {})
        for default_port in True, False:
            reduced_authuri = self.reduce_uri(authuri, default_port)
            for uris, authinfo in domains.iteritems():
                for uri in uris:
                    if self.is_suburi(uri, reduced_authuri):
                        return authinfo
        return None, None
    def reduce_uri(self, uri, default_port=True):
        """Accept authority or URI and extract only the authority and path."""
        # note HTTP URLs do not have a userinfo component
        parts = urlparse.urlsplit(uri)
        if parts[1]:
            # URI
            scheme = parts[0]
            authority = parts[1]
            path = parts[2] or '/'
        else:
            # host or host:port
            scheme = None
            authority = uri
            path = '/'
        host, port = splitport(authority)
        if default_port and port is None and scheme is not None:
            dport = {"http": 80,
                     "https": 443,
                     }.get(scheme)
            if dport is not None:
                authority = "%s:%d" % (host, dport)
        return authority, path
    def is_suburi(self, base, test):
        """Check if test is below base in a URI tree
        Both args must be URIs in reduced form.
        """
        if base == test:
            return True
        if base[0] != test[0]:
            return False
        common = posixpath.commonprefix((base[1], test[1]))
        if len(common) == len(base[1]):
            return True
        return False
class HTTPPasswordMgrWithDefaultRealm(HTTPPasswordMgr):
    """Password manager that falls back to the default realm (None)."""

    def find_user_password(self, realm, authuri):
        # Try the requested realm first; fall back to credentials stored
        # under realm None.
        creds = HTTPPasswordMgr.find_user_password(self, realm, authuri)
        if creds[0] is not None:
            return creds
        return HTTPPasswordMgr.find_user_password(self, None, authuri)
class AbstractBasicAuthHandler:
    """Shared machinery for Basic auth (401/407) challenge retries."""
    # XXX this allows for multiple auth-schemes, but will stupidly pick
    # the last one with a realm specified.
    # allow for double- and single-quoted realm values
    # (single quotes are a violation of the RFC, but appear in the wild)
    rx = re.compile('(?:.*,)*[ \t]*([^ \t]+)[ \t]+'
                    'realm=(["\'])(.*?)\\2', re.I)
    # XXX could pre-emptively send auth info already accepted (RFC 2617,
    # end of section 2, and section 1.2 immediately after "credentials"
    # production).
    def __init__(self, password_mgr=None):
        if password_mgr is None:
            password_mgr = HTTPPasswordMgr()
        self.passwd = password_mgr
        self.add_password = self.passwd.add_password
        self.retried = 0
    def reset_retry_count(self):
        self.retried = 0
    def http_error_auth_reqed(self, authreq, host, req, headers):
        # host may be an authority (without userinfo) or a URL with an
        # authority
        # XXX could be multiple headers
        authreq = headers.get(authreq, None)
        if self.retried > 5:
            # retry sending the username:password 5 times before failing.
            raise HTTPError(req.get_full_url(), 401, "basic auth failed",
                            headers, None)
        else:
            self.retried += 1
        if authreq:
            mo = AbstractBasicAuthHandler.rx.search(authreq)
            if mo:
                scheme, quote, realm = mo.groups()
                if scheme.lower() == 'basic':
                    response = self.retry_http_basic_auth(host, req, realm)
                    if response and response.code != 401:
                        # success: reset so future challenges start fresh
                        self.retried = 0
                    return response
    def retry_http_basic_auth(self, host, req, realm):
        user, pw = self.passwd.find_user_password(realm, host)
        if pw is not None:
            raw = "%s:%s" % (user, pw)
            auth = 'Basic %s' % base64.b64encode(raw).strip()
            if req.headers.get(self.auth_header, None) == auth:
                # same credentials already failed once; give up
                return None
            req.add_unredirected_header(self.auth_header, auth)
            return self.parent.open(req, timeout=req.timeout)
        else:
            return None
class HTTPBasicAuthHandler(AbstractBasicAuthHandler, BaseHandler):
    """Retries a request with Basic credentials after a 401 response."""

    # Header carrying the credentials to the origin server.
    auth_header = 'Authorization'

    def http_error_401(self, req, fp, code, msg, headers):
        """Handle a 401 by re-issuing the request with authorization."""
        retry = self.http_error_auth_reqed('www-authenticate',
                                           req.get_full_url(), req, headers)
        self.reset_retry_count()
        return retry
class ProxyBasicAuthHandler(AbstractBasicAuthHandler, BaseHandler):
    """Retries a request with Basic credentials after a 407 proxy response."""

    # Header carrying the credentials to the proxy.
    auth_header = 'Proxy-authorization'

    def http_error_407(self, req, fp, code, msg, headers):
        # http_error_auth_reqed requires that there is no userinfo component
        # in authority.  Assume there isn't one, since urllib2 does not (and
        # should not, RFC 3986 s. 3.2.1) support requests for URLs containing
        # userinfo.
        authority = req.get_host()
        response = self.http_error_auth_reqed('proxy-authenticate',
                                              authority, req, headers)
        self.reset_retry_count()
        return response
def randombytes(n):
    """Return n random bytes.

    Uses the OS entropy pool at /dev/urandom when available; otherwise
    falls back to the ``random`` module (not cryptographically strong).
    It might be worthwhile to extend this function to use other
    platform-specific mechanisms for getting random bytes.
    """
    if os.path.exists("/dev/urandom"):
        # Context manager guarantees the descriptor is closed even if the
        # read raises (the original leaked it in that case); binary mode
        # avoids any text translation of the raw byte stream.
        with open("/dev/urandom", "rb") as f:
            return f.read(n)
    else:
        L = [chr(random.randrange(0, 256)) for i in range(n)]
        return "".join(L)
class AbstractDigestAuthHandler:
    """Shared machinery for HTTP Digest authentication (RFC 2617).

    Subclasses provide ``auth_header`` (the request header to fill in)
    and wire the retry logic up to 401 or 407 responses.
    """
    # Digest authentication is specified in RFC 2617.

    # XXX The client does not inspect the Authentication-Info header
    # in a successful response.

    # XXX It should be possible to test this implementation against
    # a mock server that just generates a static set of challenges.

    # XXX qop="auth-int" supports is shaky

    def __init__(self, passwd=None):
        if passwd is None:
            passwd = HTTPPasswordMgr()
        self.passwd = passwd
        self.add_password = self.passwd.add_password
        # consecutive-failure counter for the current request
        self.retried = 0
        # nonce-count bookkeeping required by the qop="auth" exchange
        self.nonce_count = 0
        self.last_nonce = None

    def reset_retry_count(self):
        """Reset the consecutive-failure counter (called per request)."""
        self.retried = 0

    def http_error_auth_reqed(self, auth_header, host, req, headers):
        """Inspect the challenge in *auth_header* and retry if it is Digest."""
        authreq = headers.get(auth_header, None)
        if self.retried > 5:
            # Don't fail endlessly - if we failed once, we'll probably
            # fail a second time. Hm. Unless the Password Manager is
            # prompting for the information. Crap. This isn't great
            # but it's better than the current 'repeat until recursion
            # depth exceeded' approach <wink>
            raise HTTPError(req.get_full_url(), 401, "digest auth failed",
                            headers, None)
        else:
            self.retried += 1
        if authreq:
            scheme = authreq.split()[0]
            if scheme.lower() == 'digest':
                return self.retry_http_digest_auth(req, authreq)

    def retry_http_digest_auth(self, req, auth):
        """Re-issue *req* with an authorization built from challenge *auth*."""
        token, challenge = auth.split(' ', 1)
        chal = parse_keqv_list(parse_http_list(challenge))
        auth = self.get_authorization(req, chal)
        if auth:
            auth_val = 'Digest %s' % auth
            if req.headers.get(self.auth_header, None) == auth_val:
                # identical credentials were already rejected; give up
                return None
            req.add_unredirected_header(self.auth_header, auth_val)
            resp = self.parent.open(req, timeout=req.timeout)
            return resp

    def get_cnonce(self, nonce):
        """Generate a client nonce for the qop="auth" exchange."""
        # The cnonce-value is an opaque
        # quoted string value provided by the client and used by both client
        # and server to avoid chosen plaintext attacks, to provide mutual
        # authentication, and to provide some message integrity protection.
        # This isn't a fabulous effort, but it's probably Good Enough.
        dig = hashlib.sha1("%s:%s:%s:%s" % (self.nonce_count, nonce, time.ctime(),
                                            randombytes(8))).hexdigest()
        return dig[:16]

    def get_authorization(self, req, chal):
        """Build the Digest Authorization header value for *req*.

        *chal* is the parsed challenge dictionary.  Returns None when the
        challenge is incomplete, the algorithm is unsupported, or no
        credentials are stored for the realm.
        """
        try:
            realm = chal['realm']
            nonce = chal['nonce']
            qop = chal.get('qop')
            algorithm = chal.get('algorithm', 'MD5')
            # mod_digest doesn't send an opaque, even though it isn't
            # supposed to be optional
            opaque = chal.get('opaque', None)
        except KeyError:
            return None

        H, KD = self.get_algorithm_impls(algorithm)
        if H is None:
            return None

        user, pw = self.passwd.find_user_password(realm, req.get_full_url())
        if user is None:
            return None

        # XXX not implemented yet
        if req.has_data():
            entdig = self.get_entity_digest(req.get_data(), chal)
        else:
            entdig = None

        # A1/A2 per RFC 2617 section 3.2.2.x
        A1 = "%s:%s:%s" % (user, realm, pw)
        A2 = "%s:%s" % (req.get_method(),
                        # XXX selector: what about proxies and full urls
                        req.get_selector())
        if qop == 'auth':
            # nonce-count must increase monotonically per server nonce
            if nonce == self.last_nonce:
                self.nonce_count += 1
            else:
                self.nonce_count = 1
                self.last_nonce = nonce

            ncvalue = '%08x' % self.nonce_count
            cnonce = self.get_cnonce(nonce)
            noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, H(A2))
            respdig = KD(H(A1), noncebit)
        elif qop is None:
            respdig = KD(H(A1), "%s:%s" % (nonce, H(A2)))
        else:
            # XXX handle auth-int.
            raise URLError("qop '%s' is not supported." % qop)

        # XXX should the partial digests be encoded too?

        base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
               'response="%s"' % (user, realm, nonce, req.get_selector(),
                                  respdig)
        if opaque:
            base += ', opaque="%s"' % opaque
        if entdig:
            base += ', digest="%s"' % entdig
        base += ', algorithm="%s"' % algorithm
        if qop:
            base += ', qop=auth, nc=%s, cnonce="%s"' % (ncvalue, cnonce)
        return base

    def get_algorithm_impls(self, algorithm):
        """Return (hash, keyed-digest) callables for *algorithm*.

        NOTE(review): for an algorithm other than MD5/SHA, H is never
        bound and the final ``return H, KD`` raises UnboundLocalError
        rather than returning (None, ...) -- looks like a latent bug in
        the original; left untouched here.
        """
        # algorithm should be case-insensitive according to RFC2617
        algorithm = algorithm.upper()
        # lambdas assume digest modules are imported at the top level
        if algorithm == 'MD5':
            H = lambda x: hashlib.md5(x).hexdigest()
        elif algorithm == 'SHA':
            H = lambda x: hashlib.sha1(x).hexdigest()
        # XXX MD5-sess
        KD = lambda s, d: H("%s:%s" % (s, d))
        return H, KD

    def get_entity_digest(self, data, chal):
        # XXX not implemented yet
        return None
class HTTPDigestAuthHandler(BaseHandler, AbstractDigestAuthHandler):
    """An authentication protocol defined by RFC 2069

    Digest authentication improves on basic authentication because it
    does not transmit passwords in the clear.
    """

    auth_header = 'Authorization'
    # lower order runs earlier, so Digest is attempted before Basic
    handler_order = 490  # before Basic auth

    def http_error_401(self, req, fp, code, msg, headers):
        # look up credentials by the authority component of the URL
        host = urlparse.urlparse(req.get_full_url())[1]
        retry = self.http_error_auth_reqed('www-authenticate',
                                           host, req, headers)
        self.reset_retry_count()
        return retry
class ProxyDigestAuthHandler(BaseHandler, AbstractDigestAuthHandler):
    """Digest authentication against a proxy (407 responses)."""

    auth_header = 'Proxy-Authorization'
    # lower order runs earlier, so Digest is attempted before Basic
    handler_order = 490  # before Basic auth

    def http_error_407(self, req, fp, code, msg, headers):
        host = req.get_host()
        retry = self.http_error_auth_reqed('proxy-authenticate',
                                           host, req, headers)
        self.reset_retry_count()
        return retry
class AbstractHTTPHandler(BaseHandler):
    """Common request preprocessing and opening logic shared by the
    HTTP and HTTPS handlers."""

    def __init__(self, debuglevel=0):
        # debug level is forwarded to the underlying httplib connection
        self._debuglevel = debuglevel

    def set_http_debuglevel(self, level):
        self._debuglevel = level

    def do_request_(self, request):
        """Fill in the standard headers (Content-type/-length for POSTs,
        Host, and the opener's default headers) before sending."""
        host = request.get_host()
        if not host:
            raise URLError('no host given')

        if request.has_data():  # POST
            data = request.get_data()
            if not request.has_header('Content-type'):
                request.add_unredirected_header(
                    'Content-type',
                    'application/x-www-form-urlencoded')
            if not request.has_header('Content-length'):
                request.add_unredirected_header(
                    'Content-length', '%d' % len(data))

        sel_host = host
        if request.has_proxy():
            # when proxying, the selector is a full URL; Host must name
            # the origin server, not the proxy
            scheme, sel = splittype(request.get_selector())
            sel_host, sel_path = splithost(sel)

        if not request.has_header('Host'):
            request.add_unredirected_header('Host', sel_host)
        for name, value in self.parent.addheaders:
            name = name.capitalize()
            if not request.has_header(name):
                request.add_unredirected_header(name, value)

        return request

    def do_open(self, http_class, req):
        """Return an addinfourl object for the request, using http_class.

        http_class must implement the HTTPConnection API from httplib.
        The addinfourl return value is a file-like object.  It also
        has methods and attributes including:
            - info(): return a mimetools.Message object for the headers
            - geturl(): return the original request URL
            - code: HTTP status code
        """
        host = req.get_host()
        if not host:
            raise URLError('no host given')

        h = http_class(host, timeout=req.timeout)  # will parse host:port
        h.set_debuglevel(self._debuglevel)

        # unredirected headers take precedence over ordinary ones
        headers = dict(req.unredirected_hdrs)
        headers.update(dict((k, v) for k, v in req.headers.items()
                            if k not in headers))

        # We want to make an HTTP/1.1 request, but the addinfourl
        # class isn't prepared to deal with a persistent connection.
        # It will try to read all remaining data from the socket,
        # which will block while the server waits for the next request.
        # So make sure the connection gets closed after the (only)
        # request.
        headers["Connection"] = "close"
        headers = dict(
            (name.title(), val) for name, val in headers.items())

        if req._tunnel_host:
            # CONNECT tunnelling (https through a proxy)
            tunnel_headers = {}
            proxy_auth_hdr = "Proxy-Authorization"
            if proxy_auth_hdr in headers:
                tunnel_headers[proxy_auth_hdr] = headers[proxy_auth_hdr]
                # Proxy-Authorization should not be sent to origin
                # server.
                del headers[proxy_auth_hdr]
            h.set_tunnel(req._tunnel_host, headers=tunnel_headers)

        try:
            h.request(req.get_method(), req.get_selector(), req.data, headers)
        except socket.error, err: # XXX what error?
            h.close()
            raise URLError(err)
        else:
            try:
                r = h.getresponse(buffering=True)
            except TypeError: # buffering kw not supported
                r = h.getresponse()

        # Pick apart the HTTPResponse object to get the addinfourl
        # object initialized properly.

        # Wrap the HTTPResponse object in socket's file object adapter
        # for Windows.  That adapter calls recv(), so delegate recv()
        # to read().  This weird wrapping allows the returned object to
        # have readline() and readlines() methods.

        # XXX It might be better to extract the read buffering code
        # out of socket._fileobject() and into a base class.

        r.recv = r.read
        fp = socket._fileobject(r, close=True)

        resp = addinfourl(fp, r.msg, req.get_full_url())
        resp.code = r.status
        resp.msg = r.reason
        return resp
class HTTPHandler(AbstractHTTPHandler):
    """Opener handler for plain ``http://`` URLs."""

    def http_open(self, req):
        return self.do_open(httplib.HTTPConnection, req)

    # request preprocessing is shared with HTTPSHandler
    http_request = AbstractHTTPHandler.do_request_
# HTTPSHandler only exists when the interpreter was built with SSL
# support (signalled by httplib exposing the HTTPS class).
if hasattr(httplib, 'HTTPS'):
    class HTTPSHandler(AbstractHTTPHandler):
        """Opener handler for ``https://`` URLs."""

        def https_open(self, req):
            return self.do_open(httplib.HTTPSConnection, req)

        # request preprocessing is shared with HTTPHandler
        https_request = AbstractHTTPHandler.do_request_
class HTTPCookieProcessor(BaseHandler):
    """Opener handler that records cookies from responses in a CookieJar
    and attaches matching cookies to subsequent requests."""

    def __init__(self, cookiejar=None):
        import cookielib
        self.cookiejar = cookielib.CookieJar() if cookiejar is None else cookiejar

    def http_request(self, request):
        """Add any applicable Cookie header before the request goes out."""
        self.cookiejar.add_cookie_header(request)
        return request

    def http_response(self, request, response):
        """Store Set-Cookie headers from the response in the jar."""
        self.cookiejar.extract_cookies(response, request)
        return response

    https_request = http_request
    https_response = http_response
class UnknownHandler(BaseHandler):
    """Fallback handler: a URL scheme no other handler claims is an error."""

    def unknown_open(self, req):
        raise URLError('unknown url type: %s' % req.get_type())
def parse_keqv_list(l):
    """Parse list of key=value strings where keys are not duplicated.

    Surrounding double quotes are stripped from each value.
    """
    def _strip_quotes(value):
        # Drop a matching pair of surrounding double quotes, if present.
        if value[0] == '"' and value[-1] == '"':
            return value[1:-1]
        return value

    pairs = (entry.split('=', 1) for entry in l)
    return dict((key, _strip_quotes(value)) for key, value in pairs)
def parse_http_list(s):
    """Parse lists as described by RFC 2068 Section 2.

    In particular, parse comma-separated lists where the elements of
    the list may include quoted-strings.  A quoted-string could
    contain a comma.  A non-quoted string could have quotes in the
    middle.  Neither commas nor quotes count if they are escaped.

    Only double-quotes count, not single-quotes.
    """
    elements = []
    token = ''
    escaped = False
    in_quotes = False

    for ch in s:
        if escaped:
            # Previous char was a backslash inside quotes: keep this one
            # verbatim (the backslash itself is dropped).
            token += ch
            escaped = False
        elif in_quotes:
            if ch == '\\':
                escaped = True
            else:
                if ch == '"':
                    in_quotes = False
                token += ch
        elif ch == ',':
            # Unquoted comma: element boundary (empty elements included).
            elements.append(token)
            token = ''
        else:
            if ch == '"':
                in_quotes = True
            token += ch

    # Flush the trailing element, if any.
    if token:
        elements.append(token)

    return [element.strip() for element in elements]
def _safe_gethostbyname(host):
    """Resolve *host* to an IP address, returning None instead of raising
    when resolution fails."""
    try:
        resolved = socket.gethostbyname(host)
    except socket.gaierror:
        resolved = None
    return resolved
class FileHandler(BaseHandler):
    """Open ``file://`` URLs for the local host; re-dispatch file URLs
    that name a remote host as FTP requests."""

    # Use local file or FTP depending on form of URL
    def file_open(self, req):
        url = req.get_selector()
        if url[:2] == '//' and url[2:3] != '/' and (req.host and
                req.host != 'localhost'):
            # file://otherhost/... is not local; reinterpret it as FTP
            req.type = 'ftp'
            return self.parent.open(req)
        else:
            return self.open_local_file(req)

    # names for the localhost
    names = None

    def get_names(self):
        """Return (and cache class-wide) every IP address that refers to
        this machine."""
        if FileHandler.names is None:
            try:
                FileHandler.names = tuple(
                    socket.gethostbyname_ex('localhost')[2] +
                    socket.gethostbyname_ex(socket.gethostname())[2])
            except socket.gaierror:
                FileHandler.names = (socket.gethostbyname('localhost'),)
        return FileHandler.names

    # not entirely sure what the rules are here
    def open_local_file(self, req):
        """Open a local file and wrap it with synthesized HTTP-style
        headers (Content-type/-length/Last-modified)."""
        import email.utils
        import mimetypes
        host = req.get_host()
        filename = req.get_selector()
        localfile = url2pathname(filename)
        try:
            stats = os.stat(localfile)
            size = stats.st_size
            modified = email.utils.formatdate(stats.st_mtime, usegmt=True)
            mtype = mimetypes.guess_type(filename)[0]
            headers = mimetools.Message(StringIO(
                'Content-type: %s\nContent-length: %d\nLast-modified: %s\n' %
                (mtype or 'text/plain', size, modified)))
            if host:
                host, port = splitport(host)
            # only serve the file when the URL's host is empty or is
            # actually this machine (and no explicit port was given)
            if not host or \
                (not port and _safe_gethostbyname(host) in self.get_names()):
                if host:
                    origurl = 'file://' + host + filename
                else:
                    origurl = 'file://' + filename
                return addinfourl(open(localfile, 'rb'), headers, origurl)
        except OSError, msg:
            # urllib2 users shouldn't expect OSErrors coming from urlopen()
            raise URLError(msg)
        raise URLError('file not on local host')
class FTPHandler(BaseHandler):
    """Opener handler for ``ftp://`` URLs."""

    def ftp_open(self, req):
        """Fetch a file (binary) or directory listing over FTP and return
        it wrapped as an addinfourl with synthesized headers."""
        import ftplib
        import mimetypes
        host = req.get_host()
        if not host:
            raise URLError('ftp error: no host given')
        host, port = splitport(host)
        if port is None:
            port = ftplib.FTP_PORT
        else:
            port = int(port)

        # username/password handling
        user, host = splituser(host)
        if user:
            user, passwd = splitpasswd(user)
        else:
            passwd = None
        host = unquote(host)
        user = user or ''
        passwd = passwd or ''

        try:
            host = socket.gethostbyname(host)
        except socket.error, msg:
            raise URLError(msg)
        path, attrs = splitattr(req.get_selector())
        dirs = path.split('/')
        dirs = map(unquote, dirs)
        dirs, file = dirs[:-1], dirs[-1]
        if dirs and not dirs[0]:
            dirs = dirs[1:]
        try:
            fw = self.connect_ftp(user, passwd, host, port, dirs, req.timeout)
            # 'I'mage (binary) transfer for a file, 'D'irectory listing
            # otherwise; a ";type=..." URL attribute may override this
            type = file and 'I' or 'D'
            for attr in attrs:
                attr, value = splitvalue(attr)
                if attr.lower() == 'type' and \
                   value in ('a', 'A', 'i', 'I', 'd', 'D'):
                    type = value.upper()
            fp, retrlen = fw.retrfile(file, type)
            headers = ""
            mtype = mimetypes.guess_type(req.get_full_url())[0]
            if mtype:
                headers += "Content-type: %s\n" % mtype
            if retrlen is not None and retrlen >= 0:
                headers += "Content-length: %d\n" % retrlen
            sf = StringIO(headers)
            headers = mimetools.Message(sf)
            return addinfourl(fp, headers, req.get_full_url())
        except ftplib.all_errors, msg:
            # re-raise with the original traceback preserved
            raise URLError, ('ftp error: %s' % msg), sys.exc_info()[2]

    def connect_ftp(self, user, passwd, host, port, dirs, timeout):
        """Create a fresh, non-cached FTP connection.  CacheFTPHandler
        overrides this to reuse connections."""
        fw = ftpwrapper(user, passwd, host, port, dirs, timeout,
                        persistent=False)
##        fw.ftp.set_debuglevel(1)
        return fw
class CacheFTPHandler(FTPHandler):
    """FTPHandler that keeps a bounded pool of open FTP connections,
    keyed by (user, host, port, path, timeout), expiring idle ones."""

    # XXX would be nice to have pluggable cache strategies
    # XXX this stuff is definitely not thread safe

    def __init__(self):
        self.cache = {}      # key -> live ftpwrapper connection
        self.timeout = {}    # key -> absolute expiry time
        self.soonest = 0     # earliest expiry among cached connections
        self.delay = 60      # seconds an idle connection stays cached
        self.max_conns = 16  # cache size limit

    def setTimeout(self, t):
        """Set how long (seconds) an idle connection stays cached."""
        self.delay = t

    def setMaxConns(self, m):
        """Set the maximum number of simultaneously cached connections."""
        self.max_conns = m

    def connect_ftp(self, user, passwd, host, port, dirs, timeout):
        """Return a cached connection for this key, creating one if needed
        and refreshing its expiry time."""
        key = user, host, port, '/'.join(dirs), timeout
        if key in self.cache:
            self.timeout[key] = time.time() + self.delay
        else:
            self.cache[key] = ftpwrapper(user, passwd, host, port, dirs, timeout)
            self.timeout[key] = time.time() + self.delay
        self.check_cache()
        return self.cache[key]

    def check_cache(self):
        """Evict expired connections, then enforce the size limit."""
        # first check for old ones
        t = time.time()
        if self.soonest <= t:
            for k, v in self.timeout.items():
                if v < t:
                    self.cache[k].close()
                    del self.cache[k]
                    del self.timeout[k]
            # BUG FIX: min() raises ValueError on an empty sequence; the
            # pool may be empty when every cached connection just expired.
            if self.timeout:
                self.soonest = min(self.timeout.values())

        # then check the size
        if len(self.cache) == self.max_conns:
            for k, v in self.timeout.items():
                if v == self.soonest:
                    del self.cache[k]
                    del self.timeout[k]
                    break
            # same empty-pool guard as above
            if self.timeout:
                self.soonest = min(self.timeout.values())

    def clear_cache(self):
        """Close and drop every cached connection."""
        for conn in self.cache.values():
            conn.close()
        self.cache.clear()
        self.timeout.clear()
|
farrajota/dbcollection | refs/heads/master | dbcollection/datasets/__init__.py | 2 | """
This module contains scripts to download/process
all datasets available in dbcollection.
These scripts are self contained, meaning they can be imported
and used to manually setup a dataset.
"""
from __future__ import print_function
import os
import h5py
import numpy as np
from dbcollection.utils.hdf5 import HDF5Manager
from dbcollection.utils.url import download_extract_urls
from dbcollection.utils.string_ascii import convert_str_to_ascii as str2ascii
class BaseDataset(object):
    """Base class for downloading and processing a dataset.

    Subclasses declare the class attributes below (``urls``, ``keywords``,
    ``tasks``, ``default_task``) and inherit the download/process driver
    methods.

    Parameters
    ----------
    data_path : str
        Path to the data directory.
    cache_path : str
        Path to the cache file
    extract_data : bool, optional
        Extracts the downloaded files if they are compacted.
    verbose : bool, optional
        Displays text information to the screen (if true).
    """

    urls = ()          # URLs of the dataset's source files to download
    keywords = ()      # keywords used to categorize the dataset in the cache
    tasks = {}         # mapping: task name -> task constructor
    default_task = ''  # task used when none is explicitly requested

    def __init__(self, data_path, cache_path, extract_data=True, verbose=True):
        """Store the paths/flags used by the download and process steps."""
        assert isinstance(data_path, str), "Must insert a valid data path"
        assert isinstance(cache_path, str), "Must insert a valid cache path"
        self.data_path = data_path
        self.cache_path = cache_path
        self.extract_data = extract_data
        self.verbose = verbose

    def download(self):
        """Download the dataset files and extract them if requested."""
        download_extract_urls(
            urls=self.urls,
            save_dir=self.data_path,
            extract_data=self.extract_data,
            verbose=self.verbose
        )

    def process(self, task='default'):
        """Process the metadata of a task.

        Parameters
        ----------
        task : str, optional
            Name of the task ('default' resolves to ``default_task``).

        Returns
        -------
        dict
            Maps the resolved task name to its HDF5 filename and keywords.
        """
        task_name = self.parse_task_name(task)
        if self.verbose:
            print("\nProcessing '{}' task:".format(task_name))
        metadata_filename = self.process_metadata(task_name)
        return {task_name: {"filename": metadata_filename,
                            "categories": self.keywords}}

    def parse_task_name(self, task):
        """Resolve '' / 'default' to the dataset's default task name."""
        return self.default_task if task in ('', 'default') else task

    def process_metadata(self, task):
        """Run a task's metadata processor.

        Parameters
        ----------
        task : str
            Name of the task.

        Returns
        -------
        str
            File name + path of the task's resulting HDF5 metadata file.
        """
        task_class = self.get_task_constructor(task)
        task_processor = task_class(data_path=self.data_path,
                                    cache_path=self.cache_path,
                                    verbose=self.verbose)
        return task_processor.run()

    def get_task_constructor(self, task):
        """Return the constructor registered for *task* in ``tasks``."""
        assert task
        return self.tasks[task]
class BaseTask(object):
    """Base class for processing the metadata of one dataset task.

    Subclasses override ``filename_h5``, ``load_data`` and
    ``process_set_metadata``; ``run()`` drives the whole pipeline.

    Parameters
    ----------
    data_path : str
        Path to the data directory.
    cache_path : str
        Path to the cache file
    verbose : bool, optional
        Displays text information to the screen (if true).
    """

    filename_h5 = ''  # base name (without extension) of the task's HDF5 file

    def __init__(self, data_path, cache_path, verbose=True):
        """Store paths, compute the output file location, init the manager."""
        assert data_path, "Must insert a valid data path"
        assert cache_path, "Must insert a valid cache path"
        self.cache_path = cache_path
        self.data_path = data_path
        self.verbose = verbose
        self.hdf5_filepath = self.get_hdf5_save_filename()
        self.hdf5_manager = None

    def get_hdf5_save_filename(self):
        """Return the full path of the task's HDF5 metadata file."""
        return os.path.join(self.cache_path, self.filename_h5 + '.h5')

    def run(self):
        """Main method: run the task's metadata processing.

        Opens the HDF5 file, streams the raw (meta)data through
        ``process_metadata`` and closes the file again.

        Returns
        -------
        str
            File name + path of the task's HDF5 metadata file.
        """
        self.setup_hdf5_manager()
        self.process_metadata(self.load_data())
        self.teardown_hdf5_manager()
        return self.hdf5_filepath

    def setup_hdf5_manager(self):
        """Open the HDF5 manager that writes the processed data to disk."""
        if self.verbose:
            print('\n==> Storing metadata to file: {}'.format(self.hdf5_filepath))
        self.hdf5_manager = HDF5Manager(filename=self.hdf5_filepath)

    def load_data(self):
        """Load the dataset's (meta)data from disk (create a generator).

        Stub: subclasses yield dictionaries mapping a set split name
        (train/val/test/...) to that split's data.
        """
        pass

    def process_metadata(self, data_generator):
        """Consume the generator and store each set split's annotations."""
        for split_data in data_generator:
            for split_name in split_data:
                if self.verbose:
                    print('\nSaving set metadata: {}'.format(split_name))
                self.process_set_metadata(split_data[split_name], split_name)

    def process_set_metadata(self, data, set_name):
        """Store one set split's fields in the HDF5 metadata file.

        Stub: subclasses write each field as a single big matrix for
        fast retrieval.

        Parameters
        ----------
        data : dict
            Dictionary containing the data annotations of a set split.
        set_name : str
            Name of the set split.
        """
        pass

    def teardown_hdf5_manager(self):
        """Close the HDF5 file handle."""
        self.hdf5_manager.close()
class BaseField(object):
    """Base class for the dataset's data field processors.

    Every keyword argument passed at construction time becomes an
    instance attribute (e.g. ``set_name``, ``hdf5_manager``).
    """

    def __init__(self, **kwargs):
        for name in kwargs:
            setattr(self, name, kwargs[name])

    def save_field_to_hdf5(self, set_name, field, data, **kwargs):
        """Save one field's array into the HDF5 metadata file.

        Parameters
        ----------
        set_name: str
            Name of the set split.
        field : str
            Name of the data field.
        data : np.ndarray
            Numpy ndarray of the field's data.
        """
        self.hdf5_manager.add_field_to_group(group=set_name,
                                             field=field,
                                             data=data,
                                             **kwargs)
class BaseColumnField(BaseField):
    """Base class for the dataset's column data field processor."""

    fields = []  # column names, set by subclasses

    def process(self):
        """Store the column names (ascii-encoded) under '__COLUMNS__'."""
        encoded_names = str2ascii(self.fields)
        self.save_field_to_hdf5(set_name=self.set_name,
                                field='__COLUMNS__',
                                data=encoded_names,
                                dtype=np.uint8,
                                fillvalue=0)
class BaseMetadataField(BaseField):
    """Base class for the dataset's metadata field processor."""

    fields = []  # list of dicts with 'name' and 'type' keys, set by subclasses

    def process(self):
        """Store the metadata field names and types to hdf5."""
        self.save_fields_names()
        self.save_fields_types()

    def _save_ascii_list(self, hdf5_field, values):
        # Shared writer: every string list is stored as a padded uint8
        # ascii matrix.
        self.save_field_to_hdf5(
            set_name=self.set_name,
            field=hdf5_field,
            data=str2ascii(values),
            dtype=np.uint8,
            fillvalue=0
        )

    def save_fields_names(self):
        """Store each field's name under '__COLUMNS__'."""
        self._save_ascii_list('__COLUMNS__',
                              [field['name'] for field in self.fields])

    def save_fields_types(self):
        """Store each field's type under '__TYPES__'."""
        self._save_ascii_list('__TYPES__',
                              [field['type'] for field in self.fields])
|
beck/django | refs/heads/master | tests/bash_completion/tests.py | 327 | """
A series of tests to establish that the command-line bash completion works.
"""
import os
import sys
import unittest
from django.apps import apps
from django.core.management import ManagementUtility
from django.test.utils import captured_stdout
class BashCompletionTests(unittest.TestCase):
    """
    Testing the Python level bash completion code.
    This requires setting up the environment as if we got passed data
    from bash.
    """

    def setUp(self):
        # Turn autocompletion mode on, remembering any pre-existing value
        # so tearDown can restore the caller's environment.
        self.old_DJANGO_AUTO_COMPLETE = os.environ.get('DJANGO_AUTO_COMPLETE')
        os.environ['DJANGO_AUTO_COMPLETE'] = '1'

    def tearDown(self):
        if self.old_DJANGO_AUTO_COMPLETE:
            os.environ['DJANGO_AUTO_COMPLETE'] = self.old_DJANGO_AUTO_COMPLETE
        else:
            del os.environ['DJANGO_AUTO_COMPLETE']

    def _user_input(self, input_str):
        """
        Set the environment and the list of command line arguments.

        This sets the bash variables $COMP_WORDS and $COMP_CWORD. The former is
        an array consisting of the individual words in the current command
        line, the latter is the index of the current cursor position, so in
        case a word is completed and the cursor is placed after a whitespace,
        $COMP_CWORD must be incremented by 1:

          * 'django-admin start' -> COMP_CWORD=1
          * 'django-admin startproject' -> COMP_CWORD=1
          * 'django-admin startproject ' -> COMP_CWORD=2
        """
        os.environ['COMP_WORDS'] = input_str
        idx = len(input_str.split(' ')) - 1  # Index of the last word
        comp_cword = idx + 1 if input_str.endswith(' ') else idx
        os.environ['COMP_CWORD'] = str(comp_cword)
        sys.argv = input_str.split()

    def _run_autocomplete(self):
        # autocomplete() prints its suggestions and exits, so swallow the
        # SystemExit and hand back the captured stdout as a list of lines.
        util = ManagementUtility(argv=sys.argv)
        with captured_stdout() as stdout:
            try:
                util.autocomplete()
            except SystemExit:
                pass
        return stdout.getvalue().strip().split('\n')

    def test_django_admin_py(self):
        "django_admin.py will autocomplete option flags"
        self._user_input('django-admin sqlmigrate --verb')
        output = self._run_autocomplete()
        self.assertEqual(output, ['--verbosity='])

    def test_manage_py(self):
        "manage.py will autocomplete option flags"
        self._user_input('manage.py sqlmigrate --verb')
        output = self._run_autocomplete()
        self.assertEqual(output, ['--verbosity='])

    def test_custom_command(self):
        "A custom command can autocomplete option flags"
        self._user_input('django-admin test_command --l')
        output = self._run_autocomplete()
        self.assertEqual(output, ['--list'])

    def test_subcommands(self):
        "Subcommands can be autocompleted"
        self._user_input('django-admin sql')
        output = self._run_autocomplete()
        self.assertEqual(output, ['sqlflush sqlmigrate sqlsequencereset'])

    def test_completed_subcommand(self):
        "Show option flags in case a subcommand is completed"
        self._user_input('django-admin startproject ')  # Trailing whitespace
        output = self._run_autocomplete()
        for item in output:
            self.assertTrue(item.startswith('--'))

    def test_help(self):
        "No errors, just an empty list if there are no autocomplete options"
        self._user_input('django-admin help --')
        output = self._run_autocomplete()
        self.assertEqual(output, [''])

    def test_app_completion(self):
        "Application names will be autocompleted for an AppCommand"
        self._user_input('django-admin sqlmigrate a')
        output = self._run_autocomplete()
        a_labels = sorted(app_config.label
                          for app_config in apps.get_app_configs()
                          if app_config.label.startswith('a'))
        self.assertEqual(output, a_labels)
|
azureplus/hue | refs/heads/master | desktop/core/ext-py/PyYAML-3.09/tests/lib3/test_mark.py | 62 |
import yaml
def test_marks(marks_filename, verbose=False):
    """Check yaml.Mark.get_snippet() output for every sample in the file.

    Each '---'-separated document in *marks_filename* contains a '*' at
    the position the mark should point at; the generated snippet must be
    exactly two lines (data + pointer) with the pointer's last column on
    the '*'.
    """
    # Context manager closes the file even on error (it was leaked before).
    with open(marks_filename, 'r') as marks_file:
        inputs = marks_file.read().split('---\n')[1:]
    for input in inputs:
        index = 0
        line = 0
        column = 0
        # Walk to the '*' marker, tracking its line/column coordinates.
        while input[index] != '*':
            if input[index] == '\n':
                line += 1
                column = 0
            else:
                column += 1
            index += 1
        mark = yaml.Mark(marks_filename, index, line, column, input, index)
        snippet = mark.get_snippet(indent=2, max_length=79)
        if verbose:
            print(snippet)
        assert isinstance(snippet, str), type(snippet)
        assert snippet.count('\n') == 1, snippet.count('\n')
        data, pointer = snippet.split('\n')
        assert len(data) < 82, len(data)
        assert data[len(pointer)-1] == '*', data[len(pointer)-1]
# Register the fixture-file extension the PyYAML test harness should feed
# to test_marks.
test_marks.unittest = ['.marks']

if __name__ == '__main__':
    import test_appliance
    test_appliance.run(globals())
|
davidyezsetz/kuma | refs/heads/master | vendor/packages/pyparsing/examples/parseListString.py | 16 | # parseListString.py
#
# Copyright, 2006, by Paul McGuire
#
from pyparsing import *
# first pass
lbrack = Literal("[")
rbrack = Literal("]")
integer = Word(nums).setName("integer")
real = Combine(Optional(oneOf("+ -")) + Word(nums) + "." +
Optional(Word(nums))).setName("real")
listItem = real | integer | quotedString
listStr = lbrack + delimitedList(listItem) + rbrack
test = "['a', 100, 3.14]"
print listStr.parseString(test)
# second pass, cleanup and add converters
lbrack = Literal("[").suppress()
rbrack = Literal("]").suppress()
cvtInt = lambda s,l,toks: int(toks[0])
integer = Word(nums).setName("integer").setParseAction( cvtInt )
cvtReal = lambda s,l,toks: float(toks[0])
real = Combine(Optional(oneOf("+ -")) + Word(nums) + "." +
Optional(Word(nums))).setName("real").setParseAction( cvtReal )
listItem = real | integer | quotedString.setParseAction( removeQuotes )
listStr = lbrack + delimitedList(listItem) + rbrack
test = "['a', 100, 3.14]"
print listStr.parseString(test)
# third pass, add nested list support, and tuples, too!
cvtInt = lambda s,l,toks: int(toks[0])
cvtReal = lambda s,l,toks: float(toks[0])
lbrack = Literal("[").suppress()
rbrack = Literal("]").suppress()
integer = Word(nums).setName("integer").setParseAction( cvtInt )
real = Combine(Optional(oneOf("+ -")) + Word(nums) + "." +
Optional(Word(nums))).setName("real").setParseAction( cvtReal )
tupleStr = Forward()
listStr = Forward()
listItem = real | integer | quotedString.setParseAction(removeQuotes) | Group(listStr) | tupleStr
tupleStr << ( Suppress("(") + delimitedList(listItem) + Optional(Suppress(",")) + Suppress(")") )
tupleStr.setParseAction( lambda t:tuple(t.asList()) )
listStr << lbrack + delimitedList(listItem) + Optional(Suppress(",")) + rbrack
test = "['a', 100, ('A', [101,102]), 3.14, [ +2.718, 'xyzzy', -1.414] ]"
print listStr.parseString(test)
# fourth pass, just parsing tuples of numbers
#~ from pyparsing import *
#~ integer = (Word(nums)|Word('-+',nums)).setName("integer")
#~ real = Combine(integer + "." + Optional(Word(nums))).setName("real")
#~ tupleStr = Forward().setName("tuple")
#~ tupleItem = real | integer | tupleStr
#~ tupleStr << ( Suppress("(") + delimitedList(tupleItem) +
#~ Optional(Suppress(",")) + Suppress(")") )
#~ # add parse actions to do conversion during parsing
#~ integer.setParseAction( lambda toks: int(toks[0]) )
#~ real.setParseAction( lambda toks: float(toks[0]) )
#~ tupleStr.setParseAction( lambda toks: tuple(toks) )
#~ s = '((1,2), (3,4), (-5,9.2),)'
#~ print tupleStr.parseString(s)[0]
cvtInt = lambda s,l,toks: int(toks[0])
cvtReal = lambda s,l,toks: float(toks[0])
cvtDict = lambda s,l,toks: dict(toks[0])
lbrack = Literal("[").suppress()
rbrack = Literal("]").suppress()
lbrace = Literal("{").suppress()
rbrace = Literal("}").suppress()
colon = Literal(":").suppress()
integer = Word(nums).setName("integer").setParseAction( cvtInt )
real = Combine(Optional(oneOf("+ -")) + Word(nums) + "." +
Optional(Word(nums))).setName("real").setParseAction( cvtReal )
tupleStr = Forward()
listStr = Forward()
dictStr = Forward()
listItem = real | integer | quotedString.setParseAction(removeQuotes) | Group(listStr) | tupleStr | dictStr
tupleStr << ( Suppress("(") + delimitedList(listItem) + Optional(Suppress(",")) + Suppress(")") )
tupleStr.setParseAction( lambda t:tuple(t.asList()) )
listStr << lbrack + delimitedList(listItem) + Optional(Suppress(",")) + rbrack
dictStr << rbrace + delimitedList( Group( listItem + colon + listItem ) ) + rbrace
test = "['a', 100, ('A', [101,102]), 3.14, [ +2.718, 'xyzzy', -1.414] ]"
test = '[{0: [2], 1: []}, {0: [], 1: [], 2: []}, {0: [1, 2]}]'
print listStr.parseString(test)
|
liqd/adhocracy | refs/heads/develop | src/adhocracy/model/user.py | 4 | from copy import copy
import hashlib
import os
import logging
from datetime import datetime
from babel import Locale
from pylons.i18n import _
from sqlalchemy import Table, Column, func, or_
from sqlalchemy import Boolean, DateTime, Integer, Unicode, UnicodeText
from sqlalchemy.orm import eagerload_all
from adhocracy import config
from adhocracy.model import meta
from adhocracy.model import instance_filter as ifilter
from adhocracy.model.core import JSONEncodedDict
from adhocracy.model.core import MutationDict
from adhocracy.model.instance import Instance
log = logging.getLogger(__name__)
user_table = Table(
'user', meta.data,
Column('id', Integer, primary_key=True),
Column('user_name', Unicode(255), nullable=False, unique=True, index=True),
Column('display_name', Unicode(255), nullable=True, index=True),
Column('bio', UnicodeText(), nullable=True),
Column('email', Unicode(255), nullable=True, unique=False),
Column('email_priority', Integer, default=3),
Column('activation_code', Unicode(255), nullable=True, unique=False),
Column('reset_code', Unicode(255), nullable=True, unique=False),
Column('password', Unicode(80), nullable=True),
Column('locale', Unicode(10), nullable=True),
Column('create_time', DateTime, default=datetime.utcnow),
Column('access_time', DateTime, default=datetime.utcnow,
onupdate=datetime.utcnow),
Column('delete_time', DateTime),
Column('banned', Boolean, default=False),
Column('no_help', Boolean, default=False, nullable=True),
Column('page_size', Integer, default=10, nullable=True),
Column('proposal_sort_order', Unicode(50), default=None, nullable=True),
Column('gender', Unicode(1), default=None),
Column('_is_organization', Boolean, default=False),
Column('email_messages', Boolean, default=True),
Column('welcome_code', Unicode(255), nullable=True),
Column('optional_attributes', MutationDict.as_mutable(JSONEncodedDict)),
)
class User(meta.Indexable):
    # Prefix used to mark user names that were created by bulk import.
    IMPORT_MARKER = 'i__'
def __init__(self, user_name, email, password, locale, display_name=None,
             bio=None, omit_activation_code=False):
    """Create a new user.

    *omit_activation_code* skips generating an email verification token
    (see _set_email); the password is hashed by the property setter.
    """
    self.user_name = user_name
    self._set_email(email, omit_activation_code)
    self.password = password  # hashed via _set_password
    self.locale = locale
    self.display_name = display_name
    self.bio = bio
    self.banned = False
@property
def name(self):
    """The name shown in the UI: the trimmed display name when present,
    otherwise the login name.  Deleted users always show the login name."""
    if self.delete_time:
        return self.user_name
    trimmed = (self.display_name or u'').strip()
    if trimmed:
        return trimmed
    return self.user_name
def _get_locale(self):
    # Stored as a unicode locale tag; parsed lazily into a babel Locale.
    # Returns None when unset or empty.
    if not self._locale:
        return None
    return Locale.parse(self._locale)

def _set_locale(self, locale):
    self._locale = unicode(locale)

locale = property(_get_locale, _set_locale)

def _get_email(self):
    return self._email

def _set_email(self, email, omit_activation_code=False):
    # Changing the address invalidates the old verification: a fresh
    # activation code is generated unless explicitly omitted.
    import adhocracy.lib.util as util
    if not omit_activation_code and not self._email == email:
        self.activation_code = util.random_token()
    self._email = email

email = property(_get_email, _set_email)

@property
def email_hash(self):
    # SHA1 hex digest of the address (e.g. for avatar services).
    # NOTE(review): raises TypeError when email is None -- confirm callers
    # only use this for users that have an address.
    return hashlib.sha1(self.email).hexdigest()

@property
def is_organization(self):
    # The per-user flag is only honoured when the deployment enables
    # organizations in its configuration.
    allow = config.get_bool('adhocracy.allow_organization')
    return allow and self._is_organization
def badge_groups(self):
    """Distinct groups granted through badges that are either global or
    scoped to the current instance (order of first appearance kept)."""
    current_instance = ifilter.get_instance()
    result = []
    for badge in self.badges:
        candidate = badge.group
        if candidate is None:
            continue
        if badge.instance not in [None, current_instance]:
            continue
        if candidate not in result:
            result.append(candidate)
    return result
def get_badges(self, instance=None):
    """Return the user's badges that are global or bound to *instance*."""
    matching = []
    for badge in self.badges:
        if badge.instance is None or badge.instance is instance:
            matching.append(badge)
    return matching
def membership_groups(self):
    """Groups from non-expired memberships that are global or belong to
    the current instance (all global groups when outside an instance)."""
    from membership import Membership
    current_instance = ifilter.get_instance()
    memberships_q = meta.Session.query(Membership).filter(
        Membership.user_id == self.id)
    if current_instance is None:
        # Outside an instance context: global memberships only.
        memberships_q = memberships_q.filter(
            Membership.instance_id == None)  # noqa
    else:
        memberships_q = memberships_q.filter(or_(
            Membership.instance_id == None,  # noqa
            Membership.instance_id == current_instance.id
        ))
    memberships = memberships_q.all()
    # Expiry is checked in Python, not in SQL.
    return [m.group for m in memberships if not m.is_expired()]
@property
def groups(self):
    """Union of badge-derived and membership-derived groups (deduplicated,
    order unspecified)."""
    return list(set(self.badge_groups() + self.membership_groups()))

def instance_groups(self):
    """Subset of groups() that are instance-level groups."""
    return filter(lambda g: g.is_instance_group(), self.groups)
def _has_permission(self, permission_name):
    """True if any of the user's groups grants *permission_name*."""
    return any(perm.permission_name == permission_name
               for group in self.groups
               for perm in group.permissions)
def instance_membership(self, instance):
    """Return the user's first non-expired membership in *instance*,
    or None (also when *instance* is falsy)."""
    if not instance:
        return None
    from membership import Membership
    memberships = meta.Session.query(Membership).filter(
        Membership.user_id == self.id,
        Membership.instance_id == instance.id)\
        .all()
    for membership in memberships:
        if not membership.is_expired():
            return membership
    return None

def is_member(self, instance):
    """Whether the user currently has a non-expired membership in
    *instance*."""
    return self.instance_membership(instance) is not None
@property
def instances(self):
    """Shortcut for get_instances() with hidden instances filtered out."""
    return self.get_instances()

def get_instances(self, include_hidden=False):
    """Distinct instances the user holds a non-expired membership in;
    hidden instances are included only when *include_hidden* is set."""
    found = [m.instance
             for m in self.memberships
             if not m.is_expired()
             and m.instance is not None
             and (include_hidden or not m.instance.hidden)]
    return list(set(found))
def real_instances(self, exclude_current=False):
    """The user's instances without the special (internal) instance keys,
    optionally also without the current instance, sorted by key."""
    excluded_keys = copy(Instance.SPECIAL_KEYS)
    if exclude_current:
        current = ifilter.get_instance()
        if current is not None:
            excluded_keys.append(current.key)
    kept = [i for i in self.instances if i.key not in excluded_keys]
    return sorted(kept, key=lambda i: i.key)
@property
def twitter(self):
    """First non-deleted linked Twitter account, or None."""
    for twitter in self.twitters:
        if not twitter.is_deleted():
            return twitter
    return None

@property
def openids(self):
    """All non-deleted linked OpenID identities (raw list is _openids)."""
    _ids = []
    for openid in self._openids:
        if not openid.is_deleted():
            _ids.append(openid)
    return _ids

@property
def velruse(self):
    """All non-deleted velruse (third-party auth) identities."""
    _ids = []
    for velruse in self._velruse:
        if not velruse.is_deleted():
            _ids.append(velruse)
    return _ids

@property
def num_watches(self):
    """Number of the user's watches that are not (yet) deleted."""
    from watch import Watch
    q = meta.Session.query(Watch)
    q = q.filter(Watch.user == self)
    q = q.filter(or_(Watch.delete_time == None,  # noqa
                     Watch.delete_time >= datetime.utcnow()))
    return q.count()
def _set_password(self, password):
    """Hash password on the fly.

    Stored format: 40 hex chars of salt followed by 40 hex chars of
    sha1(password + salt).  NOTE(review): salted SHA1 is a weak password
    hash by modern standards; changing it would invalidate existing
    hashes, so it is only documented here.
    """
    if isinstance(password, unicode):
        # Lossy: non-ASCII characters are silently dropped (mirrored in
        # validate_password so hashes stay consistent).
        password_8bit = password.encode('ascii', 'ignore')
    else:
        password_8bit = password
    salt = hashlib.sha1(os.urandom(60))
    hash = hashlib.sha1(password_8bit + salt.hexdigest())
    hashed_password = salt.hexdigest() + hash.hexdigest()
    if not isinstance(hashed_password, unicode):
        hashed_password = hashed_password.decode('utf-8')
    self._password = hashed_password
    # Invalidate temporary password recovery codes
    self.reset_code = None
    self.welcome_code = None

def _get_password(self):
    """Return the password hashed"""
    return self._password

def validate_password(self, password):
    """
    Check the password against existing credentials.

    Banned or deleted users, and users without a stored hash, always
    fail.  The first 40 characters of the stored value are the salt
    (see _set_password).

    :param password: the password that was provided by the user to
        try and authenticate. This is the clear text version that we will
        need to match against the hashed one in the database.
    :type password: unicode object.
    :return: Whether the password is valid.
    :rtype: bool
    """
    if self.password is None:
        return False
    if isinstance(password, unicode):
        password_8bit = password.encode('ascii', 'ignore')
    else:
        password_8bit = password
    if self.banned or self.delete_time:
        return False
    hashed_pass = hashlib.sha1(password_8bit + self.password[:40])
    return self.password[40:] == hashed_pass.hexdigest()

password = property(_get_password, _set_password)
def initialize_welcome(self):
    """ Sign up the user for the welcome feature (on user import or so) """
    import adhocracy.lib.util as util
    self.welcome_code = util.random_token()
    # Bypass the password property: clear the hash directly so the setter
    # does not wipe the welcome_code we just created.
    self._password = None

def current_agencies(self, instance_filter=True):
    """Non-revoked delegations where this user is the principal,
    optionally restricted to the current instance."""
    ds = filter(lambda d: not d.is_revoked(), self.agencies)
    if ifilter.has_instance() and instance_filter:
        ds = filter(lambda d: d.scope.instance == ifilter.get_instance(),
                    ds)
    return ds

def current_delegated(self, instance_filter=True):
    """Non-revoked delegations received by this user, optionally
    restricted to the current instance."""
    ds = filter(lambda d: not d.is_revoked(), self.delegated)
    if ifilter.has_instance() and instance_filter:
        ds = filter(lambda d: d.scope.instance == ifilter.get_instance(),
                    ds)
    return ds
@classmethod
def complete(cls, prefix, limit=5, instance_filter=True):
    """Autocomplete users whose user_name or display_name starts with
    *prefix* (case-insensitive).

    NOTE(review): the SQL LIMIT is applied before the in-Python instance
    membership filter, so fewer than *limit* results may be returned even
    when more matches exist -- confirm this is acceptable to callers.
    """
    q = meta.Session.query(User)
    prefix = prefix.lower()
    q = q.filter(or_(func.lower(User.user_name).like(prefix + u"%"),
                     func.lower(User.display_name).like(prefix + u"%")))
    q = q.limit(limit)
    completions = q.all()
    if ifilter.has_instance() and instance_filter:
        inst = ifilter.get_instance()
        completions = filter(lambda u: u.is_member(inst), completions)
    return completions
@classmethod
# @meta.session_cached
def find(cls, user_name, instance_filter=True, include_deleted=False):
    """Look up a user by numeric id or by user_name; returns None when
    not found.

    NOTE(review): the outer broad except turns *any* error (including DB
    failures) into a debug log + None -- confirm this best-effort
    behavior is intended.
    """
    from membership import Membership
    try:
        q = meta.Session.query(User)
        try:
            # Numeric strings are treated as primary-key lookups.
            q = q.filter(User.id == int(user_name))
        except ValueError:
            q = q.filter(User.user_name == unicode(user_name))
        if not include_deleted:
            q = q.filter(or_(User.delete_time == None,  # noqa
                             User.delete_time > datetime.utcnow()))
        if ifilter.has_instance() and instance_filter:
            # Restrict to users with a live membership in the current
            # instance.
            q = q.join(Membership)
            q = q.filter(or_(Membership.expire_time == None,  # noqa
                             Membership.expire_time > datetime.utcnow()))
            q = q.filter(Membership.instance == ifilter.get_instance())
        return q.limit(1).first()
    except Exception, e:
        log.debug("find(%s): %s" % (user_name, e))
        return None
@classmethod
def find_by_email(cls, email, include_deleted=False):
    """Case-insensitive lookup by email address (first match)."""
    return cls.all_q(None, include_deleted)\
        .filter(func.lower(User.email) == unicode(email).lower())\
        .limit(1).first()

@classmethod
def find_by_user_name(cls, user_name, include_deleted=False):
    """Like find(), with the current-instance filter enabled."""
    return cls.find(user_name, include_deleted=include_deleted)

@classmethod
def find_by_shibboleth(cls, persistent_id, include_deleted=False):
    """Lookup via a linked Shibboleth persistent id."""
    from shibboleth import Shibboleth
    return cls.all_q(None, include_deleted)\
        .join(Shibboleth)\
        .filter(Shibboleth.persistent_id == persistent_id)\
        .limit(1).first()

@classmethod
def find_all(cls, unames, instance_filter=True, include_deleted=False):
    """All users whose user_name is in *unames*, optionally restricted
    to live members of the current instance."""
    from membership import Membership
    q = meta.Session.query(User)
    q = q.filter(User.user_name.in_(unames))
    if not include_deleted:
        q = q.filter(or_(User.delete_time == None,  # noqa
                         User.delete_time > datetime.utcnow()))
    if ifilter.has_instance() and instance_filter:
        q = q.join(Membership)
        q = q.filter(or_(Membership.expire_time == None,  # noqa
                         Membership.expire_time > datetime.utcnow()))
        q = q.filter(Membership.instance == ifilter.get_instance())
    # log.debug("QueryAll: %s" % q)
    # log.debug("LEN: %s" % len(q.all()))
    return q.all()

# Attribute used as the document id by the search indexer.
_index_id_attr = 'user_name'

@classmethod
def all_q(cls, instance=None, include_deleted=False):
    """Base query for users: non-deleted by default, optionally limited
    to live members of *instance* (memberships eagerly loaded)."""
    from membership import Membership
    q = meta.Session.query(User)
    if not include_deleted:
        q = q.filter(or_(User.delete_time == None,  # noqa
                         User.delete_time > datetime.utcnow()))
    if instance:
        q = q.options(eagerload_all('memberships'))
        q = q.join(Membership)
        q = q.filter(or_(Membership.expire_time == None,  # noqa
                         Membership.expire_time > datetime.utcnow()))
        q = q.filter(Membership.instance == instance)
    return q

@classmethod
def all(cls, instance=None, include_deleted=False):
    """Materialized version of all_q()."""
    return cls.all_q(instance=instance,
                     include_deleted=include_deleted).all()
def delete(self, delete_time=None):
    """Soft-delete the user and cascade to linked accounts, comments,
    memberships, watches, and revoke all delegations.  Votes are
    deliberately left untouched (see the commented block)."""
    from watch import Watch
    if delete_time is None:
        delete_time = datetime.utcnow()
    self.revoke_delegations()
    for twitter in self.twitters:
        twitter.delete(delete_time=delete_time)
    for openid in self.openids:
        openid.delete(delete_time=delete_time)
    for velruse in self.velruse:
        velruse.delete(delete_time=delete_time)
    for comment in self.comments:
        comment.delete(delete_time=delete_time)
    for membership in self.memberships:
        membership.delete(delete_time=delete_time)
    for watch in Watch.all_by_user(self):
        watch.delete(delete_time=delete_time)
    # for vote in self.votes:
    #     vote.delete(delete_time=delete_time)
    self.delete_time = delete_time
def undelete(self):
    """Reverse a soft delete by clearing delete/expire timestamps.

    NOTE(review): ``self.openids`` and ``self.velruse`` are *filtered*
    properties that skip already-deleted rows, so identities deleted by
    delete() are never restored here -- iterating ``self._openids`` /
    ``self._velruse`` looks intended.  Confirm before changing.
    """
    from watch import Watch
    for twitter in self.twitters:
        twitter.delete_time = None
    for openid in self.openids:
        openid.delete_time = None
    for velruse in self.velruse:
        velruse.delete_time = None
    for comment in self.comments:
        comment.delete_time = None
    for membership in self.memberships:
        membership.expire_time = None
    for watch in Watch.all_by_user(self):
        watch.delete_time = None
    self.delete_time = None
def is_deleted(self, at_time=None):
    """Whether the user was (soft-)deleted at or before *at_time*
    (defaults to now)."""
    if at_time is None:
        at_time = datetime.utcnow()
    if self.delete_time is None:
        return False
    return self.delete_time <= at_time
def revoke_delegations(self, instance=None):
    """Revoke all live delegations where the user is agent or principal,
    optionally only those scoped to *instance*."""
    from delegation import Delegation
    q = meta.Session.query(Delegation)
    q = q.filter(or_(Delegation.agent == self,
                     Delegation.principal == self))
    q = q.filter(or_(Delegation.revoke_time == None,  # noqa
                     Delegation.revoke_time > datetime.utcnow()))
    for delegation in q:
        if instance is None or delegation.scope.instance == instance:
            delegation.revoke()

def is_email_activated(self):
    """True when an address is set and its activation code was cleared."""
    return self.email is not None and self.activation_code is None

def set_email_verified(self):
    # for adhocracy, None means email is verified
    self.activation_code = None
    # NOTE(review): committing inside a model method is unusual for this
    # codebase -- confirm callers rely on it.
    meta.Session.commit()
def delegation_node(self, scope):
    """DelegationNode view of this user within *scope*."""
    from adhocracy.lib.democracy import DelegationNode
    return DelegationNode(self, scope)

def number_of_votes_in_scope(self, scope):
    """
    Number of votes this user casts in *scope*: own vote plus received
    delegations; 0 without the 'vote.cast' permission.

    May be a bit too much as multiple delegations are counted for each user
    they are delegated to. (This is the safety net delegation)
    """
    if not self._has_permission('vote.cast'):
        return 0
    return self.delegation_node(scope).number_of_delegations() + 1

def position_on_poll(self, poll):
    """The user's decision result on *poll*."""
    from adhocracy.lib.democracy.decision import Decision
    return Decision(self, poll).result

def any_position_on_proposal(self, proposal):
    # this is fuzzy since it includes two types of opinions: a decided
    # adoption vote takes precedence over the rating poll.
    from adhocracy.lib.democracy.decision import Decision
    if proposal.adopt_poll:
        dec = Decision(self, proposal.adopt_poll)
        if dec.is_decided():
            return dec.result
    if proposal.rate_poll:
        return Decision(self, proposal.rate_poll).result
@classmethod
def create(cls, user_name, email, password=None, locale=None,
           openid_identity=None, global_admin=False, display_name=None,
           autojoin=True, shibboleth_persistent_id=None,
           omit_activation_code=False):
    """
    Create a user. If user_name is None, a random user name is generated.

    Also creates the default global membership, optional admin
    membership, linked OpenID/Shibboleth identities, and auto-joins
    configured instances.  Flushes (but does not commit) the session.
    """
    from group import Group
    from membership import Membership
    import adhocracy.lib.util as util
    if password is None:
        # No password supplied: assign an unguessable random one.
        password = util.random_token()
    import adhocracy.i18n as i18n
    if locale is None:
        locale = i18n.get_default_locale()
    while user_name is None:
        # Note: This can theoretically lead to IntegrityErrors if the same
        # username is generated at the same time. This is very unlikely
        # though.
        from adhocracy.lib.util import random_username
        try_user_name = random_username()
        if cls.find(try_user_name) is None:
            user_name = try_user_name
            from adhocracy.lib import helpers as h
            h.flash(_('The random username %s has been assigned to you.') %
                    user_name, 'success')
    user = User(user_name, email, password, locale,
                display_name=display_name)
    meta.Session.add(user)
    # Add the global default group
    default_group = Group.by_code(Group.CODE_DEFAULT)
    default_membership = Membership(user, None, default_group)
    meta.Session.add(default_membership)
    # Autojoin the user in instances
    config_autojoin = config.get('adhocracy.instances.autojoin')
    if autojoin and config_autojoin:
        user.fix_autojoin(commit=False)
    if global_admin:
        admin_group = Group.by_code(Group.CODE_ADMIN)
        admin_membership = Membership(user, None, admin_group)
        meta.Session.add(admin_membership)
    if openid_identity is not None:
        from adhocracy.model.openid import OpenID
        openid = OpenID(unicode(openid_identity), user)
        meta.Session.add(openid)
    if shibboleth_persistent_id is not None:
        from adhocracy.model.shibboleth import Shibboleth
        shib = Shibboleth(shibboleth_persistent_id, user)
        meta.Session.add(shib)
    meta.Session.flush()
    return user
def to_dict(self):
    """Serializable representation for the API ('mbox' is the SHA1 email
    hash, cf. email_hash)."""
    from adhocracy.lib import helpers as h
    d = dict(id=self.id,
             user_name=self.user_name,
             locale=self._locale,
             url=h.entity_url(self),
             create_time=self.create_time,
             mbox=self.email_hash)
    if self.display_name:
        d['display_name'] = self.display_name
    if self.bio:
        d['bio'] = self.bio
    # d['memberships'] = map(lambda m: m.instance.key,
    #                        self.memberships)
    return d

def to_index(self):
    """Search index document: base Indexable fields plus user data."""
    index = super(User, self).to_index()
    index.update(dict(
        title=self.name,
        tag=[self.user_name],
        body=self.bio,
        user=self.user_name,
    ))
    return index

def __repr__(self):
    return u"<User(%s,%s)>" % (self.id, self.user_name)

@property
def title(self):
    # Alias used by generic entity templates.
    return self.name
def fix_autojoin(self, commit=True):
    """Create the memberships mandated by 'adhocracy.instances.autojoin'
    ('ALL' or a comma-separated list of instance keys) that the user does
    not have yet; returns the number of memberships added.

    NOTE(review): assumes the config value is set -- a None value would
    crash on .split(); callers (e.g. create()) guard for that.
    """
    from membership import Membership
    config_autojoin = config.get('adhocracy.instances.autojoin')
    if config_autojoin == 'ALL':
        instances = Instance.all(include_hidden=True)
    else:
        instance_keys = [key.strip() for key in
                         config_autojoin.split(",")]
        instances = meta.Session.query(Instance)\
            .filter(Instance.key.in_(instance_keys)).all()
    to_join = set(instances)
    added = 0
    # Drop instances the user already has any membership in (expired or
    # not).
    for m in self.memberships:
        to_join.discard(m.instance)
    for instance in to_join:
        autojoin_membership = Membership(
            self, instance,
            instance.default_group)
        meta.Session.add(autojoin_membership)
        added += 1
    if commit:
        meta.Session.commit()
    return added
|
tobspr/panda3d | refs/heads/master | direct/src/p3d/Packager.py | 5 | """ This module is used to build a "Package", a collection of files
within a Panda3D Multifile, which can be easily be downloaded and/or
patched onto a client machine, for the purpose of running a large
application. """
__all__ = ["Packager", "PackagerError", "OutsideOfPackageError", "ArgumentError"]
# Important to import panda3d first, to avoid naming conflicts with
# Python's "string" and "Loader" names that are imported later.
from panda3d.core import *
import sys
import os
import glob
import struct
import subprocess
import copy
from direct.p3d.FileSpec import FileSpec
from direct.p3d.SeqValue import SeqValue
from direct.p3d.HostInfo import HostInfo
from direct.showbase import Loader
from direct.showbase import AppRunnerGlobal
from direct.showutil import FreezeTool
from direct.directnotify.DirectNotifyGlobal import *
vfs = VirtualFileSystem.getGlobalPtr()
class PackagerError(Exception):
    """Base class for errors raised while building packages."""
    pass

class OutsideOfPackageError(PackagerError):
    """Raised when a package-scoped operation runs with no open package."""
    pass

class ArgumentError(PackagerError):
    """Raised for invalid arguments in pdef commands."""
    pass
class Packager:
notify = directNotify.newCategory("Packager")
class PackFile:
    """A single file to be placed into a package, plus the flags that
    control how it is stored (compressed, extracted, processed, ...).
    Unset flags default from the file extension via the Packager's
    extension lists."""

    def __init__(self, package, filename,
                 newName = None, deleteTemp = False,
                 explicit = False, compress = None, extract = None,
                 text = None, unprocessed = None,
                 executable = None, dependencyDir = None,
                 platformSpecific = None, required = False):
        assert isinstance(filename, Filename)
        self.filename = Filename(filename)
        self.newName = newName
        self.deleteTemp = deleteTemp
        self.explicit = explicit
        self.compress = compress
        self.extract = extract
        self.text = text
        self.unprocessed = unprocessed
        self.executable = executable
        self.dependencyDir = dependencyDir
        self.platformSpecific = platformSpecific
        self.required = required

        if not self.newName:
            self.newName = str(self.filename)

        ext = Filename(self.newName).getExtension()
        if ext == 'pz' or ext == 'gz':
            # Strip off a .pz extension; we can compress files
            # within the Multifile without it.
            filename = Filename(self.newName)
            filename.setExtension('')
            self.newName = str(filename)
            ext = Filename(self.newName).getExtension()
            # A formerly-compressed file defaults to staying compressed.
            if self.compress is None:
                self.compress = True

        packager = package.packager
        if self.compress is None:
            self.compress = (ext not in packager.uncompressibleExtensions and ext not in packager.imageExtensions)

        if self.executable is None:
            self.executable = (ext in packager.executableExtensions)

        if self.executable and self.dependencyDir is None:
            # By default, install executable dependencies in the
            # root directory, which is the one that's added to PATH.
            self.dependencyDir = ''

        if self.extract is None:
            self.extract = self.executable or (ext in packager.extractExtensions)
        if self.platformSpecific is None:
            self.platformSpecific = self.executable or (ext in packager.platformSpecificExtensions)
        if self.unprocessed is None:
            self.unprocessed = self.executable or (ext in packager.unprocessedExtensions)

        if self.executable:
            # Look up the filename along the system PATH, if necessary.
            if not packager.resolveLibrary(self.filename):
                # If it wasn't found, try looking it up under its
                # basename only.  Sometimes a Mac user will copy
                # the library file out of a framework and put that
                # along the PATH, instead of the framework itself.
                basename = Filename(self.filename.getBasename())
                if packager.resolveLibrary(basename):
                    self.filename = basename

        if ext in packager.textExtensions and not self.executable:
            self.filename.setText()
        else:
            self.filename.setBinary()

        # Convert the filename to an unambiguous filename for
        # searching.
        self.filename.makeTrueCase()
        if self.filename.exists() or not self.filename.isLocal():
            self.filename.makeCanonical()

    def isExcluded(self, package):
        """ Returns true if this file should be excluded or
        skipped, false otherwise. """
        if self.newName.lower() in package.skipFilenames:
            return True
        if not self.explicit:
            # Make sure it's not one of our auto-excluded system
            # files.  (But only make this check if this file was
            # not explicitly added.)
            basename = Filename(self.newName).getBasename()
            if not package.packager.caseSensitive:
                basename = basename.lower()
            if basename in package.packager.excludeSystemFiles:
                return True
            for exclude in package.packager.excludeSystemGlobs:
                if exclude.matches(basename):
                    return True
            # Also check if it was explicitly excluded.  As above,
            # omit this check for an explicitly-added file: if you
            # both include and exclude a file, the file is
            # included.
            for exclude in package.excludedFilenames:
                if exclude.matches(self.filename):
                    return True
        # A platform-specific file is implicitly excluded from
        # not-platform-specific packages.
        if self.platformSpecific and package.platformSpecificConfig is False:
            return True
        return False
class ExcludeFilename:
    """A glob pattern for a file the pdef explicitly excluded.  A bare
    basename matches against basenames only; a pattern with a directory
    part matches against the full (canonical) path.

    NOTE(review): the *caseSensitive* parameter is accepted but unused;
    case sensitivity is derived from the target platform instead.
    """

    def __init__(self, packager, filename, caseSensitive):
        self.packager = packager
        self.localOnly = (not filename.getDirname())
        if not self.localOnly:
            filename = Filename(filename)
            filename.makeCanonical()
        self.glob = GlobPattern(str(filename))
        # Windows and Mac filesystems are case-insensitive by default.
        if self.packager.platform.startswith(('win', 'osx')):
            self.glob.setCaseSensitive(False)

    def matches(self, filename):
        target = filename.getBasename() if self.localOnly else str(filename)
        return self.glob.matches(target)
class PackageEntry:
    """ This corresponds to a <package> entry in the contents.xml
    file. """

    def __init__(self):
        # The "seq" value increments automatically with each publish.
        self.packageSeq = SeqValue()
        # The "set_ver" value is optionally specified in the pdef
        # file and does not change unless the user says it does.
        self.packageSetVer = SeqValue()

    def getKey(self):
        """ Returns a tuple used for sorting the PackageEntry
        objects uniquely per package. """
        return (self.packageName, self.platform or "", self.version or "")

    def fromFile(self, packageName, platform, version, solo, perPlatform,
                 installDir, descFilename, importDescFilename):
        """Populate this entry from on-disk desc files."""
        self.packageName = packageName
        self.platform = platform
        self.version = version
        self.solo = solo
        self.perPlatform = perPlatform

        self.descFile = FileSpec()
        self.descFile.fromFile(installDir, descFilename)

        self.importDescFile = None
        if importDescFilename:
            self.importDescFile = FileSpec()
            self.importDescFile.fromFile(installDir, importDescFilename)

    def loadXml(self, xpackage):
        """Populate this entry from a <package> XML element; missing
        solo/per_platform attributes default to 0."""
        self.packageName = xpackage.Attribute('name')
        self.platform = xpackage.Attribute('platform')
        self.version = xpackage.Attribute('version')
        solo = xpackage.Attribute('solo')
        self.solo = int(solo or '0')
        perPlatform = xpackage.Attribute('per_platform')
        self.perPlatform = int(perPlatform or '0')

        self.packageSeq = SeqValue()
        self.packageSeq.loadXml(xpackage, 'seq')
        self.packageSetVer = SeqValue()
        self.packageSetVer.loadXml(xpackage, 'set_ver')

        self.descFile = FileSpec()
        self.descFile.loadXml(xpackage)

        self.importDescFile = None
        ximport = xpackage.FirstChildElement('import')
        if ximport:
            self.importDescFile = FileSpec()
            self.importDescFile.loadXml(ximport)

    def makeXml(self):
        """ Returns a new TiXmlElement. """
        xpackage = TiXmlElement('package')
        xpackage.SetAttribute('name', self.packageName)
        if self.platform:
            xpackage.SetAttribute('platform', self.platform)
        if self.version:
            xpackage.SetAttribute('version', self.version)
        if self.solo:
            xpackage.SetAttribute('solo', '1')
        if self.perPlatform:
            xpackage.SetAttribute('per_platform', '1')

        self.packageSeq.storeXml(xpackage, 'seq')
        self.packageSetVer.storeXml(xpackage, 'set_ver')
        self.descFile.storeXml(xpackage)

        if self.importDescFile:
            ximport = TiXmlElement('import')
            self.importDescFile.storeXml(ximport)
            xpackage.InsertEndChild(ximport)

        return xpackage
class HostEntry:
    """A download host description (<host> element): primary URL,
    optional mirrors, and keyword-mapped alternate hosts."""

    def __init__(self, url = None, downloadUrl = None,
                 descriptiveName = None, hostDir = None,
                 mirrors = None):
        self.url = url
        self.downloadUrl = downloadUrl
        self.descriptiveName = descriptiveName
        self.hostDir = hostDir
        self.mirrors = mirrors or []
        # Maps keyword -> alternate host url (see makeXml).
        self.altHosts = {}

    def loadXml(self, xhost, packager):
        """Populate from a <host> element; nested <alt_host> elements are
        registered with the packager as hosts in their own right."""
        self.url = xhost.Attribute('url')
        self.downloadUrl = xhost.Attribute('download_url')
        self.descriptiveName = xhost.Attribute('descriptive_name')
        self.hostDir = xhost.Attribute('host_dir')
        self.mirrors = []
        xmirror = xhost.FirstChildElement('mirror')
        while xmirror:
            url = xmirror.Attribute('url')
            self.mirrors.append(url)
            xmirror = xmirror.NextSiblingElement('mirror')

        xalthost = xhost.FirstChildElement('alt_host')
        while xalthost:
            url = xalthost.Attribute('url')
            he = packager.addHost(url)
            he.loadXml(xalthost, packager)
            xalthost = xalthost.NextSiblingElement('alt_host')

    def makeXml(self, packager = None):
        """ Returns a new TiXmlElement. """
        xhost = TiXmlElement('host')
        xhost.SetAttribute('url', self.url)
        # Only write download_url when it differs from the main url.
        if self.downloadUrl and self.downloadUrl != self.url:
            xhost.SetAttribute('download_url', self.downloadUrl)
        if self.descriptiveName:
            xhost.SetAttribute('descriptive_name', self.descriptiveName)
        if self.hostDir:
            xhost.SetAttribute('host_dir', self.hostDir)

        for mirror in self.mirrors:
            xmirror = TiXmlElement('mirror')
            xmirror.SetAttribute('url', mirror)
            xhost.InsertEndChild(xmirror)

        if packager:
            # Alternate hosts can only be resolved with a packager.
            altHosts = sorted(self.altHosts.items())
            for keyword, alt in altHosts:
                he = packager.hosts.get(alt, None)
                if he:
                    xalthost = he.makeXml()
                    xalthost.SetValue('alt_host')
                    xalthost.SetAttribute('keyword', keyword)
                    xhost.InsertEndChild(xalthost)

        return xhost
class Package:
""" This is the full information on a particular package we
are constructing. Don't confuse it with PackageEntry, above,
which contains only the information found in the toplevel
contents.xml file."""
def __init__(self, packageName, packager):
    """Initialize an empty package under construction for *packager*."""
    self.packageName = packageName
    self.packager = packager
    self.notify = packager.notify

    # The platform is initially None until we know the file is
    # platform-specific.
    self.platform = None

    # This is always true on modern packages.
    self.perPlatform = True

    # The arch string, though, is pre-loaded from the system
    # arch string, so we can sensibly call otool.
    self.arch = self.packager.arch
    self.version = None
    self.host = None
    self.p3dApplication = False
    self.solo = False
    self.compressionLevel = 0
    self.importedMapsDir = 'imported_maps'
    self.mainModule = None
    self.signParams = []
    self.requires = []

    # This may be set explicitly in the pdef file to a
    # particular sequence value.
    self.packageSetVer = SeqValue()

    # This is the set of config variables assigned to the
    # package.
    self.configs = {}

    # This is the set of files and modules, already included
    # by required packages, that we can skip.
    self.skipFilenames = {}
    self.skipModules = {}

    # This is a list of ExcludeFilename objects, representing
    # the files that have been explicitly excluded.
    self.excludedFilenames = []

    # This is the list of files we will be adding, and a pair
    # of cross references.
    self.files = []
    self.sourceFilenames = {}
    self.targetFilenames = {}

    # This is the set of files and modules that are
    # required and may not be excluded from the package.
    self.requiredFilenames = []
    self.requiredModules = []

    # A list of required packages that were missing.
    self.missingPackages = []

    # This records the current list of modules we have added so
    # far.
    self.freezer = FreezeTool.Freezer(platform = self.packager.platform)
    self.freezer.storePythonSource = self.packager.storePythonSource

    # Map of extensions to files to number (ignored by dir)
    self.ignoredDirFiles = {}
def close(self):
    """ Writes out the contents of the current package.  Returns True
    if the package was constructed successfully, False if one or more
    required files or modules are missing. """
    if not self.p3dApplication and not self.packager.allowPackages:
        message = 'Cannot generate packages without an installDir; use -i'
        raise PackagerError(message)

    if self.ignoredDirFiles:
        # Warn about file types that were silently dropped during dir
        # scans.
        exts = sorted(self.ignoredDirFiles.keys())
        total = sum([x for x in self.ignoredDirFiles.values()])
        self.notify.warning("excluded %s files not marked for inclusion: %s" \
                            % (total, ", ".join(["'" + ext + "'" for ext in exts])))

    if not self.host:
        self.host = self.packager.host

    # Check the version config variable.
    version = self.configs.get('version', None)
    if version is not None:
        self.version = version
        del self.configs['version']

    # Check the platform_specific config variable.  This has
    # only three settings: None (unset), True, or False.
    self.platformSpecificConfig = self.configs.get('platform_specific', None)
    if self.platformSpecificConfig is not None:
        # First, convert it to an int, in case it's "0" or "1".
        try:
            self.platformSpecificConfig = int(self.platformSpecificConfig)
        except ValueError:
            pass
        # Then, make it a bool.
        self.platformSpecificConfig = bool(self.platformSpecificConfig)
        del self.configs['platform_specific']

    # A special case when building the "panda3d" package.  We
    # enforce that the version number matches what we've been
    # compiled with.
    if self.packageName == 'panda3d':
        if self.version is None:
            self.version = PandaSystem.getPackageVersionString()

        if self.version != PandaSystem.getPackageVersionString():
            message = 'mismatched Panda3D version: requested %s, but Panda3D is built as %s' % (self.version, PandaSystem.getPackageVersionString())
            raise PackagerError(message)

        if self.host != PandaSystem.getPackageHostUrl():
            message = 'mismatched Panda3D host: requested %s, but Panda3D is built as %s' % (self.host, PandaSystem.getPackageHostUrl())
            raise PackagerError(message)

    if self.p3dApplication:
        # Default compression level for an app.
        self.compressionLevel = 6

        # Every p3dapp requires panda3d.
        if 'panda3d' not in [p.packageName for p in self.requires]:
            assert not self.packager.currentPackage
            self.packager.currentPackage = self
            self.packager.do_require('panda3d')
            self.packager.currentPackage = None

        # If this flag is set, enable allow_python_dev.
        if self.packager.allowPythonDev:
            self.configs['allow_python_dev'] = True

    if not self.p3dApplication and not self.version:
        # If we don't have an implicit version, inherit the
        # version from the 'panda3d' package on our require
        # list.
        for p2 in self.requires:
            if p2.packageName == 'panda3d' and p2.version:
                self.version = p2.version
                break

    if self.solo:
        result = self.installSolo()
    else:
        result = self.installMultifile()

    if self.p3dApplication:
        allowPythonDev = self.configs.get('allow_python_dev', 0)
        if int(allowPythonDev):
            print("\n*** Generating %s.p3d with allow_python_dev enabled ***\n" % (self.packageName))

    return result
def considerPlatform(self):
    """Mark the package platform-specific when the config or any included
    file demands it, and derive the OSX arch suffix from the platform."""
    specific = self.platformSpecificConfig
    for packFile in self.files:
        if packFile.isExcluded(self):
            # Skip this file.
            continue
        if packFile.platformSpecific:
            specific = True

    # An explicit platform_specific=False config overrides the files.
    if specific and self.platformSpecificConfig is not False:
        if not self.platform:
            self.platform = self.packager.platform

    if self.platform and self.platform.startswith('osx_'):
        # Get the OSX "arch" specification.
        self.arch = self.platform[4:]
        def installMultifile(self):
            """ Installs the package, either as a p3d application, or
            as a true package.  Either is implemented with a
            Multifile.

            Returns True on success, or False if the package could not
            be built (missing dependency packages, required files, or
            required modules).  May raise PackagerError for
            configuration errors (e.g. a missing main_module). """

            # Bail out early if any required package could not be found.
            if self.missingPackages:
                missing = ', '.join([name for name, version in self.missingPackages])
                self.notify.warning("Cannot build package %s due to missing dependencies: %s" % (self.packageName, missing))
                self.cleanup()
                return False

            self.multifile = Multifile()

            # Write the multifile to a temporary filename until we
            # know enough to determine the output filename.
            multifileFilename = Filename.temporary('', self.packageName + '.', '.mf')
            self.multifile.openReadWrite(multifileFilename)

            if self.p3dApplication:
                # p3d files should be tagged to make them executable.
                self.multifile.setHeaderPrefix('#! /usr/bin/env panda3d\n')
            else:
                # Package multifiles might be patched, and therefore
                # don't want to record an internal timestamp, which
                # would make patching less efficient.
                self.multifile.setRecordTimestamp(False)

            # Make sure that all required files are present.
            missing = []
            for file in self.requiredFilenames:
                if file not in self.files or file.isExcluded(self):
                    missing.append(file.filename.getBasename())

            if len(missing) > 0:
                self.notify.warning("Cannot build package %s, missing required files: %r" % (self.packageName, missing))
                self.cleanup()
                return False

            # Accumulators for the package's XML description: extract
            # entries and (kind, sort-key, xml) component entries.
            self.extracts = []
            self.components = []

            # Add the explicit py files that were requested by the
            # pdef file.  These get turned into Python modules.
            for file in self.files:
                if file.isExcluded(self):
                    # Skip this file.
                    continue
                if file.unprocessed:
                    # Unprocessed files get dealt with below.
                    continue

                ext = Filename(file.newName).getExtension()
                if ext == 'dc':
                    # Add the modules named implicitly in the dc file.
                    self.addDcImports(file)
                elif ext == 'py':
                    self.addPyFile(file)

            # Add the main module, if any.
            if not self.mainModule and self.p3dApplication:
                message = 'No main_module specified for application %s' % (self.packageName)
                raise PackagerError(message)
            if self.mainModule:
                moduleName, newName = self.mainModule
                if newName not in self.freezer.modules:
                    self.freezer.addModule(moduleName, newName = newName)

            # Now all module files have been added.  Exclude modules
            # already imported in a required package, and not
            # explicitly included by this package.
            for moduleName, mdef in self.skipModules.items():
                if moduleName not in self.freezer.modules:
                    self.freezer.excludeModule(
                        moduleName, allowChildren = mdef.allowChildren,
                        forbid = mdef.forbid, fromSource = 'skip')

            # Pick up any unfrozen Python files.
            self.freezer.done()

            # But first, make sure that all required modules are present.
            missing = []
            moduleDict = dict(self.freezer.getModuleDefs())
            for module in self.requiredModules:
                if module not in moduleDict:
                    missing.append(module)

            if len(missing) > 0:
                self.notify.warning("Cannot build package %s, missing required modules: %r" % (self.packageName, missing))
                self.cleanup()
                return False

            # OK, we can add it.
            self.freezer.addToMultifile(self.multifile, self.compressionLevel)
            self.addExtensionModules()

            # Add known module names.
            self.moduleNames = {}
            modules = sorted(self.freezer.modules.items())
            for newName, mdef in modules:
                if mdef.guess:
                    # Not really a module.
                    continue
                if mdef.fromSource == 'skip':
                    # This record already appeared in a required
                    # module; don't repeat it now.
                    continue
                if mdef.exclude and mdef.implicit:
                    # Don't bother mentioning implicitly-excluded
                    # (i.e. missing) modules.
                    continue

                #if newName == '__main__':
                #    # Ignore this special case.
                #    continue

                self.moduleNames[newName] = mdef

                # Record an xml <module> entry for the desc file.
                xmodule = TiXmlElement('module')
                xmodule.SetAttribute('name', newName)
                if mdef.exclude:
                    xmodule.SetAttribute('exclude', '1')
                if mdef.forbid:
                    xmodule.SetAttribute('forbid', '1')
                if mdef.exclude and mdef.allowChildren:
                    xmodule.SetAttribute('allowChildren', '1')
                self.components.append(('m', newName.lower(), xmodule))

            # Now look for implicit shared-library dependencies.
            if self.packager.platform.startswith('win'):
                self.__addImplicitDependenciesWindows()
            elif self.packager.platform.startswith('osx'):
                self.__addImplicitDependenciesOSX()
            else:
                self.__addImplicitDependenciesPosix()

            # Now add all the real, non-Python files (except model
            # files).  This will include the extension modules we just
            # discovered above.
            for file in self.files:
                if file.isExcluded(self):
                    # Skip this file.
                    continue
                ext = Filename(file.newName).getExtension()
                if file.unprocessed:
                    # Add an unprocessed file verbatim.
                    self.addComponent(file)
                elif ext == 'py':
                    # Already handled, above.
                    pass
                elif file.isExcluded(self):
                    # Skip this file.  (Redundant: excluded files were
                    # already skipped at the top of the loop.)
                    pass
                elif ext == 'egg' or ext == 'bam':
                    # Skip model files this pass.
                    pass
                elif ext == 'dc':
                    # dc files get a special treatment.
                    self.addDcFile(file)
                elif ext == 'prc':
                    # So do prc files.
                    self.addPrcFile(file)
                else:
                    # Any other file.
                    self.addComponent(file)

            # Now add the model files.  It's important to add these
            # after we have added all of the texture files, so we can
            # determine which textures need to be implicitly pulled
            # in.

            # We walk through a copy of the files list, since we might
            # be adding more files (textures) to this list as we
            # discover them in model files referenced in this list.
            for file in self.files[:]:
                if file.isExcluded(self):
                    # Skip this file.
                    continue
                ext = Filename(file.newName).getExtension()
                if file.unprocessed:
                    # Already handled, above.
                    pass
                elif ext == 'py':
                    # Already handled, above.
                    pass
                elif file.isExcluded(self):
                    # Skip this file.  (Redundant: excluded files were
                    # already skipped at the top of the loop.)
                    pass
                elif ext == 'egg':
                    self.addEggFile(file)
                elif ext == 'bam':
                    self.addBamFile(file)
                else:
                    # Handled above.
                    pass

            # Check to see if we should be platform-specific.
            self.considerPlatform()

            # Now that we've processed all of the component files,
            # (and set our platform if necessary), we can generate the
            # output filename and write the output files.
            self.packageBasename = self.packageName
            packageDir = self.packageName
            if self.version:
                self.packageBasename += '.' + self.version
                packageDir += '/' + self.version
            if self.platform:
                self.packageBasename += '.' + self.platform
                packageDir += '/' + self.platform

            self.packageDesc = self.packageBasename + '.xml'
            self.packageImportDesc = self.packageBasename + '.import.xml'
            if self.p3dApplication:
                # An application is a single .p3d file at the top of
                # the install tree, not inside a package directory.
                self.packageBasename += self.packager.p3dSuffix
                self.packageBasename += '.p3d'
                packageDir = ''
            else:
                self.packageBasename += '.mf'
                packageDir += '/'

            self.packageDir = packageDir
            self.packageFilename = packageDir + self.packageBasename
            self.packageDesc = packageDir + self.packageDesc
            self.packageImportDesc = packageDir + self.packageImportDesc

            print("Generating %s" % (self.packageFilename))

            if self.p3dApplication:
                self.packageFullpath = Filename(self.packager.p3dInstallDir, self.packageFilename)
                self.packageFullpath.makeDir()
                # Embed the p3d_info.xml startup description in the
                # multifile itself.
                self.makeP3dInfo()
            else:
                self.packageFullpath = Filename(self.packager.installDir, self.packageFilename)
                self.packageFullpath.makeDir()

            self.multifile.repack()

            # Also sign the multifile before we close it.
            for certificate, chain, pkey, password in self.signParams:
                self.multifile.addSignature(certificate, chain or '', pkey or '', password or '')

            self.multifile.close()

            # Move the finished multifile from its temporary name into
            # its final location.
            if not multifileFilename.renameTo(self.packageFullpath):
                self.notify.error("Cannot move %s to %s" % (multifileFilename, self.packageFullpath))

            if self.p3dApplication:
                # No patches for an application; just move it into place.
                # Make the application file executable.
                os.chmod(self.packageFullpath.toOsSpecific(), 0o755)
            else:
                # A true package: preserve historic patch data, bump
                # the sequence number, and write the desc files.
                self.readDescFile()
                self.packageSeq += 1
                self.perPlatform = True # always true on modern packages.
                self.compressMultifile()
                self.writeDescFile()
                self.writeImportDescFile()

                # Now that we've written out the desc file, we don't
                # need to keep around the uncompressed archive
                # anymore.
                self.packageFullpath.unlink()

                # Replace or add the entry in the contents.
                pe = Packager.PackageEntry()
                pe.fromFile(self.packageName, self.platform, self.version,
                            False, self.perPlatform, self.packager.installDir,
                            self.packageDesc, self.packageImportDesc)
                pe.packageSeq = self.packageSeq
                pe.packageSetVer = self.packageSetVer

                self.packager.contents[pe.getKey()] = pe
                self.packager.contentsChanged = True

            self.cleanup()
            return True
        def installSolo(self):
            """ Installs the package as a "solo", which means we
            simply copy the one file into the install directory.  This
            is primarily intended for the "coreapi" plugin, which is
            just a single dll and a jpg file; but it can support other
            kinds of similar "solo" packages as well.

            Returns True on success.  Raises PackagerError if packages
            are disallowed or if the package contains more than one
            file.

            NOTE(review): the empty-package case below falls out with
            a bare ``return`` (i.e. None), unlike the True/False
            returns used elsewhere — confirm callers only test
            truthiness. """

            self.considerPlatform()

            self.perPlatform = False  # Not true on "solo" packages.

            # A solo is installed under packageName/platform/version.
            packageDir = self.packageName
            if self.platform:
                packageDir += '/' + self.platform
            if self.version:
                packageDir += '/' + self.version

            if not self.packager.allowPackages:
                message = 'Cannot generate packages without an installDir; use -i'
                raise PackagerError(message)

            installPath = Filename(self.packager.installDir, packageDir)
            # Remove any files already in the installPath.
            origFiles = vfs.scanDirectory(installPath)
            if origFiles:
                for origFile in origFiles:
                    origFile.getFilename().unlink()

            # Collect the non-excluded files; a solo must have exactly one.
            files = []
            for file in self.files:
                if file.isExcluded(self):
                    # Skip this file.
                    continue
                files.append(file)

            if not files:
                # No files, never mind.
                return

            if len(files) != 1:
                raise PackagerError('Multiple files in "solo" package %s' % (self.packageName))

            Filename(installPath, '').makeDir()

            # Copy the single file verbatim into the install directory.
            file = files[0]
            targetPath = Filename(installPath, file.newName)
            targetPath.setBinary()
            file.filename.setBinary()
            if not file.filename.copyTo(targetPath):
                self.notify.warning("Could not copy %s to %s" % (
                    file.filename, targetPath))

            # Replace or add the entry in the contents.
            pe = Packager.PackageEntry()
            pe.fromFile(self.packageName, self.platform, self.version,
                        True, self.perPlatform, self.packager.installDir,
                        Filename(packageDir, file.newName), None)
            # Carry forward the sequence from a previous session, if any.
            peOrig = self.packager.contents.get(pe.getKey(), None)
            if peOrig:
                pe.packageSeq = peOrig.packageSeq + 1
                pe.packageSetVer = peOrig.packageSetVer
            if self.packageSetVer:
                pe.packageSetVer = self.packageSetVer

            self.packager.contents[pe.getKey()] = pe
            self.packager.contentsChanged = True

            # Hack for coreapi package, to preserve backward compatibility
            # with old versions of the runtime, which still called the
            # 32-bit Windows platform "win32".
            if self.packageName == "coreapi" and self.platform == "win_i386":
                pe2 = copy.copy(pe)
                pe2.platform = "win32"
                self.packager.contents[pe2.getKey()] = pe2

            self.cleanup()
            return True
def cleanup(self):
# Now that all the files have been packed, we can delete
# the temporary files.
for file in self.files:
if file.deleteTemp:
file.filename.unlink()
def addFile(self, *args, **kw):
""" Adds the named file to the package. Returns the file
object, or None if it was not added by this call. """
file = Packager.PackFile(self, *args, **kw)
if file.filename in self.sourceFilenames:
# Don't bother, it's already here.
return None
lowerName = file.newName.lower()
if lowerName in self.targetFilenames:
# Another file is already in the same place.
file2 = self.targetFilenames[lowerName]
self.packager.notify.warning(
"%s is shadowing %s" % (file2.filename, file.filename))
return None
self.sourceFilenames[file.filename] = file
if file.required:
self.requiredFilenames.append(file)
if file.text is None and not file.filename.exists():
if not file.isExcluded(self):
self.packager.notify.warning("No such file: %s" % (file.filename))
return None
self.files.append(file)
self.targetFilenames[lowerName] = file
return file
def excludeFile(self, filename):
""" Excludes the named file (or glob pattern) from the
package. """
xfile = Packager.ExcludeFilename(self.packager, filename, self.packager.caseSensitive)
self.excludedFilenames.append(xfile)
def __addImplicitDependenciesWindows(self):
""" Walks through the list of files, looking for dll's and
exe's that might include implicit dependencies on other
dll's and assembly manifests. Tries to determine those
dependencies, and adds them back into the filelist. """
# We walk through the list as we modify it. That's OK,
# because we want to follow the transitive closure of
# dependencies anyway.
for file in self.files:
if not file.executable:
continue
if file.isExcluded(self):
# Skip this file.
continue
if file.filename.getExtension().lower() == "manifest":
filenames = self.__parseManifest(file.filename)
if filenames is None:
self.notify.warning("Unable to determine dependent assemblies from %s" % (file.filename))
continue
else:
tempFile = Filename.temporary('', 'p3d_', '.txt')
command = 'dumpbin /dependents "%s" >"%s"' % (
file.filename.toOsSpecific(),
tempFile.toOsSpecific())
try:
os.system(command)
except:
pass
filenames = None
if tempFile.exists():
filenames = self.__parseDependenciesWindows(tempFile)
tempFile.unlink()
if filenames is None:
self.notify.warning("Unable to determine dependencies from %s" % (file.filename))
filenames = []
# Extract the manifest file so we can figure out
# the dependent assemblies.
tempFile = Filename.temporary('', 'p3d_', '.manifest')
resindex = 2
if file.filename.getExtension().lower() == "exe":
resindex = 1
command = 'mt -inputresource:"%s";#%d -out:"%s" > nul' % (
file.filename.toOsSpecific(),
resindex, tempFile.toOsSpecific())
try:
out = os.system(command)
except:
pass
afilenames = None
if tempFile.exists():
afilenames = self.__parseManifest(tempFile)
tempFile.unlink()
# Also check for an explicit private-assembly
# manifest file on disk.
mfile = file.filename + '.manifest'
if mfile.exists():
if afilenames is None:
afilenames = []
afilenames += self.__parseManifest(mfile)
# Since it's an explicit manifest file, it
# means we should include the manifest
# file itself in the package.
newName = Filename(file.dependencyDir, mfile.getBasename())
self.addFile(mfile, newName = str(newName),
explicit = False, executable = True)
if afilenames is None and out != 31:
self.notify.warning("Unable to determine dependent assemblies from %s" % (file.filename))
if afilenames is not None:
filenames += afilenames
# Attempt to resolve the dependent filename relative
# to the original filename, before we resolve it along
# the PATH.
path = DSearchPath(Filename(file.filename.getDirname()))
for filename in filenames:
filename = Filename.fromOsSpecific(filename)
filename.resolveFilename(path)
filename.makeTrueCase()
newName = Filename(file.dependencyDir, filename.getBasename())
self.addFile(filename, newName = str(newName),
explicit = False, executable = True)
def __parseDependenciesWindows(self, tempFile):
""" Reads the indicated temporary file, the output from
dumpbin /dependents, to determine the list of dll's this
executable file depends on. """
lines = open(tempFile.toOsSpecific(), 'rU').readlines()
li = 0
while li < len(lines):
line = lines[li]
li += 1
if line.find(' has the following dependencies') != -1:
break
if li < len(lines):
line = lines[li]
if line.strip() == '':
# Skip a blank line.
li += 1
# Now we're finding filenames, until the next blank line.
filenames = []
while li < len(lines):
line = lines[li]
li += 1
line = line.strip()
if line == '':
# We're done.
return filenames
filenames.append(line)
# Hmm, we ran out of data. Oh well.
if not filenames:
# Some parse error.
return None
# At least we got some data.
return filenames
def __parseManifest(self, tempFile):
""" Reads the indicated application manifest file, to
determine the list of dependent assemblies this
executable file depends on. """
doc = TiXmlDocument(tempFile.toOsSpecific())
if not doc.LoadFile():
return None
assembly = doc.FirstChildElement("assembly")
if not assembly:
return None
# Pick up assemblies that it depends on
filenames = []
dependency = assembly.FirstChildElement("dependency")
while dependency:
depassembly = dependency.FirstChildElement("dependentAssembly")
if depassembly:
ident = depassembly.FirstChildElement("assemblyIdentity")
if ident:
name = ident.Attribute("name")
if name:
filenames.append(name + ".manifest")
dependency = dependency.NextSiblingElement("dependency")
# Pick up direct dll dependencies that it lists
dfile = assembly.FirstChildElement("file")
while dfile:
name = dfile.Attribute("name")
if name:
filenames.append(name)
dfile = dfile.NextSiblingElement("file")
return filenames
def __locateFrameworkLibrary(self, library):
""" Locates the given library inside its framework on the
default framework paths, and returns its location as Filename. """
# If it's already a full existing path, we
# don't search for it anymore, of course.
if Filename.fromOsSpecific(library).exists():
return Filename.fromOsSpecific(library)
# DSearchPath appears not to work for directories.
fpath = []
fpath.append(Filename("/Library/Frameworks"))
fpath.append(Filename("/System/Library/Frameworks"))
fpath.append(Filename("/Developer/Library/Frameworks"))
fpath.append(Filename(os.path.expanduser("~"), "Library/Frameworks"))
if "HOME" in os.environ:
fpath.append(Filename(os.environ["HOME"], "Library/Frameworks"))
ffilename = Filename(library.split('.framework/', 1)[0].split('/')[-1] + '.framework')
ffilename = Filename(ffilename, library.split('.framework/', 1)[-1])
# Look under the system root first, if supplied.
if self.packager.systemRoot:
for i in fpath:
fw = Filename(self.packager.systemRoot, i)
if Filename(fw, ffilename).exists():
return Filename(fw, ffilename)
for i in fpath:
if Filename(i, ffilename).exists():
return Filename(i, ffilename)
# Not found? Well, let's just return the framework + file
# path, the user will be presented with a warning later.
return ffilename
        def __alterFrameworkDependencies(self, file, framework_deps):
            """ Copies the given library file to a temporary directory,
            and alters the dependencies so that it doesn't contain absolute
            framework dependencies.

            file: the PackFile to modify; its filename attribute is
            redirected to the modified temporary copy as a side effect.
            framework_deps: the install names (as reported by otool -L)
            that reference a framework. """

            if not file.deleteTemp:
                # Copy the file to a temporary location because we
                # don't want to modify the original (there's a big
                # chance that we break it).
                # Copy it every time, because the source file might
                # have changed since last time we ran.
                assert file.filename.exists(), "File doesn't exist: %s" % file.filename
                tmpfile = Filename.temporary('', "p3d_" + file.filename.getBasename())
                tmpfile.setBinary()
                file.filename.copyTo(tmpfile)
                file.filename = tmpfile
                file.deleteTemp = True

            # Alter the dependencies to have a relative path rather than absolute
            for filename in framework_deps:
                loc = self.__locateFrameworkLibrary(filename)
                if loc == file.filename:
                    # The reference is to this very library: rewrite
                    # its own install name (-id) to the bare basename.
                    os.system('install_name_tool -id "%s" "%s"' % (os.path.basename(filename), file.filename.toOsSpecific()))
                elif "/System/" in loc.toOsSpecific():
                    # Let's keep references to system frameworks absolute
                    os.system('install_name_tool -change "%s" "%s" "%s"' % (filename, loc.toOsSpecific(), file.filename.toOsSpecific()))
                else:
                    # Rewrite the reference to the bare basename.
                    os.system('install_name_tool -change "%s" "%s" "%s"' % (filename, os.path.basename(filename), file.filename.toOsSpecific()))
        def __addImplicitDependenciesOSX(self):
            """ Walks through the list of files, looking for dylib's
            and executables that might include implicit dependencies
            on other dylib's.  Tries to determine those dependencies,
            and adds them back into the filelist.

            Uses /usr/bin/otool -L to list each binary's install
            names, so this only works on an OSX host. """

            # We walk through the list as we modify it.  That's OK,
            # because we want to follow the transitive closure of
            # dependencies anyway.
            for file in self.files:
                if not file.executable:
                    continue

                if file.isExcluded(self):
                    # Skip this file.
                    continue

                origFilename = Filename(file.filename)

                # Dump the binary's dependency list to a scratch file.
                tempFile = Filename.temporary('', 'p3d_', '.txt')
                command = '/usr/bin/otool -arch all -L "%s" >"%s"' % (
                    origFilename.toOsSpecific(),
                    tempFile.toOsSpecific())
                if self.arch:
                    # Restrict otool to the package's arch.  otool
                    # spells the 64-bit Intel arch "x86_64".
                    arch = self.arch
                    if arch == "amd64":
                        arch = "x86_64"
                    command = '/usr/bin/otool -arch %s -L "%s" >"%s"' % (
                        arch,
                        origFilename.toOsSpecific(),
                        tempFile.toOsSpecific())
                exitStatus = os.system(command)
                if exitStatus != 0:
                    self.notify.warning('Command failed: %s' % (command))

                filenames = None
                if tempFile.exists():
                    filenames = self.__parseDependenciesOSX(tempFile)
                    tempFile.unlink()
                if filenames is None:
                    self.notify.warning("Unable to determine dependencies from %s" % (origFilename))
                    continue

                # Attempt to resolve the dependent filename relative
                # to the original filename, before we resolve it along
                # the PATH.
                path = DSearchPath(Filename(origFilename.getDirname()))

                # Find the dependencies that are referencing a framework
                framework_deps = []
                for filename in filenames:
                    if '.framework/' in filename:
                        framework_deps.append(filename)

                if len(framework_deps) > 0:
                    # Fixes dependencies like @executable_path/../Library/Frameworks/Cg.framework/Cg
                    self.__alterFrameworkDependencies(file, framework_deps)

                for filename in filenames:
                    if '@loader_path' in filename:
                        # Substitute the binary's own directory, per
                        # the dyld @loader_path convention.
                        filename = filename.replace('@loader_path', origFilename.getDirname())
                    # NOTE(review): this branch is deliberately
                    # disabled ("if False and ..."); framework
                    # references are never resolved here.
                    if False and '.framework/' in filename:
                        # It references a framework, and besides the fact
                        # that those often contain absolute paths, they
                        # aren't commonly on the library path either.
                        filename = self.__locateFrameworkLibrary(filename)
                        filename.setBinary()
                    else:
                        # It's just a normal library - find it on the path.
                        filename = Filename.fromOsSpecific(filename)
                        filename.setBinary()
                        if filename.isLocal():
                            filename.resolveFilename(path)
                        else:
                            # It's a fully-specified filename; look
                            # for it under the system root first.
                            if self.packager.systemRoot:
                                f2 = Filename(self.packager.systemRoot, filename)
                                if f2.exists():
                                    filename = f2

                    # Skip libraries and frameworks in system directory
                    if "/System/" in filename.toOsSpecific():
                        continue

                    newName = Filename(file.dependencyDir, filename.getBasename())
                    self.addFile(filename, newName = str(newName),
                                 explicit = False, executable = True)
def __parseDependenciesOSX(self, tempFile):
""" Reads the indicated temporary file, the output from
otool -L, to determine the list of dylibs this
executable file depends on. """
lines = open(tempFile.toOsSpecific(), 'rU').readlines()
filenames = []
for line in lines:
if not line[0].isspace():
continue
line = line.strip()
s = line.find(' (compatibility')
if s != -1:
line = line[:s]
else:
s = line.find('.dylib')
if s != -1:
line = line[:s + 6]
else:
continue
filenames.append(line)
return filenames
        def __readAndStripELF(self, file):
            """ Reads the indicated ELF binary, and returns a list with
            dependencies.  If it contains data that should be stripped,
            it writes the stripped library to a temporary file.  Returns
            None if the file failed to read (e.g. not an ELF file).

            Specifically, this collects the NEEDED entries from the
            .dynamic section, and rewrites that section in place to
            drop any RPATH/RUNPATH entries, padding with NULL entries
            so the section size is unchanged.

            NOTE(review): the NEEDED names are returned as bytes
            objects (sliced from the string table) — confirm that
            callers handle bytes filenames. """

            # Read the first 16 bytes, which identify the ELF file.
            elf = open(file.filename.toOsSpecific(), 'rb')
            try:
                ident = elf.read(16)
            except IOError:
                elf.close()
                return None

            if not ident.startswith(b"\177ELF"):
                # No elf magic!  Beware of orcs.
                # NOTE(review): the handle is not closed on this path.
                return None

            # Make sure we read in the correct endianness and integer size
            byteOrder = "<>"[ord(ident[5:6]) - 1]
            elfClass = ord(ident[4:5]) - 1 # 0 = 32-bits, 1 = 64-bits
            headerStruct = byteOrder + ("HHIIIIIHHHHHH", "HHIQQQIHHHHHH")[elfClass]
            sectionStruct = byteOrder + ("4xI8xIII8xI", "4xI16xQQI12xQ")[elfClass]
            dynamicStruct = byteOrder + ("iI", "qQ")[elfClass]

            # Unpack the ELF header; shoff/shentsize/shnum locate the
            # section header table.
            type, machine, version, entry, phoff, shoff, flags, ehsize, phentsize, phnum, shentsize, shnum, shstrndx \
              = struct.unpack(headerStruct, elf.read(struct.calcsize(headerStruct)))
            dynamicSections = []
            stringTables = {}

            # Seek to the section header table and find the .dynamic section.
            elf.seek(shoff)
            for i in range(shnum):
                type, offset, size, link, entsize = struct.unpack_from(sectionStruct, elf.read(shentsize))

                if type == 6 and link != 0: # DYNAMIC type, links to string table
                    dynamicSections.append((offset, size, link, entsize))
                    stringTables[link] = None

            # Read the relevant string tables.
            for idx in stringTables.keys():
                elf.seek(shoff + idx * shentsize)
                type, offset, size, link, entsize = struct.unpack_from(sectionStruct, elf.read(shentsize))
                if type != 3: continue
                elf.seek(offset)
                stringTables[idx] = elf.read(size)

            # Loop through the dynamic sections and rewrite it if it has an rpath/runpath.
            rewriteSections = []
            filenames = []
            rpath = []
            for offset, size, link, entsize in dynamicSections:
                elf.seek(offset)
                data = elf.read(entsize)
                tag, val = struct.unpack_from(dynamicStruct, data)

                newSectionData = b""
                startReplace = None
                pad = 0

                # Read tags until we find a NULL tag.
                while tag != 0:
                    if tag == 1: # A NEEDED entry.  Read it from the string table.
                        filenames.append(stringTables[link][val : stringTables[link].find(b'\0', val)])

                    elif tag == 15 or tag == 29:
                        rpath += stringTables[link][val : stringTables[link].find(b'\0', val)].split(b':')
                        # An RPATH or RUNPATH entry.
                        # Remember where the stripping starts, and how
                        # many entries' worth of NULL padding we owe.
                        if not startReplace:
                            startReplace = elf.tell() - entsize
                        if startReplace:
                            pad += entsize

                    elif startReplace is not None:
                        # An entry after the first stripped one; keep it.
                        newSectionData += data

                    data = elf.read(entsize)
                    tag, val = struct.unpack_from(dynamicStruct, data)

                if startReplace is not None:
                    # Append the terminating NULL entry plus padding to
                    # preserve the original section length.
                    newSectionData += data + (b"\0" * pad)
                    rewriteSections.append((startReplace, newSectionData))

            elf.close()

            # No rpaths/runpaths found, so nothing to do any more.
            if len(rewriteSections) == 0:
                return filenames

            # Attempt to resolve any of the directly
            # dependent filenames along the RPATH.
            for f in range(len(filenames)):
                filename = filenames[f]
                for rdir in rpath:
                    if os.path.isfile(os.path.join(rdir, filename)):
                        filenames[f] = os.path.join(rdir, filename)
                        break

            if not file.deleteTemp:
                # Copy the file to a temporary location because we
                # don't want to modify the original (there's a big
                # chance that we break it).
                tmpfile = Filename.temporary('', "p3d_" + file.filename.getBasename())
                tmpfile.setBinary()
                file.filename.copyTo(tmpfile)
                file.filename = tmpfile
                file.deleteTemp = True

            # Open the temporary file and rewrite the dynamic sections.
            elf = open(file.filename.toOsSpecific(), 'r+b')
            for offset, data in rewriteSections:
                elf.seek(offset)
                elf.write(data)
                # NOTE(review): "pad" is left over from the parsing
                # loop above, and the padding was already appended to
                # the section data at that point — this extra write
                # looks redundant; confirm before changing.
                elf.write(b"\0" * pad)
            elf.close()

            return filenames
        def __addImplicitDependenciesPosix(self):
            """ Walks through the list of files, looking for so's
            and executables that might include implicit dependencies
            on other so's.  Tries to determine those dependencies,
            and adds them back into the filelist.

            Prefers reading the ELF .dynamic section directly (which
            also strips rpaths); falls back to running ldd when the
            file cannot be read as ELF. """

            # We walk through the list as we modify it.  That's OK,
            # because we want to follow the transitive closure of
            # dependencies anyway.
            for file in self.files:
                if not file.executable:
                    continue

                if file.isExcluded(self):
                    # Skip this file.
                    continue

                # Check if this is an ELF binary.
                filenames = self.__readAndStripELF(file)

                # If that failed, perhaps ldd will help us.
                if filenames is None:
                    self.notify.warning("Reading ELF library %s failed, using ldd instead" % (file.filename))

                    tempFile = Filename.temporary('', 'p3d_', '.txt')
                    command = 'ldd "%s" >"%s"' % (
                        file.filename.toOsSpecific(),
                        tempFile.toOsSpecific())
                    try:
                        os.system(command)
                    except:
                        pass

                    if tempFile.exists():
                        filenames = self.__parseDependenciesPosix(tempFile)
                        tempFile.unlink()
                    if filenames is None:
                        self.notify.warning("Unable to determine dependencies from %s" % (file.filename))
                        continue

                # Attempt to resolve the dependent filename relative
                # to the original filename, before we resolve it along
                # the PATH.
                path = DSearchPath(Filename(file.filename.getDirname()))

                for filename in filenames:
                    # These vDSO's provided by Linux aren't
                    # supposed to be anywhere on the system.
                    if filename in ["linux-gate.so.1", "linux-vdso.so.1"]:
                        continue

                    filename = Filename.fromOsSpecific(filename)
                    filename.resolveFilename(path)
                    filename.setBinary()

                    newName = Filename(file.dependencyDir, filename.getBasename())
                    self.addFile(filename, newName = str(newName),
                                 explicit = False, executable = True)
def __parseDependenciesPosix(self, tempFile):
""" Reads the indicated temporary file, the output from
ldd, to determine the list of so's this executable file
depends on. """
lines = open(tempFile.toOsSpecific(), 'rU').readlines()
filenames = []
for line in lines:
line = line.strip()
s = line.find(' => ')
if s == -1:
continue
line = line[:s].strip()
filenames.append(line)
return filenames
def addExtensionModules(self):
""" Adds the extension modules detected by the freezer to
the current list of files. """
freezer = self.freezer
for moduleName, filename in freezer.extras:
filename = Filename.fromOsSpecific(filename)
newName = filename.getBasename()
if '.' in moduleName:
newName = '/'.join(moduleName.split('.')[:-1])
newName += '/' + filename.getBasename()
# Sometimes the PYTHONPATH has the wrong case in it.
filename.makeTrueCase()
self.addFile(filename, newName = newName,
explicit = False, extract = True,
executable = True,
platformSpecific = True)
freezer.extras = []
        def makeP3dInfo(self):
            """ Makes the p3d_info.xml file that defines the
            application startup parameters and such, and embeds it in
            the multifile (uncompressed, as 'p3d_info.xml'). """

            doc = TiXmlDocument()
            decl = TiXmlDeclaration("1.0", "utf-8", "")
            doc.InsertEndChild(decl)

            # The root <package> element describes this application.
            xpackage = TiXmlElement('package')
            xpackage.SetAttribute('name', self.packageName)
            if self.platform:
                xpackage.SetAttribute('platform', self.platform)
            if self.version:
                xpackage.SetAttribute('version', self.version)

            # mainModule is a (moduleName, newName) pair; the runtime
            # needs the name as frozen into the package.
            xpackage.SetAttribute('main_module', self.mainModule[1])

            self.__addConfigs(xpackage)

            # Emit a <requires> element for each dependency, and
            # remember which hosts they came from.
            requireHosts = {}
            for package in self.requires:
                xrequires = TiXmlElement('requires')
                xrequires.SetAttribute('name', package.packageName)
                if package.version:
                    xrequires.SetAttribute('version', package.version)
                xrequires.SetAttribute('host', package.host)
                package.packageSeq.storeXml(xrequires, 'seq')
                package.packageSetVer.storeXml(xrequires, 'set_ver')
                requireHosts[package.host] = True
                xpackage.InsertEndChild(xrequires)

            # Describe each referenced host, when we know about it.
            for host in requireHosts.keys():
                he = self.packager.hosts.get(host, None)
                if he:
                    xhost = he.makeXml(packager = self.packager)
                    xpackage.InsertEndChild(xhost)

            self.extracts.sort()
            for name, xextract in self.extracts:
                xpackage.InsertEndChild(xextract)

            doc.InsertEndChild(xpackage)

            # Write the xml file to a temporary file on disk, so we
            # can add it to the multifile.
            filename = Filename.temporary('', 'p3d_', '.xml')

            # This should really be setText() for an xml file, but it
            # doesn't really matter that much since tinyxml can read
            # it either way; and if we use setBinary() it will remain
            # compatible with older versions of the core API that
            # didn't understand the SF_text flag.
            filename.setBinary()

            doc.SaveFile(filename.toOsSpecific())

            # It's important not to compress this file: the core API
            # runtime can't decode compressed subfiles.
            self.multifile.addSubfile('p3d_info.xml', filename, 0)

            self.multifile.flush()
            filename.unlink()
def compressMultifile(self):
""" Compresses the .mf file into an .mf.pz file. """
if self.oldCompressedBasename:
# Remove the previous compressed file first.
compressedPath = Filename(self.packager.installDir, Filename(self.packageDir, self.oldCompressedBasename))
compressedPath.unlink()
newCompressedFilename = '%s.pz' % (self.packageFilename)
# Now build the new version.
compressedPath = Filename(self.packager.installDir, newCompressedFilename)
if not compressFile(self.packageFullpath, compressedPath, 6):
message = 'Unable to write %s' % (compressedPath)
raise PackagerError(message)
        def readDescFile(self):
            """ Reads the existing package.xml file before rewriting
            it.  We need this to preserve the list of patches, and
            similar historic data, between sessions.

            Resets packageSeq, packageSetVer, patchVersion, patches,
            and oldCompressedBasename first, so a missing or
            unparseable desc file leaves them at their defaults. """

            self.packageSeq = SeqValue()
            self.packageSetVer = SeqValue()
            self.patchVersion = None
            self.patches = []

            self.oldCompressedBasename = None

            packageDescFullpath = Filename(self.packager.installDir, self.packageDesc)
            doc = TiXmlDocument(packageDescFullpath.toOsSpecific())
            if not doc.LoadFile():
                # No previous desc file; keep the defaults above.
                return
            xpackage = doc.FirstChildElement('package')
            if not xpackage:
                return

            perPlatform = xpackage.Attribute('per_platform')
            self.perPlatform = int(perPlatform or '0')

            self.packageSeq.loadXml(xpackage, 'seq')
            self.packageSetVer.loadXml(xpackage, 'set_ver')

            # Remember the previous compressed archive, so it can be
            # deleted before the new one is written.
            xcompressed = xpackage.FirstChildElement('compressed_archive')
            if xcompressed:
                compressedFilename = xcompressed.Attribute('filename')
                if compressedFilename:
                    self.oldCompressedBasename = compressedFilename

            # Accept either attribute spelling for the patch version.
            patchVersion = xpackage.Attribute('patch_version')
            if not patchVersion:
                patchVersion = xpackage.Attribute('last_patch_version')
            if patchVersion:
                self.patchVersion = patchVersion

            # Extract the base_version, top_version, and patch
            # entries, if any, and preserve these entries verbatim for
            # the next version.
            xbase = xpackage.FirstChildElement('base_version')
            if xbase:
                self.patches.append(xbase.Clone())
            xtop = xpackage.FirstChildElement('top_version')
            if xtop:
                self.patches.append(xtop.Clone())

            xpatch = xpackage.FirstChildElement('patch')
            while xpatch:
                self.patches.append(xpatch.Clone())
                xpatch = xpatch.NextSiblingElement('patch')
def writeDescFile(self):
    """ Makes the package.xml file that describes the package
    and its contents, for download.

    Writes the package identity (name/platform/version), seq
    values, configs, required packages, the compressed and
    uncompressed archive specs, the preserved patch entries from
    the previous session, and the sorted extract list. """
    packageDescFullpath = Filename(self.packager.installDir, self.packageDesc)
    doc = TiXmlDocument(packageDescFullpath.toOsSpecific())
    decl = TiXmlDeclaration("1.0", "utf-8", "")
    doc.InsertEndChild(decl)
    xpackage = TiXmlElement('package')
    xpackage.SetAttribute('name', self.packageName)
    if self.platform:
        xpackage.SetAttribute('platform', self.platform)
    if self.version:
        xpackage.SetAttribute('version', self.version)
    if self.perPlatform:
        xpackage.SetAttribute('per_platform', '1')
    if self.patchVersion:
        xpackage.SetAttribute('last_patch_version', self.patchVersion)
    self.packageSeq.storeXml(xpackage, 'seq')
    self.packageSetVer.storeXml(xpackage, 'set_ver')
    self.__addConfigs(xpackage)
    # One <requires> entry per required package.
    for package in self.requires:
        xrequires = TiXmlElement('requires')
        xrequires.SetAttribute('name', package.packageName)
        if self.platform and package.platform:
            xrequires.SetAttribute('platform', package.platform)
        if package.version:
            xrequires.SetAttribute('version', package.version)
        package.packageSeq.storeXml(xrequires, 'seq')
        package.packageSetVer.storeXml(xrequires, 'set_ver')
        xrequires.SetAttribute('host', package.host)
        xpackage.InsertEndChild(xrequires)
    xuncompressedArchive = self.getFileSpec(
        'uncompressed_archive', self.packageFullpath,
        self.packageBasename)
    xpackage.InsertEndChild(xuncompressedArchive)
    xcompressedArchive = self.getFileSpec(
        'compressed_archive', self.packageFullpath + '.pz',
        self.packageBasename + '.pz')
    xpackage.InsertEndChild(xcompressedArchive)
    # Copy in the patch entries read from the previous version
    # of the desc file.
    for xpatch in self.patches:
        xpackage.InsertEndChild(xpatch)
    self.extracts.sort()
    for name, xextract in self.extracts:
        xpackage.InsertEndChild(xextract)
    doc.InsertEndChild(xpackage)
    doc.SaveFile()
def __addConfigs(self, xpackage):
    """ Adds the XML config values defined in self.configs to
    the indicated XML element, as attributes of a single
    <config> child element.  Does nothing if no configs are
    defined. """
    if not self.configs:
        return
    xconfig = TiXmlElement('config')
    for key, value in self.configs.items():
        if sys.version_info < (3, 0) and isinstance(value, unicode):
            # Python 2 unicode values must be utf-8 encoded.
            xconfig.SetAttribute(key, value.encode('utf-8'))
        elif isinstance(value, bool):
            # True or False must be encoded as 1 or 0.
            xconfig.SetAttribute(key, str(int(value)))
        else:
            xconfig.SetAttribute(key, str(value))
    xpackage.InsertEndChild(xconfig)
def writeImportDescFile(self):
    """ Makes the package.import.xml file that describes the
    package and its contents, for other packages and
    applications that may wish to "require" this one. """
    packageImportDescFullpath = Filename(self.packager.installDir, self.packageImportDesc)
    doc = TiXmlDocument(packageImportDescFullpath.toOsSpecific())
    decl = TiXmlDeclaration("1.0", "utf-8", "")
    doc.InsertEndChild(decl)
    xpackage = TiXmlElement('package')
    xpackage.SetAttribute('name', self.packageName)
    if self.platform:
        xpackage.SetAttribute('platform', self.platform)
    if self.version:
        xpackage.SetAttribute('version', self.version)
    xpackage.SetAttribute('host', self.host)
    self.packageSeq.storeXml(xpackage, 'seq')
    self.packageSetVer.storeXml(xpackage, 'set_ver')
    # Track every host referenced by this package or its
    # requirements, starting with our own.
    requireHosts = {}
    requireHosts[self.host] = True
    for package in self.requires:
        xrequires = TiXmlElement('requires')
        xrequires.SetAttribute('name', package.packageName)
        if self.platform and package.platform:
            xrequires.SetAttribute('platform', package.platform)
        if package.version:
            xrequires.SetAttribute('version', package.version)
        xrequires.SetAttribute('host', package.host)
        package.packageSeq.storeXml(xrequires, 'seq')
        package.packageSetVer.storeXml(xrequires, 'set_ver')
        requireHosts[package.host] = True
        xpackage.InsertEndChild(xrequires)
    # Make sure we also write the full host descriptions for
    # any hosts we reference, so we can find these guys later.
    for host in requireHosts.keys():
        he = self.packager.hosts.get(host, None)
        if he:
            xhost = he.makeXml(packager = self.packager)
            xpackage.InsertEndChild(xhost)
    self.components.sort()
    for type, name, xcomponent in self.components:
        xpackage.InsertEndChild(xcomponent)
    doc.InsertEndChild(xpackage)
    doc.SaveFile()
def readImportDescFile(self, filename):
    """ Reads the import desc file.  Returns True on success,
    False on failure.

    On success, fills in the package identity, seq values, the
    resolved list of required packages, self.targetFilenames
    (lowercased component filenames), and self.moduleNames
    (frozen Python module definitions). """
    self.packageSeq = SeqValue()
    self.packageSetVer = SeqValue()
    doc = TiXmlDocument(filename.toOsSpecific())
    if not doc.LoadFile():
        return False
    xpackage = doc.FirstChildElement('package')
    if not xpackage:
        return False
    self.packageName = xpackage.Attribute('name')
    self.platform = xpackage.Attribute('platform')
    self.version = xpackage.Attribute('version')
    self.host = xpackage.Attribute('host')
    # Get any new host descriptors.
    xhost = xpackage.FirstChildElement('host')
    while xhost:
        he = self.packager.HostEntry()
        he.loadXml(xhost, self)
        if he.url not in self.packager.hosts:
            self.packager.hosts[he.url] = he
        xhost = xhost.NextSiblingElement('host')
    self.packageSeq.loadXml(xpackage, 'seq')
    self.packageSetVer.loadXml(xpackage, 'set_ver')
    # Resolve each <requires> entry to an actual Package object.
    self.requires = []
    xrequires = xpackage.FirstChildElement('requires')
    while xrequires:
        packageName = xrequires.Attribute('name')
        platform = xrequires.Attribute('platform')
        version = xrequires.Attribute('version')
        host = xrequires.Attribute('host')
        if packageName:
            package = self.packager.findPackage(
                packageName, platform = platform, version = version,
                host = host, requires = self.requires)
            if package:
                self.requires.append(package)
        xrequires = xrequires.NextSiblingElement('requires')
    # Record the component filenames (lowercased, used as a set).
    self.targetFilenames = {}
    xcomponent = xpackage.FirstChildElement('component')
    while xcomponent:
        name = xcomponent.Attribute('filename')
        if name:
            self.targetFilenames[name.lower()] = True
        xcomponent = xcomponent.NextSiblingElement('component')
    # Record the frozen modules with their exclude/forbid flags.
    self.moduleNames = {}
    xmodule = xpackage.FirstChildElement('module')
    while xmodule:
        moduleName = xmodule.Attribute('name')
        exclude = int(xmodule.Attribute('exclude') or 0)
        forbid = int(xmodule.Attribute('forbid') or 0)
        allowChildren = int(xmodule.Attribute('allowChildren') or 0)
        if moduleName:
            mdef = FreezeTool.Freezer.ModuleDef(
                moduleName, exclude = exclude, forbid = forbid,
                allowChildren = allowChildren)
            self.moduleNames[moduleName] = mdef
        xmodule = xmodule.NextSiblingElement('module')
    return True
def getFileSpec(self, element, pathname, newName):
    """ Returns an XML element (of the indicated tag name, e.g.
    'extract') carrying the file information -- target filename,
    size, timestamp, and md5 hash -- for the indicated file on
    disk. """
    digester = HashVal()
    digester.hashFile(pathname)
    xspec = TiXmlElement(element)
    xspec.SetAttribute('filename', newName)
    xspec.SetAttribute('size', str(pathname.getFileSize()))
    xspec.SetAttribute('timestamp', str(pathname.getTimestamp()))
    xspec.SetAttribute('hash', digester.asHex())
    return xspec
def addPyFile(self, file):
    """ Adds the indicated python file, identified by filename
    instead of by module name, to the package.

    The filename is converted back to a dotted module name on
    the assumption that all Python files within the package are
    rooted at the top of the package.  Already-known modules are
    skipped; orphan modules (no known parent package) raise or
    warn depending on whether the file was explicitly added. """
    stem = file.newName.rsplit('.', 1)[0]
    moduleName = stem.replace("/", ".")
    if moduleName.endswith('.__init__'):
        # A package's __init__ is named for the package itself.
        moduleName = moduleName.rsplit('.', 1)[0]
    if moduleName in self.freezer.modules:
        # This Python file is already known; nothing to do.
        return
    # Walk up to the top-most ancestor package and make sure it
    # is actually known to the freezer.
    ancestor = moduleName
    while '.' in ancestor:
        ancestor = ancestor.rsplit('.', 1)[0]
    if ancestor not in self.freezer.modules:
        message = 'Cannot add Python file %s; not in package' % (file.newName)
        if file.required or file.explicit:
            raise Exception(message)
        self.notify.warning(message)
        return
    if file.text:
        self.freezer.addModule(moduleName, filename = file.filename, text = file.text)
    else:
        self.freezer.addModule(moduleName, filename = file.filename)
def addEggFile(self, file):
    """ Adds an egg file to the package by loading it and
    storing it pre-compiled as a bam file. """
    model = self.packager.loader.loadModel(file.filename, self.packager.loaderOptions)
    if not model:
        raise Exception('Could not read egg file %s' % (file.filename))
    # Store it under the same name, but with a .bam extension.
    bamName = Filename(file.newName)
    bamName.setExtension('bam')
    self.addNode(model.node(), file.filename, str(bamName))
def addBamFile(self, file):
    """ Adds a bam file to the package.  The file is loaded so
    its textures can be massaged (copied into the multifile or
    embedded) before it is re-serialized via addNode(). """
    reader = BamFile()
    if not reader.openRead(file.filename):
        raise Exception('Could not read bam file %s' % (file.filename))
    reader.getReader().setLoaderOptions(self.packager.loaderOptions)
    if not reader.resolve():
        raise Exception('Could not resolve bam file %s' % (file.filename))
    root = reader.readNode()
    if not root:
        raise Exception('Not a model file: %s' % (file.filename))
    self.addNode(root, file.filename, file.newName)
def addNode(self, node, filename, newName):
    """ Converts the indicated node to a bam stream, and adds the
    bam file to the multifile under the indicated newName.

    Side effects: referenced textures may have their filenames
    rewritten to in-multifile paths (or cleared, forcing raw-data
    embedding), and the node's children are detached after
    writing to release memory. """
    # If the Multifile already has a file by this name, don't
    # bother adding it again.
    if self.multifile.findSubfile(newName) >= 0:
        return
    # Be sure to import all of the referenced textures, and tell
    # them their new location within the multifile.
    for tex in NodePath(node).findAllTextures():
        if not tex.hasFullpath() and tex.hasRamImage():
            # We need to store this texture as a raw-data image.
            # Clear the newName so this will happen
            # automatically.
            tex.clearFilename()
            tex.clearAlphaFilename()
        else:
            # We can store this texture as a file reference to its
            # image.  Copy the file into our multifile, and rename
            # its reference in the texture.
            if tex.hasFilename():
                tex.setFilename(self.addFoundTexture(tex.getFullpath()))
            if tex.hasAlphaFilename():
                tex.setAlphaFilename(self.addFoundTexture(tex.getAlphaFullpath()))
    # Now generate an in-memory bam file.  Tell the bam writer to
    # keep the textures referenced by their in-multifile path.
    bamFile = BamFile()
    stream = StringStream()
    bamFile.openWrite(stream)
    bamFile.getWriter().setFileTextureMode(bamFile.BTMUnchanged)
    bamFile.writeObject(node)
    bamFile.close()
    # Clean the node out of memory.
    node.removeAllChildren()
    # Now we have an in-memory bam file.  Rewind to the start
    # before handing the stream to the multifile.
    stream.seekg(0)
    self.multifile.addSubfile(newName, stream, self.compressionLevel)
    # Flush it so the data gets written to disk immediately, so we
    # don't have to keep it around in ram.
    self.multifile.flush()
    xcomponent = TiXmlElement('component')
    xcomponent.SetAttribute('filename', newName)
    self.components.append(('c', newName.lower(), xcomponent))
def addFoundTexture(self, filename):
    """ Adds the newly-discovered texture to the output, if it has
    not already been included.  Returns the new name within the
    package tree. """
    filename = Filename(filename)
    filename.makeCanonical()
    existing = self.sourceFilenames.get(filename, None)
    if existing:
        # Never mind, it's already on the list.
        return existing.newName
    # We have to copy the image into the plugin tree somewhere;
    # pick a name under the imported-maps directory that doesn't
    # collide with anything already in the package.
    newName = self.importedMapsDir + '/' + filename.getBasename()
    serial = 0
    while newName.lower() in self.targetFilenames:
        serial += 1
        newName = '%s/%s_%s.%s' % (
            self.importedMapsDir, filename.getBasenameWoExtension(),
            serial, filename.getExtension())
    added = self.addFile(
        filename, newName = newName, explicit = False,
        compress = False)
    if added:
        # If we added the file in this pass, then also
        # immediately add it to the multifile (because we
        # won't be visiting the files list again).
        self.addComponent(added)
    return newName
def addDcFile(self, file):
    """ Adds a dc file to the archive.  A dc file gets its
    internal comments and parameter names stripped out of the
    final result automatically.  This is as close as we can
    come to "compiling" a dc file, since all of the remaining
    symbols are meaningful at runtime. """
    from panda3d.direct import DCFile
    # Parse the source dc file.
    parsed = DCFile()
    if not parsed.read(file.filename):
        self.notify.error("Unable to parse %s." % (file.filename))
    # Re-emit it in brief form (no comments or parameter names).
    out = StringStream()
    if not parsed.write(out, True):
        self.notify.error("Unable to write %s." % (file.filename))
    file.text = out.getData()
    self.addComponent(file)
def addDcImports(self, file):
    """ Adds the Python modules named by the indicated dc
    file to the freezer.

    Import entries may carry slash-separated suffixes (e.g.
    "module/AI/OV"); only the suffixes listed in
    self.packager.dcClientSuffixes are honored -- the others
    appear to be server-side variants that the client build
    skips (TODO confirm). """
    from panda3d.direct import DCFile
    dcFile = DCFile()
    if not dcFile.read(file.filename):
        self.notify.error("Unable to parse %s." % (file.filename))
    for n in range(dcFile.getNumImportModules()):
        moduleName = dcFile.getImportModule(n)
        moduleSuffixes = []
        if '/' in moduleName:
            # Split "name/SUFFIX/SUFFIX..." into the base module
            # name and its list of suffixed variants.
            moduleName, suffixes = moduleName.split('/', 1)
            moduleSuffixes = suffixes.split('/')
        self.freezer.addModule(moduleName)
        for suffix in self.packager.dcClientSuffixes:
            if suffix in moduleSuffixes:
                self.freezer.addModule(moduleName + suffix)
        for i in range(dcFile.getNumImportSymbols(n)):
            symbolName = dcFile.getImportSymbol(n, i)
            symbolSuffixes = []
            if '/' in symbolName:
                symbolName, suffixes = symbolName.split('/', 1)
                symbolSuffixes = suffixes.split('/')
            # "from moduleName import symbolName".
            # Maybe this symbol is itself a module; if that's
            # the case, we need to add it to the list also.
            self.freezer.addModule('%s.%s' % (moduleName, symbolName),
                                   implicit = True)
            for suffix in self.packager.dcClientSuffixes:
                if suffix in symbolSuffixes:
                    self.freezer.addModule('%s.%s%s' % (moduleName, symbolName, suffix),
                                           implicit = True)
def addPrcFile(self, file):
    """ Adds a prc file to the archive.  Like the dc file,
    this strips comments and such before adding.  It's also
    possible to set prcEncryptionKey and/or prcSignCommand to
    further manipulate prc files during processing.

    Side effects: may rename file.newName from .prc to .pre
    when encrypting, and replaces file.filename with a
    temporary file marked deleteTemp for later cleanup. """
    # First, read it in.
    if file.text:
        textLines = file.text.split('\n')
    else:
        # The 'U' (universal-newlines) open flag was removed in
        # Python 3.11; plain 'r' already translates newlines on
        # Python 3, so only use 'rU' on Python 2.
        mode = 'rU' if sys.version_info < (3, 0) else 'r'
        with open(file.filename.toOsSpecific(), mode) as source:
            textLines = source.readlines()
    # Then write it out again, without the comments.
    tempFilename = Filename.temporary('', 'p3d_', '.prc')
    tempFilename.setBinary()  # Binary is more reliable for signing.
    # Close the handle deterministically so the data is flushed
    # before we sign/encrypt the file below.
    with open(tempFilename.toOsSpecific(), 'w') as temp:
        for line in textLines:
            line = line.strip()
            if line and line[0] != '#':
                # Write the line out only if it's not a comment.
                temp.write(line + '\n')
    if self.packager.prcSignCommand:
        # Now sign the file.
        command = '%s -n "%s"' % (
            self.packager.prcSignCommand, tempFilename.toOsSpecific())
        self.notify.info(command)
        exitStatus = os.system(command)
        if exitStatus != 0:
            self.notify.error('Command failed: %s' % (command))
    if self.packager.prcEncryptionKey:
        # And now encrypt it.
        if file.newName.endswith('.prc'):
            # Change .prc -> .pre
            file.newName = file.newName[:-1] + 'e'
        preFilename = Filename.temporary('', 'p3d_', '.pre')
        preFilename.setBinary()
        tempFilename.setText()
        encryptFile(tempFilename, preFilename, self.packager.prcEncryptionKey)
        tempFilename.unlink()
        tempFilename = preFilename
    if file.deleteTemp:
        file.filename.unlink()
    file.filename = tempFilename
    file.text = None
    file.deleteTemp = True
    self.addComponent(file)
def addComponent(self, file):
    """ Adds the indicated file to the multifile as a component,
    and records a <component> XML entry for it (plus an
    <extract> entry, with size/timestamp/hash, if the file must
    be extracted to disk at runtime). """
    compressionLevel = 0
    if file.compress:
        compressionLevel = self.compressionLevel
    if file.text:
        # In-memory data: add it from a stream.
        stream = StringStream(file.text)
        self.multifile.addSubfile(file.newName, stream, compressionLevel)
        self.multifile.flush()
    elif file.executable and self.arch:
        if not self.__addOsxExecutable(file):
            return
    else:
        # Copy an ordinary file into the multifile.
        self.multifile.addSubfile(file.newName, file.filename, compressionLevel)
    if file.extract:
        if file.text:
            # Better write it to a temporary file, so we can
            # get its hash.  Use a context manager so the handle
            # is closed (and the data flushed) before we hash and
            # unlink the file -- the original leaked the handle.
            tfile = Filename.temporary('', 'p3d_')
            with open(tfile.toOsSpecific(), 'wb') as f:
                f.write(file.text)
            xextract = self.getFileSpec('extract', tfile, file.newName)
            tfile.unlink()
        else:
            # The file data exists on disk already.
            xextract = self.getFileSpec('extract', file.filename, file.newName)
        self.extracts.append((file.newName.lower(), xextract))
    xcomponent = TiXmlElement('component')
    xcomponent.SetAttribute('filename', file.newName)
    self.components.append(('c', file.newName.lower(), xcomponent))
def __addOsxExecutable(self, file):
    """ Adds an executable or shared library to the multifile,
    with respect to OSX's fat-binary features.  Returns true
    on success (the file was added), false on failure (the file
    doesn't support the requested architecture and was omitted). """
    compressionLevel = 0
    if file.compress:
        compressionLevel = self.compressionLevel
    # If we're on OSX and adding only files for a
    # particular architecture, use lipo to strip out the
    # part of the file for that architecture.
    arch = self.arch
    if arch == "amd64":
        # lipo spells this architecture "x86_64".
        arch = "x86_64"
    # First, we need to verify that it is in fact a
    # universal binary.
    tfile = Filename.temporary('', 'p3d_')
    tfile.setBinary()
    command = '/usr/bin/lipo -info "%s" >"%s"' % (
        file.filename.toOsSpecific(),
        tfile.toOsSpecific())
    exitStatus = os.system(command)
    if exitStatus != 0:
        self.notify.warning("Not an executable file: %s" % (file.filename))
        # Just add it anyway.
        file.filename.setBinary()
        self.multifile.addSubfile(file.newName, file.filename, compressionLevel)
        return True
    # The lipo command succeeded, so it really is an
    # executable file.  Parse the lipo output to figure out
    # which architectures the file supports.  Close the handle
    # deterministically before unlinking (the original leaked it).
    arches = []
    with open(tfile.toOsSpecific(), 'r') as lipoFile:
        lipoData = lipoFile.read()
    tfile.unlink()
    if ':' in lipoData:
        arches = lipoData.rsplit(':', 1)[1]
        arches = arches.split()
    if arches == [arch]:
        # The file only contains the one architecture that
        # we want anyway.
        file.filename.setBinary()
        self.multifile.addSubfile(file.newName, file.filename, compressionLevel)
        return True
    if arch not in arches:
        # The file doesn't support the architecture that we
        # want at all.  Omit the file.
        self.notify.warning("%s doesn't support architecture %s" % (
            file.filename, self.arch))
        return False
    # The file contains multiple architectures.  Get
    # out just the one we want.
    command = '/usr/bin/lipo -thin %s -output "%s" "%s"' % (
        arch, tfile.toOsSpecific(),
        file.filename.toOsSpecific())
    exitStatus = os.system(command)
    if exitStatus != 0:
        self.notify.error('Command failed: %s' % (command))
    self.multifile.addSubfile(file.newName, tfile, compressionLevel)
    if file.deleteTemp:
        file.filename.unlink()
    file.filename = tfile
    file.deleteTemp = True
    return True
def requirePackage(self, package):
    """ Indicates a dependency on the given package.  This
    also implicitly requires all of the package's requirements
    as well (though this transitive requirement happens at
    runtime, not here at build time). """
    if package in self.requires:
        return
    self.requires.append(package)
    # Files already provided by the required package need not be
    # duplicated here, unless their extension allows coexistence.
    for lowerName in package.targetFilenames:
        extension = Filename(lowerName).getExtension()
        if extension not in self.packager.nonuniqueExtensions:
            self.skipFilenames[lowerName] = True
    # Likewise, skip modules the required package already freezes.
    for moduleName, mdef in package.moduleNames.items():
        if not mdef.exclude:
            self.skipModules[moduleName] = mdef
# Packager constructor
def __init__(self, platform = None):
    """ Creates the Packager and initializes all of its
    configuration settings to their defaults.  The caller may
    adjust these settings before calling any of the command
    methods.

    platform: optional platform string; defaults to the current
    platform as reported by PandaSystem (see setPlatform). """
    # The platform string.
    self.setPlatform(platform)
    # This should be set to a Filename.
    self.installDir = None
    # If specified, this is a directory to search first for any
    # library references, before searching the system.
    # Particularly useful on OSX to reference the universal SDK.
    self.systemRoot = None
    # Set this true to treat setHost() the same as addHost(), thus
    # ignoring any request to specify a particular download host,
    # e.g. for testing and development.
    self.ignoreSetHost = False
    # Set this to true to verbosely log files ignored by dir().
    self.verbosePrint = False
    # This will be appended to the basename of any .p3d package,
    # before the .p3d extension.
    self.p3dSuffix = ''
    # The download URL at which these packages will eventually be
    # hosted.
    self.hosts = {}
    self.host = PandaSystem.getPackageHostUrl()
    self.addHost(self.host)
    # This will be used when we're not compiling in the packaged
    # environment.
    self.__hostInfos = {}
    self.http = HTTPClient.getGlobalPtr()
    # The maximum amount of time a client should cache the
    # contents.xml before re-querying the server, in seconds.
    self.maxAge = 0
    # The contents seq: a tuple of integers, representing the
    # current seq value.  The contents seq generally increments
    # with each modification to the contents.xml file.  There is
    # also a package seq for each package, which generally
    # increments with each modification to the package.
    # The contents seq and package seq are used primarily for
    # documentation purposes, to note when a new version is
    # released.  The package seq value can also be used to verify
    # that the contents.xml, desc.xml, and desc.import.xml files
    # were all built at the same time.
    # Although the package seqs are used at runtime to verify that
    # the latest contents.xml file has been downloaded, they are
    # not otherwise used at runtime, and they are not binding on
    # the download version.  The md5 hash, not the package seq, is
    # actually used to differentiate different download versions.
    self.contentsSeq = SeqValue()
    # A search list for previously-built local packages.
    # We use a bit of caution to read the Filenames out of the
    # config variable.  Since cvar.getDirectories() returns a list
    # of references to Filename objects stored within the config
    # variable itself, we have to make a copy of each Filename
    # returned, so they will persist beyond the lifespan of the
    # config variable.
    cvar = ConfigVariableSearchPath('pdef-path')
    self.installSearch = list(map(Filename, cvar.getDirectories()))
    # This is where we cache the location of libraries.
    self.libraryCache = {}
    # The system PATH, for searching dll's and exe's.
    self.executablePath = DSearchPath()
    # By convention, we include sys.path at the front of
    # self.executablePath, mainly to aid makepanda when building
    # an rtdist build.
    for dirname in sys.path:
        self.executablePath.appendDirectory(Filename.fromOsSpecific(dirname))
    # Now add the actual system search path.
    if self.platform.startswith('win'):
        self.addWindowsSearchPath(self.executablePath, "PATH")
    else:
        if self.platform.startswith('osx'):
            self.addPosixSearchPath(self.executablePath, "DYLD_LIBRARY_PATH")
        self.addPosixSearchPath(self.executablePath, "LD_LIBRARY_PATH")
        self.addPosixSearchPath(self.executablePath, "PATH")
        if self.platform.startswith('linux'):
            # It used to be okay to just add some common paths on Linux.
            # But nowadays, each distribution has their own convention for
            # where they put their libraries.  Instead, we query the ldconfig
            # cache, which contains the location of all libraries.
            if not self.loadLdconfigCache():
                # Ugh, failure.  All that remains is to guess.  This should
                # work for the most common Debian configurations.
                multiarchDir = "/lib/%s-linux-gnu" % (os.uname()[4])
                if os.path.isdir(multiarchDir):
                    self.executablePath.appendDirectory(multiarchDir)
                if os.path.isdir("/usr/" + multiarchDir):
                    self.executablePath.appendDirectory("/usr/" + multiarchDir)
        else:
            # FreeBSD, or some other system that still makes sense.
            self.executablePath.appendDirectory('/lib')
            self.executablePath.appendDirectory('/usr/lib')
            self.executablePath.appendDirectory('/usr/local/lib')
            if self.platform.startswith('freebsd') and os.uname()[1] == "pcbsd":
                self.executablePath.appendDirectory('/usr/PCBSD/local/lib')
    # Set this flag true to automatically add allow_python_dev to
    # any applications.
    self.allowPythonDev = False
    # Set this flag to store the original Python source files,
    # without compiling them to .pyc or .pyo.
    self.storePythonSource = False
    # Fill this with a list of (certificate, chain, pkey,
    # password) tuples to automatically sign each p3d file
    # generated.
    self.signParams = []
    # Optional signing and encrypting features.
    self.encryptionKey = None
    self.prcEncryptionKey = None
    self.prcSignCommand = None
    # This is a list of filename extensions and/or basenames that
    # indicate files that should be encrypted within the
    # multifile.  This provides obfuscation only, not real
    # security, since the decryption key must be part of the
    # client and is therefore readily available to any hacker.
    # Not only is this feature useless, but using it also
    # increases the size of your patchfiles, since encrypted files
    # can't really be patched.  But it's here if you really want
    # it.  ** Note: Actually, this isn't implemented yet.
    #self.encryptExtensions = []
    #self.encryptFiles = []
    # This is the list of DC import suffixes that should be
    # available to the client.  Other suffixes, like AI and UD,
    # are server-side only and should be ignored by the Scrubber.
    self.dcClientSuffixes = ['OV']
    # Is this file system case-sensitive?
    self.caseSensitive = True
    if self.platform.startswith('win'):
        self.caseSensitive = False
    elif self.platform.startswith('osx'):
        self.caseSensitive = False
    # Get the list of filename extensions that are recognized as
    # image files.
    self.imageExtensions = []
    for type in PNMFileTypeRegistry.getGlobalPtr().getTypes():
        self.imageExtensions += type.getExtensions()
    # Other useful extensions.  The .pz extension is implicitly
    # stripped.
    # Model files.
    self.modelExtensions = [ 'egg', 'bam' ]
    # Text files that are copied (and compressed) to the package
    # with end-of-line conversion.
    self.textExtensions = [ 'prc', 'ptf', 'txt', 'cg', 'sha', 'dc', 'xml' ]
    # Binary files that are copied (and compressed) without
    # processing.
    self.binaryExtensions = [ 'ttf', 'TTF', 'mid', 'ico', 'cur' ]
    # Files that can have an existence in multiple different
    # packages simultaneously without conflict.
    self.nonuniqueExtensions = [ 'prc' ]
    # Files that represent an executable or shared library.
    if self.platform.startswith('win'):
        self.executableExtensions = [ 'dll', 'pyd', 'exe' ]
    elif self.platform.startswith('osx'):
        self.executableExtensions = [ 'so', 'dylib' ]
    else:
        self.executableExtensions = [ 'so' ]
    # Files that represent a Windows "manifest" file.  These files
    # must be explicitly extracted to disk so the OS can find
    # them.
    if self.platform.startswith('win'):
        self.manifestExtensions = [ 'manifest' ]
    else:
        self.manifestExtensions = [ ]
    # Extensions that are automatically remapped by convention.
    self.remapExtensions = {}
    if self.platform.startswith('win'):
        pass
    elif self.platform.startswith('osx'):
        self.remapExtensions = {
            'dll' : 'dylib',
            'pyd' : 'so',
            'exe' : ''
            }
    else:
        self.remapExtensions = {
            'dll' : 'so',
            'pyd' : 'so',
            'exe' : ''
            }
    # Files that should be extracted to disk.
    self.extractExtensions = self.executableExtensions[:] + self.manifestExtensions[:] + [ 'ico', 'cur' ]
    # Files that indicate a platform dependency.
    self.platformSpecificExtensions = self.executableExtensions[:]
    # Binary files that are considered uncompressible, and are
    # copied without compression.
    self.uncompressibleExtensions = [ 'mp3', 'ogg', 'ogv', 'wav', 'rml', 'rcss', 'otf' ]
    # wav files are compressible, but p3openal_audio won't load
    # them compressed.
    # rml, rcss and otf files must be added here because
    # libRocket wants to be able to seek in these files.
    # Files which are not to be processed further, but which
    # should be added exactly byte-for-byte as they are.
    self.unprocessedExtensions = []
    # Files for which warnings should be suppressed when they are
    # not handled by dir()
    self.suppressWarningForExtensions = ['', 'pyc', 'pyo',
                                         'p3d', 'pdef',
                                         'c', 'C', 'cxx', 'cpp', 'h', 'H',
                                         'hpp', 'pp', 'I', 'pem', 'p12', 'crt',
                                         'o', 'obj', 'a', 'lib', 'bc', 'll']
    # System files that should never be packaged.  For
    # case-insensitive filesystems (like Windows and OSX), put the
    # lowercase filename here.  Case-sensitive filesystems should
    # use the correct case.
    self.excludeSystemFiles = [
        'kernel32.dll', 'user32.dll', 'wsock32.dll', 'ws2_32.dll',
        'advapi32.dll', 'opengl32.dll', 'glu32.dll', 'gdi32.dll',
        'shell32.dll', 'ntdll.dll', 'ws2help.dll', 'rpcrt4.dll',
        'imm32.dll', 'ddraw.dll', 'shlwapi.dll', 'secur32.dll',
        'dciman32.dll', 'comdlg32.dll', 'comctl32.dll', 'ole32.dll',
        'oleaut32.dll', 'gdiplus.dll', 'winmm.dll', 'iphlpapi.dll',
        'msvcrt.dll', 'kernelbase.dll', 'msimg32.dll', 'msacm32.dll',
        'libsystem.b.dylib', 'libmathcommon.a.dylib', 'libmx.a.dylib',
        'libstdc++.6.dylib', 'libobjc.a.dylib', 'libauto.dylib',
        ]
    # As above, but with filename globbing to catch a range of
    # filenames.
    self.excludeSystemGlobs = [
        GlobPattern('d3dx9_*.dll'),
        GlobPattern('api-ms-win-*.dll'),
        GlobPattern('libGL.so*'),
        GlobPattern('libGLU.so*'),
        GlobPattern('libGLcore.so*'),
        GlobPattern('libGLES*.so*'),
        GlobPattern('libEGL.so*'),
        GlobPattern('libX11.so*'),
        GlobPattern('libXau.so*'),
        GlobPattern('libXdmcp.so*'),
        GlobPattern('libxcb*.so*'),
        GlobPattern('libc.so*'),
        GlobPattern('libgcc_s.so*'),
        GlobPattern('libdl.so*'),
        GlobPattern('libm.so*'),
        GlobPattern('libnvidia*.so*'),
        GlobPattern('libpthread.so*'),
        GlobPattern('libthr.so*'),
        GlobPattern('ld-linux.so*'),
        GlobPattern('ld-linux-*.so*'),
        GlobPattern('librt.so*'),
        ]
    # A Loader for loading models.
    self.loader = Loader.Loader(self)
    self.sfxManagerList = None
    self.musicManager = None
    # These options will be used when loading models and textures.  By
    # default we don't load textures beyond the header and don't store
    # models in the RAM cache in order to conserve on memory usage.
    opts = LoaderOptions()
    opts.setFlags(opts.getFlags() | LoaderOptions.LFNoRamCache)
    opts.setTextureFlags(opts.getTextureFlags() & ~LoaderOptions.TFPreload)
    self.loaderOptions = opts
    # This is filled in during readPackageDef().
    self.packageList = []
    # A table of all known packages by name.
    self.packages = {}
    # A list of PackageEntry objects read from the contents.xml
    # file.
    self.contents = {}
def loadLdconfigCache(self):
    """ On GNU/Linux, runs ldconfig -p to find out where all the
    libraries on the system are located.  Assumes that the platform
    has already been set.  Returns True if the cache was loaded,
    False otherwise. """
    if not os.path.isfile('/sbin/ldconfig'):
        return False
    proc = subprocess.Popen(['/sbin/ldconfig', '-p'],
                            stdout=subprocess.PIPE,
                            universal_newlines=True)
    out, err = proc.communicate()
    if proc.returncode != 0:
        self.notify.warning("/sbin/ldconfig -p returned code %d" %(proc.returncode))
        return False
    for line in out.splitlines():
        if '=>' not in line:
            continue
        prefix, location = line.rsplit('=>', 1)
        prefix = prefix.strip()
        location = location.strip()
        if not location or not prefix or ' ' not in prefix:
            self.notify.warning("Ignoring malformed ldconfig -p line: " + line)
            continue
        lib, opts = prefix.split(' ', 1)
        if ('x86-64' in opts) != self.platform.endswith('_amd64'):
            # This entry isn't meant for our architecture.  I think
            # x86-64 is the only platform where ldconfig supplies
            # this extra arch string.
            continue
        self.libraryCache[lib] = Filename.fromOsSpecific(location)
    return True
def resolveLibrary(self, filename):
    """ Resolves the given shared library filename along the executable path,
    or by cross-referencing it with the library cache.  Modifies
    filename in place; returns True if resolved, False otherwise. """
    key = str(filename)
    cached = self.libraryCache.get(key)
    if cached is not None:
        filename.setFullpath(cached.getFullpath())
        return True
    if filename.resolveFilename(self.executablePath):
        # Remember this resolution for next time.
        self.libraryCache[key] = Filename(filename)
        return True
    return False
def setPlatform(self, platform = None):
    """ Sets the platform that this Packager will compute for.  On
    OSX, this can be used to specify the particular architecture
    we are building; on other platforms, it is probably a mistake
    to set this.

    You should call this before doing anything else with the
    Packager.  It's even better to pass the platform string to the
    constructor. """
    self.platform = platform if platform else PandaSystem.getPlatform()
    # OSX uses this "arch" string for the otool and lipo commands.
    if self.platform.startswith('osx_'):
        self.arch = self.platform[4:]
    else:
        self.arch = None
def setHost(self, host, downloadUrl = None,
            descriptiveName = None, hostDir = None,
            mirrors = None):
    """ Specifies the URL that will ultimately host these
    contents.  When self.ignoreSetHost is set, this behaves
    exactly like addHost() and leaves self.host untouched. """
    if not self.ignoreSetHost:
        self.host = host
    self.addHost(host,
                 downloadUrl = downloadUrl,
                 descriptiveName = descriptiveName,
                 hostDir = hostDir,
                 mirrors = mirrors)
def addHost(self, host, downloadUrl = None, descriptiveName = None,
            hostDir = None, mirrors = None):
    """ Adds a host to the list of known download hosts.  This
    information is written into any p3d files that reference the
    host, which can be used e.g. to pre-define its mirrors.
    Returns the (new or updated) HostEntry object. """
    if downloadUrl is None and URLSpec(host).getScheme() == 'https':
        # An SSL-protected host URL without an explicit download
        # URL: assume downloads use the same location over cleartext.
        cleartext = URLSpec(host)
        cleartext.setScheme('http')
        downloadUrl = cleartext.getUrl()

    he = self.hosts.get(host)
    if he is not None:
        # Update the existing host entry with any newly-supplied
        # fields, leaving the others alone.
        if downloadUrl is not None:
            he.downloadUrl = downloadUrl
        if descriptiveName is not None:
            he.descriptiveName = descriptiveName
        if hostDir is not None:
            he.hostDir = hostDir
        if mirrors is not None:
            he.mirrors = mirrors
    else:
        # First time we have seen this host: create a fresh entry.
        he = self.HostEntry(host, downloadUrl = downloadUrl,
                            descriptiveName = descriptiveName,
                            hostDir = hostDir, mirrors = mirrors)
        self.hosts[host] = he

    return he
def addAltHost(self, keyword, altHost, origHost = None,
               downloadUrl = None, descriptiveName = None,
               hostDir = None, mirrors = None):
    """ Registers an alternate host for an already-known host: a
    server that may be contacted, if named on the HTML page, which
    serves a *different* version of the original host's contents.
    (A mirror, by contrast, serves identical contents.) """
    origHost = origHost or self.host

    # Make sure both hosts exist in the table, then link them.
    self.addHost(altHost, downloadUrl = downloadUrl,
                 descriptiveName = descriptiveName,
                 hostDir = hostDir, mirrors = mirrors)
    self.addHost(origHost).altHosts[keyword] = altHost
def addWindowsSearchPath(self, searchPath, varname):
    """ Expands $varname as a Windows-style (semicolon-separated)
    search path and appends each existing directory to the given
    DSearchPath. """
    value = ExecutionEnvironment.getEnvironmentVariable(varname)
    if not value:
        # Fall back on the raw OS environment.
        if varname not in os.environ:
            return
        value = os.environ[varname]

    for entry in value.split(';'):
        entry = Filename.fromOsSpecific(entry)
        # makeTrueCase() also verifies the directory exists.
        if entry.makeTrueCase():
            searchPath.appendDirectory(entry)
def addPosixSearchPath(self, searchPath, varname):
    """ Expands $varname as a Posix-style (colon-separated) search
    path and appends each existing directory to the given
    DSearchPath. """
    value = ExecutionEnvironment.getEnvironmentVariable(varname)
    if not value:
        # Fall back on the raw OS environment.
        if varname not in os.environ:
            return
        value = os.environ[varname]

    for entry in value.split(':'):
        entry = Filename.fromOsSpecific(entry)
        # makeTrueCase() also verifies the directory exists.
        if entry.makeTrueCase():
            searchPath.appendDirectory(entry)
def _ensureExtensions(self):
    # Rebuild the aggregate list of every file extension the
    # Packager knows how to handle, from the per-category lists.
    self.knownExtensions = (
        self.imageExtensions + self.modelExtensions +
        self.textExtensions + self.binaryExtensions +
        self.uncompressibleExtensions + self.unprocessedExtensions)
def setup(self):
    """ Call this method to initialize the class after filling in
    some of the values in the constructor.

    Chooses the output directory, decides whether full packages may
    be built (requires installDir), verifies this build of Panda is
    configured for package distribution, and reads the existing
    contents file. """
    self._ensureExtensions()

    self.currentPackage = None

    if self.installDir:
        # If we were given an install directory, we can build
        # packages as well as plain p3d files, and it all goes
        # into the specified directory.
        self.p3dInstallDir = self.installDir
        self.allowPackages = True
    else:
        # If we don't have an actual install directory, we can
        # only build p3d files, and we drop them into the current
        # directory.
        self.p3dInstallDir = '.'
        self.allowPackages = False

    # A distribution-enabled build of Panda carries a package version
    # and host URL; without them the produced packages would be
    # unusable, so bail out early.
    if not PandaSystem.getPackageVersionString() or not PandaSystem.getPackageHostUrl():
        raise PackagerError('This script must be run using a version of Panda3D that has been built\nfor distribution. Try using ppackage.p3d or packp3d.p3d instead.\nIf you are running this script for development purposes, you may also\nset the Config variable panda-package-host-url to the URL you expect\nto download these contents from (for instance, a file:// URL).')

    # Pick up whatever contents.xml already exists in the install dir.
    self.readContentsFile()
def close(self):
    """ Finishes up after all of the package def files have been
    read, flushing the accumulated contents file to disk. """
    self.writeContentsFile()
def buildPatches(self, packages):
    """ Call this after calling close(), to build patches for the
    indicated packages. """
    # Only true Multifile-based packages can be patched; p3d
    # applications and solo packages are quietly skipped.
    packageNames = [
        package.packageName for package in packages
        if not package.p3dApplication and not package.solo]

    if packageNames:
        from .PatchMaker import PatchMaker
        pm = PatchMaker(self.installDir)
        pm.buildPatches(packageNames = packageNames)
def readPackageDef(self, packageDef, packageNames = None):
    """ Reads the named .pdef file and constructs the named
    packages, or all packages if packageNames is None.  Raises an
    exception if the pdef file is invalid.  Returns the list of
    packages constructed. """

    self.notify.info('Reading %s' % (packageDef))

    # We use exec to "read" the .pdef file.  This has the nice
    # side-effect that the user can put arbitrary Python code in
    # there to control conditional execution, and such.

    # Set up the namespace dictionary for exec.
    globals = {}
    globals['__name__'] = packageDef.getBasenameWoExtension()
    globals['__dir__'] = Filename(packageDef.getDirname()).toOsSpecific()
    globals['__file__'] = packageDef.toOsSpecific()
    globals['packageDef'] = packageDef

    globals['platform'] = self.platform
    globals['packager'] = self

    # We'll stuff all of the predefined functions, and the
    # predefined classes, in the global dictionary, so the pdef
    # file can reference them.

    # By convention, the existence of a method of this class named
    # do_foo(self) is sufficient to define a pdef method call
    # foo().
    for methodName in list(self.__class__.__dict__.keys()):
        if methodName.startswith('do_'):
            name = methodName[3:]
            # func_closure captures the bare method name so the pdef
            # call records (name, args, kw) for deferred dispatch.
            c = func_closure(name)
            globals[name] = c.generic_func

    globals['p3d'] = class_p3d
    globals['package'] = class_package
    globals['solo'] = class_solo

    # Now exec the pdef file.  Assuming there are no syntax
    # errors, and that the pdef file doesn't contain any really
    # crazy Python code, all this will do is fill in the
    # '__statements' list in the module scope.

    # NOTE(review): the pdef source is executed with full privileges;
    # only trusted pdef files should be read.
    fn = packageDef.toOsSpecific()
    f = open(fn)
    code = compile(f.read(), fn, 'exec')
    f.close()

    # It appears that having a separate globals and locals
    # dictionary causes problems with resolving symbols within a
    # class scope.  So, we just use one dictionary, the globals.
    exec(code, globals)

    packages = []

    # Now iterate through the statements and operate on them.
    statements = globals.get('__statements', [])
    if not statements:
        self.notify.info("No packages defined.")

    try:
        for (lineno, stype, name, args, kw) in statements:
            if stype == 'class':
                # A class statement defines a package (or p3d app,
                # or solo), whose body is a nested statement list.
                if packageNames is None or name in packageNames:
                    classDef = globals[name]
                    p3dApplication = (class_p3d in classDef.__bases__)
                    solo = (class_solo in classDef.__bases__)
                    self.beginPackage(name, p3dApplication = p3dApplication,
                                      solo = solo)
                    statements = classDef.__dict__.get('__statements', [])
                    if not statements:
                        self.notify.info("No files added to %s" % (name))
                    for (lineno, stype, sname, args, kw) in statements:
                        if stype == 'class':
                            raise PackagerError('Nested classes not allowed')
                        self.__evalFunc(sname, args, kw)
                    package = self.endPackage()
                    if package is not None:
                        packages.append(package)
                    elif packageNames is not None:
                        # If the name is explicitly specified, this means
                        # we should abort if the package faild to construct.
                        raise PackagerError('Failed to construct %s' % name)
            else:
                # A top-level (non-class) statement is a direct
                # method call on this Packager.
                self.__evalFunc(name, args, kw)
    except PackagerError:
        # Append the line number and file name to the exception
        # error message.
        inst = sys.exc_info()[1]
        if not inst.args:
            inst.args = ('Error',)
        inst.args = (inst.args[0] + ' on line %s of %s' % (lineno, packageDef),)
        raise

    return packages
def __evalFunc(self, name, args, kw):
    """ Dispatches a pdef statement by invoking the corresponding
    do_<name>(*args, **kw) method, re-raising any
    OutsideOfPackageError with the offending statement named. """
    func = getattr(self, 'do_%s' % (name))
    try:
        func(*args, **kw)
    except OutsideOfPackageError:
        raise OutsideOfPackageError(
            '%s encountered outside of package definition' % (name))
def __expandTabs(self, line, tabWidth = 8):
    """ Expands tab characters in the line to spaces, aligning each
    tab to the next multiple of tabWidth columns.  Returns the
    expanded line. """
    p = 0
    while p < len(line):
        if line[p] == '\t':
            # Expand a tab.  Integer division is required here: on
            # Python 3 the original "/" yielded a float, making the
            # subsequent ' ' * numSpaces raise TypeError.
            nextStop = ((p + tabWidth) // tabWidth) * tabWidth
            numSpaces = nextStop - p
            line = line[:p] + ' ' * numSpaces + line[p + 1:]
            p = nextStop
        else:
            p += 1

    return line
def __countLeadingWhitespace(self, line):
    """ Returns the number of leading whitespace columns in the
    line, after tab expansion. """
    expanded = self.__expandTabs(line)
    return len(expanded) - len(expanded.lstrip())
def __stripLeadingWhitespace(self, line, whitespaceCount):
    """ Removes at most whitespaceCount leading whitespace columns
    from the (tab-expanded) line, preserving any deeper indent. """
    line = self.__expandTabs(line)
    head = line[:whitespaceCount].lstrip()
    return head + line[whitespaceCount:]
def __parseArgs(self, words, argList):
    """ Pops trailing "parameter=value" tokens off the words list
    (mutating it in place), validating each parameter against
    argList, and returns the accumulated {parameter: value} dict.

    Stops at the first trailing word without '=' — remaining words
    are left in place.  Raises PackagerError on an unknown or
    duplicated parameter. """
    args = {}

    while len(words) > 1:
        arg = words[-1]
        if '=' not in arg:
            return args

        parameter, value = arg.split('=', 1)
        parameter = parameter.strip()
        value = value.strip()
        if parameter not in argList:
            message = 'Unknown parameter %s' % (parameter)
            raise PackagerError(message)
        if parameter in args:
            message = 'Duplicate parameter %s' % (parameter)
            raise PackagerError(message)

        args[parameter] = value

        del words[-1]

    # Previously this fell off the end and implicitly returned None
    # once all trailing words were consumed; always return the dict.
    return args
def beginPackage(self, packageName, p3dApplication = False,
                 solo = False):
    """ Opens a new package specification named packageName.  Follow
    this with file() etc. calls, then close it with endPackage().
    Raises PackagerError if a package is already open, or if a
    non-p3d package is requested without an install directory. """
    if self.currentPackage:
        raise PackagerError('unclosed endPackage %s' % (self.currentPackage.packageName))

    package = self.Package(packageName, self)
    self.currentPackage = package

    package.p3dApplication = p3dApplication
    package.solo = solo

    # Plain packages (as opposed to p3d apps) need somewhere to be
    # installed to.
    if not package.p3dApplication and not self.allowPackages:
        raise PackagerError('Cannot generate packages without an installDir; use -i')
def endPackage(self):
    """ Closes the current package specification, actually
    generating the package file.  Returns the finished package, or
    None if it failed to close (e.g. missing files). """
    package = self.currentPackage
    if not package:
        raise PackagerError('unmatched endPackage')

    package.signParams += self.signParams[:]

    # Clear the current package before closing, so a failed close
    # does not leave a half-open package behind.
    self.currentPackage = None
    if not package.close():
        return None

    self.packageList.append(package)
    self.packages[(package.packageName, package.platform, package.version)] = package
    return package
def findPackage(self, packageName, platform = None, version = None,
                host = None, requires = None):
    """ Searches for the named package from a previous publish
    operation along the install search path.

    If requires is not None, it is a list of Package objects that
    are already required.  The new Package object must be
    compatible with the existing Packages, or an error is
    returned.  This is also useful for determining the appropriate
    package version to choose when a version is not specified.

    Returns the Package object, or None if the package cannot be
    located. """

    # Is it a package we already have resident?
    # NOTE(review): this lookup uses a 4-tuple key ending in host,
    # but endPackage() stores locally-built packages under a 3-tuple
    # key without the host, so those entries never match here; they
    # are re-found via the installSearch scan below.  Verify this is
    # intended.
    package = self.packages.get((packageName, platform or self.platform, version, host), None)
    if package:
        return package

    # Look on the searchlist.
    for dirname in self.installSearch:
        # Try with the current platform substituted first, then with
        # the caller's (possibly None) platform.
        package = self.__scanPackageDir(dirname, packageName, platform or self.platform, version, host, requires = requires)
        if not package:
            package = self.__scanPackageDir(dirname, packageName, platform, version, host, requires = requires)

        if package and host and package.host != host:
            # Wrong host.
            package = None

        if package:
            break

    if not package:
        # Query the indicated host.
        package = self.__findPackageOnHost(packageName, platform or self.platform, version or None, host, requires = requires)
        if not package:
            package = self.__findPackageOnHost(packageName, platform, version, host, requires = requires)

    if package:
        # Cache the result under both its canonical key and the key
        # we were queried with, so later lookups hit immediately.
        package = self.packages.setdefault((package.packageName, package.platform, package.version, package.host), package)
        self.packages[(packageName, platform or self.platform, version, host)] = package
        return package

    return None
def __scanPackageDir(self, rootDir, packageName, platform, version,
                     host, requires = None):
    """ Scans a directory on disk, looking for *.import.xml files
    that match the indicated packageName and optional version.  If a
    suitable xml file is found, reads it and returns the assocated
    Package definition.

    If a version is not specified, and multiple versions are
    available, the highest-numbered version that matches will be
    selected.
    """

    packages = []

    if version:
        # A specific version package.
        versionList = [version]
    else:
        # An unversioned package, or any old version.
        versionList = [None, '*']

    for version in versionList:
        packageDir = Filename(rootDir, packageName)
        basename = packageName

        if version:
            # A specific or nonspecific version package.
            packageDir = Filename(packageDir, version)
            basename += '.%s' % (version)
        if platform:
            packageDir = Filename(packageDir, platform)
            basename += '.%s' % (platform)

        # Actually, the host means little for this search, since we're
        # only looking in a local directory at this point.

        basename += '.import.xml'
        filename = Filename(packageDir, basename)
        filelist = glob.glob(filename.toOsSpecific())
        if not filelist:
            # It doesn't exist in the nested directory; try the root
            # directory.
            filename = Filename(rootDir, basename)
            filelist = glob.glob(filename.toOsSpecific())

        for file in filelist:
            package = self.__readPackageImportDescFile(Filename.fromOsSpecific(file))
            packages.append(package)

    # NOTE(review): __sortImportPackages() returns its result, which
    # is discarded here — confirm the list is actually sorted in
    # place before the loop below relies on highest-version-first.
    self.__sortImportPackages(packages)

    # Return the first (highest-versioned) candidate that satisfies
    # the compatibility constraints.
    for package in packages:
        if package and self.__packageIsValid(package, requires, platform):
            return package

    return None
def __findPackageOnHost(self, packageName, platform, version, hostUrl, requires = None):
    """ Looks for the named package on the indicated download host,
    fetching its import desc file as needed, and returns the
    corresponding Package object, or None if no suitable package is
    found.

    If version is None, falls back first to the compiled-in package
    version string, then to the highest-numbered available version.
    requires (a list of already-required Packages) constrains which
    candidates are acceptable; see __packageIsValid(). """

    # (An unused "appRunner = AppRunnerGlobal.appRunner" local was
    # removed here; the appRunner case is handled by __getHostInfo.)

    # Make sure we have a fresh version of the contents file.
    host = self.__getHostInfo(hostUrl)
    if not host.downloadContentsFile(self.http):
        return None

    packageInfos = []
    packageInfo = host.getPackage(packageName, version, platform = platform)
    if not packageInfo and not version:
        # No explicit version is specified, first fallback: look
        # for the compiled-in version.
        packageInfo = host.getPackage(packageName, PandaSystem.getPackageVersionString(), platform = platform)

    if not packageInfo and not version:
        # No explicit version is specified, second fallback: get
        # the highest-numbered version available.
        packageInfos = host.getPackages(packageName, platform = platform)
        self.__sortPackageInfos(packageInfos)

    if packageInfo and not packageInfos:
        packageInfos = [packageInfo]

    for packageInfo in packageInfos:
        if not packageInfo or not packageInfo.importDescFile:
            continue

        # Now we've retrieved a PackageInfo.  Get the import desc file
        # from it.
        if host.hostDir:
            filename = Filename(host.hostDir, 'imports/' + packageInfo.importDescFile.basename)
        else:
            # We're not running in the packaged environment, so download
            # to a temporary file instead of the host directory.
            filename = Filename.temporary('', 'import_' + packageInfo.importDescFile.basename, '.xml')
        if not host.freshenFile(self.http, packageInfo.importDescFile, filename):
            self.notify.error("Couldn't download import file.")
            continue

        # Now that we have the import desc file, use it to load one of
        # our Package objects.
        package = self.Package('', self)
        success = package.readImportDescFile(filename)

        if not host.hostDir:
            # Don't forget to delete the temporary file we created.
            filename.unlink()

        if success and self.__packageIsValid(package, requires, platform):
            return package

    # Couldn't find a suitable package.
    return None
def __getHostInfo(self, hostUrl = None):
    """ Shadows appRunner.getHost(), so host lookups also work when
    running outside the packaged environment.  hostUrl defaults to
    the compiled-in package host URL. """
    hostUrl = hostUrl or PandaSystem.getPackageHostUrl()

    # Inside the packaged environment, defer to the app runner.
    if AppRunnerGlobal.appRunner:
        return AppRunnerGlobal.appRunner.getHost(hostUrl)

    # Otherwise keep our own cache of HostInfo objects.
    host = self.__hostInfos.get(hostUrl)
    if not host:
        host = HostInfo(hostUrl)
        self.__hostInfos[hostUrl] = host
    return host
def __sortImportPackages(self, packages):
    """ Sorts the given list of Packages (as read from *.import.xml
    files) in place, in reverse order by version, so that the
    highest-numbered versions appear first.  Also returns the list.

    The previous implementation built and returned a new sorted
    list while leaving the argument untouched; both call sites
    discard the return value, so the sort silently never took
    effect.  Sorting via key= also avoids comparing Package objects
    directly when two versions tie (a TypeError on Python 3). """
    packages.sort(key = lambda package: self.__makeVersionTuple(package.version),
                  reverse = True)
    return packages
def __sortPackageInfos(self, packages):
    """ Sorts the given list of PackageInfos (as retrieved from a
    Host) in place, in reverse order by version, so that the
    highest-numbered versions appear first.  Also returns the list.

    The previous implementation built and returned a new sorted
    list while leaving the argument untouched; the call site
    discards the return value, so the sort silently never took
    effect.  Sorting via key= also avoids comparing PackageInfo
    objects directly when two versions tie (a TypeError on
    Python 3). """
    packages.sort(key = lambda package: self.__makeVersionTuple(package.packageVersion),
                  reverse = True)
    return packages
def __makeVersionTuple(self, version):
    """ Converts a version string into a mixed tuple of strings and
    ints for sorting: runs of digits become int fields so that
    numeric components compare numerically (e.g. '10' > '9'). """
    if not version:
        return ('',)

    fields = []
    pos = 0
    length = len(version)
    while pos < length:
        # Non-digit run (may be empty, matching the original, so the
        # tuple shape alternates str, int, str, int, ...).
        start = pos
        while pos < length and not version[pos].isdigit():
            pos += 1
        fields.append(version[start:pos])

        # Digit run, converted to int when present.
        start = pos
        while pos < length and version[pos].isdigit():
            pos += 1
        if pos > start:
            fields.append(int(version[start:pos]))

    return tuple(fields)
def __packageIsValid(self, package, requires, platform):
    """ Returns true if the package is valid, meaning it can be
    imported without conflicts with existing packages already
    required (such as different versions of panda3d). """

    if package.platform and package.platform != platform:
        # Incorrect platform.
        return False

    if not requires:
        # No other restrictions.
        return True

    # Really, we only check the panda3d package.  The other
    # packages will list this as a dependency, and this is all
    # that matters.

    panda1 = self.__findPackageInRequires('panda3d', [package] + package.requires)
    panda2 = self.__findPackageInRequires('panda3d', requires)
    if not panda1 or not panda2:
        # One side has no panda3d dependency at all: no conflict.
        return True

    if panda1.version == panda2.version:
        return True

    # Conflicting panda3d versions: report and reject.
    print('Rejecting package %s, version "%s": depends on %s, version "%s" instead of version "%s"' % (
        package.packageName, package.version,
        panda1.packageName, panda1.version, panda2.version))
    return False
def __findPackageInRequires(self, packageName, packages):
    """ Returns the first package with the indicated name found in
    the given list of packages, or (depth-first) in the packages
    they require, or None if it appears nowhere.

    (The second parameter was renamed from `list`, which shadowed
    the builtin; every call site in this file passes it
    positionally.) """
    for package in packages:
        if package.packageName == packageName:
            return package
        found = self.__findPackageInRequires(packageName, package.requires)
        if found:
            return found

    return None
def __readPackageImportDescFile(self, filename):
    """ Reads the named import.xml file into a Package object,
    returning it on success or None if the file is invalid. """
    package = self.Package('', self)
    if not package.readImportDescFile(filename):
        return None
    return package
def do_setVer(self, value):
    """ Specifies an explicit set_ver number for the current
    package, either as a tuple of integers or as a string of
    dot-separated integers. """
    self.currentPackage.packageSetVer = SeqValue(value)
def do_config(self, **kw):
    """ Accepts any number of keyword parameters, each of which sets
    the corresponding p3d config variable.  These end up in the
    p3d_info.xml file of an application, or in the package desc file
    of a package. """
    if not self.currentPackage:
        raise OutsideOfPackageError

    self.currentPackage.configs.update(kw)
def do_require(self, *args, **kw):
    """ Declares a dependency on the named package(s).

    Installing this package implicitly installs the named packages
    too, and files they already contain are omitted from this one
    when building it. """
    self.requirePackagesNamed(args, **kw)
def requirePackagesNamed(self, names, version = None, host = None):
    """ Declares a dependency on each of the named packages.

    Installing this package implicitly installs the named packages
    too, and files they already contain are omitted from this one
    when building it.  Packages that cannot be located are warned
    about and recorded in missingPackages rather than aborting.

    Raises OutsideOfPackageError outside a package definition. """
    if not self.currentPackage:
        raise OutsideOfPackageError

    for packageName in names:
        # A special case when requiring the "panda3d" package.  We
        # supply the version number which we've been compiled with
        # as a default.
        pversion = version
        phost = host
        if packageName == 'panda3d':
            if not pversion:
                pversion = PandaSystem.getPackageVersionString()
            if not phost:
                phost = PandaSystem.getPackageHostUrl()

        package = self.findPackage(packageName, version = pversion, host = phost,
                                   requires = self.currentPackage.requires)
        if not package:
            # Report the effective version we actually searched for
            # (pversion); the original message interpolated the raw
            # caller-supplied version, which is None for the
            # defaulted panda3d case.
            message = 'Unknown package %s, version "%s"' % (packageName, pversion)
            self.notify.warning(message)
            self.currentPackage.missingPackages.append((packageName, pversion))
            continue

        self.requirePackage(package)
def requirePackage(self, package):
    """ Indicates a dependency on the indicated package, supplied
    as a Package object.

    Attempts to install this package will implicitly install the
    named package also.  Files already included in the named
    package will be omitted from this one.

    Raises OutsideOfPackageError outside a package definition. """
    if not self.currentPackage:
        raise OutsideOfPackageError

    # A special case when requiring the "panda3d" package.  We
    # complain if the version number doesn't match what we've been
    # compiled with.  Note that the host URL is only checked when
    # the versions already agree (elif).
    if package.packageName == 'panda3d':
        if package.version != PandaSystem.getPackageVersionString():
            self.notify.warning("Requiring panda3d version %s, which does not match the current build of Panda, which is version %s." % (package.version, PandaSystem.getPackageVersionString()))
        elif package.host != PandaSystem.getPackageHostUrl():
            self.notify.warning("Requiring panda3d host %s, which does not match the current build of Panda, which is host %s." % (package.host, PandaSystem.getPackageHostUrl()))

    self.currentPackage.requirePackage(package)
def do_module(self, *args, **kw):
    """ Adds the named Python module(s) to the current package. """
    self.addModule(args, **kw)
def addModule(self, moduleNames, newName = None, filename = None, required = False):
    """ Adds the named Python module(s) to the current package.

    newName renames the (single) module within the package;
    filename supplies its source file explicitly.  If required is
    true, the modules are marked vital to the package.

    Raises OutsideOfPackageError outside a package definition, and
    PackagerError if newName or filename is combined with more than
    one module. """
    if not self.currentPackage:
        raise OutsideOfPackageError

    if (newName or filename) and len(moduleNames) != 1:
        # The original message mentioned only newName, though this
        # guard has always covered filename as well.
        raise PackagerError('Cannot specify newName or filename with multiple modules')

    if required:
        self.currentPackage.requiredModules += moduleNames

    for moduleName in moduleNames:
        self.currentPackage.freezer.addModule(moduleName, newName = newName, filename = filename)
def do_excludeModule(self, *args):
    """ Marks the named Python module(s) as not to be included in
    the current package. """
    if not self.currentPackage:
        raise OutsideOfPackageError

    for name in args:
        self.currentPackage.freezer.excludeModule(name)
def do_main(self, filename):
    """ Includes the indicated file as the __main__ module of the
    application, and points mainModule at it. """
    # addModule() performs the outside-of-package check.
    self.addModule(['__main__'], '__main__', filename, required = True)
    self.currentPackage.mainModule = ('__main__', '__main__')
def do_mainModule(self, moduleName, newName = None, filename = None):
    """ Names the indicated module as the "main" module of the
    application or exe.  In most cases, you will want to use main()
    instead.

    newName renames the module within the package (defaults to
    moduleName); filename, if given, adds the module's source file
    explicitly under a path derived from the dotted module name.

    Raises OutsideOfPackageError outside a package definition. """
    if not self.currentPackage:
        raise OutsideOfPackageError

    # Only warn when an actually different main module is replaced.
    if self.currentPackage.mainModule and self.currentPackage.mainModule[0] != moduleName:
        self.notify.warning("Replacing mainModule %s with %s" % (
            self.currentPackage.mainModule[0], moduleName))

    if not newName:
        newName = moduleName

    if filename:
        filename = Filename(filename)
        # Derive the in-package path from the dotted module name,
        # keeping the source file's extension.
        newFilename = Filename('/'.join(moduleName.split('.')))
        newFilename.setExtension(filename.getExtension())
        self.currentPackage.addFile(
            filename, newName = str(newFilename),
            explicit = True, extract = True, required = True)

    self.currentPackage.mainModule = (moduleName, newName)
def do_sign(self, certificate, chain = None, pkey = None, password = None):
    """ Signs the resulting p3d file (or package multifile) with
    the indicated certificate.  If needed, the chain file should
    contain the list of additional certificate authorities needed
    to validate the signing certificate.  The pkey file should
    contain the private key.

    It is also legal for the certificate file to contain the chain
    and private key embedded within it.

    If the private key is encrypted, the password should be
    supplied.

    Raises OutsideOfPackageError outside a package definition. """
    if not self.currentPackage:
        # Guard added for consistency with the other do_* methods;
        # previously this raised a bare AttributeError on None.
        raise OutsideOfPackageError

    self.currentPackage.signParams.append((certificate, chain, pkey, password))
def do_setupPanda3D(self, p3dpythonName=None, p3dpythonwName=None):
    """ A special convenience command that adds the minimum
    startup modules for a panda3d package, intended for developers
    producing their own custom panda3d for download.  Should be
    called before any other Python modules are named. """

    # This module and all its dependencies come frozen into p3dpython.
    # We should mark them as having already been added so that we don't
    # add them again to the Multifile.
    self.do_module('direct.showbase.VFSImporter')
    self.currentPackage.freezer.done(addStartupModules=True)
    self.currentPackage.freezer.writeCode(None)
    self.currentPackage.addExtensionModules()
    self.currentPackage.freezer.reset()

    self.do_file('panda3d/core.pyd', newDir='panda3d')

    # This is the key Python module that is imported at runtime to
    # start an application running.
    self.do_module('direct.p3d.AppRunner')

    # This is the main program that drives the runtime Python.  It
    # is responsible for importing direct.p3d.AppRunner to start an
    # application running.  The program comes in two parts: an
    # executable, and an associated dynamic library.  Note that the
    # .exe and .dll extensions are automatically replaced with the
    # appropriate platform-specific extensions.

    if self.platform.startswith('osx'):
        # On Mac, we package up a P3DPython.app bundle.  This
        # includes specifications in the plist file to avoid
        # creating a dock icon and stuff.

        resources = []

        # Find p3dpython.plist in the direct source tree.
        import direct
        plist = Filename(direct.__path__[0], 'plugin/p3dpython.plist')

        ## # Find panda3d.icns in the models tree.
        ## filename = Filename('plugin_images/panda3d.icns')
        ## found = filename.resolveFilename(getModelPath().getValue())
        ## if not found:
        ##     found = filename.resolveFilename("models")
        ## if found:
        ##     resources.append(filename)

        self.do_makeBundle('P3DPython.app', plist, executable = 'p3dpython',
                           resources = resources, dependencyDir = '')
    else:
        # Anywhere else, we just ship the executable file p3dpython.exe.
        if p3dpythonName is None:
            p3dpythonName = 'p3dpython'
        else:
            self.do_config(p3dpython_name=p3dpythonName)

        if self.platform.startswith('win'):
            self.do_file('p3dpython.exe', newName=p3dpythonName+'.exe')
        else:
            self.do_file('p3dpython.exe', newName=p3dpythonName)

        # The "Windows" executable appends a 'w' to whatever name is used
        # above, unless an override name is explicitly specified.
        if self.platform.startswith('win'):
            if p3dpythonwName is None:
                p3dpythonwName = p3dpythonName+'w'
            else:
                self.do_config(p3dpythonw_name=p3dpythonwName)

            # NOTE(review): this platform check repeats the one just
            # above, so the else branch below appears unreachable --
            # verify the intended nesting.
            if self.platform.startswith('win'):
                self.do_file('p3dpythonw.exe', newName=p3dpythonwName+'.exe')
            else:
                self.do_file('p3dpythonw.exe', newName=p3dpythonwName)

        self.do_file('libp3dpython.dll')
def do_freeze(self, filename, compileToExe = False):
    """ Freezes all of the current Python code into either an
    executable (if compileToExe is true) or a dynamic library (if
    it is false).  The resulting compiled binary is added to the
    current package under the indicated filename.  The filename
    should not include an extension; that will be added.

    Raises OutsideOfPackageError outside a package definition, and
    PackagerError when compiling an exe without a main module. """
    if not self.currentPackage:
        raise OutsideOfPackageError

    package = self.currentPackage
    freezer = package.freezer

    # A main module makes sense only for an exe target.
    if package.mainModule and not compileToExe:
        self.notify.warning("Ignoring main_module for dll %s" % (filename))
        package.mainModule = None
    if not package.mainModule and compileToExe:
        message = "No main_module specified for exe %s" % (filename)
        raise PackagerError(message)

    if package.mainModule:
        moduleName, newName = package.mainModule
        if compileToExe:
            # If we're producing an exe, the main module must
            # be called "__main__".
            newName = '__main__'
            package.mainModule = (moduleName, newName)

        if newName not in freezer.modules:
            freezer.addModule(moduleName, newName = newName)
        else:
            # Already present under its original name; alias it.
            freezer.modules[newName] = freezer.modules[moduleName]
    freezer.done(addStartupModules = compileToExe)

    # Split an optional directory prefix off the target filename;
    # generateCode() receives only the basename.
    dirname = ''
    basename = filename
    if '/' in basename:
        dirname, basename = filename.rsplit('/', 1)
        dirname += '/'

    basename = freezer.generateCode(basename, compileToExe = compileToExe)

    package.addFile(Filename(basename), newName = dirname + basename,
                    deleteTemp = True, explicit = True, extract = True)
    package.addExtensionModules()
    if not package.platform:
        package.platform = self.platform

    # Reset the freezer for more Python files.
    freezer.reset()
    package.mainModule = None
def do_makeBundle(self, bundleName, plist, executable = None,
                  resources = None, dependencyDir = None):
    """ Constructs a minimal OSX "bundle" — an executable plus a
    plist file, with optional resource files such as icons — and
    adds it to the package under the given name. """
    contents = bundleName + '/Contents'

    self.addFiles([plist], newName = contents + '/Info.plist',
                  extract = True)

    if executable:
        exeBasename = Filename(executable).getBasename()
        self.addFiles([executable],
                      newName = contents + '/MacOS/' + exeBasename,
                      extract = True, executable = True,
                      dependencyDir = dependencyDir)

    if resources:
        self.addFiles(resources, newDir = contents + '/Resources',
                      extract = True, dependencyDir = dependencyDir)
def do_file(self, *args, **kw):
    """ Adds the indicated file or files to the current package;
    see addFiles() for the accepted keyword options. """
    self.addFiles(args, **kw)
def addFiles(self, filenames, text = None, newName = None,
             newDir = None, extract = None, executable = None,
             deleteTemp = False, literal = False,
             dependencyDir = None, required = False):
    """ Adds the indicated arbitrary files to the current package.

    filenames is a list of Filename or string objects, and each
    may include shell globbing characters.

    Each file is placed in the named directory, or the toplevel
    directory if no directory is specified.

    Certain special behavior is invoked based on the filename
    extension.  For instance, .py files may be automatically
    compiled and stored as Python modules.

    If newDir is not None, it specifies the directory in which the
    file should be placed.  In this case, all files matched by the
    filename expression are placed in the named directory.

    If newName is not None, it specifies a new filename.  In this
    case, newDir is ignored, and the filename expression must
    match only one file.

    If newName and newDir are both None, the file is placed in the
    toplevel directory, regardless of its source directory.

    If text is nonempty, it contains the text of the file.  In
    this case, the filename is not read, but the supplied text is
    used instead.

    If extract is true, the file is explicitly extracted at
    runtime.

    If executable is true, the file is marked as an executable
    filename, for special treatment.

    If deleteTemp is true, the file is a temporary file and will
    be deleted after its contents are copied to the package.

    If literal is true, then the file extension will be respected
    exactly as it appears, and glob characters will not be
    expanded.  If this is false, then .dll or .exe files will be
    renamed to .dylib and no extension on OSX (or .so on Linux);
    and glob characters will be expanded.

    If required is true, then the file is marked a vital part of
    the package.  The package will not be built if this file
    somehow cannot be added to the package.
    """

    if not self.currentPackage:
        raise OutsideOfPackageError

    files = []
    explicit = True

    for filename in filenames:
        filename = Filename(filename)
        if literal:
            # Take the name exactly as given: no globbing, no
            # extension remapping.
            thisFiles = [filename.toOsSpecific()]
        else:
            ext = filename.getExtension()

            # A special case, since OSX and Linux don't have a
            # standard extension for program files.
            if executable is None and ext == 'exe':
                executable = True

            # Remap e.g. .exe/.dll to this platform's convention.
            newExt = self.remapExtensions.get(ext, None)
            if newExt is not None:
                filename.setExtension(newExt)

            thisFiles = glob.glob(filename.toOsSpecific())
            if not thisFiles:
                # No match on disk; keep the literal name as a
                # placeholder and let later stages resolve or
                # complain.
                thisFiles = [filename.toOsSpecific()]

            if newExt == 'dll' or (ext == 'dll' and newExt is None):
                # Go through the dsoFilename interface on Windows,
                # to insert a _d if we are running on a debug
                # build.
                dllFilename = Filename(filename)
                dllFilename.setExtension('so')
                dllFilename = Filename.dsoFilename(str(dllFilename))

                if dllFilename != filename:
                    # Re-glob the plain name (dropping the
                    # placeholder fallback above); if it truly does
                    # not exist, try resolving the debug (_d) name.
                    thisFiles = glob.glob(filename.toOsSpecific())
                    if not thisFiles:
                        # We have to resolve this filename to
                        # determine if it's a _d or not.
                        if self.resolveLibrary(dllFilename):
                            thisFiles = [dllFilename.toOsSpecific()]
                        else:
                            thisFiles = [filename.toOsSpecific()]

        if len(thisFiles) > 1:
            # A glob that matched several files was not an explicit
            # request for each of them.
            explicit = False
        files += thisFiles

    prefix = ''
    if newDir is not None:
        prefix = str(Filename(newDir))
        if prefix and prefix[-1] != '/':
            prefix += '/'

    if newName:
        if len(files) != 1:
            message = 'Cannot install multiple files on target filename %s' % (newName)
            raise PackagerError(message)

    if text:
        if len(files) != 1:
            message = 'Cannot install text to multiple files'
            raise PackagerError(message)
        if not newName:
            newName = str(filenames[0])

    for filename in files:
        filename = Filename.fromOsSpecific(filename)
        basename = filename.getBasename()
        # newName wins over the newDir-derived prefix.
        name = newName
        if not name:
            name = prefix + basename

        self.currentPackage.addFile(
            filename, newName = name, extract = extract,
            explicit = explicit, executable = executable,
            text = text, deleteTemp = deleteTemp,
            dependencyDir = dependencyDir, required = required)
def do_exclude(self, filename):
""" Marks the indicated filename as not to be included. The
filename may include shell globbing characters, and may or may
not include a dirname. (If it does not include a dirname, it
refers to any file with the given basename from any
directory.)"""
if not self.currentPackage:
raise OutsideOfPackageError
filename = Filename(filename)
self.currentPackage.excludeFile(filename)
def do_includeExtensions(self, executableExtensions = None, extractExtensions = None,
imageExtensions = None, textExtensions = None,
uncompressibleExtensions = None, unprocessedExtensions = None,
suppressWarningForExtensions = None):
""" Ensure that dir() will include files with the given extensions.
The extensions should not have '.' prefixes.
All except 'suppressWarningForExtensions' allow the given kinds of files
to be packaged with their respective semantics (read the source).
'suppressWarningForExtensions' lists extensions *expected* to be ignored,
so no warnings will be emitted for them.
"""
if executableExtensions:
self.executableExtensions += executableExtensions
if extractExtensions:
self.extractExtensions += extractExtensions
if imageExtensions:
self.imageExtensions += imageExtensions
if textExtensions:
self.textExtensions += textExtensions
if uncompressibleExtensions:
self.uncompressibleExtensions += uncompressibleExtensions
if unprocessedExtensions:
self.unprocessedExtensions += unprocessedExtensions
if suppressWarningForExtensions:
self.suppressWarningForExtensions += suppressWarningForExtensions
self._ensureExtensions()
def do_dir(self, dirname, newDir = None, unprocessed = None):
""" Adds the indicated directory hierarchy to the current
package. The directory hierarchy is walked recursively, and
all files that match a known extension are added to the package.
newDir specifies the directory name within the package which
the contents of the named directory should be installed to.
If it is omitted, the contents of the named directory are
installed to the root of the package.
If unprocessed is false (the default), bam files are loaded and
scanned for textures, and these texture paths within the bam
files are manipulated to point to the new paths within the
package. If unprocessed is true, this operation is bypassed,
and bam files are packed exactly as they are.
"""
if not self.currentPackage:
raise OutsideOfPackageError
dirname = Filename(dirname)
if not newDir:
newDir = ''
# Adding the directory to sys.path is a cheesy way to help the
# modulefinder find it.
sys.path.append(dirname.toOsSpecific())
self.__recurseDir(dirname, newDir, unprocessed = unprocessed)
    def __recurseDir(self, filename, newName, unprocessed = None, packageTree = None):
        """ Recursively adds the indicated file or directory to the
        current package.  'newName' is the path the file will have
        within the package.  Called by do_dir().
        """
        if filename.isDirectory():
            # It's a directory name.  Recurse.
            prefix = newName
            if prefix and prefix[-1] != '/':
                prefix += '/'

            # First check if this is a Python package tree. If so, add it
            # implicitly as a module.
            dirList = vfs.scanDirectory(filename)
            for subfile in dirList:
                filename = subfile.getFilename()
                if filename.getBasename() == '__init__.py':
                    # Directory path -> dotted module name.
                    moduleName = newName.replace("/", ".")
                    self.addModule([moduleName], filename=filename)

            # Then descend into every entry of the directory.
            for subfile in dirList:
                filename = subfile.getFilename()
                self.__recurseDir(filename, prefix + filename.getBasename(),
                                  unprocessed = unprocessed)
            return

        elif not filename.exists():
            # It doesn't exist.  Perhaps it's a virtual file.  Ignore it.
            return

        # It's a file name.  Add it.
        ext = filename.getExtension()
        if ext == 'py':
            self.currentPackage.addFile(filename, newName = newName,
                                        explicit = False, unprocessed = unprocessed)
        else:
            if ext == 'pz' or ext == 'gz':
                # Strip off an implicit .pz extension to classify the
                # underlying file.
                newFilename = Filename(filename)
                newFilename.setExtension('')
                newFilename = Filename(str(newFilename))
                ext = newFilename.getExtension()

            if ext in self.knownExtensions:
                # Mark the file text or binary according to its
                # extension, then package it.
                if ext in self.textExtensions:
                    filename.setText()
                else:
                    filename.setBinary()
                self.currentPackage.addFile(filename, newName = newName,
                                            explicit = False, unprocessed = unprocessed)
            elif not ext in self.suppressWarningForExtensions:
                # Unknown extension: count it for a later summary,
                # optionally warning per-file in verbose mode.
                newCount = self.currentPackage.ignoredDirFiles.get(ext, 0) + 1
                self.currentPackage.ignoredDirFiles[ext] = newCount
                if self.verbosePrint:
                    self.notify.warning("ignoring file %s" % filename)
    def readContentsFile(self):
        """ Reads the contents.xml file at the beginning of
        processing.

        Resets and then repopulates self.hosts, self.maxAge,
        self.contentsSeq and self.contents from installDir/contents.xml,
        if that file exists and parses.
        """
        self.hosts = {}
        # Since we've blown away the self.hosts map, we have to make
        # sure that our own host at least is added to the map.
        self.addHost(self.host)

        self.maxAge = 0
        self.contentsSeq = SeqValue()
        self.contents = {}
        self.contentsChanged = False

        if not self.allowPackages:
            # Don't bother.
            return

        contentsFilename = Filename(self.installDir, 'contents.xml')
        doc = TiXmlDocument(contentsFilename.toOsSpecific())
        if not doc.LoadFile():
            # Couldn't read file; treat it as an empty contents set.
            return

        xcontents = doc.FirstChildElement('contents')
        if xcontents:
            # Optional cache-control attribute on the root element.
            maxAge = xcontents.Attribute('max_age')
            if maxAge:
                self.maxAge = int(maxAge)

            self.contentsSeq.loadXml(xcontents)

            # A <host> child describes the download host; adopt it.
            xhost = xcontents.FirstChildElement('host')
            if xhost:
                he = self.HostEntry()
                he.loadXml(xhost, self)
                self.hosts[he.url] = he
                self.host = he.url

            # Each <package> child describes a previously built package.
            xpackage = xcontents.FirstChildElement('package')
            while xpackage:
                pe = self.PackageEntry()
                pe.loadXml(xpackage)
                self.contents[pe.getKey()] = pe
                xpackage = xpackage.NextSiblingElement('package')
def writeContentsFile(self):
""" Rewrites the contents.xml file at the end of
processing. """
if not self.contentsChanged:
# No need to rewrite.
return
contentsFilename = Filename(self.installDir, 'contents.xml')
doc = TiXmlDocument(contentsFilename.toOsSpecific())
decl = TiXmlDeclaration("1.0", "utf-8", "")
doc.InsertEndChild(decl)
xcontents = TiXmlElement('contents')
if self.maxAge:
xcontents.SetAttribute('max_age', str(self.maxAge))
self.contentsSeq += 1
self.contentsSeq.storeXml(xcontents)
if self.host:
he = self.hosts.get(self.host, None)
if he:
xhost = he.makeXml(packager = self)
xcontents.InsertEndChild(xhost)
contents = sorted(self.contents.items())
for key, pe in contents:
xpackage = pe.makeXml()
xcontents.InsertEndChild(xpackage)
doc.InsertEndChild(xcontents)
doc.SaveFile()
# The following class and function definitions represent a few sneaky
# Python tricks to allow the pdef syntax to contain the pseudo-Python
# code they do. These tricks bind the function and class definitions
# into a bit table as they are parsed from the pdef file, so we can
# walk through that table later and perform the operations requested
# in order.
class metaclass_def(type):
    """ A metaclass is invoked by Python when the class definition is
    read, for instance to define a child class.  By defining a
    metaclass for class_p3d and class_package, we can get a callback
    when we encounter "class foo(p3d)" in the pdef file.  The callback
    actually happens after all of the code within the class scope has
    been parsed first. """

    def __new__(self, name, bases, dict):
        # NOTE: 'self' here is really the metaclass (conventionally
        # spelled 'cls'), and 'dict' shadows the builtin; both are kept
        # as-is for compatibility.
        #
        # At the point of the callback, now, "name" is the name of the
        # class we are instantiating, "bases" is the list of parent
        # classes, and "dict" is the class dictionary we have just
        # parsed.

        # If "dict" contains __metaclass__, then we must be parsing
        # class_p3d or class_ppackage, below--skip it.  But if it
        # doesn't contain __metaclass__, then we must be parsing
        # "class foo(p3d)" (or whatever) from the pdef file.
        if '__metaclass__' not in dict:
            # Get the context in which this class was created
            # (presumably, the module scope) out of the stack frame.
            frame = sys._getframe(1)
            mdict = frame.f_locals
            lineno = frame.f_lineno

            # Store the class name on a statements list in that
            # context, so we can later resolve the class names in
            # the order they appeared in the file.
            mdict.setdefault('__statements', []).append((lineno, 'class', name, None, None))
        return type.__new__(self, name, bases, dict)
# Define these dynamically to stay compatible with Python 2 and 3.
# Each is an empty class created through metaclass_def; in a pdef file,
# writing "class foo(p3d)" (etc.) then triggers metaclass_def.__new__.
# str() ensures the class name is a native str on both Python versions.
# NOTE(review): these class dicts ({}) lack '__metaclass__', so creating
# them here also runs the recording branch of metaclass_def.__new__ at
# import time -- presumably harmless; verify.
class_p3d = metaclass_def(str('class_p3d'), (), {})
class_package = metaclass_def(str('class_package'), (), {})
class_solo = metaclass_def(str('class_solo'), (), {})
class func_closure:
    """ This class is used to create a closure on the function name,
    and also allows the *args, **kw syntax.  In Python, the lambda
    syntax, used with default parameters, is used more often to create
    a closure (that is, a binding of one or more variables into a
    callable object), but that syntax doesn't work with **kw.
    Fortunately, a class method is also a form of a closure, because
    it binds self; and this doesn't have any syntax problems with
    **kw. """

    def __init__(self, name):
        # The pdef-level function name this instance stands in for.
        self.name = name

    def generic_func(self, *args, **kw):
        """ This method is bound to all the functions that might be
        called from the pdef file.  It's a special function; when it is
        called, it does nothing but store its name and arguments in the
        caller's local scope, where they can be pulled out later. """

        # Get the context in which this function was called (presumably,
        # the class dictionary being built) out of the stack frame.
        frame = sys._getframe(1)
        cldict = frame.f_locals
        lineno = frame.f_lineno

        # Store the function on a statements list in that context, so we
        # can later walk through the function calls for each class in
        # source order.
        cldict.setdefault('__statements', []).append((lineno, 'func', self.name, args, kw))
|
axsauze/eventsfinder | refs/heads/master | django/core/mail/backends/locmem.py | 227 | """
Backend for test environment.
"""
from django.core import mail
from django.core.mail.backends.base import BaseEmailBackend
class EmailBackend(BaseEmailBackend):
    """A email backend for use during test sessions.

    The test connection stores email messages in a dummy outbox,
    rather than sending them out on the wire.

    The dummy outbox is accessible through the outbox instance attribute.
    """

    def __init__(self, *args, **kwargs):
        super(EmailBackend, self).__init__(*args, **kwargs)
        # Lazily create the shared module-level outbox on first use.
        if not hasattr(mail, 'outbox'):
            mail.outbox = []

    def send_messages(self, messages):
        """Redirect messages to the dummy outbox"""
        for outgoing in messages:
            # Calling .message() forces header validation before
            # the message is considered "sent".
            outgoing.message()
        mail.outbox.extend(messages)
        return len(messages)
|
ashwinreddy/rlg | refs/heads/master | test.py | 1 | import argparse
import logging
import sys
import gym
from gym import wrappers
class RandomAgent(object):
    """The world's simplest agent!

    Ignores every observation and simply samples a random action from
    the environment's action space.
    """

    def __init__(self, action_space):
        # Anything exposing a .sample() method works here.
        self.action_space = action_space

    def act(self, observation, reward, done):
        """Return a random action; all inputs are ignored."""
        return self.action_space.sample()
if __name__ == '__main__':
    # Command-line entry point: run a RandomAgent against the requested
    # gym environment and record the results with gym's Monitor wrapper.
    parser = argparse.ArgumentParser(description=None)
    parser.add_argument('env_id', nargs='?', default='CartPole-v0', help='Select the environment to run')
    args = parser.parse_args()

    # Call `undo_logger_setup` if you want to undo Gym's logger setup
    # and configure things manually. (The default should be fine most
    # of the time.)
    gym.undo_logger_setup()
    logger = logging.getLogger()
    formatter = logging.Formatter('[%(asctime)s] %(message)s')
    handler = logging.StreamHandler(sys.stderr)
    handler.setFormatter(formatter)
    logger.addHandler(handler)

    # You can set the level to logging.DEBUG or logging.WARN if you
    # want to change the amount of output.
    logger.setLevel(logging.INFO)

    env = gym.make(args.env_id)

    # You provide the directory to write to (can be an existing
    # directory, including one with existing data -- all monitor files
    # will be namespaced). You can also dump to a tempdir if you'd
    # like: tempfile.mkdtemp().
    outdir = '/tmp/random-agent-results'
    # NOTE(review): Monitor is used here as a wrapper factory that is
    # then applied to env; most gym versions expect
    # wrappers.Monitor(env, outdir, force=True) instead -- confirm
    # against the installed gym version.
    env = wrappers.Monitor(directory=outdir, force=True)(env)
    env.seed(0)
    agent = RandomAgent(env.action_space)

    episode_count = 100
    reward = 0
    done = False

    # Run the fixed number of episodes; each episode loops until the
    # environment reports done.
    for i in range(episode_count):
        ob = env.reset()
        while True:
            action = agent.act(ob, reward, done)
            ob, reward, done, _ = env.step(action)
            if done:
                break
            # Note there's no env.render() here. But the environment still can open window and
            # render if asked by env.monitor: it calls env.render('rgb_array') to record video.
            # Video is not recorded every episode, see capped_cubic_video_schedule for details.

    # Close the env and write monitor result info to disk
    env.close()
|
Johnzero/OE7 | refs/heads/master | openerp/report/render/rml2pdf/color.py | 443 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from reportlab.lib import colors
import re
# All named colors known to reportlab, keyed by name (e.g. 'red').
allcols = colors.getAllNamedColors()

# Matches a decimal tuple spec such as '(0.5,0.5,1.0)'.
# Raw strings avoid invalid-escape warnings on modern Python.
regex_t = re.compile(r'\(([0-9\.]*),([0-9\.]*),([0-9\.]*)\)')

# Matches a hex spec such as '#1a2B3c'.  The character class is
# restricted to valid hex digits ([0-9a-fA-F], not [0-9a-zA-Z]) so that
# strings like '#zzzzzz' no longer match and then crash get() with a
# ValueError inside int(..., 16); they now fall through to the default.
regex_h = re.compile(r'#([0-9a-fA-F][0-9a-fA-F])([0-9a-fA-F][0-9a-fA-F])([0-9a-fA-F][0-9a-fA-F])')
def get(col_str):
    """ Converts a color specification string into a reportlab color.

    Accepted forms:
      * a color name known to reportlab, e.g. 'blue';
      * a decimal tuple '(r,g,b)' with float components in 0.0-1.0;
      * a hex triplet '#rrggbb'.

    A None or unrecognized specification falls back to colors.red so a
    bad value is visibly wrong rather than a crash.
    """
    if col_str is None:
        col_str = ''
    # Named color lookup first.  (Membership test directly on the dict;
    # no need for .keys(), and no 'global' needed for read-only access.)
    if col_str in allcols:
        return allcols[col_str]
    # '(r,g,b)' decimal tuple form.
    res = regex_t.search(col_str, 0)
    if res:
        return float(res.group(1)), float(res.group(2)), float(res.group(3))
    # '#rrggbb' hex form; scale each byte to a 0.0-1.0 float.
    # float() before the division keeps true division under Python 2.
    res = regex_h.search(col_str, 0)
    if res:
        return tuple([float(int(res.group(i), 16)) / 255 for i in range(1, 4)])
    return colors.red
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.