| repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars) |
|---|---|---|---|---|
eddiel/Play
|
refs/heads/master
|
gluon/main.py
|
9
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
| This file is part of the web2py Web Framework
| Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
The gluon wsgi application
---------------------------
"""
if False: import import_all # DO NOT REMOVE PART OF FREEZE PROCESS
import gc
import Cookie
import os
import re
import copy
import sys
import time
import datetime
import signal
import socket
import random
import urllib2
import string
try:
import simplejson as sj #external installed library
except:
try:
import json as sj #standard installed library
except:
import gluon.contrib.simplejson as sj #pure python library
from thread import allocate_lock
from gluon.fileutils import abspath, write_file
from gluon.settings import global_settings
from gluon.utils import web2py_uuid
from gluon.admin import add_path_first, create_missing_folders, create_missing_app_folders
from gluon.globals import current
# Remarks:
# the calling script has inserted the path to the script directory into sys.path
# applications_parent (path to applications/, site-packages/ etc.)
# defaults to that directory; sys.path is set to
# ("", gluon_parent/site-packages, gluon_parent, ...)
#
# this is wrong:
# web2py_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# because we do not want the path to this file which may be Library.zip
# gluon_parent is the directory containing gluon, web2py.py, logging.conf
# and the handlers.
# applications_parent (web2py_path) is the directory containing applications/
# and routes.py
# The two are identical unless web2py_path is changed via the web2py.py -f folder option
# main.web2py_path is the same as applications_parent (for backward compatibility)
web2py_path = global_settings.applications_parent # backward compatibility
create_missing_folders()
# set up logging for subsequent imports
import logging
import logging.config
# This is needed to prevent an exception on Python 2.5:
# NameError: name 'gluon' is not defined
# See http://bugs.python.org/issue1436
# attention! importing Tkinter in messageboxhandler changes the locale ...
import gluon.messageboxhandler
logging.gluon = gluon
# so we must restore it! Thanks ozancag
import locale
locale.setlocale(locale.LC_CTYPE, "C") # IMPORTANT, web2py requires locale "C"
exists = os.path.exists
pjoin = os.path.join
logpath = abspath("logging.conf")
if exists(logpath):
logging.config.fileConfig(abspath("logging.conf"))
else:
logging.basicConfig()
logger = logging.getLogger("web2py")
from gluon.restricted import RestrictedError
from gluon.http import HTTP, redirect
from gluon.globals import Request, Response, Session
from gluon.compileapp import build_environment, run_models_in, \
run_controller_in, run_view_in
from gluon.contenttype import contenttype
from gluon.dal import BaseAdapter
from gluon.validators import CRYPT
from gluon.html import URL, xmlescape
from gluon.utils import is_valid_ip_address, getipaddrinfo
from gluon.rewrite import load, url_in, THREAD_LOCAL as rwthread, \
try_rewrite_on_error, fixup_missing_path_info
from gluon import newcron
__all__ = ['wsgibase', 'save_password', 'appfactory', 'HttpServer']
requests = 0 # gc timer
# Security Checks: validate URL and session_id here,
# accept_language is validated in languages
# pattern used to validate client address
regex_client = re.compile('[\w\-:]+(\.[\w\-]+)*\.?') # ## to account for IPV6
try:
version_info = open(pjoin(global_settings.gluon_parent, 'VERSION'), 'r')
raw_version_string = version_info.read().split()[-1].strip()
version_info.close()
global_settings.web2py_version = raw_version_string
web2py_version = global_settings.web2py_version
except:
raise RuntimeError("Cannot determine web2py version")
try:
from gluon import rocket
except:
if not global_settings.web2py_runtime_gae:
logger.warn('unable to import Rocket')
load()
HTTPS_SCHEMES = set(('https', 'HTTPS'))
def get_client(env):
"""
Guesses the client address from the environment variables.
First tries 'http_x_forwarded_for', then 'remote_addr';
if both fail, assumes '127.0.0.1' or '::1' (running locally).
"""
eget = env.get
g = regex_client.search(eget('http_x_forwarded_for', ''))
client = (g.group() or '').split(',')[0] if g else None
if client in (None, '', 'unknown'):
g = regex_client.search(eget('remote_addr', ''))
if g:
client = g.group()
elif env.http_host.startswith('['): # IPv6
client = '::1'
else:
client = '127.0.0.1' # IPv4
if not is_valid_ip_address(client):
raise HTTP(400, "Bad Request (request.client=%s)" % client)
return client
def serve_controller(request, response, session):
"""
This function is used to generate a dynamic page.
It first runs all models, then runs the function in the controller,
and then tries to render the output using a view/template.
This function must run from the [application] folder.
A typical example would be the call to the url
/[application]/[controller]/[function] that would result in a call
to [function]() in applications/[application]/[controller].py
rendered by applications/[application]/views/[controller]/[function].html
"""
# ##################################################
# build environment for controller and view
# ##################################################
environment = build_environment(request, response, session)
# set default view, controller can override it
response.view = '%s/%s.%s' % (request.controller,
request.function,
request.extension)
# also, make sure the flash is passed through
# ##################################################
# process models, controller and view (if required)
# ##################################################
run_models_in(environment)
response._view_environment = copy.copy(environment)
page = run_controller_in(request.controller, request.function, environment)
if isinstance(page, dict):
response._vars = page
response._view_environment.update(page)
run_view_in(response._view_environment)
page = response.body.getvalue()
# logic to garbage collect after exec, not always, once every 100 requests
global requests
requests = ('requests' in globals()) and (requests + 1) % 100 or 0
if not requests:
gc.collect()
# end garbage collection logic
# ##################################################
# set default headers if not set
# ##################################################
default_headers = [
('Content-Type', contenttype('.' + request.extension)),
('Cache-Control',
'no-store, no-cache, must-revalidate, post-check=0, pre-check=0'),
('Expires', time.strftime('%a, %d %b %Y %H:%M:%S GMT',
time.gmtime())),
('Pragma', 'no-cache')]
for key, value in default_headers:
response.headers.setdefault(key, value)
raise HTTP(response.status, page, **response.headers)
class LazyWSGI(object):
def __init__(self, environ, request, response):
self.wsgi_environ = environ
self.request = request
self.response = response
@property
def environ(self):
if not hasattr(self,'_environ'):
new_environ = self.wsgi_environ
new_environ['wsgi.input'] = self.request.body
new_environ['wsgi.version'] = 1
self._environ = new_environ
return self._environ
def start_response(self,status='200', headers=[], exec_info=None):
"""
In a controller you can use:
- request.wsgi.environ
- request.wsgi.start_response
to call third party WSGI applications
"""
self.response.status = str(status).split(' ', 1)[0]
self.response.headers = dict(headers)
return lambda *args, **kargs: \
self.response.write(escape=False, *args, **kargs)
def middleware(self,*middleware_apps):
"""
In your controller use::
@request.wsgi.middleware(middleware1, middleware2, ...)
to decorate actions with WSGI middleware. Actions must return strings.
uses a simulated environment so it may have weird behavior in some cases
"""
def middleware(f):
def app(environ, start_response):
data = f()
start_response(self.response.status,
self.response.headers.items())
if isinstance(data, list):
return data
return [data]
for item in middleware_apps:
app = item(app)
def caller(app):
return app(self.environ, self.start_response)
return lambda caller=caller, app=app: caller(app)
return middleware
def wsgibase(environ, responder):
"""
The gluon wsgi application. The first function called when a page
is requested (static or dynamic). It can be called by paste.httpserver
or by apache mod_wsgi (or any WSGI-compatible server).
- fills request with info
- the environment variables, replacing '.' with '_'
- adds web2py path and version info
- compensates for fcgi missing path_info and query_string
- validates the path in url
The url path must be either:
1. for static pages:
- /<application>/static/<file>
2. for dynamic pages:
- /<application>[/<controller>[/<function>[/<sub>]]][.<extension>]
The naming conventions are:
- application, controller, function and extension may only contain
`[a-zA-Z0-9_]`
- file and sub may also contain '-', '=', '.' and '/'
"""
eget = environ.get
current.__dict__.clear()
request = Request(environ)
response = Response()
session = Session()
env = request.env
#env.web2py_path = global_settings.applications_parent
env.web2py_version = web2py_version
#env.update(global_settings)
static_file = False
try:
try:
try:
# ##################################################
# handle fcgi missing path_info and query_string
# select rewrite parameters
# rewrite incoming URL
# parse rewritten header variables
# parse rewritten URL
# serve file if static
# ##################################################
fixup_missing_path_info(environ)
(static_file, version, environ) = url_in(request, environ)
response.status = env.web2py_status_code or response.status
if static_file:
if eget('QUERY_STRING', '').startswith('attachment'):
response.headers['Content-Disposition'] \
= 'attachment'
if version:
response.headers['Cache-Control'] = 'max-age=315360000'
response.headers[
'Expires'] = 'Thu, 31 Dec 2037 23:59:59 GMT'
response.stream(static_file, request=request)
# ##################################################
# fill in request items
# ##################################################
app = request.application # must go after url_in!
if not global_settings.local_hosts:
local_hosts = set(['127.0.0.1', '::ffff:127.0.0.1', '::1'])
if not global_settings.web2py_runtime_gae:
try:
fqdn = socket.getfqdn()
local_hosts.add(socket.gethostname())
local_hosts.add(fqdn)
local_hosts.update([
addrinfo[4][0] for addrinfo
in getipaddrinfo(fqdn)])
if env.server_name:
local_hosts.add(env.server_name)
local_hosts.update([
addrinfo[4][0] for addrinfo
in getipaddrinfo(env.server_name)])
except (socket.gaierror, TypeError):
pass
global_settings.local_hosts = list(local_hosts)
else:
local_hosts = global_settings.local_hosts
client = get_client(env)
x_req_with = str(env.http_x_requested_with).lower()
request.update(
client = client,
folder = abspath('applications', app) + os.sep,
ajax = x_req_with == 'xmlhttprequest',
cid = env.http_web2py_component_element,
is_local = env.remote_addr in local_hosts,
is_https = env.wsgi_url_scheme in HTTPS_SCHEMES or \
request.env.http_x_forwarded_proto in HTTPS_SCHEMES \
or env.https == 'on'
)
request.compute_uuid() # requires client
request.url = environ['PATH_INFO']
# ##################################################
# access the requested application
# ##################################################
disabled = pjoin(request.folder, 'DISABLED')
if not exists(request.folder):
if app == rwthread.routes.default_application \
and app != 'welcome':
redirect(URL('welcome', 'default', 'index'))
elif rwthread.routes.error_handler:
_handler = rwthread.routes.error_handler
redirect(URL(_handler['application'],
_handler['controller'],
_handler['function'],
args=app))
else:
raise HTTP(404, rwthread.routes.error_message
% 'invalid request',
web2py_error='invalid application')
elif not request.is_local and exists(disabled):
raise HTTP(503, "<html><body><h1>Temporarily down for maintenance</h1></body></html>")
# ##################################################
# build missing folders
# ##################################################
create_missing_app_folders(request)
# ##################################################
# get the GET and POST data
# ##################################################
#parse_get_post_vars(request, environ)
# ##################################################
# expose wsgi hooks for convenience
# ##################################################
request.wsgi = LazyWSGI(environ, request, response)
# ##################################################
# load cookies
# ##################################################
if env.http_cookie:
try:
request.cookies.load(env.http_cookie)
except Cookie.CookieError, e:
pass # invalid cookies
# ##################################################
# try load session or create new session file
# ##################################################
if not env.web2py_disable_session:
session.connect(request, response)
# ##################################################
# run controller
# ##################################################
if global_settings.debugging and app != "admin":
import gluon.debug
# activate the debugger
gluon.debug.dbg.do_debug(mainpyfile=request.folder)
serve_controller(request, response, session)
except HTTP, http_response:
if static_file:
return http_response.to(responder, env=env)
if request.body:
request.body.close()
if hasattr(current,'request'):
# ##################################################
# on success, try store session in database
# ##################################################
session._try_store_in_db(request, response)
# ##################################################
# on success, commit database
# ##################################################
if response.do_not_commit is True:
BaseAdapter.close_all_instances(None)
elif response.custom_commit:
BaseAdapter.close_all_instances(response.custom_commit)
else:
BaseAdapter.close_all_instances('commit')
# ##################################################
# if session not in db try store session on filesystem
# this must be done after trying to commit database!
# ##################################################
session._try_store_in_cookie_or_file(request, response)
# Set header so client can distinguish component requests.
if request.cid:
http_response.headers.setdefault(
'web2py-component-content', 'replace')
if request.ajax:
if response.flash:
http_response.headers['web2py-component-flash'] = \
urllib2.quote(xmlescape(response.flash)\
.replace('\n',''))
if response.js:
http_response.headers['web2py-component-command'] = \
urllib2.quote(response.js.replace('\n',''))
# ##################################################
# store cookies in headers
# ##################################################
session._fixup_before_save()
http_response.cookies2headers(response.cookies)
ticket = None
except RestrictedError, e:
if request.body:
request.body.close()
# ##################################################
# on application error, rollback database
# ##################################################
# log tickets before rollback if not in DB
if not request.tickets_db:
ticket = e.log(request) or 'unknown'
# rollback
if response._custom_rollback:
response._custom_rollback()
else:
BaseAdapter.close_all_instances('rollback')
# if tickets in db, reconnect and store it in db
if request.tickets_db:
ticket = e.log(request) or 'unknown'
http_response = \
HTTP(500, rwthread.routes.error_message_ticket %
dict(ticket=ticket),
web2py_error='ticket %s' % ticket)
except:
if request.body:
request.body.close()
# ##################################################
# on application error, rollback database
# ##################################################
try:
if response._custom_rollback:
response._custom_rollback()
else:
BaseAdapter.close_all_instances('rollback')
except:
pass
e = RestrictedError('Framework', '', '', locals())
ticket = e.log(request) or 'unrecoverable'
http_response = \
HTTP(500, rwthread.routes.error_message_ticket
% dict(ticket=ticket),
web2py_error='ticket %s' % ticket)
finally:
if response and hasattr(response, 'session_file') \
and response.session_file:
response.session_file.close()
session._unlock(response)
http_response, new_environ = try_rewrite_on_error(
http_response, request, environ, ticket)
if not http_response:
return wsgibase(new_environ, responder)
if global_settings.web2py_crontype == 'soft':
newcron.softcron(global_settings.applications_parent).start()
return http_response.to(responder, env=env)
def save_password(password, port):
"""
Used by main() to save the password in the parameters_<port>.py file.
"""
password_file = abspath('parameters_%i.py' % port)
if password == '<random>':
# make up a new password
chars = string.letters + string.digits
password = ''.join([random.choice(chars) for i in range(8)])
cpassword = CRYPT()(password)[0]
print '******************* IMPORTANT!!! ************************'
print 'your admin password is "%s"' % password
print '*********************************************************'
elif password == '<recycle>':
# reuse the current password if any
if exists(password_file):
return
else:
password = ''
elif password.startswith('<pam_user:'):
# use the pam password for specified user
cpassword = password[1:-1]
else:
# use provided password
cpassword = CRYPT()(password)[0]
fp = open(password_file, 'w')
if password:
fp.write('password="%s"\n' % cpassword)
else:
fp.write('password=None\n')
fp.close()
def appfactory(wsgiapp=wsgibase,
logfilename='httpserver.log',
profiler_dir=None,
profilerfilename=None):
"""
generates a wsgi application that does logging and profiling and calls
wsgibase
Args:
wsgiapp: the base application
logfilename: where to store apache-compatible requests log
profiler_dir: where to store profile files
"""
if profilerfilename is not None:
raise BaseException("Deprecated API")
if profiler_dir:
profiler_dir = abspath(profiler_dir)
logger.warn('profiler is on. will use dir %s', profiler_dir)
if not os.path.isdir(profiler_dir):
try:
os.makedirs(profiler_dir)
except:
raise BaseException("Can't create dir %s" % profiler_dir)
filepath = pjoin(profiler_dir, 'wtest')
try:
filehandle = open( filepath, 'w' )
filehandle.close()
os.unlink(filepath)
except IOError:
raise BaseException("Unable to write to dir %s" % profiler_dir)
def app_with_logging(environ, responder):
"""
a wsgi app that does logging and profiling and calls wsgibase
"""
status_headers = []
def responder2(s, h):
"""
wsgi responder app
"""
status_headers.append(s)
status_headers.append(h)
return responder(s, h)
time_in = time.time()
ret = [0]
if not profiler_dir:
ret[0] = wsgiapp(environ, responder2)
else:
import cProfile
prof = cProfile.Profile()
prof.enable()
ret[0] = wsgiapp(environ, responder2)
prof.disable()
destfile = pjoin(profiler_dir, "req_%s.prof" % web2py_uuid())
prof.dump_stats(destfile)
try:
line = '%s, %s, %s, %s, %s, %s, %f\n' % (
environ['REMOTE_ADDR'],
datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S'),
environ['REQUEST_METHOD'],
environ['PATH_INFO'].replace(',', '%2C'),
environ['SERVER_PROTOCOL'],
(status_headers[0])[:3],
time.time() - time_in,
)
if not logfilename:
sys.stdout.write(line)
elif isinstance(logfilename, str):
write_file(logfilename, line, 'a')
else:
logfilename.write(line)
except:
pass
return ret[0]
return app_with_logging
class HttpServer(object):
"""
the web2py web server (Rocket)
"""
def __init__(
self,
ip='127.0.0.1',
port=8000,
password='',
pid_filename='httpserver.pid',
log_filename='httpserver.log',
profiler_dir=None,
ssl_certificate=None,
ssl_private_key=None,
ssl_ca_certificate=None,
min_threads=None,
max_threads=None,
server_name=None,
request_queue_size=5,
timeout=10,
socket_timeout=1,
shutdown_timeout=None, # Rocket does not use a shutdown timeout
path=None,
interfaces=None # Rocket is able to use several interfaces - must be list of socket-tuples as string
):
"""
starts the web server.
"""
if interfaces:
# if interfaces is specified, it must be tested for rocket parameter correctness
# not necessarily completely tested (e.g. content of tuples or ip-format)
import types
if isinstance(interfaces, types.ListType):
for i in interfaces:
if not isinstance(i, types.TupleType):
raise "Wrong format for rocket interfaces parameter - see http://packages.python.org/rocket/"
else:
raise "Wrong format for rocket interfaces parameter - see http://packages.python.org/rocket/"
if path:
# if a path is specified change the global variables so that web2py
# runs from there instead of cwd or os.environ['web2py_path']
global web2py_path
path = os.path.normpath(path)
web2py_path = path
global_settings.applications_parent = path
os.chdir(path)
[add_path_first(p) for p in (path, abspath('site-packages'), "")]
if exists("logging.conf"):
logging.config.fileConfig("logging.conf")
save_password(password, port)
self.pid_filename = pid_filename
if not server_name:
server_name = socket.gethostname()
logger.info('starting web server...')
rocket.SERVER_NAME = server_name
rocket.SOCKET_TIMEOUT = socket_timeout
sock_list = [ip, port]
if not ssl_certificate or not ssl_private_key:
logger.info('SSL is off')
elif not rocket.ssl:
logger.warning('Python "ssl" module unavailable. SSL is OFF')
elif not exists(ssl_certificate):
logger.warning('unable to open SSL certificate. SSL is OFF')
elif not exists(ssl_private_key):
logger.warning('unable to open SSL private key. SSL is OFF')
else:
sock_list.extend([ssl_private_key, ssl_certificate])
if ssl_ca_certificate:
sock_list.append(ssl_ca_certificate)
logger.info('SSL is ON')
app_info = {'wsgi_app': appfactory(wsgibase,
log_filename,
profiler_dir)}
self.server = rocket.Rocket(interfaces or tuple(sock_list),
method='wsgi',
app_info=app_info,
min_threads=min_threads,
max_threads=max_threads,
queue_size=int(request_queue_size),
timeout=int(timeout),
handle_signals=False,
)
def start(self):
"""
start the web server
"""
try:
signal.signal(signal.SIGTERM, lambda a, b, s=self: s.stop())
signal.signal(signal.SIGINT, lambda a, b, s=self: s.stop())
except:
pass
write_file(self.pid_filename, str(os.getpid()))
self.server.start()
def stop(self, stoplogging=False):
"""
stop cron and the web server
"""
newcron.stopcron()
self.server.stop(stoplogging)
try:
os.unlink(self.pid_filename)
except:
pass
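# Hedged usage sketch (not part of the original module): how a launcher
# script could wire HttpServer together, based only on the constructor
# arguments defined above. web2py's own web2py.py passes more options;
# the values below are illustrative assumptions.
if __name__ == '__main__':
    server = HttpServer(ip='127.0.0.1',
                        port=8000,
                        password='<random>',  # save_password() prints the generated password
                        pid_filename='httpserver.pid',
                        log_filename='httpserver.log')
    server.start()  # blocks; SIGTERM/SIGINT are wired to server.stop()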
|
jairideout/qiime2
|
refs/heads/master
|
qiime2/core/__init__.py
|
43
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2017, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
|
mrquim/repository.mrquim
|
refs/heads/master
|
repo/script.module.youtube.dl/lib/youtube_dl/extractor/jamendo.py
|
43
|
# coding: utf-8
from __future__ import unicode_literals
import re
from ..compat import compat_urlparse
from .common import InfoExtractor
from ..utils import parse_duration
class JamendoBaseIE(InfoExtractor):
def _extract_meta(self, webpage, fatal=True):
title = self._og_search_title(
webpage, default=None) or self._search_regex(
r'<title>([^<]+)', webpage,
'title', default=None)
if title:
title = self._search_regex(
r'(.+?)\s*\|\s*Jamendo Music', title, 'title', default=None)
if not title:
title = self._html_search_meta(
'name', webpage, 'title', fatal=fatal)
mobj = re.search(r'(.+) - (.+)', title or '')
artist, second = mobj.groups() if mobj else [None] * 2
return title, artist, second
class JamendoIE(JamendoBaseIE):
_VALID_URL = r'https?://(?:www\.)?jamendo\.com/track/(?P<id>[0-9]+)/(?P<display_id>[^/?#&]+)'
_TEST = {
'url': 'https://www.jamendo.com/track/196219/stories-from-emona-i',
'md5': '6e9e82ed6db98678f171c25a8ed09ffd',
'info_dict': {
'id': '196219',
'display_id': 'stories-from-emona-i',
'ext': 'flac',
'title': 'Maya Filipič - Stories from Emona I',
'artist': 'Maya Filipič',
'track': 'Stories from Emona I',
'duration': 210,
'thumbnail': r're:^https?://.*\.jpg'
}
}
def _real_extract(self, url):
mobj = self._VALID_URL_RE.match(url)
track_id = mobj.group('id')
display_id = mobj.group('display_id')
webpage = self._download_webpage(url, display_id)
title, artist, track = self._extract_meta(webpage)
formats = [{
'url': 'https://%s.jamendo.com/?trackid=%s&format=%s&from=app-97dab294'
% (sub_domain, track_id, format_id),
'format_id': format_id,
'ext': ext,
'quality': quality,
} for quality, (format_id, sub_domain, ext) in enumerate((
('mp31', 'mp3l', 'mp3'),
('mp32', 'mp3d', 'mp3'),
('ogg1', 'ogg', 'ogg'),
('flac', 'flac', 'flac'),
))]
self._sort_formats(formats)
thumbnail = self._html_search_meta(
'image', webpage, 'thumbnail', fatal=False)
duration = parse_duration(self._search_regex(
r'<span[^>]+itemprop=["\']duration["\'][^>]+content=["\'](.+?)["\']',
webpage, 'duration', fatal=False))
return {
'id': track_id,
'display_id': display_id,
'thumbnail': thumbnail,
'title': title,
'duration': duration,
'artist': artist,
'track': track,
'formats': formats
}
class JamendoAlbumIE(JamendoBaseIE):
_VALID_URL = r'https?://(?:www\.)?jamendo\.com/album/(?P<id>[0-9]+)/(?P<display_id>[\w-]+)'
_TEST = {
'url': 'https://www.jamendo.com/album/121486/duck-on-cover',
'info_dict': {
'id': '121486',
'title': 'Shearer - Duck On Cover'
},
'playlist': [{
'md5': 'e1a2fcb42bda30dfac990212924149a8',
'info_dict': {
'id': '1032333',
'ext': 'flac',
'title': 'Shearer - Warmachine',
'artist': 'Shearer',
'track': 'Warmachine',
}
}, {
'md5': '1f358d7b2f98edfe90fd55dac0799d50',
'info_dict': {
'id': '1032330',
'ext': 'flac',
'title': 'Shearer - Without Your Ghost',
'artist': 'Shearer',
'track': 'Without Your Ghost',
}
}],
'params': {
'playlistend': 2
}
}
def _real_extract(self, url):
mobj = self._VALID_URL_RE.match(url)
album_id = mobj.group('id')
webpage = self._download_webpage(url, mobj.group('display_id'))
title, artist, album = self._extract_meta(webpage, fatal=False)
entries = [{
'_type': 'url_transparent',
'url': compat_urlparse.urljoin(url, m.group('path')),
'ie_key': JamendoIE.ie_key(),
'id': self._search_regex(
r'/track/(\d+)', m.group('path'), 'track id', default=None),
'artist': artist,
'album': album,
} for m in re.finditer(
r'<a[^>]+href=(["\'])(?P<path>(?:(?!\1).)+)\1[^>]+class=["\'][^>]*js-trackrow-albumpage-link',
webpage)]
return self.playlist_result(entries, album_id, title)
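# Hedged usage sketch (not part of the original extractor): youtube-dl picks
# JamendoIE up through its extractor registry, so a direct invocation looks
# roughly like the commented lines below. The YoutubeDL options are
# illustrative assumptions.
#
#     from youtube_dl import YoutubeDL
#
#     with YoutubeDL({'skip_download': True}) as ydl:
#         info = ydl.extract_info(
#             'https://www.jamendo.com/track/196219/stories-from-emona-i')
#         print(info['id'], info['duration'])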
|
xsank/SVNchecker
|
refs/heads/master
|
structure/transaction.py
|
1
|
'''
Created on 2013-10-8
@author: xsank
'''
import os
import shutil
import tempfile
import process
from checker import jsonvalid
from checker import xmlvalid
from checker import logvalid
class Transaction:
def __init__(self,repos,txn):
self.repos=repos
self.txn=str(txn)
try:
int(self.txn)
self.type="revision"
except ValueError:
self.type="transaction"
self.tmpdir=tempfile.mkdtemp()
def __executeSVN(self,command,arg="",split=False):
command="svnlook --%s %s %s %s %s" % (self.type,self.txn,command,self.repos,arg)
output=process.execute(command)
if split:
output=[x.strip() for x in output.split("\n") if x.strip()]
return output
def get_file(self,filename):
tmpfile=os.path.join(self.tmpdir,filename)
if(os.path.exists(tmpfile)):
return tmpfile
content=self.__executeSVN("cat", "\""+filename+"\"")
dirname=os.path.dirname(filename)
tmpdir=os.path.join(self.tmpdir,dirname)
if dirname and not os.path.exists(tmpdir):
os.makedirs(tmpdir)
fd=open(tmpfile,"w")
fd.write(content)
fd.flush()
fd.close()
return tmpfile
def get_files(self):
output=self.__executeSVN("changed", split=True)
files={}
for entry in output:
attrs=entry[0:3].strip()
filename=entry[4:].strip()
if(len(filename.split("."))>1):
files[filename]=attrs
return files
def get_user(self):
user=self.__executeSVN("author")
return user.strip()
def get_cmtlog(self):
output=self.__executeSVN("info",split=True)
temp=output[3:]
msg="".join(temp)
return msg.strip()
def cleanup(self):
shutil.rmtree(self.tmpdir)
def check(self):
msg,exitCode=logvalid.run(self.get_cmtlog())
if exitCode!=0:
return (msg,exitCode)
else:
for file in self.get_files().keys():
msg=jsonvalid.run(self.get_file(file))[0] or xmlvalid.run(self.get_file(file))[0]
exitCode=jsonvalid.run(self.get_file(file))[1] or xmlvalid.run(self.get_file(file))[1]
return (msg,exitCode)
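# Hedged usage sketch (not part of the original module): a minimal SVN
# pre-commit hook built on Transaction. The $REPOS $TXN argument order is
# the standard Subversion hook contract; how the surrounding checker
# scripts actually invoke this class is an assumption.
if __name__ == '__main__':
    import sys
    repos, txn = sys.argv[1], sys.argv[2]
    transaction = Transaction(repos, txn)
    try:
        msg, exit_code = transaction.check()
    finally:
        transaction.cleanup()
    if exit_code != 0:
        sys.stderr.write(msg + "\n")
    sys.exit(exit_code)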
|
lbernail/demotiad
|
refs/heads/master
|
app/vote/app.py
|
1
|
from flask import Flask, render_template, request, make_response, g
from redis import Redis
import os
import socket
import random
import json
import consul
option_a = os.getenv('OPTION_A', "vim")
option_b = os.getenv('OPTION_B', "emacs")
color = os.getenv('COLOR', "white")
consul_host = os.getenv('CONSUL_HOST', None)
hostname = socket.gethostname()
app = Flask(__name__)
def get_consul():
if not hasattr(g, 'consul'):
if consul_host is None:
g.consul = None
else:
g.consul=consul.Consul(host=consul_host,port=8500)
return g.consul
def get_redis():
if not hasattr(g, 'redis'):
consul=get_consul()
redis_host = 'redis'
redis_port = '6379'
if consul is not None:
(index,redis_svc) = consul.catalog.service('redis')
if len(redis_svc)>0:
redis_host = redis_svc[0]["ServiceAddress"]
if redis_host == "":
redis_host = redis_svc[0]["Address"]
redis_port = redis_svc[0]["ServicePort"]
g.redis = Redis(host=redis_host, port=redis_port, db=0, socket_timeout=5)
return g.redis
def set_scores(voter_id,vote):
redis = get_redis()
stored_vote = redis.hget('votes',voter_id)
if stored_vote != vote:
redis.hset('votes',voter_id,vote)
redis.zincrby('scores',vote,1)
if stored_vote is not None:
# Existing vote, removing
redis.zincrby('scores',stored_vote,-1)
def get_scores():
redis = get_redis()
score_a = redis.zscore('scores','a')
if score_a is None:
score_a = 0
score_b = redis.zscore('scores','b')
if score_b is None:
score_b = 0
return (score_a, score_b)
def is_enabled_feature(feature,color):
consul = get_consul()
if consul is not None:
index, key = consul.kv.get("features/"+feature+"/"+color)
if key is not None:
if key["Value"] == "enabled":
return True
return False
def get_param(param,color,default_value):
consul = get_consul()
if consul is not None:
index, key = consul.kv.get("params/"+param+"/"+color)
if key is not None:
return key["Value"]
return default_value
@app.route("/", methods=['POST','GET'])
def hello():
voter_id = request.cookies.get('voter_id')
if not voter_id:
voter_id = hex(random.getrandbits(64))[2:-1]
vote = None
if request.method == 'POST':
vote = request.form['vote']
set_scores(voter_id,vote)
(score_a,score_b) = get_scores()
score_a = str(int(score_a))
score_b = str(int(score_b))
title=get_param("title",color,"DEVOPS D-DAY")
message = "Served by stack " + color
if is_enabled_feature("containerid",color):
message=message+" on container "+ hostname
display_banner = is_enabled_feature("banner",color)
resp = make_response(render_template(
'index.html',
title=title,
option_a=option_a,
option_b=option_b,
score_a=score_a,
score_b=score_b,
message=message,
display_banner=display_banner,
vote=vote
))
resp.set_cookie('voter_id', voter_id)
resp.headers["X-Color"] = color
return resp
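# Hedged usage sketch (not part of the original app): exercising the voting
# view with Flask's built-in test client. It assumes a reachable Redis
# instance (host 'redis' or one registered in Consul), since hello() hits
# Redis through get_redis() on every request.
def _demo_vote_request():
    with app.test_client() as client:
        response = client.post('/', data={'vote': 'a'})
        print("%s %s" % (response.status_code, response.headers.get('X-Color')))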
if __name__ == "__main__":
app.run(host='0.0.0.0', port=80, debug=True, threaded=True)
|
valexandersaulys/airbnb_kaggle_contest
|
refs/heads/master
|
venv/lib/python3.4/site-packages/Theano-0.7.0-py3.4.egg/theano/tensor/slinalg.py
|
6
|
import logging
import warnings
from six.moves import xrange
import numpy
try:
import scipy.linalg
imported_scipy = True
except ImportError:
# some ops (e.g. Cholesky, Solve, A_Xinv_b) won't work
imported_scipy = False
from theano import tensor
import theano.tensor
from theano.tensor import as_tensor_variable
from theano.gof import Op, Apply
logger = logging.getLogger(__name__)
MATRIX_STRUCTURES = (
'general',
'symmetric',
'lower_triangular',
'upper_triangular',
'hermitian',
'banded',
'diagonal',
'toeplitz')
class Cholesky(Op):
"""
Return a triangular matrix square root of positive semi-definite `x`.
L = cholesky(X, lower=True) implies dot(L, L.T) == X.
"""
# TODO: inplace
# TODO: for specific dtypes
# TODO: LAPACK wrapper with in-place behavior, for solve also
__props__ = ('lower', 'destructive')
def __init__(self, lower=True):
self.lower = lower
self.destructive = False
def infer_shape(self, node, shapes):
return [shapes[0]]
def make_node(self, x):
assert imported_scipy, (
"Scipy not available. Scipy is needed for the Cholesky op")
x = as_tensor_variable(x)
assert x.ndim == 2
return Apply(self, [x], [x.type()])
def perform(self, node, inputs, outputs):
x = inputs[0]
z = outputs[0]
z[0] = scipy.linalg.cholesky(x, lower=self.lower).astype(x.dtype)
def grad(self, inputs, gradients):
return [CholeskyGrad(self.lower)(inputs[0], self(inputs[0]),
gradients[0])]
cholesky = Cholesky()
class CholeskyGrad(Op):
"""
"""
__props__ = ('lower', 'destructive')
def __init__(self, lower=True):
self.lower = lower
self.destructive = False
def make_node(self, x, l, dz):
x = as_tensor_variable(x)
l = as_tensor_variable(l)
dz = as_tensor_variable(dz)
assert x.ndim == 2
assert l.ndim == 2
assert dz.ndim == 2
assert l.owner.op.lower == self.lower, (
"lower/upper mismatch between Cholesky op and CholeskyGrad op"
)
return Apply(self, [x, l, dz], [x.type()])
def perform(self, node, inputs, outputs):
"""
Implements the "reverse-mode" gradient [1]_ for the
Cholesky factorization of a positive-definite matrix.
References
----------
.. [1] S. P. Smith. "Differentiation of the Cholesky Algorithm".
Journal of Computational and Graphical Statistics,
Vol. 4, No. 2 (Jun.,1995), pp. 134-147
http://www.jstor.org/stable/1390762
"""
x = inputs[0]
L = inputs[1]
dz = inputs[2]
dx = outputs[0]
N = x.shape[0]
if self.lower:
F = numpy.tril(dz)
for k in xrange(N - 1, -1, -1):
for j in xrange(k + 1, N):
for i in xrange(j, N):
F[i, k] -= F[i, j] * L[j, k]
F[j, k] -= F[i, j] * L[i, k]
for j in xrange(k + 1, N):
F[j, k] /= L[k, k]
F[k, k] -= L[j, k] * F[j, k]
F[k, k] /= (2 * L[k, k])
else:
F = numpy.triu(dz)
for k in xrange(N - 1, -1, -1):
for j in xrange(k + 1, N):
for i in xrange(j, N):
F[k, i] -= F[j, i] * L[k, j]
F[k, j] -= F[j, i] * L[k, i]
for j in xrange(k + 1, N):
F[k, j] /= L[k, k]
F[k, k] -= L[k, j] * F[k, j]
F[k, k] /= (2 * L[k, k])
dx[0] = F
def infer_shape(self, node, shapes):
return [shapes[0]]
class Solve(Op):
"""
Solve a system of linear equations.
"""
__props__ = ('A_structure', 'lower', 'overwrite_A', 'overwrite_b')
def __init__(self,
A_structure='general',
lower=False,
overwrite_A=False,
overwrite_b=False):
if A_structure not in MATRIX_STRUCTURES:
raise ValueError('Invalid matrix structure argument', A_structure)
self.A_structure = A_structure
self.lower = lower
self.overwrite_A = overwrite_A
self.overwrite_b = overwrite_b
def __repr__(self):
return 'Solve{%s}' % str(self._props())
def make_node(self, A, b):
assert imported_scipy, (
"Scipy not available. Scipy is needed for the Solve op")
A = as_tensor_variable(A)
b = as_tensor_variable(b)
assert A.ndim == 2
assert b.ndim in [1, 2]
otype = tensor.tensor(
broadcastable=b.broadcastable,
dtype=(A * b).dtype)
return Apply(self, [A, b], [otype])
def perform(self, node, inputs, output_storage):
A, b = inputs
if self.A_structure == 'lower_triangular':
rval = scipy.linalg.solve_triangular(
A, b, lower=True)
elif self.A_structure == 'upper_triangular':
rval = scipy.linalg.solve_triangular(
A, b, lower=False)
else:
rval = scipy.linalg.solve(A, b)
output_storage[0][0] = rval
# computes shape of x where x = inv(A) * b
def infer_shape(self, node, shapes):
Ashape, Bshape = shapes
rows = Ashape[1]
if len(Bshape) == 1: # b is a Vector
return [(rows,)]
else:
cols = Bshape[1] # b is a Matrix
return [(rows, cols)]
solve = Solve() # general solve
# TODO : SolveTriangular
# TODO: Optimizations to replace multiplication by matrix inverse
# with solve() Op (still unwritten)
class Eigvalsh(Op):
"""
Generalized eigenvalues of a Hermitian positive definite eigensystem.
"""
__props__ = ('lower',)
def __init__(self, lower=True):
assert lower in [True, False]
self.lower = lower
def make_node(self, a, b):
assert imported_scipy, (
"Scipy not available. Scipy is needed for the Eigvalsh op")
if b == theano.tensor.NoneConst:
a = as_tensor_variable(a)
assert a.ndim == 2
out_dtype = theano.scalar.upcast(a.dtype)
w = theano.tensor.vector(dtype=out_dtype)
return Apply(self, [a], [w])
else:
a = as_tensor_variable(a)
b = as_tensor_variable(b)
assert a.ndim == 2
assert b.ndim == 2
out_dtype = theano.scalar.upcast(a.dtype, b.dtype)
w = theano.tensor.vector(dtype=out_dtype)
return Apply(self, [a, b], [w])
def perform(self, node, inputs, outputs):
(w,) = outputs
if len(inputs) == 2:
w[0] = scipy.linalg.eigvalsh(a=inputs[0], b=inputs[1], lower=self.lower)
else:
w[0] = scipy.linalg.eigvalsh(a=inputs[0], b=None, lower=self.lower)
def grad(self, inputs, g_outputs):
a, b = inputs
gw, = g_outputs
return EigvalshGrad(self.lower)(a, b, gw)
def infer_shape(self, node, shapes):
n = shapes[0][0]
return [(n,)]
class EigvalshGrad(Op):
"""
Gradient of generalized eigenvalues of a Hermitian positive definite
eigensystem.
"""
# Note: This Op (EigvalshGrad), should be removed and replaced with a graph
# of theano ops that is constructed directly in Eigvalsh.grad.
# But this can only be done once scipy.linalg.eigh is available as an Op
# (currently the Eigh uses numpy.linalg.eigh, which doesn't let you
# pass the right-hand-side matrix for a generalized eigenproblem.) See the
# discussion on github at
# https://github.com/Theano/Theano/pull/1846#discussion-diff-12486764
__props__ = ('lower',)
def __init__(self, lower=True):
assert lower in [True, False]
self.lower = lower
if lower:
self.tri0 = numpy.tril
self.tri1 = lambda a: numpy.triu(a, 1)
else:
self.tri0 = numpy.triu
self.tri1 = lambda a: numpy.tril(a, -1)
def make_node(self, a, b, gw):
assert imported_scipy, (
"Scipy not available. Scipy is needed for the GEigvalsh op")
a = as_tensor_variable(a)
b = as_tensor_variable(b)
gw = as_tensor_variable(gw)
assert a.ndim == 2
assert b.ndim == 2
assert gw.ndim == 1
out_dtype = theano.scalar.upcast(a.dtype, b.dtype, gw.dtype)
out1 = theano.tensor.matrix(dtype=out_dtype)
out2 = theano.tensor.matrix(dtype=out_dtype)
return Apply(self, [a, b, gw], [out1, out2])
def perform(self, node, inputs, outputs):
(a, b, gw) = inputs
w, v = scipy.linalg.eigh(a, b, lower=self.lower)
gA = v.dot(numpy.diag(gw).dot(v.T))
gB = - v.dot(numpy.diag(gw * w).dot(v.T))
# See EighGrad comments for an explanation of these lines
out1 = self.tri0(gA) + self.tri1(gA).T
out2 = self.tri0(gB) + self.tri1(gB).T
outputs[0][0] = numpy.asarray(out1, dtype=node.outputs[0].dtype)
outputs[1][0] = numpy.asarray(out2, dtype=node.outputs[1].dtype)
def infer_shape(self, node, shapes):
return [shapes[0], shapes[1]]
def eigvalsh(a, b, lower=True):
return Eigvalsh(lower)(a, b)
def kron(a, b):
""" Kronecker product.
Same as scipy.linalg.kron(a, b).
Parameters
----------
a: array_like
b: array_like
Returns
-------
array_like with a.ndim + b.ndim - 2 dimensions
Notes
-----
numpy.kron(a, b) != scipy.linalg.kron(a, b)!
They don't have the same shape and order when
a.ndim != b.ndim != 2.
"""
a = tensor.as_tensor_variable(a)
b = tensor.as_tensor_variable(b)
if (a.ndim + b.ndim <= 2):
raise TypeError('kron: inputs dimensions must sum to 3 or more. '
'You passed %d and %d.' % (a.ndim, b.ndim))
o = tensor.outer(a, b)
o = o.reshape(tensor.concatenate((a.shape, b.shape)),
a.ndim + b.ndim)
shf = o.dimshuffle(0, 2, 1, * list(range(3, o.ndim)))
if shf.ndim == 3:
shf = o.dimshuffle(1, 0, 2)
o = shf.flatten()
else:
o = shf.reshape((o.shape[0] * o.shape[2],
o.shape[1] * o.shape[3]) +
tuple(o.shape[i] for i in xrange(4, o.ndim)))
return o
class Expm(Op):
"""
Compute the matrix exponential of a square array.
"""
__props__ = ()
def make_node(self, A):
assert imported_scipy, (
"Scipy not available. Scipy is needed for the Expm op")
A = as_tensor_variable(A)
assert A.ndim == 2
expm = theano.tensor.matrix(dtype=A.dtype)
return Apply(self, [A, ], [expm, ])
def perform(self, node, inputs, outputs):
(A,) = inputs
(expm,) = outputs
expm[0] = scipy.linalg.expm(A)
def grad(self, inputs, outputs):
(A,) = inputs
(g_out,) = outputs
return [ExpmGrad()(A, g_out)]
def infer_shape(self, node, shapes):
return [shapes[0]]
class ExpmGrad(Op):
"""
Gradient of the matrix exponential of a square array.
"""
__props__ = ()
def make_node(self, A, gw):
assert imported_scipy, (
"Scipy not available. Scipy is needed for the Expm op")
A = as_tensor_variable(A)
assert A.ndim == 2
out = theano.tensor.matrix(dtype=A.dtype)
return Apply(self, [A, gw], [out, ])
def infer_shape(self, node, shapes):
return [shapes[0]]
def perform(self, node, inputs, outputs):
# Kalbfleisch and Lawless, J. Am. Stat. Assoc. 80 (1985) Equation 3.4
# Kind of... You need to do some algebra from there to arrive at
# this expression.
(A, gA) = inputs
(out,) = outputs
w, V = scipy.linalg.eig(A, right=True)
U = scipy.linalg.inv(V).T
exp_w = numpy.exp(w)
X = numpy.subtract.outer(exp_w, exp_w) / numpy.subtract.outer(w, w)
numpy.fill_diagonal(X, exp_w)
Y = U.dot(V.T.dot(gA).dot(U) * X).dot(V.T)
with warnings.catch_warnings():
warnings.simplefilter("ignore", numpy.ComplexWarning)
out[0] = Y.astype(A.dtype)
expm = Expm()
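# Hedged usage sketch (not part of the original module): compiling a small
# graph with the cholesky and solve Ops defined above. theano.function and
# the dmatrix/dvector constructors are standard Theano API; the concrete
# matrix is an arbitrary symmetric positive-definite example.
def _demo_cholesky_solve():
    A = theano.tensor.dmatrix('A')
    b = theano.tensor.dvector('b')
    f = theano.function([A, b], [cholesky(A), solve(A, b)])
    spd = numpy.array([[4.0, 1.0], [1.0, 3.0]])
    rhs = numpy.array([1.0, 2.0])
    return f(spd, rhs)  # [lower-triangular factor of A, solution of A x = b]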
|
GreenLunar/Bookie
|
refs/heads/develop
|
combo.py
|
9
|
"""WSGI file to serve the combo JS out of convoy"""
import os
from convoy.combo import combo_app
root_dir = os.path.dirname(__file__)
JS_FILES = root_dir + '/bookie/static/js/build'
application = combo_app(JS_FILES)
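# Hedged usage sketch (not part of the original file): serving the combo
# application locally with the standard library's wsgiref server. Host and
# port are arbitrary choices for illustration.
if __name__ == '__main__':
    from wsgiref.simple_server import make_server
    make_server('127.0.0.1', 8080, application).serve_forever()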
|
yrizk/django-blog
|
refs/heads/master
|
blogvenv/lib/python3.4/site-packages/pip/_vendor/distlib/wheel.py
|
202
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2014 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from __future__ import unicode_literals
import base64
import codecs
import datetime
import distutils.util
from email import message_from_file
import hashlib
import imp
import json
import logging
import os
import posixpath
import re
import shutil
import sys
import tempfile
import zipfile
from . import __version__, DistlibException
from .compat import sysconfig, ZipFile, fsdecode, text_type, filter
from .database import InstalledDistribution
from .metadata import Metadata, METADATA_FILENAME
from .util import (FileOperator, convert_path, CSVReader, CSVWriter, Cache,
cached_property, get_cache_base, read_exports, tempdir)
from .version import NormalizedVersion, UnsupportedVersionError
logger = logging.getLogger(__name__)
cache = None # created when needed
if hasattr(sys, 'pypy_version_info'):
IMP_PREFIX = 'pp'
elif sys.platform.startswith('java'):
IMP_PREFIX = 'jy'
elif sys.platform == 'cli':
IMP_PREFIX = 'ip'
else:
IMP_PREFIX = 'cp'
VER_SUFFIX = sysconfig.get_config_var('py_version_nodot')
if not VER_SUFFIX: # pragma: no cover
VER_SUFFIX = '%s%s' % sys.version_info[:2]
PYVER = 'py' + VER_SUFFIX
IMPVER = IMP_PREFIX + VER_SUFFIX
ARCH = distutils.util.get_platform().replace('-', '_').replace('.', '_')
ABI = sysconfig.get_config_var('SOABI')
if ABI and ABI.startswith('cpython-'):
ABI = ABI.replace('cpython-', 'cp')
else:
def _derive_abi():
parts = ['cp', VER_SUFFIX]
if sysconfig.get_config_var('Py_DEBUG'):
parts.append('d')
if sysconfig.get_config_var('WITH_PYMALLOC'):
parts.append('m')
if sysconfig.get_config_var('Py_UNICODE_SIZE') == 4:
parts.append('u')
return ''.join(parts)
ABI = _derive_abi()
del _derive_abi
FILENAME_RE = re.compile(r'''
(?P<nm>[^-]+)
-(?P<vn>\d+[^-]*)
(-(?P<bn>\d+[^-]*))?
-(?P<py>\w+\d+(\.\w+\d+)*)
-(?P<bi>\w+)
-(?P<ar>\w+)
\.whl$
''', re.IGNORECASE | re.VERBOSE)
NAME_VERSION_RE = re.compile(r'''
(?P<nm>[^-]+)
-(?P<vn>\d+[^-]*)
(-(?P<bn>\d+[^-]*))?$
''', re.IGNORECASE | re.VERBOSE)
SHEBANG_RE = re.compile(br'\s*#![^\r\n]*')
SHEBANG_DETAIL_RE = re.compile(br'^(\s*#!("[^"]+"|\S+))\s+(.*)$')
SHEBANG_PYTHON = b'#!python'
SHEBANG_PYTHONW = b'#!pythonw'
if os.sep == '/':
to_posix = lambda o: o
else:
to_posix = lambda o: o.replace(os.sep, '/')
class Mounter(object):
def __init__(self):
self.impure_wheels = {}
self.libs = {}
def add(self, pathname, extensions):
self.impure_wheels[pathname] = extensions
self.libs.update(extensions)
def remove(self, pathname):
extensions = self.impure_wheels.pop(pathname)
for k, v in extensions:
if k in self.libs:
del self.libs[k]
def find_module(self, fullname, path=None):
if fullname in self.libs:
result = self
else:
result = None
return result
def load_module(self, fullname):
if fullname in sys.modules:
result = sys.modules[fullname]
else:
if fullname not in self.libs:
raise ImportError('unable to find extension for %s' % fullname)
result = imp.load_dynamic(fullname, self.libs[fullname])
result.__loader__ = self
parts = fullname.rsplit('.', 1)
if len(parts) > 1:
result.__package__ = parts[0]
return result
_hook = Mounter()
class Wheel(object):
"""
Class to build and install from Wheel files (PEP 427).
"""
wheel_version = (1, 1)
hash_kind = 'sha256'
def __init__(self, filename=None, sign=False, verify=False):
"""
Initialise an instance using a (valid) filename.
"""
self.sign = sign
self.should_verify = verify
self.buildver = ''
self.pyver = [PYVER]
self.abi = ['none']
self.arch = ['any']
self.dirname = os.getcwd()
if filename is None:
self.name = 'dummy'
self.version = '0.1'
self._filename = self.filename
else:
m = NAME_VERSION_RE.match(filename)
if m:
info = m.groupdict('')
self.name = info['nm']
# Reinstate the local version separator
self.version = info['vn'].replace('_', '-')
self.buildver = info['bn']
self._filename = self.filename
else:
dirname, filename = os.path.split(filename)
m = FILENAME_RE.match(filename)
if not m:
raise DistlibException('Invalid name or '
'filename: %r' % filename)
if dirname:
self.dirname = os.path.abspath(dirname)
self._filename = filename
info = m.groupdict('')
self.name = info['nm']
self.version = info['vn']
self.buildver = info['bn']
self.pyver = info['py'].split('.')
self.abi = info['bi'].split('.')
self.arch = info['ar'].split('.')
@property
def filename(self):
"""
Build and return a filename from the various components.
"""
if self.buildver:
buildver = '-' + self.buildver
else:
buildver = ''
pyver = '.'.join(self.pyver)
abi = '.'.join(self.abi)
arch = '.'.join(self.arch)
# replace - with _ as a local version separator
version = self.version.replace('-', '_')
return '%s-%s%s-%s-%s-%s.whl' % (self.name, version, buildver,
pyver, abi, arch)
@property
def exists(self):
path = os.path.join(self.dirname, self.filename)
return os.path.isfile(path)
@property
def tags(self):
for pyver in self.pyver:
for abi in self.abi:
for arch in self.arch:
yield pyver, abi, arch
@cached_property
def metadata(self):
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
wrapper = codecs.getreader('utf-8')
with ZipFile(pathname, 'r') as zf:
wheel_metadata = self.get_wheel_metadata(zf)
wv = wheel_metadata['Wheel-Version'].split('.', 1)
file_version = tuple([int(i) for i in wv])
if file_version < (1, 1):
fn = 'METADATA'
else:
fn = METADATA_FILENAME
try:
metadata_filename = posixpath.join(info_dir, fn)
with zf.open(metadata_filename) as bf:
wf = wrapper(bf)
result = Metadata(fileobj=wf)
except KeyError:
raise ValueError('Invalid wheel, because %s is '
'missing' % fn)
return result
def get_wheel_metadata(self, zf):
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
metadata_filename = posixpath.join(info_dir, 'WHEEL')
with zf.open(metadata_filename) as bf:
wf = codecs.getreader('utf-8')(bf)
message = message_from_file(wf)
return dict(message)
@cached_property
def info(self):
pathname = os.path.join(self.dirname, self.filename)
with ZipFile(pathname, 'r') as zf:
result = self.get_wheel_metadata(zf)
return result
def process_shebang(self, data):
m = SHEBANG_RE.match(data)
if m:
end = m.end()
shebang, data_after_shebang = data[:end], data[end:]
# Preserve any arguments after the interpreter
if b'pythonw' in shebang.lower():
shebang_python = SHEBANG_PYTHONW
else:
shebang_python = SHEBANG_PYTHON
m = SHEBANG_DETAIL_RE.match(shebang)
if m:
args = b' ' + m.groups()[-1]
else:
args = b''
shebang = shebang_python + args
data = shebang + data_after_shebang
else:
cr = data.find(b'\r')
lf = data.find(b'\n')
if cr < 0 or cr > lf:
term = b'\n'
else:
if data[cr:cr + 2] == b'\r\n':
term = b'\r\n'
else:
term = b'\r'
data = SHEBANG_PYTHON + term + data
return data
def get_hash(self, data, hash_kind=None):
if hash_kind is None:
hash_kind = self.hash_kind
try:
hasher = getattr(hashlib, hash_kind)
except AttributeError:
raise DistlibException('Unsupported hash algorithm: %r' % hash_kind)
result = hasher(data).digest()
result = base64.urlsafe_b64encode(result).rstrip(b'=').decode('ascii')
return hash_kind, result
def write_record(self, records, record_path, base):
with CSVWriter(record_path) as writer:
for row in records:
writer.writerow(row)
p = to_posix(os.path.relpath(record_path, base))
writer.writerow((p, '', ''))
def write_records(self, info, libdir, archive_paths):
records = []
distinfo, info_dir = info
hasher = getattr(hashlib, self.hash_kind)
for ap, p in archive_paths:
with open(p, 'rb') as f:
data = f.read()
digest = '%s=%s' % self.get_hash(data)
size = os.path.getsize(p)
records.append((ap, digest, size))
p = os.path.join(distinfo, 'RECORD')
self.write_record(records, p, libdir)
ap = to_posix(os.path.join(info_dir, 'RECORD'))
archive_paths.append((ap, p))
def build_zip(self, pathname, archive_paths):
with ZipFile(pathname, 'w', zipfile.ZIP_DEFLATED) as zf:
for ap, p in archive_paths:
logger.debug('Wrote %s to %s in wheel', p, ap)
zf.write(p, ap)
def build(self, paths, tags=None, wheel_version=None):
"""
Build a wheel from files in specified paths, and use any specified tags
when determining the name of the wheel.
"""
if tags is None:
tags = {}
libkey = list(filter(lambda o: o in paths, ('purelib', 'platlib')))[0]
if libkey == 'platlib':
is_pure = 'false'
default_pyver = [IMPVER]
default_abi = [ABI]
default_arch = [ARCH]
else:
is_pure = 'true'
default_pyver = [PYVER]
default_abi = ['none']
default_arch = ['any']
self.pyver = tags.get('pyver', default_pyver)
self.abi = tags.get('abi', default_abi)
self.arch = tags.get('arch', default_arch)
libdir = paths[libkey]
name_ver = '%s-%s' % (self.name, self.version)
data_dir = '%s.data' % name_ver
info_dir = '%s.dist-info' % name_ver
archive_paths = []
# First, stuff which is not in site-packages
for key in ('data', 'headers', 'scripts'):
if key not in paths:
continue
path = paths[key]
if os.path.isdir(path):
for root, dirs, files in os.walk(path):
for fn in files:
p = fsdecode(os.path.join(root, fn))
rp = os.path.relpath(p, path)
ap = to_posix(os.path.join(data_dir, key, rp))
archive_paths.append((ap, p))
if key == 'scripts' and not p.endswith('.exe'):
with open(p, 'rb') as f:
data = f.read()
data = self.process_shebang(data)
with open(p, 'wb') as f:
f.write(data)
# Now, stuff which is in site-packages, other than the
# distinfo stuff.
path = libdir
distinfo = None
for root, dirs, files in os.walk(path):
if root == path:
# At the top level only, save distinfo for later
# and skip it for now
for i, dn in enumerate(dirs):
dn = fsdecode(dn)
if dn.endswith('.dist-info'):
distinfo = os.path.join(root, dn)
del dirs[i]
break
assert distinfo, '.dist-info directory expected, not found'
for fn in files:
# comment out next suite to leave .pyc files in
if fsdecode(fn).endswith(('.pyc', '.pyo')):
continue
p = os.path.join(root, fn)
rp = to_posix(os.path.relpath(p, path))
archive_paths.append((rp, p))
# Now distinfo. Assumed to be flat, i.e. os.listdir is enough.
files = os.listdir(distinfo)
for fn in files:
if fn not in ('RECORD', 'INSTALLER', 'SHARED', 'WHEEL'):
p = fsdecode(os.path.join(distinfo, fn))
ap = to_posix(os.path.join(info_dir, fn))
archive_paths.append((ap, p))
wheel_metadata = [
'Wheel-Version: %d.%d' % (wheel_version or self.wheel_version),
'Generator: distlib %s' % __version__,
'Root-Is-Purelib: %s' % is_pure,
]
for pyver, abi, arch in self.tags:
wheel_metadata.append('Tag: %s-%s-%s' % (pyver, abi, arch))
p = os.path.join(distinfo, 'WHEEL')
with open(p, 'w') as f:
f.write('\n'.join(wheel_metadata))
ap = to_posix(os.path.join(info_dir, 'WHEEL'))
archive_paths.append((ap, p))
# Now, at last, RECORD.
# Paths in here are archive paths - nothing else makes sense.
self.write_records((distinfo, info_dir), libdir, archive_paths)
# Now, ready to build the zip file
pathname = os.path.join(self.dirname, self.filename)
self.build_zip(pathname, archive_paths)
return pathname
def install(self, paths, maker, **kwargs):
"""
Install a wheel to the specified paths. If kwarg ``warner`` is
specified, it should be a callable, which will be called with two
tuples indicating the wheel version of this software and the wheel
version in the file, if there is a discrepancy in the versions.
This can be used to issue any warnings or raise any exceptions.
If kwarg ``lib_only`` is True, only the purelib/platlib files are
installed, and the headers, scripts, data and dist-info metadata are
not written.
The return value is a :class:`InstalledDistribution` instance unless
``options.lib_only`` is True, in which case the return value is ``None``.
"""
dry_run = maker.dry_run
warner = kwargs.get('warner')
lib_only = kwargs.get('lib_only', False)
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
data_dir = '%s.data' % name_ver
info_dir = '%s.dist-info' % name_ver
metadata_name = posixpath.join(info_dir, METADATA_FILENAME)
wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
record_name = posixpath.join(info_dir, 'RECORD')
wrapper = codecs.getreader('utf-8')
with ZipFile(pathname, 'r') as zf:
with zf.open(wheel_metadata_name) as bwf:
wf = wrapper(bwf)
message = message_from_file(wf)
wv = message['Wheel-Version'].split('.', 1)
file_version = tuple([int(i) for i in wv])
if (file_version != self.wheel_version) and warner:
warner(self.wheel_version, file_version)
if message['Root-Is-Purelib'] == 'true':
libdir = paths['purelib']
else:
libdir = paths['platlib']
records = {}
with zf.open(record_name) as bf:
with CSVReader(stream=bf) as reader:
for row in reader:
p = row[0]
records[p] = row
data_pfx = posixpath.join(data_dir, '')
info_pfx = posixpath.join(info_dir, '')
script_pfx = posixpath.join(data_dir, 'scripts', '')
# make a new instance rather than a copy of maker's,
# as we mutate it
fileop = FileOperator(dry_run=dry_run)
fileop.record = True # so we can rollback if needed
bc = not sys.dont_write_bytecode # Double negatives. Lovely!
outfiles = [] # for RECORD writing
# for script copying/shebang processing
workdir = tempfile.mkdtemp()
# set target dir later
# we default add_launchers to False, as the
# Python Launcher should be used instead
maker.source_dir = workdir
maker.target_dir = None
try:
for zinfo in zf.infolist():
arcname = zinfo.filename
if isinstance(arcname, text_type):
u_arcname = arcname
else:
u_arcname = arcname.decode('utf-8')
# The signature file won't be in RECORD,
# and we don't currently do anything with it
if u_arcname.endswith('/RECORD.jws'):
continue
row = records[u_arcname]
if row[2] and str(zinfo.file_size) != row[2]:
raise DistlibException('size mismatch for '
'%s' % u_arcname)
if row[1]:
kind, value = row[1].split('=', 1)
with zf.open(arcname) as bf:
data = bf.read()
_, digest = self.get_hash(data, kind)
if digest != value:
raise DistlibException('digest mismatch for '
'%s' % arcname)
if lib_only and u_arcname.startswith((info_pfx, data_pfx)):
logger.debug('lib_only: skipping %s', u_arcname)
continue
is_script = (u_arcname.startswith(script_pfx)
and not u_arcname.endswith('.exe'))
if u_arcname.startswith(data_pfx):
_, where, rp = u_arcname.split('/', 2)
outfile = os.path.join(paths[where], convert_path(rp))
else:
# meant for site-packages.
if u_arcname in (wheel_metadata_name, record_name):
continue
outfile = os.path.join(libdir, convert_path(u_arcname))
if not is_script:
with zf.open(arcname) as bf:
fileop.copy_stream(bf, outfile)
outfiles.append(outfile)
# Double check the digest of the written file
if not dry_run and row[1]:
with open(outfile, 'rb') as bf:
data = bf.read()
_, newdigest = self.get_hash(data, kind)
if newdigest != digest:
raise DistlibException('digest mismatch '
'on write for '
'%s' % outfile)
if bc and outfile.endswith('.py'):
try:
pyc = fileop.byte_compile(outfile)
outfiles.append(pyc)
except Exception:
# Don't give up if byte-compilation fails,
# but log it and perhaps warn the user
logger.warning('Byte-compilation failed',
exc_info=True)
else:
fn = os.path.basename(convert_path(arcname))
workname = os.path.join(workdir, fn)
with zf.open(arcname) as bf:
fileop.copy_stream(bf, workname)
dn, fn = os.path.split(outfile)
maker.target_dir = dn
filenames = maker.make(fn)
fileop.set_executable_mode(filenames)
outfiles.extend(filenames)
if lib_only:
logger.debug('lib_only: returning None')
dist = None
else:
# Generate scripts
# Try to get pydist.json so we can see if there are
# any commands to generate. If this fails (e.g. because
# of a legacy wheel), log a warning but don't give up.
commands = None
file_version = self.info['Wheel-Version']
if file_version == '1.0':
# Use legacy info
ep = posixpath.join(info_dir, 'entry_points.txt')
try:
with zf.open(ep) as bwf:
epdata = read_exports(bwf)
commands = {}
for key in ('console', 'gui'):
k = '%s_scripts' % key
if k in epdata:
commands['wrap_%s' % key] = d = {}
for v in epdata[k].values():
s = '%s:%s' % (v.prefix, v.suffix)
if v.flags:
s += ' %s' % v.flags
d[v.name] = s
except Exception:
logger.warning('Unable to read legacy script '
'metadata, so cannot generate '
'scripts')
else:
try:
with zf.open(metadata_name) as bwf:
wf = wrapper(bwf)
commands = json.load(wf).get('extensions')
if commands:
commands = commands.get('python.commands')
except Exception:
logger.warning('Unable to read JSON metadata, so '
'cannot generate scripts')
if commands:
console_scripts = commands.get('wrap_console', {})
gui_scripts = commands.get('wrap_gui', {})
if console_scripts or gui_scripts:
script_dir = paths.get('scripts', '')
if not os.path.isdir(script_dir):
raise ValueError('Valid script path not '
'specified')
maker.target_dir = script_dir
for k, v in console_scripts.items():
script = '%s = %s' % (k, v)
filenames = maker.make(script)
fileop.set_executable_mode(filenames)
if gui_scripts:
options = {'gui': True }
for k, v in gui_scripts.items():
script = '%s = %s' % (k, v)
filenames = maker.make(script, options)
fileop.set_executable_mode(filenames)
p = os.path.join(libdir, info_dir)
dist = InstalledDistribution(p)
# Write SHARED
paths = dict(paths) # don't change passed in dict
del paths['purelib']
del paths['platlib']
paths['lib'] = libdir
p = dist.write_shared_locations(paths, dry_run)
if p:
outfiles.append(p)
# Write RECORD
dist.write_installed_files(outfiles, paths['prefix'],
dry_run)
return dist
except Exception: # pragma: no cover
logger.exception('installation failed.')
fileop.rollback()
raise
finally:
shutil.rmtree(workdir)
def _get_dylib_cache(self):
global cache
if cache is None:
# Use native string to avoid issues on 2.x: see Python #20140.
base = os.path.join(get_cache_base(), str('dylib-cache'),
sys.version[:3])
cache = Cache(base)
return cache
def _get_extensions(self):
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
arcname = posixpath.join(info_dir, 'EXTENSIONS')
wrapper = codecs.getreader('utf-8')
result = []
with ZipFile(pathname, 'r') as zf:
try:
with zf.open(arcname) as bf:
wf = wrapper(bf)
extensions = json.load(wf)
cache = self._get_dylib_cache()
prefix = cache.prefix_to_dir(pathname)
cache_base = os.path.join(cache.base, prefix)
if not os.path.isdir(cache_base):
os.makedirs(cache_base)
for name, relpath in extensions.items():
dest = os.path.join(cache_base, convert_path(relpath))
if not os.path.exists(dest):
extract = True
else:
file_time = os.stat(dest).st_mtime
file_time = datetime.datetime.fromtimestamp(file_time)
info = zf.getinfo(relpath)
wheel_time = datetime.datetime(*info.date_time)
extract = wheel_time > file_time
if extract:
zf.extract(relpath, cache_base)
result.append((name, dest))
except KeyError:
pass
return result
def is_compatible(self):
"""
Determine if a wheel is compatible with the running system.
"""
return is_compatible(self)
def is_mountable(self):
"""
Determine if a wheel is asserted as mountable by its metadata.
"""
return True # for now - metadata details TBD
def mount(self, append=False):
pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
if not self.is_compatible():
msg = 'Wheel %s not compatible with this Python.' % pathname
raise DistlibException(msg)
if not self.is_mountable():
msg = 'Wheel %s is marked as not mountable.' % pathname
raise DistlibException(msg)
if pathname in sys.path:
logger.debug('%s already in path', pathname)
else:
if append:
sys.path.append(pathname)
else:
sys.path.insert(0, pathname)
extensions = self._get_extensions()
if extensions:
if _hook not in sys.meta_path:
sys.meta_path.append(_hook)
_hook.add(pathname, extensions)
def unmount(self):
pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
if pathname not in sys.path:
logger.debug('%s not in path', pathname)
else:
sys.path.remove(pathname)
if pathname in _hook.impure_wheels:
_hook.remove(pathname)
if not _hook.impure_wheels:
if _hook in sys.meta_path:
sys.meta_path.remove(_hook)
def verify(self):
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
data_dir = '%s.data' % name_ver
info_dir = '%s.dist-info' % name_ver
metadata_name = posixpath.join(info_dir, METADATA_FILENAME)
wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
record_name = posixpath.join(info_dir, 'RECORD')
wrapper = codecs.getreader('utf-8')
with ZipFile(pathname, 'r') as zf:
with zf.open(wheel_metadata_name) as bwf:
wf = wrapper(bwf)
message = message_from_file(wf)
wv = message['Wheel-Version'].split('.', 1)
file_version = tuple([int(i) for i in wv])
# TODO version verification
records = {}
with zf.open(record_name) as bf:
with CSVReader(stream=bf) as reader:
for row in reader:
p = row[0]
records[p] = row
for zinfo in zf.infolist():
arcname = zinfo.filename
if isinstance(arcname, text_type):
u_arcname = arcname
else:
u_arcname = arcname.decode('utf-8')
if '..' in u_arcname:
raise DistlibException('invalid entry in '
'wheel: %r' % u_arcname)
# The signature file won't be in RECORD,
                # and we don't currently do anything with it
if u_arcname.endswith('/RECORD.jws'):
continue
row = records[u_arcname]
if row[2] and str(zinfo.file_size) != row[2]:
raise DistlibException('size mismatch for '
'%s' % u_arcname)
if row[1]:
kind, value = row[1].split('=', 1)
with zf.open(arcname) as bf:
data = bf.read()
_, digest = self.get_hash(data, kind)
if digest != value:
raise DistlibException('digest mismatch for '
'%s' % arcname)
def update(self, modifier, dest_dir=None, **kwargs):
"""
Update the contents of a wheel in a generic way. The modifier should
be a callable which expects a dictionary argument: its keys are
archive-entry paths, and its values are absolute filesystem paths
        where the contents of the corresponding archive entries can be found. The
modifier is free to change the contents of the files pointed to, add
new entries and remove entries, before returning. This method will
extract the entire contents of the wheel to a temporary location, call
the modifier, and then use the passed (and possibly updated)
dictionary to write a new wheel. If ``dest_dir`` is specified, the new
wheel is written there -- otherwise, the original wheel is overwritten.
The modifier should return True if it updated the wheel, else False.
This method returns the same value the modifier returns.
"""
def get_version(path_map, info_dir):
version = path = None
key = '%s/%s' % (info_dir, METADATA_FILENAME)
if key not in path_map:
key = '%s/PKG-INFO' % info_dir
if key in path_map:
path = path_map[key]
version = Metadata(path=path).version
return version, path
def update_version(version, path):
updated = None
try:
                v = NormalizedVersion(version)  # validation only; raises UnsupportedVersionError if not PEP-440 compliant
i = version.find('-')
if i < 0:
updated = '%s+1' % version
else:
parts = [int(s) for s in version[i + 1:].split('.')]
parts[-1] += 1
updated = '%s+%s' % (version[:i],
'.'.join(str(i) for i in parts))
except UnsupportedVersionError:
logger.debug('Cannot update non-compliant (PEP-440) '
'version %r', version)
if updated:
md = Metadata(path=path)
md.version = updated
legacy = not path.endswith(METADATA_FILENAME)
md.write(path=path, legacy=legacy)
logger.debug('Version updated from %r to %r', version,
updated)
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
record_name = posixpath.join(info_dir, 'RECORD')
with tempdir() as workdir:
with ZipFile(pathname, 'r') as zf:
path_map = {}
for zinfo in zf.infolist():
arcname = zinfo.filename
if isinstance(arcname, text_type):
u_arcname = arcname
else:
u_arcname = arcname.decode('utf-8')
if u_arcname == record_name:
continue
if '..' in u_arcname:
raise DistlibException('invalid entry in '
'wheel: %r' % u_arcname)
zf.extract(zinfo, workdir)
path = os.path.join(workdir, convert_path(u_arcname))
path_map[u_arcname] = path
# Remember the version.
original_version, _ = get_version(path_map, info_dir)
# Files extracted. Call the modifier.
modified = modifier(path_map, **kwargs)
if modified:
# Something changed - need to build a new wheel.
current_version, path = get_version(path_map, info_dir)
if current_version and (current_version == original_version):
# Add or update local version to signify changes.
update_version(current_version, path)
# Decide where the new wheel goes.
if dest_dir is None:
fd, newpath = tempfile.mkstemp(suffix='.whl',
prefix='wheel-update-',
dir=workdir)
os.close(fd)
else:
if not os.path.isdir(dest_dir):
raise DistlibException('Not a directory: %r' % dest_dir)
newpath = os.path.join(dest_dir, self.filename)
archive_paths = list(path_map.items())
distinfo = os.path.join(workdir, info_dir)
info = distinfo, info_dir
self.write_records(info, workdir, archive_paths)
self.build_zip(newpath, archive_paths)
if dest_dir is None:
shutil.copyfile(newpath, pathname)
return modified
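    # Usage sketch for update() (hypothetical wheel filename and helper): the
    # modifier receives {archive path -> extracted filesystem path}, may edit
    # files in place, and returns True if it changed anything, e.g.
    #
    #     def add_trailing_newline(path_map):
    #         for arcname, fspath in path_map.items():
    #             if arcname.endswith(METADATA_FILENAME):
    #                 with open(fspath, 'a') as f:
    #                     f.write('\n')
    #                 return True
    #         return False
    #
    #     Wheel('dist-0.1-py2.py3-none-any.whl').update(add_trailing_newline)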
def compatible_tags():
"""
Return (pyver, abi, arch) tuples compatible with this Python.
"""
versions = [VER_SUFFIX]
major = VER_SUFFIX[0]
for minor in range(sys.version_info[1] - 1, - 1, -1):
versions.append(''.join([major, str(minor)]))
abis = []
for suffix, _, _ in imp.get_suffixes():
if suffix.startswith('.abi'):
abis.append(suffix.split('.', 2)[1])
abis.sort()
if ABI != 'none':
abis.insert(0, ABI)
abis.append('none')
result = []
arches = [ARCH]
if sys.platform == 'darwin':
        m = re.match(r'(\w+)_(\d+)_(\d+)_(\w+)$', ARCH)
if m:
name, major, minor, arch = m.groups()
minor = int(minor)
matches = [arch]
if arch in ('i386', 'ppc'):
matches.append('fat')
if arch in ('i386', 'ppc', 'x86_64'):
matches.append('fat3')
if arch in ('ppc64', 'x86_64'):
matches.append('fat64')
if arch in ('i386', 'x86_64'):
matches.append('intel')
if arch in ('i386', 'x86_64', 'intel', 'ppc', 'ppc64'):
matches.append('universal')
while minor >= 0:
for match in matches:
s = '%s_%s_%s_%s' % (name, major, minor, match)
if s != ARCH: # already there
arches.append(s)
minor -= 1
# Most specific - our Python version, ABI and arch
for abi in abis:
for arch in arches:
result.append((''.join((IMP_PREFIX, versions[0])), abi, arch))
# where no ABI / arch dependency, but IMP_PREFIX dependency
for i, version in enumerate(versions):
result.append((''.join((IMP_PREFIX, version)), 'none', 'any'))
if i == 0:
result.append((''.join((IMP_PREFIX, version[0])), 'none', 'any'))
# no IMP_PREFIX, ABI or arch dependency
for i, version in enumerate(versions):
result.append((''.join(('py', version)), 'none', 'any'))
if i == 0:
result.append((''.join(('py', version[0])), 'none', 'any'))
return set(result)
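# Illustrative entries only (the actual values depend on the running
# interpreter): compatible_tags() yields tuples such as
# ('cp34', 'none', 'linux_x86_64'), ('cp3', 'none', 'any') and ('py3', 'none', 'any').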
COMPATIBLE_TAGS = compatible_tags()
del compatible_tags
def is_compatible(wheel, tags=None):
if not isinstance(wheel, Wheel):
wheel = Wheel(wheel) # assume it's a filename
result = False
if tags is None:
tags = COMPATIBLE_TAGS
for ver, abi, arch in tags:
if ver in wheel.pyver and abi in wheel.abi and arch in wheel.arch:
result = True
break
return result
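# A minimal end-to-end sketch (hypothetical wheel filename):
#
#     w = Wheel('dist-0.1-py2.py3-none-any.whl')
#     if w.is_compatible() and w.is_mountable():
#         w.mount()      # adds the wheel to sys.path and hooks any extensions
#         ...
#         w.unmount()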
|
bcallaars/trello-python
|
refs/heads/master
|
list/add.py
|
1
|
#!/usr/bin/python
import urllib, urllib2, json, sys
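# Usage sketch, inferred from the sys.argv indices used below:
#     python add.py <api key> <token> <list name> <board id>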
data = urllib.urlencode({
'name': sys.argv[3],
'idBoard': sys.argv[4]
})
url = "https://trello.com/1/lists?key=" + sys.argv[1] + "&token=" + sys.argv[2]
request = urllib2.Request(url, data)
response = urllib2.urlopen(request)
data = json.loads(response.read())
print 'RESULTS ADDING LIST:'
print '- Name:', data['name']
print '- ID:', data['id']
print '- Board ID:', data['idBoard']
print '- Position:', data['pos']
print '- Closed:', 'Yes' if data['closed'] else 'No'
|
sestrella/ansible
|
refs/heads/devel
|
test/units/modules/cloud/amazon/test_ec2_vpc_vpn.py
|
13
|
# (c) 2017 Red Hat Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
import os
from units.utils.amazon_placebo_fixtures import placeboify, maybe_sleep
from ansible.modules.cloud.amazon import ec2_vpc_vpn
from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn, boto3_tag_list_to_ansible_dict
class FakeModule(object):
def __init__(self, **kwargs):
self.params = kwargs
def fail_json(self, *args, **kwargs):
self.exit_args = args
self.exit_kwargs = kwargs
raise Exception('FAIL')
def exit_json(self, *args, **kwargs):
self.exit_args = args
self.exit_kwargs = kwargs
def get_vgw(connection):
# see if two vgw exist and return them if so
vgw = connection.describe_vpn_gateways(Filters=[{'Name': 'tag:Ansible_VPN', 'Values': ['Test']}])
if len(vgw['VpnGateways']) >= 2:
return [vgw['VpnGateways'][0]['VpnGatewayId'], vgw['VpnGateways'][1]['VpnGatewayId']]
# otherwise create two and return them
vgw_1 = connection.create_vpn_gateway(Type='ipsec.1')
vgw_2 = connection.create_vpn_gateway(Type='ipsec.1')
for resource in (vgw_1, vgw_2):
connection.create_tags(Resources=[resource['VpnGateway']['VpnGatewayId']], Tags=[{'Key': 'Ansible_VPN', 'Value': 'Test'}])
return [vgw_1['VpnGateway']['VpnGatewayId'], vgw_2['VpnGateway']['VpnGatewayId']]
def get_cgw(connection):
# see if two cgw exist and return them if so
cgw = connection.describe_customer_gateways(DryRun=False, Filters=[{'Name': 'state', 'Values': ['available']},
{'Name': 'tag:Name', 'Values': ['Ansible-CGW']}])
if len(cgw['CustomerGateways']) >= 2:
return [cgw['CustomerGateways'][0]['CustomerGatewayId'], cgw['CustomerGateways'][1]['CustomerGatewayId']]
# otherwise create and return them
cgw_1 = connection.create_customer_gateway(DryRun=False, Type='ipsec.1', PublicIp='9.8.7.6', BgpAsn=65000)
cgw_2 = connection.create_customer_gateway(DryRun=False, Type='ipsec.1', PublicIp='5.4.3.2', BgpAsn=65000)
for resource in (cgw_1, cgw_2):
connection.create_tags(Resources=[resource['CustomerGateway']['CustomerGatewayId']], Tags=[{'Key': 'Ansible-CGW', 'Value': 'Test'}])
return [cgw_1['CustomerGateway']['CustomerGatewayId'], cgw_2['CustomerGateway']['CustomerGatewayId']]
def get_dependencies():
if os.getenv('PLACEBO_RECORD'):
module = FakeModule(**{})
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs)
vgw = get_vgw(connection)
cgw = get_cgw(connection)
else:
vgw = ["vgw-35d70c2b", "vgw-32d70c2c"]
cgw = ["cgw-6113c87f", "cgw-9e13c880"]
return cgw, vgw
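# Note: with PLACEBO_RECORD set, these tests talk to AWS and (re)record the
# placebo fixtures; otherwise the canned gateway ids above are replayed.
# A rough invocation sketch (illustrative only):
#     PLACEBO_RECORD=1 pytest test_ec2_vpc_vpn.py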
def setup_mod_conn(placeboify, params):
conn = placeboify.client('ec2')
m = FakeModule(**params)
return m, conn
def make_params(cgw, vgw, tags=None, filters=None, routes=None):
tags = {} if tags is None else tags
filters = {} if filters is None else filters
routes = [] if routes is None else routes
return {'customer_gateway_id': cgw,
'static_only': True,
'vpn_gateway_id': vgw,
'connection_type': 'ipsec.1',
'purge_tags': True,
'tags': tags,
'filters': filters,
'routes': routes,
'delay': 15,
'wait_timeout': 600}
def make_conn(placeboify, module, connection):
customer_gateway_id = module.params['customer_gateway_id']
static_only = module.params['static_only']
vpn_gateway_id = module.params['vpn_gateway_id']
connection_type = module.params['connection_type']
check_mode = module.params['check_mode']
changed = True
vpn = ec2_vpc_vpn.create_connection(connection, customer_gateway_id, static_only, vpn_gateway_id, connection_type)
return changed, vpn
def tear_down_conn(placeboify, connection, vpn_connection_id):
ec2_vpc_vpn.delete_connection(connection, vpn_connection_id, delay=15, max_attempts=40)
def test_find_connection_vpc_conn_id(placeboify, maybe_sleep):
# setup dependencies for 2 vpn connections
dependencies = setup_req(placeboify, 2)
dep1, dep2 = dependencies[0], dependencies[1]
params1, vpn1, m1, conn1 = dep1['params'], dep1['vpn'], dep1['module'], dep1['connection']
params2, vpn2, m2, conn2 = dep2['params'], dep2['vpn'], dep2['module'], dep2['connection']
# find the connection with a vpn_connection_id and assert it is the expected one
assert vpn1['VpnConnectionId'] == ec2_vpc_vpn.find_connection(conn1, params1, vpn1['VpnConnectionId'])['VpnConnectionId']
tear_down_conn(placeboify, conn1, vpn1['VpnConnectionId'])
tear_down_conn(placeboify, conn2, vpn2['VpnConnectionId'])
def test_find_connection_filters(placeboify, maybe_sleep):
# setup dependencies for 2 vpn connections
dependencies = setup_req(placeboify, 2)
dep1, dep2 = dependencies[0], dependencies[1]
params1, vpn1, m1, conn1 = dep1['params'], dep1['vpn'], dep1['module'], dep1['connection']
params2, vpn2, m2, conn2 = dep2['params'], dep2['vpn'], dep2['module'], dep2['connection']
# update to different tags
params1.update(tags={'Wrong': 'Tag'})
params2.update(tags={'Correct': 'Tag'})
ec2_vpc_vpn.ensure_present(conn1, params1)
ec2_vpc_vpn.ensure_present(conn2, params2)
# create some new parameters for a filter
params = {'filters': {'tags': {'Correct': 'Tag'}}}
# find the connection that has the parameters above
found = ec2_vpc_vpn.find_connection(conn1, params)
# assert the correct connection was found
assert found['VpnConnectionId'] == vpn2['VpnConnectionId']
# delete the connections
tear_down_conn(placeboify, conn1, vpn1['VpnConnectionId'])
tear_down_conn(placeboify, conn2, vpn2['VpnConnectionId'])
def test_find_connection_insufficient_filters(placeboify, maybe_sleep):
# get list of customer gateways and virtual private gateways
cgw, vgw = get_dependencies()
# create two connections with the same tags
params = make_params(cgw[0], vgw[0], tags={'Correct': 'Tag'})
params2 = make_params(cgw[1], vgw[1], tags={'Correct': 'Tag'})
m, conn = setup_mod_conn(placeboify, params)
m2, conn2 = setup_mod_conn(placeboify, params2)
vpn1 = ec2_vpc_vpn.ensure_present(conn, m.params)[1]
vpn2 = ec2_vpc_vpn.ensure_present(conn2, m2.params)[1]
# reset the parameters so only filtering by tags will occur
m.params = {'filters': {'tags': {'Correct': 'Tag'}}}
# assert that multiple matching connections have been found
with pytest.raises(Exception) as error_message:
ec2_vpc_vpn.find_connection(conn, m.params)
assert error_message == "More than one matching VPN connection was found.To modify or delete a VPN please specify vpn_connection_id or add filters."
# delete the connections
tear_down_conn(placeboify, conn, vpn1['VpnConnectionId'])
tear_down_conn(placeboify, conn, vpn2['VpnConnectionId'])
def test_find_connection_nonexistent(placeboify, maybe_sleep):
# create parameters but don't create a connection with them
params = {'filters': {'tags': {'Correct': 'Tag'}}}
m, conn = setup_mod_conn(placeboify, params)
# try to find a connection with matching parameters and assert None are found
assert ec2_vpc_vpn.find_connection(conn, m.params) is None
def test_create_connection(placeboify, maybe_sleep):
# get list of customer gateways and virtual private gateways
cgw, vgw = get_dependencies()
# create a connection
params = make_params(cgw[0], vgw[0])
m, conn = setup_mod_conn(placeboify, params)
changed, vpn = ec2_vpc_vpn.ensure_present(conn, m.params)
# assert that changed is true and that there is a connection id
assert changed is True
assert 'VpnConnectionId' in vpn
# delete connection
tear_down_conn(placeboify, conn, vpn['VpnConnectionId'])
def test_create_connection_that_exists(placeboify, maybe_sleep):
# setup dependencies for 1 vpn connection
dependencies = setup_req(placeboify, 1)
params, vpn, m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection']
# try to recreate the same connection
changed, vpn2 = ec2_vpc_vpn.ensure_present(conn, params)
# nothing should have changed
assert changed is False
assert vpn['VpnConnectionId'] == vpn2['VpnConnectionId']
# delete connection
tear_down_conn(placeboify, conn, vpn['VpnConnectionId'])
def test_modify_deleted_connection(placeboify, maybe_sleep):
# setup dependencies for 1 vpn connection
dependencies = setup_req(placeboify, 1)
params, vpn, m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection']
# delete it
tear_down_conn(placeboify, conn, vpn['VpnConnectionId'])
# try to update the deleted connection
m.params.update(vpn_connection_id=vpn['VpnConnectionId'])
with pytest.raises(Exception) as error_message:
ec2_vpc_vpn.ensure_present(conn, m.params)
assert error_message == "There is no VPN connection available or pending with that id. Did you delete it?"
def test_delete_connection(placeboify, maybe_sleep):
# setup dependencies for 1 vpn connection
dependencies = setup_req(placeboify, 1)
params, vpn, m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection']
# delete it
changed, vpn = ec2_vpc_vpn.ensure_absent(conn, m.params)
assert changed is True
assert vpn == {}
def test_delete_nonexistent_connection(placeboify, maybe_sleep):
# create parameters and ensure any connection matching (None) is deleted
params = {'filters': {'tags': {'ThisConnection': 'DoesntExist'}}, 'delay': 15, 'wait_timeout': 600}
m, conn = setup_mod_conn(placeboify, params)
changed, vpn = ec2_vpc_vpn.ensure_absent(conn, m.params)
assert changed is False
assert vpn == {}
def test_check_for_update_tags(placeboify, maybe_sleep):
# setup dependencies for 1 vpn connection
dependencies = setup_req(placeboify, 1)
params, vpn, m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection']
# add and remove a number of tags
m.params['tags'] = {'One': 'one', 'Two': 'two'}
ec2_vpc_vpn.ensure_present(conn, m.params)
m.params['tags'] = {'Two': 'two', 'Three': 'three', 'Four': 'four'}
changes = ec2_vpc_vpn.check_for_update(conn, m.params, vpn['VpnConnectionId'])
flat_dict_changes = boto3_tag_list_to_ansible_dict(changes['tags_to_add'])
correct_changes = boto3_tag_list_to_ansible_dict([{'Key': 'Three', 'Value': 'three'}, {'Key': 'Four', 'Value': 'four'}])
assert flat_dict_changes == correct_changes
assert changes['tags_to_remove'] == ['One']
# delete connection
tear_down_conn(placeboify, conn, vpn['VpnConnectionId'])
def test_check_for_update_nonmodifiable_attr(placeboify, maybe_sleep):
# setup dependencies for 1 vpn connection
dependencies = setup_req(placeboify, 1)
params, vpn, m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection']
current_vgw = params['vpn_gateway_id']
# update a parameter that isn't modifiable
m.params.update(vpn_gateway_id="invalidchange")
err = 'You cannot modify vpn_gateway_id, the current value of which is {0}. Modifiable VPN connection attributes are tags.'.format(current_vgw)
with pytest.raises(Exception) as error_message:
        ec2_vpc_vpn.check_for_update(conn, m.params, vpn['VpnConnectionId'])
assert error_message == err
# delete connection
tear_down_conn(placeboify, conn, vpn['VpnConnectionId'])
def test_add_tags(placeboify, maybe_sleep):
# setup dependencies for 1 vpn connection
dependencies = setup_req(placeboify, 1)
params, vpn, m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection']
# add a tag to the connection
ec2_vpc_vpn.add_tags(conn, vpn['VpnConnectionId'], add=[{'Key': 'Ansible-Test', 'Value': 'VPN'}])
# assert tag is there
current_vpn = ec2_vpc_vpn.find_connection(conn, params)
assert current_vpn['Tags'] == [{'Key': 'Ansible-Test', 'Value': 'VPN'}]
# delete connection
tear_down_conn(placeboify, conn, vpn['VpnConnectionId'])
def test_remove_tags(placeboify, maybe_sleep):
# setup dependencies for 1 vpn connection
dependencies = setup_req(placeboify, 1)
params, vpn, m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection']
# remove a tag from the connection
ec2_vpc_vpn.remove_tags(conn, vpn['VpnConnectionId'], remove=['Ansible-Test'])
# assert the tag is gone
current_vpn = ec2_vpc_vpn.find_connection(conn, params)
assert 'Tags' not in current_vpn
# delete connection
tear_down_conn(placeboify, conn, vpn['VpnConnectionId'])
def test_add_routes(placeboify, maybe_sleep):
# setup dependencies for 1 vpn connection
dependencies = setup_req(placeboify, 1)
params, vpn, m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection']
    # add two routes to the existing connection
ec2_vpc_vpn.add_routes(conn, vpn['VpnConnectionId'], ['195.168.2.0/24', '196.168.2.0/24'])
# assert both routes are there
current_vpn = ec2_vpc_vpn.find_connection(conn, params)
assert set(each['DestinationCidrBlock'] for each in current_vpn['Routes']) == set(['195.168.2.0/24', '196.168.2.0/24'])
# delete connection
tear_down_conn(placeboify, conn, vpn['VpnConnectionId'])
def setup_req(placeboify, number_of_results=1):
''' returns dependencies for VPN connections '''
assert number_of_results in (1, 2)
results = []
cgw, vgw = get_dependencies()
for each in range(0, number_of_results):
params = make_params(cgw[each], vgw[each])
m, conn = setup_mod_conn(placeboify, params)
vpn = ec2_vpc_vpn.ensure_present(conn, params)[1]
results.append({'module': m, 'connection': conn, 'vpn': vpn, 'params': params})
if number_of_results == 1:
return results[0]
else:
return results[0], results[1]
|
jantoniomartin/condottieri_events
|
refs/heads/master
|
__init__.py
|
12133432
| |
guangquanwang/cuda-convnet2
|
refs/heads/master
|
cudaconvnet/__init__.py
|
12133432
| |
mhils/readthedocs.org
|
refs/heads/master
|
readthedocs/oauth/migrations/__init__.py
|
12133432
| |
lauria/Samba4
|
refs/heads/master
|
source3/stf/stf.py
|
137
|
#!/usr/bin/python
#
# Samba Testing Framework for Unit-testing
#
import os, string, re
import osver
def get_server_list_from_string(s):
server_list = []
# Format is a list of server:domain\username%password separated
# by commas.
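    # e.g. STF_SERVERS="nt4box:TESTDOM\Administrator%secret,nt5box:TESTDOM/root%pw"
    # (hypothetical host/domain/password values; each entry must match the regex below)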
for entry in string.split(s, ","):
# Parse entry
m = re.match("(.*):(.*)(\\\\|/)(.*)%(.*)", entry)
if not m:
            raise ValueError, "badly formed server list entry '%s'" % entry
server = m.group(1)
domain = m.group(2)
username = m.group(4)
password = m.group(5)
# Categorise servers
server_list.append({"platform": osver.os_version(server),
"hostname": server,
"administrator": {"username": username,
"domain": domain,
"password" : password}})
return server_list
def get_server_list():
"""Iterate through all sources of server info and append them all
in one big list."""
server_list = []
# The $STF_SERVERS environment variable
if os.environ.has_key("STF_SERVERS"):
server_list = server_list + \
get_server_list_from_string(os.environ["STF_SERVERS"])
return server_list
def get_server(platform = None):
"""Return configuration information for a server. The platform
argument can be a string either 'nt4' or 'nt5' for Windows NT or
Windows 2000 servers, or just 'nt' for Windows NT and higher."""
server_list = get_server_list()
for server in server_list:
if platform:
p = server["platform"]
if platform == "nt":
if (p == osver.PLATFORM_NT4 or p == osver.PLATFORM_NT5):
return server
if platform == "nt4" and p == osver.PLATFORM_NT4:
return server
if platform == "nt5" and p == osver.PLATFORM_NT5:
return server
else:
# No filter defined, return first in list
return server
return None
def dict_check(sample_dict, real_dict):
"""Check that real_dict contains all the keys present in sample_dict
    and no extras. Also check that common keys are of the same type."""
tmp = real_dict.copy()
for key in sample_dict.keys():
# Check existing key and type
if not real_dict.has_key(key):
raise ValueError, "dict does not contain key '%s'" % key
if type(sample_dict[key]) != type(real_dict[key]):
raise ValueError, "dict has differing types (%s vs %s) for key " \
"'%s'" % (type(sample_dict[key]), type(real_dict[key]), key)
# Check dictionaries recursively
if type(sample_dict[key]) == dict:
dict_check(sample_dict[key], real_dict[key])
# Delete visited keys from copy
del(tmp[key])
# Any keys leftover are present in the real dict but not the sample
if len(tmp) == 0:
return
result = "dict has extra keys: "
for key in tmp.keys():
result = result + key + " "
raise ValueError, result
if __name__ == "__main__":
print get_server(platform = "nt")
|
rjschof/gem5
|
refs/heads/master
|
ext/ply/example/yply/ylex.py
|
165
|
# lexer for yacc-grammars
#
# Author: David Beazley (dave@dabeaz.com)
# Date : October 2, 2006
import sys
sys.path.append("../..")
from ply import *
tokens = (
'LITERAL','SECTION','TOKEN','LEFT','RIGHT','PREC','START','TYPE','NONASSOC','UNION','CODE',
'ID','QLITERAL','NUMBER',
)
states = (('code','exclusive'),)
literals = [ ';', ',', '<', '>', '|',':' ]
t_ignore = ' \t'
t_TOKEN = r'%token'
t_LEFT = r'%left'
t_RIGHT = r'%right'
t_NONASSOC = r'%nonassoc'
t_PREC = r'%prec'
t_START = r'%start'
t_TYPE = r'%type'
t_UNION = r'%union'
t_ID = r'[a-zA-Z_][a-zA-Z_0-9]*'
t_QLITERAL = r'''(?P<quote>['"]).*?(?P=quote)'''
t_NUMBER = r'\d+'
def t_SECTION(t):
r'%%'
if getattr(t.lexer,"lastsection",0):
t.value = t.lexer.lexdata[t.lexpos+2:]
t.lexer.lexpos = len(t.lexer.lexdata)
else:
        t.lexer.lastsection = 1   # after the first '%%', the next one starts the trailing code section
return t
# Comments
def t_ccomment(t):
r'/\*(.|\n)*?\*/'
t.lexer.lineno += t.value.count('\n')
t_ignore_cppcomment = r'//.*'
def t_LITERAL(t):
r'%\{(.|\n)*?%\}'
t.lexer.lineno += t.value.count("\n")
return t
def t_NEWLINE(t):
r'\n'
t.lexer.lineno += 1
def t_code(t):
r'\{'
t.lexer.codestart = t.lexpos
t.lexer.level = 1
t.lexer.begin('code')
def t_code_ignore_string(t):
r'\"([^\\\n]|(\\.))*?\"'
def t_code_ignore_char(t):
r'\'([^\\\n]|(\\.))*?\''
def t_code_ignore_comment(t):
r'/\*(.|\n)*?\*/'
def t_code_ignore_cppcom(t):
r'//.*'
def t_code_lbrace(t):
r'\{'
t.lexer.level += 1
def t_code_rbrace(t):
r'\}'
t.lexer.level -= 1
if t.lexer.level == 0:
t.type = 'CODE'
t.value = t.lexer.lexdata[t.lexer.codestart:t.lexpos+1]
t.lexer.begin('INITIAL')
t.lexer.lineno += t.value.count('\n')
return t
t_code_ignore_nonspace = r'[^\s\}\'\"\{]+'
t_code_ignore_whitespace = r'\s+'
t_code_ignore = ""
def t_code_error(t):
raise RuntimeError
def t_error(t):
print "%d: Illegal character '%s'" % (t.lexer.lineno, t.value[0])
print t.value
t.lexer.skip(1)
lex.lex()
if __name__ == '__main__':
lex.runmain()
|
RockySteveJobs/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Lib/zipfile.py
|
45
|
"""
Read and write ZIP files.
XXX references to utf-8 need further investigation.
"""
import io
import os
import re
import imp
import sys
import time
import stat
import shutil
import struct
import binascii
try:
import zlib # We may need its compression method
crc32 = zlib.crc32
except ImportError:
zlib = None
crc32 = binascii.crc32
__all__ = ["BadZipFile", "BadZipfile", "error", "ZIP_STORED", "ZIP_DEFLATED",
"is_zipfile", "ZipInfo", "ZipFile", "PyZipFile", "LargeZipFile"]
class BadZipFile(Exception):
pass
class LargeZipFile(Exception):
"""
    Raised when a zipfile being written requires ZIP64 extensions
    but those extensions are disabled.
"""
error = BadZipfile = BadZipFile # Pre-3.2 compatibility names
ZIP64_LIMIT = (1 << 31) - 1
ZIP_FILECOUNT_LIMIT = 1 << 16
ZIP_MAX_COMMENT = (1 << 16) - 1
# constants for Zip file compression methods
ZIP_STORED = 0
ZIP_DEFLATED = 8
# Other ZIP compression methods not supported
# Below are some formats and associated data for reading/writing headers using
# the struct module. The names and structures of headers/records are those used
# in the PKWARE description of the ZIP file format:
# http://www.pkware.com/documents/casestudies/APPNOTE.TXT
# (URL valid as of January 2008)
# The "end of central directory" structure, magic number, size, and indices
# (section V.I in the format document)
structEndArchive = b"<4s4H2LH"
stringEndArchive = b"PK\005\006"
sizeEndCentDir = struct.calcsize(structEndArchive)
_ECD_SIGNATURE = 0
_ECD_DISK_NUMBER = 1
_ECD_DISK_START = 2
_ECD_ENTRIES_THIS_DISK = 3
_ECD_ENTRIES_TOTAL = 4
_ECD_SIZE = 5
_ECD_OFFSET = 6
_ECD_COMMENT_SIZE = 7
# These last two indices are not part of the structure as defined in the
# spec, but they are used internally by this module as a convenience
_ECD_COMMENT = 8
_ECD_LOCATION = 9
# The "central directory" structure, magic number, size, and indices
# of entries in the structure (section V.F in the format document)
structCentralDir = "<4s4B4HL2L5H2L"
stringCentralDir = b"PK\001\002"
sizeCentralDir = struct.calcsize(structCentralDir)
# indexes of entries in the central directory structure
_CD_SIGNATURE = 0
_CD_CREATE_VERSION = 1
_CD_CREATE_SYSTEM = 2
_CD_EXTRACT_VERSION = 3
_CD_EXTRACT_SYSTEM = 4
_CD_FLAG_BITS = 5
_CD_COMPRESS_TYPE = 6
_CD_TIME = 7
_CD_DATE = 8
_CD_CRC = 9
_CD_COMPRESSED_SIZE = 10
_CD_UNCOMPRESSED_SIZE = 11
_CD_FILENAME_LENGTH = 12
_CD_EXTRA_FIELD_LENGTH = 13
_CD_COMMENT_LENGTH = 14
_CD_DISK_NUMBER_START = 15
_CD_INTERNAL_FILE_ATTRIBUTES = 16
_CD_EXTERNAL_FILE_ATTRIBUTES = 17
_CD_LOCAL_HEADER_OFFSET = 18
# The "local file header" structure, magic number, size, and indices
# (section V.A in the format document)
structFileHeader = "<4s2B4HL2L2H"
stringFileHeader = b"PK\003\004"
sizeFileHeader = struct.calcsize(structFileHeader)
_FH_SIGNATURE = 0
_FH_EXTRACT_VERSION = 1
_FH_EXTRACT_SYSTEM = 2
_FH_GENERAL_PURPOSE_FLAG_BITS = 3
_FH_COMPRESSION_METHOD = 4
_FH_LAST_MOD_TIME = 5
_FH_LAST_MOD_DATE = 6
_FH_CRC = 7
_FH_COMPRESSED_SIZE = 8
_FH_UNCOMPRESSED_SIZE = 9
_FH_FILENAME_LENGTH = 10
_FH_EXTRA_FIELD_LENGTH = 11
# The "Zip64 end of central directory locator" structure, magic number, and size
structEndArchive64Locator = "<4sLQL"
stringEndArchive64Locator = b"PK\x06\x07"
sizeEndCentDir64Locator = struct.calcsize(structEndArchive64Locator)
# The "Zip64 end of central directory" record, magic number, size, and indices
# (section V.G in the format document)
structEndArchive64 = "<4sQ2H2L4Q"
stringEndArchive64 = b"PK\x06\x06"
sizeEndCentDir64 = struct.calcsize(structEndArchive64)
_CD64_SIGNATURE = 0
_CD64_DIRECTORY_RECSIZE = 1
_CD64_CREATE_VERSION = 2
_CD64_EXTRACT_VERSION = 3
_CD64_DISK_NUMBER = 4
_CD64_DISK_NUMBER_START = 5
_CD64_NUMBER_ENTRIES_THIS_DISK = 6
_CD64_NUMBER_ENTRIES_TOTAL = 7
_CD64_DIRECTORY_SIZE = 8
_CD64_OFFSET_START_CENTDIR = 9
def _check_zipfile(fp):
try:
if _EndRecData(fp):
return True # file has correct magic number
except IOError:
pass
return False
def is_zipfile(filename):
"""Quickly see if a file is a ZIP file by checking the magic number.
The filename argument may be a file or file-like object too.
"""
result = False
try:
if hasattr(filename, "read"):
result = _check_zipfile(fp=filename)
else:
with open(filename, "rb") as fp:
result = _check_zipfile(fp)
except IOError:
pass
return result
def _EndRecData64(fpin, offset, endrec):
"""
Read the ZIP64 end-of-archive records and use that to update endrec
"""
try:
fpin.seek(offset - sizeEndCentDir64Locator, 2)
except IOError:
# If the seek fails, the file is not large enough to contain a ZIP64
# end-of-archive record, so just return the end record we were given.
return endrec
data = fpin.read(sizeEndCentDir64Locator)
sig, diskno, reloff, disks = struct.unpack(structEndArchive64Locator, data)
if sig != stringEndArchive64Locator:
return endrec
if diskno != 0 or disks != 1:
raise BadZipFile("zipfiles that span multiple disks are not supported")
# Assume no 'zip64 extensible data'
fpin.seek(offset - sizeEndCentDir64Locator - sizeEndCentDir64, 2)
data = fpin.read(sizeEndCentDir64)
sig, sz, create_version, read_version, disk_num, disk_dir, \
dircount, dircount2, dirsize, diroffset = \
struct.unpack(structEndArchive64, data)
if sig != stringEndArchive64:
return endrec
# Update the original endrec using data from the ZIP64 record
endrec[_ECD_SIGNATURE] = sig
endrec[_ECD_DISK_NUMBER] = disk_num
endrec[_ECD_DISK_START] = disk_dir
endrec[_ECD_ENTRIES_THIS_DISK] = dircount
endrec[_ECD_ENTRIES_TOTAL] = dircount2
endrec[_ECD_SIZE] = dirsize
endrec[_ECD_OFFSET] = diroffset
return endrec
def _EndRecData(fpin):
"""Return data from the "End of Central Directory" record, or None.
The data is a list of the nine items in the ZIP "End of central dir"
record followed by a tenth item, the file seek offset of this record."""
# Determine file size
fpin.seek(0, 2)
filesize = fpin.tell()
    # Check to see if this is a ZIP file with no archive comment (the
# "end of central directory" structure should be the last item in the
# file if this is the case).
try:
fpin.seek(-sizeEndCentDir, 2)
except IOError:
return None
data = fpin.read()
if data[0:4] == stringEndArchive and data[-2:] == b"\000\000":
# the signature is correct and there's no comment, unpack structure
endrec = struct.unpack(structEndArchive, data)
endrec=list(endrec)
# Append a blank comment and record start offset
endrec.append(b"")
endrec.append(filesize - sizeEndCentDir)
# Try to read the "Zip64 end of central directory" structure
return _EndRecData64(fpin, -sizeEndCentDir, endrec)
# Either this is not a ZIP file, or it is a ZIP file with an archive
# comment. Search the end of the file for the "end of central directory"
# record signature. The comment is the last item in the ZIP file and may be
# up to 64K long. It is assumed that the "end of central directory" magic
# number does not appear in the comment.
maxCommentStart = max(filesize - (1 << 16) - sizeEndCentDir, 0)
fpin.seek(maxCommentStart, 0)
data = fpin.read()
start = data.rfind(stringEndArchive)
if start >= 0:
# found the magic number; attempt to unpack and interpret
recData = data[start:start+sizeEndCentDir]
endrec = list(struct.unpack(structEndArchive, recData))
commentSize = endrec[_ECD_COMMENT_SIZE] #as claimed by the zip file
comment = data[start+sizeEndCentDir:start+sizeEndCentDir+commentSize]
endrec.append(comment)
endrec.append(maxCommentStart + start)
# Try to read the "Zip64 end of central directory" structure
return _EndRecData64(fpin, maxCommentStart + start - filesize,
endrec)
# Unable to find a valid end of central directory structure
return
class ZipInfo (object):
"""Class with attributes describing each file in the ZIP archive."""
__slots__ = (
'orig_filename',
'filename',
'date_time',
'compress_type',
'comment',
'extra',
'create_system',
'create_version',
'extract_version',
'reserved',
'flag_bits',
'volume',
'internal_attr',
'external_attr',
'header_offset',
'CRC',
'compress_size',
'file_size',
'_raw_time',
)
def __init__(self, filename="NoName", date_time=(1980,1,1,0,0,0)):
self.orig_filename = filename # Original file name in archive
# Terminate the file name at the first null byte. Null bytes in file
# names are used as tricks by viruses in archives.
null_byte = filename.find(chr(0))
if null_byte >= 0:
filename = filename[0:null_byte]
# This is used to ensure paths in generated ZIP files always use
# forward slashes as the directory separator, as required by the
# ZIP format specification.
if os.sep != "/" and os.sep in filename:
filename = filename.replace(os.sep, "/")
self.filename = filename # Normalized file name
self.date_time = date_time # year, month, day, hour, min, sec
# Standard values:
self.compress_type = ZIP_STORED # Type of compression for the file
self.comment = b"" # Comment for each file
self.extra = b"" # ZIP extra data
if sys.platform == 'win32':
self.create_system = 0 # System which created ZIP archive
else:
# Assume everything else is unix-y
self.create_system = 3 # System which created ZIP archive
self.create_version = 20 # Version which created ZIP archive
self.extract_version = 20 # Version needed to extract archive
self.reserved = 0 # Must be zero
self.flag_bits = 0 # ZIP flag bits
self.volume = 0 # Volume number of file header
self.internal_attr = 0 # Internal attributes
self.external_attr = 0 # External file attributes
# Other attributes are set by class ZipFile:
# header_offset Byte offset to the file header
# CRC CRC-32 of the uncompressed file
# compress_size Size of the compressed file
# file_size Size of the uncompressed file
def FileHeader(self):
"""Return the per-file header as a string."""
dt = self.date_time
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
if self.flag_bits & 0x08:
# Set these to zero because we write them after the file data
CRC = compress_size = file_size = 0
else:
CRC = self.CRC
compress_size = self.compress_size
file_size = self.file_size
extra = self.extra
if file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT:
# File is larger than what fits into a 4 byte integer,
# fall back to the ZIP64 extension
fmt = '<HHQQ'
extra = extra + struct.pack(fmt,
1, struct.calcsize(fmt)-4, file_size, compress_size)
file_size = 0xffffffff
compress_size = 0xffffffff
self.extract_version = max(45, self.extract_version)
self.create_version = max(45, self.extract_version)
filename, flag_bits = self._encodeFilenameFlags()
header = struct.pack(structFileHeader, stringFileHeader,
self.extract_version, self.reserved, flag_bits,
self.compress_type, dostime, dosdate, CRC,
compress_size, file_size,
len(filename), len(extra))
return header + filename + extra
def _encodeFilenameFlags(self):
try:
return self.filename.encode('ascii'), self.flag_bits
except UnicodeEncodeError:
return self.filename.encode('utf-8'), self.flag_bits | 0x800
def _decodeExtra(self):
# Try to decode the extra field.
extra = self.extra
unpack = struct.unpack
while extra:
tp, ln = unpack('<HH', extra[:4])
if tp == 1:
if ln >= 24:
counts = unpack('<QQQ', extra[4:28])
elif ln == 16:
counts = unpack('<QQ', extra[4:20])
elif ln == 8:
counts = unpack('<Q', extra[4:12])
elif ln == 0:
counts = ()
else:
raise RuntimeError("Corrupt extra field %s"%(ln,))
idx = 0
# ZIP64 extension (large files and/or large archives)
if self.file_size in (0xffffffffffffffff, 0xffffffff):
self.file_size = counts[idx]
idx += 1
if self.compress_size == 0xFFFFFFFF:
self.compress_size = counts[idx]
idx += 1
if self.header_offset == 0xffffffff:
old = self.header_offset
self.header_offset = counts[idx]
idx+=1
extra = extra[ln+4:]
class _ZipDecrypter:
"""Class to handle decryption of files stored within a ZIP archive.
ZIP supports a password-based form of encryption. Even though known
plaintext attacks have been found against it, it is still useful
to be able to get data out of such a file.
Usage:
zd = _ZipDecrypter(mypwd)
plain_char = zd(cypher_char)
plain_text = map(zd, cypher_text)
"""
def _GenerateCRCTable():
"""Generate a CRC-32 table.
ZIP encryption uses the CRC32 one-byte primitive for scrambling some
internal keys. We noticed that a direct implementation is faster than
relying on binascii.crc32().
"""
poly = 0xedb88320
table = [0] * 256
for i in range(256):
crc = i
for j in range(8):
if crc & 1:
crc = ((crc >> 1) & 0x7FFFFFFF) ^ poly
else:
crc = ((crc >> 1) & 0x7FFFFFFF)
table[i] = crc
return table
crctable = _GenerateCRCTable()
def _crc32(self, ch, crc):
"""Compute the CRC32 primitive on one byte."""
return ((crc >> 8) & 0xffffff) ^ self.crctable[(crc ^ ch) & 0xff]
def __init__(self, pwd):
self.key0 = 305419896
self.key1 = 591751049
self.key2 = 878082192
for p in pwd:
self._UpdateKeys(p)
def _UpdateKeys(self, c):
self.key0 = self._crc32(c, self.key0)
self.key1 = (self.key1 + (self.key0 & 255)) & 4294967295
self.key1 = (self.key1 * 134775813 + 1) & 4294967295
self.key2 = self._crc32((self.key1 >> 24) & 255, self.key2)
def __call__(self, c):
"""Decrypt a single character."""
assert isinstance(c, int)
k = self.key2 | 2
c = c ^ (((k * (k^1)) >> 8) & 255)
self._UpdateKeys(c)
return c
class ZipExtFile(io.BufferedIOBase):
"""File-like object for reading an archive member.
Is returned by ZipFile.open().
"""
# Max size supported by decompressor.
MAX_N = 1 << 31 - 1
# Read from compressed files in 4k blocks.
MIN_READ_SIZE = 4096
# Search for universal newlines or line chunks.
PATTERN = re.compile(br'^(?P<chunk>[^\r\n]+)|(?P<newline>\n|\r\n?)')
def __init__(self, fileobj, mode, zipinfo, decrypter=None,
close_fileobj=False):
self._fileobj = fileobj
self._decrypter = decrypter
self._close_fileobj = close_fileobj
self._compress_type = zipinfo.compress_type
self._compress_size = zipinfo.compress_size
self._compress_left = zipinfo.compress_size
if self._compress_type == ZIP_DEFLATED:
self._decompressor = zlib.decompressobj(-15)
self._unconsumed = b''
self._readbuffer = b''
self._offset = 0
self._universal = 'U' in mode
self.newlines = None
# Adjust read size for encrypted files since the first 12 bytes
# are for the encryption/password information.
if self._decrypter is not None:
self._compress_left -= 12
self.mode = mode
self.name = zipinfo.filename
if hasattr(zipinfo, 'CRC'):
self._expected_crc = zipinfo.CRC
self._running_crc = crc32(b'') & 0xffffffff
else:
self._expected_crc = None
def readline(self, limit=-1):
"""Read and return a line from the stream.
If limit is specified, at most limit bytes will be read.
"""
if not self._universal and limit < 0:
# Shortcut common case - newline found in buffer.
i = self._readbuffer.find(b'\n', self._offset) + 1
if i > 0:
line = self._readbuffer[self._offset: i]
self._offset = i
return line
if not self._universal:
return io.BufferedIOBase.readline(self, limit)
line = b''
while limit < 0 or len(line) < limit:
readahead = self.peek(2)
if readahead == b'':
return line
#
# Search for universal newlines or line chunks.
#
# The pattern returns either a line chunk or a newline, but not
# both. Combined with peek(2), we are assured that the sequence
# '\r\n' is always retrieved completely and never split into
# separate newlines - '\r', '\n' due to coincidental readaheads.
#
match = self.PATTERN.search(readahead)
newline = match.group('newline')
if newline is not None:
if self.newlines is None:
self.newlines = []
if newline not in self.newlines:
self.newlines.append(newline)
self._offset += len(newline)
return line + b'\n'
chunk = match.group('chunk')
if limit >= 0:
chunk = chunk[: limit - len(line)]
self._offset += len(chunk)
line += chunk
return line
def peek(self, n=1):
"""Returns buffered bytes without advancing the position."""
if n > len(self._readbuffer) - self._offset:
chunk = self.read(n)
self._offset -= len(chunk)
# Return up to 512 bytes to reduce allocation overhead for tight loops.
return self._readbuffer[self._offset: self._offset + 512]
def readable(self):
return True
def read(self, n=-1):
"""Read and return up to n bytes.
        If the argument is omitted, None, or negative, data is read and returned until EOF is reached.
"""
buf = b''
if n is None:
n = -1
while True:
if n < 0:
data = self.read1(n)
elif n > len(buf):
data = self.read1(n - len(buf))
else:
return buf
if len(data) == 0:
return buf
buf += data
def _update_crc(self, newdata, eof):
# Update the CRC using the given data.
if self._expected_crc is None:
# No need to compute the CRC if we don't have a reference value
return
self._running_crc = crc32(newdata, self._running_crc) & 0xffffffff
# Check the CRC if we're at the end of the file
if eof and self._running_crc != self._expected_crc:
raise BadZipFile("Bad CRC-32 for file %r" % self.name)
def read1(self, n):
"""Read up to n bytes with at most one read() system call."""
# Simplify algorithm (branching) by transforming negative n to large n.
if n < 0 or n is None:
n = self.MAX_N
# Bytes available in read buffer.
len_readbuffer = len(self._readbuffer) - self._offset
# Read from file.
if self._compress_left > 0 and n > len_readbuffer + len(self._unconsumed):
nbytes = n - len_readbuffer - len(self._unconsumed)
nbytes = max(nbytes, self.MIN_READ_SIZE)
nbytes = min(nbytes, self._compress_left)
data = self._fileobj.read(nbytes)
self._compress_left -= len(data)
if data and self._decrypter is not None:
data = bytes(map(self._decrypter, data))
if self._compress_type == ZIP_STORED:
self._update_crc(data, eof=(self._compress_left==0))
self._readbuffer = self._readbuffer[self._offset:] + data
self._offset = 0
else:
# Prepare deflated bytes for decompression.
self._unconsumed += data
# Handle unconsumed data.
if (len(self._unconsumed) > 0 and n > len_readbuffer and
self._compress_type == ZIP_DEFLATED):
data = self._decompressor.decompress(
self._unconsumed,
max(n - len_readbuffer, self.MIN_READ_SIZE)
)
self._unconsumed = self._decompressor.unconsumed_tail
eof = len(self._unconsumed) == 0 and self._compress_left == 0
if eof:
data += self._decompressor.flush()
self._update_crc(data, eof=eof)
self._readbuffer = self._readbuffer[self._offset:] + data
self._offset = 0
# Read from buffer.
data = self._readbuffer[self._offset: self._offset + n]
self._offset += len(data)
return data
def close(self):
try:
if self._close_fileobj:
self._fileobj.close()
finally:
super().close()
class ZipFile:
""" Class with methods to open, read, write, close, list zip files.
z = ZipFile(file, mode="r", compression=ZIP_STORED, allowZip64=False)
file: Either the path to the file, or a file-like object.
If it is a path, the file will be opened and closed by ZipFile.
mode: The mode can be either read "r", write "w" or append "a".
compression: ZIP_STORED (no compression) or ZIP_DEFLATED (requires zlib).
allowZip64: if True ZipFile will create files with ZIP64 extensions when
needed, otherwise it will raise an exception when this would
be necessary.
"""
fp = None # Set here since __del__ checks it
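    # Typical read-side usage (sketch; 'archive.zip' and '/tmp/out' are
    # hypothetical paths):
    #     with ZipFile('archive.zip') as zf:
    #         print(zf.namelist())
    #         zf.extractall('/tmp/out')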
def __init__(self, file, mode="r", compression=ZIP_STORED, allowZip64=False):
"""Open the ZIP file with mode read "r", write "w" or append "a"."""
if mode not in ("r", "w", "a"):
raise RuntimeError('ZipFile() requires mode "r", "w", or "a"')
if compression == ZIP_STORED:
pass
elif compression == ZIP_DEFLATED:
if not zlib:
raise RuntimeError(
"Compression requires the (missing) zlib module")
else:
raise RuntimeError("That compression method is not supported")
self._allowZip64 = allowZip64
self._didModify = False
self.debug = 0 # Level of printing: 0 through 3
self.NameToInfo = {} # Find file info given name
self.filelist = [] # List of ZipInfo instances for archive
self.compression = compression # Method of compression
self.mode = key = mode.replace('b', '')[0]
self.pwd = None
self.comment = b''
# Check if we were passed a file-like object
if isinstance(file, str):
# No, it's a filename
self._filePassed = 0
self.filename = file
modeDict = {'r' : 'rb', 'w': 'wb', 'a' : 'r+b'}
try:
self.fp = io.open(file, modeDict[mode])
except IOError:
if mode == 'a':
mode = key = 'w'
self.fp = io.open(file, modeDict[mode])
else:
raise
else:
self._filePassed = 1
self.fp = file
self.filename = getattr(file, 'name', None)
if key == 'r':
self._GetContents()
elif key == 'w':
# set the modified flag so central directory gets written
# even if no files are added to the archive
self._didModify = True
elif key == 'a':
try:
# See if file is a zip file
self._RealGetContents()
# seek to start of directory and overwrite
self.fp.seek(self.start_dir, 0)
except BadZipFile:
# file is not a zip file, just append
self.fp.seek(0, 2)
# set the modified flag so central directory gets written
# even if no files are added to the archive
self._didModify = True
else:
if not self._filePassed:
self.fp.close()
self.fp = None
raise RuntimeError('Mode must be "r", "w" or "a"')
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def _GetContents(self):
"""Read the directory, making sure we close the file if the format
is bad."""
try:
self._RealGetContents()
except BadZipFile:
if not self._filePassed:
self.fp.close()
self.fp = None
raise
def _RealGetContents(self):
"""Read in the table of contents for the ZIP file."""
fp = self.fp
try:
endrec = _EndRecData(fp)
except IOError:
raise BadZipFile("File is not a zip file")
if not endrec:
raise BadZipFile("File is not a zip file")
if self.debug > 1:
print(endrec)
size_cd = endrec[_ECD_SIZE] # bytes in central directory
offset_cd = endrec[_ECD_OFFSET] # offset of central directory
self.comment = endrec[_ECD_COMMENT] # archive comment
# "concat" is zero, unless zip was concatenated to another file
concat = endrec[_ECD_LOCATION] - size_cd - offset_cd
if endrec[_ECD_SIGNATURE] == stringEndArchive64:
# If Zip64 extension structures are present, account for them
concat -= (sizeEndCentDir64 + sizeEndCentDir64Locator)
if self.debug > 2:
inferred = concat + offset_cd
print("given, inferred, offset", offset_cd, inferred, concat)
# self.start_dir: Position of start of central directory
self.start_dir = offset_cd + concat
fp.seek(self.start_dir, 0)
data = fp.read(size_cd)
fp = io.BytesIO(data)
total = 0
while total < size_cd:
centdir = fp.read(sizeCentralDir)
if centdir[0:4] != stringCentralDir:
raise BadZipFile("Bad magic number for central directory")
centdir = struct.unpack(structCentralDir, centdir)
if self.debug > 2:
print(centdir)
filename = fp.read(centdir[_CD_FILENAME_LENGTH])
flags = centdir[5]
if flags & 0x800:
# UTF-8 file names extension
filename = filename.decode('utf-8')
else:
# Historical ZIP filename encoding
filename = filename.decode('cp437')
# Create ZipInfo instance to store file information
x = ZipInfo(filename)
x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH])
x.comment = fp.read(centdir[_CD_COMMENT_LENGTH])
x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET]
(x.create_version, x.create_system, x.extract_version, x.reserved,
x.flag_bits, x.compress_type, t, d,
x.CRC, x.compress_size, x.file_size) = centdir[1:12]
x.volume, x.internal_attr, x.external_attr = centdir[15:18]
# Convert date/time code to (year, month, day, hour, min, sec)
x._raw_time = t
x.date_time = ( (d>>9)+1980, (d>>5)&0xF, d&0x1F,
t>>11, (t>>5)&0x3F, (t&0x1F) * 2 )
x._decodeExtra()
x.header_offset = x.header_offset + concat
self.filelist.append(x)
self.NameToInfo[x.filename] = x
# update total bytes read from central directory
total = (total + sizeCentralDir + centdir[_CD_FILENAME_LENGTH]
+ centdir[_CD_EXTRA_FIELD_LENGTH]
+ centdir[_CD_COMMENT_LENGTH])
if self.debug > 2:
print("total", total)
def namelist(self):
"""Return a list of file names in the archive."""
l = []
for data in self.filelist:
l.append(data.filename)
return l
def infolist(self):
"""Return a list of class ZipInfo instances for files in the
archive."""
return self.filelist
def printdir(self, file=None):
"""Print a table of contents for the zip file."""
print("%-46s %19s %12s" % ("File Name", "Modified ", "Size"),
file=file)
for zinfo in self.filelist:
date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time[:6]
print("%-46s %s %12d" % (zinfo.filename, date, zinfo.file_size),
file=file)
def testzip(self):
"""Read all the files and check the CRC."""
chunk_size = 2 ** 20
for zinfo in self.filelist:
try:
# Read by chunks, to avoid an OverflowError or a
# MemoryError with very large embedded files.
f = self.open(zinfo.filename, "r")
while f.read(chunk_size): # Check CRC-32
pass
except BadZipFile:
return zinfo.filename
def getinfo(self, name):
"""Return the instance of ZipInfo given 'name'."""
info = self.NameToInfo.get(name)
if info is None:
raise KeyError(
'There is no item named %r in the archive' % name)
return info
def setpassword(self, pwd):
"""Set default password for encrypted files."""
if pwd and not isinstance(pwd, bytes):
raise TypeError("pwd: expected bytes, got %s" % type(pwd))
if pwd:
self.pwd = pwd
else:
self.pwd = None
def read(self, name, pwd=None):
"""Return file bytes (as a string) for name."""
with self.open(name, "r", pwd) as fp:
return fp.read()
def open(self, name, mode="r", pwd=None):
"""Return file-like object for 'name'."""
if mode not in ("r", "U", "rU"):
raise RuntimeError('open() requires mode "r", "U", or "rU"')
if pwd and not isinstance(pwd, bytes):
raise TypeError("pwd: expected bytes, got %s" % type(pwd))
if not self.fp:
raise RuntimeError(
"Attempt to read ZIP archive that was already closed")
# Only open a new file for instances where we were not
# given a file object in the constructor
if self._filePassed:
zef_file = self.fp
else:
zef_file = io.open(self.filename, 'rb')
# Make sure we have an info object
if isinstance(name, ZipInfo):
# 'name' is already an info object
zinfo = name
else:
# Get info object for name
try:
zinfo = self.getinfo(name)
except KeyError:
if not self._filePassed:
zef_file.close()
raise
zef_file.seek(zinfo.header_offset, 0)
# Skip the file header:
fheader = zef_file.read(sizeFileHeader)
if fheader[0:4] != stringFileHeader:
raise BadZipFile("Bad magic number for file header")
fheader = struct.unpack(structFileHeader, fheader)
fname = zef_file.read(fheader[_FH_FILENAME_LENGTH])
if fheader[_FH_EXTRA_FIELD_LENGTH]:
zef_file.read(fheader[_FH_EXTRA_FIELD_LENGTH])
if zinfo.flag_bits & 0x800:
# UTF-8 filename
fname_str = fname.decode("utf-8")
else:
fname_str = fname.decode("cp437")
if fname_str != zinfo.orig_filename:
if not self._filePassed:
zef_file.close()
raise BadZipFile(
'File name in directory %r and header %r differ.'
% (zinfo.orig_filename, fname))
# check for encrypted flag & handle password
is_encrypted = zinfo.flag_bits & 0x1
zd = None
if is_encrypted:
if not pwd:
pwd = self.pwd
if not pwd:
if not self._filePassed:
zef_file.close()
raise RuntimeError("File %s is encrypted, "
"password required for extraction" % name)
zd = _ZipDecrypter(pwd)
# The first 12 bytes in the cypher stream is an encryption header
# used to strengthen the algorithm. The first 11 bytes are
# completely random, while the 12th contains the MSB of the CRC,
# or the MSB of the file time depending on the header type
# and is used to check the correctness of the password.
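# A worked example of the check (values are illustrative only): for an
# entry with CRC 0xDEADBEEF and flag bit 3 clear, the 12th decrypted
# byte must equal (0xDEADBEEF >> 24) & 0xff == 0xDE; any other value
# means the supplied password is rejected below.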
header = zef_file.read(12)
h = list(map(zd, header[0:12]))
if zinfo.flag_bits & 0x8:
# compare against the file type from extended local headers
check_byte = (zinfo._raw_time >> 8) & 0xff
else:
# compare against the CRC otherwise
check_byte = (zinfo.CRC >> 24) & 0xff
if h[11] != check_byte:
if not self._filePassed:
zef_file.close()
raise RuntimeError("Bad password for file", name)
return ZipExtFile(zef_file, mode, zinfo, zd,
close_fileobj=not self._filePassed)
def extract(self, member, path=None, pwd=None):
"""Extract a member from the archive to the current working directory,
using its full name. Its file information is extracted as accurately
as possible. `member' may be a filename or a ZipInfo object. You can
specify a different directory using `path'.
"""
if not isinstance(member, ZipInfo):
member = self.getinfo(member)
if path is None:
path = os.getcwd()
return self._extract_member(member, path, pwd)
def extractall(self, path=None, members=None, pwd=None):
"""Extract all members from the archive to the current working
directory. `path' specifies a different directory to extract to.
`members' is optional and must be a subset of the list returned
by namelist().
"""
if members is None:
members = self.namelist()
for zipinfo in members:
self.extract(zipinfo, path, pwd)
def _extract_member(self, member, targetpath, pwd):
"""Extract the ZipInfo object 'member' to a physical
file on the path targetpath.
"""
# build the destination pathname, replacing
# forward slashes to platform specific separators.
# Strip trailing path separator, unless it represents the root.
if (targetpath[-1:] in (os.path.sep, os.path.altsep)
and len(os.path.splitdrive(targetpath)[1]) > 1):
targetpath = targetpath[:-1]
# don't include leading "/" from file name if present
if member.filename[0] == '/':
targetpath = os.path.join(targetpath, member.filename[1:])
else:
targetpath = os.path.join(targetpath, member.filename)
targetpath = os.path.normpath(targetpath)
# Create all upper directories if necessary.
upperdirs = os.path.dirname(targetpath)
if upperdirs and not os.path.exists(upperdirs):
os.makedirs(upperdirs)
if member.filename[-1] == '/':
if not os.path.isdir(targetpath):
os.mkdir(targetpath)
return targetpath
source = self.open(member, pwd=pwd)
target = open(targetpath, "wb")
shutil.copyfileobj(source, target)
source.close()
target.close()
return targetpath
def _writecheck(self, zinfo):
"""Check for errors before writing a file to the archive."""
if zinfo.filename in self.NameToInfo:
if self.debug: # Warning for duplicate names
print("Duplicate name:", zinfo.filename)
if self.mode not in ("w", "a"):
raise RuntimeError('write() requires mode "w" or "a"')
if not self.fp:
raise RuntimeError(
"Attempt to write ZIP archive that was already closed")
if zinfo.compress_type == ZIP_DEFLATED and not zlib:
raise RuntimeError(
"Compression requires the (missing) zlib module")
if zinfo.compress_type not in (ZIP_STORED, ZIP_DEFLATED):
raise RuntimeError("That compression method is not supported")
if zinfo.file_size > ZIP64_LIMIT:
if not self._allowZip64:
raise LargeZipFile("Filesize would require ZIP64 extensions")
if zinfo.header_offset > ZIP64_LIMIT:
if not self._allowZip64:
raise LargeZipFile(
"Zipfile size would require ZIP64 extensions")
def write(self, filename, arcname=None, compress_type=None):
"""Put the bytes from filename into the archive under the name
arcname."""
if not self.fp:
raise RuntimeError(
"Attempt to write to ZIP archive that was already closed")
st = os.stat(filename)
isdir = stat.S_ISDIR(st.st_mode)
mtime = time.localtime(st.st_mtime)
date_time = mtime[0:6]
# Create ZipInfo instance to store file information
if arcname is None:
arcname = filename
arcname = os.path.normpath(os.path.splitdrive(arcname)[1])
while arcname[0] in (os.sep, os.altsep):
arcname = arcname[1:]
if isdir:
arcname += '/'
zinfo = ZipInfo(arcname, date_time)
zinfo.external_attr = (st[0] & 0xFFFF) << 16 # Unix attributes
if compress_type is None:
zinfo.compress_type = self.compression
else:
zinfo.compress_type = compress_type
zinfo.file_size = st.st_size
zinfo.flag_bits = 0x00
zinfo.header_offset = self.fp.tell() # Start of header bytes
self._writecheck(zinfo)
self._didModify = True
if isdir:
zinfo.file_size = 0
zinfo.compress_size = 0
zinfo.CRC = 0
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
self.fp.write(zinfo.FileHeader())
return
with open(filename, "rb") as fp:
# Must overwrite CRC and sizes with correct data later
zinfo.CRC = CRC = 0
zinfo.compress_size = compress_size = 0
zinfo.file_size = file_size = 0
self.fp.write(zinfo.FileHeader())
if zinfo.compress_type == ZIP_DEFLATED:
cmpr = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
zlib.DEFLATED, -15)
else:
cmpr = None
while 1:
buf = fp.read(1024 * 8)
if not buf:
break
file_size = file_size + len(buf)
CRC = crc32(buf, CRC) & 0xffffffff
if cmpr:
buf = cmpr.compress(buf)
compress_size = compress_size + len(buf)
self.fp.write(buf)
if cmpr:
buf = cmpr.flush()
compress_size = compress_size + len(buf)
self.fp.write(buf)
zinfo.compress_size = compress_size
else:
zinfo.compress_size = file_size
zinfo.CRC = CRC
zinfo.file_size = file_size
# Seek backwards and write CRC and file sizes
position = self.fp.tell() # Preserve current position in file
self.fp.seek(zinfo.header_offset + 14, 0)
self.fp.write(struct.pack("<LLL", zinfo.CRC, zinfo.compress_size,
zinfo.file_size))
self.fp.seek(position, 0)
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
def writestr(self, zinfo_or_arcname, data, compress_type=None):
"""Write a file into the archive. The contents is 'data', which
may be either a 'str' or a 'bytes' instance; if it is a 'str',
it is encoded as UTF-8 first.
'zinfo_or_arcname' is either a ZipInfo instance or
the name of the file in the archive."""
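# Usage sketch (the archive member names below are hypothetical):
#     zf.writestr("notes/readme.txt", "hello world")     # str -> encoded as UTF-8
#     zf.writestr(ZipInfo("raw.bin"), b"\x00\x01\x02")   # explicit ZipInfo entry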
if isinstance(data, str):
data = data.encode("utf-8")
if not isinstance(zinfo_or_arcname, ZipInfo):
zinfo = ZipInfo(filename=zinfo_or_arcname,
date_time=time.localtime(time.time())[:6])
zinfo.compress_type = self.compression
zinfo.external_attr = 0o600 << 16
else:
zinfo = zinfo_or_arcname
if not self.fp:
raise RuntimeError(
"Attempt to write to ZIP archive that was already closed")
zinfo.file_size = len(data) # Uncompressed size
zinfo.header_offset = self.fp.tell() # Start of header data
if compress_type is not None:
zinfo.compress_type = compress_type
self._writecheck(zinfo)
self._didModify = True
zinfo.CRC = crc32(data) & 0xffffffff # CRC-32 checksum
if zinfo.compress_type == ZIP_DEFLATED:
co = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
zlib.DEFLATED, -15)
data = co.compress(data) + co.flush()
zinfo.compress_size = len(data) # Compressed size
else:
zinfo.compress_size = zinfo.file_size
zinfo.header_offset = self.fp.tell() # Start of header data
self.fp.write(zinfo.FileHeader())
self.fp.write(data)
self.fp.flush()
if zinfo.flag_bits & 0x08:
# Write CRC and file sizes after the file data
self.fp.write(struct.pack("<LLL", zinfo.CRC, zinfo.compress_size,
zinfo.file_size))
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
def __del__(self):
"""Call the "close()" method in case the user forgot."""
self.close()
def close(self):
"""Close the file, and for mode "w" and "a" write the ending
records."""
if self.fp is None:
return
if self.mode in ("w", "a") and self._didModify: # write ending records
count = 0
pos1 = self.fp.tell()
for zinfo in self.filelist: # write central directory
count = count + 1
dt = zinfo.date_time
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
extra = []
if zinfo.file_size > ZIP64_LIMIT \
or zinfo.compress_size > ZIP64_LIMIT:
extra.append(zinfo.file_size)
extra.append(zinfo.compress_size)
file_size = 0xffffffff
compress_size = 0xffffffff
else:
file_size = zinfo.file_size
compress_size = zinfo.compress_size
if zinfo.header_offset > ZIP64_LIMIT:
extra.append(zinfo.header_offset)
header_offset = 0xffffffff
else:
header_offset = zinfo.header_offset
extra_data = zinfo.extra
if extra:
# Append a ZIP64 field to the extra's
extra_data = struct.pack(
'<HH' + 'Q'*len(extra),
1, 8*len(extra), *extra) + extra_data
extract_version = max(45, zinfo.extract_version)
create_version = max(45, zinfo.create_version)
else:
extract_version = zinfo.extract_version
create_version = zinfo.create_version
try:
filename, flag_bits = zinfo._encodeFilenameFlags()
centdir = struct.pack(structCentralDir,
stringCentralDir, create_version,
zinfo.create_system, extract_version, zinfo.reserved,
flag_bits, zinfo.compress_type, dostime, dosdate,
zinfo.CRC, compress_size, file_size,
len(filename), len(extra_data), len(zinfo.comment),
0, zinfo.internal_attr, zinfo.external_attr,
header_offset)
except DeprecationWarning:
print((structCentralDir, stringCentralDir, create_version,
zinfo.create_system, extract_version, zinfo.reserved,
zinfo.flag_bits, zinfo.compress_type, dostime, dosdate,
zinfo.CRC, compress_size, file_size,
len(zinfo.filename), len(extra_data), len(zinfo.comment),
0, zinfo.internal_attr, zinfo.external_attr,
header_offset), file=sys.stderr)
raise
self.fp.write(centdir)
self.fp.write(filename)
self.fp.write(extra_data)
self.fp.write(zinfo.comment)
pos2 = self.fp.tell()
# Write end-of-zip-archive record
centDirCount = count
centDirSize = pos2 - pos1
centDirOffset = pos1
if (centDirCount >= ZIP_FILECOUNT_LIMIT or
centDirOffset > ZIP64_LIMIT or
centDirSize > ZIP64_LIMIT):
# Need to write the ZIP64 end-of-archive records
zip64endrec = struct.pack(
structEndArchive64, stringEndArchive64,
44, 45, 45, 0, 0, centDirCount, centDirCount,
centDirSize, centDirOffset)
self.fp.write(zip64endrec)
zip64locrec = struct.pack(
structEndArchive64Locator,
stringEndArchive64Locator, 0, pos2, 1)
self.fp.write(zip64locrec)
centDirCount = min(centDirCount, 0xFFFF)
centDirSize = min(centDirSize, 0xFFFFFFFF)
centDirOffset = min(centDirOffset, 0xFFFFFFFF)
# check for valid comment length
if len(self.comment) >= ZIP_MAX_COMMENT:
if self.debug > 0:
print('Archive comment is too long; truncating to %d bytes'
% ZIP_MAX_COMMENT, file=sys.stderr)
self.comment = self.comment[:ZIP_MAX_COMMENT]
endrec = struct.pack(structEndArchive, stringEndArchive,
0, 0, centDirCount, centDirCount,
centDirSize, centDirOffset, len(self.comment))
self.fp.write(endrec)
self.fp.write(self.comment)
self.fp.flush()
if not self._filePassed:
self.fp.close()
self.fp = None
class PyZipFile(ZipFile):
"""Class to create ZIP archives with Python library files and packages."""
def __init__(self, file, mode="r", compression=ZIP_STORED,
allowZip64=False, optimize=-1):
ZipFile.__init__(self, file, mode=mode, compression=compression,
allowZip64=allowZip64)
self._optimize = optimize
def writepy(self, pathname, basename=""):
"""Add all files from "pathname" to the ZIP archive.
If pathname is a package directory, search the directory and
all package subdirectories recursively for all *.py and enter
the modules into the archive. If pathname is a plain
directory, add every top-level *.py module found there. Else, pathname
must be a Python *.py file and the module will be put into the
archive. Added modules are always module.pyo or module.pyc.
This method will compile the module.py into module.pyc if
necessary.
"""
dir, name = os.path.split(pathname)
if os.path.isdir(pathname):
initname = os.path.join(pathname, "__init__.py")
if os.path.isfile(initname):
# This is a package directory, add it
if basename:
basename = "%s/%s" % (basename, name)
else:
basename = name
if self.debug:
print("Adding package in", pathname, "as", basename)
fname, arcname = self._get_codename(initname[0:-3], basename)
if self.debug:
print("Adding", arcname)
self.write(fname, arcname)
dirlist = os.listdir(pathname)
dirlist.remove("__init__.py")
# Add all *.py files and package subdirectories
for filename in dirlist:
path = os.path.join(pathname, filename)
root, ext = os.path.splitext(filename)
if os.path.isdir(path):
if os.path.isfile(os.path.join(path, "__init__.py")):
# This is a package directory, add it
self.writepy(path, basename) # Recursive call
elif ext == ".py":
fname, arcname = self._get_codename(path[0:-3],
basename)
if self.debug:
print("Adding", arcname)
self.write(fname, arcname)
else:
# This is NOT a package directory, add its files at top level
if self.debug:
print("Adding files from directory", pathname)
for filename in os.listdir(pathname):
path = os.path.join(pathname, filename)
root, ext = os.path.splitext(filename)
if ext == ".py":
fname, arcname = self._get_codename(path[0:-3],
basename)
if self.debug:
print("Adding", arcname)
self.write(fname, arcname)
else:
if pathname[-3:] != ".py":
raise RuntimeError(
'Files added with writepy() must end with ".py"')
fname, arcname = self._get_codename(pathname[0:-3], basename)
if self.debug:
print("Adding file", arcname)
self.write(fname, arcname)
def _get_codename(self, pathname, basename):
"""Return (filename, archivename) for the path.
Given a module name path, return the correct file path and
archive name, compiling if necessary. For example, given
/python/lib/string, return (/python/lib/string.pyc, string).
"""
def _compile(file, optimize=-1):
import py_compile
if self.debug:
print("Compiling", file)
try:
py_compile.compile(file, doraise=True, optimize=optimize)
except py_compile.PyCompileError as error:
print(error.msg)
return False
return True
file_py = pathname + ".py"
file_pyc = pathname + ".pyc"
file_pyo = pathname + ".pyo"
pycache_pyc = imp.cache_from_source(file_py, True)
pycache_pyo = imp.cache_from_source(file_py, False)
if self._optimize == -1:
# legacy mode: use whatever file is present
if (os.path.isfile(file_pyo) and
os.stat(file_pyo).st_mtime >= os.stat(file_py).st_mtime):
# Use .pyo file.
arcname = fname = file_pyo
elif (os.path.isfile(file_pyc) and
os.stat(file_pyc).st_mtime >= os.stat(file_py).st_mtime):
# Use .pyc file.
arcname = fname = file_pyc
elif (os.path.isfile(pycache_pyc) and
os.stat(pycache_pyc).st_mtime >= os.stat(file_py).st_mtime):
# Use the __pycache__/*.pyc file, but write it to the legacy pyc
# file name in the archive.
fname = pycache_pyc
arcname = file_pyc
elif (os.path.isfile(pycache_pyo) and
os.stat(pycache_pyo).st_mtime >= os.stat(file_py).st_mtime):
# Use the __pycache__/*.pyo file, but write it to the legacy pyo
# file name in the archive.
fname = pycache_pyo
arcname = file_pyo
else:
# Compile py into PEP 3147 pyc file.
if _compile(file_py):
fname = (pycache_pyc if __debug__ else pycache_pyo)
arcname = (file_pyc if __debug__ else file_pyo)
else:
fname = arcname = file_py
else:
# new mode: use given optimization level
if self._optimize == 0:
fname = pycache_pyc
arcname = file_pyc
else:
fname = pycache_pyo
arcname = file_pyo
if not (os.path.isfile(fname) and
os.stat(fname).st_mtime >= os.stat(file_py).st_mtime):
if not _compile(file_py, optimize=self._optimize):
fname = arcname = file_py
archivename = os.path.split(arcname)[1]
if basename:
archivename = "%s/%s" % (basename, archivename)
return (fname, archivename)
def main(args=None):
import textwrap
USAGE = textwrap.dedent("""\
Usage:
zipfile.py -l zipfile.zip # Show listing of a zipfile
zipfile.py -t zipfile.zip # Test if a zipfile is valid
zipfile.py -e zipfile.zip target # Extract zipfile into target dir
zipfile.py -c zipfile.zip src ... # Create zipfile from sources
""")
if args is None:
args = sys.argv[1:]
if not args or args[0] not in ('-l', '-c', '-e', '-t'):
print(USAGE)
sys.exit(1)
if args[0] == '-l':
if len(args) != 2:
print(USAGE)
sys.exit(1)
zf = ZipFile(args[1], 'r')
zf.printdir()
zf.close()
elif args[0] == '-t':
if len(args) != 2:
print(USAGE)
sys.exit(1)
zf = ZipFile(args[1], 'r')
badfile = zf.testzip()
if badfile:
print("The following enclosed file is corrupted: {!r}".format(badfile))
print("Done testing")
elif args[0] == '-e':
if len(args) != 3:
print(USAGE)
sys.exit(1)
zf = ZipFile(args[1], 'r')
out = args[2]
for path in zf.namelist():
if path.startswith('./'):
tgt = os.path.join(out, path[2:])
else:
tgt = os.path.join(out, path)
tgtdir = os.path.dirname(tgt)
if not os.path.exists(tgtdir):
os.makedirs(tgtdir)
with open(tgt, 'wb') as fp:
fp.write(zf.read(path))
zf.close()
elif args[0] == '-c':
if len(args) < 3:
print(USAGE)
sys.exit(1)
def addToZip(zf, path, zippath):
if os.path.isfile(path):
zf.write(path, zippath, ZIP_DEFLATED)
elif os.path.isdir(path):
for nm in os.listdir(path):
addToZip(zf,
os.path.join(path, nm), os.path.join(zippath, nm))
# else: ignore
zf = ZipFile(args[1], 'w', allowZip64=True)
for src in args[2:]:
addToZip(zf, src, os.path.basename(src))
zf.close()
if __name__ == "__main__":
main()
|
girving/tensorflow
|
refs/heads/master
|
tensorflow/contrib/boosted_trees/python/ops/prediction_ops.py
|
38
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Split handler custom ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.contrib.boosted_trees.python.ops import boosted_trees_ops_loader
from tensorflow.contrib.boosted_trees.python.ops.gen_prediction_ops import gradient_trees_partition_examples
from tensorflow.contrib.boosted_trees.python.ops.gen_prediction_ops import gradient_trees_prediction
from tensorflow.contrib.boosted_trees.python.ops.gen_prediction_ops import gradient_trees_prediction_verbose
# pylint: enable=unused-import
|
zerothi/sids
|
refs/heads/master
|
sisl/physics/tests/test_spin.py
|
1
|
import pytest
pytestmark = pytest.mark.spin
import math as m
import numpy as np
from sisl import Spin
def test_spin1():
for val in ['unpolarized', '', Spin.UNPOLARIZED,
'polarized', 'p', Spin.POLARIZED,
'non-collinear', 'nc', Spin.NONCOLINEAR,
'spin-orbit', 'so', Spin.SPINORBIT]:
s = Spin(val)
str(s)
s1 = s.copy()
assert s == s1
def test_spin2():
s1 = Spin()
s2 = Spin('p')
s3 = Spin('nc')
s4 = Spin('so')
assert s1.kind == Spin.UNPOLARIZED
assert s2.kind == Spin.POLARIZED
assert s3.kind == Spin.NONCOLINEAR
assert s4.kind == Spin.SPINORBIT
assert s1 == s1.copy()
assert s2 == s2.copy()
assert s3 == s3.copy()
assert s4 == s4.copy()
assert s1 < s2
assert s2 < s3
assert s3 < s4
assert s1 <= s2
assert s2 <= s3
assert s3 <= s4
assert s2 > s1
assert s3 > s2
assert s4 > s3
assert s2 >= s1
assert s3 >= s2
assert s4 >= s3
assert s1.is_unpolarized
assert not s1.is_polarized
assert not s1.is_noncolinear
assert not s1.is_spinorbit
assert not s2.is_unpolarized
assert s2.is_polarized
assert not s2.is_noncolinear
assert not s2.is_spinorbit
assert not s3.is_unpolarized
assert not s3.is_polarized
assert s3.is_noncolinear
assert not s3.is_spinorbit
assert not s4.is_unpolarized
assert not s4.is_polarized
assert not s4.is_noncolinear
assert s4.is_spinorbit
def test_spin3():
with pytest.raises(ValueError):
s = Spin('satoehus')
def test_spin4():
s1 = Spin(Spin.UNPOLARIZED)
S1 = Spin(Spin.UNPOLARIZED, np.complex64)
s2 = Spin(Spin.POLARIZED)
S2 = Spin(Spin.POLARIZED, np.complex64)
s3 = Spin(Spin.NONCOLINEAR)
S3 = Spin(Spin.NONCOLINEAR, np.complex64)
s4 = Spin(Spin.SPINORBIT)
S4 = Spin(Spin.SPINORBIT, np.complex64)
assert s1 == S1
assert s2 == S2
assert s3 == S3
assert s4 == S4
# real comparison
assert s1 < S2
assert s1 < S3
assert s1 < S4
assert s2 > S1
assert s2 < S3
assert s2 < S4
assert s3 > S1
assert s3 > S2
assert s3 < S4
assert s4 > S1
assert s4 > S2
assert s4 > S3
# complex complex
assert S1 < S2
assert S1 < S3
assert S1 < S4
assert S2 > S1
assert S2 < S3
assert S2 < S4
assert S3 > S1
assert S3 > S2
assert S3 < S4
assert S4 > S1
assert S4 > S2
assert S4 > S3
# real comparison
assert S1 < s2
assert S1 < s3
assert S1 < s4
assert S2 > s1
assert S2 < s3
assert S2 < s4
assert S3 > s1
assert S3 > s2
assert S3 < s4
assert S4 > s1
assert S4 > s2
assert S4 > s3
# complex complex
assert S1 < s2
assert S1 < s3
assert S1 < s4
assert S2 > s1
assert S2 < s3
assert S2 < s4
assert S3 > s1
assert S3 > s2
assert S3 < s4
assert S4 > s1
assert S4 > s2
assert S4 > s3
def test_pauli():
# just grab the default spin
S = Spin()
# Create a fictitious wave-function
sq2 = 2 ** .5
W = np.array([
[1/sq2, 1/sq2], # M_x = 1
[1/sq2, -1/sq2], # M_x = -1
[0.5 + 0.5j, 0.5 + 0.5j], # M_x = 1
[0.5 - 0.5j, -0.5 + 0.5j], # M_x = -1
[1/sq2, 1j/sq2], # M_y = 1
[1/sq2, -1j/sq2], # M_y = -1
[0.5 - 0.5j, 0.5 + 0.5j], # M_y = 1
[0.5 + 0.5j, 0.5 - 0.5j], # M_y = -1
[1, 0], # M_z = 1
[0, 1], # M_z = -1
])
x = np.array([1, -1, 1, -1, 0, 0, 0, 0, 0, 0])
assert np.allclose(x, (np.conj(W)*S.X.dot(W.T).T).sum(1).real)
y = np.array([0, 0, 0, 0, 1, -1, 1, -1, 0, 0])
assert np.allclose(y, (np.conj(W)*np.dot(S.Y, W.T).T).sum(1).real)
z = np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, -1])
assert np.allclose(z, (np.conj(W)*np.dot(S.Z, W.T).T).sum(1).real)
def test_pickle():
import pickle as p
S = Spin('nc')
n = p.dumps(S)
s = p.loads(n)
assert S == s
|
nippoo/phy
|
refs/heads/master
|
phy/io/kwik/mock.py
|
2
|
# -*- coding: utf-8 -*-
"""Mock Kwik files."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
import os.path as op
import numpy as np
from ...electrode.mea import staggered_positions
from ..mock import (artificial_spike_samples,
artificial_spike_clusters,
artificial_features,
artificial_masks,
artificial_traces)
from ..h5 import open_h5
from .model import _create_clustering
#------------------------------------------------------------------------------
# Mock Kwik file
#------------------------------------------------------------------------------
def create_mock_kwik(dir_path, n_clusters=None, n_spikes=None,
n_channels=None, n_features_per_channel=None,
n_samples_traces=None,
with_kwx=True,
with_kwd=True,
add_original=True,
):
"""Create a test kwik file."""
filename = op.join(dir_path, '_test.kwik')
kwx_filename = op.join(dir_path, '_test.kwx')
kwd_filename = op.join(dir_path, '_test.raw.kwd')
# Create the kwik file.
with open_h5(filename, 'w') as f:
f.write_attr('/', 'kwik_version', 2)
def _write_metadata(key, value):
f.write_attr('/application_data/spikedetekt', key, value)
_write_metadata('sample_rate', 20000.)
# Filter parameters.
_write_metadata('filter_low', 500.)
_write_metadata('filter_high_factor', 0.95 * .5)
_write_metadata('filter_butter_order', 3)
_write_metadata('extract_s_before', 15)
_write_metadata('extract_s_after', 25)
_write_metadata('n_features_per_channel', n_features_per_channel)
# Create spike times.
spike_samples = artificial_spike_samples(n_spikes).astype(np.int64)
spike_recordings = np.zeros(n_spikes, dtype=np.uint16)
# Size of the first recording.
recording_size = 2 * n_spikes // 3
if recording_size > 0:
# Find the recording offset.
recording_offset = spike_samples[recording_size]
recording_offset += spike_samples[recording_size + 1]
recording_offset //= 2
spike_recordings[recording_size:] = 1
# Make sure the spike samples of the second recording start over.
spike_samples[recording_size:] -= spike_samples[recording_size]
spike_samples[recording_size:] += 10
else:
recording_offset = 1
if spike_samples.max() >= n_samples_traces:
raise ValueError("There are too many spikes: decrease 'n_spikes'.")
f.write('/channel_groups/1/spikes/time_samples', spike_samples)
f.write('/channel_groups/1/spikes/recording', spike_recordings)
f.write_attr('/channel_groups/1', 'channel_order',
np.arange(1, n_channels - 1)[::-1])
graph = np.array([[1, 2], [2, 3]])
f.write_attr('/channel_groups/1', 'adjacency_graph', graph)
# Create channels.
positions = staggered_positions(n_channels)
for channel in range(n_channels):
group = '/channel_groups/1/channels/{0:d}'.format(channel)
f.write_attr(group, 'name', str(channel))
f.write_attr(group, 'position', positions[channel])
# Create spike clusters.
clusterings = [('main', n_clusters)]
if add_original:
clusterings += [('original', n_clusters * 2)]
for clustering, n_clusters_rec in clusterings:
spike_clusters = artificial_spike_clusters(n_spikes,
n_clusters_rec)
groups = {0: 0, 1: 1, 2: 2}
_create_clustering(f, clustering, 1, spike_clusters, groups)
# Create recordings.
f.write_attr('/recordings/0', 'name', 'recording_0')
f.write_attr('/recordings/1', 'name', 'recording_1')
f.write_attr('/recordings/0/raw', 'hdf5_path', kwd_filename)
f.write_attr('/recordings/1/raw', 'hdf5_path', kwd_filename)
# Create the kwx file.
if with_kwx:
with open_h5(kwx_filename, 'w') as f:
f.write_attr('/', 'kwik_version', 2)
features = artificial_features(n_spikes,
(n_channels - 2) *
n_features_per_channel)
masks = artificial_masks(n_spikes,
(n_channels - 2) *
n_features_per_channel)
fm = np.dstack((features, masks)).astype(np.float32)
f.write('/channel_groups/1/features_masks', fm)
# Create the raw kwd file.
if with_kwd:
with open_h5(kwd_filename, 'w') as f:
f.write_attr('/', 'kwik_version', 2)
traces = artificial_traces(n_samples_traces, n_channels)
# TODO: int16 traces
f.write('/recordings/0/data',
traces[:recording_offset, ...].astype(np.float32))
f.write('/recordings/1/data',
traces[recording_offset:, ...].astype(np.float32))
return filename
|
exceptionhandle/ImageProcessor.activity
|
refs/heads/master
|
Imaging/doctest.py
|
8
|
# Module doctest version 0.9.6
# Released to the public domain 16-Jan-2001,
# by Tim Peters (tim.one@home.com).
# local modifications:
# 2001-02-13 fl: minor tweaks to make it run under both 1.5.2 and 2.0
# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
"""Module doctest -- a framework for running examples in docstrings.
NORMAL USAGE
In normal use, end each module M with:
def _test():
import doctest, M # replace M with your module's name
return doctest.testmod(M) # ditto
if __name__ == "__main__":
_test()
Then running the module as a script will cause the examples in the
docstrings to get executed and verified:
python M.py
This won't display anything unless an example fails, in which case the
failing example(s) and the cause(s) of the failure(s) are printed to stdout
(why not stderr? because stderr is a lame hack <0.2 wink>), and the final
line of output is "Test failed.".
Run it with the -v switch instead:
python M.py -v
and a detailed report of all examples tried is printed to stdout, along
with assorted summaries at the end.
You can force verbose mode by passing "verbose=1" to testmod, or prohibit
it by passing "verbose=0". In either of those cases, sys.argv is not
examined by testmod.
In any case, testmod returns a 2-tuple of ints (f, t), where f is the
number of docstring examples that failed and t is the total number of
docstring examples attempted.
WHICH DOCSTRINGS ARE EXAMINED?
+ M.__doc__.
+ f.__doc__ for all functions f in M.__dict__.values(), except those
with private names.
+ C.__doc__ for all classes C in M.__dict__.values(), except those with
private names.
+ If M.__test__ exists and "is true", it must be a dict, and
each entry maps a (string) name to a function object, class object, or
string. Function and class object docstrings found from M.__test__
are searched even if the name is private, and strings are searched
directly as if they were docstrings. In output, a key K in M.__test__
appears with name
<name of M>.__test__.K
Any classes found are recursively searched similarly, to test docstrings in
their contained methods and nested classes. Private names reached from M's
globals are skipped, but all names reached from M.__test__ are searched.
By default, a name is considered to be private if it begins with an
underscore (like "_my_func") but doesn't both begin and end with (at least)
two underscores (like "__init__"). You can change the default by passing
your own "isprivate" function to testmod.
If you want to test docstrings in objects with private names too, stuff
them into an M.__test__ dict, or see ADVANCED USAGE below (e.g., pass your
own isprivate function to Tester's constructor, or call the rundoc method
of a Tester instance).
Warning: imports can cause trouble; e.g., if you do
from XYZ import XYZclass
then XYZclass is a name in M.__dict__ too, and doctest has no way to know
that XYZclass wasn't *defined* in M. So it may try to execute the examples
in XYZclass's docstring, and those in turn may require a different set of
globals to work correctly. I prefer to do "import *"- friendly imports,
a la
import XYZ
_XYZclass = XYZ.XYZclass
del XYZ
or (Python 2.0)
from XYZ import XYZclass as _XYZclass
and then the leading underscore stops testmod from going nuts. You may
prefer the method in the next section.
WHAT'S THE EXECUTION CONTEXT?
By default, each time testmod finds a docstring to test, it uses a *copy*
of M's globals (so that running tests on a module doesn't change the
module's real globals, and so that one test in M can't leave behind crumbs
that accidentally allow another test to work). This means examples can
freely use any names defined at top-level in M. It also means that sloppy
imports (see above) can cause examples in external docstrings to use
globals inappropriate for them.
You can force use of your own dict as the execution context by passing
"globs=your_dict" to testmod instead. Presumably this would be a copy of
M.__dict__ merged with the globals from other imported modules.
WHAT IF I WANT TO TEST A WHOLE PACKAGE?
Piece o' cake, provided the modules do their testing from docstrings.
Here's the test.py I use for the world's most elaborate Rational/
floating-base-conversion pkg (which I'll distribute some day):
from Rational import Cvt
from Rational import Format
from Rational import machprec
from Rational import Rat
from Rational import Round
from Rational import utils
modules = (Cvt,
Format,
machprec,
Rat,
Round,
utils)
def _test():
import doctest
import sys
verbose = "-v" in sys.argv
for mod in modules:
doctest.testmod(mod, verbose=verbose, report=0)
doctest.master.summarize()
if __name__ == "__main__":
_test()
IOW, it just runs testmod on all the pkg modules. testmod remembers the
names and outcomes (# of failures, # of tries) for each item it's seen, and
passing "report=0" prevents it from printing a summary in verbose mode.
Instead, the summary is delayed until all modules have been tested, and
then "doctest.master.summarize()" forces the summary at the end.
So this is very nice in practice: each module can be tested individually
with almost no work beyond writing up docstring examples, and collections
of modules can be tested too as a unit with no more work than the above.
WHAT ABOUT EXCEPTIONS?
No problem, as long as the only output generated by the example is the
traceback itself. For example:
>>> a = [None]
>>> a[1]
Traceback (innermost last):
File "<stdin>", line 1, in ?
IndexError: list index out of range
>>>
Note that only the exception type and value are compared (specifically,
only the last line in the traceback).
ADVANCED USAGE
doctest.testmod() captures the testing policy I find most useful most
often. You may want other policies.
testmod() actually creates a local instance of class doctest.Tester, runs
appropriate methods of that class, and merges the results into global
Tester instance doctest.master.
You can create your own instances of doctest.Tester, and so build your own
policies, or even run methods of doctest.master directly. See
doctest.Tester.__doc__ for details.
SO WHAT DOES A DOCSTRING EXAMPLE LOOK LIKE ALREADY!?
Oh ya. It's easy! In most cases a copy-and-paste of an interactive
console session works fine -- just make sure the leading whitespace is
rigidly consistent (you can mix tabs and spaces if you're too lazy to do it
right, but doctest is not in the business of guessing what you think a tab
means).
>>> # comments are ignored
>>> x = 12
>>> x
12
>>> if x == 13:
... print "yes"
... else:
... print "no"
... print "NO"
... print "NO!!!"
...
no
NO
NO!!!
>>>
Any expected output must immediately follow the final ">>>" or "..." line
containing the code, and the expected output (if any) extends to the next
">>>" or all-whitespace line. That's it.
Bummers:
+ Expected output cannot contain an all-whitespace line, since such a line
is taken to signal the end of expected output.
+ Output to stdout is captured, but not output to stderr (exception
tracebacks are captured via a different means).
+ If you continue a line via backslashing in an interactive session, or for
any other reason use a backslash, you need to double the backslash in the
docstring version. This is simply because you're in a string, and so the
backslash must be escaped for it to survive intact. Like:
>>> if "yes" == \\
... "y" + \\
... "es": # in the source code you'll see the doubled backslashes
... print 'yes'
yes
The starting column doesn't matter:
>>> assert "Easy!"
>>> import math
>>> math.floor(1.9)
1.0
and as many leading whitespace characters are stripped from the expected
output as appeared in the initial ">>>" line that triggered it.
If you execute this very file, the examples above will be found and
executed, leading to this output in verbose mode:
Running doctest.__doc__
Trying: a = [None]
Expecting: nothing
ok
Trying: a[1]
Expecting:
Traceback (innermost last):
File "<stdin>", line 1, in ?
IndexError: list index out of range
ok
Trying: x = 12
Expecting: nothing
ok
Trying: x
Expecting: 12
ok
Trying:
if x == 13:
print "yes"
else:
print "no"
print "NO"
print "NO!!!"
Expecting:
no
NO
NO!!!
ok
... and a bunch more like that, with this summary at the end:
5 items had no tests:
doctest.Tester.__init__
doctest.Tester.run__test__
doctest.Tester.summarize
doctest.run_docstring_examples
doctest.testmod
12 items passed all tests:
9 tests in doctest
6 tests in doctest.Tester
10 tests in doctest.Tester.merge
7 tests in doctest.Tester.rundict
3 tests in doctest.Tester.rundoc
3 tests in doctest.Tester.runstring
2 tests in doctest.__test__._TestClass
2 tests in doctest.__test__._TestClass.__init__
2 tests in doctest.__test__._TestClass.get
1 tests in doctest.__test__._TestClass.square
2 tests in doctest.__test__.string
7 tests in doctest.is_private
54 tests in 17 items.
54 passed and 0 failed.
Test passed.
"""
# 0,0,1 06-Mar-1999
# initial version posted
# 0,0,2 06-Mar-1999
# loosened parsing:
# cater to stinkin' tabs
# don't insist on a blank after PS2 prefix
# so trailing "... " line from a compound stmt no longer
# breaks if the file gets whitespace-trimmed
# better error msgs for inconsistent leading whitespace
# 0,9,1 08-Mar-1999
# exposed the Tester class and added client methods
# plus docstring examples of their use (eww - head-twisting!)
# fixed logic error in reporting total # of tests & failures
# added __test__ support to testmod (a pale reflection of Christian
# Tismer's vision ...)
# removed the "deep" argument; fiddle __test__ instead
# simplified endcase logic for extracting tests, and running them.
# before, if no output was expected but some was produced
# anyway via an eval'ed result, the discrepancy wasn't caught
# made TestClass private and used __test__ to get at it
# many doc updates
# speed _SpoofOut for long expected outputs
# 0,9,2 09-Mar-1999
# throw out comments from examples, enabling use of the much simpler
# exec compile(... "single") ...
# for simulating the runtime; that barfs on comment-only lines
# used the traceback module to do a much better job of reporting
# exceptions
# run __doc__ values thru str(), "just in case"
# privateness of names now determined by an overridable "isprivate"
# function
# by default a name now considered to be private iff it begins with
# an underscore but doesn't both begin & end with two of 'em; so
# e.g. Class.__init__ etc are searched now -- as they always
# should have been
# 0,9,3 18-Mar-1999
# added .flush stub to _SpoofOut (JPython buglet diagnosed by
# Hugh Emberson)
# repaired ridiculous docs about backslashes in examples
# minor internal changes
# changed source to Unix line-end conventions
# moved __test__ logic into new Tester.run__test__ method
# 0,9,4 27-Mar-1999
# report item name and line # in failing examples
# 0,9,5 29-Jun-1999
# allow straightforward exceptions in examples - thanks to Mark Hammond!
# 0,9,6 16-Jan-2001
# fiddling for changes in Python 2.0: some of the embedded docstring
# examples no longer worked *exactly* as advertised, due to minor
# language changes, and running doctest on itself pointed that out.
# Hard to think of a better example of why this is useful <wink>.
__version__ = 0, 9, 6
import types
_FunctionType = types.FunctionType
_ClassType = types.ClassType
_ModuleType = types.ModuleType
_StringType = types.StringType
del types
import string
_string_find = string.find
_string_join = string.join
_string_split = string.split
_string_rindex = string.rindex
del string
import re
PS1 = ">>>"
PS2 = "..."
_isPS1 = re.compile(r"(\s*)" + re.escape(PS1)).match
_isPS2 = re.compile(r"(\s*)" + re.escape(PS2)).match
_isEmpty = re.compile(r"\s*$").match
_isComment = re.compile(r"\s*#").match
del re
__all__ = []
# Extract interactive examples from a string. Return a list of triples,
# (source, outcome, lineno). "source" is the source code, and ends
# with a newline iff the source spans more than one line. "outcome" is
# the expected output if any, else an empty string. When not empty,
# outcome always ends with a newline. "lineno" is the line number,
# 0-based wrt the start of the string, of the first source line.
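# For instance (a rough sketch), a docstring containing
#     >>> x = 1 + 2
#     >>> x
#     3
# would yield [("x = 1 + 2", "", 0), ("x", "3\n", 1)].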
def _extract_examples(s):
isPS1, isPS2 = _isPS1, _isPS2
isEmpty, isComment = _isEmpty, _isComment
examples = []
lines = _string_split(s, "\n")
i, n = 0, len(lines)
while i < n:
line = lines[i]
i = i + 1
m = isPS1(line)
if m is None:
continue
j = m.end(0) # beyond the prompt
if isEmpty(line, j) or isComment(line, j):
# a bare prompt or comment -- not interesting
continue
lineno = i - 1
if line[j] != " ":
raise ValueError("line " + `lineno` + " of docstring lacks "
"blank after " + PS1 + ": " + line)
j = j + 1
blanks = m.group(1)
nblanks = len(blanks)
# suck up this and following PS2 lines
source = []
while 1:
source.append(line[j:])
line = lines[i]
m = isPS2(line)
if m:
if m.group(1) != blanks:
raise ValueError("inconsistent leading whitespace "
"in line " + `i` + " of docstring: " + line)
i = i + 1
else:
break
if len(source) == 1:
source = source[0]
else:
# get rid of useless null line from trailing empty "..."
if source[-1] == "":
del source[-1]
source = _string_join(source, "\n") + "\n"
# suck up response
if isPS1(line) or isEmpty(line):
expect = ""
else:
expect = []
while 1:
if line[:nblanks] != blanks:
raise ValueError("inconsistent leading whitespace "
"in line " + `i` + " of docstring: " + line)
expect.append(line[nblanks:])
i = i + 1
line = lines[i]
if isPS1(line) or isEmpty(line):
break
expect = _string_join(expect, "\n") + "\n"
examples.append( (source, expect, lineno) )
return examples
# Capture stdout when running examples.
class _SpoofOut:
def __init__(self):
self.clear()
def write(self, s):
self.buf.append(s)
def get(self):
return _string_join(self.buf, "")
def clear(self):
self.buf = []
def flush(self):
# JPython calls flush
pass
# Display some tag-and-msg pairs nicely, keeping the tag and its msg
# on the same line when that makes sense.
def _tag_out(printer, *tag_msg_pairs):
for tag, msg in tag_msg_pairs:
printer(tag + ":")
msg_has_nl = msg[-1:] == "\n"
msg_has_two_nl = msg_has_nl and \
_string_find(msg, "\n") < len(msg) - 1
if len(tag) + len(msg) < 76 and not msg_has_two_nl:
printer(" ")
else:
printer("\n")
printer(msg)
if not msg_has_nl:
printer("\n")
# Run list of examples, in context globs. "out" can be used to display
# stuff to "the real" stdout, and fakeout is an instance of _SpoofOut
# that captures the examples' std output. Return (#failures, #tries).
def _run_examples_inner(out, fakeout, examples, globs, verbose, name):
import sys, traceback
OK, BOOM, FAIL = range(3)
NADA = "nothing"
stderr = _SpoofOut()
failures = 0
for source, want, lineno in examples:
if verbose:
_tag_out(out, ("Trying", source),
("Expecting", want or NADA))
fakeout.clear()
try:
exec compile(source, "<string>", "single") in globs
got = fakeout.get()
state = OK
except:
# See whether the exception was expected.
if _string_find(want, "Traceback (innermost last):\n") == 0 or\
_string_find(want, "Traceback (most recent call last):\n") == 0:
# Only compare exception type and value - the rest of
# the traceback isn't necessary.
want = _string_split(want, '\n')[-2] + '\n'
exc_type, exc_val, exc_tb = sys.exc_info()
got = traceback.format_exception_only(exc_type, exc_val)[0]
state = OK
else:
# unexpected exception
stderr.clear()
traceback.print_exc(file=stderr)
state = BOOM
if state == OK:
if got == want:
if verbose:
out("ok\n")
continue
state = FAIL
assert state in (FAIL, BOOM)
failures = failures + 1
out("*" * 65 + "\n")
_tag_out(out, ("Failure in example", source))
out("from line #" + `lineno` + " of " + name + "\n")
if state == FAIL:
_tag_out(out, ("Expected", want or NADA), ("Got", got))
else:
assert state == BOOM
_tag_out(out, ("Exception raised", stderr.get()))
return failures, len(examples)
# Run list of examples, in context globs. Return (#failures, #tries).
def _run_examples(examples, globs, verbose, name):
import sys
saveout = sys.stdout
try:
sys.stdout = fakeout = _SpoofOut()
x = _run_examples_inner(saveout.write, fakeout, examples,
globs, verbose, name)
finally:
sys.stdout = saveout
return x
def run_docstring_examples(f, globs, verbose=0, name="NoName"):
"""f, globs, verbose=0, name="NoName" -> run examples from f.__doc__.
Use dict globs as the globals for execution.
Return (#failures, #tries).
If optional arg verbose is true, print stuff even if there are no
failures.
Use string name in failure msgs.
"""
try:
doc = f.__doc__
if not doc:
# docstring empty or None
return 0, 0
# just in case CT invents a doc object that has to be forced
# to look like a string <0.9 wink>
doc = str(doc)
except:
return 0, 0
e = _extract_examples(doc)
if not e:
return 0, 0
return _run_examples(e, globs, verbose, name)
def is_private(prefix, base):
"""prefix, base -> true iff name prefix + "." + base is "private".
Prefix may be an empty string, and base does not contain a period.
Prefix is ignored (although functions you write conforming to this
protocol may make use of it).
Return true iff base begins with an (at least one) underscore, but
does not both begin and end with (at least) two underscores.
>>> is_private("a.b", "my_func")
0
>>> is_private("____", "_my_func")
1
>>> is_private("someclass", "__init__")
0
>>> is_private("sometypo", "__init_")
1
>>> is_private("x.y.z", "_")
1
>>> is_private("_x.y.z", "__")
0
>>> is_private("", "") # senseless but consistent
0
"""
return base[:1] == "_" and not base[:2] == "__" == base[-2:]
class Tester:
"""Class Tester -- runs docstring examples and accumulates stats.
In normal use, function doctest.testmod() hides all this from you,
so use that if you can. Create your own instances of Tester to do
fancier things.
Methods:
runstring(s, name)
Search string s for examples to run; use name for logging.
Return (#failures, #tries).
rundoc(object, name=None)
Search object.__doc__ for examples to run; use name (or
object.__name__) for logging. Return (#failures, #tries).
rundict(d, name)
Search for examples in docstrings in all of d.values(); use name
for logging. Return (#failures, #tries).
run__test__(d, name)
Treat dict d like module.__test__. Return (#failures, #tries).
summarize(verbose=None)
Display summary of testing results, to stdout. Return
(#failures, #tries).
merge(other)
Merge in the test results from Tester instance "other".
>>> from doctest import Tester
>>> t = Tester(globs={'x': 42}, verbose=0)
>>> t.runstring(r'''
... >>> x = x * 2
... >>> print x
... 42
... ''', 'XYZ')
*****************************************************************
Failure in example: print x
from line #2 of XYZ
Expected: 42
Got: 84
(1, 2)
>>> t.runstring(">>> x = x * 2\\n>>> print x\\n84\\n", 'example2')
(0, 2)
>>> t.summarize()
1 items had failures:
1 of 2 in XYZ
***Test Failed*** 1 failures.
(1, 4)
>>> t.summarize(verbose=1)
1 items passed all tests:
2 tests in example2
1 items had failures:
1 of 2 in XYZ
4 tests in 2 items.
3 passed and 1 failed.
***Test Failed*** 1 failures.
(1, 4)
>>>
"""
def __init__(self, mod=None, globs=None, verbose=None,
isprivate=None):
"""mod=None, globs=None, verbose=None, isprivate=None
See doctest.__doc__ for an overview.
Optional keyword arg "mod" is a module, whose globals are used for
executing examples. If not specified, globs must be specified.
Optional keyword arg "globs" gives a dict to be used as the globals
when executing examples; if not specified, use the globals from
module mod.
In either case, a copy of the dict is used for each docstring
examined.
Optional keyword arg "verbose" prints lots of stuff if true, only
failures if false; by default, it's true iff "-v" is in sys.argv.
Optional keyword arg "isprivate" specifies a function used to determine
whether a name is private. The default function is doctest.is_private;
see its docs for details.
"""
if mod is None and globs is None:
raise TypeError("Tester.__init__: must specify mod or globs")
if mod is not None and type(mod) is not _ModuleType:
raise TypeError("Tester.__init__: mod must be a module; " +
`mod`)
if globs is None:
globs = mod.__dict__
self.globs = globs
if verbose is None:
import sys
verbose = "-v" in sys.argv
self.verbose = verbose
if isprivate is None:
isprivate = is_private
self.isprivate = isprivate
self.name2ft = {} # map name to (#failures, #trials) pair
def runstring(self, s, name):
"""
s, name -> search string s for examples to run, logging as name.
Use string name as the key for logging the outcome.
Return (#failures, #examples).
>>> t = Tester(globs={}, verbose=1)
>>> test = r'''
... # just an example
... >>> x = 1 + 2
... >>> x
... 3
... '''
>>> t.runstring(test, "Example")
Running string Example
Trying: x = 1 + 2
Expecting: nothing
ok
Trying: x
Expecting: 3
ok
0 of 2 examples failed in string Example
(0, 2)
"""
if self.verbose:
print "Running string", name
f = t = 0
e = _extract_examples(s)
if e:
f, t = _run_examples(e, self.globs.copy(), self.verbose, name)
if self.verbose:
print f, "of", t, "examples failed in string", name
self.__record_outcome(name, f, t)
return f, t
def rundoc(self, object, name=None):
"""
object, name=None -> search object.__doc__ for examples to run.
Use optional string name as the key for logging the outcome;
by default use object.__name__.
Return (#failures, #examples).
If object is a class object, search recursively for method
docstrings too.
object.__doc__ is examined regardless of name, but if object is
a class, whether private names reached from object are searched
depends on the constructor's "isprivate" argument.
>>> t = Tester(globs={}, verbose=0)
>>> def _f():
... '''Trivial docstring example.
... >>> assert 2 == 2
... '''
... return 32
...
>>> t.rundoc(_f) # expect 0 failures in 1 example
(0, 1)
"""
if name is None:
try:
name = object.__name__
except AttributeError:
raise ValueError("Tester.rundoc: name must be given "
"when object.__name__ doesn't exist; " + `object`)
if self.verbose:
print "Running", name + ".__doc__"
f, t = run_docstring_examples(object, self.globs.copy(),
self.verbose, name)
if self.verbose:
print f, "of", t, "examples failed in", name + ".__doc__"
self.__record_outcome(name, f, t)
if type(object) is _ClassType:
f2, t2 = self.rundict(object.__dict__, name)
f = f + f2
t = t + t2
return f, t
def rundict(self, d, name):
"""
d, name -> search for docstring examples in all of d.values().
For k, v in d.items() such that v is a function or class,
do self.rundoc(v, name + "." + k). Whether this includes
objects with private names depends on the constructor's
"isprivate" argument.
Return aggregate (#failures, #examples).
>>> def _f():
... '''>>> assert 1 == 1
... '''
>>> def g():
... '''>>> assert 2 != 1
... '''
>>> d = {"_f": _f, "g": g}
>>> t = Tester(globs={}, verbose=0)
>>> t.rundict(d, "rundict_test") # _f is skipped
(0, 1)
>>> t = Tester(globs={}, verbose=0, isprivate=lambda x,y: 0)
>>> t.rundict(d, "rundict_test_pvt") # both are searched
(0, 2)
"""
if not hasattr(d, "items"):
raise TypeError("Tester.rundict: d must support .items(); " +
`d`)
f = t = 0
for thisname, value in d.items():
if type(value) in (_FunctionType, _ClassType):
f2, t2 = self.__runone(value, name + "." + thisname)
f = f + f2
t = t + t2
return f, t
def run__test__(self, d, name):
"""d, name -> Treat dict d like module.__test__.
Return (#failures, #tries).
See testmod.__doc__ for details.
"""
failures = tries = 0
prefix = name + "."
savepvt = self.isprivate
try:
self.isprivate = lambda *args: 0
for k, v in d.items():
thisname = prefix + k
if type(v) is _StringType:
f, t = self.runstring(v, thisname)
elif type(v) in (_FunctionType, _ClassType):
f, t = self.rundoc(v, thisname)
else:
raise TypeError("Tester.run__test__: values in "
"dict must be strings, functions "
"or classes; " + `v`)
failures = failures + f
tries = tries + t
finally:
self.isprivate = savepvt
return failures, tries
def summarize(self, verbose=None):
"""
verbose=None -> summarize results, return (#failures, #tests).
Print summary of test results to stdout.
Optional arg 'verbose' controls how wordy this is. By
default, use the verbose setting established by the
constructor.
"""
if verbose is None:
verbose = self.verbose
notests = []
passed = []
failed = []
totalt = totalf = 0
for x in self.name2ft.items():
name, (f, t) = x
assert f <= t
totalt = totalt + t
totalf = totalf + f
if t == 0:
notests.append(name)
elif f == 0:
passed.append( (name, t) )
else:
failed.append(x)
if verbose:
if notests:
print len(notests), "items had no tests:"
notests.sort()
for thing in notests:
print " ", thing
if passed:
print len(passed), "items passed all tests:"
passed.sort()
for thing, count in passed:
print " %3d tests in %s" % (count, thing)
if failed:
print len(failed), "items had failures:"
failed.sort()
for thing, (f, t) in failed:
print " %3d of %3d in %s" % (f, t, thing)
if verbose:
print totalt, "tests in", len(self.name2ft), "items."
print totalt - totalf, "passed and", totalf, "failed."
if totalf:
print "***Test Failed***", totalf, "failures."
elif verbose:
print "Test passed."
return totalf, totalt
def merge(self, other):
"""
other -> merge in test results from the other Tester instance.
If self and other both have a test result for something
with the same name, the (#failures, #tests) results are
summed, and a warning is printed to stdout.
>>> from doctest import Tester
>>> t1 = Tester(globs={}, verbose=0)
>>> t1.runstring('''
... >>> x = 12
... >>> print x
... 12
... ''', "t1example")
(0, 2)
>>>
>>> t2 = Tester(globs={}, verbose=0)
>>> t2.runstring('''
... >>> x = 13
... >>> print x
... 13
... ''', "t2example")
(0, 2)
>>> common = ">>> assert 1 + 2 == 3\\n"
>>> t1.runstring(common, "common")
(0, 1)
>>> t2.runstring(common, "common")
(0, 1)
>>> t1.merge(t2)
*** Tester.merge: 'common' in both testers; summing outcomes.
>>> t1.summarize(1)
3 items passed all tests:
2 tests in common
2 tests in t1example
2 tests in t2example
6 tests in 3 items.
6 passed and 0 failed.
Test passed.
(0, 6)
>>>
"""
d = self.name2ft
for name, (f, t) in other.name2ft.items():
if d.has_key(name):
print "*** Tester.merge: '" + name + "' in both" \
" testers; summing outcomes."
f2, t2 = d[name]
f = f + f2
t = t + t2
d[name] = f, t
def __record_outcome(self, name, f, t):
if self.name2ft.has_key(name):
print "*** Warning: '" + name + "' was tested before;", \
"summing outcomes."
f2, t2 = self.name2ft[name]
f = f + f2
t = t + t2
self.name2ft[name] = f, t
def __runone(self, target, name):
if "." in name:
i = _string_rindex(name, ".")
prefix, base = name[:i], name[i+1:]
else:
prefix, base = "", base
if self.isprivate(prefix, base):
return 0, 0
return self.rundoc(target, name)
master = None
def testmod(m, name=None, globs=None, verbose=None, isprivate=None,
report=1):
"""m, name=None, globs=None, verbose=None, isprivate=None, report=1
Test examples in docstrings in functions and classes reachable from
module m, starting with m.__doc__. Private names are skipped.
Also test examples reachable from dict m.__test__ if it exists and is
not None. m.__dict__ maps names to functions, classes and strings;
function and class docstrings are tested even if the name is private;
strings are tested directly, as if they were docstrings.
Return (#failures, #tests).
See doctest.__doc__ for an overview.
Optional keyword arg "name" gives the name of the module; by default
use m.__name__.
Optional keyword arg "globs" gives a dict to be used as the globals
when executing examples; by default, use m.__dict__. A copy of this
dict is actually used for each docstring, so that each docstring's
examples start with a clean slate.
Optional keyword arg "verbose" prints lots of stuff if true, prints
only failures if false; by default, it's true iff "-v" is in sys.argv.
Optional keyword arg "isprivate" specifies a function used to
determine whether a name is private. The default function is
doctest.is_private; see its docs for details.
Optional keyword arg "report" prints a summary at the end when true,
else prints nothing at the end. In verbose mode, the summary is
detailed, else very brief (in fact, empty if all tests passed).
Advanced tomfoolery: testmod runs methods of a local instance of
class doctest.Tester, then merges the results into (or creates)
global Tester instance doctest.master. Methods of doctest.master
can be called directly too, if you want to do something unusual.
Passing report=0 to testmod is especially useful then, to delay
displaying a summary. Invoke doctest.master.summarize(verbose)
when you're done fiddling.
"""
global master
if type(m) is not _ModuleType:
raise TypeError("testmod: module required; " + `m`)
if name is None:
name = m.__name__
tester = Tester(m, globs=globs, verbose=verbose, isprivate=isprivate)
failures, tries = tester.rundoc(m, name)
f, t = tester.rundict(m.__dict__, name)
failures = failures + f
tries = tries + t
if hasattr(m, "__test__"):
testdict = m.__test__
if testdict:
if not hasattr(testdict, "items"):
raise TypeError("testmod: module.__test__ must support "
".items(); " + `testdict`)
f, t = tester.run__test__(testdict, name + ".__test__")
failures = failures + f
tries = tries + t
if report:
tester.summarize()
if master is None:
master = tester
else:
master.merge(tester)
return failures, tries
class _TestClass:
"""
A pointless class, for sanity-checking of docstring testing.
Methods:
square()
get()
>>> _TestClass(13).get() + _TestClass(-12).get()
1
>>> hex(_TestClass(13).square().get())
'0xa9'
"""
def __init__(self, val):
"""val -> _TestClass object with associated value val.
>>> t = _TestClass(123)
>>> print t.get()
123
"""
self.val = val
def square(self):
"""square() -> square TestClass's associated value
>>> _TestClass(13).square().get()
169
"""
self.val = self.val ** 2
return self
def get(self):
"""get() -> return TestClass's associated value.
>>> x = _TestClass(-42)
>>> print x.get()
-42
"""
return self.val
__test__ = {"_TestClass": _TestClass,
"string": r"""
Example of a string object, searched as-is.
>>> x = 1; y = 2
>>> x + y, x * y
(3, 2)
"""
}
def _test():
import doctest
return doctest.testmod(doctest)
if __name__ == "__main__":
_test()
|
mitreaadrian/Soccersim
|
refs/heads/master
|
boost/boost_1_59_0/libs/python/pyste/tests/inheritUT.py
|
54
|
# Copyright Bruno da Silva de Oliveira 2003. Use, modification and
# distribution is subject to the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import unittest
from _inherit import *
class InheritExampleTest(unittest.TestCase):
def testIt(self):
a = A_int()
b = B()
self.assert_(isinstance(b, A_int))
self.assert_(issubclass(B, A_int))
a.set(10)
self.assertEqual(a.get(), 10)
b.set(1)
self.assertEqual(b.go(), 1)
self.assertEqual(b.get(), 1)
d = D()
self.assert_(issubclass(D, B))
self.assertEqual(d.x, 0)
self.assertEqual(d.y, 0)
self.assertEqual(d.s, 1)
self.assertEqual(D.s, 1)
self.assertEqual(d.f1(), 1)
self.assertEqual(d.f2(), 2)
if __name__ == '__main__':
unittest.main()
|
dvspirito/pymeasure
|
refs/heads/master
|
pymeasure/adapters/visa.py
|
1
|
#
# This file is part of the PyMeasure package.
#
# Copyright (c) 2013-2017 PyMeasure Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import logging
import copy
import visa
import numpy as np
from .adapter import Adapter
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
# noinspection PyPep8Naming,PyUnresolvedReferences
class VISAAdapter(Adapter):
""" Adapter class for the VISA library using PyVISA to communicate
to instruments. Inherit from either class VISAAdapter14 or VISAAdapter15.
:param resource: VISA resource name that identifies the address
:param kwargs: Any valid key-word arguments for constructing a PyVISA instrument
"""
def __init__(self, resourceName, **kwargs):
# Check PyVisa version
version = float(self.version)
if version < 1.7:
raise NotImplementedError(
"PyVisa {} is no longer supported. Please upgrade to version 1.8 or later.".format(
version))
if isinstance(resourceName, int):
resourceName = "GPIB0::%d::INSTR" % resourceName
super(VISAAdapter, self).__init__()
self.resource_name = resourceName
self.manager = visa.ResourceManager()
safeKeywords = ['resource_name', 'timeout', 'term_chars',
'chunk_size', 'lock', 'delay', 'send_end',
'values_format', 'read_termination']
kwargsCopy = copy.deepcopy(kwargs)
for key in kwargsCopy:
if key not in safeKeywords:
kwargs.pop(key)
self.connection = self.manager.get_instrument(
resourceName,
**kwargs
)
@property
def version(self):
""" The string of the PyVISA version in use
"""
if hasattr(visa, '__version__'):
return visa.__version__
else:
return '1.4'
def __repr__(self):
return "<VISAAdapter(resource='%s')>" % self.connection.resourceName
def write(self, command):
""" Writes a command to the instrument
:param command: SCPI command string to be sent to the instrument
"""
self.connection.write(command)
def read(self):
""" Reads until the buffer is empty and returns the resulting
        ASCII response
:returns: String ASCII response of the instrument.
"""
return self.connection.read()
def ask(self, command):
""" Writes the command to the instrument and returns the resulting
ASCII response
:param command: SCPI command string to be sent to the instrument
:returns: String ASCII response of the instrument
"""
return self.connection.query(command)
def ask_values(self, command):
""" Writes a command to the instrument and returns a list of formatted
        values from the result. The format of the return is configured by
self.config().
:param command: SCPI command to be sent to the instrument
:returns: Formatted response of the instrument.
"""
return self.connection.query_values(command)
def binary_values(self, command, header_bytes=0, dtype=np.float32):
""" Returns a numpy array from a query for binary data
:param command: SCPI command to be sent to the instrument
:param header_bytes: Integer number of bytes to ignore in header
:param dtype: The NumPy data type to format the values with
:returns: NumPy array of values
"""
self.connection.write(command)
binary = self.connection.read_raw()
header, data = binary[:header_bytes], binary[header_bytes:]
return np.fromstring(data, dtype=dtype)
def config(self, is_binary=False, datatype='str',
container=np.array, converter='s',
separator=',', is_big_endian=False):
""" Configurate the format of data transfer to and from the instrument.
:param is_binary: If True, data is in binary format, otherwise ASCII.
:param datatype: Data type.
:param container: Return format. Any callable/type that takes an iterable.
:param converter: String converter, used in dealing with ASCII data.
:param separator: Delimiter of a series of data in ASCII.
:param is_big_endian: Endianness.
"""
self.connection.values_format.is_binary = is_binary
self.connection.values_format.datatype = datatype
self.connection.values_format.container = container
self.connection.values_format.converter = converter
self.connection.values_format.separator = separator
self.connection.values_format.is_big_endian = is_big_endian
def wait_for_srq(self, timeout=25, delay=0.1):
""" Blocks until a SRQ, and leaves the bit high
:param timeout: Timeout duration in seconds
:param delay: Time delay between checking SRQ in seconds
"""
self.connection.wait_for_srq(timeout * 1000)
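# A minimal usage sketch of the adapter defined above. It assumes an
# instrument is reachable at GPIB address 4 and understands the common
# "*IDN?" identification query; both are illustrative assumptions, not
# requirements of this class.
#
#     adapter = VISAAdapter(4)          # the integer expands to "GPIB0::4::INSTR"
#     adapter.write("*RST")             # send a command without reading a reply
#     print(adapter.ask("*IDN?"))       # query and return the ASCII response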
|
zenodo/zenodo
|
refs/heads/master
|
zenodo/modules/jsonschemas/utils.py
|
2
|
# -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2016 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""ZenodoJSONSchemas utilities functions."""
from __future__ import absolute_import, print_function
import json
from copy import deepcopy
from flask import current_app
from werkzeug.local import LocalProxy
current_jsonschemas = LocalProxy(
lambda: current_app.extensions['invenio-jsonschemas']
)
_records_state = LocalProxy(lambda: current_app.extensions['invenio-records'])
def resolve_schema_path(schema_path):
"""Resolve a schema by name.
    Resolve a schema by its registered name, e.g. 'records/record-v1.0.0.json'.
    WARNING: This method returns a deepcopy of the original schema.
    Always use this method; otherwise any modifications to a resolved schema
    would be retained at the application level!
:param schema_path: schema path, e.g.: 'records/record-v1.0.0.json'.
:type schema_path: str
:returns: JSON schema
:rtype: dict
"""
schema = current_jsonschemas.get_schema(schema_path)
return deepcopy(schema)
def resolve_schema_url(schema_url):
"""Resolve a schema url to a dict.
WARNING: This method returns a deepcopy of the original schema.
    Always use this method; otherwise any modifications to a resolved schema
    would be retained at the application level!
:param schema_url: absolute url of schema, e.g.:
'https://zenodo.org/schemas/records/record-v1.0.0.json'.
:type schema_url: str
:returns: JSON schema
:rtype: dict
"""
schema_path = current_jsonschemas.url_to_path(schema_url)
return resolve_schema_path(schema_path)
def replace_schema_refs(schema):
"""Replace all the refs in jsonschema.
:param schema: JSON schema for which the refs should be resolved.
:type schema: dict
:returns: JSON schema with resolved refs.
:rtype: dict
"""
return deepcopy(_records_state.replace_refs(schema))
def get_abs_schema_path(schema_path):
"""Resolve absolute schema path on disk from schema name.
Resolve schema name to an absolute schema path on disk, e.g.:
'records/record-v1.0.0.json' could resolve to
'/absolute/path/schemas/records/record-v1.0.0.json'
"""
return current_jsonschemas.get_schema_path(schema_path)
def save_jsonschema(schema, path):
"""Save jsonschema to disk path."""
with open(path, 'w') as fp:
json.dump(schema, fp, indent=2, sort_keys=True, separators=(',', ': '))
fp.write('\n')
def merge_dicts(first, second):
"""Merge the 'second' multiple-dictionary into the 'first' one."""
new = deepcopy(first)
for k, v in second.items():
if isinstance(v, dict) and v:
ret = merge_dicts(new.get(k, dict()), v)
new[k] = ret
else:
new[k] = second[k]
return new
def remove_keys(d, keys):
"""Remove keys from a dictionary (nested).
:param d: dictionary from which the keys are to be removed.
:type d: dict
:param keys: keys to be removed (list of str)
:type keys: list
"""
if isinstance(d, dict):
return dict((k, remove_keys(v, keys)) for k, v in d.items()
if k not in keys)
elif isinstance(d, list):
return list(remove_keys(i, keys) for i in d)
else:
return d
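if __name__ == '__main__':
    # Minimal sketch of the pure-dict helpers above; merge_dicts() and
    # remove_keys() need no Flask application context, so this can run
    # standalone. The schema fragments are illustrative only.
    base = {'properties': {'title': {'type': 'string'}}}
    extra = {'properties': {'doi': {'type': 'string'}}}
    merged = merge_dicts(base, extra)
    assert set(merged['properties']) == {'title', 'doi'}
    pruned = remove_keys(merged, ['doi'])
    assert 'doi' not in pruned['properties']
    print(json.dumps(pruned, indent=2, sort_keys=True))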
|
savoirfairelinux/account-financial-tools
|
refs/heads/7.0
|
account_credit_control_dunning_fees/__openerp__.py
|
1
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Nicolas Bessi
# Copyright 2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{'name': 'Credit control dunning fees',
'version': '0.1.0',
'author': 'Camptocamp',
'maintainer': 'Camptocamp',
'category': 'Accounting',
'complexity': 'normal',
'depends': ['account_credit_control'],
'description': """
Dunning Fees for Credit Control
===============================
This extension of credit control adds the notion of dunning fees
on credit control lines.
Configuration
-------------
For release 0.1 only fixed fees are supported.
You can specify a fixed fee amount, a product and a currency
on the credit control level form.
The amount will be used as the fee value and the currency will
determine the currency of the fee. If the credit control line does
not have the same currency as the fee currency, the fees will be
converted to the credit control line currency.
The product is used to compute taxes in the reconciliation process.
Run
---
Fees are automatically computed on the credit run and saved
on the generated credit lines.
Fees can be manually edited as long as the credit line is in draft.
The Credit Control Summary report includes a new fees column.
Roadmap
-------
Support of fees price list
""",
'website': 'http://www.camptocamp.com',
'data': ['view/policy_view.xml',
'view/line_view.xml',
'report/report.xml'],
'demo': [],
'test': [],
'installable': True,
'auto_install': False,
'license': 'AGPL-3',
'application': False}
|
T3CHNOLOG1C/Kurisu
|
refs/heads/master
|
addons/kickban.py
|
3
|
import datetime
import discord
import json
import re
import time
from discord.ext import commands
from sys import argv
class KickBan:
"""
Kicking and banning users.
"""
def __init__(self, bot):
self.bot = bot
print('Addon "{}" loaded'.format(self.__class__.__name__))
@commands.has_permissions(manage_nicknames=True)
@commands.command(pass_context=True, name="kick")
async def kick_member(self, ctx, user, *, reason=""):
"""Kicks a user from the server. Staff only."""
try:
try:
member = ctx.message.mentions[0]
except IndexError:
await self.bot.say("Please mention a user.")
return
msg = "You were kicked from {}.".format(self.bot.server.name)
if reason != "":
msg += " The given reason is: " + reason
msg += "\n\nYou are able to rejoin the server, but please read the rules in #welcome-and-rules before participating again."
try:
await self.bot.send_message(member, msg)
except discord.errors.Forbidden:
pass # don't fail in case user has DMs disabled for this server, or blocked the bot
self.bot.actions.append("uk:"+member.id)
await self.bot.kick(member)
await self.bot.say("{} is now gone. 👌".format(self.bot.escape_name(member)))
msg = "👢 **Kick**: {} kicked {} | {}#{}\n🏷 __User ID__: {}".format(ctx.message.author.mention, member.mention, self.bot.escape_name(member.name), member.discriminator, member.id)
if reason != "":
msg += "\n✏️ __Reason__: " + reason
await self.bot.send_message(self.bot.serverlogs_channel, msg)
await self.bot.send_message(self.bot.modlogs_channel, msg + ("\nPlease add an explanation below. In the future, it is recommended to use `.kick <user> [reason]` as the reason is automatically sent to the user." if reason == "" else ""))
except discord.errors.Forbidden:
await self.bot.say("💢 I don't have permission to do this.")
@commands.has_permissions(ban_members=True)
@commands.command(pass_context=True, name="ban")
async def ban_member(self, ctx, user, *, reason=""):
"""Bans a user from the server. OP+ only."""
try:
try:
member = ctx.message.mentions[0]
except IndexError:
await self.bot.say("Please mention a user.")
return
msg = "You were banned from {}.".format(self.bot.server.name)
if reason != "":
msg += " The given reason is: " + reason
msg += "\n\nThis ban does not expire."
try:
await self.bot.send_message(member, msg)
except discord.errors.Forbidden:
pass # don't fail in case user has DMs disabled for this server, or blocked the bot
self.bot.actions.append("ub:"+member.id)
await self.bot.ban(member, 1)
await self.bot.say("{} is now b&. 👍".format(self.bot.escape_name(member)))
msg = "⛔ **Ban**: {} banned {} | {}#{}\n🏷 __User ID__: {}".format(ctx.message.author.mention, member.mention, self.bot.escape_name(member.name), member.discriminator, member.id)
if reason != "":
msg += "\n✏️ __Reason__: " + reason
await self.bot.send_message(self.bot.serverlogs_channel, msg)
await self.bot.send_message(self.bot.modlogs_channel, msg + ("\nPlease add an explanation below. In the future, it is recommended to use `.ban <user> [reason]` as the reason is automatically sent to the user." if reason == "" else ""))
except discord.errors.Forbidden:
await self.bot.say("💢 I don't have permission to do this.")
@commands.has_permissions(ban_members=True)
@commands.command(pass_context=True, name="silentban", hidden=True)
async def silentban_member(self, ctx, user, *, reason=""):
"""Bans a user from the server, without a notification. OP+ only."""
try:
try:
member = ctx.message.mentions[0]
except IndexError:
await self.bot.say("Please mention a user.")
return
self.bot.actions.append("ub:"+member.id)
await self.bot.ban(member, 1)
await self.bot.say("{} is now b&. 👍".format(self.bot.escape_name(member)))
msg = "⛔ **Silent ban**: {} banned {} | {}#{}\n🏷 __User ID__: {}".format(ctx.message.author.mention, member.mention, self.bot.escape_name(member.name), member.discriminator, member.id)
if reason != "":
msg += "\n✏️ __Reason__: " + reason
await self.bot.send_message(self.bot.serverlogs_channel, msg)
await self.bot.send_message(self.bot.modlogs_channel, msg + ("\nPlease add an explanation below. In the future, it is recommended to use `.silentban <user> [reason]`." if reason == "" else ""))
except discord.errors.Forbidden:
await self.bot.say("💢 I don't have permission to do this.")
@commands.has_permissions(ban_members=True)
@commands.command(pass_context=True, name="timeban")
async def timeban_member(self, ctx, user, length, *, reason=""):
"""Bans a user for a limited period of time. OP+ only.\n\nLength format: #d#h#m#s"""
try:
member = ctx.message.mentions[0]
except IndexError:
await self.bot.say("Please mention a user.")
return
issuer = ctx.message.author
# thanks Luc#5653
units = {
"d": 86400,
"h": 3600,
"m": 60,
"s": 1
}
seconds = 0
match = re.findall("([0-9]+[smhd])", length) # Thanks to 3dshax server's former bot
        if not match:  # re.findall returns a list, which is empty when nothing matches
return None
for item in match:
seconds += int(item[:-1]) * units[item[-1]]
timestamp = datetime.datetime.now()
delta = datetime.timedelta(seconds=seconds)
unban_time = timestamp + delta
unban_time_string = unban_time.strftime("%Y-%m-%d %H:%M:%S")
with open("data/timebans.json", "r") as f:
timebans = json.load(f)
timebans[member.id] = unban_time_string
self.bot.timebans[member.id] = [member, unban_time, False] # last variable is "notified", for <=30 minute notifications
with open("data/timebans.json", "w") as f:
json.dump(timebans, f)
msg = "You were banned from {}.".format(self.bot.server.name)
if reason != "":
msg += " The given reason is: " + reason
msg += "\n\nThis ban expires {} {}.".format(unban_time_string, time.tzname[0])
try:
await self.bot.send_message(member, msg)
except discord.errors.Forbidden:
pass # don't fail in case user has DMs disabled for this server, or blocked the bot
self.bot.actions.append("ub:"+member.id)
await self.bot.ban(member, 1)
await self.bot.say("{} is now b& until {} {}. 👍".format(self.bot.escape_name(member), unban_time_string, time.tzname[0]))
msg = "⛔ **Time ban**: {} banned {} until {} | {}#{}\n🏷 __User ID__: {}".format(ctx.message.author.mention, member.mention, unban_time_string, self.bot.escape_name(member.name), member.discriminator, member.id)
if reason != "":
msg += "\n✏️ __Reason__: " + reason
await self.bot.send_message(self.bot.serverlogs_channel, msg)
await self.bot.send_message(self.bot.modlogs_channel, msg + ("\nPlease add an explanation below. In the future, it is recommended to use `.timeban <user> <length> [reason]` as the reason is automatically sent to the user." if reason == "" else ""))
@commands.has_permissions(ban_members=True)
@commands.command(pass_context=True, name="softban")
async def softban_member(self, ctx, user, *, reason):
"""Soft-ban a user. OP+ only.\n\nThis "bans" the user without actually doing a ban on Discord. The bot will instead kick the user every time they join. Discord bans are account- and IP-based."""
try:
try:
member = ctx.message.mentions[0]
except IndexError:
await self.bot.say("Please mention a user.")
return
issuer = ctx.message.author
with open("data/softbans.json", "r") as f:
softbans = json.load(f)
if member.id not in softbans:
softbans[member.id] = {}
timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
softbans[member.id] = {"name": "{}#{}".format(member.name, member.discriminator), "issuer_id": issuer.id, "issuer_name": issuer.name, "reason": reason, "timestamp": timestamp}
with open("data/softbans.json", "w") as f:
json.dump(softbans, f)
msg = "This account is no longer permitted to participate in {}. The reason is: {}".format(self.bot.server.name, softbans[member.id]["reason"])
await self.bot.send_message(member, msg)
await self.bot.kick(member)
await self.bot.say("{} is now b&. 👍".format(self.bot.escape_name(member)))
msg = "⛔ **Soft-ban**: {} soft-banned {} | {}#{}\n🏷 __User ID__: {}\n✏️ __Reason__: {}".format(ctx.message.author.mention, member.mention, self.bot.escape_name(member.name), member.discriminator, member.id, reason)
await self.bot.send_message(self.bot.modlogs_channel, msg)
await self.bot.send_message(self.bot.serverlogs_channel, msg)
except discord.errors.Forbidden:
await self.bot.say("💢 I don't have permission to do this.")
@commands.has_permissions(ban_members=True)
@commands.command(pass_context=True, name="softbanid")
async def softbanid_member(self, ctx, user_id, *, reason):
"""Soft-ban a user based on ID. OP+ only.\n\nThis "bans" the user without actually doing a ban on Discord. The bot will instead kick the user every time they join. Discord bans are account- and IP-based."""
issuer = ctx.message.author
with open("data/softbans.json", "r") as f:
softbans = json.load(f)
name = "???"
if user_id not in softbans:
softbans[user_id] = {}
elif softbans[user_id]["name"] != "???":
name = softbans[user_id]["name"]
timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
softbans[user_id] = {"name": name, "issuer_id": issuer.id, "issuer_name": issuer.name, "reason": reason, "timestamp": timestamp}
with open("data/softbans.json", "w") as f:
json.dump(softbans, f)
await self.bot.say("ID {} is now b&. 👍".format(user_id))
msg = "⛔ **Soft-ban**: {} soft-banned ID {}\n✏️ __Reason__: {}".format(ctx.message.author.mention, user_id, reason)
await self.bot.send_message(self.bot.modlogs_channel, msg)
await self.bot.send_message(self.bot.serverlogs_channel, msg)
@commands.has_permissions(ban_members=True)
@commands.command(pass_context=True, name="unsoftban")
async def unsoftban_member(self, ctx, user_id):
        """Un-soft-ban a user based on ID. OP+ only."""
        issuer = ctx.message.author
with open("data/softbans.json", "r") as f:
softbans = json.load(f)
if user_id not in softbans:
await self.bot.say("{} is not soft-banned!".format(user_id))
return
name = softbans[user_id]["name"]
softbans.pop(user_id)
with open("data/softbans.json", "w") as f:
json.dump(softbans, f)
await self.bot.say("{} has been unbanned!".format(self.bot.escape_name(name) if name != "???" else user_id))
msg = "⚠️ **Un-soft-ban**: {} un-soft-banned {}".format(issuer.mention, self.bot.escape_name(name) if name != "???" else "ID {}".format(user_id))
await self.bot.send_message(self.bot.modlogs_channel, msg)
@commands.has_permissions(manage_nicknames=True)
@commands.command()
async def listsoftbans(self, user_id=""):
"""List soft bans. Shows all if an ID is not specified."""
with open("data/softbans.json", "r") as f:
softbans = json.load(f)
embed = discord.Embed(color=discord.Color.dark_red())
if user_id == "":
embed.title = "All soft bans"
for softban in softbans:
# sorry this is garbage
embed.add_field(
name=self.bot.escape_name(softbans[softban]["name"]) if softbans[softban]["name"] != "???" else softban,
value="{}Issuer: {}\nTime: {}\nReason: {}".format(
"" if softbans[softban]["name"] == "???" else "ID: {}\n".format(softban),
self.bot.escape_name(softbans[softban]["issuer_name"]),
softbans[softban]["timestamp"],
softbans[softban]["reason"]
)
)
else:
if user_id in softbans:
embed.title = self.bot.escape_name(softbans[user_id]["name"]) if softbans[user_id]["name"] != "???" else user_id
embed.description = "{}Issuer: {}\nTime: {}\nReason: {}".format(
"" if softbans[user_id]["name"] == "???" else "ID: {}\n".format(user_id),
self.bot.escape_name(softbans[user_id]["issuer_name"]),
softbans[user_id]["timestamp"],
softbans[user_id]["reason"]
)
else:
embed.color = discord.Color.green()
embed.title = user_id
embed.description = "ID is not banned!"
await self.bot.say(embed=embed)
def setup(bot):
bot.add_cog(KickBan(bot))
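# A standalone sketch of the duration parsing used by `.timeban` above (the
# command performs the same steps inline); the length string is illustrative:
#
#     units = {"d": 86400, "h": 3600, "m": 60, "s": 1}
#     match = re.findall("([0-9]+[smhd])", "1d12h30m")   # -> ['1d', '12h', '30m']
#     seconds = sum(int(part[:-1]) * units[part[-1]] for part in match)
#     # seconds == 131400, i.e. 1 day + 12 hours + 30 minutes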
|
cadop/pyCGM
|
refs/heads/master
|
setup.py
|
1
|
import sys
sys.path.append('./pyCGM_Single') # TODO update to pycgm when fixed
from _about import __version__
from io import open
import setuptools
with open("README.md", "r",encoding="utf8") as fh:
long_description = fh.read()
setuptools.setup(
name="pycgm",
version= __version__,
author="", # Many
author_email="cadop@umich.edu",
description="A Python Implementation of the Conventional Gait Model",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/cadop/pycgm",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT",
"Operating System :: OS Independent",
],
    python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*',
install_requires=['numpy>=1.15','scipy'],
package_data={
"": [
"../SampleData/*/*.c3d",
"../SampleData/*/*.csv",
"../SampleData/*/*.vsk",
"segments.csv"
], # TODO Need to fix
},
include_package_data=True,
)
|
upliftaero/MAVProxy
|
refs/heads/uplift
|
MAVProxy/modules/lib/mp_util.py
|
5
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''common mavproxy utility functions'''
import math
import os
import platform
# Some platforms (CYGWIN and others) may not have the wx library
# use imp to see if wx is on the path
has_wxpython = False
if platform.system() == 'Windows':
# auto-detection is failing on windows, for an unknown reason
has_wxpython = True
else:
import imp
try:
imp.find_module('wx')
has_wxpython = True
except ImportError, e:
pass
radius_of_earth = 6378100.0 # in meters
def gps_distance(lat1, lon1, lat2, lon2):
'''return distance between two points in meters,
coordinates are in degrees
thanks to http://www.movable-type.co.uk/scripts/latlong.html'''
lat1 = math.radians(lat1)
lat2 = math.radians(lat2)
lon1 = math.radians(lon1)
lon2 = math.radians(lon2)
dLat = lat2 - lat1
dLon = lon2 - lon1
a = math.sin(0.5*dLat)**2 + math.sin(0.5*dLon)**2 * math.cos(lat1) * math.cos(lat2)
c = 2.0 * math.atan2(math.sqrt(a), math.sqrt(1.0-a))
return radius_of_earth * c
def gps_bearing(lat1, lon1, lat2, lon2):
'''return bearing between two points in degrees, in range 0-360
thanks to http://www.movable-type.co.uk/scripts/latlong.html'''
lat1 = math.radians(lat1)
lat2 = math.radians(lat2)
lon1 = math.radians(lon1)
lon2 = math.radians(lon2)
dLat = lat2 - lat1
dLon = lon2 - lon1
y = math.sin(dLon) * math.cos(lat2)
x = math.cos(lat1)*math.sin(lat2) - math.sin(lat1)*math.cos(lat2)*math.cos(dLon)
bearing = math.degrees(math.atan2(y, x))
if bearing < 0:
bearing += 360.0
return bearing
def wrap_valid_longitude(lon):
''' wrap a longitude value around to always have a value in the range
[-180, +180) i.e 0 => 0, 1 => 1, -1 => -1, 181 => -179, -181 => 179
'''
return (((lon + 180.0) % 360.0) - 180.0)
def gps_newpos(lat, lon, bearing, distance):
'''extrapolate latitude/longitude given a heading and distance
thanks to http://www.movable-type.co.uk/scripts/latlong.html
'''
lat1 = math.radians(lat)
lon1 = math.radians(lon)
brng = math.radians(bearing)
dr = distance/radius_of_earth
lat2 = math.asin(math.sin(lat1)*math.cos(dr) +
math.cos(lat1)*math.sin(dr)*math.cos(brng))
lon2 = lon1 + math.atan2(math.sin(brng)*math.sin(dr)*math.cos(lat1),
math.cos(dr)-math.sin(lat1)*math.sin(lat2))
return (math.degrees(lat2), wrap_valid_longitude(math.degrees(lon2)))
def gps_offset(lat, lon, east, north):
'''return new lat/lon after moving east/north
by the given number of meters'''
bearing = math.degrees(math.atan2(east, north))
distance = math.sqrt(east**2 + north**2)
return gps_newpos(lat, lon, bearing, distance)
def mkdir_p(dir):
'''like mkdir -p'''
if not dir:
return
if dir.endswith("/") or dir.endswith("\\"):
mkdir_p(dir[:-1])
return
if os.path.isdir(dir):
return
mkdir_p(os.path.dirname(dir))
try:
os.mkdir(dir)
except Exception:
pass
def polygon_load(filename):
'''load a polygon from a file'''
ret = []
f = open(filename)
for line in f:
if line.startswith('#'):
continue
line = line.strip()
if not line:
continue
a = line.split()
if len(a) != 2:
raise RuntimeError("invalid polygon line: %s" % line)
ret.append((float(a[0]), float(a[1])))
f.close()
return ret
def polygon_bounds(points):
'''return bounding box of a polygon in (x,y,width,height) form'''
(minx, miny) = (points[0][0], points[0][1])
(maxx, maxy) = (minx, miny)
for p in points:
minx = min(minx, p[0])
maxx = max(maxx, p[0])
miny = min(miny, p[1])
maxy = max(maxy, p[1])
return (minx, miny, maxx-minx, maxy-miny)
def bounds_overlap(bound1, bound2):
'''return true if two bounding boxes overlap'''
(x1,y1,w1,h1) = bound1
(x2,y2,w2,h2) = bound2
if x1+w1 < x2:
return False
if x2+w2 < x1:
return False
if y1+h1 < y2:
return False
if y2+h2 < y1:
return False
return True
class object_container:
'''return a picklable object from an existing object,
containing all of the normal attributes of the original'''
def __init__(self, object):
for v in dir(object):
if not v.startswith('__') and v not in ['this']:
try:
a = getattr(object, v)
if (hasattr(a, '__call__') or
hasattr(a, '__swig_destroy__') or
str(a).find('Swig Object') != -1):
continue
setattr(self, v, a)
except Exception:
pass
def degrees_to_dms(degrees):
'''return a degrees:minutes:seconds string'''
deg = int(degrees)
min = int((degrees - deg)*60)
sec = ((degrees - deg) - (min/60.0))*60*60
return u'%d\u00b0%02u\'%05.2f"' % (deg, abs(min), abs(sec))
class UTMGrid:
'''class to hold UTM grid position'''
def __init__(self, zone, easting, northing, hemisphere='S'):
self.zone = zone
self.easting = easting
self.northing = northing
self.hemisphere = hemisphere
def __str__(self):
return "%s %u %u %u" % (self.hemisphere, self.zone, self.easting, self.northing)
def latlon(self):
'''return (lat,lon) for the grid coordinates'''
from MAVProxy.modules.lib.ANUGA import lat_long_UTM_conversion
(lat, lon) = lat_long_UTM_conversion.UTMtoLL(self.northing, self.easting, self.zone, isSouthernHemisphere=(self.hemisphere=='S'))
return (lat, lon)
def latlon_to_grid(latlon):
'''convert to grid reference'''
from MAVProxy.modules.lib.ANUGA import redfearn
(zone, easting, northing) = redfearn.redfearn(latlon[0], latlon[1])
if latlon[0] < 0:
hemisphere = 'S'
else:
hemisphere = 'N'
return UTMGrid(zone, easting, northing, hemisphere=hemisphere)
def latlon_round(latlon, spacing=1000):
'''round to nearest grid corner'''
g = latlon_to_grid(latlon)
g.easting = (g.easting // spacing) * spacing
g.northing = (g.northing // spacing) * spacing
return g.latlon()
def wxToPIL(wimg):
'''convert a wxImage to a PIL Image'''
from PIL import Image
(w,h) = wimg.GetSize()
d = wimg.GetData()
pimg = Image.new("RGB", (w,h), color=1)
pimg.fromstring(d)
return pimg
def PILTowx(pimg):
'''convert a PIL Image to a wx image'''
from wx_loader import wx
wimg = wx.EmptyImage(pimg.size[0], pimg.size[1])
wimg.SetData(pimg.convert('RGB').tostring())
return wimg
def dot_mavproxy(name):
'''return a path to store mavproxy data'''
dir = os.path.join(os.environ['HOME'], '.mavproxy')
mkdir_p(dir)
return os.path.join(dir, name)
def download_url(url):
'''download a URL and return the content'''
import urllib2
try:
resp = urllib2.urlopen(url)
headers = resp.info()
except urllib2.URLError as e:
print('Error downloading %s' % url)
return None
return resp.read()
def download_files(files):
'''download an array of files'''
for (url, file) in files:
print("Downloading %s as %s" % (url, file))
data = download_url(url)
if data is None:
continue
try:
open(file, mode='w').write(data)
except Exception as e:
print("Failed to save to %s : %s" % (file, e))
child_fd_list = []
def child_close_fds():
'''close file descriptors that a child process should not inherit.
Should be called from child processes.'''
global child_fd_list
import os
while len(child_fd_list) > 0:
fd = child_fd_list.pop(0)
try:
os.close(fd)
except Exception as msg:
pass
def child_fd_list_add(fd):
    '''add a file descriptor to the list to be closed in child processes'''
global child_fd_list
child_fd_list.append(fd)
def child_fd_list_remove(fd):
    '''remove a file descriptor from the list to be closed in child processes'''
global child_fd_list
try:
child_fd_list.remove(fd)
except Exception:
pass
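if __name__ == '__main__':
    # Quick sanity sketch of the great-circle helpers above; the coordinates
    # are arbitrary test values and the printed results are approximate.
    d = gps_distance(-35.0, 149.0, -35.0, 150.0)   # roughly 91 km along the parallel
    b = gps_bearing(-35.0, 149.0, -35.0, 150.0)    # roughly east (close to 90 degrees)
    (lat2, lon2) = gps_newpos(-35.0, 149.0, b, d)  # should land near (-35.0, 150.0)
    print("distance=%.1fm bearing=%.1fdeg newpos=(%.4f, %.4f)" % (d, b, lat2, lon2))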
|
marckuz/django
|
refs/heads/master
|
tests/template_tests/test_logging.py
|
117
|
from __future__ import unicode_literals
import logging
from django.template import Context, Engine, Variable, VariableDoesNotExist
from django.test import SimpleTestCase
class TestHandler(logging.Handler):
def __init__(self):
super(TestHandler, self).__init__()
self.log_record = None
def emit(self, record):
self.log_record = record
class BaseTemplateLoggingTestCase(SimpleTestCase):
def setUp(self):
self.test_handler = TestHandler()
self.logger = logging.getLogger('django.template')
self.original_level = self.logger.level
self.logger.addHandler(self.test_handler)
self.logger.setLevel(self.loglevel)
def tearDown(self):
self.logger.removeHandler(self.test_handler)
self.logger.level = self.original_level
class VariableResolveLoggingTests(BaseTemplateLoggingTestCase):
loglevel = logging.DEBUG
def test_log_on_variable_does_not_exist_silent(self):
class TestObject(object):
class SilentDoesNotExist(Exception):
silent_variable_failure = True
@property
def template_name(self):
return "template_name"
@property
def template(self):
return Engine().from_string('')
@property
def article(self):
raise TestObject.SilentDoesNotExist("Attribute does not exist.")
def __iter__(self):
return iter(attr for attr in dir(TestObject) if attr[:2] != "__")
def __getitem__(self, item):
return self.__dict__[item]
Variable('article').resolve(TestObject())
self.assertEqual(
self.test_handler.log_record.getMessage(),
"Exception while resolving variable 'article' in template 'template_name'."
)
self.assertIsNotNone(self.test_handler.log_record.exc_info)
raised_exception = self.test_handler.log_record.exc_info[1]
self.assertEqual(str(raised_exception), 'Attribute does not exist.')
def test_log_on_variable_does_not_exist_not_silent(self):
with self.assertRaises(VariableDoesNotExist):
Variable('article.author').resolve({'article': {'section': 'News'}})
self.assertEqual(
self.test_handler.log_record.getMessage(),
"Exception while resolving variable 'author' in template 'unknown'."
)
self.assertIsNotNone(self.test_handler.log_record.exc_info)
raised_exception = self.test_handler.log_record.exc_info[1]
self.assertEqual(
str(raised_exception),
'Failed lookup for key [author] in %r' % ("{%r: %r}" % ('section', 'News'))
)
def test_no_log_when_variable_exists(self):
Variable('article.section').resolve({'article': {'section': 'News'}})
self.assertIsNone(self.test_handler.log_record)
class IncludeNodeLoggingTests(BaseTemplateLoggingTestCase):
loglevel = logging.WARN
@classmethod
def setUpClass(cls):
super(IncludeNodeLoggingTests, cls).setUpClass()
cls.engine = Engine(loaders=[
('django.template.loaders.locmem.Loader', {
'child': '{{ raises_exception }}',
}),
], debug=False)
def error_method():
raise IndexError("some generic exception")
cls.ctx = Context({'raises_exception': error_method})
def test_logs_exceptions_during_rendering_with_debug_disabled(self):
template = self.engine.from_string('{% include "child" %}')
template.name = 'template_name'
self.assertEqual(template.render(self.ctx), '')
self.assertEqual(
self.test_handler.log_record.getMessage(),
"Exception raised while rendering {% include %} for template "
"'template_name'. Empty string rendered instead."
)
self.assertIsNotNone(self.test_handler.log_record.exc_info)
self.assertEqual(self.test_handler.log_record.levelno, logging.WARN)
def test_logs_exceptions_during_rendering_with_no_template_name(self):
template = self.engine.from_string('{% include "child" %}')
self.assertEqual(template.render(self.ctx), '')
self.assertEqual(
self.test_handler.log_record.getMessage(),
"Exception raised while rendering {% include %} for template "
"'unknown'. Empty string rendered instead."
)
self.assertIsNotNone(self.test_handler.log_record.exc_info)
self.assertEqual(self.test_handler.log_record.levelno, logging.WARN)
|
gylian/Sick-Beard
|
refs/heads/master
|
lib/unidecode/x00c.py
|
252
|
data = (
'[?]', # 0x00
'N', # 0x01
'N', # 0x02
'H', # 0x03
'[?]', # 0x04
'a', # 0x05
'aa', # 0x06
'i', # 0x07
'ii', # 0x08
'u', # 0x09
'uu', # 0x0a
'R', # 0x0b
'L', # 0x0c
'[?]', # 0x0d
'e', # 0x0e
'ee', # 0x0f
'ai', # 0x10
'[?]', # 0x11
'o', # 0x12
'oo', # 0x13
'au', # 0x14
'k', # 0x15
'kh', # 0x16
'g', # 0x17
'gh', # 0x18
'ng', # 0x19
'c', # 0x1a
'ch', # 0x1b
'j', # 0x1c
'jh', # 0x1d
'ny', # 0x1e
'tt', # 0x1f
'tth', # 0x20
'dd', # 0x21
'ddh', # 0x22
'nn', # 0x23
't', # 0x24
'th', # 0x25
'd', # 0x26
'dh', # 0x27
'n', # 0x28
'[?]', # 0x29
'p', # 0x2a
'ph', # 0x2b
'b', # 0x2c
'bh', # 0x2d
'm', # 0x2e
'y', # 0x2f
'r', # 0x30
'rr', # 0x31
'l', # 0x32
'll', # 0x33
'[?]', # 0x34
'v', # 0x35
'sh', # 0x36
'ss', # 0x37
's', # 0x38
'h', # 0x39
'[?]', # 0x3a
'[?]', # 0x3b
'[?]', # 0x3c
'[?]', # 0x3d
'aa', # 0x3e
'i', # 0x3f
'ii', # 0x40
'u', # 0x41
'uu', # 0x42
'R', # 0x43
'RR', # 0x44
'[?]', # 0x45
'e', # 0x46
'ee', # 0x47
'ai', # 0x48
'[?]', # 0x49
'o', # 0x4a
'oo', # 0x4b
'au', # 0x4c
'', # 0x4d
'[?]', # 0x4e
'[?]', # 0x4f
'[?]', # 0x50
'[?]', # 0x51
'[?]', # 0x52
'[?]', # 0x53
'[?]', # 0x54
'+', # 0x55
'+', # 0x56
'[?]', # 0x57
'[?]', # 0x58
'[?]', # 0x59
'[?]', # 0x5a
'[?]', # 0x5b
'[?]', # 0x5c
'[?]', # 0x5d
'[?]', # 0x5e
'[?]', # 0x5f
'RR', # 0x60
'LL', # 0x61
'[?]', # 0x62
'[?]', # 0x63
'[?]', # 0x64
'[?]', # 0x65
'0', # 0x66
'1', # 0x67
'2', # 0x68
'3', # 0x69
'4', # 0x6a
'5', # 0x6b
'6', # 0x6c
'7', # 0x6d
'8', # 0x6e
'9', # 0x6f
'[?]', # 0x70
'[?]', # 0x71
'[?]', # 0x72
'[?]', # 0x73
'[?]', # 0x74
'[?]', # 0x75
'[?]', # 0x76
'[?]', # 0x77
'[?]', # 0x78
'[?]', # 0x79
'[?]', # 0x7a
'[?]', # 0x7b
'[?]', # 0x7c
'[?]', # 0x7d
'[?]', # 0x7e
'[?]', # 0x7f
'[?]', # 0x80
'[?]', # 0x81
'N', # 0x82
'H', # 0x83
'[?]', # 0x84
'a', # 0x85
'aa', # 0x86
'i', # 0x87
'ii', # 0x88
'u', # 0x89
'uu', # 0x8a
'R', # 0x8b
'L', # 0x8c
'[?]', # 0x8d
'e', # 0x8e
'ee', # 0x8f
'ai', # 0x90
'[?]', # 0x91
'o', # 0x92
'oo', # 0x93
'au', # 0x94
'k', # 0x95
'kh', # 0x96
'g', # 0x97
'gh', # 0x98
'ng', # 0x99
'c', # 0x9a
'ch', # 0x9b
'j', # 0x9c
'jh', # 0x9d
'ny', # 0x9e
'tt', # 0x9f
'tth', # 0xa0
'dd', # 0xa1
'ddh', # 0xa2
'nn', # 0xa3
't', # 0xa4
'th', # 0xa5
'd', # 0xa6
'dh', # 0xa7
'n', # 0xa8
'[?]', # 0xa9
'p', # 0xaa
'ph', # 0xab
'b', # 0xac
'bh', # 0xad
'm', # 0xae
'y', # 0xaf
'r', # 0xb0
'rr', # 0xb1
'l', # 0xb2
'll', # 0xb3
'[?]', # 0xb4
'v', # 0xb5
'sh', # 0xb6
'ss', # 0xb7
's', # 0xb8
'h', # 0xb9
'[?]', # 0xba
'[?]', # 0xbb
'[?]', # 0xbc
'[?]', # 0xbd
'aa', # 0xbe
'i', # 0xbf
'ii', # 0xc0
'u', # 0xc1
'uu', # 0xc2
'R', # 0xc3
'RR', # 0xc4
'[?]', # 0xc5
'e', # 0xc6
'ee', # 0xc7
'ai', # 0xc8
'[?]', # 0xc9
'o', # 0xca
'oo', # 0xcb
'au', # 0xcc
'', # 0xcd
'[?]', # 0xce
'[?]', # 0xcf
'[?]', # 0xd0
'[?]', # 0xd1
'[?]', # 0xd2
'[?]', # 0xd3
'[?]', # 0xd4
'+', # 0xd5
'+', # 0xd6
'[?]', # 0xd7
'[?]', # 0xd8
'[?]', # 0xd9
'[?]', # 0xda
'[?]', # 0xdb
'[?]', # 0xdc
'[?]', # 0xdd
'lll', # 0xde
'[?]', # 0xdf
'RR', # 0xe0
'LL', # 0xe1
'[?]', # 0xe2
'[?]', # 0xe3
'[?]', # 0xe4
'[?]', # 0xe5
'0', # 0xe6
'1', # 0xe7
'2', # 0xe8
'3', # 0xe9
'4', # 0xea
'5', # 0xeb
'6', # 0xec
'7', # 0xed
'8', # 0xee
'9', # 0xef
'[?]', # 0xf0
'[?]', # 0xf1
'[?]', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
|
alanswanson/webserver
|
refs/heads/master
|
qa/292-HSTS1.py
|
5
|
from base import *
NICK = "test-2920"
MAX_AGE = 123456
CONF = """
vserver!2920!nick = %(NICK)s
vserver!2920!document_root = %(droot)s
vserver!2920!hsts = 1
vserver!2920!hsts!max_age = %(MAX_AGE)s
vserver!2920!rule!1!match = default
vserver!2920!rule!1!handler = dirlist
"""
# 6.2. HTTP Request Type
#
# If a HSTS Host receives a HTTP request message over a non-secure
# transport, it SHOULD send a HTTP response message containing a
# Status-Code of 301 and a Location header field value containing
# either the HTTP request's original Effective Request URI (see
# Section 12 "Constructing an Effective Request URI", below) altered as
# necessary to have a URI scheme of "https", or a URI generated
# according to local policy (which SHOULD employ a URI scheme of
# "https").
class Test (TestBase):
def __init__ (self):
TestBase.__init__ (self, __file__)
self.name = "HSTS: Error code and Header"
        self.request = "GET / HTTP/1.0\r\n" + \
"Host: %s\r\n" %(NICK)
self.expected_error = 301
self.expected_content = ["https://"]
def Prepare (self, www):
droot = self.Mkdir (www, "%s_droot"%(NICK))
vars = globals()
vars.update(locals())
self.conf = CONF %(vars)
def Precondition (self):
return not self.is_ssl
|
dhuang/incubator-airflow
|
refs/heads/master
|
tests/contrib/operators/test_file_to_wasb.py
|
38
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import unittest
from airflow import DAG, configuration
from airflow.contrib.operators.file_to_wasb import FileToWasbOperator
try:
from unittest import mock
except ImportError:
try:
import mock
except ImportError:
mock = None
class TestFileToWasbOperator(unittest.TestCase):
_config = {
'file_path': 'file',
'container_name': 'container',
'blob_name': 'blob',
'wasb_conn_id': 'wasb_default',
'retries': 3,
}
def setUp(self):
configuration.load_test_config()
args = {
'owner': 'airflow',
'start_date': datetime.datetime(2017, 1, 1)
}
self.dag = DAG('test_dag_id', default_args=args)
def test_init(self):
operator = FileToWasbOperator(
task_id='wasb_operator',
dag=self.dag,
**self._config
)
self.assertEqual(operator.file_path, self._config['file_path'])
self.assertEqual(operator.container_name,
self._config['container_name'])
self.assertEqual(operator.blob_name, self._config['blob_name'])
self.assertEqual(operator.wasb_conn_id, self._config['wasb_conn_id'])
self.assertEqual(operator.load_options, {})
self.assertEqual(operator.retries, self._config['retries'])
operator = FileToWasbOperator(
task_id='wasb_operator',
dag=self.dag,
load_options={'timeout': 2},
**self._config
)
self.assertEqual(operator.load_options, {'timeout': 2})
@mock.patch('airflow.contrib.operators.file_to_wasb.WasbHook',
autospec=True)
def test_execute(self, mock_hook):
mock_instance = mock_hook.return_value
operator = FileToWasbOperator(
task_id='wasb_sensor',
dag=self.dag,
load_options={'timeout': 2},
**self._config
)
operator.execute(None)
mock_instance.load_file.assert_called_once_with(
'file', 'container', 'blob', timeout=2
)
if __name__ == '__main__':
unittest.main()
|
nkgilley/home-assistant
|
refs/heads/dev
|
tests/util/test_async.py
|
5
|
"""Tests for async util methods from Python source."""
import asyncio
from unittest import TestCase
import pytest
from homeassistant.util import async_ as hasync
from tests.async_mock import MagicMock, Mock, patch
@patch("asyncio.coroutines.iscoroutine")
@patch("concurrent.futures.Future")
@patch("threading.get_ident")
def test_fire_coroutine_threadsafe_from_inside_event_loop(
mock_ident, _, mock_iscoroutine
):
"""Testing calling fire_coroutine_threadsafe from inside an event loop."""
coro = MagicMock()
loop = MagicMock()
loop._thread_ident = None
mock_ident.return_value = 5
mock_iscoroutine.return_value = True
hasync.fire_coroutine_threadsafe(coro, loop)
assert len(loop.call_soon_threadsafe.mock_calls) == 1
loop._thread_ident = 5
mock_ident.return_value = 5
mock_iscoroutine.return_value = True
with pytest.raises(RuntimeError):
hasync.fire_coroutine_threadsafe(coro, loop)
assert len(loop.call_soon_threadsafe.mock_calls) == 1
loop._thread_ident = 1
mock_ident.return_value = 5
mock_iscoroutine.return_value = False
with pytest.raises(TypeError):
hasync.fire_coroutine_threadsafe(coro, loop)
assert len(loop.call_soon_threadsafe.mock_calls) == 1
loop._thread_ident = 1
mock_ident.return_value = 5
mock_iscoroutine.return_value = True
hasync.fire_coroutine_threadsafe(coro, loop)
assert len(loop.call_soon_threadsafe.mock_calls) == 2
@patch("concurrent.futures.Future")
@patch("threading.get_ident")
def test_run_callback_threadsafe_from_inside_event_loop(mock_ident, _):
"""Testing calling run_callback_threadsafe from inside an event loop."""
callback = MagicMock()
loop = MagicMock()
loop._thread_ident = None
mock_ident.return_value = 5
hasync.run_callback_threadsafe(loop, callback)
assert len(loop.call_soon_threadsafe.mock_calls) == 1
loop._thread_ident = 5
mock_ident.return_value = 5
with pytest.raises(RuntimeError):
hasync.run_callback_threadsafe(loop, callback)
assert len(loop.call_soon_threadsafe.mock_calls) == 1
loop._thread_ident = 1
mock_ident.return_value = 5
hasync.run_callback_threadsafe(loop, callback)
assert len(loop.call_soon_threadsafe.mock_calls) == 2
class RunThreadsafeTests(TestCase):
"""Test case for hasync.run_coroutine_threadsafe."""
def setUp(self):
"""Test setup method."""
self.loop = asyncio.new_event_loop()
def tearDown(self):
"""Test teardown method."""
executor = self.loop._default_executor
if executor is not None:
executor.shutdown(wait=True)
self.loop.close()
@staticmethod
def run_briefly(loop):
"""Momentarily run a coroutine on the given loop."""
@asyncio.coroutine
def once():
pass
gen = once()
t = loop.create_task(gen)
try:
loop.run_until_complete(t)
finally:
gen.close()
def add_callback(self, a, b, fail, invalid):
"""Return a + b."""
if fail:
raise RuntimeError("Fail!")
if invalid:
raise ValueError("Invalid!")
return a + b
@asyncio.coroutine
def add_coroutine(self, a, b, fail, invalid, cancel):
"""Wait 0.05 second and return a + b."""
yield from asyncio.sleep(0.05, loop=self.loop)
if cancel:
asyncio.current_task(self.loop).cancel()
yield
return self.add_callback(a, b, fail, invalid)
def target_callback(self, fail=False, invalid=False):
"""Run add callback in the event loop."""
future = hasync.run_callback_threadsafe(
self.loop, self.add_callback, 1, 2, fail, invalid
)
try:
return future.result()
finally:
future.done() or future.cancel()
def target_coroutine(
self, fail=False, invalid=False, cancel=False, timeout=None, advance_coro=False
):
"""Run add coroutine in the event loop."""
coro = self.add_coroutine(1, 2, fail, invalid, cancel)
future = hasync.run_coroutine_threadsafe(coro, self.loop)
if advance_coro:
# this is for test_run_coroutine_threadsafe_task_factory_exception;
# otherwise it spills errors and breaks **other** unittests, since
# 'target_coroutine' is interacting with threads.
# With this call, `coro` will be advanced, so that
# CoroWrapper.__del__ won't do anything when asyncio tests run
# in debug mode.
self.loop.call_soon_threadsafe(coro.send, None)
try:
return future.result(timeout)
finally:
future.done() or future.cancel()
def test_run_callback_threadsafe(self):
"""Test callback submission from a thread to an event loop."""
future = self.loop.run_in_executor(None, self.target_callback)
result = self.loop.run_until_complete(future)
self.assertEqual(result, 3)
def test_run_callback_threadsafe_with_exception(self):
"""Test callback submission from thread to event loop on exception."""
future = self.loop.run_in_executor(None, self.target_callback, True)
with self.assertRaises(RuntimeError) as exc_context:
self.loop.run_until_complete(future)
self.assertIn("Fail!", exc_context.exception.args)
def test_run_callback_threadsafe_with_invalid(self):
"""Test callback submission from thread to event loop on invalid."""
callback = lambda: self.target_callback(invalid=True) # noqa: E731
future = self.loop.run_in_executor(None, callback)
with self.assertRaises(ValueError) as exc_context:
self.loop.run_until_complete(future)
self.assertIn("Invalid!", exc_context.exception.args)
async def test_check_loop_async():
"""Test check_loop detects when called from event loop without integration context."""
with pytest.raises(RuntimeError):
hasync.check_loop()
async def test_check_loop_async_integration(caplog):
"""Test check_loop detects when called from event loop from integration context."""
with patch(
"homeassistant.util.async_.extract_stack",
return_value=[
Mock(
filename="/home/paulus/homeassistant/core.py",
lineno="23",
line="do_something()",
),
Mock(
filename="/home/paulus/homeassistant/components/hue/light.py",
lineno="23",
line="self.light.is_on",
),
Mock(
filename="/home/paulus/aiohue/lights.py",
lineno="2",
line="something()",
),
],
):
hasync.check_loop()
assert (
"Detected I/O inside the event loop. This is causing stability issues. Please report issue for hue doing I/O at homeassistant/components/hue/light.py, line 23: self.light.is_on"
in caplog.text
)
async def test_check_loop_async_custom(caplog):
"""Test check_loop detects when called from event loop with custom component context."""
with patch(
"homeassistant.util.async_.extract_stack",
return_value=[
Mock(
filename="/home/paulus/homeassistant/core.py",
lineno="23",
line="do_something()",
),
Mock(
filename="/home/paulus/config/custom_components/hue/light.py",
lineno="23",
line="self.light.is_on",
),
Mock(
filename="/home/paulus/aiohue/lights.py",
lineno="2",
line="something()",
),
],
):
hasync.check_loop()
assert (
"Detected I/O inside the event loop. This is causing stability issues. Please report issue to the custom component author for hue doing I/O at custom_components/hue/light.py, line 23: self.light.is_on"
in caplog.text
)
def test_check_loop_sync(caplog):
"""Test check_loop does nothing when called from thread."""
hasync.check_loop()
assert "Detected I/O inside the event loop" not in caplog.text
def test_protect_loop_sync():
"""Test protect_loop calls check_loop."""
calls = []
with patch("homeassistant.util.async_.check_loop") as mock_loop:
hasync.protect_loop(calls.append)(1)
assert len(mock_loop.mock_calls) == 1
assert calls == [1]
|
IvannaBesarab/cit-continue
|
refs/heads/master
|
config.py
|
2
|
import os
def database_uri(host, username, password, db_name):
return 'postgresql+psycopg2://{username}:{password}@{host}/{db_name}'. \
format(**{'db_name': db_name, 'host': host,
'username': username,
'password': password})
class Config(object):
# Statement for enabling the development environment
DEBUG = False
TESTING = False
# Application threads. A common general assumption is
# using 2 per available processor cores - to handle
# incoming requests using one and performing background
# operations using the other.
THREADS_PER_PAGE = 2
# Define the application directory
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
UPLOAD_FOLDER = os.path.join(BASE_DIR, 'media')
SQLALCHEMY_COMMIT_ON_TEARDOWN = True
DATABASE_CONNECT_OPTIONS = {}
# Enable protection against *Cross-site Request Forgery (CSRF)*
CSRF_ENABLED = True
host = 'localhost'
username = 'cituser'
password = 'citpasswd'
# Secret key for signing cookies
SECRET_KEY = "jd&%G#43WG~dn6"
# Facebook settings
CONSUMER_KEY = '597071850435446'
CONSUMER_SECRET = 'c0e023b09461c502cd3cd7121d205735'
class ProductionDevelopmentConfig(Config):
#Define database connection parameters
host = os.getenv('OPENSHIFT_POSTGRESQL_DB_HOST', Config.host)
username = os.getenv('OPENSHIFT_POSTGRESQL_DB_USERNAME', Config.username)
password = os.getenv('OPENSHIFT_POSTGRESQL_DB_PASSWORD', Config.password)
db_name = os.getenv('OPENSHIFT_APP_NAME', 'cit')
# Define production database
SQLALCHEMY_DATABASE_URI = \
database_uri(host, username, password, db_name)
# Use a secure, unique and absolutely secret key for
# signing the data.
CSRF_SESSION_KEY = os.getenv('OPENSHIFT_CSRF_SESSION_KEY', None)
# Secret key for signing cookies
SECRET_KEY = os.getenv('OPENSHIFT_SECRET_KEY', Config.SECRET_KEY)
SITE_TITLE = os.getenv('OPENSHIFT_SITE_TITLE', 'Hi, Developer :)')
# Facebook settings
CONSUMER_KEY = os.getenv('OPENSHIFT_CONSUMER_KEY', Config.CONSUMER_KEY)
CONSUMER_SECRET = os.getenv('OPENSHIFT_CONSUMER_SECRET',
Config.CONSUMER_SECRET)
if 'OPENSHIFT_POSTGRESQL_DB_HOST' not in os.environ.keys():
# Statement for enabling the development environment
DEBUG = True
# Enable protection against *Cross-site Request Forgery (CSRF)*
CSRF_ENABLED = False
class TestingConfig(Config):
# Statement for enabling the development environment
DEBUG = True
TESTING = True
# Disable protection against *Cross-site Request Forgery (CSRF)*
CSRF_ENABLED = False
#Define database connection parameters
db_name = 'cit_test'
# Define the database - we are working with
SQLALCHEMY_DATABASE_URI = \
database_uri(Config.host, Config.username, Config.password, db_name)
# Secret key for signing cookies
SECRET_KEY = Config.SECRET_KEY
SITE_TITLE = "TEST"
# Facebook settings
CONSUMER_KEY = Config.CONSUMER_KEY
CONSUMER_SECRET = Config.CONSUMER_SECRET
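if __name__ == '__main__':
    # Minimal sketch: database_uri() only formats the connection string, so it
    # can be printed without a running PostgreSQL server. The values are the
    # defaults defined on Config above and the 'cit' database name used by
    # ProductionDevelopmentConfig.
    print(database_uri(Config.host, Config.username, Config.password, 'cit'))
    # -> postgresql+psycopg2://cituser:citpasswd@localhost/cit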
|
chenyyx/scikit-learn-doc-zh
|
refs/heads/0.19.X
|
examples/en/cluster/plot_kmeans_digits.py
|
46
|
"""
===========================================================
A demo of K-Means clustering on the handwritten digits data
===========================================================
In this example we compare the various initialization strategies for
K-means in terms of runtime and quality of the results.
As the ground truth is known here, we also apply different cluster
quality metrics to judge the goodness of fit of the cluster labels to the
ground truth.
Cluster quality metrics evaluated (see :ref:`clustering_evaluation` for
definitions and discussions of the metrics):
=========== ========================================================
Shorthand full name
=========== ========================================================
homo homogeneity score
compl completeness score
v-meas V measure
ARI adjusted Rand index
AMI adjusted mutual information
silhouette silhouette coefficient
=========== ========================================================
"""
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
np.random.seed(42)
digits = load_digits()
data = scale(digits.data)
n_samples, n_features = data.shape
n_digits = len(np.unique(digits.target))
labels = digits.target
sample_size = 300
print("n_digits: %d, \t n_samples %d, \t n_features %d"
% (n_digits, n_samples, n_features))
print(82 * '_')
print('init\t\ttime\tinertia\thomo\tcompl\tv-meas\tARI\tAMI\tsilhouette')
def bench_k_means(estimator, name, data):
t0 = time()
estimator.fit(data)
print('%-9s\t%.2fs\t%i\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f'
% (name, (time() - t0), estimator.inertia_,
metrics.homogeneity_score(labels, estimator.labels_),
metrics.completeness_score(labels, estimator.labels_),
metrics.v_measure_score(labels, estimator.labels_),
metrics.adjusted_rand_score(labels, estimator.labels_),
metrics.adjusted_mutual_info_score(labels, estimator.labels_),
metrics.silhouette_score(data, estimator.labels_,
metric='euclidean',
sample_size=sample_size)))
bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
name="k-means++", data=data)
bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
name="random", data=data)
# in this case the seeding of the centers is deterministic, hence we run the
# kmeans algorithm only once with n_init=1
pca = PCA(n_components=n_digits).fit(data)
bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1),
name="PCA-based",
data=data)
print(82 * '_')
# #############################################################################
# Visualize the results on PCA-reduced data
reduced_data = PCA(n_components=2).fit_transform(data)
kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
kmeans.fit(reduced_data)
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02 # point in the mesh [x_min, x_max]x[y_min, y_max].
# Plot the decision boundary. For that, we will assign a color to each point in the mesh.
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=plt.cm.Paired,
aspect='auto', origin='lower')
plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
plt.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
'Centroids are marked with white cross')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
|
levigross/pyscanner
|
refs/heads/master
|
mytests/django/contrib/formtools/tests/wizard/namedwizardtests/forms.py
|
318
|
import os
import tempfile
from django import forms
from django.core.files.storage import FileSystemStorage
from django.forms.formsets import formset_factory
from django.http import HttpResponse
from django.template import Template, Context
from django.contrib.auth.models import User
from django.contrib.formtools.wizard.views import NamedUrlWizardView
temp_storage_location = tempfile.mkdtemp(dir=os.environ.get('DJANGO_TEST_TEMP_DIR'))
temp_storage = FileSystemStorage(location=temp_storage_location)
class Page1(forms.Form):
name = forms.CharField(max_length=100)
user = forms.ModelChoiceField(queryset=User.objects.all())
thirsty = forms.NullBooleanField()
class Page2(forms.Form):
address1 = forms.CharField(max_length=100)
address2 = forms.CharField(max_length=100)
file1 = forms.FileField()
class Page3(forms.Form):
random_crap = forms.CharField(max_length=100)
Page4 = formset_factory(Page3, extra=2)
class ContactWizard(NamedUrlWizardView):
file_storage = temp_storage
def done(self, form_list, **kwargs):
c = Context({
'form_list': [x.cleaned_data for x in form_list],
'all_cleaned_data': self.get_all_cleaned_data()
})
for form in self.form_list.keys():
c[form] = self.get_cleaned_data_for_step(form)
c['this_will_fail'] = self.get_cleaned_data_for_step('this_will_fail')
return HttpResponse(Template('').render(c))
class SessionContactWizard(ContactWizard):
storage_name = 'django.contrib.formtools.wizard.storage.session.SessionStorage'
class CookieContactWizard(ContactWizard):
storage_name = 'django.contrib.formtools.wizard.storage.cookie.CookieStorage'
|
praxigento/teq_test_db_schema_attrs
|
refs/heads/master
|
prxgt/repo/generator.py
|
1
|
__author__ = 'Alex Gusev <alex@flancer64.com>'
import random
import string
import prxgt.const as const
TYPE_DEC = const.ATTR_TYPE_DEC
TYPE_INT = const.ATTR_TYPE_INT
TYPE_STR = const.ATTR_TYPE_STR
TYPE_TXT = const.ATTR_TYPE_TXT
class Generator(object):
"""
Values generator for various types data.
Простой генератор, который возвращает значение для данных какого-либо типа.
Функция генерации значения может переопределяться через метод set_for_type
(type, function).
Переопределение функции сделано криво - это и не Java (с отдельным типом
параметра - setForType(TypeGenerator newOne)), и не JavaScript (пока что
я не знаю как сделать, чтобы внешняя функция стала "родным" методом для
объекта).
"""
def __init__(self):
self._type_gen = {
TYPE_DEC: _simple_dec,
TYPE_INT: _simple_int,
TYPE_STR: _simple_str,
TYPE_TXT: _simple_txt
}
pass
def set_for_type(self, type_, function_):
self._type_gen[type_] = function_
def get_value(self, type_):
result = self._type_gen[type_](self)
return result
"""
Simple generators bound to the types by default.
"""
def _simple_dec(self):
result = random.randint(0, 10000) / 100
return result
def _simple_int(self):
result = random.randint(0, 10)
return result
def _simple_str(self):
chars = string.ascii_letters + string.digits
result = ''.join(random.choice(chars) for _ in range(8))
return result
def _simple_txt(self):
chars = string.ascii_letters + string.digits + " "
result = ''.join(random.choice(chars) for _ in range(512))
return result
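# --- Editor's note: a hypothetical usage sketch, not part of the original
# module. It assumes prxgt.const provides the ATTR_TYPE_* constants imported
# above; names used here are illustrative only.
if __name__ == "__main__":
    gen = Generator()
    # default generators bound per type
    print(gen.get_value(TYPE_STR))   # e.g. 'aZ3kQ9xP'
    print(gen.get_value(TYPE_INT))   # e.g. 7
    # override the int generator; get_value() passes the Generator instance
    # to the stored callable, hence the single positional argument
    gen.set_for_type(TYPE_INT, lambda g: 42)
    print(gen.get_value(TYPE_INT))   # 42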
|
Ernesto99/odoo
|
refs/heads/8.0
|
addons/crm_helpdesk/report/__init__.py
|
442
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import crm_helpdesk_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
apixandru/intellij-community
|
refs/heads/master
|
python/testData/completion/matMul.py
|
79
|
class C:
def __matmul<caret>
|
apache/airflow
|
refs/heads/main
|
airflow/contrib/operators/snowflake_operator.py
|
2
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use :mod:`airflow.providers.snowflake.operators.snowflake`."""
import warnings
from airflow.providers.snowflake.operators.snowflake import SnowflakeOperator # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.snowflake.operators.snowflake`.",
DeprecationWarning,
stacklevel=2,
)
|
robinchenyu/imagepaste
|
refs/heads/master
|
lib/PIL/FontFile.py
|
51
|
#
# The Python Imaging Library
# $Id$
#
# base class for raster font file parsers
#
# history:
# 1997-06-05 fl created
# 1997-08-19 fl restrict image width
#
# Copyright (c) 1997-1998 by Secret Labs AB
# Copyright (c) 1997-1998 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
import os
from PIL import Image, _binary
WIDTH = 800
def puti16(fp, values):
# write network order (big-endian) 16-bit sequence
for v in values:
if v < 0:
v += 65536
fp.write(_binary.o16be(v))
##
# Base class for raster font file handlers.
class FontFile(object):
bitmap = None
def __init__(self):
self.info = {}
self.glyph = [None] * 256
def __getitem__(self, ix):
return self.glyph[ix]
def compile(self):
"Create metrics and bitmap"
if self.bitmap:
return
# create bitmap large enough to hold all data
h = w = maxwidth = 0
lines = 1
for glyph in self:
if glyph:
d, dst, src, im = glyph
h = max(h, src[3] - src[1])
w = w + (src[2] - src[0])
if w > WIDTH:
lines += 1
w = (src[2] - src[0])
maxwidth = max(maxwidth, w)
xsize = maxwidth
ysize = lines * h
if xsize == 0 and ysize == 0:
return ""
self.ysize = h
# paste glyphs into bitmap
self.bitmap = Image.new("1", (xsize, ysize))
self.metrics = [None] * 256
x = y = 0
for i in range(256):
glyph = self[i]
if glyph:
d, dst, src, im = glyph
xx = src[2] - src[0]
# yy = src[3] - src[1]
x0, y0 = x, y
x = x + xx
if x > WIDTH:
x, y = 0, y + h
x0, y0 = x, y
x = xx
s = src[0] + x0, src[1] + y0, src[2] + x0, src[3] + y0
self.bitmap.paste(im.crop(src), s)
# print chr(i), dst, s
self.metrics[i] = d, dst, s
def save(self, filename):
"Save font"
self.compile()
# font data
self.bitmap.save(os.path.splitext(filename)[0] + ".pbm", "PNG")
# font metrics
fp = open(os.path.splitext(filename)[0] + ".pil", "wb")
fp.write(b"PILfont\n")
fp.write((";;;;;;%d;\n" % self.ysize).encode('ascii')) # HACK!!!
fp.write(b"DATA\n")
for id in range(256):
m = self.metrics[id]
if not m:
puti16(fp, [0] * 10)
else:
puti16(fp, m[0] + m[1] + m[2])
fp.close()
# End of file
|
Kungbib/SOUhack
|
refs/heads/master
|
sousearch/webapp/sousearch/tests.py
|
24123
|
from django.test import TestCase
# Create your tests here.
|
isyippee/nova
|
refs/heads/master
|
nova/api/openstack/compute/server_password.py
|
33
|
# Copyright (c) 2012 Nebula, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The server password extension."""
from nova.api.metadata import password
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
ALIAS = 'os-server-password'
authorize = extensions.os_compute_authorizer(ALIAS)
class ServerPasswordController(wsgi.Controller):
"""The Server Password API controller for the OpenStack API."""
def __init__(self):
self.compute_api = compute.API(skip_policy_check=True)
@extensions.expected_errors(404)
def index(self, req, server_id):
context = req.environ['nova.context']
authorize(context)
instance = common.get_instance(self.compute_api, context, server_id)
passw = password.extract_password(instance)
return {'password': passw or ''}
@extensions.expected_errors(404)
@wsgi.response(204)
def clear(self, req, server_id):
"""Removes the encrypted server password from the metadata server
Note that this does not actually change the instance server
password.
"""
context = req.environ['nova.context']
authorize(context)
instance = common.get_instance(self.compute_api, context, server_id)
meta = password.convert_password(context, None)
instance.system_metadata.update(meta)
instance.save()
class ServerPassword(extensions.V21APIExtensionBase):
"""Server password support."""
name = "ServerPassword"
alias = ALIAS
version = 1
def get_resources(self):
resources = [
extensions.ResourceExtension(
ALIAS, ServerPasswordController(),
collection_actions={'clear': 'DELETE'},
parent=dict(member_name='server', collection_name='servers'))]
return resources
def get_controller_extensions(self):
return []
|
shenzebang/scikit-learn
|
refs/heads/master
|
sklearn/preprocessing/label.py
|
137
|
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Joel Nothman <joel.nothman@gmail.com>
# Hamzeh Alsalhi <ha258@cornell.edu>
# License: BSD 3 clause
from collections import defaultdict
import itertools
import array
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils.fixes import np_version
from ..utils.fixes import sparse_min_max
from ..utils.fixes import astype
from ..utils.fixes import in1d
from ..utils import column_or_1d
from ..utils.validation import check_array
from ..utils.validation import check_is_fitted
from ..utils.validation import _num_samples
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..externals import six
zip = six.moves.zip
map = six.moves.map
__all__ = [
'label_binarize',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
]
def _check_numpy_unicode_bug(labels):
"""Check that user is not subject to an old numpy bug
Fixed in master before 1.7.0:
https://github.com/numpy/numpy/pull/243
"""
if np_version[:3] < (1, 7, 0) and labels.dtype.kind == 'U':
raise RuntimeError("NumPy < 1.7.0 does not implement searchsorted"
" on unicode data correctly. Please upgrade"
" NumPy to use LabelEncoder with unicode inputs.")
class LabelEncoder(BaseEstimator, TransformerMixin):
"""Encode labels with value between 0 and n_classes-1.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Attributes
----------
classes_ : array of shape (n_class,)
Holds the label for each class.
Examples
--------
`LabelEncoder` can be used to normalize labels.
>>> from sklearn import preprocessing
>>> le = preprocessing.LabelEncoder()
>>> le.fit([1, 2, 2, 6])
LabelEncoder()
>>> le.classes_
array([1, 2, 6])
>>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS
array([0, 0, 1, 2]...)
>>> le.inverse_transform([0, 0, 1, 2])
array([1, 1, 2, 6])
It can also be used to transform non-numerical labels (as long as they are
hashable and comparable) to numerical labels.
>>> le = preprocessing.LabelEncoder()
>>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
LabelEncoder()
>>> list(le.classes_)
['amsterdam', 'paris', 'tokyo']
>>> le.transform(["tokyo", "tokyo", "paris"]) #doctest: +ELLIPSIS
array([2, 2, 1]...)
>>> list(le.inverse_transform([2, 2, 1]))
['tokyo', 'tokyo', 'paris']
"""
def fit(self, y):
"""Fit label encoder
Parameters
----------
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : returns an instance of self.
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_ = np.unique(y)
return self
def fit_transform(self, y):
"""Fit label encoder and return encoded labels
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_, y = np.unique(y, return_inverse=True)
return y
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
classes = np.unique(y)
_check_numpy_unicode_bug(classes)
if len(np.intersect1d(classes, self.classes_)) < len(classes):
diff = np.setdiff1d(classes, self.classes_)
raise ValueError("y contains new labels: %s" % str(diff))
return np.searchsorted(self.classes_, y)
def inverse_transform(self, y):
"""Transform labels back to original encoding.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
y : numpy array of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
diff = np.setdiff1d(y, np.arange(len(self.classes_)))
if len(diff):
raise ValueError("y contains new labels: %s" % str(diff))
y = np.asarray(y)
return self.classes_[y]
class LabelBinarizer(BaseEstimator, TransformerMixin):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
At learning time, this simply consists in learning one regressor
or binary classifier per class. In doing so, one needs to convert
multi-class labels to binary labels (belong or does not belong
to the class). LabelBinarizer makes this process easy with the
transform method.
At prediction time, one assigns the class for which the corresponding
model gave the greatest confidence. LabelBinarizer makes this easy
with the inverse_transform method.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Parameters
----------
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False)
True if the returned array from transform is desired to be in sparse
CSR format.
Attributes
----------
classes_ : array of shape [n_class]
Holds the label for each class.
y_type_ : str,
Represents the type of the target data as evaluated by
utils.multiclass.type_of_target. Possible types are 'continuous',
'continuous-multioutput', 'binary', 'multiclass',
'multiclass-multioutput', 'multilabel-indicator', and 'unknown'.
multilabel_ : boolean
True if the transformer was fitted on a multilabel rather than a
multiclass set of labels. The ``multilabel_`` attribute is deprecated
and will be removed in 0.18
sparse_input_ : boolean,
True if the input data to transform is given as a sparse matrix, False
otherwise.
indicator_matrix_ : str
'sparse' when the input data to transform is a multilabel-indicator and
is sparse, None otherwise. The ``indicator_matrix_`` attribute is
deprecated as of version 0.16 and will be removed in 0.18
Examples
--------
>>> from sklearn import preprocessing
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit([1, 2, 6, 4, 2])
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([1, 2, 4, 6])
>>> lb.transform([1, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
Binary targets transform to a column vector
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit_transform(['yes', 'no', 'no', 'yes'])
array([[1],
[0],
[0],
[1]])
Passing a 2D matrix for multilabel classification
>>> import numpy as np
>>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([0, 1, 2])
>>> lb.transform([0, 1, 2, 1])
array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0]])
See also
--------
label_binarize : function to perform the transform operation of
LabelBinarizer with fixed classes.
"""
def __init__(self, neg_label=0, pos_label=1, sparse_output=False):
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if sparse_output and (pos_label == 0 or neg_label != 0):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
self.neg_label = neg_label
self.pos_label = pos_label
self.sparse_output = sparse_output
def fit(self, y):
"""Fit label binarizer
Parameters
----------
y : numpy array of shape (n_samples,) or (n_samples, n_classes)
Target values. The 2-d matrix should only contain 0 and 1,
which represents multilabel classification.
Returns
-------
self : returns an instance of self.
"""
self.y_type_ = type_of_target(y)
if 'multioutput' in self.y_type_:
raise ValueError("Multioutput target data is not supported with "
"label binarization")
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
self.sparse_input_ = sp.issparse(y)
self.classes_ = unique_labels(y)
return self
def transform(self, y):
"""Transform multi-class labels to binary labels
The output of transform is sometimes referred to as the
1-of-K coding scheme.
Parameters
----------
y : numpy array or sparse matrix of shape (n_samples,) or
(n_samples, n_classes) Target values. The 2-d matrix should only
contain 0 and 1, which represents multilabel classification. Sparse
matrix can be CSR, CSC, COO, DOK, or LIL.
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
"""
check_is_fitted(self, 'classes_')
y_is_multilabel = type_of_target(y).startswith('multilabel')
if y_is_multilabel and not self.y_type_.startswith('multilabel'):
raise ValueError("The object was not fitted with multilabel"
" input.")
return label_binarize(y, self.classes_,
pos_label=self.pos_label,
neg_label=self.neg_label,
sparse_output=self.sparse_output)
def inverse_transform(self, Y, threshold=None):
"""Transform binary labels back to multi-class labels
Parameters
----------
Y : numpy array or sparse matrix with shape [n_samples, n_classes]
Target values. All sparse matrices are converted to CSR before
inverse transformation.
threshold : float or None
Threshold used in the binary and multi-label cases.
Use 0 when:
- Y contains the output of decision_function (classifier)
Use 0.5 when:
- Y contains the output of predict_proba
If None, the threshold is assumed to be half way between
neg_label and pos_label.
Returns
-------
y : numpy array or CSR matrix of shape [n_samples] Target values.
Notes
-----
In the case when the binary labels are fractional
(probabilistic), inverse_transform chooses the class with the
greatest value. Typically, this allows using the output of a
linear model's decision_function method directly as the input
of inverse_transform.
"""
check_is_fitted(self, 'classes_')
if threshold is None:
threshold = (self.pos_label + self.neg_label) / 2.
if self.y_type_ == "multiclass":
y_inv = _inverse_binarize_multiclass(Y, self.classes_)
else:
y_inv = _inverse_binarize_thresholding(Y, self.y_type_,
self.classes_, threshold)
if self.sparse_input_:
y_inv = sp.csr_matrix(y_inv)
elif sp.issparse(y_inv):
y_inv = y_inv.toarray()
return y_inv
def label_binarize(y, classes, neg_label=0, pos_label=1, sparse_output=False):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
This function makes it possible to compute this transformation for a
fixed set of class labels known ahead of time.
Parameters
----------
y : array-like
Sequence of integer labels or multilabel data to encode.
classes : array-like of shape [n_classes]
Uniquely holds the label for each class.
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
Examples
--------
>>> from sklearn.preprocessing import label_binarize
>>> label_binarize([1, 6], classes=[1, 2, 4, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
The class ordering is preserved:
>>> label_binarize([1, 6], classes=[1, 6, 4, 2])
array([[1, 0, 0, 0],
[0, 1, 0, 0]])
Binary targets transform to a column vector
>>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])
array([[1],
[0],
[0],
[1]])
See also
--------
LabelBinarizer : class used to wrap the functionality of label_binarize and
allow for fitting to classes independently of the transform operation
"""
if not isinstance(y, list):
# XXX Workaround that will be removed when list of list format is
# dropped
y = check_array(y, accept_sparse='csr', ensure_2d=False, dtype=None)
else:
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if (sparse_output and (pos_label == 0 or neg_label != 0)):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
# To account for pos_label == 0 in the dense case
pos_switch = pos_label == 0
if pos_switch:
pos_label = -neg_label
y_type = type_of_target(y)
if 'multioutput' in y_type:
raise ValueError("Multioutput target data is not supported with label "
"binarization")
if y_type == 'unknown':
raise ValueError("The type of target data is not known")
n_samples = y.shape[0] if sp.issparse(y) else len(y)
n_classes = len(classes)
classes = np.asarray(classes)
if y_type == "binary":
if len(classes) == 1:
Y = np.zeros((len(y), 1), dtype=np.int)
Y += neg_label
return Y
elif len(classes) >= 3:
y_type = "multiclass"
sorted_class = np.sort(classes)
if (y_type == "multilabel-indicator" and classes.size != y.shape[1]):
raise ValueError("classes {0} missmatch with the labels {1}"
"found in the data".format(classes, unique_labels(y)))
if y_type in ("binary", "multiclass"):
y = column_or_1d(y)
# pick out the known labels from y
y_in_classes = in1d(y, classes)
y_seen = y[y_in_classes]
indices = np.searchsorted(sorted_class, y_seen)
indptr = np.hstack((0, np.cumsum(y_in_classes)))
data = np.empty_like(indices)
data.fill(pos_label)
Y = sp.csr_matrix((data, indices, indptr),
shape=(n_samples, n_classes))
elif y_type == "multilabel-indicator":
Y = sp.csr_matrix(y)
if pos_label != 1:
data = np.empty_like(Y.data)
data.fill(pos_label)
Y.data = data
else:
raise ValueError("%s target data is not supported with label "
"binarization" % y_type)
if not sparse_output:
Y = Y.toarray()
Y = astype(Y, int, copy=False)
if neg_label != 0:
Y[Y == 0] = neg_label
if pos_switch:
Y[Y == pos_label] = 0
else:
Y.data = astype(Y.data, int, copy=False)
# preserve label ordering
if np.any(classes != sorted_class):
indices = np.searchsorted(sorted_class, classes)
Y = Y[:, indices]
if y_type == "binary":
if sparse_output:
Y = Y.getcol(-1)
else:
Y = Y[:, -1].reshape((-1, 1))
return Y
def _inverse_binarize_multiclass(y, classes):
"""Inverse label binarization transformation for multiclass.
Multiclass uses the maximal score instead of a threshold.
"""
classes = np.asarray(classes)
if sp.issparse(y):
# Find the argmax for each row in y where y is a CSR matrix
y = y.tocsr()
n_samples, n_outputs = y.shape
outputs = np.arange(n_outputs)
row_max = sparse_min_max(y, 1)[1]
row_nnz = np.diff(y.indptr)
y_data_repeated_max = np.repeat(row_max, row_nnz)
# picks out all indices obtaining the maximum per row
y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data)
# For corner case where last row has a max of 0
if row_max[-1] == 0:
y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)])
# Gets the index of the first argmax in each row from y_i_all_argmax
index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1])
# first argmax of each row
y_ind_ext = np.append(y.indices, [0])
y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]]
# Handle rows of all 0
y_i_argmax[np.where(row_nnz == 0)[0]] = 0
# Handles rows with max of 0 that contain negative numbers
samples = np.arange(n_samples)[(row_nnz > 0) &
(row_max.ravel() == 0)]
for i in samples:
ind = y.indices[y.indptr[i]:y.indptr[i + 1]]
y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0]
return classes[y_i_argmax]
else:
return classes.take(y.argmax(axis=1), mode="clip")
def _inverse_binarize_thresholding(y, output_type, classes, threshold):
"""Inverse label binarization transformation using thresholding."""
if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2:
raise ValueError("output_type='binary', but y.shape = {0}".
format(y.shape))
if output_type != "binary" and y.shape[1] != len(classes):
raise ValueError("The number of class is not equal to the number of "
"dimension of y.")
classes = np.asarray(classes)
# Perform thresholding
if sp.issparse(y):
if threshold > 0:
if y.format not in ('csr', 'csc'):
y = y.tocsr()
y.data = np.array(y.data > threshold, dtype=np.int)
y.eliminate_zeros()
else:
y = np.array(y.toarray() > threshold, dtype=np.int)
else:
y = np.array(y > threshold, dtype=np.int)
# Inverse transform data
if output_type == "binary":
if sp.issparse(y):
y = y.toarray()
if y.ndim == 2 and y.shape[1] == 2:
return classes[y[:, 1]]
else:
if len(classes) == 1:
y = np.empty(len(y), dtype=classes.dtype)
y.fill(classes[0])
return y
else:
return classes[y.ravel()]
elif output_type == "multilabel-indicator":
return y
else:
raise ValueError("{0} format is not supported".format(output_type))
class MultiLabelBinarizer(BaseEstimator, TransformerMixin):
"""Transform between iterable of iterables and a multilabel format
Although a list of sets or tuples is a very intuitive format for multilabel
data, it is unwieldy to process. This transformer converts between this
intuitive format and the supported multilabel format: a (samples x classes)
binary matrix indicating the presence of a class label.
Parameters
----------
classes : array-like of shape [n_classes] (optional)
Indicates an ordering for the class labels
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Attributes
----------
classes_ : array of labels
A copy of the `classes` parameter where provided,
or otherwise, the sorted set of classes found when fitting.
Examples
--------
>>> mlb = MultiLabelBinarizer()
>>> mlb.fit_transform([(1, 2), (3,)])
array([[1, 1, 0],
[0, 0, 1]])
>>> mlb.classes_
array([1, 2, 3])
>>> mlb.fit_transform([set(['sci-fi', 'thriller']), set(['comedy'])])
array([[0, 1, 1],
[1, 0, 0]])
>>> list(mlb.classes_)
['comedy', 'sci-fi', 'thriller']
"""
def __init__(self, classes=None, sparse_output=False):
self.classes = classes
self.sparse_output = sparse_output
def fit(self, y):
"""Fit the label sets binarizer, storing `classes_`
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
self : returns this MultiLabelBinarizer instance
"""
if self.classes is None:
classes = sorted(set(itertools.chain.from_iterable(y)))
else:
classes = self.classes
dtype = np.int if all(isinstance(c, int) for c in classes) else object
self.classes_ = np.empty(len(classes), dtype=dtype)
self.classes_[:] = classes
return self
def fit_transform(self, y):
"""Fit the label sets binarizer and transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
if self.classes is not None:
return self.fit(y).transform(y)
# Automatically increment on new class
class_mapping = defaultdict(int)
class_mapping.default_factory = class_mapping.__len__
yt = self._transform(y, class_mapping)
# sort classes and reorder columns
tmp = sorted(class_mapping, key=class_mapping.get)
# (make safe for tuples)
dtype = np.int if all(isinstance(c, int) for c in tmp) else object
class_mapping = np.empty(len(tmp), dtype=dtype)
class_mapping[:] = tmp
self.classes_, inverse = np.unique(class_mapping, return_inverse=True)
yt.indices = np.take(inverse, yt.indices)
if not self.sparse_output:
yt = yt.toarray()
return yt
def transform(self, y):
"""Transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
class_to_index = dict(zip(self.classes_, range(len(self.classes_))))
yt = self._transform(y, class_to_index)
if not self.sparse_output:
yt = yt.toarray()
return yt
def _transform(self, y, class_mapping):
"""Transforms the label sets with a given mapping
Parameters
----------
y : iterable of iterables
class_mapping : Mapping
Maps from label to column index in label indicator matrix
Returns
-------
y_indicator : sparse CSR matrix, shape (n_samples, n_classes)
Label indicator matrix
"""
indices = array.array('i')
indptr = array.array('i', [0])
for labels in y:
indices.extend(set(class_mapping[label] for label in labels))
indptr.append(len(indices))
data = np.ones(len(indices), dtype=int)
return sp.csr_matrix((data, indices, indptr),
shape=(len(indptr) - 1, len(class_mapping)))
def inverse_transform(self, yt):
"""Transform the given indicator matrix into label sets
Parameters
----------
yt : array or sparse matrix of shape (n_samples, n_classes)
A matrix containing only 1s and 0s.
Returns
-------
y : list of tuples
The set of labels for each sample such that `y[i]` consists of
`classes_[j]` for each `yt[i, j] == 1`.
"""
if yt.shape[1] != len(self.classes_):
raise ValueError('Expected indicator for {0} classes, but got {1}'
.format(len(self.classes_), yt.shape[1]))
if sp.issparse(yt):
yt = yt.tocsr()
if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:
raise ValueError('Expected only 0s and 1s in label indicator.')
return [tuple(self.classes_.take(yt.indices[start:end]))
for start, end in zip(yt.indptr[:-1], yt.indptr[1:])]
else:
unexpected = np.setdiff1d(yt, [0, 1])
if len(unexpected) > 0:
raise ValueError('Expected only 0s and 1s in label indicator. '
'Also got {0}'.format(unexpected))
return [tuple(self.classes_.compress(indicators)) for indicators
in yt]
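# --- Editor's note: a hypothetical round-trip sketch, not part of the original
# file. The class docstring above already covers fit_transform; this adds
# inverse_transform for completeness.
if __name__ == "__main__":
    mlb = MultiLabelBinarizer()
    yt = mlb.fit_transform([{"sci-fi", "thriller"}, {"comedy"}])
    print(mlb.classes_)               # ['comedy' 'sci-fi' 'thriller']
    print(mlb.inverse_transform(yt))  # [('sci-fi', 'thriller'), ('comedy',)]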
|
rickshawman/twitter
|
refs/heads/master
|
venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.py
|
915
|
from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
import re
from . import _base
from .. import ihatexml
from .. import constants
from ..constants import namespaces
from ..utils import moduleFactoryFactory
tag_regexp = re.compile("{([^}]*)}(.*)")
def getETreeBuilder(ElementTreeImplementation, fullTree=False):
ElementTree = ElementTreeImplementation
ElementTreeCommentType = ElementTree.Comment("asd").tag
class Element(_base.Node):
def __init__(self, name, namespace=None):
self._name = name
self._namespace = namespace
self._element = ElementTree.Element(self._getETreeTag(name,
namespace))
if namespace is None:
self.nameTuple = namespaces["html"], self._name
else:
self.nameTuple = self._namespace, self._name
self.parent = None
self._childNodes = []
self._flags = []
def _getETreeTag(self, name, namespace):
if namespace is None:
etree_tag = name
else:
etree_tag = "{%s}%s" % (namespace, name)
return etree_tag
def _setName(self, name):
self._name = name
self._element.tag = self._getETreeTag(self._name, self._namespace)
def _getName(self):
return self._name
name = property(_getName, _setName)
def _setNamespace(self, namespace):
self._namespace = namespace
self._element.tag = self._getETreeTag(self._name, self._namespace)
def _getNamespace(self):
return self._namespace
namespace = property(_getNamespace, _setNamespace)
def _getAttributes(self):
return self._element.attrib
def _setAttributes(self, attributes):
# Delete existing attributes first
# XXX - there may be a better way to do this...
for key in list(self._element.attrib.keys()):
del self._element.attrib[key]
for key, value in attributes.items():
if isinstance(key, tuple):
name = "{%s}%s" % (key[2], key[1])
else:
name = key
self._element.set(name, value)
attributes = property(_getAttributes, _setAttributes)
def _getChildNodes(self):
return self._childNodes
def _setChildNodes(self, value):
del self._element[:]
self._childNodes = []
for element in value:
self.insertChild(element)
childNodes = property(_getChildNodes, _setChildNodes)
def hasContent(self):
"""Return true if the node has children or text"""
return bool(self._element.text or len(self._element))
def appendChild(self, node):
self._childNodes.append(node)
self._element.append(node._element)
node.parent = self
def insertBefore(self, node, refNode):
index = list(self._element).index(refNode._element)
self._element.insert(index, node._element)
node.parent = self
def removeChild(self, node):
self._element.remove(node._element)
node.parent = None
def insertText(self, data, insertBefore=None):
if not(len(self._element)):
if not self._element.text:
self._element.text = ""
self._element.text += data
elif insertBefore is None:
# Insert the text as the tail of the last child element
if not self._element[-1].tail:
self._element[-1].tail = ""
self._element[-1].tail += data
else:
# Insert the text before the specified node
children = list(self._element)
index = children.index(insertBefore._element)
if index > 0:
if not self._element[index - 1].tail:
self._element[index - 1].tail = ""
self._element[index - 1].tail += data
else:
if not self._element.text:
self._element.text = ""
self._element.text += data
def cloneNode(self):
element = type(self)(self.name, self.namespace)
for name, value in self.attributes.items():
element.attributes[name] = value
return element
def reparentChildren(self, newParent):
if newParent.childNodes:
newParent.childNodes[-1]._element.tail += self._element.text
else:
if not newParent._element.text:
newParent._element.text = ""
if self._element.text is not None:
newParent._element.text += self._element.text
self._element.text = ""
_base.Node.reparentChildren(self, newParent)
class Comment(Element):
def __init__(self, data):
# Use the superclass constructor to set all properties on the
# wrapper element
self._element = ElementTree.Comment(data)
self.parent = None
self._childNodes = []
self._flags = []
def _getData(self):
return self._element.text
def _setData(self, value):
self._element.text = value
data = property(_getData, _setData)
class DocumentType(Element):
def __init__(self, name, publicId, systemId):
Element.__init__(self, "<!DOCTYPE>")
self._element.text = name
self.publicId = publicId
self.systemId = systemId
def _getPublicId(self):
return self._element.get("publicId", "")
def _setPublicId(self, value):
if value is not None:
self._element.set("publicId", value)
publicId = property(_getPublicId, _setPublicId)
def _getSystemId(self):
return self._element.get("systemId", "")
def _setSystemId(self, value):
if value is not None:
self._element.set("systemId", value)
systemId = property(_getSystemId, _setSystemId)
class Document(Element):
def __init__(self):
Element.__init__(self, "DOCUMENT_ROOT")
class DocumentFragment(Element):
def __init__(self):
Element.__init__(self, "DOCUMENT_FRAGMENT")
def testSerializer(element):
rv = []
def serializeElement(element, indent=0):
if not(hasattr(element, "tag")):
element = element.getroot()
if element.tag == "<!DOCTYPE>":
if element.get("publicId") or element.get("systemId"):
publicId = element.get("publicId") or ""
systemId = element.get("systemId") or ""
rv.append("""<!DOCTYPE %s "%s" "%s">""" %
(element.text, publicId, systemId))
else:
rv.append("<!DOCTYPE %s>" % (element.text,))
elif element.tag == "DOCUMENT_ROOT":
rv.append("#document")
if element.text is not None:
rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text))
if element.tail is not None:
raise TypeError("Document node cannot have tail")
if hasattr(element, "attrib") and len(element.attrib):
raise TypeError("Document node cannot have attributes")
elif element.tag == ElementTreeCommentType:
rv.append("|%s<!-- %s -->" % (' ' * indent, element.text))
else:
assert isinstance(element.tag, text_type), \
"Expected unicode, got %s, %s" % (type(element.tag), element.tag)
nsmatch = tag_regexp.match(element.tag)
if nsmatch is None:
name = element.tag
else:
ns, name = nsmatch.groups()
prefix = constants.prefixes[ns]
name = "%s %s" % (prefix, name)
rv.append("|%s<%s>" % (' ' * indent, name))
if hasattr(element, "attrib"):
attributes = []
for name, value in element.attrib.items():
nsmatch = tag_regexp.match(name)
if nsmatch is not None:
ns, name = nsmatch.groups()
prefix = constants.prefixes[ns]
attr_string = "%s %s" % (prefix, name)
else:
attr_string = name
attributes.append((attr_string, value))
for name, value in sorted(attributes):
rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value))
if element.text:
rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text))
indent += 2
for child in element:
serializeElement(child, indent)
if element.tail:
rv.append("|%s\"%s\"" % (' ' * (indent - 2), element.tail))
serializeElement(element, 0)
return "\n".join(rv)
def tostring(element):
"""Serialize an element and its child nodes to a string"""
rv = []
filter = ihatexml.InfosetFilter()
def serializeElement(element):
if isinstance(element, ElementTree.ElementTree):
element = element.getroot()
if element.tag == "<!DOCTYPE>":
if element.get("publicId") or element.get("systemId"):
publicId = element.get("publicId") or ""
systemId = element.get("systemId") or ""
rv.append("""<!DOCTYPE %s PUBLIC "%s" "%s">""" %
(element.text, publicId, systemId))
else:
rv.append("<!DOCTYPE %s>" % (element.text,))
elif element.tag == "DOCUMENT_ROOT":
if element.text is not None:
rv.append(element.text)
if element.tail is not None:
raise TypeError("Document node cannot have tail")
if hasattr(element, "attrib") and len(element.attrib):
raise TypeError("Document node cannot have attributes")
for child in element:
serializeElement(child)
elif element.tag == ElementTreeCommentType:
rv.append("<!--%s-->" % (element.text,))
else:
# This is assumed to be an ordinary element
if not element.attrib:
rv.append("<%s>" % (filter.fromXmlName(element.tag),))
else:
attr = " ".join(["%s=\"%s\"" % (
filter.fromXmlName(name), value)
for name, value in element.attrib.items()])
rv.append("<%s %s>" % (element.tag, attr))
if element.text:
rv.append(element.text)
for child in element:
serializeElement(child)
rv.append("</%s>" % (element.tag,))
if element.tail:
rv.append(element.tail)
serializeElement(element)
return "".join(rv)
class TreeBuilder(_base.TreeBuilder):
documentClass = Document
doctypeClass = DocumentType
elementClass = Element
commentClass = Comment
fragmentClass = DocumentFragment
implementation = ElementTreeImplementation
def testSerializer(self, element):
return testSerializer(element)
def getDocument(self):
if fullTree:
return self.document._element
else:
if self.defaultNamespace is not None:
return self.document._element.find(
"{%s}html" % self.defaultNamespace)
else:
return self.document._element.find("html")
def getFragment(self):
return _base.TreeBuilder.getFragment(self)._element
return locals()
getETreeModule = moduleFactoryFactory(getETreeBuilder)
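# --- Editor's note: a hypothetical usage sketch, not part of the original
# file. html5lib's treebuilders.getTreeBuilder("etree") reaches this module in
# roughly this way, pairing it with a concrete ElementTree implementation.
if __name__ == "__main__":  # pragma: no cover
    import xml.etree.ElementTree as ElementTree
    TreeBuilderClass = getETreeModule(ElementTree, fullTree=False).TreeBuilder
    builder = TreeBuilderClass(namespaceHTMLElements=True)
    print(builder)  # a TreeBuilder subclass bound to the chosen implementation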
|
x2Ident/x2Ident_test
|
refs/heads/master
|
mitmproxy/mitmproxy/protocol/base.py
|
2
|
from __future__ import absolute_import, print_function, division
import sys
import six
import netlib.exceptions
from mitmproxy import exceptions
from mitmproxy import models
class _LayerCodeCompletion(object):
"""
Dummy class that provides type hinting in PyCharm, which simplifies development a lot.
"""
def __init__(self, **mixin_args): # pragma: no cover
super(_LayerCodeCompletion, self).__init__(**mixin_args)
if True:
return
self.config = None
"""@type: mitmproxy.proxy.ProxyConfig"""
self.client_conn = None
"""@type: mitmproxy.models.ClientConnection"""
self.server_conn = None
"""@type: mitmproxy.models.ServerConnection"""
self.channel = None
"""@type: mitmproxy.controller.Channel"""
self.ctx = None
"""@type: mitmproxy.protocol.Layer"""
class Layer(_LayerCodeCompletion):
"""
Base class for all layers. All other protocol layers should inherit from this class.
"""
def __init__(self, ctx, **mixin_args):
"""
Each layer usually passes itself to its child layers as a context. Properties of the
context are transparently mapped to the layer, so that the following works:
.. code-block:: python
root_layer = Layer(None)
root_layer.client_conn = 42
sub_layer = Layer(root_layer)
print(sub_layer.client_conn) # 42
The root layer is passed a :py:class:`mitmproxy.proxy.RootContext` object,
which provides access to :py:attr:`.client_conn <mitmproxy.proxy.RootContext.client_conn>`,
:py:attr:`.next_layer <mitmproxy.proxy.RootContext.next_layer>` and other basic attributes.
Args:
ctx: The (read-only) parent layer / context.
"""
self.ctx = ctx
"""
The parent layer.
:type: :py:class:`Layer`
"""
super(Layer, self).__init__(**mixin_args)
def __call__(self):
"""Logic of the layer.
Returns:
Once the protocol has finished without exceptions.
Raises:
~mitmproxy.exceptions.ProtocolException: if an exception occurs. No other exceptions must be raised.
"""
raise NotImplementedError()
def __getattr__(self, name):
"""
Attributes not present on the current layer are looked up on the context.
"""
return getattr(self.ctx, name)
@property
def layers(self):
"""
List of all layers, including the current layer (``[self, self.ctx, self.ctx.ctx, ...]``)
"""
return [self] + self.ctx.layers
def __repr__(self):
return type(self).__name__
class ServerConnectionMixin(object):
"""
Mixin that provides a layer with the capabilities to manage a server connection.
The server address can be passed in the constructor or set by calling :py:meth:`set_server`.
Subclasses are responsible for calling :py:meth:`disconnect` before returning.
Recommended Usage:
.. code-block:: python
class MyLayer(Layer, ServerConnectionMixin):
def __call__(self):
try:
# Do something.
finally:
if self.server_conn:
self.disconnect()
"""
def __init__(self, server_address=None):
super(ServerConnectionMixin, self).__init__()
self.server_conn = models.ServerConnection(server_address, (self.config.options.listen_host, 0))
self.__check_self_connect()
def __check_self_connect(self):
"""
We try to protect the proxy from _accidentally_ connecting to itself,
e.g. because of a failed transparent lookup or an invalid configuration.
"""
address = self.server_conn.address
if address:
self_connect = (
address.port == self.config.options.listen_port and
address.host in ("localhost", "127.0.0.1", "::1")
)
if self_connect:
raise exceptions.ProtocolException(
"Invalid server address: {}\r\n"
"The proxy shall not connect to itself.".format(repr(address))
)
def set_server(self, address):
"""
Sets a new server address. If there is an existing connection, it will be closed.
"""
if self.server_conn:
self.disconnect()
self.log("Set new server address: " + repr(address), "debug")
self.server_conn.address = address
self.__check_self_connect()
def disconnect(self):
"""
Deletes (and closes) an existing server connection.
Must not be called if there is no existing connection.
"""
self.log("serverdisconnect", "debug", [repr(self.server_conn.address)])
address = self.server_conn.address
source_address = self.server_conn.source_address
self.server_conn.finish()
self.server_conn.close()
self.channel.tell("serverdisconnect", self.server_conn)
self.server_conn = models.ServerConnection(address, (source_address.host, 0))
def connect(self):
"""
Establishes a server connection.
Must not be called if there is an existing connection.
Raises:
~mitmproxy.exceptions.ProtocolException: if the connection could not be established.
"""
if not self.server_conn.address:
raise exceptions.ProtocolException("Cannot connect to server, no server address given.")
self.log("serverconnect", "debug", [repr(self.server_conn.address)])
self.channel.ask("serverconnect", self.server_conn)
try:
self.server_conn.connect()
except netlib.exceptions.TcpException as e:
six.reraise(
exceptions.ProtocolException,
exceptions.ProtocolException(
"Server connection to {} failed: {}".format(
repr(self.server_conn.address), str(e)
)
),
sys.exc_info()[2]
)
|
CredoReference/edx-platform
|
refs/heads/integration-hawthorn-qa
|
common/djangoapps/course_modes/migrations/0010_archived_suggested_prices_to_charfield.py
|
18
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import re
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('course_modes', '0009_suggested_prices_to_charfield'),
]
operations = [
migrations.AlterField(
model_name='coursemodesarchive',
name='suggested_prices',
field=models.CharField(default=b'', max_length=255, blank=True, validators=[django.core.validators.RegexValidator(re.compile('^[\\d,]+\\Z'), 'Enter only digits separated by commas.', 'invalid')]),
),
]
|
nzavagli/UnrealPy
|
refs/heads/master
|
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Scrapy-1.0.1/scrapy/contrib/throttle.py
|
144
|
import warnings
from scrapy.exceptions import ScrapyDeprecationWarning
warnings.warn("Module `scrapy.contrib.throttle` is deprecated, "
"use `scrapy.extensions.throttle` instead",
ScrapyDeprecationWarning, stacklevel=2)
from scrapy.extensions.throttle import *
|
Kent-Liang/django-polls
|
refs/heads/master
|
polls/migrations/0003_auto_20151108_1453.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('polls', '0002_auto_20151108_1437'),
]
operations = [
migrations.CreateModel(
name='Option',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('choice_text', models.CharField(max_length=200)),
('votes', models.IntegerField(default=0)),
('question', models.ForeignKey(to='polls.Question')),
],
),
migrations.RemoveField(
model_name='options',
name='question',
),
migrations.DeleteModel(
name='Options',
),
]
|
asajeffrey/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/process.py
|
10
|
import sys
import six
def cast_env(env):
"""Encode all the environment values as the appropriate type for each Python version
This assumes that all the data is or can be represented as UTF8"""
env_type = six.ensure_binary if sys.version_info[0] < 3 else six.ensure_str
return {env_type(key): env_type(value) for key, value in six.iteritems(env)}
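# --- Editor's note: a hypothetical usage sketch, not part of the original
# module; the variable names below are illustrative only.
if __name__ == "__main__":
    import os
    import subprocess
    # keys and values become bytes on Python 2 and str on Python 3, which is
    # the type subprocess expects for the env mapping on each version
    env = cast_env({u"PATH": os.environ.get("PATH", u""), u"EXAMPLE_FLAG": u"1"})
    subprocess.check_call(["python", "--version"], env=env)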
|
paulsmith/geodjango
|
refs/heads/master
|
django/core/management/commands/flush.py
|
14
|
from django.core.management.base import NoArgsCommand, CommandError
from django.core.management.color import no_style
from optparse import make_option
class Command(NoArgsCommand):
option_list = NoArgsCommand.option_list + (
make_option('--verbosity', action='store', dest='verbosity', default='1',
type='choice', choices=['0', '1', '2'],
help='Verbosity level; 0=minimal output, 1=normal output, 2=all output'),
make_option('--noinput', action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.'),
)
help = "Executes ``sqlflush`` on the current database."
def handle_noargs(self, **options):
from django.conf import settings
from django.db import connection, transaction, models
from django.dispatch import dispatcher
from django.core.management.sql import sql_flush, emit_post_sync_signal
verbosity = int(options.get('verbosity', 1))
interactive = options.get('interactive')
self.style = no_style()
# Import the 'management' module within each installed app, to register
# dispatcher events.
for app_name in settings.INSTALLED_APPS:
try:
__import__(app_name + '.management', {}, {}, [''])
except ImportError:
pass
sql_list = sql_flush(self.style, only_django=True)
if interactive:
confirm = raw_input("""You have requested a flush of the database.
This will IRREVERSIBLY DESTROY all data currently in the %r database,
and return each table to the state it was in after syncdb.
Are you sure you want to do this?
Type 'yes' to continue, or 'no' to cancel: """ % settings.DATABASE_NAME)
else:
confirm = 'yes'
if confirm == 'yes':
try:
cursor = connection.cursor()
for sql in sql_list:
cursor.execute(sql)
except Exception, e:
transaction.rollback_unless_managed()
raise CommandError("""Database %s couldn't be flushed. Possible reasons:
* The database isn't running or isn't configured correctly.
* At least one of the expected database tables doesn't exist.
* The SQL was invalid.
Hint: Look at the output of 'django-admin.py sqlflush'. That's the SQL this command wasn't able to run.
The full error: %s""" % (settings.DATABASE_NAME, e))
transaction.commit_unless_managed()
# Emit the post sync signal. This allows individual
# applications to respond as if the database had been
# sync'd from scratch.
emit_post_sync_signal(models.get_models(), verbosity, interactive)
# Reinstall the initial_data fixture.
from django.core.management import call_command
call_command('loaddata', 'initial_data', **options)
else:
print "Flush cancelled."
|
bikash/h2o-dev
|
refs/heads/master
|
h2o-py/tests/testdir_algos/glm/pyunit_NOFEATURE_getLambdaModel_mediumGLM.py
|
2
|
import sys
sys.path.insert(1, "../../../")
import h2o
import random
def getLambdaModel(ip,port):
# Connect to h2o
h2o.init(ip,port)
print("Read data")
prostate = h2o.import_frame(path=h2o.locate("smalldata/logreg/prostate.csv"))
myX = ["AGE","RACE","DPROS","DCAPS","PSA","VOL","GLEASON"]
myY = "CAPSULE"
family = random.choice(["gaussian","binomial"])
print(family)
print("Do lambda search and build models")
if family == "gaussian":
model = h2o.glm(x=prostate[myX], y=prostate[myY], family=family, standardize=True, use_all_factor_levels=True, lambda_search=True)
else:
model = h2o.glm(x=prostate[myX], y=prostate[myY].asfactor(), family=family, standardize=True, use_all_factor_levels=True, lambda_search=True)
print("the models were built over the following lambda values: ")
all_lambdas = model.models(1).lambda_all()
print(all_lambdas)
for i in range(10):
Lambda = random.sample(all_lambdas,1)
print("For Lambda we get this model:")
m1 = h2o.getGLMLambdaModel(model.models(random.randint(0,len(model.models())-1),Lambda=Lambda))
m1.show()
print("this model should be same as the one above:")
m2 = h2o.getGLMLambdaModel(model.models(random.randint(0,len(model.models())-1),Lambda=Lambda))
m2.show()
assert m1==m2, "expected models to be equal"
if __name__ == "__main__":
h2o.run_test(sys.argv, getLambdaModel)
|
52ai/django-ccsds
|
refs/heads/master
|
scripts/manage_translations.py
|
277
|
#!/usr/bin/env python
#
# This python file contains utility scripts to manage Django translations.
# It has to be run inside the django git root directory.
#
# The following commands are available:
#
# * update_catalogs: check for new strings in core and contrib catalogs, and
#   output how many strings are new/changed.
#
# * lang_stats: output statistics for each catalog/language combination
#
# * fetch: fetch translations from transifex.com
#
# Each command supports the --languages and --resources options to limit their
# operation to the specified language or resource. For example, to get stats
# for Spanish in contrib.admin, run:
#
# $ python scripts/manage_translations.py lang_stats --language=es --resources=admin
import os
from argparse import ArgumentParser
from subprocess import PIPE, Popen, call
from django.core.management import call_command
HAVE_JS = ['admin']
def _get_locale_dirs(resources, include_core=True):
"""
Return a tuple (contrib name, absolute path) for all locale directories,
optionally including the django core catalog.
If the resources list is not None, only directories whose names appear in it are returned.
"""
contrib_dir = os.path.join(os.getcwd(), 'django', 'contrib')
dirs = []
# Collect all locale directories
for contrib_name in os.listdir(contrib_dir):
path = os.path.join(contrib_dir, contrib_name, 'locale')
if os.path.isdir(path):
dirs.append((contrib_name, path))
if contrib_name in HAVE_JS:
dirs.append(("%s-js" % contrib_name, path))
if include_core:
dirs.insert(0, ('core', os.path.join(os.getcwd(), 'django', 'conf', 'locale')))
# Filter by resources, if any
if resources is not None:
res_names = [d[0] for d in dirs]
dirs = [ld for ld in dirs if ld[0] in resources]
if len(resources) > len(dirs):
print("You have specified some unknown resources. "
"Available resource names are: %s" % (', '.join(res_names),))
exit(1)
return dirs
def _tx_resource_for_name(name):
""" Return the Transifex resource name """
if name == 'core':
return "django.core"
else:
return "django.contrib-%s" % name
def _check_diff(cat_name, base_path):
"""
Output the approximate number of changed/added strings in the en catalog.
"""
po_path = '%(path)s/en/LC_MESSAGES/django%(ext)s.po' % {
'path': base_path, 'ext': 'js' if cat_name.endswith('-js') else ''}
p = Popen("git diff -U0 %s | egrep '^[-+]msgid' | wc -l" % po_path,
stdout=PIPE, stderr=PIPE, shell=True)
output, errors = p.communicate()
num_changes = int(output.strip())
print("%d changed/added messages in '%s' catalog." % (num_changes, cat_name))
def update_catalogs(resources=None, languages=None):
"""
Update the en/LC_MESSAGES/django.po (main and contrib) files with
new/updated translatable strings.
"""
if resources is not None:
print("`update_catalogs` will always process all resources.")
contrib_dirs = _get_locale_dirs(None, include_core=False)
os.chdir(os.path.join(os.getcwd(), 'django'))
print("Updating en catalogs for Django and contrib apps...")
call_command('makemessages', locale=['en'])
print("Updating en JS catalogs for Django and contrib apps...")
call_command('makemessages', locale=['en'], domain='djangojs')
# Output changed stats
_check_diff('core', os.path.join(os.getcwd(), 'conf', 'locale'))
for name, dir_ in contrib_dirs:
_check_diff(name, dir_)
def lang_stats(resources=None, languages=None):
"""
Output language statistics of committed translation files for each
Django catalog.
If resources is provided, it should be a list of translation resource to
limit the output (e.g. ['core', 'gis']).
"""
locale_dirs = _get_locale_dirs(resources)
for name, dir_ in locale_dirs:
print("\nShowing translations stats for '%s':" % name)
langs = sorted([d for d in os.listdir(dir_) if not d.startswith('_')])
for lang in langs:
if languages and lang not in languages:
continue
# TODO: merge first with the latest en catalog
p = Popen("msgfmt -vc -o /dev/null %(path)s/%(lang)s/LC_MESSAGES/django%(ext)s.po" % {
'path': dir_, 'lang': lang, 'ext': 'js' if name.endswith('-js') else ''},
stdout=PIPE, stderr=PIPE, shell=True)
output, errors = p.communicate()
if p.returncode == 0:
# msgfmt output stats on stderr
print("%s: %s" % (lang, errors.strip()))
else:
print("Errors happened when checking %s translation for %s:\n%s" % (
lang, name, errors))
def fetch(resources=None, languages=None):
"""
Fetch translations from Transifex, wrap long lines, generate mo files.
"""
locale_dirs = _get_locale_dirs(resources)
errors = []
for name, dir_ in locale_dirs:
# Transifex pull
if languages is None:
call('tx pull -r %(res)s -a -f --minimum-perc=5' % {'res': _tx_resource_for_name(name)}, shell=True)
target_langs = sorted([d for d in os.listdir(dir_) if not d.startswith('_') and d != 'en'])
else:
for lang in languages:
call('tx pull -r %(res)s -f -l %(lang)s' % {
'res': _tx_resource_for_name(name), 'lang': lang}, shell=True)
target_langs = languages
# msgcat to wrap lines and msgfmt for compilation of .mo file
for lang in target_langs:
po_path = '%(path)s/%(lang)s/LC_MESSAGES/django%(ext)s.po' % {
'path': dir_, 'lang': lang, 'ext': 'js' if name.endswith('-js') else ''}
if not os.path.exists(po_path):
print("No %(lang)s translation for resource %(name)s" % {
'lang': lang, 'name': name})
continue
call('msgcat --no-location -o %s %s' % (po_path, po_path), shell=True)
res = call('msgfmt -c -o %s.mo %s' % (po_path[:-3], po_path), shell=True)
if res != 0:
errors.append((name, lang))
if errors:
print("\nWARNING: Errors have occurred in following cases:")
for resource, lang in errors:
print("\tResource %s for language %s" % (resource, lang))
exit(1)
if __name__ == "__main__":
RUNABLE_SCRIPTS = ('update_catalogs', 'lang_stats', 'fetch')
parser = ArgumentParser()
parser.add_argument('cmd', nargs=1)
parser.add_argument("-r", "--resources", action='append',
help="limit operation to the specified resources")
parser.add_argument("-l", "--languages", action='append',
help="limit operation to the specified languages")
options = parser.parse_args()
if options.cmd[0] in RUNABLE_SCRIPTS:
eval(options.cmd[0])(options.resources, options.languages)
else:
print("Available commands are: %s" % ", ".join(RUNABLE_SCRIPTS))
|
alextruberg/custom_django
|
refs/heads/master
|
tests/fixtures/tests.py
|
50
|
from __future__ import absolute_import
import warnings
from django.contrib.sites.models import Site
from django.core import management
from django.db import connection, IntegrityError
from django.test import TestCase, TransactionTestCase, skipUnlessDBFeature
from django.utils import six
from .models import Article, Book, Spy, Tag, Visa
class TestCaseFixtureLoadingTests(TestCase):
fixtures = ['fixture1.json', 'fixture2.json']
def testClassFixtures(self):
"Check that test case has installed 3 fixture objects"
self.assertEqual(Article.objects.count(), 3)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Django conquers world!>',
'<Article: Copyright is fine the way it is>',
'<Article: Poker has no place on ESPN>',
])
class DumpDataAssertMixin(object):
def _dumpdata_assert(self, args, output, format='json', natural_keys=False,
use_base_manager=False, exclude_list=[], primary_keys=''):
new_io = six.StringIO()
management.call_command('dumpdata', *args, **{'format': format,
'stdout': new_io,
'stderr': new_io,
'use_natural_keys': natural_keys,
'use_base_manager': use_base_manager,
'exclude': exclude_list,
'primary_keys': primary_keys})
command_output = new_io.getvalue().strip()
if format == "json":
self.assertJSONEqual(command_output, output)
elif format == "xml":
self.assertXMLEqual(command_output, output)
else:
self.assertEqual(command_output, output)
class FixtureLoadingTests(DumpDataAssertMixin, TestCase):
def test_initial_data(self):
# syncdb introduces 1 initial data object from initial_data.json.
self.assertQuerysetEqual(Book.objects.all(), [
'<Book: Achieving self-awareness of Python programs>'
])
def test_loading_and_dumping(self):
Site.objects.all().delete()
# Load fixture 1. Single JSON file, with two objects.
management.call_command('loaddata', 'fixture1.json', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Time to reform copyright>',
'<Article: Poker has no place on ESPN>',
])
# Dump the current contents of the database as a JSON fixture
self._dumpdata_assert(['fixtures'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}, {"pk": 10, "model": "fixtures.book", "fields": {"name": "Achieving self-awareness of Python programs", "authors": []}}]')
# Try just dumping the contents of fixtures.Category
self._dumpdata_assert(['fixtures.Category'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}]')
# ...and just fixtures.Article
self._dumpdata_assert(['fixtures.Article'], '[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]')
# ...and both
self._dumpdata_assert(['fixtures.Category', 'fixtures.Article'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]')
# Specify a specific model twice
self._dumpdata_assert(['fixtures.Article', 'fixtures.Article'], '[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]')
# Specify a dump that specifies Article both explicitly and implicitly
self._dumpdata_assert(['fixtures.Article', 'fixtures'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}, {"pk": 10, "model": "fixtures.book", "fields": {"name": "Achieving self-awareness of Python programs", "authors": []}}]')
# Same again, but specify in the reverse order
self._dumpdata_assert(['fixtures'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}, {"pk": 10, "model": "fixtures.book", "fields": {"name": "Achieving self-awareness of Python programs", "authors": []}}]')
# Specify one model from one application, and an entire other application.
self._dumpdata_assert(['fixtures.Category', 'sites'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}, {"pk": 1, "model": "sites.site", "fields": {"domain": "example.com", "name": "example.com"}}]')
# Load fixture 2. JSON file imported by default. Overwrites some existing objects
management.call_command('loaddata', 'fixture2.json', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Django conquers world!>',
'<Article: Copyright is fine the way it is>',
'<Article: Poker has no place on ESPN>',
])
# Load fixture 3, XML format.
management.call_command('loaddata', 'fixture3.xml', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: XML identified as leading cause of cancer>',
'<Article: Django conquers world!>',
'<Article: Copyright is fine the way it is>',
'<Article: Poker on TV is great!>',
])
# Load fixture 6, JSON file with dynamic ContentType fields. Testing ManyToOne.
management.call_command('loaddata', 'fixture6.json', verbosity=0)
self.assertQuerysetEqual(Tag.objects.all(), [
'<Tag: <Article: Copyright is fine the way it is> tagged "copyright">',
'<Tag: <Article: Copyright is fine the way it is> tagged "law">',
], ordered=False)
# Load fixture 7, XML file with dynamic ContentType fields. Testing ManyToOne.
management.call_command('loaddata', 'fixture7.xml', verbosity=0)
self.assertQuerysetEqual(Tag.objects.all(), [
'<Tag: <Article: Copyright is fine the way it is> tagged "copyright">',
'<Tag: <Article: Copyright is fine the way it is> tagged "legal">',
'<Tag: <Article: Django conquers world!> tagged "django">',
'<Tag: <Article: Django conquers world!> tagged "world domination">',
], ordered=False)
# Load fixture 8, JSON file with dynamic Permission fields. Testing ManyToMany.
management.call_command('loaddata', 'fixture8.json', verbosity=0)
self.assertQuerysetEqual(Visa.objects.all(), [
'<Visa: Django Reinhardt Can add user, Can change user, Can delete user>',
'<Visa: Stephane Grappelli Can add user>',
'<Visa: Prince >'
], ordered=False)
# Load fixture 9, XML file with dynamic Permission fields. Testing ManyToMany.
management.call_command('loaddata', 'fixture9.xml', verbosity=0)
self.assertQuerysetEqual(Visa.objects.all(), [
'<Visa: Django Reinhardt Can add user, Can change user, Can delete user>',
'<Visa: Stephane Grappelli Can add user, Can delete user>',
'<Visa: Artist formerly known as "Prince" Can change user>'
], ordered=False)
self.assertQuerysetEqual(Book.objects.all(), [
'<Book: Achieving self-awareness of Python programs>',
'<Book: Music for all ages by Artist formerly known as "Prince" and Django Reinhardt>'
])
# Loading a fixture that doesn't exist emits a warning
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
management.call_command('loaddata', 'unknown.json', verbosity=0)
self.assertEqual(len(w), 1)
self.assertEqual(str(w[0].message), "No fixture named 'unknown' found.")
# An attempt to load a nonexistent 'initial_data' fixture isn't an error
with warnings.catch_warnings(record=True) as w:
management.call_command('loaddata', 'initial_data.json', verbosity=0)
self.assertEqual(len(w), 0)
# object list is unaffected
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: XML identified as leading cause of cancer>',
'<Article: Django conquers world!>',
'<Article: Copyright is fine the way it is>',
'<Article: Poker on TV is great!>',
])
# By default, you get raw keys on dumpdata
self._dumpdata_assert(['fixtures.book'], '[{"pk": 1, "model": "fixtures.book", "fields": {"name": "Music for all ages", "authors": [3, 1]}}, {"pk": 10, "model": "fixtures.book", "fields": {"name": "Achieving self-awareness of Python programs", "authors": []}}]')
# But you can get natural keys if you ask for them and they are available
self._dumpdata_assert(['fixtures.book'], '[{"pk": 1, "model": "fixtures.book", "fields": {"name": "Music for all ages", "authors": [["Artist formerly known as \\"Prince\\""], ["Django Reinhardt"]]}}, {"pk": 10, "model": "fixtures.book", "fields": {"name": "Achieving self-awareness of Python programs", "authors": []}}]', natural_keys=True)
# Dump the current contents of the database as a JSON fixture
self._dumpdata_assert(['fixtures'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker on TV is great!", "pub_date": "2006-06-16T11:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}, {"pk": 4, "model": "fixtures.article", "fields": {"headline": "Django conquers world!", "pub_date": "2006-06-16T15:00:00"}}, {"pk": 5, "model": "fixtures.article", "fields": {"headline": "XML identified as leading cause of cancer", "pub_date": "2006-06-16T16:00:00"}}, {"pk": 1, "model": "fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": "copyright", "tagged_id": 3}}, {"pk": 2, "model": "fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": "legal", "tagged_id": 3}}, {"pk": 3, "model": "fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": "django", "tagged_id": 4}}, {"pk": 4, "model": "fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": "world domination", "tagged_id": 4}}, {"pk": 1, "model": "fixtures.person", "fields": {"name": "Django Reinhardt"}}, {"pk": 2, "model": "fixtures.person", "fields": {"name": "Stephane Grappelli"}}, {"pk": 3, "model": "fixtures.person", "fields": {"name": "Artist formerly known as \\"Prince\\""}}, {"pk": 1, "model": "fixtures.visa", "fields": {"person": ["Django Reinhardt"], "permissions": [["add_user", "auth", "user"], ["change_user", "auth", "user"], ["delete_user", "auth", "user"]]}}, {"pk": 2, "model": "fixtures.visa", "fields": {"person": ["Stephane Grappelli"], "permissions": [["add_user", "auth", "user"], ["delete_user", "auth", "user"]]}}, {"pk": 3, "model": "fixtures.visa", "fields": {"person": ["Artist formerly known as \\"Prince\\""], "permissions": [["change_user", "auth", "user"]]}}, {"pk": 1, "model": "fixtures.book", "fields": {"name": "Music for all ages", "authors": [["Artist formerly known as \\"Prince\\""], ["Django Reinhardt"]]}}, {"pk": 10, "model": "fixtures.book", "fields": {"name": "Achieving self-awareness of Python programs", "authors": []}}]', natural_keys=True)
# Dump the current contents of the database as an XML fixture
self._dumpdata_assert(['fixtures'], """<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0"><object pk="1" model="fixtures.category"><field type="CharField" name="title">News Stories</field><field type="TextField" name="description">Latest news stories</field></object><object pk="2" model="fixtures.article"><field type="CharField" name="headline">Poker on TV is great!</field><field type="DateTimeField" name="pub_date">2006-06-16T11:00:00</field></object><object pk="3" model="fixtures.article"><field type="CharField" name="headline">Copyright is fine the way it is</field><field type="DateTimeField" name="pub_date">2006-06-16T14:00:00</field></object><object pk="4" model="fixtures.article"><field type="CharField" name="headline">Django conquers world!</field><field type="DateTimeField" name="pub_date">2006-06-16T15:00:00</field></object><object pk="5" model="fixtures.article"><field type="CharField" name="headline">XML identified as leading cause of cancer</field><field type="DateTimeField" name="pub_date">2006-06-16T16:00:00</field></object><object pk="1" model="fixtures.tag"><field type="CharField" name="name">copyright</field><field to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>fixtures</natural><natural>article</natural></field><field type="PositiveIntegerField" name="tagged_id">3</field></object><object pk="2" model="fixtures.tag"><field type="CharField" name="name">legal</field><field to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>fixtures</natural><natural>article</natural></field><field type="PositiveIntegerField" name="tagged_id">3</field></object><object pk="3" model="fixtures.tag"><field type="CharField" name="name">django</field><field to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>fixtures</natural><natural>article</natural></field><field type="PositiveIntegerField" name="tagged_id">4</field></object><object pk="4" model="fixtures.tag"><field type="CharField" name="name">world domination</field><field to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>fixtures</natural><natural>article</natural></field><field type="PositiveIntegerField" name="tagged_id">4</field></object><object pk="1" model="fixtures.person"><field type="CharField" name="name">Django Reinhardt</field></object><object pk="2" model="fixtures.person"><field type="CharField" name="name">Stephane Grappelli</field></object><object pk="3" model="fixtures.person"><field type="CharField" name="name">Artist formerly known as "Prince"</field></object><object pk="1" model="fixtures.visa"><field to="fixtures.person" name="person" rel="ManyToOneRel"><natural>Django Reinhardt</natural></field><field to="auth.permission" name="permissions" rel="ManyToManyRel"><object><natural>add_user</natural><natural>auth</natural><natural>user</natural></object><object><natural>change_user</natural><natural>auth</natural><natural>user</natural></object><object><natural>delete_user</natural><natural>auth</natural><natural>user</natural></object></field></object><object pk="2" model="fixtures.visa"><field to="fixtures.person" name="person" rel="ManyToOneRel"><natural>Stephane Grappelli</natural></field><field to="auth.permission" name="permissions" rel="ManyToManyRel"><object><natural>add_user</natural><natural>auth</natural><natural>user</natural></object><object><natural>delete_user</natural><natural>auth</natural><natural>user</natural></object></field></object><object pk="3" model="fixtures.visa"><field to="fixtures.person" name="person" rel="ManyToOneRel"><natural>Artist formerly 
known as "Prince"</natural></field><field to="auth.permission" name="permissions" rel="ManyToManyRel"><object><natural>change_user</natural><natural>auth</natural><natural>user</natural></object></field></object><object pk="1" model="fixtures.book"><field type="CharField" name="name">Music for all ages</field><field to="fixtures.person" name="authors" rel="ManyToManyRel"><object><natural>Artist formerly known as "Prince"</natural></object><object><natural>Django Reinhardt</natural></object></field></object><object pk="10" model="fixtures.book"><field type="CharField" name="name">Achieving self-awareness of Python programs</field><field to="fixtures.person" name="authors" rel="ManyToManyRel"></field></object></django-objects>""", format='xml', natural_keys=True)
def test_dumpdata_with_excludes(self):
# Load fixture1 which has a site, two articles, and a category
Site.objects.all().delete()
management.call_command('loaddata', 'fixture1.json', verbosity=0)
# Excluding fixtures app should only leave sites
self._dumpdata_assert(
['sites', 'fixtures'],
'[{"pk": 1, "model": "sites.site", "fields": {"domain": "example.com", "name": "example.com"}}]',
exclude_list=['fixtures'])
# Excluding fixtures.Article/Book should leave fixtures.Category
self._dumpdata_assert(
['sites', 'fixtures'],
'[{"pk": 1, "model": "sites.site", "fields": {"domain": "example.com", "name": "example.com"}}, {"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}]',
exclude_list=['fixtures.Article', 'fixtures.Book'])
# Excluding fixtures and fixtures.Article/Book should be a no-op
self._dumpdata_assert(
['sites', 'fixtures'],
'[{"pk": 1, "model": "sites.site", "fields": {"domain": "example.com", "name": "example.com"}}, {"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}]',
exclude_list=['fixtures.Article', 'fixtures.Book'])
# Excluding sites and fixtures.Article/Book should only leave fixtures.Category
self._dumpdata_assert(
['sites', 'fixtures'],
'[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}]',
exclude_list=['fixtures.Article', 'fixtures.Book', 'sites'])
# Excluding a bogus app should throw an error
with six.assertRaisesRegex(self, management.CommandError,
"Unknown app in excludes: foo_app"):
self._dumpdata_assert(['fixtures', 'sites'], '', exclude_list=['foo_app'])
# Excluding a bogus model should throw an error
with six.assertRaisesRegex(self, management.CommandError,
"Unknown model in excludes: fixtures.FooModel"):
self._dumpdata_assert(['fixtures', 'sites'], '', exclude_list=['fixtures.FooModel'])
def test_dumpdata_with_filtering_manager(self):
spy1 = Spy.objects.create(name='Paul')
spy2 = Spy.objects.create(name='Alex', cover_blown=True)
self.assertQuerysetEqual(Spy.objects.all(),
['<Spy: Paul>'])
# Use the default manager
self._dumpdata_assert(['fixtures.Spy'], '[{"pk": %d, "model": "fixtures.spy", "fields": {"cover_blown": false}}]' % spy1.pk)
# Dump using Django's base manager. Should return all objects,
# even those normally filtered by the manager
self._dumpdata_assert(['fixtures.Spy'], '[{"pk": %d, "model": "fixtures.spy", "fields": {"cover_blown": true}}, {"pk": %d, "model": "fixtures.spy", "fields": {"cover_blown": false}}]' % (spy2.pk, spy1.pk), use_base_manager=True)
def test_dumpdata_with_pks(self):
management.call_command('loaddata', 'fixture1.json', verbosity=0)
management.call_command('loaddata', 'fixture2.json', verbosity=0)
self._dumpdata_assert(
['fixtures.Article'],
'[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}]',
primary_keys='2,3'
)
self._dumpdata_assert(
['fixtures.Article'],
'[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}]',
primary_keys='2'
)
with six.assertRaisesRegex(self, management.CommandError,
"You can only use --pks option with one model"):
self._dumpdata_assert(
['fixtures'],
'[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}]',
primary_keys='2,3'
)
with six.assertRaisesRegex(self, management.CommandError,
"You can only use --pks option with one model"):
self._dumpdata_assert(
'',
'[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}]',
primary_keys='2,3'
)
with six.assertRaisesRegex(self, management.CommandError,
"You can only use --pks option with one model"):
self._dumpdata_assert(
['fixtures.Article', 'fixtures.category'],
'[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}]',
primary_keys='2,3'
)
def test_compress_format_loading(self):
# Load fixture 4 (compressed), using format specification
management.call_command('loaddata', 'fixture4.json', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Django pets kitten>',
])
def test_compressed_specified_loading(self):
# Load fixture 5 (compressed), using format *and* compression specification
management.call_command('loaddata', 'fixture5.json.zip', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: WoW subscribers now outnumber readers>',
])
def test_compressed_loading(self):
# Load fixture 5 (compressed), only compression specification
management.call_command('loaddata', 'fixture5.zip', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: WoW subscribers now outnumber readers>',
])
def test_ambiguous_compressed_fixture(self):
# The name "fixture5" is ambigous, so loading it will raise an error
with self.assertRaises(management.CommandError) as cm:
management.call_command('loaddata', 'fixture5', verbosity=0)
self.assertIn("Multiple fixtures named 'fixture5'", cm.exception.args[0])
def test_db_loading(self):
# Load db fixtures 1 and 2. These will load using the 'default' database identifier implicitly
management.call_command('loaddata', 'db_fixture_1', verbosity=0)
management.call_command('loaddata', 'db_fixture_2', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Who needs more than one database?>',
'<Article: Who needs to use compressed data?>',
])
def test_loaddata_error_message(self):
"""
Verifies that loading a fixture which contains an invalid object
outputs an error message which contains the pk of the object
that triggered the error.
"""
# MySQL needs a little prodding to reject invalid data.
# This won't affect other tests because the database connection
# is closed at the end of each test.
if connection.vendor == 'mysql':
connection.cursor().execute("SET sql_mode = 'TRADITIONAL'")
with self.assertRaises(IntegrityError) as cm:
management.call_command('loaddata', 'invalid.json', verbosity=0)
self.assertIn("Could not load fixtures.Article(pk=1):", cm.exception.args[0])
def test_loading_using(self):
# Load db fixtures 1 and 2. These will load using the 'default' database identifier explicitly
management.call_command('loaddata', 'db_fixture_1', verbosity=0, using='default')
management.call_command('loaddata', 'db_fixture_2', verbosity=0, using='default')
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Who needs more than one database?>',
'<Article: Who needs to use compressed data?>',
])
def test_unmatched_identifier_loading(self):
# Try to load db fixture 3. This won't load because the database identifier doesn't match
with warnings.catch_warnings(record=True):
management.call_command('loaddata', 'db_fixture_3', verbosity=0)
with warnings.catch_warnings(record=True):
management.call_command('loaddata', 'db_fixture_3', verbosity=0, using='default')
self.assertQuerysetEqual(Article.objects.all(), [])
def test_output_formats(self):
# Load back in fixture 1, we need the articles from it
management.call_command('loaddata', 'fixture1', verbosity=0)
# Try to load fixture 6 using format discovery
management.call_command('loaddata', 'fixture6', verbosity=0)
self.assertQuerysetEqual(Tag.objects.all(), [
'<Tag: <Article: Time to reform copyright> tagged "copyright">',
'<Tag: <Article: Time to reform copyright> tagged "law">'
], ordered=False)
# Dump the current contents of the database as a JSON fixture
self._dumpdata_assert(['fixtures'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}, {"pk": 1, "model": "fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": "copyright", "tagged_id": 3}}, {"pk": 2, "model": "fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": "law", "tagged_id": 3}}, {"pk": 1, "model": "fixtures.person", "fields": {"name": "Django Reinhardt"}}, {"pk": 2, "model": "fixtures.person", "fields": {"name": "Stephane Grappelli"}}, {"pk": 3, "model": "fixtures.person", "fields": {"name": "Prince"}}, {"pk": 10, "model": "fixtures.book", "fields": {"name": "Achieving self-awareness of Python programs", "authors": []}}]', natural_keys=True)
# Dump the current contents of the database as an XML fixture
self._dumpdata_assert(['fixtures'], """<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0"><object pk="1" model="fixtures.category"><field type="CharField" name="title">News Stories</field><field type="TextField" name="description">Latest news stories</field></object><object pk="2" model="fixtures.article"><field type="CharField" name="headline">Poker has no place on ESPN</field><field type="DateTimeField" name="pub_date">2006-06-16T12:00:00</field></object><object pk="3" model="fixtures.article"><field type="CharField" name="headline">Time to reform copyright</field><field type="DateTimeField" name="pub_date">2006-06-16T13:00:00</field></object><object pk="1" model="fixtures.tag"><field type="CharField" name="name">copyright</field><field to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>fixtures</natural><natural>article</natural></field><field type="PositiveIntegerField" name="tagged_id">3</field></object><object pk="2" model="fixtures.tag"><field type="CharField" name="name">law</field><field to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>fixtures</natural><natural>article</natural></field><field type="PositiveIntegerField" name="tagged_id">3</field></object><object pk="1" model="fixtures.person"><field type="CharField" name="name">Django Reinhardt</field></object><object pk="2" model="fixtures.person"><field type="CharField" name="name">Stephane Grappelli</field></object><object pk="3" model="fixtures.person"><field type="CharField" name="name">Prince</field></object><object pk="10" model="fixtures.book"><field type="CharField" name="name">Achieving self-awareness of Python programs</field><field to="fixtures.person" name="authors" rel="ManyToManyRel"></field></object></django-objects>""", format='xml', natural_keys=True)
class FixtureTransactionTests(DumpDataAssertMixin, TransactionTestCase):
available_apps = [
'fixtures',
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sites',
]
@skipUnlessDBFeature('supports_forward_references')
def test_format_discovery(self):
# Load fixture 1 again, using format discovery
management.call_command('loaddata', 'fixture1', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Time to reform copyright>',
'<Article: Poker has no place on ESPN>',
])
# Try to load fixture 2 using format discovery; this will fail
# because there are two fixture2's in the fixtures directory
with self.assertRaises(management.CommandError) as cm:
management.call_command('loaddata', 'fixture2', verbosity=0)
self.assertIn("Multiple fixtures named 'fixture2'", cm.exception.args[0])
# object list is unaffected
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Time to reform copyright>',
'<Article: Poker has no place on ESPN>',
])
# Dump the current contents of the database as a JSON fixture
self._dumpdata_assert(['fixtures'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}, {"pk": 10, "model": "fixtures.book", "fields": {"name": "Achieving self-awareness of Python programs", "authors": []}}]')
# Load fixture 4 (compressed), using format discovery
management.call_command('loaddata', 'fixture4', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Django pets kitten>',
'<Article: Time to reform copyright>',
'<Article: Poker has no place on ESPN>',
])
|
xflows/rdm
|
refs/heads/master
|
rdm/wrappers/migration.py
|
2
|
for w in AbstractWidget.objects.filter(package='ilp'):
w.package = 'rdm.wrappers'
w.save()
|
40223110/2015cda_0512
|
refs/heads/master
|
static/Brython3.1.0-20150301-090019/Lib/formatter.py
|
751
|
"""Generic output formatting.
Formatter objects transform an abstract flow of formatting events into
specific output events on writer objects. Formatters manage several stack
structures to allow various properties of a writer object to be changed and
restored; writers need not be able to handle relative changes nor any sort
of ``change back'' operation. Specific writer properties which may be
controlled via formatter objects are horizontal alignment, font, and left
margin indentations. A mechanism is provided which supports providing
arbitrary, non-exclusive style settings to a writer as well. Additional
interfaces facilitate formatting events which are not reversible, such as
paragraph separation.
Writer objects encapsulate device interfaces. Abstract devices, such as
file formats, are supported as well as physical devices. The provided
implementations all work with abstract devices. The interface makes
available mechanisms for setting the properties which formatter objects
manage and inserting data into the output.
"""
import sys
AS_IS = None
class NullFormatter:
"""A formatter which does nothing.
If the writer parameter is omitted, a NullWriter instance is created.
No methods of the writer are called by NullFormatter instances.
Implementations should inherit from this class if implementing a formatter
interface but don't need to inherit any implementation.
"""
def __init__(self, writer=None):
if writer is None:
writer = NullWriter()
self.writer = writer
def end_paragraph(self, blankline): pass
def add_line_break(self): pass
def add_hor_rule(self, *args, **kw): pass
def add_label_data(self, format, counter, blankline=None): pass
def add_flowing_data(self, data): pass
def add_literal_data(self, data): pass
def flush_softspace(self): pass
def push_alignment(self, align): pass
def pop_alignment(self): pass
def push_font(self, x): pass
def pop_font(self): pass
def push_margin(self, margin): pass
def pop_margin(self): pass
def set_spacing(self, spacing): pass
def push_style(self, *styles): pass
def pop_style(self, n=1): pass
def assert_line_data(self, flag=1): pass
class AbstractFormatter:
"""The standard formatter.
This implementation has demonstrated wide applicability to many writers,
and may be used directly in most circumstances. It has been used to
implement a full-featured World Wide Web browser.
"""
# Space handling policy: blank spaces at the boundary between elements
# are handled by the outermost context. "Literal" data is not checked
# to determine context, so spaces in literal data are handled directly
# in all circumstances.
def __init__(self, writer):
self.writer = writer # Output device
self.align = None # Current alignment
self.align_stack = [] # Alignment stack
self.font_stack = [] # Font state
self.margin_stack = [] # Margin state
self.spacing = None # Vertical spacing state
self.style_stack = [] # Other state, e.g. color
self.nospace = 1 # Should leading space be suppressed
self.softspace = 0 # Should a space be inserted
self.para_end = 1 # Just ended a paragraph
self.parskip = 0 # Skipped space between paragraphs?
self.hard_break = 1 # Have a hard break
self.have_label = 0
def end_paragraph(self, blankline):
if not self.hard_break:
self.writer.send_line_break()
self.have_label = 0
if self.parskip < blankline and not self.have_label:
self.writer.send_paragraph(blankline - self.parskip)
self.parskip = blankline
self.have_label = 0
self.hard_break = self.nospace = self.para_end = 1
self.softspace = 0
def add_line_break(self):
if not (self.hard_break or self.para_end):
self.writer.send_line_break()
self.have_label = self.parskip = 0
self.hard_break = self.nospace = 1
self.softspace = 0
def add_hor_rule(self, *args, **kw):
if not self.hard_break:
self.writer.send_line_break()
self.writer.send_hor_rule(*args, **kw)
self.hard_break = self.nospace = 1
self.have_label = self.para_end = self.softspace = self.parskip = 0
def add_label_data(self, format, counter, blankline = None):
if self.have_label or not self.hard_break:
self.writer.send_line_break()
if not self.para_end:
self.writer.send_paragraph((blankline and 1) or 0)
if isinstance(format, str):
self.writer.send_label_data(self.format_counter(format, counter))
else:
self.writer.send_label_data(format)
self.nospace = self.have_label = self.hard_break = self.para_end = 1
self.softspace = self.parskip = 0
def format_counter(self, format, counter):
label = ''
for c in format:
if c == '1':
label = label + ('%d' % counter)
elif c in 'aA':
if counter > 0:
label = label + self.format_letter(c, counter)
elif c in 'iI':
if counter > 0:
label = label + self.format_roman(c, counter)
else:
label = label + c
return label
def format_letter(self, case, counter):
label = ''
while counter > 0:
counter, x = divmod(counter-1, 26)
# This makes a strong assumption that lowercase letters
# and uppercase letters form two contiguous blocks, with
# letters in order!
s = chr(ord(case) + x)
label = s + label
return label
def format_roman(self, case, counter):
ones = ['i', 'x', 'c', 'm']
fives = ['v', 'l', 'd']
label, index = '', 0
# This will die of IndexError when counter is too big
while counter > 0:
counter, x = divmod(counter, 10)
if x == 9:
label = ones[index] + ones[index+1] + label
elif x == 4:
label = ones[index] + fives[index] + label
else:
if x >= 5:
s = fives[index]
x = x-5
else:
s = ''
s = s + ones[index]*x
label = s + label
index = index + 1
if case == 'I':
return label.upper()
return label
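# Worked examples for the three counter formatters above (added for clarity,
# not part of the original module; shown as conceptual calls on an
# AbstractFormatter instance):
#   format_counter('1.', 3)  -> '3.'       (non-format characters pass through)
#   format_letter('a', 28)   -> 'ab'       (1 -> 'a', 26 -> 'z', 27 -> 'aa')
#   format_roman('i', 1999)  -> 'mcmxcix'  ('I' upper-cases the result)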
def add_flowing_data(self, data):
if not data: return
prespace = data[:1].isspace()
postspace = data[-1:].isspace()
data = " ".join(data.split())
if self.nospace and not data:
return
elif prespace or self.softspace:
if not data:
if not self.nospace:
self.softspace = 1
self.parskip = 0
return
if not self.nospace:
data = ' ' + data
self.hard_break = self.nospace = self.para_end = \
self.parskip = self.have_label = 0
self.softspace = postspace
self.writer.send_flowing_data(data)
def add_literal_data(self, data):
if not data: return
if self.softspace:
self.writer.send_flowing_data(" ")
self.hard_break = data[-1:] == '\n'
self.nospace = self.para_end = self.softspace = \
self.parskip = self.have_label = 0
self.writer.send_literal_data(data)
def flush_softspace(self):
if self.softspace:
self.hard_break = self.para_end = self.parskip = \
self.have_label = self.softspace = 0
self.nospace = 1
self.writer.send_flowing_data(' ')
def push_alignment(self, align):
if align and align != self.align:
self.writer.new_alignment(align)
self.align = align
self.align_stack.append(align)
else:
self.align_stack.append(self.align)
def pop_alignment(self):
if self.align_stack:
del self.align_stack[-1]
if self.align_stack:
self.align = align = self.align_stack[-1]
self.writer.new_alignment(align)
else:
self.align = None
self.writer.new_alignment(None)
def push_font(self, font):
size, i, b, tt = font
if self.softspace:
self.hard_break = self.para_end = self.softspace = 0
self.nospace = 1
self.writer.send_flowing_data(' ')
if self.font_stack:
csize, ci, cb, ctt = self.font_stack[-1]
if size is AS_IS: size = csize
if i is AS_IS: i = ci
if b is AS_IS: b = cb
if tt is AS_IS: tt = ctt
font = (size, i, b, tt)
self.font_stack.append(font)
self.writer.new_font(font)
def pop_font(self):
if self.font_stack:
del self.font_stack[-1]
if self.font_stack:
font = self.font_stack[-1]
else:
font = None
self.writer.new_font(font)
def push_margin(self, margin):
self.margin_stack.append(margin)
fstack = [m for m in self.margin_stack if m]
if not margin and fstack:
margin = fstack[-1]
self.writer.new_margin(margin, len(fstack))
def pop_margin(self):
if self.margin_stack:
del self.margin_stack[-1]
fstack = [m for m in self.margin_stack if m]
if fstack:
margin = fstack[-1]
else:
margin = None
self.writer.new_margin(margin, len(fstack))
def set_spacing(self, spacing):
self.spacing = spacing
self.writer.new_spacing(spacing)
def push_style(self, *styles):
if self.softspace:
self.hard_break = self.para_end = self.softspace = 0
self.nospace = 1
self.writer.send_flowing_data(' ')
for style in styles:
self.style_stack.append(style)
self.writer.new_styles(tuple(self.style_stack))
def pop_style(self, n=1):
del self.style_stack[-n:]
self.writer.new_styles(tuple(self.style_stack))
def assert_line_data(self, flag=1):
self.nospace = self.hard_break = not flag
self.para_end = self.parskip = self.have_label = 0
class NullWriter:
"""Minimal writer interface to use in testing & inheritance.
A writer which only provides the interface definition; no actions are
taken on any methods. This should be the base class for all writers
which do not need to inherit any implementation methods.
"""
def __init__(self): pass
def flush(self): pass
def new_alignment(self, align): pass
def new_font(self, font): pass
def new_margin(self, margin, level): pass
def new_spacing(self, spacing): pass
def new_styles(self, styles): pass
def send_paragraph(self, blankline): pass
def send_line_break(self): pass
def send_hor_rule(self, *args, **kw): pass
def send_label_data(self, data): pass
def send_flowing_data(self, data): pass
def send_literal_data(self, data): pass
class AbstractWriter(NullWriter):
"""A writer which can be used in debugging formatters, but not much else.
Each method simply announces itself by printing its name and
arguments on standard output.
"""
def new_alignment(self, align):
print("new_alignment(%r)" % (align,))
def new_font(self, font):
print("new_font(%r)" % (font,))
def new_margin(self, margin, level):
print("new_margin(%r, %d)" % (margin, level))
def new_spacing(self, spacing):
print("new_spacing(%r)" % (spacing,))
def new_styles(self, styles):
print("new_styles(%r)" % (styles,))
def send_paragraph(self, blankline):
print("send_paragraph(%r)" % (blankline,))
def send_line_break(self):
print("send_line_break()")
def send_hor_rule(self, *args, **kw):
print("send_hor_rule()")
def send_label_data(self, data):
print("send_label_data(%r)" % (data,))
def send_flowing_data(self, data):
print("send_flowing_data(%r)" % (data,))
def send_literal_data(self, data):
print("send_literal_data(%r)" % (data,))
class DumbWriter(NullWriter):
"""Simple writer class which writes output on the file object passed in
as the file parameter or, if file is omitted, on standard output. The
output is simply word-wrapped to the number of columns specified by
the maxcol parameter. This class is suitable for reflowing a sequence
of paragraphs.
"""
def __init__(self, file=None, maxcol=72):
self.file = file or sys.stdout
self.maxcol = maxcol
NullWriter.__init__(self)
self.reset()
def reset(self):
self.col = 0
self.atbreak = 0
def send_paragraph(self, blankline):
self.file.write('\n'*blankline)
self.col = 0
self.atbreak = 0
def send_line_break(self):
self.file.write('\n')
self.col = 0
self.atbreak = 0
def send_hor_rule(self, *args, **kw):
self.file.write('\n')
self.file.write('-'*self.maxcol)
self.file.write('\n')
self.col = 0
self.atbreak = 0
def send_literal_data(self, data):
self.file.write(data)
i = data.rfind('\n')
if i >= 0:
self.col = 0
data = data[i+1:]
data = data.expandtabs()
self.col = self.col + len(data)
self.atbreak = 0
def send_flowing_data(self, data):
if not data: return
atbreak = self.atbreak or data[0].isspace()
col = self.col
maxcol = self.maxcol
write = self.file.write
for word in data.split():
if atbreak:
if col + len(word) >= maxcol:
write('\n')
col = 0
else:
write(' ')
col = col + 1
write(word)
col = col + len(word)
atbreak = 1
self.col = col
self.atbreak = data[-1].isspace()
def test(file = None):
w = DumbWriter()
f = AbstractFormatter(w)
if file is not None:
fp = open(file)
elif sys.argv[1:]:
fp = open(sys.argv[1])
else:
fp = sys.stdin
for line in fp:
if line == '\n':
f.end_paragraph(1)
else:
f.add_flowing_data(line)
f.end_paragraph(0)
if __name__ == '__main__':
test()
|
engagespark/mulungwishi-webhook
|
refs/heads/master
|
app/__init__.py
|
4
|
from flask import Flask
mulungwishi_app = Flask(__name__)
|
fabric8-analytics/fabric8-analytics-jobs
|
refs/heads/master
|
f8a_jobs/handlers/flow.py
|
1
|
"""Schedule multiple flows of a type."""
from .base import BaseHandler
class FlowScheduling(BaseHandler):
"""Schedule multiple flows of a type."""
def execute(self, flow_name, flow_arguments):
"""Schedule multiple flows of a type, do filter expansion if needed.
:param flow_name: flow name that should be scheduled
:param flow_arguments: a list of flow arguments per flow
"""
for node_args in flow_arguments:
if self.is_filter_query(node_args):
for args in self.expand_filter_query(node_args):
self.run_selinon_flow(flow_name, args)
else:
self.run_selinon_flow(flow_name, node_args)
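# A hypothetical usage sketch (handler construction and the flow name are
# assumptions, not part of this module): the scheduler receives one node_args
# mapping per flow, and any entry recognised by is_filter_query() is expanded
# into concrete argument sets before each run_selinon_flow() dispatch.
#
#   handler.execute('exampleFlow', [
#       {'ecosystem': 'pypi', 'name': 'requests'},   # scheduled as-is
#       filter_query,                                # expanded, then scheduled
#   ])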
|
pforret/python-for-android
|
refs/heads/master
|
python3-alpha/python-libs/atom/http.py
|
46
|
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""HttpClients in this module use httplib to make HTTP requests.
This module makes HTTP requests based on httplib, but there are environments
in which an httplib based approach will not work (if running in Google App
Engine for example). In those cases, higher level classes (like AtomService
and GDataService) can swap out the HttpClient to transparently use a
different mechanism for making HTTP requests.
HttpClient: Contains a request method which performs an HTTP call to the
server.
ProxiedHttpClient: Contains a request method which connects to a proxy using
settings stored in operating system environment variables then
performs an HTTP call to the endpoint server.
"""
__author__ = 'api.jscudder (Jeff Scudder)'
import types
import os
import http.client
import atom.url
import atom.http_interface
import socket
import base64
import atom.http_core
ssl_imported = False
ssl = None
try:
import ssl
ssl_imported = True
except ImportError:
pass
class ProxyError(atom.http_interface.Error):
pass
class TestConfigurationError(Exception):
pass
DEFAULT_CONTENT_TYPE = 'application/atom+xml'
class HttpClient(atom.http_interface.GenericHttpClient):
# Added to allow old v1 HttpClient objects to use the new
# http_code.HttpClient. Used in unit tests to inject a mock client.
v2_http_client = None
def __init__(self, headers=None):
self.debug = False
self.headers = headers or {}
def request(self, operation, url, data=None, headers=None):
"""Performs an HTTP call to the server, supports GET, POST, PUT, and
DELETE.
Usage example, performing an HTTP GET on http://www.google.com/:
import atom.http
client = atom.http.HttpClient()
http_response = client.request('GET', 'http://www.google.com/')
Args:
operation: str The HTTP operation to be performed. This is usually one
of 'GET', 'POST', 'PUT', or 'DELETE'
data: filestream, list of parts, or other object which can be converted
to a string. Should be set to None when performing a GET or DELETE.
If data is a file-like object which can be read, this method will
read a chunk of 100K bytes at a time and send them.
If the data is a list of parts to be sent, each part will be
evaluated and sent.
url: The full URL to which the request should be sent. Can be a string
or atom.url.Url.
headers: dict of strings. HTTP headers which should be sent
in the request.
"""
all_headers = self.headers.copy()
if headers:
all_headers.update(headers)
# If the list of headers does not include a Content-Length, attempt to
# calculate it based on the data object.
if data and 'Content-Length' not in all_headers:
if isinstance(data, str):
all_headers['Content-Length'] = str(len(data))
else:
raise atom.http_interface.ContentLengthRequired('Unable to calculate '
'the length of the data parameter. Specify a value for '
'Content-Length')
# Set the content type to the default value if none was set.
if 'Content-Type' not in all_headers:
all_headers['Content-Type'] = DEFAULT_CONTENT_TYPE
if self.v2_http_client is not None:
http_request = atom.http_core.HttpRequest(method=operation)
atom.http_core.Uri.parse_uri(str(url)).modify_request(http_request)
http_request.headers = all_headers
if data:
http_request._body_parts.append(data)
return self.v2_http_client.request(http_request=http_request)
if not isinstance(url, atom.url.Url):
if isinstance(url, str):
url = atom.url.parse_url(url)
else:
raise atom.http_interface.UnparsableUrlObject('Unable to parse url '
'parameter because it was not a string or atom.url.Url')
connection = self._prepare_connection(url, all_headers)
if self.debug:
connection.debuglevel = 1
connection.putrequest(operation, self._get_access_url(url),
skip_host=True)
if url.port is not None:
connection.putheader('Host', '%s:%s' % (url.host, url.port))
else:
connection.putheader('Host', url.host)
# Overcome a bug in Python 2.4 and 2.5:
# httplib.HTTPConnection.putrequest adds the
# HTTP request header 'Host: www.google.com:443' instead of
# 'Host: www.google.com', resulting in the error message
# 'Token invalid - AuthSub token has wrong scope' in the HTTP response.
if (url.protocol == 'https' and int(url.port or 443) == 443 and
hasattr(connection, '_buffer') and
isinstance(connection._buffer, list)):
header_line = 'Host: %s:443' % url.host
replacement_header_line = 'Host: %s' % url.host
try:
connection._buffer[connection._buffer.index(header_line)] = (
replacement_header_line)
except ValueError: # header_line missing from connection._buffer
pass
# Send the HTTP headers.
for header_name in all_headers:
connection.putheader(header_name, all_headers[header_name])
connection.endheaders()
# If there is data, send it in the request.
if data:
if isinstance(data, list):
for data_part in data:
_send_data_part(data_part, connection)
else:
_send_data_part(data, connection)
# Return the HTTP Response from the server.
return connection.getresponse()
def _prepare_connection(self, url, headers):
if not isinstance(url, atom.url.Url):
if isinstance(url, str):
url = atom.url.parse_url(url)
else:
raise atom.http_interface.UnparsableUrlObject('Unable to parse url '
'parameter because it was not a string or atom.url.Url')
if url.protocol == 'https':
if not url.port:
return http.client.HTTPSConnection(url.host)
return http.client.HTTPSConnection(url.host, int(url.port))
else:
if not url.port:
return http.client.HTTPConnection(url.host)
return http.client.HTTPConnection(url.host, int(url.port))
def _get_access_url(self, url):
return url.to_string()
class ProxiedHttpClient(HttpClient):
"""Performs an HTTP request through a proxy.
The proxy settings are obtained from environment variables. The URL of the
proxy server is assumed to be stored in the environment variables
'https_proxy' and 'http_proxy' respectively. If the proxy server requires
a Basic Auth authorization header, the username and password are expected to
be in the 'proxy-username' or 'proxy_username' variable and the
'proxy-password' or 'proxy_password' variable, or in 'http_proxy' or
'https_proxy' as "protocol://[username:password@]host:port".
After connecting to the proxy server, the request is completed as in
HttpClient.request.
"""
def _prepare_connection(self, url, headers):
proxy_settings = os.environ.get('%s_proxy' % url.protocol)
if not proxy_settings:
# The request was HTTP or HTTPS, but there was no appropriate proxy set.
return HttpClient._prepare_connection(self, url, headers)
else:
proxy_auth = _get_proxy_auth(proxy_settings)
proxy_netloc = _get_proxy_net_location(proxy_settings)
if url.protocol == 'https':
# Set any proxy auth headers
if proxy_auth:
proxy_auth = 'Proxy-authorization: %s' % proxy_auth
# Construct the proxy connect command.
port = url.port
if not port:
port = '443'
proxy_connect = 'CONNECT %s:%s HTTP/1.0\r\n' % (url.host, port)
# Set the user agent to send to the proxy
if headers and 'User-Agent' in headers:
user_agent = 'User-Agent: %s\r\n' % (headers['User-Agent'])
else:
user_agent = 'User-Agent: python\r\n'
proxy_pieces = '%s%s%s\r\n' % (proxy_connect, proxy_auth, user_agent)
# Find the proxy host and port.
proxy_url = atom.url.parse_url(proxy_netloc)
if not proxy_url.port:
proxy_url.port = '80'
# Connect to the proxy server, very simple recv and error checking
p_sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
p_sock.connect((proxy_url.host, int(proxy_url.port)))
p_sock.sendall(proxy_pieces.encode('utf-8'))
response = b''
# Wait for the full response; socket recv() returns bytes under Python 3.
while response.find(b"\r\n\r\n") == -1:
response += p_sock.recv(8192)
p_status = response.split()[1]
if p_status != b'200':
raise ProxyError('Error status=%s' % str(p_status))
# Trivial setup for ssl socket.
sslobj = None
if ssl_imported:
sslobj = ssl.wrap_socket(p_sock, None, None)
else:
sock_ssl = socket.ssl(p_sock, None, None)
sslobj = http.client.FakeSocket(p_sock, sock_ssl)
# Initialize httplib and replace with the proxy socket.
connection = http.client.HTTPConnection(proxy_url.host)
connection.sock = sslobj
return connection
else:
# If protocol was not https.
# Find the proxy host and port.
proxy_url = atom.url.parse_url(proxy_netloc)
if not proxy_url.port:
proxy_url.port = '80'
if proxy_auth:
headers['Proxy-Authorization'] = proxy_auth.strip()
return http.client.HTTPConnection(proxy_url.host, int(proxy_url.port))
def _get_access_url(self, url):
return url.to_string()
def _get_proxy_auth(proxy_settings):
"""Returns proxy authentication string for header.
Will check environment variables for proxy authentication info, starting with
proxy(_/-)username and proxy(_/-)password before checking the given
proxy_settings for a [protocol://]username:password@host[:port] string.
Args:
proxy_settings: String from http_proxy or https_proxy environment variable.
Returns:
Authentication string for proxy, or empty string if no proxy username was
found.
"""
proxy_username = None
proxy_password = None
proxy_username = os.environ.get('proxy-username')
if not proxy_username:
proxy_username = os.environ.get('proxy_username')
proxy_password = os.environ.get('proxy-password')
if not proxy_password:
proxy_password = os.environ.get('proxy_password')
if not proxy_username:
if '@' in proxy_settings:
protocol_and_proxy_auth = proxy_settings.split('@')[0].split(':')
if len(protocol_and_proxy_auth) == 3:
# 3 elements means we have [<protocol>, //<user>, <password>]
proxy_username = protocol_and_proxy_auth[1].lstrip('/')
proxy_password = protocol_and_proxy_auth[2]
elif len(protocol_and_proxy_auth) == 2:
# 2 elements means we have [<user>, <password>]
proxy_username = protocol_and_proxy_auth[0]
proxy_password = protocol_and_proxy_auth[1]
if proxy_username:
# base64 works on bytes under Python 3: encode the credentials first,
# then decode the result for use in the header string.
user_auth = base64.b64encode(
('%s:%s' % (proxy_username, proxy_password)).encode('utf-8'))
return 'Basic %s\r\n' % (user_auth.decode('utf-8').strip())
else:
return ''
def _get_proxy_net_location(proxy_settings):
"""Returns proxy host and port.
Args:
proxy_settings: String from http_proxy or https_proxy environment variable.
Must be in the form of protocol://[username:password@]host:port
Returns:
String in the form of protocol://host:port
"""
if '@' in proxy_settings:
protocol = proxy_settings.split(':')[0]
netloc = proxy_settings.split('@')[1]
return '%s://%s' % (protocol, netloc)
else:
return proxy_settings
def _send_data_part(data, connection):
# Check to see if data is a file-like object that has a read method.
if hasattr(data, 'read'):
# Read the file and send it a chunk at a time.
while 1:
binarydata = data.read(100000)
if binarydata == '': break
connection.send(binarydata)
return
else:
# The data object was not a file.
# Try to convert the data to bytes and send it.
connection.send(bytes(data, "UTF-8"))
return
|
BorgERP/borg-erp-6of3
|
refs/heads/master
|
server/openerp/pychart/afm/Helvetica_Narrow_Oblique.py
|
15
|
# -*- coding: utf-8 -*-
# AFM font Helvetica-Narrow-Oblique (path: /usr/share/fonts/afms/adobe/phvro8an.afm).
# Derived from Ghostscript distribution.
# Go to www.cs.wisc.edu/~ghost to get the Ghostcript source code.
import dir
dir.afm["Helvetica-Narrow-Oblique"] = (500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 228, 228, 291, 456, 456, 729, 547, 182, 273, 273, 319, 479, 228, 273, 228, 228, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 228, 228, 479, 479, 479, 456, 832, 547, 547, 592, 592, 547, 501, 638, 592, 228, 410, 547, 456, 683, 592, 638, 547, 638, 592, 547, 501, 592, 547, 774, 547, 547, 501, 228, 228, 228, 385, 456, 182, 456, 456, 410, 456, 456, 228, 456, 456, 182, 182, 410, 182, 683, 456, 456, 456, 456, 273, 410, 228, 456, 410, 592, 410, 410, 410, 274, 213, 274, 479, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 273, 456, 456, 137, 456, 456, 456, 456, 157, 273, 456, 273, 273, 410, 410, 500, 456, 456, 456, 228, 500, 440, 287, 182, 273, 273, 456, 820, 820, 500, 501, 500, 273, 273, 273, 273, 273, 273, 273, 273, 500, 273, 273, 500, 273, 273, 273, 820, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 820, 500, 303, 500, 500, 500, 500, 456, 638, 820, 299, 500, 500, 500, 500, 500, 729, 500, 500, 500, 228, 500, 500, 182, 501, 774, 501, )
|
spyder-ide/spyder.line-profiler
|
refs/heads/master
|
spyder_line_profiler/__init__.py
|
2
|
# -*- coding: utf-8 -*-
#
# Copyright © 2013 Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see LICENSE.txt for details)
__version__ = '0.3.0.dev0'
# =============================================================================
# The following statements are required to register this 3rd party plugin:
# =============================================================================
from .lineprofiler import LineProfiler
PLUGIN_CLASS = LineProfiler
|
nunogt/tempest
|
refs/heads/master
|
tempest/api/compute/limits/__init__.py
|
12133432
| |
ThiagoGarciaAlves/intellij-community
|
refs/heads/master
|
python/testData/types/NotImportedModuleInDunderAll/pkg/aaa.py
|
12133432
| |
torchingloom/edx-platform
|
refs/heads/select/release
|
lms/lib/xblock/test/__init__.py
|
12133432
| |
SophiZweig3/Sophie
|
refs/heads/master
|
py/openage/testing/__init__.py
|
12133432
| |
qliu/globe_nocturne
|
refs/heads/master
|
globenocturne/smart_selects/__init__.py
|
12133432
| |
Ernest0x/django-gpgauth
|
refs/heads/master
|
gpgauth/views.py
|
1
|
from django.shortcuts import render_to_response, redirect
from django.http import HttpResponseRedirect
from django.template import RequestContext
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.core.validators import validate_email
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django_gpgauth.gpgauth.forms import RegisterForm, RenewForm, LoginForm
from django_gpgauth.gpgauth.models import PGPkey
from gnupg import GPG
from hashlib import md5
# Utility Functions
def delete_keys(gpg, fingerprints, del_existed=False):
    # Make sure that fingerprints is a list
    if not isinstance(fingerprints, list):
fingerprints = [fingerprints]
for fp in fingerprints:
# Delete key only if it does not exist in database or del_existed is True
try:
key = PGPkey.objects.get(fingerprint = fp)
if del_existed:
gpg.delete_keys(fp)
except ObjectDoesNotExist:
gpg.delete_keys(fp)
def key_import(gpg, keyfile):
if keyfile.size > 100000: # accept files of a normal size
error = 'Key file size is too big'
else:
error = ''
try:
import_result = gpg.import_keys(keyfile.read())
except UnicodeDecodeError:
error = 'There was an error in importing your key'
return error, None
if import_result.count == 0:
error = 'There was an error in importing your key'
if import_result.count > 1: # accept only single-key files
        error = 'Your key file includes more than one key'
delete_keys(gpg, import_result.fingerprints)
if import_result.count == 1:
fp = import_result.fingerprints[0]
if gpg.key_is_expired(fp):
error = 'Your key is expired'
delete_keys(gpg, import_result.fingerprints)
else:
return error, gpg.get_key(fp)
return error, None
def login_common_checks(username):
try:
user = User.objects.get(username = username)
if user.is_active:
gpg = GPG(gpgbinary=settings.GNUPGBINARY, gnupghome=settings.GNUPGHOME)
if not gpg.key_is_expired(user.pgpkey.fingerprint):
key = gpg.get_key(user.pgpkey.fingerprint)
if key['ownertrust'] in settings.TRUST_LEVELS:
error = False
else:
error = 'PGP key for user \'%s\' is not trusted (yet)' % username
else:
error = 'PGP key for user \'%s\' has expired' % username
else:
error = 'Account for user \'%s\' is disabled' % username
gpg = None
except ObjectDoesNotExist:
error = 'User \'%s\' does not exist' % username
user = None
gpg = None
return user, error, gpg
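# Added note: login_common_checks returns the tuple (user, error, gpg); 'error'
# is False only when the user exists, is active, the key is unexpired and its
# ownertrust is in settings.TRUST_LEVELS. Otherwise it carries a message and
# gpg may be None.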
# Views
def register(request):
if request.user.is_authenticated():
return HttpResponseRedirect('/')
error = ''
success = ''
if request.POST:
form = RegisterForm(request.POST, request.FILES)
if form.is_valid():
keyfile = request.FILES['keyfile']
gpg = GPG(gpgbinary=settings.GNUPGBINARY, gnupghome=settings.GNUPGHOME)
(error, imported_key) = key_import(gpg, keyfile)
if not error:
                # Check for user existence in the database to accept registration only for new users
try:
user = User.objects.get(email = imported_key['email'])
error = 'User \'%s\' is already registered' % imported_key['email']
if user.pgpkey.fingerprint != imported_key['fingerprint']:
delete_keys(gpg, imported_key['fingerprint'])
except ObjectDoesNotExist:
newuser = User.objects.create_user(username = imported_key['email'],
email = imported_key['email'],
password = '')
newuser.set_unusable_password()
newuser.save()
pgpkey = PGPkey(user = newuser, name = imported_key['name'], fingerprint = imported_key['fingerprint'])
pgpkey.save()
success = 'You are now registered'
else:
form = RegisterForm()
return render_to_response('register.html',
{ 'form': form,
'error': error,
'success': success }, context_instance = RequestContext(request))
def renew(request, username):
if request.user.is_authenticated():
return HttpResponseRedirect('/')
error = ''
success = ''
try:
validate_email(username)
except:
error = 'Invalid username'
if not error:
(user, error, gpg) = login_common_checks(username)
if not error:
error = 'Your key is not expired yet'
if error.endswith('expired'):
error = ''
if request.POST:
form = RenewForm(request.POST, request.FILES)
if form.is_valid():
if user.pgpkey.renew_passwd != md5(form.cleaned_data['password']).hexdigest():
passwd = User.objects.make_random_password()
user.pgpkey.renew_passwd = md5(passwd).hexdigest()
user.pgpkey.save()
msg = 'Use the following password to renew your key:\n' + passwd
user.email_user('Renew Key', msg)
error = 'Wrong Password. The correct password has been sent to: %s' % username
else:
keyfile = request.FILES['keyfile']
(error, imported_key) = key_import(gpg, keyfile)
if not error:
if imported_key['fingerprint'] == user.pgpkey.fingerprint:
error = 'The uploaded key already exists'
else:
update_user = False
# Check if the email of the uploaded key is already used by a different user
try:
user = User.objects.get(username = imported_key['email'])
if username != imported_key['email']:
error = 'There is another user with username: \'%s\'' % imported_key['email']
delete_keys(gpg, imported_key['fingerprint'])
else:
update_user = True
except:
update_user = True
if update_user:
delete_keys(gpg, user.pgpkey.fingerprint, del_existed=True)
user.pgpkey.fingerprint = imported_key['fingerprint']
user.pgpkey.name = imported_key['name']
user.pgpkey.save()
user.username = imported_key['email']
user.email = imported_key['email']
user.save()
                                success = 'Your key was successfully renewed'
else:
form = RenewForm()
return render_to_response('renew.html',
{ 'form': form,
'error': error,
'success': success,
'username': username }, context_instance = RequestContext(request))
def login_view(request):
if request.user.is_authenticated():
return HttpResponseRedirect('/')
error = ''
password = ''
try:
goto_stage = request.POST['stage']
except:
goto_stage = 'password'
if request.POST:
form = LoginForm(request.POST)
if goto_stage == 'password':
try:
validate_email(request.POST['username'])
except:
error = 'Invalid username'
if not error:
(user, error, gpg) = login_common_checks(request.POST['username'])
if not error:
password = User.objects.make_random_password()
user.set_password(password)
user.save()
password = gpg.encrypt(password, user.pgpkey.fingerprint, always_trust=True) # Trust level is checked earlier
if password.ok:
goto_stage = 'afterpass'
else:
user.set_unusable_password()
user.save()
error = 'Encryption error (%s)' % password.status
elif error.endswith('expired'):
return redirect('/renew/%s' % request.POST['username'])
else:
pass
elif goto_stage == 'afterpass':
if form.is_valid():
                # Run the common checks again to thwart anyone trying to bypass the first login step
(user, error, gpg) = login_common_checks(form.cleaned_data['username'])
if not error:
user = authenticate(username=form.cleaned_data['username'], password = form.cleaned_data['password'] )
if user is not None:
login(request, user)
return HttpResponseRedirect('/')
else:
error = 'Wrong password'
                        form = ''
else:
goto_stage = 'password'
else:
error = 'Invalid username or password'
goto_stage = 'password'
else:
form = LoginForm()
return render_to_response('login.html', { 'form': form,
'goto_stage': goto_stage,
'error': error,
'password': password }, context_instance = RequestContext(request))
def logout_view(request):
logout(request)
return HttpResponseRedirect('/')
|
bitmovin/bitmovin-python
|
refs/heads/master
|
bitmovin/services/encodings/sprite_service.py
|
1
|
from bitmovin.errors import MissingArgumentError
from bitmovin.resources.models import Sprite as SpriteResource
from bitmovin.services.rest_service import RestService
class SpriteService(RestService):
BASE_ENDPOINT_URL = 'encoding/encodings/{encoding_id}/streams/{stream_id}/sprites'
def __init__(self, http_client):
super().__init__(http_client=http_client, relative_url=self.BASE_ENDPOINT_URL, class_=SpriteResource)
def _get_endpoint_url(self, encoding_id, stream_id):
if not encoding_id:
raise MissingArgumentError('encoding_id must be given')
if not stream_id:
raise MissingArgumentError('stream_id must be given')
endpoint_url = self.BASE_ENDPOINT_URL\
.replace('{encoding_id}', encoding_id)\
.replace('{stream_id}', stream_id)
return endpoint_url
def create(self, object_, encoding_id, stream_id):
self.relative_url = self._get_endpoint_url(encoding_id=encoding_id, stream_id=stream_id)
return super().create(object_)
def delete(self, encoding_id, stream_id, sprite_id):
self.relative_url = self._get_endpoint_url(encoding_id=encoding_id, stream_id=stream_id)
return super().delete(id_=sprite_id)
def retrieve(self, encoding_id, stream_id, sprite_id):
self.relative_url = self._get_endpoint_url(encoding_id=encoding_id, stream_id=stream_id)
return super().retrieve(id_=sprite_id)
def list(self, encoding_id, stream_id, offset=None, limit=None):
self.relative_url = self._get_endpoint_url(encoding_id=encoding_id, stream_id=stream_id)
return super().list(offset, limit)
def retrieve_custom_data(self, encoding_id, stream_id, sprite_id):
self.relative_url = self._get_endpoint_url(encoding_id=encoding_id, stream_id=stream_id)
return super().retrieve_custom_data(id_=sprite_id)
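# Hypothetical usage sketch (the client object and IDs below are placeholders,
# not part of this module):
#   service = SpriteService(http_client=my_http_client)
#   sprites = service.list(encoding_id='enc-id', stream_id='stream-id', limit=25)
#   sprite = service.retrieve(encoding_id='enc-id', stream_id='stream-id', sprite_id='sprite-id')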
|
alanljj/oca_hr
|
refs/heads/8.0
|
hr_family/__init__.py
|
22
|
# -*- coding:utf-8 -*-
#
#
# Copyright (C) 2011 Michael Telahun Makonnen <mmakonnen@gmail.com>.
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from . import models
|
virantha/verifytree
|
refs/heads/master
|
verifytree/dir_checksum.py
|
1
|
# Copyright 2015 Virantha Ekanayake All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Class to represent a directory of files and their checksums
"""
import os, logging, copy
from file_checksum import FileChecksum
import yaml, tabulate
from exceptions import *
class Results(object):
def __init__(self):
self.files_total = 0
self.files_new = 0
self.files_deleted = 0
self.files_changed = 0
self.files_validated = 0
self.files_chksum_error = 0
self.files_size_error = 0
self.files_disk_error = 0
self.dirs_total = 0
self.dirs_missing = 0
self.dirs_new = 0
self.directory = ""
def __add__(self, other):
sumr = Results()
for attr in self.__dict__:
if attr.startswith('files') or attr.startswith('dirs'):
sumr.__dict__[attr] = self.__dict__[attr] + other.__dict__[attr]
elif attr == 'directory':
if type(self.__dict__[attr]) is not list:
src = [self.__dict__[attr]]
else:
src = copy.copy(self.__dict__[attr])
if type(other.__dict__[attr]) is list:
src.extend(copy.copy(other.__dict__[attr]))
else:
src.append(other.__dict__[attr])
sumr.__dict__[attr] = src
return sumr
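    # Added note: adding two Results instances sums every files_* / dirs_*
    # counter and accumulates the 'directory' attributes into a flat list,
    # e.g. (r1 + r2).directory ends up as ['/path/a', '/path/b'].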
def __str__ (self):
res = []
headers = [x for x in self.__dict__ if x.startswith('dirs_')]
table = [ [self.__dict__[x] for x in headers] ]
res.append(tabulate.tabulate(table, headers))
headers = [x for x in self.__dict__ if x.startswith('files_')]
table = [ [self.__dict__[x] for x in headers] ]
res.append(tabulate.tabulate(table, headers))
return '\n\n'.join(res)
class DirChecksum(object):
def __init__ (self, path, dbname, work_tally):
self.path = path
self.work_tally = work_tally
if not os.path.exists(self.path):
raise DirectoryMissing('%s does not exist' % self.path)
self.fc = FileChecksum()
self.dbname = dbname
self.results = Results()
self.results.directory = self.path
self.update_hash_files = False
self.force_update_hash_files = False
self.freshen_hash_files = False
def generate_checksum(self, checksum_filename):
root, dirs, files = os.walk(self.path).next()
hashes = { 'dirs': dirs,
'files': {}
}
for filename in files:
entry = self._gen_file_checksum(os.path.join(root,filename))
if filename != self.dbname:
hashes['files'][filename] = entry
self.results.files_new += 1
# Write out the hashes for the current directory
logging.debug(hashes)
return hashes
def _gen_file_checksum(self, filename):
fstat = os.stat(filename)
file_entry = { 'size': fstat.st_size,
'mtime': fstat.st_mtime,
}
_hash = self.fc.get_hash(filename)
if _hash:
file_entry['hash'] = _hash
else:
# Hmm, some kind of error (IOError!)
print("ERROR: file %s disk error while generating checksum" % (filename))
file_entry['hash'] = ""
self.results.files_disk_error += 1
return file_entry
def _load_checksums(self, checksum_file):
with open(checksum_file) as f:
hashes = yaml.load(f)
return hashes
def _save_checksums(self, hashes, checksum_file):
with open(checksum_file, 'w') as f:
f.write(yaml.dump(hashes))
def _check_hashes(self, root, hashes, checksum_file):
file_hashes = copy.deepcopy(hashes['files'])
update = False
#print("Checking %d files" % (len(hashes['files'])))
if self.freshen_hash_files:
for f, stats in hashes['files'].items():
if stats['hash'] == '' or stats['hash'] is None:
full_path = os.path.join(root, f)
self.results.files_new += 1
print("Freshening file %s" % (f))
file_hashes[f] = self._gen_file_checksum(full_path)
update = True
else:
for f, stats in hashes['files'].items():
full_path = os.path.join(root, f)
fstat = os.stat(full_path)
if fstat.st_mtime != int(stats['mtime']):
print("File %s changed, updating hash" % (f))
self.results.files_changed += 1
if self.update_hash_files:
file_hashes[f] = self._gen_file_checksum(full_path)
update = True
elif fstat.st_size != long(stats['size']):
print("ERROR: file %s has changed in size from %s to %s" % (f, stats['size'], fstat.st_size))
self.results.files_size_error += 1
if self.force_update_hash_files:
file_hashes[f] = self._gen_file_checksum(full_path)
update = True
print("Updating checksum to new value")
else:
print("Use -f option and rerun to force new checksum computation to accept changed file and get rid of this error")
else:
# mtime and size look good, so now check the hashes
#print (full_path)
new_hash = self._gen_file_checksum(full_path)
if new_hash['hash'] != stats.get('hash',""):
print("ERROR: file %s hash has changed from %s to %s" % (f, stats['hash'], new_hash['hash']))
self.results.files_chksum_error += 1
if self.force_update_hash_files:
file_hashes[f] = new_hash
update=True
print("Updating checksum to new value")
else:
print("Use -f option and rerun to force new checksum computation to accept changed file and get rid of this error")
else:
self.results.files_validated += 1
if update:
hashes['files'] = file_hashes
self._save_checksums(hashes,checksum_file)
def _are_sub_dirs_same(self, hashes, root, dirs):
self.results.dirs_total += len(dirs)
if 'dirs' in hashes:
# Just a check for backwards compatibility with old versions that
# did not save the subdirectory names
hashes_set = set(hashes['dirs'])
disk_set = set(dirs)
new_dirs = disk_set - hashes_set
if len(new_dirs) != 0:
print("New sub-directories found:")
print ('\n'.join(["- %s" % (os.path.join(root,x)) for x in new_dirs]))
self.results.dirs_new += len(new_dirs)
missing_dirs = hashes_set - disk_set
if len(missing_dirs) != 0:
print("Missing sub-directories from last scan found:")
print ('\n'.join(["- %s" % (os.path.join(root,x)) for x in missing_dirs]))
self.results.dirs_missing += len(missing_dirs)
if disk_set != hashes_set:
                # There were differences, so let's update the hashes
hashes['dirs'] = dirs
return False
else:
return True
else:
            # Ah ha, the hashes file was created by an old version of this program
# so just add it now
hashes['dirs'] = copy.deepcopy(dirs)
self.results.dirs_new += len(dirs)
#print hashes
return False
def _validate_hashes(self, hashes, checksum_file):
file_hashes = hashes['files']
root, dirs, files = os.walk(self.path).next()
# First, make sure the sub-directories previously recorded are all here
if not self._are_sub_dirs_same(hashes, root, dirs):
# Uh oh, sub directory hashes were different, so let's update the hash file
if self.update_hash_files:
self._save_checksums(hashes, checksum_file)
set_filenames_hashes = set(file_hashes.keys())
set_filenames_disk = set(files)
set_filenames_disk.remove(self.dbname)
if set_filenames_hashes != set_filenames_disk: # Uh oh, different number of files on disk vs hash file
# Remove any missing files and mark it
missing_files = set_filenames_hashes - set_filenames_disk
if len(missing_files) > 0: # Files on disk deleted
print("Missing files since last validation")
for f in missing_files:
print(f)
self.results.files_deleted += 1
del file_hashes[f]
# Check all files previously checked minus the missing ones
self._check_hashes(root, hashes, checksum_file)
# Add in the new files since last check
new_files = set_filenames_disk - set_filenames_hashes
if len(new_files) > 0: # New files on disk
print("New files detected since last validation")
for f in new_files:
file_hashes[f] = self._gen_file_checksum(os.path.join(root,f))
self.results.files_new += 1
if self.update_hash_files:
self._save_checksums(hashes, checksum_file)
else:
self._check_hashes(root, hashes, checksum_file)
def tally_dir(self, path):
root, dirs, files = os.walk(path).next()
self.work_tally['dirs'] -= 1
file_size_list = [os.stat(os.path.join(root,f)).st_size for f in files if f != self.dbname]
self.work_tally['files'] -= len(file_size_list)
self.work_tally['size'] -= sum(file_size_list)
def validate(self):
#self.update_hash_files = update_hash_files
checksum_filename = os.path.join(self.path, self.dbname)
print("Remaining: %d dirs, %d files, %7.2fGB" % (self.work_tally['dirs'], self.work_tally['files'], float(self.work_tally['size'])/2**30))
if not os.path.isfile(checksum_filename):
print("Generating checksums for new directory %s" % self.path)
hashes = self.generate_checksum(checksum_filename)
self._save_checksums(hashes, checksum_filename)
else:
#print ("Validating %s " % (self.path))
hashes = self._load_checksums(checksum_filename)
self._validate_hashes(hashes, checksum_filename)
self.tally_dir(self.path)
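# Hypothetical usage sketch (paths and tally values are illustrative only):
#   tally = {'dirs': 10, 'files': 250, 'size': 2**30}
#   dc = DirChecksum('/data/photos', '.checksums.yaml', tally)
#   dc.update_hash_files = True   # persist updated hashes for changed files
#   dc.validate()
#   print(dc.results)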
|
wongma7/kubernetes
|
refs/heads/master
|
vendor/github.com/ugorji/go/codec/test.py
|
1516
|
#!/usr/bin/env python
# This will create golden files in a directory passed to it.
# A Test calls this internally to create the golden files
# So it can process them (so we don't have to check in the files).
# Ensure msgpack-python and cbor are installed first, using:
# sudo apt-get install python-dev
# sudo apt-get install python-pip
# pip install --user msgpack-python msgpack-rpc-python cbor
# Ensure all "string" keys are utf strings (else encoded as bytes)
import cbor, msgpack, msgpackrpc, sys, os, threading
def get_test_data_list():
# get list with all primitive types, and a combo type
l0 = [
-8,
-1616,
-32323232,
-6464646464646464,
192,
1616,
32323232,
6464646464646464,
192,
-3232.0,
-6464646464.0,
3232.0,
6464.0,
6464646464.0,
False,
True,
u"null",
None,
u"someday",
1328176922000002000,
u"",
-2206187877999998000,
u"bytestring",
270,
u"none",
-2013855847999995777,
#-6795364578871345152,
]
l1 = [
{ "true": True,
"false": False },
{ "true": u"True",
"false": False,
"uint16(1616)": 1616 },
{ "list": [1616, 32323232, True, -3232.0, {"TRUE":True, "FALSE":False}, [True, False] ],
"int32":32323232, "bool": True,
"LONG STRING": u"123456789012345678901234567890123456789012345678901234567890",
"SHORT STRING": u"1234567890" },
{ True: "true", 138: False, "false": 200 }
]
l = []
l.extend(l0)
l.append(l0)
l.append(1)
l.extend(l1)
return l
def build_test_data(destdir):
l = get_test_data_list()
for i in range(len(l)):
# packer = msgpack.Packer()
serialized = msgpack.dumps(l[i])
f = open(os.path.join(destdir, str(i) + '.msgpack.golden'), 'wb')
f.write(serialized)
f.close()
serialized = cbor.dumps(l[i])
f = open(os.path.join(destdir, str(i) + '.cbor.golden'), 'wb')
f.write(serialized)
f.close()
def doRpcServer(port, stopTimeSec):
class EchoHandler(object):
def Echo123(self, msg1, msg2, msg3):
return ("1:%s 2:%s 3:%s" % (msg1, msg2, msg3))
def EchoStruct(self, msg):
return ("%s" % msg)
addr = msgpackrpc.Address('localhost', port)
server = msgpackrpc.Server(EchoHandler())
server.listen(addr)
# run thread to stop it after stopTimeSec seconds if > 0
if stopTimeSec > 0:
def myStopRpcServer():
server.stop()
t = threading.Timer(stopTimeSec, myStopRpcServer)
t.start()
server.start()
def doRpcClientToPythonSvc(port):
address = msgpackrpc.Address('localhost', port)
client = msgpackrpc.Client(address, unpack_encoding='utf-8')
print client.call("Echo123", "A1", "B2", "C3")
print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doRpcClientToGoSvc(port):
# print ">>>> port: ", port, " <<<<<"
address = msgpackrpc.Address('localhost', port)
client = msgpackrpc.Client(address, unpack_encoding='utf-8')
print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"])
print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doMain(args):
if len(args) == 2 and args[0] == "testdata":
build_test_data(args[1])
elif len(args) == 3 and args[0] == "rpc-server":
doRpcServer(int(args[1]), int(args[2]))
elif len(args) == 2 and args[0] == "rpc-client-python-service":
doRpcClientToPythonSvc(int(args[1]))
elif len(args) == 2 and args[0] == "rpc-client-go-service":
doRpcClientToGoSvc(int(args[1]))
else:
print("Usage: test.py " +
"[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...")
if __name__ == "__main__":
doMain(sys.argv[1:])
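# Example invocations (illustrative, mirroring the usage string above):
#   python test.py testdata /tmp/golden        # write N.msgpack.golden / N.cbor.golden files
#   python test.py rpc-server 9000 30          # msgpack-rpc echo server, stops after 30 seconds
#   python test.py rpc-client-go-service 9000  # call a Go echo service listening on port 9000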
|
ThomasFeher/audacity
|
refs/heads/master
|
lib-src/lv2/lilv/waflib/Tools/intltool.py
|
330
|
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os,re
from waflib import Configure,TaskGen,Task,Utils,Runner,Options,Build,Logs
import waflib.Tools.ccroot
from waflib.TaskGen import feature,before_method
from waflib.Logs import error
@before_method('process_source')
@feature('intltool_in')
def apply_intltool_in_f(self):
try:self.meths.remove('process_source')
except ValueError:pass
if not self.env.LOCALEDIR:
self.env.LOCALEDIR=self.env.PREFIX+'/share/locale'
for i in self.to_list(self.source):
node=self.path.find_resource(i)
podir=getattr(self,'podir','po')
podirnode=self.path.find_dir(podir)
if not podirnode:
error("could not find the podir %r"%podir)
continue
cache=getattr(self,'intlcache','.intlcache')
self.env['INTLCACHE']=os.path.join(self.path.bldpath(),podir,cache)
self.env['INTLPODIR']=podirnode.bldpath()
self.env['INTLFLAGS']=getattr(self,'flags',['-q','-u','-c'])
task=self.create_task('intltool',node,node.change_ext(''))
inst=getattr(self,'install_path','${LOCALEDIR}')
if inst:
self.bld.install_files(inst,task.outputs)
@feature('intltool_po')
def apply_intltool_po(self):
try:self.meths.remove('process_source')
except ValueError:pass
if not self.env.LOCALEDIR:
self.env.LOCALEDIR=self.env.PREFIX+'/share/locale'
appname=getattr(self,'appname','set_your_app_name')
podir=getattr(self,'podir','')
inst=getattr(self,'install_path','${LOCALEDIR}')
linguas=self.path.find_node(os.path.join(podir,'LINGUAS'))
if linguas:
file=open(linguas.abspath())
langs=[]
for line in file.readlines():
if not line.startswith('#'):
langs+=line.split()
file.close()
re_linguas=re.compile('[-a-zA-Z_@.]+')
for lang in langs:
if re_linguas.match(lang):
node=self.path.find_resource(os.path.join(podir,re_linguas.match(lang).group()+'.po'))
task=self.create_task('po',node,node.change_ext('.mo'))
if inst:
filename=task.outputs[0].name
(langname,ext)=os.path.splitext(filename)
inst_file=inst+os.sep+langname+os.sep+'LC_MESSAGES'+os.sep+appname+'.mo'
self.bld.install_as(inst_file,task.outputs[0],chmod=getattr(self,'chmod',Utils.O644),env=task.env)
else:
Logs.pprint('RED',"Error no LINGUAS file found in po directory")
class po(Task.Task):
run_str='${MSGFMT} -o ${TGT} ${SRC}'
color='BLUE'
class intltool(Task.Task):
run_str='${INTLTOOL} ${INTLFLAGS} ${INTLCACHE} ${INTLPODIR} ${SRC} ${TGT}'
color='BLUE'
def configure(conf):
conf.find_program('msgfmt',var='MSGFMT')
conf.find_perl_program('intltool-merge',var='INTLTOOL')
prefix=conf.env.PREFIX
datadir=conf.env.DATADIR
if not datadir:
datadir=os.path.join(prefix,'share')
conf.define('LOCALEDIR',os.path.join(datadir,'locale').replace('\\','\\\\'))
conf.define('DATADIR',datadir.replace('\\','\\\\'))
if conf.env.CC or conf.env.CXX:
conf.check(header_name='locale.h')
|
nagyistoce/odoo-dev-odoo
|
refs/heads/8.0
|
addons/mass_mailing/models/res_config.py
|
385
|
# -*- coding: utf-8 -*-
from openerp.osv import fields, osv
class MassMailingConfiguration(osv.TransientModel):
_name = 'marketing.config.settings'
_inherit = 'marketing.config.settings'
_columns = {
'group_mass_mailing_campaign': fields.boolean(
'Manage Mass Mailing using Campaign',
implied_group='mass_mailing.group_mass_mailing_campaign',
help="""Manage mass mailign using Campaigns"""),
}
|
ghm1/ardupilotIRLock
|
refs/heads/Copter-3.3
|
Tools/autotest/param_metadata/xmlemit.py
|
238
|
#!/usr/bin/env python
from xml.sax.saxutils import escape, quoteattr
from param import *
from emit import Emit
# Emit APM documentation in a machine-readable XML format
class XmlEmit(Emit):
def __init__(self):
wiki_fname = 'apm.pdef.xml'
self.f = open(wiki_fname, mode='w')
preamble = '''<?xml version="1.0" encoding="utf-8"?>
<!-- Dynamically generated list of documented parameters (generated by param_parse.py) -->
<paramfile>
<vehicles>
'''
self.f.write(preamble)
def close(self):
self.f.write('</libraries>')
self.f.write('''</paramfile>\n''')
        self.f.close()
def emit_comment(self, s):
self.f.write("<!-- " + s + " -->")
def start_libraries(self):
self.f.write('</vehicles>')
self.f.write('<libraries>')
def emit(self, g, f):
t = '''<parameters name=%s>\n''' % quoteattr(g.name) # i.e. ArduPlane
for param in g.params:
# Begin our parameter node
if hasattr(param, 'DisplayName'):
t += '<param humanName=%s name=%s' % (quoteattr(param.DisplayName),quoteattr(param.name)) # i.e. ArduPlane (ArduPlane:FOOPARM)
else:
t += '<param name=%s' % quoteattr(param.name)
if hasattr(param, 'Description'):
t += ' documentation=%s' % quoteattr(param.Description) # i.e. parameter docs
if hasattr(param, 'User'):
t += ' user=%s' % quoteattr(param.User) # i.e. Standard or Advanced
t += ">\n"
            # Add values as children of this node
for field in param.__dict__.keys():
if field not in ['name', 'DisplayName', 'Description', 'User'] and field in known_param_fields:
if field == 'Values' and Emit.prog_values_field.match(param.__dict__[field]):
t+= "<values>\n"
values = (param.__dict__[field]).split(',')
for value in values:
v = value.split(':')
t+='''<value code=%s>%s</value>\n''' % (quoteattr(v[0]), escape(v[1])) # i.e. numeric value, string label
t += "</values>\n"
else:
t += '''<field name=%s>%s</field>\n''' % (quoteattr(field), escape(param.__dict__[field])) # i.e. Range: 0 10
t += '''</param>\n'''
t += '''</parameters>\n'''
#print t
self.f.write(t)
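# Illustrative shape of the XML emitted above (attribute values are placeholders):
#   <parameters name="ArduPlane">
#     <param humanName="..." name="FOOPARM" documentation="..." user="Standard">
#       <values>
#         <value code="0">Disabled</value>
#       </values>
#       <field name="Range">0 10</field>
#     </param>
#   </parameters>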
|
linjoahow/2015cdaa-w11
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/browser/timer.py
|
610
|
import sys
from browser import window
def wrap(func):
    # Wraps func in a function that reports any exception to sys.stderr
    # instead of letting it be silently swallowed by the browser timer
def f(*args, **kw):
try:
return func(*args, **kw)
except Exception as exc:
            sys.stderr.write(str(exc))
return f
clear_interval = window.clearInterval
clear_timeout = window.clearTimeout
def set_interval(func,interval):
return window.setInterval(wrap(func),interval)
def set_timeout(func,interval):
return int(window.setTimeout(wrap(func),interval))
def request_animation_frame(func):
return int(window.requestAnimationFrame(func))
def cancel_animation_frame(int_id):
window.cancelAnimationFrame(int_id)
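# Illustrative usage (assumes a Brython page context):
#   def tick():
#       print('tick')
#   timer_id = set_timeout(tick, 1000)   # run tick() once after ~1 second
#   clear_timeout(timer_id)              # cancel it before it fires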
|
gwpy/gwpy.github.io
|
refs/heads/master
|
docs/latest/examples/miscellaneous/range-timeseries-4.py
|
3
|
plot = h1range.plot(label='LIGO-Hanford', color='gwpy:ligo-hanford',
figsize=(12, 5))
ax = plot.gca()
ax.plot(l1range, label='LIGO-Livingston', color='gwpy:ligo-livingston')
ax.set_ylabel('Angle-averaged sensitive distance [Mpc]')
ax.set_title('LIGO sensitivity to BNS around GW150914')
ax.set_epoch(1126259462) # <- set 0 on plot to GW150914
ax.legend()
plot.show()
|
Honzin/ccs
|
refs/heads/master
|
tests/testResponse/testPoloniex/testTicker.py
|
1
|
import unittest
import ccs
import datetime
####################################################################################################################
# POLONIEX #
####################################################################################################################
class Valid(unittest.TestCase):
def setUp(self):
self.tz = datetime.timezone.utc
self.json = '{"BTC_BBR":{"id":6,"last":"0.00008635","lowestAsk":"0.00008630","highestBid":"0.00008530","percentChange":"-0.01054199","baseVolume":"2.11645424","quoteVolume":"24981.30391249","isFrozen":"0","high24hr":"0.00008849","low24hr":"0.00008101"},"BTC_BCN":{"id":7,"last":"0.00000005","lowestAsk":"0.00000006","highestBid":"0.00000005","percentChange":"0.00000000","baseVolume":"0.37978404","quoteVolume":"7017786.39329961","isFrozen":"0","high24hr":"0.00000006","low24hr":"0.00000005"},"BTC_BELA":{"id":8,"last":"0.00001303","lowestAsk":"0.00001304","highestBid":"0.00001296","percentChange":"0.01086113","baseVolume":"2.38734134","quoteVolume":"185473.32707926","isFrozen":"0","high24hr":"0.00001343","low24hr":"0.00001270"},"BTC_BITS":{"id":9,"last":"0.00000666","lowestAsk":"0.00000667","highestBid":"0.00000666","percentChange":"0.05379746","baseVolume":"1.02445851","quoteVolume":"159086.24388984","isFrozen":"0","high24hr":"0.00000696","low24hr":"0.00000597"},"BTC_BLK":{"id":10,"last":"0.00003196","lowestAsk":"0.00003206","highestBid":"0.00003195","percentChange":"-0.00249687","baseVolume":"0.88219567","quoteVolume":"27558.82317330","isFrozen":"0","high24hr":"0.00003269","low24hr":"0.00003170"},"BTC_BTCD":{"id":12,"last":"0.00451834","lowestAsk":"0.00457362","highestBid":"0.00451834","percentChange":"-0.03563485","baseVolume":"3.61997463","quoteVolume":"787.94449925","isFrozen":"0","high24hr":"0.00482240","low24hr":"0.00430001"},"BTC_BTM":{"id":13,"last":"0.00010667","lowestAsk":"0.00010666","highestBid":"0.00010501","percentChange":"0.11743138","baseVolume":"18.10040020","quoteVolume":"185836.04203701","isFrozen":"0","high24hr":"0.00010895","low24hr":"0.00008818"},"BTC_BTS":{"id":14,"last":"0.00000431","lowestAsk":"0.00000431","highestBid":"0.00000430","percentChange":"-0.03363228","baseVolume":"33.80647566","quoteVolume":"7751009.21707261","isFrozen":"0","high24hr":"0.00000454","low24hr":"0.00000427"},"BTC_BURST":{"id":15,"last":"0.00000061","lowestAsk":"0.00000061","highestBid":"0.00000060","percentChange":"0.01666666","baseVolume":"4.68025771","quoteVolume":"7757340.67652101","isFrozen":"0","high24hr":"0.00000062","low24hr":"0.00000058"},"BTC_C2":{"id":16,"last":"0.00000104","lowestAsk":"0.00000105","highestBid":"0.00000104","percentChange":"-0.01886792","baseVolume":"1.04815261","quoteVolume":"1003954.16645076","isFrozen":"0","high24hr":"0.00000107","low24hr":"0.00000101"},"BTC_CLAM":{"id":20,"last":"0.00101821","lowestAsk":"0.00101820","highestBid":"0.00101221","percentChange":"0.01819981","baseVolume":"12.82409171","quoteVolume":"12581.30944441","isFrozen":"0","high24hr":"0.00107416","low24hr":"0.00098714"},"BTC_CURE":{"id":22,"last":"0.00003915","lowestAsk":"0.00003912","highestBid":"0.00003873","percentChange":"-0.00330957","baseVolume":"1.26509319","quoteVolume":"31729.42080004","isFrozen":"0","high24hr":"0.00004092","low24hr":"0.00003814"},"BTC_DASH":{"id":24,"last":"0.01650001","lowestAsk":"0.01652300","highestBid":"0.01650001","percentChange":"0.05141880","baseVolume":"711.42090333","quoteVolume":"44002.82091258","isFrozen":"0","high24hr":"0.01652370","low24hr":"0.01560054"},"BTC_DGB":{"id":25,"last":"0.00000030","lowestAsk":"0.00000031","highestBid":"0.00000030","percentChange":"-0.03225806","baseVolume":"14.73799079","quoteVolume":"47874151.51327055","isFrozen":"0","high24hr":"0.00000032","low24hr":"0.00000029"},"BTC_DOGE":{"id":27,"last":"0.00000024","lowestAsk":"0.00000024","highestBid":"0.00000023","percentChange":"0.04347826","baseVolume":"25.66215714","quoteVolume":"10770
6476.62933750","isFrozen":"0","high24hr":"0.00000025","low24hr":"0.00000023"},"BTC_EMC2":{"id":28,"last":"0.00000118","lowestAsk":"0.00000120","highestBid":"0.00000119","percentChange":"0.07272727","baseVolume":"0.75271627","quoteVolume":"639575.31362296","isFrozen":"0","high24hr":"0.00000120","low24hr":"0.00000113"},"BTC_FLDC":{"id":31,"last":"0.00000135","lowestAsk":"0.00000134","highestBid":"0.00000132","percentChange":"0.02272727","baseVolume":"5.03019341","quoteVolume":"3719487.72766819","isFrozen":"0","high24hr":"0.00000144","low24hr":"0.00000126"},"BTC_FLO":{"id":32,"last":"0.00000384","lowestAsk":"0.00000386","highestBid":"0.00000383","percentChange":"0.04632152","baseVolume":"1.47974167","quoteVolume":"380109.34133527","isFrozen":"0","high24hr":"0.00000405","low24hr":"0.00000376"},"BTC_GAME":{"id":38,"last":"0.00019025","lowestAsk":"0.00019025","highestBid":"0.00018961","percentChange":"-0.03082017","baseVolume":"35.89042784","quoteVolume":"186211.45131562","isFrozen":"0","high24hr":"0.00019740","low24hr":"0.00018880"},"BTC_GRC":{"id":40,"last":"0.00000603","lowestAsk":"0.00000603","highestBid":"0.00000599","percentChange":"-0.05039370","baseVolume":"1.67731366","quoteVolume":"277488.75695017","isFrozen":"0","high24hr":"0.00000649","low24hr":"0.00000599"},"BTC_HUC":{"id":43,"last":"0.00001702","lowestAsk":"0.00001720","highestBid":"0.00001701","percentChange":"0.24324324","baseVolume":"12.14608138","quoteVolume":"683538.58617888","isFrozen":"0","high24hr":"0.00001995","low24hr":"0.00001346"},"BTC_HZ":{"id":46,"last":"0.00000026","lowestAsk":"0.00000027","highestBid":"0.00000026","percentChange":"0.04000000","baseVolume":"1.81494649","quoteVolume":"6619876.77802451","isFrozen":"0","high24hr":"0.00000029","low24hr":"0.00000025"},"BTC_LTC":{"id":50,"last":"0.00429301","lowestAsk":"0.00430697","highestBid":"0.00429301","percentChange":"-0.03156814","baseVolume":"375.12727332","quoteVolume":"85758.17694889","isFrozen":"0","high24hr":"0.00447070","low24hr":"0.00426402"},"BTC_MAID":{"id":51,"last":"0.00013390","lowestAsk":"0.00013421","highestBid":"0.00013390","percentChange":"0.01385628","baseVolume":"384.37829357","quoteVolume":"2881772.44253851","isFrozen":"0","high24hr":"0.00013800","low24hr":"0.00012810"},"BTC_OMNI":{"id":58,"last":"0.00261903","lowestAsk":"0.00262146","highestBid":"0.00261903","percentChange":"0.01293713","baseVolume":"0.70550321","quoteVolume":"270.40895350","isFrozen":"0","high24hr":"0.00262146","low24hr":"0.00251265"},"BTC_MYR":{"id":59,"last":"0.00000024","lowestAsk":"0.00000025","highestBid":"0.00000023","percentChange":"-0.07692307","baseVolume":"4.16572707","quoteVolume":"16287042.80830805","isFrozen":"0","high24hr":"0.00000028","low24hr":"0.00000023"},"BTC_NAUT":{"id":60,"last":"0.00004490","lowestAsk":"0.00004500","highestBid":"0.00004490","percentChange":"0.01952770","baseVolume":"3.79830277","quoteVolume":"85267.91745584","isFrozen":"0","high24hr":"0.00004600","low24hr":"0.00004300"},"BTC_NAV":{"id":61,"last":"0.00004493","lowestAsk":"0.00004494","highestBid":"0.00004443","percentChange":"0.01905193","baseVolume":"20.79124252","quoteVolume":"467750.22508942","isFrozen":"0","high24hr":"0.00004500","low24hr":"0.00004344"},"BTC_NEOS":{"id":63,"last":"0.00007953","lowestAsk":"0.00008066","highestBid":"0.00007905","percentChange":"0.10827759","baseVolume":"4.36444448","quoteVolume":"53905.84464582","isFrozen":"0","high24hr":"0.00009202","low24hr":"0.00007072"},"BTC_NMC":{"id":64,"last":"0.00026173","lowestAsk":"0.00026195","highestBid":"0.00026173","percentCha
nge":"0.00669256","baseVolume":"0.56752066","quoteVolume":"2167.83746283","isFrozen":"0","high24hr":"0.00026603","low24hr":"0.00025763"},"BTC_NOBL":{"id":65,"last":"0.00000008","lowestAsk":"0.00000008","highestBid":"0.00000007","percentChange":"0.00000000","baseVolume":"0.21492833","quoteVolume":"2760140.20716214","isFrozen":"0","high24hr":"0.00000008","low24hr":"0.00000007"},"BTC_NOTE":{"id":66,"last":"0.00000514","lowestAsk":"0.00000514","highestBid":"0.00000508","percentChange":"0.05761316","baseVolume":"1.28775969","quoteVolume":"257396.96274120","isFrozen":"0","high24hr":"0.00000539","low24hr":"0.00000472"},"BTC_NSR":{"id":68,"last":"0.00000019","lowestAsk":"0.00000020","highestBid":"0.00000019","percentChange":"-0.09523809","baseVolume":"2.94007444","quoteVolume":"14793309.38446928","isFrozen":"0","high24hr":"0.00000021","low24hr":"0.00000019"},"BTC_NXT":{"id":69,"last":"0.00000648","lowestAsk":"0.00000650","highestBid":"0.00000647","percentChange":"0.01408450","baseVolume":"26.38565986","quoteVolume":"4063529.76690177","isFrozen":"0","high24hr":"0.00000668","low24hr":"0.00000631"},"BTC_PINK":{"id":73,"last":"0.00000055","lowestAsk":"0.00000055","highestBid":"0.00000054","percentChange":"0.00000000","baseVolume":"1.16418388","quoteVolume":"2143985.17636690","isFrozen":"0","high24hr":"0.00000057","low24hr":"0.00000053"},"BTC_POT":{"id":74,"last":"0.00002004","lowestAsk":"0.00002020","highestBid":"0.00002004","percentChange":"-0.03930968","baseVolume":"106.89676519","quoteVolume":"5227734.63479616","isFrozen":"0","high24hr":"0.00002177","low24hr":"0.00001977"},"BTC_PPC":{"id":75,"last":"0.00032449","lowestAsk":"0.00032427","highestBid":"0.00032250","percentChange":"-0.02832759","baseVolume":"2.01045616","quoteVolume":"6139.09751135","isFrozen":"0","high24hr":"0.00033395","low24hr":"0.00032012"},"BTC_QBK":{"id":78,"last":"0.00016947","lowestAsk":"0.00016717","highestBid":"0.00016431","percentChange":"0.09004952","baseVolume":"0.67721937","quoteVolume":"4090.87434721","isFrozen":"0","high24hr":"0.00017966","low24hr":"0.00015009"},"BTC_QORA":{"id":79,"last":"0.00000008","lowestAsk":"0.00000008","highestBid":"0.00000007","percentChange":"0.00000000","baseVolume":"1.20468749","quoteVolume":"15137505.01045623","isFrozen":"0","high24hr":"0.00000009","low24hr":"0.00000007"},"BTC_QTL":{"id":80,"last":"0.00001444","lowestAsk":"0.00001449","highestBid":"0.00001444","percentChange":"0.01191310","baseVolume":"0.64093246","quoteVolume":"44109.21698551","isFrozen":"0","high24hr":"0.00001524","low24hr":"0.00001420"},"BTC_RBY":{"id":81,"last":"0.00024429","lowestAsk":"0.00024434","highestBid":"0.00024428","percentChange":"0.01432486","baseVolume":"1.89314047","quoteVolume":"7762.88796677","isFrozen":"0","high24hr":"0.00024611","low24hr":"0.00024084"},"BTC_RIC":{"id":83,"last":"0.00001144","lowestAsk":"0.00001144","highestBid":"0.00001124","percentChange":"0.07721280","baseVolume":"5.56867536","quoteVolume":"463345.05611026","isFrozen":"0","high24hr":"0.00001372","low24hr":"0.00001000"},"BTC_SDC":{"id":84,"last":"0.00180500","lowestAsk":"0.00180974","highestBid":"0.00180500","percentChange":"-0.04374407","baseVolume":"13.54575010","quoteVolume":"7532.93533821","isFrozen":"0","high24hr":"0.00188807","low24hr":"0.00175197"},"BTC_SJCX":{"id":86,"last":"0.00016954","lowestAsk":"0.00016954","highestBid":"0.00016849","percentChange":"0.18286471","baseVolume":"49.06733747","quoteVolume":"297639.28606187","isFrozen":"0","high24hr":"0.00018284","low24hr":"0.00013728"},"BTC_STR":{"id":89,"last":"0.00000279","lowes
tAsk":"0.00000279","highestBid":"0.00000278","percentChange":"-0.02447552","baseVolume":"115.23419696","quoteVolume":"42151354.76938477","isFrozen":"0","high24hr":"0.00000292","low24hr":"0.00000263"},"BTC_SYS":{"id":92,"last":"0.00000949","lowestAsk":"0.00000949","highestBid":"0.00000942","percentChange":"0.00529661","baseVolume":"15.71299356","quoteVolume":"1666032.15506082","isFrozen":"0","high24hr":"0.00000977","low24hr":"0.00000915"},"BTC_UNITY":{"id":95,"last":"0.00251663","lowestAsk":"0.00259995","highestBid":"0.00249995","percentChange":"0.09860526","baseVolume":"0.90393126","quoteVolume":"362.59841109","isFrozen":"0","high24hr":"0.00261936","low24hr":"0.00225420"},"BTC_VIA":{"id":97,"last":"0.00003600","lowestAsk":"0.00003623","highestBid":"0.00003600","percentChange":"-0.00853759","baseVolume":"1.69466689","quoteVolume":"47456.37417358","isFrozen":"0","high24hr":"0.00003714","low24hr":"0.00003471"},"BTC_XVC":{"id":98,"last":"0.00004590","lowestAsk":"0.00004590","highestBid":"0.00004545","percentChange":"-0.01120206","baseVolume":"1.05009308","quoteVolume":"22592.04079888","isFrozen":"0","high24hr":"0.00004730","low24hr":"0.00004481"},"BTC_VRC":{"id":99,"last":"0.00002473","lowestAsk":"0.00002485","highestBid":"0.00002473","percentChange":"0.00121457","baseVolume":"1.18260459","quoteVolume":"48563.32444985","isFrozen":"0","high24hr":"0.00002489","low24hr":"0.00002313"},"BTC_VTC":{"id":100,"last":"0.00003357","lowestAsk":"0.00003456","highestBid":"0.00003357","percentChange":"-0.06775895","baseVolume":"7.19565351","quoteVolume":"191719.88409985","isFrozen":"0","high24hr":"0.00004010","low24hr":"0.00003350"},"BTC_XBC":{"id":104,"last":"0.00196572","lowestAsk":"0.00198837","highestBid":"0.00194671","percentChange":"0.02891419","baseVolume":"5.07747043","quoteVolume":"2319.88980524","isFrozen":"0","high24hr":"0.00239836","low24hr":"0.00188202"},"BTC_XCP":{"id":108,"last":"0.00223457","lowestAsk":"0.00224268","highestBid":"0.00223457","percentChange":"-0.02500141","baseVolume":"20.32868447","quoteVolume":"9098.50892615","isFrozen":"0","high24hr":"0.00230000","low24hr":"0.00216275"},"BTC_XEM":{"id":112,"last":"0.00000401","lowestAsk":"0.00000402","highestBid":"0.00000401","percentChange":"-0.00248756","baseVolume":"38.38975543","quoteVolume":"9562328.95120402","isFrozen":"0","high24hr":"0.00000414","low24hr":"0.00000388"},"BTC_XMG":{"id":113,"last":"0.00002368","lowestAsk":"0.00002396","highestBid":"0.00002368","percentChange":"0.00381517","baseVolume":"0.89012427","quoteVolume":"36331.66920583","isFrozen":"0","high24hr":"0.00002645","low24hr":"0.00002327"},"BTC_XMR":{"id":114,"last":"0.01336000","lowestAsk":"0.01336500","highestBid":"0.01334980","percentChange":"-0.01822457","baseVolume":"1743.98093429","quoteVolume":"129674.24663044","isFrozen":"0","high24hr":"0.01374708","low24hr":"0.01315000"},"BTC_XPM":{"id":116,"last":"0.00005590","lowestAsk":"0.00005668","highestBid":"0.00005587","percentChange":"-0.00480683","baseVolume":"2.32500042","quoteVolume":"40715.63849309","isFrozen":"0","high24hr":"0.00006241","low24hr":"0.00005517"},"BTC_XRP":{"id":117,"last":"0.00000757","lowestAsk":"0.00000757","highestBid":"0.00000754","percentChange":"-0.02699228","baseVolume":"456.13163721","quoteVolume":"59610366.65714734","isFrozen":"0","high24hr":"0.00000790","low24hr":"0.00000743"},"USDT_BTC":{"id":121,"last":"888.00005020","lowestAsk":"888.00005020","highestBid":"888.00005007","percentChange":"0.03617275","baseVolume":"1292844.66938989","quoteVolume":"1470.67214446","isFrozen":"0","high24hr":"8
99.98000000","low24hr":"850.00000000"},"USDT_DASH":{"id":122,"last":"14.74805612","lowestAsk":"14.74806130","highestBid":"14.72000000","percentChange":"0.09602081","baseVolume":"27604.94873539","quoteVolume":"1942.87851462","isFrozen":"0","high24hr":"14.89999906","low24hr":"13.41468572"},"USDT_LTC":{"id":123,"last":"3.84534456","lowestAsk":"3.84534452","highestBid":"3.82821074","percentChange":"0.00637437","baseVolume":"7540.10204089","quoteVolume":"1958.39854700","isFrozen":"0","high24hr":"3.91000000","low24hr":"3.80000000"},"USDT_NXT":{"id":124,"last":"0.00580706","lowestAsk":"0.00580706","highestBid":"0.00576827","percentChange":"0.02962056","baseVolume":"2102.64303066","quoteVolume":"366136.07037977","isFrozen":"0","high24hr":"0.00586072","low24hr":"0.00550000"},"USDT_STR":{"id":125,"last":"0.00247838","lowestAsk":"0.00247717","highestBid":"0.00245797","percentChange":"-0.02426752","baseVolume":"555.16886508","quoteVolume":"228199.11447927","isFrozen":"0","high24hr":"0.00253000","low24hr":"0.00230000"},"USDT_XMR":{"id":126,"last":"11.89888000","lowestAsk":"11.89888000","highestBid":"11.83330444","percentChange":"0.02566494","baseVolume":"59304.22953897","quoteVolume":"4988.97341933","isFrozen":"0","high24hr":"12.24000000","low24hr":"11.50000000"},"USDT_XRP":{"id":127,"last":"0.00669698","lowestAsk":"0.00673797","highestBid":"0.00669000","percentChange":"-0.00793564","baseVolume":"10291.40845269","quoteVolume":"1526390.75038825","isFrozen":"0","high24hr":"0.00684869","low24hr":"0.00660001"},"XMR_BBR":{"id":128,"last":"0.00652019","lowestAsk":"0.00648766","highestBid":"0.00638101","percentChange":"0.02148022","baseVolume":"5.15513338","quoteVolume":"806.39705582","isFrozen":"0","high24hr":"0.00661663","low24hr":"0.00624230"},"XMR_BCN":{"id":129,"last":"0.00000402","lowestAsk":"0.00000402","highestBid":"0.00000396","percentChange":"0.02030456","baseVolume":"4.88896376","quoteVolume":"1200339.22402296","isFrozen":"0","high24hr":"0.00000450","low24hr":"0.00000385"},"XMR_BLK":{"id":130,"last":"0.00239901","lowestAsk":"0.00239582","highestBid":"0.00238032","percentChange":"0.02942362","baseVolume":"2.90087791","quoteVolume":"1228.21784998","isFrozen":"0","high24hr":"0.00241024","low24hr":"0.00232780"},"XMR_BTCD":{"id":131,"last":"0.33870852","lowestAsk":"0.34050491","highestBid":"0.33763182","percentChange":"0.01845109","baseVolume":"13.11109140","quoteVolume":"38.92625988","isFrozen":"0","high24hr":"0.35651702","low24hr":"0.31937106"},"XMR_DASH":{"id":132,"last":"1.23596910","lowestAsk":"1.23723326","highestBid":"1.23600000","percentChange":"0.07264288","baseVolume":"414.23907854","quoteVolume":"342.26183442","isFrozen":"0","high24hr":"1.23723326","low24hr":"1.15101902"},"XMR_LTC":{"id":137,"last":"0.32190700","lowestAsk":"0.32267335","highestBid":"0.32190700","percentChange":"-0.01964847","baseVolume":"243.04408132","quoteVolume":"745.08887374","isFrozen":"0","high24hr":"0.33388527","low24hr":"0.31696237"},"XMR_MAID":{"id":138,"last":"0.01002517","lowestAsk":"0.01008608","highestBid":"0.01002517","percentChange":"-0.00115973","baseVolume":"28.09623230","quoteVolume":"2852.22084155","isFrozen":"0","high24hr":"0.01022643","low24hr":"0.00954149"},"XMR_NXT":{"id":140,"last":"0.00048994","lowestAsk":"0.00048642","highestBid":"0.00048350","percentChange":"0.06167114","baseVolume":"50.38362305","quoteVolume":"103367.00495534","isFrozen":"0","high24hr":"0.00049673","low24hr":"0.00047000"},"XMR_QORA":{"id":141,"last":"0.00000589","lowestAsk":"0.00000598","highestBid":"0.00000592","percentChange":"-0.
07244094","baseVolume":"13.61027478","quoteVolume":"2264218.51127596","isFrozen":"0","high24hr":"0.00000618","low24hr":"0.00000563"},"BTC_IOC":{"id":143,"last":"0.00038511","lowestAsk":"0.00038966","highestBid":"0.00038548","percentChange":"0.01518386","baseVolume":"7.77888376","quoteVolume":"19959.68676988","isFrozen":"0","high24hr":"0.00040843","low24hr":"0.00036819"},"BTC_ETH":{"id":148,"last":"0.01143566","lowestAsk":"0.01143566","highestBid":"0.01143563","percentChange":"-0.00602608","baseVolume":"2346.14463205","quoteVolume":"203025.40563212","isFrozen":"0","high24hr":"0.01178128","low24hr":"0.01131000"},"USDT_ETH":{"id":149,"last":"10.20000000","lowestAsk":"10.20000000","highestBid":"10.16100001","percentChange":"0.03235209","baseVolume":"133390.00811125","quoteVolume":"13157.01303779","isFrozen":"0","high24hr":"10.31591957","low24hr":"9.72753980"},"BTC_SC":{"id":150,"last":"0.00000041","lowestAsk":"0.00000041","highestBid":"0.00000040","percentChange":"0.00000000","baseVolume":"69.26206838","quoteVolume":"168893787.49195519","isFrozen":"0","high24hr":"0.00000043","low24hr":"0.00000040"},"BTC_BCY":{"id":151,"last":"0.00015449","lowestAsk":"0.00015449","highestBid":"0.00015246","percentChange":"-0.02952446","baseVolume":"8.71779070","quoteVolume":"55085.83547105","isFrozen":"0","high24hr":"0.00016816","low24hr":"0.00014735"},"BTC_EXP":{"id":153,"last":"0.00025537","lowestAsk":"0.00025638","highestBid":"0.00025534","percentChange":"0.01664078","baseVolume":"18.62110356","quoteVolume":"72573.31131151","isFrozen":"0","high24hr":"0.00026899","low24hr":"0.00024800"},"BTC_FCT":{"id":155,"last":"0.00339425","lowestAsk":"0.00339536","highestBid":"0.00339425","percentChange":"-0.00139746","baseVolume":"168.73759917","quoteVolume":"49688.89299026","isFrozen":"0","high24hr":"0.00344887","low24hr":"0.00331701"},"BTC_RADS":{"id":158,"last":"0.00044999","lowestAsk":"0.00044999","highestBid":"0.00044084","percentChange":"0.05797851","baseVolume":"2.75261552","quoteVolume":"6378.00934624","isFrozen":"0","high24hr":"0.00045161","low24hr":"0.00041502"},"BTC_AMP":{"id":160,"last":"0.00006431","lowestAsk":"0.00006428","highestBid":"0.00006387","percentChange":"0.06315093","baseVolume":"162.78428019","quoteVolume":"2432995.67835763","isFrozen":"0","high24hr":"0.00007160","low24hr":"0.00006046"},"BTC_VOX":{"id":161,"last":"0.00001072","lowestAsk":"0.00001088","highestBid":"0.00001075","percentChange":"-0.01289134","baseVolume":"12.20575887","quoteVolume":"1115114.06865207","isFrozen":"0","high24hr":"0.00001175","low24hr":"0.00001057"},"BTC_DCR":{"id":162,"last":"0.00109009","lowestAsk":"0.00110000","highestBid":"0.00109009","percentChange":"0.15735550","baseVolume":"34.12975566","quoteVolume":"31946.52004508","isFrozen":"0","high24hr":"0.00120080","low24hr":"0.00094000"},"BTC_LSK":{"id":163,"last":"0.00017547","lowestAsk":"0.00017547","highestBid":"0.00017462","percentChange":"-0.04422898","baseVolume":"70.41784522","quoteVolume":"391799.57460854","isFrozen":"0","high24hr":"0.00018683","low24hr":"0.00017221"},"ETH_LSK":{"id":166,"last":"0.01534602","lowestAsk":"0.01532792","highestBid":"0.01521662","percentChange":"-0.03615452","baseVolume":"58.23858861","quoteVolume":"3704.08694520","isFrozen":"0","high24hr":"0.01599999","low24hr":"0.01532875"},"BTC_LBC":{"id":167,"last":"0.00002946","lowestAsk":"0.00002984","highestBid":"0.00002945","percentChange":"-0.00405679","baseVolume":"120.73414765","quoteVolume":"3977321.38100581","isFrozen":"0","high24hr":"0.00003299","low24hr":"0.00002738"},"BTC_STEEM":{"id":16
8,"last":"0.00018137","lowestAsk":"0.00018137","highestBid":"0.00018065","percentChange":"-0.00776847","baseVolume":"122.16279936","quoteVolume":"661597.39567921","isFrozen":"0","high24hr":"0.00019182","low24hr":"0.00017705"},"ETH_STEEM":{"id":169,"last":"0.01583986","lowestAsk":"0.01581824","highestBid":"0.01566771","percentChange":"0.01627595","baseVolume":"211.58521144","quoteVolume":"13111.11606151","isFrozen":"0","high24hr":"0.01700000","low24hr":"0.01561147"},"BTC_SBD":{"id":170,"last":"0.00112000","lowestAsk":"0.00112462","highestBid":"0.00111000","percentChange":"-0.01719039","baseVolume":"2.74273588","quoteVolume":"2414.16245856","isFrozen":"0","high24hr":"0.00115368","low24hr":"0.00109247"},"BTC_ETC":{"id":171,"last":"0.00130621","lowestAsk":"0.00131057","highestBid":"0.00130601","percentChange":"-0.03148286","baseVolume":"373.52230663","quoteVolume":"279183.76700259","isFrozen":"0","high24hr":"0.00138924","low24hr":"0.00128000"},"ETH_ETC":{"id":172,"last":"0.11460000","lowestAsk":"0.11470418","highestBid":"0.11397297","percentChange":"-0.02781088","baseVolume":"1328.38212186","quoteVolume":"11372.67517783","isFrozen":"0","high24hr":"0.11886622","low24hr":"0.11364330"},"USDT_ETC":{"id":173,"last":"1.16688449","lowestAsk":"1.17288630","highestBid":"1.16688450","percentChange":"0.00941564","baseVolume":"34988.92450523","quoteVolume":"29804.85174709","isFrozen":"0","high24hr":"1.21520060","low24hr":"1.14230000"},"BTC_REP":{"id":174,"last":"0.00498823","lowestAsk":"0.00499122","highestBid":"0.00498823","percentChange":"0.04686322","baseVolume":"145.44150418","quoteVolume":"29534.96745989","isFrozen":"0","high24hr":"0.00505999","low24hr":"0.00471960"},"USDT_REP":{"id":175,"last":"4.43519067","lowestAsk":"4.45000000","highestBid":"4.43127258","percentChange":"0.05610457","baseVolume":"3871.35950702","quoteVolume":"917.00895507","isFrozen":"0","high24hr":"4.49999999","low24hr":"4.10000000"},"ETH_REP":{"id":176,"last":"0.43665131","lowestAsk":"0.43702397","highestBid":"0.43350944","percentChange":"0.07815138","baseVolume":"1317.87568250","quoteVolume":"3066.80509179","isFrozen":"0","high24hr":"0.43999880","low24hr":"0.40872538"},"BTC_ARDR":{"id":177,"last":"0.00001551","lowestAsk":"0.00001551","highestBid":"0.00001550","percentChange":"0.08461538","baseVolume":"46.33269269","quoteVolume":"2949738.55582328","isFrozen":"0","high24hr":"0.00001676","low24hr":"0.00001430"},"BTC_ZEC":{"id":178,"last":"0.04891901","lowestAsk":"0.04891940","highestBid":"0.04891901","percentChange":"-0.01001579","baseVolume":"292.60991917","quoteVolume":"5947.35590283","isFrozen":"0","high24hr":"0.05077770","low24hr":"0.04815764"},"ETH_ZEC":{"id":179,"last":"4.28945005","lowestAsk":"4.28945006","highestBid":"4.28945005","percentChange":"0.00326430","baseVolume":"402.02020954","quoteVolume":"94.50204948","isFrozen":"0","high24hr":"4.34680600","low24hr":"4.18060517"},"USDT_ZEC":{"id":180,"last":"43.70000000","lowestAsk":"43.79627070","highestBid":"43.55000000","percentChange":"0.02580955","baseVolume":"27619.01064986","quoteVolume":"642.91968834","isFrozen":"0","high24hr":"44.40000000","low24hr":"41.70510000"},"XMR_ZEC":{"id":181,"last":"3.65890970","lowestAsk":"3.67108637","highestBid":"3.64647008","percentChange":"-0.00038760","baseVolume":"105.80525961","quoteVolume":"28.97425754","isFrozen":"0","high24hr":"3.78654568","low24hr":"3.57684988"},"BTC_STRAT":{"id":182,"last":"0.00008075","lowestAsk":"0.00008129","highestBid":"0.00008100","percentChange":"0.23282442","baseVolume":"221.01779754","quoteVolume":"2833986.
49554478","isFrozen":"0","high24hr":"0.00008823","low24hr":"0.00006410"},"BTC_NXC":{"id":183,"last":"0.00003525","lowestAsk":"0.00003588","highestBid":"0.00003538","percentChange":"-0.00843881","baseVolume":"200.42386837","quoteVolume":"5973927.25696122","isFrozen":"0","high24hr":"0.00003700","low24hr":"0.00003006"}}'
        # TODO - rework this
symbol = ccs.poloniex.Symbol(ccs.constants.BTC, ccs.constants.BBR)
self.ticker = ccs.poloniex.public.response.Ticker(self.json, symbol)
def testLow(self):
self.assertEqual(self.ticker.low(), 0.00008101)
def testHigh(self):
self.assertEqual(self.ticker.high(), 0.00008849)
def testAsk(self):
self.assertEqual(self.ticker.ask(), 0.00008630)
def testBid(self):
self.assertEqual(self.ticker.bid(), 0.00008530)
def testLast(self):
self.assertEqual(self.ticker.last(), 0.00008635)
def testVolume24h(self):
self.assertEqual(self.ticker.volume24h(), 2.11645424)
# @unittest.skip("timestamp is emulated")
def testTimestamp(self):
self.assertAlmostEqual(self.ticker.timestamp(), datetime.datetime.now().timestamp(), delta=50)
# @unittest.skip("timestamp is emulated")
def testDt(self):
dt = self.ticker.dt(tz=self.tz)
dtnow = datetime.datetime.now()
        # This test should be valid in the middle of the day (not around midnight)
self.assertAlmostEqual(dt.year, dtnow.year, delta=1)
self.assertAlmostEqual(dt.month, dtnow.month, delta=1)
self.assertAlmostEqual(dt.day, dtnow.day, delta=1)
        # This test should be valid in the middle of the hour (not around the top of the hour)
self.assertAlmostEqual(dt.hour, dtnow.hour, delta=1)
self.assertAlmostEqual(dt.minute, dtnow.minute, delta=1)
def testSpread(self):
self.assertEqual(self.ticker.spread(), ((0.00008630 - 0.00008530) / 0.00008630) * 100)
|
aaxx/thumbor
|
refs/heads/master
|
tests/test_meta_transform.py
|
14
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/globocom/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com timehome@corp.globo.com
from os.path import abspath, dirname, join
import json
from tornado.testing import AsyncHTTPTestCase
from tornado.options import options
from thumbor.app import ThumborServiceApp
get_conf_path = lambda filename: abspath(join(dirname(__file__), 'fixtures', filename))
class MetaHandlerTestCase(AsyncHTTPTestCase):
def get_app(self):
app = ThumborServiceApp(get_conf_path('default.py'))
return app
def test_meta_returns_200(self):
options.META_CALLBACK_NAME = None
self.http_client.fetch(self.get_url('/unsafe/meta/s.glbimg.com/es/ge/f/original/2011/03/22/boavista_x_botafogo.jpg'), self.stop)
response = self.wait()
self.assertEqual(200, response.code)
def test_meta_returns_appjson_code(self):
options.META_CALLBACK_NAME = None
self.http_client.fetch(self.get_url('/unsafe/meta/s.glbimg.com/es/ge/f/original/2011/03/22/boavista_x_botafogo.jpg'), self.stop)
response = self.wait()
assert response.code == 200
content_type = response.headers['Content-Type']
self.assertEqual("application/json", content_type)
def test_meta_returns_proper_json_for_no_ops(self):
options.META_CALLBACK_NAME = None
image_url = "s.glbimg.com/es/ge/f/original/2011/03/22/boavista_x_botafogo.jpg"
self.http_client.fetch(self.get_url('/unsafe/meta/%s' % image_url), self.stop)
response = self.wait()
text = response.body
operations = json.loads(text)
assert operations
assert operations['thumbor']
assert operations['thumbor']['source']['url'] == image_url
assert operations['thumbor']['source']['width'] == 620
assert operations['thumbor']['source']['height'] == 349
assert "operations" in operations['thumbor']
assert not operations['thumbor']['operations']
def test_meta_returns_proper_json_for_resize_and_crop(self):
options.META_CALLBACK_NAME = None
image_url = "s.glbimg.com/es/ge/f/original/2011/03/22/boavista_x_botafogo.jpg"
self.http_client.fetch(self.get_url('/unsafe/meta/300x200/%s' % image_url), self.stop)
response = self.wait()
text = response.body
thumbor_json = json.loads(text)
operations = thumbor_json['thumbor']['operations']
assert len(operations) == 2
assert operations[0]['type'] == 'crop'
assert operations[0]['top'] == 0
assert operations[0]['right'] == 572
assert operations[0]['bottom'] == 349
assert operations[0]['left'] == 48
assert operations[1]['type'] == 'resize'
assert operations[1]['width'] == 300
assert operations[1]['height'] == 200
def test_meta_returns_proper_json_for_resize_and_manual_crop(self):
options.META_CALLBACK_NAME = None
image_url = "s.glbimg.com/es/ge/f/original/2011/03/22/boavista_x_botafogo.jpg"
self.http_client.fetch(self.get_url('/unsafe/meta/0x0:100x100/50x0/%s' % image_url), self.stop)
response = self.wait()
text = response.body
thumbor_json = json.loads(text)
target = thumbor_json['thumbor']['target']
assert target['width'] == 50
assert target['height'] == 50, target['height']
def test_meta_returns_proper_target_for_resize_and_crop(self):
options.META_CALLBACK_NAME = None
image_url = "s.glbimg.com/es/ge/f/original/2011/03/22/boavista_x_botafogo.jpg"
self.http_client.fetch(self.get_url('/unsafe/meta/300x200/%s' % image_url), self.stop)
response = self.wait()
text = response.body
thumbor_json = json.loads(text)
target = thumbor_json['thumbor']['target']
assert target['width'] == 300
assert target['height'] == 200
def test_meta_returns_proper_target_for_crop(self):
options.META_CALLBACK_NAME = None
image_url = "s.glbimg.com/es/ge/f/original/2011/03/22/boavista_x_botafogo.jpg"
self.http_client.fetch(self.get_url('/unsafe/meta/0x0:100x100/%s' % image_url), self.stop)
response = self.wait()
text = response.body
thumbor_json = json.loads(text)
target = thumbor_json['thumbor']['target']
assert target['width'] == 100
assert target['height'] == 100
def test_meta_returns_proper_target_for_crop_and_resize(self):
options.META_CALLBACK_NAME = None
image_url = "s.glbimg.com/es/ge/f/original/2011/03/22/boavista_x_botafogo.jpg"
self.http_client.fetch(self.get_url('/unsafe/meta/0x0:200x250/200x0/%s' % image_url), self.stop)
response = self.wait()
text = response.body
thumbor_json = json.loads(text)
target = thumbor_json['thumbor']['target']
image_url = "s.glbimg.com/es/ge/f/original/2011/03/22/boavista_x_botafogo.jpg"
self.http_client.fetch(self.get_url('/unsafe/meta/50x40:250x290/200x0/%s' % image_url), self.stop)
response = self.wait()
text = response.body
thumbor_json = json.loads(text)
target_2 = thumbor_json['thumbor']['target']
image_url = "s.glbimg.com/es/ge/f/original/2011/03/22/boavista_x_botafogo.jpg"
self.http_client.fetch(self.get_url('/unsafe/meta/250x80:450x330/200x0/%s' % image_url), self.stop)
response = self.wait()
text = response.body
thumbor_json = json.loads(text)
target_3 = thumbor_json['thumbor']['target']
assert target['width'] == target_2['width']
assert target['height'] == target_2['height']
assert target['width'] == target_3['width']
assert target['height'] == target_3['height']
def test_meta_returns_proper_json_for_flip(self):
options.META_CALLBACK_NAME = None
image_url = "s.glbimg.com/es/ge/f/original/2011/03/22/boavista_x_botafogo.jpg"
self.http_client.fetch(self.get_url('/unsafe/meta/-300x-200/%s' % image_url), self.stop)
response = self.wait()
text = response.body
thumbor_json = json.loads(text)
operations = thumbor_json['thumbor']['operations']
assert len(operations) == 4
assert operations[2]['type'] == 'flip_horizontally'
assert operations[3]['type'] == 'flip_vertically'
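# Rough sketch of the meta payload shape implied by the assertions above
# (an assumption for readability, not taken from the thumbor source):
# {"thumbor": {"source": {"url": ..., "width": 620, "height": 349},
#              "operations": [{"type": "crop", ...}, {"type": "resize", ...}],
#              "target": {"width": ..., "height": ...}}}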
class MetaHandlerJSONPTestCase(AsyncHTTPTestCase):
def get_app(self):
return ThumborServiceApp(get_conf_path('jsonp.py'))
def test_meta_returns_proper_json_for_no_ops_with_callback(self):
image_url = "s.glbimg.com/es/ge/f/original/2011/03/22/boavista_x_botafogo.jpg"
self.http_client.fetch(self.get_url('/unsafe/meta/%s' % image_url), self.stop)
response = self.wait()
text = response.body
assert text.strip().startswith('callback({')
assert text.strip().endswith('});')
|
DarthMaulware/EquationGroupLeaks
|
refs/heads/master
|
Leak #5 - Lost In Translation/windows/Resources/Python/Core/Lib/uuid.py
|
1
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: uuid.py
r"""UUID objects (universally unique identifiers) according to RFC 4122.
This module provides immutable UUID objects (class UUID) and the functions
uuid1(), uuid3(), uuid4(), uuid5() for generating version 1, 3, 4, and 5
UUIDs as specified in RFC 4122.
If all you want is a unique ID, you should probably call uuid1() or uuid4().
Note that uuid1() may compromise privacy since it creates a UUID containing
the computer's network address. uuid4() creates a random UUID.
Typical usage:
>>> import uuid
# make a UUID based on the host ID and current time
>>> uuid.uuid1()
UUID('a8098c1a-f86e-11da-bd1a-00112444be1e')
# make a UUID using an MD5 hash of a namespace UUID and a name
>>> uuid.uuid3(uuid.NAMESPACE_DNS, 'python.org')
UUID('6fa459ea-ee8a-3ca4-894e-db77e160355e')
# make a random UUID
>>> uuid.uuid4()
UUID('16fd2706-8baf-433b-82eb-8c7fada847da')
# make a UUID using a SHA-1 hash of a namespace UUID and a name
>>> uuid.uuid5(uuid.NAMESPACE_DNS, 'python.org')
UUID('886313e1-3b8a-5372-9b90-0c9aee199e5d')
# make a UUID from a string of hex digits (braces and hyphens ignored)
>>> x = uuid.UUID('{00010203-0405-0607-0809-0a0b0c0d0e0f}')
# convert a UUID to a string of hex digits in standard form
>>> str(x)
'00010203-0405-0607-0809-0a0b0c0d0e0f'
# get the raw 16 bytes of the UUID
>>> x.bytes
'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f'
# make a UUID from a 16-byte string
>>> uuid.UUID(bytes=x.bytes)
UUID('00010203-0405-0607-0809-0a0b0c0d0e0f')
"""
__author__ = 'Ka-Ping Yee <ping@zesty.ca>'
RESERVED_NCS, RFC_4122, RESERVED_MICROSOFT, RESERVED_FUTURE = [
'reserved for NCS compatibility', 'specified in RFC 4122',
'reserved for Microsoft compatibility', 'reserved for future definition']
class UUID(object):
"""Instances of the UUID class represent UUIDs as specified in RFC 4122.
UUID objects are immutable, hashable, and usable as dictionary keys.
Converting a UUID to a string with str() yields something in the form
'12345678-1234-1234-1234-123456789abc'. The UUID constructor accepts
five possible forms: a similar string of hexadecimal digits, or a tuple
of six integer fields (with 32-bit, 16-bit, 16-bit, 8-bit, 8-bit, and
48-bit values respectively) as an argument named 'fields', or a string
of 16 bytes (with all the integer fields in big-endian order) as an
argument named 'bytes', or a string of 16 bytes (with the first three
fields in little-endian order) as an argument named 'bytes_le', or a
single 128-bit integer as an argument named 'int'.
UUIDs have these read-only attributes:
bytes the UUID as a 16-byte string (containing the six
integer fields in big-endian byte order)
bytes_le the UUID as a 16-byte string (with time_low, time_mid,
and time_hi_version in little-endian byte order)
fields a tuple of the six integer fields of the UUID,
which are also available as six individual attributes
and two derived attributes:
time_low the first 32 bits of the UUID
time_mid the next 16 bits of the UUID
time_hi_version the next 16 bits of the UUID
clock_seq_hi_variant the next 8 bits of the UUID
clock_seq_low the next 8 bits of the UUID
node the last 48 bits of the UUID
time the 60-bit timestamp
clock_seq the 14-bit sequence number
hex the UUID as a 32-character hexadecimal string
int the UUID as a 128-bit integer
urn the UUID as a URN as specified in RFC 4122
variant the UUID variant (one of the constants RESERVED_NCS,
RFC_4122, RESERVED_MICROSOFT, or RESERVED_FUTURE)
version the UUID version number (1 through 5, meaningful only
when the variant is RFC_4122)
"""
def __init__(self, hex=None, bytes=None, bytes_le=None, fields=None, int=None, version=None):
r"""Create a UUID from either a string of 32 hexadecimal digits,
a string of 16 bytes as the 'bytes' argument, a string of 16 bytes
in little-endian order as the 'bytes_le' argument, a tuple of six
integers (32-bit time_low, 16-bit time_mid, 16-bit time_hi_version,
8-bit clock_seq_hi_variant, 8-bit clock_seq_low, 48-bit node) as
the 'fields' argument, or a single 128-bit integer as the 'int'
argument. When a string of hex digits is given, curly braces,
hyphens, and a URN prefix are all optional. For example, these
expressions all yield the same UUID:
UUID('{12345678-1234-5678-1234-567812345678}')
UUID('12345678123456781234567812345678')
UUID('urn:uuid:12345678-1234-5678-1234-567812345678')
UUID(bytes='\x12\x34\x56\x78'*4)
UUID(bytes_le='\x78\x56\x34\x12\x34\x12\x78\x56' +
'\x12\x34\x56\x78\x12\x34\x56\x78')
UUID(fields=(0x12345678, 0x1234, 0x5678, 0x12, 0x34, 0x567812345678))
UUID(int=0x12345678123456781234567812345678)
Exactly one of 'hex', 'bytes', 'bytes_le', 'fields', or 'int' must
be given. The 'version' argument is optional; if given, the resulting
UUID will have its variant and version set according to RFC 4122,
overriding the given 'hex', 'bytes', 'bytes_le', 'fields', or 'int'.
"""
if [
hex, bytes, bytes_le, fields, int].count(None) != 4:
raise TypeError('need one of hex, bytes, bytes_le, fields, or int')
if hex is not None:
hex = hex.replace('urn:', '').replace('uuid:', '')
hex = hex.strip('{}').replace('-', '')
if len(hex) != 32:
raise ValueError('badly formed hexadecimal UUID string')
int = long(hex, 16)
if bytes_le is not None:
if len(bytes_le) != 16:
raise ValueError('bytes_le is not a 16-char string')
bytes = bytes_le[3] + bytes_le[2] + bytes_le[1] + bytes_le[0] + bytes_le[5] + bytes_le[4] + bytes_le[7] + bytes_le[6] + bytes_le[8:]
if bytes is not None:
if len(bytes) != 16:
raise ValueError('bytes is not a 16-char string')
int = long('%02x' * 16 % tuple(map(ord, bytes)), 16)
if fields is not None:
if len(fields) != 6:
raise ValueError('fields is not a 6-tuple')
time_low, time_mid, time_hi_version, clock_seq_hi_variant, clock_seq_low, node = fields
if not 0 <= time_low < 4294967296:
raise ValueError('field 1 out of range (need a 32-bit value)')
if not 0 <= time_mid < 65536:
raise ValueError('field 2 out of range (need a 16-bit value)')
if not 0 <= time_hi_version < 65536:
raise ValueError('field 3 out of range (need a 16-bit value)')
if not 0 <= clock_seq_hi_variant < 256:
raise ValueError('field 4 out of range (need an 8-bit value)')
if not 0 <= clock_seq_low < 256:
raise ValueError('field 5 out of range (need an 8-bit value)')
if not 0 <= node < 281474976710656:
raise ValueError('field 6 out of range (need a 48-bit value)')
clock_seq = clock_seq_hi_variant << 8 | clock_seq_low
int = time_low << 96 | time_mid << 80 | time_hi_version << 64 | clock_seq << 48 | node
if int is not None:
if not 0 <= int < 340282366920938463463374607431768211456:
raise ValueError('int is out of range (need a 128-bit value)')
if version is not None:
if not 1 <= version <= 5:
raise ValueError('illegal version number')
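# The decompiler rendered the original hex masks as decimals:
# -13835058055282163713 == ~(0xc000 << 48) and 9223372036854775808 == 0x8000 << 48
# set the variant to RFC 4122; -1133367955888714851287041 == ~(0xf000 << 64)
# clears the version bits before 'version << 76' writes the new version number.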
int &= -13835058055282163713
int |= 9223372036854775808
int &= -1133367955888714851287041
int |= version << 76
self.__dict__['int'] = int
return
def __cmp__(self, other):
if isinstance(other, UUID):
return cmp(self.int, other.int)
return NotImplemented
def __hash__(self):
return hash(self.int)
def __int__(self):
return self.int
def __repr__(self):
return 'UUID(%r)' % str(self)
def __setattr__(self, name, value):
raise TypeError('UUID objects are immutable')
def __str__(self):
hex = '%032x' % self.int
return '%s-%s-%s-%s-%s' % (
hex[:8], hex[8:12], hex[12:16], hex[16:20], hex[20:])
def get_bytes(self):
bytes = ''
for shift in range(0, 128, 8):
bytes = chr(self.int >> shift & 255) + bytes
return bytes
bytes = property(get_bytes)
def get_bytes_le(self):
bytes = self.bytes
return bytes[3] + bytes[2] + bytes[1] + bytes[0] + bytes[5] + bytes[4] + bytes[7] + bytes[6] + bytes[8:]
bytes_le = property(get_bytes_le)
def get_fields(self):
return (
self.time_low, self.time_mid, self.time_hi_version,
self.clock_seq_hi_variant, self.clock_seq_low, self.node)
fields = property(get_fields)
def get_time_low(self):
return self.int >> 96
time_low = property(get_time_low)
def get_time_mid(self):
return self.int >> 80 & 65535
time_mid = property(get_time_mid)
def get_time_hi_version(self):
return self.int >> 64 & 65535
time_hi_version = property(get_time_hi_version)
def get_clock_seq_hi_variant(self):
return self.int >> 56 & 255
clock_seq_hi_variant = property(get_clock_seq_hi_variant)
def get_clock_seq_low(self):
return self.int >> 48 & 255
clock_seq_low = property(get_clock_seq_low)
def get_time(self):
return (self.time_hi_version & 4095) << 48 | self.time_mid << 32 | self.time_low
time = property(get_time)
def get_clock_seq(self):
return (self.clock_seq_hi_variant & 63) << 8 | self.clock_seq_low
clock_seq = property(get_clock_seq)
def get_node(self):
return self.int & 281474976710655
node = property(get_node)
def get_hex(self):
return '%032x' % self.int
hex = property(get_hex)
def get_urn(self):
return 'urn:uuid:' + str(self)
urn = property(get_urn)
def get_variant(self):
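# Decompiled decimal masks for the top variant bits: 9223372036854775808 ==
# 0x8000 << 48, 4611686018427387904 == 0x4000 << 48, 2305843009213693952 ==
# 0x2000 << 48.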
if not self.int & 9223372036854775808:
return RESERVED_NCS
else:
if not self.int & 4611686018427387904:
return RFC_4122
if not self.int & 2305843009213693952:
return RESERVED_MICROSOFT
return RESERVED_FUTURE
variant = property(get_variant)
def get_version(self):
if self.variant == RFC_4122:
return int(self.int >> 76 & 15)
version = property(get_version)
def _find_mac(command, args, hw_identifiers, get_index):
import os
for dir in ['', '/sbin/', '/usr/sbin']:
executable = os.path.join(dir, command)
if not os.path.exists(executable):
continue
try:
cmd = 'LC_ALL=C %s %s 2>/dev/null' % (executable, args)
with os.popen(cmd) as pipe:
for line in pipe:
words = line.lower().split()
for i in range(len(words)):
if words[i] in hw_identifiers:
return int(words[get_index(i)].replace(':', ''), 16)
except IOError:
continue
return
def _ifconfig_getnode():
"""Get the hardware address on Unix by running ifconfig."""
for args in ('', '-a', '-av'):
mac = _find_mac('ifconfig', args, ['hwaddr', 'ether'], lambda i: i + 1)
if mac:
return mac
import socket
ip_addr = socket.gethostbyname(socket.gethostname())
mac = _find_mac('arp', '-an', [ip_addr], lambda i: -1)
if mac:
return mac
else:
mac = _find_mac('lanscan', '-ai', ['lan0'], lambda i: 0)
if mac:
return mac
return
def _ipconfig_getnode():
"""Get the hardware address on Windows by running ipconfig.exe."""
import os
import re
dirs = [
'', 'c:\\windows\\system32', 'c:\\winnt\\system32']
try:
import ctypes
buffer = ctypes.create_string_buffer(300)
ctypes.windll.kernel32.GetSystemDirectoryA(buffer, 300)
dirs.insert(0, buffer.value.decode('mbcs'))
except:
pass
for dir in dirs:
try:
try:
pipe = os.popen(os.path.join(dir, 'ipconfig') + ' /all')
except IOError:
continue
else:
for line in pipe:
value = line.split(':')[-1].strip().lower()
if re.match('([0-9a-f][0-9a-f]-){5}[0-9a-f][0-9a-f]', value):
return int(value.replace('-', ''), 16)
finally:
pipe.close()
def _netbios_getnode():
"""Get the hardware address on Windows using NetBIOS calls.
See http://support.microsoft.com/kb/118623 for details."""
import win32wnet
import netbios
ncb = netbios.NCB()
ncb.Command = netbios.NCBENUM
ncb.Buffer = adapters = netbios.LANA_ENUM()
adapters._pack()
if win32wnet.Netbios(ncb) != 0:
return
adapters._unpack()
for i in range(adapters.length):
ncb.Reset()
ncb.Command = netbios.NCBRESET
ncb.Lana_num = ord(adapters.lana[i])
if win32wnet.Netbios(ncb) != 0:
continue
ncb.Reset()
ncb.Command = netbios.NCBASTAT
ncb.Lana_num = ord(adapters.lana[i])
ncb.Callname = '*'.ljust(16)
ncb.Buffer = status = netbios.ADAPTER_STATUS()
if win32wnet.Netbios(ncb) != 0:
continue
status._unpack()
bytes = map(ord, status.adapter_address)
return (bytes[0] << 40) + (bytes[1] << 32) + (bytes[2] << 24) + (bytes[3] << 16) + (bytes[4] << 8) + bytes[5]
_uuid_generate_random = _uuid_generate_time = _UuidCreate = None
try:
import ctypes
import ctypes.util
for libname in ['uuid', 'c']:
try:
lib = ctypes.CDLL(ctypes.util.find_library(libname))
except:
continue
if hasattr(lib, 'uuid_generate_random'):
_uuid_generate_random = lib.uuid_generate_random
if hasattr(lib, 'uuid_generate_time'):
_uuid_generate_time = lib.uuid_generate_time
import sys
if sys.platform == 'darwin':
import os
if int(os.uname()[2].split('.')[0]) >= 9:
_uuid_generate_random = _uuid_generate_time = None
try:
lib = ctypes.windll.rpcrt4
except:
lib = None
_UuidCreate = getattr(lib, 'UuidCreateSequential', getattr(lib, 'UuidCreate', None))
except:
pass
def _unixdll_getnode():
"""Get the hardware address on Unix using ctypes."""
_buffer = ctypes.create_string_buffer(16)
_uuid_generate_time(_buffer)
return UUID(bytes=_buffer.raw).node
def _windll_getnode():
"""Get the hardware address on Windows using ctypes."""
_buffer = ctypes.create_string_buffer(16)
if _UuidCreate(_buffer) == 0:
return UUID(bytes=_buffer.raw).node
def _random_getnode():
"""Get a random node ID, with eighth bit set as suggested by RFC 4122."""
import random
return random.randrange(0, 281474976710656) | 1099511627776
_node = None
def getnode():
"""Get the hardware address as a 48-bit positive integer.
The first time this runs, it may launch a separate program, which could
be quite slow. If all attempts to obtain the hardware address fail, we
choose a random 48-bit number with its eighth bit set to 1 as recommended
in RFC 4122.
"""
global _node
if _node is not None:
return _node
else:
import sys
if sys.platform == 'win32':
getters = [
_windll_getnode, _netbios_getnode, _ipconfig_getnode]
else:
getters = [
_unixdll_getnode, _ifconfig_getnode]
for getter in getters + [_random_getnode]:
try:
_node = getter()
except:
continue
if _node is not None:
return _node
return
_last_timestamp = None
def uuid1(node=None, clock_seq=None):
"""Generate a UUID from a host ID, sequence number, and the current time.
If 'node' is not given, getnode() is used to obtain the hardware
address. If 'clock_seq' is given, it is used as the sequence number;
otherwise a random 14-bit sequence number is chosen."""
global _last_timestamp
if _uuid_generate_time and node is clock_seq is None:
_buffer = ctypes.create_string_buffer(16)
_uuid_generate_time(_buffer)
return UUID(bytes=_buffer.raw)
else:
import time
nanoseconds = int(time.time() * 1000000000.0)
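# 122192928000000000 (0x01b21dd213814000) is the number of 100-ns intervals
# between the UUID epoch 1582-10-15 00:00:00 and the Unix epoch 1970-01-01 00:00:00.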
timestamp = int(nanoseconds // 100) + 122192928000000000
if _last_timestamp is not None and timestamp <= _last_timestamp:
timestamp = _last_timestamp + 1
_last_timestamp = timestamp
if clock_seq is None:
import random
clock_seq = random.randrange(16384)
time_low = timestamp & 4294967295
time_mid = timestamp >> 32 & 65535
time_hi_version = timestamp >> 48 & 4095
clock_seq_low = clock_seq & 255
clock_seq_hi_variant = clock_seq >> 8 & 63
if node is None:
node = getnode()
return UUID(fields=(time_low, time_mid, time_hi_version,
clock_seq_hi_variant, clock_seq_low, node), version=1)
def uuid3(namespace, name):
"""Generate a UUID from the MD5 hash of a namespace UUID and a name."""
from hashlib import md5
hash = md5(namespace.bytes + name).digest()
return UUID(bytes=hash[:16], version=3)
def uuid4():
"""Generate a random UUID."""
if _uuid_generate_random:
_buffer = ctypes.create_string_buffer(16)
_uuid_generate_random(_buffer)
return UUID(bytes=_buffer.raw)
try:
import os
return UUID(bytes=os.urandom(16), version=4)
except:
import random
bytes = [ chr(random.randrange(256)) for i in range(16) ]
return UUID(bytes=bytes, version=4)
def uuid5(namespace, name):
"""Generate a UUID from the SHA-1 hash of a namespace UUID and a name."""
from hashlib import sha1
hash = sha1(namespace.bytes + name).digest()
return UUID(bytes=hash[:16], version=5)
NAMESPACE_DNS = UUID('6ba7b810-9dad-11d1-80b4-00c04fd430c8')
NAMESPACE_URL = UUID('6ba7b811-9dad-11d1-80b4-00c04fd430c8')
NAMESPACE_OID = UUID('6ba7b812-9dad-11d1-80b4-00c04fd430c8')
NAMESPACE_X500 = UUID('6ba7b814-9dad-11d1-80b4-00c04fd430c8')
|
pingf/PyModulesLearning
|
refs/heads/master
|
logging/context.py
|
1
|
import logging
import traceback
logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s', level=logging.DEBUG, datefmt='%m/%d/%Y %I:%M:%S %p')
logging.debug('This message should appear on the console')
class A(object):
def test(self):
try:
raise Exception('WTF!')
except Exception as e:
stack = traceback.extract_stack()
(filename, line, procname, text) = stack[-1]
context='[loc]'+filename+':'+procname+':'+str(line)
logging.error('[error]'+str(e)+' '+context)
a = A()
a.test()
|
nagyistoce/OpenBird
|
refs/heads/master
|
cocos2d/tools/cpp-best-practise-formatter/cpp-best-practise-formatter.py
|
16
|
#!/usr/bin/env python
import os
import re
import sys
# list of extensions to replace
replace_extensions = [".cpp", ".h", ".hpp", ".cc", ".mm"]
files_to_skip = None
replace_type = None
skip_contents = ["CCPointMake", "CCSizeMake", "CCRectMake", "CCLOG", \
"CCLog", "CCAssert", "CCSkeleton"]
def try_to_replace(fname):
if replace_type == "add_namespace":
if fname.lower().endswith(".h"):
return True
return False
else:
if replace_extensions:
for replace_extension in replace_extensions:
if fname.lower().endswith(replace_extension):
return True
return False
def replacement_member_variable(m):
# print "group 2: ", m.group(2)
name_prefix = m.group(3)
first = name_prefix[0]
second = ""
if len(name_prefix) > 1:
second = name_prefix[1]
if first.isupper() and second.islower():
first = first.lower()
name_prefix = first + name_prefix[1:]
return m.group(1) + "_" + name_prefix
elif first.isupper() and second.isupper():
return m.group(1) + "_" + name_prefix
else:
print "don't convert: ", m.group(1) + m.group(2) + m.group(3)
if m.group(2) == "m_":
return m.group(1) + "_" + name_prefix
return m.group(1) + m.group(2) + m.group(3)
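# Illustrative example (assumption, not in the original script): with prefix
# "m_p", the pattern built in do_member_varible_replace() matches " m_pSp" in
# " m_pSprite"; replacement_member_variable() lowers the leading capital
# (it is followed by a lowercase letter) and re.sub yields " _sprite".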
def remove_prefix_callback(m):
pos = 0
for skip_content in skip_contents:
start = 0
while True:
pos = m.string.find(skip_content, start)
if pos == -1:
break
if pos == m.end(1):
return m.group(1) + "CC" + m.group(2)
start = pos + 1
return m.group(1) + m.group(2)
def add_namespace_callback(m):
"""
Arguments:
- `m`: the regex match object passed in by re.sub
"""
return m.group(1) + "cocos2d::" + m.group(2)
def replace_callback(m):
if replace_type == "replace_variable":
return replacement_member_variable(m)
elif replace_type == "remove_prefix":
return remove_prefix_callback(m)
elif replace_type == "add_namespace":
return add_namespace_callback(m)
else:
raise Exception("type error.")
def file_replace(fname, pat):
# first, see if the pattern is even in the file.
with open(fname) as f:
if not any(re.search(pat, line) for line in f):
# print "can't find the string..."
return # pattern does not occur in file so we are done.
# pattern is in the file, so perform replace operation.
with open(fname) as f:
out_fname = fname + ".tmp"
out = open(out_fname, "w")
for line in f:
repl = line
m = re.search(pat, line)
if m:
repl = re.sub(pat, replace_callback, line)
out.write(repl)
out.close()
os.rename(out_fname, fname)
def mass_replace(dir_name, s_before):
pat = re.compile(s_before)
for dirpath, dirnames, filenames in os.walk(dir_name):
for fname in filenames:
if try_to_replace(fname):
fullname = os.path.join(dirpath, fname)
need_skip = False
if files_to_skip != None:
for skip in files_to_skip:
if fullname.find(skip) != -1:
print "skip file: ", fullname
need_skip = True
break
if not need_skip:
file_replace(fullname, pat)
prefix_need_replace = [
"m_pob",
"m_ob",
"m_str",
"m_psz",
"m_sz",
"m_pfn",
"m_pf",
"m_s",
"m_p",
"m_b",
"m_n",
"m_h",
"m_u",
"m_c",
"m_e",
"m_f",
"m_d",
"m_t",
"m_i",
"m_"
]
def do_member_varible_replace(dir):
"""
Arguments:
- `dir`: the directory whose sources will be rewritten
"""
for p in prefix_need_replace:
# mass_replace(".", p)
pat = "([^\w])(" + p + ')(\w{1,2})'
mass_replace(dir, pat)
pat = "(^)(" + p + ')(\w{1,2})'
mass_replace(dir, pat)
remove_prefix_patterns = [
"([^/_\"])CC([A-Z][a-z])", \
"(^)CC([A-Z][a-z])", \
"([^/_\"])CC(IME[A-Z][a-z])", \
"(^)CC(IME[A-Z][a-z])", \
"([^/_\"])CC(TMX)", \
"(^)CC(TMX)", \
"([^/_\"])CC(GL\w)", \
"(^)CC(GL\w)", \
"([^/_\"])CC(EGL)", \
"(^)CC(EGL)", \
"([^/_\"])CC(EGL)", \
"(^)CC(EGL)", \
"([^/_\"])CC(RGBA)", \
"(^)CC(RGBA)", \
"([^/_\"])CC(SAX)", \
"(^)CC(SAX)"
]
def do_remove_class_cc_prefix(dir):
for pat in remove_prefix_patterns:
mass_replace(dir, pat)
def do_add_namespace(dir):
"""
Arguments:
- `dir`: the directory whose headers will be rewritten
"""
pat = "([\s(])(CC[A-Z][a-z])"
mass_replace(dir, pat)
def main():
"""
"""
from optparse import OptionParser
parser = OptionParser("usage: %prog cpp-best-practise-formatter -d DIR_NAME [-s FILES_TO_SKIP]")
parser.add_option("-t", "--type",
action="store", type="string", dest="type", default="all",
help='''all: Replaces Hungarian naming of member variables and removes the CC prefix from classes.
remove_prefix: Only removes the CC prefix from classes.
replace_variable: Replaces Hungarian naming of member variables.
''')
parser.add_option("-d", "--dir",
action="store", type="string", dest="dir_name", default=None,
help="The directory which contains sources to format")
parser.add_option("-s", "--skip",
action="append", type="string", dest="skips", default=None,
help="Files or directories to skip")
(options, args) = parser.parse_args(sys.argv)
if options.dir_name is None:
raise Exception("Don't set -d at the same time");
global replace_type
replace_type = options.type
global files_to_skip
files_to_skip = options.skips
if options.type == "all":
#replace_type = "add_namespace"
#do_add_namespace(options.dir_name)
replace_type = "replace_variable"
do_member_varible_replace(options.dir_name)
replace_type = "remove_prefix"
do_remove_class_cc_prefix(options.dir_name)
elif options.type == "replace_variable":
do_member_varible_replace(options.dir_name)
elif options.type == "remove_prefix":
do_remove_class_cc_prefix(options.dir_name)
elif options.type == "add_namespace":
do_add_namespace(options.dir_name)
else:
raise Exception("type error, please use correct -t opinion.")
if __name__ == '__main__':
try:
main()
except Exception as e:
print e
sys.exit(1)
|
nuobit/odoo-addons
|
refs/heads/11.0
|
contract_line_tax/__manifest__.py
|
1
|
# Copyright NuoBiT Solutions, S.L. (<https://www.nuobit.com>)
# Eric Antones <eantones@nuobit.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl)
{
'name': 'Contract line tax',
'summary': 'This module adds taxes to contract lines and propagates them to the invoice',
'version': '11.0.1.0.0',
'category': 'Contract Management',
'author': 'NuoBiT Solutions, S.L., Eric Antones',
'website': 'https://www.nuobit.com',
'license': 'AGPL-3',
'depends': [
'contract',
],
'data': [
'views/account_analytic_contract_view.xml',
],
'installable': True,
'auto_install': False,
}
|
barbuza/django
|
refs/heads/master
|
tests/delete/__init__.py
|
12133432
| |
rcherrueau/sqlalchemy-migrate
|
refs/heads/cockroachdb/pike
|
migrate/tests/changeset/__init__.py
|
12133432
| |
freezmeinster/avagata-site
|
refs/heads/dev
|
django/conf/locale/hi/__init__.py
|
12133432
| |
cloudera/hue
|
refs/heads/master
|
desktop/core/ext-py/SQLAlchemy-1.3.17/lib/sqlalchemy/testing/plugin/__init__.py
|
12133432
| |
ericmjl/bokeh
|
refs/heads/master
|
examples/app/stocks/download_sample_data.py
|
1
|
import os
import zipfile
from urllib.request import urlretrieve
def extract_hosted_zip(data_url, save_dir, exclude_term=None):
"""Downloads, then extracts a zip file."""
zip_name = os.path.join(save_dir, 'temp.zip')
# get the zip file
try:
print('Downloading %r to %r' % (data_url, zip_name))
zip_name, hdrs = urlretrieve(url=data_url, filename=zip_name)
print('Download successfully completed')
except IOError as e:
print("Could not successfully retrieve %r" % data_url)
raise e
# extract, then remove temp file
extract_zip(zip_name=zip_name, exclude_term=exclude_term)
os.unlink(zip_name)
print("Extraction Complete")
def extract_zip(zip_name, exclude_term=None):
"""Extracts a zip file to its containing directory."""
zip_dir = os.path.dirname(os.path.abspath(zip_name))
try:
with zipfile.ZipFile(zip_name) as z:
# write each zipped file out if it isn't a directory
files = [zip_file for zip_file in z.namelist() if not zip_file.endswith('/')]
print('Extracting %i files from %r.' % (len(files), zip_name))
for zip_file in files:
# remove any provided extra directory term from zip file
if exclude_term:
dest_file = zip_file.replace(exclude_term, '')
else:
dest_file = zip_file
dest_file = os.path.normpath(os.path.join(zip_dir, dest_file))
dest_dir = os.path.dirname(dest_file)
# make directory if it does not exist
if not os.path.isdir(dest_dir):
os.makedirs(dest_dir)
# read file from zip, then write to new directory
data = z.read(zip_file)
with open(dest_file, 'wb') as f:
f.write(data)
except zipfile.error as e:
print("Bad zipfile (%r): %s" % (zip_name, e))
raise e
if __name__ == '__main__':
# info for retrieving and extracting the zip file
this_dir = os.path.dirname(os.path.realpath(__file__))
zip_file = 'http://quantquote.com/files/quantquote_daily_sp500_83986.zip'
zip_dir = 'quantquote_daily_sp500_83986/'
extract_hosted_zip(data_url=zip_file, save_dir=this_dir, exclude_term=zip_dir)
|
ARamsey118/Reverse-Javadoc
|
refs/heads/master
|
bs4/element.py
|
15
|
import collections
import re
import sys
import warnings
from bs4.dammit import EntitySubstitution
DEFAULT_OUTPUT_ENCODING = "utf-8"
PY3K = (sys.version_info[0] > 2)
whitespace_re = re.compile("\s+")
def _alias(attr):
"""Alias one attribute name to another for backward compatibility"""
@property
def alias(self):
return getattr(self, attr)
@alias.setter
def alias(self, value):
return setattr(self, attr, value)
return alias
class NamespacedAttribute(str):
def __new__(cls, prefix, name, namespace=None):
if name is None:
obj = str.__new__(cls, prefix)
elif prefix is None:
# Not really namespaced.
obj = str.__new__(cls, name)
else:
obj = str.__new__(cls, prefix + ":" + name)
obj.prefix = prefix
obj.name = name
obj.namespace = namespace
return obj
class AttributeValueWithCharsetSubstitution(str):
"""A stand-in object for a character encoding specified in HTML."""
class CharsetMetaAttributeValue(AttributeValueWithCharsetSubstitution):
"""A generic stand-in for the value of a meta tag's 'charset' attribute.
When Beautiful Soup parses the markup '<meta charset="utf8">', the
value of the 'charset' attribute will be one of these objects.
"""
def __new__(cls, original_value):
obj = str.__new__(cls, original_value)
obj.original_value = original_value
return obj
def encode(self, encoding):
return encoding
class ContentMetaAttributeValue(AttributeValueWithCharsetSubstitution):
"""A generic stand-in for the value of a meta tag's 'content' attribute.
When Beautiful Soup parses the markup:
<meta http-equiv="content-type" content="text/html; charset=utf8">
The value of the 'content' attribute will be one of these objects.
"""
CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)", re.M)
def __new__(cls, original_value):
match = cls.CHARSET_RE.search(original_value)
if match is None:
# No substitution necessary.
return str.__new__(str, original_value)
obj = str.__new__(cls, original_value)
obj.original_value = original_value
return obj
def encode(self, encoding):
def rewrite(match):
return match.group(1) + encoding
return self.CHARSET_RE.sub(rewrite, self.original_value)
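# Illustrative usage (assumption, not from the original source):
# ContentMetaAttributeValue('text/html; charset=utf8').encode('utf-8')
# returns 'text/html; charset=utf-8', substituting the real output encoding.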
class HTMLAwareEntitySubstitution(EntitySubstitution):
"""Entity substitution rules that are aware of some HTML quirks.
Specifically, the contents of <script> and <style> tags should not
undergo entity substitution.
Incoming NavigableString objects are checked to see if they're the
direct children of a <script> or <style> tag.
"""
cdata_containing_tags = set(["script", "style"])
preformatted_tags = set(["pre"])
@classmethod
def _substitute_if_appropriate(cls, ns, f):
if (isinstance(ns, NavigableString)
and ns.parent is not None
and ns.parent.name in cls.cdata_containing_tags):
# Do nothing.
return ns
# Substitute.
return f(ns)
@classmethod
def substitute_html(cls, ns):
return cls._substitute_if_appropriate(
ns, EntitySubstitution.substitute_html)
@classmethod
def substitute_xml(cls, ns):
return cls._substitute_if_appropriate(
ns, EntitySubstitution.substitute_xml)
class PageElement(object):
"""Contains the navigational information for some part of the page
(either a tag or a piece of text)"""
# There are five possible values for the "formatter" argument passed in
# to methods like encode() and prettify():
#
# "html" - All Unicode characters with corresponding HTML entities
# are converted to those entities on output.
# "minimal" - Bare ampersands and angle brackets are converted to
# XML entities: & < >
# None - The null formatter. Unicode characters are never
# converted to entities. This is not recommended, but it's
# faster than "minimal".
# A function - This function will be called on every string that
# needs to undergo entity substitution.
#
# In an HTML document, the default "html" and "minimal" functions
# will leave the contents of <script> and <style> tags alone. For
# an XML document, all tags will be given the same treatment.
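# Illustrative sketch (assumption, not part of the original source): for a tag
# parsed from '<p class="x">AT&T</p>',
#   tag.decode(formatter="minimal")  ->  '<p class="x">AT&amp;T</p>'
#   tag.decode(formatter=None)       ->  '<p class="x">AT&T</p>'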
HTML_FORMATTERS = {
"html" : HTMLAwareEntitySubstitution.substitute_html,
"minimal" : HTMLAwareEntitySubstitution.substitute_xml,
None : None
}
XML_FORMATTERS = {
"html" : EntitySubstitution.substitute_html,
"minimal" : EntitySubstitution.substitute_xml,
None : None
}
def format_string(self, s, formatter='minimal'):
"""Format the given string using the given formatter."""
if not isinstance(formatter, collections.Callable):
formatter = self._formatter_for_name(formatter)
if formatter is None:
output = s
else:
output = formatter(s)
return output
@property
def _is_xml(self):
"""Is this element part of an XML tree or an HTML tree?
This is used when mapping a formatter name ("minimal") to an
appropriate function (one that performs entity-substitution on
the contents of <script> and <style> tags, or not). It's
inefficient, but it should be called very rarely.
"""
if self.parent is None:
# This is the top-level object. It should have .is_xml set
# from tree creation. If not, take a guess--BS is usually
# used on HTML markup.
return getattr(self, 'is_xml', False)
return self.parent._is_xml
def _formatter_for_name(self, name):
"Look up a formatter function based on its name and the tree."
if self._is_xml:
return self.XML_FORMATTERS.get(
name, EntitySubstitution.substitute_xml)
else:
return self.HTML_FORMATTERS.get(
name, HTMLAwareEntitySubstitution.substitute_xml)
def setup(self, parent=None, previous_element=None):
"""Sets up the initial relations between this element and
other elements."""
self.parent = parent
self.previous_element = previous_element
if previous_element is not None:
self.previous_element.next_element = self
self.next_element = None
self.previous_sibling = None
self.next_sibling = None
if self.parent is not None and self.parent.contents:
self.previous_sibling = self.parent.contents[-1]
self.previous_sibling.next_sibling = self
nextSibling = _alias("next_sibling") # BS3
previousSibling = _alias("previous_sibling") # BS3
def replace_with(self, replace_with):
if replace_with is self:
return
if replace_with is self.parent:
raise ValueError("Cannot replace a Tag with its parent.")
old_parent = self.parent
my_index = self.parent.index(self)
self.extract()
old_parent.insert(my_index, replace_with)
return self
replaceWith = replace_with # BS3
def unwrap(self):
my_parent = self.parent
my_index = self.parent.index(self)
self.extract()
for child in reversed(self.contents[:]):
my_parent.insert(my_index, child)
return self
replace_with_children = unwrap
replaceWithChildren = unwrap # BS3
def wrap(self, wrap_inside):
me = self.replace_with(wrap_inside)
wrap_inside.append(me)
return wrap_inside
def extract(self):
"""Destructively rips this element out of the tree."""
if self.parent is not None:
del self.parent.contents[self.parent.index(self)]
#Find the two elements that would be next to each other if
#this element (and any children) hadn't been parsed. Connect
#the two.
last_child = self._last_descendant()
next_element = last_child.next_element
if self.previous_element is not None:
self.previous_element.next_element = next_element
if next_element is not None:
next_element.previous_element = self.previous_element
self.previous_element = None
last_child.next_element = None
self.parent = None
if self.previous_sibling is not None:
self.previous_sibling.next_sibling = self.next_sibling
if self.next_sibling is not None:
self.next_sibling.previous_sibling = self.previous_sibling
self.previous_sibling = self.next_sibling = None
return self
def _last_descendant(self, is_initialized=True, accept_self=True):
"Finds the last element beneath this object to be parsed."
if is_initialized and self.next_sibling:
last_child = self.next_sibling.previous_element
else:
last_child = self
while isinstance(last_child, Tag) and last_child.contents:
last_child = last_child.contents[-1]
if not accept_self and last_child == self:
last_child = None
return last_child
# BS3: Not part of the API!
_lastRecursiveChild = _last_descendant
def insert(self, position, new_child):
if new_child is self:
raise ValueError("Cannot insert a tag into itself.")
if (isinstance(new_child, str)
and not isinstance(new_child, NavigableString)):
new_child = NavigableString(new_child)
position = min(position, len(self.contents))
if hasattr(new_child, 'parent') and new_child.parent is not None:
# We're 'inserting' an element that's already one
# of this object's children.
if new_child.parent is self:
current_index = self.index(new_child)
if current_index < position:
# We're moving this element further down the list
# of this object's children. That means that when
# we extract this element, our target index will
# jump down one.
position -= 1
new_child.extract()
new_child.parent = self
previous_child = None
if position == 0:
new_child.previous_sibling = None
new_child.previous_element = self
else:
previous_child = self.contents[position - 1]
new_child.previous_sibling = previous_child
new_child.previous_sibling.next_sibling = new_child
new_child.previous_element = previous_child._last_descendant(False)
if new_child.previous_element is not None:
new_child.previous_element.next_element = new_child
new_childs_last_element = new_child._last_descendant(False)
if position >= len(self.contents):
new_child.next_sibling = None
parent = self
parents_next_sibling = None
while parents_next_sibling is None and parent is not None:
parents_next_sibling = parent.next_sibling
parent = parent.parent
if parents_next_sibling is not None:
# We found the element that comes next in the document.
break
if parents_next_sibling is not None:
new_childs_last_element.next_element = parents_next_sibling
else:
# The last element of this tag is the last element in
# the document.
new_childs_last_element.next_element = None
else:
next_child = self.contents[position]
new_child.next_sibling = next_child
if new_child.next_sibling is not None:
new_child.next_sibling.previous_sibling = new_child
new_childs_last_element.next_element = next_child
if new_childs_last_element.next_element is not None:
new_childs_last_element.next_element.previous_element = new_childs_last_element
self.contents.insert(position, new_child)
def append(self, tag):
"""Appends the given tag to the contents of this tag."""
self.insert(len(self.contents), tag)
def insert_before(self, predecessor):
"""Makes the given element the immediate predecessor of this one.
The two elements will have the same parent, and the given element
will be immediately before this one.
"""
if self is predecessor:
raise ValueError("Can't insert an element before itself.")
parent = self.parent
if parent is None:
raise ValueError(
"Element has no parent, so 'before' has no meaning.")
# Extract first so that the index won't be screwed up if they
# are siblings.
if isinstance(predecessor, PageElement):
predecessor.extract()
index = parent.index(self)
parent.insert(index, predecessor)
def insert_after(self, successor):
"""Makes the given element the immediate successor of this one.
The two elements will have the same parent, and the given element
will be immediately after this one.
"""
if self is successor:
raise ValueError("Can't insert an element after itself.")
parent = self.parent
if parent is None:
raise ValueError(
"Element has no parent, so 'after' has no meaning.")
# Extract first so that the index won't be screwed up if they
# are siblings.
if isinstance(successor, PageElement):
successor.extract()
index = parent.index(self)
parent.insert(index+1, successor)
def find_next(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the first item that matches the given criteria and
appears after this Tag in the document."""
return self._find_one(self.find_all_next, name, attrs, text, **kwargs)
findNext = find_next # BS3
def find_all_next(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns all items that match the given criteria and appear
after this Tag in the document."""
return self._find_all(name, attrs, text, limit, self.next_elements,
**kwargs)
findAllNext = find_all_next # BS3
def find_next_sibling(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears after this Tag in the document."""
return self._find_one(self.find_next_siblings, name, attrs, text,
**kwargs)
findNextSibling = find_next_sibling # BS3
def find_next_siblings(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns the siblings of this Tag that match the given
criteria and appear after this Tag in the document."""
return self._find_all(name, attrs, text, limit,
self.next_siblings, **kwargs)
findNextSiblings = find_next_siblings # BS3
fetchNextSiblings = find_next_siblings # BS2
def find_previous(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the first item that matches the given criteria and
appears before this Tag in the document."""
return self._find_one(
self.find_all_previous, name, attrs, text, **kwargs)
findPrevious = find_previous # BS3
def find_all_previous(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns all items that match the given criteria and appear
before this Tag in the document."""
return self._find_all(name, attrs, text, limit, self.previous_elements,
**kwargs)
findAllPrevious = find_all_previous # BS3
fetchPrevious = find_all_previous # BS2
def find_previous_sibling(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears before this Tag in the document."""
return self._find_one(self.find_previous_siblings, name, attrs, text,
**kwargs)
findPreviousSibling = find_previous_sibling # BS3
def find_previous_siblings(self, name=None, attrs={}, text=None,
limit=None, **kwargs):
"""Returns the siblings of this Tag that match the given
criteria and appear before this Tag in the document."""
return self._find_all(name, attrs, text, limit,
self.previous_siblings, **kwargs)
findPreviousSiblings = find_previous_siblings # BS3
fetchPreviousSiblings = find_previous_siblings # BS2
def find_parent(self, name=None, attrs={}, **kwargs):
"""Returns the closest parent of this Tag that matches the given
criteria."""
# NOTE: We can't use _find_one because findParents takes a different
# set of arguments.
r = None
l = self.find_parents(name, attrs, 1, **kwargs)
if l:
r = l[0]
return r
findParent = find_parent # BS3
def find_parents(self, name=None, attrs={}, limit=None, **kwargs):
"""Returns the parents of this Tag that match the given
criteria."""
return self._find_all(name, attrs, None, limit, self.parents,
**kwargs)
findParents = find_parents # BS3
fetchParents = find_parents # BS2
@property
def next(self):
return self.next_element
@property
def previous(self):
return self.previous_element
#These methods do the real heavy lifting.
def _find_one(self, method, name, attrs, text, **kwargs):
r = None
l = method(name, attrs, text, 1, **kwargs)
if l:
r = l[0]
return r
def _find_all(self, name, attrs, text, limit, generator, **kwargs):
"Iterates over a generator looking for things that match."
if isinstance(name, SoupStrainer):
strainer = name
else:
strainer = SoupStrainer(name, attrs, text, **kwargs)
if text is None and not limit and not attrs and not kwargs:
if name is True or name is None:
# Optimization to find all tags.
result = (element for element in generator
if isinstance(element, Tag))
return ResultSet(strainer, result)
elif isinstance(name, str):
# Optimization to find all tags with a given name.
result = (element for element in generator
if isinstance(element, Tag)
and element.name == name)
return ResultSet(strainer, result)
results = ResultSet(strainer)
while True:
try:
i = next(generator)
except StopIteration:
break
if i:
found = strainer.search(i)
if found:
results.append(found)
if limit and len(results) >= limit:
break
return results
#These generators can be used to navigate starting from both
#NavigableStrings and Tags.
@property
def next_elements(self):
i = self.next_element
while i is not None:
yield i
i = i.next_element
@property
def next_siblings(self):
i = self.next_sibling
while i is not None:
yield i
i = i.next_sibling
@property
def previous_elements(self):
i = self.previous_element
while i is not None:
yield i
i = i.previous_element
@property
def previous_siblings(self):
i = self.previous_sibling
while i is not None:
yield i
i = i.previous_sibling
@property
def parents(self):
i = self.parent
while i is not None:
yield i
i = i.parent
# Methods for supporting CSS selectors.
tag_name_re = re.compile('^[a-z0-9]+$')
# /^(\w+)\[(\w+)([=~\|\^\$\*]?)=?"?([^\]"]*)"?\]$/
# \---/ \---/\-------------/ \-------/
# | | | |
# | | | The value
# | | ~,|,^,$,* or =
# | Attribute
# Tag
attribselect_re = re.compile(
r'^(?P<tag>\w+)?\[(?P<attribute>\w+)(?P<operator>[=~\|\^\$\*]?)' +
r'=?"?(?P<value>[^\]"]*)"?\]$'
)
def _attr_value_as_string(self, value, default=None):
"""Force an attribute value into a string representation.
A multi-valued attribute will be converted into a
space-separated string.
"""
value = self.get(value, default)
if isinstance(value, list) or isinstance(value, tuple):
value =" ".join(value)
return value
def _tag_name_matches_and(self, function, tag_name):
if not tag_name:
return function
else:
def _match(tag):
return tag.name == tag_name and function(tag)
return _match
def _attribute_checker(self, operator, attribute, value=''):
"""Create a function that performs a CSS selector operation.
Takes an operator, attribute and optional value. Returns a
function that will return True for elements that match that
combination.
"""
if operator == '=':
# string representation of `attribute` is equal to `value`
return lambda el: el._attr_value_as_string(attribute) == value
elif operator == '~':
# space-separated list representation of `attribute`
# contains `value`
def _includes_value(element):
attribute_value = element.get(attribute, [])
if not isinstance(attribute_value, list):
attribute_value = attribute_value.split()
return value in attribute_value
return _includes_value
elif operator == '^':
# string representation of `attribute` starts with `value`
return lambda el: el._attr_value_as_string(
attribute, '').startswith(value)
elif operator == '$':
# string representation of `attribute` ends with `value`
return lambda el: el._attr_value_as_string(
attribute, '').endswith(value)
elif operator == '*':
# string representation of `attribute` contains `value`
return lambda el: value in el._attr_value_as_string(attribute, '')
elif operator == '|':
# string representation of `attribute` is either exactly
# `value` or starts with `value` and then a dash.
def _is_or_starts_with_dash(element):
attribute_value = element._attr_value_as_string(attribute, '')
return (attribute_value == value or attribute_value.startswith(
value + '-'))
return _is_or_starts_with_dash
else:
return lambda el: el.has_attr(attribute)
# Old non-property versions of the generators, for backwards
# compatibility with BS3.
def nextGenerator(self):
return self.next_elements
def nextSiblingGenerator(self):
return self.next_siblings
def previousGenerator(self):
return self.previous_elements
def previousSiblingGenerator(self):
return self.previous_siblings
def parentGenerator(self):
return self.parents
class NavigableString(str, PageElement):
PREFIX = ''
SUFFIX = ''
def __new__(cls, value):
"""Create a new NavigableString.
When unpickling a NavigableString, this method is called with
the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be
passed in to the superclass's __new__ or the superclass won't know
how to handle non-ASCII characters.
"""
if isinstance(value, str):
return str.__new__(cls, value)
return str.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)
def __copy__(self):
return self
def __getnewargs__(self):
return (str(self),)
def __getattr__(self, attr):
"""text.string gives you text. This is for backwards
compatibility for Navigable*String, but for CData* it lets you
get the string without the CData wrapper."""
if attr == 'string':
return self
else:
raise AttributeError(
"'%s' object has no attribute '%s'" % (
self.__class__.__name__, attr))
def output_ready(self, formatter="minimal"):
output = self.format_string(self, formatter)
return self.PREFIX + output + self.SUFFIX
@property
def name(self):
return None
@name.setter
def name(self, name):
raise AttributeError("A NavigableString cannot be given a name.")
class PreformattedString(NavigableString):
"""A NavigableString not subject to the normal formatting rules.
The string will be passed into the formatter (to trigger side effects),
but the return value will be ignored.
"""
def output_ready(self, formatter="minimal"):
"""CData strings are passed into the formatter.
But the return value is ignored."""
self.format_string(self, formatter)
return self.PREFIX + self + self.SUFFIX
class CData(PreformattedString):
PREFIX = '<![CDATA['
SUFFIX = ']]>'
class ProcessingInstruction(PreformattedString):
PREFIX = '<?'
SUFFIX = '?>'
class Comment(PreformattedString):
PREFIX = '<!--'
SUFFIX = '-->'
class Declaration(PreformattedString):
PREFIX = '<!'
SUFFIX = '!>'
class Doctype(PreformattedString):
@classmethod
def for_name_and_ids(cls, name, pub_id, system_id):
value = name or ''
if pub_id is not None:
value += ' PUBLIC "%s"' % pub_id
if system_id is not None:
value += ' "%s"' % system_id
elif system_id is not None:
value += ' SYSTEM "%s"' % system_id
return Doctype(value)
PREFIX = '<!DOCTYPE '
SUFFIX = '>\n'
class Tag(PageElement):
"""Represents a found HTML tag with its attributes and contents."""
def __init__(self, parser=None, builder=None, name=None, namespace=None,
prefix=None, attrs=None, parent=None, previous=None):
"Basic constructor."
if parser is None:
self.parser_class = None
else:
# We don't actually store the parser object: that lets extracted
# chunks be garbage-collected.
self.parser_class = parser.__class__
if name is None:
raise ValueError("No value provided for new tag's name.")
self.name = name
self.namespace = namespace
self.prefix = prefix
if attrs is None:
attrs = {}
elif attrs and builder.cdata_list_attributes:
attrs = builder._replace_cdata_list_attribute_values(
self.name, attrs)
else:
attrs = dict(attrs)
self.attrs = attrs
self.contents = []
self.setup(parent, previous)
self.hidden = False
# Set up any substitutions, such as the charset in a META tag.
if builder is not None:
builder.set_up_substitutions(self)
self.can_be_empty_element = builder.can_be_empty_element(name)
else:
self.can_be_empty_element = False
parserClass = _alias("parser_class") # BS3
@property
def is_empty_element(self):
"""Is this tag an empty-element tag? (aka a self-closing tag)
A tag that has contents is never an empty-element tag.
A tag that has no contents may or may not be an empty-element
tag. It depends on the builder used to create the tag. If the
builder has a designated list of empty-element tags, then only
a tag whose name shows up in that list is considered an
empty-element tag.
If the builder has no designated list of empty-element tags,
then any tag with no contents is an empty-element tag.
"""
return len(self.contents) == 0 and self.can_be_empty_element
isSelfClosing = is_empty_element # BS3
@property
def string(self):
"""Convenience property to get the single string within this tag.
:Return: If this tag has a single string child, return value
is that string. If this tag has no children, or more than one
child, return value is None. If this tag has one child tag,
return value is the 'string' attribute of the child tag,
recursively.
"""
if len(self.contents) != 1:
return None
child = self.contents[0]
if isinstance(child, NavigableString):
return child
return child.string
@string.setter
def string(self, string):
self.clear()
self.append(string.__class__(string))
def _all_strings(self, strip=False, types=(NavigableString, CData)):
"""Yield all strings of certain classes, possibly stripping them.
By default, yields only NavigableString and CData objects. So
no comments, processing instructions, etc.
"""
for descendant in self.descendants:
if (
(types is None and not isinstance(descendant, NavigableString))
or
(types is not None and type(descendant) not in types)):
continue
if strip:
descendant = descendant.strip()
if len(descendant) == 0:
continue
yield descendant
strings = property(_all_strings)
@property
def stripped_strings(self):
for string in self._all_strings(True):
yield string
def get_text(self, separator="", strip=False,
types=(NavigableString, CData)):
"""
Get all child strings, concatenated using the given separator.
"""
return separator.join([s for s in self._all_strings(
strip, types=types)])
getText = get_text
text = property(get_text)
def decompose(self):
"""Recursively destroys the contents of this tree."""
self.extract()
i = self
while i is not None:
next = i.next_element
i.__dict__.clear()
i.contents = []
i = next
def clear(self, decompose=False):
"""
Extract all children. If decompose is True, decompose instead.
"""
if decompose:
for element in self.contents[:]:
if isinstance(element, Tag):
element.decompose()
else:
element.extract()
else:
for element in self.contents[:]:
element.extract()
def index(self, element):
"""
Find the index of a child by identity, not value. Avoids issues with
tag.contents.index(element) getting the index of equal elements.
"""
for i, child in enumerate(self.contents):
if child is element:
return i
raise ValueError("Tag.index: element not in tag")
def get(self, key, default=None):
"""Returns the value of the 'key' attribute for the tag, or
the value given for 'default' if it doesn't have that
attribute."""
return self.attrs.get(key, default)
def has_attr(self, key):
return key in self.attrs
def __hash__(self):
return str(self).__hash__()
def __getitem__(self, key):
"""tag[key] returns the value of the 'key' attribute for the tag,
and throws an exception if it's not there."""
return self.attrs[key]
def __iter__(self):
"Iterating over a tag iterates over its contents."
return iter(self.contents)
def __len__(self):
"The length of a tag is the length of its list of contents."
return len(self.contents)
def __contains__(self, x):
return x in self.contents
def __bool__(self):
"A tag is non-None even if it has no contents."
return True
def __setitem__(self, key, value):
"""Setting tag[key] sets the value of the 'key' attribute for the
tag."""
self.attrs[key] = value
def __delitem__(self, key):
"Deleting tag[key] deletes all 'key' attributes for the tag."
self.attrs.pop(key, None)
def __call__(self, *args, **kwargs):
"""Calling a tag like a function is the same as calling its
find_all() method. Eg. tag('a') returns a list of all the A tags
found within this tag."""
return self.find_all(*args, **kwargs)
def __getattr__(self, tag):
#print "Getattr %s.%s" % (self.__class__, tag)
if len(tag) > 3 and tag.endswith('Tag'):
# BS3: soup.aTag -> soup.find("a")
tag_name = tag[:-3]
warnings.warn(
'.%sTag is deprecated, use .find("%s") instead.' % (
tag_name, tag_name))
return self.find(tag_name)
# We special case contents to avoid recursion.
elif not tag.startswith("__") and not tag=="contents":
return self.find(tag)
raise AttributeError(
"'%s' object has no attribute '%s'" % (self.__class__, tag))
def __eq__(self, other):
"""Returns true iff this tag has the same name, the same attributes,
and the same contents (recursively) as the given tag."""
if self is other:
return True
if (not hasattr(other, 'name') or
not hasattr(other, 'attrs') or
not hasattr(other, 'contents') or
self.name != other.name or
self.attrs != other.attrs or
len(self) != len(other)):
return False
for i, my_child in enumerate(self.contents):
if my_child != other.contents[i]:
return False
return True
def __ne__(self, other):
"""Returns true iff this tag is not identical to the other tag,
as defined in __eq__."""
return not self == other
def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING):
"""Renders this tag as a string."""
return self.encode(encoding)
def __unicode__(self):
return self.decode()
def __str__(self):
return self.encode()
if PY3K:
__str__ = __repr__ = __unicode__
def encode(self, encoding=DEFAULT_OUTPUT_ENCODING,
indent_level=None, formatter="minimal",
errors="xmlcharrefreplace"):
# Turn the data structure into Unicode, then encode the
# Unicode.
u = self.decode(indent_level, encoding, formatter)
return u.encode(encoding, errors)
def _should_pretty_print(self, indent_level):
"""Should this tag be pretty-printed?"""
return (
indent_level is not None and
(self.name not in HTMLAwareEntitySubstitution.preformatted_tags
or self._is_xml))
def decode(self, indent_level=None,
eventual_encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Returns a Unicode representation of this tag and its contents.
:param eventual_encoding: The tag is destined to be
encoded into this encoding. This method is _not_
responsible for performing that encoding. This information
is passed in so that it can be substituted in if the
document contains a <META> tag that mentions the document's
encoding.
"""
# First off, turn a string formatter into a function. This
# will stop the lookup from happening over and over again.
if not isinstance(formatter, collections.Callable):
formatter = self._formatter_for_name(formatter)
attrs = []
if self.attrs:
for key, val in sorted(self.attrs.items()):
if val is None:
decoded = key
else:
if isinstance(val, list) or isinstance(val, tuple):
val = ' '.join(val)
elif not isinstance(val, str):
val = str(val)
elif (
isinstance(val, AttributeValueWithCharsetSubstitution)
and eventual_encoding is not None):
val = val.encode(eventual_encoding)
text = self.format_string(val, formatter)
decoded = (
str(key) + '='
+ EntitySubstitution.quoted_attribute_value(text))
attrs.append(decoded)
close = ''
closeTag = ''
prefix = ''
if self.prefix:
prefix = self.prefix + ":"
if self.is_empty_element:
close = '/'
else:
closeTag = '</%s%s>' % (prefix, self.name)
pretty_print = self._should_pretty_print(indent_level)
space = ''
indent_space = ''
if indent_level is not None:
indent_space = (' ' * (indent_level - 1))
if pretty_print:
space = indent_space
indent_contents = indent_level + 1
else:
indent_contents = None
contents = self.decode_contents(
indent_contents, eventual_encoding, formatter)
if self.hidden:
# This is the 'document root' object.
s = contents
else:
s = []
attribute_string = ''
if attrs:
attribute_string = ' ' + ' '.join(attrs)
if indent_level is not None:
# Even if this particular tag is not pretty-printed,
# we should indent up to the start of the tag.
s.append(indent_space)
s.append('<%s%s%s%s>' % (
prefix, self.name, attribute_string, close))
if pretty_print:
s.append("\n")
s.append(contents)
if pretty_print and contents and contents[-1] != "\n":
s.append("\n")
if pretty_print and closeTag:
s.append(space)
s.append(closeTag)
if indent_level is not None and closeTag and self.next_sibling:
# Even if this particular tag is not pretty-printed,
# we're now done with the tag, and we should add a
# newline if appropriate.
s.append("\n")
s = ''.join(s)
return s
def prettify(self, encoding=None, formatter="minimal"):
if encoding is None:
return self.decode(True, formatter=formatter)
else:
return self.encode(encoding, True, formatter=formatter)
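    # Illustrative sketch (editor's addition) of the three output methods
    # above; the tag variable is made up:
    #
    #     tag.decode()             # Unicode, no pretty-printing
    #     tag.encode('utf-8')      # bytes in the requested encoding
    #     print(tag.prettify())    # Unicode, one element per line, indented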
def decode_contents(self, indent_level=None,
eventual_encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Renders the contents of this tag as a Unicode string.
:param eventual_encoding: The tag is destined to be
encoded into this encoding. This method is _not_
responsible for performing that encoding. This information
is passed in so that it can be substituted in if the
document contains a <META> tag that mentions the document's
encoding.
"""
# First off, turn a string formatter into a function. This
# will stop the lookup from happening over and over again.
if not isinstance(formatter, collections.Callable):
formatter = self._formatter_for_name(formatter)
pretty_print = (indent_level is not None)
s = []
for c in self:
text = None
if isinstance(c, NavigableString):
text = c.output_ready(formatter)
elif isinstance(c, Tag):
s.append(c.decode(indent_level, eventual_encoding,
formatter))
if text and indent_level and not self.name == 'pre':
text = text.strip()
if text:
if pretty_print and not self.name == 'pre':
s.append(" " * (indent_level - 1))
s.append(text)
if pretty_print and not self.name == 'pre':
s.append("\n")
return ''.join(s)
def encode_contents(
self, indent_level=None, encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Renders the contents of this tag as a bytestring."""
contents = self.decode_contents(indent_level, encoding, formatter)
return contents.encode(encoding)
# Old method for BS3 compatibility
def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
prettyPrint=False, indentLevel=0):
if not prettyPrint:
indentLevel = None
return self.encode_contents(
indent_level=indentLevel, encoding=encoding)
#Soup methods
def find(self, name=None, attrs={}, recursive=True, text=None,
**kwargs):
"""Return only the first child of this Tag matching the given
criteria."""
r = None
l = self.find_all(name, attrs, recursive, text, 1, **kwargs)
if l:
r = l[0]
return r
findChild = find
def find_all(self, name=None, attrs={}, recursive=True, text=None,
limit=None, **kwargs):
"""Extracts a list of Tag objects that match the given
criteria. You can specify the name of the Tag and any
attributes you want the Tag to have.
The value of a key-value pair in the 'attrs' map can be a
string, a list of strings, a regular expression object, or a
callable that takes a string and returns whether or not the
string matches for some custom definition of 'matches'. The
same is true of the tag name."""
generator = self.descendants
if not recursive:
generator = self.children
return self._find_all(name, attrs, text, limit, generator, **kwargs)
findAll = find_all # BS3
findChildren = find_all # BS2
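    # Illustrative sketch of the matching rules described in the docstring
    # above (editor's addition; tag names and attributes are made up):
    #
    #     soup.find_all('a')                       # by tag name
    #     soup.find_all(['a', 'b'])                # any name in a list
    #     soup.find_all(re.compile('^h[1-6]$'))    # name matching a regexp
    #     soup.find_all(id='main')                 # by attribute value
    #     soup.find_all('a', attrs={'class': 'external'}, limit=2)
    #     soup.find_all(lambda tag: tag.has_attr('id'))   # custom callable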
#Generator methods
@property
def children(self):
# return iter() to make the purpose of the method clear
return iter(self.contents) # XXX This seems to be untested.
@property
def descendants(self):
if not len(self.contents):
return
stopNode = self._last_descendant().next_element
current = self.contents[0]
while current is not stopNode:
yield current
current = current.next_element
# CSS selector code
_selector_combinators = ['>', '+', '~']
_select_debug = False
def select(self, selector, _candidate_generator=None):
"""Perform a CSS selection operation on the current element."""
tokens = selector.split()
current_context = [self]
if tokens[-1] in self._selector_combinators:
raise ValueError(
'Final combinator "%s" is missing an argument.' % tokens[-1])
if self._select_debug:
print('Running CSS selector "%s"' % selector)
for index, token in enumerate(tokens):
if self._select_debug:
print(' Considering token "%s"' % token)
recursive_candidate_generator = None
tag_name = None
if tokens[index-1] in self._selector_combinators:
# This token was consumed by the previous combinator. Skip it.
if self._select_debug:
print(' Token was consumed by the previous combinator.')
continue
# Each operation corresponds to a checker function, a rule
# for determining whether a candidate matches the
# selector. Candidates are generated by the active
# iterator.
checker = None
m = self.attribselect_re.match(token)
if m is not None:
# Attribute selector
tag_name, attribute, operator, value = m.groups()
checker = self._attribute_checker(operator, attribute, value)
elif '#' in token:
# ID selector
tag_name, tag_id = token.split('#', 1)
def id_matches(tag):
return tag.get('id', None) == tag_id
checker = id_matches
elif '.' in token:
# Class selector
tag_name, klass = token.split('.', 1)
classes = set(klass.split('.'))
def classes_match(candidate):
return classes.issubset(candidate.get('class', []))
checker = classes_match
elif ':' in token:
# Pseudo-class
tag_name, pseudo = token.split(':', 1)
if tag_name == '':
raise ValueError(
"A pseudo-class must be prefixed with a tag name.")
                pseudo_attributes = re.match(r'([a-zA-Z\d-]+)\(([a-zA-Z\d]+)\)', pseudo)
found = []
if pseudo_attributes is not None:
pseudo_type, pseudo_value = pseudo_attributes.groups()
if pseudo_type == 'nth-of-type':
try:
pseudo_value = int(pseudo_value)
                        except ValueError:
raise NotImplementedError(
'Only numeric values are currently supported for the nth-of-type pseudo-class.')
if pseudo_value < 1:
raise ValueError(
'nth-of-type pseudo-class value must be at least 1.')
class Counter(object):
def __init__(self, destination):
self.count = 0
self.destination = destination
def nth_child_of_type(self, tag):
self.count += 1
if self.count == self.destination:
return True
if self.count > self.destination:
# Stop the generator that's sending us
# these things.
raise StopIteration()
return False
checker = Counter(pseudo_value).nth_child_of_type
else:
raise NotImplementedError(
'Only the following pseudo-classes are implemented: nth-of-type.')
elif token == '*':
# Star selector -- matches everything
pass
elif token == '>':
# Run the next token as a CSS selector against the
# direct children of each tag in the current context.
recursive_candidate_generator = lambda tag: tag.children
elif token == '~':
# Run the next token as a CSS selector against the
# siblings of each tag in the current context.
recursive_candidate_generator = lambda tag: tag.next_siblings
elif token == '+':
# For each tag in the current context, run the next
# token as a CSS selector against the tag's next
# sibling that's a tag.
def next_tag_sibling(tag):
yield tag.find_next_sibling(True)
recursive_candidate_generator = next_tag_sibling
elif self.tag_name_re.match(token):
# Just a tag name.
tag_name = token
else:
raise ValueError(
'Unsupported or invalid CSS selector: "%s"' % token)
if recursive_candidate_generator:
# This happens when the selector looks like "> foo".
#
# The generator calls select() recursively on every
# member of the current context, passing in a different
# candidate generator and a different selector.
#
# In the case of "> foo", the candidate generator is
# one that yields a tag's direct children (">"), and
# the selector is "foo".
next_token = tokens[index+1]
def recursive_select(tag):
if self._select_debug:
print(' Calling select("%s") recursively on %s %s' % (next_token, tag.name, tag.attrs))
print('-' * 40)
for i in tag.select(next_token, recursive_candidate_generator):
if self._select_debug:
print('(Recursive select picked up candidate %s %s)' % (i.name, i.attrs))
yield i
if self._select_debug:
print('-' * 40)
_use_candidate_generator = recursive_select
elif _candidate_generator is None:
# By default, a tag's candidates are all of its
# children. If tag_name is defined, only yield tags
# with that name.
if self._select_debug:
                    if tag_name:
                        check = tag_name
                    else:
                        check = "[any]"
print(' Default candidate generator, tag name="%s"' % check)
if self._select_debug:
# This is redundant with later code, but it stops
# a bunch of bogus tags from cluttering up the
# debug log.
def default_candidate_generator(tag):
for child in tag.descendants:
if not isinstance(child, Tag):
continue
if tag_name and not child.name == tag_name:
continue
yield child
_use_candidate_generator = default_candidate_generator
else:
_use_candidate_generator = lambda tag: tag.descendants
else:
_use_candidate_generator = _candidate_generator
new_context = []
new_context_ids = set([])
for tag in current_context:
if self._select_debug:
print(" Running candidate generator on %s %s" % (
tag.name, repr(tag.attrs)))
for candidate in _use_candidate_generator(tag):
if not isinstance(candidate, Tag):
continue
if tag_name and candidate.name != tag_name:
continue
if checker is not None:
try:
result = checker(candidate)
except StopIteration:
# The checker has decided we should no longer
# run the generator.
break
if checker is None or result:
if self._select_debug:
print(" SUCCESS %s %s" % (candidate.name, repr(candidate.attrs)))
if id(candidate) not in new_context_ids:
# If a tag matches a selector more than once,
# don't include it in the context more than once.
new_context.append(candidate)
new_context_ids.add(id(candidate))
elif self._select_debug:
print(" FAILURE %s %s" % (candidate.name, repr(candidate.attrs)))
current_context = new_context
if self._select_debug:
print("Final verdict:")
for i in current_context:
print(" %s %s" % (i.name, i.attrs))
return current_context
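    # Illustrative sketch (editor's addition) of the selector syntax handled
    # above; element names are made up, and only constructs recognized by
    # this method are shown:
    #
    #     soup.select('div')               # tag name
    #     soup.select('div.note')          # class selector
    #     soup.select('div#main')          # id selector
    #     soup.select('a[href]')           # attribute selector
    #     soup.select('ul > li')           # child combinator
    #     soup.select('h1 ~ p')            # sibling combinator
    #     soup.select('p:nth-of-type(2)')  # the one supported pseudo-class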
# Old names for backwards compatibility
def childGenerator(self):
return self.children
def recursiveChildGenerator(self):
return self.descendants
def has_key(self, key):
"""This was kind of misleading because has_key() (attributes)
        was different from __contains__ (contents). has_key() is gone in
Python 3, anyway."""
warnings.warn('has_key is deprecated. Use has_attr("%s") instead.' % (
key))
return self.has_attr(key)
# Next, a couple classes to represent queries and their results.
class SoupStrainer(object):
"""Encapsulates a number of ways of matching a markup element (tag or
text)."""
def __init__(self, name=None, attrs={}, text=None, **kwargs):
self.name = self._normalize_search_value(name)
if not isinstance(attrs, dict):
# Treat a non-dict value for attrs as a search for the 'class'
# attribute.
kwargs['class'] = attrs
attrs = None
if 'class_' in kwargs:
# Treat class_="foo" as a search for the 'class'
# attribute, overriding any non-dict value for attrs.
kwargs['class'] = kwargs['class_']
del kwargs['class_']
if kwargs:
if attrs:
attrs = attrs.copy()
attrs.update(kwargs)
else:
attrs = kwargs
normalized_attrs = {}
for key, value in list(attrs.items()):
normalized_attrs[key] = self._normalize_search_value(value)
self.attrs = normalized_attrs
self.text = self._normalize_search_value(text)
def _normalize_search_value(self, value):
# Leave it alone if it's a Unicode string, a callable, a
# regular expression, a boolean, or None.
if (isinstance(value, str) or isinstance(value, collections.Callable) or hasattr(value, 'match')
or isinstance(value, bool) or value is None):
return value
# If it's a bytestring, convert it to Unicode, treating it as UTF-8.
if isinstance(value, bytes):
return value.decode("utf8")
# If it's listlike, convert it into a list of strings.
if hasattr(value, '__iter__'):
new_value = []
for v in value:
if (hasattr(v, '__iter__') and not isinstance(v, bytes)
and not isinstance(v, str)):
# This is almost certainly the user's mistake. In the
# interests of avoiding infinite loops, we'll let
# it through as-is rather than doing a recursive call.
new_value.append(v)
else:
new_value.append(self._normalize_search_value(v))
return new_value
# Otherwise, convert it into a Unicode string.
# The unicode(str()) thing is so this will do the same thing on Python 2
# and Python 3.
return str(str(value))
def __str__(self):
if self.text:
return self.text
else:
return "%s|%s" % (self.name, self.attrs)
def search_tag(self, markup_name=None, markup_attrs={}):
found = None
markup = None
if isinstance(markup_name, Tag):
markup = markup_name
markup_attrs = markup
call_function_with_tag_data = (
isinstance(self.name, collections.Callable)
and not isinstance(markup_name, Tag))
if ((not self.name)
or call_function_with_tag_data
or (markup and self._matches(markup, self.name))
or (not markup and self._matches(markup_name, self.name))):
if call_function_with_tag_data:
match = self.name(markup_name, markup_attrs)
else:
match = True
markup_attr_map = None
for attr, match_against in list(self.attrs.items()):
if not markup_attr_map:
if hasattr(markup_attrs, 'get'):
markup_attr_map = markup_attrs
else:
markup_attr_map = {}
for k, v in markup_attrs:
markup_attr_map[k] = v
attr_value = markup_attr_map.get(attr)
if not self._matches(attr_value, match_against):
match = False
break
if match:
if markup:
found = markup
else:
found = markup_name
if found and self.text and not self._matches(found.string, self.text):
found = None
return found
searchTag = search_tag
def search(self, markup):
# print 'looking for %s in %s' % (self, markup)
found = None
# If given a list of items, scan it for a text element that
# matches.
if hasattr(markup, '__iter__') and not isinstance(markup, (Tag, str)):
for element in markup:
if isinstance(element, NavigableString) \
and self.search(element):
found = element
break
# If it's a Tag, make sure its name or attributes match.
# Don't bother with Tags if we're searching for text.
elif isinstance(markup, Tag):
if not self.text or self.name or self.attrs:
found = self.search_tag(markup)
# If it's text, make sure the text matches.
elif isinstance(markup, NavigableString) or \
isinstance(markup, str):
if not self.name and not self.attrs and self._matches(markup, self.text):
found = markup
else:
raise Exception(
"I don't know how to match against a %s" % markup.__class__)
return found
def _matches(self, markup, match_against):
# print u"Matching %s against %s" % (markup, match_against)
result = False
if isinstance(markup, list) or isinstance(markup, tuple):
# This should only happen when searching a multi-valued attribute
# like 'class'.
if (isinstance(match_against, str)
and ' ' in match_against):
# A bit of a special case. If they try to match "foo
# bar" on a multivalue attribute's value, only accept
# the literal value "foo bar"
#
# XXX This is going to be pretty slow because we keep
# splitting match_against. But it shouldn't come up
# too often.
return (whitespace_re.split(match_against) == markup)
else:
for item in markup:
if self._matches(item, match_against):
return True
return False
if match_against is True:
# True matches any non-None value.
return markup is not None
if isinstance(match_against, collections.Callable):
return match_against(markup)
# Custom callables take the tag as an argument, but all
# other ways of matching match the tag name as a string.
if isinstance(markup, Tag):
markup = markup.name
# Ensure that `markup` is either a Unicode string, or None.
markup = self._normalize_search_value(markup)
if markup is None:
# None matches None, False, an empty string, an empty list, and so on.
return not match_against
if isinstance(match_against, str):
# Exact string match
return markup == match_against
if hasattr(match_against, 'match'):
# Regexp match
return match_against.search(markup)
if hasattr(match_against, '__iter__'):
# The markup must be an exact match against something
# in the iterable.
return markup in match_against
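# Illustrative sketch (editor's addition): a SoupStrainer bundles the same
# name/attrs/text matching rules used by find()/find_all(), and it can also
# be passed to the BeautifulSoup constructor as parse_only to restrict which
# elements get parsed. The tag name and pattern below are made up.
#
#     only_links = SoupStrainer('a', href=re.compile('^https:'))
#     soup = BeautifulSoup(markup, 'html.parser', parse_only=only_links)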
class ResultSet(list):
"""A ResultSet is just a list that keeps track of the SoupStrainer
that created it."""
def __init__(self, source, result=()):
super(ResultSet, self).__init__(result)
self.source = source
|
chand3040/sree_odoo
|
refs/heads/master
|
openerp/modules/graph.py
|
260
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2014 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Modules dependency graph. """
import os, sys, imp
from os.path import join as opj
import itertools
import zipimport
import openerp
import openerp.osv as osv
import openerp.tools as tools
import openerp.tools.osutil as osutil
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
import zipfile
import openerp.release as release
import re
import base64
from zipfile import PyZipFile, ZIP_DEFLATED
from cStringIO import StringIO
import logging
_logger = logging.getLogger(__name__)
class Graph(dict):
""" Modules dependency graph.
The graph is a mapping from module name to Nodes.
"""
def add_node(self, name, info):
max_depth, father = 0, None
for d in info['depends']:
n = self.get(d) or Node(d, self, None) # lazy creation, do not use default value for get()
if n.depth >= max_depth:
father = n
max_depth = n.depth
if father:
return father.add_child(name, info)
else:
return Node(name, self, info)
def update_from_db(self, cr):
if not len(self):
return
# update the graph with values from the database (if exist)
## First, we set the default values for each package in graph
additional_data = dict((key, {'id': 0, 'state': 'uninstalled', 'dbdemo': False, 'installed_version': None}) for key in self.keys())
## Then we get the values from the database
cr.execute('SELECT name, id, state, demo AS dbdemo, latest_version AS installed_version'
' FROM ir_module_module'
' WHERE name IN %s',(tuple(additional_data),)
)
## and we update the default values with values from the database
additional_data.update((x['name'], x) for x in cr.dictfetchall())
for package in self.values():
for k, v in additional_data[package.name].items():
setattr(package, k, v)
def add_module(self, cr, module, force=None):
self.add_modules(cr, [module], force)
def add_modules(self, cr, module_list, force=None):
if force is None:
force = []
packages = []
len_graph = len(self)
for module in module_list:
# This will raise an exception if no/unreadable descriptor file.
# NOTE The call to load_information_from_description_file is already
# done by db.initialize, so it is possible to not do it again here.
info = openerp.modules.module.load_information_from_description_file(module)
if info and info['installable']:
packages.append((module, info)) # TODO directly a dict, like in get_modules_with_version
else:
_logger.warning('module %s: not installable, skipped', module)
dependencies = dict([(p, info['depends']) for p, info in packages])
current, later = set([p for p, info in packages]), set()
while packages and current > later:
package, info = packages[0]
deps = info['depends']
# if all dependencies of 'package' are already in the graph, add 'package' in the graph
if reduce(lambda x, y: x and y in self, deps, True):
if not package in current:
packages.pop(0)
continue
later.clear()
current.remove(package)
node = self.add_node(package, info)
for kind in ('init', 'demo', 'update'):
if package in tools.config[kind] or 'all' in tools.config[kind] or kind in force:
setattr(node, kind, True)
else:
later.add(package)
packages.append((package, info))
packages.pop(0)
self.update_from_db(cr)
for package in later:
unmet_deps = filter(lambda p: p not in self, dependencies[package])
_logger.error('module %s: Unmet dependencies: %s', package, ', '.join(unmet_deps))
result = len(self) - len_graph
if result != len(module_list):
_logger.warning('Some modules were not loaded.')
return result
def __iter__(self):
level = 0
done = set(self.keys())
while done:
level_modules = sorted((name, module) for name, module in self.items() if module.depth==level)
for name, module in level_modules:
done.remove(name)
yield module
level += 1
def __str__(self):
return '\n'.join(str(n) for n in self if n.depth == 0)
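# Illustrative sketch (editor's addition): how the graph is built and walked.
# The module names and 'depends' metadata below are made up; in practice the
# info dicts come from each module's descriptor file via add_modules().
#
#     g = Graph()
#     g.add_node('base', {'depends': []})
#     g.add_node('web',  {'depends': ['base']})
#     g.add_node('sale', {'depends': ['base', 'web']})
#     for node in g:                      # yields nodes level by level
#         print node.name, node.depth     # base 0, web 1, sale 2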
class Node(object):
""" One module in the modules dependency graph.
Node acts as a per-module singleton. A node is constructed via
Graph.add_module() or Graph.add_modules(). Some of its fields are from
    ir_module_module (set by Graph.update_from_db()).
"""
def __new__(cls, name, graph, info):
if name in graph:
inst = graph[name]
else:
inst = object.__new__(cls)
graph[name] = inst
return inst
def __init__(self, name, graph, info):
self.name = name
self.graph = graph
self.info = info or getattr(self, 'info', {})
if not hasattr(self, 'children'):
self.children = []
if not hasattr(self, 'depth'):
self.depth = 0
@property
def data(self):
return self.info
def add_child(self, name, info):
node = Node(name, self.graph, info)
node.depth = self.depth + 1
if node not in self.children:
self.children.append(node)
for attr in ('init', 'update', 'demo'):
if hasattr(self, attr):
setattr(node, attr, True)
self.children.sort(lambda x, y: cmp(x.name, y.name))
return node
def __setattr__(self, name, value):
super(Node, self).__setattr__(name, value)
if name in ('init', 'update', 'demo'):
tools.config[name][self.name] = 1
for child in self.children:
setattr(child, name, value)
if name == 'depth':
for child in self.children:
setattr(child, name, value + 1)
def __iter__(self):
return itertools.chain(iter(self.children), *map(iter, self.children))
def __str__(self):
return self._pprint()
def _pprint(self, depth=0):
s = '%s\n' % self.name
for c in self.children:
s += '%s`-> %s' % (' ' * depth, c._pprint(depth+1))
return s
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
miguelgrinberg/slam
|
refs/heads/master
|
slam/cli.py
|
1
|
from __future__ import print_function
from datetime import datetime
import inspect
import json
import logging
import os
try:
import pkg_resources
except ImportError: # pragma: no cover
pkg_resources = None
import random
import re
import subprocess
import shutil
import string
import sys
import time
import boto3
import botocore
import climax
from lambda_uploader.package import build_package
from merry import Merry
import yaml
from . import plugins
from .cfn import get_cfn_template
from .helpers import render_template
merry = Merry(logger_name='slam', debug='unittest' in sys.modules)
f = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
h = logging.FileHandler('slam_error.log')
h.setFormatter(f)
merry.logger.addHandler(h)
@merry._try
@climax.group()
@climax.argument('--config-file', '-c', default='slam.yaml',
help='The slam configuration file. Defaults to slam.yaml.')
def main(config_file):
return {'config_file': config_file}
@merry._except(RuntimeError, ValueError)
def on_error(e): # pragma: no cover
"""Error handler
RuntimeError or ValueError exceptions raised by commands will be handled
by this function.
"""
    exname = {'RuntimeError': 'Runtime error', 'ValueError': 'Value error'}
sys.stderr.write('{}: {}\n'.format(exname[e.__class__.__name__], str(e)))
sys.stderr.write('See file slam_error.log for additional details.\n')
sys.exit(1)
@merry._except(Exception)
def on_unexpected_error(e): # pragma: no cover
"""Catch-all error handler
Unexpected errors will be handled by this function.
"""
sys.stderr.write('Unexpected error: {} ({})\n'.format(
str(e), e.__class__.__name__))
sys.stderr.write('See file slam_error.log for additional details.\n')
sys.exit(1)
def _load_config(config_file='slam.yaml'):
try:
with open(config_file) as f:
return yaml.load(f, Loader=yaml.FullLoader)
except IOError:
# there is no config file in the current directory
raise RuntimeError('Config file {} not found. Did you run '
'"slam init"?'.format(config_file))
@main.command()
@climax.argument('--runtime', default=None,
help=('The Lambda runtime to use, such as python2.7 or '
'python3.6'))
@climax.argument('--requirements', default='requirements.txt',
help='The location of the project\'s requirements file.')
@climax.argument('--stages', default='dev',
help='Comma-separated list of stage environments to deploy.')
@climax.argument('--memory', type=int, default=128,
help=('The memory allocation for the lambda function in '
'megabytes.'))
@climax.argument('--timeout', type=int, default=10,
help='The timeout for the lambda function in seconds.')
@climax.argument('--bucket',
help='S3 bucket where lambda packages are stored.')
@climax.argument('--description', default='Deployed with slam.',
help='Description of the API.')
@climax.argument('--name',
help='API name.')
@climax.argument('function',
help='The function or callable to deploy, in the format '
'module:function.')
def init(name, description, bucket, timeout, memory, stages, requirements,
function, runtime, config_file, **kwargs):
"""Generate a configuration file."""
if os.path.exists(config_file):
raise RuntimeError('Please delete the old version {} if you want to '
'reconfigure your project.'.format(config_file))
module, app = function.split(':')
if not name:
name = module.replace('_', '-')
if not re.match('^[a-zA-Z][-a-zA-Z0-9]*$', name):
raise ValueError('The name {} is invalid, only letters, numbers and '
'dashes are allowed.'.format(name))
if not bucket:
random_suffix = ''.join(
random.choice(string.ascii_lowercase + string.digits)
for n in range(8))
bucket = '{}-{}'.format(name.lower(), random_suffix)
stages = [s.strip() for s in stages.split(',')]
if runtime is None:
if sys.version_info[0] == 2: # pragma: no cover
runtime = 'python2.7'
else:
runtime = 'python3.6'
# generate slam.yaml
template_file = os.path.join(os.path.dirname(__file__),
'templates/slam.yaml')
with open(template_file) as f:
template = f.read()
template = render_template(template, name=name, description=description,
module=module, app=app, bucket=bucket,
timeout=timeout, memory=memory,
requirements=requirements, stages=stages,
devstage=stages[0], runtime=runtime)
with open(config_file, 'wt') as f:
f.write(template)
# plugins
config = _load_config(config_file)
for name, plugin in plugins.items():
# write plugin documentation as a comment in config file
with open(config_file, 'at') as f:
f.write('\n\n# ' + (plugin.__doc__ or name).replace(
'\n', '\n# ') + '\n')
if hasattr(plugin, 'init'):
arguments = {k: v for k, v in kwargs.items()
if k in getattr(plugin.init, '_argnames', [])}
plugin_config = plugin.init.func(config=config, **arguments)
if plugin_config:
with open(config_file, 'at') as f:
yaml.dump({name: plugin_config}, f,
default_flow_style=False)
print('The configuration file for your project has been generated. '
'Remember to add {} to source control.'.format(config_file))
def _run_command(cmd):
try:
proc = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, err = proc.communicate()
except OSError:
raise RuntimeError('Invalid command {}'.format(cmd))
if proc.returncode != 0:
print(out)
        raise RuntimeError('Command failed with exit code {}.'.format(
            proc.returncode))
return out
def _run_lambda_function(event, context, app, config): # pragma: no cover
"""Run the function. This is the default when no plugins (such as wsgi)
define an alternative run function."""
args = event.get('args', [])
kwargs = event.get('kwargs', {})
# first attempt to invoke the function passing the lambda event and context
try:
ret = app(*args, event=event, context=context, **kwargs)
except TypeError:
# try again without passing the event and context
ret = app(*args, **kwargs)
return ret
def _generate_lambda_handler(config, output='.slam/handler.py'):
"""Generate a handler.py file for the lambda function start up."""
# Determine what the start up code is. The default is to just run the
    # function, but it can be overridden by a plugin such as wsgi for a more
# elaborated way to run the function.
run_function = _run_lambda_function
for name, plugin in plugins.items():
if name in config and hasattr(plugin, 'run_lambda_function'):
run_function = plugin.run_lambda_function
run_code = ''.join(inspect.getsourcelines(run_function)[0][1:])
# generate handler.py
with open(os.path.join(os.path.dirname(__file__),
'templates/handler.py.template')) as f:
template = f.read()
template = render_template(template, module=config['function']['module'],
app=config['function']['app'],
run_lambda_function=run_code,
config_json=json.dumps(config,
separators=(',', ':')))
with open(output, 'wt') as f:
f.write(template + '\n')
def _build(config, rebuild_deps=False):
package = datetime.utcnow().strftime("lambda_package.%Y%m%d_%H%M%S.zip")
ignore = ['\\.slam\\/venv\\/.*$', '\\.pyc$']
if os.environ.get('VIRTUAL_ENV'):
# make sure the currently active virtualenv is not included in the pkg
venv = os.path.relpath(os.environ['VIRTUAL_ENV'], os.getcwd())
if not venv.startswith('.'):
ignore.append(venv.replace('/', '\\/') + '\\/.*$')
# create .slam directory if it doesn't exist yet
if not os.path.exists('.slam'):
os.mkdir('.slam')
_generate_lambda_handler(config)
# create or update virtualenv
if rebuild_deps:
if os.path.exists('.slam/venv'):
shutil.rmtree('.slam/venv')
if not os.path.exists('.slam/venv'):
_run_command('virtualenv .slam/venv')
_run_command('.slam/venv/bin/pip install -r ' + config['requirements'])
# build lambda package
build_package('.', config['requirements'], virtualenv='.slam/venv',
extra_files=['.slam/handler.py'], ignore=ignore,
zipfile_name=package)
# cleanup lambda uploader's temp directory
if os.path.exists('.lambda_uploader_temp'):
shutil.rmtree('.lambda_uploader_temp')
return package
def _get_aws_region(): # pragma: no cover
return boto3.session.Session().region_name
def _ensure_bucket_exists(s3, bucket, region): # pragma: no cover
try:
s3.head_bucket(Bucket=bucket)
except botocore.exceptions.ClientError:
if region != 'us-east-1':
s3.create_bucket(Bucket=bucket, CreateBucketConfiguration={
'LocationConstraint': region})
else:
s3.create_bucket(Bucket=bucket)
def _get_from_stack(stack, source, key):
value = None
if source + 's' not in stack:
raise ValueError('Invalid stack attribute' + str(stack))
for p in stack[source + 's']:
if p[source + 'Key'] == key:
value = p[source + 'Value']
break
return value
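# Illustrative sketch (editor's addition): _get_from_stack() pulls a value
# out of the Parameters or Outputs list of a CloudFormation describe_stacks()
# result. The stack dict below is a made-up, trimmed-down example.
#
#     stack = {'Parameters': [{'ParameterKey': 'LambdaS3Key',
#                              'ParameterValue': 'lambda_package.zip'}],
#              'Outputs': [{'OutputKey': 'FunctionArn',
#                           'OutputValue': 'arn:aws:lambda:...'}]}
#     _get_from_stack(stack, 'Parameter', 'LambdaS3Key')  # 'lambda_package.zip'
#     _get_from_stack(stack, 'Output', 'FunctionArn')     # 'arn:aws:lambda:...'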
def _print_status(config):
cfn = boto3.client('cloudformation')
lmb = boto3.client('lambda')
try:
stack = cfn.describe_stacks(StackName=config['name'])['Stacks'][0]
except botocore.exceptions.ClientError:
print('{} has not been deployed yet.'.format(config['name']))
else:
print('{} is deployed!'.format(config['name']))
print(' Function name: {}'.format(
_get_from_stack(stack, 'Output', 'FunctionArn').split(':')[-1]))
print(' S3 bucket: {}'.format(config['aws']['s3_bucket']))
print(' Stages:')
stages = list(config['stage_environments'].keys())
stages.sort()
plugin_status = {}
for name, plugin in plugins.items():
if name in config and hasattr(plugin, 'status'):
statuses = plugin.status(config, stack)
if statuses:
for s, status in statuses.items():
plugin_status.setdefault(s, []).append(status)
for s in stages:
fd = None
try:
fd = lmb.get_function(FunctionName=_get_from_stack(
stack, 'Output', 'FunctionArn'), Qualifier=s)
except botocore.exceptions.ClientError: # pragma: no cover
continue
v = ':{}'.format(fd['Configuration']['Version'])
if s in plugin_status and len(plugin_status[s]) > 0:
print(' {}{}: {}'.format(s, v,
' '.join(plugin_status[s])))
else:
print(' {}{}'.format(s, v))
@main.command()
@climax.argument('--rebuild-deps', action='store_true',
help='Reinstall all dependencies.')
def build(rebuild_deps, config_file):
"""Build lambda package."""
config = _load_config(config_file)
print("Building lambda package...")
package = _build(config, rebuild_deps=rebuild_deps)
print("{} has been built successfully.".format(package))
@main.command()
@climax.argument('--stage',
help=('Stage to deploy to. Defaults to the stage designated '
'as the development stage'))
@climax.argument('--lambda-package',
help='Custom lambda zip package to deploy.')
@climax.argument('--no-lambda', action='store_true',
help='Do no deploy a new lambda.')
@climax.argument('--rebuild-deps', action='store_true',
help='Reinstall all dependencies.')
def deploy(stage, lambda_package, no_lambda, rebuild_deps, config_file):
"""Deploy the project to the development stage."""
config = _load_config(config_file)
if stage is None:
stage = config['devstage']
s3 = boto3.client('s3')
cfn = boto3.client('cloudformation')
region = _get_aws_region()
# obtain previous deployment if it exists
previous_deployment = None
try:
previous_deployment = cfn.describe_stacks(
StackName=config['name'])['Stacks'][0]
except botocore.exceptions.ClientError:
pass
# build lambda package if required
built_package = False
new_package = True
if lambda_package is None and not no_lambda:
print("Building lambda package...")
lambda_package = _build(config, rebuild_deps=rebuild_deps)
built_package = True
elif lambda_package is None:
# preserve package from previous deployment
new_package = False
lambda_package = _get_from_stack(previous_deployment, 'Parameter',
'LambdaS3Key')
# create S3 bucket if it doesn't exist yet
bucket = config['aws']['s3_bucket']
_ensure_bucket_exists(s3, bucket, region)
# upload lambda package to S3
if new_package:
s3.upload_file(lambda_package, bucket, lambda_package)
if built_package:
# we created the package, so now that is on S3 we can delete it
os.remove(lambda_package)
# prepare cloudformation template
template_body = get_cfn_template(config)
parameters = [
{'ParameterKey': 'LambdaS3Bucket', 'ParameterValue': bucket},
{'ParameterKey': 'LambdaS3Key', 'ParameterValue': lambda_package},
]
stages = list(config['stage_environments'].keys())
stages.sort()
for s in stages:
param = s.title() + 'Version'
if s != stage:
v = _get_from_stack(previous_deployment, 'Parameter', param) \
if previous_deployment else '$LATEST'
v = v or '$LATEST'
else:
v = '$LATEST'
parameters.append({'ParameterKey': param, 'ParameterValue': v})
# run the cloudformation template
if previous_deployment is None:
print('Deploying {}:{}...'.format(config['name'], stage))
cfn.create_stack(StackName=config['name'], TemplateBody=template_body,
Parameters=parameters,
Capabilities=['CAPABILITY_IAM'])
waiter = cfn.get_waiter('stack_create_complete')
else:
print('Updating {}:{}...'.format(config['name'], stage))
cfn.update_stack(StackName=config['name'], TemplateBody=template_body,
Parameters=parameters,
Capabilities=['CAPABILITY_IAM'])
waiter = cfn.get_waiter('stack_update_complete')
# wait for cloudformation to do its thing
try:
waiter.wait(StackName=config['name'])
except botocore.exceptions.ClientError:
# the update failed, so we remove the lambda package from S3
if built_package:
s3.delete_object(Bucket=bucket, Key=lambda_package)
raise
else:
if previous_deployment and new_package:
# the update succeeded, so it is safe to delete the lambda package
# used by the previous deployment
old_pkg = _get_from_stack(previous_deployment, 'Parameter',
'LambdaS3Key')
s3.delete_object(Bucket=bucket, Key=old_pkg)
# we are done, show status info and exit
_print_status(config)
@main.command()
@climax.argument('--version',
help=('Stage name or numeric version to publish. '
'Defaults to the development stage.'))
@climax.argument('stage', help='Stage to publish to.')
def publish(version, stage, config_file):
"""Publish a version of the project to a stage."""
config = _load_config(config_file)
cfn = boto3.client('cloudformation')
if version is None:
version = config['devstage']
elif version not in config['stage_environments'].keys() and \
not version.isdigit():
raise ValueError('Invalid version. Use a stage name or a numeric '
'version number.')
if version == stage:
raise ValueError('Cannot deploy a stage into itself.')
# obtain previous deployment
try:
previous_deployment = cfn.describe_stacks(
StackName=config['name'])['Stacks'][0]
except botocore.exceptions.ClientError:
raise RuntimeError('This project has not been deployed yet.')
# preserve package from previous deployment
bucket = _get_from_stack(previous_deployment, 'Parameter',
'LambdaS3Bucket')
lambda_package = _get_from_stack(previous_deployment, 'Parameter',
'LambdaS3Key')
# prepare cloudformation template
template_body = get_cfn_template(config)
parameters = [
{'ParameterKey': 'LambdaS3Bucket', 'ParameterValue': bucket},
{'ParameterKey': 'LambdaS3Key', 'ParameterValue': lambda_package},
]
stages = list(config['stage_environments'].keys())
stages.sort()
for s in stages:
param = s.title() + 'Version'
if s != stage:
v = _get_from_stack(previous_deployment, 'Parameter', param) \
if previous_deployment else '$LATEST'
v = v or '$LATEST'
else:
if version.isdigit():
# explicit version number
v = version
else:
# publish version from a stage
v = _get_from_stack(previous_deployment, 'Parameter',
version.title() + 'Version')
if v == '$LATEST':
# publish a new version from $LATEST
lmb = boto3.client('lambda')
v = lmb.publish_version(FunctionName=_get_from_stack(
previous_deployment, 'Output', 'FunctionArn'))[
'Version']
parameters.append({'ParameterKey': param, 'ParameterValue': v})
# run the cloudformation template
print('Publishing {}:{} to {}...'.format(config['name'], version, stage))
cfn.update_stack(StackName=config['name'], TemplateBody=template_body,
Parameters=parameters,
Capabilities=['CAPABILITY_IAM'])
waiter = cfn.get_waiter('stack_update_complete')
# wait for cloudformation to do its thing
try:
waiter.wait(StackName=config['name'])
except botocore.exceptions.ClientError:
raise
# we are done, show status info and exit
_print_status(config)
@main.command()
@climax.argument('args', nargs='*',
help='Input arguments for the function. Use arg=value for '
'strings, or arg:=value for integer, booleans or JSON '
'structures.')
@climax.argument('--dry-run', action='store_true',
help='Just check that the function can be invoked.')
@climax.argument('--nowait', action='store_true',
help='Invoke the function but don\'t wait for it to return.')
@climax.argument('--stage', help='Stage of the invoked function. Defaults to '
'the development stage')
def invoke(stage, nowait, dry_run, config_file, args):
"""Invoke the lambda function."""
config = _load_config(config_file)
if stage is None:
stage = config['devstage']
cfn = boto3.client('cloudformation')
lmb = boto3.client('lambda')
try:
stack = cfn.describe_stacks(StackName=config['name'])['Stacks'][0]
except botocore.exceptions.ClientError:
raise RuntimeError('This project has not been deployed yet.')
function = _get_from_stack(stack, 'Output', 'FunctionArn')
if dry_run:
invocation_type = 'DryRun'
elif nowait:
invocation_type = 'Event'
else:
invocation_type = 'RequestResponse'
# parse input arguments
data = {}
for arg in args:
s = arg.split('=', 1)
if len(s) != 2:
raise ValueError('Invalid argument ' + arg)
if s[0][-1] == ':':
# JSON argument
data[s[0][:-1]] = json.loads(s[1])
else:
# string argument
data[s[0]] = s[1]
rv = lmb.invoke(FunctionName=function, InvocationType=invocation_type,
Qualifier=stage,
Payload=json.dumps({'kwargs': data}, sort_keys=True))
if rv['StatusCode'] != 200 and rv['StatusCode'] != 202:
raise RuntimeError('Unexpected error. Status code = {}.'.format(
rv['StatusCode']))
if invocation_type == 'RequestResponse':
payload = json.loads(rv['Payload'].read().decode('utf-8'))
if 'FunctionError' in rv:
if 'stackTrace' in payload:
print('Traceback (most recent call last):')
for frame in payload['stackTrace']:
print(' File "{}", line {}, in {}'.format(
frame[0], frame[1], frame[2]))
print(' ' + frame[3])
print('{}: {}'.format(payload['errorType'],
payload['errorMessage']))
else:
raise RuntimeError('Unknown error')
else:
print(str(payload))
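# Illustrative sketch (editor's addition): how the argument syntax parsed
# above looks on the command line. The command name and values are made up.
#
#     slam invoke name=Mary                 # string argument
#     slam invoke retries:=3 verbose:=true  # arg:=value is parsed as JSON
#     slam invoke --stage prod payload:='{"id": 1}'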
@main.command()
@climax.argument('--no-logs', action='store_true', help='Do not delete logs.')
def delete(no_logs, config_file):
"""Delete the project."""
config = _load_config(config_file)
s3 = boto3.client('s3')
cfn = boto3.client('cloudformation')
logs = boto3.client('logs')
try:
stack = cfn.describe_stacks(StackName=config['name'])['Stacks'][0]
except botocore.exceptions.ClientError:
raise RuntimeError('This project has not been deployed yet.')
bucket = _get_from_stack(stack, 'Parameter', 'LambdaS3Bucket')
lambda_package = _get_from_stack(stack, 'Parameter', 'LambdaS3Key')
function = _get_from_stack(stack, 'Output', 'FunctionArn').split(':')[-1]
api_id = _get_from_stack(stack, 'Output', 'ApiId')
if api_id:
log_groups = ['API-Gateway-Execution-Logs_' + api_id + '/' + stage
for stage in config['stage_environments'].keys()]
else:
log_groups = []
log_groups.append('/aws/lambda/' + function)
print('Deleting {}...'.format(config['name']))
cfn.delete_stack(StackName=config['name'])
waiter = cfn.get_waiter('stack_delete_complete')
waiter.wait(StackName=config['name'])
if not no_logs:
print('Deleting logs...')
for log_group in log_groups:
try:
logs.delete_log_group(logGroupName=log_group)
except botocore.exceptions.ClientError:
print(' Log group {} could not be deleted.'.format(log_group))
print('Deleting files...')
try:
s3.delete_object(Bucket=bucket, Key=lambda_package)
s3.delete_bucket(Bucket=bucket)
except botocore.exceptions.ClientError:
print(' S3 bucket {} could not be deleted.'.format(bucket))
@main.command()
def status(config_file):
"""Show deployment status for the project."""
config = _load_config(config_file)
_print_status(config)
@main.command()
@climax.argument('--tail', '-t', action='store_true',
help='Tail the log stream')
@climax.argument('--period', '-p', default='1m',
help=('How far back to start, in weeks (1w), days (2d), '
'hours (3h), minutes (4m) or seconds (5s). Default '
'is 1m.'))
@climax.argument('--stage',
help=('Stage to show logs for. Defaults to the stage '
'designated as the development stage'))
def logs(stage, period, tail, config_file):
"""Dump logs to the console."""
config = _load_config(config_file)
if stage is None:
stage = config['devstage']
cfn = boto3.client('cloudformation')
try:
stack = cfn.describe_stacks(StackName=config['name'])['Stacks'][0]
except botocore.exceptions.ClientError:
print('{} has not been deployed yet.'.format(config['name']))
return
function = _get_from_stack(stack, 'Output', 'FunctionArn').split(':')[-1]
version = _get_from_stack(stack, 'Parameter', stage.title() + 'Version')
api_id = _get_from_stack(stack, 'Output', 'ApiId')
try:
start = float(period[:-1])
except ValueError:
raise ValueError('Invalid period ' + period)
if period[-1] == 's':
start = time.time() - start
elif period[-1] == 'm':
start = time.time() - start * 60
elif period[-1] == 'h':
start = time.time() - start * 60 * 60
elif period[-1] == 'd':
start = time.time() - start * 60 * 60 * 24
elif period[-1] == 'w':
start = time.time() - start * 60 * 60 * 24 * 7
else:
raise ValueError('Invalid period ' + period)
start = int(start * 1000)
logs = boto3.client('logs')
lambda_log_group = '/aws/lambda/' + function
log_groups = [lambda_log_group]
if api_id:
log_groups.append('API-Gateway-Execution-Logs_' + api_id + '/' + stage)
log_version = '[' + version + ']'
log_start = {g: start for g in log_groups}
while True:
kwargs = {}
events = []
for log_group in log_groups:
while True:
try:
filtered_logs = logs.filter_log_events(
logGroupName=log_group,
startTime=log_start[log_group],
interleaved=True, **kwargs)
except botocore.exceptions.ClientError:
# the log group does not exist yet
filtered_logs = {'events': []}
if log_group == lambda_log_group:
events += [ev for ev in filtered_logs['events']
if log_version in ev['logStreamName']]
else:
events += filtered_logs['events']
if len(filtered_logs['events']):
log_start[log_group] = \
filtered_logs['events'][-1]['timestamp'] + 1
if 'nextToken' not in filtered_logs:
break
kwargs['nextToken'] = filtered_logs['nextToken']
events.sort(key=lambda ev: ev['timestamp'])
for ev in events:
tm = datetime.fromtimestamp(ev['timestamp'] / 1000)
print(tm.strftime('%b %d %X ') + ev['message'].strip())
if not tail:
break
time.sleep(5)
@main.command()
def template(config_file):
"""Print the default Cloudformation deployment template."""
config = _load_config(config_file)
print(get_cfn_template(config, pretty=True))
def register_plugins():
"""find any installed plugins and register them."""
if pkg_resources: # pragma: no cover
for ep in pkg_resources.iter_entry_points('slam_plugins'):
plugin = ep.load()
# add any init options to the main init command
if hasattr(plugin, 'init') and hasattr(plugin.init, '_arguments'):
for arg in plugin.init._arguments:
init.parser.add_argument(*arg[0], **arg[1])
init._arguments += plugin.init._arguments
init._argnames += plugin.init._argnames
plugins[ep.name] = plugin
register_plugins() # pragma: no cover
|
gopal1cloud/neutron
|
refs/heads/master
|
neutron/plugins/ml2/config.py
|
27
|
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
ml2_opts = [
cfg.ListOpt('type_drivers',
default=['local', 'flat', 'vlan', 'gre', 'vxlan'],
help=_("List of network type driver entrypoints to be loaded "
"from the neutron.ml2.type_drivers namespace.")),
cfg.ListOpt('tenant_network_types',
default=['local'],
help=_("Ordered list of network_types to allocate as tenant "
"networks.")),
cfg.ListOpt('mechanism_drivers',
default=[],
help=_("An ordered list of networking mechanism driver "
"entrypoints to be loaded from the "
"neutron.ml2.mechanism_drivers namespace.")),
]
cfg.CONF.register_opts(ml2_opts, "ml2")
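# Illustrative sketch (editor's addition): the options registered above are
# read from the [ml2] section of the plugin configuration file, typically
# something like /etc/neutron/plugins/ml2/ml2_conf.ini (path may vary):
#
#     [ml2]
#     type_drivers = flat,vlan,gre,vxlan
#     tenant_network_types = vlan
#     mechanism_drivers = openvswitch,linuxbridge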
|
codepantry/django
|
refs/heads/master
|
tests/postgres_tests/test_ranges.py
|
161
|
import datetime
import json
import unittest
from django import forms
from django.core import exceptions, serializers
from django.db import connection
from django.db.models import F
from django.test import TestCase, override_settings
from django.utils import timezone
from . import PostgreSQLTestCase
from .models import RangeLookupsModel, RangesModel
try:
from psycopg2.extras import DateRange, DateTimeTZRange, NumericRange
from django.contrib.postgres import fields as pg_fields, forms as pg_forms
from django.contrib.postgres.validators import (
RangeMaxValueValidator, RangeMinValueValidator,
)
except ImportError:
pass
def skipUnlessPG92(test):
try:
PG_VERSION = connection.pg_version
except AttributeError:
PG_VERSION = 0
if PG_VERSION < 90200:
return unittest.skip('PostgreSQL >= 9.2 required')(test)
return test
@skipUnlessPG92
class TestSaveLoad(TestCase):
def test_all_fields(self):
now = timezone.now()
instance = RangesModel(
ints=NumericRange(0, 10),
bigints=NumericRange(10, 20),
floats=NumericRange(20, 30),
timestamps=DateTimeTZRange(now - datetime.timedelta(hours=1), now),
dates=DateRange(now.date() - datetime.timedelta(days=1), now.date()),
)
instance.save()
loaded = RangesModel.objects.get()
self.assertEqual(instance.ints, loaded.ints)
self.assertEqual(instance.bigints, loaded.bigints)
self.assertEqual(instance.floats, loaded.floats)
self.assertEqual(instance.timestamps, loaded.timestamps)
self.assertEqual(instance.dates, loaded.dates)
def test_range_object(self):
r = NumericRange(0, 10)
instance = RangesModel(ints=r)
instance.save()
loaded = RangesModel.objects.get()
self.assertEqual(r, loaded.ints)
def test_tuple(self):
instance = RangesModel(ints=(0, 10))
instance.save()
loaded = RangesModel.objects.get()
self.assertEqual(NumericRange(0, 10), loaded.ints)
def test_range_object_boundaries(self):
r = NumericRange(0, 10, '[]')
instance = RangesModel(floats=r)
instance.save()
loaded = RangesModel.objects.get()
self.assertEqual(r, loaded.floats)
self.assertTrue(10 in loaded.floats)
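    # Note added for clarity (not in the original tests): the optional third
    # argument to NumericRange is psycopg2's bounds string, which controls
    # endpoint inclusion; '[)' is the default.
    #
    #     NumericRange(0, 10)             # 0 <= x < 10
    #     NumericRange(0, 10, '[]')       # 0 <= x <= 10 (why 10 is in floats above)
    #     NumericRange(None, None, '()')  # unbounded on both sides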
def test_unbounded(self):
r = NumericRange(None, None, '()')
instance = RangesModel(floats=r)
instance.save()
loaded = RangesModel.objects.get()
self.assertEqual(r, loaded.floats)
def test_empty(self):
r = NumericRange(empty=True)
instance = RangesModel(ints=r)
instance.save()
loaded = RangesModel.objects.get()
self.assertEqual(r, loaded.ints)
def test_null(self):
instance = RangesModel(ints=None)
instance.save()
loaded = RangesModel.objects.get()
self.assertIsNone(loaded.ints)
@skipUnlessPG92
class TestQuerying(TestCase):
@classmethod
def setUpTestData(cls):
cls.objs = [
RangesModel.objects.create(ints=NumericRange(0, 10)),
RangesModel.objects.create(ints=NumericRange(5, 15)),
RangesModel.objects.create(ints=NumericRange(None, 0)),
RangesModel.objects.create(ints=NumericRange(empty=True)),
RangesModel.objects.create(ints=None),
]
def test_exact(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__exact=NumericRange(0, 10)),
[self.objs[0]],
)
def test_isnull(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__isnull=True),
[self.objs[4]],
)
def test_isempty(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__isempty=True),
[self.objs[3]],
)
def test_contains(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__contains=8),
[self.objs[0], self.objs[1]],
)
def test_contains_range(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__contains=NumericRange(3, 8)),
[self.objs[0]],
)
def test_contained_by(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__contained_by=NumericRange(0, 20)),
[self.objs[0], self.objs[1], self.objs[3]],
)
def test_overlap(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__overlap=NumericRange(3, 8)),
[self.objs[0], self.objs[1]],
)
def test_fully_lt(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__fully_lt=NumericRange(5, 10)),
[self.objs[2]],
)
def test_fully_gt(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__fully_gt=NumericRange(5, 10)),
[],
)
def test_not_lt(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__not_lt=NumericRange(5, 10)),
[self.objs[1]],
)
def test_not_gt(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__not_gt=NumericRange(5, 10)),
[self.objs[0], self.objs[2]],
)
def test_adjacent_to(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__adjacent_to=NumericRange(0, 5)),
[self.objs[1], self.objs[2]],
)
def test_startswith(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__startswith=0),
[self.objs[0]],
)
def test_endswith(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__endswith=0),
[self.objs[2]],
)
def test_startswith_chaining(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__startswith__gte=0),
[self.objs[0], self.objs[1]],
)
@skipUnlessPG92
class TestQueryingWithRanges(TestCase):
def test_date_range(self):
objs = [
RangeLookupsModel.objects.create(date='2015-01-01'),
RangeLookupsModel.objects.create(date='2015-05-05'),
]
self.assertSequenceEqual(
RangeLookupsModel.objects.filter(date__contained_by=DateRange('2015-01-01', '2015-05-04')),
[objs[0]],
)
def test_date_range_datetime_field(self):
objs = [
RangeLookupsModel.objects.create(timestamp='2015-01-01'),
RangeLookupsModel.objects.create(timestamp='2015-05-05'),
]
self.assertSequenceEqual(
RangeLookupsModel.objects.filter(timestamp__date__contained_by=DateRange('2015-01-01', '2015-05-04')),
[objs[0]],
)
def test_datetime_range(self):
objs = [
RangeLookupsModel.objects.create(timestamp='2015-01-01T09:00:00'),
RangeLookupsModel.objects.create(timestamp='2015-05-05T17:00:00'),
]
self.assertSequenceEqual(
RangeLookupsModel.objects.filter(
timestamp__contained_by=DateTimeTZRange('2015-01-01T09:00', '2015-05-04T23:55')
),
[objs[0]],
)
def test_integer_range(self):
objs = [
RangeLookupsModel.objects.create(integer=5),
RangeLookupsModel.objects.create(integer=99),
RangeLookupsModel.objects.create(integer=-1),
]
self.assertSequenceEqual(
RangeLookupsModel.objects.filter(integer__contained_by=NumericRange(1, 98)),
[objs[0]]
)
def test_biginteger_range(self):
objs = [
RangeLookupsModel.objects.create(big_integer=5),
RangeLookupsModel.objects.create(big_integer=99),
RangeLookupsModel.objects.create(big_integer=-1),
]
self.assertSequenceEqual(
RangeLookupsModel.objects.filter(big_integer__contained_by=NumericRange(1, 98)),
[objs[0]]
)
def test_float_range(self):
objs = [
RangeLookupsModel.objects.create(float=5),
RangeLookupsModel.objects.create(float=99),
RangeLookupsModel.objects.create(float=-1),
]
self.assertSequenceEqual(
RangeLookupsModel.objects.filter(float__contained_by=NumericRange(1, 98)),
[objs[0]]
)
def test_f_ranges(self):
parent = RangesModel.objects.create(floats=NumericRange(0, 10))
objs = [
RangeLookupsModel.objects.create(float=5, parent=parent),
RangeLookupsModel.objects.create(float=99, parent=parent),
]
self.assertSequenceEqual(
RangeLookupsModel.objects.filter(float__contained_by=F('parent__floats')),
[objs[0]]
)
def test_exclude(self):
objs = [
RangeLookupsModel.objects.create(float=5),
RangeLookupsModel.objects.create(float=99),
RangeLookupsModel.objects.create(float=-1),
]
self.assertSequenceEqual(
RangeLookupsModel.objects.exclude(float__contained_by=NumericRange(0, 100)),
[objs[2]]
)
@skipUnlessPG92
class TestSerialization(TestCase):
test_data = (
'[{"fields": {"ints": "{\\"upper\\": \\"10\\", \\"lower\\": \\"0\\", '
'\\"bounds\\": \\"[)\\"}", "floats": "{\\"empty\\": true}", '
'"bigints": null, "timestamps": "{\\"upper\\": \\"2014-02-02T12:12:12+00:00\\", '
'\\"lower\\": \\"2014-01-01T00:00:00+00:00\\", \\"bounds\\": \\"[)\\"}", '
'"dates": "{\\"upper\\": \\"2014-02-02\\", \\"lower\\": \\"2014-01-01\\", \\"bounds\\": \\"[)\\"}" }, '
'"model": "postgres_tests.rangesmodel", "pk": null}]'
)
lower_date = datetime.date(2014, 1, 1)
upper_date = datetime.date(2014, 2, 2)
lower_dt = datetime.datetime(2014, 1, 1, 0, 0, 0, tzinfo=timezone.utc)
upper_dt = datetime.datetime(2014, 2, 2, 12, 12, 12, tzinfo=timezone.utc)
def test_dumping(self):
instance = RangesModel(ints=NumericRange(0, 10), floats=NumericRange(empty=True),
timestamps=DateTimeTZRange(self.lower_dt, self.upper_dt),
dates=DateRange(self.lower_date, self.upper_date))
data = serializers.serialize('json', [instance])
dumped = json.loads(data)
for field in ('ints', 'dates', 'timestamps'):
dumped[0]['fields'][field] = json.loads(dumped[0]['fields'][field])
check = json.loads(self.test_data)
for field in ('ints', 'dates', 'timestamps'):
check[0]['fields'][field] = json.loads(check[0]['fields'][field])
self.assertEqual(dumped, check)
def test_loading(self):
instance = list(serializers.deserialize('json', self.test_data))[0].object
self.assertEqual(instance.ints, NumericRange(0, 10))
self.assertEqual(instance.floats, NumericRange(empty=True))
self.assertEqual(instance.bigints, None)
class TestValidators(PostgreSQLTestCase):
def test_max(self):
validator = RangeMaxValueValidator(5)
validator(NumericRange(0, 5))
with self.assertRaises(exceptions.ValidationError) as cm:
validator(NumericRange(0, 10))
self.assertEqual(cm.exception.messages[0], 'Ensure that this range is completely less than or equal to 5.')
self.assertEqual(cm.exception.code, 'max_value')
def test_min(self):
validator = RangeMinValueValidator(5)
validator(NumericRange(10, 15))
with self.assertRaises(exceptions.ValidationError) as cm:
validator(NumericRange(0, 10))
self.assertEqual(cm.exception.messages[0], 'Ensure that this range is completely greater than or equal to 5.')
self.assertEqual(cm.exception.code, 'min_value')
class TestFormField(PostgreSQLTestCase):
def test_valid_integer(self):
field = pg_forms.IntegerRangeField()
value = field.clean(['1', '2'])
self.assertEqual(value, NumericRange(1, 2))
def test_valid_floats(self):
field = pg_forms.FloatRangeField()
value = field.clean(['1.12345', '2.001'])
self.assertEqual(value, NumericRange(1.12345, 2.001))
def test_valid_timestamps(self):
field = pg_forms.DateTimeRangeField()
value = field.clean(['01/01/2014 00:00:00', '02/02/2014 12:12:12'])
lower = datetime.datetime(2014, 1, 1, 0, 0, 0)
upper = datetime.datetime(2014, 2, 2, 12, 12, 12)
self.assertEqual(value, DateTimeTZRange(lower, upper))
def test_valid_dates(self):
field = pg_forms.DateRangeField()
value = field.clean(['01/01/2014', '02/02/2014'])
lower = datetime.date(2014, 1, 1)
upper = datetime.date(2014, 2, 2)
self.assertEqual(value, DateRange(lower, upper))
def test_using_split_datetime_widget(self):
class SplitDateTimeRangeField(pg_forms.DateTimeRangeField):
base_field = forms.SplitDateTimeField
class SplitForm(forms.Form):
field = SplitDateTimeRangeField()
form = SplitForm()
self.assertHTMLEqual(str(form), '''
<tr>
<th>
<label for="id_field_0">Field:</label>
</th>
<td>
<input id="id_field_0_0" name="field_0_0" type="text" />
<input id="id_field_0_1" name="field_0_1" type="text" />
<input id="id_field_1_0" name="field_1_0" type="text" />
<input id="id_field_1_1" name="field_1_1" type="text" />
</td>
</tr>
''')
form = SplitForm({
'field_0_0': '01/01/2014',
'field_0_1': '00:00:00',
'field_1_0': '02/02/2014',
'field_1_1': '12:12:12',
})
self.assertTrue(form.is_valid())
lower = datetime.datetime(2014, 1, 1, 0, 0, 0)
upper = datetime.datetime(2014, 2, 2, 12, 12, 12)
self.assertEqual(form.cleaned_data['field'], DateTimeTZRange(lower, upper))
def test_none(self):
field = pg_forms.IntegerRangeField(required=False)
value = field.clean(['', ''])
self.assertEqual(value, None)
def test_rendering(self):
class RangeForm(forms.Form):
ints = pg_forms.IntegerRangeField()
self.assertHTMLEqual(str(RangeForm()), '''
<tr>
<th><label for="id_ints_0">Ints:</label></th>
<td>
<input id="id_ints_0" name="ints_0" type="number" />
<input id="id_ints_1" name="ints_1" type="number" />
</td>
</tr>
''')
def test_integer_lower_bound_higher(self):
field = pg_forms.IntegerRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['10', '2'])
self.assertEqual(cm.exception.messages[0], 'The start of the range must not exceed the end of the range.')
self.assertEqual(cm.exception.code, 'bound_ordering')
def test_integer_open(self):
field = pg_forms.IntegerRangeField()
value = field.clean(['', '0'])
self.assertEqual(value, NumericRange(None, 0))
def test_integer_incorrect_data_type(self):
field = pg_forms.IntegerRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('1')
self.assertEqual(cm.exception.messages[0], 'Enter two whole numbers.')
self.assertEqual(cm.exception.code, 'invalid')
def test_integer_invalid_lower(self):
field = pg_forms.IntegerRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['a', '2'])
self.assertEqual(cm.exception.messages[0], 'Enter a whole number.')
def test_integer_invalid_upper(self):
field = pg_forms.IntegerRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['1', 'b'])
self.assertEqual(cm.exception.messages[0], 'Enter a whole number.')
def test_integer_required(self):
field = pg_forms.IntegerRangeField(required=True)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['', ''])
self.assertEqual(cm.exception.messages[0], 'This field is required.')
value = field.clean([1, ''])
self.assertEqual(value, NumericRange(1, None))
def test_float_lower_bound_higher(self):
field = pg_forms.FloatRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['1.8', '1.6'])
self.assertEqual(cm.exception.messages[0], 'The start of the range must not exceed the end of the range.')
self.assertEqual(cm.exception.code, 'bound_ordering')
def test_float_open(self):
field = pg_forms.FloatRangeField()
value = field.clean(['', '3.1415926'])
self.assertEqual(value, NumericRange(None, 3.1415926))
def test_float_incorrect_data_type(self):
field = pg_forms.FloatRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('1.6')
self.assertEqual(cm.exception.messages[0], 'Enter two numbers.')
self.assertEqual(cm.exception.code, 'invalid')
def test_float_invalid_lower(self):
field = pg_forms.FloatRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['a', '3.1415926'])
self.assertEqual(cm.exception.messages[0], 'Enter a number.')
def test_float_invalid_upper(self):
field = pg_forms.FloatRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['1.61803399', 'b'])
self.assertEqual(cm.exception.messages[0], 'Enter a number.')
def test_float_required(self):
field = pg_forms.FloatRangeField(required=True)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['', ''])
self.assertEqual(cm.exception.messages[0], 'This field is required.')
value = field.clean(['1.61803399', ''])
self.assertEqual(value, NumericRange(1.61803399, None))
def test_date_lower_bound_higher(self):
field = pg_forms.DateRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['2013-04-09', '1976-04-16'])
self.assertEqual(cm.exception.messages[0], 'The start of the range must not exceed the end of the range.')
self.assertEqual(cm.exception.code, 'bound_ordering')
def test_date_open(self):
field = pg_forms.DateRangeField()
value = field.clean(['', '2013-04-09'])
self.assertEqual(value, DateRange(None, datetime.date(2013, 4, 9)))
def test_date_incorrect_data_type(self):
field = pg_forms.DateRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('1')
self.assertEqual(cm.exception.messages[0], 'Enter two valid dates.')
self.assertEqual(cm.exception.code, 'invalid')
def test_date_invalid_lower(self):
field = pg_forms.DateRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['a', '2013-04-09'])
self.assertEqual(cm.exception.messages[0], 'Enter a valid date.')
def test_date_invalid_upper(self):
field = pg_forms.DateRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['2013-04-09', 'b'])
self.assertEqual(cm.exception.messages[0], 'Enter a valid date.')
def test_date_required(self):
field = pg_forms.DateRangeField(required=True)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['', ''])
self.assertEqual(cm.exception.messages[0], 'This field is required.')
value = field.clean(['1976-04-16', ''])
self.assertEqual(value, DateRange(datetime.date(1976, 4, 16), None))
def test_datetime_lower_bound_higher(self):
field = pg_forms.DateTimeRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['2006-10-25 14:59', '2006-10-25 14:58'])
self.assertEqual(cm.exception.messages[0], 'The start of the range must not exceed the end of the range.')
self.assertEqual(cm.exception.code, 'bound_ordering')
def test_datetime_open(self):
field = pg_forms.DateTimeRangeField()
value = field.clean(['', '2013-04-09 11:45'])
self.assertEqual(value, DateTimeTZRange(None, datetime.datetime(2013, 4, 9, 11, 45)))
def test_datetime_incorrect_data_type(self):
field = pg_forms.DateTimeRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('2013-04-09 11:45')
self.assertEqual(cm.exception.messages[0], 'Enter two valid date/times.')
self.assertEqual(cm.exception.code, 'invalid')
def test_datetime_invalid_lower(self):
field = pg_forms.DateTimeRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['45', '2013-04-09 11:45'])
self.assertEqual(cm.exception.messages[0], 'Enter a valid date/time.')
def test_datetime_invalid_upper(self):
field = pg_forms.DateTimeRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['2013-04-09 11:45', 'sweet pickles'])
self.assertEqual(cm.exception.messages[0], 'Enter a valid date/time.')
def test_datetime_required(self):
field = pg_forms.DateTimeRangeField(required=True)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['', ''])
self.assertEqual(cm.exception.messages[0], 'This field is required.')
value = field.clean(['2013-04-09 11:45', ''])
self.assertEqual(value, DateTimeTZRange(datetime.datetime(2013, 4, 9, 11, 45), None))
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Johannesburg')
def test_datetime_prepare_value(self):
field = pg_forms.DateTimeRangeField()
value = field.prepare_value(
DateTimeTZRange(datetime.datetime(2015, 5, 22, 16, 6, 33, tzinfo=timezone.utc), None)
)
self.assertEqual(value, [datetime.datetime(2015, 5, 22, 18, 6, 33), None])
def test_model_field_formfield_integer(self):
model_field = pg_fields.IntegerRangeField()
form_field = model_field.formfield()
self.assertIsInstance(form_field, pg_forms.IntegerRangeField)
def test_model_field_formfield_biginteger(self):
model_field = pg_fields.BigIntegerRangeField()
form_field = model_field.formfield()
self.assertIsInstance(form_field, pg_forms.IntegerRangeField)
def test_model_field_formfield_float(self):
model_field = pg_fields.FloatRangeField()
form_field = model_field.formfield()
self.assertIsInstance(form_field, pg_forms.FloatRangeField)
def test_model_field_formfield_date(self):
model_field = pg_fields.DateRangeField()
form_field = model_field.formfield()
self.assertIsInstance(form_field, pg_forms.DateRangeField)
def test_model_field_formfield_datetime(self):
model_field = pg_fields.DateTimeRangeField()
form_field = model_field.formfield()
self.assertIsInstance(form_field, pg_forms.DateTimeRangeField)
class TestWidget(PostgreSQLTestCase):
def test_range_widget(self):
f = pg_forms.ranges.DateTimeRangeField()
self.assertHTMLEqual(
f.widget.render('datetimerange', ''),
'<input type="text" name="datetimerange_0" /><input type="text" name="datetimerange_1" />'
)
self.assertHTMLEqual(
f.widget.render('datetimerange', None),
'<input type="text" name="datetimerange_0" /><input type="text" name="datetimerange_1" />'
)
dt_range = DateTimeTZRange(
datetime.datetime(2006, 1, 10, 7, 30),
datetime.datetime(2006, 2, 12, 9, 50)
)
self.assertHTMLEqual(
f.widget.render('datetimerange', dt_range),
'<input type="text" name="datetimerange_0" value="2006-01-10 07:30:00" /><input type="text" name="datetimerange_1" value="2006-02-12 09:50:00" />'
)
|
40223231/2015-cdb-g4-final-test-by-6-22
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/logging/__init__.py
|
733
|
# Copyright 2001-2013 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Logging package for Python. Based on PEP 282 and comments thereto in
comp.lang.python.
Copyright (C) 2001-2013 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
"""
import sys, os, time, io, traceback, warnings, weakref
from string import Template
__all__ = ['BASIC_FORMAT', 'BufferingFormatter', 'CRITICAL', 'DEBUG', 'ERROR',
'FATAL', 'FileHandler', 'Filter', 'Formatter', 'Handler', 'INFO',
'LogRecord', 'Logger', 'LoggerAdapter', 'NOTSET', 'NullHandler',
'StreamHandler', 'WARN', 'WARNING', 'addLevelName', 'basicConfig',
'captureWarnings', 'critical', 'debug', 'disable', 'error',
'exception', 'fatal', 'getLevelName', 'getLogger', 'getLoggerClass',
'info', 'log', 'makeLogRecord', 'setLoggerClass', 'warn', 'warning',
'getLogRecordFactory', 'setLogRecordFactory', 'lastResort']
try:
import threading
except ImportError: #pragma: no cover
threading = None
__author__ = "Vinay Sajip <vinay_sajip@red-dove.com>"
__status__ = "production"
__version__ = "0.5.1.2"
__date__ = "07 February 2010"
#---------------------------------------------------------------------------
# Miscellaneous module data
#---------------------------------------------------------------------------
#
# _srcfile is used when walking the stack to check when we've got the first
# caller stack frame.
#
if hasattr(sys, 'frozen'): #support for py2exe
_srcfile = "logging%s__init__%s" % (os.sep, __file__[-4:])
else:
_srcfile = __file__
_srcfile = os.path.normcase(_srcfile)
if hasattr(sys, '_getframe'):
currentframe = lambda: sys._getframe(3)
else: #pragma: no cover
def currentframe():
"""Return the frame object for the caller's stack frame."""
try:
raise Exception
except:
return sys.exc_info()[2].tb_frame.f_back
# _srcfile is only used in conjunction with sys._getframe().
# To provide compatibility with older versions of Python, set _srcfile
# to None if _getframe() is not available; this value will prevent
# findCaller() from being called.
#if not hasattr(sys, "_getframe"):
# _srcfile = None
#
#_startTime is used as the base when calculating the relative time of events
#
_startTime = time.time()
#
#raiseExceptions is used to see if exceptions during handling should be
#propagated
#
raiseExceptions = True
#
# If you don't want threading information in the log, set this to zero
#
logThreads = True
#
# If you don't want multiprocessing information in the log, set this to zero
#
logMultiprocessing = True
#
# If you don't want process information in the log, set this to zero
#
logProcesses = True
#---------------------------------------------------------------------------
# Level related stuff
#---------------------------------------------------------------------------
#
# Default levels and level names, these can be replaced with any positive set
# of values having corresponding names. There is a pseudo-level, NOTSET, which
# is only really there as a lower limit for user-defined levels. Handlers and
# loggers are initialized with NOTSET so that they will log all messages, even
# at user-defined levels.
#
CRITICAL = 50
FATAL = CRITICAL
ERROR = 40
WARNING = 30
WARN = WARNING
INFO = 20
DEBUG = 10
NOTSET = 0
_levelNames = {
CRITICAL : 'CRITICAL',
ERROR : 'ERROR',
WARNING : 'WARNING',
INFO : 'INFO',
DEBUG : 'DEBUG',
NOTSET : 'NOTSET',
'CRITICAL' : CRITICAL,
'ERROR' : ERROR,
'WARN' : WARNING,
'WARNING' : WARNING,
'INFO' : INFO,
'DEBUG' : DEBUG,
'NOTSET' : NOTSET,
}
def getLevelName(level):
"""
Return the textual representation of logging level 'level'.
If the level is one of the predefined levels (CRITICAL, ERROR, WARNING,
INFO, DEBUG) then you get the corresponding string. If you have
associated levels with names using addLevelName then the name you have
associated with 'level' is returned.
If a numeric value corresponding to one of the defined levels is passed
in, the corresponding string representation is returned.
Otherwise, the string "Level %s" % level is returned.
"""
return _levelNames.get(level, ("Level %s" % level))
def addLevelName(level, levelName):
"""
Associate 'levelName' with 'level'.
This is used when converting levels to text during message formatting.
"""
_acquireLock()
try: #unlikely to cause an exception, but you never know...
_levelNames[level] = levelName
_levelNames[levelName] = level
finally:
_releaseLock()
def _checkLevel(level):
if isinstance(level, int):
rv = level
elif str(level) == level:
if level not in _levelNames:
raise ValueError("Unknown level: %r" % level)
rv = _levelNames[level]
else:
raise TypeError("Level not an integer or a valid string: %r" % level)
return rv
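#
# Usage sketch (illustrative comment, not part of the original module): the
# level-name table is bidirectional, so addLevelName() makes a custom level
# usable both by number and by name.
#
#     addLevelName(25, 'NOTICE')
#     getLevelName(25)          # -> 'NOTICE'
#     getLevelName('NOTICE')    # -> 25
#     _checkLevel('NOTICE')     # -> 25
#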
#---------------------------------------------------------------------------
# Thread-related stuff
#---------------------------------------------------------------------------
#
#_lock is used to serialize access to shared data structures in this module.
#This needs to be an RLock because fileConfig() creates and configures
#Handlers, and so might arbitrary user threads. Since Handler code updates the
#shared dictionary _handlers, it needs to acquire the lock. But if configuring,
#the lock would already have been acquired - so we need an RLock.
#The same argument applies to Loggers and Manager.loggerDict.
#
if threading:
_lock = threading.RLock()
else: #pragma: no cover
_lock = None
def _acquireLock():
"""
Acquire the module-level lock for serializing access to shared data.
This should be released with _releaseLock().
"""
if _lock:
_lock.acquire()
def _releaseLock():
"""
Release the module-level lock acquired by calling _acquireLock().
"""
if _lock:
_lock.release()
#---------------------------------------------------------------------------
# The logging record
#---------------------------------------------------------------------------
class LogRecord(object):
"""
A LogRecord instance represents an event being logged.
LogRecord instances are created every time something is logged. They
contain all the information pertinent to the event being logged. The
main information passed in is in msg and args, which are combined
using str(msg) % args to create the message field of the record. The
record also includes information such as when the record was created,
the source line where the logging call was made, and any exception
information to be logged.
"""
def __init__(self, name, level, pathname, lineno,
msg, args, exc_info, func=None, sinfo=None, **kwargs):
"""
Initialize a logging record with interesting information.
"""
ct = time.time()
self.name = name
self.msg = msg
#
# The following statement allows passing of a dictionary as a sole
# argument, so that you can do something like
# logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2})
# Suggested by Stefan Behnel.
# Note that without the test for args[0], we get a problem because
# during formatting, we test to see if the arg is present using
# 'if self.args:'. If the event being logged is e.g. 'Value is %d'
# and if the passed arg fails 'if self.args:' then no formatting
# is done. For example, logger.warning('Value is %d', 0) would log
# 'Value is %d' instead of 'Value is 0'.
# For the use case of passing a dictionary, this should not be a
# problem.
if args and len(args) == 1 and isinstance(args[0], dict) and args[0]:
args = args[0]
self.args = args
self.levelname = getLevelName(level)
self.levelno = level
self.pathname = pathname
try:
self.filename = os.path.basename(pathname)
self.module = os.path.splitext(self.filename)[0]
except (TypeError, ValueError, AttributeError):
self.filename = pathname
self.module = "Unknown module"
self.exc_info = exc_info
self.exc_text = None # used to cache the traceback text
self.stack_info = sinfo
self.lineno = lineno
self.funcName = func
self.created = ct
self.msecs = (ct - int(ct)) * 1000
self.relativeCreated = (self.created - _startTime) * 1000
if logThreads and threading:
self.thread = threading.get_ident()
self.threadName = threading.current_thread().name
else: # pragma: no cover
self.thread = None
self.threadName = None
if not logMultiprocessing: # pragma: no cover
self.processName = None
else:
self.processName = 'MainProcess'
mp = sys.modules.get('multiprocessing')
if mp is not None:
# Errors may occur if multiprocessing has not finished loading
# yet - e.g. if a custom import hook causes third-party code
# to run when multiprocessing calls import. See issue 8200
# for an example
try:
self.processName = mp.current_process().name
except Exception: #pragma: no cover
pass
if logProcesses and hasattr(os, 'getpid'):
self.process = os.getpid()
else:
self.process = None
def __str__(self):
return '<LogRecord: %s, %s, %s, %s, "%s">'%(self.name, self.levelno,
self.pathname, self.lineno, self.msg)
def getMessage(self):
"""
Return the message for this LogRecord.
Return the message for this LogRecord after merging any user-supplied
arguments with the message.
"""
msg = str(self.msg)
if self.args:
msg = msg % self.args
return msg
#
# Determine which class to use when instantiating log records.
#
_logRecordFactory = LogRecord
def setLogRecordFactory(factory):
"""
Set the factory to be used when instantiating a log record.
:param factory: A callable which will be called to instantiate
a log record.
"""
global _logRecordFactory
_logRecordFactory = factory
def getLogRecordFactory():
"""
Return the factory to be used when instantiating a log record.
"""
return _logRecordFactory
def makeLogRecord(dict):
"""
Make a LogRecord whose attributes are defined by the specified dictionary.
This function is useful for converting a logging event received over
a socket connection (which is sent as a dictionary) into a LogRecord
instance.
"""
rv = _logRecordFactory(None, None, "", 0, "", (), None, None)
rv.__dict__.update(dict)
return rv
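#
# Usage sketch (illustrative comment only): makeLogRecord() turns a plain dict -
# e.g. one received over a socket connection - back into a LogRecord.
#
#     d = {'name': 'net', 'levelno': INFO, 'levelname': 'INFO',
#          'msg': 'payload %s', 'args': ('ok',)}
#     rec = makeLogRecord(d)
#     rec.getMessage()          # -> 'payload ok'
#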
#---------------------------------------------------------------------------
# Formatter classes and functions
#---------------------------------------------------------------------------
class PercentStyle(object):
default_format = '%(message)s'
asctime_format = '%(asctime)s'
asctime_search = '%(asctime)'
def __init__(self, fmt):
self._fmt = fmt or self.default_format
def usesTime(self):
return self._fmt.find(self.asctime_search) >= 0
def format(self, record):
return self._fmt % record.__dict__
class StrFormatStyle(PercentStyle):
default_format = '{message}'
asctime_format = '{asctime}'
asctime_search = '{asctime'
def format(self, record):
return self._fmt.format(**record.__dict__)
class StringTemplateStyle(PercentStyle):
default_format = '${message}'
asctime_format = '${asctime}'
asctime_search = '${asctime}'
def __init__(self, fmt):
self._fmt = fmt or self.default_format
self._tpl = Template(self._fmt)
def usesTime(self):
fmt = self._fmt
return fmt.find('$asctime') >= 0 or fmt.find(self.asctime_format) >= 0
def format(self, record):
return self._tpl.substitute(**record.__dict__)
_STYLES = {
'%': PercentStyle,
'{': StrFormatStyle,
'$': StringTemplateStyle
}
class Formatter(object):
"""
Formatter instances are used to convert a LogRecord to text.
Formatters need to know how a LogRecord is constructed. They are
responsible for converting a LogRecord to (usually) a string which can
be interpreted by either a human or an external system. The base Formatter
allows a formatting string to be specified. If none is supplied, the
default value of "%(message)s" is used.
The Formatter can be initialized with a format string which makes use of
knowledge of the LogRecord attributes - e.g. the default value mentioned
above makes use of the fact that the user's message and arguments are pre-
formatted into a LogRecord's message attribute. Currently, the useful
attributes in a LogRecord are described by:
%(name)s Name of the logger (logging channel)
%(levelno)s Numeric logging level for the message (DEBUG, INFO,
WARNING, ERROR, CRITICAL)
%(levelname)s Text logging level for the message ("DEBUG", "INFO",
"WARNING", "ERROR", "CRITICAL")
%(pathname)s Full pathname of the source file where the logging
call was issued (if available)
%(filename)s Filename portion of pathname
%(module)s Module (name portion of filename)
%(lineno)d Source line number where the logging call was issued
(if available)
%(funcName)s Function name
%(created)f Time when the LogRecord was created (time.time()
return value)
%(asctime)s Textual time when the LogRecord was created
%(msecs)d Millisecond portion of the creation time
%(relativeCreated)d Time in milliseconds when the LogRecord was created,
relative to the time the logging module was loaded
(typically at application startup time)
%(thread)d Thread ID (if available)
%(threadName)s Thread name (if available)
%(process)d Process ID (if available)
%(message)s The result of record.getMessage(), computed just as
the record is emitted
"""
converter = time.localtime
def __init__(self, fmt=None, datefmt=None, style='%'):
"""
Initialize the formatter with specified format strings.
Initialize the formatter either with the specified format string, or a
default as described above. Allow for specialized date formatting with
the optional datefmt argument (if omitted, you get the ISO8601 format).
Use a style parameter of '%', '{' or '$' to specify that you want to
use one of %-formatting, :meth:`str.format` (``{}``) formatting or
:class:`string.Template` formatting in your format string.
.. versionchanged: 3.2
Added the ``style`` parameter.
"""
if style not in _STYLES:
raise ValueError('Style must be one of: %s' % ','.join(
_STYLES.keys()))
self._style = _STYLES[style](fmt)
self._fmt = self._style._fmt
self.datefmt = datefmt
default_time_format = '%Y-%m-%d %H:%M:%S'
default_msec_format = '%s,%03d'
def formatTime(self, record, datefmt=None):
"""
Return the creation time of the specified LogRecord as formatted text.
This method should be called from format() by a formatter which
wants to make use of a formatted time. This method can be overridden
in formatters to provide for any specific requirement, but the
basic behaviour is as follows: if datefmt (a string) is specified,
it is used with time.strftime() to format the creation time of the
record. Otherwise, the ISO8601 format is used. The resulting
string is returned. This function uses a user-configurable function
to convert the creation time to a tuple. By default, time.localtime()
is used; to change this for a particular formatter instance, set the
'converter' attribute to a function with the same signature as
time.localtime() or time.gmtime(). To change it for all formatters,
for example if you want all logging times to be shown in GMT,
set the 'converter' attribute in the Formatter class.
"""
ct = self.converter(record.created)
if datefmt:
s = time.strftime(datefmt, ct)
else:
t = time.strftime(self.default_time_format, ct)
s = self.default_msec_format % (t, record.msecs)
return s
def formatException(self, ei):
"""
Format and return the specified exception information as a string.
This default implementation just uses
traceback.print_exception()
"""
sio = io.StringIO()
tb = ei[2]
# See issues #9427, #1553375. Commented out for now.
#if getattr(self, 'fullstack', False):
# traceback.print_stack(tb.tb_frame.f_back, file=sio)
traceback.print_exception(ei[0], ei[1], tb, None, sio)
s = sio.getvalue()
sio.close()
if s[-1:] == "\n":
s = s[:-1]
return s
def usesTime(self):
"""
Check if the format uses the creation time of the record.
"""
return self._style.usesTime()
def formatMessage(self, record):
return self._style.format(record)
def formatStack(self, stack_info):
"""
This method is provided as an extension point for specialized
formatting of stack information.
The input data is a string as returned from a call to
:func:`traceback.print_stack`, but with the last trailing newline
removed.
The base implementation just returns the value passed in.
"""
return stack_info
def format(self, record):
"""
Format the specified record as text.
The record's attribute dictionary is used as the operand to a
string formatting operation which yields the returned string.
Before formatting the dictionary, a couple of preparatory steps
are carried out. The message attribute of the record is computed
using LogRecord.getMessage(). If the formatting string uses the
time (as determined by a call to usesTime()), formatTime() is
called to format the event time. If there is exception information,
it is formatted using formatException() and appended to the message.
"""
record.message = record.getMessage()
if self.usesTime():
record.asctime = self.formatTime(record, self.datefmt)
s = self.formatMessage(record)
if record.exc_info:
# Cache the traceback text to avoid converting it multiple times
# (it's constant anyway)
if not record.exc_text:
record.exc_text = self.formatException(record.exc_info)
if record.exc_text:
if s[-1:] != "\n":
s = s + "\n"
s = s + record.exc_text
if record.stack_info:
if s[-1:] != "\n":
s = s + "\n"
s = s + self.formatStack(record.stack_info)
return s
#
# The default formatter to use when no other is specified
#
_defaultFormatter = Formatter()
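#
# Usage sketch (illustrative comment, not part of the original source): the same
# layout expressed in each of the three supported format styles.
#
#     Formatter('%(levelname)s:%(name)s:%(message)s')               # '%' (default)
#     Formatter('{levelname}:{name}:{message}', style='{')          # str.format
#     Formatter('${levelname}:${name}:${message}', style='$')       # string.Template
#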
class BufferingFormatter(object):
"""
A formatter suitable for formatting a number of records.
"""
def __init__(self, linefmt=None):
"""
Optionally specify a formatter which will be used to format each
individual record.
"""
if linefmt:
self.linefmt = linefmt
else:
self.linefmt = _defaultFormatter
def formatHeader(self, records):
"""
Return the header string for the specified records.
"""
return ""
def formatFooter(self, records):
"""
Return the footer string for the specified records.
"""
return ""
def format(self, records):
"""
Format the specified records and return the result as a string.
"""
rv = ""
if len(records) > 0:
rv = rv + self.formatHeader(records)
for record in records:
rv = rv + self.linefmt.format(record)
rv = rv + self.formatFooter(records)
return rv
#---------------------------------------------------------------------------
# Filter classes and functions
#---------------------------------------------------------------------------
class Filter(object):
"""
Filter instances are used to perform arbitrary filtering of LogRecords.
Loggers and Handlers can optionally use Filter instances to filter
records as desired. The base filter class only allows events which are
below a certain point in the logger hierarchy. For example, a filter
initialized with "A.B" will allow events logged by loggers "A.B",
"A.B.C", "A.B.C.D", "A.B.D" etc. but not "A.BB", "B.A.B" etc. If
initialized with the empty string, all events are passed.
"""
def __init__(self, name=''):
"""
Initialize a filter.
Initialize with the name of the logger which, together with its
children, will have its events allowed through the filter. If no
name is specified, allow every event.
"""
self.name = name
self.nlen = len(name)
def filter(self, record):
"""
Determine if the specified record is to be logged.
Is the specified record to be logged? Returns 0 for no, nonzero for
yes. If deemed appropriate, the record may be modified in-place.
"""
if self.nlen == 0:
return True
elif self.name == record.name:
return True
elif record.name.find(self.name, 0, self.nlen) != 0:
return False
return (record.name[self.nlen] == ".")
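#
# Usage sketch (illustrative comment only): a Filter built with "A.B" passes
# records from logger "A.B" and its descendants, and rejects everything else.
#
#     flt = Filter('A.B')
#     flt.filter(makeLogRecord({'name': 'A.B.C'}))    # True
#     flt.filter(makeLogRecord({'name': 'A.BB'}))     # False
#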
class Filterer(object):
"""
A base class for loggers and handlers which allows them to share
common code.
"""
def __init__(self):
"""
Initialize the list of filters to be an empty list.
"""
self.filters = []
def addFilter(self, filter):
"""
Add the specified filter to this handler.
"""
if not (filter in self.filters):
self.filters.append(filter)
def removeFilter(self, filter):
"""
Remove the specified filter from this handler.
"""
if filter in self.filters:
self.filters.remove(filter)
def filter(self, record):
"""
Determine if a record is loggable by consulting all the filters.
The default is to allow the record to be logged; any filter can veto
this and the record is then dropped. Returns a zero value if a record
is to be dropped, else non-zero.
.. versionchanged: 3.2
Allow filters to be just callables.
"""
rv = True
for f in self.filters:
if hasattr(f, 'filter'):
result = f.filter(record)
else:
result = f(record) # assume callable - will raise if not
if not result:
rv = False
break
return rv
#---------------------------------------------------------------------------
# Handler classes and functions
#---------------------------------------------------------------------------
_handlers = weakref.WeakValueDictionary() #map of handler names to handlers
_handlerList = [] # added to allow handlers to be removed in reverse of order initialized
def _removeHandlerRef(wr):
"""
Remove a handler reference from the internal cleanup list.
"""
# This function can be called during module teardown, when globals are
# set to None. If _acquireLock is None, assume this is the case and do
# nothing.
if (_acquireLock is not None and _handlerList is not None and
_releaseLock is not None):
_acquireLock()
try:
if wr in _handlerList:
_handlerList.remove(wr)
finally:
_releaseLock()
def _addHandlerRef(handler):
"""
Add a handler to the internal cleanup list using a weak reference.
"""
_acquireLock()
try:
_handlerList.append(weakref.ref(handler, _removeHandlerRef))
finally:
_releaseLock()
class Handler(Filterer):
"""
Handler instances dispatch logging events to specific destinations.
The base handler class. Acts as a placeholder which defines the Handler
interface. Handlers can optionally use Formatter instances to format
records as desired. By default, no formatter is specified; in this case,
the 'raw' message as determined by record.message is logged.
"""
def __init__(self, level=NOTSET):
"""
Initializes the instance - basically setting the formatter to None
and the filter list to empty.
"""
Filterer.__init__(self)
self._name = None
self.level = _checkLevel(level)
self.formatter = None
# Add the handler to the global _handlerList (for cleanup on shutdown)
_addHandlerRef(self)
self.createLock()
def get_name(self):
return self._name
def set_name(self, name):
_acquireLock()
try:
if self._name in _handlers:
del _handlers[self._name]
self._name = name
if name:
_handlers[name] = self
finally:
_releaseLock()
name = property(get_name, set_name)
def createLock(self):
"""
Acquire a thread lock for serializing access to the underlying I/O.
"""
if threading:
self.lock = threading.RLock()
else: #pragma: no cover
self.lock = None
def acquire(self):
"""
Acquire the I/O thread lock.
"""
if self.lock:
self.lock.acquire()
def release(self):
"""
Release the I/O thread lock.
"""
if self.lock:
self.lock.release()
def setLevel(self, level):
"""
Set the logging level of this handler. level must be an int or a str.
"""
self.level = _checkLevel(level)
def format(self, record):
"""
Format the specified record.
If a formatter is set, use it. Otherwise, use the default formatter
for the module.
"""
if self.formatter:
fmt = self.formatter
else:
fmt = _defaultFormatter
return fmt.format(record)
def emit(self, record):
"""
Do whatever it takes to actually log the specified logging record.
This version is intended to be implemented by subclasses and so
raises a NotImplementedError.
"""
raise NotImplementedError('emit must be implemented '
'by Handler subclasses')
def handle(self, record):
"""
Conditionally emit the specified logging record.
Emission depends on filters which may have been added to the handler.
Wrap the actual emission of the record with acquisition/release of
the I/O thread lock. Returns whether the filter passed the record for
emission.
"""
rv = self.filter(record)
if rv:
self.acquire()
try:
self.emit(record)
finally:
self.release()
return rv
def setFormatter(self, fmt):
"""
Set the formatter for this handler.
"""
self.formatter = fmt
def flush(self):
"""
Ensure all logging output has been flushed.
This version does nothing and is intended to be implemented by
subclasses.
"""
pass
def close(self):
"""
Tidy up any resources used by the handler.
This version removes the handler from an internal map of handlers,
_handlers, which is used for handler lookup by name. Subclasses
should ensure that this gets called from overridden close()
methods.
"""
#get the module data lock, as we're updating a shared structure.
_acquireLock()
try: #unlikely to raise an exception, but you never know...
if self._name and self._name in _handlers:
del _handlers[self._name]
finally:
_releaseLock()
def handleError(self, record):
"""
Handle errors which occur during an emit() call.
This method should be called from handlers when an exception is
encountered during an emit() call. If raiseExceptions is false,
exceptions get silently ignored. This is what is mostly wanted
for a logging system - most users will not care about errors in
the logging system, they are more interested in application errors.
You could, however, replace this with a custom handler if you wish.
The record which was being processed is passed in to this method.
"""
if raiseExceptions and sys.stderr: # see issue 13807
ei = sys.exc_info()
try:
traceback.print_exception(ei[0], ei[1], ei[2],
None, sys.stderr)
sys.stderr.write('Logged from file %s, line %s\n' % (
record.filename, record.lineno))
except IOError: #pragma: no cover
pass # see issue 5971
finally:
del ei
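#
# Usage sketch (assumption: user code, not part of this module): a minimal
# Handler subclass only has to override emit(); formatting, filtering and
# locking are inherited from Handler.
#
#     class ListHandler(Handler):
#         def __init__(self, level=NOTSET):
#             Handler.__init__(self, level)
#             self.records = []
#         def emit(self, record):
#             self.records.append(self.format(record))
#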
class StreamHandler(Handler):
"""
A handler class which writes logging records, appropriately formatted,
to a stream. Note that this class does not close the stream, as
sys.stdout or sys.stderr may be used.
"""
terminator = '\n'
def __init__(self, stream=None):
"""
Initialize the handler.
If stream is not specified, sys.stderr is used.
"""
Handler.__init__(self)
if stream is None:
stream = sys.stderr
self.stream = stream
def flush(self):
"""
Flushes the stream.
"""
self.acquire()
try:
if self.stream and hasattr(self.stream, "flush"):
self.stream.flush()
finally:
self.release()
def emit(self, record):
"""
Emit a record.
If a formatter is specified, it is used to format the record.
The record is then written to the stream with a trailing newline. If
exception information is present, it is formatted using
traceback.print_exception and appended to the stream. If the stream
has an 'encoding' attribute, it is used to determine how to do the
output to the stream.
"""
try:
msg = self.format(record)
stream = self.stream
stream.write(msg)
stream.write(self.terminator)
self.flush()
except (KeyboardInterrupt, SystemExit): #pragma: no cover
raise
except:
self.handleError(record)
class FileHandler(StreamHandler):
"""
A handler class which writes formatted logging records to disk files.
"""
def __init__(self, filename, mode='a', encoding=None, delay=False):
"""
Open the specified file and use it as the stream for logging.
"""
#keep the absolute path, otherwise derived classes which use this
#may come a cropper when the current directory changes
self.baseFilename = os.path.abspath(filename)
self.mode = mode
self.encoding = encoding
self.delay = delay
if delay:
#We don't open the stream, but we still need to call the
#Handler constructor to set level, formatter, lock etc.
Handler.__init__(self)
self.stream = None
else:
StreamHandler.__init__(self, self._open())
def close(self):
"""
Closes the stream.
"""
self.acquire()
try:
if self.stream:
self.flush()
if hasattr(self.stream, "close"):
self.stream.close()
StreamHandler.close(self)
self.stream = None
finally:
self.release()
def _open(self):
"""
Open the current base file with the (original) mode and encoding.
Return the resulting stream.
"""
return open(self.baseFilename, self.mode, encoding=self.encoding)
def emit(self, record):
"""
Emit a record.
If the stream was not opened because 'delay' was specified in the
constructor, open it before calling the superclass's emit.
"""
if self.stream is None:
self.stream = self._open()
StreamHandler.emit(self, record)
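#
# Usage sketch (illustrative comment; the path is hypothetical): with delay=True
# the target file is only opened on the first emit() call.
#
#     fh = FileHandler('/tmp/app.log', delay=True)    # nothing opened yet
#     fh.setFormatter(Formatter('%(asctime)s %(message)s'))
#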
class _StderrHandler(StreamHandler):
"""
This class is like a StreamHandler using sys.stderr, but always uses
whatever sys.stderr is currently set to rather than the value of
sys.stderr at handler construction time.
"""
def __init__(self, level=NOTSET):
"""
Initialize the handler.
"""
Handler.__init__(self, level)
@property
def stream(self):
return sys.stderr
_defaultLastResort = _StderrHandler(WARNING)
lastResort = _defaultLastResort
#---------------------------------------------------------------------------
# Manager classes and functions
#---------------------------------------------------------------------------
class PlaceHolder(object):
"""
PlaceHolder instances are used in the Manager logger hierarchy to take
the place of nodes for which no loggers have been defined. This class is
intended for internal use only and not as part of the public API.
"""
def __init__(self, alogger):
"""
Initialize with the specified logger being a child of this placeholder.
"""
self.loggerMap = { alogger : None }
def append(self, alogger):
"""
Add the specified logger as a child of this placeholder.
"""
if alogger not in self.loggerMap:
self.loggerMap[alogger] = None
#
# Determine which class to use when instantiating loggers.
#
_loggerClass = None
def setLoggerClass(klass):
"""
Set the class to be used when instantiating a logger. The class should
define __init__() such that only a name argument is required, and the
__init__() should call Logger.__init__()
"""
if klass != Logger:
if not issubclass(klass, Logger):
raise TypeError("logger not derived from logging.Logger: "
+ klass.__name__)
global _loggerClass
_loggerClass = klass
def getLoggerClass():
"""
Return the class to be used when instantiating a logger.
"""
return _loggerClass
class Manager(object):
"""
There is [under normal circumstances] just one Manager instance, which
holds the hierarchy of loggers.
"""
def __init__(self, rootnode):
"""
Initialize the manager with the root node of the logger hierarchy.
"""
self.root = rootnode
self.disable = 0
self.emittedNoHandlerWarning = False
self.loggerDict = {}
self.loggerClass = None
self.logRecordFactory = None
def getLogger(self, name):
"""
Get a logger with the specified name (channel name), creating it
if it doesn't yet exist. This name is a dot-separated hierarchical
name, such as "a", "a.b", "a.b.c" or similar.
If a PlaceHolder existed for the specified name [i.e. the logger
didn't exist but a child of it did], replace it with the created
logger and fix up the parent/child references which pointed to the
placeholder to now point to the logger.
"""
rv = None
if not isinstance(name, str):
raise TypeError('A logger name must be a string')
_acquireLock()
try:
if name in self.loggerDict:
rv = self.loggerDict[name]
if isinstance(rv, PlaceHolder):
ph = rv
rv = (self.loggerClass or _loggerClass)(name)
rv.manager = self
self.loggerDict[name] = rv
self._fixupChildren(ph, rv)
self._fixupParents(rv)
else:
rv = (self.loggerClass or _loggerClass)(name)
rv.manager = self
self.loggerDict[name] = rv
self._fixupParents(rv)
finally:
_releaseLock()
return rv
def setLoggerClass(self, klass):
"""
Set the class to be used when instantiating a logger with this Manager.
"""
if klass != Logger:
if not issubclass(klass, Logger):
raise TypeError("logger not derived from logging.Logger: "
+ klass.__name__)
self.loggerClass = klass
def setLogRecordFactory(self, factory):
"""
Set the factory to be used when instantiating a log record with this
Manager.
"""
self.logRecordFactory = factory
def _fixupParents(self, alogger):
"""
Ensure that there are either loggers or placeholders all the way
from the specified logger to the root of the logger hierarchy.
"""
name = alogger.name
i = name.rfind(".")
rv = None
while (i > 0) and not rv:
substr = name[:i]
if substr not in self.loggerDict:
self.loggerDict[substr] = PlaceHolder(alogger)
else:
obj = self.loggerDict[substr]
if isinstance(obj, Logger):
rv = obj
else:
assert isinstance(obj, PlaceHolder)
obj.append(alogger)
i = name.rfind(".", 0, i - 1)
if not rv:
rv = self.root
alogger.parent = rv
def _fixupChildren(self, ph, alogger):
"""
Ensure that children of the placeholder ph are connected to the
specified logger.
"""
name = alogger.name
namelen = len(name)
for c in ph.loggerMap.keys():
#The if means ... if not c.parent.name.startswith(name)
if c.parent.name[:namelen] != name:
alogger.parent = c.parent
c.parent = alogger
#---------------------------------------------------------------------------
# Logger classes and functions
#---------------------------------------------------------------------------
class Logger(Filterer):
"""
Instances of the Logger class represent a single logging channel. A
"logging channel" indicates an area of an application. Exactly how an
"area" is defined is up to the application developer. Since an
application can have any number of areas, logging channels are identified
by a unique string. Application areas can be nested (e.g. an area
of "input processing" might include sub-areas "read CSV files", "read
XLS files" and "read Gnumeric files"). To cater for this natural nesting,
channel names are organized into a namespace hierarchy where levels are
separated by periods, much like the Java or Python package namespace. So
in the instance given above, channel names might be "input" for the upper
level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels.
There is no arbitrary limit to the depth of nesting.
"""
def __init__(self, name, level=NOTSET):
"""
Initialize the logger with a name and an optional level.
"""
Filterer.__init__(self)
self.name = name
self.level = _checkLevel(level)
self.parent = None
self.propagate = True
self.handlers = []
self.disabled = False
def setLevel(self, level):
"""
Set the logging level of this logger. level must be an int or a str.
"""
self.level = _checkLevel(level)
def debug(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'DEBUG'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.debug("Houston, we have a %s", "thorny problem", exc_info=1)
"""
if self.isEnabledFor(DEBUG):
self._log(DEBUG, msg, args, **kwargs)
def info(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'INFO'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.info("Houston, we have a %s", "interesting problem", exc_info=1)
"""
if self.isEnabledFor(INFO):
self._log(INFO, msg, args, **kwargs)
def warning(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'WARNING'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.warning("Houston, we have a %s", "bit of a problem", exc_info=1)
"""
if self.isEnabledFor(WARNING):
self._log(WARNING, msg, args, **kwargs)
def warn(self, msg, *args, **kwargs):
warnings.warn("The 'warn' method is deprecated, "
"use 'warning' instead", DeprecationWarning, 2)
self.warning(msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'ERROR'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.error("Houston, we have a %s", "major problem", exc_info=1)
"""
if self.isEnabledFor(ERROR):
self._log(ERROR, msg, args, **kwargs)
def exception(self, msg, *args, **kwargs):
"""
Convenience method for logging an ERROR with exception information.
"""
kwargs['exc_info'] = True
self.error(msg, *args, **kwargs)
def critical(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'CRITICAL'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.critical("Houston, we have a %s", "major disaster", exc_info=1)
"""
if self.isEnabledFor(CRITICAL):
self._log(CRITICAL, msg, args, **kwargs)
fatal = critical
def log(self, level, msg, *args, **kwargs):
"""
Log 'msg % args' with the integer severity 'level'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.log(level, "We have a %s", "mysterious problem", exc_info=1)
"""
if not isinstance(level, int):
if raiseExceptions:
raise TypeError("level must be an integer")
else:
return
if self.isEnabledFor(level):
self._log(level, msg, args, **kwargs)
def findCaller(self, stack_info=False):
"""
Find the stack frame of the caller so that we can note the source
file name, line number and function name.
"""
f = currentframe()
#On some versions of IronPython, currentframe() returns None if
#IronPython isn't run with -X:Frames.
if f is not None:
f = f.f_back
rv = "(unknown file)", 0, "(unknown function)", None
while hasattr(f, "f_code"):
co = f.f_code
filename = os.path.normcase(co.co_filename)
if filename == _srcfile:
f = f.f_back
continue
sinfo = None
if stack_info:
sio = io.StringIO()
sio.write('Stack (most recent call last):\n')
traceback.print_stack(f, file=sio)
sinfo = sio.getvalue()
if sinfo[-1] == '\n':
sinfo = sinfo[:-1]
sio.close()
rv = (co.co_filename, f.f_lineno, co.co_name, sinfo)
break
return rv
def makeRecord(self, name, level, fn, lno, msg, args, exc_info,
func=None, extra=None, sinfo=None):
"""
A factory method which can be overridden in subclasses to create
specialized LogRecords.
"""
rv = _logRecordFactory(name, level, fn, lno, msg, args, exc_info, func,
sinfo)
if extra is not None:
for key in extra:
if (key in ["message", "asctime"]) or (key in rv.__dict__):
raise KeyError("Attempt to overwrite %r in LogRecord" % key)
rv.__dict__[key] = extra[key]
return rv
def _log(self, level, msg, args, exc_info=None, extra=None, stack_info=False):
"""
Low-level logging routine which creates a LogRecord and then calls
all the handlers of this logger to handle the record.
"""
sinfo = None
if _srcfile:
#IronPython doesn't track Python frames, so findCaller raises an
#exception on some versions of IronPython. We trap it here so that
#IronPython can use logging.
try:
fn, lno, func, sinfo = self.findCaller(stack_info)
except ValueError: # pragma: no cover
fn, lno, func = "(unknown file)", 0, "(unknown function)"
else: # pragma: no cover
fn, lno, func = "(unknown file)", 0, "(unknown function)"
if exc_info:
if not isinstance(exc_info, tuple):
exc_info = sys.exc_info()
record = self.makeRecord(self.name, level, fn, lno, msg, args,
exc_info, func, extra, sinfo)
self.handle(record)
def handle(self, record):
"""
Call the handlers for the specified record.
This method is used for unpickled records received from a socket, as
well as those created locally. Logger-level filtering is applied.
"""
if (not self.disabled) and self.filter(record):
self.callHandlers(record)
def addHandler(self, hdlr):
"""
Add the specified handler to this logger.
"""
_acquireLock()
try:
if not (hdlr in self.handlers):
self.handlers.append(hdlr)
finally:
_releaseLock()
def removeHandler(self, hdlr):
"""
Remove the specified handler from this logger.
"""
_acquireLock()
try:
if hdlr in self.handlers:
self.handlers.remove(hdlr)
finally:
_releaseLock()
def hasHandlers(self):
"""
See if this logger has any handlers configured.
Loop through all handlers for this logger and its parents in the
logger hierarchy. Return True if a handler was found, else False.
Stop searching up the hierarchy whenever a logger with the "propagate"
attribute set to zero is found - that will be the last logger which
is checked for the existence of handlers.
"""
c = self
rv = False
while c:
if c.handlers:
rv = True
break
if not c.propagate:
break
else:
c = c.parent
return rv
def callHandlers(self, record):
"""
Pass a record to all relevant handlers.
Loop through all handlers for this logger and its parents in the
logger hierarchy. If no handler was found, output a one-off error
message to sys.stderr. Stop searching up the hierarchy whenever a
logger with the "propagate" attribute set to zero is found - that
will be the last logger whose handlers are called.
"""
c = self
found = 0
while c:
for hdlr in c.handlers:
found = found + 1
if record.levelno >= hdlr.level:
hdlr.handle(record)
if not c.propagate:
c = None #break out
else:
c = c.parent
if (found == 0):
if lastResort:
if record.levelno >= lastResort.level:
lastResort.handle(record)
elif raiseExceptions and not self.manager.emittedNoHandlerWarning:
sys.stderr.write("No handlers could be found for logger"
" \"%s\"\n" % self.name)
self.manager.emittedNoHandlerWarning = True
def getEffectiveLevel(self):
"""
Get the effective level for this logger.
Loop through this logger and its parents in the logger hierarchy,
looking for a non-zero logging level. Return the first one found.
"""
logger = self
while logger:
if logger.level:
return logger.level
logger = logger.parent
return NOTSET
def isEnabledFor(self, level):
"""
Is this logger enabled for level 'level'?
"""
if self.manager.disable >= level:
return False
return level >= self.getEffectiveLevel()
def getChild(self, suffix):
"""
Get a logger which is a descendant to this one.
This is a convenience method, such that
logging.getLogger('abc').getChild('def.ghi')
is the same as
logging.getLogger('abc.def.ghi')
It's useful, for example, when the parent logger is named using
__name__ rather than a literal string.
"""
if self.root is not self:
suffix = '.'.join((self.name, suffix))
return self.manager.getLogger(suffix)
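#
# Usage sketch (illustrative comment only; getLogger() is the module-level
# helper declared in __all__ above and defined later in this file): getChild()
# builds dotted child names relative to the parent logger.
#
#     parent = getLogger('myapp')
#     child = parent.getChild('db.queries')
#     child.name                # -> 'myapp.db.queries'
#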
class RootLogger(Logger):
"""
A root logger is not that different to any other logger, except that
it must have a logging level and there is only one instance of it in
the hierarchy.
"""
def __init__(self, level):
"""
Initialize the logger with the name "root".
"""
Logger.__init__(self, "root", level)
_loggerClass = Logger
class LoggerAdapter(object):
"""
An adapter for loggers which makes it easier to specify contextual
information in logging output.
"""
def __init__(self, logger, extra):
"""
Initialize the adapter with a logger and a dict-like object which
provides contextual information. This constructor signature allows
easy stacking of LoggerAdapters, if so desired.
You can effectively pass keyword arguments as shown in the
following example:
adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2"))
"""
self.logger = logger
self.extra = extra
def process(self, msg, kwargs):
"""
Process the logging message and keyword arguments passed in to
a logging call to insert contextual information. You can either
manipulate the message itself, the keyword args or both. Return
the message and kwargs modified (or not) to suit your needs.
Normally, you'll only need to override this one method in a
LoggerAdapter subclass for your specific needs.
"""
kwargs["extra"] = self.extra
return msg, kwargs
#
# Boilerplate convenience methods
#
def debug(self, msg, *args, **kwargs):
"""
Delegate a debug call to the underlying logger.
"""
self.log(DEBUG, msg, *args, **kwargs)
def info(self, msg, *args, **kwargs):
"""
Delegate an info call to the underlying logger.
"""
self.log(INFO, msg, *args, **kwargs)
def warning(self, msg, *args, **kwargs):
"""
Delegate a warning call to the underlying logger.
"""
self.log(WARNING, msg, *args, **kwargs)
def warn(self, msg, *args, **kwargs):
warnings.warn("The 'warn' method is deprecated, "
"use 'warning' instead", DeprecationWarning, 2)
self.warning(msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
"""
Delegate an error call to the underlying logger.
"""
self.log(ERROR, msg, *args, **kwargs)
def exception(self, msg, *args, **kwargs):
"""
Delegate an exception call to the underlying logger.
"""
kwargs["exc_info"] = True
self.log(ERROR, msg, *args, **kwargs)
def critical(self, msg, *args, **kwargs):
"""
Delegate a critical call to the underlying logger.
"""
self.log(CRITICAL, msg, *args, **kwargs)
def log(self, level, msg, *args, **kwargs):
"""
Delegate a log call to the underlying logger, after adding
contextual information from this adapter instance.
"""
if self.isEnabledFor(level):
msg, kwargs = self.process(msg, kwargs)
self.logger._log(level, msg, args, **kwargs)
def isEnabledFor(self, level):
"""
Is this logger enabled for level 'level'?
"""
if self.logger.manager.disable >= level:
return False
return level >= self.getEffectiveLevel()
def setLevel(self, level):
"""
Set the specified level on the underlying logger.
"""
self.logger.setLevel(level)
def getEffectiveLevel(self):
"""
Get the effective level for the underlying logger.
"""
return self.logger.getEffectiveLevel()
def hasHandlers(self):
"""
See if the underlying logger has any handlers.
"""
return self.logger.hasHandlers()
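# A minimal LoggerAdapter usage sketch (illustrative only; the logger name and
# the contents of the 'extra' dict are arbitrary):
#
#     conn_logger = getLogger('myapp.net')
#     adapter = LoggerAdapter(conn_logger, {'clientip': '203.0.113.7'})
#     adapter.warning('connection reset')
#
# Every record forwarded by the adapter carries a 'clientip' attribute, so a
# format string such as '%(clientip)s %(message)s' can surface it.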
root = RootLogger(WARNING)
Logger.root = root
Logger.manager = Manager(Logger.root)
#---------------------------------------------------------------------------
# Configuration classes and functions
#---------------------------------------------------------------------------
BASIC_FORMAT = "%(levelname)s:%(name)s:%(message)s"
def basicConfig(**kwargs):
"""
Do basic configuration for the logging system.
This function does nothing if the root logger already has handlers
configured. It is a convenience method intended for use by simple scripts
to do one-shot configuration of the logging package.
The default behaviour is to create a StreamHandler which writes to
sys.stderr, set a formatter using the BASIC_FORMAT format string, and
add the handler to the root logger.
A number of optional keyword arguments may be specified, which can alter
the default behaviour.
filename Specifies that a FileHandler be created, using the specified
filename, rather than a StreamHandler.
filemode Specifies the mode to open the file, if filename is specified
(if filemode is unspecified, it defaults to 'a').
format Use the specified format string for the handler.
datefmt Use the specified date/time format.
style If a format string is specified, use this to specify the
type of format string (possible values '%', '{', '$', for
%-formatting, :meth:`str.format` and :class:`string.Template`
- defaults to '%').
level Set the root logger level to the specified level.
stream Use the specified stream to initialize the StreamHandler. Note
that this argument is incompatible with 'filename' - if both
are present, 'stream' is ignored.
handlers If specified, this should be an iterable of already created
              handlers, which will be added to the root logger. Any handler
in the list which does not have a formatter assigned will be
assigned the formatter created in this function.
Note that you could specify a stream created using open(filename, mode)
rather than passing the filename and mode in. However, it should be
remembered that StreamHandler does not close its stream (since it may be
using sys.stdout or sys.stderr), whereas FileHandler closes its stream
when the handler is closed.
.. versionchanged:: 3.2
Added the ``style`` parameter.
.. versionchanged:: 3.3
Added the ``handlers`` parameter. A ``ValueError`` is now thrown for
incompatible arguments (e.g. ``handlers`` specified together with
``filename``/``filemode``, or ``filename``/``filemode`` specified
together with ``stream``, or ``handlers`` specified together with
       ``stream``).
"""
# Add thread safety in case someone mistakenly calls
# basicConfig() from multiple threads
_acquireLock()
try:
if len(root.handlers) == 0:
handlers = kwargs.get("handlers")
if handlers is None:
if "stream" in kwargs and "filename" in kwargs:
raise ValueError("'stream' and 'filename' should not be "
"specified together")
else:
if "stream" in kwargs or "filename" in kwargs:
raise ValueError("'stream' or 'filename' should not be "
"specified together with 'handlers'")
if handlers is None:
filename = kwargs.get("filename")
if filename:
mode = kwargs.get("filemode", 'a')
h = FileHandler(filename, mode)
else:
stream = kwargs.get("stream")
h = StreamHandler(stream)
handlers = [h]
fs = kwargs.get("format", BASIC_FORMAT)
dfs = kwargs.get("datefmt", None)
style = kwargs.get("style", '%')
fmt = Formatter(fs, dfs, style)
for h in handlers:
if h.formatter is None:
h.setFormatter(fmt)
root.addHandler(h)
level = kwargs.get("level")
if level is not None:
root.setLevel(level)
finally:
_releaseLock()
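# A one-shot configuration sketch using basicConfig (illustrative; the file name
# and format string below are arbitrary examples, not defaults of this module):
#
#     import logging
#     logging.basicConfig(filename='app.log', filemode='a', level=logging.INFO,
#                         format='%(asctime)s %(levelname)s %(name)s: %(message)s')
#     logging.info('service started')   # handled by the FileHandler created above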
#---------------------------------------------------------------------------
# Utility functions at module level.
# Basically delegate everything to the root logger.
#---------------------------------------------------------------------------
def getLogger(name=None):
"""
Return a logger with the specified name, creating it if necessary.
If no name is specified, return the root logger.
"""
if name:
return Logger.manager.getLogger(name)
else:
return root
def critical(msg, *args, **kwargs):
"""
Log a message with severity 'CRITICAL' on the root logger. If the logger
has no handlers, call basicConfig() to add a console handler with a
pre-defined format.
"""
if len(root.handlers) == 0:
basicConfig()
root.critical(msg, *args, **kwargs)
fatal = critical
def error(msg, *args, **kwargs):
"""
Log a message with severity 'ERROR' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.error(msg, *args, **kwargs)
def exception(msg, *args, **kwargs):
"""
Log a message with severity 'ERROR' on the root logger, with exception
information. If the logger has no handlers, basicConfig() is called to add
a console handler with a pre-defined format.
"""
kwargs['exc_info'] = True
error(msg, *args, **kwargs)
def warning(msg, *args, **kwargs):
"""
Log a message with severity 'WARNING' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.warning(msg, *args, **kwargs)
def warn(msg, *args, **kwargs):
warnings.warn("The 'warn' function is deprecated, "
"use 'warning' instead", DeprecationWarning, 2)
warning(msg, *args, **kwargs)
def info(msg, *args, **kwargs):
"""
Log a message with severity 'INFO' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.info(msg, *args, **kwargs)
def debug(msg, *args, **kwargs):
"""
Log a message with severity 'DEBUG' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.debug(msg, *args, **kwargs)
def log(level, msg, *args, **kwargs):
"""
Log 'msg % args' with the integer severity 'level' on the root logger. If
the logger has no handlers, call basicConfig() to add a console handler
with a pre-defined format.
"""
if len(root.handlers) == 0:
basicConfig()
root.log(level, msg, *args, **kwargs)
def disable(level):
"""
Disable all logging calls of severity 'level' and below.
"""
root.manager.disable = level
def shutdown(handlerList=_handlerList):
"""
Perform any cleanup actions in the logging system (e.g. flushing
buffers).
Should be called at application exit.
"""
for wr in reversed(handlerList[:]):
#errors might occur, for example, if files are locked
#we just ignore them if raiseExceptions is not set
try:
h = wr()
if h:
try:
h.acquire()
h.flush()
h.close()
except (IOError, ValueError):
# Ignore errors which might be caused
# because handlers have been closed but
# references to them are still around at
# application exit.
pass
finally:
h.release()
except:
if raiseExceptions:
raise
#else, swallow
#Let's try and shutdown automatically on application exit...
import atexit
atexit.register(shutdown)
# Null handler
class NullHandler(Handler):
"""
This handler does nothing. It's intended to be used to avoid the
"No handlers could be found for logger XXX" one-off warning. This is
important for library code, which may contain code to log events. If a user
of the library does not configure logging, the one-off warning might be
produced; to avoid this, the library developer simply needs to instantiate
a NullHandler and add it to the top-level logger of the library module or
package.
"""
def handle(self, record):
"""Stub."""
def emit(self, record):
"""Stub."""
def createLock(self):
self.lock = None
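# Typical library-side sketch (illustrative): attach a NullHandler to the
# library's top-level logger so that importing the library never produces the
# "No handlers could be found" warning, and leave real configuration to the
# application:
#
#     getLogger('mylib').addHandler(NullHandler())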
# Warnings integration
_warnings_showwarning = None
def _showwarning(message, category, filename, lineno, file=None, line=None):
"""
    Implementation of showwarning which redirects to logging, which will first
check to see if the file parameter is None. If a file is specified, it will
delegate to the original warnings implementation of showwarning. Otherwise,
it will call warnings.formatwarning and will log the resulting string to a
warnings logger named "py.warnings" with level logging.WARNING.
"""
if file is not None:
if _warnings_showwarning is not None:
_warnings_showwarning(message, category, filename, lineno, file, line)
else:
s = warnings.formatwarning(message, category, filename, lineno, line)
logger = getLogger("py.warnings")
if not logger.handlers:
logger.addHandler(NullHandler())
logger.warning("%s", s)
def captureWarnings(capture):
"""
If capture is true, redirect all warnings to the logging package.
If capture is False, ensure that warnings are not redirected to logging
but to their original destinations.
"""
global _warnings_showwarning
if capture:
if _warnings_showwarning is None:
_warnings_showwarning = warnings.showwarning
warnings.showwarning = _showwarning
else:
if _warnings_showwarning is not None:
warnings.showwarning = _warnings_showwarning
_warnings_showwarning = None
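# A short captureWarnings sketch (illustrative): route warnings.warn() calls to
# the 'py.warnings' logger instead of stderr, then restore the default behaviour:
#
#     captureWarnings(True)
#     warnings.warn('deprecated call')   # emitted at WARNING via 'py.warnings'
#     captureWarnings(False)             # back to warnings' own showwarning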
|
Thor77/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/ooyala.py
|
11
|
from __future__ import unicode_literals
import re
import base64
from .common import InfoExtractor
from ..utils import (
int_or_none,
float_or_none,
ExtractorError,
unsmuggle_url,
determine_ext,
)
from ..compat import compat_urllib_parse_urlencode
class OoyalaBaseIE(InfoExtractor):
_PLAYER_BASE = 'http://player.ooyala.com/'
_CONTENT_TREE_BASE = _PLAYER_BASE + 'player_api/v1/content_tree/'
_AUTHORIZATION_URL_TEMPLATE = _PLAYER_BASE + 'sas/player_api/v2/authorization/embed_code/%s/%s?'
def _extract(self, content_tree_url, video_id, domain='example.org'):
content_tree = self._download_json(content_tree_url, video_id)['content_tree']
metadata = content_tree[list(content_tree)[0]]
embed_code = metadata['embed_code']
pcode = metadata.get('asset_pcode') or embed_code
title = metadata['title']
auth_data = self._download_json(
self._AUTHORIZATION_URL_TEMPLATE % (pcode, embed_code) +
compat_urllib_parse_urlencode({
'domain': domain,
'supportedFormats': 'mp4,rtmp,m3u8,hds',
}), video_id)
cur_auth_data = auth_data['authorization_data'][embed_code]
urls = []
formats = []
if cur_auth_data['authorized']:
for stream in cur_auth_data['streams']:
s_url = base64.b64decode(
stream['url']['data'].encode('ascii')).decode('utf-8')
if s_url in urls:
continue
urls.append(s_url)
ext = determine_ext(s_url, None)
delivery_type = stream['delivery_type']
if delivery_type == 'hls' or ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
s_url, embed_code, 'mp4', 'm3u8_native',
m3u8_id='hls', fatal=False))
elif delivery_type == 'hds' or ext == 'f4m':
formats.extend(self._extract_f4m_formats(
s_url + '?hdcore=3.7.0', embed_code, f4m_id='hds', fatal=False))
elif ext == 'smil':
formats.extend(self._extract_smil_formats(
s_url, embed_code, fatal=False))
else:
formats.append({
'url': s_url,
'ext': ext or stream.get('delivery_type'),
'vcodec': stream.get('video_codec'),
'format_id': delivery_type,
'width': int_or_none(stream.get('width')),
'height': int_or_none(stream.get('height')),
'abr': int_or_none(stream.get('audio_bitrate')),
'vbr': int_or_none(stream.get('video_bitrate')),
'fps': float_or_none(stream.get('framerate')),
})
else:
raise ExtractorError('%s said: %s' % (
self.IE_NAME, cur_auth_data['message']), expected=True)
self._sort_formats(formats)
subtitles = {}
for lang, sub in metadata.get('closed_captions_vtt', {}).get('captions', {}).items():
sub_url = sub.get('url')
if not sub_url:
continue
subtitles[lang] = [{
'url': sub_url,
}]
return {
'id': embed_code,
'title': title,
'description': metadata.get('description'),
'thumbnail': metadata.get('thumbnail_image') or metadata.get('promo_image'),
'duration': float_or_none(metadata.get('duration'), 1000),
'subtitles': subtitles,
'formats': formats,
}
class OoyalaIE(OoyalaBaseIE):
_VALID_URL = r'(?:ooyala:|https?://.+?\.ooyala\.com/.*?(?:embedCode|ec)=)(?P<id>.+?)(&|$)'
_TESTS = [
{
# From http://it.slashdot.org/story/13/04/25/178216/recovering-data-from-broken-hard-drives-and-ssds-video
'url': 'http://player.ooyala.com/player.js?embedCode=pxczE2YjpfHfn1f3M-ykG_AmJRRn0PD8',
'info_dict': {
'id': 'pxczE2YjpfHfn1f3M-ykG_AmJRRn0PD8',
'ext': 'mp4',
'title': 'Explaining Data Recovery from Hard Drives and SSDs',
'description': 'How badly damaged does a drive have to be to defeat Russell and his crew? Apparently, smashed to bits.',
'duration': 853.386,
},
# The video in the original webpage now uses PlayWire
'skip': 'Ooyala said: movie expired',
}, {
# Only available for ipad
'url': 'http://player.ooyala.com/player.js?embedCode=x1b3lqZDq9y_7kMyC2Op5qo-p077tXD0',
'info_dict': {
'id': 'x1b3lqZDq9y_7kMyC2Op5qo-p077tXD0',
'ext': 'mp4',
'title': 'Simulation Overview - Levels of Simulation',
'duration': 194.948,
},
},
{
# Information available only through SAS api
# From http://community.plm.automation.siemens.com/t5/News-NX-Manufacturing/Tool-Path-Divide/ba-p/4187
'url': 'http://player.ooyala.com/player.js?embedCode=FiOG81ZTrvckcchQxmalf4aQj590qTEx',
'md5': 'a84001441b35ea492bc03736e59e7935',
'info_dict': {
'id': 'FiOG81ZTrvckcchQxmalf4aQj590qTEx',
'ext': 'mp4',
'title': 'Divide Tool Path.mp4',
'duration': 204.405,
}
}
]
@staticmethod
def _url_for_embed_code(embed_code):
return 'http://player.ooyala.com/player.js?embedCode=%s' % embed_code
@classmethod
def _build_url_result(cls, embed_code):
return cls.url_result(cls._url_for_embed_code(embed_code),
ie=cls.ie_key())
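    # Illustrative helper usage (a sketch, not part of youtube-dl's public API):
    # a bare embed code can be turned into the canonical player URL matched by
    # _VALID_URL, e.g.
    #
    #     OoyalaIE._url_for_embed_code('pxczE2YjpfHfn1f3M-ykG_AmJRRn0PD8')
    #     # -> 'http://player.ooyala.com/player.js?embedCode=pxczE2YjpfHfn1f3M-ykG_AmJRRn0PD8'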
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
embed_code = self._match_id(url)
domain = smuggled_data.get('domain')
content_tree_url = self._CONTENT_TREE_BASE + 'embed_code/%s/%s' % (embed_code, embed_code)
return self._extract(content_tree_url, embed_code, domain)
class OoyalaExternalIE(OoyalaBaseIE):
_VALID_URL = r'''(?x)
(?:
ooyalaexternal:|
https?://.+?\.ooyala\.com/.*?\bexternalId=
)
(?P<partner_id>[^:]+)
:
(?P<id>.+)
(?:
:|
.*?&pcode=
)
(?P<pcode>.+?)
(?:&|$)
'''
_TEST = {
'url': 'https://player.ooyala.com/player.js?externalId=espn:10365079&pcode=1kNG061cgaoolOncv54OAO1ceO-I&adSetCode=91cDU6NuXTGKz3OdjOxFdAgJVtQcKJnI&callback=handleEvents&hasModuleParams=1&height=968&playerBrandingId=7af3bd04449c444c964f347f11873075&targetReplaceId=videoPlayer&width=1656&wmode=opaque&allowScriptAccess=always',
'info_dict': {
'id': 'FkYWtmazr6Ed8xmvILvKLWjd4QvYZpzG',
'ext': 'mp4',
'title': 'dm_140128_30for30Shorts___JudgingJewellv2',
'duration': 1302.0,
},
'params': {
# m3u8 download
'skip_download': True,
},
}
def _real_extract(self, url):
partner_id, video_id, pcode = re.match(self._VALID_URL, url).groups()
content_tree_url = self._CONTENT_TREE_BASE + 'external_id/%s/%s:%s' % (pcode, partner_id, video_id)
return self._extract(content_tree_url, video_id)
|
TheMrNomis/mavlink
|
refs/heads/master
|
pymavlink/tools/mavflightmodes.py
|
45
|
#!/usr/bin/env python
'''
show changes in flight modes
'''
import sys, time, datetime, os
from argparse import ArgumentParser
parser = ArgumentParser(description=__doc__)
parser.add_argument("logs", metavar="LOG", nargs="+")
args = parser.parse_args()
from pymavlink import mavutil
def flight_modes(filename):
'''show flight modes for a log file'''
print("Processing log %s" % filename)
mlog = mavutil.mavlink_connection(filename)
mode = ""
previous_mode = ""
mode_start_timestamp = -1
time_in_mode = {}
previous_percent = -1
seconds_per_percent = -1
filesize = os.path.getsize(filename)
while True:
m = mlog.recv_match(type=['SYS_STATUS','HEARTBEAT','MODE'],
condition='MAV.flightmode!="%s"' % mlog.flightmode)
if m is None:
break
print('%s MAV.flightmode=%-12s (MAV.timestamp=%u %u%%)' % (
time.asctime(time.localtime(m._timestamp)),
mlog.flightmode,
m._timestamp, mlog.percent))
mode = mlog.flightmode
if (mode not in time_in_mode):
time_in_mode[mode] = 0
if (mode_start_timestamp == -1):
mode_start_timestamp = m._timestamp
elif (previous_mode != "" and previous_mode != mode):
time_in_mode[previous_mode] = time_in_mode[previous_mode] + (m._timestamp - mode_start_timestamp)
#figure out how many seconds per percentage point so I can
        #calculate how many seconds for the final mode
if (seconds_per_percent == -1 and previous_percent != -1
and previous_percent != mlog.percent):
seconds_per_percent = (m._timestamp - mode_start_timestamp) / (mlog.percent - previous_percent)
mode_start_timestamp = m._timestamp
previous_mode = mode
previous_percent = mlog.percent
#put a whitespace line before the per-mode report
print()
print("Time per mode:")
#need to get the time in the final mode
if (seconds_per_percent != -1):
seconds_remaining = (100.0 - previous_percent) * seconds_per_percent
time_in_mode[previous_mode] = time_in_mode[previous_mode] + seconds_remaining
total_flight_time = 0
        for key, value in time_in_mode.items():
            total_flight_time = total_flight_time + value
        for key, value in time_in_mode.items():
            print('%-12s %s %.2f%%' % (key, str(datetime.timedelta(seconds=int(value))), (value / total_flight_time) * 100.0))
else:
#can't print time in mode if only one mode during flight
print(previous_mode, " 100% of flight time")
for filename in args.logs:
flight_modes(filename)
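# Illustrative invocation (log file names are hypothetical):
#
#     python mavflightmodes.py flight1.tlog flight2.BIN
#
# Each mode change is printed with its timestamp, followed by a per-mode time
# summary once the log has been fully read.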
|
caot/intellij-community
|
refs/heads/master
|
python/lib/Lib/colorsys.py
|
92
|
"""Conversion functions between RGB and other color systems.
This modules provides two functions for each color system ABC:
rgb_to_abc(r, g, b) --> a, b, c
abc_to_rgb(a, b, c) --> r, g, b
All inputs and outputs are triples of floats in the range [0.0...1.0]
(with the exception of I and Q, which cover a slightly larger range).
Inputs outside the valid range may cause exceptions or invalid outputs.
Supported color systems:
RGB: Red, Green, Blue components
YIQ: Luminance, Chrominance (used by composite video signals)
HLS: Hue, Luminance, Saturation
HSV: Hue, Saturation, Value
"""
# References:
# http://en.wikipedia.org/wiki/YIQ
# http://en.wikipedia.org/wiki/HLS_color_space
# http://en.wikipedia.org/wiki/HSV_color_space
__all__ = ["rgb_to_yiq","yiq_to_rgb","rgb_to_hls","hls_to_rgb",
"rgb_to_hsv","hsv_to_rgb"]
# Some floating point constants
ONE_THIRD = 1.0/3.0
ONE_SIXTH = 1.0/6.0
TWO_THIRD = 2.0/3.0
# YIQ: used by composite video signals (linear combinations of RGB)
# Y: perceived grey level (0.0 == black, 1.0 == white)
# I, Q: color components
def rgb_to_yiq(r, g, b):
y = 0.30*r + 0.59*g + 0.11*b
i = 0.60*r - 0.28*g - 0.32*b
q = 0.21*r - 0.52*g + 0.31*b
return (y, i, q)
def yiq_to_rgb(y, i, q):
r = y + 0.948262*i + 0.624013*q
g = y - 0.276066*i - 0.639810*q
b = y - 1.105450*i + 1.729860*q
if r < 0.0: r = 0.0
if g < 0.0: g = 0.0
if b < 0.0: b = 0.0
if r > 1.0: r = 1.0
if g > 1.0: g = 1.0
if b > 1.0: b = 1.0
return (r, g, b)
# HLS: Hue, Luminance, Saturation
# H: position in the spectrum
# L: color lightness
# S: color saturation
def rgb_to_hls(r, g, b):
maxc = max(r, g, b)
minc = min(r, g, b)
# XXX Can optimize (maxc+minc) and (maxc-minc)
l = (minc+maxc)/2.0
if minc == maxc: return 0.0, l, 0.0
if l <= 0.5: s = (maxc-minc) / (maxc+minc)
else: s = (maxc-minc) / (2.0-maxc-minc)
rc = (maxc-r) / (maxc-minc)
gc = (maxc-g) / (maxc-minc)
bc = (maxc-b) / (maxc-minc)
if r == maxc: h = bc-gc
elif g == maxc: h = 2.0+rc-bc
else: h = 4.0+gc-rc
h = (h/6.0) % 1.0
return h, l, s
def hls_to_rgb(h, l, s):
if s == 0.0: return l, l, l
if l <= 0.5: m2 = l * (1.0+s)
else: m2 = l+s-(l*s)
m1 = 2.0*l - m2
return (_v(m1, m2, h+ONE_THIRD), _v(m1, m2, h), _v(m1, m2, h-ONE_THIRD))
def _v(m1, m2, hue):
hue = hue % 1.0
if hue < ONE_SIXTH: return m1 + (m2-m1)*hue*6.0
if hue < 0.5: return m2
if hue < TWO_THIRD: return m1 + (m2-m1)*(TWO_THIRD-hue)*6.0
return m1
# HSV: Hue, Saturation, Value
# H: position in the spectrum
# S: color saturation ("purity")
# V: color brightness
def rgb_to_hsv(r, g, b):
maxc = max(r, g, b)
minc = min(r, g, b)
v = maxc
if minc == maxc: return 0.0, 0.0, v
s = (maxc-minc) / maxc
rc = (maxc-r) / (maxc-minc)
gc = (maxc-g) / (maxc-minc)
bc = (maxc-b) / (maxc-minc)
if r == maxc: h = bc-gc
elif g == maxc: h = 2.0+rc-bc
else: h = 4.0+gc-rc
h = (h/6.0) % 1.0
return h, s, v
def hsv_to_rgb(h, s, v):
if s == 0.0: return v, v, v
i = int(h*6.0) # XXX assume int() truncates!
f = (h*6.0) - i
p = v*(1.0 - s)
q = v*(1.0 - s*f)
t = v*(1.0 - s*(1.0-f))
if i%6 == 0: return v, t, p
if i == 1: return q, v, p
if i == 2: return p, v, t
if i == 3: return p, q, v
if i == 4: return t, p, v
if i == 5: return v, p, q
# Cannot get here
|
synergeticsedx/deployment-wipro
|
refs/heads/oxa/master.fic
|
common/djangoapps/util/memcache.py
|
251
|
"""
This module provides a KEY_FUNCTION suitable for use with a memcache backend
so that we can cache any keys, not just ones that memcache would ordinarily accept
"""
from django.utils.encoding import smart_str
import hashlib
import urllib
def fasthash(string):
"""
Hashes `string` into a string representation of a 128-bit digest.
"""
md4 = hashlib.new("md4")
md4.update(string)
return md4.hexdigest()
def cleaned_string(val):
"""
Converts `val` to unicode and URL-encodes special characters
(including quotes and spaces)
"""
return urllib.quote_plus(smart_str(val))
def safe_key(key, key_prefix, version):
"""
Given a `key`, `key_prefix`, and `version`,
return a key that is safe to use with memcache.
`key`, `key_prefix`, and `version` can be numbers, strings, or unicode.
"""
# Clean for whitespace and control characters, which
# cause memcache to raise an exception
key = cleaned_string(key)
key_prefix = cleaned_string(key_prefix)
version = cleaned_string(version)
# Attempt to combine the prefix, version, and key
combined = ":".join([key_prefix, version, key])
# If the total length is too long for memcache, hash it
if len(combined) > 250:
combined = fasthash(combined)
# Return the result
return combined
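# Illustrative behaviour (key values below are hypothetical): short keys survive
# cleaning unchanged apart from URL-encoding, while anything longer than
# memcache's 250-character limit is replaced by an MD4 hex digest:
#
#     safe_key('my key', 'course', 1)    # -> 'course:1:my+key'
#     safe_key('x' * 300, 'course', 1)   # -> 32-character hex digest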
|
pferreir/indico
|
refs/heads/master
|
indico/util/mdx_latex.py
|
3
|
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
"""Extension to python-markdown to support LaTeX (rather than html) output.
Authored by Rufus Pollock: <http://www.rufuspollock.org/>
Reworked by Julian Wulfheide (ju.wulfheide@gmail.com) and
Indico Project (indico-team@cern.ch)
Usage:
======
1. Command Line. A script entitled markdown2latex.py is automatically
installed. For details of usage see help::
$ markdown2latex.py -h
2. As a python-markdown extension::
>>> import markdown
>>> md = markdown.Markdown(None, extensions=['latex'])
>>> # text is input string ...
>>> latex_out = md.convert(text)
3. Directly as a module (slight inversion of std markdown extension setup)::
>>> import markdown
>>> import mdx_latex
>>> md = markdown.Markdown()
>>> latex_mdx = mdx_latex.LaTeXExtension()
>>> latex_mdx.extendMarkdown(md, markdown.__dict__)
>>> out = md.convert(text)
History
=======
Version: 1.0 (November 15, 2006)
* First working version (compatible with markdown 1.5)
* Includes support for tables
Version: 1.1 (January 17, 2007)
* Support for verbatim and images
Version: 1.2 (June 2008)
* Refactor as an extension.
* Make into a proper python/setuptools package.
* Tested with markdown 1.7 but should work with 1.6 and (possibly) 1.5
(though pre/post processor stuff not as worked out there)
Version 1.3: (July 2008)
* Improvements to image output (width)
Version 1.3.1: (August 2009)
* Tiny bugfix to remove duplicate keyword argument and set zip_safe=False
* Add [width=\textwidth] by default for included images
Version 2.0: (June 2011)
* PEP8 cleanup
* Major rework since this was broken by new Python-Markdown releases
Version 2.1: (August 2013)
* Add handler for non locally referenced images, hyperlinks and horizontal rules
* Update math delimiters
"""
import os
import re
import textwrap
import uuid
from io import BytesIO
from mimetypes import guess_extension
from tempfile import NamedTemporaryFile
from urllib.parse import urlparse
from xml.etree import ElementTree as etree
import markdown
import requests
from lxml.html import html5parser
from PIL import Image
from requests.exceptions import ConnectionError, InvalidURL
__version__ = '2.1'
start_single_quote_re = re.compile(r"""(^|\s|")'""")
start_double_quote_re = re.compile(r'''(^|\s|'|`)"''')
end_double_quote_re = re.compile(r'"(,|\.|\s|$)')
Image.init()
IMAGE_FORMAT_EXTENSIONS = {format: ext for (ext, format) in Image.EXTENSION.items()}
safe_mathmode_commands = {
'above', 'abovewithdelims', 'acute', 'aleph', 'alpha', 'amalg', 'And', 'angle', 'approx', 'arccos', 'arcsin',
'arctan', 'arg', 'array', 'Arrowvert', 'arrowvert', 'ast', 'asymp', 'atop', 'atopwithdelims', 'backslash',
'backslash', 'bar', 'Bbb', 'begin', 'beta', 'bf', 'Big', 'big', 'bigcap', 'bigcirc', 'bigcup', 'Bigg', 'bigg',
'Biggl', 'biggl', 'Biggm', 'biggm', 'Biggr', 'biggr', 'Bigl', 'bigl', 'Bigm', 'bigm', 'bigodot', 'bigoplus',
'bigotimes', 'Bigr', 'bigr', 'bigsqcup', 'bigtriangledown', 'bigtriangleup', 'biguplus', 'bigvee', 'bigwedge',
'bmod', 'bot', 'bowtie', 'brace', 'bracevert', 'brack', 'breve', 'buildrel', 'bullet', 'cap', 'cases', 'cdot',
'cdotp', 'cdots', 'check', 'chi', 'choose', 'circ', 'clubsuit', 'colon', 'cong', 'coprod', 'cos', 'cosh', 'cot',
'coth', 'cr', 'csc', 'cup', 'dagger', 'dashv', 'ddagger', 'ddot', 'ddots', 'deg', 'Delta', 'delta', 'det',
'diamond', 'diamondsuit', 'dim', 'displaylines', 'displaystyle', 'div', 'dot', 'doteq', 'dots', 'dotsb', 'dotsc',
'dotsi', 'dotsm', 'dotso', 'Downarrow', 'downarrow', 'ell', 'emptyset', 'end', 'enspace', 'epsilon', 'eqalign',
'eqalignno', 'equiv', 'eta', 'exists', 'exp', 'fbox', 'flat', 'forall', 'frac', 'frak', 'frown', 'Gamma', 'gamma',
'gcd', 'ge', 'geq', 'gets', 'gg', 'grave', 'gt', 'gt', 'hat', 'hbar', 'hbox', 'hdashline', 'heartsuit', 'hline',
'hom', 'hookleftarrow', 'hookrightarrow', 'hphantom', 'hskip', 'hspace', 'Huge', 'huge', 'iff', 'iiint', 'iint',
'Im', 'imath', 'in', 'inf', 'infty', 'int', 'intop', 'iota', 'it', 'jmath', 'kappa', 'ker', 'kern', 'Lambda',
'lambda', 'land', 'langle', 'LARGE', 'Large', 'large', 'LaTeX', 'lbrace', 'lbrack', 'lceil', 'ldotp', 'ldots', 'le',
'left', 'Leftarrow', 'leftarrow', 'leftharpoondown', 'leftharpoonup', 'Leftrightarrow', 'leftrightarrow',
'leftroot', 'leq', 'leqalignno', 'lfloor', 'lg', 'lgroup', 'lim', 'liminf', 'limits', 'limsup', 'll', 'llap',
'lmoustache', 'ln', 'lnot', 'log', 'Longleftarrow', 'longleftarrow', 'Longleftrightarrow', 'longleftrightarrow',
'longmapsto', 'Longrightarrow', 'longrightarrow', 'lor', 'lower', 'lt', 'lt', 'mapsto', 'mathbb', 'mathbf',
'mathbin', 'mathcal', 'mathclose', 'mathfrak', 'mathinner', 'mathit', 'mathop', 'mathopen', 'mathord', 'mathpunct',
'mathrel', 'mathrm', 'mathscr', 'mathsf', 'mathstrut', 'mathtt', 'matrix', 'max', 'mbox', 'mid', 'middle', 'min',
'mit', 'mkern', 'mod', 'models', 'moveleft', 'moveright', 'mp', 'mskip', 'mspace', 'mu', 'nabla', 'natural', 'ne',
'nearrow', 'neg', 'negthinspace', 'neq', 'newline', 'ni', 'nolimits', 'normalsize', 'not', 'notin', 'nu', 'nwarrow',
'odot', 'oint', 'oldstyle', 'Omega', 'omega', 'omicron', 'ominus', 'oplus', 'oslash', 'otimes', 'over', 'overbrace',
'overleftarrow', 'overleftrightarrow', 'overline', 'overrightarrow', 'overset', 'overwithdelims', 'owns',
'parallel', 'partial', 'perp', 'phantom', 'Phi', 'phi', 'Pi', 'pi', 'pm', 'pmatrix', 'pmb', 'pmod', 'pod', 'Pr',
'prec', 'preceq', 'prime', 'prod', 'propto', 'Psi', 'psi', 'qquad', 'quad', 'raise', 'rangle', 'rbrace', 'rbrack',
'rceil', 'Re', 'rfloor', 'rgroup', 'rho', 'right', 'Rightarrow', 'rightarrow', 'rightharpoondown', 'rightharpoonup',
'rightleftharpoons', 'rlap', 'rm', 'rmoustache', 'root', 'S', 'scr', 'scriptscriptstyle', 'scriptsize',
'scriptstyle', 'searrow', 'sec', 'setminus', 'sf', 'sharp', 'Sigma', 'sigma', 'sim', 'simeq', 'sin', 'sinh', 'skew',
'small', 'smallint', 'smash', 'smile', 'Space', 'space', 'spadesuit', 'sqcap', 'sqcup', 'sqrt', 'sqsubseteq',
'sqsupseteq', 'stackrel', 'star', 'strut', 'subset', 'subseteq', 'succ', 'succeq', 'sum', 'sup', 'supset',
'supseteq', 'surd', 'swarrow', 'tan', 'tanh', 'tau', 'TeX', 'text', 'textbf', 'textit', 'textrm', 'textsf',
'textstyle', 'texttt', 'Theta', 'theta', 'thinspace', 'tilde', 'times', 'tiny', 'to', 'top', 'triangle',
'triangleleft', 'triangleright', 'tt', 'underbrace', 'underleftarrow', 'underleftrightarrow', 'underline',
'underrightarrow', 'underset', 'Uparrow', 'uparrow', 'Updownarrow', 'updownarrow', 'uplus', 'uproot', 'Upsilon',
'upsilon', 'varepsilon', 'varphi', 'varpi', 'varrho', 'varsigma', 'vartheta', 'vcenter', 'vdash', 'vdots', 'vec',
'vee', 'Vert', 'vert', 'vphantom', 'wedge', 'widehat', 'widetilde', 'wp', 'wr', 'Xi', 'xi', 'zeta', '\\'
}
class ImageURLException(Exception):
pass
def unescape_html_entities(text):
    out = text.replace('&amp;', '&')
    out = out.replace('&lt;', '<')
    out = out.replace('&gt;', '>')
    out = out.replace('&quot;', '"')
return out
def latex_escape(text, ignore_math=True, ignore_braces=False):
if text is None:
return ''
chars = {
"#": r"\#",
"$": r"\$",
"%": r"\%",
"&": r"\&",
"~": r"\~{}",
"_": r"\_",
"^": r"\^{}",
"\\": r"\textbackslash{}",
"\x0c": "",
"\x0b": ""
}
if not ignore_braces:
chars.update({
"{": r"\{",
"}": r"\}"})
math_segments = []
def substitute(x):
return chars[x.group()]
math_placeholder = f'[*LaTeXmath-{str(uuid.uuid4())}*]'
def math_replace(m):
math_segments.append(m.group(0))
return math_placeholder
if ignore_math:
# Extract math-mode segments and replace with placeholder
text = re.sub(r'\$[^\$]+\$|\$\$(^\$)\$\$', math_replace, text)
pattern = re.compile('|'.join(re.escape(k) for k in chars.keys()))
res = pattern.sub(substitute, text)
if ignore_math:
# Sanitize math-mode segments and put them back in place
math_segments = list(map(sanitize_mathmode, math_segments))
res = re.sub(re.escape(math_placeholder), lambda _: "\\protect " + math_segments.pop(0), res)
return res
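# Illustrative escaping behaviour (hypothetical input): special characters are
# replaced by their LaTeX equivalents, so
#
#     latex_escape('50% of cases & some_var')
#
# returns the string  50\% of cases \& some\_var .  With ignore_math=True (the
# default), $...$ segments are pulled out first, passed through
# sanitize_mathmode() and re-inserted behind \protect.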
def sanitize_mathmode(text):
def _escape_unsafe_command(m):
command = m.group(1)
return m.group(0) if command in safe_mathmode_commands else r'\\' + command
return re.sub(r'\\([a-zA-Z]+|\\)', _escape_unsafe_command, text)
def escape_latex_entities(text):
"""Escape latex reserved characters."""
out = text
out = unescape_html_entities(out)
out = start_single_quote_re.sub(r'\g<1>`', out)
out = start_double_quote_re.sub(r'\g<1>``', out)
out = end_double_quote_re.sub(r"''\g<1>", out)
out = latex_escape(out)
return out
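# Illustrative quote handling (hypothetical input):
#
#     escape_latex_entities('He said "10% done"')
#
# returns  He said ``10\% done''  -- straight double quotes become LaTeX-style
# `` and '' pairs before the remaining reserved characters are escaped.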
def unescape_latex_entities(text):
"""Limit ourselves as this is only used for maths stuff."""
out = text
out = out.replace('\\&', '&')
return out
def latex_render_error(message):
"""Generate nice error box in LaTeX document.
:param message: The error message
:returns: LaTeX code for error box
"""
return textwrap.dedent(r"""
\begin{tcolorbox}[width=\textwidth,colback=red!5!white,colframe=red!75!black,title={Indico rendering error}]
\begin{verbatim}%s\end{verbatim}
\end{tcolorbox}""" % latex_escape(message))
def latex_render_image(src, alt, tmpdir, strict=False):
"""Generate LaTeX code that includes an arbitrary image from a URL.
This involves fetching the image from a web server and figuring out its
MIME type. A temporary file will be created, which is not immediately
deleted since it has to be included in the LaTeX code. It should be handled
by the enclosing code.
:param src: source URL of the image
:param alt: text to use as ``alt="..."``
:param tmpdir: the directory where to put any temporary files
:param strict: whether a faulty URL should break the whole process
:returns: a ``(latex_code, file_path)`` tuple, containing the LaTeX code
and path to the temporary image file.
"""
try:
if urlparse(src).scheme not in ('http', 'https'):
raise ImageURLException(f"URL scheme not supported: {src}")
else:
try:
resp = requests.get(src, verify=False, timeout=5)
except InvalidURL:
raise ImageURLException(f"Cannot understand URL '{src}'")
except (requests.Timeout, ConnectionError):
raise ImageURLException(f"Problem downloading image ({src})")
except requests.TooManyRedirects:
raise ImageURLException(f"Too many redirects downloading image ({src})")
extension = None
if resp.status_code != 200:
raise ImageURLException(f"[{resp.status_code}] Error fetching image")
if resp.headers.get('content-type'):
extension = guess_extension(resp.headers['content-type'])
# as incredible as it might seem, '.jpe' will be the answer in some Python environments
if extension == '.jpe':
extension = '.jpg'
if not extension:
try:
# Try to use PIL to get file type
image = Image.open(BytesIO(resp.content))
# Worst case scenario, assume it's PNG
extension = IMAGE_FORMAT_EXTENSIONS.get(image.format, '.png')
except OSError:
raise ImageURLException("Cannot read image data. Maybe not an image file?")
with NamedTemporaryFile(prefix='indico-latex-', suffix=extension, dir=tmpdir, delete=False) as tempfile:
tempfile.write(resp.content)
except ImageURLException as exc:
if strict:
raise
else:
return latex_render_error(f'Could not include image: {exc}'), None
    # Using the graphicx and adjustbox packages for *max width*
return (textwrap.dedent(r"""
\begin{figure}[H]
\centering
\includegraphics[max width=\linewidth]{%s}
\caption{%s}
\end{figure}
""" % (os.path.basename(tempfile.name), latex_escape(alt))), tempfile.name)
def makeExtension(configs=None):
return LaTeXExtension(configs=configs)
class LaTeXExtension(markdown.Extension):
def __init__(self, configs=None):
self.configs = configs
self.reset()
def extendMarkdown(self, md, md_globals):
self.md = md
# remove escape pattern -- \\(.*) -- as this messes up any embedded
# math and we don't need to escape stuff any more for html
self.md.inlinePatterns.deregister('escape')
latex_tp = LaTeXTreeProcessor(self.configs)
math_pp = MathTextPostProcessor()
link_pp = LinkTextPostProcessor()
unescape_html_pp = UnescapeHtmlTextPostProcessor()
md.treeprocessors.register(latex_tp, 'latex', md.treeprocessors._priority[-1].priority - 1)
md.postprocessors.register(unescape_html_pp, 'unescape_html', md.postprocessors._priority[-1].priority - 1)
md.postprocessors.register(math_pp, 'math', md.postprocessors._priority[-1].priority - 1)
md.postprocessors.register(link_pp, 'link', md.postprocessors._priority[-1].priority - 1)
# Needed for LaTeX postprocessors not to choke on URL-encoded urls
md.inlinePatterns.register(NonEncodedAutoMailPattern(markdown.inlinepatterns.AUTOMAIL_RE, md), 'automail', 110)
def reset(self):
pass
class NonEncodedAutoMailPattern(markdown.inlinepatterns.Pattern):
"""Reimplementation of AutoMailPattern to avoid URL-encoded links."""
def handleMatch(self, m):
el = etree.Element('a')
email = self.unescape(m.group(2))
        email = email.removeprefix('mailto:')
el.text = markdown.util.AtomicString(''.join(email))
el.set('href', f'mailto:{email}')
return el
class LaTeXTreeProcessor(markdown.treeprocessors.Treeprocessor):
def __init__(self, configs):
self.configs = configs
def run(self, doc):
"""
Walk the dom converting relevant nodes to text nodes with relevant
content.
"""
latex_text = self.tolatex(doc)
doc.clear()
doc.text = latex_text
def tolatex(self, ournode):
buffer = ""
subcontent = ""
if ournode.text:
subcontent += escape_latex_entities(ournode.text)
for child in ournode:
subcontent += self.tolatex(child)
if ournode.tag == 'h1':
buffer += '\n\n\\section{%s}\n' % subcontent
elif ournode.tag == 'h2':
buffer += '\n\n\\subsection{%s}\n' % subcontent
elif ournode.tag == 'h3':
buffer += '\n\\subsubsection{%s}\n' % subcontent
elif ournode.tag == 'h4':
buffer += '\n\\paragraph{%s}\n' % subcontent
elif ournode.tag == 'hr':
buffer += r'\noindent\makebox[\linewidth]{\rule{\paperwidth}{0.4pt}}'
elif ournode.tag == 'ul':
# no need for leading \n as one will be provided by li
buffer += """
\\begin{itemize}%s
\\end{itemize}
""" % subcontent
elif ournode.tag == 'ol':
# no need for leading \n as one will be provided by li
buffer += """
\\begin{enumerate}%s
\\end{enumerate}
""" % subcontent
elif ournode.tag == 'li':
buffer += """
\\item %s""" % subcontent.strip()
elif ournode.tag == 'blockquote':
# use quotation rather than quote as quotation can support multiple
# paragraphs
buffer += """
\\begin{quotation}
%s
\\end{quotation}
""" % subcontent.strip()
# ignore 'code' when inside pre tags
# (mkdn produces <pre><code></code></pre>)
elif (ournode.tag == 'pre' or (ournode.tag == 'pre' and ournode.parentNode.tag != 'pre')):
buffer += """
\\begin{verbatim}
%s
\\end{verbatim}
""" % subcontent.strip()
elif ournode.tag == 'q':
buffer += "`%s'" % subcontent.strip()
elif ournode.tag == 'p':
if self.configs.get('apply_br'):
subcontent = subcontent.replace('\n', '\\\\\\relax\n')
buffer += '\n%s\n' % subcontent.strip()
elif ournode.tag == 'strong':
buffer += '\\textbf{%s}' % subcontent.strip()
elif ournode.tag == 'em':
buffer += '\\emph{%s}' % subcontent.strip()
elif ournode.tag in ('table', 'thead', 'tbody', 'tr', 'th', 'td'):
raise RuntimeError('Unexpected table in markdown data for LaTeX')
elif ournode.tag == 'img':
buffer += latex_render_image(ournode.get('src'), ournode.get('alt'), tmpdir=self.configs.get('tmpdir'))[0]
elif ournode.tag == 'a':
# this one gets escaped in convert_link_to_latex
buffer += '<a href="{}">{}</a>'.format(ournode.get('href'), subcontent)
else:
buffer = subcontent
if ournode.tail:
buffer += escape_latex_entities(ournode.tail)
return buffer
class UnescapeHtmlTextPostProcessor(markdown.postprocessors.Postprocessor):
def run(self, text):
return unescape_html_entities(text)
# ========================= MATH =================================
class MathTextPostProcessor(markdown.postprocessors.Postprocessor):
def run(self, instr):
"""
Convert all math sections in {text} whether latex, asciimathml or
latexmathml formatted to latex.
This assumes you are using $$ as your mathematics delimiter (*not* the
standard asciimathml or latexmathml delimiter).
"""
def repl_1(matchobj):
text = unescape_latex_entities(matchobj.group(1))
tmp = text.strip()
if tmp.startswith('\\[') or tmp.startswith('\\begin'):
return text
else:
return '\\[%s\\]\n' % text
def repl_2(matchobj):
text = unescape_latex_entities(matchobj.group(1))
return f'${text}${matchobj.group(2)}'
# $$ ..... $$
pat = re.compile(r'^\$\$([^$]*)\$\$\s*$', re.MULTILINE)
out = pat.sub(repl_1, instr)
# Jones, $x=3$, is ...
pat3 = re.compile(r'\$([^$]+)\$(\s|$)')
out = pat3.sub(repl_2, out)
# # $100 million
# pat2 = re.compile('([^\$])\$([^\$])')
# out = pat2.sub('\g<1>\\$\g<2>', out)
# some extras due to asciimathml
# out = out.replace('\\lt', '<')
# out = out.replace(' * ', ' \\cdot ')
# out = out.replace('\\del', '\\partial')
return out
# ========================== LINKS =================================
class LinkTextPostProcessor(markdown.postprocessors.Postprocessor):
def run(self, instr):
new_blocks = [re.sub(r'<a[^>]*>([^<]+)</a>', lambda m: convert_link_to_latex(m.group(0)).strip(), block)
for block in instr.split("\n\n")]
return '\n\n'.join(new_blocks)
def convert_link_to_latex(instr):
dom = html5parser.fragment_fromstring(instr)
return '\\href{%s}{%s}' % (latex_escape(dom.get('href'), ignore_math=True), dom.text)
|
florentx/OpenUpgrade
|
refs/heads/8.0
|
addons/event/wizard/__init__.py
|
435
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import event_confirm
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Epirex/android_external_chromium_org
|
refs/heads/cm-11.0
|
tools/perf/page_sets/PRESUBMIT.py
|
23
|
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import re
import sys
def LoadSupport(input_api):
if 'cloud_storage' not in globals():
# Avoid leaking changes to global sys.path.
_old_sys_path = sys.path
try:
telemetry_path = os.path.join(os.path.dirname(os.path.dirname(
input_api.PresubmitLocalPath())), 'telemetry')
sys.path = [telemetry_path] + sys.path
from telemetry.page import cloud_storage
globals()['cloud_storage'] = cloud_storage
finally:
sys.path = _old_sys_path
return globals()['cloud_storage']
def _SyncFilesToCloud(input_api, output_api):
"""Searches for .sha1 files and uploads them to Cloud Storage.
It validates all the hashes and skips upload if not necessary.
"""
cloud_storage = LoadSupport(input_api)
# Look in both buckets, in case the user uploaded the file manually. But this
# script focuses on WPR archives, so it only uploads to the internal bucket.
hashes_in_cloud_storage = cloud_storage.List(cloud_storage.INTERNAL_BUCKET)
hashes_in_cloud_storage += cloud_storage.List(cloud_storage.PUBLIC_BUCKET)
results = []
for affected_file in input_api.AffectedFiles(include_deletes=False):
hash_path = affected_file.AbsoluteLocalPath()
file_path, extension = os.path.splitext(hash_path)
if extension != '.sha1':
continue
with open(hash_path, 'rb') as f:
file_hash = f.read(1024).rstrip()
if file_hash in hashes_in_cloud_storage:
results.append(output_api.PresubmitNotifyResult(
'File already in Cloud Storage, skipping upload: %s' % hash_path))
continue
if not re.match('^([A-Za-z0-9]{40})$', file_hash):
results.append(output_api.PresubmitError(
'Hash file does not contain a valid SHA-1 hash: %s' % hash_path))
continue
if not os.path.exists(file_path):
results.append(output_api.PresubmitError(
'Hash file exists, but file not found: %s' % hash_path))
continue
if cloud_storage.GetHash(file_path) != file_hash:
results.append(output_api.PresubmitError(
'Hash file does not match file\'s actual hash: %s' % hash_path))
continue
try:
cloud_storage.Insert(cloud_storage.INTERNAL_BUCKET, file_hash, file_path)
results.append(output_api.PresubmitNotifyResult(
'Uploaded file to Cloud Storage: %s' % hash_path))
except cloud_storage.CloudStorageError, e:
results.append(output_api.PresubmitError(
'Unable to upload to Cloud Storage: %s\n\n%s' % (hash_path, e)))
return results
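# Illustrative layout this check operates on (file names are hypothetical):
#
#     page_sets/data/top_sites_000.wpr         <- WPR archive present locally
#     page_sets/data/top_sites_000.wpr.sha1    <- committed 40-character SHA-1
#
# Only the .sha1 file is part of the change; _SyncFilesToCloud() uploads the
# matching archive to the internal bucket when the hash checks out.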
def CheckChangeOnUpload(input_api, output_api):
return _SyncFilesToCloud(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return _SyncFilesToCloud(input_api, output_api)
|
mattmccarthy11/vidly-development
|
refs/heads/master
|
bk/mediacore/controllers/admin/index.py
|
14
|
from mediadrop.controllers.admin.index import *
|