| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 2 to 1.05M | stringlengths 5 to 104 | stringlengths 4 to 251 | stringclasses 1 value | stringclasses 15 values | int32 2 to 1.05M |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Schedule(Model):
"""The schedule according to which jobs will be created.
:param do_not_run_until: The earliest time at which any job may be created
under this job schedule. If you do not specify a doNotRunUntil time, the
schedule becomes ready to create jobs immediately.
:type do_not_run_until: datetime
:param do_not_run_after: A time after which no job will be created under
this job schedule. The schedule will move to the completed state as soon
as this deadline is past and there is no active job under this job
schedule. If you do not specify a doNotRunAfter time, and you are creating
a recurring job schedule, the job schedule will remain active until you
explicitly terminate it.
:type do_not_run_after: datetime
:param start_window: The time interval, starting from the time at which
the schedule indicates a job should be created, within which a job must be
created. If a job is not created within the startWindow interval, then the
'opportunity' is lost; no job will be created until the next recurrence of
the schedule. If the schedule is recurring, and the startWindow is longer
than the recurrence interval, then this is equivalent to an infinite
startWindow, because the job that is 'due' in one recurrenceInterval is
not carried forward into the next recurrence interval. The default is
infinite. The minimum value is 1 minute. If you specify a lower value, the
Batch service rejects the schedule with an error; if you are calling the
REST API directly, the HTTP status code is 400 (Bad Request).
:type start_window: timedelta
:param recurrence_interval: The time interval between the start times of
two successive jobs under the job schedule. A job schedule can have at
most one active job under it at any given time. Because a job schedule can
have at most one active job under it at any given time, if it is time to
create a new job under a job schedule, but the previous job is still
running, the Batch service will not create the new job until the previous
job finishes. If the previous job does not finish within the startWindow
period of the new recurrenceInterval, then no new job will be scheduled
for that interval. For recurring jobs, you should normally specify a
jobManagerTask in the jobSpecification. If you do not use jobManagerTask,
you will need an external process to monitor when jobs are created, add
tasks to the jobs and terminate the jobs ready for the next recurrence.
The default is that the schedule does not recur: one job is created,
within the startWindow after the doNotRunUntil time, and the schedule is
complete as soon as that job finishes. The minimum value is 1 minute. If
you specify a lower value, the Batch service rejects the schedule with an
error; if you are calling the REST API directly, the HTTP status code is
400 (Bad Request).
:type recurrence_interval: timedelta
"""
_attribute_map = {
'do_not_run_until': {'key': 'doNotRunUntil', 'type': 'iso-8601'},
'do_not_run_after': {'key': 'doNotRunAfter', 'type': 'iso-8601'},
'start_window': {'key': 'startWindow', 'type': 'duration'},
'recurrence_interval': {'key': 'recurrenceInterval', 'type': 'duration'},
}
def __init__(self, do_not_run_until=None, do_not_run_after=None, start_window=None, recurrence_interval=None):
self.do_not_run_until = do_not_run_until
self.do_not_run_after = do_not_run_after
self.start_window = start_window
self.recurrence_interval = recurrence_interval
| SUSE/azure-sdk-for-python | azure-batch/azure/batch/models/schedule.py | Python | mit | 4,205 |
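For illustration, here is a minimal instantiation sketch for the Schedule model above; the field values are hypothetical examples, not taken from the source.

from datetime import datetime, timedelta

example_schedule = Schedule(
    do_not_run_until=datetime(2017, 1, 1, 8, 0),    # earliest time a job may be created
    do_not_run_after=datetime(2017, 12, 31, 0, 0),  # schedule completes once this time passes
    start_window=timedelta(minutes=5),              # a due job must be created within 5 minutes
    recurrence_interval=timedelta(hours=1),         # at most one new job per hour
)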
# -*- coding: utf-8 -*-
#
# CampbellSiegert.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
#
# Example script that applies Campbell's theorem and Siegert's rate approximation.
#
# This script calculates the firing rate of an integrate-and-fire neuron
# in response to a series of Poisson generators, each specified with
# a rate and a synaptic weight.
# The calculated rate is compared with a simulation using the iaf_psc_alpha model.
#
# Sven Schrader, Nov 2008; Siegert implementation by Tom Tetzlaff
from scipy.special import erf
from scipy.optimize import fmin
import numpy
from numpy import sqrt, exp
import pylab
import nest
# example 1
weights = [0.1] # mV psp amplitudes
rates = [8000.] # Hz
# example 2, should have same result as example 1
#weights = [0.1, 0.1]
#rates = [4000., 4000.]
Cm = 250. # pF, capacitance
tau_syn_ex = 0.5 # ms, excitatory synaptic time constant
tau_syn_in = 2.0 # ms, inhibitory synaptic time constant
tau_m = 20. # ms, membrane time constant
tref = 2.0 # ms, refractory period
V0 = 0.0 # mV, resting potential
Vth = 20.0 # mV, firing threshold
simtime = 20000 # ms
n_neurons = 10 # number of simulated neurons
pi = numpy.pi
e = exp(1)
pF = 1e-12
ms = 1e-3
pA = 1e-12
mV = 1e-3
mu = 0.0
sigma2 = 0.0
J = []
assert(len(weights) == len(rates))
########################################################################################
# Analytical section
for rate, weight in zip(rates, weights):
if weight > 0:
tau_s = tau_syn_ex
else:
tau_s = tau_syn_in
t_psp = numpy.arange(0, 10 * (tau_m*ms + tau_s*ms),0.0001 )
# calculation of a single PSP
psp = lambda x: -(Cm*pF) / (tau_s*ms) * (1/(Cm*pF)) * (e/(tau_s*ms)) * \
(((-x * exp(-x/(tau_s*ms))) / (1/(tau_s*ms )-1 / (tau_m*ms))) +\
(exp(-x/(tau_m*ms)) - exp(-x/(tau_s*ms))) / ((1/(tau_s*ms) - 1/(tau_m*ms))**2) )
min_result = fmin(psp, [0], full_output=1, disp=0)
fudge = -1./min_result[1] # fudge scales the PSC amplitude from the PSP amplitude
J.append(Cm * weight / tau_s * fudge) # synaptic weight expressed as a PSC amplitude
# Campbell's Theorem
# the mean membrane potential mu and the variance sigma2 add up over the Poisson sources
mu += ((V0*mV) + rate * \
(J[-1]*pA) * (tau_s*ms) * e * (tau_m*ms) / (Cm*pF))
sigma2 += rate * \
(2* tau_m*ms + tau_s*ms ) * \
(J[-1]*pA * tau_s*ms *e * tau_m*ms/ ( 2 * (Cm*pF) * (tau_m*ms + tau_s*ms) ) ) ** 2
sigma = sqrt(sigma2)
# Siegert's rate approximation
num_iterations = 100
ul = (Vth*mV - mu) / (sigma)/sqrt(2)
ll = (V0*mV - mu) / (sigma)/sqrt(2)
interval = (ul-ll)/num_iterations
tmpsum = 0.0
for cu in range(0,num_iterations+1):
u = ll + cu * interval
f = exp(u**2)*(1+erf(u))
tmpsum += interval * sqrt(pi) * f
r = 1. / (tref*ms + tau_m*ms * tmpsum)
########################################################################################
# Simulation section
nest.ResetKernel()
nest.sr('20 setverbosity')
neurondict = {'V_th':Vth, 'tau_m':tau_m, 'tau_syn_ex':tau_syn_ex,'tau_syn_in':tau_syn_in, 'C_m':Cm, 'E_L':V0, 't_ref':tref, 'V_m': V0, 'V_reset': V0}
if (mu*1000) < Vth:
neurondict['V_m'] = mu*1000.
nest.SetDefaults('iaf_psc_alpha', neurondict)
n = nest.Create('iaf_psc_alpha', n_neurons)
n_free = nest.Create('iaf_psc_alpha', 1 ,[{'V_th':999999.}]) # high threshold as we want free membrane potential
pg = nest.Create('poisson_generator', len(rates), [ {'rate':float(rate_i)} for rate_i in rates] )
vm = nest.Create('voltmeter', 1, [{'record_to':['memory'], 'withtime':True, 'withgid':True, 'interval':.1}])
sd = nest.Create('spike_detector',1, [{'record_to':['memory'], 'withtime':True, 'withgid':True}])
for i, currentpg in enumerate(pg):
nest.Connect([currentpg], n, syn_spec={'weight': float(J[i]), 'delay': 0.1})
nest.Connect([currentpg], n_free, syn_spec={'weight':J[i]})
nest.Connect(vm, n_free)
nest.Connect(n, sd)
nest.Simulate(simtime)
# free membrane potential (first 100 steps are omitted)
v_free = nest.GetStatus(vm,'events')[0]['V_m'][100:-1]
print('mean membrane potential (actual / calculated): {0} / {1}'.format(numpy.mean(v_free), mu * 1000))
print('variance (actual / calculated): {0} / {1}'.format(numpy.var(v_free), sigma2 * 1e6))
print('firing rate (actual / calculated): {0} / {1}'.format(nest.GetStatus(sd, 'n_events')[0] / (n_neurons * simtime * ms), r))
| INM-6/nest-git-migration | pynest/examples/CampbellSiegert.py | Python | gpl-2.0 | 5,481 |
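The analytical section above computes Siegert's rate approximation inline; the following is a hedged, self-contained sketch of the same calculation as a standalone function (the function name and argument order are assumptions, not part of the original script).

import numpy as np
from scipy.special import erf

def siegert_rate(mu, sigma, v_th, v_reset, tau_m, t_ref, n_steps=100):
    """Approximate firing rate (Hz) of an LIF neuron driven by Gaussian input.

    mu, sigma, v_th and v_reset are in volts; tau_m and t_ref in seconds.
    Uses the same rectangle-rule integration as the example above.
    """
    lower = (v_reset - mu) / (sigma * np.sqrt(2))
    upper = (v_th - mu) / (sigma * np.sqrt(2))
    u = np.linspace(lower, upper, n_steps + 1)
    integrand = np.exp(u ** 2) * (1 + erf(u))
    integral = np.sqrt(np.pi) * integrand.sum() * (upper - lower) / n_steps
    return 1.0 / (t_ref + tau_m * integral)

# e.g. siegert_rate(mu=0.010, sigma=0.004, v_th=0.020, v_reset=0.0,
#                   tau_m=0.020, t_ref=0.002) returns a rate in spikes per second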
"""
inotify logs filesystem activity that may be directly or indirectly caused
by the test that is running. It requires the inotify-tools package, more
specifically, the inotifywait tool.
Heavily inspired / shamelessly copied from the kvm_stat profiler.
:copyright: Red Hat 2013
:author: Cleber Rosa <cleber@redhat.com>
"""
import os
import subprocess
import logging
from autotest.client import profiler, os_dep
class inotify(profiler.profiler):
"""
Profiler based on inotifywait from inotify-tools
"""
version = 1
def _build_command_line(self, paths, test):
default_opts = "-m -t 0 --format='%T|%,e|%w|%f' --timefmt '%m/%d %X'"
paths_valid = [p for p in paths if os.path.exists(p)]
paths_str = ' '.join(paths_valid)
output_option = '-o %s' % os.path.join(test.profdir, 'inotify')
options = '%s %s' % (default_opts, output_option)
return '%s %s %s' % (self.inotifywait, options, paths_str)
def initialize(self, paths=[]):
try:
self.inotifywait = os_dep.command('inotifywait')
except ValueError:
logging.error('Command inotifywait from inotify-tools is not present')
self.inotifywait = None
self.paths = paths
def start(self, test):
if self.inotifywait is None:
logging.error("Profiler inotify won't perform any action because "
"the inotifywait tool from inotify-tools is missing "
"on this system")
return
# monitor the test directories by default
if not self.paths:
self.paths = [test.bindir, test.srcdir, test.tmpdir]
self.command_line = self._build_command_line(self.paths, test)
logging.debug('running inotify profiler command: %s',
self.command_line)
p = subprocess.Popen(self.command_line,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.pid = p.pid
def stop(self, test):
if self.inotifywait is None:
return
try:
os.kill(self.pid, 15)
except OSError:
pass
def report(self, test):
return None
| lmr/autotest | client/profilers/inotify/inotify.py | Python | gpl-2.0 | 2,298 |
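As a hedged illustration of the command the profiler above assembles, here is a standalone reconstruction of _build_command_line; the binary path, monitored directories and output directory are hypothetical.

import os

inotifywait = '/usr/bin/inotifywait'                        # assumed install location
paths = ['/usr/local/autotest/tests/sleeptest']             # hypothetical monitored paths
profdir = '/usr/local/autotest/results/default/profiling'   # hypothetical profiler output dir

default_opts = "-m -t 0 --format='%T|%,e|%w|%f' --timefmt '%m/%d %X'"
output_option = '-o %s' % os.path.join(profdir, 'inotify')
command_line = '%s %s %s %s' % (inotifywait, default_opts, output_option, ' '.join(paths))
print(command_line)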
from resources.lib.handler.requestHandler import cRequestHandler
from resources.lib.util import cUtil
from resources.lib.parser import cParser
from resources.lib.gui.gui import cGui
from resources.hosters.hoster import iHoster
import urllib
class cHoster(iHoster):
def __init__(self):
self.__sDisplayName = 'VideoWeed'
self.__sFileName = self.__sDisplayName
def getDisplayName(self):
return self.__sDisplayName
def setDisplayName(self, sDisplayName):
self.__sDisplayName = sDisplayName + ' [COLOR skyblue]'+self.__sDisplayName+'[/COLOR]'
def setFileName(self, sFileName):
self.__sFileName = sFileName
def getFileName(self):
return self.__sFileName
def getPluginIdentifier(self):
return 'videoweed'
def isDownloadable(self):
return True
def isJDownloaderable(self):
return True
def getPattern(self):
return 'flashvars.file=\"([^\"]+)\"'
def __getKey(self):
oRequestHandler = cRequestHandler(self.__sUrl)
sHtmlContent = oRequestHandler.request()
sPattern = 'var fkz="(.+?)";'
oParser = cParser()
aResult = oParser.parse(sHtmlContent, sPattern)
if (aResult[0] == True):
aResult = aResult[1][0].replace('.','%2E')
return aResult
return ''
def __getIdFromUrl(self):
sPattern = "v=([^&]+)"
oParser = cParser()
aResult = oParser.parse(self.__sUrl, sPattern)
if (aResult[0] == True):
return aResult[1][0]
return ''
def setUrl(self, sUrl):
self.__sUrl = str(sUrl)
self.__sUrl = self.__sUrl.replace('http://www.videoweed.es/', '')
self.__sUrl = self.__sUrl.replace('http://embed.videoweed.es/', '')
self.__sUrl = self.__sUrl.replace('file/', '')
self.__sUrl = self.__sUrl.replace('embed.php?v=', '')
self.__sUrl = self.__sUrl.replace('&width=711&height=400', '')
self.__sUrl = 'http://embed.videoweed.es/embed.php?v=' + str(self.__sUrl)
def checkUrl(self, sUrl):
return True
def getUrl(self):
return self.__sUrl
def getMediaLink(self):
return self.__getMediaLinkForGuest()
def __getMediaLinkForGuest(self):
cGui().showInfo('Resolve', self.__sDisplayName, 5)
api_call = ('http://www.videoweed.es/api/player.api.php?user=undefined&codes=1&file=%s&pass=undefined&key=%s') % (self.__getIdFromUrl(), self.__getKey())
oRequest = cRequestHandler(api_call)
sHtmlContent = oRequest.request()
sPattern = 'url=(.+?)&title'
oParser = cParser()
aResult = oParser.parse(sHtmlContent, sPattern)
if (aResult[0] == True):
stream_url = urllib.unquote(aResult[1][0])
return True, stream_url
return False, False
| mino60/venom-xbmc-addons-beta | plugin.video.vstream/resources/hosters/videoweed.py | Python | gpl-2.0 | 2,917 |
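For illustration, a hedged walk-through of the URL normalisation performed by setUrl above; the video id 'abc123' is made up.

sUrl = 'http://www.videoweed.es/file/abc123'
sUrl = sUrl.replace('http://www.videoweed.es/', '')
sUrl = sUrl.replace('http://embed.videoweed.es/', '')
sUrl = sUrl.replace('file/', '')
sUrl = sUrl.replace('embed.php?v=', '')
sUrl = sUrl.replace('&width=711&height=400', '')
print('http://embed.videoweed.es/embed.php?v=' + sUrl)
# prints: http://embed.videoweed.es/embed.php?v=abc123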
from django.conf.urls.defaults import patterns, include, url
from django.contrib.auth.decorators import login_required
from views import (OrganizationAdminView,
CustomOrganizationCreate,
CustomOrganizationDetail,
CustomOrganizationUpdate,
CustomOrganizationUserCreate,
CustomOrganizationUserUpdate,
CustomOrganizationUserRemind,
CustomOrganizationUserDelete,
CustomOrganizationUserList,
CustomOrganizationUserLeave)
urlpatterns = patterns('',
# Use custom organization user URLs
url(r'^add/$', view=login_required(CustomOrganizationCreate.as_view()),
name="organization_add"),
url(r'^(?P<organization_pk>[\d]+)/$',
view=login_required(CustomOrganizationDetail.as_view()),
name="organization_detail"),
url(r'^(?P<organization_pk>[\d]+)/edit/$',
view=login_required(CustomOrganizationUpdate.as_view()),
name="organization_edit"),
url(r'^(?P<organization_pk>[\d]+)/admin/$',
view=login_required(OrganizationAdminView.as_view()),
name="organization_admin"),
url(r'^(?P<organization_pk>[\d]+)/people/add/$',
view=login_required(CustomOrganizationUserCreate.as_view()),
name="organization_user_add"),
url(r'^(?P<organization_pk>[\d]+)/people/(?P<user_pk>[\d]+)/edit/$',
view=login_required(CustomOrganizationUserUpdate.as_view()),
name="organization_user_edit"),
url(r'^(?P<organization_pk>[\d]+)/people/(?P<user_pk>[\d]+)/delete/$',
view=login_required(CustomOrganizationUserDelete.as_view()),
name="organization_user_delete"),
url(r'^(?P<organization_pk>[\d]+)/people/(?P<user_pk>[\d]+)/remind/$',
view=login_required(CustomOrganizationUserRemind.as_view()),
name="organization_user_remind"),
url(r'^(?P<organization_pk>[\d]+)/people/$',
view=login_required(CustomOrganizationUserList.as_view()),
name="organization_user_list"),
url(r'^(?P<organization_pk>[\d]+)/people/leave/$',
view=login_required(CustomOrganizationUserLeave.as_view()),
name="organization_user_leave"),
url(r'', include('organizations.urls')),
)
| aptivate/econsensus | django/econsensus/custom_organizations/urls.py | Python | gpl-3.0 | 2,276 |
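For illustration, a hedged sketch of how the capture groups in these patterns turn a request path into view keyword arguments; the path '12/people/42/edit/' is a made-up example.

import re

# the organization_user_edit pattern above, matched against a hypothetical path
pattern = r'^(?P<organization_pk>[\d]+)/people/(?P<user_pk>[\d]+)/edit/$'
match = re.match(pattern, '12/people/42/edit/')
print(match.groupdict())   # {'organization_pk': '12', 'user_pk': '42'}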
from __future__ import absolute_import
import re
from django.http import HttpResponseRedirect
from django.conf import settings
from ..settings import ALLOW_ANONYMOUS_ACCESS
EXEMPT_URLS = [re.compile(settings.LOGIN_URL.lstrip('/'))]
if hasattr(settings, 'LOGIN_EXEMPT_URLS'):
EXEMPT_URLS += [re.compile(expr) for expr in settings.LOGIN_EXEMPT_URLS]
class LoginRequiredMiddleware:
"""
Middleware that requires a user to be authenticated to view any page other
than LOGIN_URL. Exemptions to this requirement can optionally be specified
in settings via a list of regular expressions in LOGIN_EXEMPT_URLS (which
you can copy from your urls.py).
Requires authentication middleware and template context processors to be
loaded. You'll get an error if they aren't.
"""
def process_request(self, request):
if not ALLOW_ANONYMOUS_ACCESS:
assert hasattr(request, 'user'), "The Login Required middleware\
requires authentication middleware to be installed. Edit your\
MIDDLEWARE_CLASSES setting to insert\
'django.contrib.auth.middleware.AuthenticationMiddleware'. If that doesn't\
work, ensure your TEMPLATE_CONTEXT_PROCESSORS setting includes\
'django.core.context_processors.auth'."
if not request.user.is_authenticated():
path = request.path_info.lstrip('/')
if not any(m.match(path) for m in EXEMPT_URLS):
return HttpResponseRedirect(settings.LOGIN_URL)
| rosarior/rua | rua/apps/common/middleware/login_required_middleware.py | Python | gpl-3.0 | 1,503 |
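A hedged sketch of the settings this middleware reads; the exempt patterns below are made-up examples (they are matched against request.path_info with the leading slash stripped).

# settings.py sketch (hypothetical values)
LOGIN_URL = '/accounts/login/'
LOGIN_EXEMPT_URLS = (
    r'^accounts/password_reset/',  # allow anonymous password resets
    r'^static/',                   # serve static assets without login
)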
"""Tests for events.py."""
import functools
import gc
import io
import os
import platform
import re
import signal
import socket
try:
import ssl
except ImportError:
ssl = None
import subprocess
import sys
import threading
import time
import errno
import unittest
from unittest import mock
import weakref
import asyncio
from asyncio import proactor_events
from asyncio import selector_events
from asyncio import sslproto
from asyncio import test_utils
try:
from test import support
except ImportError:
from asyncio import test_support as support
def data_file(filename):
if hasattr(support, 'TEST_HOME_DIR'):
fullname = os.path.join(support.TEST_HOME_DIR, filename)
if os.path.isfile(fullname):
return fullname
fullname = os.path.join(os.path.dirname(__file__), filename)
if os.path.isfile(fullname):
return fullname
raise FileNotFoundError(filename)
def osx_tiger():
"""Return True if the platform is Mac OS 10.4 or older."""
if sys.platform != 'darwin':
return False
version = platform.mac_ver()[0]
version = tuple(map(int, version.split('.')))
return version < (10, 5)
ONLYCERT = data_file('ssl_cert.pem')
ONLYKEY = data_file('ssl_key.pem')
SIGNED_CERTFILE = data_file('keycert3.pem')
SIGNING_CA = data_file('pycacert.pem')
PEERCERT = {'serialNumber': 'B09264B1F2DA21D1',
'version': 1,
'subject': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'issuer': ((('countryName', 'XY'),),
(('organizationName', 'Python Software Foundation CA'),),
(('commonName', 'our-ca-server'),)),
'notAfter': 'Nov 13 19:47:07 2022 GMT',
'notBefore': 'Jan 4 19:47:07 2013 GMT'}
class MyBaseProto(asyncio.Protocol):
connected = None
done = None
def __init__(self, loop=None):
self.transport = None
self.state = 'INITIAL'
self.nbytes = 0
if loop is not None:
self.connected = asyncio.Future(loop=loop)
self.done = asyncio.Future(loop=loop)
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
if self.connected:
self.connected.set_result(None)
def data_received(self, data):
assert self.state == 'CONNECTED', self.state
self.nbytes += len(data)
def eof_received(self):
assert self.state == 'CONNECTED', self.state
self.state = 'EOF'
def connection_lost(self, exc):
assert self.state in ('CONNECTED', 'EOF'), self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MyProto(MyBaseProto):
def connection_made(self, transport):
super().connection_made(transport)
transport.write(b'GET / HTTP/1.0\r\nHost: example.com\r\n\r\n')
class MyDatagramProto(asyncio.DatagramProtocol):
done = None
def __init__(self, loop=None):
self.state = 'INITIAL'
self.nbytes = 0
if loop is not None:
self.done = asyncio.Future(loop=loop)
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'INITIALIZED'
def datagram_received(self, data, addr):
assert self.state == 'INITIALIZED', self.state
self.nbytes += len(data)
def error_received(self, exc):
assert self.state == 'INITIALIZED', self.state
def connection_lost(self, exc):
assert self.state == 'INITIALIZED', self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MyReadPipeProto(asyncio.Protocol):
done = None
def __init__(self, loop=None):
self.state = ['INITIAL']
self.nbytes = 0
self.transport = None
if loop is not None:
self.done = asyncio.Future(loop=loop)
def connection_made(self, transport):
self.transport = transport
assert self.state == ['INITIAL'], self.state
self.state.append('CONNECTED')
def data_received(self, data):
assert self.state == ['INITIAL', 'CONNECTED'], self.state
self.nbytes += len(data)
def eof_received(self):
assert self.state == ['INITIAL', 'CONNECTED'], self.state
self.state.append('EOF')
def connection_lost(self, exc):
if 'EOF' not in self.state:
self.state.append('EOF') # It is okay if EOF is missed.
assert self.state == ['INITIAL', 'CONNECTED', 'EOF'], self.state
self.state.append('CLOSED')
if self.done:
self.done.set_result(None)
class MyWritePipeProto(asyncio.BaseProtocol):
done = None
def __init__(self, loop=None):
self.state = 'INITIAL'
self.transport = None
if loop is not None:
self.done = asyncio.Future(loop=loop)
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
def connection_lost(self, exc):
assert self.state == 'CONNECTED', self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MySubprocessProtocol(asyncio.SubprocessProtocol):
def __init__(self, loop):
self.state = 'INITIAL'
self.transport = None
self.connected = asyncio.Future(loop=loop)
self.completed = asyncio.Future(loop=loop)
self.disconnects = {fd: asyncio.Future(loop=loop) for fd in range(3)}
self.data = {1: b'', 2: b''}
self.returncode = None
self.got_data = {1: asyncio.Event(loop=loop),
2: asyncio.Event(loop=loop)}
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
self.connected.set_result(None)
def connection_lost(self, exc):
assert self.state == 'CONNECTED', self.state
self.state = 'CLOSED'
self.completed.set_result(None)
def pipe_data_received(self, fd, data):
assert self.state == 'CONNECTED', self.state
self.data[fd] += data
self.got_data[fd].set()
def pipe_connection_lost(self, fd, exc):
assert self.state == 'CONNECTED', self.state
if exc:
self.disconnects[fd].set_exception(exc)
else:
self.disconnects[fd].set_result(exc)
def process_exited(self):
assert self.state == 'CONNECTED', self.state
self.returncode = self.transport.get_returncode()
class EventLoopTestsMixin:
def setUp(self):
super().setUp()
self.loop = self.create_event_loop()
self.set_event_loop(self.loop)
def tearDown(self):
# just in case if we have transport close callbacks
if not self.loop.is_closed():
test_utils.run_briefly(self.loop)
self.loop.close()
gc.collect()
super().tearDown()
def test_run_until_complete_nesting(self):
@asyncio.coroutine
def coro1():
yield
@asyncio.coroutine
def coro2():
self.assertTrue(self.loop.is_running())
self.loop.run_until_complete(coro1())
self.assertRaises(
RuntimeError, self.loop.run_until_complete, coro2())
# Note: because of the default Windows timing granularity of
# 15.6 msec, we use fairly long sleep times here (~100 msec).
def test_run_until_complete(self):
t0 = self.loop.time()
self.loop.run_until_complete(asyncio.sleep(0.1, loop=self.loop))
t1 = self.loop.time()
self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0)
def test_run_until_complete_stopped(self):
@asyncio.coroutine
def cb():
self.loop.stop()
yield from asyncio.sleep(0.1, loop=self.loop)
task = cb()
self.assertRaises(RuntimeError,
self.loop.run_until_complete, task)
def test_call_later(self):
results = []
def callback(arg):
results.append(arg)
self.loop.stop()
self.loop.call_later(0.1, callback, 'hello world')
t0 = time.monotonic()
self.loop.run_forever()
t1 = time.monotonic()
self.assertEqual(results, ['hello world'])
self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0)
def test_call_soon(self):
results = []
def callback(arg1, arg2):
results.append((arg1, arg2))
self.loop.stop()
self.loop.call_soon(callback, 'hello', 'world')
self.loop.run_forever()
self.assertEqual(results, [('hello', 'world')])
def test_call_soon_threadsafe(self):
results = []
lock = threading.Lock()
def callback(arg):
results.append(arg)
if len(results) >= 2:
self.loop.stop()
def run_in_thread():
self.loop.call_soon_threadsafe(callback, 'hello')
lock.release()
lock.acquire()
t = threading.Thread(target=run_in_thread)
t.start()
with lock:
self.loop.call_soon(callback, 'world')
self.loop.run_forever()
t.join()
self.assertEqual(results, ['hello', 'world'])
def test_call_soon_threadsafe_same_thread(self):
results = []
def callback(arg):
results.append(arg)
if len(results) >= 2:
self.loop.stop()
self.loop.call_soon_threadsafe(callback, 'hello')
self.loop.call_soon(callback, 'world')
self.loop.run_forever()
self.assertEqual(results, ['hello', 'world'])
def test_run_in_executor(self):
def run(arg):
return (arg, threading.get_ident())
f2 = self.loop.run_in_executor(None, run, 'yo')
res, thread_id = self.loop.run_until_complete(f2)
self.assertEqual(res, 'yo')
self.assertNotEqual(thread_id, threading.get_ident())
def test_reader_callback(self):
r, w = test_utils.socketpair()
r.setblocking(False)
bytes_read = bytearray()
def reader():
try:
data = r.recv(1024)
except BlockingIOError:
# Spurious readiness notifications are possible
# at least on Linux -- see man select.
return
if data:
bytes_read.extend(data)
else:
self.assertTrue(self.loop.remove_reader(r.fileno()))
r.close()
self.loop.add_reader(r.fileno(), reader)
self.loop.call_soon(w.send, b'abc')
test_utils.run_until(self.loop, lambda: len(bytes_read) >= 3)
self.loop.call_soon(w.send, b'def')
test_utils.run_until(self.loop, lambda: len(bytes_read) >= 6)
self.loop.call_soon(w.close)
self.loop.call_soon(self.loop.stop)
self.loop.run_forever()
self.assertEqual(bytes_read, b'abcdef')
def test_writer_callback(self):
r, w = test_utils.socketpair()
w.setblocking(False)
def writer(data):
w.send(data)
self.loop.stop()
data = b'x' * 1024
self.loop.add_writer(w.fileno(), writer, data)
self.loop.run_forever()
self.assertTrue(self.loop.remove_writer(w.fileno()))
self.assertFalse(self.loop.remove_writer(w.fileno()))
w.close()
read = r.recv(len(data) * 2)
r.close()
self.assertEqual(read, data)
def _basetest_sock_client_ops(self, httpd, sock):
if not isinstance(self.loop, proactor_events.BaseProactorEventLoop):
# in debug mode, socket operations must fail
# if the socket is not in blocking mode
self.loop.set_debug(True)
sock.setblocking(True)
with self.assertRaises(ValueError):
self.loop.run_until_complete(
self.loop.sock_connect(sock, httpd.address))
with self.assertRaises(ValueError):
self.loop.run_until_complete(
self.loop.sock_sendall(sock, b'GET / HTTP/1.0\r\n\r\n'))
with self.assertRaises(ValueError):
self.loop.run_until_complete(
self.loop.sock_recv(sock, 1024))
with self.assertRaises(ValueError):
self.loop.run_until_complete(
self.loop.sock_accept(sock))
# test in non-blocking mode
sock.setblocking(False)
self.loop.run_until_complete(
self.loop.sock_connect(sock, httpd.address))
self.loop.run_until_complete(
self.loop.sock_sendall(sock, b'GET / HTTP/1.0\r\n\r\n'))
data = self.loop.run_until_complete(
self.loop.sock_recv(sock, 1024))
# consume data
self.loop.run_until_complete(
self.loop.sock_recv(sock, 1024))
sock.close()
self.assertTrue(data.startswith(b'HTTP/1.0 200 OK'))
def test_sock_client_ops(self):
with test_utils.run_test_server() as httpd:
sock = socket.socket()
self._basetest_sock_client_ops(httpd, sock)
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_unix_sock_client_ops(self):
with test_utils.run_test_unix_server() as httpd:
sock = socket.socket(socket.AF_UNIX)
self._basetest_sock_client_ops(httpd, sock)
def test_sock_client_fail(self):
# Make sure that we will get an unused port
address = None
try:
s = socket.socket()
s.bind(('127.0.0.1', 0))
address = s.getsockname()
finally:
s.close()
sock = socket.socket()
sock.setblocking(False)
with self.assertRaises(ConnectionRefusedError):
self.loop.run_until_complete(
self.loop.sock_connect(sock, address))
sock.close()
def test_sock_accept(self):
listener = socket.socket()
listener.setblocking(False)
listener.bind(('127.0.0.1', 0))
listener.listen(1)
client = socket.socket()
client.connect(listener.getsockname())
f = self.loop.sock_accept(listener)
conn, addr = self.loop.run_until_complete(f)
self.assertEqual(conn.gettimeout(), 0)
self.assertEqual(addr, client.getsockname())
self.assertEqual(client.getpeername(), listener.getsockname())
client.close()
conn.close()
listener.close()
@unittest.skipUnless(hasattr(signal, 'SIGKILL'), 'No SIGKILL')
def test_add_signal_handler(self):
caught = 0
def my_handler():
nonlocal caught
caught += 1
# Check error behavior first.
self.assertRaises(
TypeError, self.loop.add_signal_handler, 'boom', my_handler)
self.assertRaises(
TypeError, self.loop.remove_signal_handler, 'boom')
self.assertRaises(
ValueError, self.loop.add_signal_handler, signal.NSIG+1,
my_handler)
self.assertRaises(
ValueError, self.loop.remove_signal_handler, signal.NSIG+1)
self.assertRaises(
ValueError, self.loop.add_signal_handler, 0, my_handler)
self.assertRaises(
ValueError, self.loop.remove_signal_handler, 0)
self.assertRaises(
ValueError, self.loop.add_signal_handler, -1, my_handler)
self.assertRaises(
ValueError, self.loop.remove_signal_handler, -1)
self.assertRaises(
RuntimeError, self.loop.add_signal_handler, signal.SIGKILL,
my_handler)
# Removing SIGKILL doesn't raise, since we don't call signal().
self.assertFalse(self.loop.remove_signal_handler(signal.SIGKILL))
# Now set a handler and handle it.
self.loop.add_signal_handler(signal.SIGINT, my_handler)
os.kill(os.getpid(), signal.SIGINT)
test_utils.run_until(self.loop, lambda: caught)
# Removing it should restore the default handler.
self.assertTrue(self.loop.remove_signal_handler(signal.SIGINT))
self.assertEqual(signal.getsignal(signal.SIGINT),
signal.default_int_handler)
# Removing again returns False.
self.assertFalse(self.loop.remove_signal_handler(signal.SIGINT))
@unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
def test_signal_handling_while_selecting(self):
# Test with a signal actually arriving during a select() call.
caught = 0
def my_handler():
nonlocal caught
caught += 1
self.loop.stop()
self.loop.add_signal_handler(signal.SIGALRM, my_handler)
signal.setitimer(signal.ITIMER_REAL, 0.01, 0) # Send SIGALRM once.
self.loop.run_forever()
self.assertEqual(caught, 1)
@unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
def test_signal_handling_args(self):
some_args = (42,)
caught = 0
def my_handler(*args):
nonlocal caught
caught += 1
self.assertEqual(args, some_args)
self.loop.add_signal_handler(signal.SIGALRM, my_handler, *some_args)
signal.setitimer(signal.ITIMER_REAL, 0.1, 0) # Send SIGALRM once.
self.loop.call_later(0.5, self.loop.stop)
self.loop.run_forever()
self.assertEqual(caught, 1)
def _basetest_create_connection(self, connection_fut, check_sockname=True):
tr, pr = self.loop.run_until_complete(connection_fut)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, asyncio.Protocol)
self.assertIs(pr.transport, tr)
if check_sockname:
self.assertIsNotNone(tr.get_extra_info('sockname'))
self.loop.run_until_complete(pr.done)
self.assertGreater(pr.nbytes, 0)
tr.close()
def test_create_connection(self):
with test_utils.run_test_server() as httpd:
conn_fut = self.loop.create_connection(
lambda: MyProto(loop=self.loop), *httpd.address)
self._basetest_create_connection(conn_fut)
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_connection(self):
# Issue #20682: On Mac OS X Tiger, getsockname() returns a
# zero-length address for UNIX socket.
check_sockname = not osx_tiger()
with test_utils.run_test_unix_server() as httpd:
conn_fut = self.loop.create_unix_connection(
lambda: MyProto(loop=self.loop), httpd.address)
self._basetest_create_connection(conn_fut, check_sockname)
def test_create_connection_sock(self):
with test_utils.run_test_server() as httpd:
sock = None
infos = self.loop.run_until_complete(
self.loop.getaddrinfo(
*httpd.address, type=socket.SOCK_STREAM))
for family, type, proto, cname, address in infos:
try:
sock = socket.socket(family=family, type=type, proto=proto)
sock.setblocking(False)
self.loop.run_until_complete(
self.loop.sock_connect(sock, address))
except:
pass
else:
break
else:
assert False, 'Can not create socket.'
f = self.loop.create_connection(
lambda: MyProto(loop=self.loop), sock=sock)
tr, pr = self.loop.run_until_complete(f)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, asyncio.Protocol)
self.loop.run_until_complete(pr.done)
self.assertGreater(pr.nbytes, 0)
tr.close()
def check_ssl_extra_info(self, client, check_sockname=True,
peername=None, peercert={}):
if check_sockname:
self.assertIsNotNone(client.get_extra_info('sockname'))
if peername:
self.assertEqual(peername,
client.get_extra_info('peername'))
else:
self.assertIsNotNone(client.get_extra_info('peername'))
self.assertEqual(peercert,
client.get_extra_info('peercert'))
# test SSL cipher
cipher = client.get_extra_info('cipher')
self.assertIsInstance(cipher, tuple)
self.assertEqual(len(cipher), 3, cipher)
self.assertIsInstance(cipher[0], str)
self.assertIsInstance(cipher[1], str)
self.assertIsInstance(cipher[2], int)
# test SSL object
sslobj = client.get_extra_info('ssl_object')
self.assertIsNotNone(sslobj)
self.assertEqual(sslobj.compression(),
client.get_extra_info('compression'))
self.assertEqual(sslobj.cipher(),
client.get_extra_info('cipher'))
self.assertEqual(sslobj.getpeercert(),
client.get_extra_info('peercert'))
self.assertEqual(sslobj.compression(),
client.get_extra_info('compression'))
def _basetest_create_ssl_connection(self, connection_fut,
check_sockname=True,
peername=None):
tr, pr = self.loop.run_until_complete(connection_fut)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, asyncio.Protocol)
self.assertTrue('ssl' in tr.__class__.__name__.lower())
self.check_ssl_extra_info(tr, check_sockname, peername)
self.loop.run_until_complete(pr.done)
self.assertGreater(pr.nbytes, 0)
tr.close()
def _test_create_ssl_connection(self, httpd, create_connection,
check_sockname=True, peername=None):
conn_fut = create_connection(ssl=test_utils.dummy_ssl_context())
self._basetest_create_ssl_connection(conn_fut, check_sockname,
peername)
# ssl.Purpose was introduced in Python 3.4
if hasattr(ssl, 'Purpose'):
def _dummy_ssl_create_context(purpose=ssl.Purpose.SERVER_AUTH, *,
cafile=None, capath=None,
cadata=None):
"""
A ssl.create_default_context() replacement that doesn't enable
cert validation.
"""
self.assertEqual(purpose, ssl.Purpose.SERVER_AUTH)
return test_utils.dummy_ssl_context()
# With ssl=True, ssl.create_default_context() should be called
with mock.patch('ssl.create_default_context',
side_effect=_dummy_ssl_create_context) as m:
conn_fut = create_connection(ssl=True)
self._basetest_create_ssl_connection(conn_fut, check_sockname,
peername)
self.assertEqual(m.call_count, 1)
# With the real ssl.create_default_context(), certificate
# validation will fail
with self.assertRaises(ssl.SSLError) as cm:
conn_fut = create_connection(ssl=True)
# Ignore the "SSL handshake failed" log in debug mode
with test_utils.disable_logger():
self._basetest_create_ssl_connection(conn_fut, check_sockname,
peername)
self.assertEqual(cm.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_ssl_connection(self):
with test_utils.run_test_server(use_ssl=True) as httpd:
create_connection = functools.partial(
self.loop.create_connection,
lambda: MyProto(loop=self.loop),
*httpd.address)
self._test_create_ssl_connection(httpd, create_connection,
peername=httpd.address)
def test_legacy_create_ssl_connection(self):
with test_utils.force_legacy_ssl_support():
self.test_create_ssl_connection()
@unittest.skipIf(ssl is None, 'No ssl module')
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_ssl_unix_connection(self):
# Issue #20682: On Mac OS X Tiger, getsockname() returns a
# zero-length address for UNIX socket.
check_sockname = not osx_tiger()
with test_utils.run_test_unix_server(use_ssl=True) as httpd:
create_connection = functools.partial(
self.loop.create_unix_connection,
lambda: MyProto(loop=self.loop), httpd.address,
server_hostname='127.0.0.1')
self._test_create_ssl_connection(httpd, create_connection,
check_sockname,
peername=httpd.address)
def test_legacy_create_ssl_unix_connection(self):
with test_utils.force_legacy_ssl_support():
self.test_create_ssl_unix_connection()
def test_create_connection_local_addr(self):
with test_utils.run_test_server() as httpd:
port = support.find_unused_port()
f = self.loop.create_connection(
lambda: MyProto(loop=self.loop),
*httpd.address, local_addr=(httpd.address[0], port))
tr, pr = self.loop.run_until_complete(f)
expected = pr.transport.get_extra_info('sockname')[1]
self.assertEqual(port, expected)
tr.close()
def test_create_connection_local_addr_in_use(self):
with test_utils.run_test_server() as httpd:
f = self.loop.create_connection(
lambda: MyProto(loop=self.loop),
*httpd.address, local_addr=httpd.address)
with self.assertRaises(OSError) as cm:
self.loop.run_until_complete(f)
self.assertEqual(cm.exception.errno, errno.EADDRINUSE)
self.assertIn(str(httpd.address), cm.exception.strerror)
@mock.patch('asyncio.base_events.socket')
def create_server_multiple_hosts(self, family, hosts, mock_sock):
@asyncio.coroutine
def getaddrinfo(host, port, *args, **kw):
if family == socket.AF_INET:
return [[family, socket.SOCK_STREAM, 6, '', (host, port)]]
else:
return [[family, socket.SOCK_STREAM, 6, '', (host, port, 0, 0)]]
def getaddrinfo_task(*args, **kwds):
return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
if family == socket.AF_INET:
mock_sock.socket().getsockbyname.side_effect = [(host, 80)
for host in hosts]
else:
mock_sock.socket().getsockbyname.side_effect = [(host, 80, 0, 0)
for host in hosts]
self.loop.getaddrinfo = getaddrinfo_task
self.loop._start_serving = mock.Mock()
self.loop._stop_serving = mock.Mock()
f = self.loop.create_server(lambda: MyProto(self.loop), hosts, 80)
server = self.loop.run_until_complete(f)
self.addCleanup(server.close)
server_hosts = [sock.getsockbyname()[0] for sock in server.sockets]
self.assertEqual(server_hosts, hosts)
def test_create_server_multiple_hosts_ipv4(self):
self.create_server_multiple_hosts(socket.AF_INET,
['1.2.3.4', '5.6.7.8'])
def test_create_server_multiple_hosts_ipv6(self):
self.create_server_multiple_hosts(socket.AF_INET6, ['::1', '::2'])
def test_create_server(self):
proto = MyProto(self.loop)
f = self.loop.create_server(lambda: proto, '0.0.0.0', 0)
server = self.loop.run_until_complete(f)
self.assertEqual(len(server.sockets), 1)
sock = server.sockets[0]
host, port = sock.getsockname()
self.assertEqual(host, '0.0.0.0')
client = socket.socket()
client.connect(('127.0.0.1', port))
client.sendall(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('sockname'))
self.assertEqual('127.0.0.1',
proto.transport.get_extra_info('peername')[0])
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
# the client socket must be closed after to avoid ECONNRESET upon
# recv()/send() on the serving socket
client.close()
# close server
server.close()
@unittest.skipUnless(hasattr(socket, 'SO_REUSEPORT'), 'No SO_REUSEPORT')
def test_create_server_reuse_port(self):
proto = MyProto(self.loop)
f = self.loop.create_server(
lambda: proto, '0.0.0.0', 0)
server = self.loop.run_until_complete(f)
self.assertEqual(len(server.sockets), 1)
sock = server.sockets[0]
self.assertFalse(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEPORT))
server.close()
test_utils.run_briefly(self.loop)
proto = MyProto(self.loop)
f = self.loop.create_server(
lambda: proto, '0.0.0.0', 0, reuse_port=True)
server = self.loop.run_until_complete(f)
self.assertEqual(len(server.sockets), 1)
sock = server.sockets[0]
self.assertTrue(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEPORT))
server.close()
def _make_unix_server(self, factory, **kwargs):
path = test_utils.gen_unix_socket_path()
self.addCleanup(lambda: os.path.exists(path) and os.unlink(path))
f = self.loop.create_unix_server(factory, path, **kwargs)
server = self.loop.run_until_complete(f)
return server, path
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_server(self):
proto = MyProto(loop=self.loop)
server, path = self._make_unix_server(lambda: proto)
self.assertEqual(len(server.sockets), 1)
client = socket.socket(socket.AF_UNIX)
client.connect(path)
client.sendall(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
# the client socket must be closed after to avoid ECONNRESET upon
# recv()/send() on the serving socket
client.close()
# close server
server.close()
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_server_path_socket_error(self):
proto = MyProto(loop=self.loop)
sock = socket.socket()
with sock:
f = self.loop.create_unix_server(lambda: proto, '/test', sock=sock)
with self.assertRaisesRegex(ValueError,
'path and sock can not be specified '
'at the same time'):
self.loop.run_until_complete(f)
def _create_ssl_context(self, certfile, keyfile=None):
sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext.options |= ssl.OP_NO_SSLv2
sslcontext.load_cert_chain(certfile, keyfile)
return sslcontext
def _make_ssl_server(self, factory, certfile, keyfile=None):
sslcontext = self._create_ssl_context(certfile, keyfile)
f = self.loop.create_server(factory, '127.0.0.1', 0, ssl=sslcontext)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
host, port = sock.getsockname()
self.assertEqual(host, '127.0.0.1')
return server, host, port
def _make_ssl_unix_server(self, factory, certfile, keyfile=None):
sslcontext = self._create_ssl_context(certfile, keyfile)
return self._make_unix_server(factory, ssl=sslcontext)
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, ONLYCERT, ONLYKEY)
f_c = self.loop.create_connection(MyBaseProto, host, port,
ssl=test_utils.dummy_ssl_context())
client, pr = self.loop.run_until_complete(f_c)
client.write(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# extra info is available
self.check_ssl_extra_info(client, peername=(host, port))
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
# the client socket must be closed after to avoid ECONNRESET upon
# recv()/send() on the serving socket
client.close()
# stop serving
server.close()
def test_legacy_create_server_ssl(self):
with test_utils.force_legacy_ssl_support():
self.test_create_server_ssl()
@unittest.skipIf(ssl is None, 'No ssl module')
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_server_ssl(self):
proto = MyProto(loop=self.loop)
server, path = self._make_ssl_unix_server(
lambda: proto, ONLYCERT, ONLYKEY)
f_c = self.loop.create_unix_connection(
MyBaseProto, path, ssl=test_utils.dummy_ssl_context(),
server_hostname='')
client, pr = self.loop.run_until_complete(f_c)
client.write(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
# the client socket must be closed after to avoid ECONNRESET upon
# recv()/send() on the serving socket
client.close()
# stop serving
server.close()
def test_legacy_create_unix_server_ssl(self):
with test_utils.force_legacy_ssl_support():
self.test_create_unix_server_ssl()
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_verify_failed(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# no CA loaded
f_c = self.loop.create_connection(MyProto, host, port,
ssl=sslcontext_client)
with mock.patch.object(self.loop, 'call_exception_handler'):
with test_utils.disable_logger():
with self.assertRaisesRegex(ssl.SSLError,
'certificate verify failed '):
self.loop.run_until_complete(f_c)
# execute the loop to log the connection error
test_utils.run_briefly(self.loop)
# close connection
self.assertIsNone(proto.transport)
server.close()
def test_legacy_create_server_ssl_verify_failed(self):
with test_utils.force_legacy_ssl_support():
self.test_create_server_ssl_verify_failed()
@unittest.skipIf(ssl is None, 'No ssl module')
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_server_ssl_verify_failed(self):
proto = MyProto(loop=self.loop)
server, path = self._make_ssl_unix_server(
lambda: proto, SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# no CA loaded
f_c = self.loop.create_unix_connection(MyProto, path,
ssl=sslcontext_client,
server_hostname='invalid')
with mock.patch.object(self.loop, 'call_exception_handler'):
with test_utils.disable_logger():
with self.assertRaisesRegex(ssl.SSLError,
'certificate verify failed '):
self.loop.run_until_complete(f_c)
# execute the loop to log the connection error
test_utils.run_briefly(self.loop)
# close connection
self.assertIsNone(proto.transport)
server.close()
def test_legacy_create_unix_server_ssl_verify_failed(self):
with test_utils.force_legacy_ssl_support():
self.test_create_unix_server_ssl_verify_failed()
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_match_failed(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
sslcontext_client.load_verify_locations(
cafile=SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# incorrect server_hostname
f_c = self.loop.create_connection(MyProto, host, port,
ssl=sslcontext_client)
with mock.patch.object(self.loop, 'call_exception_handler'):
with test_utils.disable_logger():
with self.assertRaisesRegex(
ssl.CertificateError,
"hostname '127.0.0.1' doesn't match 'localhost'"):
self.loop.run_until_complete(f_c)
# close connection
proto.transport.close()
server.close()
def test_legacy_create_server_ssl_match_failed(self):
with test_utils.force_legacy_ssl_support():
self.test_create_server_ssl_match_failed()
@unittest.skipIf(ssl is None, 'No ssl module')
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_server_ssl_verified(self):
proto = MyProto(loop=self.loop)
server, path = self._make_ssl_unix_server(
lambda: proto, SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
sslcontext_client.load_verify_locations(cafile=SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# Connection succeeds with correct CA and server hostname.
f_c = self.loop.create_unix_connection(MyProto, path,
ssl=sslcontext_client,
server_hostname='localhost')
client, pr = self.loop.run_until_complete(f_c)
# close connection
proto.transport.close()
client.close()
server.close()
self.loop.run_until_complete(proto.done)
def test_legacy_create_unix_server_ssl_verified(self):
with test_utils.force_legacy_ssl_support():
self.test_create_unix_server_ssl_verified()
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_verified(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
sslcontext_client.load_verify_locations(cafile=SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# Connection succeeds with correct CA and server hostname.
f_c = self.loop.create_connection(MyProto, host, port,
ssl=sslcontext_client,
server_hostname='localhost')
client, pr = self.loop.run_until_complete(f_c)
# extra info is available
self.check_ssl_extra_info(client, peername=(host, port),
peercert=PEERCERT)
# close connection
proto.transport.close()
client.close()
server.close()
self.loop.run_until_complete(proto.done)
def test_legacy_create_server_ssl_verified(self):
with test_utils.force_legacy_ssl_support():
self.test_create_server_ssl_verified()
def test_create_server_sock(self):
proto = asyncio.Future(loop=self.loop)
class TestMyProto(MyProto):
def connection_made(self, transport):
super().connection_made(transport)
proto.set_result(self)
sock_ob = socket.socket(type=socket.SOCK_STREAM)
sock_ob.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock_ob.bind(('0.0.0.0', 0))
f = self.loop.create_server(TestMyProto, sock=sock_ob)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
self.assertIs(sock, sock_ob)
host, port = sock.getsockname()
self.assertEqual(host, '0.0.0.0')
client = socket.socket()
client.connect(('127.0.0.1', port))
client.send(b'xxx')
client.close()
server.close()
def test_create_server_addr_in_use(self):
sock_ob = socket.socket(type=socket.SOCK_STREAM)
sock_ob.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock_ob.bind(('0.0.0.0', 0))
f = self.loop.create_server(MyProto, sock=sock_ob)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
host, port = sock.getsockname()
f = self.loop.create_server(MyProto, host=host, port=port)
with self.assertRaises(OSError) as cm:
self.loop.run_until_complete(f)
self.assertEqual(cm.exception.errno, errno.EADDRINUSE)
server.close()
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 not supported or enabled')
def test_create_server_dual_stack(self):
f_proto = asyncio.Future(loop=self.loop)
class TestMyProto(MyProto):
def connection_made(self, transport):
super().connection_made(transport)
f_proto.set_result(self)
try_count = 0
while True:
try:
port = support.find_unused_port()
f = self.loop.create_server(TestMyProto, host=None, port=port)
server = self.loop.run_until_complete(f)
except OSError as ex:
if ex.errno == errno.EADDRINUSE:
try_count += 1
self.assertGreaterEqual(5, try_count)
continue
else:
raise
else:
break
client = socket.socket()
client.connect(('127.0.0.1', port))
client.send(b'xxx')
proto = self.loop.run_until_complete(f_proto)
proto.transport.close()
client.close()
f_proto = asyncio.Future(loop=self.loop)
client = socket.socket(socket.AF_INET6)
client.connect(('::1', port))
client.send(b'xxx')
proto = self.loop.run_until_complete(f_proto)
proto.transport.close()
client.close()
server.close()
def test_server_close(self):
f = self.loop.create_server(MyProto, '0.0.0.0', 0)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
host, port = sock.getsockname()
client = socket.socket()
client.connect(('127.0.0.1', port))
client.send(b'xxx')
client.close()
server.close()
client = socket.socket()
self.assertRaises(
ConnectionRefusedError, client.connect, ('127.0.0.1', port))
client.close()
def test_create_datagram_endpoint(self):
class TestMyDatagramProto(MyDatagramProto):
def __init__(inner_self):
super().__init__(loop=self.loop)
def datagram_received(self, data, addr):
super().datagram_received(data, addr)
self.transport.sendto(b'resp:'+data, addr)
coro = self.loop.create_datagram_endpoint(
TestMyDatagramProto, local_addr=('127.0.0.1', 0))
s_transport, server = self.loop.run_until_complete(coro)
host, port = s_transport.get_extra_info('sockname')
self.assertIsInstance(s_transport, asyncio.Transport)
self.assertIsInstance(server, TestMyDatagramProto)
self.assertEqual('INITIALIZED', server.state)
self.assertIs(server.transport, s_transport)
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(loop=self.loop),
remote_addr=(host, port))
transport, client = self.loop.run_until_complete(coro)
self.assertIsInstance(transport, asyncio.Transport)
self.assertIsInstance(client, MyDatagramProto)
self.assertEqual('INITIALIZED', client.state)
self.assertIs(client.transport, transport)
transport.sendto(b'xxx')
test_utils.run_until(self.loop, lambda: server.nbytes)
self.assertEqual(3, server.nbytes)
test_utils.run_until(self.loop, lambda: client.nbytes)
# received
self.assertEqual(8, client.nbytes)
# extra info is available
self.assertIsNotNone(transport.get_extra_info('sockname'))
# close connection
transport.close()
self.loop.run_until_complete(client.done)
self.assertEqual('CLOSED', client.state)
server.transport.close()
def test_create_datagram_endpoint_sock(self):
sock = None
local_address = ('127.0.0.1', 0)
infos = self.loop.run_until_complete(
self.loop.getaddrinfo(
*local_address, type=socket.SOCK_DGRAM))
for family, type, proto, cname, address in infos:
try:
sock = socket.socket(family=family, type=type, proto=proto)
sock.setblocking(False)
sock.bind(address)
except:
pass
else:
break
else:
assert False, 'Can not create socket.'
f = self.loop.create_connection(
lambda: MyDatagramProto(loop=self.loop), sock=sock)
tr, pr = self.loop.run_until_complete(f)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, MyDatagramProto)
tr.close()
self.loop.run_until_complete(pr.done)
def test_internal_fds(self):
loop = self.create_event_loop()
if not isinstance(loop, selector_events.BaseSelectorEventLoop):
loop.close()
self.skipTest('loop is not a BaseSelectorEventLoop')
self.assertEqual(1, loop._internal_fds)
loop.close()
self.assertEqual(0, loop._internal_fds)
self.assertIsNone(loop._csock)
self.assertIsNone(loop._ssock)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_read_pipe(self):
proto = MyReadPipeProto(loop=self.loop)
rpipe, wpipe = os.pipe()
pipeobj = io.open(rpipe, 'rb', 1024)
@asyncio.coroutine
def connect():
t, p = yield from self.loop.connect_read_pipe(
lambda: proto, pipeobj)
self.assertIs(p, proto)
self.assertIs(t, proto.transport)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(0, proto.nbytes)
self.loop.run_until_complete(connect())
os.write(wpipe, b'1')
test_utils.run_until(self.loop, lambda: proto.nbytes >= 1)
self.assertEqual(1, proto.nbytes)
os.write(wpipe, b'2345')
test_utils.run_until(self.loop, lambda: proto.nbytes >= 5)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(5, proto.nbytes)
os.close(wpipe)
self.loop.run_until_complete(proto.done)
self.assertEqual(
['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], proto.state)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
# select, poll and kqueue don't support character devices (PTY) on Mac OS X
# older than 10.6 (Snow Leopard)
@support.requires_mac_ver(10, 6)
# Issue #20495: The test hangs on FreeBSD 7.2 but pass on FreeBSD 9
@support.requires_freebsd_version(8)
def test_read_pty_output(self):
proto = MyReadPipeProto(loop=self.loop)
master, slave = os.openpty()
master_read_obj = io.open(master, 'rb', 0)
@asyncio.coroutine
def connect():
t, p = yield from self.loop.connect_read_pipe(lambda: proto,
master_read_obj)
self.assertIs(p, proto)
self.assertIs(t, proto.transport)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(0, proto.nbytes)
self.loop.run_until_complete(connect())
os.write(slave, b'1')
test_utils.run_until(self.loop, lambda: proto.nbytes)
self.assertEqual(1, proto.nbytes)
os.write(slave, b'2345')
test_utils.run_until(self.loop, lambda: proto.nbytes >= 5)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(5, proto.nbytes)
os.close(slave)
self.loop.run_until_complete(proto.done)
self.assertEqual(
['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], proto.state)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_write_pipe(self):
rpipe, wpipe = os.pipe()
pipeobj = io.open(wpipe, 'wb', 1024)
proto = MyWritePipeProto(loop=self.loop)
connect = self.loop.connect_write_pipe(lambda: proto, pipeobj)
transport, p = self.loop.run_until_complete(connect)
self.assertIs(p, proto)
self.assertIs(transport, proto.transport)
self.assertEqual('CONNECTED', proto.state)
transport.write(b'1')
data = bytearray()
def reader(data):
chunk = os.read(rpipe, 1024)
data += chunk
return len(data)
test_utils.run_until(self.loop, lambda: reader(data) >= 1)
self.assertEqual(b'1', data)
transport.write(b'2345')
test_utils.run_until(self.loop, lambda: reader(data) >= 5)
self.assertEqual(b'12345', data)
self.assertEqual('CONNECTED', proto.state)
os.close(rpipe)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_write_pipe_disconnect_on_close(self):
rsock, wsock = test_utils.socketpair()
rsock.setblocking(False)
pipeobj = io.open(wsock.detach(), 'wb', 1024)
proto = MyWritePipeProto(loop=self.loop)
connect = self.loop.connect_write_pipe(lambda: proto, pipeobj)
transport, p = self.loop.run_until_complete(connect)
self.assertIs(p, proto)
self.assertIs(transport, proto.transport)
self.assertEqual('CONNECTED', proto.state)
transport.write(b'1')
data = self.loop.run_until_complete(self.loop.sock_recv(rsock, 1024))
self.assertEqual(b'1', data)
rsock.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
# select, poll and kqueue don't support character devices (PTY) on Mac OS X
# older than 10.6 (Snow Leopard)
@support.requires_mac_ver(10, 6)
def test_write_pty(self):
master, slave = os.openpty()
slave_write_obj = io.open(slave, 'wb', 0)
proto = MyWritePipeProto(loop=self.loop)
connect = self.loop.connect_write_pipe(lambda: proto, slave_write_obj)
transport, p = self.loop.run_until_complete(connect)
self.assertIs(p, proto)
self.assertIs(transport, proto.transport)
self.assertEqual('CONNECTED', proto.state)
transport.write(b'1')
data = bytearray()
def reader(data):
chunk = os.read(master, 1024)
data += chunk
return len(data)
test_utils.run_until(self.loop, lambda: reader(data) >= 1,
timeout=10)
self.assertEqual(b'1', data)
transport.write(b'2345')
test_utils.run_until(self.loop, lambda: reader(data) >= 5,
timeout=10)
self.assertEqual(b'12345', data)
self.assertEqual('CONNECTED', proto.state)
os.close(master)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
def test_prompt_cancellation(self):
r, w = test_utils.socketpair()
r.setblocking(False)
f = self.loop.sock_recv(r, 1)
ov = getattr(f, 'ov', None)
if ov is not None:
self.assertTrue(ov.pending)
@asyncio.coroutine
def main():
try:
self.loop.call_soon(f.cancel)
yield from f
except asyncio.CancelledError:
res = 'cancelled'
else:
res = None
finally:
self.loop.stop()
return res
start = time.monotonic()
t = asyncio.Task(main(), loop=self.loop)
self.loop.run_forever()
elapsed = time.monotonic() - start
self.assertLess(elapsed, 0.1)
self.assertEqual(t.result(), 'cancelled')
self.assertRaises(asyncio.CancelledError, f.result)
if ov is not None:
self.assertFalse(ov.pending)
self.loop._stop_serving(r)
r.close()
w.close()
def test_timeout_rounding(self):
def _run_once():
self.loop._run_once_counter += 1
orig_run_once()
orig_run_once = self.loop._run_once
self.loop._run_once_counter = 0
self.loop._run_once = _run_once
@asyncio.coroutine
def wait():
loop = self.loop
yield from asyncio.sleep(1e-2, loop=loop)
yield from asyncio.sleep(1e-4, loop=loop)
yield from asyncio.sleep(1e-6, loop=loop)
yield from asyncio.sleep(1e-8, loop=loop)
yield from asyncio.sleep(1e-10, loop=loop)
self.loop.run_until_complete(wait())
# The ideal number of calls is 12, but on some platforms the selector
# may sleep a little bit less than the timeout depending on the resolution
# of the clock used by the kernel. Tolerate a few useless calls on
# these platforms.
self.assertLessEqual(self.loop._run_once_counter, 20,
{'clock_resolution': self.loop._clock_resolution,
'selector': self.loop._selector.__class__.__name__})
def test_sock_connect_address(self):
# In debug mode, sock_connect() must ensure that the address is already
# resolved (call _check_resolved_address())
self.loop.set_debug(True)
addresses = [(socket.AF_INET, ('www.python.org', 80))]
if support.IPV6_ENABLED:
addresses.extend((
(socket.AF_INET6, ('www.python.org', 80)),
(socket.AF_INET6, ('www.python.org', 80, 0, 0)),
))
for family, address in addresses:
for sock_type in (socket.SOCK_STREAM, socket.SOCK_DGRAM):
sock = socket.socket(family, sock_type)
with sock:
sock.setblocking(False)
connect = self.loop.sock_connect(sock, address)
with self.assertRaises(ValueError) as cm:
self.loop.run_until_complete(connect)
self.assertIn('address must be resolved',
str(cm.exception))
def test_remove_fds_after_closing(self):
loop = self.create_event_loop()
callback = lambda: None
r, w = test_utils.socketpair()
self.addCleanup(r.close)
self.addCleanup(w.close)
loop.add_reader(r, callback)
loop.add_writer(w, callback)
loop.close()
self.assertFalse(loop.remove_reader(r))
self.assertFalse(loop.remove_writer(w))
def test_add_fds_after_closing(self):
loop = self.create_event_loop()
callback = lambda: None
r, w = test_utils.socketpair()
self.addCleanup(r.close)
self.addCleanup(w.close)
loop.close()
with self.assertRaises(RuntimeError):
loop.add_reader(r, callback)
with self.assertRaises(RuntimeError):
loop.add_writer(w, callback)
def test_close_running_event_loop(self):
@asyncio.coroutine
def close_loop(loop):
self.loop.close()
coro = close_loop(self.loop)
with self.assertRaises(RuntimeError):
self.loop.run_until_complete(coro)
def test_close(self):
self.loop.close()
@asyncio.coroutine
def test():
pass
func = lambda: False
coro = test()
self.addCleanup(coro.close)
# operation blocked when the loop is closed
with self.assertRaises(RuntimeError):
self.loop.run_forever()
with self.assertRaises(RuntimeError):
fut = asyncio.Future(loop=self.loop)
self.loop.run_until_complete(fut)
with self.assertRaises(RuntimeError):
self.loop.call_soon(func)
with self.assertRaises(RuntimeError):
self.loop.call_soon_threadsafe(func)
with self.assertRaises(RuntimeError):
self.loop.call_later(1.0, func)
with self.assertRaises(RuntimeError):
self.loop.call_at(self.loop.time() + .0, func)
with self.assertRaises(RuntimeError):
self.loop.run_in_executor(None, func)
with self.assertRaises(RuntimeError):
self.loop.create_task(coro)
with self.assertRaises(RuntimeError):
self.loop.add_signal_handler(signal.SIGTERM, func)
class SubprocessTestsMixin:
def check_terminated(self, returncode):
if sys.platform == 'win32':
self.assertIsInstance(returncode, int)
# expect 1 but sometimes get 0
else:
self.assertEqual(-signal.SIGTERM, returncode)
def check_killed(self, returncode):
if sys.platform == 'win32':
self.assertIsInstance(returncode, int)
# expect 1 but sometimes get 0
else:
self.assertEqual(-signal.SIGKILL, returncode)
def test_subprocess_exec(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
stdin = transp.get_pipe_transport(0)
stdin.write(b'Python The Winner')
self.loop.run_until_complete(proto.got_data[1].wait())
with test_utils.disable_logger():
transp.close()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
self.assertEqual(b'Python The Winner', proto.data[1])
def test_subprocess_interactive(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
stdin = transp.get_pipe_transport(0)
stdin.write(b'Python ')
self.loop.run_until_complete(proto.got_data[1].wait())
proto.got_data[1].clear()
self.assertEqual(b'Python ', proto.data[1])
stdin.write(b'The Winner')
self.loop.run_until_complete(proto.got_data[1].wait())
self.assertEqual(b'Python The Winner', proto.data[1])
with test_utils.disable_logger():
transp.close()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
def test_subprocess_shell(self):
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'echo Python')
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.get_pipe_transport(0).close()
self.loop.run_until_complete(proto.completed)
self.assertEqual(0, proto.returncode)
self.assertTrue(all(f.done() for f in proto.disconnects.values()))
self.assertEqual(proto.data[1].rstrip(b'\r\n'), b'Python')
self.assertEqual(proto.data[2], b'')
transp.close()
def test_subprocess_exitcode(self):
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'exit 7', stdin=None, stdout=None, stderr=None)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.completed)
self.assertEqual(7, proto.returncode)
transp.close()
def test_subprocess_close_after_finish(self):
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'exit 7', stdin=None, stdout=None, stderr=None)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.assertIsNone(transp.get_pipe_transport(0))
self.assertIsNone(transp.get_pipe_transport(1))
self.assertIsNone(transp.get_pipe_transport(2))
self.loop.run_until_complete(proto.completed)
self.assertEqual(7, proto.returncode)
self.assertIsNone(transp.close())
def test_subprocess_kill(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.kill()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
transp.close()
def test_subprocess_terminate(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.terminate()
self.loop.run_until_complete(proto.completed)
self.check_terminated(proto.returncode)
transp.close()
@unittest.skipIf(sys.platform == 'win32', "Don't have SIGHUP")
def test_subprocess_send_signal(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.send_signal(signal.SIGHUP)
self.loop.run_until_complete(proto.completed)
self.assertEqual(-signal.SIGHUP, proto.returncode)
transp.close()
def test_subprocess_stderr(self):
prog = os.path.join(os.path.dirname(__file__), 'echo2.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
stdin = transp.get_pipe_transport(0)
stdin.write(b'test')
self.loop.run_until_complete(proto.completed)
transp.close()
self.assertEqual(b'OUT:test', proto.data[1])
self.assertTrue(proto.data[2].startswith(b'ERR:test'), proto.data[2])
self.assertEqual(0, proto.returncode)
def test_subprocess_stderr_redirect_to_stdout(self):
prog = os.path.join(os.path.dirname(__file__), 'echo2.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog, stderr=subprocess.STDOUT)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
stdin = transp.get_pipe_transport(0)
self.assertIsNotNone(transp.get_pipe_transport(1))
self.assertIsNone(transp.get_pipe_transport(2))
stdin.write(b'test')
self.loop.run_until_complete(proto.completed)
self.assertTrue(proto.data[1].startswith(b'OUT:testERR:test'),
proto.data[1])
self.assertEqual(b'', proto.data[2])
transp.close()
self.assertEqual(0, proto.returncode)
def test_subprocess_close_client_stream(self):
prog = os.path.join(os.path.dirname(__file__), 'echo3.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
stdin = transp.get_pipe_transport(0)
stdout = transp.get_pipe_transport(1)
stdin.write(b'test')
self.loop.run_until_complete(proto.got_data[1].wait())
self.assertEqual(b'OUT:test', proto.data[1])
stdout.close()
self.loop.run_until_complete(proto.disconnects[1])
stdin.write(b'xxx')
self.loop.run_until_complete(proto.got_data[2].wait())
if sys.platform != 'win32':
self.assertEqual(b'ERR:BrokenPipeError', proto.data[2])
else:
# After closing the read-end of a pipe, writing to the
# write-end using os.write() fails with errno==EINVAL and
# GetLastError()==ERROR_INVALID_NAME on Windows!?! (Using
# WriteFile() we get ERROR_BROKEN_PIPE as expected.)
self.assertEqual(b'ERR:OSError', proto.data[2])
with test_utils.disable_logger():
transp.close()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
def test_subprocess_wait_no_same_group(self):
# start the new process in a new session
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'exit 7', stdin=None, stdout=None, stderr=None,
start_new_session=True)
_, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.completed)
self.assertEqual(7, proto.returncode)
def test_subprocess_exec_invalid_args(self):
@asyncio.coroutine
def connect(**kwds):
yield from self.loop.subprocess_exec(
asyncio.SubprocessProtocol,
'pwd', **kwds)
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(universal_newlines=True))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(bufsize=4096))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(shell=True))
def test_subprocess_shell_invalid_args(self):
@asyncio.coroutine
def connect(cmd=None, **kwds):
if not cmd:
cmd = 'pwd'
yield from self.loop.subprocess_shell(
asyncio.SubprocessProtocol,
cmd, **kwds)
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(['ls', '-l']))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(universal_newlines=True))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(bufsize=4096))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(shell=False))
if sys.platform == 'win32':
class SelectEventLoopTests(EventLoopTestsMixin, test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop()
class ProactorEventLoopTests(EventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.ProactorEventLoop()
if not sslproto._is_sslproto_available():
def test_create_ssl_connection(self):
raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)")
def test_create_server_ssl(self):
raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)")
def test_create_server_ssl_verify_failed(self):
raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)")
def test_create_server_ssl_match_failed(self):
raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)")
def test_create_server_ssl_verified(self):
raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)")
def test_legacy_create_ssl_connection(self):
raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL")
def test_legacy_create_server_ssl(self):
raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL")
def test_legacy_create_server_ssl_verify_failed(self):
raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL")
def test_legacy_create_server_ssl_match_failed(self):
raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL")
def test_legacy_create_server_ssl_verified(self):
raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL")
def test_reader_callback(self):
raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
def test_reader_callback_cancel(self):
raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
def test_writer_callback(self):
raise unittest.SkipTest("IocpEventLoop does not have add_writer()")
def test_writer_callback_cancel(self):
raise unittest.SkipTest("IocpEventLoop does not have add_writer()")
def test_create_datagram_endpoint(self):
raise unittest.SkipTest(
"IocpEventLoop does not have create_datagram_endpoint()")
def test_remove_fds_after_closing(self):
raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
else:
from asyncio import selectors
class UnixEventLoopTestsMixin(EventLoopTestsMixin):
def setUp(self):
super().setUp()
watcher = asyncio.SafeChildWatcher()
watcher.attach_loop(self.loop)
asyncio.set_child_watcher(watcher)
def tearDown(self):
asyncio.set_child_watcher(None)
super().tearDown()
if hasattr(selectors, 'KqueueSelector'):
class KqueueEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(
selectors.KqueueSelector())
# kqueue doesn't support character devices (PTY) on Mac OS X older
# than 10.9 (Mavericks)
@support.requires_mac_ver(10, 9)
# Issue #20667: KqueueEventLoopTests.test_read_pty_output()
# hangs on OpenBSD 5.5
@unittest.skipIf(sys.platform.startswith('openbsd'),
'test hangs on OpenBSD')
def test_read_pty_output(self):
super().test_read_pty_output()
# kqueue doesn't support character devices (PTY) on Mac OS X older
# than 10.9 (Mavericks)
@support.requires_mac_ver(10, 9)
def test_write_pty(self):
super().test_write_pty()
if hasattr(selectors, 'EpollSelector'):
class EPollEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(selectors.EpollSelector())
if hasattr(selectors, 'PollSelector'):
class PollEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(selectors.PollSelector())
# Should always exist.
class SelectEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(selectors.SelectSelector())
def noop(*args):
pass
class HandleTests(test_utils.TestCase):
def setUp(self):
self.loop = mock.Mock()
self.loop.get_debug.return_value = True
def test_handle(self):
def callback(*args):
return args
args = ()
h = asyncio.Handle(callback, args, self.loop)
self.assertIs(h._callback, callback)
self.assertIs(h._args, args)
self.assertFalse(h._cancelled)
h.cancel()
self.assertTrue(h._cancelled)
def test_handle_from_handle(self):
def callback(*args):
return args
h1 = asyncio.Handle(callback, (), loop=self.loop)
self.assertRaises(
AssertionError, asyncio.Handle, h1, (), self.loop)
def test_callback_with_exception(self):
def callback():
raise ValueError()
self.loop = mock.Mock()
self.loop.call_exception_handler = mock.Mock()
h = asyncio.Handle(callback, (), self.loop)
h._run()
self.loop.call_exception_handler.assert_called_with({
'message': test_utils.MockPattern('Exception in callback.*'),
'exception': mock.ANY,
'handle': h,
'source_traceback': h._source_traceback,
})
def test_handle_weakref(self):
wd = weakref.WeakValueDictionary()
h = asyncio.Handle(lambda: None, (), self.loop)
wd['h'] = h # Would fail without __weakref__ slot.
def test_handle_repr(self):
self.loop.get_debug.return_value = False
# simple function
h = asyncio.Handle(noop, (1, 2), self.loop)
filename, lineno = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<Handle noop(1, 2) at %s:%s>'
% (filename, lineno))
# cancelled handle
h.cancel()
self.assertEqual(repr(h),
'<Handle cancelled>')
# decorated function
cb = asyncio.coroutine(noop)
h = asyncio.Handle(cb, (), self.loop)
self.assertEqual(repr(h),
'<Handle noop() at %s:%s>'
% (filename, lineno))
# partial function
cb = functools.partial(noop, 1, 2)
h = asyncio.Handle(cb, (3,), self.loop)
regex = (r'^<Handle noop\(1, 2\)\(3\) at %s:%s>$'
% (re.escape(filename), lineno))
self.assertRegex(repr(h), regex)
# partial method
if sys.version_info >= (3, 4):
method = HandleTests.test_handle_repr
cb = functools.partialmethod(method)
filename, lineno = test_utils.get_function_source(method)
h = asyncio.Handle(cb, (), self.loop)
cb_regex = r'<function HandleTests.test_handle_repr .*>'
cb_regex = (r'functools.partialmethod\(%s, , \)\(\)' % cb_regex)
regex = (r'^<Handle %s at %s:%s>$'
% (cb_regex, re.escape(filename), lineno))
self.assertRegex(repr(h), regex)
def test_handle_repr_debug(self):
self.loop.get_debug.return_value = True
# simple function
create_filename = __file__
create_lineno = sys._getframe().f_lineno + 1
h = asyncio.Handle(noop, (1, 2), self.loop)
filename, lineno = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<Handle noop(1, 2) at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
# cancelled handle
h.cancel()
self.assertEqual(
repr(h),
'<Handle cancelled noop(1, 2) at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
# double cancellation won't overwrite _repr
h.cancel()
self.assertEqual(
repr(h),
'<Handle cancelled noop(1, 2) at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
def test_handle_source_traceback(self):
loop = asyncio.get_event_loop_policy().new_event_loop()
loop.set_debug(True)
self.set_event_loop(loop)
def check_source_traceback(h):
lineno = sys._getframe(1).f_lineno - 1
self.assertIsInstance(h._source_traceback, list)
self.assertEqual(h._source_traceback[-1][:3],
(__file__,
lineno,
'test_handle_source_traceback'))
# call_soon
h = loop.call_soon(noop)
check_source_traceback(h)
# call_soon_threadsafe
h = loop.call_soon_threadsafe(noop)
check_source_traceback(h)
# call_later
h = loop.call_later(0, noop)
check_source_traceback(h)
# call_at
h = loop.call_at(loop.time(), noop)
check_source_traceback(h)
class TimerTests(unittest.TestCase):
def setUp(self):
self.loop = mock.Mock()
def test_hash(self):
when = time.monotonic()
h = asyncio.TimerHandle(when, lambda: False, (),
mock.Mock())
self.assertEqual(hash(h), hash(when))
def test_timer(self):
def callback(*args):
return args
args = (1, 2, 3)
when = time.monotonic()
h = asyncio.TimerHandle(when, callback, args, mock.Mock())
self.assertIs(h._callback, callback)
self.assertIs(h._args, args)
self.assertFalse(h._cancelled)
# cancel
h.cancel()
self.assertTrue(h._cancelled)
self.assertIsNone(h._callback)
self.assertIsNone(h._args)
# when cannot be None
self.assertRaises(AssertionError,
asyncio.TimerHandle, None, callback, args,
self.loop)
def test_timer_repr(self):
self.loop.get_debug.return_value = False
# simple function
h = asyncio.TimerHandle(123, noop, (), self.loop)
src = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<TimerHandle when=123 noop() at %s:%s>' % src)
# cancelled handle
h.cancel()
self.assertEqual(repr(h),
'<TimerHandle cancelled when=123>')
def test_timer_repr_debug(self):
self.loop.get_debug.return_value = True
# simple function
create_filename = __file__
create_lineno = sys._getframe().f_lineno + 1
h = asyncio.TimerHandle(123, noop, (), self.loop)
filename, lineno = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<TimerHandle when=123 noop() '
'at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
# cancelled handle
h.cancel()
self.assertEqual(repr(h),
'<TimerHandle cancelled when=123 noop() '
'at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
def test_timer_comparison(self):
def callback(*args):
return args
when = time.monotonic()
h1 = asyncio.TimerHandle(when, callback, (), self.loop)
h2 = asyncio.TimerHandle(when, callback, (), self.loop)
# TODO: Use assertLess etc.
self.assertFalse(h1 < h2)
self.assertFalse(h2 < h1)
self.assertTrue(h1 <= h2)
self.assertTrue(h2 <= h1)
self.assertFalse(h1 > h2)
self.assertFalse(h2 > h1)
self.assertTrue(h1 >= h2)
self.assertTrue(h2 >= h1)
self.assertTrue(h1 == h2)
self.assertFalse(h1 != h2)
h2.cancel()
self.assertFalse(h1 == h2)
h1 = asyncio.TimerHandle(when, callback, (), self.loop)
h2 = asyncio.TimerHandle(when + 10.0, callback, (), self.loop)
self.assertTrue(h1 < h2)
self.assertFalse(h2 < h1)
self.assertTrue(h1 <= h2)
self.assertFalse(h2 <= h1)
self.assertFalse(h1 > h2)
self.assertTrue(h2 > h1)
self.assertFalse(h1 >= h2)
self.assertTrue(h2 >= h1)
self.assertFalse(h1 == h2)
self.assertTrue(h1 != h2)
h3 = asyncio.Handle(callback, (), self.loop)
self.assertIs(NotImplemented, h1.__eq__(h3))
self.assertIs(NotImplemented, h1.__ne__(h3))
class AbstractEventLoopTests(unittest.TestCase):
def test_not_implemented(self):
f = mock.Mock()
loop = asyncio.AbstractEventLoop()
self.assertRaises(
NotImplementedError, loop.run_forever)
self.assertRaises(
NotImplementedError, loop.run_until_complete, None)
self.assertRaises(
NotImplementedError, loop.stop)
self.assertRaises(
NotImplementedError, loop.is_running)
self.assertRaises(
NotImplementedError, loop.is_closed)
self.assertRaises(
NotImplementedError, loop.close)
self.assertRaises(
NotImplementedError, loop.create_task, None)
self.assertRaises(
NotImplementedError, loop.call_later, None, None)
self.assertRaises(
NotImplementedError, loop.call_at, f, f)
self.assertRaises(
NotImplementedError, loop.call_soon, None)
self.assertRaises(
NotImplementedError, loop.time)
self.assertRaises(
NotImplementedError, loop.call_soon_threadsafe, None)
self.assertRaises(
NotImplementedError, loop.run_in_executor, f, f)
self.assertRaises(
NotImplementedError, loop.set_default_executor, f)
self.assertRaises(
NotImplementedError, loop.getaddrinfo, 'localhost', 8080)
self.assertRaises(
NotImplementedError, loop.getnameinfo, ('localhost', 8080))
self.assertRaises(
NotImplementedError, loop.create_connection, f)
self.assertRaises(
NotImplementedError, loop.create_server, f)
self.assertRaises(
NotImplementedError, loop.create_datagram_endpoint, f)
self.assertRaises(
NotImplementedError, loop.add_reader, 1, f)
self.assertRaises(
NotImplementedError, loop.remove_reader, 1)
self.assertRaises(
NotImplementedError, loop.add_writer, 1, f)
self.assertRaises(
NotImplementedError, loop.remove_writer, 1)
self.assertRaises(
NotImplementedError, loop.sock_recv, f, 10)
self.assertRaises(
NotImplementedError, loop.sock_sendall, f, 10)
self.assertRaises(
NotImplementedError, loop.sock_connect, f, f)
self.assertRaises(
NotImplementedError, loop.sock_accept, f)
self.assertRaises(
NotImplementedError, loop.add_signal_handler, 1, f)
self.assertRaises(
NotImplementedError, loop.remove_signal_handler, 1)
self.assertRaises(
NotImplementedError, loop.remove_signal_handler, 1)
self.assertRaises(
NotImplementedError, loop.connect_read_pipe, f,
mock.sentinel.pipe)
self.assertRaises(
NotImplementedError, loop.connect_write_pipe, f,
mock.sentinel.pipe)
self.assertRaises(
NotImplementedError, loop.subprocess_shell, f,
mock.sentinel)
self.assertRaises(
NotImplementedError, loop.subprocess_exec, f)
self.assertRaises(
NotImplementedError, loop.set_exception_handler, f)
self.assertRaises(
NotImplementedError, loop.default_exception_handler, f)
self.assertRaises(
NotImplementedError, loop.call_exception_handler, f)
self.assertRaises(
NotImplementedError, loop.get_debug)
self.assertRaises(
NotImplementedError, loop.set_debug, f)
class ProtocolsAbsTests(unittest.TestCase):
def test_empty(self):
f = mock.Mock()
p = asyncio.Protocol()
self.assertIsNone(p.connection_made(f))
self.assertIsNone(p.connection_lost(f))
self.assertIsNone(p.data_received(f))
self.assertIsNone(p.eof_received())
dp = asyncio.DatagramProtocol()
self.assertIsNone(dp.connection_made(f))
self.assertIsNone(dp.connection_lost(f))
self.assertIsNone(dp.error_received(f))
self.assertIsNone(dp.datagram_received(f, f))
sp = asyncio.SubprocessProtocol()
self.assertIsNone(sp.connection_made(f))
self.assertIsNone(sp.connection_lost(f))
self.assertIsNone(sp.pipe_data_received(1, f))
self.assertIsNone(sp.pipe_connection_lost(1, f))
self.assertIsNone(sp.process_exited())
class PolicyTests(unittest.TestCase):
def test_event_loop_policy(self):
policy = asyncio.AbstractEventLoopPolicy()
self.assertRaises(NotImplementedError, policy.get_event_loop)
self.assertRaises(NotImplementedError, policy.set_event_loop, object())
self.assertRaises(NotImplementedError, policy.new_event_loop)
self.assertRaises(NotImplementedError, policy.get_child_watcher)
self.assertRaises(NotImplementedError, policy.set_child_watcher,
object())
def test_get_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
self.assertIsNone(policy._local._loop)
loop = policy.get_event_loop()
self.assertIsInstance(loop, asyncio.AbstractEventLoop)
self.assertIs(policy._local._loop, loop)
self.assertIs(loop, policy.get_event_loop())
loop.close()
def test_get_event_loop_calls_set_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
with mock.patch.object(
policy, "set_event_loop",
wraps=policy.set_event_loop) as m_set_event_loop:
loop = policy.get_event_loop()
# policy._local._loop must be set through .set_event_loop()
# (the unix DefaultEventLoopPolicy needs this call to attach
# the child watcher correctly)
m_set_event_loop.assert_called_with(loop)
loop.close()
def test_get_event_loop_after_set_none(self):
policy = asyncio.DefaultEventLoopPolicy()
policy.set_event_loop(None)
self.assertRaises(RuntimeError, policy.get_event_loop)
@mock.patch('asyncio.events.threading.current_thread')
def test_get_event_loop_thread(self, m_current_thread):
def f():
policy = asyncio.DefaultEventLoopPolicy()
self.assertRaises(RuntimeError, policy.get_event_loop)
th = threading.Thread(target=f)
th.start()
th.join()
def test_new_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
loop = policy.new_event_loop()
self.assertIsInstance(loop, asyncio.AbstractEventLoop)
loop.close()
def test_set_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
old_loop = policy.get_event_loop()
self.assertRaises(AssertionError, policy.set_event_loop, object())
loop = policy.new_event_loop()
policy.set_event_loop(loop)
self.assertIs(loop, policy.get_event_loop())
self.assertIsNot(old_loop, policy.get_event_loop())
loop.close()
old_loop.close()
def test_get_event_loop_policy(self):
policy = asyncio.get_event_loop_policy()
self.assertIsInstance(policy, asyncio.AbstractEventLoopPolicy)
self.assertIs(policy, asyncio.get_event_loop_policy())
def test_set_event_loop_policy(self):
self.assertRaises(
AssertionError, asyncio.set_event_loop_policy, object())
old_policy = asyncio.get_event_loop_policy()
policy = asyncio.DefaultEventLoopPolicy()
asyncio.set_event_loop_policy(policy)
self.assertIs(policy, asyncio.get_event_loop_policy())
self.assertIsNot(policy, old_policy)
if __name__ == '__main__':
unittest.main()
|
Microvellum/Fluid-Designer
|
win64-vc/2.78/python/lib/test/test_asyncio/test_events.py
|
Python
|
gpl-3.0
| 93,815
|
import networkx as nx
from networkx import is_strongly_regular
class TestDistanceRegular:
def test_is_distance_regular(self):
assert nx.is_distance_regular(nx.icosahedral_graph())
assert nx.is_distance_regular(nx.petersen_graph())
assert nx.is_distance_regular(nx.cubical_graph())
assert nx.is_distance_regular(nx.complete_bipartite_graph(3, 3))
assert nx.is_distance_regular(nx.tetrahedral_graph())
assert nx.is_distance_regular(nx.dodecahedral_graph())
assert nx.is_distance_regular(nx.pappus_graph())
assert nx.is_distance_regular(nx.heawood_graph())
assert nx.is_distance_regular(nx.cycle_graph(3))
# not distance regular
assert not nx.is_distance_regular(nx.path_graph(4))
def test_not_connected(self):
G = nx.cycle_graph(4)
nx.add_cycle(G, [5, 6, 7])
assert not nx.is_distance_regular(G)
def test_global_parameters(self):
b, c = nx.intersection_array(nx.cycle_graph(5))
g = nx.global_parameters(b, c)
assert list(g) == [(0, 0, 2), (1, 0, 1), (1, 1, 0)]
b, c = nx.intersection_array(nx.cycle_graph(3))
g = nx.global_parameters(b, c)
assert list(g) == [(0, 0, 2), (1, 1, 0)]
def test_intersection_array(self):
b, c = nx.intersection_array(nx.cycle_graph(5))
assert b == [2, 1]
assert c == [1, 1]
b, c = nx.intersection_array(nx.dodecahedral_graph())
assert b == [3, 2, 1, 1, 1]
assert c == [1, 1, 1, 2, 3]
b, c = nx.intersection_array(nx.icosahedral_graph())
assert b == [5, 2, 1]
assert c == [1, 2, 5]
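# Informal note on the numbers asserted above (an illustration, not part of the
# original test module): for a distance-regular graph, b[i] counts the neighbours
# of a vertex at distance i that lie one step further from the base vertex, and
# c[i] counts those one step closer. In the 5-cycle every vertex has degree 2, so
# b == [2, 1] and c == [1, 1], exactly as test_intersection_array checks.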
class TestStronglyRegular:
"""Unit tests for the :func:`~networkx.is_strongly_regular`
function.
"""
def test_cycle_graph(self):
"""Tests that the cycle graph on five vertices is strongly
regular.
"""
G = nx.cycle_graph(5)
assert is_strongly_regular(G)
def test_petersen_graph(self):
"""Tests that the Petersen graph is strongly regular."""
G = nx.petersen_graph()
assert is_strongly_regular(G)
def test_path_graph(self):
"""Tests that the path graph is not strongly regular."""
G = nx.path_graph(4)
assert not is_strongly_regular(G)
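# Informal background for the assertions above (not part of the original file):
# the 5-cycle is the strongly regular graph srg(5, 2, 0, 1) and the Petersen graph
# is srg(10, 3, 0, 1), while the path graph P4 is not even regular, so it cannot
# be strongly regular.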
|
SpaceGroupUCL/qgisSpaceSyntaxToolkit
|
esstoolkit/external/networkx/algorithms/tests/test_distance_regular.py
|
Python
|
gpl-3.0
| 2,312
|
#
# This code is part of Ansible, but is an independent component.
#
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2017 Red Hat, Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import re
import collections
from ansible.module_utils.basic import env_fallback, return_values
from ansible.module_utils.network_common import to_list, ComplexList
from ansible.module_utils.connection import exec_command
from ansible.module_utils.six import iteritems
from ansible.module_utils.urls import fetch_url
_DEVICE_CONNECTION = None
nxos_argument_spec = {
'host': dict(),
'port': dict(type='int'),
'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE'])),
'use_ssl': dict(type='bool'),
'validate_certs': dict(type='bool'),
'timeout': dict(type='int'),
'provider': dict(type='dict'),
'transport': dict(choices=['cli', 'nxapi'])
}
# Add argument's default value here
ARGS_DEFAULT_VALUE = {
'timeout': 10
}
def check_args(module, warnings):
provider = module.params['provider'] or {}
for key in nxos_argument_spec:
if key not in ['provider', 'transport'] and module.params[key]:
warnings.append('argument %s has been deprecated and will be '
'removed in a future version' % key)
# set argument's default value if not provided in input
# This is done to avoid unwanted argument deprecation warning
# in case argument is not given as input (outside provider).
for key in ARGS_DEFAULT_VALUE:
if not module.params.get(key, None):
module.params[key] = ARGS_DEFAULT_VALUE[key]
if provider:
for param in ('password',):
if provider.get(param):
module.no_log_values.update(return_values(provider[param]))
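# Illustrative example (not taken from the original source): if a playbook supplies
# no 'timeout' at all, module.params['timeout'] is still 10 once check_args()
# returns, and no deprecation warning is recorded for it because the warning loop
# above runs before the default is filled in.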
def load_params(module):
provider = module.params.get('provider') or dict()
for key, value in iteritems(provider):
if key in nxos_argument_spec:
if module.params.get(key) is None and value is not None:
module.params[key] = value
def get_connection(module):
global _DEVICE_CONNECTION
if not _DEVICE_CONNECTION:
load_params(module)
transport = module.params['transport']
provider_transport = (module.params['provider'] or {}).get('transport')
if 'nxapi' in (transport, provider_transport):
conn = Nxapi(module)
else:
conn = Cli(module)
_DEVICE_CONNECTION = conn
return _DEVICE_CONNECTION
class Cli:
def __init__(self, module):
self._module = module
self._device_configs = {}
def exec_command(self, command):
if isinstance(command, dict):
command = self._module.jsonify(command)
return exec_command(self._module, command)
def get_config(self, flags=[]):
"""Retrieves the current config from the device or cache
"""
cmd = 'show running-config '
cmd += ' '.join(flags)
cmd = cmd.strip()
try:
return self._device_configs[cmd]
except KeyError:
rc, out, err = self.exec_command(cmd)
if rc != 0:
self._module.fail_json(msg=err)
cfg = str(out).strip()
self._device_configs[cmd] = cfg
return cfg
def run_commands(self, commands, check_rc=True):
"""Run list of commands on remote device and return results
"""
responses = list()
for item in to_list(commands):
if item['output'] == 'json' and not is_json(item['command']):
cmd = '%s | json' % item['command']
elif item['output'] == 'text' and is_json(item['command']):
cmd = item['command'].split('|')[0]
else:
cmd = item['command']
rc, out, err = self.exec_command(cmd)
if check_rc and rc != 0:
self._module.fail_json(msg=err)
try:
out = self._module.from_json(out)
except ValueError:
out = str(out).strip()
responses.append(out)
return responses
def load_config(self, config):
"""Sends configuration commands to the remote device
"""
rc, out, err = self.exec_command('configure')
if rc != 0:
self._module.fail_json(msg='unable to enter configuration mode', output=err)
for cmd in config:
rc, out, err = self.exec_command(cmd)
if rc != 0:
self._module.fail_json(msg=err)
self.exec_command('end')
class Nxapi:
OUTPUT_TO_COMMAND_TYPE = {
'text': 'cli_show_ascii',
'json': 'cli_show',
'bash': 'bash',
'config': 'cli_conf'
}
def __init__(self, module):
self._module = module
self._nxapi_auth = None
self._device_configs = {}
self._module.params['url_username'] = self._module.params['username']
self._module.params['url_password'] = self._module.params['password']
host = self._module.params['host']
port = self._module.params['port']
if self._module.params['use_ssl']:
proto = 'https'
port = port or 443
else:
proto = 'http'
port = port or 80
self._url = '%s://%s:%s/ins' % (proto, host, port)
def _error(self, msg, **kwargs):
self._nxapi_auth = None
if 'url' not in kwargs:
kwargs['url'] = self._url
self._module.fail_json(msg=msg, **kwargs)
def _request_builder(self, commands, output, version='1.0', chunk='0', sid=None):
"""Encodes a NXAPI JSON request message
"""
try:
command_type = self.OUTPUT_TO_COMMAND_TYPE[output]
except KeyError:
msg = 'invalid format, received %s, expected one of %s' % \
(output, ','.join(self.OUTPUT_TO_COMMAND_TYPE.keys()))
self._error(msg=msg)
if isinstance(commands, (list, set, tuple)):
commands = ' ;'.join(commands)
msg = {
'version': version,
'type': command_type,
'chunk': chunk,
'sid': sid,
'input': commands,
'output_format': 'json'
}
return dict(ins_api=msg)
def send_request(self, commands, output='text', check_status=True):
# only 10 show commands can be encoded in each request
# message sent to the remote device
if output != 'config':
commands = collections.deque(to_list(commands))
stack = list()
requests = list()
while commands:
stack.append(commands.popleft())
if len(stack) == 10:
body = self._request_builder(stack, output)
data = self._module.jsonify(body)
requests.append(data)
stack = list()
if stack:
body = self._request_builder(stack, output)
data = self._module.jsonify(body)
requests.append(data)
else:
body = self._request_builder(commands, 'config')
requests = [self._module.jsonify(body)]
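# Illustrative arithmetic for the batching above (not taken from the original
# source): 25 'show' commands would be split into three request payloads carrying
# 10, 10 and 5 commands respectively.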
headers = {'Content-Type': 'application/json'}
result = list()
timeout = self._module.params['timeout']
for req in requests:
if self._nxapi_auth:
headers['Cookie'] = self._nxapi_auth
response, headers = fetch_url(
self._module, self._url, data=req, headers=headers,
timeout=timeout, method='POST'
)
self._nxapi_auth = headers.get('set-cookie')
if headers['status'] != 200:
self._error(**headers)
try:
response = self._module.from_json(response.read())
except ValueError:
self._module.fail_json(msg='unable to parse response')
output = response['ins_api']['outputs']['output']
for item in to_list(output):
if check_status and item['code'] != '200':
self._error(output=output, **item)
elif 'body' in item:
result.append(item['body'])
#else:
# error in command but since check_status is disabled
# silently drop it.
#result.append(item['msg'])
return result
def get_config(self, flags=[]):
"""Retrieves the current config from the device or cache
"""
cmd = 'show running-config '
cmd += ' '.join(flags)
cmd = cmd.strip()
try:
return self._device_configs[cmd]
except KeyError:
out = self.send_request(cmd)
cfg = str(out[0]).strip()
self._device_configs[cmd] = cfg
return cfg
def run_commands(self, commands, check_rc=True):
"""Run list of commands on remote device and return results
"""
output = None
queue = list()
responses = list()
_send = lambda commands, output: self.send_request(commands, output, check_status=check_rc)
for item in to_list(commands):
if is_json(item['command']):
item['command'] = str(item['command']).split('|')[0]
item['output'] = 'json'
if all((output == 'json', item['output'] == 'text')) or all((output == 'text', item['output'] == 'json')):
responses.extend(_send(queue, output))
queue = list()
output = item['output'] or 'json'
queue.append(item['command'])
if queue:
responses.extend(_send(queue, output))
return responses
def load_config(self, commands):
"""Sends the ordered set of commands to the device
"""
commands = to_list(commands)
self.send_request(commands, output='config')
is_json = lambda x: str(x).endswith('| json')
is_text = lambda x: not is_json(x)
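# Quick illustration of the two helpers above (hypothetical values, not from the
# original module): is_json('show version | json') is True while
# is_text('show version') is True, which is how to_command() below decides the
# output format for each command.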
def is_nxapi(module):
transport = module.params['transport']
provider_transport = (module.params['provider'] or {}).get('transport')
return 'nxapi' in (transport, provider_transport)
def to_command(module, commands):
if is_nxapi(module):
default_output = 'json'
else:
default_output = 'text'
transform = ComplexList(dict(
command=dict(key=True),
output=dict(default=default_output),
prompt=dict(),
answer=dict()
), module)
commands = transform(to_list(commands))
for index, item in enumerate(commands):
if is_json(item['command']):
item['output'] = 'json'
elif is_text(item['command']):
item['output'] = 'text'
return commands
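# A rough sketch of what to_command() yields (an assumption for illustration, not
# taken from the original module): passing 'show version | json' produces a single
# entry whose 'command' is that string and whose 'output' is forced to 'json' by
# the is_json() check above, regardless of the transport's default output.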
def get_config(module, flags=[]):
conn = get_connection(module)
return conn.get_config(flags)
def run_commands(module, commands, check_rc=True):
conn = get_connection(module)
return conn.run_commands(to_command(module, commands), check_rc)
def load_config(module, config):
conn = get_connection(module)
return conn.load_config(config)
|
cherusk/ansible
|
lib/ansible/module_utils/nxos.py
|
Python
|
gpl-3.0
| 12,867
|
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action import ActionBase
from ansible.utils.vars import merge_hash
class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=None):
# individual modules might disagree, but as the generic action plugin we assume support at this point.
self._supports_check_mode = True
self._supports_async = True
results = super(ActionModule, self).run(tmp, task_vars)
if not results.get('skipped'):
if results.get('invocation', {}).get('module_args'):
# avoid passing to modules in case of no_log
# should not be set anymore but here for backwards compatibility
del results['invocation']['module_args']
# do work!
results = merge_hash(results, self._execute_module(tmp=tmp, task_vars=task_vars, wrap_async=self._task.async))
# hack to keep --verbose from showing all the setup module results
# moved from setup module as now we filter out all _ansible_ from results
if self._task.action == 'setup':
results['_ansible_verbose_override'] = True
return results
|
QijunPan/ansible
|
lib/ansible/plugins/action/normal.py
|
Python
|
gpl-3.0
| 1,962
|
#
# Copyright 2010-2012 Fabric Engine Inc. All rights reserved.
#
import fabric
client = fabric.createClient()
cv = client.MR.createConstValue("Size", 100)
ago = client.KLC.createArrayGeneratorOperator("foo.kl", "operator foo(io Float64 output, Size index) { output = Float64(index+1); }", "foo")
ag = client.MR.createArrayGenerator(cv, ago)
mo = client.KLC.createArrayMapOperator("map.kl", "operator map(Float64 input, io Float64 output, Size index, Size count, Scalar multiplier) { output = input * multiplier; }", "map")
sv = client.MR.createConstValue("Scalar", 3.14)
m = client.MR.createArrayMap(ag, mo, sv)
ro = client.KLC.createReduceOperator("reduce.kl", "operator reduce(Float64 input, io Float64 result, Size index, Size count, Scalar multiplier) { result += input * multiplier; }", "reduce")
sv = client.MR.createConstValue("Scalar", 3.14)
r = client.MR.createReduce(m, ro, sv)
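# Back-of-the-envelope check (not output captured from Fabric Engine): the
# generator yields 1..100, the map multiplies each value by 3.14 and the reduce
# multiplies by another 3.14 while summing, so r.produce() should print roughly
# 3.14 * 3.14 * 5050 = 49790.98.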
print(r.produce())
client.close()
|
ghostx2013/FabricEngine_Backup
|
Native/Test/Python/mr-sum.py
|
Python
|
agpl-3.0
| 932
|
# -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2013 SF Isle of Man Limited
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, see <http://www.gnu.org/licenses/>.
import pybossa.util
from default import Test, db
from mock import patch
from datetime import datetime, timedelta
import dateutil.parser
import calendar
import time
import csv
import tempfile
class TestWebModule(Test):
def setUp(self):
super(TestWebModule, self).setUp()
with self.flask_app.app_context():
self.create()
def test_jsonpify(self):
"""Test jsonpify decorator works."""
res = self.app.get('/api/app/1?callback=mycallback')
err_msg = "mycallback should be included in the response"
assert "mycallback" in res.data, err_msg
err_msg = "Status code should be 200"
assert res.status_code == 200, err_msg
def test_cors(self):
"""Test CORS decorator works."""
res = self.app.get('/api/app/1')
err_msg = "CORS should be enabled"
print res.headers
assert res.headers['Access-Control-Allow-Origin'] == '*', err_msg
methods = ['PUT', 'HEAD', 'DELETE', 'OPTIONS', 'GET']
for m in methods:
assert m in res.headers['Access-Control-Allow-Methods'], err_msg
assert res.headers['Access-Control-Max-Age'] == '21600', err_msg
headers = 'CONTENT-TYPE, AUTHORIZATION'
assert res.headers['Access-Control-Allow-Headers'] == headers, err_msg
def test_pretty_date(self):
"""Test pretty_date works."""
now = datetime.now()
pd = pybossa.util.pretty_date()
assert pd == "just now", pd
pd = pybossa.util.pretty_date(now.isoformat())
assert pd == "just now", pd
pd = pybossa.util.pretty_date(calendar.timegm(time.gmtime()))
assert pd == "just now", pd
d = now + timedelta(days=10)
pd = pybossa.util.pretty_date(d.isoformat())
assert pd == '', pd
d = now - timedelta(seconds=10)
pd = pybossa.util.pretty_date(d.isoformat())
assert pd == '10 seconds ago', pd
d = now - timedelta(minutes=1)
pd = pybossa.util.pretty_date(d.isoformat())
assert pd == 'a minute ago', pd
d = now - timedelta(minutes=2)
pd = pybossa.util.pretty_date(d.isoformat())
assert pd == '2 minutes ago', pd
d = now - timedelta(hours=1)
pd = pybossa.util.pretty_date(d.isoformat())
assert pd == 'an hour ago', pd
d = now - timedelta(hours=5)
pd = pybossa.util.pretty_date(d.isoformat())
assert pd == '5 hours ago', pd
d = now - timedelta(days=1)
pd = pybossa.util.pretty_date(d.isoformat())
assert pd == 'Yesterday', pd
d = now - timedelta(days=5)
pd = pybossa.util.pretty_date(d.isoformat())
assert pd == '5 days ago', pd
d = now - timedelta(weeks=1)
pd = pybossa.util.pretty_date(d.isoformat())
assert pd == '1 weeks ago', pd
d = now - timedelta(days=32)
pd = pybossa.util.pretty_date(d.isoformat())
assert pd == '1 month ago', pd
d = now - timedelta(days=62)
pd = pybossa.util.pretty_date(d.isoformat())
assert pd == '2 months ago', pd
d = now - timedelta(days=366)
pd = pybossa.util.pretty_date(d.isoformat())
assert pd == '1 year ago', pd
d = now - timedelta(days=766)
pd = pybossa.util.pretty_date(d.isoformat())
assert pd == '2 years ago', pd
def test_pagination(self):
"""Test Class Pagination works."""
page = 1
per_page = 5
total_count = 10
p = pybossa.util.Pagination(page, per_page, total_count)
assert p.page == page, p.page
assert p.per_page == per_page, p.per_page
assert p.total_count == total_count, p.total_count
err_msg = "It should return two pages"
assert p.pages == 2, err_msg
p.total_count = 7
assert p.pages == 2, err_msg
p.total_count = 10
err_msg = "It should return False"
assert p.has_prev is False, err_msg
err_msg = "It should return True"
assert p.has_next is True, err_msg
p.page = 2
assert p.has_prev is True, err_msg
err_msg = "It should return False"
assert p.has_next is False, err_msg
for i in p.iter_pages():
err_msg = "It should return the page: %s" % page
assert i == page, err_msg
page += 1
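# A short note on the arithmetic above (an assumption about the implementation,
# not taken from PyBossa itself): Pagination.pages is presumably the ceiling of
# total_count / per_page, so both 10 and 7 items at 5 per page give 2 pages, and
# has_prev / has_next simply compare the current page against 1 and pages.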
def test_unicode_csv_reader(self):
"""Test unicode_csv_reader works."""
fake_csv = ['one, two, three']
err_msg = "Each cell should be encoded as Unicode"
for row in pybossa.util.unicode_csv_reader(fake_csv):
for item in row:
assert type(item) == unicode, err_msg
def test_UnicodeWriter(self):
"""Test UnicodeWriter class works."""
tmp = tempfile.NamedTemporaryFile()
uw = pybossa.util.UnicodeWriter(tmp)
fake_csv = ['one, two, three, {"i": 1}']
for row in csv.reader(fake_csv):
# change it for a dict
row[3] = dict(i=1)
uw.writerow(row)
tmp.seek(0)
err_msg = "It should be the same CSV content"
with open(tmp.name, 'rb') as f:
reader = csv.reader(f)
for row in reader:
for item in row:
assert item in fake_csv[0], err_msg
|
proyectos-analizo-info/pybossa-analizo-info
|
test/test_util.py
|
Python
|
agpl-3.0
| 6,074
|
""" parser to generate SAX events from a DOM tree
$Date: 2002/05/02 10:15:04 $ by $Author: loewis $
"""
from pyxml.sax._exceptions import SAXNotSupportedException, SAXNotRecognizedException
from pyxml.sax.xmlreader import XMLReader, AttributesNSImpl, AttributesImpl
from pyxml.sax.saxlib import LexicalHandler, DeclHandler
from pyxml.sax import handler
from pyxml.dom import Node, XMLNS_NAMESPACE
XMLNS_NS = XMLNS_NAMESPACE
class Dom2SaxParser(XMLReader):
""" Generate SAX events from a DOM tree
handle _ feature_namespaces
_ feature_namespace_prefixes,
_ property_lexical_handler
_ property_declaration_handler (not yet fully)
differences with standard sax parser:
_ no error handling (we start from a dom tree !!)
_ no locator (same reason)
"""
def __init__(self):
XMLReader.__init__(self)
self._lex_handler = LexicalHandler()
self._decl_handler = DeclHandler()
self._ns = 0
self._ns_prfx = 1
self._parsing = 0
## properties and features ##################################################
def getFeature(self, name):
if name == handler.feature_namespaces:
return self._ns
elif name == handler.feature_namespace_prefixes:
return self._ns_prfx
raise SAXNotRecognizedException("Feature '%s' not recognized"%name)
def setFeature(self, name, state):
if self._parsing:
raise SAXNotSupportedException("Cannot set features while parsing")
if name == handler.feature_namespaces:
self._ns = state
elif name == handler.feature_namespace_prefixes:
self._ns_prfx = state
else:
raise SAXNotRecognizedException("Feature '%s' not recognized"%name)
def getProperty(self, name):
if name == handler.property_lexical_handler:
return self._lex_handler
if name == handler.property_declaration_handler:
return self._decl_handler
raise SAXNotRecognizedException("Property '%s' not recognized"%name)
def setProperty(self, name, value):
if self._parsing:
raise SAXNotSupportedException("Cannot set properties while parsing")
if name == handler.property_lexical_handler:
self._lex_handler = value
elif name == handler.property_declaration_handler:
self._decl_handler = value
else:
raise SAXNotRecognizedException("Property '%s' not recognized"%name)
## parsing ################################################################
def parse(self, dom):
if self._parsing:
raise SAXNotSupportedException("Ask for parse while parsing")
self._parsing = 1
if self._ns: self._element_ = self._element_ns
else: self._element_ = self._element
self._from_dom(dom)
self._parsing = 0
## private #################################################################
def _from_dom(self, n):
while n:
type = n.nodeType
if type == Node.ELEMENT_NODE:
self._element_(n)
elif type == Node.TEXT_NODE:
self._cont_handler.characters(n.data)
elif type == Node.PROCESSING_INSTRUCTION_NODE:
self._cont_handler.processingInstruction(n.target, n.data)
elif type == Node.DOCUMENT_NODE:
self._cont_handler.startDocument()
self._from_dom(n.firstChild)
self._cont_handler.endDocument()
elif type == Node.DOCUMENT_FRAGMENT_NODE:
for n in n.childNodes:
self._cont_handler.startDocument()
self._from_dom(n.firstChild)
self._cont_handler.endDocument()
elif type == Node.CDATA_SECTION_NODE:
self._lex_handler.startCDATA()
self._cont_handler.characters(n.data)
self._lex_handler.endCDATA()
elif type == Node.COMMENT_NODE:
self._lex_handler.comment(n.data)
elif type == Node.DOCUMENT_TYPE_NODE:
self._lex_handler.startDTD(n.name, n.publicId, n.systemId)
for i in range(n.entities.length):
e = n.entities.item(i)
if e.publicId or e.systemId:
self._decl_handler.externalEntityDecl(
e.notationName, e.publicId, e.systemId)
                else:
                    # internal entity: report its name and replacement value
                    self._decl_handler.internalEntityDecl(
                        e.name, e.value)
self._lex_handler.endDTD()
elif type == Node.ENTITY_REFERENCE_NODE:
self._lex_handler.startEntity(n.nodeName)
self._from_dom(n.firstChild)
self._lex_handler.endEntity(n.nodeName)
#elif type == Node.ENTITY_NODE:
#elif type == Node.NOTATION_NODE:
n = n.nextSibling
def _element(self, n):
""" handle an ElementNode without NS interface"""
## convert DOM namedNodeMap to SAX attributes
nnm = n.attributes
attrs = {}
for a in nnm.values():
attrs[a.nodeName] = a.value
## handle element
name = n.nodeName
self._cont_handler.startElement(name, AttributesImpl(attrs))
self._from_dom(n.firstChild)
self._cont_handler.endElement(name)
def _element_ns(self, n):
""" handle an ElementNode with NS interface"""
## convert DOM namedNodeMap to SAX attributes NS
prefix_list = []
nnm = n.attributes
attrs, qnames = {}, {}
for a in nnm.values():
a_uri = a.namespaceURI
if a_uri == XMLNS_NS:
prefix, val = a.localName, a.value
self._cont_handler.startPrefixMapping(prefix, val)
prefix_list.append(prefix)
if self._ns_prfx:
name = (a_uri, prefix)
attrs[name] = val
qnames[name] = a.nodeName
else:
name = (a_uri, a.localName)
attrs[name] = a.value
qnames[name] = a.nodeName
## handle element NS
name = (n.namespaceURI, n.localName)
self._cont_handler.startElementNS(name, n.nodeName,
AttributesNSImpl(attrs, qnames))
self._from_dom(n.firstChild)
self._cont_handler.endElementNS(name, n.nodeName)
prefix_list.reverse()
map(self._cont_handler.endPrefixMapping, prefix_list)
## full sax handler, print each event to output ################################
class PrintSaxHandler:
## content handler #########################################################
def setDocumentLocator(self, locator):
print 'setDocumentLocator', locator
def startDocument(self):
print 'startDocument'
def endDocument(self):
print 'endDocument'
def startElement(self, name, attrs):
print 'startElement', name
for key, val in attrs.items():
print 'attribute', key, val
def endElement (self, name):
print 'endElement', name
def startElementNS(self, name, qname, attrs):
print 'startElementNS', name, qname
for key, val in attrs.items():
print 'attribute', key, val
def endElementNS (self, name, qname):
print 'endElementNS', name, qname
def startPrefixMapping(self, prefix, uri):
print 'startPrefixMapping', prefix, uri
def endPrefixMapping(self, prefix):
print 'endPrefixMapping', prefix
def processingInstruction(self, target, data):
print 'processingInstruction', target, data
def ignorableWhitespace(self, whitespace):
print 'ignorableWhitespace', whitespace
def characters(self, ch):
print 'characters', ch.encode('iso-8859-15')
## lexical handler #########################################################
def xmlDecl(self, version, encoding, standalone):
print 'xmlDecl', version, encoding, standalone
def comment(self, machin):
print 'comment', machin.encode('UTF-8')
def startEntity(self, name):
print 'startEntity', name
def endEntity(self, name):
print 'endEntity', name
def startCDATA(self):
print 'startCDATA'
def endCDATA(self):
print 'endCDATA'
def startDTD(self, name, public_id, system_id):
print 'startDTD', name, public_id, system_id
def endDTD(self):
print 'endDTD'
## DTD decl handler ########################################################
def attributeDecl(self, elem_name, attr_name, type, value_def, value):
print 'attributeDecl', elem_name, attr_name, type, value_def, value
def elementDecl(self, elem_name, content_model):
print 'elementDecl', elem_name, content_model
def internalEntityDecl(self, name, value):
print 'internalEntityDecl', name, value.encode('UTF-8')
def externalEntityDecl(self, name, public_id, system_id):
print 'externalEntityDecl', name, public_id, system_id
# Test ########################################################################
def _parse(parser, doc, features, properties):
import time
h = PrintSaxHandler()
parser.setContentHandler(h)
print '-'*80
print parser.__class__
print
for f,val in features:
try:
parser.setFeature(f, val)
print f, val
except Exception, e:
print e
for p, val in properties:
try:
if val:
parser.setProperty(p, h)
print p,val
except Exception, e:
print e
print '*'*80
t = time.time()
parser.parse(doc)
print '*'*80
print 'TEMPS:', time.time() - t
print
if __name__ == '__main__':
import sys
from pyxml.sax import make_parser
from pyxml.dom.ext.reader import Sax2
from pyxml.dom.ext import PrettyPrint
from pyxml.sax.handler import feature_namespaces,\
feature_namespace_prefixes, property_lexical_handler,\
property_declaration_handler
f1 = feature_namespaces
f2 = feature_namespace_prefixes
p1 = property_lexical_handler
p2 = property_declaration_handler
file = sys.argv[1]
r = Sax2.Reader()
f = open(file)
doc = r.fromStream(f)
print 'Initial document', doc, doc.__class__
PrettyPrint(doc)
for (val1,val2,val3,val4) in ((0,0,0,0),(0,1,1,1),(1,0,0,0),(1,1,1,1)):
for p,d in ((Dom2SaxParser(), doc),
(make_parser(['pyxml.sax.drivers2.drv_pyexpat']), f),
(make_parser(['pyxml.sax.drivers2.drv_xmlproc']), f)):
if not d is doc:
d = open(file)
_parse(p, d, ((f1, val1), (f2,val2)), ((p1,val3),(p2,val4)))
f.close()
|
selfcommit/gaedav
|
pyxml/dom/ext/Dom2Sax.py
|
Python
|
lgpl-2.1
| 11,076
|
# -*- encoding: utf-8 -*-
# pilas engine: an engine for making video games
#
# Copyright 2010-2014 - Hugo Ruscitti
# License: LGPLv3 (see http://www.gnu.org/licenses/lgpl.html)
#
# Website - http://www.pilas-engine.com.ar
import math
import Box2D as box2d
from pilasengine.depurador.modo import ModoDepurador
from pilasengine import colores
PPM = 30
class ModoFisica(ModoDepurador):
def __init__(self, pilas, depurador):
ModoDepurador.__init__(self, pilas, depurador)
def realizar_dibujado(self, painter):
grosor = 1
cuerpos = self.pilas.fisica.mundo.bodies
painter.save()
self.pilas.camara.aplicar_transformaciones_completas(painter)
for cuerpo in cuerpos:
for fixture in cuerpo:
                # cuerpo.type == 0 → static
                # cuerpo.type == 1 → kinematic
                # cuerpo.type == 2 → dynamic
if fixture.userData['sensor']:
if cuerpo.awake:
self._definir_trazo_verde(painter)
else:
self._definir_trazo_verde_oscuro(painter)
else:
if cuerpo.awake:
self._definir_trazo_blanco(painter)
else:
self._definir_trazo_gris(painter)
shape = fixture.shape
if isinstance(shape, box2d.b2PolygonShape):
vertices = [cuerpo.transform * v * PPM for v in shape.vertices]
#vertices = [pilas.escena_actual().camara.desplazar(v) for v in vertices]
#self._poligono(vertices, color=colores.negro, grosor=grosor+2, cerrado=True)
self._poligono(painter, vertices, color=colores.blanco, grosor=grosor, cerrado=True)
elif isinstance(shape, box2d.b2CircleShape):
(x, y) = cuerpo.transform * shape.pos * PPM
#(x, y) = pilas.escena_actual().camara.desplazar(cuerpo.transform * shape.pos * PPM)
                    # Draw the angle (orientation) of the circle.
self._angulo(painter, x, y, - math.degrees(fixture.body.angle), shape.radius * PPM)
#lienzo.angulo(motor, x, y, - math.degrees(fixture.body.angle), shape.radius * PPM, pilas.colores.blanco, grosor=grosor)
                    # Draw the outline of the circle.
self._circulo(painter, x, y, shape.radius * PPM)
#lienzo.circulo(motor, x, y, shape.radius * PPM, pilas.colores.negro, grosor=grosor+2)
#lienzo.circulo(motor, x, y, shape.radius * PPM, pilas.colores.blanco, grosor=grosor)
else:
                    # TODO: implement the "edge" and "loop" shape types.
raise Exception("No puedo identificar el tipo de figura.")
painter.restore()
def _poligono(self, painter, puntos, color=colores.negro, grosor=1, cerrado=False):
x, y = puntos[0]
if cerrado:
puntos.append((x, y))
for p in puntos[1:]:
nuevo_x, nuevo_y = p
self._linea(painter, x, y, nuevo_x, nuevo_y)
x, y = nuevo_x, nuevo_y
def _linea(self, painter, x0, y0, x1, y1):
x0, y0 = self.hacer_coordenada_pantalla_absoluta(x0, y0)
x1, y1 = self.hacer_coordenada_pantalla_absoluta(x1, y1)
painter.drawLine(x0, y0, x1, y1)
def hacer_coordenada_pantalla_absoluta(self, x, y):
dx = -self.pilas.camara.x
dy = self.pilas.camara.y
return (x + dx, dy - y)
def _angulo(self, painter, x, y, angulo, radio):
angulo_en_radianes = math.radians(-angulo)
dx = math.cos(angulo_en_radianes) * radio
dy = math.sin(angulo_en_radianes) * radio
self._linea(painter, x, y, x + dx, y + dy)
def _circulo(self, painter, x, y, radio):
x, y = self.hacer_coordenada_pantalla_absoluta(x, y)
#r, g, b, a = color.obtener_componentes()
#color = QtGui.QColor(r, g, b)
#pen = QtGui.QPen(color, grosor)
#painter.setPen(pen)
painter.drawEllipse(x-radio+1, y-radio+1, radio*2, radio*2)
|
hgdeoro/pilas
|
pilasengine/depurador/modo_fisica.py
|
Python
|
lgpl-3.0
| 4,211
|
'''
A collection of tools to assist in analyzing encrypted blobs of data
through chosen plaintext attacks.
Copyright (C) 2011-2012 Virtual Security Research, LLC
Author: Timothy D. Morgan
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License, version 3,
as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import sys
from buffertools import blockWiseDiff
# Chosen plaintext attack on ECB encryption
#
# The encryptionOracle should accept a single string argument,
# encrypt that argument as part of a larger (unknown) plaintext string,
# and then return the ciphertext.
#
# This function will then return a dictionary with information about the
# algorithm and chosen string attributes, including:
# block_size - the algorithm's block size
# chosen_offset - the chosen string's offset within the plaintext
#  fragment_length - the length of the chosen string fragment from the
#                    chosen_offset to the end of its current block
#
def ECB_FindChosenOffset(encryptionOracle):
ret_val = {}
# Guaranteed to have one block boundary on 128 bit block ciphers
chosen_length = 17
chosen = 'O'*chosen_length
base = encryptionOracle(chosen)
chosen = 'X' + 'O'*(chosen_length-1)
test_result = encryptionOracle(chosen)
    # A block-wise diff with a block size of 1 is a byte-wise diff; under ECB,
    # flipping a single chosen byte changes exactly one ciphertext block, so the
    # number of differing bytes equals the cipher's block size.
    different_blocks = blockWiseDiff(1, base, test_result)
    block_size = len(different_blocks)
# Sanity check
different_blocks = blockWiseDiff(block_size, base, test_result)
if different_blocks == None:
sys.stderr.write("ERROR: Block size test yielded undiff-able ciphertexts.\n")
return None
if len(different_blocks) > 1:
sys.stderr.write("ERROR: Block size test yielded multiple altered blocks (not ECB mode?).\n")
return None
for i in range(2,chosen_length):
chosen = 'X'*i + 'O'*(chosen_length-i)
test_result = encryptionOracle(chosen)
different_blocks = blockWiseDiff(block_size, base, test_result)
if different_blocks == None or len(different_blocks) == 0 or len(different_blocks) > 2:
sys.stderr.write("ERROR: Offset detection yielded inconsistent block diffs.\n")
return None
if len(different_blocks) == 2:
break
ret_val['block_size'] = block_size
ret_val['fragment_length'] = i-1
ret_val['chosen_offset'] = max(different_blocks)*block_size - ret_val['fragment_length']
return ret_val
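# A minimal usage sketch, not part of bletchley itself: the oracle below is a
# hypothetical example that assumes PyCryptodome is available, with made-up
# prefix/suffix standing in for the unknown plaintext around the chosen string.
if __name__ == '__main__':
    from Crypto.Cipher import AES
    _KEY = b'0123456789abcdef'
    _PREFIX = b'session=d41d8cd98f00;user='  # hypothetical unknown prefix
    _SUFFIX = b';role=user'                  # hypothetical unknown suffix
    def toy_oracle(chosen):
        # Embed the attacker-controlled string, zero-pad to the AES block size,
        # and encrypt the whole buffer in ECB mode.
        data = _PREFIX + chosen.encode('utf-8') + _SUFFIX
        data += b'\x00' * (-len(data) % 16)
        return AES.new(_KEY, AES.MODE_ECB).encrypt(data)
    info = ECB_FindChosenOffset(toy_oracle)
    # Expected: info['block_size'] == 16 and info['chosen_offset'] == len(_PREFIX)
    print(info)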
|
umeboshi2/bletchley
|
lib/bletchley/chosen.py
|
Python
|
lgpl-3.0
| 2,851
|
"""helper for quick cross-platform makefile for sphinx
TODO: this was hacked up really quickly, could use lots of work.
"""
#===============================================================
#imports
#===============================================================
#core
import logging; log = logging.getLogger(__name__)
import os,sys
from string import Template
import subprocess
#pkg
#local
__all__ = [
"SphinxMaker",
]
#===============================================================
#misc helpers
#===============================================================
def sub(fmt, **kwds):
if not kwds:
kwds = globals()
return Template(fmt).substitute(**kwds)
#===============================================================
#fs helpers
#===============================================================
joinpath = os.path.join
def abspath(*args):
return os.path.abspath(joinpath(*args))
if hasattr(os.path, "realpath"):
def realpath(*args):
return os.path.realpath(joinpath(*args))
else:
#probably windows - fake it best we can
def realpath(*args):
return os.path.normcase(os.path.abspath(joinpath(*args)))
def pathdir(path):
return os.path.split(path)[0]
def clearpath(path):
"recursively remove all contents of dir, but leave dir"
for root, dirs, files in os.walk(path, topdown=False):
for name in files:
os.remove(joinpath(root, name))
for name in dirs:
os.rmdir(joinpath(root, name))
def rmpath(path):
"drecursively delete path"
if os.path.exists(path):
if os.path.isdir(path):
clearpath(path)
os.rmdir(path)
else:
os.remove(path)
def ensuredirs(path):
"ensure specified directory & all parents exist"
if not os.path.isdir(path):
os.makedirs(path)
#===============================================================
#main class
#===============================================================
class SphinxMaker(object):
#===============================================================
#class attrs
#===============================================================
# You can subclass these variables
SPHINXOPTS = []
SPHINXBUILD = "sphinx-build"
PAPER = "letter"
SERVEHTML_PORT = 8000
# Paths
BUILD = "_build"
SOURCE = "."
#internal opts
PAPEROPT_a4 = ["-D","latex_paper_size=a4"]
PAPEROPT_letter = ["-D","latex_paper_size=letter"]
#: list of attrs to check os.environ for overriddes.
env_vars = [ "SPHINXOPTS", "SPHINXBUILD", "PAPER", "SERVEHTML_PORT", "BUILD", "SOURCE" ]
#===============================================================
#instance attrs
#===============================================================
root_dir = None
conf_file = None
conf = None
#===============================================================
#frontend
#===============================================================
def __init__(self, root_dir=None, **kwds):
#FIXME: this may not be properly flexible.
if root_dir is None:
root_dir = joinpath(sys.modules["__main__"].__file__, os.pardir)
self.root_dir = abspath(root_dir)
self.conf_file = joinpath(self.root_dir, "conf.py")
if not os.path.exists(self.conf_file):
raise RuntimeError, "conf file not found in root: %r" % (self.root_dir)
#check environment for overrides, as well as constructor
for key in self.env_vars:
value = kwds.pop(key, None)
value = os.environ.get(key, value)
if value is not None:
t = type(getattr(self,key))
#FIXME: this is *real* hacked way to do type conversion
                if isinstance(value, str):
                    if t is int: #for ints, eg SERVEHTML_PORT
                        value = int(value)
                    elif t is list: #for list of arguments, eg SPHINXOPTS
                        #FIXME: should use proper quote escaping logic when we split :(
                        value = value.split(" ")
setattr(self, key, value)
# make all relative paths relative to root dir
for name in ("BUILD", "SOURCE"):
value = getattr(self, name)
if not os.path.isabs(value):
value = abspath(self.root_dir, value)
setattr(self, name, value)
if kwds:
raise TypeError, "unknown keywords: %r" % (kwds,)
@classmethod
def execute(cls, args=None, **kwds):
return cls(**kwds).run(args)
def run(self, args=None):
if args is None:
args = sys.argv[1:]
os.chdir(self.root_dir) #due to relative paths like self.BUILD
for arg in args:
getattr(self,"target_"+arg)()
#===============================================================
#targets
#===============================================================
def target_help(self):
print "Please use \`make <target>' where <target> is one of"
print " clean remove all compiled files"
print " html to make standalone HTML files"
print " servehtml to serve standalone HTML files on port 8000"
# print " pickle to make pickle files"
# print " json to make JSON files"
print " htmlhelp to make HTML files and a HTML help project"
# print " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
# print " changes to make an overview over all changed/added/deprecated items"
# print " linkcheck to check all external links for integrity"
def target_clean(self):
rmpath(self.BUILD)
def target_html(self):
self.build("html")
def target_htmlhelp(self):
self.build("htmlhelp")
def target_servehtml(self):
path = realpath(self.BUILD, "html")
port = self.SERVEHTML_PORT
# try to use paste
try:
from paste.httpserver import serve
from paste.urlparser import StaticURLParser
except ImportError:
# fall back to stdlib server
import SimpleHTTPServer as s
os.chdir(path)
print "Serving files from %r on port %r" % (path, port)
s.BaseHTTPServer.HTTPServer(('',port), s.SimpleHTTPRequestHandler).serve_forever()
else:
serve(StaticURLParser(path), host="0.0.0.0", port=port)
#TODO: support latex, pdf, etc...
##def target_latex(self):
## build("latex")
## print "Run \`make all-pdf' or \`make all-ps' in that directory to" \
## "run these through (pdf)latex."
##
##def target_pdf():
## assert os.name == "posix", "pdf build support not automated for your os"
## build("latex")
## target = BUILD / "latex"
## target.chdir()
## subprocess.call(['make', 'all-pdf'])
## print "pdf built"
#===============================================================
#helpers
#===============================================================
def build(self, name):
BUILD = self.BUILD
ALLSPHINXOPTS = self.get_sphinx_opts()
dt = joinpath(BUILD, "doctrees")
ensuredirs(dt)
target = joinpath(BUILD, name)
ensuredirs(target)
rc = subprocess.call([self.SPHINXBUILD, "-b", name] + ALLSPHINXOPTS + [ target ])
if rc:
print "Sphinx-Build returned error, exiting."
sys.exit(rc)
print "Build finished. The %s pages are in %r." % (name, target,)
return target
def get_paper_opts(self):
return getattr(self,"PAPER_" + self.PAPER, [])
def get_sphinx_opts(self):
return ["-d", joinpath(self.BUILD, "doctrees")] + self.get_paper_opts() + self.SPHINXOPTS + [ self.SOURCE ]
#===============================================================
#eoc
#===============================================================
class ProjectSphinxMaker(SphinxMaker):
"SphinxMaker variant which more usefully integrates into setup.py of a python project"
#TODO: make this read setup.cfg etc to see where build_sphinx *actually* puts things
BUILD = os.path.join(os.pardir, "build", "sphinx")
#===============================================================
#eof
#===============================================================
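# A minimal usage sketch, not part of this module: a hypothetical "make.py"
# placed next to conf.py only needs the two lines below to forward command-line
# targets ("clean", "html", "servehtml", ...) to SphinxMaker, mirroring what a
# Makefile would do.
#     from make_helper import SphinxMaker
#     SphinxMaker.execute()  # targets come from sys.argv[1:]; root_dir defaults
#                            # to the directory of the calling script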
|
loriab/qcdb
|
docs/source/cloud_sptheme/make_helper.py
|
Python
|
lgpl-3.0
| 8,496
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_HTTPError,
compat_parse_qs,
compat_urllib_parse_unquote,
compat_urllib_parse_urlparse,
)
from ..utils import (
dict_get,
ExtractorError,
float_or_none,
int_or_none,
try_get,
strip_or_none,
unified_timestamp,
update_url_query,
url_or_none,
xpath_text,
)
from .periscope import (
PeriscopeBaseIE,
PeriscopeIE,
)
class TwitterBaseIE(InfoExtractor):
_API_BASE = 'https://api.twitter.com/1.1/'
_BASE_REGEX = r'https?://(?:(?:www|m(?:obile)?)\.)?twitter\.com/'
_GUEST_TOKEN = None
def _extract_variant_formats(self, variant, video_id):
variant_url = variant.get('url')
if not variant_url:
return []
elif '.m3u8' in variant_url:
return self._extract_m3u8_formats(
variant_url, video_id, 'mp4', 'm3u8_native',
m3u8_id='hls', fatal=False)
else:
tbr = int_or_none(dict_get(variant, ('bitrate', 'bit_rate')), 1000) or None
f = {
'url': variant_url,
'format_id': 'http' + ('-%d' % tbr if tbr else ''),
'tbr': tbr,
}
self._search_dimensions_in_video_url(f, variant_url)
return [f]
def _extract_formats_from_vmap_url(self, vmap_url, video_id):
vmap_url = url_or_none(vmap_url)
if not vmap_url:
return []
vmap_data = self._download_xml(vmap_url, video_id)
formats = []
urls = []
for video_variant in vmap_data.findall('.//{http://twitter.com/schema/videoVMapV2.xsd}videoVariant'):
video_variant.attrib['url'] = compat_urllib_parse_unquote(
video_variant.attrib['url'])
urls.append(video_variant.attrib['url'])
formats.extend(self._extract_variant_formats(
video_variant.attrib, video_id))
video_url = strip_or_none(xpath_text(vmap_data, './/MediaFile'))
if video_url not in urls:
formats.extend(self._extract_variant_formats({'url': video_url}, video_id))
return formats
@staticmethod
def _search_dimensions_in_video_url(a_format, video_url):
m = re.search(r'/(?P<width>\d+)x(?P<height>\d+)/', video_url)
if m:
a_format.update({
'width': int(m.group('width')),
'height': int(m.group('height')),
})
def _call_api(self, path, video_id, query={}):
headers = {
'Authorization': 'Bearer AAAAAAAAAAAAAAAAAAAAAPYXBAAAAAAACLXUNDekMxqa8h%2F40K4moUkGsoc%3DTYfbDKbT3jJPCEVnMYqilB28NHfOPqkca3qaAxGfsyKCs0wRbw',
}
if not self._GUEST_TOKEN:
self._GUEST_TOKEN = self._download_json(
self._API_BASE + 'guest/activate.json', video_id,
'Downloading guest token', data=b'',
headers=headers)['guest_token']
headers['x-guest-token'] = self._GUEST_TOKEN
try:
return self._download_json(
self._API_BASE + path, video_id, headers=headers, query=query)
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
raise ExtractorError(self._parse_json(
e.cause.read().decode(),
video_id)['errors'][0]['message'], expected=True)
raise
class TwitterCardIE(InfoExtractor):
IE_NAME = 'twitter:card'
_VALID_URL = TwitterBaseIE._BASE_REGEX + r'i/(?:cards/tfw/v1|videos(?:/tweet)?)/(?P<id>\d+)'
_TESTS = [
{
'url': 'https://twitter.com/i/cards/tfw/v1/560070183650213889',
# MD5 checksums are different in different places
'info_dict': {
'id': '560070183650213889',
'ext': 'mp4',
'title': "Twitter - You can now shoot, edit and share video on Twitter. Capture life's most moving moments from your perspective.",
'description': 'md5:18d3e24bb4f6e5007487dd546e53bd96',
'uploader': 'Twitter',
'uploader_id': 'Twitter',
'thumbnail': r're:^https?://.*\.jpg',
'duration': 30.033,
'timestamp': 1422366112,
'upload_date': '20150127',
},
},
{
'url': 'https://twitter.com/i/cards/tfw/v1/623160978427936768',
'md5': '7137eca597f72b9abbe61e5ae0161399',
'info_dict': {
'id': '623160978427936768',
'ext': 'mp4',
'title': "NASA - Fly over Pluto's icy Norgay Mountains and Sputnik Plain in this @NASANewHorizons #PlutoFlyby video.",
'description': "Fly over Pluto's icy Norgay Mountains and Sputnik Plain in this @NASANewHorizons #PlutoFlyby video. https://t.co/BJYgOjSeGA",
'uploader': 'NASA',
'uploader_id': 'NASA',
'timestamp': 1437408129,
'upload_date': '20150720',
},
},
{
'url': 'https://twitter.com/i/cards/tfw/v1/654001591733886977',
'md5': 'b6d9683dd3f48e340ded81c0e917ad46',
'info_dict': {
'id': 'dq4Oj5quskI',
'ext': 'mp4',
'title': 'Ubuntu 11.10 Overview',
'description': 'md5:a831e97fa384863d6e26ce48d1c43376',
'upload_date': '20111013',
'uploader': 'OMG! UBUNTU!',
'uploader_id': 'omgubuntu',
},
'add_ie': ['Youtube'],
},
{
'url': 'https://twitter.com/i/cards/tfw/v1/665289828897005568',
'md5': '6dabeaca9e68cbb71c99c322a4b42a11',
'info_dict': {
'id': 'iBb2x00UVlv',
'ext': 'mp4',
'upload_date': '20151113',
'uploader_id': '1189339351084113920',
'uploader': 'ArsenalTerje',
'title': 'Vine by ArsenalTerje',
'timestamp': 1447451307,
},
'add_ie': ['Vine'],
}, {
'url': 'https://twitter.com/i/videos/tweet/705235433198714880',
'md5': '884812a2adc8aaf6fe52b15ccbfa3b88',
'info_dict': {
'id': '705235433198714880',
'ext': 'mp4',
'title': "Brent Yarina - Khalil Iverson's missed highlight dunk. And made highlight dunk. In one highlight.",
'description': "Khalil Iverson's missed highlight dunk. And made highlight dunk. In one highlight. https://t.co/OrxcJ28Bns",
'uploader': 'Brent Yarina',
'uploader_id': 'BTNBrentYarina',
'timestamp': 1456976204,
'upload_date': '20160303',
},
'skip': 'This content is no longer available.',
}, {
'url': 'https://twitter.com/i/videos/752274308186120192',
'only_matching': True,
},
]
def _real_extract(self, url):
status_id = self._match_id(url)
return self.url_result(
'https://twitter.com/statuses/' + status_id,
TwitterIE.ie_key(), status_id)
class TwitterIE(TwitterBaseIE):
IE_NAME = 'twitter'
_VALID_URL = TwitterBaseIE._BASE_REGEX + r'(?:(?:i/web|[^/]+)/status|statuses)/(?P<id>\d+)'
_TESTS = [{
'url': 'https://twitter.com/freethenipple/status/643211948184596480',
'info_dict': {
'id': '643211948184596480',
'ext': 'mp4',
'title': 'FREE THE NIPPLE - FTN supporters on Hollywood Blvd today!',
'thumbnail': r're:^https?://.*\.jpg',
'description': 'FTN supporters on Hollywood Blvd today! http://t.co/c7jHH749xJ',
'uploader': 'FREE THE NIPPLE',
'uploader_id': 'freethenipple',
'duration': 12.922,
'timestamp': 1442188653,
'upload_date': '20150913',
'age_limit': 18,
},
}, {
'url': 'https://twitter.com/giphz/status/657991469417025536/photo/1',
'md5': 'f36dcd5fb92bf7057f155e7d927eeb42',
'info_dict': {
'id': '657991469417025536',
'ext': 'mp4',
'title': 'Gifs - tu vai cai tu vai cai tu nao eh capaz disso tu vai cai',
'description': 'Gifs on Twitter: "tu vai cai tu vai cai tu nao eh capaz disso tu vai cai https://t.co/tM46VHFlO5"',
'thumbnail': r're:^https?://.*\.png',
'uploader': 'Gifs',
'uploader_id': 'giphz',
},
'expected_warnings': ['height', 'width'],
'skip': 'Account suspended',
}, {
'url': 'https://twitter.com/starwars/status/665052190608723968',
'info_dict': {
'id': '665052190608723968',
'ext': 'mp4',
'title': 'Star Wars - A new beginning is coming December 18. Watch the official 60 second #TV spot for #StarWars: #TheForceAwakens.',
'description': 'A new beginning is coming December 18. Watch the official 60 second #TV spot for #StarWars: #TheForceAwakens. https://t.co/OkSqT2fjWJ',
'uploader_id': 'starwars',
'uploader': 'Star Wars',
'timestamp': 1447395772,
'upload_date': '20151113',
},
}, {
'url': 'https://twitter.com/BTNBrentYarina/status/705235433198714880',
'info_dict': {
'id': '705235433198714880',
'ext': 'mp4',
'title': "Brent Yarina - Khalil Iverson's missed highlight dunk. And made highlight dunk. In one highlight.",
'description': "Khalil Iverson's missed highlight dunk. And made highlight dunk. In one highlight. https://t.co/OrxcJ28Bns",
'uploader_id': 'BTNBrentYarina',
'uploader': 'Brent Yarina',
'timestamp': 1456976204,
'upload_date': '20160303',
},
'params': {
# The same video as https://twitter.com/i/videos/tweet/705235433198714880
# Test case of TwitterCardIE
'skip_download': True,
},
}, {
'url': 'https://twitter.com/jaydingeer/status/700207533655363584',
'info_dict': {
'id': '700207533655363584',
'ext': 'mp4',
'title': 'simon vertugo - BEAT PROD: @suhmeduh #Damndaniel',
'description': 'BEAT PROD: @suhmeduh https://t.co/HBrQ4AfpvZ #Damndaniel https://t.co/byBooq2ejZ',
'thumbnail': r're:^https?://.*\.jpg',
'uploader': 'simon vertugo',
'uploader_id': 'simonvertugo',
'duration': 30.0,
'timestamp': 1455777459,
'upload_date': '20160218',
},
}, {
'url': 'https://twitter.com/Filmdrunk/status/713801302971588609',
'md5': '89a15ed345d13b86e9a5a5e051fa308a',
'info_dict': {
'id': 'MIOxnrUteUd',
'ext': 'mp4',
'title': 'Dr.Pepperの飲み方 #japanese #バカ #ドクペ #電動ガン',
'uploader': 'TAKUMA',
'uploader_id': '1004126642786242560',
'timestamp': 1402826626,
'upload_date': '20140615',
},
'add_ie': ['Vine'],
}, {
'url': 'https://twitter.com/captainamerica/status/719944021058060289',
'info_dict': {
'id': '719944021058060289',
'ext': 'mp4',
'title': 'Captain America - @King0fNerd Are you sure you made the right choice? Find out in theaters.',
'description': '@King0fNerd Are you sure you made the right choice? Find out in theaters. https://t.co/GpgYi9xMJI',
'uploader_id': 'CaptainAmerica',
'uploader': 'Captain America',
'duration': 3.17,
'timestamp': 1460483005,
'upload_date': '20160412',
},
}, {
'url': 'https://twitter.com/OPP_HSD/status/779210622571536384',
'info_dict': {
'id': '1zqKVVlkqLaKB',
'ext': 'mp4',
'title': 'Sgt Kerry Schmidt - Ontario Provincial Police - Road rage, mischief, assault, rollover and fire in one occurrence',
'upload_date': '20160923',
'uploader_id': '1PmKqpJdOJQoY',
'uploader': 'Sgt Kerry Schmidt - Ontario Provincial Police',
'timestamp': 1474613214,
},
'add_ie': ['Periscope'],
}, {
# has mp4 formats via mobile API
'url': 'https://twitter.com/news_al3alm/status/852138619213144067',
'info_dict': {
'id': '852138619213144067',
'ext': 'mp4',
'title': 'عالم الأخبار - كلمة تاريخية بجلسة الجناسي التاريخية.. النائب خالد مؤنس العتيبي للمعارضين : اتقوا الله .. الظلم ظلمات يوم القيامة',
'description': 'كلمة تاريخية بجلسة الجناسي التاريخية.. النائب خالد مؤنس العتيبي للمعارضين : اتقوا الله .. الظلم ظلمات يوم القيامة https://t.co/xg6OhpyKfN',
'uploader': 'عالم الأخبار',
'uploader_id': 'news_al3alm',
'duration': 277.4,
'timestamp': 1492000653,
'upload_date': '20170412',
},
'skip': 'Account suspended',
}, {
'url': 'https://twitter.com/i/web/status/910031516746514432',
'info_dict': {
'id': '910031516746514432',
'ext': 'mp4',
'title': 'Préfet de Guadeloupe - [Direct] #Maria Le centre se trouve actuellement au sud de Basse-Terre. Restez confinés. Réfugiez-vous dans la pièce la + sûre.',
'thumbnail': r're:^https?://.*\.jpg',
'description': '[Direct] #Maria Le centre se trouve actuellement au sud de Basse-Terre. Restez confinés. Réfugiez-vous dans la pièce la + sûre. https://t.co/mwx01Rs4lo',
'uploader': 'Préfet de Guadeloupe',
'uploader_id': 'Prefet971',
'duration': 47.48,
'timestamp': 1505803395,
'upload_date': '20170919',
},
'params': {
'skip_download': True, # requires ffmpeg
},
}, {
# card via api.twitter.com/1.1/videos/tweet/config
'url': 'https://twitter.com/LisPower1/status/1001551623938805763',
'info_dict': {
'id': '1001551623938805763',
'ext': 'mp4',
'title': 're:.*?Shep is on a roll today.*?',
'thumbnail': r're:^https?://.*\.jpg',
'description': 'md5:37b9f2ff31720cef23b2bd42ee8a0f09',
'uploader': 'Lis Power',
'uploader_id': 'LisPower1',
'duration': 111.278,
'timestamp': 1527623489,
'upload_date': '20180529',
},
'params': {
'skip_download': True, # requires ffmpeg
},
}, {
'url': 'https://twitter.com/foobar/status/1087791357756956680',
'info_dict': {
'id': '1087791357756956680',
'ext': 'mp4',
'title': 'Twitter - A new is coming. Some of you got an opt-in to try it now. Check out the emoji button, quick keyboard shortcuts, upgraded trends, advanced search, and more. Let us know your thoughts!',
'thumbnail': r're:^https?://.*\.jpg',
'description': 'md5:6dfd341a3310fb97d80d2bf7145df976',
'uploader': 'Twitter',
'uploader_id': 'Twitter',
'duration': 61.567,
'timestamp': 1548184644,
'upload_date': '20190122',
},
}, {
# not available in Periscope
'url': 'https://twitter.com/ViviEducation/status/1136534865145286656',
'info_dict': {
'id': '1vOGwqejwoWxB',
'ext': 'mp4',
'title': 'Vivi - Vivi founder @lior_rauchy announcing our new student feedback tool live at @EduTECH_AU #EduTECH2019',
'uploader': 'Vivi',
'uploader_id': '1eVjYOLGkGrQL',
},
'add_ie': ['TwitterBroadcast'],
}, {
# unified card
'url': 'https://twitter.com/BrooklynNets/status/1349794411333394432?s=20',
'info_dict': {
'id': '1349794411333394432',
'ext': 'mp4',
'title': 'md5:d1c4941658e4caaa6cb579260d85dcba',
'thumbnail': r're:^https?://.*\.jpg',
'description': 'md5:71ead15ec44cee55071547d6447c6a3e',
'uploader': 'Brooklyn Nets',
'uploader_id': 'BrooklynNets',
'duration': 324.484,
'timestamp': 1610651040,
'upload_date': '20210114',
},
'params': {
'skip_download': True,
},
}, {
# Twitch Clip Embed
'url': 'https://twitter.com/GunB1g/status/1163218564784017422',
'only_matching': True,
}, {
# promo_video_website card
'url': 'https://twitter.com/GunB1g/status/1163218564784017422',
'only_matching': True,
}, {
# promo_video_convo card
'url': 'https://twitter.com/poco_dandy/status/1047395834013384704',
'only_matching': True,
}, {
# appplayer card
'url': 'https://twitter.com/poco_dandy/status/1150646424461176832',
'only_matching': True,
}, {
# video_direct_message card
'url': 'https://twitter.com/qarev001/status/1348948114569269251',
'only_matching': True,
}, {
# poll2choice_video card
'url': 'https://twitter.com/CAF_Online/status/1349365911120195585',
'only_matching': True,
}, {
# poll3choice_video card
'url': 'https://twitter.com/SamsungMobileSA/status/1348609186725289984',
'only_matching': True,
}, {
# poll4choice_video card
'url': 'https://twitter.com/SouthamptonFC/status/1347577658079641604',
'only_matching': True,
}]
def _real_extract(self, url):
twid = self._match_id(url)
status = self._call_api(
'statuses/show/%s.json' % twid, twid, {
'cards_platform': 'Web-12',
'include_cards': 1,
'include_reply_count': 1,
'include_user_entities': 0,
'tweet_mode': 'extended',
})
title = description = status['full_text'].replace('\n', ' ')
# strip 'https -_t.co_BJYgOjSeGA' junk from filenames
title = re.sub(r'\s+(https?://[^ ]+)', '', title)
user = status.get('user') or {}
uploader = user.get('name')
if uploader:
title = '%s - %s' % (uploader, title)
uploader_id = user.get('screen_name')
tags = []
for hashtag in (try_get(status, lambda x: x['entities']['hashtags'], list) or []):
hashtag_text = hashtag.get('text')
if not hashtag_text:
continue
tags.append(hashtag_text)
info = {
'id': twid,
'title': title,
'description': description,
'uploader': uploader,
'timestamp': unified_timestamp(status.get('created_at')),
'uploader_id': uploader_id,
'uploader_url': 'https://twitter.com/' + uploader_id if uploader_id else None,
'like_count': int_or_none(status.get('favorite_count')),
'repost_count': int_or_none(status.get('retweet_count')),
'comment_count': int_or_none(status.get('reply_count')),
'age_limit': 18 if status.get('possibly_sensitive') else 0,
'tags': tags,
}
def extract_from_video_info(media):
video_info = media.get('video_info') or {}
formats = []
for variant in video_info.get('variants', []):
formats.extend(self._extract_variant_formats(variant, twid))
self._sort_formats(formats)
thumbnails = []
media_url = media.get('media_url_https') or media.get('media_url')
if media_url:
def add_thumbnail(name, size):
thumbnails.append({
'id': name,
'url': update_url_query(media_url, {'name': name}),
'width': int_or_none(size.get('w') or size.get('width')),
'height': int_or_none(size.get('h') or size.get('height')),
})
for name, size in media.get('sizes', {}).items():
add_thumbnail(name, size)
add_thumbnail('orig', media.get('original_info') or {})
info.update({
'formats': formats,
'thumbnails': thumbnails,
'duration': float_or_none(video_info.get('duration_millis'), 1000),
})
media = try_get(status, lambda x: x['extended_entities']['media'][0])
if media and media.get('type') != 'photo':
extract_from_video_info(media)
else:
card = status.get('card')
if card:
binding_values = card['binding_values']
def get_binding_value(k):
o = binding_values.get(k) or {}
return try_get(o, lambda x: x[x['type'].lower() + '_value'])
card_name = card['name'].split(':')[-1]
if card_name == 'player':
info.update({
'_type': 'url',
'url': get_binding_value('player_url'),
})
elif card_name == 'periscope_broadcast':
info.update({
'_type': 'url',
'url': get_binding_value('url') or get_binding_value('player_url'),
'ie_key': PeriscopeIE.ie_key(),
})
elif card_name == 'broadcast':
info.update({
'_type': 'url',
'url': get_binding_value('broadcast_url'),
'ie_key': TwitterBroadcastIE.ie_key(),
})
elif card_name == 'summary':
info.update({
'_type': 'url',
'url': get_binding_value('card_url'),
})
elif card_name == 'unified_card':
media_entities = self._parse_json(get_binding_value('unified_card'), twid)['media_entities']
extract_from_video_info(next(iter(media_entities.values())))
# amplify, promo_video_website, promo_video_convo, appplayer,
# video_direct_message, poll2choice_video, poll3choice_video,
# poll4choice_video, ...
else:
is_amplify = card_name == 'amplify'
vmap_url = get_binding_value('amplify_url_vmap') if is_amplify else get_binding_value('player_stream_url')
content_id = get_binding_value('%s_content_id' % (card_name if is_amplify else 'player'))
formats = self._extract_formats_from_vmap_url(vmap_url, content_id or twid)
self._sort_formats(formats)
thumbnails = []
for suffix in ('_small', '', '_large', '_x_large', '_original'):
image = get_binding_value('player_image' + suffix) or {}
image_url = image.get('url')
if not image_url or '/player-placeholder' in image_url:
continue
thumbnails.append({
'id': suffix[1:] if suffix else 'medium',
'url': image_url,
'width': int_or_none(image.get('width')),
'height': int_or_none(image.get('height')),
})
info.update({
'formats': formats,
'thumbnails': thumbnails,
'duration': int_or_none(get_binding_value(
'content_duration_seconds')),
})
else:
expanded_url = try_get(status, lambda x: x['entities']['urls'][0]['expanded_url'])
if not expanded_url:
raise ExtractorError("There's no video in this tweet.")
info.update({
'_type': 'url',
'url': expanded_url,
})
return info
class TwitterAmplifyIE(TwitterBaseIE):
IE_NAME = 'twitter:amplify'
_VALID_URL = r'https?://amp\.twimg\.com/v/(?P<id>[0-9a-f\-]{36})'
_TEST = {
'url': 'https://amp.twimg.com/v/0ba0c3c7-0af3-4c0a-bed5-7efd1ffa2951',
'md5': '7df102d0b9fd7066b86f3159f8e81bf6',
'info_dict': {
'id': '0ba0c3c7-0af3-4c0a-bed5-7efd1ffa2951',
'ext': 'mp4',
'title': 'Twitter Video',
'thumbnail': 're:^https?://.*',
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
vmap_url = self._html_search_meta(
'twitter:amplify:vmap', webpage, 'vmap url')
formats = self._extract_formats_from_vmap_url(vmap_url, video_id)
thumbnails = []
thumbnail = self._html_search_meta(
'twitter:image:src', webpage, 'thumbnail', fatal=False)
def _find_dimension(target):
w = int_or_none(self._html_search_meta(
'twitter:%s:width' % target, webpage, fatal=False))
h = int_or_none(self._html_search_meta(
'twitter:%s:height' % target, webpage, fatal=False))
return w, h
if thumbnail:
thumbnail_w, thumbnail_h = _find_dimension('image')
thumbnails.append({
'url': thumbnail,
'width': thumbnail_w,
'height': thumbnail_h,
})
video_w, video_h = _find_dimension('player')
formats[0].update({
'width': video_w,
'height': video_h,
})
return {
'id': video_id,
'title': 'Twitter Video',
'formats': formats,
'thumbnails': thumbnails,
}
class TwitterBroadcastIE(TwitterBaseIE, PeriscopeBaseIE):
IE_NAME = 'twitter:broadcast'
_VALID_URL = TwitterBaseIE._BASE_REGEX + r'i/broadcasts/(?P<id>[0-9a-zA-Z]{13})'
_TEST = {
# untitled Periscope video
'url': 'https://twitter.com/i/broadcasts/1yNGaQLWpejGj',
'info_dict': {
'id': '1yNGaQLWpejGj',
'ext': 'mp4',
'title': 'Andrea May Sahouri - Periscope Broadcast',
'uploader': 'Andrea May Sahouri',
'uploader_id': '1PXEdBZWpGwKe',
},
}
def _real_extract(self, url):
broadcast_id = self._match_id(url)
broadcast = self._call_api(
'broadcasts/show.json', broadcast_id,
{'ids': broadcast_id})['broadcasts'][broadcast_id]
info = self._parse_broadcast_data(broadcast, broadcast_id)
media_key = broadcast['media_key']
source = self._call_api(
'live_video_stream/status/' + media_key, media_key)['source']
m3u8_url = source.get('noRedirectPlaybackUrl') or source['location']
if '/live_video_stream/geoblocked/' in m3u8_url:
self.raise_geo_restricted()
m3u8_id = compat_parse_qs(compat_urllib_parse_urlparse(
m3u8_url).query).get('type', [None])[0]
state, width, height = self._extract_common_format_info(broadcast)
info['formats'] = self._extract_pscp_m3u8_formats(
m3u8_url, broadcast_id, m3u8_id, state, width, height)
return info
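# A minimal usage sketch, not part of the extractor module: these extractors are
# normally driven through YoutubeDL. The URL is one of the test URLs above; the
# options shown are an assumption for a metadata-only run.
#     import youtube_dl
#     with youtube_dl.YoutubeDL({'skip_download': True}) as ydl:
#         info = ydl.extract_info(
#             'https://twitter.com/freethenipple/status/643211948184596480')
#         print(info['id'], info.get('title'), info.get('duration'))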
|
rg3/youtube-dl
|
youtube_dl/extractor/twitter.py
|
Python
|
unlicense
| 28,035
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import mango
import user_docs
import unittest
class IndexSelectionTests(mango.UserDocsTests):
@classmethod
def setUpClass(klass):
super(IndexSelectionTests, klass).setUpClass()
if mango.has_text_service():
user_docs.add_text_indexes(klass.db, {})
def test_basic(self):
resp = self.db.find({"name.last": "A last name"}, explain=True)
assert resp["index"]["type"] == "json"
def test_with_and(self):
resp = self.db.find({
"name.first": "Stephanie",
"name.last": "This doesn't have to match anything."
}, explain=True)
assert resp["index"]["type"] == "json"
@unittest.skipUnless(mango.has_text_service(), "requires text service")
def test_with_text(self):
resp = self.db.find({
"$text" : "Stephanie",
"name.first": "Stephanie",
"name.last": "This doesn't have to match anything."
}, explain=True)
assert resp["index"]["type"] == "text"
@unittest.skipUnless(mango.has_text_service(), "requires text service")
def test_no_view_index(self):
resp = self.db.find({"name.first": "Ohai!"}, explain=True)
assert resp["index"]["type"] == "text"
@unittest.skipUnless(mango.has_text_service(), "requires text service")
def test_with_or(self):
resp = self.db.find({
"$or": [
{"name.first": "Stephanie"},
{"name.last": "This doesn't have to match anything."}
]
}, explain=True)
assert resp["index"]["type"] == "text"
def test_use_most_columns(self):
# ddoc id for the age index
ddocid = "_design/ad3d537c03cd7c6a43cf8dff66ef70ea54c2b40f"
resp = self.db.find({
"name.first": "Stephanie",
"name.last": "Something or other",
"age": {"$gt": 1}
}, explain=True)
assert resp["index"]["ddoc"] != "_design/" + ddocid
resp = self.db.find({
"name.first": "Stephanie",
"name.last": "Something or other",
"age": {"$gt": 1}
}, use_index=ddocid, explain=True)
assert resp["index"]["ddoc"] == ddocid
    def test_use_index_with_empty_selector(self):
# ddoc id for the age index
ddocid = "_design/ad3d537c03cd7c6a43cf8dff66ef70ea54c2b40f"
try:
self.db.find({}, use_index=ddocid)
except Exception, e:
assert e.response.status_code == 400
else:
raise AssertionError("bad find")
# This doc will not be saved given the new ddoc validation code
# in couch_mrview
def test_manual_bad_view_idx01(self):
design_doc = {
"_id": "_design/bad_view_index",
"language": "query",
"views": {
"queryidx1": {
"map": {
"fields": {
"age": "asc"
}
},
"reduce": "_count",
"options": {
"def": {
"fields": [
{
"age": "asc"
}
]
},
"w": 2
}
}
},
"views" : {
"views001" : {
"map" : "function(employee){if(employee.training)"
+ "{emit(employee.number, employee.training);}}"
}
}
}
with self.assertRaises(KeyError):
self.db.save_doc(design_doc)
@unittest.skipUnless(mango.has_text_service(), "requires text service")
def test_manual_bad_text_idx(self):
design_doc = {
"_id": "_design/bad_text_index",
"language": "query",
"indexes": {
"text_index": {
"default_analyzer": "keyword",
"default_field": {},
"selector": {},
"fields": "all_fields",
"analyzer": {
"name": "perfield",
"default": "keyword",
"fields": {
"$default": "standard"
}
}
}
},
"indexes": {
"st_index": {
"analyzer": "standard",
"index": "function(doc){\n index(\"st_index\", doc.geometry);\n}"
}
}
}
self.db.save_doc(design_doc)
docs= self.db.find({"age" : 48})
assert len(docs) == 1
assert docs[0]["name"]["first"] == "Stephanie"
assert docs[0]["age"] == 48
@unittest.skipUnless(mango.has_text_service(), "requires text service")
class MultiTextIndexSelectionTests(mango.UserDocsTests):
@classmethod
def setUpClass(klass):
super(MultiTextIndexSelectionTests, klass).setUpClass()
if mango.has_text_service():
klass.db.create_text_index(ddoc="foo", analyzer="keyword")
klass.db.create_text_index(ddoc="bar", analyzer="email")
def test_view_ok_with_multi_text(self):
resp = self.db.find({"name.last": "A last name"}, explain=True)
assert resp["index"]["type"] == "json"
def test_multi_text_index_is_error(self):
try:
self.db.find({"$text": "a query"}, explain=True)
except Exception, e:
assert e.response.status_code == 400
def test_use_index_works(self):
resp = self.db.find({"$text": "a query"}, use_index="foo", explain=True)
assert resp["index"]["ddoc"] == "_design/foo"
|
cloudant/couchdb-mango
|
test/05-index-selection-test.py
|
Python
|
apache-2.0
| 6,472
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================
"""Library of Cloud TPU helper functions for data loading."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.experimental.ops import interleave_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.ops import readers
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import functional_ops
def _TextLineDataset(filename):
buffer_size = 8 * 1024 * 1024 # 8 MiB per file
dataset = readers.TextLineDataset(filename, buffer_size=buffer_size)
return dataset
def _TFRecordDataset(filename):
buffer_size = 8 * 1024 * 1024 # 8 MiB per file
dataset = readers.TFRecordDataset(filename, buffer_size=buffer_size)
return dataset
_FILETYPE_MAP = {
'tfrecord': _TFRecordDataset,
'textline': _TextLineDataset,
'text': _TextLineDataset,
}
def StreamingFilesDataset(files,
filetype=None,
file_reader_job=None,
worker_job=None,
num_epochs=None,
filename_shuffle_buffer_size=None,
num_parallel_reads=None,
batch_transfer_size=None,
sloppy=None):
"""StreamingFilesDataset constructs a dataset to stream from workers (GCE VM).
Because Cloud TPUs are allocated over the network, a Cloud TPU cannot read
files local to your GCE VM. In order to train using files stored on your local
VM (e.g. on local SSD for extreme performance), use the StreamingFilesDataset
helper to generate a dataset to feed your Cloud TPU with files from your GCE
VM.
The resulting dataset may return an OutOfRangeError if there are no files
found as a result of the fileglob expansion.
Note: StreamingFilesDataset assumes that the session is using a
TPUClusterResolver and has therefore a worker and a coordinator job. File
loading will be done on the coordinator job.
Args:
files: A string glob to match files, or a `tf.data.Dataset` generating file
names.
filetype: A string (one of 'tfrecord', or 'textline') or a single-argument
TensorFlow function that when given a filename returns a dataset.
file_reader_job: An optional string that corresponds to the job that should
perform the file reads.
worker_job: An optional string that corresponds to the job that should
process the tensors (i.e. your GPU or TPU worker).
num_epochs: The number of epochs through the training set that should be
generated. By default, it will repeat infinitely.
filename_shuffle_buffer_size: An optional integer whose value controls the
shuffling of the file names. If you would like to read from the files in
the same order, set to 0 or False.
num_parallel_reads: An optional integer controlling the number of files to
read from concurrently. (Set to 1 for no parallelism.)
batch_transfer_size: An optional integer controlling the batching used to
amortize the remote function invocation overhead. Set to a very large
number to increase throughput. Set to a very small number to reduce memory
consumption. Set to False to skip batching.
    sloppy: (Optional.) If `False`, read input data while maintaining a
      deterministic order. (This may have significant performance impacts.)
      Defaults to `True`.
  Returns:
    A `tf.data.Dataset` with an infinite stream of elements generated by a
    parallel interleaving of the set of files matched (or generated) by `files`,
    with element types given by the dataset specified by `filetype`.
Raises:
ValueError: if any argument is not of the expected type.
"""
if filetype is None:
filetype = 'tfrecord'
if isinstance(filetype, str):
if filetype not in _FILETYPE_MAP:
raise ValueError('Unexpected filetype: %s' % filetype)
reader_fn = _FILETYPE_MAP[filetype]
elif callable(filetype):
reader_fn = filetype
else:
raise ValueError('filetype should be a string or a callable')
file_reader_job = file_reader_job or 'coordinator'
worker_job = worker_job or 'worker'
if filename_shuffle_buffer_size is None:
filename_shuffle_buffer_size = 4096
num_parallel_reads = num_parallel_reads or 8
if batch_transfer_size is None:
batch_transfer_size = 256
if sloppy is None:
sloppy = True
if file_reader_job == 'coordinator':
file_reader_device = '/job:coordinator/task:0'
else:
file_reader_device = '/job:%s' % file_reader_job
with ops.device(file_reader_device):
if isinstance(files, str):
source_dataset = dataset_ops.Dataset.list_files(files)
elif isinstance(files, dataset_ops.DatasetV2):
source_dataset = files
else:
raise ValueError('files was not a string or a dataset: %s' % files)
if filename_shuffle_buffer_size:
source_dataset = source_dataset.shuffle(
buffer_size=filename_shuffle_buffer_size)
source_dataset = source_dataset.apply(
interleave_ops.parallel_interleave(
reader_fn, cycle_length=num_parallel_reads, sloppy=sloppy))
source_dataset = source_dataset.repeat(num_epochs)
if batch_transfer_size:
source_dataset = source_dataset.batch(batch_transfer_size)
source_dataset = source_dataset.prefetch(1)
source_iterator = dataset_ops.make_one_shot_iterator(source_dataset)
source_handle = source_iterator.string_handle()
@function.Defun(dtypes.string)
def LoadingFunc(h):
remote_iterator = iterator_ops.Iterator.from_string_handle(
h, dataset_ops.get_legacy_output_types(source_dataset),
dataset_ops.get_legacy_output_shapes(source_dataset))
return remote_iterator.get_next()
def MapFn(unused_input):
source_dataset_output_types = dataset_ops.get_legacy_output_types(
source_dataset)
if isinstance(source_dataset_output_types, dtypes.DType):
output_types = [source_dataset_output_types]
elif isinstance(source_dataset_output_types, (list, tuple)):
output_types = source_dataset_output_types
else:
raise ValueError('source dataset has invalid output types')
remote_calls = functional_ops.remote_call(
args=[source_handle],
Tout=output_types,
f=LoadingFunc,
target='/job:%s/replica:0/task:0/cpu:0' % file_reader_job)
if len(remote_calls) == 1:
return remote_calls[0]
else:
return remote_calls
with ops.device('/job:%s' % worker_job):
output_dataset = dataset_ops.Dataset.range(2).repeat().map(
MapFn, num_parallel_calls=4 if sloppy else None)
output_dataset = output_dataset.prefetch(1)
if batch_transfer_size:
# Undo the batching used during the transfer.
output_dataset = output_dataset.apply(batching.unbatch()).prefetch(1)
return output_dataset
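# A minimal usage sketch, not part of this module: the file pattern and job
# names below are assumptions for a typical TPUClusterResolver setup, with a
# "coordinator" job doing the file reads and a "worker" (TPU) job consuming them.
#     dataset = StreamingFilesDataset(
#         '/mnt/disks/ssd/train-*.tfrecord',  # hypothetical path local to the GCE VM
#         filetype='tfrecord',
#         file_reader_job='coordinator',
#         worker_job='worker',
#         num_parallel_reads=16)
#     iterator = dataset_ops.make_one_shot_iterator(dataset)
#     next_batch = iterator.get_next()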
|
chemelnucfin/tensorflow
|
tensorflow/python/tpu/datasets.py
|
Python
|
apache-2.0
| 7,774
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management.core.resources.system import Execute, File
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.get_user_call_output import get_user_call_output
from resource_management.libraries.functions.show_logs import show_logs
def kill_process(pid_file, user, log_dir):
  """
  Kill the process identified by the pid file, then check whether it is still running. If it is still running after the
  kill command, try again with the -9 option (hard kill).
  """
  import params
pid = get_user_call_output(format("cat {pid_file}"), user=user, is_checked_call=False)[1]
process_id_exists_command = format("ls {pid_file} >/dev/null 2>&1 && ps -p {pid} >/dev/null 2>&1")
kill_cmd = format("{sudo} kill {pid}")
Execute(kill_cmd,
not_if=format("! ({process_id_exists_command})"))
wait_time = 5
hard_kill_cmd = format("{sudo} kill -9 {pid}")
Execute(hard_kill_cmd,
not_if=format("! ({process_id_exists_command}) || ( sleep {wait_time} && ! ({process_id_exists_command}) )"),
ignore_failures = True)
try:
Execute(format("! ({process_id_exists_command})"),
tries=20,
try_sleep=3,
)
except:
show_logs(log_dir, user)
raise
File(pid_file,
action="delete"
)
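# A minimal usage sketch, not part of this file: the pid file, user and log
# directory are hypothetical values; a service's stop() handler would typically
# call kill_process along these lines.
#     kill_process(pid_file='/var/run/ambari-logsearch/logsearch.pid',
#                  user='logsearch',
#                  log_dir='/var/log/ambari-logsearch')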
|
arenadata/ambari
|
ambari-server/src/main/resources/stacks/ADH/1.4/services/LOGSEARCH/package/scripts/logsearch_common.py
|
Python
|
apache-2.0
| 2,098
|
# Copyright 2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
from itertools import groupby, chain
from future.moves.itertools import zip_longest
from devlib.utils.types import identifier
from wa.framework.configuration.core import (MetaConfiguration, RunConfiguration,
JobGenerator, settings)
from wa.framework.configuration.parsers import ConfigParser
from wa.framework.configuration.plugin_cache import PluginCache
from wa.framework.exception import NotFoundError, ConfigError
from wa.framework.job import Job
from wa.utils import log
from wa.utils.serializer import Podable
class CombinedConfig(Podable):
_pod_serialization_version = 1
@staticmethod
def from_pod(pod):
instance = super(CombinedConfig, CombinedConfig).from_pod(pod)
instance.settings = MetaConfiguration.from_pod(pod.get('settings', {}))
instance.run_config = RunConfiguration.from_pod(pod.get('run_config', {}))
return instance
def __init__(self, settings=None, run_config=None): # pylint: disable=redefined-outer-name
super(CombinedConfig, self).__init__()
self.settings = settings
self.run_config = run_config
def to_pod(self):
pod = super(CombinedConfig, self).to_pod()
pod['settings'] = self.settings.to_pod()
pod['run_config'] = self.run_config.to_pod()
return pod
@staticmethod
def _pod_upgrade_v1(pod):
pod['_pod_version'] = pod.get('_pod_version', 1)
return pod
class ConfigManager(object):
"""
Represents run-time state of WA. Mostly used as a container for loaded
configuration and discovered plugins.
    This exists outside of any command or run and is associated with the running
    instance of WA itself.
"""
@property
def enabled_instruments(self):
return self.jobs_config.enabled_instruments
@property
def enabled_processors(self):
return self.jobs_config.enabled_processors
@property
def job_specs(self):
if not self._jobs_generated:
msg = 'Attempting to access job specs before '\
'jobs have been generated'
raise RuntimeError(msg)
return [j.spec for j in self._jobs]
@property
def jobs(self):
if not self._jobs_generated:
msg = 'Attempting to access jobs before '\
'they have been generated'
raise RuntimeError(msg)
return self._jobs
def __init__(self, settings=settings): # pylint: disable=redefined-outer-name
self.settings = settings
self.run_config = RunConfiguration()
self.plugin_cache = PluginCache()
self.jobs_config = JobGenerator(self.plugin_cache)
self.loaded_config_sources = []
self._config_parser = ConfigParser()
self._jobs = []
self._jobs_generated = False
self.agenda = None
def load_config_file(self, filepath):
includes = self._config_parser.load_from_path(self, filepath)
self.loaded_config_sources.append(filepath)
self.loaded_config_sources.extend(includes)
def load_config(self, values, source):
self._config_parser.load(self, values, source)
self.loaded_config_sources.append(source)
def get_plugin(self, name=None, kind=None, *args, **kwargs):
return self.plugin_cache.get_plugin(identifier(name), kind, *args, **kwargs)
def get_instruments(self, target):
instruments = []
for name in self.enabled_instruments:
try:
instruments.append(self.get_plugin(name, kind='instrument',
target=target))
except NotFoundError:
msg = 'Instrument "{}" not found'
raise NotFoundError(msg.format(name))
return instruments
def get_processors(self):
processors = []
for name in self.enabled_processors:
try:
proc = self.plugin_cache.get_plugin(name, kind='output_processor')
except NotFoundError:
msg = 'Output Processor "{}" not found'
raise NotFoundError(msg.format(name))
processors.append(proc)
return processors
def get_config(self):
return CombinedConfig(self.settings, self.run_config)
def finalize(self):
if not self.agenda:
msg = 'Attempting to finalize config before agenda has been set'
raise RuntimeError(msg)
self.run_config.merge_device_config(self.plugin_cache)
return self.get_config()
def generate_jobs(self, context):
job_specs = self.jobs_config.generate_job_specs(context.tm)
if not job_specs:
msg = 'No jobs available for running.'
raise ConfigError(msg)
exec_order = self.run_config.execution_order
log.indent()
for spec, i in permute_iterations(job_specs, exec_order):
job = Job(spec, i, context)
job.load(context.tm.target)
self._jobs.append(job)
context.run_state.add_job(job)
log.dedent()
self._jobs_generated = True
def permute_by_workload(specs):
"""
This is that "classic" implementation that executes all iterations of a
workload spec before proceeding onto the next spec.
"""
for spec in specs:
for i in range(1, spec.iterations + 1):
yield (spec, i)
def permute_by_iteration(specs):
"""
Runs the first iteration for all benchmarks first, before proceeding to the
    next iteration, i.e. A1, B1, C1, A2, B2, C2... instead of A1, A2, B1, B2,
    C1, C2...
    If multiple sections were specified in the agenda, this will run all
sections for the first global spec first, followed by all sections for the
second spec, etc.
e.g. given sections X and Y, and global specs A and B, with 2 iterations,
this will run
X.A1, Y.A1, X.B1, Y.B1, X.A2, Y.A2, X.B2, Y.B2
"""
groups = [list(g) for _, g in groupby(specs, lambda s: s.workload_id)]
all_tuples = []
for spec in chain(*groups):
all_tuples.append([(spec, i + 1)
for i in range(spec.iterations)])
for t in chain(*list(map(list, zip_longest(*all_tuples)))):
if t is not None:
yield t
def permute_by_section(specs):
"""
Runs the first iteration for all benchmarks first, before proceeding to the
    next iteration, i.e. A1, B1, C1, A2, B2, C2... instead of A1, A2, B1, B2,
    C1, C2...
    If multiple sections were specified in the agenda, this will run all specs
    for the first section followed by all specs for the second section, etc.
e.g. given sections X and Y, and global specs A and B, with 2 iterations,
this will run
X.A1, X.B1, Y.A1, Y.B1, X.A2, X.B2, Y.A2, Y.B2
"""
groups = [list(g) for _, g in groupby(specs, lambda s: s.section_id)]
all_tuples = []
for spec in chain(*groups):
all_tuples.append([(spec, i + 1)
for i in range(spec.iterations)])
for t in chain(*list(map(list, zip_longest(*all_tuples)))):
if t is not None:
yield t
def permute_randomly(specs):
"""
This will generate a random permutation of specs/iteration tuples.
"""
result = []
for spec in specs:
for i in range(1, spec.iterations + 1):
result.append((spec, i))
random.shuffle(result)
for t in result:
yield t
permute_map = {
'by_iteration': permute_by_iteration,
'by_workload': permute_by_workload,
'by_section': permute_by_section,
'random': permute_randomly,
}
def permute_iterations(specs, exec_order):
if exec_order not in permute_map:
msg = 'Unknown execution order "{}"; must be in: {}'
raise ValueError(msg.format(exec_order, list(permute_map.keys())))
return permute_map[exec_order](specs)
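# --- Illustrative usage sketch (not part of the original module) ---
# The permute_* generators only require spec objects exposing `iterations`,
# `workload_id` and `section_id` attributes, so a lightweight stand-in is
# enough to demonstrate the orderings. `_DemoSpec` is hypothetical and exists
# purely for illustration.
if __name__ == '__main__':
    from collections import namedtuple

    _DemoSpec = namedtuple('_DemoSpec', 'workload_id section_id iterations')
    _demo_specs = [_DemoSpec('A', 'X', 2), _DemoSpec('B', 'X', 2)]

    # by_workload yields A1, A2, B1, B2
    print([(s.workload_id, i) for s, i in permute_iterations(_demo_specs, 'by_workload')])
    # by_iteration yields A1, B1, A2, B2
    print([(s.workload_id, i) for s, i in permute_iterations(_demo_specs, 'by_iteration')])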
|
setrofim/workload-automation
|
wa/framework/configuration/execution.py
|
Python
|
apache-2.0
| 8,536
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""CIFAR10 small images classification dataset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.python.keras._impl.keras import backend as K
from tensorflow.python.keras._impl.keras.datasets.cifar import load_batch
from tensorflow.python.keras._impl.keras.utils.data_utils import get_file
def load_data():
"""Loads CIFAR10 dataset.
Returns:
Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
"""
dirname = 'cifar-10-batches-py'
origin = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
path = get_file(dirname, origin=origin, untar=True)
num_train_samples = 50000
x_train = np.empty((num_train_samples, 3, 32, 32), dtype='uint8')
y_train = np.empty((num_train_samples,), dtype='uint8')
for i in range(1, 6):
fpath = os.path.join(path, 'data_batch_' + str(i))
(x_train[(i - 1) * 10000:i * 10000, :, :, :],
y_train[(i - 1) * 10000:i * 10000]) = load_batch(fpath)
fpath = os.path.join(path, 'test_batch')
x_test, y_test = load_batch(fpath)
y_train = np.reshape(y_train, (len(y_train), 1))
y_test = np.reshape(y_test, (len(y_test), 1))
if K.image_data_format() == 'channels_last':
x_train = x_train.transpose(0, 2, 3, 1)
x_test = x_test.transpose(0, 2, 3, 1)
return (x_train, y_train), (x_test, y_test)
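# --- Illustrative usage sketch (not part of the original module) ---
# The first call downloads and caches the CIFAR-10 archive; the shapes shown
# assume the default 'channels_last' image data format.
if __name__ == '__main__':
  (x_train, y_train), (x_test, y_test) = load_data()
  print(x_train.shape, y_train.shape)  # (50000, 32, 32, 3) (50000, 1)
  print(x_test.shape, y_test.shape)    # (10000, 32, 32, 3) (10000, 1)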
|
rabipanda/tensorflow
|
tensorflow/python/keras/_impl/keras/datasets/cifar10.py
|
Python
|
apache-2.0
| 2,091
|
# Copyright 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
LVM class for performing LVM operations.
"""
import math
import os
import re
from os_brick import executor
from oslo_concurrency import processutils as putils
from oslo_log import log as logging
from oslo_utils import excutils
from six import moves
from cinder import exception
from cinder.i18n import _LE, _LI
from cinder import utils
LOG = logging.getLogger(__name__)
class LVM(executor.Executor):
"""LVM object to enable various LVM related operations."""
LVM_CMD_PREFIX = ['env', 'LC_ALL=C']
def __init__(self, vg_name, root_helper, create_vg=False,
physical_volumes=None, lvm_type='default',
executor=putils.execute, lvm_conf=None):
"""Initialize the LVM object.
The LVM object is based on an LVM VolumeGroup, one instantiation
for each VolumeGroup you have/use.
:param vg_name: Name of existing VG or VG to create
:param root_helper: Execution root_helper method to use
:param create_vg: Indicates the VG doesn't exist
and we want to create it
:param physical_volumes: List of PVs to build VG on
:param lvm_type: VG and Volume type (default, or thin)
        :param executor: Execute method to use, None uses common/processutils
        :param lvm_conf: Path to an lvm.conf file; its directory is exported
                         as LVM_SYSTEM_DIR for all LVM commands
        """
super(LVM, self).__init__(execute=executor, root_helper=root_helper)
self.vg_name = vg_name
self.pv_list = []
self.vg_size = 0.0
self.vg_free_space = 0.0
self.vg_lv_count = 0
self.vg_uuid = None
self.vg_thin_pool = None
self.vg_thin_pool_size = 0.0
self.vg_thin_pool_free_space = 0.0
self._supports_snapshot_lv_activation = None
self._supports_lvchange_ignoreskipactivation = None
self.vg_provisioned_capacity = 0.0
# Ensure LVM_SYSTEM_DIR has been added to LVM.LVM_CMD_PREFIX
# before the first LVM command is executed, and use the directory
# where the specified lvm_conf file is located as the value.
if lvm_conf and os.path.isfile(lvm_conf):
lvm_sys_dir = os.path.dirname(lvm_conf)
LVM.LVM_CMD_PREFIX = ['env',
'LC_ALL=C',
'LVM_SYSTEM_DIR=' + lvm_sys_dir]
if create_vg and physical_volumes is not None:
self.pv_list = physical_volumes
try:
self._create_vg(physical_volumes)
except putils.ProcessExecutionError as err:
LOG.exception(_LE('Error creating Volume Group'))
LOG.error(_LE('Cmd :%s'), err.cmd)
LOG.error(_LE('StdOut :%s'), err.stdout)
LOG.error(_LE('StdErr :%s'), err.stderr)
raise exception.VolumeGroupCreationFailed(vg_name=self.vg_name)
if self._vg_exists() is False:
LOG.error(_LE('Unable to locate Volume Group %s'), vg_name)
raise exception.VolumeGroupNotFound(vg_name=vg_name)
# NOTE: we assume that the VG has been activated outside of Cinder
if lvm_type == 'thin':
pool_name = "%s-pool" % self.vg_name
if self.get_volume(pool_name) is None:
try:
self.create_thin_pool(pool_name)
except putils.ProcessExecutionError:
# Maybe we just lost the race against another copy of
# this driver being in init in parallel - e.g.
# cinder-volume and cinder-backup starting in parallel
if self.get_volume(pool_name) is None:
raise
self.vg_thin_pool = pool_name
self.activate_lv(self.vg_thin_pool)
self.pv_list = self.get_all_physical_volumes(root_helper, vg_name)
def _vg_exists(self):
"""Simple check to see if VG exists.
:returns: True if vg specified in object exists, else False
"""
exists = False
cmd = LVM.LVM_CMD_PREFIX + ['vgs', '--noheadings',
'-o', 'name', self.vg_name]
(out, _err) = self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
if out is not None:
volume_groups = out.split()
if self.vg_name in volume_groups:
exists = True
return exists
def _create_vg(self, pv_list):
cmd = ['vgcreate', self.vg_name, ','.join(pv_list)]
self._execute(*cmd, root_helper=self._root_helper, run_as_root=True)
def _get_vg_uuid(self):
cmd = LVM.LVM_CMD_PREFIX + ['vgs', '--noheadings',
'-o', 'uuid', self.vg_name]
(out, _err) = self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
if out is not None:
return out.split()
else:
return []
def _get_thin_pool_free_space(self, vg_name, thin_pool_name):
"""Returns available thin pool free space.
:param vg_name: the vg where the pool is placed
:param thin_pool_name: the thin pool to gather info for
:returns: Free space in GB (float), calculated using data_percent
"""
cmd = LVM.LVM_CMD_PREFIX +\
['lvs', '--noheadings', '--unit=g',
'-o', 'size,data_percent', '--separator',
':', '--nosuffix']
# NOTE(gfidente): data_percent only applies to some types of LV so we
# make sure to append the actual thin pool name
cmd.append("/dev/%s/%s" % (vg_name, thin_pool_name))
free_space = 0.0
try:
(out, err) = self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
if out is not None:
out = out.strip()
data = out.split(':')
pool_size = float(data[0])
data_percent = float(data[1])
consumed_space = pool_size / 100 * data_percent
free_space = pool_size - consumed_space
free_space = round(free_space, 2)
except putils.ProcessExecutionError as err:
LOG.exception(_LE('Error querying thin pool about data_percent'))
LOG.error(_LE('Cmd :%s'), err.cmd)
LOG.error(_LE('StdOut :%s'), err.stdout)
LOG.error(_LE('StdErr :%s'), err.stderr)
return free_space
@staticmethod
def get_lvm_version(root_helper):
"""Static method to get LVM version from system.
:param root_helper: root_helper to use for execute
:returns: version 3-tuple
"""
cmd = LVM.LVM_CMD_PREFIX + ['vgs', '--version']
(out, _err) = putils.execute(*cmd,
root_helper=root_helper,
run_as_root=True)
lines = out.split('\n')
for line in lines:
if 'LVM version' in line:
version_list = line.split()
# NOTE(gfidente): version is formatted as follows:
# major.minor.patchlevel(library API version)[-customisation]
version = version_list[2]
version_filter = r"(\d+)\.(\d+)\.(\d+).*"
r = re.search(version_filter, version)
version_tuple = tuple(map(int, r.group(1, 2, 3)))
return version_tuple
@staticmethod
def supports_thin_provisioning(root_helper):
"""Static method to check for thin LVM support on a system.
:param root_helper: root_helper to use for execute
:returns: True if supported, False otherwise
"""
return LVM.get_lvm_version(root_helper) >= (2, 2, 95)
@property
def supports_snapshot_lv_activation(self):
"""Property indicating whether snap activation changes are supported.
Check for LVM version >= 2.02.91.
(LVM2 git: e8a40f6 Allow to activate snapshot)
:returns: True/False indicating support
"""
if self._supports_snapshot_lv_activation is not None:
return self._supports_snapshot_lv_activation
self._supports_snapshot_lv_activation = (
self.get_lvm_version(self._root_helper) >= (2, 2, 91))
return self._supports_snapshot_lv_activation
@property
def supports_lvchange_ignoreskipactivation(self):
"""Property indicating whether lvchange can ignore skip activation.
Check for LVM version >= 2.02.99.
(LVM2 git: ab789c1bc add --ignoreactivationskip to lvchange)
"""
if self._supports_lvchange_ignoreskipactivation is not None:
return self._supports_lvchange_ignoreskipactivation
self._supports_lvchange_ignoreskipactivation = (
self.get_lvm_version(self._root_helper) >= (2, 2, 99))
return self._supports_lvchange_ignoreskipactivation
@staticmethod
def get_lv_info(root_helper, vg_name=None, lv_name=None):
"""Retrieve info about LVs (all, in a VG, or a single LV).
:param root_helper: root_helper to use for execute
:param vg_name: optional, gathers info for only the specified VG
:param lv_name: optional, gathers info for only the specified LV
:returns: List of Dictionaries with LV info
"""
cmd = LVM.LVM_CMD_PREFIX + ['lvs', '--noheadings', '--unit=g',
'-o', 'vg_name,name,size', '--nosuffix']
if lv_name is not None and vg_name is not None:
cmd.append("%s/%s" % (vg_name, lv_name))
elif vg_name is not None:
cmd.append(vg_name)
try:
(out, _err) = putils.execute(*cmd,
root_helper=root_helper,
run_as_root=True)
except putils.ProcessExecutionError as err:
with excutils.save_and_reraise_exception(reraise=True) as ctx:
if "not found" in err.stderr or "Failed to find" in err.stderr:
ctx.reraise = False
LOG.info(_LI("Logical Volume not found when querying "
"LVM info. (vg_name=%(vg)s, lv_name=%(lv)s"),
{'vg': vg_name, 'lv': lv_name})
out = None
lv_list = []
if out is not None:
volumes = out.split()
iterator = moves.zip(*[iter(volumes)] * 3) # pylint: disable=E1101
for vg, name, size in iterator:
lv_list.append({"vg": vg, "name": name, "size": size})
return lv_list
def get_volumes(self, lv_name=None):
"""Get all LV's associated with this instantiation (VG).
:returns: List of Dictionaries with LV info
"""
return self.get_lv_info(self._root_helper,
self.vg_name,
lv_name)
def get_volume(self, name):
"""Get reference object of volume specified by name.
:returns: dict representation of Logical Volume if exists
"""
ref_list = self.get_volumes(name)
for r in ref_list:
if r['name'] == name:
return r
return None
@staticmethod
def get_all_physical_volumes(root_helper, vg_name=None):
"""Static method to get all PVs on a system.
:param root_helper: root_helper to use for execute
:param vg_name: optional, gathers info for only the specified VG
:returns: List of Dictionaries with PV info
"""
field_sep = '|'
cmd = LVM.LVM_CMD_PREFIX + ['pvs', '--noheadings',
'--unit=g',
'-o', 'vg_name,name,size,free',
'--separator', field_sep,
'--nosuffix']
(out, _err) = putils.execute(*cmd,
root_helper=root_helper,
run_as_root=True)
pvs = out.split()
if vg_name is not None:
pvs = [pv for pv in pvs if vg_name == pv.split(field_sep)[0]]
pv_list = []
for pv in pvs:
fields = pv.split(field_sep)
pv_list.append({'vg': fields[0],
'name': fields[1],
'size': float(fields[2]),
'available': float(fields[3])})
return pv_list
def get_physical_volumes(self):
"""Get all PVs associated with this instantiation (VG).
:returns: List of Dictionaries with PV info
"""
self.pv_list = self.get_all_physical_volumes(self._root_helper,
self.vg_name)
return self.pv_list
@staticmethod
def get_all_volume_groups(root_helper, vg_name=None):
"""Static method to get all VGs on a system.
:param root_helper: root_helper to use for execute
:param vg_name: optional, gathers info for only the specified VG
:returns: List of Dictionaries with VG info
"""
cmd = LVM.LVM_CMD_PREFIX + ['vgs', '--noheadings',
'--unit=g', '-o',
'name,size,free,lv_count,uuid',
'--separator', ':',
'--nosuffix']
if vg_name is not None:
cmd.append(vg_name)
(out, _err) = putils.execute(*cmd,
root_helper=root_helper,
run_as_root=True)
vg_list = []
if out is not None:
vgs = out.split()
for vg in vgs:
fields = vg.split(':')
vg_list.append({'name': fields[0],
'size': float(fields[1]),
'available': float(fields[2]),
'lv_count': int(fields[3]),
'uuid': fields[4]})
return vg_list
def update_volume_group_info(self):
"""Update VG info for this instantiation.
Used to update member fields of object and
provide a dict of info for caller.
:returns: Dictionaries of VG info
"""
vg_list = self.get_all_volume_groups(self._root_helper, self.vg_name)
if len(vg_list) != 1:
LOG.error(_LE('Unable to find VG: %s'), self.vg_name)
raise exception.VolumeGroupNotFound(vg_name=self.vg_name)
self.vg_size = float(vg_list[0]['size'])
self.vg_free_space = float(vg_list[0]['available'])
self.vg_lv_count = int(vg_list[0]['lv_count'])
self.vg_uuid = vg_list[0]['uuid']
total_vols_size = 0.0
if self.vg_thin_pool is not None:
# NOTE(xyang): If providing only self.vg_name,
# get_lv_info will output info on the thin pool and all
# individual volumes.
# get_lv_info(self._root_helper, 'stack-vg')
# sudo lvs --noheadings --unit=g -o vg_name,name,size
# --nosuffix stack-vg
# stack-vg stack-pool 9.51
# stack-vg volume-13380d16-54c3-4979-9d22-172082dbc1a1 1.00
# stack-vg volume-629e13ab-7759-46a5-b155-ee1eb20ca892 1.00
# stack-vg volume-e3e6281c-51ee-464c-b1a7-db6c0854622c 1.00
#
# If providing both self.vg_name and self.vg_thin_pool,
# get_lv_info will output only info on the thin pool, but not
# individual volumes.
# get_lv_info(self._root_helper, 'stack-vg', 'stack-pool')
# sudo lvs --noheadings --unit=g -o vg_name,name,size
# --nosuffix stack-vg/stack-pool
# stack-vg stack-pool 9.51
#
# We need info on both the thin pool and the volumes,
# therefore we should provide only self.vg_name, but not
# self.vg_thin_pool here.
for lv in self.get_lv_info(self._root_helper,
self.vg_name):
lvsize = lv['size']
# get_lv_info runs "lvs" command with "--nosuffix".
# This removes "g" from "1.00g" and only outputs "1.00".
# Running "lvs" command without "--nosuffix" will output
# "1.00g" if "g" is the unit.
# Remove the unit if it is in lv['size'].
if not lv['size'][-1].isdigit():
lvsize = lvsize[:-1]
if lv['name'] == self.vg_thin_pool:
self.vg_thin_pool_size = lvsize
tpfs = self._get_thin_pool_free_space(self.vg_name,
self.vg_thin_pool)
self.vg_thin_pool_free_space = tpfs
else:
total_vols_size = total_vols_size + float(lvsize)
total_vols_size = round(total_vols_size, 2)
self.vg_provisioned_capacity = total_vols_size
def _calculate_thin_pool_size(self):
"""Calculates the correct size for a thin pool.
Ideally we would use 100% of the containing volume group and be done.
But the 100%VG notation to lvcreate is not implemented and thus cannot
be used. See https://bugzilla.redhat.com/show_bug.cgi?id=998347
Further, some amount of free space must remain in the volume group for
metadata for the contained logical volumes. The exact amount depends
on how much volume sharing you expect.
:returns: An lvcreate-ready string for the number of calculated bytes.
"""
# make sure volume group information is current
self.update_volume_group_info()
# leave 5% free for metadata
return "%sg" % (self.vg_free_space * 0.95)
def create_thin_pool(self, name=None, size_str=None):
"""Creates a thin provisioning pool for this VG.
The syntax here is slightly different than the default
lvcreate -T, so we'll just write a custom cmd here
and do it.
:param name: Name to use for pool, default is "<vg-name>-pool"
:param size_str: Size to allocate for pool, default is entire VG
:returns: The size string passed to the lvcreate command
"""
if not self.supports_thin_provisioning(self._root_helper):
LOG.error(_LE('Requested to setup thin provisioning, '
'however current LVM version does not '
'support it.'))
return None
if name is None:
name = '%s-pool' % self.vg_name
vg_pool_name = '%s/%s' % (self.vg_name, name)
if not size_str:
size_str = self._calculate_thin_pool_size()
cmd = LVM.LVM_CMD_PREFIX + ['lvcreate', '-T', '-L', size_str,
vg_pool_name]
LOG.debug("Creating thin pool '%(pool)s' with size %(size)s of "
"total %(free)sg", {'pool': vg_pool_name,
'size': size_str,
'free': self.vg_free_space})
self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
self.vg_thin_pool = name
return size_str
def create_volume(self, name, size_str, lv_type='default', mirror_count=0):
"""Creates a logical volume on the object's VG.
:param name: Name to use when creating Logical Volume
:param size_str: Size to use when creating Logical Volume
:param lv_type: Type of Volume (default or thin)
:param mirror_count: Use LVM mirroring with specified count
"""
if lv_type == 'thin':
pool_path = '%s/%s' % (self.vg_name, self.vg_thin_pool)
cmd = LVM.LVM_CMD_PREFIX + ['lvcreate', '-T', '-V', size_str, '-n',
name, pool_path]
else:
cmd = LVM.LVM_CMD_PREFIX + ['lvcreate', '-n', name, self.vg_name,
'-L', size_str]
if mirror_count > 0:
cmd.extend(['-m', mirror_count, '--nosync',
'--mirrorlog', 'mirrored'])
terras = int(size_str[:-1]) / 1024.0
if terras >= 1.5:
rsize = int(2 ** math.ceil(math.log(terras) / math.log(2)))
# NOTE(vish): Next power of two for region size. See:
# http://red.ht/U2BPOD
cmd.extend(['-R', str(rsize)])
try:
self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
except putils.ProcessExecutionError as err:
LOG.exception(_LE('Error creating Volume'))
LOG.error(_LE('Cmd :%s'), err.cmd)
LOG.error(_LE('StdOut :%s'), err.stdout)
LOG.error(_LE('StdErr :%s'), err.stderr)
raise
@utils.retry(putils.ProcessExecutionError)
def create_lv_snapshot(self, name, source_lv_name, lv_type='default'):
"""Creates a snapshot of a logical volume.
:param name: Name to assign to new snapshot
:param source_lv_name: Name of Logical Volume to snapshot
:param lv_type: Type of LV (default or thin)
"""
source_lvref = self.get_volume(source_lv_name)
if source_lvref is None:
LOG.error(_LE("Trying to create snapshot by non-existent LV: %s"),
source_lv_name)
raise exception.VolumeDeviceNotFound(device=source_lv_name)
cmd = LVM.LVM_CMD_PREFIX + ['lvcreate', '--name', name, '--snapshot',
'%s/%s' % (self.vg_name, source_lv_name)]
if lv_type != 'thin':
size = source_lvref['size']
cmd.extend(['-L', '%sg' % (size)])
try:
self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
except putils.ProcessExecutionError as err:
LOG.exception(_LE('Error creating snapshot'))
LOG.error(_LE('Cmd :%s'), err.cmd)
LOG.error(_LE('StdOut :%s'), err.stdout)
LOG.error(_LE('StdErr :%s'), err.stderr)
raise
def _mangle_lv_name(self, name):
        # Linux LVM reserves names that start with 'snapshot', so such
        # volume names can't be created directly. Mangle the name instead.
if not name.startswith('snapshot'):
return name
return '_' + name
def deactivate_lv(self, name):
lv_path = self.vg_name + '/' + self._mangle_lv_name(name)
cmd = ['lvchange', '-a', 'n']
cmd.append(lv_path)
try:
self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
except putils.ProcessExecutionError as err:
LOG.exception(_LE('Error deactivating LV'))
LOG.error(_LE('Cmd :%s'), err.cmd)
LOG.error(_LE('StdOut :%s'), err.stdout)
LOG.error(_LE('StdErr :%s'), err.stderr)
raise
def activate_lv(self, name, is_snapshot=False, permanent=False):
"""Ensure that logical volume/snapshot logical volume is activated.
:param name: Name of LV to activate
:param is_snapshot: whether LV is a snapshot
:param permanent: whether we should drop skipactivation flag
:raises: putils.ProcessExecutionError
"""
# This is a no-op if requested for a snapshot on a version
# of LVM that doesn't support snapshot activation.
# (Assume snapshot LV is always active.)
if is_snapshot and not self.supports_snapshot_lv_activation:
return
lv_path = self.vg_name + '/' + self._mangle_lv_name(name)
# Must pass --yes to activate both the snap LV and its origin LV.
# Otherwise lvchange asks if you would like to do this interactively,
# and fails.
cmd = ['lvchange', '-a', 'y', '--yes']
if self.supports_lvchange_ignoreskipactivation:
cmd.append('-K')
# If permanent=True is specified, drop the skipactivation flag in
# order to make this LV automatically activated after next reboot.
if permanent:
cmd += ['-k', 'n']
cmd.append(lv_path)
try:
self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
except putils.ProcessExecutionError as err:
LOG.exception(_LE('Error activating LV'))
LOG.error(_LE('Cmd :%s'), err.cmd)
LOG.error(_LE('StdOut :%s'), err.stdout)
LOG.error(_LE('StdErr :%s'), err.stderr)
raise
@utils.retry(putils.ProcessExecutionError)
def delete(self, name):
"""Delete logical volume or snapshot.
:param name: Name of LV to delete
"""
def run_udevadm_settle():
self._execute('udevadm', 'settle',
root_helper=self._root_helper, run_as_root=True,
check_exit_code=False)
# LV removal seems to be a race with other writers or udev in
# some cases (see LP #1270192), so we enable retry deactivation
LVM_CONFIG = 'activation { retry_deactivation = 1} '
try:
self._execute(
'lvremove',
'--config', LVM_CONFIG,
'-f',
'%s/%s' % (self.vg_name, name),
root_helper=self._root_helper, run_as_root=True)
except putils.ProcessExecutionError as err:
LOG.debug('Error reported running lvremove: CMD: %(command)s, '
'RESPONSE: %(response)s',
{'command': err.cmd, 'response': err.stderr})
LOG.debug('Attempting udev settle and retry of lvremove...')
run_udevadm_settle()
# The previous failing lvremove -f might leave behind
# suspended devices; when lvmetad is not available, any
# further lvm command will block forever.
# Therefore we need to skip suspended devices on retry.
LVM_CONFIG += 'devices { ignore_suspended_devices = 1}'
self._execute(
'lvremove',
'--config', LVM_CONFIG,
'-f',
'%s/%s' % (self.vg_name, name),
root_helper=self._root_helper, run_as_root=True)
LOG.debug('Successfully deleted volume: %s after '
'udev settle.', name)
def revert(self, snapshot_name):
"""Revert an LV from snapshot.
:param snapshot_name: Name of snapshot to revert
"""
self._execute('lvconvert', '--merge',
snapshot_name, root_helper=self._root_helper,
run_as_root=True)
def lv_has_snapshot(self, name):
cmd = LVM.LVM_CMD_PREFIX + ['lvdisplay', '--noheading', '-C', '-o',
'Attr', '%s/%s' % (self.vg_name, name)]
out, _err = self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
if out:
out = out.strip()
if (out[0] == 'o') or (out[0] == 'O'):
return True
return False
def extend_volume(self, lv_name, new_size):
"""Extend the size of an existing volume."""
# Volumes with snaps have attributes 'o' or 'O' and will be
# deactivated, but Thin Volumes with snaps have attribute 'V'
# and won't be deactivated because the lv_has_snapshot method looks
# for 'o' or 'O'
if self.lv_has_snapshot(lv_name):
self.deactivate_lv(lv_name)
try:
cmd = LVM.LVM_CMD_PREFIX + ['lvextend', '-L', new_size,
'%s/%s' % (self.vg_name, lv_name)]
self._execute(*cmd, root_helper=self._root_helper,
run_as_root=True)
except putils.ProcessExecutionError as err:
LOG.exception(_LE('Error extending Volume'))
LOG.error(_LE('Cmd :%s'), err.cmd)
LOG.error(_LE('StdOut :%s'), err.stdout)
LOG.error(_LE('StdErr :%s'), err.stderr)
raise
def vg_mirror_free_space(self, mirror_count):
free_capacity = 0.0
disks = []
for pv in self.pv_list:
disks.append(float(pv['available']))
while True:
disks = sorted([a for a in disks if a > 0.0], reverse=True)
if len(disks) <= mirror_count:
break
# consume the smallest disk
disk = disks[-1]
disks = disks[:-1]
# match extents for each mirror on the largest disks
for index in list(range(mirror_count)):
disks[index] -= disk
free_capacity += disk
return free_capacity
def vg_mirror_size(self, mirror_count):
return (self.vg_free_space / (mirror_count + 1))
def rename_volume(self, lv_name, new_name):
"""Change the name of an existing volume."""
try:
self._execute('lvrename', self.vg_name, lv_name, new_name,
root_helper=self._root_helper,
run_as_root=True)
except putils.ProcessExecutionError as err:
LOG.exception(_LE('Error renaming logical volume'))
LOG.error(_LE('Cmd :%s'), err.cmd)
LOG.error(_LE('StdOut :%s'), err.stdout)
LOG.error(_LE('StdErr :%s'), err.stderr)
raise
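# --- Illustrative usage sketch (not part of the original module) ---
# The VG name, rootwrap helper string and sizes below are hypothetical, and
# every call shells out to LVM via sudo/rootwrap, so this is only a sketch of
# the intended call sequence rather than something to execute verbatim:
#
#     root_helper = 'sudo cinder-rootwrap /etc/cinder/rootwrap.conf'
#     vg = LVM('cinder-volumes', root_helper, lvm_type='thin')
#     vg.create_volume('volume-1234', '1g', lv_type='thin')
#     vg.create_lv_snapshot('snap-1234', 'volume-1234', lv_type='thin')
#     vg.extend_volume('volume-1234', '2g')
#     vg.delete('snap-1234')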
|
apporc/cinder
|
cinder/brick/local_dev/lvm.py
|
Python
|
apache-2.0
| 30,849
|
"""The tests for hls streams."""
from datetime import timedelta
from urllib.parse import urlparse
import pytest
from homeassistant.components.stream import request_stream
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import async_fire_time_changed
from tests.components.stream.common import generate_h264_video, preload_stream
@pytest.mark.skip("Flaky in CI")
async def test_hls_stream(hass, hass_client):
"""
Test hls stream.
Purposefully not mocking anything here to test full
integration with the stream component.
"""
await async_setup_component(hass, "stream", {"stream": {}})
# Setup demo HLS track
source = generate_h264_video()
stream = preload_stream(hass, source)
stream.add_provider("hls")
# Request stream
url = request_stream(hass, source)
http_client = await hass_client()
# Fetch playlist
parsed_url = urlparse(url)
playlist_response = await http_client.get(parsed_url.path)
assert playlist_response.status == 200
# Fetch segment
playlist = await playlist_response.text()
playlist_url = "/".join(parsed_url.path.split("/")[:-1])
segment_url = playlist_url + playlist.splitlines()[-1][1:]
segment_response = await http_client.get(segment_url)
assert segment_response.status == 200
# Stop stream, if it hasn't quit already
stream.stop()
    # Ensure playlist is not accessible after stream ends
fail_response = await http_client.get(parsed_url.path)
assert fail_response.status == 404
@pytest.mark.skip("Flaky in CI")
async def test_stream_timeout(hass, hass_client):
"""Test hls stream timeout."""
await async_setup_component(hass, "stream", {"stream": {}})
# Setup demo HLS track
source = generate_h264_video()
stream = preload_stream(hass, source)
stream.add_provider("hls")
# Request stream
url = request_stream(hass, source)
http_client = await hass_client()
# Fetch playlist
parsed_url = urlparse(url)
playlist_response = await http_client.get(parsed_url.path)
assert playlist_response.status == 200
# Wait a minute
future = dt_util.utcnow() + timedelta(minutes=1)
async_fire_time_changed(hass, future)
# Fetch again to reset timer
playlist_response = await http_client.get(parsed_url.path)
assert playlist_response.status == 200
# Wait 5 minutes
future = dt_util.utcnow() + timedelta(minutes=5)
async_fire_time_changed(hass, future)
    # Ensure playlist is not accessible
fail_response = await http_client.get(parsed_url.path)
assert fail_response.status == 404
@pytest.mark.skip("Flaky in CI")
async def test_stream_ended(hass):
"""Test hls stream packets ended."""
await async_setup_component(hass, "stream", {"stream": {}})
# Setup demo HLS track
source = generate_h264_video()
stream = preload_stream(hass, source)
track = stream.add_provider("hls")
track.num_segments = 2
# Request stream
request_stream(hass, source)
# Run it dead
segments = 0
while await track.recv() is not None:
segments += 1
assert segments > 1
assert not track.get_segment()
# Stop stream, if it hasn't quit already
stream.stop()
|
leppa/home-assistant
|
tests/components/stream/test_hls.py
|
Python
|
apache-2.0
| 3,295
|
# Copyright 2013 Metacloud, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import threading
import time
import weakref
from dogpile.cache import api
from dogpile.cache import proxy
from dogpile.cache import region
from dogpile.cache import util as dogpile_util
from dogpile.core import nameregistry
from oslo_config import cfg
from oslo_log import log
from oslo_utils import importutils
from keystone import exception
from keystone.i18n import _
from keystone.i18n import _LI
from keystone.i18n import _LW
__all__ = ('KeyValueStore', 'KeyValueStoreLock', 'LockTimeout',
'get_key_value_store')
BACKENDS_REGISTERED = False
CONF = cfg.CONF
KEY_VALUE_STORE_REGISTRY = weakref.WeakValueDictionary()
LOCK_WINDOW = 1
LOG = log.getLogger(__name__)
NO_VALUE = api.NO_VALUE
def _register_backends():
# NOTE(morganfainberg): This function exists to ensure we do not try and
# register the backends prior to the configuration object being fully
# available. We also need to ensure we do not register a given backend
# more than one time. All backends will be prefixed with openstack.kvs
# as the "short" name to reference them for configuration purposes. This
# function is used in addition to the pre-registered backends in the
# __init__ file for the KVS system.
global BACKENDS_REGISTERED
if not BACKENDS_REGISTERED:
prefix = 'openstack.kvs.%s'
for backend in CONF.kvs.backends:
module, cls = backend.rsplit('.', 1)
backend_name = prefix % cls
LOG.debug(('Registering Dogpile Backend %(backend_path)s as '
'%(backend_name)s'),
{'backend_path': backend, 'backend_name': backend_name})
region.register_backend(backend_name, module, cls)
BACKENDS_REGISTERED = True
class LockTimeout(exception.UnexpectedError):
debug_message_format = _('Lock Timeout occurred for key, %(target)s')
class KeyValueStore(object):
"""Basic KVS manager object to support Keystone Key-Value-Store systems.
This manager also supports the concept of locking a given key resource to
allow for a guaranteed atomic transaction to the backend.
"""
def __init__(self, kvs_region):
self.locking = True
self._lock_timeout = 0
self._region = kvs_region
self._security_strategy = None
self._secret_key = None
self._lock_registry = nameregistry.NameRegistry(self._create_mutex)
def configure(self, backing_store, key_mangler=None, proxy_list=None,
locking=True, **region_config_args):
"""Configure the KeyValueStore instance.
:param backing_store: dogpile.cache short name of the region backend
:param key_mangler: key_mangler function
:param proxy_list: list of proxy classes to apply to the region
:param locking: boolean that allows disabling of locking mechanism for
this instantiation
:param region_config_args: key-word args passed to the dogpile.cache
backend for configuration
:return:
"""
if self.is_configured:
# NOTE(morganfainberg): It is a bad idea to reconfigure a backend,
# there are a lot of pitfalls and potential memory leaks that could
# occur. By far the best approach is to re-create the KVS object
# with the new configuration.
raise RuntimeError(_('KVS region %s is already configured. '
'Cannot reconfigure.') % self._region.name)
self.locking = locking
self._lock_timeout = region_config_args.pop(
'lock_timeout', CONF.kvs.default_lock_timeout)
self._configure_region(backing_store, **region_config_args)
self._set_key_mangler(key_mangler)
self._apply_region_proxy(proxy_list)
@property
def is_configured(self):
return 'backend' in self._region.__dict__
def _apply_region_proxy(self, proxy_list):
if isinstance(proxy_list, list):
proxies = []
for item in proxy_list:
if isinstance(item, str):
LOG.debug('Importing class %s as KVS proxy.', item)
pxy = importutils.import_class(item)
else:
pxy = item
if issubclass(pxy, proxy.ProxyBackend):
proxies.append(pxy)
else:
LOG.warning(_LW('%s is not a dogpile.proxy.ProxyBackend'),
pxy.__name__)
for proxy_cls in reversed(proxies):
LOG.info(_LI('Adding proxy \'%(proxy)s\' to KVS %(name)s.'),
{'proxy': proxy_cls.__name__,
'name': self._region.name})
self._region.wrap(proxy_cls)
def _assert_configured(self):
        if 'backend' not in self._region.__dict__:
            raise exception.UnexpectedError(_('Key Value Store not '
                                              'configured: %s') %
                                            self._region.name)
def _set_keymangler_on_backend(self, key_mangler):
try:
self._region.backend.key_mangler = key_mangler
except Exception as e:
# NOTE(morganfainberg): The setting of the key_mangler on the
# backend is used to allow the backend to
# calculate a hashed key value as needed. Not all backends
# require the ability to calculate hashed keys. If the
# backend does not support/require this feature log a
# debug line and move on otherwise raise the proper exception.
# Support of the feature is implied by the existence of the
# 'raw_no_expiry_keys' attribute.
if not hasattr(self._region.backend, 'raw_no_expiry_keys'):
LOG.debug(('Non-expiring keys not supported/required by '
'%(region)s backend; unable to set '
'key_mangler for backend: %(err)s'),
{'region': self._region.name, 'err': e})
else:
raise
def _set_key_mangler(self, key_mangler):
# Set the key_mangler that is appropriate for the given region being
# configured here. The key_mangler function is called prior to storing
# the value(s) in the backend. This is to help prevent collisions and
# limit issues such as memcache's limited cache_key size.
use_backend_key_mangler = getattr(self._region.backend,
'use_backend_key_mangler', False)
if ((key_mangler is None or use_backend_key_mangler) and
(self._region.backend.key_mangler is not None)):
# NOTE(morganfainberg): Use the configured key_mangler as a first
# choice. Second choice would be the key_mangler defined by the
# backend itself. Finally, fall back to the defaults. The one
# exception is if the backend defines `use_backend_key_mangler`
# as True, which indicates the backend's key_mangler should be
# the first choice.
key_mangler = self._region.backend.key_mangler
if CONF.kvs.enable_key_mangler:
if key_mangler is not None:
msg = _LI('Using %(func)s as KVS region %(name)s key_mangler')
if callable(key_mangler):
self._region.key_mangler = key_mangler
LOG.info(msg, {'func': key_mangler.__name__,
'name': self._region.name})
else:
# NOTE(morganfainberg): We failed to set the key_mangler,
# we should error out here to ensure we aren't causing
# key-length or collision issues.
raise exception.ValidationError(
_('`key_mangler` option must be a function reference'))
else:
LOG.info(_LI('Using default dogpile sha1_mangle_key as KVS '
'region %s key_mangler'), self._region.name)
# NOTE(morganfainberg): Sane 'default' keymangler is the
# dogpile sha1_mangle_key function. This ensures that unless
# explicitly changed, we mangle keys. This helps to limit
# unintended cases of exceeding cache-key in backends such
# as memcache.
self._region.key_mangler = dogpile_util.sha1_mangle_key
self._set_keymangler_on_backend(self._region.key_mangler)
else:
LOG.info(_LI('KVS region %s key_mangler disabled.'),
self._region.name)
self._set_keymangler_on_backend(None)
def _configure_region(self, backend, **config_args):
prefix = CONF.kvs.config_prefix
conf_dict = {}
conf_dict['%s.backend' % prefix] = backend
if 'distributed_lock' not in config_args:
config_args['distributed_lock'] = True
config_args['lock_timeout'] = self._lock_timeout
# NOTE(morganfainberg): To mitigate race conditions on comparing
# the timeout and current time on the lock mutex, we are building
# in a static 1 second overlap where the lock will still be valid
# in the backend but not from the perspective of the context
# manager. Since we must develop to the lowest-common-denominator
# when it comes to the backends, memcache's cache store is not more
# refined than 1 second, therefore we must build in at least a 1
# second overlap. `lock_timeout` of 0 means locks never expire.
if config_args['lock_timeout'] > 0:
config_args['lock_timeout'] += LOCK_WINDOW
for argument, value in config_args.items():
arg_key = '.'.join([prefix, 'arguments', argument])
conf_dict[arg_key] = value
LOG.debug('KVS region configuration for %(name)s: %(config)r',
{'name': self._region.name, 'config': conf_dict})
self._region.configure_from_config(conf_dict, '%s.' % prefix)
def _mutex(self, key):
return self._lock_registry.get(key)
def _create_mutex(self, key):
mutex = self._region.backend.get_mutex(key)
if mutex is not None:
return mutex
else:
return self._LockWrapper(lock_timeout=self._lock_timeout)
class _LockWrapper(object):
"""weakref-capable threading.Lock wrapper."""
def __init__(self, lock_timeout):
self.lock = threading.Lock()
self.lock_timeout = lock_timeout
def acquire(self, wait=True):
return self.lock.acquire(wait)
def release(self):
self.lock.release()
def get(self, key):
"""Get a single value from the KVS backend."""
self._assert_configured()
value = self._region.get(key)
if value is NO_VALUE:
raise exception.NotFound(target=key)
return value
def get_multi(self, keys):
"""Get multiple values in a single call from the KVS backend."""
self._assert_configured()
values = self._region.get_multi(keys)
not_found = []
for index, key in enumerate(keys):
if values[index] is NO_VALUE:
not_found.append(key)
if not_found:
# NOTE(morganfainberg): If any of the multi-get values are non-
# existent, we should raise a NotFound error to mimic the .get()
# method's behavior. In all cases the internal dogpile NO_VALUE
# should be masked from the consumer of the KeyValueStore.
raise exception.NotFound(target=not_found)
return values
def set(self, key, value, lock=None):
"""Set a single value in the KVS backend."""
self._assert_configured()
with self._action_with_lock(key, lock):
self._region.set(key, value)
def set_multi(self, mapping):
"""Set multiple key/value pairs in the KVS backend at once.
Like delete_multi, this call does not serialize through the
KeyValueStoreLock mechanism (locking cannot occur on more than one
key in a given context without significant deadlock potential).
"""
self._assert_configured()
self._region.set_multi(mapping)
def delete(self, key, lock=None):
"""Delete a single key from the KVS backend.
This method will raise NotFound if the key doesn't exist. The get and
delete are done in a single transaction (via KeyValueStoreLock
mechanism).
"""
self._assert_configured()
with self._action_with_lock(key, lock):
self.get(key)
self._region.delete(key)
def delete_multi(self, keys):
"""Delete multiple keys from the KVS backend in a single call.
Like set_multi, this call does not serialize through the
KeyValueStoreLock mechanism (locking cannot occur on more than one
key in a given context without significant deadlock potential).
"""
self._assert_configured()
self._region.delete_multi(keys)
def get_lock(self, key):
"""Get a write lock on the KVS value referenced by `key`.
The ability to get a context manager to pass into the set/delete
methods allows for a single-transaction to occur while guaranteeing the
backing store will not change between the start of the 'lock' and the
end. Lock timeout is fixed to the KeyValueStore configured lock
timeout.
"""
self._assert_configured()
return KeyValueStoreLock(self._mutex(key), key, self.locking,
self._lock_timeout)
@contextlib.contextmanager
def _action_with_lock(self, key, lock=None):
"""Wrapper context manager to validate and handle the lock and lock
timeout if passed in.
"""
if not isinstance(lock, KeyValueStoreLock):
# NOTE(morganfainberg): Locking only matters if a lock is passed in
# to this method. If lock isn't a KeyValueStoreLock, treat this as
# if no locking needs to occur.
yield
else:
if not lock.key == key:
raise ValueError(_('Lock key must match target key: %(lock)s '
'!= %(target)s') %
{'lock': lock.key, 'target': key})
if not lock.active:
raise exception.ValidationError(_('Must be called within an '
'active lock context.'))
if not lock.expired:
yield
else:
raise LockTimeout(target=key)
class KeyValueStoreLock(object):
"""Basic KeyValueStoreLock context manager that hooks into the
dogpile.cache backend mutex allowing for distributed locking on resources.
This is only a write lock, and will not prevent reads from occurring.
"""
def __init__(self, mutex, key, locking_enabled=True, lock_timeout=0):
self.mutex = mutex
self.key = key
self.enabled = locking_enabled
self.lock_timeout = lock_timeout
self.active = False
self.acquire_time = 0
def acquire(self):
if self.enabled:
self.mutex.acquire()
LOG.debug('KVS lock acquired for: %s', self.key)
self.active = True
self.acquire_time = time.time()
return self
__enter__ = acquire
@property
def expired(self):
if self.lock_timeout:
calculated = time.time() - self.acquire_time + LOCK_WINDOW
return calculated > self.lock_timeout
else:
return False
def release(self):
if self.enabled:
self.mutex.release()
if not self.expired:
LOG.debug('KVS lock released for: %s', self.key)
else:
LOG.warning(_LW('KVS lock released (timeout reached) for: %s'),
self.key)
def __exit__(self, exc_type, exc_val, exc_tb):
self.release()
def get_key_value_store(name, kvs_region=None):
"""Instantiate a new :class:`.KeyValueStore` or return a previous
instantiation that has the same name.
"""
global KEY_VALUE_STORE_REGISTRY
_register_backends()
key_value_store = KEY_VALUE_STORE_REGISTRY.get(name)
if key_value_store is None:
if kvs_region is None:
kvs_region = region.make_region(name=name)
key_value_store = KeyValueStore(kvs_region)
KEY_VALUE_STORE_REGISTRY[name] = key_value_store
return key_value_store
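# --- Illustrative usage sketch (not part of the original module) ---
# The region name and keys below are hypothetical; the in-memory dogpile
# backend shown ('openstack.kvs.Memory') is assumed to be one of the backends
# pre-registered by this package. A write lock obtained from get_lock() must
# be passed back into set()/delete() so the operation happens inside the same
# transaction:
#
#     store = get_key_value_store('example-region')
#     store.configure('openstack.kvs.Memory')
#     with store.get_lock('token-123') as lock:
#         store.set('token-123', {'user_id': 'abc'}, lock)
#     value = store.get('token-123')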
|
roopali8/keystone
|
keystone/common/kvs/core.py
|
Python
|
apache-2.0
| 17,459
|
# Eve W-Space
# Copyright 2014 Andrew Austin and contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
from django.db import models
from django.conf import settings
import eveapi
from core.models import Type, Location
from API.models import CorpAPIKey
from core.models import Corporation, Alliance
from Map.models import System, MapSystem, Map
from API import cache_handler as handler
User = settings.AUTH_USER_MODEL
class POS(models.Model):
"""Represents a POS somewhere in space."""
system = models.ForeignKey(System, related_name="poses")
planet = models.IntegerField()
moon = models.IntegerField()
towertype = models.ForeignKey(Type, related_name="inspace")
corporation = models.ForeignKey(Corporation, related_name="poses")
posname = models.CharField(max_length=100, blank=True, null=True)
fitting = models.TextField(blank=True, null=True)
# Using CCP's status codes here for sanity with API checks
status = models.IntegerField(choices=((0, 'Unanchored'),
(1, 'Anchored'),
(2, 'Onlining'),
(3, 'Reinforced'),
(4, 'Online')))
# This should be the time the tower exits RF
# TODO: add a validator to make sure this is only set
# if status = 3 (Reinforced)
rftime = models.DateTimeField(null=True, blank=True)
updated = models.DateTimeField()
# These values will be set by the TSV parser from d-scan data if available
guns = models.IntegerField(null=True, blank=True)
ewar = models.IntegerField(null=True, blank=True)
sma = models.IntegerField(null=True, blank=True)
hardener = models.IntegerField(null=True, blank=True)
# This is a short comment that is displayed as a warning
warpin_notice = models.CharField(blank=True, null=True, max_length=64)
class Meta:
ordering = ['system__name', 'planet', 'moon']
@classmethod
def update_from_import_list(cls, system, import_list):
"""
Imports starbases from YAML importer.
"""
for pos in import_list:
planet = pos['planet']
moon = pos['moon']
warpin = pos['warpin']
status = pos['status']
rftime = pos['rftime']
name = pos['name']
tower = Type.objects.get(name=pos['tower'])
try:
owner = Corporation.objects.get(name=pos['owner'])
except Corporation.DoesNotExist:
from core import tasks
api = eveapi.EVEAPIConnection(cacheHandler=handler)
corp_id = api.eve.CharacterID(
names=pos['owner']).characters[0].characterID
owner = tasks.update_corporation(corp_id, True)
if POS.objects.filter(system=system, planet=planet,
moon=moon, corporation=owner).exists():
# Update first existing record
starbase = POS.objects.filter(system=system, planet=planet,
moon=moon,
corporation=owner).all()[0]
                starbase.status = status
                starbase.posname = name
                starbase.towertype = tower
                if status == 3:
                    starbase.rftime = rftime
                starbase.warpin_notice = warpin
                starbase.save()
else:
new_pos = POS(system=system, planet=planet, moon=moon,
corporation=owner, towertype=tower,
warpin_notice=warpin, status=status)
if status == 3:
new_pos.rftime = rftime
new_pos.save()
def as_dict(self):
data = {
'planet': self.planet, 'moon': self.moon,
'tower': self.towertype.name, 'owner': self.corporation.name,
'status': self.status, 'name': self.posname,
'rftime': self.rftime, 'warpin': self.warpin_notice,
}
return data
def clean(self):
from django.core.exceptions import ValidationError
if self.rftime and self.status != 3:
raise ValidationError("A POS cannot have an rftime unless "
"it is reinforced")
def __unicode__(self):
return self.posname
# override save to implement posname defaulting to towertype.name
def save(self, *args, **kwargs):
if not self.posname:
self.posname = self.towertype.name
# Mark tower as having been updated
from datetime import datetime
import pytz
self.updated = datetime.now(pytz.utc)
super(POS, self).save(*args, **kwargs)
def log(self, user, action, map_system):
"""
Records a log entry for POS updates and additions.
"""
map_system.map.add_log(
user,
"%s POS (Planet %s Moon %s, owner %s) in %s (%s), %s jumps out from root system."
%(action, self.planet, self.moon, self.corporation, map_system.system.name,
map_system.friendlyname, map_system.distance_from_root()))
def size(self):
"""
        Returns the size of the tower: Small, Medium, or Large.
"""
if u'Small' in self.towertype.name:
return u'Small'
if u'Medium' in self.towertype.name:
return u'Medium'
return u'Large'
def fit_from_dscan(self, dscan):
"""
Fills in a POS's fitting from a copy / paste of d-scan results.
"""
return self.fit_from_iterable(csv.reader(dscan.splitlines(),
delimiter="\t"))
def fit_from_iterable(self, fit):
"""
Fills in a POS's fitting from an iterable (normally parsed d-scan)
"""
from core.models import Type
item_dict = dict()
# marketGroupIDs to consider guns, ewar, hardeners, and smas
guns_groups = [480, 479, 594, 595, 596]
ewar_groups = [481, 1009]
sma_groups = [484]
hardener_groups = [485]
towers = 0
self.sma = 0
self.hardener = 0
self.guns = 0
self.ewar = 0
for row in fit:
try:
item_type = Type.objects.get(name=row[1], marketgroup__isnull=False)
# odd bug where invalid items get into dscan
except Type.DoesNotExist:
continue
except Type.MultipleObjectsReturned:
# Some types have multiple records for the same name
# When this happens, we will return the first even though
# it may not actually be the POS module type.
item_type = Type.objects.filter(name=row[1], marketgroup__isnull=False).all()[0]
if item_type.marketgroup:
group_tree = []
parent = item_type.marketgroup
while parent:
group_tree.append(parent.id)
parent = parent.parentgroup
if item_type.marketgroup.id in guns_groups:
self.guns += 1
if item_type.marketgroup.id in ewar_groups:
self.ewar += 1
if item_type.marketgroup.id in sma_groups:
self.sma += 1
if item_type.marketgroup.id in hardener_groups:
self.hardener += 1
if item_type.marketgroup.id == 478:
towers += 1
towertype = item_type
posname = row[0]
if item_type.name in item_dict:
item_dict[item_type.name] += 1
elif 1285 in group_tree and 478 not in group_tree:
item_dict.update({item_type.name: 1})
self.fitting = "Imported from D-Scan:\n"
for itemtype in item_dict:
self.fitting += "\n%s : %s" % (itemtype, item_dict[itemtype])
if towers == 1 and self.towertype_id is None and self.posname is None:
self.towertype = towertype
self.posname = posname
if towers == 0 and self.towertype_id is None:
raise AttributeError('No POS in the D-Scan!')
elif towers <= 1:
self.save()
else:
raise AttributeError('Too many towers detected in the D-Scan!')
class CorpPOS(POS):
"""A corp-controlled POS with manager and password data."""
manager = models.ForeignKey(User, null=True, blank=True,
related_name='poses')
password = models.CharField(max_length=100)
description = models.TextField(null=True, blank=True)
# Let's store the CCP Item ID for the tower here to make API lookup easier
# If it is null, then we are not tracking this POS via API
apiitemid = models.BigIntegerField(null=True, blank=True)
apikey = models.ForeignKey(CorpAPIKey, null=True, blank=True,
related_name='poses')
class Meta:
permissions = (('can_see_pos_pw', 'Can see corp POS passwords.'),
('can_see_all_pos', 'Sees all corp POSes '
'regardless of manager.'),)
class POSApplication(models.Model):
"""Represents an application for a personal POS."""
applicant = models.ForeignKey(User, null=True, blank=True,
related_name='posapps')
towertype = models.ForeignKey(Type, null=True, blank=True,
related_name='posapps')
residents = models.ManyToManyField(User)
normalfit = models.TextField()
siegefit = models.TextField()
# Once it is approved, we will fill in these two to tie the records together
approved = models.DateTimeField(blank=True, null=True)
posrecord = models.ForeignKey(CorpPOS, blank=True, null=True,
related_name='application')
class Meta:
permissions = (('can_close_pos_app',
'Can dispose of corp POS applications.'),)
def __unicode__(self):
return 'Applicant: %s Tower: %s' % (self.applicant.username,
self.towertype.name)
class POSVote(models.Model):
"""Represents a vote on a personal POS application."""
application = models.ForeignKey(POSApplication, related_name='votes')
voter = models.ForeignKey(User, related_name='posvotes')
vote = models.IntegerField(choices=((0, 'Deny'),
(1, 'Approve'),
(2, 'Abstain')))
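# --- Illustrative usage sketch (not part of the original module) ---
# Requires a configured Django project with the Map/core apps and their static
# type data loaded; the system, corporation and tower names are hypothetical,
# and `dscan_text` stands for the tab-separated clipboard text produced by the
# in-game directional scanner:
#
#     system = System.objects.get(name='J123456')
#     pos = POS(system=system, planet=1, moon=1,
#               corporation=Corporation.objects.get(name='Example Corp'),
#               towertype=Type.objects.get(name='Amarr Control Tower'),
#               status=4)
#     pos.fit_from_dscan(dscan_text)   # fills in guns/ewar/sma/hardener and saves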
|
evewspace/eve-wspace
|
evewspace/POS/models.py
|
Python
|
apache-2.0
| 11,294
|
import QuestClass
import requests
import QuestParser
import time
from bs4 import BeautifulSoup
# from QuestClass import QuestSession
# Graduate URLs
myAcademicsGraduateURL = "https://quest.pecs.uwaterloo.ca/psc/SS/ACADEMIC/HRMS/c/UW_SS_MENU.UW_SS_MYPROG_GRD.GBL"
myAcademicsGraduateGradesURL_HRMS = "https://quest.pecs.uwaterloo.ca/psc/SS/ACADEMIC/HRMS/c/SA_LEARNER_SERVICES.SSR_SSENRL_GRADE.GBL"
myAcademicsGraduateGradesURL_SA = "https://quest.pecs.uwaterloo.ca/psc/SS/ACADEMIC/SA/c/SA_LEARNER_SERVICES.SSR_SSENRL_GRADE.GBL"
myAcademicsGraduateUnofficialTranscriptURL_HRMS = "https://quest.pecs.uwaterloo.ca/psc/SS/ACADEMIC/HRMS/c/SA_LEARNER_SERVICES.SS_AA_REPORT1.GBL"
myAcademicsGraduateUnofficialTranscriptURL_SA = "https://quest.pecs.uwaterloo.ca/psc/SS/ACADEMIC/SA/c/SA_LEARNER_SERVICES.SS_AA_REPORT1.GBL"
myAcademicsGraduateAdvisorsURL = "https://quest.pecs.uwaterloo.ca/psc/SS/ACADEMIC/SA/c/SA_LEARNER_SERVICES.SSR_SSADVR.GBL"
myAcademicsGraduateGradOfferURL = "https://quest.pecs.uwaterloo.ca/psc/SS/ACADEMIC/SA/c/UW_SS_MENU.UW_SS_GRD_OFFR_CTR.GBL"
# Undergraduate URLs TODO:
myAcademicsUndergraduateURL = ""
myAcademicsUndergraduateGradesURL = ""
myAcademicsUndergraduateUnofficialTranscriptURL = ""
def postMyAcademics(questSession):
''' Go to My Academics (default tab is first one)
@Param
@Return True/False
'''
    if questSession.currentPOSTpage == "MY_ACADEMICS_HOME":
print "POST My Academics: Already In"
return True
else :
postMyAcademics = questSession.getBasicParameters()
postMyAcademics['ICAction'] = 'DERIVED_SSS_SCR_SSS_LINK_ANCHOR1'
# print "POST: My Academics Page"
response = questSession.session.post(questSession.studentCenterURL_HRMS, data = postMyAcademics, allow_redirects = False)
questSession.currentResponse = response
if response.status_code == requests.codes.ok:
print "POST My Academics OK"
questSession.currentPOSTpage = "MY_ACADEMICS_HOME"
# questSession.gotoMyAcademics_myProgram()
return True
else:
print "POST My Academics Failed"
return False
def gotoMyAcademics_myProgram(questSession):
''' Go to my undergrad(grad) program
@Param
@Return True/False
'''
if questSession.isUndergraduate:
# TODO
pass
else:
getMyProgramData = {
'Page': 'UW_SS_MYPROG_GRD',
'Action': 'U',
'ExactKeys': 'Y',
'TargetFrameName': 'None'
}
response = questSession.session.get(myAcademicsGraduateURL, data = getMyProgramData, allow_redirects = False)
questSession.currentResponse = response
if response.status_code == requests.codes.ok:
if (questSession.updateStateNum(response)):
print "GET My Graduate Program Page OK"
# print response.content
return True
print "GET My Graduate Program Page Failed"
return False
def gotoMyAcademics_grades(questSession):
''' Go to my grades
@Param
@Return True/False
'''
getGradesData = {
'Page': 'SSR_SSENRL_GRADE',
'Action': 'A'
}
response = questSession.session.get(myAcademicsGraduateGradesURL_HRMS, data = getGradesData, allow_redirects = False)
questSession.currentResponse = response
if response.status_code == requests.codes.ok:
if (questSession.updateStateNum(response)):
print "GET Grades Page OK"
# print response.content
return True
print "GET Grades Page Failed"
return False
def postMyAcademics_grades_termIndex(questSession, termIndex):
''' POST to get grades for one term
@Param term index return from gotoMyAcademics_grades
@Return True/False
'''
    # If not in the right POST position, change to the right POST position
    if not (questSession.currentPOSTpage == "MY_ACADEMICS_HOME"): # or questSession.currentPOSTpage is "MY_ACADEMICS_GRADES_TERM_LINK"):
if not gotoMyAcademics_grades(questSession): #questSession.postMyAcademics_grades_termLink():
print "POST grades with index: %d Failed" % termIndex
return False
# Start to post
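    # PeopleSoft form fields (inferred from their names): ICAction presses the
    # "go" push button and SSR_DUMMY_RECV1$sels$0 selects the term row by index.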
postGradesData = questSession.getBasicParameters()
postGradesData['ICAction'] = 'DERIVED_SSS_SCT_SSR_PB_GO'
postGradesData['DERIVED_SSTSNAV_SSTS_MAIN_GOTO$7$'] = '9999'
postGradesData['SSR_DUMMY_RECV1$sels$0'] = termIndex # str(termIndex)
postGradesData['DERIVED_SSTSNAV_SSTS_MAIN_GOTO$8$'] = '9999'
print postGradesData["ICStateNum"]
response = questSession.session.post(myAcademicsGraduateGradesURL_HRMS, data = postGradesData, allow_redirects = False)
questSession.currentResponse = response
if response.status_code == requests.codes.ok:
print "POST grades with index: %d OK" % termIndex
questSession.currentStateNum += 1
questSession.currentPOSTpage = "MY_ACADEMICS_GRADES_ONE_TERM"
# questSession.gotoMyAcademics_myProgram()
return True
else:
print "POST grades with index: %d Failed" % termIndex
return False
# def postMyAcademics_grades_termLink(questSession):
# if questSession.currentPOSTpage is "MY_ACADEMICS_GRADES_TERM_LINK":
# print "POST Grades term link: Already In"
# return True
# else :
# postData = questSession.getBasicParameters()
# postData['ICAction'] = 'DERIVED_SSS_SCT_SSS_TERM_LINK'
# postData['DERIVED_SSTSNAV_SSTS_MAIN_GOTO$7$'] = '9999'
# postData['DERIVED_SSTSNAV_SSTS_MAIN_GOTO$8$'] = '9999'
# print postData["ICStateNum"]
# response = questSession.session.post(myAcademicsGraduateGradesURL_HRMS, data = postData, allow_redirects = False)
# questSession.currentResponse = response
# if response.status_code == requests.codes.ok:
# # print response.content
# print "POST grades term link OK"
# questSession.currentStateNum += 1
# questSession.currentPOSTpage = "MY_ACADEMICS_GRADES_TERM_LINK"
# return True
# else:
# print "POST grades term link Failed"
# return False
def gotoMyAcademics_unofficialTranscript(questSession):
''' Go to my Unofficial Transcript
@Param
@Return True/False
'''
getUnofficialTranscriptData = {
'Page': 'SS_ES_AARPT_TYPE2',
'Action': 'A'
}
response = questSession.session.get(myAcademicsGraduateUnofficialTranscriptURL_HRMS, data = getUnofficialTranscriptData, allow_redirects = False)
questSession.currentResponse = response
if response.status_code == requests.codes.ok:
if (questSession.updateStateNum(response)):
print "GET Unofficial Transcript Page OK"
# print response.content
return True
print "GET Unofficial Transcript Page Failed"
return False
# Transcript is stored in questSession.currentResult
def postMyAcademics_unofficialTranscript_option(questSession, academic_option, type_option):
    # If not in the right POST position, change to the right POST position
    if not (questSession.currentPOSTpage == "MY_ACADEMICS_UNOFFICIAL_OPTION_LINK"): #or questSession.currentPOSTpage is "MY_ACADEMICS_UNOFFICIAL_OPTION_LINK"):
if not gotoMyAcademics_unofficialTranscript(questSession): #questSession.postMyAcademics_unofficialTranscript_optionLink():
print "POST Unofficial with option: (%s, %s) Failed" % (academic_option, type_option)
return False
# Start to post
postData = questSession.getBasicParameters()
postData['ICAction'] = 'DERIVED_AA2_TSCRPT_TYPE3'
postData['DERIVED_SSTSNAV_SSTS_MAIN_GOTO$7$'] = '9999'
postData['SA_REQUEST_HDR_INSTITUTION'] = academic_option
postData['DERIVED_AA2_TSCRPT_TYPE3'] = type_option
postData['DERIVED_SSTSNAV_SSTS_MAIN_GOTO$8$'] = '9999'
print postData["ICStateNum"]
response = questSession.session.post(myAcademicsGraduateUnofficialTranscriptURL_HRMS, data = postData, allow_redirects = False)
questSession.currentResponse = response
if response.status_code == requests.codes.ok:
questSession.currentStateNum += 1
print "POST Unofficial with option: (%s, %s) send OK" % (academic_option, type_option)
postData = questSession.getBasicParameters()
postData["ICAction"] = "GO"
postData['DERIVED_SSTSNAV_SSTS_MAIN_GOTO$7$'] = '9999'
postData['SA_REQUEST_HDR_INSTITUTION'] = academic_option
postData['DERIVED_AA2_TSCRPT_TYPE3'] = type_option
postData['DERIVED_SSTSNAV_SSTS_MAIN_GOTO$8$'] = '9999'
print postData["ICStateNum"]
response = questSession.session.post(myAcademicsGraduateUnofficialTranscriptURL_HRMS, data = postData, allow_redirects = False)
questSession.currentResponse = response
if response.status_code == requests.codes.ok:
questSession.currentStateNum += 1
print "POST Unofficial with option: (%s, %s) GO OK" % (academic_option, type_option)
for i in xrange(0, 15):
postData = questSession.getBasicParameters()
postData["ICAction"] = "UW_DERIVED_SR_REFRESH_BTN"
postData['DERIVED_SSTSNAV_SSTS_MAIN_GOTO$7$'] = '9999'
postData['DERIVED_SSTSNAV_SSTS_MAIN_GOTO$8$'] = '9999'
print postData["ICStateNum"]
response = questSession.session.post(myAcademicsGraduateUnofficialTranscriptURL_HRMS, data = postData, allow_redirects = False)
questSession.currentResponse = response
if response.status_code == requests.codes.ok:
questSession.currentStateNum += 1
if checkTranscriptIsDownloaded(questSession, response):
print "Unofficial Transcript GET!"
questSession.currentPOSTpage = "MY_ACADEMICS_UNOFFICIAL_OPTION"
return True
print "POST Unofficial with option: (%s, %s) REFRESH OK" % (academic_option, type_option)
time.sleep(1)
else :
print "POST Unofficial with option: (%s, %s) Failed" % (academic_option, type_option)
return False
# Time out, return false
questSession.currentPOSTpage = "MY_ACADEMICS_UNOFFICIAL_OPTION"
# questSession.gotoMyAcademics_myProgram()
return False
return False
else:
print "POST Unofficial with option: (%s, %s) Failed" % (academic_option, type_option)
return False
# def postMyAcademics_unofficialTranscript_optionLink(questSession):
# if questSession.currentPOSTpage is "MY_ACADEMICS_UNOFFICIAL_OPTION_LINK":
# print "POST Unofficial link: Already In"
# return True
# else :
# postData = questSession.getBasicParameters()
# postData['ICAction'] = 'DERIVED_AA2_DERIVED_LINK3'
# postData['DERIVED_SSTSNAV_SSTS_MAIN_GOTO$7$'] = '9999'
# postData['DERIVED_SSTSNAV_SSTS_MAIN_GOTO$8$'] = '9999'
# response = questSession.session.post(myAcademicsGraduateUnofficialTranscriptURL_HRMS, data = postData, allow_redirects = False)
# questSession.currentResponse = response
# if response.status_code == requests.codes.ok:
# print "POST Unofficial link OK"
# questSession.currentStateNum += 1
# questSession.currentPOSTpage = "MY_ACADEMICS_UNOFFICIAL_OPTION_LINK"
# return True
# else:
# print "POST Unofficial link Failed"
# return False
def checkTranscriptIsDownloaded(questSession, response):
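    # The response wraps markup in CDATA sections; rewriting the markers exposes
    # the embedded tags so BeautifulSoup can locate the PrintTranscript element.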
prettifiedContent = response.content.replace("<![CDATA[", "<").replace("]]>", ">")
soup = BeautifulSoup(prettifiedContent)
# print soup.prettify()
transcript = soup.find(id="PrintTranscript")
    if transcript is not None:
questSession.currentResult = prettifiedContent
return True
else:
return False
def gotoMyAcademics_advisors(questSession):
    ''' Go to My Advisors
@Param
@Return True/False
'''
getAdvisorsData = {
'Page': 'SSR_SSADVR',
'Action': 'U'
}
response = questSession.session.get(myAcademicsGraduateAdvisorsURL, data = getAdvisorsData, allow_redirects = False)
questSession.currentResponse = response
if response.status_code == requests.codes.ok:
if (questSession.updateStateNum(response)):
print "GET My Advisors Page OK"
# print response.content
return True
print "GET My Advisors Page Failed"
return False
def gotoMyAcademics_graduateOfferLetters(questSession):
''' Go to my Graduate Offer Letters
@Param
@Return True/False
'''
getGraduateOfferData = {
'Page': 'UW_SS_GRD_OFFR_CTR',
'Action': 'U'
}
response = questSession.session.get(myAcademicsGraduateGradOfferURL, data = getGraduateOfferData, allow_redirects = False)
questSession.currentResponse = response
if response.status_code == requests.codes.ok:
if (questSession.updateStateNum(response)):
print "GET Graduate Offer Letters Page OK"
# print response.content
return True
print "GET Graduate Offer Letters Page Failed"
return False
def main():
myQuest = QuestClass.QuestSession("", "") # "userid", "password"
myQuest.login()
myQuest.postMyAcademics()
# myQuest.gotoMyAcademics_myProgram()
# print QuestParser.API_myAcademics_myProgramResponse(myQuest)
# myQuest.gotoMyAcademics_grades()
# print QuestParser.API_myAcademics_gradesResponse(myQuest)
# myQuest.postMyAcademics_grades_termIndex(1)
# print QuestParser.API_myAcademics_gradesTermResponse(myQuest)
# myQuest.postMyAcademics_grades_termIndex(3)
# print QuestParser.API_myAcademics_gradesTermResponse(myQuest)
# myQuest.postMyAcademics_grades_termIndex(0)
# print QuestParser.API_myAcademics_gradesTermResponse(myQuest)
# myQuest.postMyAcademics_grades_termIndex(0)
# print QuestParser.API_myAcademics_gradesTermResponse(myQuest)
# myQuest.gotoMyAcademics_unofficialTranscript()
# print QuestParser.API_myAcademics_unofficialTranscriptResponse(myQuest)
myQuest.postMyAcademics_unofficialTranscript_option('UWATR', 'UNGRD')
print QuestParser.API_myAcademics_unofficialTranscriptResultResponse(myQuest)
myQuest.gotoMyAcademics_advisors()
print QuestParser.API_myAcademics_myAdvisorResponse(myQuest)
if __name__ == '__main__':
main()
|
honghaoz/UWQuestAPI
|
uw-quest/UWQuestAPI/MyAcademics.py
|
Python
|
apache-2.0
| 13,140
|
# For detailed comments on animation and the techniques used here, see
# the wiki entry
# http://www.scipy.org/wikis/topical_software/MatplotlibAnimation
# The number of blits() to make before exiting
NBLITS = 1000
import matplotlib
matplotlib.use('WXAgg')
matplotlib.rcParams['toolbar'] = 'None'
import wx
import sys
import pylab as p
import numpy as npy
import time
# allow the user to disable the WXAgg accelerator from the command line
if '--no-accel' in sys.argv:
import matplotlib.backends.backend_wxagg
matplotlib.backends.backend_wxagg._use_accelerator(False)
ax = p.subplot(111)
canvas = ax.figure.canvas
p.subplots_adjust(left=0.3, bottom=0.3) # check for flipy bugs
p.grid() # to ensure proper background restore
# create the initial line
x = npy.arange(0,2*npy.pi,0.01)
line, = p.plot(x, npy.sin(x), animated=True, lw=2)
# for profiling
tstart = time.time()
blit_time = 0.0
def update_line(*args):
global blit_time
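    # Capture the static background (axes, grid) once on the first call; it is
    # restored before every frame so only the animated line needs redrawing.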
if update_line.background is None:
update_line.background = canvas.copy_from_bbox(ax.bbox)
# restore the clean slate background
canvas.restore_region(update_line.background)
# update the data
line.set_ydata(npy.sin(x+update_line.cnt/10.0))
# just draw the animated artist
ax.draw_artist(line)
# just redraw the axes rectangle
t = time.time()
canvas.blit(ax.bbox)
blit_time += time.time() - t
if update_line.cnt == NBLITS:
# print the timing info and quit
frame_time = time.time() - tstart
print '%d frames: %.2f seconds' % (NBLITS, frame_time)
print '%d blits: %.2f seconds' % (NBLITS, blit_time)
print
print 'FPS: %.2f' % (NBLITS/frame_time)
print 'BPS: %.2f' % (NBLITS/blit_time)
sys.exit()
update_line.cnt += 1
wx.WakeUpIdle()
update_line.cnt = 0
update_line.background = None
wx.EVT_IDLE(wx.GetApp(), update_line)
p.show()
|
sniemi/SamPy
|
sandbox/src1/examples/animation_blit_wx.py
|
Python
|
bsd-2-clause
| 1,914
|
import copy
import threading
import time
import warnings
from collections import deque
from contextlib import contextmanager
import _thread
import pytz
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import DEFAULT_DB_ALIAS
from django.db.backends import utils
from django.db.backends.base.validation import BaseDatabaseValidation
from django.db.backends.signals import connection_created
from django.db.transaction import TransactionManagementError
from django.db.utils import DatabaseError, DatabaseErrorWrapper
from django.utils import timezone
from django.utils.functional import cached_property
NO_DB_ALIAS = '__no_db__'
class BaseDatabaseWrapper:
"""Represent a database connection."""
# Mapping of Field objects to their column types.
data_types = {}
# Mapping of Field objects to their SQL suffix such as AUTOINCREMENT.
data_types_suffix = {}
# Mapping of Field objects to their SQL for CHECK constraints.
data_type_check_constraints = {}
ops = None
vendor = 'unknown'
display_name = 'unknown'
SchemaEditorClass = None
# Classes instantiated in __init__().
client_class = None
creation_class = None
features_class = None
introspection_class = None
ops_class = None
validation_class = BaseDatabaseValidation
queries_limit = 9000
def __init__(self, settings_dict, alias=DEFAULT_DB_ALIAS):
# Connection related attributes.
# The underlying database connection.
self.connection = None
# `settings_dict` should be a dictionary containing keys such as
# NAME, USER, etc. It's called `settings_dict` instead of `settings`
# to disambiguate it from Django settings modules.
self.settings_dict = settings_dict
self.alias = alias
# Query logging in debug mode or when explicitly enabled.
self.queries_log = deque(maxlen=self.queries_limit)
self.force_debug_cursor = False
# Transaction related attributes.
# Tracks if the connection is in autocommit mode. Per PEP 249, by
# default, it isn't.
self.autocommit = False
# Tracks if the connection is in a transaction managed by 'atomic'.
self.in_atomic_block = False
# Increment to generate unique savepoint ids.
self.savepoint_state = 0
# List of savepoints created by 'atomic'.
self.savepoint_ids = []
# Tracks if the outermost 'atomic' block should commit on exit,
# ie. if autocommit was active on entry.
self.commit_on_exit = True
# Tracks if the transaction should be rolled back to the next
# available savepoint because of an exception in an inner block.
self.needs_rollback = False
# Connection termination related attributes.
self.close_at = None
self.closed_in_transaction = False
self.errors_occurred = False
# Thread-safety related attributes.
self._thread_sharing_lock = threading.Lock()
self._thread_sharing_count = 0
self._thread_ident = _thread.get_ident()
# A list of no-argument functions to run when the transaction commits.
# Each entry is an (sids, func) tuple, where sids is a set of the
# active savepoint IDs when this function was registered.
self.run_on_commit = []
# Should we run the on-commit hooks the next time set_autocommit(True)
# is called?
self.run_commit_hooks_on_set_autocommit_on = False
# A stack of wrappers to be invoked around execute()/executemany()
# calls. Each entry is a function taking five arguments: execute, sql,
# params, many, and context. It's the function's responsibility to
# call execute(sql, params, many, context).
self.execute_wrappers = []
self.client = self.client_class(self)
self.creation = self.creation_class(self)
self.features = self.features_class(self)
self.introspection = self.introspection_class(self)
self.ops = self.ops_class(self)
self.validation = self.validation_class(self)
def ensure_timezone(self):
"""
Ensure the connection's timezone is set to `self.timezone_name` and
return whether it changed or not.
"""
return False
@cached_property
def timezone(self):
"""
Time zone for datetimes stored as naive values in the database.
Return a tzinfo object or None.
This is only needed when time zone support is enabled and the database
doesn't support time zones. (When the database supports time zones,
the adapter handles aware datetimes so Django doesn't need to.)
"""
if not settings.USE_TZ:
return None
elif self.features.supports_timezones:
return None
elif self.settings_dict['TIME_ZONE'] is None:
return timezone.utc
else:
return pytz.timezone(self.settings_dict['TIME_ZONE'])
@cached_property
def timezone_name(self):
"""
Name of the time zone of the database connection.
"""
if not settings.USE_TZ:
return settings.TIME_ZONE
elif self.settings_dict['TIME_ZONE'] is None:
return 'UTC'
else:
return self.settings_dict['TIME_ZONE']
@property
def queries_logged(self):
return self.force_debug_cursor or settings.DEBUG
@property
def queries(self):
if len(self.queries_log) == self.queries_log.maxlen:
warnings.warn(
"Limit for query logging exceeded, only the last {} queries "
"will be returned.".format(self.queries_log.maxlen))
return list(self.queries_log)
# ##### Backend-specific methods for creating connections and cursors #####
def get_connection_params(self):
"""Return a dict of parameters suitable for get_new_connection."""
raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a get_connection_params() method')
def get_new_connection(self, conn_params):
"""Open a connection to the database."""
raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a get_new_connection() method')
def init_connection_state(self):
"""Initialize the database connection settings."""
raise NotImplementedError('subclasses of BaseDatabaseWrapper may require an init_connection_state() method')
def create_cursor(self, name=None):
"""Create a cursor. Assume that a connection is established."""
raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a create_cursor() method')
# ##### Backend-specific methods for creating connections #####
def connect(self):
"""Connect to the database. Assume that the connection is closed."""
# Check for invalid configurations.
self.check_settings()
# In case the previous connection was closed while in an atomic block
self.in_atomic_block = False
self.savepoint_ids = []
self.needs_rollback = False
# Reset parameters defining when to close the connection
max_age = self.settings_dict['CONN_MAX_AGE']
self.close_at = None if max_age is None else time.monotonic() + max_age
self.closed_in_transaction = False
self.errors_occurred = False
# Establish the connection
conn_params = self.get_connection_params()
self.connection = self.get_new_connection(conn_params)
self.set_autocommit(self.settings_dict['AUTOCOMMIT'])
self.init_connection_state()
connection_created.send(sender=self.__class__, connection=self)
self.run_on_commit = []
def check_settings(self):
if self.settings_dict['TIME_ZONE'] is not None:
if not settings.USE_TZ:
raise ImproperlyConfigured(
"Connection '%s' cannot set TIME_ZONE because USE_TZ is "
"False." % self.alias)
elif self.features.supports_timezones:
raise ImproperlyConfigured(
"Connection '%s' cannot set TIME_ZONE because its engine "
"handles time zones conversions natively." % self.alias)
def ensure_connection(self):
"""Guarantee that a connection to the database is established."""
if self.connection is None:
with self.wrap_database_errors:
self.connect()
# ##### Backend-specific wrappers for PEP-249 connection methods #####
def _prepare_cursor(self, cursor):
"""
Validate the connection is usable and perform database cursor wrapping.
"""
self.validate_thread_sharing()
if self.queries_logged:
wrapped_cursor = self.make_debug_cursor(cursor)
else:
wrapped_cursor = self.make_cursor(cursor)
return wrapped_cursor
def _cursor(self, name=None):
self.ensure_connection()
with self.wrap_database_errors:
return self._prepare_cursor(self.create_cursor(name))
def _commit(self):
if self.connection is not None:
with self.wrap_database_errors:
return self.connection.commit()
def _rollback(self):
if self.connection is not None:
with self.wrap_database_errors:
return self.connection.rollback()
def _close(self):
if self.connection is not None:
with self.wrap_database_errors:
return self.connection.close()
# ##### Generic wrappers for PEP-249 connection methods #####
def cursor(self):
"""Create a cursor, opening a connection if necessary."""
return self._cursor()
def commit(self):
"""Commit a transaction and reset the dirty flag."""
self.validate_thread_sharing()
self.validate_no_atomic_block()
self._commit()
# A successful commit means that the database connection works.
self.errors_occurred = False
self.run_commit_hooks_on_set_autocommit_on = True
def rollback(self):
"""Roll back a transaction and reset the dirty flag."""
self.validate_thread_sharing()
self.validate_no_atomic_block()
self._rollback()
# A successful rollback means that the database connection works.
self.errors_occurred = False
self.needs_rollback = False
self.run_on_commit = []
def close(self):
"""Close the connection to the database."""
self.validate_thread_sharing()
self.run_on_commit = []
# Don't call validate_no_atomic_block() to avoid making it difficult
# to get rid of a connection in an invalid state. The next connect()
# will reset the transaction state anyway.
if self.closed_in_transaction or self.connection is None:
return
try:
self._close()
finally:
if self.in_atomic_block:
self.closed_in_transaction = True
self.needs_rollback = True
else:
self.connection = None
# ##### Backend-specific savepoint management methods #####
def _savepoint(self, sid):
with self.cursor() as cursor:
cursor.execute(self.ops.savepoint_create_sql(sid))
def _savepoint_rollback(self, sid):
with self.cursor() as cursor:
cursor.execute(self.ops.savepoint_rollback_sql(sid))
def _savepoint_commit(self, sid):
with self.cursor() as cursor:
cursor.execute(self.ops.savepoint_commit_sql(sid))
def _savepoint_allowed(self):
# Savepoints cannot be created outside a transaction
return self.features.uses_savepoints and not self.get_autocommit()
# ##### Generic savepoint management methods #####
def savepoint(self):
"""
Create a savepoint inside the current transaction. Return an
identifier for the savepoint that will be used for the subsequent
rollback or commit. Do nothing if savepoints are not supported.
"""
if not self._savepoint_allowed():
return
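        # Build a savepoint id that is unique per thread and per connection by
        # combining the thread ident with a monotonically increasing counter.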
thread_ident = _thread.get_ident()
tid = str(thread_ident).replace('-', '')
self.savepoint_state += 1
sid = "s%s_x%d" % (tid, self.savepoint_state)
self.validate_thread_sharing()
self._savepoint(sid)
return sid
def savepoint_rollback(self, sid):
"""
Roll back to a savepoint. Do nothing if savepoints are not supported.
"""
if not self._savepoint_allowed():
return
self.validate_thread_sharing()
self._savepoint_rollback(sid)
# Remove any callbacks registered while this savepoint was active.
self.run_on_commit = [
(sids, func) for (sids, func) in self.run_on_commit if sid not in sids
]
def savepoint_commit(self, sid):
"""
Release a savepoint. Do nothing if savepoints are not supported.
"""
if not self._savepoint_allowed():
return
self.validate_thread_sharing()
self._savepoint_commit(sid)
def clean_savepoints(self):
"""
Reset the counter used to generate unique savepoint ids in this thread.
"""
self.savepoint_state = 0
# ##### Backend-specific transaction management methods #####
def _set_autocommit(self, autocommit):
"""
Backend-specific implementation to enable or disable autocommit.
"""
raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a _set_autocommit() method')
# ##### Generic transaction management methods #####
def get_autocommit(self):
"""Get the autocommit state."""
self.ensure_connection()
return self.autocommit
def set_autocommit(self, autocommit, force_begin_transaction_with_broken_autocommit=False):
"""
Enable or disable autocommit.
The usual way to start a transaction is to turn autocommit off.
SQLite does not properly start a transaction when disabling
autocommit. To avoid this buggy behavior and to actually enter a new
        transaction, an explicit BEGIN is required. Using
force_begin_transaction_with_broken_autocommit=True will issue an
explicit BEGIN with SQLite. This option will be ignored for other
backends.
"""
self.validate_no_atomic_block()
self.ensure_connection()
start_transaction_under_autocommit = (
force_begin_transaction_with_broken_autocommit and not autocommit and
hasattr(self, '_start_transaction_under_autocommit')
)
if start_transaction_under_autocommit:
self._start_transaction_under_autocommit()
else:
self._set_autocommit(autocommit)
self.autocommit = autocommit
if autocommit and self.run_commit_hooks_on_set_autocommit_on:
self.run_and_clear_commit_hooks()
self.run_commit_hooks_on_set_autocommit_on = False
def get_rollback(self):
"""Get the "needs rollback" flag -- for *advanced use* only."""
if not self.in_atomic_block:
raise TransactionManagementError(
"The rollback flag doesn't work outside of an 'atomic' block.")
return self.needs_rollback
def set_rollback(self, rollback):
"""
Set or unset the "needs rollback" flag -- for *advanced use* only.
"""
if not self.in_atomic_block:
raise TransactionManagementError(
"The rollback flag doesn't work outside of an 'atomic' block.")
self.needs_rollback = rollback
def validate_no_atomic_block(self):
"""Raise an error if an atomic block is active."""
if self.in_atomic_block:
raise TransactionManagementError(
"This is forbidden when an 'atomic' block is active.")
def validate_no_broken_transaction(self):
if self.needs_rollback:
raise TransactionManagementError(
"An error occurred in the current transaction. You can't "
"execute queries until the end of the 'atomic' block.")
# ##### Foreign key constraints checks handling #####
@contextmanager
def constraint_checks_disabled(self):
"""
Disable foreign key constraint checking.
"""
disabled = self.disable_constraint_checking()
try:
yield
finally:
if disabled:
self.enable_constraint_checking()
def disable_constraint_checking(self):
"""
Backends can implement as needed to temporarily disable foreign key
constraint checking. Should return True if the constraints were
disabled and will need to be reenabled.
"""
return False
def enable_constraint_checking(self):
"""
Backends can implement as needed to re-enable foreign key constraint
checking.
"""
pass
def check_constraints(self, table_names=None):
"""
Backends can override this method if they can apply constraint
checking (e.g. via "SET CONSTRAINTS ALL IMMEDIATE"). Should raise an
IntegrityError if any invalid foreign key references are encountered.
"""
pass
# ##### Connection termination handling #####
def is_usable(self):
"""
Test if the database connection is usable.
This method may assume that self.connection is not None.
Actual implementations should take care not to raise exceptions
as that may prevent Django from recycling unusable connections.
"""
raise NotImplementedError(
"subclasses of BaseDatabaseWrapper may require an is_usable() method")
def close_if_unusable_or_obsolete(self):
"""
Close the current connection if unrecoverable errors have occurred
or if it outlived its maximum age.
"""
if self.connection is not None:
# If the application didn't restore the original autocommit setting,
# don't take chances, drop the connection.
if self.get_autocommit() != self.settings_dict['AUTOCOMMIT']:
self.close()
return
# If an exception other than DataError or IntegrityError occurred
# since the last commit / rollback, check if the connection works.
if self.errors_occurred:
if self.is_usable():
self.errors_occurred = False
else:
self.close()
return
if self.close_at is not None and time.monotonic() >= self.close_at:
self.close()
return
# ##### Thread safety handling #####
@property
def allow_thread_sharing(self):
with self._thread_sharing_lock:
return self._thread_sharing_count > 0
def inc_thread_sharing(self):
with self._thread_sharing_lock:
self._thread_sharing_count += 1
def dec_thread_sharing(self):
with self._thread_sharing_lock:
if self._thread_sharing_count <= 0:
raise RuntimeError('Cannot decrement the thread sharing count below zero.')
self._thread_sharing_count -= 1
def validate_thread_sharing(self):
"""
Validate that the connection isn't accessed by another thread than the
one which originally created it, unless the connection was explicitly
authorized to be shared between threads (via the `inc_thread_sharing()`
method). Raise an exception if the validation fails.
"""
if not (self.allow_thread_sharing or self._thread_ident == _thread.get_ident()):
raise DatabaseError(
"DatabaseWrapper objects created in a "
"thread can only be used in that same thread. The object "
"with alias '%s' was created in thread id %s and this is "
"thread id %s."
% (self.alias, self._thread_ident, _thread.get_ident())
)
# ##### Miscellaneous #####
def prepare_database(self):
"""
Hook to do any database check or preparation, generally called before
migrating a project or an app.
"""
pass
@cached_property
def wrap_database_errors(self):
"""
Context manager and decorator that re-throws backend-specific database
exceptions using Django's common wrappers.
"""
return DatabaseErrorWrapper(self)
def chunked_cursor(self):
"""
Return a cursor that tries to avoid caching in the database (if
supported by the database), otherwise return a regular cursor.
"""
return self.cursor()
def make_debug_cursor(self, cursor):
"""Create a cursor that logs all queries in self.queries_log."""
return utils.CursorDebugWrapper(cursor, self)
def make_cursor(self, cursor):
"""Create a cursor without debug logging."""
return utils.CursorWrapper(cursor, self)
@contextmanager
def temporary_connection(self):
"""
Context manager that ensures that a connection is established, and
if it opened one, closes it to avoid leaving a dangling connection.
This is useful for operations outside of the request-response cycle.
Provide a cursor: with self.temporary_connection() as cursor: ...
"""
must_close = self.connection is None
try:
with self.cursor() as cursor:
yield cursor
finally:
if must_close:
self.close()
@property
def _nodb_connection(self):
"""
Return an alternative connection to be used when there is no need to
access the main database, specifically for test db creation/deletion.
This also prevents the production database from being exposed to
potential child threads while (or after) the test database is destroyed.
Refs #10868, #17786, #16969.
"""
return self.__class__({**self.settings_dict, 'NAME': None}, alias=NO_DB_ALIAS)
def schema_editor(self, *args, **kwargs):
"""
Return a new instance of this backend's SchemaEditor.
"""
if self.SchemaEditorClass is None:
raise NotImplementedError(
'The SchemaEditorClass attribute of this database wrapper is still None')
return self.SchemaEditorClass(self, *args, **kwargs)
def on_commit(self, func):
if self.in_atomic_block:
# Transaction in progress; save for execution on commit.
self.run_on_commit.append((set(self.savepoint_ids), func))
elif not self.get_autocommit():
raise TransactionManagementError('on_commit() cannot be used in manual transaction management')
else:
# No transaction in progress and in autocommit mode; execute
# immediately.
func()
def run_and_clear_commit_hooks(self):
self.validate_no_atomic_block()
current_run_on_commit = self.run_on_commit
self.run_on_commit = []
while current_run_on_commit:
sids, func = current_run_on_commit.pop(0)
func()
@contextmanager
def execute_wrapper(self, wrapper):
"""
Return a context manager under which the wrapper is applied to suitable
database query executions.
"""
self.execute_wrappers.append(wrapper)
try:
yield
finally:
self.execute_wrappers.pop()
def copy(self, alias=None):
"""
Return a copy of this connection.
For tests that require two connections to the same database.
"""
settings_dict = copy.deepcopy(self.settings_dict)
if alias is None:
alias = self.alias
return type(self)(settings_dict, alias)
|
fenginx/django
|
django/db/backends/base/base.py
|
Python
|
bsd-3-clause
| 24,322
|
from __future__ import absolute_import
import six
from sentry.api.serializers import Serializer, register
from sentry.models import ProjectDSymFile, GlobalDSymFile
@register(ProjectDSymFile)
@register(GlobalDSymFile)
class DSymFileSerializer(Serializer):
def serialize(self, obj, attrs, user):
d = {
'id': six.text_type(obj.id),
'uuid': obj.uuid,
'cpuName': obj.cpu_name,
'objectName': obj.object_name,
'symbolType': obj.dsym_type,
'headers': obj.file.headers,
'size': obj.file.size,
'sha1': obj.file.checksum,
'dateCreated': obj.file.timestamp,
}
return d
|
alexm92/sentry
|
src/sentry/api/serializers/models/dsym_file.py
|
Python
|
bsd-3-clause
| 697
|
"""Main test file for the pyrabbit Client."""
import json
try:
#python 2.x
import unittest2 as unittest
except ImportError:
#python 3.x
import unittest
import sys
sys.path.append('..')
import pyrabbit
from mock import Mock, patch
class TestClient(unittest.TestCase):
def setUp(self):
self.client = pyrabbit.api.Client('localhost:55672', 'guest', 'guest')
def tearDown(self):
del self.client
def test_server_init_200(self):
self.assertIsInstance(self.client, pyrabbit.api.Client)
self.assertEqual(self.client.host, 'localhost:55672')
def test_server_is_alive_default_vhost(self):
response = {'status': 'ok'}
self.client.http.do_call = Mock(return_value=response)
with patch.object(pyrabbit.api.Client, 'has_admin_rights') as mock_rights:
mock_rights.__get__ = Mock(return_value=True)
self.assertTrue(self.client.is_alive())
def test_get_vhosts_200(self):
self.client.http.do_call = Mock(return_value=[])
vhosts = self.client.get_all_vhosts()
self.assertIsInstance(vhosts, list)
def test_get_all_queues(self):
self.client.http.do_call = Mock(return_value=[])
queues = self.client.get_queues()
self.assertIsInstance(queues, list)
def test_get_nodes(self):
self.client.http.do_call = Mock(return_value=[])
nodes = self.client.get_nodes()
self.assertIsInstance(nodes, list)
def test_purge_queues(self):
self.client.http.do_call = Mock(return_value=True)
self.assertTrue(self.client.purge_queues(['q1', 'q2']))
def test_get_queue(self):
self.client.http.do_call = Mock(return_value=True)
self.assertTrue(self.client.get_queue('', 'q1'))
def test_get_all_exchanges(self):
xchs = [{'name': 'foo', 'vhost': '/', 'type': 'direct',
'durable': False, 'auto_delete': False, 'internal': False,
'arguments': {}},
{'name': 'bar', 'vhost': '/', 'type': 'direct',
'durable': False, 'auto_delete': False, 'internal': False,
'arguments': {}},]
self.client.http.do_call = Mock(return_value=xchs)
xlist = self.client.get_exchanges()
self.assertIsInstance(xlist, list)
self.assertEqual(len(xlist), 2)
def test_get_named_exchange(self):
xch = {'name': 'foo', 'vhost': '/', 'type': 'direct',
'durable': False, 'auto_delete': False, 'internal': False,
'arguments': {}}
self.client.http.do_call = Mock(return_value=xch)
myexch = self.client.get_exchange('%2F', 'foo')
self.assertEqual(myexch['name'], 'foo')
@patch.object(pyrabbit.api.Client, 'has_admin_rights')
def test_get_users_noprivs(self, has_rights):
has_rights.__get__ = Mock(return_value=False)
self.assertRaises(pyrabbit.api.PermissionError, self.client.get_users)
@patch.object(pyrabbit.api.Client, 'has_admin_rights')
def test_get_users_withprivs(self, has_rights):
has_rights.return_value = True
with patch('pyrabbit.http.HTTPClient.do_call') as do_call:
self.assertTrue(self.client.get_users())
def test_get_queue_depth(self):
q = {'messages': 4}
self.client.http.do_call = Mock(return_value=q)
depth = self.client.get_queue_depth('/', 'test')
self.assertEqual(depth, q['messages'])
def test_get_queue_depth_2(self):
"""
An integration test that includes the HTTP client's do_call
method and json decoding operations.
"""
q = {'messages': 8}
json_q = json.dumps(q)
with patch('httplib2.Response') as resp:
resp.reason = 'response reason here'
resp.status = 200
self.client.http.client.request = Mock(return_value=(resp, json_q))
depth = self.client.get_queue_depth('/', 'test')
self.assertEqual(depth, q['messages'])
def test_purge_queue(self):
self.client.http.do_call = Mock(return_value=True)
self.assertTrue(self.client.purge_queue('vname', 'qname'))
def test_create_queue(self):
self.client.http.do_call = Mock(return_value=True)
self.assertTrue(self.client.create_queue('qname', 'vname'))
def test_get_connections(self):
self.client.http.do_call = Mock(return_value=True)
self.assertTrue(self.client.get_connections())
def test_get_connection(self):
self.client.http.do_call = Mock(return_value=True)
self.assertTrue(self.client.get_connection('cname'))
def test_delete_connection(self):
self.client.http.do_call = Mock(return_value=True)
self.assertTrue(self.client.delete_connection('127.0.0.1:1234 -> 127.0.0.1:5678 (1)'))
def test_get_channels(self):
self.client.http.do_call = Mock(return_value=True)
self.assertTrue(self.client.get_channels())
def test_get_channel(self):
self.client.http.do_call = Mock(return_value=True)
self.assertTrue(self.client.get_channel('127.0.0.1:1234 -> 127.0.0.1:5678 (1)'))
def test_get_bindings(self):
self.client.http.do_call = Mock(return_value=True)
self.assertTrue(self.client.get_bindings())
def test_create_binding(self):
self.client.http.do_call = Mock(return_value=True)
self.assertTrue(self.client.create_binding('vhost',
'exch',
'queue',
'rt_key'))
def test_delete_binding(self):
self.client.http.do_call = Mock(return_value=True)
self.assertTrue(self.client.delete_binding('vhost',
'exch',
'queue',
'rt_key'))
def test_publish(self):
self.client.http.do_call = Mock(return_value={'routed': 'true'})
self.assertTrue(self.client.publish('vhost', 'xname', 'rt_key',
'payload'))
def test_create_vhost(self):
self.client.http.do_call = Mock(return_value=True)
self.assertTrue(self.client.create_vhost('vname'))
def test_delete_vhost(self):
self.client.http.do_call = Mock(return_value=True)
self.assertTrue(self.client.delete_vhost('vname'))
def test_create_user(self):
self.client.http.do_call = Mock(return_value=True)
self.assertTrue(self.client.create_user('user', 'password'))
def test_delete_user(self):
self.client.http.do_call = Mock(return_value=True)
self.assertTrue(self.client.delete_user('user'))
def test_get_permissions(self):
self.client.http.do_call = Mock(return_value=True)
self.assertTrue(self.client.get_permissions())
def test_get_vhost_permissions(self):
self.client.http.do_call = Mock(return_value=True)
self.assertTrue(self.client.get_vhost_permissions('vname'))
def test_get_user_permissions(self):
self.client.http.do_call = Mock(return_value=True)
self.assertTrue(self.client.get_user_permissions('username'))
def test_delete_permission(self):
self.client.http.do_call = Mock(return_value=True)
self.assertTrue(self.client.delete_permission('vname', 'username'))
def test_get_permission(self):
self.client.http.do_call = Mock(return_value=True)
self.assertTrue(self.client.get_permission('vname', 'username'))
@patch.object(pyrabbit.api.Client, 'has_admin_rights')
def test_is_alive_withprivs(self, mock_rights):
mock_rights.__get__ = Mock(return_value=True)
with patch('pyrabbit.http.HTTPClient.do_call') as do_call:
do_call.return_value = {'status': 'ok'}
self.assertTrue(self.client.is_alive())
def test_is_alive_noprivs(self):
with patch.object(pyrabbit.api.Client, 'has_admin_rights') as mock_rights:
mock_rights.__get__ = Mock(return_value=False)
self.assertRaises(pyrabbit.api.PermissionError, self.client.is_alive)
def test_has_admin_rights(self):
response = {
'auth_backend': 'rabbit_auth_backend_internal',
'name': 'guest',
'tags': 'administrator',
}
self.client.get_whoami = Mock(return_value=response)
with patch.object(pyrabbit.api.Client, 'get_whoami') as mock_whoami:
mock_whoami.__get__ = Mock(return_value=True)
self.assertTrue(self.client.has_admin_rights)
@unittest.skip("requires a live RabbitMQ broker")
class TestLiveServer(unittest.TestCase):
def setUp(self):
self.rabbit = pyrabbit.api.Client('localhost:15672', 'guest', 'guest')
self.vhost_name = 'pyrabbit_test_vhost'
self.exchange_name = 'pyrabbit_test_exchange'
self.queue_name = 'pyrabbit_test_queue'
self.rt_key = 'pyrabbit-roundtrip'
self.payload = 'pyrabbit test message payload'
self.user = 'guest'
def test_round_trip(self):
"""
This does a 'round trip' test, which consists of the following steps:
* Create a vhost, and verify creation
* Give 'guest' all perms on vhost
* Create an exchange in that vhost, verify creation
* Create a queue
* Create a binding between the queue and exchange
* Publish a message to the exchange that makes it to the queue
* Grab that message from the queue (verify it's the same message)
* Delete binding and verify we don't receive messages
* Delete the exchange
* Delete the vhost
"""
# create a vhost, verify creation, and grant all perms to 'guest'.
self.rabbit.create_vhost(self.vhost_name)
vhosts = [i['name'] for i in self.rabbit.get_all_vhosts()]
self.assertIn(self.vhost_name, vhosts)
self.rabbit.set_vhost_permissions(self.vhost_name, self.user,
'.*', '.*', '.*')
# create an exchange, and verify creation.
self.rabbit.create_exchange(self.vhost_name,
self.exchange_name,
'direct')
self.assertEqual(self.exchange_name,
self.rabbit.get_exchange(self.vhost_name,
self.exchange_name)['name'])
# create a queue and verify it was created
self.rabbit.create_queue(self.vhost_name,self.queue_name)
self.assertEqual(self.queue_name,
self.rabbit.get_queue(self.vhost_name,
self.queue_name)['name'])
# bind the queue and exchange
self.rabbit.create_binding(self.vhost_name, self.exchange_name,
self.queue_name, self.rt_key)
# publish a message, and verify by getting it back.
self.rabbit.publish(self.vhost_name, self.exchange_name, self.rt_key,
self.payload)
messages = self.rabbit.get_messages(self.vhost_name, self.queue_name)
self.assertEqual(messages[0]['payload'], self.payload)
# delete binding and verify we don't get the message
self.rabbit.delete_binding(self.vhost_name, self.exchange_name,
self.queue_name, self.rt_key)
self.rabbit.publish(self.vhost_name, self.exchange_name, self.rt_key,
self.payload)
messages = self.rabbit.get_messages(self.vhost_name, self.queue_name)
self.assertIsNone(messages)
# Clean up.
self.rabbit.delete_exchange(self.vhost_name, self.exchange_name)
self.rabbit.delete_vhost(self.vhost_name)
if __name__ == "__main__":
log = open('test_out.log', 'w')
unittest.main(testRunner=unittest.TextTestRunner(log))
|
vshn/pyrabbit-debian
|
tests/test_pyrabbit.py
|
Python
|
bsd-3-clause
| 12,041
|
from braintree.attribute_getter import AttributeGetter
class FundingDetails(AttributeGetter):
detail_list = [
"account_number_last_4",
"routing_number",
"destination",
"email",
"mobile_phone",
]
def __init__(self, attributes):
AttributeGetter.__init__(self, attributes)
def __repr__(self):
return super(FundingDetails, self).__repr__(self.detail_list)
|
DiptoDas8/Biponi
|
lib/python2.7/site-packages/braintree/merchant_account/funding_details.py
|
Python
|
mit
| 427
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class CheckNameAvailabilityInput(Model):
"""Input of check name availability API.
Variables are only populated by the server, and will be ignored when
sending a request.
:param name: The Search service name to validate. Search service names
must only contain lowercase letters, digits or dashes, cannot use dash as
the first two or last one characters, cannot contain consecutive dashes,
and must be between 2 and 60 characters in length.
:type name: str
:ivar type: The type of the resource whose name is to be validated. This
value must always be 'searchServices'. Default value: "searchServices" .
:vartype type: str
"""
_validation = {
'name': {'required': True},
'type': {'required': True, 'constant': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
type = "searchServices"
def __init__(self, name):
self.name = name
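# Example usage (hypothetical service name):
#   payload = CheckNameAvailabilityInput(name='my-search-service')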
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-search/azure/mgmt/search/models/check_name_availability_input.py
|
Python
|
mit
| 1,521
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import os
import time
import redis
from functools import wraps
from frappe.utils import get_site_path
from frappe import conf
END_LINE = '<!-- frappe: end-file -->'
TASK_LOG_MAX_AGE = 86400 # 1 day in seconds
redis_server = None
def handler(f):
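    # Turn a whitelisted function into an async task: calling the wrapped
    # function enqueues it via Celery and returns a queued-status response with
    # the task id, unless async is disabled in site config, in which case the
    # command runs inline.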
cmd = f.__module__ + '.' + f.__name__
def _run(args, set_in_response=True):
from frappe.tasks import run_async_task
from frappe.handler import execute_cmd
if frappe.conf.disable_async:
return execute_cmd(cmd, from_async=True)
args = frappe._dict(args)
task = run_async_task.delay(frappe.local.site,
(frappe.session and frappe.session.user) or 'Administrator', cmd, args)
if set_in_response:
frappe.local.response['task_id'] = task.id
return task.id
@wraps(f)
def queue(*args, **kwargs):
from frappe.tasks import run_async_task
from frappe.handler import execute_cmd
if frappe.conf.disable_async:
return execute_cmd(cmd, from_async=True)
task = run_async_task.delay(frappe.local.site,
(frappe.session and frappe.session.user) or 'Administrator', cmd,
frappe.local.form_dict)
frappe.local.response['task_id'] = task.id
return {
"status": "queued",
"task_id": task.id
}
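    # Note: 'async' became a reserved keyword in Python 3.7, so the attribute
    # assignment below only parses on the Python 2 interpreter this module targets.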
queue.async = True
queue.queue = f
queue.run = _run
frappe.whitelisted.append(f)
frappe.whitelisted.append(queue)
return queue
def run_async_task(method, args, reference_doctype=None, reference_name=None, set_in_response=True):
if frappe.local.request and frappe.local.request.method == "GET":
frappe.throw("Cannot run task in a GET request")
task_id = method.run(args, set_in_response=set_in_response)
task = frappe.new_doc("Async Task")
task.celery_task_id = task_id
task.status = "Queued"
task.reference_doctype = reference_doctype
task.reference_name = reference_name
task.save()
return task_id
@frappe.whitelist()
def get_pending_tasks_for_doc(doctype, docname):
return frappe.db.sql_list("select name from `tabAsync Task` where status in ('Queued', 'Running') and reference_doctype='%s' and reference_name='%s'" % (doctype, docname))
@handler
def ping():
from time import sleep
sleep(6)
return "pong"
@frappe.whitelist()
def get_task_status(task_id):
from frappe.celery_app import get_celery
c = get_celery()
a = c.AsyncResult(task_id)
frappe.local.response['response'] = a.result
return {
"state": a.state,
"progress": 0
}
def set_task_status(task_id, status, response=None):
frappe.db.set_value("Async Task", task_id, "status", status)
if not response:
response = {}
response.update({
"status": status,
"task_id": task_id
})
emit_via_redis("task_status_change", response, room="task:" + task_id)
def remove_old_task_logs():
logs_path = get_site_path('task-logs')
def full_path(_file):
return os.path.join(logs_path, _file)
files_to_remove = [full_path(_file) for _file in os.listdir(logs_path)]
files_to_remove = [_file for _file in files_to_remove if is_file_old(_file) and os.path.isfile(_file)]
for _file in files_to_remove:
os.remove(_file)
def is_file_old(file_path):
return ((time.time() - os.stat(file_path).st_mtime) > TASK_LOG_MAX_AGE)
def publish_realtime(event=None, message=None, room=None, user=None, doctype=None, docname=None, now=False):
"""Publish real-time updates
:param event: Event name, like `task_progress` etc. that will be handled by the client (default is `task_progress` if within task or `global`)
:param message: JSON message object. For async must contain `task_id`
:param room: Room in which to publish update (default entire site)
:param user: Transmit to user
:param doctype: Transmit to doctype, docname
:param docname: Transmit to doctype, docname"""
if message is None:
message = {}
if event is None:
if frappe.local.task_id:
event = "task_progress"
else:
event = "global"
if not room:
if frappe.local.task_id:
room = get_task_progress_room()
if not "task_id" in message:
message["task_id"] = frappe.local.task_id
now = True
elif user:
room = get_user_room(user)
elif doctype and docname:
room = get_doc_room(doctype, docname)
else:
room = get_site_room()
if now:
emit_via_redis(event, message, room)
else:
frappe.local.realtime_log.append([event, message, room])
def emit_via_redis(event, message, room):
"""Publish real-time updates via redis
:param event: Event name, like `task_progress` etc.
:param message: JSON message object. For async must contain `task_id`
:param room: name of the room"""
r = get_redis_server()
try:
r.publish('events', frappe.as_json({'event': event, 'message': message, 'room': room}))
except redis.exceptions.ConnectionError:
pass
def put_log(line_no, line, task_id=None):
r = get_redis_server()
if not task_id:
task_id = frappe.local.task_id
task_progress_room = get_task_progress_room()
task_log_key = "task_log:" + task_id
publish_realtime('task_progress', {
"message": {
"lines": {line_no: line}
},
"task_id": task_id
}, room=task_progress_room)
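    # Also persist the line in a per-task Redis hash; the hash expires an hour
    # after the last write so old task logs are cleaned up automatically.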
r.hset(task_log_key, line_no, line)
r.expire(task_log_key, 3600)
def get_redis_server():
"""Returns memcache connection."""
global redis_server
if not redis_server:
from redis import Redis
redis_server = Redis.from_url(conf.get("async_redis_server") or "redis://localhost:12311")
return redis_server
class FileAndRedisStream(file):
def __init__(self, *args, **kwargs):
ret = super(FileAndRedisStream, self).__init__(*args, **kwargs)
self.count = 0
return ret
def write(self, data):
ret = super(FileAndRedisStream, self).write(data)
if frappe.local.task_id:
put_log(self.count, data, task_id=frappe.local.task_id)
self.count += 1
return ret
def get_std_streams(task_id):
stdout = FileAndRedisStream(get_task_log_file_path(task_id, 'stdout'), 'w')
# stderr = FileAndRedisStream(get_task_log_file_path(task_id, 'stderr'), 'w')
return stdout, stdout
def get_task_log_file_path(task_id, stream_type):
logs_dir = frappe.utils.get_site_path('task-logs')
return os.path.join(logs_dir, task_id + '.' + stream_type)
@frappe.whitelist(allow_guest=True)
def can_subscribe_doc(doctype, docname, sid):
from frappe.sessions import Session
from frappe.exceptions import PermissionError
session = Session(None).get_session_data()
if not frappe.has_permission(user=session.user, doctype=doctype, doc=docname, ptype='read'):
raise PermissionError()
return True
@frappe.whitelist(allow_guest=True)
def get_user_info(sid):
from frappe.sessions import Session
session = Session(None, resume=True).get_session_data()
return {
'user': session.user,
}
def get_doc_room(doctype, docname):
return ''.join([frappe.local.site, ':doc:', doctype, '/', docname])
def get_user_room(user):
return ''.join([frappe.local.site, ':user:', user])
def get_site_room():
return ''.join([frappe.local.site, ':all'])
def get_task_progress_room():
return "task_progress:" + frappe.local.task_id
|
indictranstech/reciphergroup-frappe
|
frappe/async.py
|
Python
|
mit
| 7,083
|
'''Simple data loader module.
Loads data files from the "data" directory shipped with a game.
Enhancing this to handle caching etc. is left as an exercise for the reader.
'''
import os
data_py = os.path.abspath(os.path.dirname(__file__))
data_dir = os.path.normpath(os.path.join(data_py, '..', 'data'))
def filepath(filename):
'''Determine the path to a file in the data directory.
'''
return os.path.join(data_dir, filename)
def load(filename, mode='rb'):
'''Open a file in the data directory.
"mode" is passed as the second arg to open().
'''
return open(os.path.join(data_dir, filename), mode)
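# Example usage (hypothetical filenames, assuming a sibling "data" directory):
#   background = load('background.png')          # binary file object
#   config_text = load('config.txt', 'r').read()
#   music_path = filepath('music/theme.ogg')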
|
rozifus/TeamStrong14_5
|
old_gamelib/data.py
|
Python
|
gpl-2.0
| 633
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
r_li_shannon.py
----------------
Date : February 2016
Copyright : (C) 2016 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Médéric Ribreux'
__date__ = 'February 2016'
__copyright__ = '(C) 2016, Médéric Ribreux'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from .r_li import checkMovingWindow, configFile
def checkParameterValuesBeforeExecuting(alg, parameters, context):
return checkMovingWindow(alg, parameters, context)
def processCommand(alg, parameters, context):
configFile(alg, parameters, context)
|
CS-SI/QGIS
|
python/plugins/processing/algs/grass7/ext/r_li_shannon.py
|
Python
|
gpl-2.0
| 1,374
|
# -*- coding: utf-8 -*-
#
# Cherokee-admin
#
# Authors:
# Alvaro Lopez Ortega <alvaro@alobbs.com>
#
# Copyright (C) 2001-2014 Alvaro Lopez Ortega
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 2 of the GNU General Public
# License as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
import CTK
import Handler
from CTK.Plugin import instance_plugin
URL_APPLY = '/plugin/common/apply'
HELPS = [('modules_handlers_common', N_("List & Send"))]
NOTE_PATHINFO = N_("Allow extra tailing paths")
NOTE_DIRLIST = N_("Allow to list directory contents")
class Plugin_common (Handler.PluginHandler):
def __init__ (self, key, **kwargs):
Handler.PluginHandler.__init__ (self, key, **kwargs)
Handler.PluginHandler.AddCommon (self)
table = CTK.PropsTable()
table.Add (_('Allow PathInfo'), CTK.CheckCfgText('%s!allow_pathinfo'%(key), False, _('Allow')), _(NOTE_PATHINFO))
table.Add (_('Allow Directory Listing'), CTK.CheckCfgText('%s!allow_dirlist'%(key), True, _('Allow')), _(NOTE_DIRLIST))
submit = CTK.Submitter (URL_APPLY)
submit += table
self += CTK.RawHTML ('<h2>%s</h2>' %(_('Parsing')))
self += CTK.Indenter (submit)
self += instance_plugin('file', key, show_document_root=False, symlinks=False)
self += instance_plugin('dirlist', key, show_document_root=False, symlinks=True)
CTK.publish ('^%s'%(URL_APPLY), CTK.cfg_apply_post, method="POST")
|
lmcro/webserver
|
admin/plugins/common.py
|
Python
|
gpl-2.0
| 1,972
|
#! /usr/bin/env python
# src-sniff.py: checks source code for patterns that look like common errors.
# Copyright (C) 2007, 2010, 2011 Free Software Foundation, Inc.
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Many of these would probably be better as gnulib syntax checks, because
# gnulib provides a way of disabling checks for particular files, and
# has a wider range of checks. Indeed, many of these checks do in fact
# check the same thing as "make syntax-check".
import os.path
import re
import sys
C_ISH_FILENAME = "\.(c|cc|h|cpp|cxx|hxx)$"
C_ISH_FILENAME_RE = re.compile(C_ISH_FILENAME)
C_MODULE_FILENAME_RE = re.compile("\.(c|cc|cpp|cxx)$")
FIRST_INCLUDE = 'config.h'
problems = 0
def Problem(**kwargs):
global problems
problems += 1
msg = kwargs['message']
if kwargs['line']:
location = "%(filename)s:%(line)d" % kwargs
else:
location = "%(filename)s" % kwargs
detail = msg % kwargs
print >>sys.stderr, "error: %s: %s" % (location, detail)
class RegexSniffer(object):
def __init__(self, source, message, regexflags=0):
super(RegexSniffer, self).__init__()
self._regex = re.compile(source, regexflags)
self._msg = message
def Sniff(self, text, filename, line):
#print >>sys.stderr, ("Matching %s against %s"
# % (text, self._regex.pattern))
m = self._regex.search(text)
if m:
if line is None:
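                # Whole-file sniffers pass line=None; recover the 1-based line
                # number from the offset of the match within the full text.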
line = 1 + m.string.count('\n', 1, m.start(0))
args = {
'filename' : filename,
'line' : line,
'fulltext' : text,
'matchtext': m.group(0),
'message' : self._msg
}
Problem(**args)
class RegexChecker(object):
def __init__(self, regex, line_smells, file_smells):
super(RegexChecker, self).__init__()
self._regex = re.compile(regex)
self._line_sniffers = [RegexSniffer(s[0],s[1]) for s in line_smells]
self._file_sniffers = [RegexSniffer(s[0],s[1],re.S|re.M) for s in file_smells]
def Check(self, filename, lines, fulltext):
if self._regex.search(filename):
# We recognise this type of file.
for line_number, line_text in lines:
for sniffer in self._line_sniffers:
sniffer.Sniff(line_text, filename, line_number)
for sniffer in self._file_sniffers:
sniffer.Sniff(fulltext, filename, None)
else:
# We don't know how to check this file. Skip it.
pass
class MakefileRegexChecker(object):
MAKEFILE_PRIORITY_LIST = ['Makefile.am', 'Makefile.in', 'Makefile']
MAKEFILE_REGEX = ''.join(
'|'.join(['(%s)' % pattern for pattern in MAKEFILE_PRIORITY_LIST]))
def __init__(self, line_smells, file_smells):
self._file_regex = re.compile(self.MAKEFILE_REGEX)
self._rxc = RegexChecker(self.MAKEFILE_REGEX, line_smells, file_smells)
def WantToCheck(self, filename):
if not self._file_regex.search(filename):
return False
makefile_base = os.path.basename(filename)
makefile_dir = os.path.dirname(filename)
for base in self.MAKEFILE_PRIORITY_LIST:
path = os.path.join(makefile_dir, base)
if os.path.exists(path):
if path == filename:
# The first existing name in MAKEFILE_PRIORITY_LIST
# is actually this file, so we want to check it.
return True
else:
                    # There is another (source) Makefile we want to check
# instead.
return False
# If we get to here we were asked about a file which either
# doesn't exist or which doesn't look like anything in
# MAKEFILE_PRIORITY_LIST. So give the go-ahead to check it.
return True
def Check(self, filename, lines, fulltext):
if self.WantToCheck(filename):
self._rxc.Check(filename, lines, fulltext)
checkers = [
# Check C-like languages for C code smells.
RegexChecker(C_ISH_FILENAME_RE,
# line smells
[
[r'^\s*#\s*define\s+(_[A-Z_]+)', "Don't use reserved macro names"],
[r'(?<!\w)free \(\(', "don't cast the argument to free()"],
[r'\*\) *x(m|c|re)alloc(?!\w)',"don't cast the result of x*alloc"],
[r'\*\) *alloca(?!\w)',"don't cast the result of alloca"],
  [r'[ ]\t',"found SPACE-TAB; remove the space"],
[r'(?<!\w)([fs]?scanf|ato([filq]|ll))(?!\w)', 'do not use %(matchtext)s'],
[r'error \(EXIT_SUCCESS',"passing EXIT_SUCCESS to error is confusing"],
[r'file[s]ystem', "prefer writing 'file system' to 'filesystem'"],
[r'HAVE''_CONFIG_H', "Avoid checking HAVE_CONFIG_H"],
[r'HAVE_FCNTL_H', "Avoid checking HAVE_FCNTL_H"],
[r'O_NDELAY', "Avoid using O_NDELAY"],
[r'the\s*the', "'the"+" the' is probably not deliberate"],
[r'(?<!\w)error \([^_"]*[^_]"[^"]*[a-z]{3}', "untranslated error message"],
[r'^# *if\s+defined *\(', "useless parentheses in '#if defined'"],
],
[
[r'# *include <assert.h>(?!.*assert \()',
"If you include <assert.h>, use assert()."],
[r'# *include "quotearg.h"(?!.*(?<!\w)quotearg(_[^ ]+)? \()',
"If you include \"quotearg.h\", use one of its functions."],
[r'# *include "quote.h"(?!.*(?<!\w)quote(_[^ ]+)? \()',
"If you include \"quote.h\", use one of its functions."],
]),
# Check Makefiles for Makefile code smells.
MakefileRegexChecker([ [r'^ ', "Spaces at start of makefile line"], ],
[]),
# Check everything for whitespace problems.
RegexChecker('', [], [[r'[ ]$',
"trailing whitespace '%(matchtext)s'"],]),
# Check everything for out of date addresses.
RegexChecker('', [], [
[r'675\s*Mass\s*Ave,\s*02139[^a-zA-Z]*USA',
"out of date FSF address"],
[r'59 Temple Place.*02111-?1307\s*USA',
"out of date FSF address %(matchtext)s"],
]),
# Check everything for GPL version regression
RegexChecker('',
[],
[[r'G(nu |eneral )?P(ublic )?L(icense)?.{1,200}version [12]',
"Out of date GPL version: %(matchtext)s"],
]),
# Bourne shell code smells
RegexChecker('\.sh$',
[
['for\s*\w+\s*in.*;\s*do',
# Solaris 10 /bin/sh rejects this, see Autoconf manual
"for loops should not contain a 'do' on the same line."],
], []),
]
# missing check: ChangeLog prefixes
# missing: sc_always_defined_macros from coreutils
# missing: sc_tight_scope
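# Illustrative extension point (not part of the original file): a project-specific
# check can be appended to `checkers` in the same style as the entries above, e.g.
#
#   checkers.append(RegexChecker(C_ISH_FILENAME_RE,
#                                [[r'\bstrcpy\s*\(', "prefer a bounded copy over strcpy"]],
#                                []))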
def Warning(filename, desc):
print >> sys.stderr, "warning: %s: %s" % (filename, desc)
def BuildIncludeList(text):
"""Build a list of included files, with line numbers.
Args:
text: the full text of the source file
Returns:
[ ('config.h',32), ('assert.h',33), ... ]
"""
include_re = re.compile(r'# *include +[<"](.*)[>"]')
includes = []
last_include_pos = 1
line = 1
for m in include_re.finditer(text):
header = m.group(1)
# Count only the number of lines between the last include and
# this one. Counting them from the beginning would be quadratic.
line += m.string.count('\n', last_include_pos, m.start(0))
last_include_pos = m.end()
includes.append( (header,line) )
return includes
def CheckStatHeader(filename, lines, fulltext):
stat_hdr_re = re.compile(r'# *include .*<sys/stat.h>')
# It's OK to have a pointer though.
stat_use_re = re.compile(r'struct stat\W *[^*]')
for line in lines:
m = stat_use_re.search(line[1])
if m:
msg = "If you use struct stat, you must #include <sys/stat.h> first"
Problem(filename = filename, line = line[0], message = msg)
# Diagnose only once
break
m = stat_hdr_re.search(line[1])
if m:
break
def CheckFirstInclude(filename, lines, fulltext):
includes = BuildIncludeList(fulltext)
#print "Include map:"
#for name, line in includes:
# print "%s:%d: %s" % (filename, line, name)
if includes:
actual_first_include = includes[0][0]
else:
actual_first_include = None
if actual_first_include and actual_first_include != FIRST_INCLUDE:
if FIRST_INCLUDE in [inc[0] for inc in includes]:
msg = ("%(actual_first_include)s is the first included file, "
"but %(required_first_include)s should be included first")
Problem(filename=filename, line=includes[0][1], message=msg,
actual_first_include=actual_first_include,
required_first_include = FIRST_INCLUDE)
if FIRST_INCLUDE not in [inc[0] for inc in includes]:
Warning(filename,
"%s should be included by most files" % FIRST_INCLUDE)
def SniffSourceFile(filename, lines, fulltext):
if C_MODULE_FILENAME_RE.search(filename):
CheckFirstInclude(filename, lines, fulltext)
CheckStatHeader (filename, lines, fulltext)
for checker in checkers:
checker.Check(filename, lines, fulltext)
def main(args):
"main program"
for srcfile in args[1:]:
f = open(srcfile)
line_number = 1
lines = []
for line in f.readlines():
lines.append( (line_number, line) )
line_number += 1
fulltext = ''.join([line[1] for line in lines])
SniffSourceFile(srcfile, lines, fulltext)
f.close()
if problems:
return 1
else:
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
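# Illustrative invocation (not part of the original script): the files to sniff are
# passed as positional arguments, and the exit status is non-zero when any problem is
# reported, so a call along the lines of
#
#   python src-sniff.py $(find lib src -name '*.c' -o -name '*.h')
#
# can be wired into a "make syntax-check"-style target.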
|
krichter722/findutils
|
build-aux/src-sniff.py
|
Python
|
gpl-3.0
| 10,412
|
from ete3 import Tree, TextFace, NodeStyle, TreeStyle
t = Tree("((a,b),c);")
right_c0_r0 = TextFace("right_col0_row0")
right_c0_r1 = TextFace("right_col0_row1")
right_c1_r0 = TextFace("right_col1_row0")
right_c1_r1 = TextFace("right_col1_row1")
right_c1_r2 = TextFace("right_col1_row2")
top_c0_r0 = TextFace("top_col0_row0")
top_c0_r1 = TextFace("top_col0_row1")
bottom_c0_r0 = TextFace("bottom_col0_row0")
bottom_c0_r1 = TextFace("bottom_col0_row1")
aligned_c0_r0 = TextFace("aligned_col0_row0")
aligned_c0_r1 = TextFace("aligned_col0_row1")
aligned_c1_r0 = TextFace("aligned_col1_row0")
aligned_c1_r1 = TextFace("aligned_col1_row1")
all_faces = [right_c0_r0, right_c0_r1, right_c1_r0, right_c1_r1, right_c1_r2, top_c0_r0, \
top_c0_r1, bottom_c0_r0, bottom_c0_r1, aligned_c0_r0, aligned_c0_r1,\
aligned_c1_r0, aligned_c1_r1]
# set a border in all faces
for f in all_faces:
f.margin_border.width = 1
f.margin_bottom = 5
f.margin_top = 5
f.margin_right = 10
t.add_face(right_c0_r0, column=0, position="branch-right")
t.add_face(right_c0_r1, column=0, position="branch-right")
t.add_face(right_c1_r0, column=1, position="branch-right")
t.add_face(right_c1_r1, column=1, position="branch-right")
t.add_face(right_c1_r2, column=1, position="branch-right")
t.add_face(top_c0_r0, column=0, position="branch-top")
t.add_face(top_c0_r1, column=0, position="branch-top")
t.add_face(bottom_c0_r0, column=0, position="branch-bottom")
t.add_face(bottom_c0_r1, column=0, position="branch-bottom")
a = t&"a"
a.set_style(NodeStyle())
a.img_style["bgcolor"] = "lightgreen"
b = t&"b"
b.set_style(NodeStyle())
b.img_style["bgcolor"] = "indianred"
c = t&"c"
c.set_style(NodeStyle())
c.img_style["bgcolor"] = "lightblue"
t.set_style(NodeStyle())
t.img_style["bgcolor"] = "lavender"
t.img_style["size"] = 12
for leaf in t.iter_leaves():
leaf.img_style["size"] = 12
leaf.add_face(right_c0_r0, 0, "branch-right")
leaf.add_face(aligned_c0_r1, 0, "aligned")
leaf.add_face(aligned_c0_r0, 0, "aligned")
leaf.add_face(aligned_c1_r1, 1, "aligned")
leaf.add_face(aligned_c1_r0, 1, "aligned")
ts = TreeStyle()
ts.show_scale = False
t.render("face_positions.png", w=800, tree_style=ts)
|
karrtikr/ete
|
sdoc/face_grid.py
|
Python
|
gpl-3.0
| 2,223
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
depends_on = (
("subtitles", "0015_auto__chg_field_subtitleversion_serialized_subtitles"),
)
def forwards(self, orm):
# Adding field 'Action.new_language'
db.add_column('videos_action', 'new_language', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['subtitles.SubtitleLanguage'], null=True, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'Action.new_language'
db.delete_column('videos_action', 'new_language_id')
models = {
'accountlinker.thirdpartyaccount': {
'Meta': {'unique_together': "(('type', 'username'),)", 'object_name': 'ThirdPartyAccount'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'oauth_access_token': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'oauth_refresh_token': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
'auth.customuser': {
'Meta': {'object_name': 'CustomUser', '_ormbases': ['auth.User']},
'autoplay_preferences': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'award_points': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'biography': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'can_send_messages': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '63', 'blank': 'True'}),
'homepage': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'is_partner': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'notify_by_email': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'notify_by_message': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'partner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Partner']", 'null': 'True', 'blank': 'True'}),
'picture': ('utils.amazon.fields.S3EnabledImageField', [], {'thumb_options': "{'upscale': True, 'crop': 'smart'}", 'max_length': '100', 'blank': 'True'}),
'preferred_language': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
'valid_email': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'videos': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['videos.Video']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 11, 9, 11, 31, 37, 830646)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 11, 9, 11, 31, 37, 830451)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'comments.comment': {
'Meta': {'object_name': 'Comment'},
'content': ('django.db.models.fields.TextField', [], {'max_length': '3000'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_comment'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_pk': ('django.db.models.fields.TextField', [], {}),
'reply_to': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['comments.Comment']", 'null': 'True', 'blank': 'True'}),
'submit_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'subtitles.subtitlelanguage': {
'Meta': {'unique_together': "[('video', 'language_code')]", 'object_name': 'SubtitleLanguage'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'followers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'new_followed_languages'", 'blank': 'True', 'to': "orm['auth.CustomUser']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_forked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'official_signoff_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'pending_signoff_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'pending_signoff_expired_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'pending_signoff_unexpired_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'subtitles_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'unofficial_signoff_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'newsubtitlelanguage_set'", 'to': "orm['videos.Video']"}),
'writelock_owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'writelocked_newlanguages'", 'null': 'True', 'to': "orm['auth.CustomUser']"}),
'writelock_session_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'writelock_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'teams.application': {
'Meta': {'unique_together': "(('team', 'user', 'status'),)", 'object_name': 'Application'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'history': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'applications'", 'to': "orm['teams.Team']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'team_applications'", 'to': "orm['auth.CustomUser']"})
},
'teams.partner': {
'Meta': {'object_name': 'Partner'},
'admins': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'managed_partners'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.CustomUser']"}),
'can_request_paid_captions': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'teams.project': {
'Meta': {'unique_together': "(('team', 'name'), ('team', 'slug'))", 'object_name': 'Project'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'guidelines': ('django.db.models.fields.TextField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']"}),
'workflow_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'})
},
'teams.team': {
'Meta': {'object_name': 'Team'},
'applicants': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'applicated_teams'", 'symmetrical': 'False', 'through': "orm['teams.Application']", 'to': "orm['auth.CustomUser']"}),
'application_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'auth_provider_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '24', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'header_html_text': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'highlight': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_moderated': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_visible': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'last_notification_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'logo': ('utils.amazon.fields.S3EnabledImageField', [], {'thumb_options': "{'upscale': True, 'autocrop': True}", 'max_length': '100', 'blank': 'True'}),
'max_tasks_per_member': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'membership_policy': ('django.db.models.fields.IntegerField', [], {'default': '4'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250'}),
'page_content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'partner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'teams'", 'null': 'True', 'to': "orm['teams.Partner']"}),
'points': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'projects_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'subtitle_policy': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
'task_assign_policy': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
'task_expiration': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'third_party_accounts': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'teams'", 'symmetrical': 'False', 'to': "orm['accountlinker.ThirdPartyAccount']"}),
'translate_policy': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'teams'", 'symmetrical': 'False', 'through': "orm['teams.TeamMember']", 'to': "orm['auth.CustomUser']"}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'intro_for_teams'", 'null': 'True', 'to': "orm['videos.Video']"}),
'video_policy': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'videos': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['videos.Video']", 'through': "orm['teams.TeamVideo']", 'symmetrical': 'False'}),
'workflow_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'})
},
'teams.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'default': "'contributor'", 'max_length': '16', 'db_index': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'members'", 'to': "orm['teams.Team']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'team_members'", 'to': "orm['auth.CustomUser']"})
},
'teams.teamvideo': {
'Meta': {'unique_together': "(('team', 'video'),)", 'object_name': 'TeamVideo'},
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"}),
'all_languages': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'partner_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Project']"}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']"}),
'thumbnail': ('utils.amazon.fields.S3EnabledImageField', [], {'max_length': '100', 'thumb_options': "{'upscale': True, 'crop': 'smart'}", 'null': 'True', 'thumb_sizes': '((290, 165), (120, 90))', 'blank': 'True'}),
'video': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['videos.Video']", 'unique': 'True'})
},
'videos.action': {
'Meta': {'object_name': 'Action'},
'action_type': ('django.db.models.fields.IntegerField', [], {}),
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['comments.Comment']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleLanguage']", 'null': 'True', 'blank': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.TeamMember']", 'null': 'True', 'blank': 'True'}),
'new_language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['subtitles.SubtitleLanguage']", 'null': 'True', 'blank': 'True'}),
'new_video_title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']", 'null': 'True', 'blank': 'True'})
},
'videos.subtitle': {
'Meta': {'unique_together': "(('version', 'subtitle_id'),)", 'object_name': 'Subtitle'},
'end_time': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'db_column': "'end_time_ms'"}),
'end_time_seconds': ('django.db.models.fields.FloatField', [], {'null': 'True', 'db_column': "'end_time'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_of_paragraph': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'start_time': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'db_column': "'start_time_ms'"}),
'start_time_seconds': ('django.db.models.fields.FloatField', [], {'null': 'True', 'db_column': "'start_time'"}),
'subtitle_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'subtitle_order': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'subtitle_text': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'version': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleVersion']", 'null': 'True'})
},
'videos.subtitlelanguage': {
'Meta': {'unique_together': "(('video', 'language', 'standard_language'),)", 'object_name': 'SubtitleLanguage'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'followers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'followed_languages'", 'blank': 'True', 'to': "orm['auth.CustomUser']"}),
'had_version': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'has_version': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_forked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_original': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'percent_done': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'standard_language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleLanguage']", 'null': 'True', 'blank': 'True'}),
'subtitle_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'subtitles_fetched_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"}),
'writelock_owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}),
'writelock_session_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'writelock_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'videos.subtitlemetadata': {
'Meta': {'object_name': 'SubtitleMetadata'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.PositiveIntegerField', [], {}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'subtitle': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Subtitle']"})
},
'videos.subtitleversion': {
'Meta': {'unique_together': "(('language', 'version_no'),)", 'object_name': 'SubtitleVersion'},
'datetime_started': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'forked_from': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleVersion']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_forked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleLanguage']"}),
'moderation_status': ('django.db.models.fields.CharField', [], {'default': "'not__under_moderation'", 'max_length': '32', 'db_index': 'True'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'notification_sent': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'result_of_rollback': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'text_change': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'time_change': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"}),
'version_no': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'videos.subtitleversionmetadata': {
'Meta': {'unique_together': "(('key', 'subtitle_version'),)", 'object_name': 'SubtitleVersionMetadata'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.PositiveIntegerField', [], {}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'subtitle_version': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'metadata'", 'to': "orm['videos.SubtitleVersion']"})
},
'videos.usertestresult': {
'Meta': {'object_name': 'UserTestResult'},
'browser': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'get_updates': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'task1': ('django.db.models.fields.TextField', [], {}),
'task2': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'task3': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'videos.video': {
'Meta': {'object_name': 'Video'},
'allow_community_edits': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'allow_video_urls_edit': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'complete_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'duration': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'edited': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'featured': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'followers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'followed_videos'", 'blank': 'True', 'to': "orm['auth.CustomUser']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_subtitled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'languages_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'moderated_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'moderating'", 'null': 'True', 'to': "orm['teams.Team']"}),
'primary_audio_language_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '16', 'blank': 'True'}),
's3_thumbnail': ('utils.amazon.fields.S3EnabledImageField', [], {'thumb_options': "{'upscale': True, 'crop': 'smart'}", 'max_length': '100', 'thumb_sizes': '((290, 165), (120, 90))', 'blank': 'True'}),
'small_thumbnail': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'subtitles_fetched_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}),
'thumbnail': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}),
'video_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'was_subtitled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'widget_views_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}),
'writelock_owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'writelock_owners'", 'null': 'True', 'to': "orm['auth.CustomUser']"}),
'writelock_session_key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'writelock_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'videos.videofeed': {
'Meta': {'object_name': 'VideoFeed'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'})
},
'videos.videometadata': {
'Meta': {'object_name': 'VideoMetadata'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.PositiveIntegerField', [], {}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"})
},
'videos.videourl': {
'Meta': {'object_name': 'VideoUrl'},
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'original': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'owner_username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'primary': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '255'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"}),
'videoid': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'})
}
}
complete_apps = ['videos']
|
norayr/unisubs
|
apps/videos/migrations/0152_auto__add_field_action_new_language.py
|
Python
|
agpl-3.0
| 33,110
|
# Generated by Django 1.11.23 on 2019-08-25 16:11
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import opaque_keys.edx.django.models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('video_config', '0007_videothumbnailsetting_offset'),
]
operations = [
migrations.CreateModel(
name='CourseYoutubeBlockedFlag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('change_date', models.DateTimeField(auto_now_add=True, verbose_name='Change date')),
('enabled', models.BooleanField(default=False, verbose_name='Enabled')),
('course_id', opaque_keys.edx.django.models.CourseKeyField(db_index=True, max_length=255)),
('changed_by', models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL, verbose_name='Changed by')),
],
options={
'ordering': ('-change_date',),
'abstract': False,
},
),
]
|
edx/edx-platform
|
openedx/core/djangoapps/video_config/migrations/0008_courseyoutubeblockedflag.py
|
Python
|
agpl-3.0
| 1,257
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Gearshifft(CMakePackage):
"""Benchmark Suite for Heterogenuous FFT Implementations"""
homepage = "https://github.com/mpicbg-scicomp/gearshifft"
url = "https://github.com/mpicbg-scicomp/gearshifft/archive/v0.2.1-lw.tar.gz"
maintainers = ['zyzzyxdonta']
version('0.4.0', sha256='15b9e4bfa1d9b4fe4ae316f289c67b7be0774cdada5bd7310df4d0e026d9d227')
    # gearshifft used the variable name `CMAKE_DEFAULT_BUILD_TYPE`, which CMake
    # itself introduced later, leading to an error with newer CMake versions.
    # This patch renames the variable to `GEARSHIFFT_DEFAULT_BUILD_TYPE`.
patch('gearshifft-v0.4.0-cmake-variable-name.patch', when='@0.4.0')
variant('cufft', default=True,
description='Compile gearshifft_cufft')
variant('clfft', default=True,
description='Compile gearshifft_clfft')
variant('fftw', default=True,
description='Compile gearshifft_fftw')
variant('openmp', default=True,
description='use OpenMP parallel fftw libraries')
# variant('hcfft', default=True,
# description='Not implemented yet')
variant('mkl', default=True,
description='Compile gearshifft_fftwwrappers')
variant('rocfft', default=True,
description='Compile gearshifft_rocfft')
# depends_on C++14 compiler, e.g. GCC 5.0+
depends_on('cmake@2.8.0:', type='build')
depends_on('boost@1.59.0:')
depends_on('cuda@8.0:', when='+cufft')
depends_on('opencl@1.2:', when='+clfft')
depends_on('clfft@2.12.0:', when='+clfft')
depends_on('fftw@3.3.4:~mpi~openmp', when='+fftw~openmp')
depends_on('fftw@3.3.4:~mpi+openmp', when='+fftw+openmp')
depends_on('intel-mkl threads=openmp', when='+mkl')
depends_on('rocfft', when='+rocfft')
def cmake_args(self):
spec = self.spec
args = [
self.define('GEARSHIFFT_FLOAT16_SUPPORT', False),
self.define('GEARSHIFFT_BACKEND_HCFFT', False),
self.define_from_variant('GEARSHIFFT_BACKEND_FFTW', 'fftw'),
self.define('GEARSHIFFT_BACKEND_FFTW_PTHREADS', '~openmp' in spec),
self.define_from_variant('GEARSHIFFT_BACKEND_FFTW_OPENMP', 'openmp'),
self.define_from_variant('GEARSHIFFT_BACKEND_CUFFT', 'cufft'),
self.define_from_variant('GEARSHIFFT_BACKEND_CLFFT', 'clfft'),
self.define_from_variant('GEARSHIFFT_BACKEND_FFTWWRAPPERS', 'mkl'),
self.define_from_variant('GEARSHIFFT_BACKEND_ROCFFT', 'rocfft')
]
return args
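# Illustrative only (not part of the recipe): with the variants declared above, a spec
# such as `gearshifft +cufft ~clfft ~rocfft +fftw +openmp` would be translated by
# cmake_args() into the corresponding GEARSHIFFT_BACKEND_* CMake options.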
|
LLNL/spack
|
var/spack/repos/builtin/packages/gearshifft/package.py
|
Python
|
lgpl-2.1
| 2,768
|
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006 (ita)
"Base for c programs/libraries"
import os
import TaskGen, Build, Utils, Task
from Logs import debug
import ccroot
from TaskGen import feature, before, extension, after
g_cc_flag_vars = [
'CCDEPS', 'FRAMEWORK', 'FRAMEWORKPATH',
'STATICLIB', 'LIB', 'LIBPATH', 'LINKFLAGS', 'RPATH',
'CCFLAGS', 'CPPPATH', 'CPPFLAGS', 'CCDEFINES']
EXT_CC = ['.c']
g_cc_type_vars = ['CCFLAGS', 'LINKFLAGS']
# TODO remove in waf 1.6
class cc_taskgen(ccroot.ccroot_abstract):
pass
@feature('cc')
@before('apply_type_vars')
@after('default_cc')
def init_cc(self):
self.p_flag_vars = set(self.p_flag_vars).union(g_cc_flag_vars)
self.p_type_vars = set(self.p_type_vars).union(g_cc_type_vars)
if not self.env['CC_NAME']:
raise Utils.WafError("At least one compiler (gcc, ..) must be selected")
@feature('cc')
@after('apply_incpaths')
def apply_obj_vars_cc(self):
"""after apply_incpaths for INC_PATHS"""
env = self.env
app = env.append_unique
cpppath_st = env['CPPPATH_ST']
# local flags come first
# set the user-defined includes paths
for i in env['INC_PATHS']:
app('_CCINCFLAGS', cpppath_st % i.bldpath(env))
app('_CCINCFLAGS', cpppath_st % i.srcpath(env))
# set the library include paths
for i in env['CPPPATH']:
app('_CCINCFLAGS', cpppath_st % i)
@feature('cc')
@after('apply_lib_vars')
def apply_defines_cc(self):
"""after uselib is set for CCDEFINES"""
self.defines = getattr(self, 'defines', [])
lst = self.to_list(self.defines) + self.to_list(self.env['CCDEFINES'])
milst = []
# now process the local defines
for defi in lst:
if not defi in milst:
milst.append(defi)
# CCDEFINES_
libs = self.to_list(self.uselib)
for l in libs:
val = self.env['CCDEFINES_'+l]
if val: milst += val
self.env['DEFLINES'] = ["%s %s" % (x[0], Utils.trimquotes('='.join(x[1:]))) for x in [y.split('=') for y in milst]]
y = self.env['CCDEFINES_ST']
self.env['_CCDEFFLAGS'] = [y%x for x in milst]
@extension(EXT_CC)
def c_hook(self, node):
# create the compilation task: cpp or cc
if getattr(self, 'obj_ext', None):
obj_ext = self.obj_ext
else:
obj_ext = '_%d.o' % self.idx
task = self.create_task('cc', node, node.change_ext(obj_ext))
try:
self.compiled_tasks.append(task)
except AttributeError:
raise Utils.WafError('Have you forgotten to set the feature "cc" on %s?' % str(self))
return task
cc_str = '${CC} ${CCFLAGS} ${CPPFLAGS} ${_CCINCFLAGS} ${_CCDEFFLAGS} ${CC_SRC_F}${SRC} ${CC_TGT_F}${TGT}'
cls = Task.simple_task_type('cc', cc_str, 'GREEN', ext_out='.o', ext_in='.c', shell=False)
cls.scan = ccroot.scan
cls.vars.append('CCDEPS')
link_str = '${LINK_CC} ${CCLNK_SRC_F}${SRC} ${CCLNK_TGT_F}${TGT[0].abspath(env)} ${LINKFLAGS}'
cls = Task.simple_task_type('cc_link', link_str, color='YELLOW', ext_in='.o', ext_out='.bin', shell=False)
cls.maxjobs = 1
cls.install = Utils.nada
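# Illustrative usage sketch (assumed waf 1.5-style wscript fragment, not part of this
# module):
#
#   def build(bld):
#       bld.new_task_gen(features='cc cprogram', source='main.c', target='app')
#
# The 'cc' feature triggers init_cc/apply_defines_cc above, and the extension hook
# turns each .c source into a 'cc' compile task that is later linked by 'cc_link'.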
|
oneman/xmms2-oneman-old
|
wafadmin/Tools/cc.py
|
Python
|
lgpl-2.1
| 2,882
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RBayesm(RPackage):
"""Bayesian Inference for Marketing/Micro-Econometrics
Covers many important models used in marketing and micro-econometrics
applications. The package includes: Bayes Regression (univariate or
multivariate dep var), Bayes Seemingly Unrelated Regression (SUR), Binary
and Ordinal Probit, Multinomial Logit (MNL) and Multinomial Probit (MNP),
Multivariate Probit, Negative Binomial (Poisson) Regression, Multivariate
Mixtures of Normals (including clustering), Dirichlet Process Prior Density
Estimation with normal base, Hierarchical Linear Models with normal prior
and covariates, Hierarchical Linear Models with a mixture of normals prior
and covariates, Hierarchical Multinomial Logits with a mixture of normals
prior and covariates, Hierarchical Multinomial Logits with a Dirichlet
Process prior and covariates, Hierarchical Negative Binomial Regression
Models, Bayesian analysis of choice-based conjoint data, Bayesian treatment
of linear instrumental variables models, Analysis of Multivariate Ordinal
survey data with scale usage heterogeneity (as in Rossi et al, JASA (01)),
Bayesian Analysis of Aggregate Random Coefficient Logit Models as in BLP
(see Jiang, Manchanda, Rossi 2009) For further reference, consult our book,
Bayesian Statistics and Marketing by Rossi, Allenby and McCulloch (Wiley
2005) and Bayesian Non- and Semi-Parametric Methods and Applications
(Princeton U Press 2014)."""
homepage = "https://cloud.r-project.org/package=bayesm"
url = "https://cloud.r-project.org/src/contrib/bayesm_3.1-0.1.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/bayesm"
version('3.1-4', sha256='061b216c62bc72eab8d646ad4075f2f78823f9913344a781fa53ea7cf4a48f94')
version('3.1-3', sha256='51e4827eca8cd4cf3626f3c2282543df7c392b3ffb843f4bfb386fe104642a10')
version('3.1-2', sha256='a332f16e998ab10b17a2b1b9838d61660c36e914fe4d2e388a59f031d52ad736')
version('3.1-1', sha256='4854517dec30ab7c994de862aae1998c2d0c5e71265fd9eb7ed36891d4676078')
version('3.1-0.1', sha256='5879823b7fb6e6df0c0fe98faabc1044a4149bb65989062df4ade64e19d26411')
depends_on('r@3.2.0:', type=('build', 'run'))
depends_on('r-rcpp@0.12.0:', type=('build', 'run'))
depends_on('r-rcpparmadillo', type=('build', 'run'))
|
LLNL/spack
|
var/spack/repos/builtin/packages/r-bayesm/package.py
|
Python
|
lgpl-2.1
| 2,588
|
# SU2/opt/__init__.py
from project import Project
from scipy_tools import scipy_slsqp as SLSQP
from scipy_tools import scipy_cg as CG
from scipy_tools import scipy_bfgs as BFGS
from scipy_tools import scipy_powell as POWELL
|
pawhewitt/Dev
|
SU2_PY/SU2/opt/__init__.py
|
Python
|
lgpl-2.1
| 225
|
{
'name' : 'Discount for total amount of pos order',
'version' : '1.0.0',
'author' : 'Ivan Yelizariev',
'category' : 'Point Of Sale',
'website' : 'https://yelizariev.github.io',
'price': 9.00,
'currency': 'EUR',
'depends' : ['point_of_sale'],
'data':[
'data.xml',
],
'installable': True,
'auto_install': False,
}
|
tymiles003/pos-addons
|
pos_discount_total/__openerp__.py
|
Python
|
lgpl-3.0
| 373
|
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Common")
AddReference("QuantConnect.Algorithm")
from System import *
from QuantConnect import *
from QuantConnect.Orders import *
from QuantConnect.Algorithm import QCAlgorithm
import numpy as np
import decimal as d
from datetime import datetime, timedelta
### <summary>
### This algorithm showcases two margin related event handlers.
### OnMarginCallWarning: Fired when a portfolio's remaining margin dips below 5% of the total portfolio value
### OnMarginCall: Fired immediately before margin call orders are executed; this gives the algorithm a chance to regain margin on its own through liquidation
### </summary>
### <meta name="tag" content="securities and portfolio" />
### <meta name="tag" content="margin models" />
class MarginCallEventsAlgorithm(QCAlgorithm):
"""
This algorithm showcases two margin related event handlers.
OnMarginCallWarning: Fired when a portfolio's remaining margin dips below 5% of the total portfolio value
    OnMarginCall: Fired immediately before margin call orders are executed; this gives the algorithm a chance to regain margin on its own through liquidation
"""
def Initialize(self):
self.SetCash(100000)
self.SetStartDate(2013,10,1)
self.SetEndDate(2013,12,11)
self.AddEquity("SPY", Resolution.Second)
# cranking up the leverage increases the odds of a margin call
# when the security falls in value
self.Securities["SPY"].SetLeverage(100)
def OnData(self, data):
if not self.Portfolio.Invested:
self.SetHoldings("SPY",100)
def OnMarginCall(self, requests):
# Margin call event handler. This method is called right before the margin call orders are placed in the market.
# <param name="requests">The orders to be executed to bring this algorithm within margin limits</param>
# this code gets called BEFORE the orders are placed, so we can try to liquidate some of our positions
# before we get the margin call orders executed. We could also modify these orders by changing their quantities
        # iterate over a copy: the loop removes and appends entries in `requests`,
        # and mutating the list while looping over it directly would skip orders
        for order in list(requests):
# liquidate an extra 10% each time we get a margin call to give us more padding
newQuantity = int(np.sign(order.Quantity) * order.Quantity * d.Decimal(1.1))
requests.remove(order)
requests.append(SubmitOrderRequest(order.OrderType, order.SecurityType, order.Symbol, newQuantity, order.StopPrice, order.LimitPrice, self.Time, "OnMarginCall"))
return requests
def OnMarginCallWarning(self):
        # Margin call warning event handler.
        # This method is called when Portfolio.MarginRemaining falls below 5% of your
        # Portfolio.TotalPortfolioValue, giving the algorithm a chance to prevent a margin call.
spyHoldings = self.Securities["SPY"].Holdings.Quantity
shares = int(-spyHoldings * d.Decimal(0.005))
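        # (added note) this trims 0.5% of the current SPY holdings, with the sign flipped so a
        # long position is reduced by a sell order and a short position by a buy order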
self.Error("{0} - OnMarginCallWarning(): Liquidating {1} shares of SPY to avoid margin call.".format(self.Time, shares))
self.MarketOrder("SPY", shares)
|
AnshulYADAV007/Lean
|
Algorithm.Python/MarginCallEventsAlgorithm.py
|
Python
|
apache-2.0
| 3,908
|
"""Auto-generated file, do not edit by hand. KE metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_KE = PhoneMetadata(id='KE', country_code=254, international_prefix='000',
general_desc=PhoneNumberDesc(national_number_pattern='20\\d{6,7}|[4-9]\\d{6,9}', possible_number_pattern='\\d{7,10}'),
fixed_line=PhoneNumberDesc(national_number_pattern='20\\d{6,7}|4(?:[0136]\\d{7}|[245]\\d{5,7})|5(?:[08]\\d{7}|[1-79]\\d{5,7})|6(?:[01457-9]\\d{5,7}|[26]\\d{7})', possible_number_pattern='\\d{7,9}', example_number='202012345'),
mobile=PhoneNumberDesc(national_number_pattern='7(?:[0-36]\\d|5[0-6]|7[0-5]|8[0-25-9]|9[0-4])\\d{6}', possible_number_pattern='\\d{9}', example_number='712123456'),
toll_free=PhoneNumberDesc(national_number_pattern='800[24-8]\\d{5,6}', possible_number_pattern='\\d{9,10}', example_number='800223456'),
premium_rate=PhoneNumberDesc(national_number_pattern='900[02-9]\\d{5}', possible_number_pattern='\\d{9}', example_number='900223456'),
shared_cost=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
personal_number=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
voip=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
pager=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
uan=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
national_prefix='0',
national_prefix_for_parsing='0',
number_format=[NumberFormat(pattern='(\\d{2})(\\d{5,7})', format='\\1 \\2', leading_digits_pattern=['[24-6]'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='(\\d{3})(\\d{6})', format='\\1 \\2', leading_digits_pattern=['7'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='(\\d{3})(\\d{3})(\\d{3,4})', format='\\1 \\2 \\3', leading_digits_pattern=['[89]'], national_prefix_formatting_rule='0\\1')],
mobile_number_portable_region=True)
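# Illustrative only (this file is auto-generated; the lines below are not part of it):
# the metadata above backs the public phonenumbers API for Kenyan numbers, e.g.
#
#   import phonenumbers
#   n = phonenumbers.parse("0712123456", "KE")
#   phonenumbers.format_number(n, phonenumbers.PhoneNumberFormat.E164)  # '+254712123456'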
|
roubert/python-phonenumbers
|
python/phonenumbers/data/region_KE.py
|
Python
|
apache-2.0
| 2,224
|
import typing
MyTup1 = typing.NamedTuple("MyTup2", bar=int, baz=str)
class MyTup2(typing.NamedTuple):
bar: int
baz: str
MyTup1(1, "")._replace(<arg1>)
MyTup2(1, "")._replace(<arg2>)
|
goodwinnk/intellij-community
|
python/testData/paramInfo/TypingNamedTupleReplace.py
|
Python
|
apache-2.0
| 195
|
"""Per-prefix data, mapping each prefix to a name.
Auto-generated file, do not edit by hand.
"""
from phonenumbers.util import u
# Copyright (C) 2011-2015 The Libphonenumber Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
TIMEZONE_DATA = {}
from .data0 import data
TIMEZONE_DATA.update(data)
del data
TIMEZONE_LONGEST_PREFIX = 7
|
dongguangming/python-phonenumbers
|
python/tests/testtzdata/__init__.py
|
Python
|
apache-2.0
| 835
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for matrix factorization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numbers
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.factorization.python.ops import gen_factorization_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import resource_loader
_factorization_ops = loader.load_op_library(
resource_loader.get_path_to_datafile("_factorization_ops.so"))
class WALSModel(object):
r"""A model for Weighted Alternating Least Squares matrix factorization.
It minimizes the following loss function over U, V:
\\(
\|\sqrt W \odot (A - U V^T) \|_F^2 + \lambda (\|U\|_F^2 + \|V\|_F^2)
\\)
where,
A: input matrix,
W: weight matrix. Note that the (element-wise) square root of the weights
is used in the objective function.
U, V: row_factors and column_factors matrices,
\\(\lambda\\): regularization.
Also we assume that W is of the following special form:
\\( W_{ij} = W_0 + R_i * C_j \\) if \\(A_{ij} \ne 0\\),
\\(W_{ij} = W_0\\) otherwise.
where,
\\(W_0\\): unobserved_weight,
\\(R_i\\): row_weights,
\\(C_j\\): col_weights.
Note that the current implementation supports two operation modes: in the
default mode, row_factors and col_factors are assumed to individually fit in
the memory of each worker, and they are cached there. When this condition
cannot be met, setting use_factors_weights_cache to False supports larger
problem sizes at a slight performance penalty, because the worker caches are
not created and the relevant weight and factor values are instead looked up
from the parameter servers at each step.
Loss computation: The loss can be computed efficiently by decomposing it into
a sparse term and a Gramian term, see wals.md.
The loss is returned by the update_{col, row}_factors(sp_input), and is
normalized as follows:
_, _, minibatch_loss = update_row_factors(sp_input)
if sp_input contains the rows {A_i, i \in I}, and the input matrix A has n
total rows, then minibatch_loss is
\\(
(\|\sqrt W \odot (A_I - U_I V^T)\|_F^2 + \lambda \|U_I\|_F^2) * n / |I| +
\lambda \|V\|_F^2
\\)
A typical usage example (pseudocode):
with tf.Graph().as_default():
# Set up the model object.
model = tf.contrib.factorization.WALSModel(....)
# To be run only once as part of session initialization. In distributed
# training setting, this should only be run by the chief trainer and all
# other trainers should block until this is done.
model_init_op = model.initialize_op
# To be run once per worker after the session is available, before the
# prep_gramian_op for the row (column) update can be run.
worker_init_op = model.worker_init
# To be run once per iteration sweep before the row (column) update
# initialize ops can be run. Note that in distributed training
# settings, this should only be run by the chief trainer. All other
# trainers need to block until this is done.
row_update_prep_gramian_op = model.row_update_prep_gramian_op
col_update_prep_gramian_op = model.col_update_prep_gramian_op
# To be run once per worker per iteration sweep. Must be run before
# any actual update ops can be run.
init_row_update_op = model.initialize_row_update_op
init_col_update_op = model.initialize_col_update_op
# Ops to update rows (columns). These can take either the entire sparse
# tensor or slices of it. In distributed training, each trainer
# handles just part of the matrix.
_, row_update_op, row_loss = model.update_row_factors(
sp_input=matrix_slices_from_queue_for_worker_shard)
_, col_update_op, col_loss = model.update_col_factors(
sp_input=transposed_matrix_slices_from_queue_for_worker_shard,
transpose_input=True)
...
# model_init_op is passed to Supervisor. Chief trainer runs it. Other
# trainers wait.
sv = tf.train.Supervisor(is_chief=is_chief,
...,
init_op=tf.group(..., model_init_op, ...), ...)
...
with sv.managed_session(...) as sess:
# All workers/trainers run it after session becomes available.
worker_init_op.run(session=sess)
...
while i in iterations:
# All trainers need to sync up here.
while not_all_ready:
wait
# Row update sweep.
if is_chief:
row_update_prep_gramian_op.run(session=sess)
else:
wait_for_chief
# All workers run update initialization.
init_row_update_op.run(session=sess)
# Go through the matrix.
reset_matrix_slices_queue_for_worker_shard
while_matrix_slices:
row_update_op.run(session=sess)
# All trainers need to sync up here.
while not_all_ready:
wait
# Column update sweep.
if is_chief:
col_update_prep_gramian_op.run(session=sess)
else:
wait_for_chief
# All workers run update initialization.
init_col_update_op.run(session=sess)
# Go through the matrix.
reset_transposed_matrix_slices_queue_for_worker_shard
while_transposed_matrix_slices:
col_update_op.run(session=sess)
"""
def __init__(self,
input_rows,
input_cols,
n_components,
unobserved_weight=0.1,
regularization=None,
row_init="random",
col_init="random",
num_row_shards=1,
num_col_shards=1,
row_weights=1,
col_weights=1,
use_factors_weights_cache=True,
use_gramian_cache=True):
"""Creates model for WALS matrix factorization.
Args:
input_rows: total number of rows for input matrix.
input_cols: total number of cols for input matrix.
n_components: number of dimensions to use for the factors.
unobserved_weight: weight given to unobserved entries of matrix.
regularization: weight of L2 regularization term. If None, no
regularization is done.
row_init: initializer for row factor. Can be a tensor or numpy constant.
If set to "random", the value is initialized randomly.
col_init: initializer for column factor. See row_init for details.
num_row_shards: number of shards to use for row factors.
num_col_shards: number of shards to use for column factors.
row_weights: Must be in one of the following three formats: None, a list
of lists of non-negative real numbers (or equivalent iterables) or a
single non-negative real number.
- When set to None, w_ij = unobserved_weight, which simplifies to ALS.
Note that col_weights must also be set to "None" in this case.
- If it is a list of lists of non-negative real numbers, it needs to be
in the form of [[w_0, w_1, ...], [w_k, ... ], [...]], with the number of
inner lists matching the number of row factor shards and the elements in
each inner list being the weights for the rows of the corresponding row
factor shard. In this case, w_ij = unobserved_weight +
row_weights[i] * col_weights[j].
- If this is a single non-negative real number, this value is used for
all row weights and w_ij = unobserved_weight + row_weights *
col_weights[j].
Note that it is allowed to have row_weights as a list while col_weights
is a single number, or vice versa.
col_weights: See row_weights.
use_factors_weights_cache: When True, the factors and weights will be
cached on the workers before the updates start. Defaults to True.
use_gramian_cache: When True, the Gramians will be cached on the workers
before the updates start. Defaults to True.
"""
self._input_rows = input_rows
self._input_cols = input_cols
self._num_row_shards = num_row_shards
self._num_col_shards = num_col_shards
self._n_components = n_components
self._unobserved_weight = unobserved_weight
self._regularization = regularization
self._regularization_matrix = (
regularization * linalg_ops.eye(self._n_components)
if regularization is not None else None)
assert (row_weights is None) == (col_weights is None)
self._row_weights = WALSModel._create_weights(row_weights, self._input_rows,
self._num_row_shards,
"row_weights")
self._col_weights = WALSModel._create_weights(col_weights, self._input_cols,
self._num_col_shards,
"col_weights")
self._use_factors_weights_cache = use_factors_weights_cache
self._use_gramian_cache = use_gramian_cache
self._row_factors = self._create_factors(self._input_rows,
self._n_components,
self._num_row_shards, row_init,
"row_factors")
self._col_factors = self._create_factors(self._input_cols,
self._n_components,
self._num_col_shards, col_init,
"col_factors")
self._row_gramian = self._create_gramian(self._n_components, "row_gramian")
self._col_gramian = self._create_gramian(self._n_components, "col_gramian")
self._row_update_prep_gramian = self._prepare_gramian(self._col_factors,
self._col_gramian)
self._col_update_prep_gramian = self._prepare_gramian(self._row_factors,
self._row_gramian)
self._create_transient_vars()
@property
def row_factors(self):
"""Returns a list of tensors corresponding to row factor shards."""
return self._row_factors
@property
def col_factors(self):
"""Returns a list of tensors corresponding to column factor shards."""
return self._col_factors
@property
def row_weights(self):
"""Returns a list of tensors corresponding to row weight shards."""
return self._row_weights
@property
def col_weights(self):
"""Returns a list of tensors corresponding to col weight shards."""
return self._col_weights
@property
def initialize_op(self):
"""Returns an op for initializing tensorflow variables."""
all_vars = self._row_factors + self._col_factors
all_vars.extend([self._row_gramian, self._col_gramian])
if self._row_weights is not None:
assert self._col_weights is not None
all_vars.extend(self._row_weights + self._col_weights)
return variables.variables_initializer(all_vars)
@classmethod
def _shard_sizes(cls, dims, num_shards):
"""Helper function to split dims values into num_shards."""
shard_size, residual = divmod(dims, num_shards)
return [shard_size + 1] * residual + [shard_size] * (num_shards - residual)
@classmethod
def _create_factors(cls, rows, cols, num_shards, init, name):
"""Helper function to create row and column factors."""
if callable(init):
init = init()
if isinstance(init, list):
assert len(init) == num_shards
elif isinstance(init, str) and init == "random":
pass
elif num_shards == 1:
init = [init]
sharded_matrix = []
sizes = cls._shard_sizes(rows, num_shards)
assert len(sizes) == num_shards
def make_initializer(i, size):
def initializer():
if init == "random":
return random_ops.random_normal([size, cols])
else:
return init[i]
return initializer
for i, size in enumerate(sizes):
var_name = "%s_shard_%d" % (name, i)
var_init = make_initializer(i, size)
sharded_matrix.append(
variables.Variable(
var_init, dtype=dtypes.float32, name=var_name))
return sharded_matrix
@classmethod
def _create_weights(cls, wt_init, num_wts, num_shards, name):
"""Helper function to create sharded weight vector.
Args:
wt_init: init value for the weights. If None, weights are not created.
This can be None, a list of non-negative real numbers, or a single
non-negative real number (or equivalent iterables).
num_wts: total size of all the weight shards
num_shards: number of shards for the weights
name: name for the new Variables.
Returns:
A list of weight shard Tensors.
Raises:
ValueError: If wt_init is not the right format.
"""
if wt_init is None:
return None
init_mode = "list"
if isinstance(wt_init, collections.Iterable):
if num_shards == 1 and len(wt_init) == num_wts:
wt_init = [wt_init]
assert len(wt_init) == num_shards
elif isinstance(wt_init, numbers.Real) and wt_init >= 0:
init_mode = "scalar"
else:
raise ValueError(
"Invalid weight initialization argument. Must be one of these: "
"None, a real non-negative real number, or a list of lists of "
"non-negative real numbers (or equivalent iterables) corresponding "
"to sharded factors.")
sizes = cls._shard_sizes(num_wts, num_shards)
assert len(sizes) == num_shards
def make_wt_initializer(i, size):
def initializer():
if init_mode == "scalar":
return wt_init * array_ops.ones([size])
else:
return wt_init[i]
return initializer
sharded_weight = []
for i, size in enumerate(sizes):
var_name = "%s_shard_%d" % (name, i)
var_init = make_wt_initializer(i, size)
sharded_weight.append(
variables.Variable(
var_init, dtype=dtypes.float32, name=var_name))
return sharded_weight
@staticmethod
def _create_gramian(n_components, name):
"""Helper function to create the gramian variable.
Args:
n_components: number of dimensions of the factors from which the gramian
will be calculated.
name: name for the new Variables.
Returns:
A gramian Tensor with shape of [n_components, n_components].
"""
return variables.Variable(
array_ops.zeros([n_components, n_components]),
dtype=dtypes.float32,
name=name)
@staticmethod
def _transient_var(name):
"""Helper function to create a Variable."""
return variables.Variable(
1.0,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
validate_shape=False,
name=name)
def _prepare_gramian(self, factors, gramian):
"""Helper function to create ops to prepare/calculate gramian.
Args:
factors: Variable or list of Variable representing (sharded) factors.
Used to compute the updated corresponding gramian value.
gramian: Variable storing the gramian calculated from the factors.
Returns:
An op that updates the gramian with the value calculated from the factors.
"""
partial_gramians = []
for f in factors:
with ops.colocate_with(f):
partial_gramians.append(math_ops.matmul(f, f, transpose_a=True))
with ops.colocate_with(gramian):
prep_gramian = state_ops.assign(gramian,
math_ops.add_n(partial_gramians)).op
return prep_gramian
def _cached_copy(self, var, name, pass_through=False):
"""Helper function to create a worker cached copy of a Variable.
This assigns the var (either a single Variable or a list of Variables) to
local transient cache Variable(s). Note that if var is a list of Variables,
the assignment is done sequentially to minimize the memory overheads.
Also note that if pass_through is set to True, this does not create new
Variables but simply returns the input.
Args:
var: A Variable or a list of Variables to cache.
name: name of cached Variable.
pass_through: when set to True, this simply passes the var straight back
and does not actually create a cache.
Returns:
Tuple consisting of following three entries:
cache: the new transient Variable or list of transient Variables
corresponding one-to-one with var.
cache_init: op to initialize the Variable or the list of Variables.
cache_reset: op to reset the Variable or the list of Variables to some
default value.
"""
if var is None:
return None, None, None
elif pass_through:
cache = var
cache_init = control_flow_ops.no_op()
cache_reset = control_flow_ops.no_op()
elif isinstance(var, variables.Variable):
cache = WALSModel._transient_var(name=name)
with ops.colocate_with(cache):
cache_init = state_ops.assign(cache, var, validate_shape=False)
cache_reset = state_ops.assign(cache, 1.0, validate_shape=False)
else:
assert isinstance(var, list)
assert var
cache = [
WALSModel._transient_var(name="%s_shard_%d" % (name, i))
for i in xrange(len(var))
]
reset_ops = []
for i, c in enumerate(cache):
with ops.colocate_with(c):
if i == 0:
cache_init = state_ops.assign(c, var[i], validate_shape=False)
else:
with ops.control_dependencies([cache_init]):
cache_init = state_ops.assign(c, var[i], validate_shape=False)
reset_ops.append(state_ops.assign(c, 1.0, validate_shape=False))
cache_reset = control_flow_ops.group(*reset_ops)
return cache, cache_init, cache_reset
def _create_transient_vars(self):
"""Creates local cache of factors, weights and gramian for rows and columns.
Note that currently the caching strategy is as follows:
When initiating a row (resp. column) update:
- The column (resp. row) gramian is computed.
- Optionally, if use_gramian_cache is True, the column (resp. row) Gramian
is cached, while the row (resp. column) gramian is reset.
- Optionally, if use_factors_weights_cache is True, the column (resp. row)
factors and weights are cached, while the row (resp. column) factors and
weights are reset.
"""
(self._row_factors_cache, row_factors_cache_init,
row_factors_cache_reset) = self._cached_copy(
self._row_factors,
"row_factors_cache",
pass_through=not self._use_factors_weights_cache)
(self._col_factors_cache, col_factors_cache_init,
col_factors_cache_reset) = self._cached_copy(
self._col_factors,
"col_factors_cache",
pass_through=not self._use_factors_weights_cache)
(self._row_wt_cache, row_wt_cache_init, _) = self._cached_copy(
self._row_weights,
"row_wt_cache",
pass_through=not self._use_factors_weights_cache)
(self._col_wt_cache, col_wt_cache_init, _) = self._cached_copy(
self._col_weights,
"col_wt_cache",
pass_through=not self._use_factors_weights_cache)
(self._row_gramian_cache, row_gramian_cache_init,
row_gramian_cache_reset) = self._cached_copy(
self._row_gramian,
"row_gramian_cache",
pass_through=not self._use_gramian_cache)
(self._col_gramian_cache, col_gramian_cache_init,
col_gramian_cache_reset) = self._cached_copy(
self._col_gramian,
"col_gramian_cache",
pass_through=not self._use_gramian_cache)
self._row_updates_init = control_flow_ops.group(col_factors_cache_init,
row_factors_cache_reset,
col_gramian_cache_init,
row_gramian_cache_reset)
self._col_updates_init = control_flow_ops.group(row_factors_cache_init,
col_factors_cache_reset,
row_gramian_cache_init,
col_gramian_cache_reset)
if self._row_wt_cache is not None:
assert self._col_wt_cache is not None
self._worker_init = control_flow_ops.group(
row_wt_cache_init, col_wt_cache_init, name="worker_init")
else:
self._worker_init = control_flow_ops.no_op(name="worker_init")
@property
def worker_init(self):
"""Op to initialize worker state once before starting any updates."""
return self._worker_init
@property
def row_update_prep_gramian_op(self):
"""Op to form the gramian before starting row updates.
Must be run before initialize_row_update_op and should only be run by one
trainer (usually the chief) when doing distributed training.
"""
return self._row_update_prep_gramian
@property
def col_update_prep_gramian_op(self):
"""Op to form the gramian before starting col updates.
Must be run before initialize_col_update_op and should only be run by one
trainer (usually the chief) when doing distributed training.
"""
return self._col_update_prep_gramian
@property
def initialize_row_update_op(self):
"""Op to initialize worker state before starting row updates."""
return self._row_updates_init
@property
def initialize_col_update_op(self):
"""Op to initialize worker state before starting column updates."""
return self._col_updates_init
@staticmethod
def _get_sharding_func(size, num_shards):
"""Create sharding function for scatter update."""
def func(ids):
if num_shards == 1:
return None, ids
else:
ids_per_shard = size // num_shards
extras = size % num_shards
assignments = math_ops.maximum(ids // (ids_per_shard + 1),
(ids - extras) // ids_per_shard)
new_ids = array_ops.where(assignments < extras,
ids % (ids_per_shard + 1),
(ids - extras) % ids_per_shard)
return assignments, new_ids
return func
@classmethod
def scatter_update(cls, factor, indices, values, sharding_func, name=None):
"""Helper function for doing sharded scatter update."""
assert isinstance(factor, list)
if len(factor) == 1:
with ops.colocate_with(factor[0]):
# TODO(agarwal): assign instead of scatter update for full batch update.
return state_ops.scatter_update(factor[0], indices, values,
name=name).op
else:
num_shards = len(factor)
assignments, new_ids = sharding_func(indices)
assert assignments is not None
assignments = math_ops.cast(assignments, dtypes.int32)
sharded_ids = data_flow_ops.dynamic_partition(new_ids, assignments,
num_shards)
sharded_values = data_flow_ops.dynamic_partition(values, assignments,
num_shards)
updates = []
for i in xrange(num_shards):
updates.append(state_ops.scatter_update(factor[i], sharded_ids[i],
sharded_values[i]))
return control_flow_ops.group(*updates, name=name)
def update_row_factors(self, sp_input=None, transpose_input=False):
r"""Updates the row factors.
Args:
sp_input: A SparseTensor representing a subset of rows of the full input
in any order. Please note that this SparseTensor must retain the
indexing of the original input.
transpose_input: If true, the input will be logically transposed and the
rows corresponding to the transposed input are updated.
Returns:
A tuple consisting of the following elements:
new_values: New values for the row factors.
update_op: An op that assigns the newly computed values to the row
factors.
loss: A tensor (scalar) that contains the normalized minibatch loss,
corresponding to sp_input.
if sp_input contains the rows {A_{i, :}, i \in I}, and the input matrix
A has n total rows, then loss is:
(\|\sqrt W_I \odot (A_I - U_I V^T)\|_F^2 + \lambda \|U_I\|_F^2) *
n / |I| + \lambda \|V\|_F^2.
"""
return self._process_input_helper(True, sp_input=sp_input,
transpose_input=transpose_input)
def update_col_factors(self, sp_input=None, transpose_input=False):
r"""Updates the column factors.
Args:
sp_input: A SparseTensor representing a subset of columns of the full
input. Please refer to comments for update_row_factors for
restrictions.
transpose_input: If true, the input will be logically transposed and the
columns corresponding to the transposed input are updated.
Returns:
A tuple consisting of the following two elements:
new_values: New values for the column factors.
update_op: An op that assigns the newly computed values to the column
factors.
loss: A tensor (scalar) that contains the normalized minibatch loss,
corresponding to sp_input.
If sp_input contains the columns {A_{:, j}, j \in J}, and the input
matrix A has m total columns, then loss is:
(\|\sqrt W_J \odot (A_J - U V_J^T)\|_F^2 + \lambda \|V_J\|_F^2) *
m / |J| + \lambda \|U\|_F^2.
"""
return self._process_input_helper(False, sp_input=sp_input,
transpose_input=transpose_input)
def project_row_factors(self, sp_input=None, transpose_input=False,
projection_weights=None):
"""Projects the row factors.
This computes the row embedding u_i for an observed row a_i by solving
one iteration of the update equations.
Args:
sp_input: A SparseTensor representing a set of rows. Please note that the
column indices of this SparseTensor must match the model column feature
indexing while the row indices are ignored. The returned results will be
in the same ordering as the input rows.
transpose_input: If true, the input will be logically transposed and the
rows corresponding to the transposed input are projected.
projection_weights: The row weights to be used for the projection. If None
then 1.0 is used. This can be either a scalar or a rank-1 tensor with
the number of elements matching the number of rows to be projected.
Note that the column weights will be determined by the underlying WALS
model.
Returns:
Projected row factors.
"""
if projection_weights is None:
projection_weights = 1
return self._process_input_helper(True, sp_input=sp_input,
transpose_input=transpose_input,
row_weights=projection_weights)[0]
def project_col_factors(self,
sp_input=None,
transpose_input=False,
projection_weights=None):
"""Projects the column factors.
This computes the column embedding v_j for an observed column a_j by solving
one iteration of the update equations.
Args:
sp_input: A SparseTensor representing a set of columns. Please note that
the row indices of this SparseTensor must match the model row feature
indexing while the column indices are ignored. The returned results will
be in the same ordering as the input columns.
transpose_input: If true, the input will be logically transposed and the
columns corresponding to the transposed input are projected.
projection_weights: The column weights to be used for the projection. If
None then 1.0 is used. This can be either a scalar or a rank-1 tensor
with the number of elements matching the number of columns to be
projected. Note that the row weights will be determined by the
underlying WALS model.
Returns:
Projected column factors.
"""
if projection_weights is None:
projection_weights = 1
return self._process_input_helper(False, sp_input=sp_input,
transpose_input=transpose_input,
row_weights=projection_weights)[0]
def _process_input_helper(self, update_row_factors,
sp_input=None, transpose_input=False,
row_weights=None):
"""Creates the graph for processing a sparse slice of input.
Args:
update_row_factors: if True, update or project the row_factors, else
update or project the column factors.
sp_input: Please refer to comments for update_row_factors,
update_col_factors, project_row_factors, and project_col_factors for
restrictions.
transpose_input: If True, the input is logically transposed and then the
corresponding rows/columns of the transposed input are updated.
row_weights: If not None, this is the row/column weights to be used for
the update or projection. If None, use the corresponding weights from
the model. Note that the feature (column/row) weights will be
determined by the model. When not None, it can either be a scalar or
a rank-1 tensor with the same number of elements as the number of rows
or columns to be updated/projected.
Returns:
A tuple consisting of the following three elements:
new_values: New values for the row/column factors.
update_op: An op that assigns the newly computed values to the row/column
factors.
loss: A tensor (scalar) that contains the normalized minibatch loss,
corresponding to sp_input.
"""
assert isinstance(sp_input, sparse_tensor.SparseTensor)
if update_row_factors:
left = self._row_factors
right_factors = self._col_factors_cache
row_wt = self._row_wt_cache
col_wt = self._col_wt_cache
total_rows = self._input_rows
sharding_func = WALSModel._get_sharding_func(self._input_rows,
self._num_row_shards)
gramian = self._col_gramian_cache
else:
left = self._col_factors
right_factors = self._row_factors_cache
row_wt = self._col_wt_cache
col_wt = self._row_wt_cache
total_rows = self._input_cols
sharding_func = WALSModel._get_sharding_func(self._input_cols,
self._num_col_shards)
gramian = self._row_gramian_cache
transpose_input = not transpose_input
# Note that the row indices of sp_input are based on the original full input.
# Here we reindex the rows and give them contiguous ids starting at 0.
# We use tf.unique to achieve this reindexing. Note that this is done so
# that the downstream kernel can assume that the input is "dense" along the
# row dimension.
row_ids, col_ids = array_ops.split(
value=sp_input.indices, num_or_size_splits=2, axis=1)
update_row_indices, all_row_ids = array_ops.unique(row_ids[:, 0])
update_col_indices, all_col_ids = array_ops.unique(col_ids[:, 0])
col_ids = array_ops.expand_dims(math_ops.cast(all_col_ids, dtypes.int64), 1)
row_ids = array_ops.expand_dims(math_ops.cast(all_row_ids, dtypes.int64), 1)
if transpose_input:
update_indices = update_col_indices
row_shape = [
math_ops.cast(array_ops.shape(update_row_indices)[0], dtypes.int64)
]
gather_indices = update_row_indices
else:
update_indices = update_row_indices
row_shape = [
math_ops.cast(array_ops.shape(update_col_indices)[0], dtypes.int64)
]
gather_indices = update_col_indices
num_rows = math_ops.cast(array_ops.shape(update_indices)[0], dtypes.int64)
col_shape = [num_rows]
right = embedding_ops.embedding_lookup(
right_factors, gather_indices, partition_strategy="div")
new_sp_indices = array_ops.concat([row_ids, col_ids], 1)
new_sp_shape = (array_ops.concat([row_shape, col_shape], 0) if
transpose_input else
array_ops.concat([col_shape, row_shape], 0))
new_sp_input = sparse_tensor.SparseTensor(
indices=new_sp_indices,
values=sp_input.values,
dense_shape=new_sp_shape)
# Compute lhs and rhs of the normal equations
total_lhs = (self._unobserved_weight * gramian)
if self._regularization_matrix is not None:
total_lhs += self._regularization_matrix
if self._row_weights is None:
# Special case of ALS. Use a much simpler update rule.
total_rhs = (self._unobserved_weight *
sparse_ops.sparse_tensor_dense_matmul(
new_sp_input, right, adjoint_a=transpose_input))
# TODO(rmlarsen): handle transposing in tf.matrix_solve instead of
# transposing explicitly.
# TODO(rmlarsen): multi-thread tf.matrix_solve.
new_left_values = array_ops.transpose(
linalg_ops.matrix_solve(total_lhs, array_ops.transpose(total_rhs)))
else:
if row_weights is None:
# TODO(yifanchen): Add special handling for single shard without using
# embedding_lookup and perform benchmarks for those cases. Same for
# col_weights lookup below.
row_weights_slice = embedding_ops.embedding_lookup(
row_wt, update_indices, partition_strategy="div")
else:
num_indices = array_ops.shape(update_indices)[0]
with ops.control_dependencies(
[check_ops.assert_less_equal(array_ops.rank(row_weights), 1)]):
row_weights_slice = control_flow_ops.cond(
math_ops.equal(array_ops.rank(row_weights), 0),
lambda: (array_ops.ones([num_indices]) * row_weights),
lambda: math_ops.cast(row_weights, dtypes.float32))
col_weights = embedding_ops.embedding_lookup(
col_wt, gather_indices, partition_strategy="div")
partial_lhs, total_rhs = (
gen_factorization_ops.wals_compute_partial_lhs_and_rhs(
right,
col_weights,
self._unobserved_weight,
row_weights_slice,
new_sp_input.indices,
new_sp_input.values,
num_rows,
transpose_input,
name="wals_compute_partial_lhs_rhs"))
total_lhs = array_ops.expand_dims(total_lhs, 0) + partial_lhs
total_rhs = array_ops.expand_dims(total_rhs, -1)
new_left_values = array_ops.squeeze(
linalg_ops.matrix_solve(total_lhs, total_rhs), [2])
update_op_name = "row_update" if update_row_factors else "col_update"
update_op = self.scatter_update(left, update_indices, new_left_values,
sharding_func, name=update_op_name)
# Create the loss subgraph
loss_sp_input = (sparse_ops.sparse_transpose(new_sp_input)
if transpose_input else new_sp_input)
# sp_approx is the low rank estimate of the input matrix, formed by
# computing the product <u_i, v_j> for (i, j) in loss_sp_input.indices.
sp_approx_vals = gen_factorization_ops.masked_matmul(
new_left_values, right, loss_sp_input.indices, transpose_a=False,
transpose_b=True)
sp_approx = sparse_tensor.SparseTensor(
loss_sp_input.indices, sp_approx_vals, loss_sp_input.dense_shape)
sp_approx_sq = math_ops.square(sp_approx)
sp_residual = sparse_ops.sparse_add(loss_sp_input, sp_approx * (-1))
sp_residual_sq = math_ops.square(sp_residual)
row_wt_mat = (constant_op.constant(0.) if self._row_weights is None else
array_ops.expand_dims(row_weights_slice, 1))
col_wt_mat = (constant_op.constant(0.) if self._col_weights is None else
array_ops.expand_dims(col_weights, 0))
# We return the normalized loss
partial_row_gramian = math_ops.matmul(
new_left_values, new_left_values, transpose_a=True)
normalization_factor = total_rows / math_ops.cast(num_rows, dtypes.float32)
loss = (
self._unobserved_weight * (
sparse_ops.sparse_reduce_sum(sp_residual_sq) -
sparse_ops.sparse_reduce_sum(sp_approx_sq) +
math_ops.trace(math_ops.matmul(partial_row_gramian, gramian))
) +
sparse_ops.sparse_reduce_sum(row_wt_mat * (sp_residual_sq * col_wt_mat))
) * normalization_factor
if self._regularization is not None:
loss += self._regularization * (
math_ops.trace(partial_row_gramian) * normalization_factor +
math_ops.trace(gramian)
)
return (new_left_values, update_op, loss)
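# --- Editor's sketch (not part of the original module): a minimal,
# single-process usage example in graph mode, following the call sequence
# described in the WALSModel docstring above. The tiny ratings matrix and the
# variable names below are illustrative placeholders, not part of the API.
if __name__ == "__main__":
  import numpy as np
  from tensorflow.python.client import session as session_lib

  # A 4x3 ratings matrix with four observed entries.
  sp_ratings = sparse_tensor.SparseTensor(
      indices=np.array([[0, 0], [1, 2], [2, 1], [3, 0]], dtype=np.int64),
      values=np.array([5.0, 3.0, 4.0, 1.0], dtype=np.float32),
      dense_shape=[4, 3])

  # row_weights=None / col_weights=None selects the plain ALS update rule.
  model = WALSModel(input_rows=4, input_cols=3, n_components=2,
                    regularization=0.01, row_weights=None, col_weights=None)
  _, row_update_op, _ = model.update_row_factors(sp_input=sp_ratings)
  _, col_update_op, _ = model.update_col_factors(sp_input=sp_ratings)

  with session_lib.Session() as sess:
    sess.run(model.initialize_op)
    sess.run(model.worker_init)
    for _ in range(5):  # a few alternating row/column sweeps
      sess.run(model.row_update_prep_gramian_op)
      sess.run(model.initialize_row_update_op)
      sess.run(row_update_op)
      sess.run(model.col_update_prep_gramian_op)
      sess.run(model.initialize_col_update_op)
      sess.run(col_update_op)
    print("row factors:")
    print(sess.run(model.row_factors[0]))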
|
chenjun0210/tensorflow
|
tensorflow/contrib/factorization/python/ops/factorization_ops.py
|
Python
|
apache-2.0
| 38,639
|
#!/usr/bin/env python
# Copyright 2015, Google, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command-line application that streams data into BigQuery.
This sample is used on this page:
https://cloud.google.com/bigquery/streaming-data-into-bigquery
For more information, see the README.md under /bigquery.
"""
import argparse
import ast
import json
import uuid
import googleapiclient.discovery
from six.moves import input
# [START stream_row_to_bigquery]
def stream_row_to_bigquery(bigquery, project_id, dataset_id, table_name, row,
num_retries=5):
insert_all_data = {
'rows': [{
'json': row,
# Generate a unique id for each row so retries don't accidentally
# duplicate the insert
'insertId': str(uuid.uuid4()),
}]
}
return bigquery.tabledata().insertAll(
projectId=project_id,
datasetId=dataset_id,
tableId=table_name,
body=insert_all_data).execute(num_retries=num_retries)
# [END stream_row_to_bigquery]
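# Editor's note: an illustrative call (the project/dataset/table names and the
# row dict are placeholders; the row must match the destination table's schema):
#
#     bigquery = googleapiclient.discovery.build('bigquery', 'v2')
#     stream_row_to_bigquery(bigquery, 'my-project', 'my_dataset', 'my_table',
#                            {'name': 'Ada', 'age': 36})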
# [START run]
def main(project_id, dataset_id, table_name, num_retries):
# [START build_service]
# Construct the service object for interacting with the BigQuery API.
bigquery = googleapiclient.discovery.build('bigquery', 'v2')
# [END build_service]
for row in get_rows():
response = stream_row_to_bigquery(
bigquery, project_id, dataset_id, table_name, row, num_retries)
print(json.dumps(response))
def get_rows():
line = input("Enter a row (python dict) into the table: ")
while line:
yield ast.literal_eval(line)
line = input("Enter another row into the table \n" +
"[hit enter to stop]: ")
# [END run]
# [START main]
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('project_id', help='Your Google Cloud project ID.')
parser.add_argument('dataset_id', help='A BigQuery dataset ID.')
parser.add_argument(
'table_name', help='Name of the table to load data into.')
parser.add_argument(
'-p', '--poll_interval',
help='How often to poll the query for completion (seconds).',
type=int,
default=1)
parser.add_argument(
'-r', '--num_retries',
help='Number of times to retry in case of 500 error.',
type=int,
default=5)
args = parser.parse_args()
main(
args.project_id,
args.dataset_id,
args.table_name,
args.num_retries)
# [END main]
|
JavaRabbit/CS496_capstone
|
bigquery/api/streaming.py
|
Python
|
apache-2.0
| 3,152
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensorflow.compiler.mlir.tfr.integration.node_expansion."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.compiler.mlir.tfr.resources import gen_composite_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import load_library
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
_lib_dir = os.path.dirname(gen_composite_ops.__file__)
_lib_name = os.path.basename(gen_composite_ops.__file__)[4:].replace(
'.py', '.so')
load_library.load_op_library(os.path.join(_lib_dir, _lib_name))
class NodeExpansionTest(test.TestCase):
def testAddN(self):
t1 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
t2 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
t3 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
sq1 = gen_composite_ops.my_add_n([t1])
sq2 = gen_composite_ops.my_add_n([t1, t2])
sq3 = gen_composite_ops.my_add_n([t1, t2, t3])
self.assertAllEqual(sq1.numpy().reshape(-1), [1, 2, 3, 4])
self.assertAllEqual(sq2.numpy().reshape(-1), [2, 4, 6, 8])
self.assertAllEqual(sq3.numpy().reshape(-1), [3, 6, 9, 12])
def testBiasedDense(self):
t1 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
t2 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
t3 = constant_op.constant([[-10.0, -10.0], [-10.0, -10.0]])
sq = gen_composite_ops.my_biased_dense(t1, t2, t3)
self.assertAllEqual(sq.numpy().reshape(-1), [-3, 0, 5, 12])
def testBiasedDenseRelu(self):
t1 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
t2 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
t3 = constant_op.constant([[-10.0, -10.0], [-10.0, -10.0]])
sq = gen_composite_ops.my_biased_dense(t1, t2, t3, act='relu')
self.assertAllEqual(sq.numpy().reshape(-1), [0, 0, 5, 12])
def testWithKnownKernel(self):
def biasd_dense_elu(x, y, z):
dot = gen_composite_ops.my_biased_dense(x, y, z)
return nn_ops.elu(dot) # with known kernel, should not expand.
t1 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
t2 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
t3 = constant_op.constant([[-10.0, -10.0], [-10.0, -10.0]])
sq = biasd_dense_elu(t1, t2, t3)
self.assertAllClose(sq.numpy().reshape(-1), [-0.950213, 0, 5, 12])
# Regression test for an issue where VarHandleOp wasn't being properly
# imported into MLIR for "no-op" node expansion.
def testVarHandleOp(self):
x = constant_op.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
# Note: we purposely make multiple calls to VarHandleOp to exercise the
# cached kernel lookup path that was exhibiting the VarHandleOp import
# issue.
unused_ = gen_resource_variable_ops.VarHandleOp(
dtype=dtypes.float32, shape=[3, 2])
handle = gen_resource_variable_ops.VarHandleOp(
dtype=dtypes.float32, shape=[3, 2])
gen_resource_variable_ops.AssignVariableOp(resource=handle, value=x)
self.assertAllEqual(
x,
gen_resource_variable_ops.ReadVariableOp(
resource=handle, dtype=dtypes.float32))
if __name__ == '__main__':
os.environ['TF_MLIR_TFR_LIB_DIR'] = 'tensorflow/compiler/mlir/tfr/resources'
ops.enable_eager_execution()
test.main()
|
annarev/tensorflow
|
tensorflow/compiler/mlir/tfr/integration/node_expansion_test.py
|
Python
|
apache-2.0
| 4,072
|
r"""
================================================================
Robust covariance estimation and Mahalanobis distances relevance
================================================================
This example shows covariance estimation with Mahalanobis
distances on Gaussian distributed data.
For Gaussian distributed data, the distance of an observation
:math:`x_i` to the mode of the distribution can be computed using its
Mahalanobis distance:
.. math::
d_{(\mu,\Sigma)}(x_i)^2 = (x_i - \mu)^T\Sigma^{-1}(x_i - \mu)
where :math:`\mu` and :math:`\Sigma` are the location and the covariance of
the underlying Gaussian distributions.
In practice, :math:`\mu` and :math:`\Sigma` are replaced by some
estimates. The standard covariance maximum likelihood estimate (MLE) is very
sensitive to the presence of outliers in the data set and therefore,
the downstream Mahalanobis distances also are. It would be better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the dataset and that the
calculated Mahalanobis distances accurately reflect the true
organization of the observations.
The Minimum Covariance Determinant estimator (MCD) is a robust,
high-breakdown point (i.e. it can be used to estimate the covariance
matrix of highly contaminated datasets, up to
:math:`\frac{n_\text{samples}-n_\text{features}-1}{2}` outliers)
estimator of covariance. The idea behind the MCD is to find
:math:`\frac{n_\text{samples}+n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant,
yielding a "pure" subset of observations from which to compute
standard estimates of location and covariance. The MCD was introduced by
P. J. Rousseeuw in [1]_.
This example illustrates how the Mahalanobis distances are affected by
outlying data. Observations drawn from a contaminating distribution
are not distinguishable from the observations coming from the real,
Gaussian distribution when using standard covariance MLE based Mahalanobis
distances. Using MCD-based
Mahalanobis distances, the two populations become
distinguishable. Associated applications include outlier detection,
observation ranking and clustering.
.. note::
See also :ref:`sphx_glr_auto_examples_covariance_plot_robust_vs_empirical_covariance.py`
.. topic:: References:
.. [1] P. J. Rousseeuw. `Least median of squares regression
<http://web.ipac.caltech.edu/staff/fmasci/home/astro_refs/LeastMedianOfSquares.pdf>`_.
J. Am. Stat. Assoc., 79:871, 1984.
.. [2] Wilson, E. B., & Hilferty, M. M. (1931). `The distribution of chi-square.
<https://water.usgs.gov/osw/bulletin17b/Wilson_Hilferty_1931.pdf>`_
Proceedings of the National Academy of Sciences of the United States
of America, 17, 684-688.
""" # noqa: E501
# %%
# Generate data
# --------------
#
# First, we generate a dataset of 125 samples and 2 features. Both features
# are Gaussian distributed with mean of 0 but feature 1 has a standard
# deviation equal to 2 and feature 2 has a standard deviation equal to 1. Next,
# 25 samples are replaced with Gaussian outlier samples where feature 1 has
# a standard deviation equal to 1 and feature 2 has a standard deviation equal
# to 7.
import numpy as np
# for consistent results
np.random.seed(7)
n_samples = 125
n_outliers = 25
n_features = 2
# generate Gaussian data of shape (125, 2)
gen_cov = np.eye(n_features)
gen_cov[0, 0] = 2.0
X = np.dot(np.random.randn(n_samples, n_features), gen_cov)
# add some outliers
outliers_cov = np.eye(n_features)
outliers_cov[np.arange(1, n_features), np.arange(1, n_features)] = 7.0
X[-n_outliers:] = np.dot(np.random.randn(n_outliers, n_features), outliers_cov)
# %%
# Comparison of results
# ---------------------
#
# Below, we fit MCD and MLE based covariance estimators to our data and print
# the estimated covariance matrices. Note that the estimated variance of
# feature 2 is much higher with the MLE based estimator (7.5) than
# that of the MCD robust estimator (1.2). This shows that the MCD based
# robust estimator is much more resistant to the outlier samples, which were
# designed to have a much larger variance in feature 2.
import matplotlib.pyplot as plt
from sklearn.covariance import EmpiricalCovariance, MinCovDet
# fit a MCD robust estimator to data
robust_cov = MinCovDet().fit(X)
# fit a MLE estimator to data
emp_cov = EmpiricalCovariance().fit(X)
print(
"Estimated covariance matrix:\nMCD (Robust):\n{}\nMLE:\n{}".format(
robust_cov.covariance_, emp_cov.covariance_
)
)
# %%
# To better visualize the difference, we plot contours of the
# Mahalanobis distances calculated by both methods. Notice that the robust
# MCD based Mahalanobis distances fit the inlier black points much better,
# whereas the MLE based distances are more influenced by the outlier
# red points.
fig, ax = plt.subplots(figsize=(10, 5))
# Plot data set
inlier_plot = ax.scatter(X[:, 0], X[:, 1], color="black", label="inliers")
outlier_plot = ax.scatter(
X[:, 0][-n_outliers:], X[:, 1][-n_outliers:], color="red", label="outliers"
)
ax.set_xlim(ax.get_xlim()[0], 10.0)
ax.set_title("Mahalanobis distances of a contaminated data set")
# Create meshgrid of feature 1 and feature 2 values
xx, yy = np.meshgrid(
np.linspace(plt.xlim()[0], plt.xlim()[1], 100),
np.linspace(plt.ylim()[0], plt.ylim()[1], 100),
)
zz = np.c_[xx.ravel(), yy.ravel()]
# Calculate the MLE based Mahalanobis distances of the meshgrid
mahal_emp_cov = emp_cov.mahalanobis(zz)
mahal_emp_cov = mahal_emp_cov.reshape(xx.shape)
emp_cov_contour = plt.contour(
xx, yy, np.sqrt(mahal_emp_cov), cmap=plt.cm.PuBu_r, linestyles="dashed"
)
# Calculate the MCD based Mahalanobis distances
mahal_robust_cov = robust_cov.mahalanobis(zz)
mahal_robust_cov = mahal_robust_cov.reshape(xx.shape)
robust_contour = ax.contour(
xx, yy, np.sqrt(mahal_robust_cov), cmap=plt.cm.YlOrBr_r, linestyles="dotted"
)
# Add legend
ax.legend(
[
emp_cov_contour.collections[1],
robust_contour.collections[1],
inlier_plot,
outlier_plot,
],
["MLE dist", "MCD dist", "inliers", "outliers"],
loc="upper right",
borderaxespad=0,
)
plt.show()
# %%
# Finally, we highlight the ability of MCD based Mahalanobis distances to
# distinguish outliers. We take the cubic root of the Mahalanobis distances,
# yielding approximately normal distributions (as suggested by Wilson and
# Hilferty [2]_), then plot the values of inlier and outlier samples with
# boxplots. The distribution of outlier samples is more separated from the
# distribution of inlier samples for robust MCD based Mahalanobis distances.
fig, (ax1, ax2) = plt.subplots(1, 2)
plt.subplots_adjust(wspace=0.6)
# Calculate cubic root of MLE Mahalanobis distances for samples
emp_mahal = emp_cov.mahalanobis(X - np.mean(X, 0)) ** (0.33)
# Plot boxplots
ax1.boxplot([emp_mahal[:-n_outliers], emp_mahal[-n_outliers:]], widths=0.25)
# Plot individual samples
ax1.plot(
np.full(n_samples - n_outliers, 1.26),
emp_mahal[:-n_outliers],
"+k",
markeredgewidth=1,
)
ax1.plot(np.full(n_outliers, 2.26), emp_mahal[-n_outliers:], "+k", markeredgewidth=1)
ax1.axes.set_xticklabels(("inliers", "outliers"), size=15)
ax1.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
ax1.set_title("Using non-robust estimates\n(Maximum Likelihood)")
# Calculate cubic root of MCD Mahalanobis distances for samples
robust_mahal = robust_cov.mahalanobis(X - robust_cov.location_) ** (0.33)
# Plot boxplots
ax2.boxplot([robust_mahal[:-n_outliers], robust_mahal[-n_outliers:]], widths=0.25)
# Plot individual samples
ax2.plot(
np.full(n_samples - n_outliers, 1.26),
robust_mahal[:-n_outliers],
"+k",
markeredgewidth=1,
)
ax2.plot(np.full(n_outliers, 2.26), robust_mahal[-n_outliers:], "+k", markeredgewidth=1)
ax2.axes.set_xticklabels(("inliers", "outliers"), size=15)
ax2.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
ax2.set_title("Using robust estimates\n(Minimum Covariance Determinant)")
plt.show()
|
manhhomienbienthuy/scikit-learn
|
examples/covariance/plot_mahalanobis_distances.py
|
Python
|
bsd-3-clause
| 8,075
|
import unittest
class SampleTest(unittest.TestCase):
def test_one(self):
pass
|
wfxiang08/django178
|
tests/test_runner/valid_app/tests/__init__.py
|
Python
|
bsd-3-clause
| 93
|
# encoding: utf-8
"""
nlri/__init__.py
Created by Thomas Mangin on 2013-08-07.
Copyright (c) 2009-2013 Exa Networks. All rights reserved.
"""
|
mshahbaz/exabgp
|
lib/exabgp/bgp/message/update/nlri/__init__.py
|
Python
|
bsd-3-clause
| 143
|
"""
A monkeypatch for ``django.dispatch`` to send signals safely.
Usage::
>>> import safe_signals
>>> safe_signals.start_the_machine()
``django.dispatch.Signal.send`` is replaced with a safer function that catches
and logs errors. It's like ``Signal.send_robust`` but with logging.
"""
import logging
from django.dispatch.dispatcher import Signal, _make_id
from django.conf import settings
log = logging.getLogger('signals')
def safe_send(self, sender, **named):
responses = []
if not self.receivers:
return responses
do_raise = getattr(settings, 'RAISE_ON_SIGNAL_ERROR', False)
# Call each receiver with whatever arguments it can accept.
# Return a list of tuple pairs [(receiver, response), ... ].
for receiver in self._live_receivers(_make_id(sender)):
try:
response = receiver(signal=self, sender=sender, **named)
except Exception as err:
if do_raise:
raise
log.error('Error calling signal', exc_info=True)
responses.append((receiver, err))
else:
responses.append((receiver, response))
return responses
safe_send.__doc__ = Signal.send_robust.__doc__
unsafe_send = Signal.send
def start_the_machine():
# Monkeypatch!
Signal.send = safe_send
Signal.send_robust = safe_send
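# Editor's note: an illustrative sketch (not part of the original module).
# After start_the_machine(), a receiver that raises is logged instead of
# breaking the remaining receivers:
#
#     update_done = Signal(providing_args=['instance'])
#
#     def broken(signal, sender, **kwargs):
#         raise RuntimeError('boom')
#
#     def fine(signal, sender, **kwargs):
#         return 'ok'
#
#     update_done.connect(broken, weak=False)
#     update_done.connect(fine, weak=False)
#     update_done.send(sender=None)
#     # -> [(broken, RuntimeError('boom')), (fine, 'ok')], unless
#     #    settings.RAISE_ON_SIGNAL_ERROR is True, in which case it re-raises.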
|
clouserw/olympia
|
lib/misc/safe_signals.py
|
Python
|
bsd-3-clause
| 1,345
|
from __future__ import unicode_literals
import os, sys
sys.path.append('.')
sys.path.append('lib/py')
sys.path.append('erpnext')
import unittest, webnotes
from webnotes.test_runner import make_test_records
make_test_records("Profile")
class TestEmail(unittest.TestCase):
def setUp(self):
webnotes.conn.sql("""update tabProfile set unsubscribed=0""")
webnotes.conn.sql("""delete from `tabBulk Email`""")
def test_send(self):
from webnotes.utils.email_lib import sendmail
#sendmail('test@example.com', subject='Test Mail', msg="Test Content")
def test_bulk(self):
from webnotes.utils.email_lib.bulk import send
send(recipients = ['test@example.com', 'test1@example.com'],
doctype='Profile', email_field='email',
subject='Testing Bulk', message='This is a bulk mail!')
bulk = webnotes.conn.sql("""select * from `tabBulk Email` where status='Not Sent'""", as_dict=1)
self.assertEquals(len(bulk), 2)
self.assertTrue('test@example.com' in [d['recipient'] for d in bulk])
self.assertTrue('test1@example.com' in [d['recipient'] for d in bulk])
self.assertTrue('Unsubscribe' in bulk[0]['message'])
def test_flush(self):
self.test_bulk()
from webnotes.utils.email_lib.bulk import flush
flush(from_test=True)
bulk = webnotes.conn.sql("""select * from `tabBulk Email` where status='Sent'""", as_dict=1)
self.assertEquals(len(bulk), 2)
self.assertTrue('test@example.com' in [d['recipient'] for d in bulk])
self.assertTrue('test1@example.com' in [d['recipient'] for d in bulk])
def test_unsubscribe(self):
from webnotes.utils.email_lib.bulk import unsubscribe, send
webnotes.form_dict = {
'email':'test@example.com',
'type':'Profile',
'email_field':'email',
"from_test": True
}
unsubscribe()
send(recipients = ['test@example.com', 'test1@example.com'],
doctype='Profile', email_field='email',
subject='Testing Bulk', message='This is a bulk mail!')
bulk = webnotes.conn.sql("""select * from `tabBulk Email` where status='Not Sent'""",
as_dict=1)
self.assertEquals(len(bulk), 1)
self.assertFalse('test@example.com' in [d['recipient'] for d in bulk])
self.assertTrue('test1@example.com' in [d['recipient'] for d in bulk])
self.assertTrue('Unsubscribe' in bulk[0]['message'])
def test_bulk_limit(self):
from webnotes.utils.email_lib.bulk import unsubscribe, send, BulkLimitCrossedError
self.assertRaises(BulkLimitCrossedError, send,
recipients=['test@example.com']*1000,
doctype='Profile', email_field='email',
subject='Testing Bulk', message='This is a bulk mail!')
if __name__=='__main__':
webnotes.connect()
unittest.main()
|
gangadhar-kadam/mic-wnframework
|
webnotes/tests/test_email.py
|
Python
|
mit
| 2,645
|
#!/usr/bin/env python
import os
curdir = os.path.dirname(__file__)
import matplotlib as mpl
mpl.rcParams['figure.figsize'] = 12,9
from multiphonon.backward.plotutils import plot_intermediate_result_sqe as plot
plot(curdir)
from matplotlib import pyplot as plt
plt.show()
|
sns-chops/multiphonon
|
tests/data/work-V/round-5/plot_sqe.py
|
Python
|
mit
| 271
|
# Quick example of loading our data into variables
import pandas as pd
import numpy as np
# Puts tweets into a data frame
df = pd.read_csv('tweets.csv')
# Selects the sentiment label column from our data frame
target = df['is_there_an_emotion_directed_at_a_brand_or_product']
# Selects the tweet text column from our data frame
text = df['tweet_text']
print(len(text))
|
lukas/ml-class
|
videos/text-classifier/load-data.py
|
Python
|
gpl-2.0
| 359
|
# Copyright 2012 the rootpy developers
# distributed under the terms of the GNU General Public License
"""
Functions useful for interfacing with C/C++ functions:
* ``callback`` => Allows you to pass ctypes CFUNCTYPE objects as parameters to
PyROOT functions
* ``objectproxy_realaddress`` => Determine the real address of a ROOT objects
(useful because multiple ObjectProxies can point to the same underlying object)
"""
from __future__ import absolute_import
import ctypes as C
from . import quickroot as QROOT
__all__ = [
'callback',
'objectproxy_realaddress',
]
def callback(cfunc):
"""
Turn a ctypes CFUNCTYPE instance into a value which can be passed into PyROOT
"""
# Note:
# ROOT wants a c_voidp whose addressof() == the call site of the target
# function. This hackery is necessary to achieve that.
return C.c_voidp.from_address(C.cast(cfunc, C.c_voidp).value)
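# Editor's note: an illustrative (hypothetical) usage sketch. Wrap a Python
# function in a ctypes CFUNCTYPE and convert it with ``callback`` before
# handing it to a PyROOT call that expects a raw function pointer:
#
#     NOTIFY = C.CFUNCTYPE(None)            # void (*)(void)
#     on_notify = NOTIFY(my_python_hook)    # keep a reference to avoid GC
#     some_root_object.SetNotifyHook(callback(on_notify))   # method name is made up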
def objectproxy_realaddress(obj):
"""
Obtain a real address as an integer from an objectproxy.
"""
voidp = QROOT.TPython.ObjectProxy_AsVoidPtr(obj)
return C.addressof(C.c_char.from_buffer(voidp))
|
mverzett/rootpy
|
rootpy/utils/cinterface.py
|
Python
|
gpl-3.0
| 1,146
|
#!/usr/bin/env python
#
# Copyright 2004,2007,2010,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, blocks
class test_add_mult_div_sub(gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
def tearDown(self):
self.tb = None
def help_ii(self, src_data, exp_data, op):
for s in zip(list(range(len(src_data))), src_data):
src = blocks.vector_source_i(s[1])
self.tb.connect(src, (op, s[0]))
dst = blocks.vector_sink_i()
self.tb.connect(op, dst)
self.tb.run()
result_data = dst.data()
self.assertEqual(exp_data, result_data)
def help_ss(self, src_data, exp_data, op):
for s in zip(list(range(len(src_data))), src_data):
src = blocks.vector_source_s(s[1])
self.tb.connect(src, (op, s[0]))
dst = blocks.vector_sink_s()
self.tb.connect(op, dst)
self.tb.run()
result_data = dst.data()
self.assertEqual(exp_data, result_data)
def help_ff(self, src_data, exp_data, op):
for s in zip(list(range(len(src_data))), src_data):
src = blocks.vector_source_f(s[1])
self.tb.connect(src, (op, s[0]))
dst = blocks.vector_sink_f()
self.tb.connect(op, dst)
self.tb.run()
result_data = dst.data()
self.assertEqual(exp_data, result_data)
def help_cc(self, src_data, exp_data, op):
for s in zip(list(range(len(src_data))), src_data):
src = blocks.vector_source_c(s[1])
self.tb.connect(src, (op, s[0]))
dst = blocks.vector_sink_c()
self.tb.connect(op, dst)
self.tb.run()
result_data = dst.data()
self.assertEqual(exp_data, result_data)
# add_XX
def test_add_ss(self):
src1_data = (1, 2, 3, 4, 5)
src2_data = (8, -3, 4, 8, 2)
expected_result = (9, -1, 7, 12, 7)
op = blocks.add_ss()
self.help_ss((src1_data, src2_data), expected_result, op)
def test_add_ii(self):
src1_data = (1, 2, 3, 4, 5)
src2_data = (8, -3, 4, 8, 2)
expected_result = (9, -1, 7, 12, 7)
op = blocks.add_ii()
self.help_ii((src1_data, src2_data), expected_result, op)
def test_add_ff(self):
src1_data = (1.0, 2.0, 3.0, 4.0, 5.0)
src2_data = (8.0, -3.0, 4.0, 8.0, 2.0)
expected_result = (9.0, -1.0, 7.0, 12.0, 7.0)
op = blocks.add_ff()
self.help_ff((src1_data, src2_data), expected_result, op)
def test_add_cc(self):
src1_data = (1+1j, 2+2j, 3+3j, 4+4j, 5+5j)
src2_data = (8+8j, -3-3j, 4+4j, 8+8j, 2+2j)
expected_result = (9+9j, -1-1j, 7+7j, 12+12j, 7+7j)
op = blocks.add_cc()
self.help_cc((src1_data, src2_data), expected_result, op)
# add_const_XX
def test_add_const_ss(self):
src_data = (1, 2, 3, 4, 5)
expected_result = (6, 7, 8, 9, 10)
op = blocks.add_const_ss(5)
self.help_ss((src_data,), expected_result, op)
def test_add_const_ii(self):
src_data = (1, 2, 3, 4, 5)
expected_result = (6, 7, 8, 9, 10)
op = blocks.add_const_ii(5)
self.help_ii((src_data,), expected_result, op)
def test_add_const_ff(self):
src_data = (1, 2, 3, 4, 5)
expected_result = (6, 7, 8, 9, 10)
op = blocks.add_const_ff(5)
self.help_ff((src_data,), expected_result, op)
def test_add_const_cc(self):
src_data = (1, 2, 3, 4, 5)
expected_result = (1+5j, 2+5j, 3+5j, 4+5j, 5+5j)
op = blocks.add_const_cc(5j)
self.help_cc((src_data,), expected_result, op)
# multiply_XX
def test_multiply_ss(self):
src1_data = (1, 2, 3, 4, 5)
src2_data = (8, -3, 4, 8, 2)
expected_result = (8, -6, 12, 32, 10)
op = blocks.multiply_ss()
self.help_ss((src1_data, src2_data),
expected_result, op)
def test_multiply_ii(self):
src1_data = (1, 2, 3, 4, 5)
src2_data = (8, -3, 4, 8, 2)
expected_result = (8, -6, 12, 32, 10)
op = blocks.multiply_ii()
self.help_ii((src1_data, src2_data),
expected_result, op)
def test_multiply_ff(self):
src1_data = (1, 2, 3, 4, 5)
src2_data = (8, -3, 4, 8, 2)
expected_result = (8, -6, 12, 32, 10)
op = blocks.multiply_ff()
self.help_ff((src1_data, src2_data),
expected_result, op)
def test_multiply_cc(self):
src1_data = (1+1j, 2+2j, 3+3j, 4+4j, 5+5j)
src2_data = (8, -3, 4, 8, 2)
expected_result = (8+8j, -6-6j, 12+12j, 32+32j, 10+10j)
op = blocks.multiply_cc()
self.help_cc((src1_data, src2_data),
expected_result, op)
# multiply_const_XX
def test_multiply_const_ss(self):
src_data = (-1, 0, 1, 2, 3)
expected_result = (-5, 0, 5, 10, 15)
op = blocks.multiply_const_ss(5)
self.help_ss((src_data,), expected_result, op)
def test_multiply_const_ii(self):
src_data = (-1, 0, 1, 2, 3)
expected_result = (-5, 0, 5, 10, 15)
op = blocks.multiply_const_ii(5)
self.help_ii((src_data,), expected_result, op)
def test_multiply_const_ff(self):
src_data = (-1, 0, 1, 2, 3)
expected_result = (-5, 0, 5, 10, 15)
op = blocks.multiply_const_ff(5)
self.help_ff((src_data,), expected_result, op)
def test_multiply_const_cc(self):
src_data = (-1-1j, 0+0j, 1+1j, 2+2j, 3+3j)
expected_result = (-5-5j, 0+0j, 5+5j, 10+10j, 15+15j)
op = blocks.multiply_const_cc(5)
self.help_cc((src_data,), expected_result, op)
def test_multiply_const_cc2(self):
src_data = (-1-1j, 0+0j, 1+1j, 2+2j, 3+3j)
expected_result = (-3-7j, 0+0j, 3+7j, 6+14j, 9+21j)
op = blocks.multiply_const_cc(5+2j)
self.help_cc((src_data,), expected_result, op)
def test_sub_ii(self):
src1_data = (1, 2, 3, 4, 5)
src2_data = (8, -3, 4, 8, 2)
expected_result = (-7, 5, -1, -4, 3)
op = blocks.sub_ii()
self.help_ii((src1_data, src2_data),
expected_result, op)
def test_sub_ii1(self):
src1_data = (1, 2, 3, 4, 5)
expected_result = (1, 2, 3, 4, 5)
src = blocks.vector_source_i(src1_data)
op = blocks.sub_ii()
dst = blocks.vector_sink_i()
self.tb.connect(src, op, dst)
self.tb.run()
result_data = dst.data()
self.assertEqual(expected_result, result_data)
def test_sub_ss(self):
src1_data = (1, 2, 3, 4, 5)
src2_data = (8, -3, 4, 8, 2)
expected_result = (-7, 5, -1, -4, 3)
op = blocks.sub_ss()
self.help_ss((src1_data, src2_data),
expected_result, op)
def test_sub_ss1(self):
src1_data = (1, 2, 3, 4, 5)
expected_result = (1, 2, 3, 4, 5)
src = blocks.vector_source_s(src1_data)
op = blocks.sub_ss()
dst = blocks.vector_sink_s()
self.tb.connect(src, op, dst)
self.tb.run()
result_data = dst.data()
self.assertEqual(expected_result, result_data)
def test_sub_ff(self):
src1_data = (1, 2, 3, 4, 5)
src2_data = (8, -3, 4, 8, 2)
expected_result = (-7, 5, -1, -4, 3)
op = blocks.sub_ff()
self.help_ff((src1_data, src2_data),
expected_result, op)
def test_sub_ff1(self):
src1_data = (1, 2, 3, 4, 5)
expected_result = (1, 2, 3, 4, 5)
src = blocks.vector_source_f(src1_data)
op = blocks.sub_ff()
dst = blocks.vector_sink_f()
self.tb.connect(src, op, dst)
self.tb.run()
result_data = dst.data()
self.assertEqual(expected_result, result_data)
def test_sub_cc(self):
src1_data = (1, 2, 3, 4, 5)
src2_data = (8, -3, 4, 8, 2)
expected_result = (-7, 5, -1, -4, 3)
op = blocks.sub_cc()
self.help_cc((src1_data, src2_data),
expected_result, op)
def test_sub_cc1(self):
src1_data = (1, 2, 3, 4, 5)
expected_result = (1, 2, 3, 4, 5)
src = blocks.vector_source_c(src1_data)
op = blocks.sub_cc()
dst = blocks.vector_sink_c()
self.tb.connect(src, op, dst)
self.tb.run()
result_data = dst.data()
self.assertEqual(expected_result, result_data)
def test_div_ff(self):
src1_data = ( 5, 9, -15, 1024)
src2_data = (10, 3, -5, 64)
expected_result = (0.5, 3, 3, 16)
op = blocks.divide_ff()
self.help_ff((src1_data, src2_data), expected_result, op)
if __name__ == '__main__':
gr_unittest.run(test_add_mult_div_sub, "test_add_mult_div_sub.xml")
|
bastibl/gnuradio
|
gr-blocks/python/blocks/qa_add_mult_div_sub.py
|
Python
|
gpl-3.0
| 9,642
|
# Tests for source4/dsdb/samdb/ldb_modules/password_hash.c
#
# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2017
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Tests for source4/dsdb/samdb/ldb_modules/password_hash.c
These tests are designed to also run against Windows to confirm the values
returned from Windows.
To run against Windows:
Set the following environment variables:
PASSWORD=Administrator password
USERNAME=Administrator
SMB_CONF_PATH=/dev/null
PYTHONPATH=bin/python
SERVER=Windows server IP
/usr/bin/python source4/scripting/bin/subunitrun
samba.tests.password_hash_ldap.PassWordHashLDAPTests
-U"Administrator%adminpassword"
"""
from samba.tests.password_hash import (
PassWordHashTests,
get_package,
USER_NAME,
USER_PASS
)
from samba.samdb import SamDB
from samba.ndr import ndr_unpack
from samba.dcerpc import drsblobs, drsuapi, misc
from samba import drs_utils, net
from samba.credentials import Credentials
import binascii
import os
def attid_equal(a1, a2):
return (a1 & 0xffffffff) == (a2 & 0xffffffff)
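# Illustrative note (not part of the original test): attid_equal compares only
# the low 32 bits, so two ATTID values that differ in their upper bits are
# still treated as equal, e.g.:
#     attid_equal(0x00020059, 0x100020059)   # True: both mask to 0x00020059
#     attid_equal(0x00020059, 0x0002005a)    # False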
class PassWordHashLDAPTests(PassWordHashTests):
def setUp(self):
super(PassWordHashLDAPTests, self).setUp()
# Get the supplemental credentials for the user under test
def get_supplemental_creds_drs(self):
binding_str = "ncacn_ip_tcp:%s[seal]" % os.environ["SERVER"]
dn = "cn=" + USER_NAME + ",cn=users," + self.base_dn
drs = drsuapi.drsuapi(binding_str, self.get_loadparm(), self.creds)
(drs_handle, supported_extensions) = drs_utils.drs_DsBind(drs)
req8 = drsuapi.DsGetNCChangesRequest8()
null_guid = misc.GUID()
req8.destination_dsa_guid = null_guid
req8.source_dsa_invocation_id = null_guid
req8.naming_context = drsuapi.DsReplicaObjectIdentifier()
req8.naming_context.dn = unicode(dn)
req8.highwatermark = drsuapi.DsReplicaHighWaterMark()
req8.highwatermark.tmp_highest_usn = 0
req8.highwatermark.reserved_usn = 0
req8.highwatermark.highest_usn = 0
req8.uptodateness_vector = None
req8.replica_flags = (drsuapi.DRSUAPI_DRS_INIT_SYNC |
drsuapi.DRSUAPI_DRS_PER_SYNC |
drsuapi.DRSUAPI_DRS_GET_ANC |
drsuapi.DRSUAPI_DRS_NEVER_SYNCED |
drsuapi.DRSUAPI_DRS_WRIT_REP)
req8.max_object_count = 402
req8.max_ndr_size = 402116
req8.extended_op = drsuapi.DRSUAPI_EXOP_REPL_OBJ
req8.fsmo_info = 0
req8.partial_attribute_set = None
req8.partial_attribute_set_ex = None
req8.mapping_ctr.num_mappings = 0
req8.mapping_ctr.mappings = None
(level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
obj_item = ctr.first_object
obj = obj_item.object
sc_blob = None
for i in range(0, obj.attribute_ctr.num_attributes):
attr = obj.attribute_ctr.attributes[i]
if attid_equal(attr.attid,
drsuapi.DRSUAPI_ATTID_supplementalCredentials):
net_ctx = net.Net(self.creds)
net_ctx.replicate_decrypt(drs, attr, 0)
sc_blob = attr.value_ctr.values[0].blob
sc = ndr_unpack(drsblobs.supplementalCredentialsBlob, sc_blob)
return sc
def test_wDigest_supplementalCredentials(self):
self.creds = Credentials()
self.creds.set_username(os.environ["USERNAME"])
self.creds.set_password(os.environ["PASSWORD"])
self.creds.guess(self.lp)
ldb = SamDB("ldap://" + os.environ["SERVER"],
credentials=self.creds,
lp=self.lp)
self.add_user(ldb=ldb)
sc = self.get_supplemental_creds_drs()
(pos, package) = get_package(sc, "Primary:WDigest")
self.assertEquals("Primary:WDigest", package.name)
# Check that the WDigest values are correct.
#
digests = ndr_unpack(drsblobs.package_PrimaryWDigestBlob,
binascii.a2b_hex(package.data))
self.check_wdigests(digests)
|
urisimchoni/samba
|
python/samba/tests/password_hash_ldap.py
|
Python
|
gpl-3.0
| 4,971
|
# -*- coding: utf-8 -*-
# © 2016 Chafique DELLI @ Akretion
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from . import purchase
|
factorlibre/purchase-workflow
|
purchase_picking_state/__init__.py
|
Python
|
agpl-3.0
| 153
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, json
from frappe.utils import cstr, cint
from frappe import msgprint, _
from frappe.model.mapper import get_mapped_doc
from erpnext.setup.utils import get_exchange_rate
from erpnext.utilities.transaction_base import TransactionBase
subject_field = "title"
sender_field = "contact_email"
class Opportunity(TransactionBase):
def after_insert(self):
if self.lead:
frappe.get_doc("Lead", self.lead).set_status(update=True)
def validate(self):
self._prev = frappe._dict({
"contact_date": frappe.db.get_value("Opportunity", self.name, "contact_date") if \
(not cint(self.get("__islocal"))) else None,
"contact_by": frappe.db.get_value("Opportunity", self.name, "contact_by") if \
(not cint(self.get("__islocal"))) else None,
})
self.make_new_lead_if_required()
if not self.enquiry_from:
frappe.throw(_("Opportunity From field is mandatory"))
self.set_status()
self.validate_item_details()
self.validate_uom_is_integer("uom", "qty")
self.validate_lead_cust()
self.validate_cust_name()
if not self.title:
self.title = self.customer_name
from erpnext.accounts.utils import validate_fiscal_year
validate_fiscal_year(self.transaction_date, self.fiscal_year, _("Opportunity Date"), self)
def make_new_lead_if_required(self):
"""Set lead against new opportunity"""
if not (self.lead or self.customer):
lead_name = frappe.db.get_value("Lead", {"email_id": self.contact_email})
if not lead_name:
lead = frappe.get_doc({
"doctype": "Lead",
"email_id": self.contact_email,
"lead_name": self.contact_email
})
lead.insert(ignore_permissions=True)
lead_name = lead.name
self.enquiry_from = "Lead"
self.lead = lead_name
def declare_enquiry_lost(self,arg):
if not self.has_quotation():
frappe.db.set(self, 'status', 'Lost')
frappe.db.set(self, 'order_lost_reason', arg)
else:
frappe.throw(_("Cannot declare as lost, because Quotation has been made."))
def on_trash(self):
self.delete_events()
def has_quotation(self):
return frappe.db.get_value("Quotation Item", {"prevdoc_docname": self.name, "docstatus": 1})
def has_ordered_quotation(self):
return frappe.db.sql("""select q.name from `tabQuotation` q, `tabQuotation Item` qi
where q.name = qi.parent and q.docstatus=1 and qi.prevdoc_docname =%s and q.status = 'Ordered'""", self.name)
def validate_cust_name(self):
if self.customer:
self.customer_name = frappe.db.get_value("Customer", self.customer, "customer_name")
elif self.lead:
lead_name, company_name = frappe.db.get_value("Lead", self.lead, ["lead_name", "company_name"])
self.customer_name = company_name or lead_name
def get_cust_address(self,name):
details = frappe.db.sql("""select customer_name, address, territory, customer_group
from `tabCustomer` where name = %s and docstatus != 2""", (name), as_dict = 1)
if details:
ret = {
'customer_name': details and details[0]['customer_name'] or '',
'address' : details and details[0]['address'] or '',
'territory' : details and details[0]['territory'] or '',
'customer_group' : details and details[0]['customer_group'] or ''
}
			# get primary contact details (done separately because, if there is no primary contact, a join query would fail to fetch the customer details)
contact_det = frappe.db.sql("""select contact_name, contact_no, email_id
from `tabContact` where customer = %s and is_customer = 1
and is_primary_contact = 'Yes' and docstatus != 2""", name, as_dict = 1)
ret['contact_person'] = contact_det and contact_det[0]['contact_name'] or ''
ret['contact_no'] = contact_det and contact_det[0]['contact_no'] or ''
ret['email_id'] = contact_det and contact_det[0]['email_id'] or ''
return ret
else:
frappe.throw(_("Customer {0} does not exist").format(name), frappe.DoesNotExistError)
def on_update(self):
self.add_calendar_event()
def add_calendar_event(self, opts=None, force=False):
if not opts:
opts = frappe._dict()
opts.description = ""
opts.contact_date = self.contact_date
if self.customer:
if self.contact_person:
opts.description = 'Contact '+cstr(self.contact_person)
else:
opts.description = 'Contact customer '+cstr(self.customer)
elif self.lead:
if self.contact_display:
opts.description = 'Contact '+cstr(self.contact_display)
else:
opts.description = 'Contact lead '+cstr(self.lead)
opts.subject = opts.description
opts.description += '. By : ' + cstr(self.contact_by)
if self.to_discuss:
opts.description += ' To Discuss : ' + cstr(self.to_discuss)
super(Opportunity, self).add_calendar_event(opts, force)
def validate_item_details(self):
if not self.get('items'):
return
# set missing values
item_fields = ("item_name", "description", "item_group", "brand")
for d in self.items:
if not d.item_code:
continue
item = frappe.db.get_value("Item", d.item_code, item_fields, as_dict=True)
for key in item_fields:
if not d.get(key): d.set(key, item.get(key))
def validate_lead_cust(self):
if self.enquiry_from == 'Lead':
if not self.lead:
frappe.throw(_("Lead must be set if Opportunity is made from Lead"))
else:
self.customer = None
elif self.enquiry_from == 'Customer':
if not self.customer:
msgprint("Customer is mandatory if 'Opportunity From' is selected as Customer", raise_exception=1)
else:
self.lead = None
@frappe.whitelist()
def get_item_details(item_code):
item = frappe.db.sql("""select item_name, stock_uom, image, description, item_group, brand
from `tabItem` where name = %s""", item_code, as_dict=1)
return {
'item_name': item and item[0]['item_name'] or '',
'uom': item and item[0]['stock_uom'] or '',
'description': item and item[0]['description'] or '',
'image': item and item[0]['image'] or '',
'item_group': item and item[0]['item_group'] or '',
'brand': item and item[0]['brand'] or ''
}
@frappe.whitelist()
def make_quotation(source_name, target_doc=None):
def set_missing_values(source, target):
quotation = frappe.get_doc(target)
company_currency = frappe.db.get_value("Company", quotation.company, "default_currency")
party_account_currency = frappe.db.get_value("Customer", quotation.customer, "party_account_currency")
if company_currency == party_account_currency:
exchange_rate = 1
else:
exchange_rate = get_exchange_rate(party_account_currency, company_currency)
quotation.currency = party_account_currency or company_currency
quotation.conversion_rate = exchange_rate
quotation.run_method("set_missing_values")
quotation.run_method("calculate_taxes_and_totals")
doclist = get_mapped_doc("Opportunity", source_name, {
"Opportunity": {
"doctype": "Quotation",
"field_map": {
"enquiry_from": "quotation_to",
"enquiry_type": "order_type",
"name": "enq_no",
}
},
"Opportunity Item": {
"doctype": "Quotation Item",
"field_map": {
"parent": "prevdoc_docname",
"parenttype": "prevdoc_doctype",
"uom": "stock_uom"
},
"add_if_empty": True
}
}, target_doc, set_missing_values)
return doclist
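# Illustrative usage sketch (the opportunity name is hypothetical):
#     quotation = make_quotation("OPTY-00001")
#     quotation.insert()   # persists the mapped Quotation draft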
@frappe.whitelist()
def set_multiple_status(names, status):
names = json.loads(names)
for name in names:
opp = frappe.get_doc("Opportunity", name)
opp.status = status
opp.save()
|
hanselke/erpnext-1
|
erpnext/crm/doctype/opportunity/opportunity.py
|
Python
|
agpl-3.0
| 7,551
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RMlbench(RPackage):
"""A collection of artificial and real-world machine learning benchmark
problems, including, e.g., several data sets from the UCI repository."""
homepage = "https://cloud.r-project.org/package=mlbench"
url = "https://cloud.r-project.org/src/contrib/mlbench_2.1-1.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/mlbench"
version('2.1-1', sha256='748141d56531a39dc4d37cf0a5165a40b653a04c507e916854053ed77119e0e6')
depends_on('r@2.10:', type=('build', 'run'))
|
LLNL/spack
|
var/spack/repos/builtin/packages/r-mlbench/package.py
|
Python
|
lgpl-2.1
| 759
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyAiohttpCors(PythonPackage):
"""aiohttp_cors library implements Cross Origin Resource Sharing (CORS)
support for aiohttp asyncio-powered asynchronous HTTP server."""
homepage = "https://github.com/aio-libs/aiohttp-cors"
pypi = "aiohttp_cors/aiohttp-cors-0.7.0.tar.gz"
version('0.7.0', sha256='4d39c6d7100fd9764ed1caf8cebf0eb01bf5e3f24e2e073fda6234bc48b19f5d')
depends_on('python@3.4.1:', type=('build', 'run'))
depends_on('py-setuptools@20.8.1:', type='build')
depends_on('py-aiohttp@1.1:', type=('build', 'run'))
depends_on('py-typing', when='^python@:3.4', type=('build', 'run'))
|
LLNL/spack
|
var/spack/repos/builtin/packages/py-aiohttp-cors/package.py
|
Python
|
lgpl-2.1
| 827
|
from pyxb.bundles.opengis.raw.ows_1_1 import *
|
CantemoInternal/pyxb
|
pyxb/bundles/opengis/ows_1_1.py
|
Python
|
apache-2.0
| 47
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Testing."""
# pylint: disable=g-bad-import-order
from tensorflow.python.framework import test_util as _test_util
from tensorflow.python.platform import googletest as _googletest
# pylint: disable=unused-import
from tensorflow.python.framework.test_util import assert_equal_graph_def
from tensorflow.python.framework.test_util import create_local_cluster
from tensorflow.python.framework.test_util import TensorFlowTestCase as TestCase
from tensorflow.python.framework.test_util import gpu_device_name
from tensorflow.python.framework.test_util import is_gpu_available
from tensorflow.python.ops.gradient_checker import compute_gradient_error
from tensorflow.python.ops.gradient_checker import compute_gradient
# pylint: enable=unused-import,g-bad-import-order
import functools
import sys
from tensorflow.python.util.tf_export import tf_export
if sys.version_info.major == 2:
import mock # pylint: disable=g-import-not-at-top,unused-import
else:
from unittest import mock # pylint: disable=g-import-not-at-top,g-importing-member
tf_export(v1=['test.mock'])(mock)
# Import Benchmark class
Benchmark = _googletest.Benchmark # pylint: disable=invalid-name
# Import StubOutForTesting class
StubOutForTesting = _googletest.StubOutForTesting # pylint: disable=invalid-name
@tf_export('test.main')
def main(argv=None):
"""Runs all unit tests."""
_test_util.InstallStackTraceHandler()
return _googletest.main(argv)
@tf_export(v1=['test.get_temp_dir'])
def get_temp_dir():
"""Returns a temporary directory for use during tests.
There is no need to delete the directory after the test.
@compatibility(TF2)
This function is removed in TF2. Please use `TestCase.get_temp_dir` instead
in a test case.
Outside of a unit test, obtain a temporary directory through Python's
`tempfile` module.
@end_compatibility
Returns:
The temporary directory.
"""
return _googletest.GetTempDir()
@tf_export(v1=['test.test_src_dir_path'])
def test_src_dir_path(relative_path):
"""Creates an absolute test srcdir path given a relative path.
Args:
relative_path: a path relative to tensorflow root.
e.g. "core/platform".
Returns:
An absolute path to the linked in runfiles.
"""
return _googletest.test_src_dir_path(relative_path)
@tf_export('test.is_built_with_cuda')
def is_built_with_cuda():
"""Returns whether TensorFlow was built with CUDA (GPU) support.
This method should only be used in tests written with `tf.test.TestCase`. A
typical usage is to skip tests that should only run with CUDA (GPU).
>>> class MyTest(tf.test.TestCase):
...
... def test_add_on_gpu(self):
... if not tf.test.is_built_with_cuda():
... self.skipTest("test is only applicable on GPU")
...
... with tf.device("GPU:0"):
... self.assertEqual(tf.math.add(1.0, 2.0), 3.0)
TensorFlow official binary is built with CUDA.
"""
return _test_util.IsGoogleCudaEnabled()
@tf_export('test.is_built_with_rocm')
def is_built_with_rocm():
"""Returns whether TensorFlow was built with ROCm (GPU) support.
This method should only be used in tests written with `tf.test.TestCase`. A
typical usage is to skip tests that should only run with ROCm (GPU).
>>> class MyTest(tf.test.TestCase):
...
... def test_add_on_gpu(self):
... if not tf.test.is_built_with_rocm():
... self.skipTest("test is only applicable on GPU")
...
... with tf.device("GPU:0"):
... self.assertEqual(tf.math.add(1.0, 2.0), 3.0)
TensorFlow official binary is NOT built with ROCm.
"""
return _test_util.IsBuiltWithROCm()
@tf_export('test.disable_with_predicate')
def disable_with_predicate(pred, skip_message):
"""Disables the test if pred is true."""
def decorator_disable_with_predicate(func):
@functools.wraps(func)
def wrapper_disable_with_predicate(self, *args, **kwargs):
if pred():
self.skipTest(skip_message)
else:
return func(self, *args, **kwargs)
return wrapper_disable_with_predicate
return decorator_disable_with_predicate
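# Illustrative usage sketch (the test class and method are hypothetical): the
# decorator evaluates `pred` at call time and skips the test when it is true.
#     class MyGpuTest(TestCase):
#         @disable_with_predicate(
#             pred=lambda: not is_built_with_cuda(),
#             skip_message="requires a CUDA-enabled build")
#         def test_cuda_only_path(self):
#             ...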
@tf_export('test.is_built_with_gpu_support')
def is_built_with_gpu_support():
"""Returns whether TensorFlow was built with GPU (CUDA or ROCm) support.
This method should only be used in tests written with `tf.test.TestCase`. A
typical usage is to skip tests that should only run with GPU.
>>> class MyTest(tf.test.TestCase):
...
... def test_add_on_gpu(self):
... if not tf.test.is_built_with_gpu_support():
... self.skipTest("test is only applicable on GPU")
...
... with tf.device("GPU:0"):
... self.assertEqual(tf.math.add(1.0, 2.0), 3.0)
TensorFlow official binary is built with CUDA GPU support.
"""
return is_built_with_cuda() or is_built_with_rocm()
@tf_export('test.is_built_with_xla')
def is_built_with_xla():
"""Returns whether TensorFlow was built with XLA support.
This method should only be used in tests written with `tf.test.TestCase`. A
typical usage is to skip tests that should only run with XLA.
>>> class MyTest(tf.test.TestCase):
...
... def test_add_on_xla(self):
... if not tf.test.is_built_with_xla():
... self.skipTest("test is only applicable on XLA")
... @tf.function(jit_compile=True)
... def add(x, y):
... return tf.math.add(x, y)
...
... self.assertEqual(add(tf.ones(()), tf.ones(())), 2.0)
TensorFlow official binary is built with XLA.
"""
return _test_util.IsBuiltWithXLA()
|
tensorflow/tensorflow
|
tensorflow/python/platform/test.py
|
Python
|
apache-2.0
| 6,216
|
# Copyright 2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utility functions for working with inventories.
"""
import csv
import datetime
import functools
import fnmatch
import gzip
import json
import random
import tempfile
from six.moves.urllib_parse import unquote_plus
from c7n.utils import chunks
def load_manifest_file(client, bucket, schema, versioned, ifilters, key_info):
"""Given an inventory csv file, return an iterator over keys
"""
# To avoid thundering herd downloads, we do an immediate yield for
# interspersed i/o
yield None
# Inline these values to avoid the local var lookup, they are constants
# rKey = schema['Key'] # 1
# rIsLatest = schema['IsLatest'] # 3
# rVersionId = schema['VersionId'] # 2
with tempfile.NamedTemporaryFile() as fh:
client.download_fileobj(Bucket=bucket, Key=key_info['key'], Fileobj=fh)
fh.seek(0)
reader = csv.reader(gzip.GzipFile(fileobj=fh, mode='r'))
for key_set in chunks(reader, 1000):
keys = []
for kr in key_set:
k = kr[1]
if inventory_filter(ifilters, schema, kr):
continue
k = unquote_plus(k)
if versioned:
if kr[3] == 'true':
keys.append((k, kr[2], True))
else:
keys.append((k, kr[2]))
else:
keys.append(k)
yield keys
def inventory_filter(ifilters, ischema, kr):
if 'IsDeleteMarker' in ischema and kr[ischema['IsDeleteMarker']] == 'true':
return True
for f in ifilters:
if f(ischema, kr):
return True
return False
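# Illustrative sketch (hypothetical filter, not part of this module): each
# entry in `ifilters` is a callable taking (schema, row) that returns True to
# skip the key, e.g. dropping GLACIER-class objects when the inventory
# includes a StorageClass column:
#     def glacier_filter(ischema, kr):
#         return ('StorageClass' in ischema and
#                 kr[ischema['StorageClass']] == 'GLACIER')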
def load_bucket_inventory(
client, inventory_bucket, inventory_prefix, versioned, ifilters):
"""Given an inventory location for a bucket, return an iterator over keys
on the most recent delivered manifest.
"""
now = datetime.datetime.now()
key_prefix = "%s/%s" % (inventory_prefix, now.strftime('%Y-%m-'))
keys = client.list_objects(
Bucket=inventory_bucket, Prefix=key_prefix).get('Contents', [])
keys = [k['Key'] for k in keys if k['Key'].endswith('.json')]
keys.sort()
if not keys:
# no manifest delivery
return None
latest_manifest = keys[-1]
manifest = client.get_object(Bucket=inventory_bucket, Key=latest_manifest)
manifest_data = json.load(manifest['Body'])
# schema as column name to column index mapping
schema = dict([(k, i) for i, k in enumerate(
[n.strip() for n in manifest_data['fileSchema'].split(',')])])
processor = functools.partial(
load_manifest_file, client, inventory_bucket,
schema, versioned, ifilters)
generators = map(processor, manifest_data.get('files', ()))
return random_chain(generators)
def random_chain(generators):
"""Generator to generate a set of keys from
from a set of generators, each generator is selected
at random and consumed to exhaustion.
"""
while generators:
g = random.choice(generators)
try:
v = g.next()
if v is None:
continue
yield v
except StopIteration:
generators.remove(g)
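# Illustrative sketch (not part of the original module): random_chain drains a
# list of generators in random order, skipping the leading None placeholder
# that load_manifest_file yields (Python 2, matching g.next() above):
#     def _demo_gen(items):
#         yield None
#         for item in items:
#             yield item
#     list(random_chain([_demo_gen(['a', 'b']), _demo_gen(['c'])]))
#     # -> some interleaving of 'a', 'b', 'c'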
def get_bucket_inventory(client, bucket, inventory_id):
"""Check a bucket for a named inventory, and return the destination."""
inventories = client.list_bucket_inventory_configurations(
Bucket=bucket).get('InventoryConfigurationList', [])
inventories = {i['Id']: i for i in inventories}
found = fnmatch.filter(inventories, inventory_id)
if not found:
return None
i = inventories[found.pop()]
s3_info = i['Destination']['S3BucketDestination']
return {'bucket': s3_info['Bucket'].rsplit(':')[-1],
'prefix': "%s/%s/%s" % (s3_info['Prefix'], bucket, i['Id'])}
|
taohungyang/cloud-custodian
|
tools/c7n_salactus/c7n_salactus/inventory.py
|
Python
|
apache-2.0
| 4,453
|
from __future__ import unicode_literals
from itertools import chain
from django.conf import settings
from django.contrib.auth import models as auth_app
from django.contrib.auth.management import create_permissions
from django.contrib.auth.models import Group, Permission, AnonymousUser
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from guardian.core import ObjectPermissionChecker
from guardian.compat import get_user_model
from guardian.exceptions import NotUserNorGroup
from guardian.models import UserObjectPermission, GroupObjectPermission
from guardian.shortcuts import assign_perm
User = get_user_model()
class ObjectPermissionTestCase(TestCase):
def setUp(self):
self.group, created = Group.objects.get_or_create(name='jackGroup')
self.user, created = User.objects.get_or_create(username='jack')
self.user.groups.add(self.group)
self.ctype = ContentType.objects.create(name='foo', model='bar',
app_label='fake-for-guardian-tests')
self.anonymous_user, created = User.objects.get_or_create(
id=settings.ANONYMOUS_USER_ID,
username='AnonymousUser')
class ObjectPermissionCheckerTest(ObjectPermissionTestCase):
def setUp(self):
super(ObjectPermissionCheckerTest, self).setUp()
# Required if MySQL backend is used :/
create_permissions(auth_app, [], 1)
def test_cache_for_queries_count(self):
settings.DEBUG = True
try:
from django.db import connection
ContentType.objects.clear_cache()
checker = ObjectPermissionChecker(self.user)
# has_perm on Checker should spawn only one query plus one extra
# for fetching the content type first time we check for specific
# model and two more content types as there are additional checks
# at get_user_obj_perms_model and get_group_obj_perms_model
query_count = len(connection.queries)
res = checker.has_perm("change_group", self.group)
self.assertEqual(len(connection.queries), query_count + 4)
# Checking again shouldn't spawn any queries
query_count = len(connection.queries)
res_new = checker.has_perm("change_group", self.group)
self.assertEqual(res, res_new)
self.assertEqual(len(connection.queries), query_count)
# Checking for other permission but for Group object again
# shouldn't spawn any query too
query_count = len(connection.queries)
checker.has_perm("delete_group", self.group)
self.assertEqual(len(connection.queries), query_count)
# Checking for same model but other instance should spawn 1 query
new_group = Group.objects.create(name='new-group')
query_count = len(connection.queries)
checker.has_perm("change_group", new_group)
self.assertEqual(len(connection.queries), query_count + 1)
            # Checking permissions for another model should spawn 2 queries
            # (the content type and the actual permissions for the object)
query_count = len(connection.queries)
checker.has_perm("change_user", self.user)
self.assertEqual(len(connection.queries), query_count + 2)
finally:
settings.DEBUG = False
def test_init(self):
self.assertRaises(NotUserNorGroup, ObjectPermissionChecker,
user_or_group=ContentType())
self.assertRaises(NotUserNorGroup, ObjectPermissionChecker)
def test_anonymous_user(self):
user = AnonymousUser()
check = ObjectPermissionChecker(user)
# assert anonymous user has no object permissions at all for obj
        self.assertTrue([] == list(check.get_perms(self.ctype)))
def test_superuser(self):
user = User.objects.create(username='superuser', is_superuser=True)
check = ObjectPermissionChecker(user)
ctype = ContentType.objects.get_for_model(self.ctype)
perms = sorted(chain(*Permission.objects
.filter(content_type=ctype)
.values_list('codename')))
self.assertEqual(perms, check.get_perms(self.ctype))
for perm in perms:
self.assertTrue(check.has_perm(perm, self.ctype))
def test_not_active_superuser(self):
user = User.objects.create(username='not_active_superuser',
is_superuser=True, is_active=False)
check = ObjectPermissionChecker(user)
ctype = ContentType.objects.get_for_model(self.ctype)
perms = sorted(chain(*Permission.objects
.filter(content_type=ctype)
.values_list('codename')))
self.assertEqual(check.get_perms(self.ctype), [])
for perm in perms:
self.assertFalse(check.has_perm(perm, self.ctype))
def test_not_active_user(self):
user = User.objects.create(username='notactive')
assign_perm("change_contenttype", user, self.ctype)
# new ObjectPermissionChecker is created for each User.has_perm call
self.assertTrue(user.has_perm("change_contenttype", self.ctype))
user.is_active = False
self.assertFalse(user.has_perm("change_contenttype", self.ctype))
        # use a single checker only (the user's is_active attribute should be
        # checked before trying to use the cache)
user = User.objects.create(username='notactive-cache')
assign_perm("change_contenttype", user, self.ctype)
check = ObjectPermissionChecker(user)
self.assertTrue(check.has_perm("change_contenttype", self.ctype))
user.is_active = False
self.assertFalse(check.has_perm("change_contenttype", self.ctype))
def test_get_perms(self):
group = Group.objects.create(name='group')
obj1 = ContentType.objects.create(name='ct1', model='foo',
app_label='guardian-tests')
obj2 = ContentType.objects.create(name='ct2', model='bar',
app_label='guardian-tests')
assign_perms = {
group: ('change_group', 'delete_group'),
obj1: ('change_contenttype', 'delete_contenttype'),
obj2: ('delete_contenttype',),
}
check = ObjectPermissionChecker(self.user)
for obj, perms in assign_perms.items():
for perm in perms:
UserObjectPermission.objects.assign_perm(perm, self.user, obj)
self.assertEqual(sorted(perms), sorted(check.get_perms(obj)))
check = ObjectPermissionChecker(self.group)
for obj, perms in assign_perms.items():
for perm in perms:
GroupObjectPermission.objects.assign_perm(perm, self.group, obj)
self.assertEqual(sorted(perms), sorted(check.get_perms(obj)))
|
TabbedOut/django-guardian
|
guardian/tests/core_test.py
|
Python
|
bsd-2-clause
| 6,852
|
#!/usr/bin/env python
# *-* coding: UTF-8 *-*
"""
The International Civil Aviation Organization proposes an alphabet in which
each letter is assigned a word, in order to avoid problems in understanding
critical messages.
To keep a history of the conversations, it was decided to transcribe them
according to the following rules:
- each word is written on a single line
- the letters of the alphabet are separated by a comma
The following task has been assigned to you:
Write a program that receives a file containing the raw message
(written using the ICAO alphabet) and generates a file named
icao_intrare that will contain the original message.
Below is a dictionary containing a version of the ICAO alphabet:
"""
from __future__ import print_function
ICAO = {
'a': 'alfa', 'b': 'bravo', 'c': 'charlie', 'd': 'delta', 'e': 'echo',
'f': 'foxtrot', 'g': 'golf', 'h': 'hotel', 'i': 'india', 'j': 'juliett',
'k': 'kilo', 'l': 'lima', 'm': 'mike', 'n': 'november', 'o': 'oscar',
'p': 'papa', 'q': 'quebec', 'r': 'romeo', 's': 'sierra', 't': 'tango',
'u': 'uniform', 'v': 'victor', 'w': 'whiskey', 'x': 'x-ray', 'y': 'yankee',
'z': 'zulu'
}
def decripteaza(mesaj, fisier):
"""
    The function looks each word up in the dictionary and, for the value found
    in the given message, writes the corresponding key to the file passed as a parameter.
"""
for cuvant in mesaj.split():
for key, value in ICAO.items():
if cuvant == value:
fisier.write(key)
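# An alternative sketch (not part of the original solution): the same lookup
# can be done with a reversed dictionary instead of scanning ICAO once per
# word, which keeps the transcription linear in the message length.
ICAO_INVERS = {valoare: litera for litera, valoare in ICAO.items()}
def decripteaza_cu_dict(mesaj, fisier):
    """Variant of decripteaza that uses the reversed ICAO dictionary."""
    for cuvant in mesaj.split():
        if cuvant in ICAO_INVERS:
            fisier.write(ICAO_INVERS[cuvant])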
def din_icao(fisier_intrare):
"""
    The function receives the path to the file containing the raw message and
    generates a file named icao_intrare that will contain the original message.
"""
try:
fisier = open(fisier_intrare, "r")
mesaje = fisier.read()
fisier.close()
except IOError:
print("Nu am putut obtine mesajele.")
return
fisier = open("icao_intrare", "w+")
for mesaj in mesaje.splitlines():
decripteaza(mesaj, fisier)
fisier.write(" ")
fisier.close()
if __name__ == "__main__":
din_icao("mesaj.icao")
|
iulianbute/labs
|
python/solutii/anda_ungureanu/from_icao.py
|
Python
|
mit
| 2,123
|
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""``tornado.web`` provides a simple web framework with asynchronous
features that allow it to scale to large numbers of open connections,
making it ideal for `long polling
<http://en.wikipedia.org/wiki/Push_technology#Long_polling>`_.
Here is a simple "Hello, world" example app:
.. testcode::
import tornado.ioloop
import tornado.web
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.write("Hello, world")
if __name__ == "__main__":
application = tornado.web.Application([
(r"/", MainHandler),
])
application.listen(8888)
tornado.ioloop.IOLoop.current().start()
.. testoutput::
:hide:
See the :doc:`guide` for additional information.
Thread-safety notes
-------------------
In general, methods on `RequestHandler` and elsewhere in Tornado are
not thread-safe. In particular, methods such as
`~RequestHandler.write()`, `~RequestHandler.finish()`, and
`~RequestHandler.flush()` must only be called from the main thread. If
you use multiple threads it is important to use `.IOLoop.add_callback`
to transfer control back to the main thread before finishing the
request.
"""
from __future__ import (absolute_import, division,
print_function, with_statement)
import base64
import binascii
import datetime
import email.utils
import functools
import gzip
import hashlib
import hmac
import mimetypes
import numbers
import os.path
import re
import stat
import sys
import threading
import time
import tornado
import traceback
import types
from io import BytesIO
from tornado.concurrent import Future, is_future
from tornado import escape
from tornado import gen
from tornado import httputil
from tornado import iostream
from tornado import locale
from tornado.log import access_log, app_log, gen_log
from tornado import stack_context
from tornado import template
from tornado.escape import utf8, _unicode
from tornado.util import (import_object, ObjectDict, raise_exc_info,
unicode_type, _websocket_mask)
from tornado.httputil import split_host_and_port
try:
import Cookie # py2
except ImportError:
import http.cookies as Cookie # py3
try:
import urlparse # py2
except ImportError:
import urllib.parse as urlparse # py3
try:
from urllib import urlencode # py2
except ImportError:
from urllib.parse import urlencode # py3
MIN_SUPPORTED_SIGNED_VALUE_VERSION = 1
"""The oldest signed value version supported by this version of Tornado.
Signed values older than this version cannot be decoded.
.. versionadded:: 3.2.1
"""
MAX_SUPPORTED_SIGNED_VALUE_VERSION = 2
"""The newest signed value version supported by this version of Tornado.
Signed values newer than this version cannot be decoded.
.. versionadded:: 3.2.1
"""
DEFAULT_SIGNED_VALUE_VERSION = 2
"""The signed value version produced by `.RequestHandler.create_signed_value`.
May be overridden by passing a ``version`` keyword argument.
.. versionadded:: 3.2.1
"""
DEFAULT_SIGNED_VALUE_MIN_VERSION = 1
"""The oldest signed value accepted by `.RequestHandler.get_secure_cookie`.
May be overridden by passing a ``min_version`` keyword argument.
.. versionadded:: 3.2.1
"""
class RequestHandler(object):
"""Base class for HTTP request handlers.
Subclasses must define at least one of the methods defined in the
"Entry points" section below.
"""
SUPPORTED_METHODS = ("GET", "HEAD", "POST", "DELETE", "PATCH", "PUT",
"OPTIONS")
_template_loaders = {} # {path: template.BaseLoader}
_template_loader_lock = threading.Lock()
_remove_control_chars_regex = re.compile(r"[\x00-\x08\x0e-\x1f]")
def __init__(self, application, request, **kwargs):
super(RequestHandler, self).__init__()
self.application = application
self.request = request
self._headers_written = False
self._finished = False
self._auto_finish = True
self._transforms = None # will be set in _execute
self._prepared_future = None
self.path_args = None
self.path_kwargs = None
self.ui = ObjectDict((n, self._ui_method(m)) for n, m in
application.ui_methods.items())
# UIModules are available as both `modules` and `_tt_modules` in the
# template namespace. Historically only `modules` was available
# but could be clobbered by user additions to the namespace.
# The template {% module %} directive looks in `_tt_modules` to avoid
# possible conflicts.
self.ui["_tt_modules"] = _UIModuleNamespace(self,
application.ui_modules)
self.ui["modules"] = self.ui["_tt_modules"]
self.clear()
self.request.connection.set_close_callback(self.on_connection_close)
self.initialize(**kwargs)
def initialize(self):
"""Hook for subclass initialization.
A dictionary passed as the third argument of a url spec will be
supplied as keyword arguments to initialize().
Example::
class ProfileHandler(RequestHandler):
def initialize(self, database):
self.database = database
def get(self, username):
...
app = Application([
(r'/user/(.*)', ProfileHandler, dict(database=database)),
])
"""
pass
@property
def settings(self):
"""An alias for `self.application.settings <Application.settings>`."""
return self.application.settings
def head(self, *args, **kwargs):
raise HTTPError(405)
def get(self, *args, **kwargs):
raise HTTPError(405)
def post(self, *args, **kwargs):
raise HTTPError(405)
def delete(self, *args, **kwargs):
raise HTTPError(405)
def patch(self, *args, **kwargs):
raise HTTPError(405)
def put(self, *args, **kwargs):
raise HTTPError(405)
def options(self, *args, **kwargs):
raise HTTPError(405)
def prepare(self):
"""Called at the beginning of a request before `get`/`post`/etc.
Override this method to perform common initialization regardless
of the request method.
Asynchronous support: Decorate this method with `.gen.coroutine`
or `.return_future` to make it asynchronous (the
`asynchronous` decorator cannot be used on `prepare`).
If this method returns a `.Future` execution will not proceed
until the `.Future` is done.
.. versionadded:: 3.1
Asynchronous support.
"""
pass
def on_finish(self):
"""Called after the end of a request.
Override this method to perform cleanup, logging, etc.
This method is a counterpart to `prepare`. ``on_finish`` may
not produce any output, as it is called after the response
has been sent to the client.
"""
pass
def on_connection_close(self):
"""Called in async handlers if the client closed the connection.
Override this to clean up resources associated with
long-lived connections. Note that this method is called only if
the connection was closed during asynchronous processing; if you
need to do cleanup after every request override `on_finish`
instead.
Proxies may keep a connection open for a time (perhaps
indefinitely) after the client has gone away, so this method
may not be called promptly after the end user closes their
connection.
"""
if _has_stream_request_body(self.__class__):
if not self.request.body.done():
self.request.body.set_exception(iostream.StreamClosedError())
self.request.body.exception()
def clear(self):
"""Resets all headers and content for this response."""
self._headers = httputil.HTTPHeaders({
"Server": "TornadoServer/%s" % tornado.version,
"Content-Type": "text/html; charset=UTF-8",
"Date": httputil.format_timestamp(time.time()),
})
self.set_default_headers()
self._write_buffer = []
self._status_code = 200
self._reason = httputil.responses[200]
def set_default_headers(self):
"""Override this to set HTTP headers at the beginning of the request.
For example, this is the place to set a custom ``Server`` header.
Note that setting such headers in the normal flow of request
processing may not do what you want, since headers may be reset
during error handling.
"""
pass
def set_status(self, status_code, reason=None):
"""Sets the status code for our response.
:arg int status_code: Response status code. If ``reason`` is ``None``,
it must be present in `httplib.responses <http.client.responses>`.
:arg string reason: Human-readable reason phrase describing the status
code. If ``None``, it will be filled in from
`httplib.responses <http.client.responses>`.
"""
self._status_code = status_code
if reason is not None:
self._reason = escape.native_str(reason)
else:
try:
self._reason = httputil.responses[status_code]
except KeyError:
raise ValueError("unknown status code %d", status_code)
def get_status(self):
"""Returns the status code for our response."""
return self._status_code
def set_header(self, name, value):
"""Sets the given response header name and value.
If a datetime is given, we automatically format it according to the
HTTP specification. If the value is not a string, we convert it to
a string. All header values are then encoded as UTF-8.
"""
self._headers[name] = self._convert_header_value(value)
def add_header(self, name, value):
"""Adds the given response header and value.
Unlike `set_header`, `add_header` may be called multiple times
to return multiple values for the same header.
"""
self._headers.add(name, self._convert_header_value(value))
def clear_header(self, name):
"""Clears an outgoing header, undoing a previous `set_header` call.
Note that this method does not apply to multi-valued headers
set by `add_header`.
"""
if name in self._headers:
del self._headers[name]
_INVALID_HEADER_CHAR_RE = re.compile(br"[\x00-\x1f]")
def _convert_header_value(self, value):
if isinstance(value, bytes):
pass
elif isinstance(value, unicode_type):
value = value.encode('utf-8')
elif isinstance(value, numbers.Integral):
# return immediately since we know the converted value will be safe
return str(value)
elif isinstance(value, datetime.datetime):
return httputil.format_timestamp(value)
else:
raise TypeError("Unsupported header value %r" % value)
# If \n is allowed into the header, it is possible to inject
# additional headers or split the request. Also cap length to
# prevent obviously erroneous values.
if (len(value) > 4000 or
RequestHandler._INVALID_HEADER_CHAR_RE.search(value)):
raise ValueError("Unsafe header value %r", value)
return value
_ARG_DEFAULT = []
def get_argument(self, name, default=_ARG_DEFAULT, strip=True):
"""Returns the value of the argument with the given name.
If default is not provided, the argument is considered to be
required, and we raise a `MissingArgumentError` if it is missing.
If the argument appears in the url more than once, we return the
last value.
The returned value is always unicode.
"""
return self._get_argument(name, default, self.request.arguments, strip)
def get_arguments(self, name, strip=True):
"""Returns a list of the arguments with the given name.
If the argument is not present, returns an empty list.
The returned values are always unicode.
"""
# Make sure `get_arguments` isn't accidentally being called with a
# positional argument that's assumed to be a default (like in
# `get_argument`.)
assert isinstance(strip, bool)
return self._get_arguments(name, self.request.arguments, strip)
def get_body_argument(self, name, default=_ARG_DEFAULT, strip=True):
"""Returns the value of the argument with the given name
from the request body.
If default is not provided, the argument is considered to be
required, and we raise a `MissingArgumentError` if it is missing.
If the argument appears in the url more than once, we return the
last value.
The returned value is always unicode.
.. versionadded:: 3.2
"""
return self._get_argument(name, default, self.request.body_arguments,
strip)
def get_body_arguments(self, name, strip=True):
"""Returns a list of the body arguments with the given name.
If the argument is not present, returns an empty list.
The returned values are always unicode.
.. versionadded:: 3.2
"""
return self._get_arguments(name, self.request.body_arguments, strip)
def get_query_argument(self, name, default=_ARG_DEFAULT, strip=True):
"""Returns the value of the argument with the given name
from the request query string.
If default is not provided, the argument is considered to be
required, and we raise a `MissingArgumentError` if it is missing.
If the argument appears in the url more than once, we return the
last value.
The returned value is always unicode.
.. versionadded:: 3.2
"""
return self._get_argument(name, default,
self.request.query_arguments, strip)
def get_query_arguments(self, name, strip=True):
"""Returns a list of the query arguments with the given name.
If the argument is not present, returns an empty list.
The returned values are always unicode.
.. versionadded:: 3.2
"""
return self._get_arguments(name, self.request.query_arguments, strip)
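    # Illustrative usage sketch (handler code is hypothetical): a missing
    # required argument raises MissingArgumentError (rendered as HTTP 400),
    # while supplying a default suppresses the error:
    #     name = self.get_argument("name")        # error if absent
    #     page = self.get_argument("page", "1")   # "1" if absent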
def _get_argument(self, name, default, source, strip=True):
args = self._get_arguments(name, source, strip=strip)
if not args:
if default is self._ARG_DEFAULT:
raise MissingArgumentError(name)
return default
return args[-1]
def _get_arguments(self, name, source, strip=True):
values = []
for v in source.get(name, []):
v = self.decode_argument(v, name=name)
if isinstance(v, unicode_type):
# Get rid of any weird control chars (unless decoding gave
# us bytes, in which case leave it alone)
v = RequestHandler._remove_control_chars_regex.sub(" ", v)
if strip:
v = v.strip()
values.append(v)
return values
def decode_argument(self, value, name=None):
"""Decodes an argument from the request.
The argument has been percent-decoded and is now a byte string.
By default, this method decodes the argument as utf-8 and returns
a unicode string, but this may be overridden in subclasses.
This method is used as a filter for both `get_argument()` and for
values extracted from the url and passed to `get()`/`post()`/etc.
The name of the argument is provided if known, but may be None
(e.g. for unnamed groups in the url regex).
"""
try:
return _unicode(value)
except UnicodeDecodeError:
raise HTTPError(400, "Invalid unicode in %s: %r" %
(name or "url", value[:40]))
@property
def cookies(self):
"""An alias for
`self.request.cookies <.httputil.HTTPServerRequest.cookies>`."""
return self.request.cookies
def get_cookie(self, name, default=None):
"""Gets the value of the cookie with the given name, else default."""
if self.request.cookies is not None and name in self.request.cookies:
return self.request.cookies[name].value
return default
def set_cookie(self, name, value, domain=None, expires=None, path="/",
expires_days=None, **kwargs):
"""Sets the given cookie name/value with the given options.
Additional keyword arguments are set on the Cookie.Morsel
directly.
See http://docs.python.org/library/cookie.html#morsel-objects
for available attributes.
"""
# The cookie library only accepts type str, in both python 2 and 3
name = escape.native_str(name)
value = escape.native_str(value)
if re.search(r"[\x00-\x20]", name + value):
# Don't let us accidentally inject bad stuff
raise ValueError("Invalid cookie %r: %r" % (name, value))
if not hasattr(self, "_new_cookie"):
self._new_cookie = Cookie.SimpleCookie()
if name in self._new_cookie:
del self._new_cookie[name]
self._new_cookie[name] = value
morsel = self._new_cookie[name]
if domain:
morsel["domain"] = domain
if expires_days is not None and not expires:
expires = datetime.datetime.utcnow() + datetime.timedelta(
days=expires_days)
if expires:
morsel["expires"] = httputil.format_timestamp(expires)
if path:
morsel["path"] = path
for k, v in kwargs.items():
if k == 'max_age':
k = 'max-age'
# skip falsy values for httponly and secure flags because
# SimpleCookie sets them regardless
if k in ['httponly', 'secure'] and not v:
continue
morsel[k] = v
def clear_cookie(self, name, path="/", domain=None):
"""Deletes the cookie with the given name.
Due to limitations of the cookie protocol, you must pass the same
path and domain to clear a cookie as were used when that cookie
was set (but there is no way to find out on the server side
which values were used for a given cookie).
"""
expires = datetime.datetime.utcnow() - datetime.timedelta(days=365)
self.set_cookie(name, value="", path=path, expires=expires,
domain=domain)
def clear_all_cookies(self, path="/", domain=None):
"""Deletes all the cookies the user sent with this request.
See `clear_cookie` for more information on the path and domain
parameters.
.. versionchanged:: 3.2
Added the ``path`` and ``domain`` parameters.
"""
for name in self.request.cookies:
self.clear_cookie(name, path=path, domain=domain)
def set_secure_cookie(self, name, value, expires_days=30, version=None,
**kwargs):
"""Signs and timestamps a cookie so it cannot be forged.
You must specify the ``cookie_secret`` setting in your Application
to use this method. It should be a long, random sequence of bytes
to be used as the HMAC secret for the signature.
To read a cookie set with this method, use `get_secure_cookie()`.
Note that the ``expires_days`` parameter sets the lifetime of the
cookie in the browser, but is independent of the ``max_age_days``
parameter to `get_secure_cookie`.
Secure cookies may contain arbitrary byte values, not just unicode
strings (unlike regular cookies)
.. versionchanged:: 3.2.1
Added the ``version`` argument. Introduced cookie version 2
and made it the default.
"""
self.set_cookie(name, self.create_signed_value(name, value,
version=version),
expires_days=expires_days, **kwargs)
def create_signed_value(self, name, value, version=None):
"""Signs and timestamps a string so it cannot be forged.
Normally used via set_secure_cookie, but provided as a separate
method for non-cookie uses. To decode a value not stored
as a cookie use the optional value argument to get_secure_cookie.
.. versionchanged:: 3.2.1
Added the ``version`` argument. Introduced cookie version 2
and made it the default.
"""
self.require_setting("cookie_secret", "secure cookies")
secret = self.application.settings["cookie_secret"]
key_version = None
if isinstance(secret, dict):
if self.application.settings.get("key_version") is None:
raise Exception("key_version setting must be used for secret_key dicts")
key_version = self.application.settings["key_version"]
return create_signed_value(secret, name, value, version=version,
key_version=key_version)
def get_secure_cookie(self, name, value=None, max_age_days=31,
min_version=None):
"""Returns the given signed cookie if it validates, or None.
The decoded cookie value is returned as a byte string (unlike
`get_cookie`).
.. versionchanged:: 3.2.1
Added the ``min_version`` argument. Introduced cookie version 2;
both versions 1 and 2 are accepted by default.
"""
self.require_setting("cookie_secret", "secure cookies")
if value is None:
value = self.get_cookie(name)
return decode_signed_value(self.application.settings["cookie_secret"],
name, value, max_age_days=max_age_days,
min_version=min_version)
def get_secure_cookie_key_version(self, name, value=None):
"""Returns the signing key version of the secure cookie.
The version is returned as int.
"""
self.require_setting("cookie_secret", "secure cookies")
if value is None:
value = self.get_cookie(name)
return get_signature_key_version(value)
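    # Illustrative usage sketch (handler and cookie names are hypothetical, and
    # the application must define a ``cookie_secret`` setting):
    #     class LoginHandler(RequestHandler):
    #         def post(self):
    #             self.set_secure_cookie("session", "user-42", expires_days=1)
    #         def get(self):
    #             user = self.get_secure_cookie("session")   # bytes or None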
def redirect(self, url, permanent=False, status=None):
"""Sends a redirect to the given (optionally relative) URL.
If the ``status`` argument is specified, that value is used as the
HTTP status code; otherwise either 301 (permanent) or 302
(temporary) is chosen based on the ``permanent`` argument.
The default is 302 (temporary).
"""
if self._headers_written:
raise Exception("Cannot redirect after headers have been written")
if status is None:
status = 301 if permanent else 302
else:
assert isinstance(status, int) and 300 <= status <= 399
self.set_status(status)
self.set_header("Location", utf8(url))
self.finish()
def write(self, chunk):
"""Writes the given chunk to the output buffer.
To write the output to the network, use the flush() method below.
If the given chunk is a dictionary, we write it as JSON and set
the Content-Type of the response to be ``application/json``.
(if you want to send JSON as a different ``Content-Type``, call
set_header *after* calling write()).
Note that lists are not converted to JSON because of a potential
cross-site security vulnerability. All JSON output should be
wrapped in a dictionary. More details at
http://haacked.com/archive/2009/06/25/json-hijacking.aspx/ and
https://github.com/facebook/tornado/issues/1009
"""
if self._finished:
raise RuntimeError("Cannot write() after finish()")
if not isinstance(chunk, (bytes, unicode_type, dict)):
message = "write() only accepts bytes, unicode, and dict objects"
if isinstance(chunk, list):
message += ". Lists not accepted for security reasons; see http://www.tornadoweb.org/en/stable/web.html#tornado.web.RequestHandler.write"
raise TypeError(message)
if isinstance(chunk, dict):
chunk = escape.json_encode(chunk)
self.set_header("Content-Type", "application/json; charset=UTF-8")
chunk = utf8(chunk)
self._write_buffer.append(chunk)
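    # Illustrative sketch (hypothetical handler code): writing a dict switches
    # the response to JSON, while str/bytes chunks are buffered as-is:
    #     self.write({"status": "ok"})    # sets application/json; charset=UTF-8
    #     self.write("<p>hello</p>")      # appended to the output buffer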
def render(self, template_name, **kwargs):
"""Renders the template with the given arguments as the response."""
html = self.render_string(template_name, **kwargs)
# Insert the additional JS and CSS added by the modules on the page
js_embed = []
js_files = []
css_embed = []
css_files = []
html_heads = []
html_bodies = []
for module in getattr(self, "_active_modules", {}).values():
embed_part = module.embedded_javascript()
if embed_part:
js_embed.append(utf8(embed_part))
file_part = module.javascript_files()
if file_part:
if isinstance(file_part, (unicode_type, bytes)):
js_files.append(file_part)
else:
js_files.extend(file_part)
embed_part = module.embedded_css()
if embed_part:
css_embed.append(utf8(embed_part))
file_part = module.css_files()
if file_part:
if isinstance(file_part, (unicode_type, bytes)):
css_files.append(file_part)
else:
css_files.extend(file_part)
head_part = module.html_head()
if head_part:
html_heads.append(utf8(head_part))
body_part = module.html_body()
if body_part:
html_bodies.append(utf8(body_part))
def is_absolute(path):
return any(path.startswith(x) for x in ["/", "http:", "https:"])
if js_files:
# Maintain order of JavaScript files given by modules
paths = []
unique_paths = set()
for path in js_files:
if not is_absolute(path):
path = self.static_url(path)
if path not in unique_paths:
paths.append(path)
unique_paths.add(path)
js = ''.join('<script src="' + escape.xhtml_escape(p) +
'" type="text/javascript"></script>'
for p in paths)
sloc = html.rindex(b'</body>')
html = html[:sloc] + utf8(js) + b'\n' + html[sloc:]
if js_embed:
js = b'<script type="text/javascript">\n//<![CDATA[\n' + \
b'\n'.join(js_embed) + b'\n//]]>\n</script>'
sloc = html.rindex(b'</body>')
html = html[:sloc] + js + b'\n' + html[sloc:]
if css_files:
paths = []
unique_paths = set()
for path in css_files:
if not is_absolute(path):
path = self.static_url(path)
if path not in unique_paths:
paths.append(path)
unique_paths.add(path)
css = ''.join('<link href="' + escape.xhtml_escape(p) + '" '
'type="text/css" rel="stylesheet"/>'
for p in paths)
hloc = html.index(b'</head>')
html = html[:hloc] + utf8(css) + b'\n' + html[hloc:]
if css_embed:
css = b'<style type="text/css">\n' + b'\n'.join(css_embed) + \
b'\n</style>'
hloc = html.index(b'</head>')
html = html[:hloc] + css + b'\n' + html[hloc:]
if html_heads:
hloc = html.index(b'</head>')
html = html[:hloc] + b''.join(html_heads) + b'\n' + html[hloc:]
if html_bodies:
hloc = html.index(b'</body>')
html = html[:hloc] + b''.join(html_bodies) + b'\n' + html[hloc:]
self.finish(html)
def render_string(self, template_name, **kwargs):
"""Generate the given template with the given arguments.
We return the generated byte string (in utf8). To generate and
write a template as a response, use render() above.
"""
# If no template_path is specified, use the path of the calling file
template_path = self.get_template_path()
if not template_path:
frame = sys._getframe(0)
web_file = frame.f_code.co_filename
while frame.f_code.co_filename == web_file:
frame = frame.f_back
template_path = os.path.dirname(frame.f_code.co_filename)
with RequestHandler._template_loader_lock:
if template_path not in RequestHandler._template_loaders:
loader = self.create_template_loader(template_path)
RequestHandler._template_loaders[template_path] = loader
else:
loader = RequestHandler._template_loaders[template_path]
t = loader.load(template_name)
namespace = self.get_template_namespace()
namespace.update(kwargs)
return t.generate(**namespace)
def get_template_namespace(self):
"""Returns a dictionary to be used as the default template namespace.
May be overridden by subclasses to add or modify values.
The results of this method will be combined with additional
defaults in the `tornado.template` module and keyword arguments
to `render` or `render_string`.
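A sketch of adding an entry to the namespace (the ``site_name`` key is
an arbitrary example)::

    class BaseHandler(RequestHandler):
        def get_template_namespace(self):
            # Start from the default namespace and extend it.
            namespace = super(BaseHandler, self).get_template_namespace()
            namespace["site_name"] = "My Site"
            return namespace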
"""
namespace = dict(
handler=self,
request=self.request,
current_user=self.current_user,
locale=self.locale,
_=self.locale.translate,
pgettext=self.locale.pgettext,
static_url=self.static_url,
xsrf_form_html=self.xsrf_form_html,
reverse_url=self.reverse_url
)
namespace.update(self.ui)
return namespace
def create_template_loader(self, template_path):
"""Returns a new template loader for the given path.
May be overridden by subclasses. By default returns a
directory-based loader on the given path, using the
``autoescape`` application setting. If a ``template_loader``
application setting is supplied, uses that instead.
"""
settings = self.application.settings
if "template_loader" in settings:
return settings["template_loader"]
kwargs = {}
if "autoescape" in settings:
# autoescape=None means "no escaping", so we have to be sure
# to only pass this kwarg if the user asked for it.
kwargs["autoescape"] = settings["autoescape"]
return template.Loader(template_path, **kwargs)
def flush(self, include_footers=False, callback=None):
"""Flushes the current output buffer to the network.
The ``callback`` argument, if given, can be used for flow control:
it will be run when all flushed data has been written to the socket.
Note that only one flush callback can be outstanding at a time;
if another flush occurs before the previous flush's callback
has been run, the previous callback will be discarded.
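A sketch of streaming a response from a coroutine (the chunks here are
placeholders)::

    class StreamingHandler(RequestHandler):
        @gen.coroutine
        def get(self):
            for chunk in [b"part one", b"part two"]:
                self.write(chunk)
                # Wait until the chunk has reached the socket before
                # producing the next one (simple flow control).
                yield self.flush()
            self.finish()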
.. versionchanged:: 4.0
Now returns a `.Future` if no callback is given.
"""
chunk = b"".join(self._write_buffer)
self._write_buffer = []
if not self._headers_written:
self._headers_written = True
for transform in self._transforms:
self._status_code, self._headers, chunk = \
transform.transform_first_chunk(
self._status_code, self._headers,
chunk, include_footers)
# Ignore the chunk and only write the headers for HEAD requests
if self.request.method == "HEAD":
chunk = None
# Finalize the cookie headers (which have been stored in a side
# object so an outgoing cookie could be overwritten before it
# is sent).
if hasattr(self, "_new_cookie"):
for cookie in self._new_cookie.values():
self.add_header("Set-Cookie", cookie.OutputString(None))
start_line = httputil.ResponseStartLine('',
self._status_code,
self._reason)
return self.request.connection.write_headers(
start_line, self._headers, chunk, callback=callback)
else:
for transform in self._transforms:
chunk = transform.transform_chunk(chunk, include_footers)
# Ignore the chunk and only write the headers for HEAD requests
if self.request.method != "HEAD":
return self.request.connection.write(chunk, callback=callback)
else:
future = Future()
future.set_result(None)
return future
def finish(self, chunk=None):
"""Finishes this response, ending the HTTP request."""
if self._finished:
raise RuntimeError("finish() called twice")
if chunk is not None:
self.write(chunk)
# Automatically support ETags and add the Content-Length header if
# we have not flushed any content yet.
if not self._headers_written:
if (self._status_code == 200 and
self.request.method in ("GET", "HEAD") and
"Etag" not in self._headers):
self.set_etag_header()
if self.check_etag_header():
self._write_buffer = []
self.set_status(304)
if self._status_code == 304:
assert not self._write_buffer, "Cannot send body with 304"
self._clear_headers_for_304()
elif "Content-Length" not in self._headers:
content_length = sum(len(part) for part in self._write_buffer)
self.set_header("Content-Length", content_length)
if hasattr(self.request, "connection"):
# Now that the request is finished, clear the callback we
# set on the HTTPConnection (which would otherwise prevent the
# garbage collection of the RequestHandler when there
# are keepalive connections)
self.request.connection.set_close_callback(None)
self.flush(include_footers=True)
self.request.finish()
self._log()
self._finished = True
self.on_finish()
# Break up a reference cycle between this handler and the
# _ui_module closures to allow for faster GC on CPython.
self.ui = None
def send_error(self, status_code=500, **kwargs):
"""Sends the given HTTP error code to the browser.
If `flush()` has already been called, it is not possible to send
an error, so this method will simply terminate the response.
If output has been written but not yet flushed, it will be discarded
and replaced with the error page.
Override `write_error()` to customize the error page that is returned.
Additional keyword arguments are passed through to `write_error`.
"""
if self._headers_written:
gen_log.error("Cannot send error response after headers written")
if not self._finished:
# If we get an error between writing headers and finishing,
# we are unlikely to be able to finish due to a
# Content-Length mismatch. Try anyway to release the
# socket.
try:
self.finish()
except Exception:
gen_log.error("Failed to flush partial response",
exc_info=True)
return
self.clear()
reason = kwargs.get('reason')
if 'exc_info' in kwargs:
exception = kwargs['exc_info'][1]
if isinstance(exception, HTTPError) and exception.reason:
reason = exception.reason
self.set_status(status_code, reason=reason)
try:
self.write_error(status_code, **kwargs)
except Exception:
app_log.error("Uncaught exception in write_error", exc_info=True)
if not self._finished:
self.finish()
def write_error(self, status_code, **kwargs):
"""Override to implement custom error pages.
``write_error`` may call `write`, `render`, `set_header`, etc
to produce output as usual.
If this error was caused by an uncaught exception (including
HTTPError), an ``exc_info`` triple will be available as
``kwargs["exc_info"]``. Note that this exception may not be
the "current" exception for purposes of methods like
``sys.exc_info()`` or ``traceback.format_exc``.
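A sketch of a JSON error page (the response shape is illustrative
only)::

    class APIHandler(RequestHandler):
        def write_error(self, status_code, **kwargs):
            # Emit errors as JSON instead of the default HTML page.
            self.finish({"error": status_code, "reason": self._reason})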
"""
if self.settings.get("serve_traceback") and "exc_info" in kwargs:
# in debug mode, try to send a traceback
self.set_header('Content-Type', 'text/plain')
for line in traceback.format_exception(*kwargs["exc_info"]):
self.write(line)
self.finish()
else:
self.finish("<html><title>%(code)d: %(message)s</title>"
"<body>%(code)d: %(message)s</body></html>" % {
"code": status_code,
"message": self._reason,
})
@property
def locale(self):
"""The locale for the current session.
Determined by either `get_user_locale`, which you can override to
set the locale based on, e.g., a user preference stored in a
database, or `get_browser_locale`, which uses the ``Accept-Language``
header.
.. versionchanged:: 4.1
Added a property setter.
"""
if not hasattr(self, "_locale"):
self._locale = self.get_user_locale()
if not self._locale:
self._locale = self.get_browser_locale()
assert self._locale
return self._locale
@locale.setter
def locale(self, value):
self._locale = value
def get_user_locale(self):
"""Override to determine the locale from the authenticated user.
If None is returned, we fall back to `get_browser_locale()`.
This method should return a `tornado.locale.Locale` object,
most likely obtained via a call like ``tornado.locale.get("en")``
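A sketch of looking up a stored preference (the ``prefs`` attribute on
the user object is hypothetical)::

    class BaseHandler(RequestHandler):
        def get_user_locale(self):
            user = self.current_user
            if user and user.prefs.get("locale"):
                return locale.get(user.prefs["locale"])
            # Returning None falls back to get_browser_locale().
            return None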
"""
return None
def get_browser_locale(self, default="en_US"):
"""Determines the user's locale from ``Accept-Language`` header.
See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.4
"""
if "Accept-Language" in self.request.headers:
languages = self.request.headers["Accept-Language"].split(",")
locales = []
for language in languages:
parts = language.strip().split(";")
if len(parts) > 1 and parts[1].startswith("q="):
try:
score = float(parts[1][2:])
except (ValueError, TypeError):
score = 0.0
else:
score = 1.0
locales.append((parts[0], score))
if locales:
locales.sort(key=lambda pair: pair[1], reverse=True)
codes = [l[0] for l in locales]
return locale.get(*codes)
return locale.get(default)
@property
def current_user(self):
"""The authenticated user for this request.
This is a cached version of `get_current_user`, which you can
override to set the user based on, e.g., a cookie. If that
method is not overridden, this method always returns None.
We lazy-load the current user the first time this method is called
and cache the result after that.
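A minimal sketch of a cookie-based ``get_current_user`` (the cookie
name ``"user"`` is arbitrary)::

    class BaseHandler(RequestHandler):
        def get_current_user(self):
            # Returns None if the cookie is missing or invalid,
            # leaving the user logged out.
            return self.get_secure_cookie("user")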
"""
if not hasattr(self, "_current_user"):
self._current_user = self.get_current_user()
return self._current_user
@current_user.setter
def current_user(self, value):
self._current_user = value
def get_current_user(self):
"""Override to determine the current user from, e.g., a cookie."""
return None
def get_login_url(self):
"""Override to customize the login URL based on the request.
By default, we use the ``login_url`` application setting.
"""
self.require_setting("login_url", "@tornado.web.authenticated")
return self.application.settings["login_url"]
def get_template_path(self):
"""Override to customize template path for each handler.
By default, we use the ``template_path`` application setting.
Return None to load templates relative to the calling file.
"""
return self.application.settings.get("template_path")
@property
def xsrf_token(self):
"""The XSRF-prevention token for the current user/session.
To prevent cross-site request forgery, we set an '_xsrf' cookie
and include the same '_xsrf' value as an argument with all POST
requests. If the two do not match, we reject the form submission
as a potential forgery.
See http://en.wikipedia.org/wiki/Cross-site_request_forgery
.. versionchanged:: 3.2.2
The xsrf token will now have a random mask applied in every
request, which makes it safe to include the token in pages
that are compressed. See http://breachattack.com for more
information on the issue fixed by this change. Old (version 1)
cookies will be converted to version 2 when this method is called
unless the ``xsrf_cookie_version`` `Application` setting is
set to 1.
"""
if not hasattr(self, "_xsrf_token"):
version, token, timestamp = self._get_raw_xsrf_token()
output_version = self.settings.get("xsrf_cookie_version", 2)
if output_version == 1:
self._xsrf_token = binascii.b2a_hex(token)
elif output_version == 2:
mask = os.urandom(4)
self._xsrf_token = b"|".join([
b"2",
binascii.b2a_hex(mask),
binascii.b2a_hex(_websocket_mask(mask, token)),
utf8(str(int(timestamp)))])
else:
raise ValueError("unknown xsrf cookie version %d" %
output_version)
if version is None:
expires_days = 30 if self.current_user else None
self.set_cookie("_xsrf", self._xsrf_token,
expires_days=expires_days)
return self._xsrf_token
def _get_raw_xsrf_token(self):
"""Read or generate the xsrf token in its raw form.
The raw_xsrf_token is a tuple containing:
* version: the version of the cookie from which this token was read,
or None if we generated a new token in this request.
* token: the raw token data; random (non-ascii) bytes.
* timestamp: the time this token was generated (will not be accurate
for version 1 cookies)
"""
if not hasattr(self, '_raw_xsrf_token'):
cookie = self.get_cookie("_xsrf")
if cookie:
version, token, timestamp = self._decode_xsrf_token(cookie)
else:
version, token, timestamp = None, None, None
if token is None:
version = None
token = os.urandom(16)
timestamp = time.time()
self._raw_xsrf_token = (version, token, timestamp)
return self._raw_xsrf_token
def _decode_xsrf_token(self, cookie):
"""Convert a cookie string into a the tuple form returned by
_get_raw_xsrf_token.
"""
try:
m = _signed_value_version_re.match(utf8(cookie))
if m:
version = int(m.group(1))
if version == 2:
_, mask, masked_token, timestamp = cookie.split("|")
mask = binascii.a2b_hex(utf8(mask))
token = _websocket_mask(
mask, binascii.a2b_hex(utf8(masked_token)))
timestamp = int(timestamp)
return version, token, timestamp
else:
# Treat unknown versions as not present instead of failing.
raise Exception("Unknown xsrf cookie version")
else:
version = 1
try:
token = binascii.a2b_hex(utf8(cookie))
except (binascii.Error, TypeError):
token = utf8(cookie)
# We don't have a usable timestamp in older versions.
timestamp = int(time.time())
return (version, token, timestamp)
except Exception:
# Catch exceptions and return nothing instead of failing.
gen_log.debug("Uncaught exception in _decode_xsrf_token",
exc_info=True)
return None, None, None
def check_xsrf_cookie(self):
"""Verifies that the ``_xsrf`` cookie matches the ``_xsrf`` argument.
To prevent cross-site request forgery, we set an ``_xsrf``
cookie and include the same value as a non-cookie
field with all ``POST`` requests. If the two do not match, we
reject the form submission as a potential forgery.
The ``_xsrf`` value may be set as either a form field named ``_xsrf``
or in a custom HTTP header named ``X-XSRFToken`` or ``X-CSRFToken``
(the latter is accepted for compatibility with Django).
See http://en.wikipedia.org/wiki/Cross-site_request_forgery
Prior to release 1.1.1, this check was ignored if the HTTP header
``X-Requested-With: XMLHTTPRequest`` was present. This exception
has been shown to be insecure and has been removed. For more
information please see
http://www.djangoproject.com/weblog/2011/feb/08/security/
http://weblog.rubyonrails.org/2011/2/8/csrf-protection-bypass-in-ruby-on-rails
.. versionchanged:: 3.2.2
Added support for cookie version 2. Both versions 1 and 2 are
supported.
"""
token = (self.get_argument("_xsrf", None) or
self.request.headers.get("X-Xsrftoken") or
self.request.headers.get("X-Csrftoken"))
if not token:
raise HTTPError(403, "'_xsrf' argument missing from POST")
_, token, _ = self._decode_xsrf_token(token)
_, expected_token, _ = self._get_raw_xsrf_token()
if not _time_independent_equals(utf8(token), utf8(expected_token)):
raise HTTPError(403, "XSRF cookie does not match POST argument")
def xsrf_form_html(self):
"""An HTML ``<input/>`` element to be included with all POST forms.
It defines the ``_xsrf`` input value, which we check on all POST
requests to prevent cross-site request forgery. If you have set
the ``xsrf_cookies`` application setting, you must include this
HTML within all of your HTML forms.
In a template, this method should be called with ``{% module
xsrf_form_html() %}``
See `check_xsrf_cookie()` above for more information.
"""
return '<input type="hidden" name="_xsrf" value="' + \
escape.xhtml_escape(self.xsrf_token) + '"/>'
def static_url(self, path, include_host=None, **kwargs):
"""Returns a static URL for the given relative static file path.
This method requires you set the ``static_path`` setting in your
application (which specifies the root directory of your static
files).
This method returns a versioned url (by default appending
``?v=<signature>``), which allows the static files to be
cached indefinitely. This can be disabled by passing
``include_version=False`` (in the default implementation;
other static file implementations are not required to support
this, but they may support other options).
By default this method returns URLs relative to the current
host, but if ``include_host`` is true the URL returned will be
absolute. If this handler has an ``include_host`` attribute,
that value will be used as the default for all `static_url`
calls that do not pass ``include_host`` as a keyword argument.
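A short usage sketch (``style.css`` is an arbitrary file name)::

    class PageHandler(RequestHandler):
        def get(self):
            # Produces something like /static/style.css?v=<hash>
            self.write('<link rel="stylesheet" href="%s">'
                       % self.static_url("style.css"))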
"""
self.require_setting("static_path", "static_url")
get_url = self.settings.get("static_handler_class",
StaticFileHandler).make_static_url
if include_host is None:
include_host = getattr(self, "include_host", False)
if include_host:
base = self.request.protocol + "://" + self.request.host
else:
base = ""
return base + get_url(self.settings, path, **kwargs)
def require_setting(self, name, feature="this feature"):
"""Raises an exception if the given app setting is not defined."""
if not self.application.settings.get(name):
raise Exception("You must define the '%s' setting in your "
"application to use %s" % (name, feature))
def reverse_url(self, name, *args):
"""Alias for `Application.reverse_url`."""
return self.application.reverse_url(name, *args)
def compute_etag(self):
"""Computes the etag header to be used for this request.
By default uses a hash of the content written so far.
May be overridden to provide custom etag implementations,
or may return None to disable tornado's default etag support.
"""
hasher = hashlib.sha1()
for part in self._write_buffer:
hasher.update(part)
return '"%s"' % hasher.hexdigest()
def set_etag_header(self):
"""Sets the response's Etag header using ``self.compute_etag()``.
Note: no header will be set if ``compute_etag()`` returns ``None``.
This method is called automatically when the request is finished.
"""
etag = self.compute_etag()
if etag is not None:
self.set_header("Etag", etag)
def check_etag_header(self):
"""Checks the ``Etag`` header against requests's ``If-None-Match``.
Returns ``True`` if the request's Etag matches and a 304 should be
returned. For example::
self.set_etag_header()
if self.check_etag_header():
self.set_status(304)
return
This method is called automatically when the request is finished,
but may be called earlier for applications that override
`compute_etag` and want to do an early check for ``If-None-Match``
before completing the request. The ``Etag`` header should be set
(perhaps with `set_etag_header`) before calling this method.
"""
computed_etag = utf8(self._headers.get("Etag", ""))
# Find all weak and strong etag values from If-None-Match header
# because RFC 7232 allows multiple etag values in a single header.
etags = re.findall(
br'\*|(?:W/)?"[^"]*"',
utf8(self.request.headers.get("If-None-Match", ""))
)
if not computed_etag or not etags:
return False
match = False
if etags[0] == b'*':
match = True
else:
# Use a weak comparison when comparing entity-tags.
val = lambda x: x[2:] if x.startswith(b'W/') else x
for etag in etags:
if val(etag) == val(computed_etag):
match = True
break
return match
def _stack_context_handle_exception(self, type, value, traceback):
try:
# For historical reasons _handle_request_exception only takes
# the exception value instead of the full triple,
# so re-raise the exception to ensure that it's in
# sys.exc_info()
raise_exc_info((type, value, traceback))
except Exception:
self._handle_request_exception(value)
return True
@gen.coroutine
def _execute(self, transforms, *args, **kwargs):
"""Executes this request with the given output transforms."""
self._transforms = transforms
try:
if self.request.method not in self.SUPPORTED_METHODS:
raise HTTPError(405)
self.path_args = [self.decode_argument(arg) for arg in args]
self.path_kwargs = dict((k, self.decode_argument(v, name=k))
for (k, v) in kwargs.items())
# If XSRF cookies are turned on, reject form submissions without
# the proper cookie
if self.request.method not in ("GET", "HEAD", "OPTIONS") and \
self.application.settings.get("xsrf_cookies"):
self.check_xsrf_cookie()
result = self.prepare()
if is_future(result):
result = yield result
if result is not None:
raise TypeError("Expected None, got %r" % result)
if self._prepared_future is not None:
# Tell the Application we've finished with prepare()
# and are ready for the body to arrive.
self._prepared_future.set_result(None)
if self._finished:
return
if _has_stream_request_body(self.__class__):
# In streaming mode request.body is a Future that signals
# the body has been completely received. The Future has no
# result; the data has been passed to self.data_received
# instead.
try:
yield self.request.body
except iostream.StreamClosedError:
return
method = getattr(self, self.request.method.lower())
result = method(*self.path_args, **self.path_kwargs)
if is_future(result):
result = yield result
if result is not None:
raise TypeError("Expected None, got %r" % result)
if self._auto_finish and not self._finished:
self.finish()
except Exception as e:
try:
self._handle_request_exception(e)
except Exception:
app_log.error("Exception in exception handler", exc_info=True)
if (self._prepared_future is not None and
not self._prepared_future.done()):
# In case we failed before setting _prepared_future, do it
# now (to unblock the HTTP server). Note that this is not
# in a finally block to avoid GC issues prior to Python 3.4.
self._prepared_future.set_result(None)
def data_received(self, chunk):
"""Implement this method to handle streamed request data.
Requires the `.stream_request_body` decorator.
"""
raise NotImplementedError()
def _log(self):
"""Logs the current request.
Sort of deprecated since this functionality was moved to the
Application, but left in place for the benefit of existing apps
that have overridden this method.
"""
self.application.log_request(self)
def _request_summary(self):
return "%s %s (%s)" % (self.request.method, self.request.uri,
self.request.remote_ip)
def _handle_request_exception(self, e):
if isinstance(e, Finish):
# Not an error; just finish the request without logging.
if not self._finished:
self.finish()
return
try:
self.log_exception(*sys.exc_info())
except Exception:
# An error here should still get a best-effort send_error()
# to avoid leaking the connection.
app_log.error("Error in exception logger", exc_info=True)
if self._finished:
# Extra errors after the request has been finished should
# be logged, but there is no reason to continue to try and
# send a response.
return
if isinstance(e, HTTPError):
if e.status_code not in httputil.responses and not e.reason:
gen_log.error("Bad HTTP status code: %d", e.status_code)
self.send_error(500, exc_info=sys.exc_info())
else:
self.send_error(e.status_code, exc_info=sys.exc_info())
else:
self.send_error(500, exc_info=sys.exc_info())
def log_exception(self, typ, value, tb):
"""Override to customize logging of uncaught exceptions.
By default logs instances of `HTTPError` as warnings without
stack traces (on the ``tornado.general`` logger), and all
other exceptions as errors with stack traces (on the
``tornado.application`` logger).
.. versionadded:: 3.1
"""
if isinstance(value, HTTPError):
if value.log_message:
format = "%d %s: " + value.log_message
args = ([value.status_code, self._request_summary()] +
list(value.args))
gen_log.warning(format, *args)
else:
app_log.error("Uncaught exception %s\n%r", self._request_summary(),
self.request, exc_info=(typ, value, tb))
def _ui_module(self, name, module):
def render(*args, **kwargs):
if not hasattr(self, "_active_modules"):
self._active_modules = {}
if name not in self._active_modules:
self._active_modules[name] = module(self)
rendered = self._active_modules[name].render(*args, **kwargs)
return rendered
return render
def _ui_method(self, method):
return lambda *args, **kwargs: method(self, *args, **kwargs)
def _clear_headers_for_304(self):
# 304 responses should not contain entity headers (defined in
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec7.html#sec7.1)
# not explicitly allowed by
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
headers = ["Allow", "Content-Encoding", "Content-Language",
"Content-Length", "Content-MD5", "Content-Range",
"Content-Type", "Last-Modified"]
for h in headers:
self.clear_header(h)
def asynchronous(method):
"""Wrap request handler methods with this if they are asynchronous.
This decorator is for callback-style asynchronous methods; for
coroutines, use the ``@gen.coroutine`` decorator without
``@asynchronous``. (It is legal for legacy reasons to use the two
decorators together provided ``@asynchronous`` is first, but
``@asynchronous`` will be ignored in this case)
This decorator should only be applied to the :ref:`HTTP verb
methods <verbs>`; its behavior is undefined for any other method.
This decorator does not *make* a method asynchronous; it tells
the framework that the method *is* asynchronous. For this decorator
to be useful the method must (at least sometimes) do something
asynchronous.
If this decorator is given, the response is not finished when the
method returns. It is up to the request handler to call
`self.finish() <RequestHandler.finish>` to finish the HTTP
request. Without this decorator, the request is automatically
finished when the ``get()`` or ``post()`` method returns. Example:
.. testcode::
class MyRequestHandler(RequestHandler):
@asynchronous
def get(self):
http = httpclient.AsyncHTTPClient()
http.fetch("http://friendfeed.com/", self._on_download)
def _on_download(self, response):
self.write("Downloaded!")
self.finish()
.. testoutput::
:hide:
.. versionadded:: 3.1
The ability to use ``@gen.coroutine`` without ``@asynchronous``.
"""
# Delay the IOLoop import because it's not available on app engine.
from tornado.ioloop import IOLoop
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
self._auto_finish = False
with stack_context.ExceptionStackContext(
self._stack_context_handle_exception):
result = method(self, *args, **kwargs)
if is_future(result):
# If @asynchronous is used with @gen.coroutine, (but
# not @gen.engine), we can automatically finish the
# request when the future resolves. Additionally,
# the Future will swallow any exceptions so we need
# to throw them back out to the stack context to finish
# the request.
def future_complete(f):
f.result()
if not self._finished:
self.finish()
IOLoop.current().add_future(result, future_complete)
# Once we have done this, hide the Future from our
# caller (i.e. RequestHandler._when_complete), which
# would otherwise set up its own callback and
# exception handler (resulting in exceptions being
# logged twice).
return None
return result
return wrapper
def stream_request_body(cls):
"""Apply to `RequestHandler` subclasses to enable streaming body support.
This decorator implies the following changes:
* `.HTTPServerRequest.body` is undefined, and body arguments will not
be included in `RequestHandler.get_argument`.
* `RequestHandler.prepare` is called when the request headers have been
read instead of after the entire body has been read.
* The subclass must define a method ``data_received(self, data):``, which
will be called zero or more times as data is available. Note that
if the request has an empty body, ``data_received`` may not be called.
* ``prepare`` and ``data_received`` may return Futures (such as via
``@gen.coroutine``, in which case the next method will not be called
until those futures have completed.
* The regular HTTP method (``post``, ``put``, etc) will be called after
the entire body has been read.
There is a subtle interaction between ``data_received`` and asynchronous
``prepare``: The first call to ``data_received`` may occur at any point
after the call to ``prepare`` has returned *or yielded*.
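A minimal sketch of a streaming upload handler (the byte counting is
illustrative only)::

    @stream_request_body
    class UploadHandler(RequestHandler):
        def prepare(self):
            # Called as soon as the headers have been read.
            self.received = 0

        def data_received(self, chunk):
            # Called zero or more times as body data arrives.
            self.received += len(chunk)

        def put(self):
            # Called after the entire body has been read.
            self.write("received %d bytes" % self.received)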
"""
if not issubclass(cls, RequestHandler):
raise TypeError("expected subclass of RequestHandler, got %r" % cls)
cls._stream_request_body = True
return cls
def _has_stream_request_body(cls):
if not issubclass(cls, RequestHandler):
raise TypeError("expected subclass of RequestHandler, got %r" % cls)
return getattr(cls, '_stream_request_body', False)
def removeslash(method):
"""Use this decorator to remove trailing slashes from the request path.
For example, a request to ``/foo/`` would redirect to ``/foo`` with this
decorator. Your request handler mapping should use a regular expression
like ``r'/foo/*'`` in conjunction with using the decorator.
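A minimal sketch (``ArticleIndexHandler`` is a hypothetical handler)::

    class ArticleIndexHandler(RequestHandler):
        @removeslash
        def get(self):
            # A request to /articles/ is redirected to /articles
            # before this method runs.
            self.write("article index")

    application = web.Application([(r"/articles/*", ArticleIndexHandler)])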
"""
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
if self.request.path.endswith("/"):
if self.request.method in ("GET", "HEAD"):
uri = self.request.path.rstrip("/")
if uri: # don't try to redirect '/' to ''
if self.request.query:
uri += "?" + self.request.query
self.redirect(uri, permanent=True)
return
else:
raise HTTPError(404)
return method(self, *args, **kwargs)
return wrapper
def addslash(method):
"""Use this decorator to add a missing trailing slash to the request path.
For example, a request to ``/foo`` would redirect to ``/foo/`` with this
decorator. Your request handler mapping should use a regular expression
like ``r'/foo/?'`` in conjunction with using the decorator.
"""
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
if not self.request.path.endswith("/"):
if self.request.method in ("GET", "HEAD"):
uri = self.request.path + "/"
if self.request.query:
uri += "?" + self.request.query
self.redirect(uri, permanent=True)
return
raise HTTPError(404)
return method(self, *args, **kwargs)
return wrapper
class Application(httputil.HTTPServerConnectionDelegate):
"""A collection of request handlers that make up a web application.
Instances of this class are callable and can be passed directly to
HTTPServer to serve the application::
application = web.Application([
(r"/", MainPageHandler),
])
http_server = httpserver.HTTPServer(application)
http_server.listen(8080)
ioloop.IOLoop.current().start()
The constructor for this class takes in a list of `URLSpec` objects
or (regexp, request_class) tuples. When we receive requests, we
iterate over the list in order and instantiate an instance of the
first request class whose regexp matches the request path.
The request class can be specified as either a class object or a
(fully-qualified) name.
Each tuple can contain additional elements, which correspond to the
arguments to the `URLSpec` constructor. (Prior to Tornado 3.2,
only tuples of two or three elements were allowed.)
A dictionary may be passed as the third element of the tuple,
which will be used as keyword arguments to the handler's
constructor and `~RequestHandler.initialize` method. This pattern
is used for the `StaticFileHandler` in this example (note that a
`StaticFileHandler` can be installed automatically with the
static_path setting described below)::
application = web.Application([
(r"/static/(.*)", web.StaticFileHandler, {"path": "/var/www"}),
])
We support virtual hosts with the `add_handlers` method, which takes in
a host regular expression as the first argument::
application.add_handlers(r"www\.myhost\.com", [
(r"/article/([0-9]+)", ArticleHandler),
])
You can serve static files by sending the ``static_path`` setting
as a keyword argument. We will serve those files from the
``/static/`` URI (this is configurable with the
``static_url_prefix`` setting), and we will serve ``/favicon.ico``
and ``/robots.txt`` from the same directory. A custom subclass of
`StaticFileHandler` can be specified with the
``static_handler_class`` setting.
"""
def __init__(self, handlers=None, default_host="", transforms=None,
**settings):
if transforms is None:
self.transforms = []
if settings.get("compress_response") or settings.get("gzip"):
self.transforms.append(GZipContentEncoding)
else:
self.transforms = transforms
self.handlers = []
self.named_handlers = {}
self.default_host = default_host
self.settings = settings
self.ui_modules = {'linkify': _linkify,
'xsrf_form_html': _xsrf_form_html,
'Template': TemplateModule,
}
self.ui_methods = {}
self._load_ui_modules(settings.get("ui_modules", {}))
self._load_ui_methods(settings.get("ui_methods", {}))
if self.settings.get("static_path"):
path = self.settings["static_path"]
handlers = list(handlers or [])
static_url_prefix = settings.get("static_url_prefix",
"/static/")
static_handler_class = settings.get("static_handler_class",
StaticFileHandler)
static_handler_args = settings.get("static_handler_args", {})
static_handler_args['path'] = path
for pattern in [re.escape(static_url_prefix) + r"(.*)",
r"/(favicon\.ico)", r"/(robots\.txt)"]:
handlers.insert(0, (pattern, static_handler_class,
static_handler_args))
if handlers:
self.add_handlers(".*$", handlers)
if self.settings.get('debug'):
self.settings.setdefault('autoreload', True)
self.settings.setdefault('compiled_template_cache', False)
self.settings.setdefault('static_hash_cache', False)
self.settings.setdefault('serve_traceback', True)
# Automatically reload modified modules
if self.settings.get('autoreload'):
from tornado import autoreload
autoreload.start()
def listen(self, port, address="", **kwargs):
"""Starts an HTTP server for this application on the given port.
This is a convenience alias for creating an `.HTTPServer`
object and calling its listen method. Keyword arguments not
supported by `HTTPServer.listen <.TCPServer.listen>` are passed to the
`.HTTPServer` constructor. For advanced uses
(e.g. multi-process mode), do not use this method; create an
`.HTTPServer` and call its
`.TCPServer.bind`/`.TCPServer.start` methods directly.
Note that after calling this method you still need to call
``IOLoop.current().start()`` to start the server.
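A minimal single-process sketch (``MainHandler`` is a hypothetical
handler)::

    application = web.Application([(r"/", MainHandler)])
    application.listen(8888)
    ioloop.IOLoop.current().start()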
"""
# import is here rather than top level because HTTPServer
# is not importable on appengine
from tornado.httpserver import HTTPServer
server = HTTPServer(self, **kwargs)
server.listen(port, address)
def add_handlers(self, host_pattern, host_handlers):
"""Appends the given handlers to our handler list.
Host patterns are processed sequentially in the order they were
added. All matching patterns will be considered.
"""
if not host_pattern.endswith("$"):
host_pattern += "$"
handlers = []
# The handlers with the wildcard host_pattern are a special
# case - they're added in the constructor but should have lower
# precedence than the more-precise handlers added later.
# If a wildcard handler group exists, it should always be last
# in the list, so insert new groups just before it.
if self.handlers and self.handlers[-1][0].pattern == '.*$':
self.handlers.insert(-1, (re.compile(host_pattern), handlers))
else:
self.handlers.append((re.compile(host_pattern), handlers))
for spec in host_handlers:
if isinstance(spec, (tuple, list)):
assert len(spec) in (2, 3, 4)
spec = URLSpec(*spec)
handlers.append(spec)
if spec.name:
if spec.name in self.named_handlers:
app_log.warning(
"Multiple handlers named %s; replacing previous value",
spec.name)
self.named_handlers[spec.name] = spec
def add_transform(self, transform_class):
self.transforms.append(transform_class)
def _get_host_handlers(self, request):
host = split_host_and_port(request.host.lower())[0]
matches = []
for pattern, handlers in self.handlers:
if pattern.match(host):
matches.extend(handlers)
# Look for default host if not behind load balancer (for debugging)
if not matches and "X-Real-Ip" not in request.headers:
for pattern, handlers in self.handlers:
if pattern.match(self.default_host):
matches.extend(handlers)
return matches or None
def _load_ui_methods(self, methods):
if isinstance(methods, types.ModuleType):
self._load_ui_methods(dict((n, getattr(methods, n))
for n in dir(methods)))
elif isinstance(methods, list):
for m in methods:
self._load_ui_methods(m)
else:
for name, fn in methods.items():
if not name.startswith("_") and hasattr(fn, "__call__") \
and name[0].lower() == name[0]:
self.ui_methods[name] = fn
def _load_ui_modules(self, modules):
if isinstance(modules, types.ModuleType):
self._load_ui_modules(dict((n, getattr(modules, n))
for n in dir(modules)))
elif isinstance(modules, list):
for m in modules:
self._load_ui_modules(m)
else:
assert isinstance(modules, dict)
for name, cls in modules.items():
try:
if issubclass(cls, UIModule):
self.ui_modules[name] = cls
except TypeError:
pass
def start_request(self, server_conn, request_conn):
# Modern HTTPServer interface
return _RequestDispatcher(self, request_conn)
def __call__(self, request):
# Legacy HTTPServer interface
dispatcher = _RequestDispatcher(self, None)
dispatcher.set_request(request)
return dispatcher.execute()
def reverse_url(self, name, *args):
"""Returns a URL path for handler named ``name``
The handler must be added to the application as a named `URLSpec`.
Args will be substituted for capturing groups in the `URLSpec` regex.
They will be converted to strings if necessary, encoded as utf8,
and url-escaped.
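A minimal sketch using a named `URLSpec` (the name ``"article"`` is
arbitrary)::

    application = web.Application([
        web.URLSpec(r"/article/([0-9]+)", ArticleHandler, name="article"),
    ])
    application.reverse_url("article", 42)  # returns "/article/42"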
"""
if name in self.named_handlers:
return self.named_handlers[name].reverse(*args)
raise KeyError("%s not found in named urls" % name)
def log_request(self, handler):
"""Writes a completed HTTP request to the logs.
By default writes to the python root logger. To change
this behavior either subclass Application and override this method,
or pass a function in the application settings dictionary as
``log_function``.
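A sketch of a custom ``log_function`` setting (the logger name and
``handlers`` list are placeholders)::

    import logging

    def log_request(handler):
        logging.getLogger("myapp.access").info(
            "%d %s", handler.get_status(), handler._request_summary())

    application = web.Application(handlers, log_function=log_request)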
"""
if "log_function" in self.settings:
self.settings["log_function"](handler)
return
if handler.get_status() < 400:
log_method = access_log.info
elif handler.get_status() < 500:
log_method = access_log.warning
else:
log_method = access_log.error
request_time = 1000.0 * handler.request.request_time()
log_method("%d %s %.2fms", handler.get_status(),
handler._request_summary(), request_time)
class _RequestDispatcher(httputil.HTTPMessageDelegate):
def __init__(self, application, connection):
self.application = application
self.connection = connection
self.request = None
self.chunks = []
self.handler_class = None
self.handler_kwargs = None
self.path_args = []
self.path_kwargs = {}
def headers_received(self, start_line, headers):
self.set_request(httputil.HTTPServerRequest(
connection=self.connection, start_line=start_line,
headers=headers))
if self.stream_request_body:
self.request.body = Future()
return self.execute()
def set_request(self, request):
self.request = request
self._find_handler()
self.stream_request_body = _has_stream_request_body(self.handler_class)
def _find_handler(self):
# Identify the handler to use as soon as we have the request.
# Save url path arguments for later.
app = self.application
handlers = app._get_host_handlers(self.request)
if not handlers:
self.handler_class = RedirectHandler
self.handler_kwargs = dict(url="%s://%s/"
% (self.request.protocol,
app.default_host))
return
for spec in handlers:
match = spec.regex.match(self.request.path)
if match:
self.handler_class = spec.handler_class
self.handler_kwargs = spec.kwargs
if spec.regex.groups:
# Pass matched groups to the handler. Since
# match.groups() includes both named and
# unnamed groups, we want to use either groups
# or groupdict but not both.
if spec.regex.groupindex:
self.path_kwargs = dict(
(str(k), _unquote_or_none(v))
for (k, v) in match.groupdict().items())
else:
self.path_args = [_unquote_or_none(s)
for s in match.groups()]
return
if app.settings.get('default_handler_class'):
self.handler_class = app.settings['default_handler_class']
self.handler_kwargs = app.settings.get(
'default_handler_args', {})
else:
self.handler_class = ErrorHandler
self.handler_kwargs = dict(status_code=404)
def data_received(self, data):
if self.stream_request_body:
return self.handler.data_received(data)
else:
self.chunks.append(data)
def finish(self):
if self.stream_request_body:
self.request.body.set_result(None)
else:
self.request.body = b''.join(self.chunks)
self.request._parse_body()
self.execute()
def on_connection_close(self):
if self.stream_request_body:
self.handler.on_connection_close()
else:
self.chunks = None
def execute(self):
# If template cache is disabled (usually in the debug mode),
# re-compile templates and reload static files on every
# request so you don't need to restart to see changes
if not self.application.settings.get("compiled_template_cache", True):
with RequestHandler._template_loader_lock:
for loader in RequestHandler._template_loaders.values():
loader.reset()
if not self.application.settings.get('static_hash_cache', True):
StaticFileHandler.reset()
self.handler = self.handler_class(self.application, self.request,
**self.handler_kwargs)
transforms = [t(self.request) for t in self.application.transforms]
if self.stream_request_body:
self.handler._prepared_future = Future()
# Note that if an exception escapes handler._execute it will be
# trapped in the Future it returns (which we are ignoring here,
# leaving it to be logged when the Future is GC'd).
# However, that shouldn't happen because _execute has a blanket
# except handler, and we cannot easily access the IOLoop here to
# call add_future (because of the requirement to remain compatible
# with WSGI)
f = self.handler._execute(transforms, *self.path_args,
**self.path_kwargs)
# If we are streaming the request body, then execute() is finished
# when the handler has prepared to receive the body. If not,
# it doesn't matter when execute() finishes (so we return None)
return self.handler._prepared_future
class HTTPError(Exception):
"""An exception that will turn into an HTTP error response.
Raising an `HTTPError` is a convenient alternative to calling
`RequestHandler.send_error` since it automatically ends the
current function.
To customize the response sent with an `HTTPError`, override
`RequestHandler.write_error`.
:arg int status_code: HTTP status code. Must be listed in
`httplib.responses <http.client.responses>` unless the ``reason``
keyword argument is given.
:arg string log_message: Message to be written to the log for this error
(will not be shown to the user unless the `Application` is in debug
mode). May contain ``%s``-style placeholders, which will be filled
in with remaining positional parameters.
:arg string reason: Keyword-only argument. The HTTP "reason" phrase
to pass in the status line along with ``status_code``. Normally
determined automatically from ``status_code``, but can be supplied
when a non-standard numeric code is used.
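A minimal sketch of raising an error from a handler (the validation
shown is illustrative only)::

    class ItemHandler(RequestHandler):
        def get(self, item_id):
            if not item_id.isdigit():
                # The log message goes to the server log; the user only
                # sees it when the application runs in debug mode.
                raise HTTPError(400, "bad item id %r", item_id)
            self.write("item %s" % item_id)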
"""
def __init__(self, status_code, log_message=None, *args, **kwargs):
self.status_code = status_code
self.log_message = log_message
self.args = args
self.reason = kwargs.get('reason', None)
if log_message and not args:
self.log_message = log_message.replace('%', '%%')
def __str__(self):
message = "HTTP %d: %s" % (
self.status_code,
self.reason or httputil.responses.get(self.status_code, 'Unknown'))
if self.log_message:
return message + " (" + (self.log_message % self.args) + ")"
else:
return message
class Finish(Exception):
"""An exception that ends the request without producing an error response.
When `Finish` is raised in a `RequestHandler`, the request will end
(calling `RequestHandler.finish` if it hasn't already been called),
but the outgoing response will not be modified and the error-handling
methods (including `RequestHandler.write_error`) will not be called.
This can be a more convenient way to implement custom error pages
than overriding ``write_error`` (especially in library code)::
if self.current_user is None:
self.set_status(401)
self.set_header('WWW-Authenticate', 'Basic realm="something"')
raise Finish()
"""
pass
class MissingArgumentError(HTTPError):
"""Exception raised by `RequestHandler.get_argument`.
This is a subclass of `HTTPError`, so if it is uncaught a 400 response
code will be used instead of 500 (and a stack trace will not be logged).
.. versionadded:: 3.1
"""
def __init__(self, arg_name):
super(MissingArgumentError, self).__init__(
400, 'Missing argument %s' % arg_name)
self.arg_name = arg_name
class ErrorHandler(RequestHandler):
"""Generates an error response with ``status_code`` for all requests."""
def initialize(self, status_code):
self.set_status(status_code)
def prepare(self):
raise HTTPError(self._status_code)
def check_xsrf_cookie(self):
# POSTs to an ErrorHandler don't actually have side effects,
# so we don't need to check the xsrf token. This allows POSTs
# to the wrong url to return a 404 instead of 403.
pass
class RedirectHandler(RequestHandler):
"""Redirects the client to the given URL for all GET requests.
You should provide the keyword argument ``url`` to the handler, e.g.::
application = web.Application([
(r"/oldpath", web.RedirectHandler, {"url": "/newpath"}),
])
"""
def initialize(self, url, permanent=True):
self._url = url
self._permanent = permanent
def get(self):
self.redirect(self._url, permanent=self._permanent)
class StaticFileHandler(RequestHandler):
"""A simple handler that can serve static content from a directory.
A `StaticFileHandler` is configured automatically if you pass the
``static_path`` keyword argument to `Application`. This handler
can be customized with the ``static_url_prefix``, ``static_handler_class``,
and ``static_handler_args`` settings.
To map an additional path to this handler for a static data directory
you would add a line to your application like::
application = web.Application([
(r"/content/(.*)", web.StaticFileHandler, {"path": "/var/www"}),
])
The handler constructor requires a ``path`` argument, which specifies the
local root directory of the content to be served.
Note that a capture group in the regex is required to parse the value for
the ``path`` argument to the get() method (different than the constructor
argument above); see `URLSpec` for details.
To maximize the effectiveness of browser caching, this class supports
versioned urls (by default using the argument ``?v=``). If a version
is given, we instruct the browser to cache this file indefinitely.
`make_static_url` (also available as `RequestHandler.static_url`) can
be used to construct a versioned url.
This handler is intended primarily for use in development and light-duty
file serving; for heavy traffic it will be more efficient to use
a dedicated static file server (such as nginx or Apache). We support
the HTTP ``Accept-Ranges`` mechanism to return partial content (because
some browsers require this functionality to be present to seek in
HTML5 audio or video), but this handler should not be used with
files that are too large to fit comfortably in memory.
**Subclassing notes**
This class is designed to be extensible by subclassing, but because
of the way static urls are generated with class methods rather than
instance methods, the inheritance patterns are somewhat unusual.
Be sure to use the ``@classmethod`` decorator when overriding a
class method. Instance methods may use the attributes ``self.path``
``self.absolute_path``, and ``self.modified``.
Subclasses should only override methods discussed in this section;
overriding other methods is error-prone. Overriding
``StaticFileHandler.get`` is particularly problematic due to the
tight coupling with ``compute_etag`` and other methods.
To change the way static urls are generated (e.g. to match the behavior
of another server or CDN), override `make_static_url`, `parse_url_path`,
`get_cache_time`, and/or `get_version`.
To replace all interaction with the filesystem (e.g. to serve
static content from a database), override `get_content`,
`get_content_size`, `get_modified_time`, `get_absolute_path`, and
`validate_absolute_path`.
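A sketch of redirecting static urls to a CDN (the host name is a
placeholder)::

    class CDNStaticFileHandler(StaticFileHandler):
        @classmethod
        def make_static_url(cls, settings, path, include_version=True):
            # Let the base class build the versioned path, then
            # prefix it with the CDN host.
            url = super(CDNStaticFileHandler, cls).make_static_url(
                settings, path, include_version)
            return "https://cdn.example.com" + url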
.. versionchanged:: 3.1
Many of the methods for subclasses were added in Tornado 3.1.
"""
CACHE_MAX_AGE = 86400 * 365 * 10 # 10 years
_static_hashes = {}
_lock = threading.Lock() # protects _static_hashes
def initialize(self, path, default_filename=None):
self.root = path
self.default_filename = default_filename
@classmethod
def reset(cls):
with cls._lock:
cls._static_hashes = {}
def head(self, path):
return self.get(path, include_body=False)
@gen.coroutine
def get(self, path, include_body=True):
# Set up our path instance variables.
self.path = self.parse_url_path(path)
del path # make sure we don't refer to path instead of self.path again
absolute_path = self.get_absolute_path(self.root, self.path)
self.absolute_path = self.validate_absolute_path(
self.root, absolute_path)
if self.absolute_path is None:
return
self.modified = self.get_modified_time()
self.set_headers()
if self.should_return_304():
self.set_status(304)
return
request_range = None
range_header = self.request.headers.get("Range")
if range_header:
# As per RFC 2616 14.16, if an invalid Range header is specified,
# the request will be treated as if the header didn't exist.
request_range = httputil._parse_request_range(range_header)
size = self.get_content_size()
if request_range:
start, end = request_range
if (start is not None and start >= size) or end == 0:
# As per RFC 2616 14.35.1, a range is not satisfiable only: if
# the first requested byte is equal to or greater than the
# content, or when a suffix with length 0 is specified
self.set_status(416) # Range Not Satisfiable
self.set_header("Content-Type", "text/plain")
self.set_header("Content-Range", "bytes */%s" % (size, ))
return
if start is not None and start < 0:
start += size
if end is not None and end > size:
# Clients sometimes blindly use a large range to limit their
# download size; cap the endpoint at the actual file size.
end = size
# Note: only return HTTP 206 if less than the entire range has been
# requested. Not only is this semantically correct, but Chrome
# refuses to play audio if it gets an HTTP 206 in response to
# ``Range: bytes=0-``.
if size != (end or size) - (start or 0):
self.set_status(206) # Partial Content
self.set_header("Content-Range",
httputil._get_content_range(start, end, size))
else:
start = end = None
if start is not None and end is not None:
content_length = end - start
elif end is not None:
content_length = end
elif start is not None:
content_length = size - start
else:
content_length = size
self.set_header("Content-Length", content_length)
if include_body:
content = self.get_content(self.absolute_path, start, end)
if isinstance(content, bytes):
content = [content]
for chunk in content:
try:
self.write(chunk)
yield self.flush()
except iostream.StreamClosedError:
return
else:
assert self.request.method == "HEAD"
def compute_etag(self):
"""Sets the ``Etag`` header based on static url version.
This allows efficient ``If-None-Match`` checks against cached
versions, and sends the correct ``Etag`` for a partial response
(i.e. the same ``Etag`` as the full file).
.. versionadded:: 3.1
"""
version_hash = self._get_cached_version(self.absolute_path)
if not version_hash:
return None
return '"%s"' % (version_hash, )
def set_headers(self):
"""Sets the content and caching headers on the response.
.. versionadded:: 3.1
"""
self.set_header("Accept-Ranges", "bytes")
self.set_etag_header()
if self.modified is not None:
self.set_header("Last-Modified", self.modified)
content_type = self.get_content_type()
if content_type:
self.set_header("Content-Type", content_type)
cache_time = self.get_cache_time(self.path, self.modified,
content_type)
if cache_time > 0:
self.set_header("Expires", datetime.datetime.utcnow() +
datetime.timedelta(seconds=cache_time))
self.set_header("Cache-Control", "max-age=" + str(cache_time))
self.set_extra_headers(self.path)
def should_return_304(self):
"""Returns True if the headers indicate that we should return 304.
.. versionadded:: 3.1
"""
if self.check_etag_header():
return True
# Check the If-Modified-Since, and don't send the result if the
# content has not been modified
ims_value = self.request.headers.get("If-Modified-Since")
if ims_value is not None:
date_tuple = email.utils.parsedate(ims_value)
if date_tuple is not None:
if_since = datetime.datetime(*date_tuple[:6])
if if_since >= self.modified:
return True
return False
@classmethod
def get_absolute_path(cls, root, path):
"""Returns the absolute location of ``path`` relative to ``root``.
``root`` is the path configured for this `StaticFileHandler`
(in most cases the ``static_path`` `Application` setting).
This class method may be overridden in subclasses. By default
it returns a filesystem path, but other strings may be used
as long as they are unique and understood by the subclass's
overridden `get_content`.
.. versionadded:: 3.1
"""
abspath = os.path.abspath(os.path.join(root, path))
return abspath
def validate_absolute_path(self, root, absolute_path):
"""Validate and return the absolute path.
``root`` is the configured path for the `StaticFileHandler`,
and ``path`` is the result of `get_absolute_path`
This is an instance method called during request processing,
so it may raise `HTTPError` or use methods like
`RequestHandler.redirect` (return None after redirecting to
halt further processing). This is where 404 errors for missing files
are generated.
This method may modify the path before returning it, but note that
any such modifications will not be understood by `make_static_url`.
In instance methods, this method's result is available as
``self.absolute_path``.
.. versionadded:: 3.1
"""
root = os.path.abspath(root)
# os.path.abspath strips a trailing /
# it needs to be temporarily added back for requests to root/
if not (absolute_path + os.path.sep).startswith(root):
raise HTTPError(403, "%s is not in root static directory",
self.path)
if (os.path.isdir(absolute_path) and
self.default_filename is not None):
# need to look at the request.path here for when path is empty
# but there is some prefix to the path that was already
# trimmed by the routing
if not self.request.path.endswith("/"):
self.redirect(self.request.path + "/", permanent=True)
return
absolute_path = os.path.join(absolute_path, self.default_filename)
if not os.path.exists(absolute_path):
raise HTTPError(404)
if not os.path.isfile(absolute_path):
raise HTTPError(403, "%s is not a file", self.path)
return absolute_path
@classmethod
def get_content(cls, abspath, start=None, end=None):
"""Retrieve the content of the requested resource which is located
at the given absolute path.
This class method may be overridden by subclasses. Note that its
signature is different from other overridable class methods
(no ``settings`` argument); this is deliberate to ensure that
``abspath`` is able to stand on its own as a cache key.
This method should either return a byte string or an iterator
of byte strings. The latter is preferred for large files
as it helps reduce memory fragmentation.
.. versionadded:: 3.1
"""
with open(abspath, "rb") as file:
if start is not None:
file.seek(start)
if end is not None:
remaining = end - (start or 0)
else:
remaining = None
while True:
chunk_size = 64 * 1024
if remaining is not None and remaining < chunk_size:
chunk_size = remaining
chunk = file.read(chunk_size)
if chunk:
if remaining is not None:
remaining -= len(chunk)
yield chunk
else:
if remaining is not None:
assert remaining == 0
return
@classmethod
def get_content_version(cls, abspath):
"""Returns a version string for the resource at the given path.
This class method may be overridden by subclasses. The
default implementation is a hash of the file's contents.
.. versionadded:: 3.1
"""
data = cls.get_content(abspath)
hasher = hashlib.md5()
if isinstance(data, bytes):
hasher.update(data)
else:
for chunk in data:
hasher.update(chunk)
return hasher.hexdigest()
def _stat(self):
if not hasattr(self, '_stat_result'):
self._stat_result = os.stat(self.absolute_path)
return self._stat_result
def get_content_size(self):
"""Retrieve the total size of the resource at the given path.
This method may be overridden by subclasses.
.. versionadded:: 3.1
.. versionchanged:: 4.0
This method is now always called, instead of only when
partial results are requested.
"""
stat_result = self._stat()
return stat_result[stat.ST_SIZE]
def get_modified_time(self):
"""Returns the time that ``self.absolute_path`` was last modified.
May be overridden in subclasses. Should return a `~datetime.datetime`
object or None.
.. versionadded:: 3.1
"""
stat_result = self._stat()
modified = datetime.datetime.utcfromtimestamp(
stat_result[stat.ST_MTIME])
return modified
def get_content_type(self):
"""Returns the ``Content-Type`` header to be used for this request.
.. versionadded:: 3.1
"""
mime_type, encoding = mimetypes.guess_type(self.absolute_path)
return mime_type
def set_extra_headers(self, path):
"""For subclass to add extra headers to the response"""
pass
def get_cache_time(self, path, modified, mime_type):
"""Override to customize cache control behavior.
Return a positive number of seconds to make the result
cacheable for that amount of time or 0 to mark resource as
cacheable for an unspecified amount of time (subject to
browser heuristics).
By default returns cache expiry of 10 years for resources requested
with ``v`` argument.
"""
return self.CACHE_MAX_AGE if "v" in self.request.arguments else 0
@classmethod
def make_static_url(cls, settings, path, include_version=True):
"""Constructs a versioned url for the given path.
This method may be overridden in subclasses (but note that it
is a class method rather than an instance method). Subclasses
are only required to implement the signature
``make_static_url(cls, settings, path)``; other keyword
arguments may be passed through `~RequestHandler.static_url`
but are not standard.
``settings`` is the `Application.settings` dictionary. ``path``
is the static path being requested. The url returned should be
relative to the current host.
``include_version`` determines whether the generated URL should
include the query string containing the version hash of the
file corresponding to the given ``path``.
"""
url = settings.get('static_url_prefix', '/static/') + path
if not include_version:
return url
version_hash = cls.get_version(settings, path)
if not version_hash:
return url
return '%s?v=%s' % (url, version_hash)
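# A hedged illustration of the URL shape produced above, assuming a settings
# dict with ``static_path`` set and a file that exists on disk (paths are
# hypothetical):
#
#     settings = {"static_path": "/var/www/static",
#                 "static_url_prefix": "/static/"}
#     StaticFileHandler.make_static_url(settings, "css/site.css")
#     # -> "/static/css/site.css?v=<hash of file contents>"
#     StaticFileHandler.make_static_url(settings, "css/site.css",
#                                       include_version=False)
#     # -> "/static/css/site.css"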
def parse_url_path(self, url_path):
"""Converts a static URL path into a filesystem path.
``url_path`` is the path component of the URL with
``static_url_prefix`` removed. The return value should be
filesystem path relative to ``static_path``.
This is the inverse of `make_static_url`.
"""
if os.path.sep != "/":
url_path = url_path.replace("/", os.path.sep)
return url_path
@classmethod
def get_version(cls, settings, path):
"""Generate the version string to be used in static URLs.
``settings`` is the `Application.settings` dictionary and ``path``
is the relative location of the requested asset on the filesystem.
The returned value should be a string, or ``None`` if no version
could be determined.
.. versionchanged:: 3.1
This method was previously recommended for subclasses to override;
`get_content_version` is now preferred as it allows the base
class to handle caching of the result.
"""
abs_path = cls.get_absolute_path(settings['static_path'], path)
return cls._get_cached_version(abs_path)
@classmethod
def _get_cached_version(cls, abs_path):
with cls._lock:
hashes = cls._static_hashes
if abs_path not in hashes:
try:
hashes[abs_path] = cls.get_content_version(abs_path)
except Exception:
gen_log.error("Could not open static file %r", abs_path)
hashes[abs_path] = None
hsh = hashes.get(abs_path)
if hsh:
return hsh
return None
class FallbackHandler(RequestHandler):
"""A `RequestHandler` that wraps another HTTP server callback.
The fallback is a callable object that accepts an
`~.httputil.HTTPServerRequest`, such as an `Application` or
`tornado.wsgi.WSGIContainer`. This is most useful to use both
Tornado ``RequestHandlers`` and WSGI in the same server. Typical
usage::
wsgi_app = tornado.wsgi.WSGIContainer(
django.core.handlers.wsgi.WSGIHandler())
application = tornado.web.Application([
(r"/foo", FooHandler),
(r".*", FallbackHandler, dict(fallback=wsgi_app)),
])
"""
def initialize(self, fallback):
self.fallback = fallback
def prepare(self):
self.fallback(self.request)
self._finished = True
class OutputTransform(object):
"""A transform modifies the result of an HTTP request (e.g., GZip encoding)
Applications are not expected to create their own OutputTransforms
or interact with them directly; the framework chooses which transforms
(if any) to apply.
"""
def __init__(self, request):
pass
def transform_first_chunk(self, status_code, headers, chunk, finishing):
return status_code, headers, chunk
def transform_chunk(self, chunk, finishing):
return chunk
class GZipContentEncoding(OutputTransform):
"""Applies the gzip content encoding to the response.
See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11
.. versionchanged:: 4.0
Now compresses all mime types beginning with ``text/``, instead
of just a whitelist. (the whitelist is still used for certain
non-text mime types).
"""
# Whitelist of compressible mime types (in addition to any types
# beginning with "text/").
CONTENT_TYPES = set(["application/javascript", "application/x-javascript",
"application/xml", "application/atom+xml",
"application/json", "application/xhtml+xml"])
MIN_LENGTH = 5
def __init__(self, request):
self._gzipping = "gzip" in request.headers.get("Accept-Encoding", "")
def _compressible_type(self, ctype):
return ctype.startswith('text/') or ctype in self.CONTENT_TYPES
def transform_first_chunk(self, status_code, headers, chunk, finishing):
if 'Vary' in headers:
headers['Vary'] += b', Accept-Encoding'
else:
headers['Vary'] = b'Accept-Encoding'
if self._gzipping:
ctype = _unicode(headers.get("Content-Type", "")).split(";")[0]
self._gzipping = self._compressible_type(ctype) and \
(not finishing or len(chunk) >= self.MIN_LENGTH) and \
("Content-Encoding" not in headers)
if self._gzipping:
headers["Content-Encoding"] = "gzip"
self._gzip_value = BytesIO()
self._gzip_file = gzip.GzipFile(mode="w", fileobj=self._gzip_value)
chunk = self.transform_chunk(chunk, finishing)
if "Content-Length" in headers:
# The original content length is no longer correct.
# If this is the last (and only) chunk, we can set the new
# content-length; otherwise we remove it and fall back to
# chunked encoding.
if finishing:
headers["Content-Length"] = str(len(chunk))
else:
del headers["Content-Length"]
return status_code, headers, chunk
def transform_chunk(self, chunk, finishing):
if self._gzipping:
self._gzip_file.write(chunk)
if finishing:
self._gzip_file.close()
else:
self._gzip_file.flush()
chunk = self._gzip_value.getvalue()
self._gzip_value.truncate(0)
self._gzip_value.seek(0)
return chunk
def authenticated(method):
"""Decorate methods with this to require that the user be logged in.
If the user is not logged in, they will be redirected to the configured
`login url <RequestHandler.get_login_url>`.
If you configure a login url with a query parameter, Tornado will
assume you know what you're doing and use it as-is. If not, it
will add a `next` parameter so the login page knows where to send
you once you're logged in.
"""
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
if not self.current_user:
if self.request.method in ("GET", "HEAD"):
url = self.get_login_url()
if "?" not in url:
if urlparse.urlsplit(url).scheme:
# if login url is absolute, make next absolute too
next_url = self.request.full_url()
else:
next_url = self.request.uri
url += "?" + urlencode(dict(next=next_url))
self.redirect(url)
return
raise HTTPError(403)
return method(self, *args, **kwargs)
return wrapper
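# A typical use of the decorator above (sketch; ``ProfileHandler`` is a
# hypothetical handler, not part of this module):
#
#     class ProfileHandler(RequestHandler):
#         @authenticated
#         def get(self):
#             self.write("Hello, %s" % self.current_user)
#
# Unauthenticated GET/HEAD requests are redirected to the login url with a
# ``next`` parameter; other methods receive a 403.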
class UIModule(object):
"""A re-usable, modular UI unit on a page.
UI modules often execute additional queries, and they can include
additional CSS and JavaScript that will be included in the output
page, which is automatically inserted on page render.
Subclasses of UIModule must override the `render` method.
"""
def __init__(self, handler):
self.handler = handler
self.request = handler.request
self.ui = handler.ui
self.locale = handler.locale
@property
def current_user(self):
return self.handler.current_user
def render(self, *args, **kwargs):
"""Override in subclasses to return this module's output."""
raise NotImplementedError()
def embedded_javascript(self):
"""Override to return a JavaScript string
to be embedded in the page."""
return None
def javascript_files(self):
"""Override to return a list of JavaScript files needed by this module.
If the return values are relative paths, they will be passed to
`RequestHandler.static_url`; otherwise they will be used as-is.
"""
return None
def embedded_css(self):
"""Override to return a CSS string
that will be embedded in the page."""
return None
def css_files(self):
"""Override to return a list of CSS files required by this module.
If the return values are relative paths, they will be passed to
`RequestHandler.static_url`; otherwise they will be used as-is.
"""
return None
def html_head(self):
"""Override to return an HTML string that will be put in the <head/>
element.
"""
return None
def html_body(self):
"""Override to return an HTML string that will be put at the end of
the <body/> element.
"""
return None
def render_string(self, path, **kwargs):
"""Renders a template and returns it as a string."""
return self.handler.render_string(path, **kwargs)
class _linkify(UIModule):
def render(self, text, **kwargs):
return escape.linkify(text, **kwargs)
class _xsrf_form_html(UIModule):
def render(self):
return self.handler.xsrf_form_html()
class TemplateModule(UIModule):
"""UIModule that simply renders the given template.
{% module Template("foo.html") %} is similar to {% include "foo.html" %},
but the module version gets its own namespace (with kwargs passed to
Template()) instead of inheriting the outer template's namespace.
Templates rendered through this module also get access to UIModule's
automatic javascript/css features. Simply call set_resources
inside the template and give it keyword arguments corresponding to
the methods on UIModule: {{ set_resources(js_files=static_url("my.js")) }}
Note that these resources are output once per template file, not once
per instantiation of the template, so they must not depend on
any arguments to the template.
"""
def __init__(self, handler):
super(TemplateModule, self).__init__(handler)
# keep resources in both a list and a dict to preserve order
self._resource_list = []
self._resource_dict = {}
def render(self, path, **kwargs):
def set_resources(**kwargs):
if path not in self._resource_dict:
self._resource_list.append(kwargs)
self._resource_dict[path] = kwargs
else:
if self._resource_dict[path] != kwargs:
raise ValueError("set_resources called with different "
"resources for the same template")
return ""
return self.render_string(path, set_resources=set_resources,
**kwargs)
def _get_resources(self, key):
return (r[key] for r in self._resource_list if key in r)
def embedded_javascript(self):
return "\n".join(self._get_resources("embedded_javascript"))
def javascript_files(self):
result = []
for f in self._get_resources("javascript_files"):
if isinstance(f, (unicode_type, bytes)):
result.append(f)
else:
result.extend(f)
return result
def embedded_css(self):
return "\n".join(self._get_resources("embedded_css"))
def css_files(self):
result = []
for f in self._get_resources("css_files"):
if isinstance(f, (unicode_type, bytes)):
result.append(f)
else:
result.extend(f)
return result
def html_head(self):
return "".join(self._get_resources("html_head"))
def html_body(self):
return "".join(self._get_resources("html_body"))
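# A hedged sketch of the set_resources hook described in the TemplateModule
# docstring (the template name and file paths are hypothetical):
#
#     {% module Template("foo.html", title="Hi") %}
#
# and inside foo.html, once per template file:
#
#     {{ set_resources(css_files=static_url("css/foo.css"),
#                      embedded_javascript="console.log('foo');") }}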
class _UIModuleNamespace(object):
"""Lazy namespace which creates UIModule proxies bound to a handler."""
def __init__(self, handler, ui_modules):
self.handler = handler
self.ui_modules = ui_modules
def __getitem__(self, key):
return self.handler._ui_module(key, self.ui_modules[key])
def __getattr__(self, key):
try:
return self[key]
except KeyError as e:
raise AttributeError(str(e))
class URLSpec(object):
"""Specifies mappings between URLs and handlers."""
def __init__(self, pattern, handler, kwargs=None, name=None):
"""Parameters:
* ``pattern``: Regular expression to be matched. Any groups
in the regex will be passed in to the handler's get/post/etc
methods as arguments.
* ``handler``: `RequestHandler` subclass to be invoked.
* ``kwargs`` (optional): A dictionary of additional arguments
to be passed to the handler's constructor.
* ``name`` (optional): A name for this handler. Used by
`Application.reverse_url`.
"""
if not pattern.endswith('$'):
pattern += '$'
self.regex = re.compile(pattern)
assert len(self.regex.groupindex) in (0, self.regex.groups), \
("groups in url regexes must either be all named or all "
"positional: %r" % self.regex.pattern)
if isinstance(handler, str):
# import the Module and instantiate the class
# Must be a fully qualified name (module.ClassName)
handler = import_object(handler)
self.handler_class = handler
self.kwargs = kwargs or {}
self.name = name
self._path, self._group_count = self._find_groups()
def __repr__(self):
return '%s(%r, %s, kwargs=%r, name=%r)' % \
(self.__class__.__name__, self.regex.pattern,
self.handler_class, self.kwargs, self.name)
def _find_groups(self):
"""Returns a tuple (reverse string, group count) for a url.
For example: Given the url pattern /([0-9]{4})/([a-z-]+)/, this method
would return ('/%s/%s/', 2).
"""
pattern = self.regex.pattern
if pattern.startswith('^'):
pattern = pattern[1:]
if pattern.endswith('$'):
pattern = pattern[:-1]
if self.regex.groups != pattern.count('('):
# The pattern is too complicated for our simplistic matching,
# so we can't support reversing it.
return (None, None)
pieces = []
for fragment in pattern.split('('):
if ')' in fragment:
paren_loc = fragment.index(')')
if paren_loc >= 0:
pieces.append('%s' + fragment[paren_loc + 1:])
else:
pieces.append(fragment)
return (''.join(pieces), self.regex.groups)
def reverse(self, *args):
assert self._path is not None, \
"Cannot reverse url regex " + self.regex.pattern
assert len(args) == self._group_count, "required number of arguments "\
"not found"
if not len(args):
return self._path
converted_args = []
for a in args:
if not isinstance(a, (unicode_type, bytes)):
a = str(a)
converted_args.append(escape.url_escape(utf8(a), plus=False))
return self._path % tuple(converted_args)
url = URLSpec
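# A small illustration of URLSpec reversal (sketch only; ``EntryHandler`` is
# a hypothetical handler class):
#
#     spec = url(r"/entry/([0-9]{4})/([a-z-]+)/", EntryHandler, name="entry")
#     spec.reverse(2014, "hello-world")
#     # -> '/entry/2014/hello-world/'
#
# Patterns whose groups cannot be mapped back onto a simple format string
# cause ``reverse`` to fail with an assertion error.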
if hasattr(hmac, 'compare_digest'): # python 3.3
_time_independent_equals = hmac.compare_digest
else:
def _time_independent_equals(a, b):
if len(a) != len(b):
return False
result = 0
if isinstance(a[0], int): # python3 byte strings
for x, y in zip(a, b):
result |= x ^ y
else: # python2
for x, y in zip(a, b):
result |= ord(x) ^ ord(y)
return result == 0
def create_signed_value(secret, name, value, version=None, clock=None,
key_version=None):
if version is None:
version = DEFAULT_SIGNED_VALUE_VERSION
if clock is None:
clock = time.time
timestamp = utf8(str(int(clock())))
value = base64.b64encode(utf8(value))
if version == 1:
signature = _create_signature_v1(secret, name, value, timestamp)
value = b"|".join([value, timestamp, signature])
return value
elif version == 2:
# The v2 format consists of a version number and a series of
# length-prefixed fields "%d:%s", the last of which is a
# signature, all separated by pipes. All numbers are in
# decimal format with no leading zeros. The signature is an
# HMAC-SHA256 of the whole string up to that point, including
# the final pipe.
#
# The fields are:
# - format version (i.e. 2; no length prefix)
# - key version (integer, default is 0)
# - timestamp (integer seconds since epoch)
# - name (not encoded; assumed to be ~alphanumeric)
# - value (base64-encoded)
# - signature (hex-encoded; no length prefix)
def format_field(s):
return utf8("%d:" % len(s)) + utf8(s)
to_sign = b"|".join([
b"2",
format_field(str(key_version or 0)),
format_field(timestamp),
format_field(name),
format_field(value),
b''])
if isinstance(secret, dict):
assert key_version is not None, 'Key version must be set when sign key dict is used'
assert version >= 2, 'Version must be at least 2 for key version support'
secret = secret[key_version]
signature = _create_signature_v2(secret, to_sign)
return to_sign + signature
else:
raise ValueError("Unsupported version %d" % version)
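# An illustration of the v2 layout built above, using made-up values
# (name "foo", base64 value "aGVsbG8gd29ybGQ=", key version 0, timestamp
# 1300000000); the trailing signature is elided:
#
#     2|1:0|10:1300000000|3:foo|16:aGVsbG8gd29ybGQ=|<hex hmac-sha256>
#
# Every field except the leading version and the trailing signature is
# length-prefixed, and the signature covers everything up to and including
# the final pipe.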
# A leading version number in decimal
# with no leading zeros, followed by a pipe.
_signed_value_version_re = re.compile(br"^([1-9][0-9]*)\|(.*)$")
def _get_version(value):
# Figures out what version value is. Version 1 did not include an
# explicit version field and started with arbitrary base64 data,
# which makes this tricky.
m = _signed_value_version_re.match(value)
if m is None:
version = 1
else:
try:
version = int(m.group(1))
if version > 999:
# Certain payloads from the version-less v1 format may
# be parsed as valid integers. Due to base64 padding
# restrictions, this can only happen for numbers whose
# length is a multiple of 4, so we can treat all
# numbers up to 999 as versions, and for the rest we
# fall back to v1 format.
version = 1
except ValueError:
version = 1
return version
def decode_signed_value(secret, name, value, max_age_days=31,
clock=None, min_version=None):
if clock is None:
clock = time.time
if min_version is None:
min_version = DEFAULT_SIGNED_VALUE_MIN_VERSION
if min_version > 2:
raise ValueError("Unsupported min_version %d" % min_version)
if not value:
return None
value = utf8(value)
version = _get_version(value)
if version < min_version:
return None
if version == 1:
return _decode_signed_value_v1(secret, name, value,
max_age_days, clock)
elif version == 2:
return _decode_signed_value_v2(secret, name, value,
max_age_days, clock)
else:
return None
def _decode_signed_value_v1(secret, name, value, max_age_days, clock):
parts = utf8(value).split(b"|")
if len(parts) != 3:
return None
signature = _create_signature_v1(secret, name, parts[0], parts[1])
if not _time_independent_equals(parts[2], signature):
gen_log.warning("Invalid cookie signature %r", value)
return None
timestamp = int(parts[1])
if timestamp < clock() - max_age_days * 86400:
gen_log.warning("Expired cookie %r", value)
return None
if timestamp > clock() + 31 * 86400:
# _cookie_signature does not hash a delimiter between the
# parts of the cookie, so an attacker could transfer trailing
# digits from the payload to the timestamp without altering the
# signature. For backwards compatibility, sanity-check timestamp
# here instead of modifying _cookie_signature.
gen_log.warning("Cookie timestamp in future; possible tampering %r",
value)
return None
if parts[1].startswith(b"0"):
gen_log.warning("Tampered cookie %r", value)
return None
try:
return base64.b64decode(parts[0])
except Exception:
return None
def _decode_fields_v2(value):
def _consume_field(s):
length, _, rest = s.partition(b':')
n = int(length)
field_value = rest[:n]
# In python 3, indexing bytes returns small integers; we must
# use a slice to get a byte string as in python 2.
if rest[n:n + 1] != b'|':
raise ValueError("malformed v2 signed value field")
rest = rest[n + 1:]
return field_value, rest
rest = value[2:] # remove version number
key_version, rest = _consume_field(rest)
timestamp, rest = _consume_field(rest)
name_field, rest = _consume_field(rest)
value_field, passed_sig = _consume_field(rest)
return int(key_version), timestamp, name_field, value_field, passed_sig
def _decode_signed_value_v2(secret, name, value, max_age_days, clock):
try:
key_version, timestamp, name_field, value_field, passed_sig = _decode_fields_v2(value)
except ValueError:
return None
signed_string = value[:-len(passed_sig)]
if isinstance(secret, dict):
try:
secret = secret[key_version]
except KeyError:
return None
expected_sig = _create_signature_v2(secret, signed_string)
if not _time_independent_equals(passed_sig, expected_sig):
return None
if name_field != utf8(name):
return None
timestamp = int(timestamp)
if timestamp < clock() - max_age_days * 86400:
# The signature has expired.
return None
try:
return base64.b64decode(value_field)
except Exception:
return None
def get_signature_key_version(value):
value = utf8(value)
version = _get_version(value)
if version < 2:
return None
try:
key_version, _, _, _, _ = _decode_fields_v2(value)
except ValueError:
return None
return key_version
def _create_signature_v1(secret, *parts):
hash = hmac.new(utf8(secret), digestmod=hashlib.sha1)
for part in parts:
hash.update(utf8(part))
return utf8(hash.hexdigest())
def _create_signature_v2(secret, s):
hash = hmac.new(utf8(secret), digestmod=hashlib.sha256)
hash.update(utf8(s))
return utf8(hash.hexdigest())
def _unquote_or_none(s):
"""None-safe wrapper around url_unescape to handle unmatched optional
groups correctly.
Note that args are passed as bytes so the handler can decide what
encoding to use.
"""
if s is None:
return s
return escape.url_unescape(s, encoding=None, plus=False)
|
wolfram74/numerical_methods_iserles_notes
|
venv/lib/python2.7/site-packages/tornado/web.py
|
Python
|
mit
| 125,071
|
# Natural Language Toolkit: Translation metrics
#
# Copyright (C) 2001-2017 NLTK Project
# Author: Will Zhang <wilzzha@gmail.com>
# Guan Gui <ggui@student.unimelb.edu.au>
# Steven Bird <stevenbird1@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import division
def alignment_error_rate(reference, hypothesis, possible=None):
"""
Return the Alignment Error Rate (AER) of an alignment
with respect to a "gold standard" reference alignment.
Return an error rate between 0.0 (perfect alignment) and 1.0 (no
alignment).
>>> from nltk.translate import Alignment
>>> ref = Alignment([(0, 0), (1, 1), (2, 2)])
>>> test = Alignment([(0, 0), (1, 2), (2, 1)])
>>> alignment_error_rate(ref, test) # doctest: +ELLIPSIS
0.6666666666666667
:type reference: Alignment
:param reference: A gold standard alignment (sure alignments)
:type hypothesis: Alignment
:param hypothesis: A hypothesis alignment (aka. candidate alignments)
:type possible: Alignment or None
:param possible: A gold standard reference of possible alignments
(defaults to *reference* if None)
:rtype: float or None
"""
if possible is None:
possible = reference
else:
assert(reference.issubset(possible)) # sanity check
return (1.0 - (len(hypothesis & reference) + len(hypothesis & possible)) /
float(len(hypothesis) + len(reference)))
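# The rate computed above is the standard AER:
#
#     AER = 1 - (|A ∩ S| + |A ∩ P|) / (|A| + |S|)
#
# where A is the hypothesis, S the sure (reference) alignments and P the
# possible alignments. With possible=None (so P == S) the doctest above gives
# 1 - (1 + 1) / (3 + 3) = 0.666..., since only (0, 0) is shared.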
|
sdoran35/hate-to-hugs
|
venv/lib/python3.6/site-packages/nltk/translate/metrics.py
|
Python
|
mit
| 1,498
|
"""
Support for the LIFX platform that implements lights.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/light.lifx/
"""
# pylint: disable=missing-docstring
import colorsys
import logging
from homeassistant.components.light import (
ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, ATTR_RGB_COLOR, ATTR_TRANSITION, Light)
from homeassistant.helpers.event import track_time_change
_LOGGER = logging.getLogger(__name__)
REQUIREMENTS = ['liffylights==0.9.4']
DEPENDENCIES = []
CONF_SERVER = "server" # server address configuration item
CONF_BROADCAST = "broadcast" # broadcast address configuration item
SHORT_MAX = 65535 # short int maximum
BYTE_MAX = 255 # byte maximum
TEMP_MIN = 2500 # lifx minimum temperature
TEMP_MAX = 9000 # lifx maximum temperature
TEMP_MIN_HASS = 154 # home assistant minimum temperature
TEMP_MAX_HASS = 500 # home assistant maximum temperature
class LIFX():
"""Representation of a LIFX light."""
def __init__(self, add_devices_callback,
server_addr=None, broadcast_addr=None):
"""Initialize the light."""
import liffylights
self._devices = []
self._add_devices_callback = add_devices_callback
self._liffylights = liffylights.LiffyLights(
self.on_device,
self.on_power,
self.on_color,
server_addr,
broadcast_addr)
def find_bulb(self, ipaddr):
"""Find the bulb matching the given IP address."""
bulb = None
for device in self._devices:
if device.ipaddr == ipaddr:
bulb = device
break
return bulb
# pylint: disable=too-many-arguments
def on_device(self, ipaddr, name, power, hue, sat, bri, kel):
"""Handle a bulb discovery or state report."""
bulb = self.find_bulb(ipaddr)
if bulb is None:
_LOGGER.debug("new bulb %s %s %d %d %d %d %d",
ipaddr, name, power, hue, sat, bri, kel)
bulb = LIFXLight(self._liffylights, ipaddr, name,
power, hue, sat, bri, kel)
self._devices.append(bulb)
self._add_devices_callback([bulb])
else:
_LOGGER.debug("update bulb %s %s %d %d %d %d %d",
ipaddr, name, power, hue, sat, bri, kel)
bulb.set_power(power)
bulb.set_color(hue, sat, bri, kel)
bulb.update_ha_state()
# pylint: disable=too-many-arguments
def on_color(self, ipaddr, hue, sat, bri, kel):
"""Handle a color report from a bulb."""
bulb = self.find_bulb(ipaddr)
if bulb is not None:
bulb.set_color(hue, sat, bri, kel)
bulb.update_ha_state()
def on_power(self, ipaddr, power):
"""Handle a power state report from a bulb."""
bulb = self.find_bulb(ipaddr)
if bulb is not None:
bulb.set_power(power)
bulb.update_ha_state()
# pylint: disable=unused-argument
def poll(self, now):
"""Poll the bulbs periodically."""
self.probe()
def probe(self, address=None):
"""Probe the network for bulbs."""
self._liffylights.probe(address)
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
"""Setup the LIFX platform."""
server_addr = config.get(CONF_SERVER, None)
broadcast_addr = config.get(CONF_BROADCAST, None)
lifx_library = LIFX(add_devices_callback, server_addr, broadcast_addr)
# Register our poll service
track_time_change(hass, lifx_library.poll, second=[10, 40])
lifx_library.probe()
def convert_rgb_to_hsv(rgb):
"""Convert Home Assistant RGB values to HSV values."""
red, green, blue = [_ / BYTE_MAX for _ in rgb]
hue, saturation, brightness = colorsys.rgb_to_hsv(red, green, blue)
return [int(hue * SHORT_MAX),
int(saturation * SHORT_MAX),
int(brightness * SHORT_MAX)]
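# A quick sanity check of the conversion above (illustrative):
#
#     convert_rgb_to_hsv([255, 0, 0])   # pure red
#     # -> [0, 65535, 65535]  (hue 0, full saturation, full brightness)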
# pylint: disable=too-many-instance-attributes
class LIFXLight(Light):
"""Representation of a LIFX light."""
# pylint: disable=too-many-arguments
def __init__(self, liffy, ipaddr, name, power, hue,
saturation, brightness, kelvin):
"""Initialize the light."""
_LOGGER.debug("LIFXLight: %s %s",
ipaddr, name)
self._liffylights = liffy
self._ip = ipaddr
self.set_name(name)
self.set_power(power)
self.set_color(hue, saturation, brightness, kelvin)
@property
def should_poll(self):
"""No polling needed for LIFX light."""
return False
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def ipaddr(self):
"""Return the IP address of the device."""
return self._ip
@property
def rgb_color(self):
"""Return the RGB value."""
_LOGGER.debug("rgb_color: [%d %d %d]",
self._rgb[0], self._rgb[1], self._rgb[2])
return self._rgb
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
brightness = int(self._bri / (BYTE_MAX + 1))
_LOGGER.debug("brightness: %d", brightness)
return brightness
@property
def color_temp(self):
"""Return the color temperature."""
temperature = int(TEMP_MIN_HASS + (TEMP_MAX_HASS - TEMP_MIN_HASS) *
(self._kel - TEMP_MIN) / (TEMP_MAX - TEMP_MIN))
_LOGGER.debug("color_temp: %d", temperature)
return temperature
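# The linear mapping above scales LIFX kelvin (2500-9000) onto Home
# Assistant's mired-style range (154-500); for example, a bulb reporting
# 5750 K maps to int(154 + 346 * 0.5) == 327.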
@property
def is_on(self):
"""Return true if device is on."""
_LOGGER.debug("is_on: %d", self._power)
return self._power != 0
def turn_on(self, **kwargs):
"""Turn the device on."""
if ATTR_TRANSITION in kwargs:
fade = kwargs[ATTR_TRANSITION] * 1000
else:
fade = 0
if ATTR_RGB_COLOR in kwargs:
hue, saturation, brightness = \
convert_rgb_to_hsv(kwargs[ATTR_RGB_COLOR])
else:
hue = self._hue
saturation = self._sat
brightness = self._bri
if ATTR_BRIGHTNESS in kwargs:
brightness = kwargs[ATTR_BRIGHTNESS] * (BYTE_MAX + 1)
else:
brightness = self._bri
if ATTR_COLOR_TEMP in kwargs:
# pylint: disable=fixme
# TODO: Use color_temperature_mired_to_kelvin from util.color
kelvin = int(((TEMP_MAX - TEMP_MIN) *
(kwargs[ATTR_COLOR_TEMP] - TEMP_MIN_HASS) /
(TEMP_MAX_HASS - TEMP_MIN_HASS)) + TEMP_MIN)
else:
kelvin = self._kel
_LOGGER.debug("turn_on: %s (%d) %d %d %d %d %d",
self._ip, self._power,
hue, saturation, brightness, kelvin, fade)
if self._power == 0:
self._liffylights.set_power(self._ip, 65535, fade)
self._liffylights.set_color(self._ip, hue, saturation,
brightness, kelvin, fade)
def turn_off(self, **kwargs):
"""Turn the device off."""
if ATTR_TRANSITION in kwargs:
fade = kwargs[ATTR_TRANSITION] * 1000
else:
fade = 0
_LOGGER.debug("turn_off: %s %d", self._ip, fade)
self._liffylights.set_power(self._ip, 0, fade)
def set_name(self, name):
"""Set name of the light."""
self._name = name
def set_power(self, power):
"""Set power state value."""
_LOGGER.debug("set_power: %d", power)
self._power = (power != 0)
def set_color(self, hue, sat, bri, kel):
"""Set color state values."""
self._hue = hue
self._sat = sat
self._bri = bri
self._kel = kel
red, green, blue = colorsys.hsv_to_rgb(hue / SHORT_MAX,
sat / SHORT_MAX,
bri / SHORT_MAX)
red = int(red * BYTE_MAX)
green = int(green * BYTE_MAX)
blue = int(blue * BYTE_MAX)
_LOGGER.debug("set_color: %d %d %d %d [%d %d %d]",
hue, sat, bri, kel, red, green, blue)
self._rgb = [red, green, blue]
|
deisi/home-assistant
|
homeassistant/components/light/lifx.py
|
Python
|
mit
| 8,424
|
class Singleton(object):
"""
Singleton design pattern.
"""
_instance = None
def __init__(self):
if self.__class__ is Singleton:
raise TypeError("Cannot instantiate directly.")
@classmethod
def get_instance(cls):
"""Get the class instance."""
if cls._instance is None:
cls._instance = cls()
return cls._instance
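# A minimal usage sketch (the subclass name below is hypothetical and not
# part of this module):
#
#     class Config(Singleton):
#         def __init__(self):
#             super(Config, self).__init__()
#             self.values = {}
#
#     Config.get_instance() is Config.get_instance()   # -> True
#     Singleton()                                       # -> raises TypeError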
|
adamBrinek/tuned
|
tuned/tuned/patterns.py
|
Python
|
gpl-2.0
| 335
|
# -*- coding: utf-8 -*-
# code by Avigdor (https://github.com/cubicle-vdo/xbmc-israel)
import urllib, sys, xbmcplugin, xbmcgui, xbmcaddon, xbmc, os, json
AddonID = 'plugin.video.playlistLoader'
Addon = xbmcaddon.Addon(AddonID)
AddonName = Addon.getAddonInfo("name")
icon = Addon.getAddonInfo('icon')
addonDir = Addon.getAddonInfo('path').decode("utf-8")
libDir = os.path.join(addonDir, 'resources', 'lib')
sys.path.insert(0, libDir)
import common
addon_data_dir = os.path.join(xbmc.translatePath("special://userdata/addon_data").decode("utf-8"), AddonID)
if not os.path.exists(addon_data_dir):
os.makedirs(addon_data_dir)
playlistsFile = os.path.join(addon_data_dir, "playLists.txt")
tmpListFile = os.path.join(addon_data_dir, 'tempList.txt')
favoritesFile = os.path.join(addon_data_dir, 'favorites.txt')
if not (os.path.isfile(favoritesFile)):
f = open(favoritesFile, 'w')
f.write('[]')
f.close()
def getLocaleString(id):
return Addon.getLocalizedString(id).encode('utf-8')
def Categories():
AddDir("[COLOR yellow][B]{0}[/B][/COLOR]".format(getLocaleString(10001)), "settings" , 20, os.path.join(addonDir, "resources", "images", "NewList.ico"), isFolder=False)
AddDir("[COLOR white][B][{0}][/B][/COLOR]".format(getLocaleString(10003)), "favorites" ,30 ,os.path.join(addonDir, "resources", "images", "bright_yellow_star.png"))
i = 0
list = common.ReadList(playlistsFile)
for item in list:
mode = 1 if item["url"].find(".plx") > 0 else 2
name = common.GetEncodeString(item["name"])
image = item.get('image', '')
if mode == 1:
logos = ''
else:
logos = item.get('logos', '')
AddDir("[COLOR blue][{0}][/COLOR]".format(name) ,item["url"].encode("utf-8"), mode, image.encode("utf-8"), logos.encode("utf-8"), index=i)
i += 1
def AddNewList():
listName = GetKeyboardText(getLocaleString(10004)).strip()
if len(listName) < 1:
return
listUrl = GetChoice(10002, 10005, 10006, 10016, 10017, fileType=1, fileMask='.plx|.m3u|.m3u8')
if len(listUrl) < 1:
return
image = GetChoice(10022, 10022, 10022, 10024, 10025, 10021, fileType=2)
if listUrl.endswith('.plx'):
logosUrl = ''
else:
logosUrl = GetChoice(10018, 10019, 10020, 10019, 10020, 10021, fileType=0)
if logosUrl.startswith('http') and not logosUrl.endswith('/'):
logosUrl += '/'
list = common.ReadList(playlistsFile)
for item in list:
if item["url"].lower() == listUrl.lower():
xbmc.executebuiltin('Notification({0}, "{1}" {2}, 5000, {3})'.format(AddonName, listName, getLocaleString(10007), icon))
return
list.append({"name": listName.decode("utf-8"), "url": listUrl, "image": image, "logos": logosUrl})
if common.SaveList(playlistsFile, list):
xbmc.executebuiltin("XBMC.Container.Refresh()")
def GetChoice(choiceTitle, fileTitle, urlTitle, choiceFile, choiceUrl, choiceNone=None, fileType=1, fileMask=None, defaultText=""):
choice = ''
choiceList = [getLocaleString(choiceFile), getLocaleString(choiceUrl)]
if choiceNone != None:
choiceList.append(getLocaleString(choiceNone))
method = GetSourceLocation(getLocaleString(choiceTitle), choiceList)
if method == 0:
if not defaultText.startswith('http'):
defaultText = ""
choice = GetKeyboardText(getLocaleString(fileTitle), defaultText).strip().decode("utf-8")
elif method == 1:
if defaultText.startswith('http'):
defaultText = ""
choice = xbmcgui.Dialog().browse(fileType, getLocaleString(urlTitle), 'myprograms', fileMask, False, False, defaultText).decode("utf-8")
return choice
def RemoveFromLists(index, listFile):
list = common.ReadList(listFile)
if index < 0 or index >= len(list):
return
del list[index]
common.SaveList(listFile, list)
xbmc.executebuiltin("XBMC.Container.Refresh()")
def PlxCategory(url):
tmpList = []
list = common.plx2list(url)
background = list[0]["background"]
for channel in list[1:]:
iconimage = "" if not channel.has_key("thumb") else common.GetEncodeString(channel["thumb"])
name = common.GetEncodeString(channel["name"])
if channel["type"] == 'playlist':
AddDir("[COLOR blue][{0}][/COLOR]".format(name) ,channel["url"].encode("utf-8"), 1, iconimage, background=background.encode("utf-8"))
else:
AddDir(name, channel["url"].encode("utf-8"), 3, iconimage, isFolder=False, background=background)
tmpList.append({"url": channel["url"], "image": iconimage.decode("utf-8"), "name": name.decode("utf-8")})
common.SaveList(tmpListFile, tmpList)
def m3uCategory(url, logos):
tmpList = []
list = common.m3u2list(url)
for channel in list:
name = common.GetEncodeString(channel["display_name"])
image = channel.get("tvg_logo", "")
if image == "":
image = channel.get("logo", "")
if logos is not None and logos != '' and image is not None and image != '' and not image.startswith('http'):
image = logos + image
url = common.GetEncodeString(channel["url"])
AddDir(name ,url, 3, image, isFolder=False)
tmpList.append({"url": url.decode("utf-8"), "image": image.decode("utf-8"), "name": name.decode("utf-8")})
common.SaveList(tmpListFile, tmpList)
def PlayUrl(name, url, iconimage=None):
print '--- Playing "{0}". {1}'.format(name, url)
listitem = xbmcgui.ListItem(path=url, thumbnailImage=iconimage)
listitem.setInfo(type="Video", infoLabels={ "Title": name })
xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, listitem)
def AddDir(name, url, mode, iconimage, logos="", index=-1, move=0, isFolder=True, background=None):
u=sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)+"&iconimage="+urllib.quote_plus(iconimage)+"&logos="+urllib.quote_plus(logos)+"&index="+str(index)+"&move="+str(move)
liz = xbmcgui.ListItem(name, iconImage=iconimage, thumbnailImage=iconimage)
liz.setInfo(type="Video", infoLabels={ "Title": name})
listMode = 21 # Lists
if background != None:
liz.setProperty('fanart_image', background)
if mode == 1 or mode == 2:
items = [(getLocaleString(10008), 'XBMC.RunPlugin({0}?index={1}&mode=22)'.format(sys.argv[0], index)),
(getLocaleString(10026), 'XBMC.RunPlugin({0}?index={1}&mode=23)'.format(sys.argv[0], index)),
(getLocaleString(10027), 'XBMC.RunPlugin({0}?index={1}&mode=24)'.format(sys.argv[0], index)),
(getLocaleString(10028), 'XBMC.RunPlugin({0}?index={1}&mode=25)'.format(sys.argv[0], index))]
if mode == 2:
items.append((getLocaleString(10029), 'XBMC.RunPlugin({0}?index={1}&mode=26)'.format(sys.argv[0], index)))
elif mode == 3:
liz.setProperty('IsPlayable', 'true')
liz.addContextMenuItems(items = [('{0}'.format(getLocaleString(10009)), 'XBMC.RunPlugin({0}?url={1}&mode=31&iconimage={2}&name={3})'.format(sys.argv[0], urllib.quote_plus(url), iconimage, name))])
elif mode == 32:
liz.setProperty('IsPlayable', 'true')
items = [(getLocaleString(10010), 'XBMC.RunPlugin({0}?index={1}&mode=33)'.format(sys.argv[0], index)),
(getLocaleString(10026), 'XBMC.RunPlugin({0}?index={1}&mode=35)'.format(sys.argv[0], index)),
(getLocaleString(10027), 'XBMC.RunPlugin({0}?index={1}&mode=36)'.format(sys.argv[0], index)),
(getLocaleString(10028), 'XBMC.RunPlugin({0}?index={1}&mode=37)'.format(sys.argv[0], index))]
listMode = 38 # Favourites
if mode == 1 or mode == 2 or mode == 32:
items += [(getLocaleString(10030), 'XBMC.RunPlugin({0}?index={1}&mode={2}&move=-1)'.format(sys.argv[0], index, listMode)),
(getLocaleString(10031), 'XBMC.RunPlugin({0}?index={1}&mode={2}&move=1)'.format(sys.argv[0], index, listMode)),
(getLocaleString(10032), 'XBMC.RunPlugin({0}?index={1}&mode={2}&move=0)'.format(sys.argv[0], index, listMode))]
liz.addContextMenuItems(items)
xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz, isFolder=isFolder)
def GetKeyboardText(title = "", defaultText = ""):
keyboard = xbmc.Keyboard(defaultText, title)
keyboard.doModal()
text = "" if not keyboard.isConfirmed() else keyboard.getText()
return text
def GetSourceLocation(title, list):
dialog = xbmcgui.Dialog()
answer = dialog.select(title, list)
return answer
def AddFavorites(url, iconimage, name):
favList = common.ReadList(favoritesFile)
for item in favList:
if item["url"].lower() == url.decode("utf-8").lower():
xbmc.executebuiltin("Notification({0}, '{1}' {2}, 5000, {3})".format(AddonName, name, getLocaleString(10011), icon))
return
list = common.ReadList(tmpListFile)
for channel in list:
if channel["name"].lower() == name.decode("utf-8").lower():
url = channel["url"].encode("utf-8")
iconimage = channel["image"].encode("utf-8")
break
if not iconimage:
iconimage = ""
data = {"url": url.decode("utf-8"), "image": iconimage.decode("utf-8"), "name": name.decode("utf-8")}
favList.append(data)
common.SaveList(favoritesFile, favList)
xbmc.executebuiltin("Notification({0}, '{1}' {2}, 5000, {3})".format(AddonName, name, getLocaleString(10012), icon))
def ListFavorites():
AddDir("[COLOR yellow][B]{0}[/B][/COLOR]".format(getLocaleString(10013)), "favorites" ,34 ,os.path.join(addonDir, "resources", "images", "bright_yellow_star.png"), isFolder=False)
list = common.ReadList(favoritesFile)
i = 0
for channel in list:
AddDir(channel["name"].encode("utf-8"), channel["url"].encode("utf-8"), 32, channel["image"].encode("utf-8"), index=i, isFolder=False)
i += 1
def AddNewFavorite():
chName = GetKeyboardText(getLocaleString(10014))
if len(chName) < 1:
return
chUrl = GetKeyboardText(getLocaleString(10015))
if len(chUrl) < 1:
return
image = GetChoice(10023, 10023, 10023, 10024, 10025, 10021, fileType=2)
favList = common.ReadList(favoritesFile)
for item in favList:
if item["url"].lower() == chUrl.decode("utf-8").lower():
xbmc.executebuiltin("Notification({0}, '{1}' {2}, 5000, {3})".format(AddonName, chName, getLocaleString(10011), icon))
return
data = {"url": chUrl.decode("utf-8"), "image": image, "name": chName.decode("utf-8")}
favList.append(data)
if common.SaveList(favoritesFile, favList):
xbmc.executebuiltin("XBMC.Container.Refresh()")
def ChangeKey(index, listFile, key, title):
list = common.ReadList(listFile)
str = GetKeyboardText(getLocaleString(title), list[index][key].encode("utf-8"))
if len(str) < 1:
return
list[index][key] = str.decode("utf-8")
if common.SaveList(listFile, list):
xbmc.executebuiltin("XBMC.Container.Refresh()")
def ChangeChoice(index, listFile, key, choiceTitle, fileTitle, urlTitle, choiceFile, choiceUrl, choiceNone=None, fileType=1, fileMask=None):
list = common.ReadList(listFile)
if key == "logos":
listUrl = list[index].get("url", "")
if listUrl.endswith('.plx'):
return
defaultText = list[index].get(key, "")
str = GetChoice(choiceTitle, fileTitle, urlTitle, choiceFile, choiceUrl, choiceNone, fileType, fileMask, defaultText.encode("utf-8"))
if key == "url" and len(str) < 1:
return
elif key == "logos" and str.startswith('http') and not str.endswith('/'):
str += '/'
list[index][key] = str.decode("utf-8")
if common.SaveList(listFile, list):
xbmc.executebuiltin("XBMC.Container.Refresh()")
def MoveInList(index, step, listFile):
theList = common.ReadList(listFile)
if index + step >= len(theList) or index + step < 0:
return
if step == 0:
step = GetIndexFromUser(len(theList), index)
if step < 0:
tempList = theList[0:index + step] + [theList[index]] + theList[index + step:index] + theList[index + 1:]
elif step > 0:
tempList = theList[0:index] + theList[index + 1:index + 1 + step] + [theList[index]] + theList[index + 1 + step:]
else:
return
common.SaveList(listFile, tempList)
xbmc.executebuiltin("XBMC.Container.Refresh()")
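# A worked example of the slicing above (values hypothetical): with
# theList == [a, b, c, d], index == 3 and step == -2 the item is moved two
# places up:
#
#     theList[0:1] + [theList[3]] + theList[1:3] + theList[4:]
#     # -> [a, d, b, c]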
def GetIndexFromUser(listLen, index):
dialog = xbmcgui.Dialog()
location = dialog.input('{0} (1-{1})'.format(getLocaleString(10033), listLen), type=xbmcgui.INPUT_NUMERIC)
if location is None or location == "":
return 0
try:
location = int(location) - 1
if location >= listLen or location < 0:
return 0
except:
return 0
return location - index
def get_params():
param = []
paramstring = sys.argv[2]
if len(paramstring) >= 2:
params = sys.argv[2]
cleanedparams = params.replace('?','')
if (params[len(params)-1] == '/'):
params = params[0:len(params)-2]
pairsofparams = cleanedparams.split('&')
param = {}
for i in range(len(pairsofparams)):
splitparams = {}
splitparams = pairsofparams[i].split('=')
if (len(splitparams)) == 2:
param[splitparams[0].lower()] = splitparams[1]
return param
params=get_params()
url=None
logos=None
name=None
mode=None
iconimage=None
description=None
try:
url = urllib.unquote_plus(params["url"])
except:
pass
try:
logos = urllib.unquote_plus(params.get("logos", ''))
except:
pass
try:
name = urllib.unquote_plus(params["name"])
except:
pass
try:
iconimage = urllib.unquote_plus(params["iconimage"])
except:
pass
try:
mode = int(params["mode"])
except:
pass
try:
index = int(params["index"])
except:
pass
try:
move = int(params["move"])
except:
pass
if mode == None:
Categories()
elif mode == 1:
PlxCategory(url)
elif mode == 2:
m3uCategory(url, logos)
elif mode == 3 or mode == 32:
PlayUrl(name, url, iconimage)
elif mode == 20:
AddNewList()
elif mode == 21:
MoveInList(index, move, playlistsFile)
elif mode == 22:
RemoveFromLists(index, playlistsFile)
elif mode == 23:
ChangeKey(index, playlistsFile, "name", 10004)
elif mode == 24:
ChangeChoice(index, playlistsFile, "url", 10002, 10005, 10006, 10016, 10017, None, 1, '.plx|.m3u|.m3u8')
elif mode == 25:
ChangeChoice(index, playlistsFile, "image", 10022, 10022, 10022, 10024, 10025, 10021, 2)
elif mode == 26:
ChangeChoice(index, playlistsFile, "logos", 10018, 10019, 10020, 10019, 10020, 10021, 0)
elif mode == 27:
common.DelFile(playlistsFile)
sys.exit()
elif mode == 30:
ListFavorites()
elif mode == 31:
AddFavorites(url, iconimage, name)
elif mode == 33:
RemoveFromLists(index, favoritesFile)
elif mode == 34:
AddNewFavorite()
elif mode == 35:
ChangeKey(index, favoritesFile, "name", 10014)
elif mode == 36:
ChangeKey(index, favoritesFile, "url", 10015)
elif mode == 37:
ChangeChoice(index, favoritesFile, "image", 10023, 10023, 10023, 10024, 10025, 10021, 2)
elif mode == 38:
MoveInList(index, move, favoritesFile)
elif mode == 39:
common.DelFile(favoritesFile)
sys.exit()
xbmcplugin.endOfDirectory(int(sys.argv[1]))
|
wndias/bc.repository
|
plugin.video.playlistLoader/default.py
|
Python
|
gpl-2.0
| 14,265
|
# Copyright (C) 2007-2010 Samuel Abels.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
The Telnet protocol.
"""
from Exscript.util.tty import get_terminal_size
from Exscript.protocols import telnetlib
from Exscript.protocols.Protocol import Protocol
from Exscript.protocols.Exception import ProtocolException, \
TimeoutException, \
DriverReplacedException, \
ExpectCancelledException
class Telnet(Protocol):
"""
The Telnet protocol adapter.
"""
def __init__(self, **kwargs):
Protocol.__init__(self, **kwargs)
self.tn = None
def _telnetlib_received(self, data):
self._receive_cb(data)
self.buffer.append(data)
def _connect_hook(self, hostname, port):
assert self.tn is None
rows, cols = get_terminal_size()
self.tn = telnetlib.Telnet(hostname,
port or 23,
connect_timeout = self.connect_timeout,
termsize = (rows, cols),
termtype = self.termtype,
stderr = self.stderr,
receive_callback = self._telnetlib_received)
if self.debug >= 5:
self.tn.set_debuglevel(1)
if self.tn is None:
return False
return True
def send(self, data):
self._dbg(4, 'Sending %s' % repr(data))
try:
self.tn.write(data)
except Exception:
self._dbg(1, 'Error while writing to connection')
raise
def _domatch(self, prompt, flush):
if flush:
func = self.tn.expect
else:
func = self.tn.waitfor
# Wait for a prompt.
clean = self.get_driver().clean_response_for_re_match
self.response = None
try:
result, match, self.response = func(prompt, self.timeout, cleanup = clean)
except Exception:
self._dbg(1, 'Error while waiting for ' + repr(prompt))
raise
if match:
self._dbg(2, "Got a prompt, match was %s" % repr(match.group()))
self.buffer.pop(len(self.response))
self._dbg(5, "Response was %s" % repr(self.response))
if result == -1:
error = 'Error while waiting for response from device'
raise TimeoutException(error)
if result == -2:
if self.driver_replaced:
self.driver_replaced = False
raise DriverReplacedException()
else:
raise ExpectCancelledException()
if self.response is None:
raise ProtocolException('whoops - response is None')
return result, match
def cancel_expect(self):
self.tn.cancel_expect = True
def _set_terminal_size(self, rows, cols):
self.tn.set_window_size(rows, cols)
def interact(self, key_handlers = None, handle_window_size = True):
return self._open_shell(self.tn.sock, key_handlers, handle_window_size)
def close(self, force = False):
if self.tn is None:
return
if not force:
try:
self.response = self.tn.read_all()
except Exception:
pass
self.tn.close()
self.tn = None
self.buffer.clear()
|
mpenning/exscript
|
src/Exscript/protocols/Telnet.py
|
Python
|
gpl-2.0
| 4,121
|
# -*- coding: utf-8 -*-
#
# papyon - a python client library for Msn
#
# Copyright (C) 2005-2007 Ali Sabil <ali.sabil@gmail.com>
# Copyright (C) 2005-2006 Ole André Vadla Ravnås <oleavr@gmail.com>
# Copyright (C) 2007 Johann Prieur <johann.prieur@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from constants import ProtocolConstant
def _msn_challenge(data):
"""
Compute an answer for MSN Challenge from a given data
@param data: the challenge string sent by the server
@type data: string
"""
import struct
import hashlib
def little_endify(value, c_type="L"):
"""Transform the given value into little endian"""
return struct.unpack(">" + c_type, struct.pack("<" + c_type, value))[0]
md5_digest = hashlib.md5(data + ProtocolConstant.PRODUCT_KEY).digest()
# Make array of md5 string ints
md5_integers = struct.unpack("<llll", md5_digest)
md5_integers = [(x & 0x7fffffff) for x in md5_integers]
# Make array of chl string ints
data += ProtocolConstant.PRODUCT_ID
amount = 8 - len(data) % 8
data += "".zfill(amount)
chl_integers = struct.unpack("<%di" % (len(data)/4), data)
# Make the key
high = 0
low = 0
i = 0
while i < len(chl_integers) - 1:
temp = chl_integers[i]
temp = (ProtocolConstant.CHL_MAGIC_NUM * temp) % 0x7FFFFFFF
temp += high
temp = md5_integers[0] * temp + md5_integers[1]
temp = temp % 0x7FFFFFFF
high = chl_integers[i + 1]
high = (high + temp) % 0x7FFFFFFF
high = md5_integers[2] * high + md5_integers[3]
high = high % 0x7FFFFFFF
low = low + high + temp
i += 2
high = little_endify((high + md5_integers[1]) % 0x7FFFFFFF)
low = little_endify((low + md5_integers[3]) % 0x7FFFFFFF)
key = (high << 32L) + low
key = little_endify(key, "Q")
longs = [x for x in struct.unpack(">QQ", md5_digest)]
longs = [little_endify(x, "Q") for x in longs]
longs = [x ^ key for x in longs]
longs = [little_endify(abs(x), "Q") for x in longs]
out = ""
for value in longs:
value = hex(long(value))
value = value[2:-1]
value = value.zfill(16)
out += value.lower()
return out
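# For reference, little_endify above swaps byte order by packing
# little-endian and unpacking big-endian, e.g. (illustrative):
#
#     little_endify(0x12345678)               # -> 0x78563412
#     little_endify(0x0102030405060708, "Q")  # -> 0x0807060504030201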
|
billiob/papyon
|
papyon/msnp/challenge.py
|
Python
|
gpl-2.0
| 2,914
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) Enrique J. Hernández 2014
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Methods related to mail notifications.
This is specific to OpenChange.
"""
from email.mime.text import MIMEText
import smtplib
def notify_user_email(from_addr, email, tracker_url,
smtp_addr='localhost', duplicated=False):
"""Notify a user after sending a report so they can track the issue later.
:param str from_addr: the from email address
:param str email: the user's email address
:param str tracker_url: the tracker URL
:param str smtp_addr: the SMTP server
:param bool duplicated: whether the sent crash report is a duplicate
"""
to_addr = email
if duplicated:
text = """This crash report is a duplicate from {0}.""".format(tracker_url)
else:
text = """The crash report was created at {0}.""".format(tracker_url)
text += """
\n\nYou can follow the crash report fixing status there.\n\n
Thanks very much for reporting it!\n
----
OpenChange team"""
msg = MIMEText(text, 'plain')
msg['Subject'] = '[OpenChange crash report] Your crash report was uploaded!'
msg['From'] = from_addr
msg['To'] = to_addr
s = smtplib.SMTP(smtp_addr)
# sendmail function takes 3 arguments: sender's address, recipient's address
# and message to send - here it is sent as one string.
s.sendmail(from_addr, to_addr, msg.as_string())
s.quit()
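# A hedged usage sketch (the addresses and URL below are placeholders, not
# real endpoints):
#
#     notify_user_email('crash-reports@example.com', 'user@example.com',
#                       'https://tracker.example.com/crash/1234',
#                       smtp_addr='localhost', duplicated=False)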
|
Zentyal/openchange
|
script/bug-analysis/buganalysis/mail.py
|
Python
|
gpl-3.0
| 2,084
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing unique constraint on 'Vote', fields ['object_id', 'content_type', 'user']
db.delete_unique(u'vote', ['object_id', 'content_type_id', 'user_id'])
# Deleting field 'Vote.content_type'
db.delete_column(u'vote', 'content_type_id')
# Deleting field 'Vote.object_id'
db.delete_column(u'vote', 'object_id')
# Changing field 'Vote.voted_post'
db.alter_column(u'vote', 'voted_post_id', self.gf('django.db.models.fields.related.ForeignKey')(default=0, to=orm['askbot.Post']))
# Adding unique constraint on 'Vote', fields ['user', 'voted_post']
db.create_unique(u'vote', ['user_id', 'voted_post_id'])
# Deleting field 'Activity.question'
db.delete_column(u'activity', 'question_id')
def backwards(self, orm):
# Removing unique constraint on 'Vote', fields ['user', 'voted_post']
db.delete_unique(u'vote', ['user_id', 'voted_post_id'])
# User chose to not deal with backwards NULL issues for 'Vote.content_type'
raise RuntimeError("Cannot reverse this migration. 'Vote.content_type' and its values cannot be restored.")
# User chose to not deal with backwards NULL issues for 'Vote.object_id'
raise RuntimeError("Cannot reverse this migration. 'Vote.object_id' and its values cannot be restored.")
# Changing field 'Vote.voted_post'
db.alter_column(u'vote', 'voted_post_id', self.gf('django.db.models.fields.related.ForeignKey')(null=True, to=orm['askbot.Post']))
# Adding unique constraint on 'Vote', fields ['object_id', 'content_type', 'user']
db.create_unique(u'vote', ['object_id', 'content_type_id', 'user_id'])
# Adding field 'Activity.question'
db.add_column(u'activity', 'question', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['askbot.Question'], null=True), keep_default=False)
models = {
'askbot.activity': {
'Meta': {'object_name': 'Activity', 'db_table': "u'activity'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'activity_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_auditted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'question_post': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Post']", 'null': 'True'}),
'receiving_users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'received_activity'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'recipients': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'incoming_activity'", 'symmetrical': 'False', 'through': "orm['askbot.ActivityAuditStatus']", 'to': "orm['auth.User']"}),
'summary': ('django.db.models.fields.TextField', [], {'default': "''"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.activityauditstatus': {
'Meta': {'unique_together': "(('user', 'activity'),)", 'object_name': 'ActivityAuditStatus'},
'activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Activity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.anonymousanswer': {
'Meta': {'object_name': 'AnonymousAnswer'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'anonymous_answers'", 'to': "orm['askbot.Question']"}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'askbot.anonymousquestion': {
'Meta': {'object_name': 'AnonymousQuestion'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'askbot.answer': {
'Meta': {'object_name': 'Answer', 'db_table': "u'answer'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['askbot.Question']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'askbot.award': {
'Meta': {'object_name': 'Award', 'db_table': "u'award'"},
'awarded_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'badge': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_badge'", 'to': "orm['askbot.BadgeData']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_user'", 'to': "orm['auth.User']"})
},
'askbot.badgedata': {
'Meta': {'ordering': "('slug',)", 'object_name': 'BadgeData'},
'awarded_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'awarded_to': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'badges'", 'symmetrical': 'False', 'through': "orm['askbot.Award']", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'askbot.comment': {
'Meta': {'ordering': "('-added_at',)", 'object_name': 'Comment', 'db_table': "u'comment'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'html': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '2048'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'offensive_flag_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': "orm['auth.User']"})
},
'askbot.emailfeedsetting': {
'Meta': {'object_name': 'EmailFeedSetting'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feed_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'frequency': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reported_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'subscriber': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notification_subscriptions'", 'to': "orm['auth.User']"})
},
'askbot.favoritequestion': {
'Meta': {'object_name': 'FavoriteQuestion', 'db_table': "u'favorite_question'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Thread']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_favorite_questions'", 'to': "orm['auth.User']"})
},
'askbot.markedtag': {
'Meta': {'object_name': 'MarkedTag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_selections'", 'to': "orm['askbot.Tag']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tag_selections'", 'to': "orm['auth.User']"})
},
'askbot.post': {
'Meta': {'object_name': 'Post'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'posts'", 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_posts'", 'null': 'True', 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_posts'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_posts'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comment_posts'", 'null': 'True', 'to': "orm['askbot.Post']"}),
'post_type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'self_answer': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['askbot.Answer']"}),
'self_comment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['askbot.Comment']"}),
'self_question': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['askbot.Question']"}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'posts'", 'to': "orm['askbot.Thread']"}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
# "Post-processing" - added manually to add support for URL mapping
            'old_question_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True', 'default': 'None', 'unique': 'True'}),
            'old_answer_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True', 'default': 'None', 'unique': 'True'}),
            'old_comment_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True', 'default': 'None', 'unique': 'True'}),
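            # Presumably these nullable, unique id columns preserve the original
            # per-type (question/answer/comment) ids so that old URLs can be
            # redirected to the matching Post rows -- the "URL mapping" referred
            # to in the comment above.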
},
'askbot.postrevision': {
'Meta': {'ordering': "('-revision',)", 'unique_together': "(('answer', 'revision'), ('question', 'revision'))", 'object_name': 'PostRevision'},
'answer': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'revisions'", 'null': 'True', 'to': "orm['askbot.Answer']"}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'postrevisions'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'revisions'", 'null': 'True', 'to': "orm['askbot.Question']"}),
'revised_at': ('django.db.models.fields.DateTimeField', [], {}),
'revision': ('django.db.models.fields.PositiveIntegerField', [], {}),
'revision_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'tagnames': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '125', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '300', 'blank': 'True'})
},
'askbot.question': {
'Meta': {'object_name': 'Question', 'db_table': "u'question'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questions'", 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questions'", 'unique': 'True', 'to': "orm['askbot.Thread']"}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'askbot.questionview': {
'Meta': {'object_name': 'QuestionView'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'viewed'", 'to': "orm['askbot.Question']"}),
'when': ('django.db.models.fields.DateTimeField', [], {}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'question_views'", 'to': "orm['auth.User']"})
},
'askbot.repute': {
'Meta': {'object_name': 'Repute', 'db_table': "u'repute'"},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'negative': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'positive': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Question']", 'null': 'True', 'blank': 'True'}),
'reputation': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'reputation_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'reputed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.tag': {
'Meta': {'ordering': "('-used_count', 'name')", 'object_name': 'Tag', 'db_table': "u'tag'"},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_tags'", 'to': "orm['auth.User']"}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_tags'", 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'used_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'askbot.thread': {
'Meta': {'object_name': 'Thread'},
'accepted_answer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Answer']", 'null': 'True', 'blank': 'True'}),
'answer_accepted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'answer_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'close_reason': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'closed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'closed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'favorited_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'unused_favorite_threads'", 'symmetrical': 'False', 'through': "orm['askbot.FavoriteQuestion']", 'to': "orm['auth.User']"}),
'favourite_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'followed_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'followed_threads'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_activity_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_activity_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'unused_last_active_in_threads'", 'to': "orm['auth.User']"}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'threads'", 'symmetrical': 'False', 'to': "orm['askbot.Tag']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'askbot.vote': {
'Meta': {'unique_together': "(('user', 'voted_post'),)", 'object_name': 'Vote', 'db_table': "u'vote'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'votes'", 'to': "orm['auth.User']"}),
'vote': ('django.db.models.fields.SmallIntegerField', [], {}),
'voted_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'voted_post': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'post_votes'", 'to': "orm['askbot.Post']"})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['askbot']
|
PearsonIOKI/compose-forum
|
askbot/migrations/0093_auto__del_field_vote_content_type__del_field_vote_object_id__chg_field.py
|
Python
|
gpl-3.0
| 32,357
|
"""Tests for http/cookiejar.py."""
import os
import re
import test.support
import time
import unittest
import urllib.request
from http.cookiejar import (time2isoz, http2time, iso2time, time2netscape,
parse_ns_headers, join_header_words, split_header_words, Cookie,
CookieJar, DefaultCookiePolicy, LWPCookieJar, MozillaCookieJar,
LoadError, lwp_cookie_str, DEFAULT_HTTP_PORT, escape_path,
reach, is_HDN, domain_match, user_domain_match, request_path,
request_port, request_host)
class DateTimeTests(unittest.TestCase):
def test_time2isoz(self):
base = 1019227000
day = 24*3600
self.assertEqual(time2isoz(base), "2002-04-19 14:36:40Z")
self.assertEqual(time2isoz(base+day), "2002-04-20 14:36:40Z")
self.assertEqual(time2isoz(base+2*day), "2002-04-21 14:36:40Z")
self.assertEqual(time2isoz(base+3*day), "2002-04-22 14:36:40Z")
az = time2isoz()
bz = time2isoz(500000)
for text in (az, bz):
self.assertRegex(text, r"^\d{4}-\d\d-\d\d \d\d:\d\d:\d\dZ$",
"bad time2isoz format: %s %s" % (az, bz))
def test_http2time(self):
def parse_date(text):
return time.gmtime(http2time(text))[:6]
self.assertEqual(parse_date("01 Jan 2001"), (2001, 1, 1, 0, 0, 0.0))
# this test will break around year 2070
self.assertEqual(parse_date("03-Feb-20"), (2020, 2, 3, 0, 0, 0.0))
# this test will break around year 2048
self.assertEqual(parse_date("03-Feb-98"), (1998, 2, 3, 0, 0, 0.0))
def test_http2time_formats(self):
# test http2time for supported dates. Test cases with 2 digit year
# will probably break in year 2044.
tests = [
'Thu, 03 Feb 1994 00:00:00 GMT', # proposed new HTTP format
'Thursday, 03-Feb-94 00:00:00 GMT', # old rfc850 HTTP format
'Thursday, 03-Feb-1994 00:00:00 GMT', # broken rfc850 HTTP format
'03 Feb 1994 00:00:00 GMT', # HTTP format (no weekday)
'03-Feb-94 00:00:00 GMT', # old rfc850 (no weekday)
'03-Feb-1994 00:00:00 GMT', # broken rfc850 (no weekday)
'03-Feb-1994 00:00 GMT', # broken rfc850 (no weekday, no seconds)
'03-Feb-1994 00:00', # broken rfc850 (no weekday, no seconds, no tz)
'02-Feb-1994 24:00', # broken rfc850 (no weekday, no seconds,
# no tz) using hour 24 with yesterday date
'03-Feb-94', # old rfc850 HTTP format (no weekday, no time)
'03-Feb-1994', # broken rfc850 HTTP format (no weekday, no time)
'03 Feb 1994', # proposed new HTTP format (no weekday, no time)
# A few tests with extra space at various places
' 03 Feb 1994 0:00 ',
' 03-Feb-1994 ',
]
test_t = 760233600 # assume broken POSIX counting of seconds
result = time2isoz(test_t)
expected = "1994-02-03 00:00:00Z"
self.assertEqual(result, expected,
"%s => '%s' (%s)" % (test_t, result, expected))
for s in tests:
self.assertEqual(http2time(s), test_t, s)
self.assertEqual(http2time(s.lower()), test_t, s.lower())
self.assertEqual(http2time(s.upper()), test_t, s.upper())
def test_http2time_garbage(self):
for test in [
'',
'Garbage',
'Mandag 16. September 1996',
'01-00-1980',
'01-13-1980',
'00-01-1980',
'32-01-1980',
'01-01-1980 25:00:00',
'01-01-1980 00:61:00',
'01-01-1980 00:00:62',
]:
self.assertIsNone(http2time(test),
"http2time(%s) is not None\n"
"http2time(test) %s" % (test, http2time(test)))
def test_iso2time(self):
def parse_date(text):
return time.gmtime(iso2time(text))[:6]
# ISO 8601 compact format
self.assertEqual(parse_date("19940203T141529Z"),
(1994, 2, 3, 14, 15, 29))
# ISO 8601 with time behind UTC
self.assertEqual(parse_date("1994-02-03 07:15:29 -0700"),
(1994, 2, 3, 14, 15, 29))
# ISO 8601 with time ahead of UTC
self.assertEqual(parse_date("1994-02-03 19:45:29 +0530"),
(1994, 2, 3, 14, 15, 29))
def test_iso2time_formats(self):
# test iso2time for supported dates.
tests = [
'1994-02-03 00:00:00 -0000', # ISO 8601 format
'1994-02-03 00:00:00 +0000', # ISO 8601 format
'1994-02-03 00:00:00', # zone is optional
'1994-02-03', # only date
'1994-02-03T00:00:00', # Use T as separator
'19940203', # only date
'1994-02-02 24:00:00', # using hour-24 yesterday date
'19940203T000000Z', # ISO 8601 compact format
# A few tests with extra space at various places
' 1994-02-03 ',
' 1994-02-03T00:00:00 ',
]
test_t = 760233600 # assume broken POSIX counting of seconds
for s in tests:
self.assertEqual(iso2time(s), test_t, s)
self.assertEqual(iso2time(s.lower()), test_t, s.lower())
self.assertEqual(iso2time(s.upper()), test_t, s.upper())
def test_iso2time_garbage(self):
for test in [
'',
'Garbage',
'Thursday, 03-Feb-94 00:00:00 GMT',
'1980-00-01',
'1980-13-01',
'1980-01-00',
'1980-01-32',
'1980-01-01 25:00:00',
'1980-01-01 00:61:00',
'01-01-1980 00:00:62',
'01-01-1980T00:00:62',
            '19800101T250000Z'
            '1980-01-01 00:00:00 -2500',  # note: implicitly concatenated with the previous literal (no comma above)
]:
self.assertIsNone(iso2time(test),
"iso2time(%s) is not None\n"
"iso2time(test) %s" % (test, iso2time(test)))
class HeaderTests(unittest.TestCase):
def test_parse_ns_headers(self):
# quotes should be stripped
expected = [[('foo', 'bar'), ('expires', 2209069412), ('version', '0')]]
for hdr in [
'foo=bar; expires=01 Jan 2040 22:23:32 GMT',
'foo=bar; expires="01 Jan 2040 22:23:32 GMT"',
]:
self.assertEqual(parse_ns_headers([hdr]), expected)
def test_parse_ns_headers_version(self):
# quotes should be stripped
expected = [[('foo', 'bar'), ('version', '1')]]
for hdr in [
'foo=bar; version="1"',
'foo=bar; Version="1"',
]:
self.assertEqual(parse_ns_headers([hdr]), expected)
def test_parse_ns_headers_special_names(self):
# names such as 'expires' are not special in first name=value pair
# of Set-Cookie: header
# Cookie with name 'expires'
hdr = 'expires=01 Jan 2040 22:23:32 GMT'
expected = [[("expires", "01 Jan 2040 22:23:32 GMT"), ("version", "0")]]
self.assertEqual(parse_ns_headers([hdr]), expected)
def test_join_header_words(self):
joined = join_header_words([[("foo", None), ("bar", "baz")]])
self.assertEqual(joined, "foo; bar=baz")
self.assertEqual(join_header_words([[]]), "")
def test_split_header_words(self):
tests = [
("foo", [[("foo", None)]]),
("foo=bar", [[("foo", "bar")]]),
(" foo ", [[("foo", None)]]),
(" foo= ", [[("foo", "")]]),
(" foo=", [[("foo", "")]]),
(" foo= ; ", [[("foo", "")]]),
(" foo= ; bar= baz ", [[("foo", ""), ("bar", "baz")]]),
("foo=bar bar=baz", [[("foo", "bar"), ("bar", "baz")]]),
# doesn't really matter if this next fails, but it works ATM
("foo= bar=baz", [[("foo", "bar=baz")]]),
("foo=bar;bar=baz", [[("foo", "bar"), ("bar", "baz")]]),
('foo bar baz', [[("foo", None), ("bar", None), ("baz", None)]]),
("a, b, c", [[("a", None)], [("b", None)], [("c", None)]]),
(r'foo; bar=baz, spam=, foo="\,\;\"", bar= ',
[[("foo", None), ("bar", "baz")],
[("spam", "")], [("foo", ',;"')], [("bar", "")]]),
]
for arg, expect in tests:
try:
result = split_header_words([arg])
except:
import traceback, io
f = io.StringIO()
traceback.print_exc(None, f)
result = "(error -- traceback follows)\n\n%s" % f.getvalue()
self.assertEqual(result, expect, """
When parsing: '%s'
Expected: '%s'
Got: '%s'
""" % (arg, expect, result))
def test_roundtrip(self):
tests = [
("foo", "foo"),
("foo=bar", "foo=bar"),
(" foo ", "foo"),
("foo=", 'foo=""'),
("foo=bar bar=baz", "foo=bar; bar=baz"),
("foo=bar;bar=baz", "foo=bar; bar=baz"),
('foo bar baz', "foo; bar; baz"),
(r'foo="\"" bar="\\"', r'foo="\""; bar="\\"'),
('foo,,,bar', 'foo, bar'),
('foo=bar,bar=baz', 'foo=bar, bar=baz'),
('text/html; charset=iso-8859-1',
'text/html; charset="iso-8859-1"'),
('foo="bar"; port="80,81"; discard, bar=baz',
'foo=bar; port="80,81"; discard, bar=baz'),
(r'Basic realm="\"foo\\\\bar\""',
r'Basic; realm="\"foo\\\\bar\""')
]
for arg, expect in tests:
input = split_header_words([arg])
res = join_header_words(input)
self.assertEqual(res, expect, """
When parsing: '%s'
Expected: '%s'
Got: '%s'
Input was: '%s'
""" % (arg, expect, res, input))
class FakeResponse:
def __init__(self, headers=[], url=None):
"""
headers: list of RFC822-style 'Key: value' strings
"""
import email
self._headers = email.message_from_string("\n".join(headers))
self._url = url
def info(self): return self._headers
def interact_2965(cookiejar, url, *set_cookie_hdrs):
return _interact(cookiejar, url, set_cookie_hdrs, "Set-Cookie2")
def interact_netscape(cookiejar, url, *set_cookie_hdrs):
return _interact(cookiejar, url, set_cookie_hdrs, "Set-Cookie")
def _interact(cookiejar, url, set_cookie_hdrs, hdr_name):
"""Perform a single request / response cycle, returning Cookie: header."""
req = urllib.request.Request(url)
cookiejar.add_cookie_header(req)
cookie_hdr = req.get_header("Cookie", "")
headers = []
for hdr in set_cookie_hdrs:
headers.append("%s: %s" % (hdr_name, hdr))
res = FakeResponse(headers, url)
cookiejar.extract_cookies(res, req)
return cookie_hdr
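# A minimal sketch (not part of the original test suite) of how the helpers
# above are typically driven: feed a jar a Set-Cookie header for a URL, then
# call the same helper again with no headers to see which Cookie: header would
# be sent on the next request to that URL. The hostname is hypothetical.
#
#     jar = CookieJar()
#     interact_netscape(jar, "http://www.example.com/", "sid=abc123")
#     assert interact_netscape(jar, "http://www.example.com/") == "sid=abc123"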
class FileCookieJarTests(unittest.TestCase):
def test_lwp_valueless_cookie(self):
# cookies with no value should be saved and loaded consistently
filename = test.support.TESTFN
c = LWPCookieJar()
interact_netscape(c, "http://www.acme.com/", 'boo')
self.assertEqual(c._cookies["www.acme.com"]["/"]["boo"].value, None)
try:
c.save(filename, ignore_discard=True)
c = LWPCookieJar()
c.load(filename, ignore_discard=True)
finally:
try: os.unlink(filename)
except OSError: pass
self.assertEqual(c._cookies["www.acme.com"]["/"]["boo"].value, None)
def test_bad_magic(self):
# OSErrors (eg. file doesn't exist) are allowed to propagate
filename = test.support.TESTFN
for cookiejar_class in LWPCookieJar, MozillaCookieJar:
c = cookiejar_class()
try:
c.load(filename="for this test to work, a file with this "
"filename should not exist")
except OSError as exc:
# an OSError subclass (likely FileNotFoundError), but not
# LoadError
self.assertIsNot(exc.__class__, LoadError)
else:
self.fail("expected OSError for invalid filename")
# Invalid contents of cookies file (eg. bad magic string)
# causes a LoadError.
try:
with open(filename, "w") as f:
f.write("oops\n")
for cookiejar_class in LWPCookieJar, MozillaCookieJar:
c = cookiejar_class()
self.assertRaises(LoadError, c.load, filename)
finally:
try: os.unlink(filename)
except OSError: pass
class CookieTests(unittest.TestCase):
# XXX
# Get rid of string comparisons where not actually testing str / repr.
# .clear() etc.
# IP addresses like 50 (single number, no dot) and domain-matching
# functions (and is_HDN)? See draft RFC 2965 errata.
# Strictness switches
# is_third_party()
# unverifiability / third-party blocking
# Netscape cookies work the same as RFC 2965 with regard to port.
# Set-Cookie with negative max age.
# If turn RFC 2965 handling off, Set-Cookie2 cookies should not clobber
# Set-Cookie cookies.
# Cookie2 should be sent if *any* cookies are not V1 (ie. V0 OR V2 etc.).
# Cookies (V1 and V0) with no expiry date should be set to be discarded.
# RFC 2965 Quoting:
# Should accept unquoted cookie-attribute values? check errata draft.
# Which are required on the way in and out?
# Should always return quoted cookie-attribute values?
# Proper testing of when RFC 2965 clobbers Netscape (waiting for errata).
# Path-match on return (same for V0 and V1).
# RFC 2965 acceptance and returning rules
# Set-Cookie2 without version attribute is rejected.
# Netscape peculiarities list from Ronald Tschalar.
# The first two still need tests, the rest are covered.
## - Quoting: only quotes around the expires value are recognized as such
## (and yes, some folks quote the expires value); quotes around any other
## value are treated as part of the value.
## - White space: white space around names and values is ignored
## - Default path: if no path parameter is given, the path defaults to the
## path in the request-uri up to, but not including, the last '/'. Note
## that this is entirely different from what the spec says.
## - Commas and other delimiters: Netscape just parses until the next ';'.
## This means it will allow commas etc inside values (and yes, both
    ##   commas and equals commonly appear in the cookie value). This also
## means that if you fold multiple Set-Cookie header fields into one,
## comma-separated list, it'll be a headache to parse (at least my head
## starts hurting every time I think of that code).
## - Expires: You'll get all sorts of date formats in the expires,
    ##   including empty expires attributes ("expires="). Be as flexible as you
## can, and certainly don't expect the weekday to be there; if you can't
## parse it, just ignore it and pretend it's a session cookie.
## - Domain-matching: Netscape uses the 2-dot rule for _all_ domains, not
## just the 7 special TLD's listed in their spec. And folks rely on
## that...
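    ## As a concrete illustration of the default-path point above (the URL is
    ## hypothetical): for a request to http://example.com/acme/ammo/index.html,
    ## a Set-Cookie header with no path attribute gives the cookie the path
    ## "/acme/ammo" under the Netscape rule, whereas RFC 2965 would default it
    ## to "/acme/ammo/" (compare test_default_path below).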
def test_domain_return_ok(self):
# test optimization: .domain_return_ok() should filter out most
# domains in the CookieJar before we try to access them (because that
# may require disk access -- in particular, with MSIECookieJar)
# This is only a rough check for performance reasons, so it's not too
# critical as long as it's sufficiently liberal.
pol = DefaultCookiePolicy()
for url, domain, ok in [
("http://foo.bar.com/", "blah.com", False),
("http://foo.bar.com/", "rhubarb.blah.com", False),
("http://foo.bar.com/", "rhubarb.foo.bar.com", False),
("http://foo.bar.com/", ".foo.bar.com", True),
("http://foo.bar.com/", "foo.bar.com", True),
("http://foo.bar.com/", ".bar.com", True),
("http://foo.bar.com/", "com", True),
("http://foo.com/", "rhubarb.foo.com", False),
("http://foo.com/", ".foo.com", True),
("http://foo.com/", "foo.com", True),
("http://foo.com/", "com", True),
("http://foo/", "rhubarb.foo", False),
("http://foo/", ".foo", True),
("http://foo/", "foo", True),
("http://foo/", "foo.local", True),
("http://foo/", ".local", True),
]:
request = urllib.request.Request(url)
r = pol.domain_return_ok(domain, request)
if ok: self.assertTrue(r)
else: self.assertFalse(r)
def test_missing_value(self):
# missing = sign in Cookie: header is regarded by Mozilla as a missing
# name, and by http.cookiejar as a missing value
filename = test.support.TESTFN
c = MozillaCookieJar(filename)
interact_netscape(c, "http://www.acme.com/", 'eggs')
interact_netscape(c, "http://www.acme.com/", '"spam"; path=/foo/')
cookie = c._cookies["www.acme.com"]["/"]["eggs"]
self.assertIsNone(cookie.value)
self.assertEqual(cookie.name, "eggs")
cookie = c._cookies["www.acme.com"]['/foo/']['"spam"']
self.assertIsNone(cookie.value)
self.assertEqual(cookie.name, '"spam"')
self.assertEqual(lwp_cookie_str(cookie), (
r'"spam"; path="/foo/"; domain="www.acme.com"; '
'path_spec; discard; version=0'))
old_str = repr(c)
c.save(ignore_expires=True, ignore_discard=True)
try:
c = MozillaCookieJar(filename)
c.revert(ignore_expires=True, ignore_discard=True)
finally:
os.unlink(c.filename)
# cookies unchanged apart from lost info re. whether path was specified
self.assertEqual(
repr(c),
re.sub("path_specified=%s" % True, "path_specified=%s" % False,
old_str)
)
self.assertEqual(interact_netscape(c, "http://www.acme.com/foo/"),
'"spam"; eggs')
def test_rfc2109_handling(self):
# RFC 2109 cookies are handled as RFC 2965 or Netscape cookies,
# dependent on policy settings
for rfc2109_as_netscape, rfc2965, version in [
# default according to rfc2965 if not explicitly specified
(None, False, 0),
(None, True, 1),
# explicit rfc2109_as_netscape
(False, False, None), # version None here means no cookie stored
(False, True, 1),
(True, False, 0),
(True, True, 0),
]:
policy = DefaultCookiePolicy(
rfc2109_as_netscape=rfc2109_as_netscape,
rfc2965=rfc2965)
c = CookieJar(policy)
interact_netscape(c, "http://www.example.com/", "ni=ni; Version=1")
try:
cookie = c._cookies["www.example.com"]["/"]["ni"]
except KeyError:
self.assertIsNone(version) # didn't expect a stored cookie
else:
self.assertEqual(cookie.version, version)
# 2965 cookies are unaffected
interact_2965(c, "http://www.example.com/",
"foo=bar; Version=1")
if rfc2965:
cookie2965 = c._cookies["www.example.com"]["/"]["foo"]
self.assertEqual(cookie2965.version, 1)
def test_ns_parser(self):
c = CookieJar()
interact_netscape(c, "http://www.acme.com/",
'spam=eggs; DoMain=.acme.com; port; blArgh="feep"')
interact_netscape(c, "http://www.acme.com/", 'ni=ni; port=80,8080')
interact_netscape(c, "http://www.acme.com:80/", 'nini=ni')
interact_netscape(c, "http://www.acme.com:80/", 'foo=bar; expires=')
interact_netscape(c, "http://www.acme.com:80/", 'spam=eggs; '
'expires="Foo Bar 25 33:22:11 3022"')
interact_netscape(c, 'http://www.acme.com/', 'fortytwo=')
interact_netscape(c, 'http://www.acme.com/', '=unladenswallow')
interact_netscape(c, 'http://www.acme.com/', 'holyhandgrenade')
cookie = c._cookies[".acme.com"]["/"]["spam"]
self.assertEqual(cookie.domain, ".acme.com")
self.assertTrue(cookie.domain_specified)
self.assertEqual(cookie.port, DEFAULT_HTTP_PORT)
self.assertFalse(cookie.port_specified)
# case is preserved
self.assertTrue(cookie.has_nonstandard_attr("blArgh"))
self.assertFalse(cookie.has_nonstandard_attr("blargh"))
cookie = c._cookies["www.acme.com"]["/"]["ni"]
self.assertEqual(cookie.domain, "www.acme.com")
self.assertFalse(cookie.domain_specified)
self.assertEqual(cookie.port, "80,8080")
self.assertTrue(cookie.port_specified)
cookie = c._cookies["www.acme.com"]["/"]["nini"]
self.assertIsNone(cookie.port)
self.assertFalse(cookie.port_specified)
# invalid expires should not cause cookie to be dropped
foo = c._cookies["www.acme.com"]["/"]["foo"]
        spam = c._cookies["www.acme.com"]["/"]["spam"]
self.assertIsNone(foo.expires)
self.assertIsNone(spam.expires)
cookie = c._cookies['www.acme.com']['/']['fortytwo']
self.assertIsNotNone(cookie.value)
self.assertEqual(cookie.value, '')
# there should be a distinction between a present but empty value
# (above) and a value that's entirely missing (below)
cookie = c._cookies['www.acme.com']['/']['holyhandgrenade']
self.assertIsNone(cookie.value)
def test_ns_parser_special_names(self):
# names such as 'expires' are not special in first name=value pair
# of Set-Cookie: header
c = CookieJar()
interact_netscape(c, "http://www.acme.com/", 'expires=eggs')
interact_netscape(c, "http://www.acme.com/", 'version=eggs; spam=eggs')
cookies = c._cookies["www.acme.com"]["/"]
self.assertIn('expires', cookies)
self.assertIn('version', cookies)
def test_expires(self):
# if expires is in future, keep cookie...
c = CookieJar()
future = time2netscape(time.time()+3600)
interact_netscape(c, "http://www.acme.com/", 'spam="bar"; expires=%s' %
future)
self.assertEqual(len(c), 1)
now = time2netscape(time.time()-1)
# ... and if in past or present, discard it
interact_netscape(c, "http://www.acme.com/", 'foo="eggs"; expires=%s' %
now)
h = interact_netscape(c, "http://www.acme.com/")
self.assertEqual(len(c), 1)
self.assertIn('spam="bar"', h)
self.assertNotIn("foo", h)
        # max-age takes precedence over expires, and zero max-age is a request to
        # delete both the new cookie and any old matching cookie
interact_netscape(c, "http://www.acme.com/", 'eggs="bar"; expires=%s' %
future)
interact_netscape(c, "http://www.acme.com/", 'bar="bar"; expires=%s' %
future)
self.assertEqual(len(c), 3)
interact_netscape(c, "http://www.acme.com/", 'eggs="bar"; '
'expires=%s; max-age=0' % future)
interact_netscape(c, "http://www.acme.com/", 'bar="bar"; '
'max-age=0; expires=%s' % future)
h = interact_netscape(c, "http://www.acme.com/")
self.assertEqual(len(c), 1)
# test expiry at end of session for cookies with no expires attribute
interact_netscape(c, "http://www.rhubarb.net/", 'whum="fizz"')
self.assertEqual(len(c), 2)
c.clear_session_cookies()
self.assertEqual(len(c), 1)
self.assertIn('spam="bar"', h)
# test if fractional expiry is accepted
cookie = Cookie(0, "name", "value",
None, False, "www.python.org",
True, False, "/",
False, False, "1444312383.018307",
False, None, None,
{})
self.assertEqual(cookie.expires, 1444312383)
# XXX RFC 2965 expiry rules (some apply to V0 too)
def test_default_path(self):
# RFC 2965
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
interact_2965(c, "http://www.acme.com/", 'spam="bar"; Version="1"')
self.assertIn("/", c._cookies["www.acme.com"])
c = CookieJar(pol)
interact_2965(c, "http://www.acme.com/blah", 'eggs="bar"; Version="1"')
self.assertIn("/", c._cookies["www.acme.com"])
c = CookieJar(pol)
interact_2965(c, "http://www.acme.com/blah/rhubarb",
'eggs="bar"; Version="1"')
self.assertIn("/blah/", c._cookies["www.acme.com"])
c = CookieJar(pol)
interact_2965(c, "http://www.acme.com/blah/rhubarb/",
'eggs="bar"; Version="1"')
self.assertIn("/blah/rhubarb/", c._cookies["www.acme.com"])
# Netscape
c = CookieJar()
interact_netscape(c, "http://www.acme.com/", 'spam="bar"')
self.assertIn("/", c._cookies["www.acme.com"])
c = CookieJar()
interact_netscape(c, "http://www.acme.com/blah", 'eggs="bar"')
self.assertIn("/", c._cookies["www.acme.com"])
c = CookieJar()
interact_netscape(c, "http://www.acme.com/blah/rhubarb", 'eggs="bar"')
self.assertIn("/blah", c._cookies["www.acme.com"])
c = CookieJar()
interact_netscape(c, "http://www.acme.com/blah/rhubarb/", 'eggs="bar"')
self.assertIn("/blah/rhubarb", c._cookies["www.acme.com"])
def test_default_path_with_query(self):
cj = CookieJar()
uri = "http://example.com/?spam/eggs"
value = 'eggs="bar"'
interact_netscape(cj, uri, value)
# Default path does not include query, so is "/", not "/?spam".
self.assertIn("/", cj._cookies["example.com"])
# Cookie is sent back to the same URI.
self.assertEqual(interact_netscape(cj, uri), value)
def test_escape_path(self):
cases = [
# quoted safe
("/foo%2f/bar", "/foo%2F/bar"),
("/foo%2F/bar", "/foo%2F/bar"),
# quoted %
("/foo%%/bar", "/foo%%/bar"),
# quoted unsafe
("/fo%19o/bar", "/fo%19o/bar"),
("/fo%7do/bar", "/fo%7Do/bar"),
# unquoted safe
("/foo/bar&", "/foo/bar&"),
("/foo//bar", "/foo//bar"),
("\176/foo/bar", "\176/foo/bar"),
# unquoted unsafe
("/foo\031/bar", "/foo%19/bar"),
("/\175foo/bar", "/%7Dfoo/bar"),
# unicode, latin-1 range
("/foo/bar\u00fc", "/foo/bar%C3%BC"), # UTF-8 encoded
# unicode
("/foo/bar\uabcd", "/foo/bar%EA%AF%8D"), # UTF-8 encoded
]
for arg, result in cases:
self.assertEqual(escape_path(arg), result)
def test_request_path(self):
# with parameters
req = urllib.request.Request(
"http://www.example.com/rheum/rhaponticum;"
"foo=bar;sing=song?apples=pears&spam=eggs#ni")
self.assertEqual(request_path(req),
"/rheum/rhaponticum;foo=bar;sing=song")
# without parameters
req = urllib.request.Request(
"http://www.example.com/rheum/rhaponticum?"
"apples=pears&spam=eggs#ni")
self.assertEqual(request_path(req), "/rheum/rhaponticum")
# missing final slash
req = urllib.request.Request("http://www.example.com")
self.assertEqual(request_path(req), "/")
def test_request_port(self):
req = urllib.request.Request("http://www.acme.com:1234/",
headers={"Host": "www.acme.com:4321"})
self.assertEqual(request_port(req), "1234")
req = urllib.request.Request("http://www.acme.com/",
headers={"Host": "www.acme.com:4321"})
self.assertEqual(request_port(req), DEFAULT_HTTP_PORT)
def test_request_host(self):
# this request is illegal (RFC2616, 14.2.3)
req = urllib.request.Request("http://1.1.1.1/",
headers={"Host": "www.acme.com:80"})
# libwww-perl wants this response, but that seems wrong (RFC 2616,
# section 5.2, point 1., and RFC 2965 section 1, paragraph 3)
#self.assertEqual(request_host(req), "www.acme.com")
self.assertEqual(request_host(req), "1.1.1.1")
req = urllib.request.Request("http://www.acme.com/",
headers={"Host": "irrelevant.com"})
self.assertEqual(request_host(req), "www.acme.com")
# port shouldn't be in request-host
req = urllib.request.Request("http://www.acme.com:2345/resource.html",
headers={"Host": "www.acme.com:5432"})
self.assertEqual(request_host(req), "www.acme.com")
def test_is_HDN(self):
self.assertTrue(is_HDN("foo.bar.com"))
self.assertTrue(is_HDN("1foo2.3bar4.5com"))
self.assertFalse(is_HDN("192.168.1.1"))
self.assertFalse(is_HDN(""))
self.assertFalse(is_HDN("."))
self.assertFalse(is_HDN(".foo.bar.com"))
self.assertFalse(is_HDN("..foo"))
self.assertFalse(is_HDN("foo."))
def test_reach(self):
self.assertEqual(reach("www.acme.com"), ".acme.com")
self.assertEqual(reach("acme.com"), "acme.com")
self.assertEqual(reach("acme.local"), ".local")
self.assertEqual(reach(".local"), ".local")
self.assertEqual(reach(".com"), ".com")
self.assertEqual(reach("."), ".")
self.assertEqual(reach(""), "")
self.assertEqual(reach("192.168.0.1"), "192.168.0.1")
def test_domain_match(self):
self.assertTrue(domain_match("192.168.1.1", "192.168.1.1"))
self.assertFalse(domain_match("192.168.1.1", ".168.1.1"))
self.assertTrue(domain_match("x.y.com", "x.Y.com"))
self.assertTrue(domain_match("x.y.com", ".Y.com"))
self.assertFalse(domain_match("x.y.com", "Y.com"))
self.assertTrue(domain_match("a.b.c.com", ".c.com"))
self.assertFalse(domain_match(".c.com", "a.b.c.com"))
self.assertTrue(domain_match("example.local", ".local"))
self.assertFalse(domain_match("blah.blah", ""))
self.assertFalse(domain_match("", ".rhubarb.rhubarb"))
self.assertTrue(domain_match("", ""))
self.assertTrue(user_domain_match("acme.com", "acme.com"))
self.assertFalse(user_domain_match("acme.com", ".acme.com"))
self.assertTrue(user_domain_match("rhubarb.acme.com", ".acme.com"))
self.assertTrue(user_domain_match("www.rhubarb.acme.com", ".acme.com"))
self.assertTrue(user_domain_match("x.y.com", "x.Y.com"))
self.assertTrue(user_domain_match("x.y.com", ".Y.com"))
self.assertFalse(user_domain_match("x.y.com", "Y.com"))
self.assertTrue(user_domain_match("y.com", "Y.com"))
self.assertFalse(user_domain_match(".y.com", "Y.com"))
self.assertTrue(user_domain_match(".y.com", ".Y.com"))
self.assertTrue(user_domain_match("x.y.com", ".com"))
self.assertFalse(user_domain_match("x.y.com", "com"))
self.assertFalse(user_domain_match("x.y.com", "m"))
self.assertFalse(user_domain_match("x.y.com", ".m"))
self.assertFalse(user_domain_match("x.y.com", ""))
self.assertFalse(user_domain_match("x.y.com", "."))
self.assertTrue(user_domain_match("192.168.1.1", "192.168.1.1"))
# not both HDNs, so must string-compare equal to match
self.assertFalse(user_domain_match("192.168.1.1", ".168.1.1"))
self.assertFalse(user_domain_match("192.168.1.1", "."))
# empty string is a special case
self.assertFalse(user_domain_match("192.168.1.1", ""))
def test_wrong_domain(self):
# Cookies whose effective request-host name does not domain-match the
# domain are rejected.
# XXX far from complete
c = CookieJar()
interact_2965(c, "http://www.nasty.com/",
'foo=bar; domain=friendly.org; Version="1"')
self.assertEqual(len(c), 0)
def test_strict_domain(self):
# Cookies whose domain is a country-code tld like .co.uk should
# not be set if CookiePolicy.strict_domain is true.
cp = DefaultCookiePolicy(strict_domain=True)
cj = CookieJar(policy=cp)
interact_netscape(cj, "http://example.co.uk/", 'no=problemo')
interact_netscape(cj, "http://example.co.uk/",
'okey=dokey; Domain=.example.co.uk')
self.assertEqual(len(cj), 2)
for pseudo_tld in [".co.uk", ".org.za", ".tx.us", ".name.us"]:
interact_netscape(cj, "http://example.%s/" % pseudo_tld,
'spam=eggs; Domain=.co.uk')
self.assertEqual(len(cj), 2)
def test_two_component_domain_ns(self):
# Netscape: .www.bar.com, www.bar.com, .bar.com, bar.com, no domain
# should all get accepted, as should .acme.com, acme.com and no domain
# for 2-component domains like acme.com.
c = CookieJar()
# two-component V0 domain is OK
interact_netscape(c, "http://foo.net/", 'ns=bar')
self.assertEqual(len(c), 1)
self.assertEqual(c._cookies["foo.net"]["/"]["ns"].value, "bar")
self.assertEqual(interact_netscape(c, "http://foo.net/"), "ns=bar")
# *will* be returned to any other domain (unlike RFC 2965)...
self.assertEqual(interact_netscape(c, "http://www.foo.net/"),
"ns=bar")
# ...unless requested otherwise
pol = DefaultCookiePolicy(
strict_ns_domain=DefaultCookiePolicy.DomainStrictNonDomain)
c.set_policy(pol)
self.assertEqual(interact_netscape(c, "http://www.foo.net/"), "")
# unlike RFC 2965, even explicit two-component domain is OK,
# because .foo.net matches foo.net
interact_netscape(c, "http://foo.net/foo/",
'spam1=eggs; domain=foo.net')
        # even if it starts with a dot -- in NS rules, .foo.net matches foo.net!
interact_netscape(c, "http://foo.net/foo/bar/",
'spam2=eggs; domain=.foo.net')
self.assertEqual(len(c), 3)
self.assertEqual(c._cookies[".foo.net"]["/foo"]["spam1"].value,
"eggs")
self.assertEqual(c._cookies[".foo.net"]["/foo/bar"]["spam2"].value,
"eggs")
self.assertEqual(interact_netscape(c, "http://foo.net/foo/bar/"),
"spam2=eggs; spam1=eggs; ns=bar")
# top-level domain is too general
interact_netscape(c, "http://foo.net/", 'nini="ni"; domain=.net')
self.assertEqual(len(c), 3)
## # Netscape protocol doesn't allow non-special top level domains (such
## # as co.uk) in the domain attribute unless there are at least three
## # dots in it.
# Oh yes it does! Real implementations don't check this, and real
# cookies (of course) rely on that behaviour.
interact_netscape(c, "http://foo.co.uk", 'nasty=trick; domain=.co.uk')
## self.assertEqual(len(c), 2)
self.assertEqual(len(c), 4)
def test_two_component_domain_rfc2965(self):
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
# two-component V1 domain is OK
interact_2965(c, "http://foo.net/", 'foo=bar; Version="1"')
self.assertEqual(len(c), 1)
self.assertEqual(c._cookies["foo.net"]["/"]["foo"].value, "bar")
self.assertEqual(interact_2965(c, "http://foo.net/"),
"$Version=1; foo=bar")
# won't be returned to any other domain (because domain was implied)
self.assertEqual(interact_2965(c, "http://www.foo.net/"), "")
# unless domain is given explicitly, because then it must be
# rewritten to start with a dot: foo.net --> .foo.net, which does
# not domain-match foo.net
interact_2965(c, "http://foo.net/foo",
'spam=eggs; domain=foo.net; path=/foo; Version="1"')
self.assertEqual(len(c), 1)
self.assertEqual(interact_2965(c, "http://foo.net/foo"),
"$Version=1; foo=bar")
# explicit foo.net from three-component domain www.foo.net *does* get
# set, because .foo.net domain-matches .foo.net
interact_2965(c, "http://www.foo.net/foo/",
'spam=eggs; domain=foo.net; Version="1"')
self.assertEqual(c._cookies[".foo.net"]["/foo/"]["spam"].value,
"eggs")
self.assertEqual(len(c), 2)
self.assertEqual(interact_2965(c, "http://foo.net/foo/"),
"$Version=1; foo=bar")
self.assertEqual(interact_2965(c, "http://www.foo.net/foo/"),
'$Version=1; spam=eggs; $Domain="foo.net"')
# top-level domain is too general
interact_2965(c, "http://foo.net/",
'ni="ni"; domain=".net"; Version="1"')
self.assertEqual(len(c), 2)
# RFC 2965 doesn't require blocking this
interact_2965(c, "http://foo.co.uk/",
'nasty=trick; domain=.co.uk; Version="1"')
self.assertEqual(len(c), 3)
def test_domain_allow(self):
c = CookieJar(policy=DefaultCookiePolicy(
blocked_domains=["acme.com"],
allowed_domains=["www.acme.com"]))
req = urllib.request.Request("http://acme.com/")
headers = ["Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/"]
res = FakeResponse(headers, "http://acme.com/")
c.extract_cookies(res, req)
self.assertEqual(len(c), 0)
req = urllib.request.Request("http://www.acme.com/")
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
self.assertEqual(len(c), 1)
req = urllib.request.Request("http://www.coyote.com/")
res = FakeResponse(headers, "http://www.coyote.com/")
c.extract_cookies(res, req)
self.assertEqual(len(c), 1)
# set a cookie with non-allowed domain...
req = urllib.request.Request("http://www.coyote.com/")
res = FakeResponse(headers, "http://www.coyote.com/")
cookies = c.make_cookies(res, req)
c.set_cookie(cookies[0])
self.assertEqual(len(c), 2)
        # ... and check it doesn't get returned
c.add_cookie_header(req)
self.assertFalse(req.has_header("Cookie"))
def test_domain_block(self):
pol = DefaultCookiePolicy(
rfc2965=True, blocked_domains=[".acme.com"])
c = CookieJar(policy=pol)
headers = ["Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/"]
req = urllib.request.Request("http://www.acme.com/")
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
self.assertEqual(len(c), 0)
p = pol.set_blocked_domains(["acme.com"])
c.extract_cookies(res, req)
self.assertEqual(len(c), 1)
c.clear()
req = urllib.request.Request("http://www.roadrunner.net/")
res = FakeResponse(headers, "http://www.roadrunner.net/")
c.extract_cookies(res, req)
self.assertEqual(len(c), 1)
req = urllib.request.Request("http://www.roadrunner.net/")
c.add_cookie_header(req)
self.assertTrue(req.has_header("Cookie"))
self.assertTrue(req.has_header("Cookie2"))
c.clear()
pol.set_blocked_domains([".acme.com"])
c.extract_cookies(res, req)
self.assertEqual(len(c), 1)
# set a cookie with blocked domain...
req = urllib.request.Request("http://www.acme.com/")
res = FakeResponse(headers, "http://www.acme.com/")
cookies = c.make_cookies(res, req)
c.set_cookie(cookies[0])
self.assertEqual(len(c), 2)
        # ... and check it doesn't get returned
c.add_cookie_header(req)
self.assertFalse(req.has_header("Cookie"))
def test_secure(self):
for ns in True, False:
for whitespace in " ", "":
c = CookieJar()
if ns:
pol = DefaultCookiePolicy(rfc2965=False)
int = interact_netscape
vs = ""
else:
pol = DefaultCookiePolicy(rfc2965=True)
int = interact_2965
vs = "; Version=1"
c.set_policy(pol)
url = "http://www.acme.com/"
int(c, url, "foo1=bar%s%s" % (vs, whitespace))
int(c, url, "foo2=bar%s; secure%s" % (vs, whitespace))
self.assertFalse(
c._cookies["www.acme.com"]["/"]["foo1"].secure,
"non-secure cookie registered secure")
self.assertTrue(
c._cookies["www.acme.com"]["/"]["foo2"].secure,
"secure cookie registered non-secure")
def test_quote_cookie_value(self):
c = CookieJar(policy=DefaultCookiePolicy(rfc2965=True))
interact_2965(c, "http://www.acme.com/", r'foo=\b"a"r; Version=1')
h = interact_2965(c, "http://www.acme.com/")
self.assertEqual(h, r'$Version=1; foo=\\b\"a\"r')
def test_missing_final_slash(self):
# Missing slash from request URL's abs_path should be assumed present.
url = "http://www.acme.com"
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
interact_2965(c, url, "foo=bar; Version=1")
req = urllib.request.Request(url)
self.assertEqual(len(c), 1)
c.add_cookie_header(req)
self.assertTrue(req.has_header("Cookie"))
def test_domain_mirror(self):
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, "spam=eggs; Version=1")
h = interact_2965(c, url)
self.assertNotIn("Domain", h,
"absent domain returned with domain present")
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, 'spam=eggs; Version=1; Domain=.bar.com')
h = interact_2965(c, url)
self.assertIn('$Domain=".bar.com"', h, "domain not returned")
c = CookieJar(pol)
url = "http://foo.bar.com/"
# note missing initial dot in Domain
interact_2965(c, url, 'spam=eggs; Version=1; Domain=bar.com')
h = interact_2965(c, url)
self.assertIn('$Domain="bar.com"', h, "domain not returned")
def test_path_mirror(self):
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, "spam=eggs; Version=1")
h = interact_2965(c, url)
self.assertNotIn("Path", h, "absent path returned with path present")
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, 'spam=eggs; Version=1; Path=/')
h = interact_2965(c, url)
self.assertIn('$Path="/"', h, "path not returned")
def test_port_mirror(self):
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, "spam=eggs; Version=1")
h = interact_2965(c, url)
self.assertNotIn("Port", h, "absent port returned with port present")
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, "spam=eggs; Version=1; Port")
h = interact_2965(c, url)
        self.assertRegex(h, r"\$Port([^=]|$)",
"port with no value not returned with no value")
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, 'spam=eggs; Version=1; Port="80"')
h = interact_2965(c, url)
self.assertIn('$Port="80"', h,
"port with single value not returned with single value")
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, 'spam=eggs; Version=1; Port="80,8080"')
h = interact_2965(c, url)
self.assertIn('$Port="80,8080"', h,
"port with multiple values not returned with multiple "
"values")
def test_no_return_comment(self):
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
url = "http://foo.bar.com/"
interact_2965(c, url, 'spam=eggs; Version=1; '
'Comment="does anybody read these?"; '
'CommentURL="http://foo.bar.net/comment.html"')
h = interact_2965(c, url)
self.assertNotIn("Comment", h,
"Comment or CommentURL cookie-attributes returned to server")
def test_Cookie_iterator(self):
cs = CookieJar(DefaultCookiePolicy(rfc2965=True))
# add some random cookies
interact_2965(cs, "http://blah.spam.org/", 'foo=eggs; Version=1; '
'Comment="does anybody read these?"; '
'CommentURL="http://foo.bar.net/comment.html"')
interact_netscape(cs, "http://www.acme.com/blah/", "spam=bar; secure")
interact_2965(cs, "http://www.acme.com/blah/",
"foo=bar; secure; Version=1")
interact_2965(cs, "http://www.acme.com/blah/",
"foo=bar; path=/; Version=1")
interact_2965(cs, "http://www.sol.no",
r'bang=wallop; version=1; domain=".sol.no"; '
r'port="90,100, 80,8080"; '
r'max-age=100; Comment = "Just kidding! (\"|\\\\) "')
versions = [1, 1, 1, 0, 1]
names = ["bang", "foo", "foo", "spam", "foo"]
domains = [".sol.no", "blah.spam.org", "www.acme.com",
"www.acme.com", "www.acme.com"]
paths = ["/", "/", "/", "/blah", "/blah/"]
for i in range(4):
i = 0
for c in cs:
self.assertIsInstance(c, Cookie)
self.assertEqual(c.version, versions[i])
self.assertEqual(c.name, names[i])
self.assertEqual(c.domain, domains[i])
self.assertEqual(c.path, paths[i])
i = i + 1
def test_parse_ns_headers(self):
# missing domain value (invalid cookie)
self.assertEqual(
parse_ns_headers(["foo=bar; path=/; domain"]),
[[("foo", "bar"),
("path", "/"), ("domain", None), ("version", "0")]]
)
# invalid expires value
self.assertEqual(
parse_ns_headers(["foo=bar; expires=Foo Bar 12 33:22:11 2000"]),
[[("foo", "bar"), ("expires", None), ("version", "0")]]
)
# missing cookie value (valid cookie)
self.assertEqual(
parse_ns_headers(["foo"]),
[[("foo", None), ("version", "0")]]
)
# missing cookie values for parsed attributes
self.assertEqual(
parse_ns_headers(['foo=bar; expires']),
[[('foo', 'bar'), ('expires', None), ('version', '0')]])
self.assertEqual(
parse_ns_headers(['foo=bar; version']),
[[('foo', 'bar'), ('version', None)]])
# shouldn't add version if header is empty
self.assertEqual(parse_ns_headers([""]), [])
def test_bad_cookie_header(self):
def cookiejar_from_cookie_headers(headers):
c = CookieJar()
req = urllib.request.Request("http://www.example.com/")
r = FakeResponse(headers, "http://www.example.com/")
c.extract_cookies(r, req)
return c
future = time2netscape(time.time()+3600)
# none of these bad headers should cause an exception to be raised
for headers in [
["Set-Cookie: "], # actually, nothing wrong with this
["Set-Cookie2: "], # ditto
# missing domain value
["Set-Cookie2: a=foo; path=/; Version=1; domain"],
# bad max-age
["Set-Cookie: b=foo; max-age=oops"],
# bad version
["Set-Cookie: b=foo; version=spam"],
["Set-Cookie:; Expires=%s" % future],
]:
c = cookiejar_from_cookie_headers(headers)
# these bad cookies shouldn't be set
self.assertEqual(len(c), 0)
# cookie with invalid expires is treated as session cookie
headers = ["Set-Cookie: c=foo; expires=Foo Bar 12 33:22:11 2000"]
c = cookiejar_from_cookie_headers(headers)
cookie = c._cookies["www.example.com"]["/"]["c"]
self.assertIsNone(cookie.expires)
class LWPCookieTests(unittest.TestCase):
# Tests taken from libwww-perl, with a few modifications and additions.
def test_netscape_example_1(self):
#-------------------------------------------------------------------
# First we check that it works for the original example at
# http://www.netscape.com/newsref/std/cookie_spec.html
# Client requests a document, and receives in the response:
#
# Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/; expires=Wednesday, 09-Nov-99 23:12:40 GMT
#
# When client requests a URL in path "/" on this server, it sends:
#
# Cookie: CUSTOMER=WILE_E_COYOTE
#
# Client requests a document, and receives in the response:
#
# Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/
#
# When client requests a URL in path "/" on this server, it sends:
#
# Cookie: CUSTOMER=WILE_E_COYOTE; PART_NUMBER=ROCKET_LAUNCHER_0001
#
# Client receives:
#
# Set-Cookie: SHIPPING=FEDEX; path=/fo
#
# When client requests a URL in path "/" on this server, it sends:
#
# Cookie: CUSTOMER=WILE_E_COYOTE; PART_NUMBER=ROCKET_LAUNCHER_0001
#
# When client requests a URL in path "/foo" on this server, it sends:
#
# Cookie: CUSTOMER=WILE_E_COYOTE; PART_NUMBER=ROCKET_LAUNCHER_0001; SHIPPING=FEDEX
#
# The last Cookie is buggy, because both specifications say that the
# most specific cookie must be sent first. SHIPPING=FEDEX is the
# most specific and should thus be first.
year_plus_one = time.localtime()[0] + 1
headers = []
c = CookieJar(DefaultCookiePolicy(rfc2965 = True))
#req = urllib.request.Request("http://1.1.1.1/",
# headers={"Host": "www.acme.com:80"})
req = urllib.request.Request("http://www.acme.com:80/",
headers={"Host": "www.acme.com:80"})
headers.append(
"Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/ ; "
"expires=Wednesday, 09-Nov-%d 23:12:40 GMT" % year_plus_one)
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
req = urllib.request.Request("http://www.acme.com/")
c.add_cookie_header(req)
self.assertEqual(req.get_header("Cookie"), "CUSTOMER=WILE_E_COYOTE")
self.assertEqual(req.get_header("Cookie2"), '$Version="1"')
headers.append("Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/")
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
req = urllib.request.Request("http://www.acme.com/foo/bar")
c.add_cookie_header(req)
h = req.get_header("Cookie")
self.assertIn("PART_NUMBER=ROCKET_LAUNCHER_0001", h)
self.assertIn("CUSTOMER=WILE_E_COYOTE", h)
headers.append('Set-Cookie: SHIPPING=FEDEX; path=/foo')
res = FakeResponse(headers, "http://www.acme.com")
c.extract_cookies(res, req)
req = urllib.request.Request("http://www.acme.com/")
c.add_cookie_header(req)
h = req.get_header("Cookie")
self.assertIn("PART_NUMBER=ROCKET_LAUNCHER_0001", h)
self.assertIn("CUSTOMER=WILE_E_COYOTE", h)
self.assertNotIn("SHIPPING=FEDEX", h)
req = urllib.request.Request("http://www.acme.com/foo/")
c.add_cookie_header(req)
h = req.get_header("Cookie")
self.assertIn("PART_NUMBER=ROCKET_LAUNCHER_0001", h)
self.assertIn("CUSTOMER=WILE_E_COYOTE", h)
self.assertTrue(h.startswith("SHIPPING=FEDEX;"))
def test_netscape_example_2(self):
# Second Example transaction sequence:
#
# Assume all mappings from above have been cleared.
#
# Client receives:
#
# Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/
#
# When client requests a URL in path "/" on this server, it sends:
#
# Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001
#
# Client receives:
#
# Set-Cookie: PART_NUMBER=RIDING_ROCKET_0023; path=/ammo
#
# When client requests a URL in path "/ammo" on this server, it sends:
#
# Cookie: PART_NUMBER=RIDING_ROCKET_0023; PART_NUMBER=ROCKET_LAUNCHER_0001
#
# NOTE: There are two name/value pairs named "PART_NUMBER" due to
# the inheritance of the "/" mapping in addition to the "/ammo" mapping.
c = CookieJar()
headers = []
req = urllib.request.Request("http://www.acme.com/")
headers.append("Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/")
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
req = urllib.request.Request("http://www.acme.com/")
c.add_cookie_header(req)
self.assertEqual(req.get_header("Cookie"),
"PART_NUMBER=ROCKET_LAUNCHER_0001")
headers.append(
"Set-Cookie: PART_NUMBER=RIDING_ROCKET_0023; path=/ammo")
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
req = urllib.request.Request("http://www.acme.com/ammo")
c.add_cookie_header(req)
self.assertRegex(req.get_header("Cookie"),
r"PART_NUMBER=RIDING_ROCKET_0023;\s*"
"PART_NUMBER=ROCKET_LAUNCHER_0001")
def test_ietf_example_1(self):
#-------------------------------------------------------------------
# Then we test with the examples from draft-ietf-http-state-man-mec-03.txt
#
# 5. EXAMPLES
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
#
# 5.1 Example 1
#
# Most detail of request and response headers has been omitted. Assume
# the user agent has no stored cookies.
#
# 1. User Agent -> Server
#
# POST /acme/login HTTP/1.1
# [form data]
#
# User identifies self via a form.
#
# 2. Server -> User Agent
#
# HTTP/1.1 200 OK
# Set-Cookie2: Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"
#
# Cookie reflects user's identity.
cookie = interact_2965(
c, 'http://www.acme.com/acme/login',
'Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"')
self.assertFalse(cookie)
#
# 3. User Agent -> Server
#
# POST /acme/pickitem HTTP/1.1
# Cookie: $Version="1"; Customer="WILE_E_COYOTE"; $Path="/acme"
# [form data]
#
# User selects an item for ``shopping basket.''
#
# 4. Server -> User Agent
#
# HTTP/1.1 200 OK
# Set-Cookie2: Part_Number="Rocket_Launcher_0001"; Version="1";
# Path="/acme"
#
# Shopping basket contains an item.
cookie = interact_2965(c, 'http://www.acme.com/acme/pickitem',
'Part_Number="Rocket_Launcher_0001"; '
'Version="1"; Path="/acme"');
self.assertRegex(cookie,
r'^\$Version="?1"?; Customer="?WILE_E_COYOTE"?; \$Path="/acme"$')
#
# 5. User Agent -> Server
#
# POST /acme/shipping HTTP/1.1
# Cookie: $Version="1";
# Customer="WILE_E_COYOTE"; $Path="/acme";
# Part_Number="Rocket_Launcher_0001"; $Path="/acme"
# [form data]
#
# User selects shipping method from form.
#
# 6. Server -> User Agent
#
# HTTP/1.1 200 OK
# Set-Cookie2: Shipping="FedEx"; Version="1"; Path="/acme"
#
# New cookie reflects shipping method.
cookie = interact_2965(c, "http://www.acme.com/acme/shipping",
'Shipping="FedEx"; Version="1"; Path="/acme"')
self.assertRegex(cookie, r'^\$Version="?1"?;')
        self.assertRegex(cookie, r'Part_Number="?Rocket_Launcher_0001"?;'
                                 r'\s*\$Path="\/acme"')
        self.assertRegex(cookie, r'Customer="?WILE_E_COYOTE"?;'
                                 r'\s*\$Path="\/acme"')
#
# 7. User Agent -> Server
#
# POST /acme/process HTTP/1.1
# Cookie: $Version="1";
# Customer="WILE_E_COYOTE"; $Path="/acme";
# Part_Number="Rocket_Launcher_0001"; $Path="/acme";
# Shipping="FedEx"; $Path="/acme"
# [form data]
#
# User chooses to process order.
#
# 8. Server -> User Agent
#
# HTTP/1.1 200 OK
#
# Transaction is complete.
cookie = interact_2965(c, "http://www.acme.com/acme/process")
self.assertRegex(cookie, r'Shipping="?FedEx"?;\s*\$Path="\/acme"')
self.assertIn("WILE_E_COYOTE", cookie)
#
# The user agent makes a series of requests on the origin server, after
# each of which it receives a new cookie. All the cookies have the same
# Path attribute and (default) domain. Because the request URLs all have
# /acme as a prefix, and that matches the Path attribute, each request
# contains all the cookies received so far.
def test_ietf_example_2(self):
# 5.2 Example 2
#
# This example illustrates the effect of the Path attribute. All detail
# of request and response headers has been omitted. Assume the user agent
# has no stored cookies.
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
# Imagine the user agent has received, in response to earlier requests,
# the response headers
#
# Set-Cookie2: Part_Number="Rocket_Launcher_0001"; Version="1";
# Path="/acme"
#
# and
#
# Set-Cookie2: Part_Number="Riding_Rocket_0023"; Version="1";
# Path="/acme/ammo"
interact_2965(
c, "http://www.acme.com/acme/ammo/specific",
'Part_Number="Rocket_Launcher_0001"; Version="1"; Path="/acme"',
'Part_Number="Riding_Rocket_0023"; Version="1"; Path="/acme/ammo"')
# A subsequent request by the user agent to the (same) server for URLs of
# the form /acme/ammo/... would include the following request header:
#
# Cookie: $Version="1";
# Part_Number="Riding_Rocket_0023"; $Path="/acme/ammo";
# Part_Number="Rocket_Launcher_0001"; $Path="/acme"
#
# Note that the NAME=VALUE pair for the cookie with the more specific Path
# attribute, /acme/ammo, comes before the one with the less specific Path
# attribute, /acme. Further note that the same cookie name appears more
# than once.
cookie = interact_2965(c, "http://www.acme.com/acme/ammo/...")
self.assertRegex(cookie, r"Riding_Rocket_0023.*Rocket_Launcher_0001")
# A subsequent request by the user agent to the (same) server for a URL of
# the form /acme/parts/ would include the following request header:
#
# Cookie: $Version="1"; Part_Number="Rocket_Launcher_0001"; $Path="/acme"
#
# Here, the second cookie's Path attribute /acme/ammo is not a prefix of
# the request URL, /acme/parts/, so the cookie does not get forwarded to
# the server.
cookie = interact_2965(c, "http://www.acme.com/acme/parts/")
self.assertIn("Rocket_Launcher_0001", cookie)
self.assertNotIn("Riding_Rocket_0023", cookie)
def test_rejection(self):
# Test rejection of Set-Cookie2 responses based on domain, path, port.
pol = DefaultCookiePolicy(rfc2965=True)
c = LWPCookieJar(policy=pol)
max_age = "max-age=3600"
# illegal domain (no embedded dots)
cookie = interact_2965(c, "http://www.acme.com",
'foo=bar; domain=".com"; version=1')
self.assertFalse(c)
# legal domain
cookie = interact_2965(c, "http://www.acme.com",
'ping=pong; domain="acme.com"; version=1')
self.assertEqual(len(c), 1)
# illegal domain (host prefix "www.a" contains a dot)
cookie = interact_2965(c, "http://www.a.acme.com",
'whiz=bang; domain="acme.com"; version=1')
self.assertEqual(len(c), 1)
# legal domain
cookie = interact_2965(c, "http://www.a.acme.com",
'wow=flutter; domain=".a.acme.com"; version=1')
self.assertEqual(len(c), 2)
# can't partially match an IP-address
cookie = interact_2965(c, "http://125.125.125.125",
'zzzz=ping; domain="125.125.125"; version=1')
self.assertEqual(len(c), 2)
# illegal path (must be prefix of request path)
cookie = interact_2965(c, "http://www.sol.no",
'blah=rhubarb; domain=".sol.no"; path="/foo"; '
'version=1')
self.assertEqual(len(c), 2)
# legal path
cookie = interact_2965(c, "http://www.sol.no/foo/bar",
'bing=bong; domain=".sol.no"; path="/foo"; '
'version=1')
self.assertEqual(len(c), 3)
# illegal port (request-port not in list)
cookie = interact_2965(c, "http://www.sol.no",
'whiz=ffft; domain=".sol.no"; port="90,100"; '
'version=1')
self.assertEqual(len(c), 3)
# legal port
cookie = interact_2965(
c, "http://www.sol.no",
r'bang=wallop; version=1; domain=".sol.no"; '
r'port="90,100, 80,8080"; '
r'max-age=100; Comment = "Just kidding! (\"|\\\\) "')
self.assertEqual(len(c), 4)
# port attribute without any value (current port)
cookie = interact_2965(c, "http://www.sol.no",
'foo9=bar; version=1; domain=".sol.no"; port; '
'max-age=100;')
self.assertEqual(len(c), 5)
# encoded path
# LWP has this test, but unescaping allowed path characters seems
# like a bad idea, so I think this should fail:
## cookie = interact_2965(c, "http://www.sol.no/foo/",
## r'foo8=bar; version=1; path="/%66oo"')
# but this is OK, because '<' is not an allowed HTTP URL path
# character:
cookie = interact_2965(c, "http://www.sol.no/<oo/",
r'foo8=bar; version=1; path="/%3coo"')
self.assertEqual(len(c), 6)
# save and restore
filename = test.support.TESTFN
try:
c.save(filename, ignore_discard=True)
old = repr(c)
c = LWPCookieJar(policy=pol)
c.load(filename, ignore_discard=True)
finally:
try: os.unlink(filename)
except OSError: pass
self.assertEqual(old, repr(c))
def test_url_encoding(self):
# Try some URL encodings of the PATHs.
# (the behaviour here has changed from libwww-perl)
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
interact_2965(c, "http://www.acme.com/foo%2f%25/"
"%3c%3c%0Anew%C3%A5/%C3%A5",
"foo = bar; version = 1")
cookie = interact_2965(
c, "http://www.acme.com/foo%2f%25/<<%0anew\345/\346\370\345",
'bar=baz; path="/foo/"; version=1');
version_re = re.compile(r'^\$version=\"?1\"?', re.I)
self.assertIn("foo=bar", cookie)
self.assertRegex(cookie, version_re)
cookie = interact_2965(
c, "http://www.acme.com/foo/%25/<<%0anew\345/\346\370\345")
self.assertFalse(cookie)
# unicode URL doesn't raise exception
cookie = interact_2965(c, "http://www.acme.com/\xfc")
def test_mozilla(self):
# Save / load Mozilla/Netscape cookie file format.
year_plus_one = time.localtime()[0] + 1
filename = test.support.TESTFN
c = MozillaCookieJar(filename,
policy=DefaultCookiePolicy(rfc2965=True))
interact_2965(c, "http://www.acme.com/",
"foo1=bar; max-age=100; Version=1")
interact_2965(c, "http://www.acme.com/",
'foo2=bar; port="80"; max-age=100; Discard; Version=1')
interact_2965(c, "http://www.acme.com/", "foo3=bar; secure; Version=1")
expires = "expires=09-Nov-%d 23:12:40 GMT" % (year_plus_one,)
interact_netscape(c, "http://www.foo.com/",
"fooa=bar; %s" % expires)
interact_netscape(c, "http://www.foo.com/",
"foob=bar; Domain=.foo.com; %s" % expires)
interact_netscape(c, "http://www.foo.com/",
"fooc=bar; Domain=www.foo.com; %s" % expires)
def save_and_restore(cj, ignore_discard):
try:
cj.save(ignore_discard=ignore_discard)
new_c = MozillaCookieJar(filename,
DefaultCookiePolicy(rfc2965=True))
new_c.load(ignore_discard=ignore_discard)
finally:
try: os.unlink(filename)
except OSError: pass
return new_c
new_c = save_and_restore(c, True)
self.assertEqual(len(new_c), 6) # none discarded
self.assertIn("name='foo1', value='bar'", repr(new_c))
new_c = save_and_restore(c, False)
self.assertEqual(len(new_c), 4) # 2 of them discarded on save
self.assertIn("name='foo1', value='bar'", repr(new_c))
def test_netscape_misc(self):
# Some additional Netscape cookies tests.
c = CookieJar()
headers = []
req = urllib.request.Request("http://foo.bar.acme.com/foo")
# Netscape allows a host part that contains dots
headers.append("Set-Cookie: Customer=WILE_E_COYOTE; domain=.acme.com")
res = FakeResponse(headers, "http://www.acme.com/foo")
c.extract_cookies(res, req)
# and that the domain is the same as the host without adding a leading
# dot to the domain. Should not quote even if strange chars are used
# in the cookie value.
headers.append("Set-Cookie: PART_NUMBER=3,4; domain=foo.bar.acme.com")
res = FakeResponse(headers, "http://www.acme.com/foo")
c.extract_cookies(res, req)
req = urllib.request.Request("http://foo.bar.acme.com/foo")
c.add_cookie_header(req)
self.assertIn("PART_NUMBER=3,4", req.get_header("Cookie"))
self.assertIn("Customer=WILE_E_COYOTE",req.get_header("Cookie"))
def test_intranet_domains_2965(self):
# Test handling of local intranet hostnames without a dot.
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
interact_2965(c, "http://example/",
"foo1=bar; PORT; Discard; Version=1;")
cookie = interact_2965(c, "http://example/",
'foo2=bar; domain=".local"; Version=1')
self.assertIn("foo1=bar", cookie)
interact_2965(c, "http://example/", 'foo3=bar; Version=1')
cookie = interact_2965(c, "http://example/")
self.assertIn("foo2=bar", cookie)
self.assertEqual(len(c), 3)
def test_intranet_domains_ns(self):
c = CookieJar(DefaultCookiePolicy(rfc2965 = False))
interact_netscape(c, "http://example/", "foo1=bar")
cookie = interact_netscape(c, "http://example/",
'foo2=bar; domain=.local')
self.assertEqual(len(c), 2)
self.assertIn("foo1=bar", cookie)
cookie = interact_netscape(c, "http://example/")
self.assertIn("foo2=bar", cookie)
self.assertEqual(len(c), 2)
def test_empty_path(self):
# Test for empty path
        # Broken web-server ORION/1.3.38 returns to the client a response like
#
# Set-Cookie: JSESSIONID=ABCDERANDOM123; Path=
#
# ie. with Path set to nothing.
        # In this case, extract_cookies() must set the cookie path to / (root)
c = CookieJar(DefaultCookiePolicy(rfc2965 = True))
headers = []
req = urllib.request.Request("http://www.ants.com/")
headers.append("Set-Cookie: JSESSIONID=ABCDERANDOM123; Path=")
res = FakeResponse(headers, "http://www.ants.com/")
c.extract_cookies(res, req)
req = urllib.request.Request("http://www.ants.com/")
c.add_cookie_header(req)
self.assertEqual(req.get_header("Cookie"),
"JSESSIONID=ABCDERANDOM123")
self.assertEqual(req.get_header("Cookie2"), '$Version="1"')
# missing path in the request URI
req = urllib.request.Request("http://www.ants.com:8080")
c.add_cookie_header(req)
self.assertEqual(req.get_header("Cookie"),
"JSESSIONID=ABCDERANDOM123")
self.assertEqual(req.get_header("Cookie2"), '$Version="1"')
def test_session_cookies(self):
year_plus_one = time.localtime()[0] + 1
# Check session cookies are deleted properly by
# CookieJar.clear_session_cookies method
req = urllib.request.Request('http://www.perlmeister.com/scripts')
headers = []
headers.append("Set-Cookie: s1=session;Path=/scripts")
headers.append("Set-Cookie: p1=perm; Domain=.perlmeister.com;"
"Path=/;expires=Fri, 02-Feb-%d 23:24:20 GMT" %
year_plus_one)
headers.append("Set-Cookie: p2=perm;Path=/;expires=Fri, "
"02-Feb-%d 23:24:20 GMT" % year_plus_one)
headers.append("Set-Cookie: s2=session;Path=/scripts;"
"Domain=.perlmeister.com")
headers.append('Set-Cookie2: s3=session;Version=1;Discard;Path="/"')
res = FakeResponse(headers, 'http://www.perlmeister.com/scripts')
c = CookieJar()
c.extract_cookies(res, req)
# How many session/permanent cookies do we have?
counter = {"session_after": 0,
"perm_after": 0,
"session_before": 0,
"perm_before": 0}
for cookie in c:
key = "%s_before" % cookie.value
counter[key] = counter[key] + 1
c.clear_session_cookies()
# How many now?
for cookie in c:
key = "%s_after" % cookie.value
counter[key] = counter[key] + 1
        # check that no permanent cookie got lost accidentally
        self.assertEqual(counter["perm_after"], counter["perm_before"])
        # check that every session cookie was cleared
        self.assertEqual(counter["session_after"], 0)
        # sanity check: there were session cookies to clear in the first place
        self.assertNotEqual(counter["session_before"], 0)
def test_main(verbose=None):
test.support.run_unittest(
DateTimeTests,
HeaderTests,
CookieTests,
FileCookieJarTests,
LWPCookieTests,
)
if __name__ == "__main__":
test_main(verbose=True)
|
Microvellum/Fluid-Designer
|
win64-vc/2.78/python/lib/test/test_http_cookiejar.py
|
Python
|
gpl-3.0
| 73,097
|
# -*- coding: utf-8 -*-
# Copyright(C) 2013 Bezleputh
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.browser import PagesBrowser, URL
from .pages import SearchPage, AdvertPage
__all__ = ['IndeedBrowser']
class IndeedBrowser(PagesBrowser):
BASEURL = 'http://www.indeed.fr'
search_page = URL('/emplois(?P<parameters>.*)', SearchPage)
advert_page = URL('/cmp/(?P<company>.*)/jobs/(?P<title>.*)-(?P<nb>.*)', AdvertPage)
def search_job(self, metier='', contrat='', limit_date='', radius='', place=''):
params = '?as_ttl=%s&limit=10&sort=date&st=employer&sr=directhire&jt=%s&fromage=%s&radius=%s'\
% (metier.replace(' ', '+'), contrat, limit_date, radius)
if place:
params = '%s&l=%s' % (params, place)
self.search_page.go(parameters=params)
assert self.search_page.is_here(parameters=params)
return self.page.iter_job_adverts()
def get_job_advert(self, _id, advert):
splitted_id = _id.split('#')
return self.advert_page.go(nb=splitted_id[0],
title=splitted_id[1],
company=splitted_id[2]).get_job_advert(obj=advert)
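# Illustrative usage sketch (not part of the original module). The browser is
# normally driven through the weboob job backend, but called directly it might
# look like the following; the query values ('python', 'Paris') are made-up
# examples.
#
#     browser = IndeedBrowser()
#     for advert in browser.search_job(metier='python', place='Paris'):
#         print advert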
|
sputnick-dev/weboob
|
modules/indeed/browser.py
|
Python
|
agpl-3.0
| 1,835
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2017, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import abc
from collections import defaultdict
import random
import numpy as np
from htmresearch.algorithms.apical_tiebreak_temporal_memory import (
ApicalTiebreakPairMemory)
from htmresearch.algorithms.column_pooler import ColumnPooler
from htmresearch.algorithms.single_layer_location_memory import (
SingleLayerLocationMemory)
class SingleLayerLocation2DExperiment(object):
"""
The experiment code organized into a class.
"""
def __init__(self, diameter, objects, featureNames):
self.diameter = diameter
self.objects = objects
# A grid of location SDRs.
self.locations = dict(
((i, j), np.array(sorted(random.sample(xrange(1000), 30)), dtype="uint32"))
for i in xrange(diameter)
for j in xrange(diameter))
# 8 transition SDRs -- one for each straight and diagonal direction.
self.transitions = dict(
((i, j), np.array(sorted(random.sample(xrange(1000), 30)), dtype="uint32"))
for i in xrange(-1, 2)
for j in xrange(-1, 2)
if i != 0 or j != 0)
self.features = dict(
(k, np.array(sorted(random.sample(xrange(150), 15)), dtype="uint32"))
for k in featureNames)
self.locationLayer = SingleLayerLocationMemory(**{
"cellCount": 1000,
"deltaLocationInputSize": 1000,
"featureLocationInputSize": 150*32,
"sampleSize": 15,
"activationThreshold": 10,
"learningThreshold": 8,
})
self.inputLayer = ApicalTiebreakPairMemory(**{
"columnCount": 150,
"cellsPerColumn": 32,
"basalInputSize": 1000,
"apicalInputSize": 4096,
})
self.objectLayer = ColumnPooler(**{
"inputWidth": 150 * 32
})
# Use these for classifying SDRs and for testing whether they're correct.
self.inputRepresentations = {}
self.objectRepresentations = {}
self.learnedObjectPlacements = {}
self.monitors = {}
self.nextMonitorToken = 1
def addMonitor(self, monitor):
"""
Subscribe to SingleLayer2DExperiment events.
@param monitor (SingleLayer2DExperimentMonitor)
An object that implements a set of monitor methods
@return (object)
An opaque object that can be used to refer to this monitor.
"""
token = self.nextMonitorToken
self.nextMonitorToken += 1
self.monitors[token] = monitor
return token
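  # Illustrative sketch (not part of the original code): a concrete monitor
  # subclassing SingleLayer2DExperimentMonitor (defined at the end of this
  # module) could be attached to an instance of this class and later detached
  # like this. "MyMonitor" and "experiment" are hypothetical names.
  #
  #   monitor = MyMonitor()
  #   token = experiment.addMonitor(monitor)
  #   ...  # run timesteps; the monitor callbacks fire around each compute
  #   experiment.removeMonitor(token)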
def removeMonitor(self, monitorToken):
"""
Unsubscribe from LocationExperiment events.
@param monitorToken (object)
The return value of addMonitor() from when this monitor was added
"""
del self.monitors[monitorToken]
def doTimestep(self, locationSDR, transitionSDR, featureSDR,
egocentricLocation, learn):
"""
Run one timestep.
"""
for monitor in self.monitors.values():
monitor.beforeTimestep(locationSDR, transitionSDR, featureSDR,
egocentricLocation, learn)
params = {
"newLocation": locationSDR,
"deltaLocation": transitionSDR,
"featureLocationInput": self.inputLayer.getActiveCells(),
"featureLocationGrowthCandidates": self.inputLayer.getPredictedActiveCells(),
"learn": learn,
}
self.locationLayer.compute(**params)
for monitor in self.monitors.values():
monitor.afterLocationCompute(**params)
params = {
"activeColumns": featureSDR,
"basalInput": self.locationLayer.getActiveCells(),
"apicalInput": self.objectLayer.getActiveCells(),
}
self.inputLayer.compute(**params)
for monitor in self.monitors.values():
monitor.afterInputCompute(**params)
params = {
"feedforwardInput": self.inputLayer.getActiveCells(),
"feedforwardGrowthCandidates": self.inputLayer.getPredictedActiveCells(),
"learn": learn,
}
self.objectLayer.compute(**params)
for monitor in self.monitors.values():
monitor.afterObjectCompute(**params)
def learnTransitions(self):
"""
Train the location layer to do path integration. For every location, teach
it each previous-location + motor command pair.
"""
print "Learning transitions"
for (i, j), locationSDR in self.locations.iteritems():
print "i, j", (i, j)
for (di, dj), transitionSDR in self.transitions.iteritems():
i2 = i + di
j2 = j + dj
if (0 <= i2 < self.diameter and
0 <= j2 < self.diameter):
for _ in xrange(5):
self.locationLayer.reset()
self.locationLayer.compute(newLocation=self.locations[(i,j)])
self.locationLayer.compute(deltaLocation=transitionSDR,
newLocation=self.locations[(i2, j2)])
self.locationLayer.reset()
def learnObjects(self, objectPlacements):
"""
Learn each provided object in egocentric space. Touch every location on each
object.
    This method doesn't try to move the sensor along a path. Instead it just leaps
the sensor to each object location, resetting the location layer with each
leap.
This method simultaneously learns 4 sets of synapses:
- location -> input
- input -> location
- input -> object
- object -> input
"""
for monitor in self.monitors.values():
monitor.afterPlaceObjects(objectPlacements)
for objectName, objectDict in self.objects.iteritems():
self.reset()
objectPlacement = objectPlacements[objectName]
for locationName, featureName in objectDict.iteritems():
egocentricLocation = (locationName[0] + objectPlacement[0],
locationName[1] + objectPlacement[1])
locationSDR = self.locations[egocentricLocation]
featureSDR = self.features[featureName]
transitionSDR = np.empty(0)
self.locationLayer.reset()
self.inputLayer.reset()
for _ in xrange(10):
self.doTimestep(locationSDR, transitionSDR, featureSDR,
egocentricLocation, learn=True)
self.inputRepresentations[(featureName, egocentricLocation)] = (
self.inputLayer.getActiveCells())
self.objectRepresentations[objectName] = self.objectLayer.getActiveCells()
self.learnedObjectPlacements[objectName] = objectPlacement
def _selectTransition(self, allocentricLocation, objectDict, visitCounts):
"""
Choose the transition that lands us in the location we've touched the least
often. Break ties randomly, i.e. choose the first candidate in a shuffled
list.
"""
candidates = list(transition
for transition in self.transitions.keys()
if (allocentricLocation[0] + transition[0],
allocentricLocation[1] + transition[1]) in objectDict)
random.shuffle(candidates)
selectedVisitCount = None
selectedTransition = None
selectedAllocentricLocation = None
for transition in candidates:
candidateLocation = (allocentricLocation[0] + transition[0],
allocentricLocation[1] + transition[1])
if (selectedVisitCount is None or
visitCounts[candidateLocation] < selectedVisitCount):
selectedVisitCount = visitCounts[candidateLocation]
selectedTransition = transition
selectedAllocentricLocation = candidateLocation
return selectedAllocentricLocation, selectedTransition
def inferObject(self, objectPlacements, objectName, startPoint,
transitionSequence, settlingTime=2):
for monitor in self.monitors.values():
monitor.afterPlaceObjects(objectPlacements)
objectDict = self.objects[objectName]
self.reset()
allocentricLocation = startPoint
nextTransitionSDR = np.empty(0, dtype="uint32")
transitionIterator = iter(transitionSequence)
try:
while True:
featureName = objectDict[allocentricLocation]
egocentricLocation = (allocentricLocation[0] +
objectPlacements[objectName][0],
allocentricLocation[1] +
objectPlacements[objectName][1])
featureSDR = self.features[featureName]
steps = ([nextTransitionSDR] +
[np.empty(0)]*settlingTime)
for transitionSDR in steps:
self.doTimestep(np.empty(0), transitionSDR, featureSDR,
egocentricLocation, learn=False)
transitionName = transitionIterator.next()
allocentricLocation = (allocentricLocation[0] + transitionName[0],
allocentricLocation[1] + transitionName[1])
nextTransitionSDR = self.transitions[transitionName]
except StopIteration:
pass
def inferObjectsWithRandomMovements(self, objectPlacements, maxTouches=20,
settlingTime=2):
"""
Infer each object without any location input.
"""
for monitor in self.monitors.values():
monitor.afterPlaceObjects(objectPlacements)
for objectName, objectDict in self.objects.iteritems():
self.reset()
visitCounts = defaultdict(int)
learnedObjectPlacement = self.learnedObjectPlacements[objectName]
allocentricLocation = random.choice(objectDict.keys())
nextTransitionSDR = np.empty(0, dtype="uint32")
# Traverse the object until it is inferred.
success = False
for _ in xrange(maxTouches):
featureName = objectDict[allocentricLocation]
egocentricLocation = (allocentricLocation[0] +
objectPlacements[objectName][0],
allocentricLocation[1] +
objectPlacements[objectName][1])
featureSDR = self.features[featureName]
steps = ([nextTransitionSDR] +
[np.empty(0)]*settlingTime)
for transitionSDR in steps:
self.doTimestep(np.empty(0), transitionSDR, featureSDR,
egocentricLocation, learn=False)
visitCounts[allocentricLocation] += 1
# We should eventually infer the egocentric location where we originally
# learned this location on the object.
learnedEgocentricLocation = (
allocentricLocation[0] + learnedObjectPlacement[0],
allocentricLocation[1] + learnedObjectPlacement[1])
if (set(self.objectLayer.getActiveCells()) ==
set(self.objectRepresentations[objectName]) and
set(self.inputLayer.getActiveCells()) ==
set(self.inputRepresentations[(featureName,
learnedEgocentricLocation)]) and
set(self.locationLayer.getActiveCells()) ==
set(self.locations[learnedEgocentricLocation])):
success = True
break
else:
allocentricLocation, transitionName = self._selectTransition(
allocentricLocation, objectDict, visitCounts)
nextTransitionSDR = self.transitions[transitionName]
def reset(self):
self.locationLayer.reset()
self.objectLayer.reset()
self.inputLayer.reset()
for monitor in self.monitors.values():
monitor.afterReset()
class SingleLayer2DExperimentMonitor(object):
"""
Abstract base class for a SingleLayer2DExperiment monitor.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def beforeTimestep(self, locationSDR, transitionSDR, featureSDR,
egocentricLocation, learn):
pass
@abc.abstractmethod
def afterReset(self):
pass
@abc.abstractmethod
def afterPlaceObjects(self, objectPlacements):
pass
|
ywcui1990/nupic.research
|
projects/location_layer/single_layer_2d_experiment/runner.py
|
Python
|
agpl-3.0
| 12,574
|
# This file is part of VoltDB.
# Copyright (C) 2008-2015 VoltDB Inc.
#
# This file contains original code and/or modifications of original code.
# Any modifications made by VoltDB Inc. are licensed under the following
# terms and conditions:
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
__author__ = 'scooper'
import sys
import os
import optparse
import shlex
import copy
from voltcli import utility
# Volt CLI command processor
# Individual option variables are added by the option parser. They are available
# externally as module attributes.
#===============================================================================
class BaseOption(object):
#===============================================================================
"""
General CLI option specification (uses optparse keywords for now).
"""
def __init__(self, short_opt, long_opt, dest, help_msg, **kwargs):
self.short_opt = short_opt
self.long_opt = long_opt
self.kwargs = kwargs
self.kwargs['dest'] = dest
# A help message of None makes it a hidden option.
if help_msg is not None:
self.kwargs['help'] = help_msg
if 'default' in self.kwargs:
if utility.is_string(kwargs['default']):
self.kwargs['help'] += ' (default="%s")' % self.kwargs['default']
else:
self.kwargs['help'] += ' (default=%s)' % self.kwargs['default']
else:
self.kwargs['help'] = optparse.SUPPRESS_HELP
def get_option_names(self):
return [a for a in (self.short_opt, self.long_opt) if a is not None]
def get_dest(self):
if 'dest' not in self.kwargs:
utility.abort('%s must specify a "dest" property.' % self.__class__.__name__)
return self.kwargs['dest']
def get_default(self):
return self.kwargs.get('default', None)
def postprocess_value(self, value):
# Hook for massaging the option instance value. Default to NOP.
return value
def __str__(self):
return '%s(%s/%s %s)' % (self.__class__.__name__,
self.short_opt, self.long_opt, self.kwargs)
def __cmp__(self, other):
# Sort options by lowercase letter or word, depending on which is available.
if self.short_opt:
if other.short_opt:
return cmp(self.short_opt.lower(), other.short_opt.lower())
return 1
if other.short_opt:
return -1
if self.long_opt:
if other.long_opt:
return cmp(self.long_opt.lower(), other.long_opt.lower())
return 1
if other.long_opt:
return -1
return 0
def has_value(self):
return (not 'action' in self.kwargs or self.kwargs['action'] == 'store')
#===============================================================================
class BooleanOption(BaseOption):
#===============================================================================
"""
Boolean CLI option.
"""
def __init__(self, short_opt, long_opt, dest, help_msg, **kwargs):
BaseOption.__init__(self, short_opt, long_opt, dest, help_msg,
action = 'store_true', **kwargs)
#===============================================================================
class StringOption(BaseOption):
#===============================================================================
"""
CLI string value option.
"""
def __init__(self, short_opt, long_opt, dest, help_msg, **kwargs):
BaseOption.__init__(self, short_opt, long_opt, dest, help_msg, **kwargs)
#===============================================================================
class IntegerOption(BaseOption):
#===============================================================================
"""
Integer CLI option.
"""
def __init__(self, short_opt, long_opt, dest, help_msg, **kwargs):
BaseOption.__init__(self, short_opt, long_opt, dest, help_msg, **kwargs)
def postprocess_value(self, value):
if type(value) is not int:
try:
converted = int(value.strip())
except ValueError:
utility.abort('Bad "%s" integer value: %s' % (self.get_dest().upper(), value))
return converted
return value
#===============================================================================
class StringListOption(StringOption):
#===============================================================================
"""
CLI comma-separated string list option.
"""
def __init__(self, short_opt, long_opt, dest, help_msg, **kwargs):
StringOption.__init__(self, short_opt, long_opt, dest, help_msg, **kwargs)
def postprocess_value(self, value):
return [v.strip() for v in value.split(',')]
#===============================================================================
class IntegerListOption(StringOption):
#===============================================================================
"""
CLI comma-separated integer list option.
"""
def __init__(self, short_opt, long_opt, dest, help_msg, **kwargs):
StringOption.__init__(self, short_opt, long_opt, dest, help_msg, **kwargs)
def postprocess_value(self, value):
bad = []
converted = []
for v in value.split(','):
try:
converted.append(int(v.strip()))
except ValueError:
bad.append(v.strip())
if bad:
utility.abort('Bad "%s" integer list value(s):' % self.get_dest().upper(), bad)
return converted
#===============================================================================
class EnumOption(StringOption):
#===============================================================================
"""
Enumeration option for selecting from a list of possible symbols.
"""
def __init__(self, short_opt, long_opt, dest, help_pfx, *values, **kwargs):
if not values or len(values) <= 1:
utility.abort('EnumOption "%s" must specify multiple valid values.' % dest)
self.values = values
help_msg = '%s [%s]' % (help_pfx, '|'.join(self.values))
StringOption.__init__(self, short_opt, long_opt, dest, help_msg, **kwargs)
def postprocess_value(self, value):
if value not in self.values:
utility.abort('EnumOption "%s" value "%s" is not one of the following:'
% (self.get_dest(), value), self.values)
return value
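# Illustrative sketch (not part of the original module): an EnumOption restricts
# a string option to a fixed set of symbols. A hypothetical output-format option
# accepting three formats could be declared as:
#
#   EnumOption('-f', '--format', 'format', 'output format', 'csv', 'json', 'tsv')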
#===============================================================================
class HostOption(StringOption):
#===============================================================================
"""
Comma-separated HOST[:PORT] list option.
"""
def __init__(self, short_opt, long_opt, dest, name, **kwargs):
self.min_count = utility.kwargs_get_integer(kwargs, 'min_count', default = 1)
self.max_count = utility.kwargs_get_integer(kwargs, 'max_count', default = 1)
self.default_port = utility.kwargs_get_integer(kwargs, 'default_port', default = 21212)
if self.max_count == 1:
help_msg = 'the %s HOST[:PORT]' % name
else:
help_msg = 'the comma-separated %s HOST[:PORT] list' % name
if self.default_port:
help_msg += ' (default port=%d)' % self.default_port
StringOption.__init__(self, short_opt, long_opt, dest, help_msg, **kwargs)
def postprocess_value(self, value):
hosts = utility.parse_hosts(value,
min_hosts = self.min_count,
max_hosts = self.max_count,
default_port = self.default_port)
if self.max_count == 1:
return hosts[0]
return hosts
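# Illustrative sketch (not part of the original module): a HostOption parses a
# comma-separated HOST[:PORT] list through utility.parse_hosts(). A hypothetical
# option accepting up to three servers on the default port 21212 could be:
#
#   HostOption('-H', '--host', 'host', 'server', max_count=3)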
#===============================================================================
class ArgumentException(Exception):
#===============================================================================
pass
#===============================================================================
class BaseArgument(object):
#===============================================================================
def __init__(self, name, help, **kwargs):
self.name = name
self.help = help
self.min_count = kwargs.get('min_count', 1)
self.max_count = kwargs.get('max_count', 1)
# A max_count value of None is interpreted as infinity.
if self.max_count is None:
self.max_count = sys.maxint
def get(self, value):
utility.abort('BaseArgument subclass must implement a get(value) method: %s'
% self.__class__.__name__)
#===============================================================================
class StringArgument(BaseArgument):
#===============================================================================
def __init__(self, name, help, **kwargs):
BaseArgument.__init__(self, name, help, **kwargs)
def get(self, value):
return str(value)
#===============================================================================
class IntegerArgument(BaseArgument):
#===============================================================================
def __init__(self, name, help, **kwargs):
BaseArgument.__init__(self, name, help, **kwargs)
def get(self, value):
try:
return int(value)
except ValueError, e:
raise ArgumentException('%s value is not a valid integer: %s'
% (self.name.upper(), str(value)))
#===============================================================================
class PathArgument(StringArgument):
#===============================================================================
def __init__(self, name, help, **kwargs):
# For now the only intelligence is to check for absolute paths when required.
# TODO: Add options to check for directories, files, attributes, etc..
self.absolute = utility.kwargs_get_boolean(kwargs, 'absolute', default = False)
self.exists = utility.kwargs_get_boolean(kwargs, 'exists', default = False)
requirements = []
help2 = ''
if self.absolute:
requirements.append('absolute path')
if self.exists:
requirements.append('must exist')
if requirements:
help2 = ' (%s)' % ', '.join(requirements)
StringArgument.__init__(self, name, help + help2, **kwargs)
def get(self, value):
svalue = str(value)
if self.absolute and not svalue.startswith('/'):
raise ArgumentException('%s path is not absolute: %s' % (self.name.upper(), svalue))
if self.exists and not os.path.exists(svalue):
raise ArgumentException('%s path does not exist: %s' % (self.name.upper(), svalue))
return svalue
#===============================================================================
class ParsedCommand(object):
#===============================================================================
"""
Holds the result of parsing a CLI command.
"""
def __init__(self, parser, opts, args, verb):
self.opts = opts
self.args = args
self.parser = parser
self.verb = verb
def __str__(self):
return 'ParsedCommand: %s %s %s' % (self.verb.name, self.opts, self.args)
#===============================================================================
class ExtendedHelpOptionParser(optparse.OptionParser):
#===============================================================================
'''
Extends OptionParser in order to support extended help.
'''
def __init__(self, *args, **kwargs):
self.format_epilog_called = False
optparse.OptionParser.__init__(self, *args, **kwargs)
def format_epilog(self, formatter):
"""
OptionParser hook that allows us to append verb descriptions to the
help message.
"""
self.format_epilog_called = True
return self.on_format_epilog()
def print_help(self):
"""
Override OptionParser.print_help() to work around Python 2.4 optparse
not supporting format_epilog().
"""
self.format_epilog_called = False
optparse.OptionParser.print_help(self)
if not self.format_epilog_called:
sys.stdout.write(self.on_format_epilog())
def on_format_epilog(self):
utility.abort('ExtendedHelpOptionParser subclass must override on_format_epilog(): %s'
% self.__class__.__name__)
#===============================================================================
class CLIParser(ExtendedHelpOptionParser):
#===============================================================================
"""
Command/sub-command (verb) argument and option parsing and validation.
"""
def __init__(self, prog, verbs, base_options, usage, description, version):
"""
Command line processor constructor.
"""
self.prog = prog
self.verb = None
self.verbs = verbs
self.verb_names = verbs.keys()
self.base_options = base_options
self.verb_names.sort()
self.base_options.sort()
optparse.OptionParser.__init__(self,
prog = prog,
description = description,
usage = usage,
version = version)
def add_base_options(self):
"""
Add the base options.
"""
for option in self.base_options:
self.add_option(*option.get_option_names(), **option.kwargs)
def add_verb_options(self, verb):
"""
Add options for verb command line.
"""
for option in verb.iter_options():
try:
self.add_option(*option.get_option_names(), **option.kwargs)
except Exception, e:
utility.abort('Exception initializing options for verb "%s".' % verb.name, e)
def process_verb_options(self, verb, opts):
"""
Validate the verb options and post-process the values.
"""
max_width = 0
missing = []
# Post-process the option values, e.g. convert strings to lists as needed.
for o in verb.iter_options():
dest = o.get_dest()
value = getattr(opts, dest)
if not value is None:
setattr(opts, dest, o.postprocess_value(value))
def process_verb_arguments(self, verb, verb_args, verb_opts):
"""
Validate the verb arguments. Check that required arguments are present
and populate verb_opts attributes with scalar values or lists (for
trailing arguments with max_count > 1).
"""
# Add fixed arguments passed in through the decorator to the verb object.
args = copy.copy(verb_args) + verb.command_arguments
# Set attributes for required arguments.
missing = []
exceptions = []
iarg = 0
nargs = verb.get_argument_count()
for arg in verb.iter_arguments():
# It's missing if we've exhausted all the arguments before
# exhausting all the argument specs, unless it's the last argument
# spec and it's optional.
if iarg > len(args) or (iarg == len(args) and arg.min_count > 0):
missing.append((arg.name, arg.help))
else:
value = None
# The last argument can have repeated arguments. If more than
# one are allowed the values are put into a list.
if iarg == nargs - 1 and arg.max_count > 1:
if len(args) - iarg < arg.min_count:
utility.abort('A minimum of %d %s arguments are required.'
% (arg.min_count, arg.name.upper()))
if len(args) - iarg > arg.max_count:
utility.abort('A maximum of %d %s arguments are allowed.'
% (arg.max_count, arg.name.upper()))
# Pass through argument class get() for validation, conversion, etc..
# Skip bad values and report on them at the end.
value = []
for v in args[iarg:]:
try:
value.append(arg.get(v))
except ArgumentException, e:
exceptions.append(e)
iarg = len(args)
elif len(args) > 0:
# All other arguments are treated as scalars.
# Pass through argument class get() for validation, conversion, etc..
try:
value = arg.get(args[iarg])
except ArgumentException, e:
exceptions.append(e)
iarg += 1
if value is not None or arg.min_count == 0:
setattr(verb_opts, arg.name, value)
# Run the gauntlet of error disclosure. Abort and display usage as appropriate.
had_errors = 0
show_usage = False
if exceptions:
msg = 'Argument value %s:' % utility.pluralize('error', len(exceptions))
utility.error(msg, [str(e) for e in exceptions])
had_errors += 1
if iarg < len(args):
self._abort('Extra arguments were provided:', args[iarg:])
had_errors += 1
show_usage = True
if missing:
fmt = '%%-%ds %%s' % max([len(o) for (o, h) in missing])
msg = 'Missing required %s:' % utility.pluralize('argument', len(missing))
utility.error(msg, [fmt % (o.upper(), h) for (o, h) in missing])
had_errors += 1
show_usage = True
if had_errors > 0:
if show_usage:
self._abort()
sys.exit(1)
def initialize_verb(self, verb_name):
"""
Initialize command line options for a specific verb.
"""
# Add the base options that are applicable to all verbs.
self.add_base_options()
# See if we know about the verb.
if verb_name.startswith('-'):
self._abort('The first argument must be a verb, not an option.')
if verb_name not in self.verbs:
self._abort('Unknown verb: %s' % verb_name)
self.verb = self.verbs[verb_name]
# Change the messaging from generic to verb-specific.
self.set_usage(self._get_verb_usage(self.verb, brief=False))
self.set_description(self.verb.cli_spec.get_attr('description', 'No description provided'))
# Parse the command-specific options.
self.add_verb_options(self.verb)
def parse(self, *cmdargs):
"""
Parse command line.
"""
# Need something.
if not cmdargs:
self._abort('No verb was specified.')
pre_opts = preprocess_options(self.base_options, cmdargs)
# Support verb-less options like -h, --help and --version.
if cmdargs[0].startswith('-') and (pre_opts.help or pre_opts.version):
opts, args = self.parse_args(list(cmdargs))
return ParsedCommand(self, opts, args, None)
# Initialize options and arguments.
self.initialize_verb(cmdargs[0])
verb_cmdargs = list(cmdargs[1:])
if self.verb.cli_spec.passthrough:
# Provide all options and arguments without processing the options.
# E.g. Java programs want to handle all the options without interference.
verb_args = verb_cmdargs
verb_opts = None
else:
# Parse the verb command line.
verb_opts, verb_parsed_args = self.parse_args(verb_cmdargs)
# Post-process options.
self.process_verb_options(self.verb, verb_opts)
# Post-process arguments.
self.process_verb_arguments(self.verb, verb_parsed_args, verb_opts)
# The arguments should all be attributes in verb_opts now.
verb_args = []
return ParsedCommand(self, verb_opts, verb_args, self.verb)
def get_usage_string(self):
"""
Get usage string.
"""
# Swap stdout with UsageScraper pseudo-file object so that output is captured.
# Necessary because optparse only sends help to stdout.
class UsageScraper(object):
def __init__(self):
self.usage = []
def write(self, s):
self.usage.append(s)
scraper = UsageScraper()
stdout_save = sys.stdout
try:
sys.stdout = scraper
self.print_help()
finally:
            sys.stdout = stdout_save
return ''.join(scraper.usage)
def on_format_epilog(self):
if not self.verb:
return self._format_verb_list()
blocks = []
if self.verb.get_argument_count() > 0:
rows = [(get_argument_usage(a), a.help) for a in self.verb.iter_arguments()]
blocks.append('\n'.join(['Arguments:', utility.format_table(rows, indent = 2)]))
# other_info is used for the multi-verb variation list.
other_info = self.verb.cli_spec.get_attr('other_info', None)
if other_info:
blocks.append(other_info.strip())
# Automatically wrap description2 as a paragraph.
description2 = self.verb.cli_spec.get_attr('description2', None)
if description2:
blocks.append(utility.paragraph(description2))
return '\n%s' % '\n\n'.join(blocks)
def _abort(self, *msgs):
utility.error(*msgs)
sys.stdout.write('\n')
self.print_help()
sys.stdout.write('\n')
sys.exit(1)
def _format_verb_list(self):
rows1 = []
rows2 = []
for verb_name in self.verb_names:
verb = self.verbs[verb_name]
if not verb.cli_spec.hideverb:
usage = self._get_verb_usage(verb, brief=True)
if verb.cli_spec.baseverb:
rows2.append((usage, verb.cli_spec.description))
else:
rows1.append((usage, verb.cli_spec.description))
table1 = utility.format_table(rows1, caption = 'Verb Descriptions', separator = ' ')
table2 = utility.format_table(rows2, caption = 'Common Verbs', separator = ' ')
return '%s\n%s' % (table1, table2)
def _iter_options(self, verb):
options = []
for option in self.base_options:
yield option
if verb:
for option in verb.iter_options():
yield option
def _iter_visible_options(self, verb):
for option in self._iter_options(verb):
if option.kwargs.get('help', None) != optparse.SUPPRESS_HELP:
yield option
def _count_visible_options(self, verb):
return len([o for o in self._iter_visible_options(verb)])
def _get_verb_usage(self, verb, brief=False):
"""
Provide the full usage string, including argument names, for a verb.
"""
args = [get_argument_usage(a) for a in verb.iter_arguments()]
usage = [self.prog, verb.name]
if not brief:
num_visible_options = self._count_visible_options(verb)
if num_visible_options > 0:
usage.append('[ OPTIONS ... ]')
if verb.cli_spec.usage:
usage.append(verb.cli_spec.usage)
if args:
usage.append(' '.join(args))
return ' '.join(usage)
#===============================================================================
class CLISpec(object):
#===============================================================================
def __init__(self, **kwargs):
self._kwargs = kwargs
# Make sure options and arguments are flat lists.
if 'options' in self._kwargs:
self._kwargs['options'] = utility.flatten_to_list(self._kwargs['options'])
else:
self._kwargs['options'] = []
if 'arguments' in self._kwargs:
self._kwargs['arguments'] = utility.flatten_to_list(self._kwargs['arguments'])
else:
self._kwargs['arguments'] = []
def __getattr__(self, name):
return self._kwargs.get(name, None)
def __str__(self):
s = 'CLISpec: [\n'
keys = self._kwargs.keys()
keys.sort()
for key in keys:
s += ' %s: %s\n' % (key, utility.to_display_string(self._kwargs[key]))
s += ']'
return s
def add_to_list(self, name, *args):
utility.kwargs_merge_list(self._kwargs, name, *args)
def get_attr(self, name, default = None):
return utility.kwargs_get(self._kwargs, name, default = default, remove = False)
def pop_attr(self, name, default = None):
return utility.kwargs_get(self._kwargs, name, default = default, remove = True)
def merge_java_options(self, name, *options):
utility.kwargs_merge_java_options(self._kwargs, name, options)
def set_defaults(self, **kwargs):
utility.kwargs_set_defaults(self._kwargs, **kwargs)
def find_option(self, dest_name):
for o in self._kwargs['options']:
if o.get_dest() == dest_name:
return o
return None
def find_argument(self, dest_name):
for a in self._kwargs['arguments']:
if a.name == dest_name:
return a
return None
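# Illustrative sketch (not part of the original module): a CLISpec bundles the
# option and argument declarations for a verb. A hypothetical "copy" verb could
# be described with the option/argument classes defined above:
#
#   spec = CLISpec(
#       description='copy a file',
#       options=[BooleanOption('-v', '--verbose', 'verbose', 'show details')],
#       arguments=[PathArgument('source', 'source path', exists=True),
#                  PathArgument('target', 'target path')])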
#===============================================================================
def get_argument_usage(a):
#===============================================================================
if a.max_count > 1:
ellipsis = ' ...'
else:
ellipsis = ''
if a.min_count == 0:
fmt = '[ %s%s ]'
else:
fmt = '%s%s'
return fmt % (a.name.upper(), ellipsis)
#===============================================================================
def preprocess_options(base_options, cmdargs):
#===============================================================================
"""
Simplistically parses command line options to allow early option checking.
Allows the parsing process to display debug messages. Returns an object
with attributes set for option values.
"""
class OptionValues(object):
pass
option_values = OptionValues()
# Create a base option dictionary indexed by short and long options.
# Add the built-in optparse help and version options so that they can be
# detected as stand-alone options.
options = {}
builtins = [BooleanOption('-h', '--help', 'help', ''),
BooleanOption(None, '--version', 'version', '')]
for opt in list(base_options) + builtins:
setattr(option_values, opt.get_dest(), opt.get_default())
if opt.short_opt:
options[opt.short_opt] = opt
if opt.long_opt:
options[opt.long_opt] = opt
# Walk through the options and arguments and set option values as attributes.
iopt = 0
while iopt < len(cmdargs):
if cmdargs[iopt].startswith('-'):
if cmdargs[iopt] in options:
opt = options[cmdargs[iopt]]
if opt.has_value():
# Option with argument
setattr(option_values, opt.get_dest(), cmdargs[iopt+1])
iopt += 1
else:
# Boolean option
setattr(option_values, opt.get_dest(), True)
iopt += 1
return option_values
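# Illustrative sketch (not part of the original module): early option checking
# before the full optparse pass. The '--debug' option below is a hypothetical
# example, not an option defined by voltcli.
#
#   base_options = [BooleanOption('-d', '--debug', 'debug', 'enable debug output')]
#   values = preprocess_options(base_options, ['--debug', 'create', 'mydb'])
#   if values.debug:
#       ...  # enable verbose/debug behaviour before the real parser runs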
|
wolffcm/voltdb
|
lib/python/voltcli/cli.py
|
Python
|
agpl-3.0
| 28,915
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import os
import re
import time
import zipfile
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from datetime import datetime
from airflow.exceptions import AirflowException
from airflow.dag.base_dag import BaseDag, BaseDagBag
from airflow.utils.logging import LoggingMixin
class SimpleDag(BaseDag):
"""
A simplified representation of a DAG that contains all attributes
required for instantiating and scheduling its associated tasks.
"""
def __init__(self,
dag_id,
task_ids,
full_filepath,
concurrency,
is_paused,
pickle_id):
"""
:param dag_id: ID of the DAG
:type dag_id: unicode
:param task_ids: task IDs associated with the DAG
:type task_ids: list[unicode]
:param full_filepath: path to the file containing the DAG e.g.
/a/b/c.py
:type full_filepath: unicode
        :param concurrency: the maximum number of tasks from this DAG that may
            run concurrently
:type concurrency: int
:param is_paused: Whether or not this DAG is paused. Tasks from paused
DAGs are not scheduled
:type is_paused: bool
:param pickle_id: ID associated with the pickled version of this DAG.
:type pickle_id: unicode
"""
self._dag_id = dag_id
self._task_ids = task_ids
self._full_filepath = full_filepath
self._is_paused = is_paused
self._concurrency = concurrency
self._pickle_id = pickle_id
@property
def dag_id(self):
"""
:return: the DAG ID
:rtype: unicode
"""
return self._dag_id
@property
def task_ids(self):
"""
:return: A list of task IDs that are in this DAG
:rtype: list[unicode]
"""
return self._task_ids
@property
def full_filepath(self):
"""
:return: The absolute path to the file that contains this DAG's definition
:rtype: unicode
"""
return self._full_filepath
@property
def concurrency(self):
"""
:return: maximum number of tasks that can run simultaneously from this DAG
:rtype: int
"""
return self._concurrency
@property
def is_paused(self):
"""
:return: whether this DAG is paused or not
:rtype: bool
"""
return self._is_paused
@property
def pickle_id(self):
"""
:return: The pickle ID for this DAG, if it has one. Otherwise None.
:rtype: unicode
"""
return self._pickle_id
class SimpleDagBag(BaseDagBag):
"""
A collection of SimpleDag objects with some convenience methods.
"""
def __init__(self, simple_dags):
"""
Constructor.
        :param simple_dags: SimpleDag objects that should be in this bag
        :type simple_dags: list[SimpleDag]
"""
self.simple_dags = simple_dags
self.dag_id_to_simple_dag = {}
for simple_dag in simple_dags:
self.dag_id_to_simple_dag[simple_dag.dag_id] = simple_dag
@property
def dag_ids(self):
"""
        :return: IDs of all the DAGs in this bag
:rtype: list[unicode]
"""
return self.dag_id_to_simple_dag.keys()
def get_dag(self, dag_id):
"""
:param dag_id: DAG ID
:type dag_id: unicode
:return: if the given DAG ID exists in the bag, return the BaseDag
corresponding to that ID. Otherwise, throw an Exception
:rtype: SimpleDag
"""
if dag_id not in self.dag_id_to_simple_dag:
raise AirflowException("Unknown DAG ID {}".format(dag_id))
return self.dag_id_to_simple_dag[dag_id]
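# Illustrative sketch (not part of the original module): constructing a
# SimpleDagBag from SimpleDag objects and looking one up by ID. The DAG ID and
# file path below are hypothetical.
#
#   dag = SimpleDag(dag_id='example_dag',
#                   task_ids=['task_1', 'task_2'],
#                   full_filepath='/dags/example_dag.py',
#                   concurrency=16,
#                   is_paused=False,
#                   pickle_id=None)
#   bag = SimpleDagBag([dag])
#   assert 'example_dag' in bag.dag_ids
#   same_dag = bag.get_dag('example_dag')   # returns the SimpleDag above
#   bag.get_dag('missing')                  # raises AirflowException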
def list_py_file_paths(directory, safe_mode=True):
"""
Traverse a directory and look for Python files.
:param directory: the directory to traverse
:type directory: unicode
:param safe_mode: whether to use a heuristic to determine whether a file
contains Airflow DAG definitions
:return: a list of paths to Python files in the specified directory
:rtype: list[unicode]
"""
file_paths = []
if directory is None:
return []
elif os.path.isfile(directory):
return [directory]
elif os.path.isdir(directory):
patterns = []
for root, dirs, files in os.walk(directory, followlinks=True):
            ignore_file = [f for f in files if f == '.airflowignore']
            if ignore_file:
                with open(os.path.join(root, ignore_file[0]), 'r') as ignore_fh:
                    patterns += [p for p in ignore_fh.read().split('\n') if p]
for f in files:
try:
file_path = os.path.join(root, f)
if not os.path.isfile(file_path):
continue
mod_name, file_ext = os.path.splitext(
os.path.split(file_path)[-1])
if file_ext != '.py' and not zipfile.is_zipfile(file_path):
continue
if any([re.findall(p, file_path) for p in patterns]):
continue
# Heuristic that guesses whether a Python file contains an
# Airflow DAG definition.
might_contain_dag = True
if safe_mode and not zipfile.is_zipfile(file_path):
with open(file_path, 'rb') as f:
content = f.read()
might_contain_dag = all(
[s in content for s in (b'DAG', b'airflow')])
if not might_contain_dag:
continue
file_paths.append(file_path)
except Exception:
logging.exception("Error while examining %s", f)
return file_paths
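# Illustrative sketch (not part of the original module): collecting candidate DAG
# files from a hypothetical dags folder. With safe_mode=True only files whose
# contents mention both b'DAG' and b'airflow' are returned.
#
#   dag_files = list_py_file_paths('/usr/local/airflow/dags', safe_mode=True)
#   for path in dag_files:
#       logging.info("Found candidate DAG file: %s", path)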
class AbstractDagFileProcessor(object):
"""
Processes a DAG file. See SchedulerJob.process_file() for more details.
"""
__metaclass__ = ABCMeta
@abstractmethod
def start(self):
"""
Launch the process to process the file
"""
raise NotImplementedError()
@abstractmethod
def terminate(self, sigkill=False):
"""
Terminate (and then kill) the process launched to process the file
"""
raise NotImplementedError()
@property
@abstractmethod
def pid(self):
"""
:return: the PID of the process launched to process the given file
"""
raise NotImplementedError()
@property
@abstractmethod
def exit_code(self):
"""
After the process is finished, this can be called to get the return code
:return: the exit code of the process
:rtype: int
"""
raise NotImplementedError()
@property
@abstractmethod
def done(self):
"""
Check if the process launched to process this file is done.
:return: whether the process is finished running
:rtype: bool
"""
raise NotImplementedError()
@property
@abstractmethod
def result(self):
"""
:return: result of running SchedulerJob.process_file()
:rtype: list[SimpleDag]
"""
raise NotImplementedError()
@property
@abstractmethod
def start_time(self):
"""
:return: When this started to process the file
:rtype: datetime
"""
raise NotImplementedError()
@property
@abstractmethod
def log_file(self):
"""
:return: the log file associated with this processor
:rtype: unicode
"""
raise NotImplementedError()
@property
@abstractmethod
def file_path(self):
"""
:return: the path to the file that this is processing
:rtype: unicode
"""
raise NotImplementedError()
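# Illustrative sketch (not part of the original module): the smallest shape a
# concrete AbstractDagFileProcessor could take. A real implementation would run
# SchedulerJob.process_file() in a child process; this hypothetical stub only
# shows which members a subclass must provide.
#
#   class InlineDagFileProcessor(AbstractDagFileProcessor):
#       def __init__(self, file_path, log_file):
#           self._file_path = file_path
#           self._log_file = log_file
#           self._start_time = None
#           self._result = None
#           self._done = False
#       def start(self):
#           self._start_time = datetime.now()
#           self._result = []      # would normally hold SimpleDag objects
#           self._done = True
#       def terminate(self, sigkill=False):
#           pass
#       pid = property(lambda self: os.getpid())
#       exit_code = property(lambda self: 0)
#       done = property(lambda self: self._done)
#       result = property(lambda self: self._result)
#       start_time = property(lambda self: self._start_time)
#       log_file = property(lambda self: self._log_file)
#       file_path = property(lambda self: self._file_path)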
class DagFileProcessorManager(LoggingMixin):
"""
Given a list of DAG definition files, this kicks off several processors
in parallel to process them. The parallelism is limited and as the
processors finish, more are launched. The files are processed over and
over again, but no more often than the specified interval.
:type _file_path_queue: list[unicode]
:type _processors: dict[unicode, AbstractDagFileProcessor]
:type _last_runtime: dict[unicode, float]
:type _last_finish_time: dict[unicode, datetime]
"""
def __init__(self,
dag_directory,
file_paths,
parallelism,
process_file_interval,
child_process_log_directory,
max_runs,
processor_factory):
"""
:param dag_directory: Directory where DAG definitions are kept. All
files in file_paths should be under this directory
:type dag_directory: unicode
:param file_paths: list of file paths that contain DAG definitions
:type file_paths: list[unicode]
        :param parallelism: maximum number of processes to run simultaneously
:type parallelism: int
:param process_file_interval: process a file at most once every this
many seconds
:type process_file_interval: float
:param max_runs: The number of times to parse and schedule each file. -1
for unlimited.
:type max_runs: int
:param child_process_log_directory: Store logs for child processes in
this directory
:type child_process_log_directory: unicode
:param processor_factory: function that creates processors for DAG
definition files. Arguments are (dag_definition_path, log_file_path)
:type processor_factory: (unicode, unicode) -> (AbstractDagFileProcessor)
"""
self._file_paths = file_paths
self._file_path_queue = []
self._parallelism = parallelism
self._dag_directory = dag_directory
self._max_runs = max_runs
self._process_file_interval = process_file_interval
self._child_process_log_directory = child_process_log_directory
self._processor_factory = processor_factory
# Map from file path to the processor
self._processors = {}
# Map from file path to the last runtime
self._last_runtime = {}
# Map from file path to the last finish time
self._last_finish_time = {}
# Map from file path to the number of runs
self._run_count = defaultdict(int)
# Scheduler heartbeat key.
self._heart_beat_key = 'heart-beat'
@property
def file_paths(self):
return self._file_paths
def get_pid(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the PID of the process processing the given file or None if
the specified file is not being processed
:rtype: int
"""
if file_path in self._processors:
return self._processors[file_path].pid
return None
def get_all_pids(self):
"""
:return: a list of the PIDs for the processors that are running
:rtype: List[int]
"""
return [x.pid for x in self._processors.values()]
def get_runtime(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the current runtime (in seconds) of the process that's
processing the specified file or None if the file is not currently
being processed
"""
if file_path in self._processors:
return (datetime.now() - self._processors[file_path].start_time)\
.total_seconds()
return None
def get_last_runtime(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the runtime (in seconds) of the process of the last run, or
None if the file was never processed.
:rtype: float
"""
return self._last_runtime.get(file_path)
def get_last_finish_time(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the finish time of the process of the last run, or None if the
file was never processed.
:rtype: datetime
"""
return self._last_finish_time.get(file_path)
def get_start_time(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the start time of the process that's processing the
specified file or None if the file is not currently being processed
:rtype: datetime
"""
if file_path in self._processors:
return self._processors[file_path].start_time
return None
def set_file_paths(self, new_file_paths):
"""
Update this with a new set of paths to DAG definition files.
:param new_file_paths: list of paths to DAG definition files
:type new_file_paths: list[unicode]
:return: None
"""
self._file_paths = new_file_paths
self._file_path_queue = [x for x in self._file_path_queue
if x in new_file_paths]
# Stop processors that are working on deleted files
filtered_processors = {}
for file_path, processor in self._processors.items():
if file_path in new_file_paths:
filtered_processors[file_path] = processor
else:
self.logger.warning("Stopping processor for {}".format(file_path))
                processor.terminate()
self._processors = filtered_processors
@staticmethod
def _split_path(file_path):
"""
Return the path elements of a path as an array. E.g. /a/b/c ->
['a', 'b', 'c']
:param file_path: the file path to split
:return: a list of the elements of the file path
:rtype: list[unicode]
"""
results = []
while True:
head, tail = os.path.split(file_path)
if len(tail) != 0:
results.append(tail)
if file_path == head:
break
file_path = head
results.reverse()
return results
def _get_log_directory(self):
"""
Log output from processing DAGs for the current day should go into
this directory.
:return: the path to the corresponding log directory
:rtype: unicode
"""
now = datetime.now()
return os.path.join(self._child_process_log_directory,
now.strftime("%Y-%m-%d"))
def _get_log_file_path(self, dag_file_path):
"""
Log output from processing the specified file should go to this
location.
:param dag_file_path: file containing a DAG
:type dag_file_path: unicode
:return: the path to the corresponding log file
:rtype: unicode
"""
log_directory = self._get_log_directory()
# General approach is to put the log file under the same relative path
# under the log directory as the DAG file in the DAG directory
relative_dag_file_path = os.path.relpath(dag_file_path, start=self._dag_directory)
path_elements = self._split_path(relative_dag_file_path)
# Add a .log suffix for the log file
path_elements[-1] += ".log"
return os.path.join(log_directory, *path_elements)
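    # Illustrative sketch (not part of the original module): with a hypothetical
    # dag_directory of '/dags' and child_process_log_directory of
    # '/logs/scheduler', a call made on 2016-01-02 would map paths like this:
    #
    #   manager._get_log_file_path('/dags/team_a/etl.py')
    #   # -> '/logs/scheduler/2016-01-02/team_a/etl.py.log'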
def symlink_latest_log_directory(self):
"""
Create symbolic link to the current day's log directory to
allow easy access to the latest scheduler log files.
:return: None
"""
log_directory = self._get_log_directory()
latest_log_directory_path = os.path.join(
self._child_process_log_directory, "latest")
if (os.path.isdir(log_directory)):
# if symlink exists but is stale, update it
if (os.path.islink(latest_log_directory_path)):
if(os.readlink(latest_log_directory_path) != log_directory):
os.unlink(latest_log_directory_path)
os.symlink(log_directory, latest_log_directory_path)
elif (os.path.isdir(latest_log_directory_path) or
os.path.isfile(latest_log_directory_path)):
self.logger.warning("{} already exists as a dir/file. "
"Skip creating symlink."
.format(latest_log_directory_path))
else:
os.symlink(log_directory, latest_log_directory_path)
def processing_count(self):
"""
:return: the number of files currently being processed
:rtype: int
"""
return len(self._processors)
def wait_until_finished(self):
"""
Sleeps until all the processors are done.
"""
for file_path, processor in self._processors.items():
while not processor.done:
time.sleep(0.1)
def heartbeat(self):
"""
This should be periodically called by the scheduler. This method will
        kick off new processes to process DAG definition files and read the
results from the finished processors.
:return: a list of SimpleDags that were produced by processors that
have finished since the last time this was called
:rtype: list[SimpleDag]
"""
finished_processors = {}
""":type : dict[unicode, AbstractDagFileProcessor]"""
running_processors = {}
""":type : dict[unicode, AbstractDagFileProcessor]"""
for file_path, processor in self._processors.items():
if processor.done:
self.logger.info("Processor for {} finished".format(file_path))
now = datetime.now()
finished_processors[file_path] = processor
self._last_runtime[file_path] = (now -
processor.start_time).total_seconds()
self._last_finish_time[file_path] = now
self._run_count[file_path] += 1
else:
running_processors[file_path] = processor
self._processors = running_processors
# Collect all the DAGs that were found in the processed files
simple_dags = []
for file_path, processor in finished_processors.items():
if processor.result is None:
self.logger.warning("Processor for {} exited with return code "
"{}. See {} for details."
.format(processor.file_path,
processor.exit_code,
processor.log_file))
else:
for simple_dag in processor.result:
simple_dags.append(simple_dag)
# Generate more file paths to process if we processed all the files
# already.
if len(self._file_path_queue) == 0:
# If the file path is already being processed, or if a file was
# processed recently, wait until the next batch
file_paths_in_progress = self._processors.keys()
now = datetime.now()
file_paths_recently_processed = []
for file_path in self._file_paths:
last_finish_time = self.get_last_finish_time(file_path)
if (last_finish_time is not None and
(now - last_finish_time).total_seconds() <
self._process_file_interval):
file_paths_recently_processed.append(file_path)
files_paths_at_run_limit = [file_path
for file_path, num_runs in self._run_count.items()
if num_runs == self._max_runs]
files_paths_to_queue = list(set(self._file_paths) -
set(file_paths_in_progress) -
set(file_paths_recently_processed) -
set(files_paths_at_run_limit))
for file_path, processor in self._processors.items():
self.logger.debug("File path {} is still being processed (started: {})"
.format(processor.file_path,
processor.start_time.isoformat()))
self.logger.debug("Queuing the following files for processing:\n\t{}"
.format("\n\t".join(files_paths_to_queue)))
self._file_path_queue.extend(files_paths_to_queue)
# Start more processors if we have enough slots and files to process
while (self._parallelism - len(self._processors) > 0 and
len(self._file_path_queue) > 0):
file_path = self._file_path_queue.pop(0)
log_file_path = self._get_log_file_path(file_path)
processor = self._processor_factory(file_path, log_file_path)
processor.start()
self.logger.info("Started a process (PID: {}) to generate "
"tasks for {} - logging into {}"
.format(processor.pid, file_path, log_file_path))
self._processors[file_path] = processor
self.symlink_latest_log_directory()
# Update scheduler heartbeat count.
self._run_count[self._heart_beat_key] += 1
return simple_dags
def max_runs_reached(self):
"""
:return: whether all file paths have been processed max_runs times
"""
if self._max_runs == -1: # Unlimited runs.
return False
for file_path in self._file_paths:
if self._run_count[file_path] != self._max_runs:
return False
if self._run_count[self._heart_beat_key] < self._max_runs:
return False
return True
def terminate(self):
"""
Stops all running processors
:return: None
"""
for processor in self._processors.values():
processor.terminate()
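# Illustrative sketch (not part of the original module): the scheduler-side loop
# that a DagFileProcessorManager is designed for. The directory, interval and
# SomeDagFileProcessor factory below are hypothetical placeholders.
#
#   file_paths = list_py_file_paths('/usr/local/airflow/dags')
#   manager = DagFileProcessorManager(
#       dag_directory='/usr/local/airflow/dags',
#       file_paths=file_paths,
#       parallelism=2,
#       process_file_interval=30.0,
#       child_process_log_directory='/usr/local/airflow/logs/scheduler',
#       max_runs=-1,
#       processor_factory=lambda dag_file, log_file: SomeDagFileProcessor(dag_file,
#                                                                         log_file))
#   while not manager.max_runs_reached():
#       simple_dags = manager.heartbeat()
#       ...  # hand the SimpleDags to the scheduling logic
#       time.sleep(1)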
|
holygits/incubator-airflow
|
airflow/utils/dag_processing.py
|
Python
|
apache-2.0
| 23,384
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""ryu plugin update
Revision ID: 49332180ca96
Revises: 1149d7de0cfa
Create Date: 2013-01-30 07:52:58.472885
"""
# revision identifiers, used by Alembic.
revision = '49332180ca96'
down_revision = '1149d7de0cfa'
# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
'quantum.plugins.ryu.ryu_quantum_plugin.RyuQuantumPluginV2'
]
from alembic import op
import sqlalchemy as sa
from quantum.db import migration
def upgrade(active_plugin=None, options=None):
if not migration.should_run(active_plugin, migration_for_plugins):
return
op.drop_table('ofp_server')
def downgrade(active_plugin=None, options=None):
if not migration.should_run(active_plugin, migration_for_plugins):
return
op.create_table(
'ofp_server',
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('address', sa.String(length=255)),
sa.Column('host_type', sa.String(length=255)),
sa.PrimaryKeyConstraint(u'id')
)
|
tpaszkowski/quantum
|
quantum/db/migration/alembic_migrations/versions/49332180ca96_ryu_plugin_update.py
|
Python
|
apache-2.0
| 1,687
|
# -*- coding: utf-8 -*-
__author__ = 'vahid'
|
farakavco/lutino
|
src/lutino/caching/tests/__init__.py
|
Python
|
apache-2.0
| 45
|
# Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sklearn.exceptions import NotFittedError
import pytest
from mpi4py import MPI
def test_distributed_srm(): # noqa: C901
import brainiak.funcalign.srm
s = brainiak.funcalign.srm.SRM()
assert s, "Invalid SRM instance!"
import numpy as np
np.random.seed(0)
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
nrank = comm.Get_size()
voxels = 100
samples = 500
subjects = 2
features = 3
s = brainiak.funcalign.srm.SRM(n_iter=5, features=features, comm=comm)
assert s, "Invalid SRM instance!"
# Create a Shared response S with K = 3
theta = np.linspace(-4 * np.pi, 4 * np.pi, samples)
z = np.linspace(-2, 2, samples)
r = z**2 + 1
x = r * np.sin(theta)
y = r * np.cos(theta)
S = np.vstack((x, y, z))
# DSRM: broadcast S
S = comm.bcast(S)
X = []
W = []
# DSRM: only append on rank 0
Q, R = np.linalg.qr(np.random.random((voxels, features)))
tmp_noise = 0.1*np.random.random((voxels, samples))
if rank == 0:
W.append(Q)
X.append(Q.dot(S) + tmp_noise)
else:
W.append(None)
X.append(None)
# Check that transform does NOT run before fitting the model
with pytest.raises(NotFittedError):
s.transform(X)
if rank == 0:
print("Test: transforming before fitting the model")
# Check that it does NOT run with 1 subject
with pytest.raises(ValueError):
s.fit(X)
if rank == 0:
print("Test: running SRM with 1 subject")
# DSRM: cyclic distribution of subject data, otherwise None
for subject in range(1, subjects):
Q, R = np.linalg.qr(np.random.random((voxels, features)))
tmp_noise = 0.1*np.random.random((voxels, samples))
if subject % nrank == rank:
W.append(Q)
X.append(Q.dot(S) + tmp_noise)
else:
W.append(None)
X.append(None)
    # Check that it runs with 2 subjects
s.fit(X)
from pathlib import Path
sr_v0_4 = np.load(Path(__file__).parent / "sr_v0_4.npz")['sr']
    assert np.allclose(sr_v0_4, s.s_)
assert len(s.w_) == subjects, (
"Invalid computation of SRM! (wrong # subjects in W)")
for subject in range(subjects):
if s.w_[subject] is not None:
assert s.w_[subject].shape[0] == voxels, (
"Invalid computation of SRM! (wrong # voxels in W)")
assert s.w_[subject].shape[1] == features, (
"Invalid computation of SRM! (wrong # features in W)")
ortho = np.linalg.norm(s.w_[subject].T.dot(s.w_[subject])
- np.eye(s.w_[subject].shape[1]),
'fro')
assert ortho < 1e-7, "A Wi mapping is not orthonormal in SRM."
difference = np.linalg.norm(X[subject] - s.w_[subject].dot(s.s_),
'fro')
datanorm = np.linalg.norm(X[subject], 'fro')
assert difference/datanorm < 1.0, (
"Model seems incorrectly computed.")
assert s.s_.shape[0] == features, (
"Invalid computation of SRM! (wrong # features in S)")
assert s.s_.shape[1] == samples, (
"Invalid computation of SRM! (wrong # samples in S)")
# Check that it does run to compute the shared response after the model
# computation
new_s = s.transform(X)
assert len(new_s) == subjects, (
"Invalid computation of SRM! (wrong # subjects after transform)")
for subject in range(subjects):
if new_s[subject] is not None:
assert new_s[subject].shape[0] == features, (
"Invalid computation of SRM! (wrong # features after "
"transform)")
assert new_s[subject].shape[1] == samples, (
"Invalid computation of SRM! (wrong # samples after "
"transform)")
# Check that it does NOT run with non-matching number of subjects
with pytest.raises(ValueError):
s.transform([X[1]])
if rank == 0:
print("Test: transforming with non-matching number of subjects")
# Check that it does not run without enough samples (TRs).
with pytest.raises(ValueError):
s.set_params(features=(samples+1))
s.fit(X)
if rank == 0:
print("Test: not enough samples")
# Check that it does not run with different number of samples (TRs)
if rank == 0:
S2 = S[:, :-2]
X.append(Q.dot(S2))
else:
X.append(None)
with pytest.raises(ValueError):
s.fit(X)
if rank == 0:
print("Test: different number of samples per subject")
test_distributed_srm()
|
brainiak/brainiak
|
tests/funcalign/test_srm_distributed.py
|
Python
|
apache-2.0
| 5,271
|
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
import tweepy
def get_oauth_file(auth_file):
try:
fh = open(auth_file, 'rt')
except IOError:
print('Could not get Twitter credentials.')
return None
lines = [l.strip() for l in fh.readlines()]
oauth = tweepy.OAuthHandler(lines[0], lines[1])
oauth.set_access_token(lines[2], lines[3])
fh.close()
return oauth
def get_oauth_dict(auth_dict):
oauth = tweepy.OAuthHandler(auth_dict.get('consumer_token'),
auth_dict.get('consumer_secret'))
oauth.set_access_token(auth_dict.get('access_token'),
auth_dict.get('access_secret'))
return oauth
def update_status(msg, twitter_cred):
twitter_auth = get_oauth_dict(twitter_cred)
if twitter_auth is None:
return
twitter_api = tweepy.API(twitter_auth)
twitter_api.update_status(msg)
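# Illustrative sketch (not part of the original module): the credential dictionary
# keys match those read by get_oauth_dict(); the values below are placeholders.
#
#   twitter_cred = {'consumer_token': '<consumer-token>',
#                   'consumer_secret': '<consumer-secret>',
#                   'access_token': '<access-token>',
#                   'access_secret': '<access-secret>'}
#   update_status('INDRA model updated.', twitter_cred)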
|
jmuhlich/indra
|
models/rasmachine/twitter_client.py
|
Python
|
bsd-2-clause
| 971
|
"""
.. _stats_cluster_sensors_2samp_spatial:
=====================================================
Spatiotemporal permutation F-test on full sensor data
=====================================================
Tests for differential evoked responses in at least
one condition using a permutation clustering test.
The FieldTrip neighbor templates will be used to determine
the adjacency between sensors. This serves as a spatial prior
to the clustering. Significant spatiotemporal clusters will then
be visualized using custom matplotlib code.
"""
# Authors: Denis Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mne.viz import plot_topomap
import mne
from mne.stats import spatio_temporal_cluster_test
from mne.datasets import sample
from mne.channels import read_ch_connectivity
print(__doc__)
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id = {'Aud_L': 1, 'Aud_R': 2, 'Vis_L': 3, 'Vis_R': 4}
tmin = -0.2
tmax = 0.5
# Setup for reading the raw data
raw = mne.io.Raw(raw_fname, preload=True)
raw.filter(1, 30)
events = mne.read_events(event_fname)
###############################################################################
# Read epochs for the channel of interest
picks = mne.pick_types(raw.info, meg='mag', eog=True)
reject = dict(mag=4e-12, eog=150e-6)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=None, reject=reject, preload=True)
epochs.drop_channels(['EOG 061'])
epochs.equalize_event_counts(event_id, copy=False)
condition_names = 'Aud_L', 'Aud_R', 'Vis_L', 'Vis_R'
X = [epochs[k].get_data() for k in condition_names] # as 3D matrix
X = [np.transpose(x, (0, 2, 1)) for x in X] # transpose for clustering
###############################################################################
# load FieldTrip neighbor definition to setup sensor connectivity
connectivity, ch_names = read_ch_connectivity('neuromag306mag')
print(type(connectivity)) # it's a sparse matrix!
plt.imshow(connectivity.toarray(), cmap='gray', origin='lower',
interpolation='nearest')
plt.xlabel('{} Magnetometers'.format(len(ch_names)))
plt.ylabel('{} Magnetometers'.format(len(ch_names)))
plt.title('Between-sensor adjacency')
###############################################################################
# Compute permutation statistic
#
# How does it work? We use clustering to `bind` together features which are
# similar. Our features are the magnetic fields measured over our sensor
# array at different times. This reduces the multiple comparison problem.
# To compute the actual test-statistic, we first sum all F-values in all
# clusters. We end up with one statistic for each cluster.
# Then we generate a distribution from the data by shuffling our conditions
# between our samples and recomputing our clusters and the test statistics.
# We test for the significance of a given cluster by computing the probability
# of observing a cluster of that size. For more background read:
# Maris/Oostenveld (2007), "Nonparametric statistical testing of EEG- and
# MEG-data" Journal of Neuroscience Methods, Vol. 164, No. 1., pp. 177-190.
# doi:10.1016/j.jneumeth.2007.03.024
# set cluster threshold
threshold = 50.0 # very high, but the test is quite sensitive on this data
# set family-wise p-value
p_accept = 0.001
cluster_stats = spatio_temporal_cluster_test(X, n_permutations=1000,
threshold=threshold, tail=1,
n_jobs=2,
connectivity=connectivity)
T_obs, clusters, p_values, _ = cluster_stats
good_cluster_inds = np.where(p_values < p_accept)[0]
# Note. The same function works with source estimates. The only differences
# are the origin of the data, the size, and the connectivity definition.
# It can be used for single trials or for groups of subjects.
###############################################################################
# Visualize clusters
# configure variables for visualization
times = epochs.times * 1e3
colors = 'r', 'r', 'steelblue', 'steelblue'
linestyles = '-', '--', '-', '--'
# grand average as numpy array
grand_ave = np.array(X).mean(axis=1)
# get sensor positions via layout
pos = mne.find_layout(epochs.info).pos
# loop over significant clusters
for i_clu, clu_idx in enumerate(good_cluster_inds):
    # unpack cluster information, get unique indices
time_inds, space_inds = np.squeeze(clusters[clu_idx])
ch_inds = np.unique(space_inds)
time_inds = np.unique(time_inds)
# get topography for F stat
f_map = T_obs[time_inds, ...].mean(axis=0)
# get signals at significant sensors
signals = grand_ave[..., ch_inds].mean(axis=-1)
sig_times = times[time_inds]
# create spatial mask
mask = np.zeros((f_map.shape[0], 1), dtype=bool)
mask[ch_inds, :] = True
# initialize figure
fig, ax_topo = plt.subplots(1, 1, figsize=(10, 3))
title = 'Cluster #{0}'.format(i_clu + 1)
fig.suptitle(title, fontsize=14)
# plot average test statistic and mark significant sensors
image, _ = plot_topomap(f_map, pos, mask=mask, axis=ax_topo,
cmap='Reds', vmin=np.min, vmax=np.max)
# advanced matplotlib for showing image with figure and colorbar
# in one plot
divider = make_axes_locatable(ax_topo)
# add axes for colorbar
ax_colorbar = divider.append_axes('right', size='5%', pad=0.05)
plt.colorbar(image, cax=ax_colorbar)
ax_topo.set_xlabel('Averaged F-map ({:0.1f} - {:0.1f} ms)'.format(
*sig_times[[0, -1]]
))
# add new axis for time courses and plot time courses
ax_signals = divider.append_axes('right', size='300%', pad=1.2)
for signal, name, col, ls in zip(signals, condition_names, colors,
linestyles):
ax_signals.plot(times, signal, color=col, linestyle=ls, label=name)
# add information
ax_signals.axvline(0, color='k', linestyle=':', label='stimulus onset')
ax_signals.set_xlim([times[0], times[-1]])
ax_signals.set_xlabel('time [ms]')
ax_signals.set_ylabel('evoked magnetic fields [fT]')
# plot significant time range
ymin, ymax = ax_signals.get_ylim()
ax_signals.fill_betweenx((ymin, ymax), sig_times[0], sig_times[-1],
color='orange', alpha=0.3)
ax_signals.legend(loc='lower right')
ax_signals.set_ylim(ymin, ymax)
# clean up viz
mne.viz.tight_layout(fig=fig)
fig.subplots_adjust(bottom=.05)
plt.show()
"""
Exercises
----------
- What is the smallest p-value you can obtain, given the finite number of
permutations?
- Use an F distribution to compute the threshold by traditional significance
  levels. Hint: take a look at ``scipy.stats.distributions.f``
"""
|
cmoutard/mne-python
|
tutorials/plot_spatio_temporal_cluster_stats_sensor.py
|
Python
|
bsd-3-clause
| 7,157
|
# License: BSD Style.
from ...utils import verbose
from ..utils import _data_path, _get_version, _version_doc
@verbose
def data_path(path=None, force_update=False, update_path=True, download=True,
verbose=None):
"""
Get path to local copy of the kiloword dataset.
This is the dataset from [1]_.
Parameters
----------
path : None | str
Location of where to look for the kiloword data storing
location. If None, the environment variable or config parameter
MNE_DATASETS_KILOWORD_PATH is used. If it doesn't exist,
the "mne-python/examples" directory is used. If the
kiloword dataset is not found under the given path (e.g.,
as "mne-python/examples/MNE-kiloword-data"), the data
will be automatically downloaded to the specified folder.
force_update : bool
Force update of the dataset even if a local copy exists.
update_path : bool | None
If True, set the MNE_DATASETS_KILOWORD_PATH in mne-python
config to the given path. If None, the user is prompted.
%(verbose)s
Returns
-------
path : list of str
Local path to the given data file. This path is contained inside a list
of length one, for compatibility.
References
----------
.. [1] Dufau, S., Grainger, J., Midgley, KJ., Holcomb, PJ. A thousand
words are worth a picture: Snapshots of printed-word processing in an
event-related potential megastudy. Psychological science, 2015
"""
return _data_path(path=path, force_update=force_update,
update_path=update_path, name='kiloword',
download=download)
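# Illustrative sketch (not part of the original module): a typical call relies on
# the defaults, so the storage location comes from the MNE config or environment.
#
#   from mne.datasets import kiloword
#   kiloword_path = kiloword.data_path()  # downloads on first use; see the
#                                         # docstring above for the return format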
def get_version():
"""Get dataset version."""
return _get_version('kiloword')
get_version.__doc__ = _version_doc.format(name='kiloword')
|
olafhauk/mne-python
|
mne/datasets/kiloword/kiloword.py
|
Python
|
bsd-3-clause
| 1,843
|
import warnings
import unittest
import sys
import os
import atexit
import numpy as np
from scipy import sparse
import pytest
from sklearn.utils.deprecation import deprecated
from sklearn.utils.metaestimators import if_delegate_has_method
from sklearn.utils._testing import (
assert_raises,
assert_warns,
assert_no_warnings,
set_random_state,
assert_raise_message,
ignore_warnings,
check_docstring_parameters,
assert_allclose_dense_sparse,
assert_raises_regex,
TempMemmap,
create_memmap_backed_data,
_delete_folder,
_convert_container,
raises,
)
from sklearn.tree import DecisionTreeClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
def test_set_random_state():
lda = LinearDiscriminantAnalysis()
tree = DecisionTreeClassifier()
# Linear Discriminant Analysis doesn't have random state: smoke test
set_random_state(lda, 3)
set_random_state(tree, 3)
assert tree.random_state == 3
def test_assert_allclose_dense_sparse():
x = np.arange(9).reshape(3, 3)
msg = "Not equal to tolerance "
y = sparse.csc_matrix(x)
for X in [x, y]:
# basic compare
with pytest.raises(AssertionError, match=msg):
assert_allclose_dense_sparse(X, X*2)
assert_allclose_dense_sparse(X, X)
with pytest.raises(ValueError, match="Can only compare two sparse"):
assert_allclose_dense_sparse(x, y)
A = sparse.diags(np.ones(5), offsets=0).tocsr()
B = sparse.csr_matrix(np.ones((1, 5)))
with pytest.raises(AssertionError, match="Arrays are not equal"):
assert_allclose_dense_sparse(B, A)
def test_assert_raises_msg():
with assert_raises_regex(AssertionError, 'Hello world'):
with assert_raises(ValueError, msg='Hello world'):
pass
def test_assert_raise_message():
def _raise_ValueError(message):
raise ValueError(message)
def _no_raise():
pass
assert_raise_message(ValueError, "test",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "something else",
_raise_ValueError, "test")
assert_raises(ValueError,
assert_raise_message, TypeError, "something else",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "test",
_no_raise)
# multiple exceptions in a tuple
assert_raises(AssertionError,
assert_raise_message, (ValueError, AttributeError),
"test", _no_raise)
def test_ignore_warning():
    # This checks that the ignore_warnings decorator and context manager are working
# as expected
def _warning_function():
warnings.warn("deprecation warning", DeprecationWarning)
def _multiple_warning_function():
warnings.warn("deprecation warning", DeprecationWarning)
warnings.warn("deprecation warning")
# Check the function directly
assert_no_warnings(ignore_warnings(_warning_function))
assert_no_warnings(ignore_warnings(_warning_function,
category=DeprecationWarning))
assert_warns(DeprecationWarning, ignore_warnings(_warning_function,
category=UserWarning))
assert_warns(UserWarning,
ignore_warnings(_multiple_warning_function,
category=FutureWarning))
assert_warns(DeprecationWarning,
ignore_warnings(_multiple_warning_function,
category=UserWarning))
assert_no_warnings(ignore_warnings(_warning_function,
category=(DeprecationWarning,
UserWarning)))
# Check the decorator
@ignore_warnings
def decorator_no_warning():
_warning_function()
_multiple_warning_function()
@ignore_warnings(category=(DeprecationWarning, UserWarning))
def decorator_no_warning_multiple():
_multiple_warning_function()
@ignore_warnings(category=DeprecationWarning)
def decorator_no_deprecation_warning():
_warning_function()
@ignore_warnings(category=UserWarning)
def decorator_no_user_warning():
_warning_function()
@ignore_warnings(category=DeprecationWarning)
def decorator_no_deprecation_multiple_warning():
_multiple_warning_function()
@ignore_warnings(category=UserWarning)
def decorator_no_user_multiple_warning():
_multiple_warning_function()
assert_no_warnings(decorator_no_warning)
assert_no_warnings(decorator_no_warning_multiple)
assert_no_warnings(decorator_no_deprecation_warning)
assert_warns(DeprecationWarning, decorator_no_user_warning)
assert_warns(UserWarning, decorator_no_deprecation_multiple_warning)
assert_warns(DeprecationWarning, decorator_no_user_multiple_warning)
# Check the context manager
def context_manager_no_warning():
with ignore_warnings():
_warning_function()
def context_manager_no_warning_multiple():
with ignore_warnings(category=(DeprecationWarning, UserWarning)):
_multiple_warning_function()
def context_manager_no_deprecation_warning():
with ignore_warnings(category=DeprecationWarning):
_warning_function()
def context_manager_no_user_warning():
with ignore_warnings(category=UserWarning):
_warning_function()
def context_manager_no_deprecation_multiple_warning():
with ignore_warnings(category=DeprecationWarning):
_multiple_warning_function()
def context_manager_no_user_multiple_warning():
with ignore_warnings(category=UserWarning):
_multiple_warning_function()
assert_no_warnings(context_manager_no_warning)
assert_no_warnings(context_manager_no_warning_multiple)
assert_no_warnings(context_manager_no_deprecation_warning)
assert_warns(DeprecationWarning, context_manager_no_user_warning)
assert_warns(UserWarning, context_manager_no_deprecation_multiple_warning)
assert_warns(DeprecationWarning, context_manager_no_user_multiple_warning)
    # Check that passing a warning class as the first positional argument raises an error
warning_class = UserWarning
match = "'obj' should be a callable.+you should use 'category=UserWarning'"
with pytest.raises(ValueError, match=match):
silence_warnings_func = ignore_warnings(warning_class)(
_warning_function)
silence_warnings_func()
with pytest.raises(ValueError, match=match):
@ignore_warnings(warning_class)
def test():
pass
class TestWarns(unittest.TestCase):
def test_warn(self):
def f():
warnings.warn("yo")
return 3
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
filters_orig = warnings.filters[:]
assert assert_warns(UserWarning, f) == 3
# test that assert_warns doesn't have side effects on warnings
# filters
assert warnings.filters == filters_orig
with pytest.raises(AssertionError):
assert_no_warnings(f)
assert assert_no_warnings(lambda x: x, 1) == 1
def test_warn_wrong_warning(self):
def f():
warnings.warn("yo", FutureWarning)
failed = False
filters = sys.modules['warnings'].filters[:]
try:
try:
# Should raise an AssertionError
# assert_warns has a special handling of "FutureWarning" that
# pytest.warns does not have
assert_warns(UserWarning, f)
failed = True
except AssertionError:
pass
finally:
sys.modules['warnings'].filters = filters
if failed:
raise AssertionError("wrong warning caught by assert_warn")
# Tests for docstrings:
def f_ok(a, b):
"""Function f
Parameters
----------
a : int
Parameter a
b : float
Parameter b
Returns
-------
c : list
Parameter c
"""
c = a + b
return c
def f_bad_sections(a, b):
"""Function f
Parameters
----------
a : int
Parameter a
b : float
Parameter b
Results
-------
c : list
Parameter c
"""
c = a + b
return c
def f_bad_order(b, a):
"""Function f
Parameters
----------
a : int
Parameter a
b : float
Parameter b
Returns
-------
c : list
Parameter c
"""
c = a + b
return c
def f_too_many_param_docstring(a, b):
"""Function f
Parameters
----------
a : int
Parameter a
b : int
Parameter b
c : int
Parameter c
Returns
-------
d : list
Parameter c
"""
d = a + b
return d
def f_missing(a, b):
"""Function f
Parameters
----------
a : int
Parameter a
Returns
-------
c : list
Parameter c
"""
c = a + b
return c
def f_check_param_definition(a, b, c, d, e):
"""Function f
Parameters
----------
a: int
Parameter a
b:
Parameter b
c :
Parameter c
d:int
Parameter d
e
No typespec is allowed without colon
"""
return a + b + c + d
class Klass:
def f_missing(self, X, y):
pass
def f_bad_sections(self, X, y):
"""Function f
Parameter
----------
a : int
Parameter a
b : float
Parameter b
Results
-------
c : list
Parameter c
"""
pass
class MockEst:
def __init__(self):
"""MockEstimator"""
def fit(self, X, y):
return X
def predict(self, X):
return X
def predict_proba(self, X):
return X
def score(self, X):
return 1.
class MockMetaEstimator:
def __init__(self, delegate):
"""MetaEstimator to check if doctest on delegated methods work.
Parameters
---------
delegate : estimator
Delegated estimator.
"""
self.delegate = delegate
@if_delegate_has_method(delegate=('delegate'))
def predict(self, X):
"""This is available only if delegate has predict.
Parameters
----------
y : ndarray
Parameter y
"""
return self.delegate.predict(X)
@if_delegate_has_method(delegate=('delegate'))
@deprecated("Testing a deprecated delegated method")
def score(self, X):
"""This is available only if delegate has score.
Parameters
---------
y : ndarray
Parameter y
"""
@if_delegate_has_method(delegate=('delegate'))
def predict_proba(self, X):
"""This is available only if delegate has predict_proba.
Parameters
---------
X : ndarray
Parameter X
"""
return X
@deprecated('Testing deprecated function with wrong params')
def fit(self, X, y):
"""Incorrect docstring but should not be tested"""
def test_check_docstring_parameters():
pytest.importorskip('numpydoc',
reason="numpydoc is required to test the docstrings")
incorrect = check_docstring_parameters(f_ok)
assert incorrect == []
incorrect = check_docstring_parameters(f_ok, ignore=['b'])
assert incorrect == []
incorrect = check_docstring_parameters(f_missing, ignore=['b'])
assert incorrect == []
with pytest.raises(RuntimeError, match="Unknown section Results"):
check_docstring_parameters(f_bad_sections)
with pytest.raises(RuntimeError, match="Unknown section Parameter"):
check_docstring_parameters(Klass.f_bad_sections)
incorrect = check_docstring_parameters(f_check_param_definition)
assert (
incorrect == [
"sklearn.utils.tests.test_testing.f_check_param_definition There "
"was no space between the param name and colon ('a: int')",
"sklearn.utils.tests.test_testing.f_check_param_definition There "
"was no space between the param name and colon ('b:')",
"sklearn.utils.tests.test_testing.f_check_param_definition "
"Parameter 'c :' has an empty type spec. Remove the colon",
"sklearn.utils.tests.test_testing.f_check_param_definition There "
"was no space between the param name and colon ('d:int')",
])
messages = [
["In function: sklearn.utils.tests.test_testing.f_bad_order",
"There's a parameter name mismatch in function docstring w.r.t."
" function signature, at index 0 diff: 'b' != 'a'",
"Full diff:",
"- ['b', 'a']",
"+ ['a', 'b']"],
["In function: " +
"sklearn.utils.tests.test_testing.f_too_many_param_docstring",
"Parameters in function docstring have more items w.r.t. function"
" signature, first extra item: c",
"Full diff:",
"- ['a', 'b']",
"+ ['a', 'b', 'c']",
"? +++++"],
["In function: sklearn.utils.tests.test_testing.f_missing",
"Parameters in function docstring have less items w.r.t. function"
" signature, first missing item: b",
"Full diff:",
"- ['a', 'b']",
"+ ['a']"],
["In function: sklearn.utils.tests.test_testing.Klass.f_missing",
"Parameters in function docstring have less items w.r.t. function"
" signature, first missing item: X",
"Full diff:",
"- ['X', 'y']",
"+ []"],
["In function: " +
"sklearn.utils.tests.test_testing.MockMetaEstimator.predict",
"There's a parameter name mismatch in function docstring w.r.t."
" function signature, at index 0 diff: 'X' != 'y'",
"Full diff:",
"- ['X']",
"? ^",
"+ ['y']",
"? ^"],
["In function: " +
"sklearn.utils.tests.test_testing.MockMetaEstimator."
+ "predict_proba",
"Parameters in function docstring have less items w.r.t. function"
" signature, first missing item: X",
"Full diff:",
"- ['X']",
"+ []"],
["In function: " +
"sklearn.utils.tests.test_testing.MockMetaEstimator.score",
"Parameters in function docstring have less items w.r.t. function"
" signature, first missing item: X",
"Full diff:",
"- ['X']",
"+ []"],
["In function: " +
"sklearn.utils.tests.test_testing.MockMetaEstimator.fit",
"Parameters in function docstring have less items w.r.t. function"
" signature, first missing item: X",
"Full diff:",
"- ['X', 'y']",
"+ []"],
]
mock_meta = MockMetaEstimator(delegate=MockEst())
for msg, f in zip(messages,
[f_bad_order,
f_too_many_param_docstring,
f_missing,
Klass.f_missing,
mock_meta.predict,
mock_meta.predict_proba,
mock_meta.score,
mock_meta.fit]):
incorrect = check_docstring_parameters(f)
assert msg == incorrect, ('\n"%s"\n not in \n"%s"' % (msg, incorrect))
class RegistrationCounter:
def __init__(self):
self.nb_calls = 0
def __call__(self, to_register_func):
self.nb_calls += 1
assert to_register_func.func is _delete_folder
def check_memmap(input_array, mmap_data, mmap_mode='r'):
assert isinstance(mmap_data, np.memmap)
writeable = mmap_mode != 'r'
assert mmap_data.flags.writeable is writeable
np.testing.assert_array_equal(input_array, mmap_data)
def test_tempmemmap(monkeypatch):
registration_counter = RegistrationCounter()
monkeypatch.setattr(atexit, 'register', registration_counter)
input_array = np.ones(3)
with TempMemmap(input_array) as data:
check_memmap(input_array, data)
temp_folder = os.path.dirname(data.filename)
if os.name != 'nt':
assert not os.path.exists(temp_folder)
assert registration_counter.nb_calls == 1
mmap_mode = 'r+'
with TempMemmap(input_array, mmap_mode=mmap_mode) as data:
check_memmap(input_array, data, mmap_mode=mmap_mode)
temp_folder = os.path.dirname(data.filename)
if os.name != 'nt':
assert not os.path.exists(temp_folder)
assert registration_counter.nb_calls == 2
def test_create_memmap_backed_data(monkeypatch):
registration_counter = RegistrationCounter()
monkeypatch.setattr(atexit, 'register', registration_counter)
input_array = np.ones(3)
data = create_memmap_backed_data(input_array)
check_memmap(input_array, data)
assert registration_counter.nb_calls == 1
data, folder = create_memmap_backed_data(input_array,
return_folder=True)
check_memmap(input_array, data)
assert folder == os.path.dirname(data.filename)
assert registration_counter.nb_calls == 2
mmap_mode = 'r+'
data = create_memmap_backed_data(input_array, mmap_mode=mmap_mode)
check_memmap(input_array, data, mmap_mode)
assert registration_counter.nb_calls == 3
input_list = [input_array, input_array + 1, input_array + 2]
mmap_data_list = create_memmap_backed_data(input_list)
for input_array, data in zip(input_list, mmap_data_list):
check_memmap(input_array, data)
assert registration_counter.nb_calls == 4
@pytest.mark.parametrize(
"constructor_name, container_type",
[('list', list),
('tuple', tuple),
('array', np.ndarray),
('sparse', sparse.csr_matrix),
('dataframe', pytest.importorskip('pandas').DataFrame),
('series', pytest.importorskip('pandas').Series),
('index', pytest.importorskip('pandas').Index),
('slice', slice)]
)
def test_convert_container(constructor_name, container_type):
container = [0, 1]
assert isinstance(_convert_container(container, constructor_name),
container_type)
def test_raises():
# Tests for the raises context manager
# Proper type, no match
with raises(TypeError):
raise TypeError()
# Proper type, proper match
with raises(TypeError, match="how are you") as cm:
raise TypeError("hello how are you")
assert cm.raised_and_matched
# Proper type, proper match with multiple patterns
with raises(TypeError, match=["not this one", "how are you"]) as cm:
raise TypeError("hello how are you")
assert cm.raised_and_matched
# bad type, no match
with pytest.raises(ValueError, match="this will be raised"):
with raises(TypeError) as cm:
raise ValueError("this will be raised")
assert not cm.raised_and_matched
# Bad type, no match, with a err_msg
with pytest.raises(AssertionError, match="the failure message"):
with raises(TypeError, err_msg="the failure message") as cm:
raise ValueError()
assert not cm.raised_and_matched
# bad type, with match (is ignored anyway)
with pytest.raises(ValueError, match="this will be raised"):
with raises(TypeError, match="this is ignored") as cm:
raise ValueError("this will be raised")
assert not cm.raised_and_matched
# proper type but bad match
with pytest.raises(
AssertionError, match="should contain one of the following patterns"
):
with raises(TypeError, match="hello") as cm:
raise TypeError("Bad message")
assert not cm.raised_and_matched
# proper type but bad match, with err_msg
with pytest.raises(AssertionError, match="the failure message"):
with raises(
TypeError, match="hello", err_msg="the failure message"
) as cm:
raise TypeError("Bad message")
assert not cm.raised_and_matched
# no raise with default may_pass=False
with pytest.raises(AssertionError, match="Did not raise"):
with raises(TypeError) as cm:
pass
assert not cm.raised_and_matched
# no raise with may_pass=True
with raises(TypeError, match="hello", may_pass=True) as cm:
pass # still OK
assert not cm.raised_and_matched
# Multiple exception types:
with raises((TypeError, ValueError)):
raise TypeError()
with raises((TypeError, ValueError)):
raise ValueError()
with pytest.raises(AssertionError):
with raises((TypeError, ValueError)):
pass
|
anntzer/scikit-learn
|
sklearn/utils/tests/test_testing.py
|
Python
|
bsd-3-clause
| 21,218
|
# Third Party Stuff
import pytest
from django.core.urlresolvers import reverse
pytestmark = pytest.mark.django_db
def test_google_analytics_rendering(client, settings):
url = reverse('pages:homepage')
response = client.get(url)
assert 'UA-MY-ID' not in response.content
settings.SITE_VARIABLES['google_analytics_id'] = 'UA-MY-ID'
response = client.get(url)
assert 'UA-MY-ID' in response.content
|
farhaanbukhsh/junction
|
tests/integrations/test_google_analytics.py
|
Python
|
mit
| 424
|
from math import pi
import openmc
import openmc.deplete
import matplotlib.pyplot as plt
###############################################################################
# Define materials
###############################################################################
# Instantiate some Materials and register the appropriate Nuclides
uo2 = openmc.Material(name='UO2 fuel at 2.4% wt enrichment')
uo2.set_density('g/cm3', 10.29769)
uo2.add_element('U', 1., enrichment=2.4)
uo2.add_element('O', 2.)
helium = openmc.Material(name='Helium for gap')
helium.set_density('g/cm3', 0.001598)
helium.add_element('He', 2.4044e-4)
zircaloy = openmc.Material(name='Zircaloy 4')
zircaloy.set_density('g/cm3', 6.55)
zircaloy.add_element('Sn', 0.014, 'wo')
zircaloy.add_element('Fe', 0.00165, 'wo')
zircaloy.add_element('Cr', 0.001, 'wo')
zircaloy.add_element('Zr', 0.98335, 'wo')
borated_water = openmc.Material(name='Borated water')
borated_water.set_density('g/cm3', 0.740582)
borated_water.add_element('B', 4.0e-5)
borated_water.add_element('H', 5.0e-2)
borated_water.add_element('O', 2.4e-2)
borated_water.add_s_alpha_beta('c_H_in_H2O')
###############################################################################
# Create geometry
###############################################################################
# Define surfaces
pitch = 1.25984
fuel_or = openmc.ZCylinder(r=0.39218, name='Fuel OR')
clad_ir = openmc.ZCylinder(r=0.40005, name='Clad IR')
clad_or = openmc.ZCylinder(r=0.45720, name='Clad OR')
box = openmc.model.rectangular_prism(pitch, pitch, boundary_type='reflective')
# Define cells
fuel = openmc.Cell(fill=uo2, region=-fuel_or)
gap = openmc.Cell(fill=helium, region=+fuel_or & -clad_ir)
clad = openmc.Cell(fill=zircaloy, region=+clad_ir & -clad_or)
water = openmc.Cell(fill=borated_water, region=+clad_or & box)
# Define overall geometry
geometry = openmc.Geometry([fuel, gap, clad, water])
###############################################################################
# Set volumes of depletable materials
###############################################################################
# Set material volume for depletion. For 2D simulations, this should be an area.
uo2.volume = pi * fuel_or.r**2
###############################################################################
# Transport calculation settings
###############################################################################
# Instantiate a Settings object, set all runtime parameters, and export to XML
settings = openmc.Settings()
settings.batches = 100
settings.inactive = 10
settings.particles = 1000
# Create an initial uniform spatial source distribution over fissionable zones
bounds = [-0.62992, -0.62992, -1, 0.62992, 0.62992, 1]
uniform_dist = openmc.stats.Box(bounds[:3], bounds[3:], only_fissionable=True)
settings.source = openmc.source.Source(space=uniform_dist)
entropy_mesh = openmc.RegularMesh()
entropy_mesh.lower_left = [-0.39218, -0.39218, -1.e50]
entropy_mesh.upper_right = [0.39218, 0.39218, 1.e50]
entropy_mesh.dimension = [10, 10, 1]
settings.entropy_mesh = entropy_mesh
###############################################################################
# Initialize and run depletion calculation
###############################################################################
# Create depletion "operator"
chain_file = './chain_simple.xml'
op = openmc.deplete.Operator(geometry, settings, chain_file)
# Perform simulation using the predictor algorithm
time_steps = [1.0, 1.0, 1.0, 1.0, 1.0] # days
power = 174 # W/cm, for 2D simulations only (use W for 3D)
integrator = openmc.deplete.PredictorIntegrator(op, time_steps, power, timestep_units='d')
integrator.integrate()
###############################################################################
# Read depletion calculation results
###############################################################################
# Open results file
results = openmc.deplete.ResultsList.from_hdf5("depletion_results.h5")
# Obtain K_eff as a function of time
time, keff = results.get_eigenvalue()
# Obtain U235 concentration as a function of time
time, n_U235 = results.get_atoms('1', 'U235')
# Obtain Xe135 capture reaction rate as a function of time
time, Xe_capture = results.get_reaction_rate('1', 'Xe135', '(n,gamma)')
###############################################################################
# Generate plots
###############################################################################
days = 24*60*60
plt.figure()
plt.plot(time/days, keff, label="K-effective")
plt.xlabel("Time (days)")
plt.ylabel("Keff")
plt.show()
plt.figure()
plt.plot(time/days, n_U235, label="U235")
plt.xlabel("Time (days)")
plt.ylabel("n U5 (-)")
plt.show()
plt.figure()
plt.plot(time/days, Xe_capture, label="Xe135 capture")
plt.xlabel("Time (days)")
plt.ylabel("RR (-)")
plt.show()
plt.close('all')
|
tjlaboss/openmc
|
examples/pincell_depletion/run_depletion.py
|
Python
|
mit
| 5,021
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sql
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine; bind
# migrate_engine to your metadata
meta = sql.MetaData()
meta.bind = migrate_engine
consumer_table = sql.Table(
'consumer',
meta,
sql.Column('id', sql.String(64), primary_key=True, nullable=False),
sql.Column('description', sql.String(64), nullable=False),
sql.Column('secret', sql.String(64), nullable=False),
sql.Column('extra', sql.Text(), nullable=False))
consumer_table.create(migrate_engine, checkfirst=True)
request_token_table = sql.Table(
'request_token',
meta,
sql.Column('id', sql.String(64), primary_key=True, nullable=False),
sql.Column('request_secret', sql.String(64), nullable=False),
sql.Column('verifier', sql.String(64), nullable=True),
sql.Column('authorizing_user_id', sql.String(64), nullable=True),
sql.Column('requested_project_id', sql.String(64), nullable=False),
sql.Column('requested_roles', sql.Text(), nullable=False),
sql.Column('consumer_id', sql.String(64), nullable=False, index=True),
sql.Column('expires_at', sql.String(64), nullable=True))
request_token_table.create(migrate_engine, checkfirst=True)
access_token_table = sql.Table(
'access_token',
meta,
sql.Column('id', sql.String(64), primary_key=True, nullable=False),
sql.Column('access_secret', sql.String(64), nullable=False),
sql.Column('authorizing_user_id', sql.String(64),
nullable=False, index=True),
sql.Column('project_id', sql.String(64), nullable=False),
sql.Column('requested_roles', sql.Text(), nullable=False),
sql.Column('consumer_id', sql.String(64), nullable=False),
sql.Column('expires_at', sql.String(64), nullable=True))
access_token_table.create(migrate_engine, checkfirst=True)
def downgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
# Operations to reverse the above upgrade go here.
tables = ['consumer', 'request_token', 'access_token']
for table_name in tables:
table = sql.Table(table_name, meta, autoload=True)
table.drop()
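# Illustrative local check (not part of the original migration): with the
# SQLAlchemy/sqlalchemy-migrate versions contemporary with this code (bound
# MetaData, autoload=True), the upgrade/downgrade pair can be exercised against
# a throwaway in-memory SQLite engine.
if __name__ == '__main__':
    engine = sql.create_engine('sqlite://')
    upgrade(engine)
    downgrade(engine)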
|
dsiddharth/access-keys
|
keystone/contrib/oauth1/migrate_repo/versions/001_add_oauth_tables.py
|
Python
|
apache-2.0
| 2,918
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""Common data storage and utilities"""
import sys
import nicknamer
class Common:
# analysis_level_ludicrous
# Adverb tries too hard to cross reference data
# Use these switches to turn some of the biggest offenders off
per_link_detail = True
message_progress_tables = False
# returned from argparse.parse_args()
args = None
# first letter of the connection names
log_char_base = 'A'
# number of logs processed
n_logs = 0
# array of file name strings from command line
# len=n_logs
log_fns = []
# discovered router container names
# len=n_logs
router_ids = [] # raw long names
# router display names shortened with popups
router_display_names = []
# router modes in plain text
router_modes = []
# list of router-instance lists
# [[A0, A1], [B0], [C0, C1, C2]]
routers = []
# ordered list of connection names across all routers
all_conn_names = []
# conn_details_map -
# key=conn_id, val=ConnectionDetail for that connection
conn_details_map = {}
# mapping of connected routers by connection id
# A0_1 is connected to B3_2
# key = full conn_id 'A0_5'
# val = full conn_id 'B0_8'
# note names[key]=val and names[val]=key mutual reference
conn_peers_connid = {}
# short display name for peer indexed by connection id
# A0_1 maps to B's container_name nickname
conn_peers_display = {}
# conn_to_frame_map - global list for easier iteration in main
# key = conn_id full A0_3
# val = list of plf lines
conn_to_frame_map = {}
shorteners = nicknamer.Shorteners()
# when --no-data is in effect, how many log lines were skipped?
data_skipped = 0
# List of router log module names to include verbatim.
# Defaults to "SCRAPER". Overridden by command line.
verbatim_include_list = ["SCRAPER"]
def router_id_index(self, id):
"""
        Given a router's full container name, return its index in the router_ids table.
        Raise ValueError if it is not found.
:param id:
:return:
"""
return self.router_ids.index(id)
def module_key_in_line(self, key, line):
'''
Sense if the key is a log module name in the log line.
        The key must not appear too far into the line; otherwise the match is
        likely a false positive, for example when qdstat output has been captured into a log file.
MAX_POSITION defines what constitutes 'too far'.
:param key:
:param line:
:return:
'''
MAX_POSITION = 40
assert len(key) > 0
st = line.find(key)
return st >= 0 and st <= MAX_POSITION
def log_letter_of(idx):
'''
Return the letter A, B, C, ... from the index 0..n
:param idx:
:return: A..Z
'''
if idx >= 26:
sys.exit('ERROR: too many log files')
return "ABCDEFGHIJKLMNOPQRSTUVWXYZ"[idx]
def index_of_log_letter(letter):
'''
    Return the index 0..25 of the first letter of the 'letter' string.
Raise error if out of range
:param letter:
:return:
'''
val = "ABCDEFGHIJKLMNOPQRSTUVWXYZ".find(letter[0].upper())
if val < 0 or val > 25:
raise ValueError("index_of_log_letter Invalid log letter: %s", letter)
return val
class RestartRec():
def __init__(self, _id, _router, _event, _datetime):
self.id = _id
self.router = _router
self.event = _event
self.datetime = _datetime
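# A transfer is 'possibly unsettled' when it is a complete transfer frame (not a
# continuation flagged with more=true) that was neither settled on arrival nor
# given a final disposition; these are the deliveries whose settlement must
# still be tracked.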
def transfer_is_possibly_unsettled(plf):
return (plf.data.transfer and not plf.data.transfer_more and
not (plf.data.transfer_settled or plf.data.final_disposition is not None))
global_colors = {
"errors": "yellow",
"unsettled": "tomato",
"presettled": "aqua",
"accepted": "aquamarine",
"rejected": "orange", # hard coded in resolve_settlement
"released": "orange",
"modified": "orange",
"aborted": "crimson",
"more": "chartreuse",
"drain": "gold",
"no_credit": "beige"
}
def color_of(obj_type):
return global_colors.get(obj_type, "pink")
html_escape_table = {
"&": "&",
">": ">",
"<": "<",
}
def html_escape(text):
return "".join(html_escape_table.get(c, c) for c in text)
def strings_of_proton_log(text):
'''
Given a transfer log text string like:
"\x00SpE\x00Ss\xd0\x00\x00\x002\x00\x00\x00\x06@@@@\xa1$amqp:/_edge/EB1/temp.RkCWe_Is4jc3bcN\xa1\x0232\x00St\xd1\x00\x00\x00\x8c\x00\x00\x00\x0c\xa1\x04name\xa1\x04self\xa1\x04type\xa1\x13org.amqp.management\xa1\x09operation\xa1\x05QUERY\xa1\x0aentityType\xa1'org.apache.qpid.dispatch.router.address\xa1\x06offsetU\x00\xa1\x05count\x81\x00\x00\x00\x00\x00\x00\x01\xf4\x00Sw\xd1\x00\x00\x00Q\x00\x00\x00\x02\xa1\x0eattributeNames\xd0\x00\x00\x008\x00\x00\x00\x04\xa1\x04name\xa1\x0fsubscriberCount\xa1\x0bremoteCount\xa1\x0econtainerCount"
return the strings thereof:
"SpE Ss @@@@ $amqp:/_edge/EB1/temp.RkCWe_Is4jc3bcN name self type org.amqp.management operation QUERY entityType org.apache.qpid.dispatch.router.address offsetU count Sw attributeNames name subscriberCount remoteCount containerCount"
The intended use for this is to help decode management and router frames in the transfer nickname dump.
:param text:
:return: strings embedded in text
'''
r = "" # return result
sstate = 0 # when a backslash is seen, skip this many more input chars
skipping = False
for elem in text:
if sstate > 0:
sstate -= 1
else:
if elem == '\\':
if not skipping:
r += ' '
skipping = True
sstate = 3
else:
skipping = False
r += elem
return r
def ls_eval(text):
'''
Given a router_ls cost string like '{u'A': 1, u'C': 51L, u'B': 101L}',
return a dictionary {A:1, C:51, B:101}
This code replaces ast.literal_eval
'''
result = {}
text = text.strip(" {}")
if len(text) > 0:
items = text.split(', ')
for item in items:
kv = item.split(": ")
key = kv[0].strip()
if key.startswith("u'") or key.startswith('u"'):
key = key[2:-1]
elif key.startswith("'"):
key = key[1:-1]
val = kv[1].strip()
if val.endswith("L"):
val = val[:-1]
result[key] = int(val)
return result
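# Hedged usage sketch (added for illustration; not in the original module):
# exercising ls_eval directly shows the u'' prefixes and long-integer 'L'
# suffixes being stripped, matching the docstring above.
if __name__ == '__main__':
    assert ls_eval("{u'A': 1, u'C': 51L, u'B': 101L}") == {'A': 1, 'C': 51, 'B': 101}
    assert ls_eval("{}") == {}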
|
ted-ross/qpid-dispatch
|
tools/scraper/common.py
|
Python
|
apache-2.0
| 7,279
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Strategy to export custom proto formats."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
from tensorflow.contrib.boosted_trees.proto import tree_config_pb2
from tensorflow.contrib.boosted_trees.python.training.functions import gbdt_batch
from tensorflow.contrib.decision_trees.proto import generic_tree_model_extensions_pb2
from tensorflow.contrib.decision_trees.proto import generic_tree_model_pb2
from tensorflow.contrib.learn.python.learn import export_strategy
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.python.client import session as tf_session
from tensorflow.python.framework import ops
from tensorflow.python.platform import gfile
from tensorflow.python.saved_model import loader as saved_model_loader
from tensorflow.python.saved_model import tag_constants
_SPARSE_FLOAT_FEATURE_NAME_TEMPLATE = "%s_%d"
def make_custom_export_strategy(name,
convert_fn,
feature_columns,
export_input_fn):
"""Makes custom exporter of GTFlow tree format.
Args:
name: A string, for the name of the export strategy.
convert_fn: A function that converts the tree proto to desired format and
saves it to the desired location. Can be None to skip conversion.
feature_columns: A list of feature columns.
export_input_fn: A function that takes no arguments and returns an
`InputFnOps`.
Returns:
An `ExportStrategy`.
"""
base_strategy = saved_model_export_utils.make_export_strategy(
serving_input_fn=export_input_fn)
input_fn = export_input_fn()
(sorted_feature_names, dense_floats, sparse_float_indices, _, _,
sparse_int_indices, _, _) = gbdt_batch.extract_features(
input_fn.features, feature_columns)
def export_fn(estimator, export_dir, checkpoint_path=None, eval_result=None):
"""A wrapper to export to SavedModel, and convert it to other formats."""
result_dir = base_strategy.export(estimator, export_dir,
checkpoint_path,
eval_result)
with ops.Graph().as_default() as graph:
with tf_session.Session(graph=graph) as sess:
saved_model_loader.load(
sess, [tag_constants.SERVING], result_dir)
# Note: This is GTFlow internal API and might change.
ensemble_model = graph.get_operation_by_name(
"ensemble_model/TreeEnsembleSerialize")
_, dfec_str = sess.run(ensemble_model.outputs)
dtec = tree_config_pb2.DecisionTreeEnsembleConfig()
dtec.ParseFromString(dfec_str)
# Export the result in the same folder as the saved model.
if convert_fn:
convert_fn(dtec, sorted_feature_names,
len(dense_floats),
len(sparse_float_indices),
len(sparse_int_indices), result_dir, eval_result)
feature_importances = _get_feature_importances(
dtec, sorted_feature_names,
len(dense_floats),
len(sparse_float_indices), len(sparse_int_indices))
sorted_by_importance = sorted(
feature_importances.items(), key=lambda x: -x[1])
assets_dir = os.path.join(result_dir, "assets.extra")
gfile.MakeDirs(assets_dir)
with gfile.GFile(os.path.join(assets_dir, "feature_importances"),
"w") as f:
f.write("\n".join("%s, %f" % (k, v) for k, v in sorted_by_importance))
return result_dir
return export_strategy.ExportStrategy(name, export_fn)
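# Hedged usage sketch (the names below are illustrative and not defined in this
# module): a GTFlow estimator would typically register the strategy through an
# Experiment, e.g.
#   strategy = make_custom_export_strategy(
#       "universal_format", convert_fn=my_convert_fn,
#       feature_columns=my_feature_columns,
#       export_input_fn=my_serving_input_fn)
#   tf.contrib.learn.Experiment(..., export_strategies=[strategy])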
def convert_to_universal_format(dtec, sorted_feature_names,
num_dense, num_sparse_float,
num_sparse_int,
feature_name_to_proto=None):
"""Convert GTFlow trees to universal format."""
del num_sparse_int # unused.
model_and_features = generic_tree_model_pb2.ModelAndFeatures()
# TODO(jonasz): Feature descriptions should contain information about how each
# feature is processed before it's fed to the model (e.g. bucketing
# information). As of now, this serves as a list of features the model uses.
for feature_name in sorted_feature_names:
if not feature_name_to_proto:
model_and_features.features[feature_name].SetInParent()
else:
model_and_features.features[feature_name].CopyFrom(
feature_name_to_proto[feature_name])
model = model_and_features.model
model.ensemble.summation_combination_technique.SetInParent()
for tree_idx in range(len(dtec.trees)):
gtflow_tree = dtec.trees[tree_idx]
tree_weight = dtec.tree_weights[tree_idx]
member = model.ensemble.members.add()
member.submodel_id.value = tree_idx
tree = member.submodel.decision_tree
for node_idx in range(len(gtflow_tree.nodes)):
gtflow_node = gtflow_tree.nodes[node_idx]
node = tree.nodes.add()
node_type = gtflow_node.WhichOneof("node")
node.node_id.value = node_idx
if node_type == "leaf":
leaf = gtflow_node.leaf
if leaf.HasField("vector"):
for weight in leaf.vector.value:
new_value = node.leaf.vector.value.add()
new_value.float_value = weight * tree_weight
else:
for index, weight in zip(
leaf.sparse_vector.index, leaf.sparse_vector.value):
new_value = node.leaf.sparse_vector.sparse_value[index]
new_value.float_value = weight * tree_weight
else:
node = node.binary_node
# Binary nodes here.
if node_type == "dense_float_binary_split":
split = gtflow_node.dense_float_binary_split
feature_id = split.feature_column
inequality_test = node.inequality_left_child_test
inequality_test.feature_id.id.value = sorted_feature_names[feature_id]
inequality_test.type = (
generic_tree_model_pb2.InequalityTest.LESS_OR_EQUAL)
inequality_test.threshold.float_value = split.threshold
elif node_type == "sparse_float_binary_split_default_left":
split = gtflow_node.sparse_float_binary_split_default_left.split
node.default_direction = (generic_tree_model_pb2.BinaryNode.LEFT)
feature_id = split.feature_column + num_dense
inequality_test = node.inequality_left_child_test
inequality_test.feature_id.id.value = (
_SPARSE_FLOAT_FEATURE_NAME_TEMPLATE %
(sorted_feature_names[feature_id], split.dimension_id))
inequality_test.type = (
generic_tree_model_pb2.InequalityTest.LESS_OR_EQUAL)
inequality_test.threshold.float_value = split.threshold
elif node_type == "sparse_float_binary_split_default_right":
split = gtflow_node.sparse_float_binary_split_default_right.split
node.default_direction = (
generic_tree_model_pb2.BinaryNode.RIGHT)
          # TODO(nponomareva): adjust this id assignment when we allow multi-
# column sparse tensors.
feature_id = split.feature_column + num_dense
inequality_test = node.inequality_left_child_test
inequality_test.feature_id.id.value = (
_SPARSE_FLOAT_FEATURE_NAME_TEMPLATE %
(sorted_feature_names[feature_id], split.dimension_id))
inequality_test.type = (
generic_tree_model_pb2.InequalityTest.LESS_OR_EQUAL)
inequality_test.threshold.float_value = split.threshold
elif node_type == "categorical_id_binary_split":
split = gtflow_node.categorical_id_binary_split
node.default_direction = generic_tree_model_pb2.BinaryNode.RIGHT
feature_id = split.feature_column + num_dense + num_sparse_float
categorical_test = (
generic_tree_model_extensions_pb2.MatchingValuesTest())
categorical_test.feature_id.id.value = sorted_feature_names[
feature_id]
matching_id = categorical_test.value.add()
matching_id.int64_value = split.feature_id
node.custom_left_child_test.Pack(categorical_test)
else:
raise ValueError("Unexpected node type %s", node_type)
node.left_child_id.value = split.left_id
node.right_child_id.value = split.right_id
return model_and_features
def _get_feature_importances(dtec, feature_names, num_dense_floats,
num_sparse_float, num_sparse_int):
"""Export the feature importance per feature column."""
del num_sparse_int # Unused.
sums = collections.defaultdict(lambda: 0)
for tree_idx in range(len(dtec.trees)):
tree = dtec.trees[tree_idx]
for tree_node in tree.nodes:
node_type = tree_node.WhichOneof("node")
if node_type == "dense_float_binary_split":
split = tree_node.dense_float_binary_split
split_column = feature_names[split.feature_column]
elif node_type == "sparse_float_binary_split_default_left":
split = tree_node.sparse_float_binary_split_default_left.split
split_column = _SPARSE_FLOAT_FEATURE_NAME_TEMPLATE % (
feature_names[split.feature_column + num_dense_floats],
split.dimension_id)
elif node_type == "sparse_float_binary_split_default_right":
split = tree_node.sparse_float_binary_split_default_right.split
split_column = _SPARSE_FLOAT_FEATURE_NAME_TEMPLATE % (
feature_names[split.feature_column + num_dense_floats],
split.dimension_id)
elif node_type == "categorical_id_binary_split":
split = tree_node.categorical_id_binary_split
split_column = feature_names[split.feature_column + num_dense_floats +
num_sparse_float]
elif node_type == "categorical_id_set_membership_binary_split":
split = tree_node.categorical_id_set_membership_binary_split
split_column = feature_names[split.feature_column + num_dense_floats +
num_sparse_float]
elif node_type == "leaf":
assert tree_node.node_metadata.gain == 0
continue
else:
raise ValueError("Unexpected split type %s", node_type)
# Apply shrinkage factor. It is important since it is not always uniform
# across different trees.
sums[split_column] += (
tree_node.node_metadata.gain * dtec.tree_weights[tree_idx])
return dict(sums)
|
eadgarchen/tensorflow
|
tensorflow/contrib/boosted_trees/estimator_batch/custom_export_strategy.py
|
Python
|
apache-2.0
| 11,260
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
# All rights reserved.
"""
Views for managing volumes.
"""
from django.conf import settings # noqa
from django.core.urlresolvers import reverse # noqa
from django.forms import ValidationError # noqa
from django.template.defaultfilters import filesizeformat # noqa
from django.utils.translation import ugettext_lazy as _ # noqa
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon.utils import fields
from horizon.utils import functions
from horizon.utils.memoized import memoized # noqa
from openstack_dashboard import api
from openstack_dashboard.api import cinder
from openstack_dashboard.api import glance
from openstack_dashboard.dashboards.project.images_and_snapshots import utils
from openstack_dashboard.dashboards.project.instances import tables
from openstack_dashboard.usage import quotas
class CreateForm(forms.SelfHandlingForm):
name = forms.CharField(max_length="255", label=_("Volume Name"))
description = forms.CharField(widget=forms.Textarea,
label=_("Description"), required=False)
type = forms.ChoiceField(label=_("Type"),
required=False)
size = forms.IntegerField(min_value=1, label=_("Size (GB)"))
volume_source_type = forms.ChoiceField(label=_("Volume Source"),
required=False)
snapshot_source = forms.ChoiceField(
label=_("Use snapshot as a source"),
widget=fields.SelectWidget(
attrs={'class': 'snapshot-selector'},
data_attrs=('size', 'display_name'),
transform=lambda x: "%s (%sGB)" % (x.display_name, x.size)),
required=False)
image_source = forms.ChoiceField(
label=_("Use image as a source"),
widget=fields.SelectWidget(
attrs={'class': 'image-selector'},
data_attrs=('size', 'name'),
transform=lambda x: "%s (%s)" % (x.name, filesizeformat(x.bytes))),
required=False)
def __init__(self, request, *args, **kwargs):
super(CreateForm, self).__init__(request, *args, **kwargs)
volume_types = cinder.volume_type_list(request)
self.fields['type'].choices = [("", "")] + \
[(type.name, type.name)
for type in volume_types]
if ("snapshot_id" in request.GET):
try:
snapshot = self.get_snapshot(request,
request.GET["snapshot_id"])
self.fields['name'].initial = snapshot.display_name
self.fields['size'].initial = snapshot.size
self.fields['snapshot_source'].choices = ((snapshot.id,
snapshot),)
try:
# Set the volume type from the original volume
orig_volume = cinder.volume_get(request,
snapshot.volume_id)
self.fields['type'].initial = orig_volume.volume_type
except Exception:
pass
self.fields['size'].help_text = _('Volume size must be equal '
'to or greater than the snapshot size (%sGB)'
% snapshot.size)
del self.fields['image_source']
del self.fields['volume_source_type']
except Exception:
exceptions.handle(request,
_('Unable to load the specified snapshot.'))
elif ('image_id' in request.GET):
try:
image = self.get_image(request,
request.GET["image_id"])
image.bytes = image.size
self.fields['name'].initial = image.name
self.fields['size'].initial = functions.bytes_to_gigabytes(
image.size)
self.fields['image_source'].choices = ((image.id, image),)
self.fields['size'].help_text = _('Volume size must be equal '
'to or greater than the image size (%s)'
% filesizeformat(image.size))
del self.fields['snapshot_source']
del self.fields['volume_source_type']
except Exception:
msg = _('Unable to load the specified image. %s')
exceptions.handle(request, msg % request.GET['image_id'])
else:
source_type_choices = []
try:
snapshots = cinder.volume_snapshot_list(request)
if snapshots:
source_type_choices.append(("snapshot_source",
_("Snapshot")))
choices = [('', _("Choose a snapshot"))] + \
[(s.id, s) for s in snapshots]
self.fields['snapshot_source'].choices = choices
else:
del self.fields['snapshot_source']
except Exception:
exceptions.handle(request, _("Unable to retrieve "
"volume snapshots."))
images = utils.get_available_images(request,
request.user.tenant_id)
if images:
source_type_choices.append(("image_source", _("Image")))
choices = [('', _("Choose an image"))]
for image in images:
image.bytes = image.size
image.size = functions.bytes_to_gigabytes(image.bytes)
choices.append((image.id, image))
self.fields['image_source'].choices = choices
else:
del self.fields['image_source']
if source_type_choices:
choices = ([('no_source_type',
_("No source, empty volume."))] +
source_type_choices)
self.fields['volume_source_type'].choices = choices
else:
del self.fields['volume_source_type']
def handle(self, request, data):
try:
usages = quotas.tenant_limit_usages(self.request)
availableGB = usages['maxTotalVolumeGigabytes'] - \
usages['gigabytesUsed']
availableVol = usages['maxTotalVolumes'] - usages['volumesUsed']
snapshot_id = None
image_id = None
source_type = data.get('volume_source_type', None)
if (data.get("snapshot_source", None) and
source_type in [None, 'snapshot_source']):
# Create from Snapshot
snapshot = self.get_snapshot(request,
data["snapshot_source"])
snapshot_id = snapshot.id
if (data['size'] < snapshot.size):
error_message = _('The volume size cannot be less than '
'the snapshot size (%sGB)' %
snapshot.size)
raise ValidationError(error_message)
elif (data.get("image_source", None) and
source_type in [None, 'image_source']):
                # Create from Image
image = self.get_image(request,
data["image_source"])
image_id = image.id
image_size = functions.bytes_to_gigabytes(image.size)
if (data['size'] < image_size):
error_message = _('The volume size cannot be less than '
'the image size (%s)' %
filesizeformat(image.size))
raise ValidationError(error_message)
else:
                if isinstance(data['size'], str):
data['size'] = int(data['size'])
if availableGB < data['size']:
error_message = _('A volume of %(req)iGB cannot be created as '
'you only have %(avail)iGB of your quota '
'available.')
params = {'req': data['size'],
'avail': availableGB}
raise ValidationError(error_message % params)
elif availableVol <= 0:
error_message = _('You are already using all of your available'
' volumes.')
raise ValidationError(error_message)
metadata = {}
volume = cinder.volume_create(request,
data['size'],
data['name'],
data['description'],
data['type'],
snapshot_id=snapshot_id,
image_id=image_id,
metadata=metadata)
message = _('Creating volume "%s"') % data['name']
messages.info(request, message)
return volume
except ValidationError as e:
self.api_error(e.messages[0])
return False
except Exception:
exceptions.handle(request, ignore=True)
self.api_error(_("Unable to create volume."))
return False
@memoized
def get_snapshot(self, request, id):
return cinder.volume_snapshot_get(request, id)
@memoized
def get_image(self, request, id):
return glance.image_get(request, id)
class AttachForm(forms.SelfHandlingForm):
instance = forms.ChoiceField(label=_("Attach to Instance"),
help_text=_("Select an instance to "
"attach to."))
device = forms.CharField(label=_("Device Name"))
def __init__(self, *args, **kwargs):
super(AttachForm, self).__init__(*args, **kwargs)
# Hide the device field if the hypervisor doesn't support it.
hypervisor_features = getattr(settings,
"OPENSTACK_HYPERVISOR_FEATURES",
{})
can_set_mount_point = hypervisor_features.get("can_set_mount_point",
True)
if not can_set_mount_point:
self.fields['device'].widget = forms.widgets.HiddenInput()
self.fields['device'].required = False
# populate volume_id
volume = kwargs.get('initial', {}).get("volume", None)
if volume:
volume_id = volume.id
else:
volume_id = None
self.fields['volume_id'] = forms.CharField(widget=forms.HiddenInput(),
initial=volume_id)
# Populate instance choices
instance_list = kwargs.get('initial', {}).get('instances', [])
instances = []
for instance in instance_list:
if instance.status in tables.ACTIVE_STATES and \
not any(instance.id == att["server_id"]
for att in volume.attachments):
instances.append((instance.id, '%s (%s)' % (instance.name,
instance.id)))
if instances:
instances.insert(0, ("", _("Select an instance")))
else:
instances = (("", _("No instances available")),)
self.fields['instance'].choices = instances
def handle(self, request, data):
instance_choices = dict(self.fields['instance'].choices)
instance_name = instance_choices.get(data['instance'],
_("Unknown instance (None)"))
# The name of the instance in the choices list has the ID appended to
# it, so let's slice that off...
instance_name = instance_name.rsplit(" (")[0]
try:
attach = api.nova.instance_volume_attach(request,
data['volume_id'],
data['instance'],
data.get('device', ''))
volume = cinder.volume_get(request, data['volume_id'])
if not volume.display_name:
volume_name = volume.id
else:
volume_name = volume.display_name
message = _('Attaching volume %(vol)s to instance '
'%(inst)s on %(dev)s.') % {"vol": volume_name,
"inst": instance_name,
"dev": attach.device}
messages.info(request, message)
return True
except Exception:
redirect = reverse("horizon:project:volumes:index")
exceptions.handle(request,
_('Unable to attach volume.'),
redirect=redirect)
class CreateSnapshotForm(forms.SelfHandlingForm):
name = forms.CharField(max_length="255", label=_("Snapshot Name"))
description = forms.CharField(widget=forms.Textarea,
label=_("Description"), required=False)
def __init__(self, request, *args, **kwargs):
super(CreateSnapshotForm, self).__init__(request, *args, **kwargs)
# populate volume_id
volume_id = kwargs.get('initial', {}).get('volume_id', [])
self.fields['volume_id'] = forms.CharField(widget=forms.HiddenInput(),
initial=volume_id)
def handle(self, request, data):
try:
snapshot = cinder.volume_snapshot_create(request,
data['volume_id'],
data['name'],
data['description'])
message = _('Creating volume snapshot "%s"') % data['name']
messages.info(request, message)
return snapshot
except Exception:
redirect = reverse("horizon:project:images_and_snapshots:index")
exceptions.handle(request,
_('Unable to create volume snapshot.'),
redirect=redirect)
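# Illustrative note (not part of the original module): in Horizon these forms
# are typically served by ModalFormView subclasses in the dashboard's views.py,
# e.g. a view declaring form_class = CreateForm and a success_url pointing back
# at the volumes index.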
|
Havate/havate-openstack
|
proto-build/gui/horizon/Horizon_GUI/openstack_dashboard/dashboards/project/volumes/forms.py
|
Python
|
apache-2.0
| 14,674
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
import sys
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'optimize09.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename, {'constant_memory': True, 'in_memory': False})
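        # constant_memory=True makes XlsxWriter flush worksheet rows to a
        # temporary file as they are written instead of holding them in memory;
        # the test checks that this optimized path still round-trips a
        # non-ASCII BMP character (U+263A) correctly.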
worksheet = workbook.add_worksheet()
smiley = "\u263a"
if sys.version_info[0] == 2:
smiley = unichr(9786)
worksheet.write('A1', smiley)
workbook.close()
self.assertExcelEqual()
|
jvrsantacruz/XlsxWriter
|
xlsxwriter/test/comparison/test_optimize09.py
|
Python
|
bsd-2-clause
| 1,168
|