| repo_name (string, 5–100 chars) | ref (string, 12–67 chars) | path (string, 4–244 chars) | copies (string, 1–8 chars) | content (string, 0–1.05M chars, nullable ⌀) |
|---|---|---|---|---|
kenshay/ImageScript
|
refs/heads/master
|
ProgramData/SystemFiles/Python/Lib/site-packages/pylint/test/functional/unrecognized_inline_option.py
|
6
|
# +1: [unrecognized-inline-option]
# pylint:bouboule=1
"""Check unknown option"""
|
702nADOS/sumo
|
refs/heads/master
|
tools/contributed/sumopy/coremodules/simulation/results.py
|
1
|
import os
import sys
import string
import types
from xml.sax import saxutils, parse, handler # , make_parser
from collections import OrderedDict
import numpy as np
from coremodules.modules_common import *
import agilepy.lib_base.classman as cm
import agilepy.lib_base.arrayman as am
import agilepy.lib_base.xmlman as xm
from agilepy.lib_base.geometry import *
from agilepy.lib_base.processes import Process, CmlMixin, ff, call, P
from coremodules.network.network import SumoIdsConf
def load_results(filepath, parent=None, logger=None):
# typically parent is the scenario
results = cm.load_obj(filepath, parent=parent)
    if logger is not None:
results.set_logger(logger)
return results
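# Hedged usage sketch (illustrative only; the file name is a placeholder and
# `scenario` is assumed to be an existing scenario object):
#
#     results = load_results('myscenario.res.obj', parent=scenario)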
class Tripresults(am.ArrayObjman):
def __init__(self, parent, trips, edges, is_add_default=True, **kwargs):
self._init_objman(ident='tripresults',
parent=parent, # main results object
name='Trip results',
info='Table with simulation results for each trip made.',
**kwargs)
self.add_col(am.IdsArrayConf('ids_trip', trips,
groupnames=['state'],
is_index=True,
name='ID trip',
info='ID of trip.',
))
attrinfos = OrderedDict([
('duration', {'name': 'Duration', 'xmltag': 'duration', 'unit': 's',
'default': 0, 'info': 'Trip duration', 'groupnames': ['tripdata']}),
('depart', {'name': 'Dep. time', 'xmltag': 'depart', 'unit': 's',
'default': 0, 'info': 'Departure time', 'groupnames': ['tripdata']}),
('arrival', {'name': 'Arr. time', 'xmltag': 'arrival', 'unit': 's',
                          'default': 0, 'info': 'Arrival time', 'groupnames': ['tripdata']}),
('departPos', {'name': 'depart pos', 'xmltag': 'departPos', 'unit': 'm',
'default': 0.0, 'info': 'depart position', 'groupnames': ['tripdata']}),
('arrivalPos', {'name': 'arrival pos', 'xmltag': 'arrivalPos', 'unit': 'm',
'default': 0.0, 'info': 'arrival position', 'groupnames': ['tripdata']}),
('routeLength', {'name': 'Length', 'xmltag': 'routeLength', 'unit': 'm',
'default': 0.0, 'info': 'Route length', 'groupnames': ['tripdata']}),
('waitSteps', {'name': 'wait steps', 'xmltag': 'waitSteps', 'unit': None, 'default': 0,
                           'info': 'Number of time steps the vehicle was waiting during its trip', 'groupnames': ['tripdata']}),
('rerouteNo', {'name': 'reroute No', 'xmltag': 'rerouteNo', 'unit': None,
'default': 0, 'info': 'Number of re-routes', 'groupnames': ['tripdata']}),
])
for attrname, kwargs in attrinfos.iteritems():
self.add_resultattr(attrname, **kwargs)
# this is special for route info
self.add_col(am.IdlistsArrayConf('ids_edges', edges,
name='Edge IDs',
groupnames=['routeinfo'],
info='List of edge IDs constituting the actually taken route.',
xmltag='edges',
))
def add_resultattr(self, attrname, **kwargs):
# default cannot be kwarg
default = kwargs['default']
del kwargs['default']
        if 'groupnames' in kwargs:
kwargs['groupnames'].append('results')
else:
kwargs['groupnames'] = ['results']
self.add_col(am.ArrayConf(attrname, default, **kwargs))
def import_routesdata(self, filepath):
# TODO
pass
def import_tripdata(self, filepath):
# print 'import_tripdata',filepath,self.get_group('tripdata')
self.import_sumoxml(filepath, self.get_group('tripdata'))
def import_sumoxml(self, filepath, attrconfigs):
element = 'tripinfo'
# print 'import_sumoxml',element
#id_type = 'edge',
#reader = 'interval',
ids_raw, results, interval = read_interval2(
filepath, element, attrconfigs)
# this procedure is necessary to create new result ids only
# for trips that are not yet in the database
n = len(ids_raw)
# print ' n',n
ind_range = np.arange(n, dtype=np.int32)
ids = np.zeros(n, dtype=np.int32)
for i in ind_range:
id_trip = int(ids_raw[i])
if self.ids_trip.has_index(id_trip):
ids[i] = self.ids_trip.get_id_from_index(id_trip)
else:
ids[i] = self.add_row(ids_trip=id_trip)
for attrconfig in attrconfigs:
attrname = attrconfig.attrname
default = attrconfig.get_default()
if type(default) in (types.IntType, types.LongType):
conversion = 'i' # int
values_attr = np.zeros(n, int)
elif type(default) in (types.FloatType, types.ComplexType):
conversion = 'f' # float
values_attr = np.zeros(n, float)
else:
conversion = 's' # str
                values_attr = np.zeros(n, dtype=object)
# this is a tricky way to read the data stored in
            # dictionaries into array structures as used in the results;
            # the problem is that not all dictionaries have all ids
for i in ind_range:
val = results[attrname].get(ids_raw[i], default)
                if conversion == 'i':
                    val = int(val)
                # print '    attrname', attrname, conversion, val, type(val)
                values_attr[i] = val
# print ' attrname',attrname
# print ' ids',type(ids),ids
# print ' values_attr',type(values_attr),values_attr
attrconfig.set(ids, values_attr)
class Edgeresults(am.ArrayObjman):
def __init__(self, parent, edges, is_add_default=True, **kwargs):
self._init_objman(ident='edgeresults',
parent=parent, # main results object
name='Edge results',
info='Table with simulation results for each network edge.',
#xmltag = ('vtypes','vtype','ids_sumo'),
**kwargs)
self.add_col(am.IdsArrayConf('ids_edge', edges,
groupnames=['state'],
is_index=True,
name='ID edge',
info='ID of edge.',
))
attrinfos = OrderedDict([
('entered', {'name': 'Entered', 'unit': None, 'default': 0,
'info': 'Entered number of vehicles', 'xmltag': 'entered', 'groupnames': ['edgedata']}),
('left', {'name': 'Left', 'unit': None, 'default': 0,
'info': 'Left number of vehicles', 'xmltag': 'left', 'groupnames': ['edgedata']}),
('arrived', {'name': 'Arrived', 'unit': None, 'default': 0,
'info': 'Arrived number of vehicles', 'xmltag': 'arrived', 'groupnames': ['edgedata']}),
('departed', {'name': 'Departed', 'unit': None, 'default': 0,
'info': 'Departed number of vehicles', 'xmltag': 'departed', 'groupnames': ['edgedata']}),
('traveltime', {'name': 'Av. times', 'unit': 's', 'default': 0.0, 'info': 'Av. Travel times',
'xmltag': 'traveltime', 'groupnames': ['edgedata'], 'is_average': True}),
('density', {'name': 'Av. Densities', 'unit': 'veh/km', 'default': 0.0,
                         'info': 'Av. density of vehicles on this Edge/Lane', 'xmltag': 'density', 'groupnames': ['edgedata'], 'is_average': True}),
('waitingTime', {'name': 'Av. waits', 'unit': 's', 'default': 0.0, 'info': 'Av. Waiting times of vehicles on this Edge/Lane',
'xmltag': 'waitingTime', 'groupnames': ['edgedata'], 'is_average': True}),
('speed', {'name': 'Av. speeds', 'unit': 'm/s', 'default': 0.0,
'info': 'Av. velocity of vehicles on this Edge/Lane', 'xmltag': 'speed', 'groupnames': ['edgedata'], 'is_average': True}),
('fuel_abs', {'name': 'Abs. Fuel', 'unit': 'ml', 'default': 0.0,
'info': 'Absolute fuel consumption of vehicles on this Edge/Lane', 'xmltag': 'fuel_abs', 'groupnames': ['edgeemissions']}),
('CO_abs', {'name': 'Abs. CO', 'unit': 'mg', 'default': 0.0,
'info': 'Absolute CO emission of vehicles on this Edge/Lane', 'xmltag': 'CO_abs', 'groupnames': ['edgeemissions']}),
('CO2_abs', {'name': 'Abs. CO2', 'unit': 'mg', 'default': 0.0,
'info': 'Absolute CO2 emission of vehicles on this Edge/Lane', 'xmltag': 'CO2_abs', 'groupnames': ['edgeemissions']}),
('NOx_abs', {'name': 'Abs. NOx', 'unit': 'mg', 'default': 0.0,
'info': 'Absolute NOx emission of vehicles on this Edge/Lane', 'xmltag': 'NOx_abs', 'groupnames': ['edgeemissions']}),
('PMx_abs', {'name': 'Abs. PMx', 'unit': 'mg', 'default': 0.0,
'info': 'Absolute PMx emission (Particle matter, all sizes) of vehicles on this Edge/Lane', 'xmltag': 'PMx_abs', 'groupnames': ['edgeemissions']}),
('fuel_normed', {'name': 'Specific fuel', 'unit': 'l/km/h', 'default': 0.0,
                             'info': 'Normalized fuel consumption of vehicles on this Edge/Lane', 'xmltag': 'fuel_normed', 'groupnames': ['edgeemissions'], 'is_average': True}),
('CO_normed', {'name': 'Specific CO', 'unit': 'g/km/h', 'default': 0.0,
'info': 'Normalized CO emission of vehicles on this Edge/Lane', 'xmltag': 'CO_normed', 'groupnames': ['edgeemissions'], 'is_average': True}),
('CO2_normed', {'name': 'Specific CO2', 'unit': 'g/km/h', 'default': 0.0,
'info': 'Normalized CO2 emission of vehicles on this Edge/Lane', 'xmltag': 'CO2_normed', 'groupnames': ['edgeemissions'], 'is_average': True}),
('NOx_normed', {'name': 'Specific NOx', 'unit': 'g/km/h', 'default': 0.0,
'info': 'Normalized NOx emission of vehicles on this Edge/Lane', 'xmltag': 'NOx_normed', 'groupnames': ['edgeemissions'], 'is_average': True}),
('PMx_normed', {'name': 'Specific PMx', 'unit': 'g/km/h', 'default': 0.0,
'info': 'Normalized PMx emission of vehicles on this Edge/Lane', 'xmltag': 'PMx_normed', 'groupnames': ['edgeemissions'], 'is_average': True}),
('fuel_perVeh', {'name': 'Fuel per veh.', 'unit': 'ml/veh', 'default': 0.0,
                             'info': 'Fuel consumption per vehicle on this Edge/Lane', 'xmltag': 'fuel_perVeh', 'groupnames': ['edgeemissions'], 'is_average': True}),
('CO_perVeh', {'name': 'CO per veh.', 'unit': 'mg/veh', 'default': 0.0,
'info': 'CO emission per vehicle on this Edge/Lane', 'xmltag': 'CO_perVeh', 'groupnames': ['edgeemissions'], 'is_average': True}),
('CO2_perVeh', {'name': 'CO2 per veh.', 'unit': 'mg/veh', 'default': 0.0,
'info': 'CO2 emission per vehicle on this Edge/Lane', 'xmltag': 'CO2_perVeh', 'groupnames': ['edgeemissions'], 'is_average': True}),
('NOx_perVeh', {'name': 'NOx per veh.', 'unit': 'mg/veh', 'default': 0.0,
'info': 'NOx emission per vehicle on this Edge/Lane', 'xmltag': 'NOx_perVeh', 'groupnames': ['edgeemissions'], 'is_average': True}),
('PMx_perVeh', {'name': 'PMx per veh.', 'unit': 'mg/veh', 'default': 0.0,
'info': 'PMx emission per vehicle on this Edge/Lane', 'xmltag': 'PMx_perVeh', 'groupnames': ['edgeemissions'], 'is_average': True}),
('noise', {'name': 'Noise', 'unit': 'dB', 'default': 0.0,
'info': 'Noise of vehicles on this Edge/Lane', 'xmltag': 'noise', 'groupnames': ['edgenoise'], 'is_average': True}),
])
for attrname, kwargs in attrinfos.iteritems():
self.add_resultattr(attrname, **kwargs)
def add_resultattr(self, attrname, **kwargs):
# default cannot be kwarg
default = kwargs['default']
del kwargs['default']
        if 'groupnames' in kwargs:
kwargs['groupnames'].append('results')
else:
kwargs['groupnames'] = ['results']
self.add_col(am.ArrayConf(attrname, default, **kwargs))
def import_edgedata(self, filepath):
# print 'import_edgedata',filepath
# print ' group',self.get_group('edgedata')
#attrnames_data = ['entered','left','arrived','departed']
#attrnames_averaged = ['traveltime','density','waitingTime','speed',]
self.import_sumoxml(filepath, self.get_group('edgedata'))
def import_edgenoise(self, filepath):
# print 'import_edgedata',filepath
self.import_sumoxml(filepath, self.get_group('edgenoise'))
def import_edgeemissions(self, filepath):
# print 'import_edgedata',filepath
#attrnames_data = ['fuel_abs','CO_abs','CO2_abs','NOx_abs','PMx_abs']
#attrnames_averaged = ['fuel_normed','CO_normed','CO2_normed',]
self.import_sumoxml(filepath, self.get_group('edgeemissions'))
def import_sumoxml(self, filepath, attrconfigs):
element = 'edge'
# print 'import_sumoxml',element
#id_type = 'edge',
#reader = 'interval',
ids_sumo, results, interval = read_interval2(
filepath, element, attrconfigs)
# print ' ids_sumo',ids_sumo
# print ' results.keys()',results.keys()
# print ' results',results
        # create ids for all columns
# if fileinfo['id_type']=='edge':
# this procedure is necessary to create new result ids only
# for edges that are not yet in the database
ids_sumoedge = self.ids_edge.get_linktab().ids_sumo
n = len(ids_sumo)
# print ' n',n
ind_range = np.arange(n, dtype=np.int32)
ids = np.zeros(n, dtype=np.int32)
for i in ind_range:
id_edge = ids_sumoedge.get_id_from_index(ids_sumo[i])
if self.ids_edge.has_index(id_edge):
ids[i] = self.ids_edge.get_id_from_index(id_edge)
else:
ids[i] = self.add_row(ids_edge=id_edge)
# ids = self.add_row()# here the ids_sumo can be strings too
# elif fileinfo['id_type']=='trip':
# ids = self.tripresults.add_rows_keyrecycle(keys = ids_sumo)#
# print ' ids=',ids
for attrconfig in attrconfigs:
attrname = attrconfig.attrname
# print ' copy',attrname
default = attrconfig.get_default()
if type(default) in (types.IntType, types.LongType):
conversion = 'i' # int
values_attr = np.zeros(n, int)
elif type(default) in (types.FloatType, types.ComplexType):
conversion = 'f' # float
values_attr = np.zeros(n, float)
else:
conversion = 's' # str
                values_attr = np.zeros(n, dtype=object)
# this is a tricky way to read the data stored in
            # dictionaries into array structures as used in the results;
            # the problem is that not all dictionaries have all ids
for i in ind_range:
val = results[attrname].get(ids_sumo[i], default)
                if conversion == 'i':
                    val = int(val)
                # print '    attrname', attrname, conversion, val, type(val)
                values_attr[i] = val
# print ' attrname',attrname
# print ' ids',type(ids),ids
# print ' values_attr',type(values_attr),values_attr
attrconfig.set(ids, values_attr)
class Simresults(cm.BaseObjman):
def __init__(self, ident='simresults', scenario=None,
name='Simulation results',
info='Results of SUMO simulation run.',
outfile_prefix='out',
**kwargs):
# print 'Network.__init__',name,kwargs
rootname = scenario.get_rootfilename()
rootdirpath = scenario.get_workdirpath()
self._init_objman(ident=ident, parent=scenario, name=name,
info=info, **kwargs)
attrsman = self.set_attrsman(cm.Attrsman(self))
self.edgeresults = attrsman.add(cm.ObjConf(
Edgeresults(self, scenario.net.edges)))
self.tripresults = attrsman.add(cm.ObjConf(Tripresults(
self, scenario.demand.trips, scenario.net.edges)))
def save(self, filepath=None, is_not_save_parent=True):
        if filepath is None:
            filepath = self.get_scenario().get_rootfilepath() + '.res.obj'
cm.save_obj(self, filepath, is_not_save_parent=is_not_save_parent)
def get_scenario(self):
return self.parent
class IntervalAvReader2(handler.ContentHandler):
"""
Reads edge or lane based intervals
and returns time averaged values for each attribute name.
"""
def __init__(self, element, attrsconfigs_cumulative, attrsconfigs_average):
"""
element is "lane" or "edge" or "tripinfo"
        attrsconfigs_cumulative and attrsconfigs_average list the attribute configs to read.
"""
# print 'IntervalAvReader2'
self._element = element
self._attrsconfigs_cumulative = attrsconfigs_cumulative
self._attrsconfigs_average = attrsconfigs_average
self._time_begin = None
self._time_end = None
self._values = {}
self._ids = []
#self._n_values= {}
self.n_inter = 0
self.n_test = 0
self.n_test2 = 0
# TODO: if we knew here all ids then we
# could create a numeric array per attribute
# idea: pass ids as input arg
for attrsconfig in attrsconfigs_cumulative + attrsconfigs_average:
self._values[attrsconfig.attrname] = {}
# print ' init',attrsconfig.attrname
#self._n_values= {}
def startElement(self, name, attrs):
# if attrs.has_key('id'):
# print ' parse',name,self._element,name == self._element, attrs['id']
if name == 'interval':
self._time_inter = int(
float(attrs['end'])) - int(float(attrs['begin']))
            # here we just take the start and end time of the whole
# measurement period
            if self._time_begin is None:  # take very first time only
self._time_begin = int(float(attrs['begin']))
self._time_end = int(float(attrs['end']))
self.n_inter += 1
if name == self._element:
id = attrs['id']
# print '--'
if id not in self._ids:
self._ids.append(id)
            # no arrival data available if the trip has not been finished!
for attrsconfig in self._attrsconfigs_cumulative:
xmltag = attrsconfig.xmltag
attrname = attrsconfig.attrname
                if xmltag in attrs:
                    # print '    attrname', attrname, '*' + attrs[xmltag] + '*'
a = attrs[xmltag]
if a.strip() != '':
                        if id in self._values[attrname]:
self._values[attrname][id] += float(a)
else:
self._values[attrname][id] = float(a)
# if (id in ('1/0to1/1','1/0to2/0')) & (attrname == 'entered'):
# self.n_test+=int(attrs[attrname])
# print ' -read ',id,attrname,attrs[attrname],self.n_test,self._values[attrname][id]
#
# if (id in ('0/0to1/0')) & (attrname == 'left'):
# self.n_test2+=int(attrs[attrname])
                # print '  +read ', id, attrname, attrs[attrname], self.n_test2, self._values[attrname][id]
for attrsconfig in self._attrsconfigs_average:
xmltag = attrsconfig.xmltag
attrname = attrsconfig.attrname
                if xmltag in attrs:
n = float(self.n_inter)
a = attrs[xmltag]
if a.strip() != '':
                        if id in self._values[attrname]:
self._values[attrname][id] = (
(n - 1) * self._values[attrname][id] + float(a)) / n
#self._values[attrname][id] += float(a)/self._time_inter
#self._n_values[attrname][id] += 1
else:
self._values[attrname][id] = float(a)
#self._values[attrname][id] = float(a)/self._time_inter
#self._n_values[attrname][id] = 1
def get_data(self):
return self._values
def get_interval(self):
return (self._time_begin, self._time_end)
def get_ids(self):
return self._ids
def read_interval2(filepath, element, attrsconfigs):
# print 'read_interval2'
attrsconfigs_cumulative = []
attrsconfigs_average = []
for attrsconfig in attrsconfigs:
# print ' check',attrsconfig.attrname
if hasattr(attrsconfig, 'is_average'):
if attrsconfig.is_average:
attrsconfigs_average.append(attrsconfig)
else:
attrsconfigs_cumulative.append(attrsconfig)
else:
attrsconfigs_cumulative.append(attrsconfig)
reader = IntervalAvReader2(
element, attrsconfigs_cumulative, attrsconfigs_average)
#parser = make_parser()
# parser.setContentHandler(reader)
#fn = '"'+filepath+'"'
# print 'read_interval >'+fn+'<'
# print ' >'+filepath+'<'
# parser.parse(filepath)
parse(filepath, reader)
return reader.get_ids(), reader.get_data(), reader.get_interval()
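# Hedged usage sketch (illustrative only): calling read_interval2 directly on
# a SUMO edge-data dump. _Conf is a minimal stand-in for the attribute-config
# objects the reader expects (attrname, xmltag, is_average); the file name is
# a placeholder.
#
#     class _Conf(object):
#         def __init__(self, attrname, xmltag, is_average=False):
#             self.attrname = attrname
#             self.xmltag = xmltag
#             self.is_average = is_average
#
#     confs = [_Conf('entered', 'entered'), _Conf('speed', 'speed', True)]
#     ids, data, (t_begin, t_end) = read_interval2('edgedata.xml', 'edge', confs)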
|
abhisg/scikit-learn
|
refs/heads/master
|
examples/linear_model/plot_ols_3d.py
|
350
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Sparsity Example: Fitting only features 1 and 2
=========================================================
Features 1 and 2 of the diabetes dataset are fitted and
plotted below. It illustrates that although feature 2
has a strong coefficient on the full model, it does not
tell us much about `y` compared to just feature 1.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets, linear_model
diabetes = datasets.load_diabetes()
indices = (0, 1)
X_train = diabetes.data[:-20, indices]
X_test = diabetes.data[-20:, indices]
y_train = diabetes.target[:-20]
y_test = diabetes.target[-20:]
ols = linear_model.LinearRegression()
ols.fit(X_train, y_train)
###############################################################################
# Plot the figure
def plot_figs(fig_num, elev, azim, X_train, clf):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, elev=elev, azim=azim)
ax.scatter(X_train[:, 0], X_train[:, 1], y_train, c='k', marker='+')
ax.plot_surface(np.array([[-.1, -.1], [.15, .15]]),
np.array([[-.1, .15], [-.1, .15]]),
clf.predict(np.array([[-.1, -.1, .15, .15],
[-.1, .15, -.1, .15]]).T
).reshape((2, 2)),
alpha=.5)
ax.set_xlabel('X_1')
ax.set_ylabel('X_2')
ax.set_zlabel('Y')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
# Generate the three different figures from different views
elev = 43.5
azim = -110
plot_figs(1, elev, azim, X_train, ols)
elev = -.5
azim = 0
plot_figs(2, elev, azim, X_train, ols)
elev = -.5
azim = 90
plot_figs(3, elev, azim, X_train, ols)
plt.show()
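# Hedged addition (not part of the original example): printing the fitted
# parameters gives a quick numeric check on how the weight is split between
# the two selected features.
print("Coefficients:", ols.coef_)
print("Intercept:", ols.intercept_)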
|
cortedeltimo/SickRage
|
refs/heads/master
|
lib/sqlalchemy/pool.py
|
75
|
# sqlalchemy/pool.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Connection pooling for DB-API connections.
Provides a number of connection pool implementations for a variety of
usage scenarios and thread behavior requirements imposed by the
application, DB-API or database itself.
Also provides a DB-API 2.0 connection proxying mechanism allowing
regular DB-API connect() methods to be transparently managed by a
SQLAlchemy connection pool.
"""
import time
import traceback
import weakref
from . import exc, log, event, interfaces, util
from .util import queue as sqla_queue
from .util import threading, memoized_property, \
chop_traceback
from collections import deque
proxies = {}
def manage(module, **params):
"""Return a proxy for a DB-API module that automatically
pools connections.
Given a DB-API 2.0 module and pool management parameters, returns
a proxy for the module that will automatically pool connections,
creating new connection pools for each distinct set of connection
arguments sent to the decorated module's connect() function.
:param module: a DB-API 2.0 database module
:param poolclass: the class used by the pool module to provide
pooling. Defaults to :class:`.QueuePool`.
:param \*\*params: will be passed through to *poolclass*
"""
try:
return proxies[module]
except KeyError:
return proxies.setdefault(module, _DBProxy(module, **params))
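# Hedged usage sketch (kept as a comment so importing this module stays
# side-effect free; assumes only the standard-library sqlite3 module):
#
#     import sqlite3
#     from sqlalchemy import pool
#     sqlite3_pooled = pool.manage(sqlite3)
#     conn = sqlite3_pooled.connect(':memory:')  # checked out from a QueuePool
#     conn.close()                               # returned to the pool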
def clear_managers():
"""Remove all current DB-API 2.0 managers.
All pools and connections are disposed.
"""
for manager in proxies.values():
manager.close()
proxies.clear()
reset_rollback = util.symbol('reset_rollback')
reset_commit = util.symbol('reset_commit')
reset_none = util.symbol('reset_none')
class _ConnDialect(object):
"""partial implementation of :class:`.Dialect`
which provides DBAPI connection methods.
When a :class:`.Pool` is combined with an :class:`.Engine`,
the :class:`.Engine` replaces this with its own
:class:`.Dialect`.
"""
def do_rollback(self, dbapi_connection):
dbapi_connection.rollback()
def do_commit(self, dbapi_connection):
dbapi_connection.commit()
def do_close(self, dbapi_connection):
dbapi_connection.close()
class Pool(log.Identified):
"""Abstract base class for connection pools."""
_dialect = _ConnDialect()
def __init__(self,
creator, recycle=-1, echo=None,
use_threadlocal=False,
logging_name=None,
reset_on_return=True,
listeners=None,
events=None,
_dispatch=None,
_dialect=None):
"""
Construct a Pool.
:param creator: a callable function that returns a DB-API
connection object. The function will be called with
parameters.
:param recycle: If set to non -1, number of seconds between
connection recycling, which means upon checkout, if this
timeout is surpassed the connection will be closed and
replaced with a newly opened connection. Defaults to -1.
:param logging_name: String identifier which will be used within
the "name" field of logging records generated within the
"sqlalchemy.pool" logger. Defaults to a hexstring of the object's
id.
:param echo: If True, connections being pulled and retrieved
from the pool will be logged to the standard output, as well
as pool sizing information. Echoing can also be achieved by
enabling logging for the "sqlalchemy.pool"
namespace. Defaults to False.
:param use_threadlocal: If set to True, repeated calls to
:meth:`connect` within the same application thread will be
guaranteed to return the same connection object, if one has
already been retrieved from the pool and has not been
returned yet. Offers a slight performance advantage at the
cost of individual transactions by default. The
:meth:`.Pool.unique_connection` method is provided to return
          a consistently unique connection to bypass this behavior
when the flag is set.
.. warning:: The :paramref:`.Pool.use_threadlocal` flag
**does not affect the behavior** of :meth:`.Engine.connect`.
:meth:`.Engine.connect` makes use of the :meth:`.Pool.unique_connection`
method which **does not use thread local context**.
To produce a :class:`.Connection` which refers to the
:meth:`.Pool.connect` method, use
:meth:`.Engine.contextual_connect`.
Note that other SQLAlchemy connectivity systems such as
:meth:`.Engine.execute` as well as the orm
:class:`.Session` make use of
:meth:`.Engine.contextual_connect` internally, so these functions
are compatible with the :paramref:`.Pool.use_threadlocal` setting.
.. seealso::
:ref:`threadlocal_strategy` - contains detail on the
"threadlocal" engine strategy, which provides a more comprehensive
approach to "threadlocal" connectivity for the specific
use case of using :class:`.Engine` and :class:`.Connection` objects
directly.
:param reset_on_return: Determine steps to take on
connections as they are returned to the pool.
reset_on_return can have any of these values:
* ``"rollback"`` - call rollback() on the connection,
to release locks and transaction resources.
This is the default value. The vast majority
of use cases should leave this value set.
* ``True`` - same as 'rollback', this is here for
backwards compatibility.
* ``"commit"`` - call commit() on the connection,
to release locks and transaction resources.
A commit here may be desirable for databases that
cache query plans if a commit is emitted,
such as Microsoft SQL Server. However, this
value is more dangerous than 'rollback' because
any data changes present on the transaction
are committed unconditionally.
* ``None`` - don't do anything on the connection.
This setting should only be made on a database
that has no transaction support at all,
namely MySQL MyISAM. By not doing anything,
performance can be improved. This
setting should **never be selected** for a
database that supports transactions,
as it will lead to deadlocks and stale
state.
* ``False`` - same as None, this is here for
backwards compatibility.
.. versionchanged:: 0.7.6
:paramref:`.Pool.reset_on_return` accepts ``"rollback"``
and ``"commit"`` arguments.
:param events: a list of 2-tuples, each of the form
``(callable, target)`` which will be passed to :func:`.event.listen`
upon construction. Provided here so that event listeners
can be assigned via :func:`.create_engine` before dialect-level
listeners are applied.
:param listeners: Deprecated. A list of
:class:`~sqlalchemy.interfaces.PoolListener`-like objects or
dictionaries of callables that receive events when DB-API
connections are created, checked out and checked in to the
pool. This has been superseded by
:func:`~sqlalchemy.event.listen`.
"""
if logging_name:
self.logging_name = self._orig_logging_name = logging_name
else:
self._orig_logging_name = None
log.instance_logger(self, echoflag=echo)
self._threadconns = threading.local()
self._creator = creator
self._recycle = recycle
self._invalidate_time = 0
self._use_threadlocal = use_threadlocal
if reset_on_return in ('rollback', True, reset_rollback):
self._reset_on_return = reset_rollback
elif reset_on_return in (None, False, reset_none):
self._reset_on_return = reset_none
elif reset_on_return in ('commit', reset_commit):
self._reset_on_return = reset_commit
else:
raise exc.ArgumentError(
"Invalid value for 'reset_on_return': %r"
% reset_on_return)
self.echo = echo
if _dispatch:
self.dispatch._update(_dispatch, only_propagate=False)
if _dialect:
self._dialect = _dialect
if events:
for fn, target in events:
event.listen(self, target, fn)
if listeners:
util.warn_deprecated(
"The 'listeners' argument to Pool (and "
"create_engine()) is deprecated. Use event.listen().")
for l in listeners:
self.add_listener(l)
def _close_connection(self, connection):
self.logger.debug("Closing connection %r", connection)
try:
self._dialect.do_close(connection)
except (SystemExit, KeyboardInterrupt):
raise
except:
self.logger.error("Exception closing connection %r",
connection, exc_info=True)
@util.deprecated(
2.7, "Pool.add_listener is deprecated. Use event.listen()")
def add_listener(self, listener):
"""Add a :class:`.PoolListener`-like object to this pool.
``listener`` may be an object that implements some or all of
PoolListener, or a dictionary of callables containing implementations
of some or all of the named methods in PoolListener.
"""
interfaces.PoolListener._adapt_listener(self, listener)
def unique_connection(self):
"""Produce a DBAPI connection that is not referenced by any
thread-local context.
This method is equivalent to :meth:`.Pool.connect` when the
:paramref:`.Pool.use_threadlocal` flag is not set to True.
When :paramref:`.Pool.use_threadlocal` is True, the :meth:`.Pool.unique_connection`
method provides a means of bypassing the threadlocal context.
"""
return _ConnectionFairy._checkout(self)
def _create_connection(self):
"""Called by subclasses to create a new ConnectionRecord."""
return _ConnectionRecord(self)
def _invalidate(self, connection, exception=None):
"""Mark all connections established within the generation
of the given connection as invalidated.
If this pool's last invalidate time is before when the given
connection was created, update the timestamp til now. Otherwise,
no action is performed.
Connections with a start time prior to this pool's invalidation
time will be recycled upon next checkout.
"""
rec = getattr(connection, "_connection_record", None)
if not rec or self._invalidate_time < rec.starttime:
self._invalidate_time = time.time()
if getattr(connection, 'is_valid', False):
connection.invalidate(exception)
def recreate(self):
"""Return a new :class:`.Pool`, of the same class as this one
and configured with identical creation arguments.
        This method is used in conjunction with :meth:`dispose`
to close out an entire :class:`.Pool` and create a new one in
its place.
"""
raise NotImplementedError()
def dispose(self):
"""Dispose of this pool.
This method leaves the possibility of checked-out connections
remaining open, as it only affects connections that are
idle in the pool.
See also the :meth:`Pool.recreate` method.
"""
raise NotImplementedError()
def connect(self):
"""Return a DBAPI connection from the pool.
The connection is instrumented such that when its
``close()`` method is called, the connection will be returned to
the pool.
"""
if not self._use_threadlocal:
return _ConnectionFairy._checkout(self)
try:
rec = self._threadconns.current()
except AttributeError:
pass
else:
if rec is not None:
return rec._checkout_existing()
return _ConnectionFairy._checkout(self, self._threadconns)
def _return_conn(self, record):
"""Given a _ConnectionRecord, return it to the :class:`.Pool`.
This method is called when an instrumented DBAPI connection
has its ``close()`` method called.
"""
if self._use_threadlocal:
try:
del self._threadconns.current
except AttributeError:
pass
self._do_return_conn(record)
def _do_get(self):
"""Implementation for :meth:`get`, supplied by subclasses."""
raise NotImplementedError()
def _do_return_conn(self, conn):
"""Implementation for :meth:`return_conn`, supplied by subclasses."""
raise NotImplementedError()
def status(self):
raise NotImplementedError()
class _ConnectionRecord(object):
"""Internal object which maintains an individual DBAPI connection
referenced by a :class:`.Pool`.
The :class:`._ConnectionRecord` object always exists for any particular
DBAPI connection whether or not that DBAPI connection has been
"checked out". This is in contrast to the :class:`._ConnectionFairy`
which is only a public facade to the DBAPI connection while it is checked
out.
A :class:`._ConnectionRecord` may exist for a span longer than that
of a single DBAPI connection. For example, if the
:meth:`._ConnectionRecord.invalidate`
method is called, the DBAPI connection associated with this
:class:`._ConnectionRecord`
will be discarded, but the :class:`._ConnectionRecord` may be used again,
in which case a new DBAPI connection is produced when the :class:`.Pool`
next uses this record.
The :class:`._ConnectionRecord` is delivered along with connection
pool events, including :meth:`.PoolEvents.connect` and
:meth:`.PoolEvents.checkout`, however :class:`._ConnectionRecord` still
remains an internal object whose API and internals may change.
.. seealso::
:class:`._ConnectionFairy`
"""
def __init__(self, pool):
self.__pool = pool
self.connection = self.__connect()
self.finalize_callback = deque()
pool.dispatch.first_connect.\
for_modify(pool.dispatch).\
exec_once(self.connection, self)
pool.dispatch.connect(self.connection, self)
connection = None
"""A reference to the actual DBAPI connection being tracked.
May be ``None`` if this :class:`._ConnectionRecord` has been marked
as invalidated; a new DBAPI connection may replace it if the owning
pool calls upon this :class:`._ConnectionRecord` to reconnect.
"""
@util.memoized_property
def info(self):
"""The ``.info`` dictionary associated with the DBAPI connection.
This dictionary is shared among the :attr:`._ConnectionFairy.info`
and :attr:`.Connection.info` accessors.
"""
return {}
@classmethod
def checkout(cls, pool):
rec = pool._do_get()
try:
dbapi_connection = rec.get_connection()
except:
rec.checkin()
raise
fairy = _ConnectionFairy(dbapi_connection, rec)
rec.fairy_ref = weakref.ref(
fairy,
lambda ref: _finalize_fairy and \
_finalize_fairy(
dbapi_connection,
rec, pool, ref, pool._echo)
)
_refs.add(rec)
if pool._echo:
pool.logger.debug("Connection %r checked out from pool",
dbapi_connection)
return fairy
def checkin(self):
self.fairy_ref = None
connection = self.connection
pool = self.__pool
while self.finalize_callback:
finalizer = self.finalize_callback.pop()
finalizer(connection)
if pool.dispatch.checkin:
pool.dispatch.checkin(connection, self)
pool._return_conn(self)
def close(self):
if self.connection is not None:
self.__close()
def invalidate(self, e=None):
"""Invalidate the DBAPI connection held by this :class:`._ConnectionRecord`.
This method is called for all connection invalidations, including
when the :meth:`._ConnectionFairy.invalidate` or :meth:`.Connection.invalidate`
methods are called, as well as when any so-called "automatic invalidation"
condition occurs.
.. seealso::
:ref:`pool_connection_invalidation`
"""
self.__pool.dispatch.invalidate(self.connection, self, e)
if e is not None:
self.__pool.logger.info(
"Invalidate connection %r (reason: %s:%s)",
self.connection, e.__class__.__name__, e)
else:
self.__pool.logger.info(
"Invalidate connection %r", self.connection)
self.__close()
self.connection = None
def get_connection(self):
recycle = False
if self.connection is None:
self.connection = self.__connect()
self.info.clear()
if self.__pool.dispatch.connect:
self.__pool.dispatch.connect(self.connection, self)
elif self.__pool._recycle > -1 and \
time.time() - self.starttime > self.__pool._recycle:
self.__pool.logger.info(
"Connection %r exceeded timeout; recycling",
self.connection)
recycle = True
elif self.__pool._invalidate_time > self.starttime:
self.__pool.logger.info(
"Connection %r invalidated due to pool invalidation; recycling",
self.connection
)
recycle = True
if recycle:
self.__close()
self.connection = self.__connect()
self.info.clear()
if self.__pool.dispatch.connect:
self.__pool.dispatch.connect(self.connection, self)
return self.connection
def __close(self):
self.__pool._close_connection(self.connection)
def __connect(self):
try:
self.starttime = time.time()
connection = self.__pool._creator()
self.__pool.logger.debug("Created new connection %r", connection)
return connection
except Exception as e:
self.__pool.logger.debug("Error on connect(): %s", e)
raise
def _finalize_fairy(connection, connection_record, pool, ref, echo, fairy=None):
"""Cleanup for a :class:`._ConnectionFairy` whether or not it's already
been garbage collected.
"""
_refs.discard(connection_record)
if ref is not None and \
connection_record.fairy_ref is not ref:
return
if connection is not None:
if connection_record and echo:
pool.logger.debug("Connection %r being returned to pool",
connection)
try:
fairy = fairy or _ConnectionFairy(connection, connection_record)
assert fairy.connection is connection
fairy._reset(pool, echo)
# Immediately close detached instances
if not connection_record:
pool._close_connection(connection)
except Exception as e:
if connection_record:
connection_record.invalidate(e=e)
if isinstance(e, (SystemExit, KeyboardInterrupt)):
raise
if connection_record:
connection_record.checkin()
_refs = set()
class _ConnectionFairy(object):
"""Proxies a DBAPI connection and provides return-on-dereference
support.
This is an internal object used by the :class:`.Pool` implementation
to provide context management to a DBAPI connection delivered by
that :class:`.Pool`.
The name "fairy" is inspired by the fact that the :class:`._ConnectionFairy`
object's lifespan is transitory, as it lasts only for the length of a
specific DBAPI connection being checked out from the pool, and additionally
that as a transparent proxy, it is mostly invisible.
.. seealso::
:class:`._ConnectionRecord`
"""
def __init__(self, dbapi_connection, connection_record):
self.connection = dbapi_connection
self._connection_record = connection_record
connection = None
"""A reference to the actual DBAPI connection being tracked."""
_connection_record = None
"""A reference to the :class:`._ConnectionRecord` object associated
with the DBAPI connection.
This is currently an internal accessor which is subject to change.
"""
_reset_agent = None
"""Refer to an object with a ``.commit()`` and ``.rollback()`` method;
if non-None, the "reset-on-return" feature will call upon this object
rather than directly against the dialect-level do_rollback() and do_commit()
methods.
In practice, a :class:`.Connection` assigns a :class:`.Transaction` object
to this variable when one is in scope so that the :class:`.Transaction`
takes the job of committing or rolling back on return if
:meth:`.Connection.close` is called while the :class:`.Transaction`
still exists.
This is essentially an "event handler" of sorts but is simplified as an
instance variable both for performance/simplicity as well as that there
can only be one "reset agent" at a time.
"""
@classmethod
def _checkout(cls, pool, threadconns=None, fairy=None):
if not fairy:
fairy = _ConnectionRecord.checkout(pool)
fairy._pool = pool
fairy._counter = 0
fairy._echo = pool._should_log_debug()
if threadconns is not None:
threadconns.current = weakref.ref(fairy)
if fairy.connection is None:
raise exc.InvalidRequestError("This connection is closed")
fairy._counter += 1
if not pool.dispatch.checkout or fairy._counter != 1:
return fairy
# Pool listeners can trigger a reconnection on checkout
attempts = 2
while attempts > 0:
try:
pool.dispatch.checkout(fairy.connection,
fairy._connection_record,
fairy)
return fairy
except exc.DisconnectionError as e:
pool.logger.info(
"Disconnection detected on checkout: %s", e)
fairy._connection_record.invalidate(e)
fairy.connection = fairy._connection_record.get_connection()
attempts -= 1
pool.logger.info("Reconnection attempts exhausted on checkout")
fairy.invalidate()
raise exc.InvalidRequestError("This connection is closed")
def _checkout_existing(self):
return _ConnectionFairy._checkout(self._pool, fairy=self)
def _checkin(self):
_finalize_fairy(self.connection, self._connection_record,
self._pool, None, self._echo, fairy=self)
self.connection = None
self._connection_record = None
_close = _checkin
def _reset(self, pool, echo):
if pool.dispatch.reset:
pool.dispatch.reset(self, self._connection_record)
if pool._reset_on_return is reset_rollback:
if echo:
pool.logger.debug("Connection %s rollback-on-return%s",
self.connection,
", via agent"
if self._reset_agent else "")
if self._reset_agent:
self._reset_agent.rollback()
else:
pool._dialect.do_rollback(self)
elif pool._reset_on_return is reset_commit:
if echo:
pool.logger.debug("Connection %s commit-on-return%s",
self.connection,
", via agent"
if self._reset_agent else "")
if self._reset_agent:
self._reset_agent.commit()
else:
pool._dialect.do_commit(self)
@property
def _logger(self):
return self._pool.logger
@property
def is_valid(self):
"""Return True if this :class:`._ConnectionFairy` still refers
to an active DBAPI connection."""
return self.connection is not None
@util.memoized_property
def info(self):
"""Info dictionary associated with the underlying DBAPI connection
referred to by this :class:`.ConnectionFairy`, allowing user-defined
data to be associated with the connection.
The data here will follow along with the DBAPI connection including
after it is returned to the connection pool and used again
in subsequent instances of :class:`._ConnectionFairy`. It is shared
with the :attr:`._ConnectionRecord.info` and :attr:`.Connection.info`
accessors.
"""
return self._connection_record.info
def invalidate(self, e=None):
"""Mark this connection as invalidated.
This method can be called directly, and is also called as a result
of the :meth:`.Connection.invalidate` method. When invoked,
the DBAPI connection is immediately closed and discarded from
further use by the pool. The invalidation mechanism proceeds
via the :meth:`._ConnectionRecord.invalidate` internal method.
.. seealso::
:ref:`pool_connection_invalidation`
"""
if self.connection is None:
util.warn("Can't invalidate an already-closed connection.")
return
if self._connection_record:
self._connection_record.invalidate(e=e)
self.connection = None
self._checkin()
def cursor(self, *args, **kwargs):
"""Return a new DBAPI cursor for the underlying connection.
This method is a proxy for the ``connection.cursor()`` DBAPI
method.
"""
return self.connection.cursor(*args, **kwargs)
def __getattr__(self, key):
return getattr(self.connection, key)
def detach(self):
"""Separate this connection from its Pool.
This means that the connection will no longer be returned to the
pool when closed, and will instead be literally closed. The
containing ConnectionRecord is separated from the DB-API connection,
and will create a new connection when next used.
Note that any overall connection limiting constraints imposed by a
Pool implementation may be violated after a detach, as the detached
connection is removed from the pool's knowledge and control.
"""
if self._connection_record is not None:
_refs.remove(self._connection_record)
self._connection_record.fairy_ref = None
self._connection_record.connection = None
# TODO: should this be _return_conn?
self._pool._do_return_conn(self._connection_record)
self.info = self.info.copy()
self._connection_record = None
def close(self):
self._counter -= 1
if self._counter == 0:
self._checkin()
class SingletonThreadPool(Pool):
"""A Pool that maintains one connection per thread.
Maintains one connection per each thread, never moving a connection to a
thread other than the one which it was created in.
Options are the same as those of :class:`.Pool`, as well as:
:param pool_size: The number of threads in which to maintain connections
at once. Defaults to five.
:class:`.SingletonThreadPool` is used by the SQLite dialect
automatically when a memory-based database is used.
See :ref:`sqlite_toplevel`.
"""
def __init__(self, creator, pool_size=5, **kw):
kw['use_threadlocal'] = True
Pool.__init__(self, creator, **kw)
self._conn = threading.local()
self._all_conns = set()
self.size = pool_size
def recreate(self):
self.logger.info("Pool recreating")
return self.__class__(self._creator,
pool_size=self.size,
recycle=self._recycle,
echo=self.echo,
logging_name=self._orig_logging_name,
use_threadlocal=self._use_threadlocal,
reset_on_return=self._reset_on_return,
_dispatch=self.dispatch,
_dialect=self._dialect)
def dispose(self):
"""Dispose of this pool."""
for conn in self._all_conns:
try:
conn.close()
except (SystemExit, KeyboardInterrupt):
raise
except:
# pysqlite won't even let you close a conn from a thread
# that didn't create it
pass
self._all_conns.clear()
def _cleanup(self):
while len(self._all_conns) >= self.size:
c = self._all_conns.pop()
c.close()
def status(self):
return "SingletonThreadPool id:%d size: %d" % \
(id(self), len(self._all_conns))
def _do_return_conn(self, conn):
pass
def _do_get(self):
try:
c = self._conn.current()
if c:
return c
except AttributeError:
pass
c = self._create_connection()
self._conn.current = weakref.ref(c)
if len(self._all_conns) >= self.size:
self._cleanup()
self._all_conns.add(c)
return c
class QueuePool(Pool):
"""A :class:`.Pool` that imposes a limit on the number of open connections.
:class:`.QueuePool` is the default pooling implementation used for
all :class:`.Engine` objects, unless the SQLite dialect is in use.
"""
def __init__(self, creator, pool_size=5, max_overflow=10, timeout=30,
**kw):
"""
Construct a QueuePool.
:param creator: a callable function that returns a DB-API
connection object, same as that of :paramref:`.Pool.creator`.
:param pool_size: The size of the pool to be maintained,
defaults to 5. This is the largest number of connections that
will be kept persistently in the pool. Note that the pool
begins with no connections; once this number of connections
is requested, that number of connections will remain.
``pool_size`` can be set to 0 to indicate no size limit; to
disable pooling, use a :class:`~sqlalchemy.pool.NullPool`
instead.
:param max_overflow: The maximum overflow size of the
pool. When the number of checked-out connections reaches the
size set in pool_size, additional connections will be
returned up to this limit. When those additional connections
are returned to the pool, they are disconnected and
discarded. It follows then that the total number of
simultaneous connections the pool will allow is pool_size +
`max_overflow`, and the total number of "sleeping"
connections the pool will allow is pool_size. `max_overflow`
can be set to -1 to indicate no overflow limit; no limit
will be placed on the total number of concurrent
connections. Defaults to 10.
:param timeout: The number of seconds to wait before giving up
on returning a connection. Defaults to 30.
:param \**kw: Other keyword arguments including :paramref:`.Pool.recycle`,
:paramref:`.Pool.echo`, :paramref:`.Pool.reset_on_return` and others
are passed to the :class:`.Pool` constructor.
"""
Pool.__init__(self, creator, **kw)
self._pool = sqla_queue.Queue(pool_size)
self._overflow = 0 - pool_size
self._max_overflow = max_overflow
self._timeout = timeout
self._overflow_lock = threading.Lock()
def _do_return_conn(self, conn):
try:
self._pool.put(conn, False)
except sqla_queue.Full:
try:
conn.close()
finally:
self._dec_overflow()
def _do_get(self):
use_overflow = self._max_overflow > -1
try:
wait = use_overflow and self._overflow >= self._max_overflow
return self._pool.get(wait, self._timeout)
except sqla_queue.Empty:
if use_overflow and self._overflow >= self._max_overflow:
if not wait:
return self._do_get()
else:
raise exc.TimeoutError(
"QueuePool limit of size %d overflow %d reached, "
"connection timed out, timeout %d" %
(self.size(), self.overflow(), self._timeout))
if self._inc_overflow():
try:
return self._create_connection()
except:
self._dec_overflow()
raise
else:
return self._do_get()
def _inc_overflow(self):
if self._max_overflow == -1:
self._overflow += 1
return True
with self._overflow_lock:
if self._overflow < self._max_overflow:
self._overflow += 1
return True
else:
return False
def _dec_overflow(self):
if self._max_overflow == -1:
self._overflow -= 1
return True
with self._overflow_lock:
self._overflow -= 1
return True
def recreate(self):
self.logger.info("Pool recreating")
return self.__class__(self._creator, pool_size=self._pool.maxsize,
max_overflow=self._max_overflow,
timeout=self._timeout,
recycle=self._recycle, echo=self.echo,
logging_name=self._orig_logging_name,
use_threadlocal=self._use_threadlocal,
reset_on_return=self._reset_on_return,
_dispatch=self.dispatch,
_dialect=self._dialect)
def dispose(self):
while True:
try:
conn = self._pool.get(False)
conn.close()
except sqla_queue.Empty:
break
self._overflow = 0 - self.size()
self.logger.info("Pool disposed. %s", self.status())
def status(self):
return "Pool size: %d Connections in pool: %d "\
"Current Overflow: %d Current Checked out "\
"connections: %d" % (self.size(),
self.checkedin(),
self.overflow(),
self.checkedout())
def size(self):
return self._pool.maxsize
def checkedin(self):
return self._pool.qsize()
def overflow(self):
return self._overflow
def checkedout(self):
return self._pool.maxsize - self._pool.qsize() + self._overflow
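# Hedged usage sketch (comment only; assumes the standard-library sqlite3
# module and a placeholder database file name):
#
#     import sqlite3
#     pool = QueuePool(lambda: sqlite3.connect('demo.db'),
#                      pool_size=5, max_overflow=10, timeout=30)
#     conn = pool.connect()  # a _ConnectionFairy wrapping the DBAPI connection
#     conn.close()           # hands the underlying connection back to the pool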
class NullPool(Pool):
"""A Pool which does not pool connections.
Instead it literally opens and closes the underlying DB-API connection
per each connection open/close.
Reconnect-related functions such as ``recycle`` and connection
invalidation are not supported by this Pool implementation, since
no connections are held persistently.
.. versionchanged:: 0.7
        :class:`.NullPool` is used by the SQLite dialect automatically
when a file-based database is used. See :ref:`sqlite_toplevel`.
"""
def status(self):
return "NullPool"
def _do_return_conn(self, conn):
conn.close()
def _do_get(self):
return self._create_connection()
def recreate(self):
self.logger.info("Pool recreating")
return self.__class__(self._creator,
recycle=self._recycle,
echo=self.echo,
logging_name=self._orig_logging_name,
use_threadlocal=self._use_threadlocal,
reset_on_return=self._reset_on_return,
_dispatch=self.dispatch,
_dialect=self._dialect)
def dispose(self):
pass
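# Hedged usage sketch (comment only): NullPool is normally selected through
# create_engine rather than constructed directly; the URL is a placeholder.
#
#     from sqlalchemy import create_engine
#     from sqlalchemy.pool import NullPool
#     engine = create_engine('sqlite:///demo.db', poolclass=NullPool)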
class StaticPool(Pool):
"""A Pool of exactly one connection, used for all requests.
Reconnect-related functions such as ``recycle`` and connection
invalidation (which is also used to support auto-reconnect) are not
currently supported by this Pool implementation but may be implemented
in a future release.
"""
@memoized_property
def _conn(self):
return self._creator()
@memoized_property
def connection(self):
return _ConnectionRecord(self)
def status(self):
return "StaticPool"
def dispose(self):
if '_conn' in self.__dict__:
self._conn.close()
self._conn = None
def recreate(self):
self.logger.info("Pool recreating")
return self.__class__(creator=self._creator,
recycle=self._recycle,
use_threadlocal=self._use_threadlocal,
reset_on_return=self._reset_on_return,
echo=self.echo,
logging_name=self._orig_logging_name,
_dispatch=self.dispatch,
_dialect=self._dialect)
def _create_connection(self):
return self._conn
def _do_return_conn(self, conn):
pass
def _do_get(self):
return self.connection
class AssertionPool(Pool):
"""A :class:`.Pool` that allows at most one checked out connection at
any given time.
This will raise an exception if more than one connection is checked out
at a time. Useful for debugging code that is using more connections
than desired.
.. versionchanged:: 0.7
:class:`.AssertionPool` also logs a traceback of where
the original connection was checked out, and reports
this in the assertion error raised.
"""
def __init__(self, *args, **kw):
self._conn = None
self._checked_out = False
self._store_traceback = kw.pop('store_traceback', True)
self._checkout_traceback = None
Pool.__init__(self, *args, **kw)
def status(self):
return "AssertionPool"
def _do_return_conn(self, conn):
if not self._checked_out:
raise AssertionError("connection is not checked out")
self._checked_out = False
assert conn is self._conn
def dispose(self):
self._checked_out = False
if self._conn:
self._conn.close()
def recreate(self):
self.logger.info("Pool recreating")
return self.__class__(self._creator, echo=self.echo,
logging_name=self._orig_logging_name,
_dispatch=self.dispatch,
_dialect=self._dialect)
def _do_get(self):
if self._checked_out:
if self._checkout_traceback:
suffix = ' at:\n%s' % ''.join(
chop_traceback(self._checkout_traceback))
else:
suffix = ''
raise AssertionError("connection is already checked out" + suffix)
if not self._conn:
self._conn = self._create_connection()
self._checked_out = True
if self._store_traceback:
self._checkout_traceback = traceback.format_stack()
return self._conn
class _DBProxy(object):
"""Layers connection pooling behavior on top of a standard DB-API module.
Proxies a DB-API 2.0 connect() call to a connection pool keyed to the
specific connect parameters. Other functions and attributes are delegated
to the underlying DB-API module.
"""
def __init__(self, module, poolclass=QueuePool, **kw):
"""Initializes a new proxy.
module
a DB-API 2.0 module
poolclass
a Pool class, defaulting to QueuePool
Other parameters are sent to the Pool object's constructor.
"""
self.module = module
self.kw = kw
self.poolclass = poolclass
self.pools = {}
self._create_pool_mutex = threading.Lock()
def close(self):
for key in list(self.pools):
del self.pools[key]
def __del__(self):
self.close()
def __getattr__(self, key):
return getattr(self.module, key)
def get_pool(self, *args, **kw):
key = self._serialize(*args, **kw)
try:
return self.pools[key]
except KeyError:
self._create_pool_mutex.acquire()
try:
if key not in self.pools:
kw.pop('sa_pool_key', None)
pool = self.poolclass(lambda:
self.module.connect(*args, **kw), **self.kw)
self.pools[key] = pool
return pool
else:
return self.pools[key]
finally:
self._create_pool_mutex.release()
def connect(self, *args, **kw):
"""Activate a connection to the database.
Connect to the database using this DBProxy's module and the given
connect arguments. If the arguments match an existing pool, the
connection will be returned from the pool's current thread-local
connection instance, or if there is no thread-local connection
instance it will be checked out from the set of pooled connections.
If the pool has no available connections and allows new connections
to be created, a new database connection will be made.
"""
return self.get_pool(*args, **kw).connect()
def dispose(self, *args, **kw):
"""Dispose the pool referenced by the given connect arguments."""
key = self._serialize(*args, **kw)
try:
del self.pools[key]
except KeyError:
pass
def _serialize(self, *args, **kw):
if "sa_pool_key" in kw:
return kw['sa_pool_key']
return tuple(
list(args) +
[(k, kw[k]) for k in sorted(kw)]
)
|
aidan-/ansible-modules-extras
|
refs/heads/devel
|
cloud/amazon/redshift.py
|
8
|
#!/usr/bin/python
# Copyright 2014 Jens Carl, Hothead Games Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
author:
- "Jens Carl (@j-carl), Hothead Games Inc."
module: redshift
version_added: "2.2"
short_description: create, delete, or modify an Amazon Redshift instance
description:
  - Creates, deletes, or modifies Amazon Redshift cluster instances.
options:
command:
description:
- Specifies the action to take.
required: true
choices: [ 'create', 'facts', 'delete', 'modify' ]
identifier:
description:
- Redshift cluster identifier.
required: true
node_type:
description:
- The node type of the cluster. Must be specified when command=create.
choices: ['ds1.xlarge', 'ds1.8xlarge', 'ds2.xlarge', 'ds2.8xlarge', 'dc1.large', 'dc1.8xlarge', 'dw1.xlarge', 'dw1.8xlarge', 'dw2.large', 'dw2.8xlarge']
username:
description:
- Master database username. Used only when command=create.
password:
description:
- Master database password. Used only when command=create.
cluster_type:
description:
- The type of cluster.
choices: ['multi-node', 'single-node' ]
default: 'single-node'
db_name:
description:
- Name of the database.
default: null
availability_zone:
description:
- availability zone in which to launch cluster
aliases: ['zone', 'aws_zone']
number_of_nodes:
description:
- Number of nodes. Only used when cluster_type=multi-node.
default: null
cluster_subnet_group_name:
description:
      - which subnet group to place the cluster in
aliases: ['subnet']
cluster_security_groups:
description:
      - the security groups the cluster belongs to
default: null
aliases: ['security_groups']
vpc_security_group_ids:
description:
- VPC security group
aliases: ['vpc_security_groups']
default: null
preferred_maintenance_window:
description:
- maintenance window
aliases: ['maintance_window', 'maint_window']
default: null
cluster_parameter_group_name:
description:
- name of the cluster parameter group
aliases: ['param_group_name']
default: null
automated_snapshot_retention_period:
description:
      - period (in days) during which automated snapshots are retained
aliases: ['retention_period']
default: null
port:
description:
      - which port the cluster is listening on
default: null
cluster_version:
description:
- which version the cluster should have
aliases: ['version']
choices: ['1.0']
default: null
allow_version_upgrade:
description:
      - flag that determines whether version upgrades are allowed
aliases: ['version_upgrade']
default: true
publicly_accessible:
description:
- if the cluster is accessible publicly or not
default: false
encrypted:
description:
- if the cluster is encrypted or not
default: false
elastic_ip:
description:
      - Elastic IP address to attach to the cluster, if any.
default: null
new_cluster_identifier:
description:
      - New identifier for the cluster. Only used when command=modify.
aliases: ['new_identifier']
default: null
wait:
description:
      - When command=create, modify, or restore, wait for the database to enter the 'available' state. When command=delete, wait for the database to be terminated.
default: "no"
choices: [ "yes", "no" ]
wait_timeout:
description:
      - How long to wait, in seconds, before giving up.
default: 300
requirements: [ 'boto' ]
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Basic cluster provisioning example
- redshift: >
command=create
node_type=ds1.xlarge
identifier=new_cluster
username=cluster_admin
password=1nsecure
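# Cluster deletion example (illustrative; assumes the cluster created above)
- redshift: >
    command=delete
    identifier=new_cluster
    wait=yes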
'''
RETURN = '''
cluster:
description: dictionary containing all the cluster information
returned: success
type: dictionary
contains:
identifier:
description: Id of the cluster.
returned: success
type: string
sample: "new_redshift_cluster"
create_time:
description: Time of the cluster creation as timestamp.
returned: success
type: float
sample: 1430158536.308
status:
            description: Status of the cluster.
returned: success
type: string
sample: "available"
db_name:
description: Name of the database.
returned: success
type: string
sample: "new_db_name"
availability_zone:
description: Amazon availability zone where the cluster is located.
returned: success
type: string
sample: "us-east-1b"
maintenance_window:
description: Time frame when maintenance/upgrade are done.
returned: success
type: string
sample: "sun:09:30-sun:10:00"
private_ip_address:
description: Private IP address of the main node.
returned: success
type: string
sample: "10.10.10.10"
public_ip_address:
description: Public IP address of the main node.
returned: success
type: string
sample: "0.0.0.0"
port:
description: Port of the cluster.
returned: success
type: int
sample: 5439
url:
description: FQDN of the main cluster node.
returned: success
type: string
sample: "new-redshift_cluster.jfkdjfdkj.us-east-1.redshift.amazonaws.com"
'''
import time
try:
import boto
from boto import redshift
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def _collect_facts(resource):
"""Transfrom cluster information to dict."""
facts = {
'identifier' : resource['ClusterIdentifier'],
'create_time' : resource['ClusterCreateTime'],
'status' : resource['ClusterStatus'],
'username' : resource['MasterUsername'],
'db_name' : resource['DBName'],
'availability_zone' : resource['AvailabilityZone'],
'maintenance_window': resource['PreferredMaintenanceWindow'],
}
for node in resource['ClusterNodes']:
if node['NodeRole'] in ('SHARED', 'LEADER'):
facts['private_ip_address'] = node['PrivateIPAddress']
break
return facts
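# The resulting facts dict is a sketch of the RETURN documentation above,
# e.g. {'identifier': ..., 'status': ..., 'db_name': ...,
#       'private_ip_address': ...} when a LEADER/SHARED node is present.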
def create_cluster(module, redshift):
"""
Create a new cluster
module: AnsibleModule object
redshift: authenticated redshift connection object
Returns:
"""
identifier = module.params.get('identifier')
node_type = module.params.get('node_type')
username = module.params.get('username')
password = module.params.get('password')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
changed = True
# Package up the optional parameters
params = {}
for p in ('db_name', 'cluster_type', 'cluster_security_groups',
'vpc_security_group_ids', 'cluster_subnet_group_name',
'availability_zone', 'preferred_maintenance_window',
'cluster_parameter_group_name',
'automated_snapshot_retention_period', 'port',
'cluster_version', 'allow_version_upgrade',
'number_of_nodes', 'publicly_accessible',
'encrypted', 'elastic_ip'):
if p in module.params:
            params[p] = module.params.get(p)
try:
redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
changed = False
except boto.exception.JSONResponseError as e:
try:
redshift.create_cluster(identifier, node_type, username, password, **params)
except boto.exception.JSONResponseError as e:
module.fail_json(msg=str(e))
try:
resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
except boto.exception.JSONResponseError as e:
module.fail_json(msg=str(e))
if wait:
try:
wait_timeout = time.time() + wait_timeout
time.sleep(5)
while wait_timeout > time.time() and resource['ClusterStatus'] != 'available':
time.sleep(5)
if wait_timeout <= time.time():
module.fail_json(msg = "Timeout waiting for resource %s" % resource.id)
resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
except boto.exception.JSONResponseError as e:
module.fail_json(msg=str(e))
return(changed, _collect_facts(resource))
def describe_cluster(module, redshift):
"""
Collect data about the cluster.
module: Ansible module object
redshift: authenticated redshift connection object
"""
identifier = module.params.get('identifier')
try:
resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
except boto.exception.JSONResponseError as e:
module.fail_json(msg=str(e))
return(True, _collect_facts(resource))
def delete_cluster(module, redshift):
"""
Delete a cluster.
module: Ansible module object
redshift: authenticated redshift connection object
"""
identifier = module.params.get('identifier')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
try:
        redshift.delete_cluster(identifier)
except boto.exception.JSONResponseError as e:
module.fail_json(msg=str(e))
if wait:
try:
wait_timeout = time.time() + wait_timeout
resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
while wait_timeout > time.time() and resource['ClusterStatus'] != 'deleting':
time.sleep(5)
if wait_timeout <= time.time():
module.fail_json(msg = "Timeout waiting for resource %s" % resource.id)
resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
except boto.exception.JSONResponseError as e:
module.fail_json(msg=str(e))
return(True, {})
def modify_cluster(module, redshift):
"""
Modify an existing cluster.
module: Ansible module object
redshift: authenticated redshift connection object
"""
identifier = module.params.get('identifier')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
# Package up the optional parameters
params = {}
for p in ('cluster_type', 'cluster_security_groups',
'vpc_security_group_ids', 'cluster_subnet_group_name',
'availability_zone', 'preferred_maintenance_window',
'cluster_parameter_group_name',
'automated_snapshot_retention_period', 'port', 'cluster_version',
'allow_version_upgrade', 'number_of_nodes', 'new_cluster_identifier'):
if p in module.params:
params[p] = module.params.get(p)
    # Fail early if the cluster does not exist, then apply the modification.
    try:
        redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
    except boto.exception.JSONResponseError as e:
        module.fail_json(msg=str(e))
    try:
        redshift.modify_cluster(identifier, **params)
    except boto.exception.JSONResponseError as e:
        module.fail_json(msg=str(e))
try:
resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
except boto.exception.JSONResponseError as e:
module.fail_json(msg=str(e))
if wait:
try:
wait_timeout = time.time() + wait_timeout
time.sleep(5)
while wait_timeout > time.time() and resource['ClusterStatus'] != 'available':
time.sleep(5)
if wait_timeout <= time.time():
module.fail_json(msg = "Timeout waiting for resource %s" % resource.id)
resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
except boto.exception.JSONResponseError as e:
            # describe_clusters may raise while the modification is in flight;
            # see https://github.com/boto/boto/issues/2776.
module.fail_json(msg=str(e))
return(True, _collect_facts(resource))
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
command = dict(choices=['create', 'facts', 'delete', 'modify'], required=True),
identifier = dict(required=True),
node_type = dict(choices=['ds1.xlarge', 'ds1.8xlarge', 'ds2.xlarge', 'ds2.8xlarge', 'dc1.large', 'dc1.8xlarge', 'dw1.xlarge', 'dw1.8xlarge', 'dw2.large', 'dw2.8xlarge'], required=False),
username = dict(required=False),
password = dict(no_log=True, required=False),
        db_name = dict(required=False),
cluster_type = dict(choices=['multi-node', 'single-node', ], default='single-node'),
cluster_security_groups = dict(aliases=['security_groups'], type='list'),
vpc_security_group_ids = dict(aliases=['vpc_security_groups'], type='list'),
cluster_subnet_group_name = dict(aliases=['subnet']),
availability_zone = dict(aliases=['aws_zone', 'zone']),
preferred_maintenance_window = dict(aliases=['maintance_window', 'maint_window']),
cluster_parameter_group_name = dict(aliases=['param_group_name']),
automated_snapshot_retention_period = dict(aliases=['retention_period']),
port = dict(type='int'),
cluster_version = dict(aliases=['version'], choices=['1.0']),
allow_version_upgrade = dict(aliases=['version_upgrade'], type='bool', default=True),
number_of_nodes = dict(type='int'),
publicly_accessible = dict(type='bool', default=False),
encrypted = dict(type='bool', default=False),
elastic_ip = dict(required=False),
new_cluster_identifier = dict(aliases=['new_identifier']),
wait = dict(type='bool', default=False),
wait_timeout = dict(default=300),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
)
if not HAS_BOTO:
module.fail_json(msg='boto v2.9.0+ required for this module')
command = module.params.get('command')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if not region:
module.fail_json(msg=str("region not specified and unable to determine region from EC2_REGION."))
    # connect to the redshift endpoint
try:
conn = connect_to_aws(boto.redshift, region, **aws_connect_params)
except boto.exception.JSONResponseError as e:
module.fail_json(msg=str(e))
changed = True
if command == 'create':
(changed, cluster) = create_cluster(module, conn)
elif command == 'facts':
(changed, cluster) = describe_cluster(module, conn)
elif command == 'delete':
(changed, cluster) = delete_cluster(module, conn)
elif command == 'modify':
(changed, cluster) = modify_cluster(module, conn)
module.exit_json(changed=changed, cluster=cluster)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
|
miptliot/edx-platform
|
refs/heads/ginkgo_openedu_docker
|
common/djangoapps/terrain/stubs/tests/test_youtube_stub.py
|
172
|
"""
Unit test for stub YouTube implementation.
"""
import unittest
import requests
from ..youtube import StubYouTubeService
class StubYouTubeServiceTest(unittest.TestCase):
def setUp(self):
super(StubYouTubeServiceTest, self).setUp()
self.server = StubYouTubeService()
self.url = "http://127.0.0.1:{0}/".format(self.server.port)
self.server.config['time_to_response'] = 0.0
self.addCleanup(self.server.shutdown)
def test_unused_url(self):
response = requests.get(self.url + 'unused_url')
self.assertEqual("Unused url", response.content)
@unittest.skip('Failing intermittently due to inconsistent responses from YT. See TE-871')
def test_video_url(self):
response = requests.get(
self.url + 'test_youtube/OEoXaMPEzfM?v=2&alt=jsonc&callback=callback_func'
)
# YouTube metadata for video `OEoXaMPEzfM` states that duration is 116.
self.assertEqual(
'callback_func({"data": {"duration": 116, "message": "I\'m youtube.", "id": "OEoXaMPEzfM"}})',
response.content
)
def test_transcript_url_equal(self):
response = requests.get(
self.url + 'test_transcripts_youtube/t__eq_exist'
)
self.assertEqual(
"".join([
'<?xml version="1.0" encoding="utf-8" ?>',
'<transcript><text start="1.0" dur="1.0">',
'Equal transcripts</text></transcript>'
]), response.content
)
def test_transcript_url_not_equal(self):
response = requests.get(
self.url + 'test_transcripts_youtube/t_neq_exist',
)
self.assertEqual(
"".join([
'<?xml version="1.0" encoding="utf-8" ?>',
'<transcript><text start="1.1" dur="5.5">',
'Transcripts sample, different that on server',
'</text></transcript>'
]), response.content
)
def test_transcript_not_found(self):
response = requests.get(self.url + 'test_transcripts_youtube/some_id')
self.assertEqual(404, response.status_code)
def test_reset_configuration(self):
reset_config_url = self.url + 'del_config'
# add some configuration data
self.server.config['test_reset'] = 'This is a reset config test'
# reset server configuration
response = requests.delete(reset_config_url)
self.assertEqual(response.status_code, 200)
# ensure that server config dict is empty after successful reset
self.assertEqual(self.server.config, {})
|
ben-hopps/nupic
|
refs/heads/master
|
scripts/run_swarm.py
|
38
|
#! /usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
""" @file run_swarm.py
This script is the command-line interface for running swarms in nupic."""
import sys
import os
import optparse
from nupic.swarming import permutations_runner
from nupic.swarming.permutations_runner import DEFAULT_OPTIONS
def runPermutations(args):
"""
The main function of the RunPermutations utility.
This utility will automatically generate and run multiple prediction framework
experiments that are permutations of a base experiment via the Grok engine.
For example, if you have an experiment that you want to test with 3 possible
values of variable A and 2 possible values of variable B, this utility will
automatically generate the experiment directories and description files for
each of the 6 different experiments.
Here is an example permutations file which is read by this script below. The
permutations file must be in the same directory as the description.py for the
base experiment that you want to permute. It contains a permutations dict, an
optional list of the result items to report on for each experiment, and an
optional result item to optimize for.
When an 'optimize' entry is provided, this tool will attempt to prioritize the
order in which the various permutations are run in order to improve the odds
of running the best permutations sooner. It does this by watching the results
for various parameter values and putting parameter values that give generally
better results at the head of the queue.
In addition, when the optimize key is provided, we periodically update the UI
with the best results obtained so far on that metric.
---------------------------------------------------------------------------
permutations = dict(
iterationCount = [1000, 5000],
coincCount = [50, 100],
trainTP = [False],
)
report = ['.*reconstructErrAvg',
'.*inputPredScore.*',
]
optimize = 'postProc_gym1_baseline:inputPredScore'
Parameters:
----------------------------------------------------------------------
args: Command-line args; the equivalent of sys.argv[1:]
retval: for the actions 'run', 'pickup', and 'dryRun', returns the
              Hypersearch job ID (in ClientJobs table); otherwise returns
None
"""
helpString = (
"\n\n%prog [options] permutationsScript\n"
"%prog [options] expDescription.json\n\n"
"This script runs permutations of an experiment via Grok engine, as "
"defined in a\npermutations.py script or an expGenerator experiment "
"description json file.\nIn the expDescription.json form, the json file "
"MUST have the file extension\n'.json' and MUST conform to "
"expGenerator/experimentDescriptionSchema.json.")
parser = optparse.OptionParser(usage=helpString)
parser.add_option(
"--replaceReport", dest="replaceReport", action="store_true",
default=DEFAULT_OPTIONS["replaceReport"],
help="Replace existing csv report file if it exists. Default is to "
"append to the existing file. [default: %default].")
parser.add_option(
"--action", dest="action", default=DEFAULT_OPTIONS["action"],
choices=["run", "pickup", "report", "dryRun"],
help="Which action to perform. Possible actions are run, pickup, choices, "
"report, list. "
"run: run a new HyperSearch via Grok. "
"pickup: pick up the latest run of a HyperSearch job. "
"dryRun: run a single HypersearchWorker inline within the application "
"process without the Grok infrastructure to flush out bugs in "
"description and permutations scripts; defaults to "
"maxPermutations=1: use --maxPermutations to change this; "
"report: just print results from the last or current run. "
"[default: %default].")
parser.add_option(
"--maxPermutations", dest="maxPermutations",
default=DEFAULT_OPTIONS["maxPermutations"], type="int",
help="Maximum number of models to search. Applies only to the 'run' and "
"'dryRun' actions. [default: %default].")
parser.add_option(
"--exports", dest="exports", default=DEFAULT_OPTIONS["exports"],
type="string",
help="json dump of environment variable settings that should be applied"
"for the job before running. [default: %default].")
parser.add_option(
"--useTerminators", dest="useTerminators", action="store_true",
default=DEFAULT_OPTIONS["useTerminators"], help="Use early model terminators in HyperSearch"
"[default: %default].")
parser.add_option(
"--maxWorkers", dest="maxWorkers", default=DEFAULT_OPTIONS["maxWorkers"],
type="int",
help="Maximum number of concurrent workers to launch. Applies only to "
"the 'run' action. [default: %default].")
parser.add_option(
"-v", dest="verbosityCount", action="count", default=0,
help="Increase verbosity of the output. Specify multiple times for "
"increased verbosity. e.g., -vv is more verbose than -v.")
parser.add_option(
"--timeout", dest="timeout", default=DEFAULT_OPTIONS["timeout"], type="int",
help="Time out for this search in minutes"
"[default: %default].")
parser.add_option(
"--overwrite", default=DEFAULT_OPTIONS["overwrite"], action="store_true",
help="If 'yes', overwrite existing description.py and permutations.py"
" (in the same directory as the <expDescription.json> file) if they"
" already exist. [default: %default].")
parser.add_option(
"--genTopNDescriptions", dest="genTopNDescriptions",
default=DEFAULT_OPTIONS["genTopNDescriptions"], type="int",
help="Generate description files for the top N models. Each one will be"
" placed into it's own subdirectory under the base description file."
"[default: %default].")
(options, positionalArgs) = parser.parse_args(args)
# Get the permutations script's filepath
if len(positionalArgs) != 1:
parser.error("You must supply the name of exactly one permutations script "
"or JSON description file.")
fileArgPath = os.path.expanduser(positionalArgs[0])
fileArgPath = os.path.expandvars(fileArgPath)
fileArgPath = os.path.abspath(fileArgPath)
permWorkDir = os.path.dirname(fileArgPath)
outputLabel = os.path.splitext(os.path.basename(fileArgPath))[0]
basename = os.path.basename(fileArgPath)
fileExtension = os.path.splitext(basename)[1]
optionsDict = vars(options)
if fileExtension == ".json":
returnValue = permutations_runner.runWithJsonFile(
fileArgPath, optionsDict, outputLabel, permWorkDir)
else:
returnValue = permutations_runner.runWithPermutationsScript(
fileArgPath, optionsDict, outputLabel, permWorkDir)
return returnValue
if __name__ == "__main__":
runPermutations(sys.argv[1:])
|
joshbohde/scikit-learn
|
refs/heads/master
|
sklearn/externals/joblib/test/test_func_inspect.py
|
2
|
"""
Test the func_inspect module.
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Copyright (c) 2009 Gael Varoquaux
# License: BSD Style, 3 clauses.
import nose
import tempfile
import functools
from ..func_inspect import filter_args, get_func_name, get_func_code
from ..memory import Memory
################################################################################
# Module-level functions, for tests
def f(x, y=0):
pass
def f2(x):
pass
# Create a Memory object to test decorated functions.
# We should be careful not to call the decorated functions, so that
# cache directories are not created in the temp dir.
mem = Memory(cachedir=tempfile.gettempdir())
@mem.cache
def g(x):
return x
def h(x, y=0, *args, **kwargs):
pass
def i(x=1):
pass
def j(x, y, **kwargs):
pass
def k(*args, **kwargs):
pass
class Klass(object):
def f(self, x):
return x
################################################################################
# Tests
def test_filter_args():
yield nose.tools.assert_equal, filter_args(f, [], 1), {'x': 1, 'y': 0}
yield nose.tools.assert_equal, filter_args(f, ['x'], 1), {'y': 0}
yield nose.tools.assert_equal, filter_args(f, ['y'], 0), {'x': 0}
yield nose.tools.assert_equal, filter_args(f, ['y'], 0, y=1), {'x': 0}
yield nose.tools.assert_equal, filter_args(f, ['x', 'y'], 0), {}
yield nose.tools.assert_equal, filter_args(f, [], 0, y=1), {'x':0, 'y':1}
yield nose.tools.assert_equal, filter_args(f, ['y'], x=2, y=1), {'x':2}
yield nose.tools.assert_equal, filter_args(i, [], 2), {'x': 2}
yield nose.tools.assert_equal, filter_args(f2, [], x=1), {'x': 1}
def test_filter_args_method():
obj = Klass()
nose.tools.assert_equal(filter_args(obj.f, [], 1),
{'x': 1, 'self': obj})
def test_filter_varargs():
yield nose.tools.assert_equal, filter_args(h, [], 1), \
{'x': 1, 'y': 0, '*':[], '**':{}}
yield nose.tools.assert_equal, filter_args(h, [], 1, 2, 3, 4), \
{'x': 1, 'y': 2, '*':[3, 4], '**':{}}
yield nose.tools.assert_equal, filter_args(h, [], 1, 25, ee=2), \
{'x': 1, 'y': 25, '*':[], '**':{'ee':2}}
yield nose.tools.assert_equal, filter_args(h, ['*'], 1, 2, 25, ee=2), \
{'x': 1, 'y': 2, '**':{'ee':2}}
def test_filter_kwargs():
nose.tools.assert_equal(filter_args(k, [], 1, 2, ee=2),
{'*': [1, 2], '**':{'ee':2}})
nose.tools.assert_equal(filter_args(k, [], 3, 4),
{'*': [3, 4], '**':{}})
def test_filter_args_2():
nose.tools.assert_equal(filter_args(j, [], 1, 2, ee=2),
{'x': 1, 'y': 2, '**':{'ee':2}})
nose.tools.assert_raises(ValueError, filter_args, f, 'a', None)
# Check that we capture an undefined argument
nose.tools.assert_raises(ValueError, filter_args, f, ['a'], None)
ff = functools.partial(f, 1)
# filter_args has to special-case partial
nose.tools.assert_equal(filter_args(ff, [], 1),
{'*': [1], '**':{}})
nose.tools.assert_equal(filter_args(ff, ['y'], 1),
{'*': [1], '**':{}})
def test_func_name():
yield nose.tools.assert_equal, 'f', get_func_name(f)[1]
# Check that we are not confused by the decoration
yield nose.tools.assert_equal, 'g', get_func_name(g)[1]
def test_func_inspect_errors():
""" Check that func_inspect is robust and will work on weird objects
"""
nose.tools.assert_equal(get_func_name('a'.lower)[-1], 'lower')
nose.tools.assert_equal(get_func_code('a'.lower)[1:], (None, -1))
ff = lambda x: x
nose.tools.assert_equal(get_func_name(ff, win_characters=False)[-1],
'<lambda>')
nose.tools.assert_equal(get_func_code(ff)[1],
__file__.replace('.pyc', '.py'))
# Simulate a function defined in __main__
ff.__module__ = '__main__'
nose.tools.assert_equal(get_func_name(ff, win_characters=False)[-1],
'<lambda>')
nose.tools.assert_equal(get_func_code(ff)[1],
__file__.replace('.pyc', '.py'))
def test_bound_methods():
""" Make sure that calling the same method on two different instances
        of the same class resolves to different signatures.
"""
a = Klass()
b = Klass()
nose.tools.assert_not_equal(filter_args(a.f, [], 1),
filter_args(b.f, [], 1))
def test_filter_args_error_msg():
""" Make sure that filter_args returns decent error messages, for the
sake of the user.
"""
nose.tools.assert_raises(ValueError, filter_args, f, [])
|
fhaoquan/kbengine
|
refs/heads/master
|
kbe/src/lib/python/Lib/encodings/cp1252.py
|
272
|
""" Python Character Mapping Codec cp1252 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1252.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp1252',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
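# Round-trip sketch (cp1252 maps byte 0x80 to the euro sign):
#   '\u20ac'.encode('cp1252') == b'\x80'
#   b'\x80'.decode('cp1252') == '\u20ac'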
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\u20ac' # 0x80 -> EURO SIGN
'\ufffe' # 0x81 -> UNDEFINED
'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
'\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
'\u2020' # 0x86 -> DAGGER
'\u2021' # 0x87 -> DOUBLE DAGGER
'\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT
'\u2030' # 0x89 -> PER MILLE SIGN
'\u0160' # 0x8A -> LATIN CAPITAL LETTER S WITH CARON
'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
'\u0152' # 0x8C -> LATIN CAPITAL LIGATURE OE
'\ufffe' # 0x8D -> UNDEFINED
'\u017d' # 0x8E -> LATIN CAPITAL LETTER Z WITH CARON
'\ufffe' # 0x8F -> UNDEFINED
'\ufffe' # 0x90 -> UNDEFINED
'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
'\u2022' # 0x95 -> BULLET
'\u2013' # 0x96 -> EN DASH
'\u2014' # 0x97 -> EM DASH
'\u02dc' # 0x98 -> SMALL TILDE
'\u2122' # 0x99 -> TRADE MARK SIGN
'\u0161' # 0x9A -> LATIN SMALL LETTER S WITH CARON
'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
'\u0153' # 0x9C -> LATIN SMALL LIGATURE OE
'\ufffe' # 0x9D -> UNDEFINED
'\u017e' # 0x9E -> LATIN SMALL LETTER Z WITH CARON
'\u0178' # 0x9F -> LATIN CAPITAL LETTER Y WITH DIAERESIS
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
'\xa2' # 0xA2 -> CENT SIGN
'\xa3' # 0xA3 -> POUND SIGN
'\xa4' # 0xA4 -> CURRENCY SIGN
'\xa5' # 0xA5 -> YEN SIGN
'\xa6' # 0xA6 -> BROKEN BAR
'\xa7' # 0xA7 -> SECTION SIGN
'\xa8' # 0xA8 -> DIAERESIS
'\xa9' # 0xA9 -> COPYRIGHT SIGN
'\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR
'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xac' # 0xAC -> NOT SIGN
'\xad' # 0xAD -> SOFT HYPHEN
'\xae' # 0xAE -> REGISTERED SIGN
'\xaf' # 0xAF -> MACRON
'\xb0' # 0xB0 -> DEGREE SIGN
'\xb1' # 0xB1 -> PLUS-MINUS SIGN
'\xb2' # 0xB2 -> SUPERSCRIPT TWO
'\xb3' # 0xB3 -> SUPERSCRIPT THREE
'\xb4' # 0xB4 -> ACUTE ACCENT
'\xb5' # 0xB5 -> MICRO SIGN
'\xb6' # 0xB6 -> PILCROW SIGN
'\xb7' # 0xB7 -> MIDDLE DOT
'\xb8' # 0xB8 -> CEDILLA
'\xb9' # 0xB9 -> SUPERSCRIPT ONE
'\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR
'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
'\xbf' # 0xBF -> INVERTED QUESTION MARK
'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
'\xd0' # 0xD0 -> LATIN CAPITAL LETTER ETH
'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xd7' # 0xD7 -> MULTIPLICATION SIGN
'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
'\xde' # 0xDE -> LATIN CAPITAL LETTER THORN
'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
'\xf0' # 0xF0 -> LATIN SMALL LETTER ETH
'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf7' # 0xF7 -> DIVISION SIGN
'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
'\xfe' # 0xFE -> LATIN SMALL LETTER THORN
'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
|
maria/PyGithub
|
refs/heads/master
|
github/tests/PaginatedList.py
|
25
|
# -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2013 davidbrai <davidbrai@gmail.com> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import Framework
from github.PaginatedList import PaginatedList as PaginatedListImpl
class PaginatedList(Framework.TestCase):
def setUp(self):
Framework.TestCase.setUp(self)
self.repo = self.g.get_user("openframeworks").get_repo("openFrameworks")
self.list = self.repo.get_issues()
def testIteration(self):
self.assertEqual(len(list(self.list)), 333)
def testSeveralIterations(self):
self.assertEqual(len(list(self.list)), 333)
self.assertEqual(len(list(self.list)), 333)
self.assertEqual(len(list(self.list)), 333)
self.assertEqual(len(list(self.list)), 333)
def testIntIndexingInFirstPage(self):
self.assertEqual(self.list[0].id, 4772349)
self.assertEqual(self.list[24].id, 4286936)
def testReversedIterationWithSinglePage(self):
r = self.list.reversed
self.assertEqual(r[0].id, 4286936)
self.assertEqual(r[1].id, 4317009)
def testReversedIterationWithMultiplePages(self):
r = self.list.reversed
self.assertEqual(r[0].id, 94898)
self.assertEqual(r[1].id, 104702)
self.assertEqual(r[13].id, 166211)
self.assertEqual(r[14].id, 166212)
self.assertEqual(r[15].id, 166214)
def testReversedIterationSupportsIterator(self):
r = self.list.reversed
for i in r:
self.assertEqual(i.id, 4286936)
return
self.fail("empty iterator")
def testGettingTheReversedListDoesNotModifyTheOriginalList(self):
self.assertEqual(self.list[0].id, 18345408)
self.assertEqual(self.list[30].id, 17916118)
r = self.list.reversed
self.assertEqual(self.list[0].id, 18345408)
self.assertEqual(self.list[30].id, 17916118)
self.assertEqual(r[0].id, 132373)
self.assertEqual(r[30].id, 543694)
def testIntIndexingInThirdPage(self):
self.assertEqual(self.list[50].id, 3911629)
self.assertEqual(self.list[74].id, 3605277)
def testGetFirstPage(self):
self.assertListKeyEqual(self.list.get_page(0), lambda i: i.id, [4772349, 4767675, 4758608, 4700182, 4662873, 4608132, 4604661, 4588997, 4557803, 4554058, 4539985, 4507572, 4507492, 4507416, 4447561, 4406584, 4384548, 4383465, 4373361, 4373201, 4370619, 4356530, 4352401, 4317009, 4286936])
def testGetThirdPage(self):
self.assertListKeyEqual(self.list.get_page(2), lambda i: i.id, [3911629, 3911537, 3910580, 3910555, 3910549, 3897090, 3883598, 3856005, 3850655, 3825582, 3813852, 3812318, 3812275, 3807459, 3799872, 3799653, 3795495, 3754055, 3710293, 3662214, 3647640, 3631618, 3627067, 3614231, 3605277])
def testIntIndexingAfterIteration(self):
self.assertEqual(len(list(self.list)), 333)
self.assertEqual(self.list[11].id, 4507572)
self.assertEqual(self.list[73].id, 3614231)
self.assertEqual(self.list[332].id, 94898)
def testSliceIndexingInFirstPage(self):
self.assertListKeyEqual(self.list[:13], lambda i: i.id, [4772349, 4767675, 4758608, 4700182, 4662873, 4608132, 4604661, 4588997, 4557803, 4554058, 4539985, 4507572, 4507492])
self.assertListKeyEqual(self.list[:13:3], lambda i: i.id, [4772349, 4700182, 4604661, 4554058, 4507492])
self.assertListKeyEqual(self.list[10:13], lambda i: i.id, [4539985, 4507572, 4507492])
self.assertListKeyEqual(self.list[5:13:3], lambda i: i.id, [4608132, 4557803, 4507572])
def testSliceIndexingUntilFourthPage(self):
self.assertListKeyEqual(self.list[:99:10], lambda i: i.id, [4772349, 4539985, 4370619, 4207350, 4063366, 3911629, 3813852, 3647640, 3528378, 3438233])
self.assertListKeyEqual(self.list[73:78], lambda i: i.id, [3614231, 3605277, 3596240, 3594731, 3593619])
self.assertListKeyEqual(self.list[70:80:2], lambda i: i.id, [3647640, 3627067, 3605277, 3594731, 3593430])
def testSliceIndexingUntilEnd(self):
self.assertListKeyEqual(self.list[310::3], lambda i: i.id, [268332, 204247, 169176, 166211, 165898, 163959, 132373, 104702])
self.assertListKeyEqual(self.list[310:], lambda i: i.id, [268332, 211418, 205935, 204247, 172424, 171615, 169176, 166214, 166212, 166211, 166209, 166208, 165898, 165537, 165409, 163959, 132671, 132377, 132373, 130269, 111018, 104702, 94898])
def testInterruptedIteration(self):
# No asserts, but checks that only three pages are fetched
l = 0
for element in self.list: # pragma no branch (exits only by break)
l += 1
if l == 75:
break
def testInterruptedIterationInSlice(self):
# No asserts, but checks that only three pages are fetched
l = 0
for element in self.list[:100]: # pragma no branch (exits only by break)
l += 1
if l == 75:
break
def testCustomPerPage(self):
self.assertEqual(self.g.per_page, 30)
self.g.per_page = 100
self.assertEqual(self.g.per_page, 100)
self.assertEqual(len(list(self.repo.get_issues())), 456)
def testCustomPerPageWithNoUrlParams(self):
        import CommitComment  # Don't pollute the github.tests namespace; it would conflict with github.tests.CommitComment
self.g.per_page = 100
paginated_list = PaginatedListImpl(
CommitComment.CommitComment,
self.repo._requester,
self.repo.url + "/comments",
None
)
def testCustomPerPageWithNoUrlParams2(self):
        # This test is redundant and less unitary than testCustomPerPageWithNoUrlParams,
        # but I hope it will be more robust if we refactor PaginatedList,
        # because testCustomPerPageWithNoUrlParams only tests the constructor.
self.g.per_page = 100
self.assertEqual(len(list(self.repo.get_comments())), 325)
def testCustomPerPageWithGetPage(self):
self.g.per_page = 100
self.assertEqual(len(self.repo.get_issues().get_page(2)), 100)
|
skosukhin/spack
|
refs/heads/esiwace
|
var/spack/repos/builtin/packages/perl-data-optlist/package.py
|
1
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PerlDataOptlist(PerlPackage):
"""Parse and validate simple name/value option pairs"""
homepage = "http://search.cpan.org/~rjbs/Data-OptList-0.110/lib/Data/OptList.pm"
url = "http://search.cpan.org/CPAN/authors/id/R/RJ/RJBS/Data-OptList-0.110.tar.gz"
version('0.110', 'f9236c9ea5607134ad8a2b3dc901c4c5')
depends_on('perl-sub-install', type=('build', 'run'))
|
9929105/KEEP
|
refs/heads/utah-demo
|
keep_backend/vocab/__init__.py
|
12133432
| |
rbramwell/pulp
|
refs/heads/master
|
server/test/unit/server/db/migrations/__init__.py
|
12133432
| |
yvaucher/account-financial-tools
|
refs/heads/master
|
__unported__/account_cancel_invoice_check_voucher/__init__.py
|
2
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author Vincent Renaville. Copyright 2012 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import account_invoice
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Darthkpo/xtt
|
refs/heads/master
|
openpyxl/utils/tests/test_datetime.py
|
1
|
from __future__ import absolute_import
# Copyright (c) 2010-2015 openpyxl
# Python stdlib imports
from datetime import datetime, date, timedelta, time
import pytest
def test_datetime_to_W3CDTF():
from ..datetime import datetime_to_W3CDTF
assert datetime_to_W3CDTF(datetime(2013, 7, 15, 6, 52, 33)) == "2013-07-15T06:52:33Z"
def test_W3CDTF_to_datetime():
from ..datetime import W3CDTF_to_datetime
value = "2011-06-30T13:35:26Z"
assert W3CDTF_to_datetime(value) == datetime(2011, 6, 30, 13, 35, 26)
value = "2013-03-04T12:19:01.00Z"
assert W3CDTF_to_datetime(value) == datetime(2013, 3, 4, 12, 19, 1)
@pytest.mark.parametrize("value, expected",
[
(date(1899, 12, 31), 0),
(date(1900, 1, 15), 15),
(date(1900, 2, 28), 59),
(date(1900, 3, 1), 61),
(datetime(2010, 1, 18, 14, 15, 20, 1600), 40196.5939815),
(date(2009, 12, 20), 40167),
(datetime(1506, 10, 15), -143618.0),
])
def test_to_excel(value, expected):
from ..datetime import to_excel
FUT = to_excel
assert FUT(value) == expected
@pytest.mark.parametrize("value, expected",
[
(date(1904, 1, 1), 0),
(date(2011, 10, 31), 39385),
(datetime(2010, 1, 18, 14, 15, 20, 1600), 38734.5939815),
(date(2009, 12, 20), 38705),
(datetime(1506, 10, 15), -145079.0)
])
def test_to_excel_mac(value, expected):
from ..datetime import to_excel, CALENDAR_MAC_1904
FUT = to_excel
assert FUT(value, CALENDAR_MAC_1904) == expected
@pytest.mark.parametrize("value, expected",
[
(40167, datetime(2009, 12, 20)),
(21980, datetime(1960, 3, 5)),
(59, datetime(1900, 2, 28)),
(-25063, datetime(1831, 5, 18, 0, 0)),
(40372.27616898148, datetime(2010, 7, 13, 6, 37, 41)),
(40196.5939815, datetime(2010, 1, 18, 14, 15, 20, 1600)),
(0.125, time(3, 0)),
(42126.958333333219, datetime(2015, 5, 2, 22, 59, 59, 999990)),
(42126.999999999884, datetime(2015, 5, 3, 0, 0, 0)),
(None, None),
])
def test_from_excel(value, expected):
from ..datetime import from_excel
FUT = from_excel
assert FUT(value) == expected
@pytest.mark.parametrize("value, expected",
[
(39385, datetime(2011, 10, 31)),
(21980, datetime(1964, 3, 6)),
(0, datetime(1904, 1, 1)),
(-25063, datetime(1835, 5, 19))
])
def test_from_excel_mac(value, expected):
from ..datetime import from_excel, CALENDAR_MAC_1904
FUT = from_excel
assert FUT(value, CALENDAR_MAC_1904) == expected
def test_time_to_days():
from ..datetime import time_to_days
FUT = time_to_days
t1 = time(13, 55, 12, 36)
assert FUT(t1) == 0.5800000004166667
t2 = time(3, 0, 0)
assert FUT(t2) == 0.125
def test_timedelta_to_days():
from ..datetime import timedelta_to_days
FUT = timedelta_to_days
td = timedelta(days=1, hours=3)
assert FUT(td) == 1.125
def test_days_to_time():
from ..datetime import days_to_time
td = timedelta(0, 51320, 1600)
FUT = days_to_time
assert FUT(td) == time(14, 15, 20, 1600)
|
vimagick/youtube-dl
|
refs/heads/master
|
youtube_dl/downloader/hls.py
|
26
|
from __future__ import unicode_literals
import os
import re
import subprocess
from .common import FileDownloader
from .fragment import FragmentFD
from ..compat import compat_urlparse
from ..postprocessor.ffmpeg import FFmpegPostProcessor
from ..utils import (
encodeArgument,
encodeFilename,
)
class HlsFD(FileDownloader):
def real_download(self, filename, info_dict):
url = info_dict['url']
self.report_destination(filename)
tmpfilename = self.temp_name(filename)
ffpp = FFmpegPostProcessor(downloader=self)
if not ffpp.available:
self.report_error('m3u8 download detected but ffmpeg or avconv could not be found. Please install one.')
return False
ffpp.check_version()
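        # Build an ffmpeg/avconv command roughly equivalent to:
        #   ffmpeg -y -i <m3u8 url> -f mp4 -c copy -bsf:a aac_adtstoasc <tmpfile>
        # '-c copy' remuxes without re-encoding; the 'aac_adtstoasc' bitstream
        # filter rewraps ADTS AAC audio for the MP4 container.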
args = [
encodeArgument(opt)
for opt in (ffpp.executable, '-y', '-i', url, '-f', 'mp4', '-c', 'copy', '-bsf:a', 'aac_adtstoasc')]
args.append(encodeFilename(tmpfilename, True))
self._debug_cmd(args)
retval = subprocess.call(args)
if retval == 0:
fsize = os.path.getsize(encodeFilename(tmpfilename))
self.to_screen('\r[%s] %s bytes' % (args[0], fsize))
self.try_rename(tmpfilename, filename)
self._hook_progress({
'downloaded_bytes': fsize,
'total_bytes': fsize,
'filename': filename,
'status': 'finished',
})
return True
else:
self.to_stderr('\n')
self.report_error('%s exited with code %d' % (ffpp.basename, retval))
return False
class NativeHlsFD(FragmentFD):
""" A more limited implementation that does not require ffmpeg """
FD_NAME = 'hlsnative'
def real_download(self, filename, info_dict):
man_url = info_dict['url']
self.to_screen('[%s] Downloading m3u8 manifest' % self.FD_NAME)
manifest = self.ydl.urlopen(man_url).read()
s = manifest.decode('utf-8', 'ignore')
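        # A minimal m3u8 manifest looks roughly like (illustrative):
        #   #EXTM3U
        #   #EXTINF:9.009,
        #   segment0.ts
        # Lines starting with '#' are tags; every other non-empty line is a
        # fragment URL, either absolute or relative to the manifest URL.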
fragment_urls = []
for line in s.splitlines():
line = line.strip()
if line and not line.startswith('#'):
segment_url = (
line
if re.match(r'^https?://', line)
else compat_urlparse.urljoin(man_url, line))
fragment_urls.append(segment_url)
# We only download the first fragment during the test
if self.params.get('test', False):
break
ctx = {
'filename': filename,
'total_frags': len(fragment_urls),
}
self._prepare_and_start_frag_download(ctx)
frags_filenames = []
for i, frag_url in enumerate(fragment_urls):
frag_filename = '%s-Frag%d' % (ctx['tmpfilename'], i)
success = ctx['dl'].download(frag_filename, {'url': frag_url})
if not success:
return False
with open(frag_filename, 'rb') as down:
ctx['dest_stream'].write(down.read())
frags_filenames.append(frag_filename)
self._finish_frag_download(ctx)
for frag_file in frags_filenames:
os.remove(frag_file)
return True
|
chiefspace/udemy-rest-api
|
refs/heads/master
|
udemy_rest_api_section4/env/lib/python3.4/site-packages/werkzeug/routing.py
|
87
|
# -*- coding: utf-8 -*-
"""
werkzeug.routing
~~~~~~~~~~~~~~~~
When it comes to combining multiple controller or view functions (however
you want to call them) you need a dispatcher. A simple way would be
applying regular expression tests on the ``PATH_INFO`` and calling
    registered callback functions that then return the value.
This module implements a much more powerful system than simple regular
expression matching because it can also convert values in the URLs and
build URLs.
    Here is a simple example that creates a URL map for an application with
two subdomains (www and kb) and some URL rules:
>>> m = Map([
... # Static URLs
... Rule('/', endpoint='static/index'),
... Rule('/about', endpoint='static/about'),
... Rule('/help', endpoint='static/help'),
... # Knowledge Base
... Subdomain('kb', [
... Rule('/', endpoint='kb/index'),
... Rule('/browse/', endpoint='kb/browse'),
... Rule('/browse/<int:id>/', endpoint='kb/browse'),
... Rule('/browse/<int:id>/<int:page>', endpoint='kb/browse')
... ])
... ], default_subdomain='www')
If the application doesn't use subdomains it's perfectly fine to not set
the default subdomain and not use the `Subdomain` rule factory. The endpoint
in the rules can be anything, for example import paths or unique
identifiers. The WSGI application can use those endpoints to get the
handler for that URL. It doesn't have to be a string at all but it's
recommended.
Now it's possible to create a URL adapter for one of the subdomains and
build URLs:
>>> c = m.bind('example.com')
>>> c.build("kb/browse", dict(id=42))
'http://kb.example.com/browse/42/'
>>> c.build("kb/browse", dict())
'http://kb.example.com/browse/'
>>> c.build("kb/browse", dict(id=42, page=3))
'http://kb.example.com/browse/42/3'
>>> c.build("static/about")
'/about'
>>> c.build("static/index", force_external=True)
'http://www.example.com/'
>>> c = m.bind('example.com', subdomain='kb')
>>> c.build("static/about")
'http://www.example.com/about'
The first argument to bind is the server name *without* the subdomain.
    By default it will assume that the script is mounted on the root, but
    often that's not the case, so you can provide the real mount point as the
    second argument:
>>> c = m.bind('example.com', '/applications/example')
    The third argument can be the subdomain; if not given, the default
    subdomain is used.  For more details about binding, have a look at the
documentation of the `MapAdapter`.
And here is how you can match URLs:
>>> c = m.bind('example.com')
>>> c.match("/")
('static/index', {})
>>> c.match("/about")
('static/about', {})
>>> c = m.bind('example.com', '/', 'kb')
>>> c.match("/")
('kb/index', {})
>>> c.match("/browse/42/23")
('kb/browse', {'id': 42, 'page': 23})
If matching fails you get a `NotFound` exception, if the rule thinks
it's a good idea to redirect (for example because the URL was defined
to have a slash at the end but the request was missing that slash) it
will raise a `RequestRedirect` exception. Both are subclasses of the
`HTTPException` so you can use those errors as responses in the
application.
    If matching succeeded but the URL rule was incompatible with the given
    method (for example there were only rules for `GET` and `HEAD` and the
    routing system tried to match a `POST` request) a `MethodNotAllowed`
exception is raised.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import difflib
import re
import uuid
import posixpath
from pprint import pformat
from threading import Lock
from werkzeug.urls import url_encode, url_quote, url_join
from werkzeug.utils import redirect, format_string
from werkzeug.exceptions import HTTPException, NotFound, MethodNotAllowed, \
BadHost
from werkzeug._internal import _get_environ, _encode_idna
from werkzeug._compat import itervalues, iteritems, to_unicode, to_bytes, \
text_type, string_types, native_string_result, \
implements_to_string, wsgi_decoding_dance
from werkzeug.datastructures import ImmutableDict, MultiDict
from werkzeug.utils import cached_property
_rule_re = re.compile(r'''
(?P<static>[^<]*) # static rule data
<
(?:
(?P<converter>[a-zA-Z_][a-zA-Z0-9_]*) # converter name
(?:\((?P<args>.*?)\))? # converter arguments
\: # variable delimiter
)?
(?P<variable>[a-zA-Z_][a-zA-Z0-9_]*) # variable name
>
''', re.VERBOSE)
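# For example (illustrative), '/browse/<int:id>/' contains the static part
# '/browse/', the converter 'int' with no arguments, and the variable 'id'.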
_simple_rule_re = re.compile(r'<([^>]+)>')
_converter_args_re = re.compile(r'''
((?P<name>\w+)\s*=\s*)?
(?P<value>
True|False|
\d+.\d+|
\d+.|
\d+|
\w+|
[urUR]?(?P<stringval>"[^"]*?"|'[^']*')
)\s*,
''', re.VERBOSE | re.UNICODE)
_PYTHON_CONSTANTS = {
'None': None,
'True': True,
'False': False
}
def _pythonize(value):
if value in _PYTHON_CONSTANTS:
return _PYTHON_CONSTANTS[value]
for convert in int, float:
try:
return convert(value)
except ValueError:
pass
if value[:1] == value[-1:] and value[0] in '"\'':
value = value[1:-1]
return text_type(value)
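# A few illustrative conversions (a sketch, not exhaustive):
#   _pythonize('True')  -> True
#   _pythonize('42')    -> 42
#   _pythonize('4.2')   -> 4.2
#   _pythonize("'abc'") -> 'abc'  (surrounding quotes stripped)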
def parse_converter_args(argstr):
argstr += ','
args = []
kwargs = {}
for item in _converter_args_re.finditer(argstr):
value = item.group('stringval')
if value is None:
value = item.group('value')
value = _pythonize(value)
if not item.group('name'):
args.append(value)
else:
name = item.group('name')
kwargs[name] = value
return tuple(args), kwargs
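# For example (illustrative): parse_converter_args("length=2") returns
# ((), {'length': 2}), and parse_converter_args("2, 4") returns ((2, 4), {}).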
def parse_rule(rule):
"""Parse a rule and return it as generator. Each iteration yields tuples
in the form ``(converter, arguments, variable)``. If the converter is
`None` it's a static url part, otherwise it's a dynamic one.
:internal:
"""
pos = 0
end = len(rule)
do_match = _rule_re.match
used_names = set()
while pos < end:
m = do_match(rule, pos)
if m is None:
break
data = m.groupdict()
if data['static']:
yield None, None, data['static']
variable = data['variable']
converter = data['converter'] or 'default'
if variable in used_names:
raise ValueError('variable name %r used twice.' % variable)
used_names.add(variable)
yield converter, data['args'] or None, variable
pos = m.end()
if pos < end:
remaining = rule[pos:]
if '>' in remaining or '<' in remaining:
raise ValueError('malformed url rule: %r' % rule)
yield None, None, remaining
class RoutingException(Exception):
"""Special exceptions that require the application to redirect, notifying
about missing urls, etc.
:internal:
"""
class RequestRedirect(HTTPException, RoutingException):
"""Raise if the map requests a redirect. This is for example the case if
    `strict_slashes` is activated and a URL that requires a trailing slash was
    requested without one. The attribute `new_url` contains the absolute
    destination URL.
"""
code = 301
def __init__(self, new_url):
RoutingException.__init__(self, new_url)
self.new_url = new_url
def get_response(self, environ):
return redirect(self.new_url, self.code)
class RequestSlash(RoutingException):
"""Internal exception."""
class RequestAliasRedirect(RoutingException):
"""This rule is an alias and wants to redirect to the canonical URL."""
def __init__(self, matched_values):
self.matched_values = matched_values
@implements_to_string
class BuildError(RoutingException, LookupError):
"""Raised if the build system cannot find a URL for an endpoint with the
values provided.
"""
def __init__(self, endpoint, values, method, adapter=None):
LookupError.__init__(self, endpoint, values, method)
self.endpoint = endpoint
self.values = values
self.method = method
self.adapter = adapter
@cached_property
def suggested(self):
return self.closest_rule(self.adapter)
def closest_rule(self, adapter):
def _score_rule(rule):
return sum([
0.98 * difflib.SequenceMatcher(
None, rule.endpoint, self.endpoint
).ratio(),
0.01 * bool(set(self.values or ()).issubset(rule.arguments)),
0.01 * bool(rule.methods and self.method in rule.methods)
])
if adapter and adapter.map._rules:
return max(adapter.map._rules, key=_score_rule)
def __str__(self):
message = []
message.append('Could not build url for endpoint %r' % self.endpoint)
if self.method:
message.append(' (%r)' % self.method)
if self.values:
message.append(' with values %r' % sorted(self.values.keys()))
message.append('.')
if self.suggested:
if self.endpoint == self.suggested.endpoint:
if self.method and self.method not in self.suggested.methods:
message.append(' Did you mean to use methods %r?' % sorted(
self.suggested.methods
))
missing_values = self.suggested.arguments.union(
set(self.suggested.defaults or ())
) - set(self.values.keys())
if missing_values:
message.append(
' Did you forget to specify values %r?' %
sorted(missing_values)
)
else:
message.append(
' Did you mean %r instead?' % self.suggested.endpoint
)
return u''.join(message)
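# Editor's illustrative sketch (not part of the original module): when build()
# fails, the BuildError message suggests the closest endpoint via
# closest_rule() above. Map, Rule and MapAdapter are defined later in this
# module; the helper name is hypothetical.
def _demo_build_error():
    m = Map([Rule('/downloads/', endpoint='downloads/index')])
    urls = m.bind('example.com', '/')
    try:
        urls.build('downloads')  # misspelled endpoint
    except BuildError as e:
        return str(e)  # "... Did you mean 'downloads/index' instead?"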
class ValidationError(ValueError):
"""Validation error. If a rule converter raises this exception the rule
does not match the current URL and the next URL is tried.
"""
class RuleFactory(object):
"""As soon as you have more complex URL setups it's a good idea to use rule
factories to avoid repetitive tasks. Some of them are builtin, others can
be added by subclassing `RuleFactory` and overriding `get_rules`.
"""
def get_rules(self, map):
"""Subclasses of `RuleFactory` have to override this method and return
an iterable of rules."""
raise NotImplementedError()
class Subdomain(RuleFactory):
"""All URLs provided by this factory have the subdomain set to a
specific domain. For example if you want to use the subdomain for
the current language this can be a good setup::
url_map = Map([
Rule('/', endpoint='#select_language'),
Subdomain('<string(length=2):lang_code>', [
Rule('/', endpoint='index'),
Rule('/about', endpoint='about'),
Rule('/help', endpoint='help')
])
])
All the rules except for the ``'#select_language'`` endpoint will now
listen on a two letter long subdomain that holds the language code
for the current request.
"""
def __init__(self, subdomain, rules):
self.subdomain = subdomain
self.rules = rules
def get_rules(self, map):
for rulefactory in self.rules:
for rule in rulefactory.get_rules(map):
rule = rule.empty()
rule.subdomain = self.subdomain
yield rule
class Submount(RuleFactory):
"""Like `Subdomain` but prefixes the URL rule with a given string::
url_map = Map([
Rule('/', endpoint='index'),
Submount('/blog', [
Rule('/', endpoint='blog/index'),
Rule('/entry/<entry_slug>', endpoint='blog/show')
])
])
Now the rule ``'blog/show'`` matches ``/blog/entry/<entry_slug>``.
"""
def __init__(self, path, rules):
self.path = path.rstrip('/')
self.rules = rules
def get_rules(self, map):
for rulefactory in self.rules:
for rule in rulefactory.get_rules(map):
rule = rule.empty()
rule.rule = self.path + rule.rule
yield rule
class EndpointPrefix(RuleFactory):
"""Prefixes all endpoints (which must be strings for this factory) with
another string. This can be useful for sub applications::
url_map = Map([
Rule('/', endpoint='index'),
EndpointPrefix('blog/', [Submount('/blog', [
Rule('/', endpoint='index'),
Rule('/entry/<entry_slug>', endpoint='show')
])])
])
"""
def __init__(self, prefix, rules):
self.prefix = prefix
self.rules = rules
def get_rules(self, map):
for rulefactory in self.rules:
for rule in rulefactory.get_rules(map):
rule = rule.empty()
rule.endpoint = self.prefix + rule.endpoint
yield rule
class RuleTemplate(object):
"""Returns copies of the rules wrapped and expands string templates in
the endpoint, rule, defaults or subdomain sections.
Here a small example for such a rule template::
from werkzeug.routing import Map, Rule, RuleTemplate
resource = RuleTemplate([
Rule('/$name/', endpoint='$name.list'),
Rule('/$name/<int:id>', endpoint='$name.show')
])
url_map = Map([resource(name='user'), resource(name='page')])
When a rule template is called the keyword arguments are used to
replace the placeholders in all the string parameters.
"""
def __init__(self, rules):
self.rules = list(rules)
def __call__(self, *args, **kwargs):
return RuleTemplateFactory(self.rules, dict(*args, **kwargs))
class RuleTemplateFactory(RuleFactory):
"""A factory that fills in template variables into rules. Used by
`RuleTemplate` internally.
:internal:
"""
def __init__(self, rules, context):
self.rules = rules
self.context = context
def get_rules(self, map):
for rulefactory in self.rules:
for rule in rulefactory.get_rules(map):
new_defaults = subdomain = None
if rule.defaults:
new_defaults = {}
for key, value in iteritems(rule.defaults):
if isinstance(value, string_types):
value = format_string(value, self.context)
new_defaults[key] = value
if rule.subdomain is not None:
subdomain = format_string(rule.subdomain, self.context)
new_endpoint = rule.endpoint
if isinstance(new_endpoint, string_types):
new_endpoint = format_string(new_endpoint, self.context)
yield Rule(
format_string(rule.rule, self.context),
new_defaults,
subdomain,
rule.methods,
rule.build_only,
new_endpoint,
rule.strict_slashes
)
@implements_to_string
class Rule(RuleFactory):
"""A Rule represents one URL pattern. There are some options for `Rule`
that change the way it behaves and are passed to the `Rule` constructor.
Note that besides the rule-string all arguments *must* be keyword arguments
in order to not break the application on Werkzeug upgrades.
`string`
Rule strings basically are just normal URL paths with placeholders in
the format ``<converter(arguments):name>`` where the converter and the
arguments are optional. If no converter is defined the `default`
converter is used which means `string` in the normal configuration.
URL rules that end with a slash are branch URLs, others are leaves.
If you have `strict_slashes` enabled (which is the default), all
branch URLs that are matched without a trailing slash will trigger a
redirect to the same URL with the missing slash appended.
The converters are defined on the `Map`.
`endpoint`
The endpoint for this rule. This can be anything. A reference to a
function, a string, a number etc. The preferred way is using a string
because the endpoint is used for URL generation.
`defaults`
An optional dict with defaults for other rules with the same endpoint.
This is a bit tricky but useful if you want to have unique URLs::
url_map = Map([
Rule('/all/', defaults={'page': 1}, endpoint='all_entries'),
Rule('/all/page/<int:page>', endpoint='all_entries')
])
If a user now visits ``http://example.com/all/page/1`` he will be
redirected to ``http://example.com/all/``. If `redirect_defaults` is
disabled on the `Map` instance this will only affect the URL
generation.
`subdomain`
The subdomain rule string for this rule. If not specified the rule
only matches for the `default_subdomain` of the map. If the map is
not bound to a subdomain this feature is disabled.
Can be useful if you want to have user profiles on different subdomains
and all subdomains are forwarded to your application::
url_map = Map([
Rule('/', subdomain='<username>', endpoint='user/homepage'),
Rule('/stats', subdomain='<username>', endpoint='user/stats')
])
`methods`
A sequence of http methods this rule applies to. If not specified, all
methods are allowed. For example this can be useful if you want different
endpoints for `POST` and `GET`. If methods are defined and the path
matches but the method matched against is not in this list or in the
list of another rule for that path the error raised is of the type
`MethodNotAllowed` rather than `NotFound`. If `GET` is present in the
list of methods and `HEAD` is not, `HEAD` is added automatically.
.. versionchanged:: 0.6.1
`HEAD` is now automatically added to the methods if `GET` is
present. The reason for this is that existing code often did not
work properly in servers not rewriting `HEAD` to `GET`
automatically and it was not documented how `HEAD` should be
treated. This was considered a bug in Werkzeug because of that.
`strict_slashes`
Override the `Map` setting for `strict_slashes` only for this rule. If
not specified the `Map` setting is used.
`build_only`
        Set this to True and the rule will never match but will create a URL
        that can be built. This is useful if you have resources on a subdomain
        or folder that are not handled by the WSGI application (like static
        data).
`redirect_to`
If given this must be either a string or callable. In case of a
callable it's called with the url adapter that triggered the match and
the values of the URL as keyword arguments and has to return the target
for the redirect, otherwise it has to be a string with placeholders in
rule syntax::
def foo_with_slug(adapter, id):
# ask the database for the slug for the old id. this of
# course has nothing to do with werkzeug.
return 'foo/' + Foo.get_slug_for_id(id)
url_map = Map([
Rule('/foo/<slug>', endpoint='foo'),
Rule('/some/old/url/<slug>', redirect_to='foo/<slug>'),
Rule('/other/old/url/<int:id>', redirect_to=foo_with_slug)
])
When the rule is matched the routing system will raise a
`RequestRedirect` exception with the target for the redirect.
Keep in mind that the URL will be joined against the URL root of the
script so don't use a leading slash on the target URL unless you
really mean root of that domain.
`alias`
If enabled this rule serves as an alias for another rule with the same
endpoint and arguments.
`host`
If provided and the URL map has host matching enabled this can be
used to provide a match rule for the whole host. This also means
that the subdomain feature is disabled.
.. versionadded:: 0.7
The `alias` and `host` parameters were added.
"""
def __init__(self, string, defaults=None, subdomain=None, methods=None,
build_only=False, endpoint=None, strict_slashes=None,
redirect_to=None, alias=False, host=None):
if not string.startswith('/'):
raise ValueError('urls must start with a leading slash')
self.rule = string
self.is_leaf = not string.endswith('/')
self.map = None
self.strict_slashes = strict_slashes
self.subdomain = subdomain
self.host = host
self.defaults = defaults
self.build_only = build_only
self.alias = alias
if methods is None:
self.methods = None
else:
if isinstance(methods, str):
raise TypeError('param `methods` should be `Iterable[str]`, not `str`')
self.methods = set([x.upper() for x in methods])
if 'HEAD' not in self.methods and 'GET' in self.methods:
self.methods.add('HEAD')
self.endpoint = endpoint
self.redirect_to = redirect_to
if defaults:
self.arguments = set(map(str, defaults))
else:
self.arguments = set()
self._trace = self._converters = self._regex = self._weights = None
def empty(self):
"""
Return an unbound copy of this rule.
        This can be useful if you want to reuse an already bound URL for another
map. See ``get_empty_kwargs`` to override what keyword arguments are
provided to the new copy.
"""
return type(self)(self.rule, **self.get_empty_kwargs())
def get_empty_kwargs(self):
"""
Provides kwargs for instantiating empty copy with empty()
Use this method to provide custom keyword arguments to the subclass of
``Rule`` when calling ``some_rule.empty()``. Helpful when the subclass
has custom keyword arguments that are needed at instantiation.
Must return a ``dict`` that will be provided as kwargs to the new
instance of ``Rule``, following the initial ``self.rule`` value which
is always provided as the first, required positional argument.
"""
defaults = None
if self.defaults:
defaults = dict(self.defaults)
return dict(defaults=defaults, subdomain=self.subdomain,
methods=self.methods, build_only=self.build_only,
endpoint=self.endpoint, strict_slashes=self.strict_slashes,
redirect_to=self.redirect_to, alias=self.alias,
host=self.host)
def get_rules(self, map):
yield self
def refresh(self):
"""Rebinds and refreshes the URL. Call this if you modified the
rule in place.
:internal:
"""
self.bind(self.map, rebind=True)
def bind(self, map, rebind=False):
"""Bind the url to a map and create a regular expression based on
the information from the rule itself and the defaults from the map.
:internal:
"""
if self.map is not None and not rebind:
raise RuntimeError('url rule %r already bound to map %r' %
(self, self.map))
self.map = map
if self.strict_slashes is None:
self.strict_slashes = map.strict_slashes
if self.subdomain is None:
self.subdomain = map.default_subdomain
self.compile()
def get_converter(self, variable_name, converter_name, args, kwargs):
"""Looks up the converter for the given parameter.
.. versionadded:: 0.9
"""
if converter_name not in self.map.converters:
raise LookupError('the converter %r does not exist' % converter_name)
return self.map.converters[converter_name](self.map, *args, **kwargs)
def compile(self):
"""Compiles the regular expression and stores it."""
assert self.map is not None, 'rule not bound'
if self.map.host_matching:
domain_rule = self.host or ''
else:
domain_rule = self.subdomain or ''
self._trace = []
self._converters = {}
self._weights = []
regex_parts = []
def _build_regex(rule):
for converter, arguments, variable in parse_rule(rule):
if converter is None:
regex_parts.append(re.escape(variable))
self._trace.append((False, variable))
for part in variable.split('/'):
if part:
self._weights.append((0, -len(part)))
else:
if arguments:
c_args, c_kwargs = parse_converter_args(arguments)
else:
c_args = ()
c_kwargs = {}
convobj = self.get_converter(
variable, converter, c_args, c_kwargs)
regex_parts.append('(?P<%s>%s)' % (variable, convobj.regex))
self._converters[variable] = convobj
self._trace.append((True, variable))
self._weights.append((1, convobj.weight))
self.arguments.add(str(variable))
_build_regex(domain_rule)
regex_parts.append('\\|')
self._trace.append((False, '|'))
_build_regex(self.is_leaf and self.rule or self.rule.rstrip('/'))
if not self.is_leaf:
self._trace.append((False, '/'))
if self.build_only:
return
regex = r'^%s%s$' % (
u''.join(regex_parts),
(not self.is_leaf or not self.strict_slashes) and
'(?<!/)(?P<__suffix__>/?)' or ''
)
self._regex = re.compile(regex, re.UNICODE)
def match(self, path, method=None):
"""Check if the rule matches a given path. Path is a string in the
form ``"subdomain|/path"`` and is assembled by the map. If
the map is doing host matching the subdomain part will be the host
instead.
If the rule matches a dict with the converted values is returned,
otherwise the return value is `None`.
:internal:
"""
if not self.build_only:
m = self._regex.search(path)
if m is not None:
groups = m.groupdict()
# we have a folder like part of the url without a trailing
# slash and strict slashes enabled. raise an exception that
# tells the map to redirect to the same url but with a
# trailing slash
if self.strict_slashes and not self.is_leaf and \
not groups.pop('__suffix__') and \
(method is None or self.methods is None or
method in self.methods):
raise RequestSlash()
# if we are not in strict slashes mode we have to remove
# a __suffix__
elif not self.strict_slashes:
del groups['__suffix__']
result = {}
for name, value in iteritems(groups):
try:
value = self._converters[name].to_python(value)
except ValidationError:
return
result[str(name)] = value
if self.defaults:
result.update(self.defaults)
if self.alias and self.map.redirect_defaults:
raise RequestAliasRedirect(result)
return result
def build(self, values, append_unknown=True):
"""Assembles the relative url for that rule and the subdomain.
        If building doesn't work for some reason `None` is returned.
:internal:
"""
tmp = []
add = tmp.append
processed = set(self.arguments)
for is_dynamic, data in self._trace:
if is_dynamic:
try:
add(self._converters[data].to_url(values[data]))
except ValidationError:
return
processed.add(data)
else:
add(url_quote(to_bytes(data, self.map.charset), safe='/:|+'))
domain_part, url = (u''.join(tmp)).split(u'|', 1)
if append_unknown:
query_vars = MultiDict(values)
for key in processed:
if key in query_vars:
del query_vars[key]
if query_vars:
url += u'?' + url_encode(query_vars, charset=self.map.charset,
sort=self.map.sort_parameters,
key=self.map.sort_key)
return domain_part, url
def provides_defaults_for(self, rule):
"""Check if this rule has defaults for a given rule.
:internal:
"""
return not self.build_only and self.defaults and \
self.endpoint == rule.endpoint and self != rule and \
self.arguments == rule.arguments
def suitable_for(self, values, method=None):
"""Check if the dict of values has enough data for url generation.
:internal:
"""
# if a method was given explicitly and that method is not supported
# by this rule, this rule is not suitable.
if method is not None and self.methods is not None \
and method not in self.methods:
return False
defaults = self.defaults or ()
# all arguments required must be either in the defaults dict or
# the value dictionary otherwise it's not suitable
for key in self.arguments:
if key not in defaults and key not in values:
return False
        # in case defaults are given we ensure that either the value was
# skipped or the value is the same as the default value.
if defaults:
for key, value in iteritems(defaults):
if key in values and value != values[key]:
return False
return True
def match_compare_key(self):
"""The match compare key for sorting.
Current implementation:
1. rules without any arguments come first for performance
reasons only as we expect them to match faster and some
common ones usually don't have any arguments (index pages etc.)
2. The more complex rules come first so the second argument is the
negative length of the number of weights.
3. lastly we order by the actual weights.
:internal:
"""
return bool(self.arguments), -len(self._weights), self._weights
def build_compare_key(self):
"""The build compare key for sorting.
:internal:
"""
return self.alias and 1 or 0, -len(self.arguments), \
-len(self.defaults or ())
def __eq__(self, other):
return self.__class__ is other.__class__ and \
self._trace == other._trace
__hash__ = None
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
return self.rule
@native_string_result
def __repr__(self):
if self.map is None:
return u'<%s (unbound)>' % self.__class__.__name__
tmp = []
for is_dynamic, data in self._trace:
if is_dynamic:
tmp.append(u'<%s>' % data)
else:
tmp.append(data)
return u'<%s %s%s -> %s>' % (
self.__class__.__name__,
repr((u''.join(tmp)).lstrip(u'|')).lstrip(u'u'),
self.methods is not None
and u' (%s)' % u', '.join(self.methods)
or u'',
self.endpoint
)
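# Editor's illustrative sketch (not part of the original module): a Rule
# subclass with an extra constructor argument should extend
# get_empty_kwargs() so that empty() -- used by the rule factories above --
# carries the extra state over to the unbound copy. All names below are
# hypothetical.
class _DemoTaggedRule(Rule):
    def __init__(self, string, tag=None, **kwargs):
        Rule.__init__(self, string, **kwargs)
        self.tag = tag
    def get_empty_kwargs(self):
        kwargs = Rule.get_empty_kwargs(self)
        kwargs['tag'] = self.tag  # preserve the custom attribute
        return kwargs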
class BaseConverter(object):
"""Base class for all converters."""
regex = '[^/]+'
weight = 100
def __init__(self, map):
self.map = map
def to_python(self, value):
return value
def to_url(self, value):
return url_quote(value, charset=self.map.charset)
class UnicodeConverter(BaseConverter):
"""This converter is the default converter and accepts any string but
only one path segment. Thus the string can not include a slash.
This is the default validator.
Example::
Rule('/pages/<page>'),
Rule('/<string(length=2):lang_code>')
:param map: the :class:`Map`.
    :param minlength: the minimum length of the string. Must be greater
        than or equal to 1.
:param maxlength: the maximum length of the string.
:param length: the exact length of the string.
"""
def __init__(self, map, minlength=1, maxlength=None, length=None):
BaseConverter.__init__(self, map)
if length is not None:
length = '{%d}' % int(length)
else:
if maxlength is None:
maxlength = ''
else:
maxlength = int(maxlength)
length = '{%s,%s}' % (
int(minlength),
maxlength
)
self.regex = '[^/]' + length
class AnyConverter(BaseConverter):
"""Matches one of the items provided. Items can either be Python
identifiers or strings::
Rule('/<any(about, help, imprint, class, "foo,bar"):page_name>')
:param map: the :class:`Map`.
:param items: this function accepts the possible items as positional
arguments.
"""
def __init__(self, map, *items):
BaseConverter.__init__(self, map)
self.regex = '(?:%s)' % '|'.join([re.escape(x) for x in items])
class PathConverter(BaseConverter):
"""Like the default :class:`UnicodeConverter`, but it also matches
slashes. This is useful for wikis and similar applications::
Rule('/<path:wikipage>')
Rule('/<path:wikipage>/edit')
:param map: the :class:`Map`.
"""
regex = '[^/].*?'
weight = 200
class NumberConverter(BaseConverter):
"""Baseclass for `IntegerConverter` and `FloatConverter`.
:internal:
"""
weight = 50
def __init__(self, map, fixed_digits=0, min=None, max=None):
BaseConverter.__init__(self, map)
self.fixed_digits = fixed_digits
self.min = min
self.max = max
def to_python(self, value):
if (self.fixed_digits and len(value) != self.fixed_digits):
raise ValidationError()
value = self.num_convert(value)
if (self.min is not None and value < self.min) or \
(self.max is not None and value > self.max):
raise ValidationError()
return value
def to_url(self, value):
value = self.num_convert(value)
if self.fixed_digits:
value = ('%%0%sd' % self.fixed_digits) % value
return str(value)
class IntegerConverter(NumberConverter):
"""This converter only accepts integer values::
Rule('/page/<int:page>')
This converter does not support negative values.
:param map: the :class:`Map`.
:param fixed_digits: the number of fixed digits in the URL. If you set
this to ``4`` for example, the application will
only match if the url looks like ``/0001/``. The
default is variable length.
:param min: the minimal value.
:param max: the maximal value.
"""
regex = r'\d+'
num_convert = int
class FloatConverter(NumberConverter):
"""This converter only accepts floating point values::
Rule('/probability/<float:probability>')
This converter does not support negative values.
:param map: the :class:`Map`.
:param min: the minimal value.
:param max: the maximal value.
"""
regex = r'\d+\.\d+'
num_convert = float
def __init__(self, map, min=None, max=None):
NumberConverter.__init__(self, map, 0, min, max)
class UUIDConverter(BaseConverter):
"""This converter only accepts UUID strings::
Rule('/object/<uuid:identifier>')
.. versionadded:: 0.10
:param map: the :class:`Map`.
"""
regex = r'[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-' \
r'[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}'
def to_python(self, value):
return uuid.UUID(value)
def to_url(self, value):
return str(value)
#: the default converter mapping for the map.
DEFAULT_CONVERTERS = {
'default': UnicodeConverter,
'string': UnicodeConverter,
'any': AnyConverter,
'path': PathConverter,
'int': IntegerConverter,
'float': FloatConverter,
'uuid': UUIDConverter,
}
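# Editor's illustrative sketch (not part of the original module): extra
# converters can be registered per map through the `converters` argument of
# Map (defined below); the dict key is the converter name used in the rule
# string. All names below are hypothetical.
class _DemoListConverter(BaseConverter):
    """Matches comma separated items and converts them to a Python list."""
    regex = r'\w+(?:,\w+)*'
    def to_python(self, value):
        return value.split(u',')
    def to_url(self, value):
        return u','.join(BaseConverter.to_url(self, item) for item in value)
# usage sketch:
#   url_map = Map([Rule('/tags/<list:tags>', endpoint='tags')],
#                 converters={'list': _DemoListConverter})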
class Map(object):
"""The map class stores all the URL rules and some configuration
parameters. Some of the configuration values are only stored on the
`Map` instance since those affect all rules, others are just defaults
and can be overridden for each rule. Note that you have to specify all
arguments besides the `rules` as keyword arguments!
:param rules: sequence of url rules for this map.
:param default_subdomain: The default subdomain for rules without a
subdomain defined.
:param charset: charset of the url. defaults to ``"utf-8"``
:param strict_slashes: Take care of trailing slashes.
:param redirect_defaults: This will redirect to the default rule if it
wasn't visited that way. This helps creating
unique URLs.
:param converters: A dict of converters that adds additional converters
to the list of converters. If you redefine one
converter this will override the original one.
:param sort_parameters: If set to `True` the url parameters are sorted.
See `url_encode` for more details.
:param sort_key: The sort key function for `url_encode`.
:param encoding_errors: the error method to use for decoding
:param host_matching: if set to `True` it enables the host matching
feature and disables the subdomain one. If
enabled the `host` parameter to rules is used
instead of the `subdomain` one.
    .. versionadded:: 0.5
        `sort_parameters` and `sort_key` were added.
    .. versionadded:: 0.7
        `encoding_errors` and `host_matching` were added.
"""
#: .. versionadded:: 0.6
#: a dict of default converters to be used.
default_converters = ImmutableDict(DEFAULT_CONVERTERS)
def __init__(self, rules=None, default_subdomain='', charset='utf-8',
strict_slashes=True, redirect_defaults=True,
converters=None, sort_parameters=False, sort_key=None,
encoding_errors='replace', host_matching=False):
self._rules = []
self._rules_by_endpoint = {}
self._remap = True
self._remap_lock = Lock()
self.default_subdomain = default_subdomain
self.charset = charset
self.encoding_errors = encoding_errors
self.strict_slashes = strict_slashes
self.redirect_defaults = redirect_defaults
self.host_matching = host_matching
self.converters = self.default_converters.copy()
if converters:
self.converters.update(converters)
self.sort_parameters = sort_parameters
self.sort_key = sort_key
for rulefactory in rules or ():
self.add(rulefactory)
def is_endpoint_expecting(self, endpoint, *arguments):
"""Iterate over all rules and check if the endpoint expects
the arguments provided. This is for example useful if you have
some URLs that expect a language code and others that do not and
you want to wrap the builder a bit so that the current language
code is automatically added if not provided but endpoints expect
it.
:param endpoint: the endpoint to check.
:param arguments: this function accepts one or more arguments
as positional arguments. Each one of them is
checked.
"""
self.update()
arguments = set(arguments)
for rule in self._rules_by_endpoint[endpoint]:
if arguments.issubset(rule.arguments):
return True
return False
def iter_rules(self, endpoint=None):
"""Iterate over all rules or the rules of an endpoint.
:param endpoint: if provided only the rules for that endpoint
are returned.
:return: an iterator
"""
self.update()
if endpoint is not None:
return iter(self._rules_by_endpoint[endpoint])
return iter(self._rules)
def add(self, rulefactory):
"""Add a new rule or factory to the map and bind it. Requires that the
rule is not bound to another map.
:param rulefactory: a :class:`Rule` or :class:`RuleFactory`
"""
for rule in rulefactory.get_rules(self):
rule.bind(self)
self._rules.append(rule)
self._rules_by_endpoint.setdefault(rule.endpoint, []).append(rule)
self._remap = True
def bind(self, server_name, script_name=None, subdomain=None,
url_scheme='http', default_method='GET', path_info=None,
query_args=None):
"""Return a new :class:`MapAdapter` with the details specified to the
call. Note that `script_name` will default to ``'/'`` if not further
specified or `None`. The `server_name` at least is a requirement
because the HTTP RFC requires absolute URLs for redirects and so all
redirect exceptions raised by Werkzeug will contain the full canonical
URL.
If no path_info is passed to :meth:`match` it will use the default path
info passed to bind. While this doesn't really make sense for
manual bind calls, it's useful if you bind a map to a WSGI
environment which already contains the path info.
        `subdomain` will default to the `default_subdomain` for this map if
        not defined. If there is no `default_subdomain` you cannot use the
subdomain feature.
.. versionadded:: 0.7
`query_args` added
.. versionadded:: 0.8
`query_args` can now also be a string.
"""
server_name = server_name.lower()
if self.host_matching:
if subdomain is not None:
raise RuntimeError('host matching enabled and a '
'subdomain was provided')
elif subdomain is None:
subdomain = self.default_subdomain
if script_name is None:
script_name = '/'
try:
server_name = _encode_idna(server_name)
except UnicodeError:
raise BadHost()
return MapAdapter(self, server_name, script_name, subdomain,
url_scheme, path_info, default_method, query_args)
def bind_to_environ(self, environ, server_name=None, subdomain=None):
"""Like :meth:`bind` but you can pass it an WSGI environment and it
will fetch the information from that dictionary. Note that because of
limitations in the protocol there is no way to get the current
subdomain and real `server_name` from the environment. If you don't
provide it, Werkzeug will use `SERVER_NAME` and `SERVER_PORT` (or
        `HTTP_HOST` if provided) as the `server_name`, with the subdomain
        feature disabled.
If `subdomain` is `None` but an environment and a server name is
provided it will calculate the current subdomain automatically.
Example: `server_name` is ``'example.com'`` and the `SERVER_NAME`
in the wsgi `environ` is ``'staging.dev.example.com'`` the calculated
subdomain will be ``'staging.dev'``.
If the object passed as environ has an environ attribute, the value of
this attribute is used instead. This allows you to pass request
        objects. Additionally `PATH_INFO` is added as a default of the
:class:`MapAdapter` so that you don't have to pass the path info to
the match method.
.. versionchanged:: 0.5
previously this method accepted a bogus `calculate_subdomain`
parameter that did not have any effect. It was removed because
of that.
.. versionchanged:: 0.8
This will no longer raise a ValueError when an unexpected server
name was passed.
:param environ: a WSGI environment.
:param server_name: an optional server name hint (see above).
:param subdomain: optionally the current subdomain (see above).
"""
environ = _get_environ(environ)
if 'HTTP_HOST' in environ:
wsgi_server_name = environ['HTTP_HOST']
if environ['wsgi.url_scheme'] == 'http' \
and wsgi_server_name.endswith(':80'):
wsgi_server_name = wsgi_server_name[:-3]
elif environ['wsgi.url_scheme'] == 'https' \
and wsgi_server_name.endswith(':443'):
wsgi_server_name = wsgi_server_name[:-4]
else:
wsgi_server_name = environ['SERVER_NAME']
if (environ['wsgi.url_scheme'], environ['SERVER_PORT']) not \
in (('https', '443'), ('http', '80')):
wsgi_server_name += ':' + environ['SERVER_PORT']
wsgi_server_name = wsgi_server_name.lower()
if server_name is None:
server_name = wsgi_server_name
else:
server_name = server_name.lower()
if subdomain is None and not self.host_matching:
cur_server_name = wsgi_server_name.split('.')
real_server_name = server_name.split('.')
offset = -len(real_server_name)
if cur_server_name[offset:] != real_server_name:
# This can happen even with valid configs if the server was
                # accessed directly by IP address under some situations.
# Instead of raising an exception like in Werkzeug 0.7 or
# earlier we go by an invalid subdomain which will result
# in a 404 error on matching.
subdomain = '<invalid>'
else:
subdomain = '.'.join(filter(None, cur_server_name[:offset]))
def _get_wsgi_string(name):
val = environ.get(name)
if val is not None:
return wsgi_decoding_dance(val, self.charset)
script_name = _get_wsgi_string('SCRIPT_NAME')
path_info = _get_wsgi_string('PATH_INFO')
query_args = _get_wsgi_string('QUERY_STRING')
return Map.bind(self, server_name, script_name,
subdomain, environ['wsgi.url_scheme'],
environ['REQUEST_METHOD'], path_info,
query_args=query_args)
def update(self):
"""Called before matching and building to keep the compiled rules
in the correct order after things changed.
"""
if not self._remap:
return
with self._remap_lock:
if not self._remap:
return
self._rules.sort(key=lambda x: x.match_compare_key())
for rules in itervalues(self._rules_by_endpoint):
rules.sort(key=lambda x: x.build_compare_key())
self._remap = False
def __repr__(self):
rules = self.iter_rules()
return '%s(%s)' % (self.__class__.__name__, pformat(list(rules)))
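# Editor's illustrative sketch (not part of the original module): a plain
# dict with the usual WSGI keys is enough for bind_to_environ(); the adapter
# picks host, script name, path info and method up from it. The helper name
# is hypothetical.
def _demo_bind_to_environ():
    url_map = Map([Rule('/', endpoint='index')])
    environ = {
        'HTTP_HOST': 'example.com',
        'SERVER_NAME': 'example.com',
        'SERVER_PORT': '80',
        'SCRIPT_NAME': '',
        'PATH_INFO': '/',
        'QUERY_STRING': '',
        'REQUEST_METHOD': 'GET',
        'wsgi.url_scheme': 'http',
    }
    adapter = url_map.bind_to_environ(environ)
    return adapter.match()  # ('index', {})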
class MapAdapter(object):
"""Returned by :meth:`Map.bind` or :meth:`Map.bind_to_environ` and does
the URL matching and building based on runtime information.
"""
def __init__(self, map, server_name, script_name, subdomain,
url_scheme, path_info, default_method, query_args=None):
self.map = map
self.server_name = to_unicode(server_name)
script_name = to_unicode(script_name)
if not script_name.endswith(u'/'):
script_name += u'/'
self.script_name = script_name
self.subdomain = to_unicode(subdomain)
self.url_scheme = to_unicode(url_scheme)
self.path_info = to_unicode(path_info)
self.default_method = to_unicode(default_method)
self.query_args = query_args
def dispatch(self, view_func, path_info=None, method=None,
catch_http_exceptions=False):
"""Does the complete dispatching process. `view_func` is called with
the endpoint and a dict with the values for the view. It should
look up the view function, call it, and return a response object
or WSGI application. http exceptions are not caught by default
so that applications can display nicer error messages by just
catching them by hand. If you want to stick with the default
error messages you can pass it ``catch_http_exceptions=True`` and
it will catch the http exceptions.
Here a small example for the dispatch usage::
from werkzeug.wrappers import Request, Response
from werkzeug.wsgi import responder
from werkzeug.routing import Map, Rule
def on_index(request):
return Response('Hello from the index')
url_map = Map([Rule('/', endpoint='index')])
views = {'index': on_index}
@responder
def application(environ, start_response):
request = Request(environ)
urls = url_map.bind_to_environ(environ)
return urls.dispatch(lambda e, v: views[e](request, **v),
catch_http_exceptions=True)
Keep in mind that this method might return exception objects, too, so
use :class:`Response.force_type` to get a response object.
:param view_func: a function that is called with the endpoint as
first argument and the value dict as second. Has
to dispatch to the actual view function with this
information. (see above)
:param path_info: the path info to use for matching. Overrides the
path info specified on binding.
:param method: the HTTP method used for matching. Overrides the
method specified on binding.
:param catch_http_exceptions: set to `True` to catch any of the
werkzeug :class:`HTTPException`\s.
"""
try:
try:
endpoint, args = self.match(path_info, method)
except RequestRedirect as e:
return e
return view_func(endpoint, args)
except HTTPException as e:
if catch_http_exceptions:
return e
raise
def match(self, path_info=None, method=None, return_rule=False,
query_args=None):
"""The usage is simple: you just pass the match method the current
path info as well as the method (which defaults to `GET`). The
following things can then happen:
- you receive a `NotFound` exception that indicates that no URL is
matching. A `NotFound` exception is also a WSGI application you
          can call to get a default "page not found" page (happens to be the
same object as `werkzeug.exceptions.NotFound`)
- you receive a `MethodNotAllowed` exception that indicates that there
is a match for this URL but not for the current request method.
This is useful for RESTful applications.
        - you receive a `RequestRedirect` exception with a `new_url`
          attribute. This exception is used to notify you about a redirect
          that Werkzeug requests from your WSGI application. This is for
          example the case if you request ``/foo`` although the correct URL
          is ``/foo/``.
You can use the `RequestRedirect` instance as response-like object
similar to all other subclasses of `HTTPException`.
- you get a tuple in the form ``(endpoint, arguments)`` if there is
a match (unless `return_rule` is True, in which case you get a tuple
in the form ``(rule, arguments)``)
If the path info is not passed to the match method the default path
info of the map is used (defaults to the root URL if not defined
explicitly).
All of the exceptions raised are subclasses of `HTTPException` so they
        can be used as WSGI responses. They will all render generic error or
redirect pages.
Here is a small example for matching:
>>> m = Map([
... Rule('/', endpoint='index'),
... Rule('/downloads/', endpoint='downloads/index'),
... Rule('/downloads/<int:id>', endpoint='downloads/show')
... ])
>>> urls = m.bind("example.com", "/")
>>> urls.match("/", "GET")
('index', {})
>>> urls.match("/downloads/42")
('downloads/show', {'id': 42})
And here is what happens on redirect and missing URLs:
>>> urls.match("/downloads")
Traceback (most recent call last):
...
RequestRedirect: http://example.com/downloads/
>>> urls.match("/missing")
Traceback (most recent call last):
...
NotFound: 404 Not Found
:param path_info: the path info to use for matching. Overrides the
path info specified on binding.
:param method: the HTTP method used for matching. Overrides the
method specified on binding.
:param return_rule: return the rule that matched instead of just the
endpoint (defaults to `False`).
:param query_args: optional query arguments that are used for
automatic redirects as string or dictionary. It's
currently not possible to use the query arguments
for URL matching.
.. versionadded:: 0.6
`return_rule` was added.
.. versionadded:: 0.7
`query_args` was added.
.. versionchanged:: 0.8
`query_args` can now also be a string.
"""
self.map.update()
if path_info is None:
path_info = self.path_info
else:
path_info = to_unicode(path_info, self.map.charset)
if query_args is None:
query_args = self.query_args
method = (method or self.default_method).upper()
path = u'%s|%s' % (
self.map.host_matching and self.server_name or self.subdomain,
path_info and '/%s' % path_info.lstrip('/')
)
have_match_for = set()
for rule in self.map._rules:
try:
rv = rule.match(path, method)
except RequestSlash:
raise RequestRedirect(self.make_redirect_url(
url_quote(path_info, self.map.charset,
safe='/:|+') + '/', query_args))
except RequestAliasRedirect as e:
raise RequestRedirect(self.make_alias_redirect_url(
path, rule.endpoint, e.matched_values, method, query_args))
if rv is None:
continue
if rule.methods is not None and method not in rule.methods:
have_match_for.update(rule.methods)
continue
if self.map.redirect_defaults:
redirect_url = self.get_default_redirect(rule, method, rv,
query_args)
if redirect_url is not None:
raise RequestRedirect(redirect_url)
if rule.redirect_to is not None:
if isinstance(rule.redirect_to, string_types):
def _handle_match(match):
value = rv[match.group(1)]
return rule._converters[match.group(1)].to_url(value)
redirect_url = _simple_rule_re.sub(_handle_match,
rule.redirect_to)
else:
redirect_url = rule.redirect_to(self, **rv)
raise RequestRedirect(str(url_join('%s://%s%s%s' % (
self.url_scheme or 'http',
self.subdomain and self.subdomain + '.' or '',
self.server_name,
self.script_name
), redirect_url)))
if return_rule:
return rule, rv
else:
return rule.endpoint, rv
if have_match_for:
raise MethodNotAllowed(valid_methods=list(have_match_for))
raise NotFound()
def test(self, path_info=None, method=None):
"""Test if a rule would match. Works like `match` but returns `True`
        if the URL matches, or `False` if it does not.
:param path_info: the path info to use for matching. Overrides the
path info specified on binding.
:param method: the HTTP method used for matching. Overrides the
method specified on binding.
"""
try:
self.match(path_info, method)
except RequestRedirect:
pass
except HTTPException:
return False
return True
def allowed_methods(self, path_info=None):
"""Returns the valid methods that match for a given path.
.. versionadded:: 0.7
"""
try:
self.match(path_info, method='--')
except MethodNotAllowed as e:
return e.valid_methods
        except HTTPException:
            pass
return []
def get_host(self, domain_part):
"""Figures out the full host name for the given domain part. The
domain part is a subdomain in case host matching is disabled or
a full host name.
"""
if self.map.host_matching:
if domain_part is None:
return self.server_name
return to_unicode(domain_part, 'ascii')
subdomain = domain_part
if subdomain is None:
subdomain = self.subdomain
else:
subdomain = to_unicode(subdomain, 'ascii')
return (subdomain and subdomain + u'.' or u'') + self.server_name
def get_default_redirect(self, rule, method, values, query_args):
"""A helper that returns the URL to redirect to if it finds one.
This is used for default redirecting only.
:internal:
"""
assert self.map.redirect_defaults
for r in self.map._rules_by_endpoint[rule.endpoint]:
            # every rule that comes after this one, including ourselves,
# has a lower priority for the defaults. We order the ones
# with the highest priority up for building.
if r is rule:
break
if r.provides_defaults_for(rule) and \
r.suitable_for(values, method):
values.update(r.defaults)
domain_part, path = r.build(values)
return self.make_redirect_url(
path, query_args, domain_part=domain_part)
def encode_query_args(self, query_args):
if not isinstance(query_args, string_types):
query_args = url_encode(query_args, self.map.charset)
return query_args
def make_redirect_url(self, path_info, query_args=None, domain_part=None):
"""Creates a redirect URL.
:internal:
"""
suffix = ''
if query_args:
suffix = '?' + self.encode_query_args(query_args)
return str('%s://%s/%s%s' % (
self.url_scheme or 'http',
self.get_host(domain_part),
posixpath.join(self.script_name[:-1].lstrip('/'),
path_info.lstrip('/')),
suffix
))
def make_alias_redirect_url(self, path, endpoint, values, method, query_args):
"""Internally called to make an alias redirect URL."""
url = self.build(endpoint, values, method, append_unknown=False,
force_external=True)
if query_args:
url += '?' + self.encode_query_args(query_args)
assert url != path, 'detected invalid alias setting. No canonical ' \
'URL found'
return url
def _partial_build(self, endpoint, values, method, append_unknown):
"""Helper for :meth:`build`. Returns subdomain and path for the
rule that accepts this endpoint, values and method.
:internal:
"""
# in case the method is none, try with the default method first
if method is None:
rv = self._partial_build(endpoint, values, self.default_method,
append_unknown)
if rv is not None:
return rv
# default method did not match or a specific method is passed,
# check all and go with first result.
for rule in self.map._rules_by_endpoint.get(endpoint, ()):
if rule.suitable_for(values, method):
rv = rule.build(values, append_unknown)
if rv is not None:
return rv
def build(self, endpoint, values=None, method=None, force_external=False,
append_unknown=True):
"""Building URLs works pretty much the other way round. Instead of
`match` you call `build` and pass it the endpoint and a dict of
arguments for the placeholders.
The `build` function also accepts an argument called `force_external`
which, if you set it to `True` will force external URLs. Per default
external URLs (include the server name) will only be used if the
target URL is on a different subdomain.
>>> m = Map([
... Rule('/', endpoint='index'),
... Rule('/downloads/', endpoint='downloads/index'),
... Rule('/downloads/<int:id>', endpoint='downloads/show')
... ])
>>> urls = m.bind("example.com", "/")
>>> urls.build("index", {})
'/'
>>> urls.build("downloads/show", {'id': 42})
'/downloads/42'
>>> urls.build("downloads/show", {'id': 42}, force_external=True)
'http://example.com/downloads/42'
Because URLs cannot contain non ASCII data you will always get
bytestrings back. Non ASCII characters are urlencoded with the
charset defined on the map instance.
Additional values are converted to unicode and appended to the URL as
URL querystring parameters:
>>> urls.build("index", {'q': 'My Searchstring'})
'/?q=My+Searchstring'
When processing those additional values, lists are furthermore
interpreted as multiple values (as per
:py:class:`werkzeug.datastructures.MultiDict`):
>>> urls.build("index", {'q': ['a', 'b', 'c']})
'/?q=a&q=b&q=c'
        If no rule exists for the endpoint when building, a `BuildError`
        exception is raised.
The build method accepts an argument called `method` which allows you
to specify the method you want to have an URL built for if you have
different methods for the same endpoint specified.
.. versionadded:: 0.6
the `append_unknown` parameter was added.
:param endpoint: the endpoint of the URL to build.
:param values: the values for the URL to build. Unhandled values are
appended to the URL as query parameters.
:param method: the HTTP method for the rule if there are different
URLs for different methods on the same endpoint.
:param force_external: enforce full canonical external URLs. If the URL
scheme is not provided, this will generate
a protocol-relative URL.
:param append_unknown: unknown parameters are appended to the generated
URL as query string argument. Disable this
if you want the builder to ignore those.
"""
self.map.update()
if values:
if isinstance(values, MultiDict):
valueiter = iteritems(values, multi=True)
else:
valueiter = iteritems(values)
values = dict((k, v) for k, v in valueiter if v is not None)
else:
values = {}
rv = self._partial_build(endpoint, values, method, append_unknown)
if rv is None:
raise BuildError(endpoint, values, method, self)
domain_part, path = rv
host = self.get_host(domain_part)
# shortcut this.
if not force_external and (
(self.map.host_matching and host == self.server_name) or
(not self.map.host_matching and domain_part == self.subdomain)
):
return str(url_join(self.script_name, './' + path.lstrip('/')))
return str('%s//%s%s/%s' % (
self.url_scheme + ':' if self.url_scheme else '',
host,
self.script_name[:-1],
path.lstrip('/')
))
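# Editor's illustrative sketch (not part of the original module):
# allowed_methods() works by matching with an impossible method ('--') and
# collecting the methods the rules for that path would have accepted. The
# helper name is hypothetical.
def _demo_allowed_methods():
    m = Map([Rule('/item', endpoint='show', methods=['GET']),
             Rule('/item', endpoint='create', methods=['POST'])])
    urls = m.bind('example.com', '/')
    # 'HEAD' appears because Rule.__init__ adds it whenever 'GET' is allowed
    return sorted(urls.allowed_methods('/item'))  # ['GET', 'HEAD', 'POST']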
|
westinedu/newertrends
|
refs/heads/master
|
zinnia/views/archives.py
|
2
|
"""Views for Zinnia archives"""
import datetime
from django.views.generic.dates import BaseArchiveIndexView
from django.views.generic.dates import BaseYearArchiveView
from django.views.generic.dates import BaseMonthArchiveView
from django.views.generic.dates import BaseWeekArchiveView
from django.views.generic.dates import BaseDayArchiveView
from django.views.generic.dates import BaseTodayArchiveView
from zinnia.models import Entry
from zinnia.views.mixins.archives import ArchiveMixin
from zinnia.views.mixins.archives import PreviousNextPublishedMixin
from zinnia.views.mixins.callable_queryset import CallableQuerysetMixin
from zinnia.views.mixins.templates import \
EntryQuerysetArchiveTemplateResponseMixin
from zinnia.views.mixins.templates import \
EntryQuerysetArchiveTodayTemplateResponseMixin
from zinnia.views.mixins.tz_fixes import EntryDayTZFix
from zinnia.views.mixins.tz_fixes import EntryWeekTZFix
from zinnia.views.mixins.tz_fixes import EntryMonthTZFix
class EntryArchiveMixin(ArchiveMixin,
PreviousNextPublishedMixin,
CallableQuerysetMixin,
EntryQuerysetArchiveTemplateResponseMixin):
"""
    Mixin combining:
    - ArchiveMixin configuration centralizing conf for archive views
    - PreviousNextPublishedMixin for returning published archives
    - CallableQuerysetMixin to force the update of the queryset
    - EntryQuerysetArchiveTemplateResponseMixin to provide
      custom templates for archives
"""
queryset = Entry.published.all
class EntryIndex(EntryArchiveMixin,
EntryQuerysetArchiveTodayTemplateResponseMixin,
BaseArchiveIndexView):
"""View returning the archive index"""
context_object_name = 'entry_list'
class EntryYear(EntryArchiveMixin, BaseYearArchiveView):
"""View returning the archive for a year"""
make_object_list = True
template_name_suffix = '_archive_year'
class EntryMonth(EntryMonthTZFix, EntryArchiveMixin, BaseMonthArchiveView):
"""View returning the archive for a month"""
template_name_suffix = '_archive_month'
class EntryWeek(EntryWeekTZFix, EntryArchiveMixin, BaseWeekArchiveView):
"""View returning the archive for a week"""
template_name_suffix = '_archive_week'
def get_dated_items(self):
"""Override get_dated_items to add a useful 'week_end_day'
variable in the extra context of the view"""
self.date_list, self.object_list, extra_context = super(
EntryWeek, self).get_dated_items()
extra_context['week_end_day'] = extra_context[
'week'] + datetime.timedelta(days=6)
return self.date_list, self.object_list, extra_context
class EntryDay(EntryDayTZFix, EntryArchiveMixin, BaseDayArchiveView):
"""View returning the archive for a day"""
template_name_suffix = '_archive_day'
class EntryToday(EntryDayTZFix, EntryArchiveMixin, BaseTodayArchiveView):
"""View returning the archive for the current day"""
template_name_suffix = '_archive_today'
def get_dated_items(self):
"""Return (date_list, items, extra_context) for this request.
And defines self.year/month/day for
EntryQuerysetArchiveTemplateResponseMixin."""
today = datetime.date.today()
self.year, self.month, self.day = today.isoformat().split('-')
return self._get_dated_items(today)
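# Editor's illustrative sketch (not part of zinnia): a minimal URLconf wiring
# for the views above. The import path matches the Django versions current at
# the time; the pattern names and regexes are assumptions for illustration.
def _demo_urlpatterns():
    from django.conf.urls.defaults import patterns, url
    return patterns(
        '',
        url(r'^$', EntryIndex.as_view(),
            name='zinnia_entry_archive_index'),
        url(r'^(?P<year>\d{4})/$', EntryYear.as_view(),
            name='zinnia_entry_archive_year'),
    )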
|
Daniel-CA/odoo
|
refs/heads/8.0
|
addons/base_action_rule/tests/base_action_rule_test.py
|
395
|
from openerp import SUPERUSER_ID
from openerp.tests import common
from .. import test_models
class base_action_rule_test(common.TransactionCase):
def setUp(self):
"""*****setUp*****"""
super(base_action_rule_test, self).setUp()
cr, uid = self.cr, self.uid
self.demo = self.registry('ir.model.data').get_object(cr, uid, 'base', 'user_demo').id
self.admin = SUPERUSER_ID
self.model = self.registry('base.action.rule.lead.test')
self.base_action_rule = self.registry('base.action.rule')
def create_filter_done(self, cr, uid, context=None):
filter_pool = self.registry('ir.filters')
return filter_pool.create(cr, uid, {
'name': "Lead is in done state",
'is_default': False,
'model_id': 'base.action.rule.lead.test',
'domain': "[('state','=','done')]",
}, context=context)
def create_filter_draft(self, cr, uid, context=None):
filter_pool = self.registry('ir.filters')
return filter_pool.create(cr, uid, {
'name': "Lead is in draft state",
'is_default': False,
'model_id': "base.action.rule.lead.test",
'domain' : "[('state','=','draft')]",
}, context=context)
def create_lead_test_1(self, cr, uid, context=None):
"""
Create a new lead_test
"""
return self.model.create(cr, uid, {
'name': "Lead Test 1",
'user_id': self.admin,
}, context=context)
def create_rule(self, cr, uid, kind, filter_id=False, filter_pre_id=False, context=None):
"""
The "Rule 1" says that when a lead goes to the 'draft' state, the responsible for that lead changes to user "demo"
"""
return self.base_action_rule.create(cr,uid,{
'name': "Rule 1",
'model_id': self.registry('ir.model').search(cr, uid, [('model','=','base.action.rule.lead.test')], context=context)[0],
'kind': kind,
'filter_pre_id': filter_pre_id,
'filter_id': filter_id,
'act_user_id': self.demo,
}, context=context)
def delete_rules(self, cr, uid, context=None):
""" delete all the rules on model 'base.action.rule.lead.test' """
action_ids = self.base_action_rule.search(cr, uid, [('model', '=', self.model._name)], context=context)
return self.base_action_rule.unlink(cr, uid, action_ids, context=context)
def test_00_check_to_state_draft_pre(self):
"""
        Check that a new record (with state = draft) doesn't change its responsible when there is a precondition filter which checks that the state is draft.
"""
cr, uid = self.cr, self.uid
filter_draft = self.create_filter_draft(cr, uid)
self.create_rule(cr, uid, 'on_write', filter_pre_id=filter_draft)
new_lead_id = self.create_lead_test_1(cr, uid)
new_lead = self.model.browse(cr, uid, new_lead_id)
self.assertEquals(new_lead.state, 'draft')
self.assertEquals(new_lead.user_id.id, self.admin)
self.delete_rules(cr, uid)
def test_01_check_to_state_draft_post(self):
"""
        Check that a new record changes its responsible when there is a postcondition filter which checks that the state is draft.
"""
cr, uid = self.cr, self.uid
filter_draft = self.create_filter_draft(cr, uid)
self.create_rule(cr, uid, 'on_create')
new_lead_id = self.create_lead_test_1(cr, uid)
new_lead = self.model.browse(cr, uid, new_lead_id)
self.assertEquals(new_lead.state, 'draft')
self.assertEquals(new_lead.user_id.id, self.demo)
self.delete_rules(cr, uid)
def test_02_check_from_draft_to_done_with_steps(self):
"""
        A new record will be created and will go from the draft to the done state via the other states (open, pending and cancel).
        We will create a rule that says in its precondition that the record must be in the "draft" state, while a postcondition filter says
        that the record must be done. If the state goes from 'draft' to 'done' in a single write, the responsible will change. If those two conditions aren't
        verified, the responsible will stay the same.
        The responsible in that test will never change.
"""
cr, uid = self.cr, self.uid
filter_draft = self.create_filter_draft(cr, uid)
filter_done = self.create_filter_done(cr, uid)
self.create_rule(cr, uid, 'on_write', filter_pre_id=filter_draft, filter_id=filter_done)
new_lead_id = self.create_lead_test_1(cr, uid)
new_lead = self.model.browse(cr, uid, new_lead_id)
self.assertEquals(new_lead.state, 'draft')
self.assertEquals(new_lead.user_id.id, self.admin)
""" change the state of new_lead to open and check that responsible doen't change"""
new_lead.write({'state': 'open'})
new_lead = self.model.browse(cr, uid, new_lead_id)
self.assertEquals(new_lead.state, 'open')
self.assertEquals(new_lead.user_id.id, self.admin)
""" change the state of new_lead to pending and check that responsible doen't change"""
new_lead.write({'state': 'pending'})
new_lead = self.model.browse(cr, uid, new_lead_id)
self.assertEquals(new_lead.state, 'pending')
self.assertEquals(new_lead.user_id.id, self.admin)
""" change the state of new_lead to cancel and check that responsible doen't change"""
new_lead.write({'state': 'cancel'})
new_lead = self.model.browse(cr, uid, new_lead_id)
self.assertEquals(new_lead.state, 'cancel')
self.assertEquals(new_lead.user_id.id, self.admin)
""" change the state of new_lead to done and check that responsible doen't change """
new_lead.write({'state': 'done'})
new_lead = self.model.browse(cr, uid, new_lead_id)
self.assertEquals(new_lead.state, 'done')
self.assertEquals(new_lead.user_id.id, self.admin)
self.delete_rules(cr, uid)
def test_02_check_from_draft_to_done_without_steps(self):
"""
        A new record will be created and will go from draft to done in one operation.
        We will create a rule that says in its precondition that the record must be in the "draft" state, while a postcondition filter says
        that the record must be done. If the state goes from 'draft' to 'done' in a single write, the responsible will change. If those two conditions aren't
        verified, the responsible will stay the same.
        The responsible in that test will change to user "demo".
"""
cr, uid = self.cr, self.uid
filter_draft = self.create_filter_draft(cr, uid)
filter_done = self.create_filter_done(cr, uid)
self.create_rule(cr, uid, 'on_write', filter_pre_id=filter_draft, filter_id=filter_done)
new_lead_id = self.create_lead_test_1(cr, uid)
new_lead = self.model.browse(cr, uid, new_lead_id)
self.assertEquals(new_lead.state, 'draft')
self.assertEquals(new_lead.user_id.id, self.admin)
""" change the state of new_lead to done and check that responsible change to Demo_user"""
new_lead.write({'state': 'done'})
new_lead = self.model.browse(cr, uid, new_lead_id)
self.assertEquals(new_lead.state, 'done')
self.assertEquals(new_lead.user_id.id, self.demo)
self.delete_rules(cr, uid)
|
ptisserand/portage
|
refs/heads/master
|
pym/portage/output.py
|
2
|
# Copyright 1998-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import division
__docformat__ = "epytext"
import errno
import io
import formatter
import re
import subprocess
import sys
import portage
portage.proxy.lazyimport.lazyimport(globals(),
'portage.util:writemsg',
)
from portage import os
from portage import _encodings
from portage import _unicode_encode
from portage import _unicode_decode
from portage.const import COLOR_MAP_FILE
from portage.exception import CommandNotFound, FileNotFound, \
ParseError, PermissionDenied, PortageException
from portage.localization import _
havecolor = 1
dotitles = 1
_styles = {}
"""Maps style class to tuple of attribute names."""
codes = {}
"""Maps attribute name to ansi code."""
esc_seq = "\x1b["
codes["normal"] = esc_seq + "0m"
codes["reset"] = esc_seq + "39;49;00m"
codes["bold"] = esc_seq + "01m"
codes["faint"] = esc_seq + "02m"
codes["standout"] = esc_seq + "03m"
codes["underline"] = esc_seq + "04m"
codes["blink"] = esc_seq + "05m"
codes["overline"] = esc_seq + "06m"
codes["reverse"] = esc_seq + "07m"
codes["invisible"] = esc_seq + "08m"
codes["no-attr"] = esc_seq + "22m"
codes["no-standout"] = esc_seq + "23m"
codes["no-underline"] = esc_seq + "24m"
codes["no-blink"] = esc_seq + "25m"
codes["no-overline"] = esc_seq + "26m"
codes["no-reverse"] = esc_seq + "27m"
codes["bg_black"] = esc_seq + "40m"
codes["bg_darkred"] = esc_seq + "41m"
codes["bg_darkgreen"] = esc_seq + "42m"
codes["bg_brown"] = esc_seq + "43m"
codes["bg_darkblue"] = esc_seq + "44m"
codes["bg_purple"] = esc_seq + "45m"
codes["bg_teal"] = esc_seq + "46m"
codes["bg_lightgray"] = esc_seq + "47m"
codes["bg_default"] = esc_seq + "49m"
codes["bg_darkyellow"] = codes["bg_brown"]
def color(fg, bg="default", attr=["normal"]):
mystr = codes[fg]
for x in [bg]+attr:
mystr += codes[x]
return mystr
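# Illustrative usage (not part of the original module): color() simply
# concatenates the looked-up escape codes, foreground first, then the
# background and each attribute.
#
#     >>> s = color("green", bg="bg_default", attr=["bold"])
#     >>> s == codes["green"] + codes["bg_default"] + codes["bold"]
#     True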
ansi_codes = []
for x in range(30, 38):
ansi_codes.append("%im" % x)
ansi_codes.append("%i;01m" % x)
rgb_ansi_colors = ['0x000000', '0x555555', '0xAA0000', '0xFF5555', '0x00AA00',
'0x55FF55', '0xAA5500', '0xFFFF55', '0x0000AA', '0x5555FF', '0xAA00AA',
'0xFF55FF', '0x00AAAA', '0x55FFFF', '0xAAAAAA', '0xFFFFFF']
for x in range(len(rgb_ansi_colors)):
codes[rgb_ansi_colors[x]] = esc_seq + ansi_codes[x]
del x
codes["black"] = codes["0x000000"]
codes["darkgray"] = codes["0x555555"]
codes["red"] = codes["0xFF5555"]
codes["darkred"] = codes["0xAA0000"]
codes["green"] = codes["0x55FF55"]
codes["darkgreen"] = codes["0x00AA00"]
codes["yellow"] = codes["0xFFFF55"]
codes["brown"] = codes["0xAA5500"]
codes["blue"] = codes["0x5555FF"]
codes["darkblue"] = codes["0x0000AA"]
codes["fuchsia"] = codes["0xFF55FF"]
codes["purple"] = codes["0xAA00AA"]
codes["turquoise"] = codes["0x55FFFF"]
codes["teal"] = codes["0x00AAAA"]
codes["white"] = codes["0xFFFFFF"]
codes["lightgray"] = codes["0xAAAAAA"]
codes["darkteal"] = codes["turquoise"]
# Some terminals have darkyellow instead of brown.
codes["0xAAAA00"] = codes["brown"]
codes["darkyellow"] = codes["0xAAAA00"]
# Colors from /etc/init.d/functions.sh
_styles["NORMAL"] = ( "normal", )
_styles["GOOD"] = ( "green", )
_styles["WARN"] = ( "yellow", )
_styles["BAD"] = ( "red", )
_styles["HILITE"] = ( "teal", )
_styles["BRACKET"] = ( "blue", )
# Portage functions
_styles["INFORM"] = ( "darkgreen", )
_styles["UNMERGE_WARN"] = ( "red", )
_styles["SECURITY_WARN"] = ( "red", )
_styles["MERGE_LIST_PROGRESS"] = ( "yellow", )
_styles["PKG_BLOCKER"] = ( "red", )
_styles["PKG_BLOCKER_SATISFIED"] = ( "darkblue", )
_styles["PKG_MERGE"] = ( "darkgreen", )
_styles["PKG_MERGE_SYSTEM"] = ( "darkgreen", )
_styles["PKG_MERGE_WORLD"] = ( "green", )
_styles["PKG_BINARY_MERGE"] = ( "purple", )
_styles["PKG_BINARY_MERGE_SYSTEM"] = ( "purple", )
_styles["PKG_BINARY_MERGE_WORLD"] = ( "fuchsia", )
_styles["PKG_UNINSTALL"] = ( "red", )
_styles["PKG_NOMERGE"] = ( "darkblue", )
_styles["PKG_NOMERGE_SYSTEM"] = ( "darkblue", )
_styles["PKG_NOMERGE_WORLD"] = ( "blue", )
_styles["PROMPT_CHOICE_DEFAULT"] = ( "green", )
_styles["PROMPT_CHOICE_OTHER"] = ( "red", )
def _parse_color_map(config_root='/', onerror=None):
"""
Parse /etc/portage/color.map and update the global 'codes' and
'_styles' dictionaries in place (nothing is returned).
@param onerror: an optional callback to handle any ParseError that would
	otherwise be raised
@type onerror: callable
"""
global codes, _styles
myfile = os.path.join(config_root, COLOR_MAP_FILE)
ansi_code_pattern = re.compile("^[0-9;]*m$")
quotes = '\'"'
def strip_quotes(token):
if token[0] in quotes and token[0] == token[-1]:
token = token[1:-1]
return token
try:
with io.open(_unicode_encode(myfile,
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['content'], errors='replace') as f:
lines = f.readlines()
for lineno, line in enumerate(lines, 1):  # 1-based line numbers for error messages
commenter_pos = line.find("#")
line = line[:commenter_pos].strip()
if len(line) == 0:
continue
split_line = line.split("=")
if len(split_line) != 2:
e = ParseError(_("'%s', line %s: expected exactly one occurrence of '=' operator") % \
(myfile, lineno))
if onerror:
onerror(e)
else:
raise e
continue
k = strip_quotes(split_line[0].strip())
v = strip_quotes(split_line[1].strip())
if not k in _styles and not k in codes:
e = ParseError(_("'%s', line %s: Unknown variable: '%s'") % \
(myfile, lineno, k))
if onerror:
onerror(e)
else:
raise e
continue
if ansi_code_pattern.match(v):
if k in _styles:
_styles[k] = ( esc_seq + v, )
elif k in codes:
codes[k] = esc_seq + v
else:
code_list = []
for x in v.split():
if x in codes:
if k in _styles:
code_list.append(x)
elif k in codes:
code_list.append(codes[x])
else:
e = ParseError(_("'%s', line %s: Undefined: '%s'") % \
(myfile, lineno, x))
if onerror:
onerror(e)
else:
raise e
if k in _styles:
_styles[k] = tuple(code_list)
elif k in codes:
codes[k] = "".join(code_list)
except (IOError, OSError) as e:
if e.errno == errno.ENOENT:
raise FileNotFound(myfile)
elif e.errno == errno.EACCES:
raise PermissionDenied(myfile)
raise
def nc_len(mystr):
# esc_seq already ends with '[', so the concatenation reads as the regex
# '\x1b[^m]+m': strip every ANSI escape before measuring visible length.
tmp = re.sub(esc_seq + "^m]+m", "", mystr)
return len(tmp)
_legal_terms_re = re.compile(r'^(xterm|xterm-color|Eterm|aterm|rxvt|screen|kterm|rxvt-unicode|gnome|interix)')
_disable_xtermTitle = None
_max_xtermTitle_len = 253
def xtermTitle(mystr, raw=False):
global _disable_xtermTitle
if _disable_xtermTitle is None:
_disable_xtermTitle = not (sys.__stderr__.isatty() and \
'TERM' in os.environ and \
_legal_terms_re.match(os.environ['TERM']) is not None)
if dotitles and not _disable_xtermTitle:
# If the title string is too big then the terminal can
# misbehave. Therefore, truncate it if it's too big.
if len(mystr) > _max_xtermTitle_len:
mystr = mystr[:_max_xtermTitle_len]
if not raw:
mystr = '\x1b]0;%s\x07' % mystr
# avoid potential UnicodeEncodeError
mystr = _unicode_encode(mystr,
encoding=_encodings['stdio'], errors='backslashreplace')
f = sys.stderr
if sys.hexversion >= 0x3000000:
f = f.buffer
f.write(mystr)
f.flush()
default_xterm_title = None
def xtermTitleReset():
global default_xterm_title
if default_xterm_title is None:
prompt_command = os.environ.get('PROMPT_COMMAND')
if prompt_command == "":
default_xterm_title = ""
elif prompt_command is not None:
if dotitles and \
'TERM' in os.environ and \
_legal_terms_re.match(os.environ['TERM']) is not None and \
sys.__stderr__.isatty():
from portage.process import find_binary, spawn
shell = os.environ.get("SHELL")
if not shell or not os.access(shell, os.EX_OK):
shell = find_binary("sh")
if shell:
spawn([shell, "-c", prompt_command], env=os.environ,
fd_pipes={
0: portage._get_stdin().fileno(),
1: sys.__stderr__.fileno(),
2: sys.__stderr__.fileno()
})
else:
os.system(prompt_command)
return
else:
pwd = os.environ.get('PWD','')
home = os.environ.get('HOME', '')
if home != '' and pwd.startswith(home):
pwd = '~' + pwd[len(home):]
default_xterm_title = '\x1b]0;%s@%s:%s\x07' % (
os.environ.get('LOGNAME', ''),
os.environ.get('HOSTNAME', '').split('.', 1)[0], pwd)
xtermTitle(default_xterm_title, raw=True)
def notitles():
"turn off title setting"
global dotitles
dotitles = 0
def nocolor():
"turn off colorization"
global havecolor
havecolor = 0
def resetColor():
return codes["reset"]
def style_to_ansi_code(style):
"""
@param style: A style name
@type style: String
@rtype: String
@return: A string containing one or more ansi escape codes that are
used to render the given style.
"""
ret = ""
for attr_name in _styles[style]:
# allow stuff that has found its way through ansi_code_pattern
ret += codes.get(attr_name, attr_name)
return ret
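# Example: _styles["GOOD"] maps to ("green",), so the style resolves to the
# plain color code; attribute names with no entry in `codes` (raw ansi
# fragments from color.map) pass through unchanged via codes.get(name, name).
#
#     >>> style_to_ansi_code("GOOD") == codes["green"]
#     True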
def colormap():
mycolors = []
for c in ("GOOD", "WARN", "BAD", "HILITE", "BRACKET", "NORMAL"):
mycolors.append("%s=$'%s'" % (c, style_to_ansi_code(c)))
return "\n".join(mycolors)
def colorize(color_key, text):
global havecolor
if havecolor:
if color_key in codes:
return codes[color_key] + text + codes["reset"]
elif color_key in _styles:
return style_to_ansi_code(color_key) + text + codes["reset"]
else:
return text
else:
return text
compat_functions_colors = [
"bold", "white", "teal", "turquoise", "darkteal",
"fuchsia", "purple", "blue", "darkblue", "green", "darkgreen", "yellow",
"brown", "darkyellow", "red", "darkred",
]
class create_color_func(object):
__slots__ = ("_color_key",)
def __init__(self, color_key):
self._color_key = color_key
def __call__(self, text):
return colorize(self._color_key, text)
for c in compat_functions_colors:
globals()[c] = create_color_func(c)
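# The loop above synthesizes module-level helpers such as red(), bold() and
# teal(); each one is just colorize() with the color key pre-bound:
#
#     >>> red("failed") == colorize("red", "failed")
#     True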
class ConsoleStyleFile(object):
"""
A file-like object that behaves something like
the colorize() function. Style identifiers
passed in via the new_styles() method will be used to
apply console codes to output.
"""
def __init__(self, f):
self._file = f
self._styles = None
self.write_listener = None
def new_styles(self, styles):
self._styles = styles
def write(self, s):
# In python-2.6, DumbWriter.send_line_break() can write
# non-unicode '\n' which fails with TypeError if self._file
# is a text stream such as io.StringIO. Therefore, make sure
# input is converted to unicode when necessary.
s = _unicode_decode(s)
global havecolor
if havecolor and self._styles:
styled_s = []
for style in self._styles:
styled_s.append(style_to_ansi_code(style))
styled_s.append(s)
styled_s.append(codes["reset"])
self._write(self._file, "".join(styled_s))
else:
self._write(self._file, s)
if self.write_listener:
self._write(self.write_listener, s)
def _write(self, f, s):
# avoid potential UnicodeEncodeError
if f in (sys.stdout, sys.stderr):
s = _unicode_encode(s,
encoding=_encodings['stdio'], errors='backslashreplace')
if sys.hexversion >= 0x3000000:
f = f.buffer
f.write(s)
def writelines(self, lines):
for s in lines:
self.write(s)
def flush(self):
self._file.flush()
def close(self):
self._file.close()
class StyleWriter(formatter.DumbWriter):
"""
This is just a DumbWriter with a hook in the new_styles() method
that passes a styles tuple as a single argument to a callable
style_listener attribute.
"""
def __init__(self, **kwargs):
formatter.DumbWriter.__init__(self, **kwargs)
self.style_listener = None
def new_styles(self, styles):
formatter.DumbWriter.new_styles(self, styles)
if self.style_listener:
self.style_listener(styles)
def get_term_size(fd=None):
"""
Get the number of lines and columns of the tty that is connected to
fd. Returns a tuple of (lines, columns) or (0, 0) if an error
occurs. The curses module is used if available, otherwise the output of
`stty size` is parsed. The lines and columns values are guaranteed to be
greater than or equal to zero, since a negative COLUMNS variable is
known to prevent some commands from working (see bug #394091).
"""
if fd is None:
fd = sys.stdout
if not hasattr(fd, 'isatty') or not fd.isatty():
return (0, 0)
try:
import curses
try:
curses.setupterm(term=os.environ.get("TERM", "unknown"),
fd=fd.fileno())
return curses.tigetnum('lines'), curses.tigetnum('cols')
except curses.error:
pass
except ImportError:
pass
try:
proc = subprocess.Popen(["stty", "size"],
stdout=subprocess.PIPE, stderr=fd)
except EnvironmentError as e:
if e.errno != errno.ENOENT:
raise
# stty command not found
return (0, 0)
out = _unicode_decode(proc.communicate()[0])
if proc.wait() == os.EX_OK:
out = out.split()
if len(out) == 2:
try:
val = (int(out[0]), int(out[1]))
except ValueError:
pass
else:
if val[0] >= 0 and val[1] >= 0:
return val
return (0, 0)
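# Usage sketch: callers are expected to handle the (0, 0) fallback
# themselves, as EOutput.__init__ does below:
#
#     >>> lines, columns = get_term_size()
#     >>> if columns <= 0:
#     ...     columns = 80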
def set_term_size(lines, columns, fd):
"""
Set the number of lines and columns for the tty that is connected to fd.
For portability, this simply calls `stty rows $lines columns $columns`.
"""
from portage.process import spawn
cmd = ["stty", "rows", str(lines), "columns", str(columns)]
try:
spawn(cmd, env=os.environ, fd_pipes={0:fd})
except CommandNotFound:
writemsg(_("portage: stty: command not found\n"), noiselevel=-1)
class EOutput(object):
"""
Performs fancy terminal formatting for status and informational messages.
The provided methods produce identical terminal output to the eponymous
functions in the shell script C{/sbin/functions.sh} and also accept
identical parameters.
This is not currently a drop-in replacement however, as the output-related
functions in C{/sbin/functions.sh} are oriented for use mainly by system
init scripts and ebuilds and their output can be customized via certain
C{RC_*} environment variables (see C{/etc/conf.d/rc}). B{EOutput} is not
customizable in this manner since it's intended for more general uses.
Likewise, no logging is provided.
@ivar quiet: Specifies if output should be silenced.
@type quiet: BooleanType
@ivar term_columns: Width of terminal in characters. Defaults to the value
specified by the shell's C{COLUMNS} variable, else to the queried tty
size, else to C{80}.
@type term_columns: IntType
"""
def __init__(self, quiet=False):
self.__last_e_cmd = ""
self.__last_e_len = 0
self.quiet = quiet
lines, columns = get_term_size()
if columns <= 0:
columns = 80
self.term_columns = columns
sys.stdout.flush()
sys.stderr.flush()
def _write(self, f, s):
# avoid potential UnicodeEncodeError
writemsg(s, noiselevel=-1, fd=f)
def __eend(self, caller, errno, msg):
if errno == 0:
status_brackets = colorize("BRACKET", "[ ") + colorize("GOOD", "ok") + colorize("BRACKET", " ]")
else:
status_brackets = colorize("BRACKET", "[ ") + colorize("BAD", "!!") + colorize("BRACKET", " ]")
if msg:
if caller == "eend":
self.eerror(msg[0])
elif caller == "ewend":
self.ewarn(msg[0])
if self.__last_e_cmd != "ebegin":
self.__last_e_len = 0
if not self.quiet:
out = sys.stdout
self._write(out,
"%*s%s\n" % ((self.term_columns - self.__last_e_len - 7),
"", status_brackets))
def ebegin(self, msg):
"""
Shows a message indicating the start of a process.
@param msg: A very brief (shorter than one line) description of the
starting process.
@type msg: StringType
"""
msg += " ..."
if not self.quiet:
self.einfon(msg)
self.__last_e_len = len(msg) + 3
self.__last_e_cmd = "ebegin"
def eend(self, errno, *msg):
"""
Indicates the completion of a process, optionally displaying a message
via L{eerror} if the process's exit status isn't C{0}.
@param errno: A standard UNIX C{errno} code returned by processes upon
exit.
@type errno: IntType
@param msg: I{(optional)} An error message, typically a standard UNIX
error string corresponding to C{errno}.
@type msg: StringType
"""
if not self.quiet:
self.__eend("eend", errno, msg)
self.__last_e_cmd = "eend"
def eerror(self, msg):
"""
Shows an error message.
@param msg: A very brief (shorter than one line) error message.
@type msg: StringType
"""
out = sys.stderr
if not self.quiet:
if self.__last_e_cmd == "ebegin":
self._write(out, "\n")
self._write(out, colorize("BAD", " * ") + msg + "\n")
self.__last_e_cmd = "eerror"
def einfo(self, msg):
"""
Shows an informative message terminated with a newline.
@param msg: A very brief (shorter than one line) informative message.
@type msg: StringType
"""
out = sys.stdout
if not self.quiet:
if self.__last_e_cmd == "ebegin":
self._write(out, "\n")
self._write(out, colorize("GOOD", " * ") + msg + "\n")
self.__last_e_cmd = "einfo"
def einfon(self, msg):
"""
Shows an informative message terminated without a newline.
@param msg: A very brief (shorter than one line) informative message.
@type msg: StringType
"""
out = sys.stdout
if not self.quiet:
if self.__last_e_cmd == "ebegin":
self._write(out, "\n")
self._write(out, colorize("GOOD", " * ") + msg)
self.__last_e_cmd = "einfon"
def ewarn(self, msg):
"""
Shows a warning message.
@param msg: A very brief (shorter than one line) warning message.
@type msg: StringType
"""
out = sys.stderr
if not self.quiet:
if self.__last_e_cmd == "ebegin":
self._write(out, "\n")
self._write(out, colorize("WARN", " * ") + msg + "\n")
self.__last_e_cmd = "ewarn"
def ewend(self, errno, *msg):
"""
Indicates the completion of a process, optionally displaying a message
via L{ewarn} if the process's exit status isn't C{0}.
@param errno: A standard UNIX C{errno} code returned by processes upon
exit.
@type errno: IntType
@param msg: I{(optional)} A warning message, typically a standard UNIX
error string corresponding to C{errno}.
@type msg: StringType
"""
if not self.quiet:
self.__eend("ewend", errno, msg)
self.__last_e_cmd = "ewend"
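# Usage sketch (illustrative): an ebegin()/eend() pair renders the familiar
# functions.sh status line; a nonzero errno turns the bracket red and echoes
# the optional message via eerror().
#
#     >>> out = EOutput()
#     >>> out.ebegin("Unpacking source")   # prints " * Unpacking source ..."
#     >>> out.eend(0)                      # appends a right-aligned "[ ok ]"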
class ProgressBar(object):
"""The interface is copied from the ProgressBar class from the EasyDialogs
module (which is Mac only)."""
def __init__(self, title=None, maxval=0, label=None, max_desc_length=25):
self._title = title or ""
self._maxval = maxval
self._label = label or ""
self._curval = 0
self._desc = ""
self._desc_max_length = max_desc_length
self._set_desc()
@property
def curval(self):
"""
The current value (of type integer or long integer) of the progress
bar. The normal access methods coerce curval between 0 and maxval. This
attribute should not be altered directly.
"""
return self._curval
@property
def maxval(self):
"""
The maximum value (of type integer or long integer) of the progress
bar; the progress bar (thermometer style) is full when curval equals
maxval. If maxval is 0, the bar will be indeterminate (barber-pole).
This attribute should not be altered directly.
"""
return self._maxval
def title(self, newstr):
"""Sets the text in the title bar of the progress dialog to newstr."""
self._title = newstr
self._set_desc()
def label(self, newstr):
"""Sets the text in the progress box of the progress dialog to newstr."""
self._label = newstr
self._set_desc()
def _set_desc(self):
self._desc = "%s%s" % (
"%s: " % self._title if self._title else "",
"%s" % self._label if self._label else ""
)
if len(self._desc) > self._desc_max_length: # truncate if too long
self._desc = "%s..." % self._desc[:self._desc_max_length - 3]
if len(self._desc):
self._desc = self._desc.ljust(self._desc_max_length)
def set(self, value, maxval=None):
"""
Sets the progress bar's curval to value, and also maxval to max if the
latter is provided. value is first coerced between 0 and maxval. The
thermometer bar is updated to reflect the changes, including a change
from indeterminate to determinate or vice versa.
"""
if maxval is not None:
self._maxval = maxval
if value < 0:
value = 0
elif value > self._maxval:
value = self._maxval
self._curval = value
def inc(self, n=1):
"""Increments the progress bar's curval by n, or by 1 if n is not
provided. (Note that n may be negative, in which case the effect is a
decrement.) The progress bar is updated to reflect the change. If the
bar is indeterminate, this causes one ``spin'' of the barber pole. The
resulting curval is coerced between 0 and maxval if incrementing causes
it to fall outside this range.
"""
self.set(self._curval+n)
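# Usage sketch: set() clamps curval to [0, maxval], so inc() never needs a
# range check at the call site:
#
#     >>> bar = ProgressBar(title="fetch", maxval=10)
#     >>> bar.inc(15)
#     >>> bar.curval
#     10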
class TermProgressBar(ProgressBar):
"""A tty progress bar similar to wget's."""
def __init__(self, fd=sys.stdout, **kwargs):
ProgressBar.__init__(self, **kwargs)
lines, self.term_columns = get_term_size(fd)
self.file = fd
self._min_columns = 11
self._max_columns = 80
# for indeterminate mode, ranges from 0.0 to 1.0
self._position = 0.0
def set(self, value, maxval=None):
ProgressBar.set(self, value, maxval=maxval)
self._display_image(self._create_image())
def _display_image(self, image):
self.file.write('\r')
self.file.write(image)
self.file.flush()
def _create_image(self):
cols = self.term_columns
if cols > self._max_columns:
cols = self._max_columns
min_columns = self._min_columns
curval = self._curval
maxval = self._maxval
position = self._position
percentage_str_width = 5
square_brackets_width = 2
if cols < percentage_str_width:
return ""
bar_space = cols - percentage_str_width - square_brackets_width - 1
if self._desc:
bar_space -= self._desc_max_length
if maxval == 0:
max_bar_width = bar_space-3
_percent = "".ljust(percentage_str_width)
if cols < min_columns:
return ""
if position <= 0.5:
offset = 2 * position
else:
offset = 2 * (1 - position)
delta = 0.5 / max_bar_width
position += delta
if position >= 1.0:
position = 0.0
# make sure it touches the ends
if 1.0 - position < delta:
position = 1.0
if position < 0.5 and 0.5 - position < delta:
position = 0.5
self._position = position
bar_width = int(offset * max_bar_width)
image = "%s%s%s" % (self._desc, _percent,
"[" + (bar_width * " ") + \
"<=>" + ((max_bar_width - bar_width) * " ") + "]")
return image
else:
percentage = 100 * curval // maxval
max_bar_width = bar_space - 1
_percent = ("%d%% " % percentage).rjust(percentage_str_width)
image = "%s%s" % (self._desc, _percent)
if cols < min_columns:
return image
offset = curval / maxval
bar_width = int(offset * max_bar_width)
image = image + "[" + (bar_width * "=") + \
">" + ((max_bar_width - bar_width) * " ") + "]"
return image
_color_map_loaded = False
def _init(config_root='/'):
"""
Load color.map from the given config_root. This is called automatically
on first access of the codes or _styles attributes (unless it has already
been called for some other reason).
"""
global _color_map_loaded, codes, _styles
if _color_map_loaded:
return
_color_map_loaded = True
codes = object.__getattribute__(codes, '_attr')
_styles = object.__getattribute__(_styles, '_attr')
for k, v in codes.items():
codes[k] = _unicode_decode(v)
for k, v in _styles.items():
_styles[k] = _unicode_decode(v)
try:
_parse_color_map(config_root=config_root,
onerror=lambda e: writemsg("%s\n" % str(e), noiselevel=-1))
except FileNotFound:
pass
except PermissionDenied as e:
writemsg(_("Permission denied: '%s'\n") % str(e), noiselevel=-1)
del e
except PortageException as e:
writemsg("%s\n" % str(e), noiselevel=-1)
del e
class _LazyInitColorMap(portage.proxy.objectproxy.ObjectProxy):
__slots__ = ('_attr',)
def __init__(self, attr):
portage.proxy.objectproxy.ObjectProxy.__init__(self)
object.__setattr__(self, '_attr', attr)
def _get_target(self):
_init()
return object.__getattribute__(self, '_attr')
codes = _LazyInitColorMap(codes)
_styles = _LazyInitColorMap(_styles)
|
boonedox/AutobahnPython
|
refs/heads/master
|
examples/wamp/dbus/server.py
|
27
|
###############################################################################
##
## Copyright 2012 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import sys
from twisted.python import log
from twisted.internet import reactor, defer
from twisted.web.server import Site
from twisted.web.static import File
from autobahn.websocket import listenWS
from autobahn.wamp import exportRpc, \
WampServerFactory, \
WampCraServerProtocol
class DbusServerProtocol(WampCraServerProtocol):
## our pseudo user/permissions database
USERS = {'user1': 'secret',
'user2': 'geheim'}
def onSessionOpen(self):
## override global client auth options
self.clientAuthTimeout = 0
self.clientAuthAllowAnonymous = True
## call base class method
WampCraServerProtocol.onSessionOpen(self)
def getAuthPermissions(self, authKey, authExtra):
if authKey is None:
## notification issuer is only allowed to publish to topics
## and retrieve list of users
pms = {'pubsub': [{'uri': 'http://example.com/topics/',
'prefix': True,
'pub': True,
'sub': False}],
'rpc': [{'uri': 'http://example.com/procedures/getusers',
'call': True}]}
return {'permissions': pms}
else:
## desktop notification client is only allowed to subscribe to topics
## http://example.com/topics/all
## http://example.com/topics/<user>
##
pms = {'pubsub': [{'uri': 'http://example.com/topics/all',
'prefix': False,
'pub': False,
'sub': True},
{'uri': 'http://example.com/topics/%s' % authKey,
'prefix': False,
'pub': False,
'sub': True}],
'rpc': []}
return {'permissions': pms}
def getAuthSecret(self, authKey):
## return the auth secret for the given auth key or None when the auth key
## does not exist
return self.USERS.get(authKey, None)
def onAuthenticated(self, authKey, permissions):
## fired when authentication succeeds
## register PubSub topics from the auth permissions
self.registerForPubSubFromPermissions(permissions['permissions'])
## register RPC endpoints (for now do that manually, keep in sync with perms)
if authKey is None:
self.registerForRpc(self, 'http://example.com/procedures/',
[DbusServerProtocol.getUsers])
@exportRpc("getusers")
def getUsers(self):
return self.USERS.keys()
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1] == 'debug':
log.startLogging(sys.stdout)
debug = True
else:
debug = False
factory = WampServerFactory("ws://localhost:9000", debugWamp = debug)
factory.protocol = DbusServerProtocol
listenWS(factory)
webdir = File(".")
web = Site(webdir)
reactor.listenTCP(8080, web)
reactor.run()
|
Star2Billing/cdr-stats
|
refs/heads/develop
|
cdr_stats/voip_billing/models.py
|
1
|
#
# CDR-Stats License
# http://www.cdr-stats.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2015 Star2Billing S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <info@star2billing.com>
#
from django.db import models
from django.utils.translation import ugettext as _
from country_dialcode.models import Prefix
from voip_gateway.models import Provider
from voip_billing.constants import LCR_TYPE
from django_lets_go.intermediate_model_base_class import Model
from django.db import connection
# find_rates() below depends on prefix_allowed_to_call; in the CDR-Stats
# tree it is assumed to live in voip_billing.function_def.
from voip_billing.function_def import prefix_allowed_to_call
class VoIPPlan(Model):
"""
VoIPPlan
VoIPPlans are associated to your clients, this defines the rate at which
the VoIP calls are sold to your clients.
An VoIPPlan is a collection of VoIPRetailPlans, you can have 1 or more
VoIPRetailPlans associated to the VoIPPlan
A client has a single VoIPPlan,
VoIPPlan has many VoIPRetailPlans.
VoIPRetailPlan has VoIPRetailRates
The LCR system will route the VoIP via the lowest cost carrier.
"""
name = models.CharField(unique=True, max_length=255, verbose_name=_('name'),
help_text=_("enter plan name"))
pubname = models.CharField(max_length=255, verbose_name=_('publish name'),
help_text=_("enter publish name"))
lcrtype = models.IntegerField(choices=list(LCR_TYPE), verbose_name=_('LCR type'),
help_text=_("select LCR type"))
created_date = models.DateTimeField(auto_now_add=True, verbose_name=_('date'))
updated_date = models.DateTimeField(auto_now=True)
class Meta:
db_table = u'voip_plan'
verbose_name = _("VoIP plan")
verbose_name_plural = _("VoIP plans")
def __unicode__(self):
return '[%s] %s' % (self.id, self.name)
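# Relationship sketch (hypothetical data): the through-model M2M declared on
# VoIPRetailPlan below makes a plan's retail plans reachable via the default
# reverse accessor, e.g.:
#
#     plan = VoIPPlan.objects.get(name='standard')
#     retail_plans = plan.voipretailplan_set.all()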
class BanPlan(models.Model):
"""
BanPlan
List of Ban Plans, which are linked to VoIP Plans
"""
name = models.CharField(unique=True, max_length=255, verbose_name=_('name'),
help_text=_("enter ban plan name"))
created_date = models.DateTimeField(auto_now_add=True)
updated_date = models.DateTimeField(auto_now=True)
voip_plan = models.ManyToManyField(VoIPPlan, through='VoIPPlan_BanPlan')
class Meta:
db_table = u'voipbilling_banplan'
verbose_name = _("ban plan")
verbose_name_plural = _("ban plans")
def __unicode__(self):
return "%s" % (self.name)
class VoIPPlan_BanPlan(models.Model):
"""
VoIPPlan_BanPlan
Intermediate (through) model for the ManyToMany relationship between VoIPPlan & BanPlan
"""
voipplan = models.ForeignKey(VoIPPlan, related_name='voip plan')
banplan = models.ForeignKey(BanPlan, related_name='ban plan')
created_date = models.DateTimeField(auto_now_add=True)
class Meta:
db_table = u'voipplan_banplan'
def __unicode__(self):
return "%s" % (self.banplan)
class BanPrefix(models.Model):
"""
BanPrefix
Ban prefixes are linked to a Ban plan; VoIP calls to these prefixes
will not be authorized to be sent.
"""
ban_plan = models.ForeignKey(BanPlan, verbose_name=_('ban plan'), help_text=_("select ban plan"))
prefix = models.ForeignKey(Prefix, verbose_name=_('prefix'), help_text=_("select prefix"))
created_date = models.DateTimeField(auto_now_add=True)
updated_date = models.DateTimeField(auto_now=True)
class Meta:
db_table = u'voipbilling_ban_prefix'
verbose_name = _("ban prefix")
verbose_name_plural = _("ban prefixes")
def __unicode__(self):
return "%s" % (self.ban_plan)
def prefix_with_name(self):
"""
Return prefix with name
on Ban Prefix Listing (changelist_view)
"""
if self.prefix is None:
return ""
else:
return "[%d] - %s" % (self.prefix.prefix, self.prefix.destination)
prefix_with_name.short_description = _('prefix')
class VoIPRetailPlan(Model):
"""
VoIPRetailPlan
This contains the VoIPRetailRates to retail to the customer. These plans are
associated to the VoIPPlan with a ManyToMany relation.
It defines the costs at which we sell the VoIP calls to clients.
VoIPRetailPlan will then contain a set of VoIPRetailRates which will define
the cost of sending a VoIP call to each destination.
The system can have several VoIPRetailPlans, but only the ones associated to
the VoIPPlan will be used by the client.
"""
name = models.CharField(max_length=255, verbose_name=_('name'), help_text=_("enter plan name"))
description = models.TextField(verbose_name=_('description'), null=True, blank=True,
help_text=_("short description about Plan"))
metric = models.IntegerField(default=10, verbose_name=_('metric'), help_text=_("enter metric in digit"))
created_date = models.DateTimeField(auto_now_add=True, verbose_name=_('date'))
updated_date = models.DateTimeField(auto_now=True)
voip_plan = models.ManyToManyField(VoIPPlan, through='VoIPPlan_VoIPRetailPlan')
class Meta:
db_table = u'voip_retail_plan'
verbose_name = _("retail plan")
verbose_name_plural = _("retail plans")
def __unicode__(self):
return "%s" % (self.name)
class VoIPPlan_VoIPRetailPlan(models.Model):
"""
VoIPPlan_VoIPRetailPlan
ManyToMany relationship between VoIPPlan & VoIPRetailPlan
"""
voipretailplan = models.ForeignKey(VoIPRetailPlan, related_name='VoIP Retail Plan')
voipplan = models.ForeignKey(VoIPPlan, related_name='VoIP Plan')
created_date = models.DateTimeField(auto_now_add=True)
class Meta:
db_table = u'voipplan_voipretailplan'
def __unicode__(self):
return "%s" % (self.voipplan)
class VoIPRetailRate(models.Model):
"""
VoIPRetailRate
A single VoIPRetailRate consists of a retail rate and prefix at which you
want to use to sell a VoIP Call to a particular destination.
VoIPRetailRates are grouped by VoIPRetailPlan, which will be then in turn be
associated to a VoIPPlan
"""
voip_retail_plan_id = models.ForeignKey(VoIPRetailPlan, db_column="voip_retail_plan_id",
verbose_name=_("retail plan"),
help_text=_("select retail plan"))
prefix = models.ForeignKey(Prefix, db_column="prefix", verbose_name=_("prefix"),
help_text=_("select prefix"))
retail_rate = models.DecimalField(max_digits=10, decimal_places=4, default=0, verbose_name=_("rate"),
help_text=_("enter Rate"))
created_date = models.DateTimeField(auto_now_add=True, verbose_name=_("date"))
updated_date = models.DateTimeField(auto_now=True)
class Meta:
db_table = u'voip_retail_rate'
verbose_name = _("retail rate")
verbose_name_plural = _("retail rates")
def voip_retail_plan_name(self):
"""
Return Retail Plan name
on Retail Rate listing (changelist_view)
"""
if self.voip_retail_plan_id is None:
return ""
else:
return self.voip_retail_plan_id.name
voip_retail_plan_name.short_description = _("retail plan")
def prefix_with_name(self):
"""
Return prefix with name
on Retail Rate listing (changelist_view)
"""
if self.prefix is None:
return ""
else:
return "[%d] - %s" % (self.prefix.prefix, self.prefix.destination)
prefix_with_name.short_description = _('prefix')
class VoIPCarrierPlan(Model):
"""
VoIPCarrierPlan
Once the retail price is defined by the VoIPPlan, VoIPRetailPlans and
VoIPRetailRates, we also need to know the best route to send
the VoIP call, how much it will cost, and which VoIP Gateway to use.
VoIPCarrierPlan is linked to the VoIP Plan, so once we have found how to sell
the service to the client, we need to look at which carrier (Provider)
we want to use; the VoIPCarrierPlan defines this.
The system can have several VoIPCarrierPlans, but only the one associated to
the VoIPRetailPlan-VoIPPlan will be used to connect the VoIP of
the client.
"""
name = models.CharField(max_length=255, verbose_name=_("name"),
help_text=_("enter plan name"))
description = models.TextField(verbose_name=_("description"),
null=True, blank=True,
help_text=_("short description about Plan"))
metric = models.IntegerField(default=10, verbose_name=_("metric"),
help_text=_("enter metric in digit"))
callsent = models.IntegerField(null=True, blank=True,
verbose_name=_("message sent"))
voip_provider_id = models.ForeignKey(Provider, db_column="voip_provider_id",
verbose_name=_("provider"),
help_text=_("select provider"))
created_date = models.DateTimeField(auto_now_add=True, verbose_name=_("date"))
updated_date = models.DateTimeField(auto_now=True)
class Meta:
db_table = u'voip_carrier_plan'
verbose_name = _("carrier plan")
verbose_name_plural = _("carrier plans")
def __unicode__(self):
return "%s" % (self.name)
class VoIPCarrierRate(models.Model):
"""
VoIPCarrierRate
The VoIPCarrierRates are the set of carrier rates and prefixes that
will be used to purchase the VoIP calls from your carrier.
VoIPCarrierRates are grouped by VoIPCarrierPlan, which will then be
associated to a VoIPRetailPlan.
"""
voip_carrier_plan_id = models.ForeignKey(VoIPCarrierPlan, db_column="voip_carrier_plan_id",
verbose_name=_("carrier plan"),
help_text=_("select carrier plan"))
prefix = models.ForeignKey(Prefix, db_column="prefix", verbose_name=_("prefix"),
help_text=_("select prefix"))
carrier_rate = models.DecimalField(max_digits=10, decimal_places=4, default=0, verbose_name=_("rate"),
help_text=_("enter rate"))
created_date = models.DateTimeField(auto_now_add=True, verbose_name=_("date"))
updated_date = models.DateTimeField(auto_now=True)
class Meta:
db_table = u'voip_carrier_rate'
verbose_name = _("carrier rate")
verbose_name_plural = _("carrier rates")
def voip_carrier_plan_name(self):
"""
Return Carrier Plan name
on Carrier Rate listing (changelist_view)
"""
if self.voip_carrier_plan_id is None:
return ""
else:
return self.voip_carrier_plan_id.name
voip_carrier_plan_name.short_description = _("carrier plan")
def prefix_with_name(self):
"""
Return prefix with name
on Carrier Rate listing (changelist_view)
"""
if self.prefix is None:
return ""
else:
return "[%d] - %s" % (self.prefix.prefix, self.prefix.destination)
prefix_with_name.short_description = _("prefix")
class VoIPPlan_VoIPCarrierPlan(models.Model):
"""
VoIPPlan_VoIPCarrierPlan
ManyToMany relationship between VoIPPlan & VoIPCarrierPlan
"""
voipcarrierplan = models.ForeignKey(VoIPCarrierPlan, related_name='carrier plan')
voipplan = models.ForeignKey(VoIPPlan, related_name='voip_plan')
created_date = models.DateTimeField(auto_now_add=True)
class Meta:
db_table = u'voipplan_voipcarrierplan'
def __unicode__(self):
return "%s" % (self.voipplan)
def find_rates(voipplan_id, dialcode, sort_field, order):
"""
function to retrieve list of rates belonging to a voipplan
"""
cursor = connection.cursor()
# variables used for sorting
extension_query = ''
if sort_field == 'prefix':
sort_field = 'voip_retail_rate.prefix'
if sort_field == 'retail_rate':
sort_field = 'minrate'
if sort_field == 'destination':
sort_field = 'dialcode_prefix.destination'
if sort_field:
extension_query = "ORDER BY " + sort_field + ' ' + order
cursor = connection.cursor()
if dialcode:
sqldialcode = str(dialcode) + '%'
sql_statement = (
"SELECT voip_retail_rate.prefix, "
"Min(retail_rate) as minrate, dialcode_prefix.destination "
"FROM voip_retail_rate "
"INNER JOIN voipplan_voipretailplan "
"ON voipplan_voipretailplan.voipretailplan_id = "
"voip_retail_rate.voip_retail_plan_id "
"LEFT JOIN dialcode_prefix ON dialcode_prefix.prefix = "
"voip_retail_rate.prefix "
"WHERE voipplan_id=%s "
"AND CAST(voip_retail_rate.prefix AS TEXT) LIKE %s "
"GROUP BY voip_retail_rate.prefix, dialcode_prefix.destination "
+ extension_query)
cursor.execute(sql_statement, [voipplan_id, sqldialcode])
else:
sql_statement = (
"SELECT voip_retail_rate.prefix, "
"Min(retail_rate) as minrate, dialcode_prefix.destination "
"FROM voip_retail_rate "
"INNER JOIN voipplan_voipretailplan "
"ON voipplan_voipretailplan.voipretailplan_id = "
"voip_retail_rate.voip_retail_plan_id "
"LEFT JOIN dialcode_prefix ON dialcode_prefix.prefix = "
"voip_retail_rate.prefix "
"WHERE voipplan_id=%s "
"GROUP BY voip_retail_rate.prefix, dialcode_prefix.destination "
+ extension_query)
cursor.execute(sql_statement, [voipplan_id])
row = cursor.fetchall()
result = []
for record in row:
# Not banned Prefix
allowed = prefix_allowed_to_call(record[0], voipplan_id)
if allowed:
modrecord = {}
modrecord['prefix'] = record[0]
modrecord['retail_rate'] = record[1]
modrecord['prefix__destination'] = record[2]
result.append(modrecord)
return result
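# Usage sketch (hypothetical values): cheapest retail rates of plan 1 for
# dialcodes starting with 34, ascending by price:
#
#     rates = find_rates(1, 34, 'retail_rate', 'ASC')
#     for rate in rates:
#         print rate['prefix'], rate['retail_rate'], rate['prefix__destination']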
|
dou800/php-buildpack-legacy
|
refs/heads/master
|
builds/runtimes/python-2.7.6/lib/python2.7/test/test_int_literal.py
|
139
|
"""Test correct treatment of hex/oct constants.
This is complex because of changes due to PEP 237.
"""
import unittest
from test import test_support
class TestHexOctBin(unittest.TestCase):
def test_hex_baseline(self):
# A few upper/lowercase tests
self.assertEqual(0x0, 0X0)
self.assertEqual(0x1, 0X1)
self.assertEqual(0x123456789abcdef, 0X123456789abcdef)
# Baseline tests
self.assertEqual(0x0, 0)
self.assertEqual(0x10, 16)
self.assertEqual(0x7fffffff, 2147483647)
self.assertEqual(0x7fffffffffffffff, 9223372036854775807)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0x0), 0)
self.assertEqual(-(0x10), -16)
self.assertEqual(-(0x7fffffff), -2147483647)
self.assertEqual(-(0x7fffffffffffffff), -9223372036854775807)
# Ditto with a minus sign and NO parentheses
self.assertEqual(-0x0, 0)
self.assertEqual(-0x10, -16)
self.assertEqual(-0x7fffffff, -2147483647)
self.assertEqual(-0x7fffffffffffffff, -9223372036854775807)
def test_hex_unsigned(self):
# Positive constants
self.assertEqual(0x80000000, 2147483648L)
self.assertEqual(0xffffffff, 4294967295L)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0x80000000), -2147483648L)
self.assertEqual(-(0xffffffff), -4294967295L)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-0x80000000, -2147483648L)
self.assertEqual(-0xffffffff, -4294967295L)
# Positive constants
self.assertEqual(0x8000000000000000, 9223372036854775808L)
self.assertEqual(0xffffffffffffffff, 18446744073709551615L)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0x8000000000000000), -9223372036854775808L)
self.assertEqual(-(0xffffffffffffffff), -18446744073709551615L)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-0x8000000000000000, -9223372036854775808L)
self.assertEqual(-0xffffffffffffffff, -18446744073709551615L)
def test_oct_baseline(self):
# Baseline tests
self.assertEqual(00, 0)
self.assertEqual(020, 16)
self.assertEqual(017777777777, 2147483647)
self.assertEqual(0777777777777777777777, 9223372036854775807)
# Ditto with a minus sign and parentheses
self.assertEqual(-(00), 0)
self.assertEqual(-(020), -16)
self.assertEqual(-(017777777777), -2147483647)
self.assertEqual(-(0777777777777777777777), -9223372036854775807)
# Ditto with a minus sign and NO parentheses
self.assertEqual(-00, 0)
self.assertEqual(-020, -16)
self.assertEqual(-017777777777, -2147483647)
self.assertEqual(-0777777777777777777777, -9223372036854775807)
def test_oct_baseline_new(self):
# A few upper/lowercase tests
self.assertEqual(0o0, 0O0)
self.assertEqual(0o1, 0O1)
self.assertEqual(0o1234567, 0O1234567)
# Baseline tests
self.assertEqual(0o0, 0)
self.assertEqual(0o20, 16)
self.assertEqual(0o17777777777, 2147483647)
self.assertEqual(0o777777777777777777777, 9223372036854775807)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0o0), 0)
self.assertEqual(-(0o20), -16)
self.assertEqual(-(0o17777777777), -2147483647)
self.assertEqual(-(0o777777777777777777777), -9223372036854775807)
# Ditto with a minus sign and NO parentheses
self.assertEqual(-0o0, 0)
self.assertEqual(-0o20, -16)
self.assertEqual(-0o17777777777, -2147483647)
self.assertEqual(-0o777777777777777777777, -9223372036854775807)
def test_oct_unsigned(self):
# Positive constants
self.assertEqual(020000000000, 2147483648L)
self.assertEqual(037777777777, 4294967295L)
# Ditto with a minus sign and parentheses
self.assertEqual(-(020000000000), -2147483648L)
self.assertEqual(-(037777777777), -4294967295L)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-020000000000, -2147483648L)
self.assertEqual(-037777777777, -4294967295L)
# Positive constants
self.assertEqual(01000000000000000000000, 9223372036854775808L)
self.assertEqual(01777777777777777777777, 18446744073709551615L)
# Ditto with a minus sign and parentheses
self.assertEqual(-(01000000000000000000000), -9223372036854775808L)
self.assertEqual(-(01777777777777777777777), -18446744073709551615L)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-01000000000000000000000, -9223372036854775808L)
self.assertEqual(-01777777777777777777777, -18446744073709551615L)
def test_oct_unsigned_new(self):
# Positive constants
self.assertEqual(0o20000000000, 2147483648L)
self.assertEqual(0o37777777777, 4294967295L)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0o20000000000), -2147483648L)
self.assertEqual(-(0o37777777777), -4294967295L)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-0o20000000000, -2147483648L)
self.assertEqual(-0o37777777777, -4294967295L)
# Positive constants
self.assertEqual(0o1000000000000000000000, 9223372036854775808L)
self.assertEqual(0o1777777777777777777777, 18446744073709551615L)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0o1000000000000000000000), -9223372036854775808L)
self.assertEqual(-(0o1777777777777777777777), -18446744073709551615L)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-0o1000000000000000000000, -9223372036854775808L)
self.assertEqual(-0o1777777777777777777777, -18446744073709551615L)
def test_bin_baseline(self):
# A few upper/lowercase tests
self.assertEqual(0b0, 0B0)
self.assertEqual(0b1, 0B1)
self.assertEqual(0b10101010101, 0B10101010101)
# Baseline tests
self.assertEqual(0b0, 0)
self.assertEqual(0b10000, 16)
self.assertEqual(0b1111111111111111111111111111111, 2147483647)
self.assertEqual(0b111111111111111111111111111111111111111111111111111111111111111, 9223372036854775807)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0b0), 0)
self.assertEqual(-(0b10000), -16)
self.assertEqual(-(0b1111111111111111111111111111111), -2147483647)
self.assertEqual(-(0b111111111111111111111111111111111111111111111111111111111111111), -9223372036854775807)
# Ditto with a minus sign and NO parentheses
self.assertEqual(-0b0, 0)
self.assertEqual(-0b10000, -16)
self.assertEqual(-0b1111111111111111111111111111111, -2147483647)
self.assertEqual(-0b111111111111111111111111111111111111111111111111111111111111111, -9223372036854775807)
def test_bin_unsigned(self):
# Positive constants
self.assertEqual(0b10000000000000000000000000000000, 2147483648L)
self.assertEqual(0b11111111111111111111111111111111, 4294967295L)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0b10000000000000000000000000000000), -2147483648L)
self.assertEqual(-(0b11111111111111111111111111111111), -4294967295L)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-0b10000000000000000000000000000000, -2147483648L)
self.assertEqual(-0b11111111111111111111111111111111, -4294967295L)
# Positive constants
self.assertEqual(0b1000000000000000000000000000000000000000000000000000000000000000, 9223372036854775808L)
self.assertEqual(0b1111111111111111111111111111111111111111111111111111111111111111, 18446744073709551615L)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0b1000000000000000000000000000000000000000000000000000000000000000), -9223372036854775808L)
self.assertEqual(-(0b1111111111111111111111111111111111111111111111111111111111111111), -18446744073709551615L)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-0b1000000000000000000000000000000000000000000000000000000000000000, -9223372036854775808L)
self.assertEqual(-0b1111111111111111111111111111111111111111111111111111111111111111, -18446744073709551615L)
def test_main():
test_support.run_unittest(TestHexOctBin)
if __name__ == "__main__":
test_main()
|
Desarrollo-CeSPI/meran
|
refs/heads/0.10.7
|
dev-plugins/node/lib/node/wafadmin/Tools/unittestw.py
|
4
|
#!/usr/bin/env python
# Meran - MERAN UNLP is an ILS (Integrated Library System) which provides Catalog,
# Circulation and User's Management. It's written in Perl, and uses Apache2
# Web-Server, MySQL database and Sphinx 2 indexing.
# Copyright (C) 2009-2013 Grupo de desarrollo de Meran CeSPI-UNLP
#
# This file is part of Meran.
#
# Meran is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Meran is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Meran. If not, see <http://www.gnu.org/licenses/>.
# encoding: utf-8
# Carlos Rafael Giani, 2006
"""
Unit tests are run in the shutdown() method, and are intended for C/C++ programs.
The programs should NOT require parameters to execute.
In the shutdown method, add the following code:
>>> def shutdown():
... ut = UnitTest.unit_test()
... ut.run()
... ut.print_results()
Each object to use as a unit test must be a program and must have X{obj.unit_test=1}
"""
import os, sys
import Build, TaskGen, Utils, Options, Logs, Task
from TaskGen import before, after, feature
from Constants import *
class unit_test(object):
"Unit test representation"
def __init__(self):
self.returncode_ok = 0 # Unit test returncode considered OK. All returncodes differing from this one
# will cause the unit test to be marked as "FAILED".
# The following variables are filled with data by run().
# print_results() uses these for printing the unit test summary,
# but if there is need for direct access to the results,
# they can be retrieved here, after calling run().
self.num_tests_ok = 0 # Number of successful unit tests
self.num_tests_failed = 0 # Number of failed unit tests
self.num_tests_err = 0 # Tests that have not even run
self.total_num_tests = 0 # Total amount of unit tests
self.max_label_length = 0 # Maximum label length (pretty-print the output)
self.unit_tests = Utils.ordered_dict() # Unit test dictionary. Key: the label (unit test filename relative
# to the build dir), value: unit test filename with absolute path
self.unit_test_results = {} # Dictionary containing the unit test results.
# Key: the label, value: result (true = success false = failure)
self.unit_test_erroneous = {} # Dictionary indicating erroneous unit tests.
# Key: the label, value: true = unit test has an error false = unit test is ok
self.change_to_testfile_dir = False #True if the test file needs to be executed from the same dir
self.want_to_see_test_output = False #True to see the stdout from the testfile (for example check suites)
self.want_to_see_test_error = False #True to see the stderr from the testfile (for example check suites)
self.run_if_waf_does = 'check' #build was the old default
def run(self):
"Run the unit tests and gather results (note: no output here)"
self.num_tests_ok = 0
self.num_tests_failed = 0
self.num_tests_err = 0
self.total_num_tests = 0
self.max_label_length = 0
self.unit_tests = Utils.ordered_dict()
self.unit_test_results = {}
self.unit_test_erroneous = {}
ld_library_path = []
# If waf is not building, don't run anything
if not Options.commands[self.run_if_waf_does]: return
# Get the paths for the shared libraries, and obtain the unit tests to execute
for obj in Build.bld.all_task_gen:
try:
link_task = obj.link_task
except AttributeError:
pass
else:
lib_path = link_task.outputs[0].parent.abspath(obj.env)
if lib_path not in ld_library_path:
ld_library_path.append(lib_path)
unit_test = getattr(obj, 'unit_test', '')
if unit_test and 'cprogram' in obj.features:
try:
output = obj.path
filename = os.path.join(output.abspath(obj.env), obj.target)
srcdir = output.abspath()
label = os.path.join(output.bldpath(obj.env), obj.target)
self.max_label_length = max(self.max_label_length, len(label))
self.unit_tests[label] = (filename, srcdir)
except KeyError:
pass
self.total_num_tests = len(self.unit_tests)
# Now run the unit tests
Utils.pprint('GREEN', 'Running the unit tests')
count = 0
result = 1
for label in self.unit_tests.allkeys:
file_and_src = self.unit_tests[label]
filename = file_and_src[0]
srcdir = file_and_src[1]
count += 1
line = Build.bld.progress_line(count, self.total_num_tests, Logs.colors.GREEN, Logs.colors.NORMAL)
if Options.options.progress_bar and line:
sys.stderr.write(line)
sys.stderr.flush()
try:
kwargs = {}
kwargs['env'] = os.environ.copy()
if self.change_to_testfile_dir:
kwargs['cwd'] = srcdir
if not self.want_to_see_test_output:
kwargs['stdout'] = Utils.pproc.PIPE # PIPE for ignoring output
if not self.want_to_see_test_error:
kwargs['stderr'] = Utils.pproc.PIPE # PIPE for ignoring output
if ld_library_path:
v = kwargs['env']
def add_path(dct, path, var):
dct[var] = os.pathsep.join(Utils.to_list(path) + [os.environ.get(var, '')])
if sys.platform == 'win32':
add_path(v, ld_library_path, 'PATH')
elif sys.platform == 'darwin':
add_path(v, ld_library_path, 'DYLD_LIBRARY_PATH')
add_path(v, ld_library_path, 'LD_LIBRARY_PATH')
else:
add_path(v, ld_library_path, 'LD_LIBRARY_PATH')
pp = Utils.pproc.Popen(filename, **kwargs)
pp.wait()
result = int(pp.returncode == self.returncode_ok)
if result:
self.num_tests_ok += 1
else:
self.num_tests_failed += 1
self.unit_test_results[label] = result
self.unit_test_erroneous[label] = 0
except OSError:
self.unit_test_erroneous[label] = 1
self.num_tests_err += 1
except KeyboardInterrupt:
pass
if Options.options.progress_bar: sys.stdout.write(Logs.colors.cursor_on)
def print_results(self):
"Pretty-prints a summary of all unit tests, along with some statistics"
# If waf is not building, don't output anything
if not Options.commands[self.run_if_waf_does]: return
p = Utils.pprint
# Early quit if no tests were performed
if self.total_num_tests == 0:
p('YELLOW', 'No unit tests present')
return
for label in self.unit_tests.allkeys:
filename = self.unit_tests[label]
err = 0
result = 0
try: err = self.unit_test_erroneous[label]
except KeyError: pass
try: result = self.unit_test_results[label]
except KeyError: pass
n = self.max_label_length - len(label)
if err: n += 4
elif result: n += 7
else: n += 3
line = '%s %s' % (label, '.' * n)
if err: p('RED', '%sERROR' % line)
elif result: p('GREEN', '%sOK' % line)
else: p('YELLOW', '%sFAILED' % line)
percentage_ok = float(self.num_tests_ok) / float(self.total_num_tests) * 100.0
percentage_failed = float(self.num_tests_failed) / float(self.total_num_tests) * 100.0
percentage_erroneous = float(self.num_tests_err) / float(self.total_num_tests) * 100.0
p('NORMAL', '''
Successful tests: %i (%.1f%%)
Failed tests: %i (%.1f%%)
Erroneous tests: %i (%.1f%%)
Total number of tests: %i
''' % (self.num_tests_ok, percentage_ok, self.num_tests_failed, percentage_failed,
self.num_tests_err, percentage_erroneous, self.total_num_tests))
p('GREEN', 'Unit tests finished')
############################################################################################
"""
New unit test system
The targets with feature 'test' are executed after they are built
bld(features='cprogram cc test', ...)
To display the results:
import UnitTest
bld.add_post_fun(UnitTest.summary)
"""
import threading
testlock = threading.Lock()
def set_options(opt):
opt.add_option('--alltests', action='store_true', default=True, help='Exec all unit tests', dest='all_tests')
@feature('test')
@after('apply_link', 'vars_target_cprogram')
def make_test(self):
if not 'cprogram' in self.features:
Logs.error('test cannot be executed %s' % self)
return
self.default_install_path = None
self.create_task('utest', self.link_task.outputs)
def exec_test(self):
status = 0
variant = self.env.variant()
filename = self.inputs[0].abspath(self.env)
try:
fu = getattr(self.generator.bld, 'all_test_paths')
except AttributeError:
fu = os.environ.copy()
self.generator.bld.all_test_paths = fu
lst = []
for obj in self.generator.bld.all_task_gen:
link_task = getattr(obj, 'link_task', None)
if link_task and link_task.env.variant() == variant:
lst.append(link_task.outputs[0].parent.abspath(obj.env))
def add_path(dct, path, var):
dct[var] = os.pathsep.join(Utils.to_list(path) + [os.environ.get(var, '')])
if sys.platform == 'win32':
add_path(fu, lst, 'PATH')
elif sys.platform == 'darwin':
add_path(fu, lst, 'DYLD_LIBRARY_PATH')
add_path(fu, lst, 'LD_LIBRARY_PATH')
else:
add_path(fu, lst, 'LD_LIBRARY_PATH')
cwd = getattr(self.generator, 'ut_cwd', '') or self.inputs[0].parent.abspath(self.env)
proc = Utils.pproc.Popen(filename, cwd=cwd, env=fu, stderr=Utils.pproc.PIPE, stdout=Utils.pproc.PIPE)
(stdout, stderr) = proc.communicate()
tup = (filename, proc.returncode, stdout, stderr)
self.generator.utest_result = tup
testlock.acquire()
try:
bld = self.generator.bld
Logs.debug("ut: %r", tup)
try:
bld.utest_results.append(tup)
except AttributeError:
bld.utest_results = [tup]
finally:
testlock.release()
cls = Task.task_type_from_func('utest', func=exec_test, color='PINK', ext_in='.bin')
old = cls.runnable_status
def test_status(self):
if getattr(Options.options, 'all_tests', False):
return RUN_ME
return old(self)
cls.runnable_status = test_status
cls.quiet = 1
def summary(bld):
lst = getattr(bld, 'utest_results', [])
if lst:
Utils.pprint('CYAN', 'execution summary')
total = len(lst)
tfail = len([x for x in lst if x[1]])
Utils.pprint('CYAN', ' tests that pass %d/%d' % (total-tfail, total))
for (f, code, out, err) in lst:
if not code:
Utils.pprint('CYAN', ' %s' % f)
Utils.pprint('CYAN', ' tests that fail %d/%d' % (tfail, total))
for (f, code, out, err) in lst:
if code:
Utils.pprint('CYAN', ' %s' % f)
|
jeasoft/odoo
|
refs/heads/marcos-8.0
|
addons/account_anglo_saxon/stock.py
|
208
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class stock_move(osv.Model):
_inherit = "stock.move"
def _get_invoice_line_vals(self, cr, uid, move, partner, inv_type, context=None):
""" Add a reference to the stock.move in the invoice line
In Anglo-Saxon accounting the price for COGS should be taken from the stock.move
if possible (falling back on standard_price)
"""
res = super(stock_move, self)._get_invoice_line_vals(cr, uid, move, partner, inv_type, context=context)
res.update({
'move_id': move.id,
})
return res
class stock_picking(osv.osv):
_inherit = "stock.picking"
_description = "Picking List"
def action_invoice_create(self, cr, uid, ids, journal_id=False,
group=False, type='out_invoice', context=None):
'''Return ids of created invoices for the pickings'''
res = super(stock_picking,self).action_invoice_create(cr, uid, ids, journal_id, group, type, context=context)
if type in ('in_invoice', 'in_refund'):
for inv in self.pool.get('account.invoice').browse(cr, uid, res, context=context):
for ol in inv.invoice_line:
if ol.product_id.type != 'service':
oa = ol.product_id.property_stock_account_input and ol.product_id.property_stock_account_input.id
if not oa:
oa = ol.product_id.categ_id.property_stock_account_input_categ and ol.product_id.categ_id.property_stock_account_input_categ.id
if oa:
fpos = ol.invoice_id.fiscal_position or False
a = self.pool.get('account.fiscal.position').map_account(cr, uid, fpos, oa)
self.pool.get('account.invoice.line').write(cr, uid, [ol.id], {'account_id': a})
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
nicko96/Chrome-Infra
|
refs/heads/master
|
appengine/findit/handlers/test/failure_log_test.py
|
1
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import re
from google.appengine.ext import testbed
import webapp2
import webtest
from testing_utils import testing
from handlers import failure_log
from waterfall import buildbot
from model.wf_step import WfStep
# Root directory appengine/findit.
ROOT_DIR = os.path.join(os.path.dirname(__file__),
os.path.pardir, os.path.pardir)
class FailureLogTest(testing.AppengineTestCase):
app_module = webapp2.WSGIApplication([
('/failure-log', failure_log.FailureLog),
], debug=True)
def testInvalidStepUrl(self):
step_url = 'abcde'
self.mock_current_user(user_email='test@google.com', is_admin=True)
self.assertRaisesRegexp(
webtest.app.AppError,
re.compile('.*501 Not Implemented.*Url "%s" '
'is not pointing to a step.*' % step_url,
re.MULTILINE|re.DOTALL),
self.test_app.get, '/failure-log', params={'url': step_url})
def testFailureLogNotFound(self):
master_name = 'm'
builder_name = 'b 1'
build_number = 123
step_name = 'compile'
step_url = buildbot.CreateStdioLogUrl(
master_name, builder_name, build_number, step_name)
self.mock_current_user(user_email='test@google.com', is_admin=True)
self.assertRaisesRegexp(
webtest.app.AppError,
re.compile('.*404 Not Found.*No failure log available.*',
re.MULTILINE|re.DOTALL),
self.test_app.get, '/failure-log', params={'url': step_url,
'format': 'json'})
def testFailureLogFetched(self):
master_name = 'm'
builder_name = 'b 1'
build_number = 123
step_name = 'compile'
step_url = buildbot.CreateStdioLogUrl(
master_name, builder_name, build_number, step_name)
step_log = WfStep.Create(master_name, builder_name, build_number, step_name)
step_log.log_data = 'Log has been successfully fetched!'
step_log.put()
self.mock_current_user(user_email='test@google.com', is_admin=True)
response = self.test_app.get('/failure-log', params={'url': step_url,
'format': 'json'})
expected_response = {
'master_name': 'm',
'builder_name': 'b 1',
'build_number': 123,
'step_name': 'compile',
'step_logs': 'Log has been successfully fetched!'
}
self.assertEquals(200, response.status_int)
self.assertEquals(expected_response, response.json_body)
|
adamtiger/tensorflow
|
refs/heads/master
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/chain_test.py
|
70
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Chain Tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops.bijectors.chain import Chain
from tensorflow.contrib.distributions.python.ops.bijectors.exp import Exp
from tensorflow.contrib.distributions.python.ops.bijectors.softmax_centered import SoftmaxCentered
from tensorflow.contrib.distributions.python.ops.bijectors.softplus import Softplus
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops.distributions.bijector_test_util import assert_scalar_congruency
from tensorflow.python.platform import test
class ChainBijectorTest(test.TestCase):
"""Tests the correctness of the Y = Chain(bij1, bij2, bij3) transformation."""
def testBijector(self):
with self.test_session():
chain = Chain((Exp(event_ndims=1), Softplus(event_ndims=1)))
self.assertEqual("chain_of_exp_of_softplus", chain.name)
x = np.asarray([[[1., 2.],
[2., 3.]]])
self.assertAllClose(1. + np.exp(x), chain.forward(x).eval())
self.assertAllClose(np.log(x - 1.), chain.inverse(x).eval())
self.assertAllClose(
-np.sum(np.log(x - 1.), axis=2),
chain.inverse_log_det_jacobian(x).eval())
self.assertAllClose(
np.sum(x, axis=2), chain.forward_log_det_jacobian(x).eval())
def testBijectorIdentity(self):
with self.test_session():
chain = Chain()
self.assertEqual("identity", chain.name)
x = np.asarray([[[1., 2.],
[2., 3.]]])
self.assertAllClose(x, chain.forward(x).eval())
self.assertAllClose(x, chain.inverse(x).eval())
self.assertAllClose(0., chain.inverse_log_det_jacobian(x).eval())
self.assertAllClose(0., chain.forward_log_det_jacobian(x).eval())
def testScalarCongruency(self):
with self.test_session():
bijector = Chain((Exp(), Softplus()))
assert_scalar_congruency(
bijector, lower_x=1e-3, upper_x=1.5, rtol=0.05)
def testShapeGetters(self):
with self.test_session():
bijector = Chain([
SoftmaxCentered(
event_ndims=1, validate_args=True),
SoftmaxCentered(
event_ndims=0, validate_args=True)
])
x = tensor_shape.TensorShape([])
y = tensor_shape.TensorShape([2 + 1])
self.assertAllEqual(y, bijector.forward_event_shape(x))
self.assertAllEqual(
y.as_list(),
bijector.forward_event_shape_tensor(x.as_list()).eval())
self.assertAllEqual(x, bijector.inverse_event_shape(y))
self.assertAllEqual(
x.as_list(),
bijector.inverse_event_shape_tensor(y.as_list()).eval())
if __name__ == "__main__":
test.main()
|
simzacks/jjb
|
refs/heads/master
|
jenkins_jobs/modules/metadata.py
|
25
|
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The Metadata plugin module allows metadata to be added to projects
and exposed to the job environment.
Requires the Jenkins :jenkins-wiki:`Metadata Plugin <Metadata+plugin>`.
**Component**: metadata
:Macro: metadata
:Entry Point: jenkins_jobs.metadata
Example::
metadata:
- string:
name: FOO
value: bar
expose-to-env: true
"""
import xml.etree.ElementTree as XML
import jenkins_jobs.modules.base
def base_metadata(parser, xml_parent, data, mtype):
pdef = XML.SubElement(xml_parent, mtype)
XML.SubElement(pdef, 'name').text = data['name']
XML.SubElement(pdef, 'generated').text = 'false'
XML.SubElement(pdef, 'parent', attrib={"class": "job-metadata",
"reference": "../../.."})
exposed_to_env = XML.SubElement(pdef, 'exposedToEnvironment')
exposed_to_env.text = str(data.get('expose-to-env', False)).lower()
return pdef
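# Illustrative only: for mtype='metadata-string' and
# data={'name': 'FOO', 'expose-to-env': True}, base_metadata() above emits
# roughly:
#   <metadata-string>
#     <name>FOO</name>
#     <generated>false</generated>
#     <parent class="job-metadata" reference="../../.."/>
#     <exposedToEnvironment>true</exposedToEnvironment>
#   </metadata-string>
# string_metadata() below then appends a <value> child.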
def string_metadata(parser, xml_parent, data):
"""yaml: string
A string metadata.
:arg str name: the name of the metadata
:arg str value: the value of the metadata
:arg bool expose-to-env: expose to environment (optional)
Example::
metadata:
- string:
name: FOO
value: bar
expose-to-env: true
"""
pdef = base_metadata(parser, xml_parent, data,
'metadata-string')
value = data.get('value', '')
XML.SubElement(pdef, 'value').text = value
def number_metadata(parser, xml_parent, data):
"""yaml: number
A number metadata.
:arg str name: the name of the metadata
:arg str value: the value of the metadata
:arg bool expose-to-env: expose to environment (optional)
Example::
metadata:
- number:
name: FOO
value: 1
expose-to-env: true
"""
pdef = base_metadata(parser, xml_parent, data,
'metadata-number')
value = data.get('value', '')
    # str() guards against YAML parsing the value as an int; ElementTree
    # requires element text to be a string when serializing.
    XML.SubElement(pdef, 'value').text = str(value)
def date_metadata(parser, xml_parent, data):
"""yaml: date
    A date metadata.
:arg str name: the name of the metadata
:arg str time: time value in millisec since 1970-01-01 00:00:00 UTC
:arg str timezone: time zone of the metadata
:arg bool expose-to-env: expose to environment (optional)
Example::
metadata:
- date:
name: FOO
            time: 1371708900268
timezone: Australia/Melbourne
expose-to-env: true
"""
pdef = base_metadata(parser, xml_parent, data,
'metadata-date')
# TODO: convert time from any reasonable format into epoch
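    # One possible approach for the TODO above (untested sketch; the set of
    # accepted input formats is not specified here, so 'fmt' is hypothetical):
    #   import calendar, time as _time
    #   epoch_ms = int(calendar.timegm(_time.strptime(data['time'], fmt)) * 1000)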
mval = XML.SubElement(pdef, 'value')
XML.SubElement(mval, 'time').text = data['time']
XML.SubElement(mval, 'timezone').text = data['timezone']
XML.SubElement(pdef, 'checked').text = 'true'
class Metadata(jenkins_jobs.modules.base.Base):
sequence = 21
component_type = 'metadata'
component_list_type = 'metadata'
def gen_xml(self, parser, xml_parent, data):
properties = xml_parent.find('properties')
if properties is None:
properties = XML.SubElement(xml_parent, 'properties')
metadata = data.get('metadata', [])
if metadata:
pdefp = XML.SubElement(properties,
'job-metadata', plugin="metadata@1.0b")
pdefs = XML.SubElement(pdefp, 'values')
for mdata in metadata:
self.registry.dispatch('metadata',
parser, pdefs, mdata)
|
SravanthiSinha/edx-platform
|
refs/heads/master
|
openedx/core/lib/logsettings.py
|
127
|
"""Get log settings."""
import os
import platform
import sys
from logging.handlers import SysLogHandler
LOG_LEVELS = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']
def get_logger_config(log_dir,
logging_env="no_env",
tracking_filename="tracking.log",
edx_filename="edx.log",
dev_env=False,
syslog_addr=None,
debug=False,
local_loglevel='INFO',
console_loglevel=None,
service_variant=None):
"""
    Return the appropriate logging config dictionary. You should assign the
    result of this to the LOGGING var in your settings. It is done this way,
    rather than registering the config directly, to avoid having to reset the
    logging state when this function is called multiple times as settings
    are extended.
    If dev_env is set to true, logging will not be done via local rsyslogd;
    instead, tracking and application logs will be dropped in log_dir.
    "tracking_filename" and "edx_filename" are ignored unless dev_env
    is set to true, since otherwise logging is handled by rsyslogd.
"""
# Revert to INFO if an invalid string is passed in
if local_loglevel not in LOG_LEVELS:
local_loglevel = 'INFO'
if console_loglevel is None or console_loglevel not in LOG_LEVELS:
console_loglevel = 'DEBUG' if debug else 'INFO'
if service_variant is None:
# default to a blank string so that if SERVICE_VARIANT is not
# set we will not log to a sub directory
service_variant = ''
hostname = platform.node().split(".")[0]
syslog_format = ("[service_variant={service_variant}]"
"[%(name)s][env:{logging_env}] %(levelname)s "
"[{hostname} %(process)d] [%(filename)s:%(lineno)d] "
"- %(message)s").format(service_variant=service_variant,
logging_env=logging_env,
hostname=hostname)
handlers = ['console', 'local']
if syslog_addr:
handlers.append('syslogger-remote')
logger_config = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '%(asctime)s %(levelname)s %(process)d '
'[%(name)s] %(filename)s:%(lineno)d - %(message)s',
},
'syslog_format': {'format': syslog_format},
'raw': {'format': '%(message)s'},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
}
},
'handlers': {
'console': {
'level': console_loglevel,
'class': 'logging.StreamHandler',
'formatter': 'standard',
'stream': sys.stderr,
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'newrelic': {
'level': 'ERROR',
'class': 'lms.lib.newrelic_logging.NewRelicHandler',
'formatter': 'raw',
}
},
'loggers': {
'tracking': {
'handlers': ['tracking'],
'level': 'DEBUG',
'propagate': False,
},
'': {
'handlers': handlers,
'level': 'DEBUG',
'propagate': False
},
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
if syslog_addr:
logger_config['handlers'].update({
'syslogger-remote': {
'level': 'INFO',
'class': 'logging.handlers.SysLogHandler',
'address': syslog_addr,
'formatter': 'syslog_format',
},
})
if dev_env:
tracking_file_loc = os.path.join(log_dir, tracking_filename)
edx_file_loc = os.path.join(log_dir, edx_filename)
logger_config['handlers'].update({
'local': {
'class': 'logging.handlers.RotatingFileHandler',
'level': local_loglevel,
'formatter': 'standard',
'filename': edx_file_loc,
'maxBytes': 1024 * 1024 * 2,
'backupCount': 5,
},
'tracking': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': tracking_file_loc,
'formatter': 'raw',
'maxBytes': 1024 * 1024 * 2,
'backupCount': 5,
},
})
else:
# for production environments we will only
# log INFO and up
logger_config['loggers']['']['level'] = 'INFO'
logger_config['handlers'].update({
'local': {
'level': local_loglevel,
'class': 'logging.handlers.SysLogHandler',
'address': '/dev/log',
'formatter': 'syslog_format',
'facility': SysLogHandler.LOG_LOCAL0,
},
'tracking': {
'level': 'DEBUG',
'class': 'logging.handlers.SysLogHandler',
'address': '/dev/log',
'facility': SysLogHandler.LOG_LOCAL1,
'formatter': 'raw',
},
})
return logger_config
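# Minimal usage sketch (illustrative; the path below is hypothetical): assign
# the returned dict to LOGGING in a Django settings module, which Django then
# passes to logging.config.dictConfig() at startup, e.g.
#   from openedx.core.lib.logsettings import get_logger_config
#   LOGGING = get_logger_config(log_dir='/var/log/edx', dev_env=True, debug=True)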
|
foobarbazblarg/stayclean
|
refs/heads/master
|
stayclean-2015-september/display.py
|
1
|
#!/usr/bin/python
import participantCollection
import re
import datetime
import pyperclip
currentMonthIndex = datetime.date.today().month
#TODO: need to figure out how to get total days in current month...
currentMonthTotalDays = 30
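# A possible answer to the TODO above (untested sketch): calendar.monthrange
# returns (weekday_of_first_day, days_in_month), so
#   import calendar
#   currentMonthTotalDays = calendar.monthrange(datetime.date.today().year, currentMonthIndex)[1]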
currentMonthPenultimateDayIndex = currentMonthTotalDays - 1
currentMonthName = {1:'January', 2:'February', 3:'March', 4:'April', 5:'May', 6:'June', 7:'July', 8:'August', 9:'September', 10:'October', 11:'November', 12:'December'}[currentMonthIndex]
nextMonthIndex = currentMonthIndex % 12 + 1
nextMonthName = {1:'January', 2:'February', 3:'March', 4:'April', 5:'May', 6:'June', 7:'July', 8:'August', 9:'September', 10:'October', 11:'November', 12:'December'}[nextMonthIndex]
currentDayOfMonthIndex = datetime.date.today().day
# TODO: testing...
currentDayOfMonthIndex = 29
# TODO: more...
currentDayOfMonthName = {1:'first', 2:'second', 3:'third', 4:'fourth', 5:'fifth', 6:'sixth', 7:'seventh', 8:'eighth', 9:'ninth', 10:'tenth', 11:'eleventh', 12:'twelfth', 13:'thirteenth', 14:'fourteenth', 15:'fifteenth', 16:'sixteenth', 17:'seventeenth', 18:'eighteenth', 19:'nineteenth', 20:'twentieth', 21:'twenty-first', 22:'twenty-second', 23:'twenty-third', 24:'twenty-fourth', 25:'twenty-fifth', 26:'twenty-sixth', 27:'twenty-seventh', 28:'twenty-eighth', 29:'twenty-ninth', 30:'thirtieth', 31:'thirty-first'}[currentDayOfMonthIndex]
currentDayOfWeekName = {0:'Monday', 1:'Tuesday', 2:'Wednesday', 3:'Thursday', 4:'Friday', 5:'Saturday', 6:'Sunday'}[datetime.date.today().weekday()]
participantCollection = participantCollection.ParticipantCollection()
numberStillIn = participantCollection.sizeOfParticipantsWhoAreStillIn()
initialNumber = participantCollection.size()
percentStillIn = int(round(100.0 * numberStillIn / initialNumber))  # float math avoids Python 2 integer truncation
# print "There are currently **" + str(numberStillIn) + " out of " + str(initialNumber) +"** original participants. That's **" + str(int(round(100*numberStillIn/initialNumber,0))) + "%** Here is the list of participants still with the challenge:\n"
def stringToPrintLegacy():
answer = "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
answer = re.sub( 'NUMBER_STILL_IN', str(numberStillIn), answer )
answer = re.sub( 'INITIAL_NUMBER', str(initialNumber), answer )
answer = re.sub( 'PERCENT_STILL_IN', str(percentStillIn), answer )
for participant in participantCollection.participantsWhoAreStillIn():
answer += "/u/" + participant.name
if not participant.hasCheckedIn:
answer += " ~"
answer += "\n\n"
return answer
def templateForParticipants():
answer = ""
for participant in participantCollection.participantsWhoAreStillIn():
answer += "/u/" + participant.name
if not participant.hasCheckedIn:
answer += " ~"
answer += "\n\n"
return answer
def templateForParticipantsOnFinalDay():
answer = ""
answer += "These participants have checked in at least once in the last 15 days:\n"
answer += "\n"
for participant in participantCollection.participantsWhoAreStillInAndHaveCheckedIn():
answer += "/u/" + participant.name + "\n"
answer += "\n"
answer += "These participants have not reported a relapse, so they are still in the running, but **if they do not check in by the end of today, they will be removed from the list, and will not be considered victorious**:\n"
answer += "\n"
for participant in participantCollection.participantsWhoAreStillInAndHaveNotCheckedIn():
answer += "/u/" + participant.name + " ~\n"
answer += "\n"
return answer
def templateFor1():
print '1\n\n'
answer = ""
print "============================================================="
answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean: CURRENT_MONTH_NAME challenge. We will no longer be accepting new signups. Best of luck to everyone here!\n"
answer += "\n"
answer += "Here's how this thing works:\n"
answer += "\n"
answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
answer += '- If you have a "~" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
answer += "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
answer += "\n"
answer += "Good luck!\n"
answer += "\n"
answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
answer += "\n"
answer += "Here are our **INITIAL_NUMBER** original participants:\n\n"
answer += templateForParticipants()
print "============================================================="
return answer
def templateFor2to9():
print '2 to 9\n\n'
answer = ""
answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean: CURRENT_MONTH_NAME challenge. Keep fighting the good fight!\n"
answer += "\n"
answer += "Guidelines:\n"
answer += "\n"
answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
answer += '- If you have a "~" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
answer += "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
answer += "\n"
answer += "Good luck!\n"
answer += "\n"
answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
answer += "\n"
answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
answer += templateForParticipants()
return answer
def templateFor10to14():
print '10 to 14\n\n'
answer = ""
answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean: CURRENT_MONTH_NAME challenge. Keep fighting the good fight!\n"
answer += "\n"
answer += "**THE COUNTDOWN: Attention everyone!** You have " + str(15-currentDayOfMonthIndex) + " days to make an update comment (if you haven't already) to be counted as an active participant! **Otherwise your name will be REMOVED from the list** on CURRENT_MONTH_INDEX/15!!\n"
answer += "\n"
answer += "Guidelines:\n"
answer += "\n"
answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
answer += '- If you have a "~" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
answer += "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
answer += "\n"
answer += "Good luck!\n"
answer += "\n"
answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
answer += "\n"
answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
answer += templateForParticipants()
return answer
def templateFor15():
print '15\n\n'
answer = ""
answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean: CURRENT_MONTH_NAME challenge. Keep fighting the good fight!\n"
answer += "\n"
answer += "**THIS IS YOUR LAST DAY TO CHECK IN** (if you haven't already) **BEFORE YOUR NAME IS REMOVED FROM THE LIST!** Check in by posting a brief comment.\n"
answer += "\n"
answer += "Guidelines:\n"
answer += "\n"
answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
answer += '- If you have a "~" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
answer += "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
answer += "\n"
answer += "Good luck!\n"
answer += "\n"
answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
answer += "\n"
answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
answer += templateForParticipants()
return answer
def templateFor16toPenultimate():
print '16 to penultimate\n\n'
answer = ""
answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean: CURRENT_MONTH_NAME challenge. Keep fighting the good fight!\n"
answer += "\n"
answer += "If you think you should still be on this list but aren't, you probably got removed in the great purge of CURRENT_MONTH_NAME 15th because you never checked in. However, if you let me know you're still with it I might re-add you.\n"
answer += "\n"
answer += "Guidelines:\n"
answer += "\n"
answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
answer += '- If you have a "~" after your name, you have yet to check in on any update threads since CURRENT_MONTH_NAME 15. If it is still there by CURRENT_MONTH_NAME CURRENT_MONTH_TOTAL_DAYS, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
answer += "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
answer += "\n"
answer += "Good luck!\n"
answer += "\n"
answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
answer += "\n"
answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
answer += templateForParticipants()
return answer
def templateForUltimate():
print 'Ultimate\n\n'
answer = ""
answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the last day of the Stay Clean: CURRENT_MONTH_NAME challenge. This is it, folks, the day we've been waiting for... the final day of the challenge. I'll be making a congratulatory post tomorrow to honor the victors. I'm really proud of everyone who signed up for this challenge. Quitting porn is difficult, especially in an era where porn is always as close as a few keystrokes, and triggers are absolutely everywhere. Everybody who gave it their best shot deserves to take a minute right now to feel good about themselves.\n"
answer += "\n"
answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
answer += "\n"
#TODO: need to do the part where it lists the checked in and non-checked in participants separately.
answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**.\n\n"
answer += templateForParticipantsOnFinalDay()
return answer
def templateToUse():
# return stringToPrintLegacy()
if currentDayOfMonthIndex == 1:
return templateFor1()
#elif ( currentDayOfMonthIndex >= 2 ) and ( currentDayOfMonthIndex <= 9 ):
elif ( 2 <= currentDayOfMonthIndex <= 9 ):
return templateFor2to9()
#elif ( currentDayOfMonthIndex >= 10 ) and ( currentDayOfMonthIndex <= 14 ):
elif ( 10 <= currentDayOfMonthIndex <= 14 ):
return templateFor10to14()
    elif currentDayOfMonthIndex == 15:
return templateFor15()
#elif ( currentDayOfMonthIndex >= 16 ) and ( currentDayOfMonthIndex <= 14 ):
elif ( currentDayOfMonthIndex >= 16 ) and ( currentDayOfMonthIndex <= currentMonthPenultimateDayIndex ):
return templateFor16toPenultimate()
else:
return templateForUltimate()
def stringToPrint():
answer = templateToUse()
answer = re.sub( 'NUMBER_STILL_IN', str(numberStillIn), answer )
answer = re.sub( 'INITIAL_NUMBER', str(initialNumber), answer )
answer = re.sub( 'PERCENT_STILL_IN', str(percentStillIn), answer )
answer = re.sub( 'CURRENT_MONTH_INDEX', str(currentMonthIndex), answer )
answer = re.sub( 'CURRENT_MONTH_TOTAL_DAYS', str(currentMonthTotalDays), answer )
answer = re.sub( 'CURRENT_MONTH_PENULTIMATE_DAY_INDEX', str(currentMonthPenultimateDayIndex), answer )
answer = re.sub( 'CURRENT_MONTH_NAME', currentMonthName, answer )
answer = re.sub( 'NEXT_MONTH_INDEX', str(nextMonthIndex), answer )
answer = re.sub( 'NEXT_MONTH_NAME', nextMonthName, answer )
answer = re.sub( 'CURRENT_DAY_OF_MONTH_INDEX', str(currentDayOfMonthIndex), answer )
answer = re.sub( 'CURRENT_DAY_OF_MONTH_NAME', currentDayOfMonthName, answer )
answer = re.sub( 'CURRENT_DAY_OF_WEEK_NAME', currentDayOfWeekName, answer )
return answer
outputString = stringToPrint()
print "============================================================="
print outputString
print "============================================================="
pyperclip.copy(outputString)
# print re.sub('FOO', 'there', 'hello FOO yall')
# for participant in participantCollection.participantsWhoAreStillIn():
# if participant.hasCheckedIn:
# print "/u/" + participant.name
# else:
# print "/u/" + participant.name + " ~"
# print ""
|
vicky2135/lucious
|
refs/heads/master
|
oscar/lib/python2.7/site-packages/sqlparse/keywords.py
|
10
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Andi Albrecht, albrecht.andi@gmail.com
#
# This module is part of python-sqlparse and is released under
# the BSD License: https://opensource.org/licenses/BSD-3-Clause
import re
from sqlparse import tokens
def is_keyword(value):
val = value.upper()
return (KEYWORDS_COMMON.get(val) or
KEYWORDS_ORACLE.get(val) or
KEYWORDS.get(val, tokens.Name)), value
SQL_REGEX = {
'root': [
(r'(--|# )\+.*?(\r\n|\r|\n|$)', tokens.Comment.Single.Hint),
(r'/\*\+[\s\S]*?\*/', tokens.Comment.Multiline.Hint),
(r'(--|# ).*?(\r\n|\r|\n|$)', tokens.Comment.Single),
(r'/\*[\s\S]*?\*/', tokens.Comment.Multiline),
(r'(\r\n|\r|\n)', tokens.Newline),
(r'\s+', tokens.Whitespace),
(r':=', tokens.Assignment),
(r'::', tokens.Punctuation),
(r'\*', tokens.Wildcard),
(r"`(``|[^`])*`", tokens.Name),
(r"´(´´|[^´])*´", tokens.Name),
(r'(\$(?:[_A-Z]\w*)?\$)[\s\S]*?\1', tokens.Literal),
(r'\?', tokens.Name.Placeholder),
(r'%(\(\w+\))?s', tokens.Name.Placeholder),
(r'(?<!\w)[$:?]\w+', tokens.Name.Placeholder),
# FIXME(andi): VALUES shouldn't be listed here
# see https://github.com/andialbrecht/sqlparse/pull/64
        # IN is special: it may be followed by a parenthesis, but
        # it is never a function; see issue183
(r'(CASE|IN|VALUES|USING)\b', tokens.Keyword),
(r'(@|##|#)[A-Z]\w+', tokens.Name),
# see issue #39
# Spaces around period `schema . name` are valid identifier
# TODO: Spaces before period not implemented
(r'[A-Z]\w*(?=\s*\.)', tokens.Name), # 'Name' .
(r'(?<=\.)[A-Z]\w*', tokens.Name), # .'Name'
(r'[A-Z]\w*(?=\()', tokens.Name), # side effect: change kw to func
# TODO: `1.` and `.1` are valid numbers
(r'-?0x[\dA-F]+', tokens.Number.Hexadecimal),
(r'-?\d*(\.\d+)?E-?\d+', tokens.Number.Float),
(r'-?\d*\.\d+', tokens.Number.Float),
(r'-?\d+', tokens.Number.Integer),
(r"'(''|\\\\|\\'|[^'])*'", tokens.String.Single),
# not a real string literal in ANSI SQL:
(r'(""|".*?[^\\]")', tokens.String.Symbol),
# sqlite names can be escaped with [square brackets]. left bracket
# cannot be preceded by word character or a right bracket --
# otherwise it's probably an array index
(r'(?<![\w\])])(\[[^\]]+\])', tokens.Name),
(r'((LEFT\s+|RIGHT\s+|FULL\s+)?(INNER\s+|OUTER\s+|STRAIGHT\s+)?'
r'|(CROSS\s+|NATURAL\s+)?)?JOIN\b', tokens.Keyword),
(r'END(\s+IF|\s+LOOP|\s+WHILE)?\b', tokens.Keyword),
(r'NOT\s+NULL\b', tokens.Keyword),
(r'UNION\s+ALL\b', tokens.Keyword),
(r'CREATE(\s+OR\s+REPLACE)?\b', tokens.Keyword.DDL),
(r'DOUBLE\s+PRECISION\b', tokens.Name.Builtin),
(r'[_A-Z][_$#\w]*', is_keyword),
(r'[;:()\[\],\.]', tokens.Punctuation),
(r'[<>=~!]+', tokens.Operator.Comparison),
(r'[+/@#%^&|`?^-]+', tokens.Operator),
]}
FLAGS = re.IGNORECASE | re.UNICODE
SQL_REGEX = [(re.compile(rx, FLAGS).match, tt) for rx, tt in SQL_REGEX['root']]
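# Sketch of how these (matcher, action) pairs are consumed (an assumption
# about sqlparse's lexer, which lives outside this module): matchers are tried
# in order at the current position; a plain token type is yielded as-is, while
# a callable action such as is_keyword() returns the (tokentype, value) pair
# itself, e.g. roughly:
#   for match, action in SQL_REGEX:
#       m = match(text, pos)
#       if m and callable(action):
#           yield action(m.group())
#       elif m:
#           yield action, m.group()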
KEYWORDS = {
'ABORT': tokens.Keyword,
'ABS': tokens.Keyword,
'ABSOLUTE': tokens.Keyword,
'ACCESS': tokens.Keyword,
'ADA': tokens.Keyword,
'ADD': tokens.Keyword,
'ADMIN': tokens.Keyword,
'AFTER': tokens.Keyword,
'AGGREGATE': tokens.Keyword,
'ALIAS': tokens.Keyword,
'ALL': tokens.Keyword,
'ALLOCATE': tokens.Keyword,
'ANALYSE': tokens.Keyword,
'ANALYZE': tokens.Keyword,
'ANY': tokens.Keyword,
'ARRAYLEN': tokens.Keyword,
'ARE': tokens.Keyword,
'ASC': tokens.Keyword.Order,
'ASENSITIVE': tokens.Keyword,
'ASSERTION': tokens.Keyword,
'ASSIGNMENT': tokens.Keyword,
'ASYMMETRIC': tokens.Keyword,
'AT': tokens.Keyword,
'ATOMIC': tokens.Keyword,
'AUDIT': tokens.Keyword,
'AUTHORIZATION': tokens.Keyword,
'AVG': tokens.Keyword,
'BACKWARD': tokens.Keyword,
'BEFORE': tokens.Keyword,
'BEGIN': tokens.Keyword,
'BETWEEN': tokens.Keyword,
'BITVAR': tokens.Keyword,
'BIT_LENGTH': tokens.Keyword,
'BOTH': tokens.Keyword,
'BREADTH': tokens.Keyword,
# 'C': tokens.Keyword, # most likely this is an alias
'CACHE': tokens.Keyword,
'CALL': tokens.Keyword,
'CALLED': tokens.Keyword,
'CARDINALITY': tokens.Keyword,
'CASCADE': tokens.Keyword,
'CASCADED': tokens.Keyword,
'CAST': tokens.Keyword,
'CATALOG': tokens.Keyword,
'CATALOG_NAME': tokens.Keyword,
'CHAIN': tokens.Keyword,
'CHARACTERISTICS': tokens.Keyword,
'CHARACTER_LENGTH': tokens.Keyword,
'CHARACTER_SET_CATALOG': tokens.Keyword,
'CHARACTER_SET_NAME': tokens.Keyword,
'CHARACTER_SET_SCHEMA': tokens.Keyword,
'CHAR_LENGTH': tokens.Keyword,
'CHECK': tokens.Keyword,
'CHECKED': tokens.Keyword,
'CHECKPOINT': tokens.Keyword,
'CLASS': tokens.Keyword,
'CLASS_ORIGIN': tokens.Keyword,
'CLOB': tokens.Keyword,
'CLOSE': tokens.Keyword,
'CLUSTER': tokens.Keyword,
'COALESCE': tokens.Keyword,
'COBOL': tokens.Keyword,
'COLLATE': tokens.Keyword,
'COLLATION': tokens.Keyword,
'COLLATION_CATALOG': tokens.Keyword,
'COLLATION_NAME': tokens.Keyword,
'COLLATION_SCHEMA': tokens.Keyword,
'COLLECT': tokens.Keyword,
'COLUMN': tokens.Keyword,
'COLUMN_NAME': tokens.Keyword,
'COMPRESS': tokens.Keyword,
'COMMAND_FUNCTION': tokens.Keyword,
'COMMAND_FUNCTION_CODE': tokens.Keyword,
'COMMENT': tokens.Keyword,
'COMMIT': tokens.Keyword.DML,
'COMMITTED': tokens.Keyword,
'COMPLETION': tokens.Keyword,
'CONCURRENTLY': tokens.Keyword,
'CONDITION_NUMBER': tokens.Keyword,
'CONNECT': tokens.Keyword,
'CONNECTION': tokens.Keyword,
'CONNECTION_NAME': tokens.Keyword,
'CONSTRAINT': tokens.Keyword,
'CONSTRAINTS': tokens.Keyword,
'CONSTRAINT_CATALOG': tokens.Keyword,
'CONSTRAINT_NAME': tokens.Keyword,
'CONSTRAINT_SCHEMA': tokens.Keyword,
'CONSTRUCTOR': tokens.Keyword,
'CONTAINS': tokens.Keyword,
'CONTINUE': tokens.Keyword,
'CONVERSION': tokens.Keyword,
'CONVERT': tokens.Keyword,
'COPY': tokens.Keyword,
    'CORRESPONDING': tokens.Keyword,
'COUNT': tokens.Keyword,
'CREATEDB': tokens.Keyword,
'CREATEUSER': tokens.Keyword,
'CROSS': tokens.Keyword,
'CUBE': tokens.Keyword,
'CURRENT': tokens.Keyword,
'CURRENT_DATE': tokens.Keyword,
'CURRENT_PATH': tokens.Keyword,
'CURRENT_ROLE': tokens.Keyword,
'CURRENT_TIME': tokens.Keyword,
'CURRENT_TIMESTAMP': tokens.Keyword,
'CURRENT_USER': tokens.Keyword,
'CURSOR': tokens.Keyword,
'CURSOR_NAME': tokens.Keyword,
'CYCLE': tokens.Keyword,
'DATA': tokens.Keyword,
'DATABASE': tokens.Keyword,
'DATETIME_INTERVAL_CODE': tokens.Keyword,
'DATETIME_INTERVAL_PRECISION': tokens.Keyword,
'DAY': tokens.Keyword,
'DEALLOCATE': tokens.Keyword,
'DECLARE': tokens.Keyword,
'DEFAULT': tokens.Keyword,
'DEFAULTS': tokens.Keyword,
'DEFERRABLE': tokens.Keyword,
'DEFERRED': tokens.Keyword,
'DEFINED': tokens.Keyword,
'DEFINER': tokens.Keyword,
'DELIMITER': tokens.Keyword,
'DELIMITERS': tokens.Keyword,
'DEREF': tokens.Keyword,
'DESC': tokens.Keyword.Order,
'DESCRIBE': tokens.Keyword,
'DESCRIPTOR': tokens.Keyword,
'DESTROY': tokens.Keyword,
'DESTRUCTOR': tokens.Keyword,
'DETERMINISTIC': tokens.Keyword,
'DIAGNOSTICS': tokens.Keyword,
'DICTIONARY': tokens.Keyword,
'DISABLE': tokens.Keyword,
'DISCONNECT': tokens.Keyword,
'DISPATCH': tokens.Keyword,
'DO': tokens.Keyword,
'DOMAIN': tokens.Keyword,
'DYNAMIC': tokens.Keyword,
'DYNAMIC_FUNCTION': tokens.Keyword,
'DYNAMIC_FUNCTION_CODE': tokens.Keyword,
'EACH': tokens.Keyword,
'ENABLE': tokens.Keyword,
'ENCODING': tokens.Keyword,
'ENCRYPTED': tokens.Keyword,
'END-EXEC': tokens.Keyword,
'EQUALS': tokens.Keyword,
'ESCAPE': tokens.Keyword,
'EVERY': tokens.Keyword,
'EXCEPT': tokens.Keyword,
'EXCEPTION': tokens.Keyword,
'EXCLUDING': tokens.Keyword,
'EXCLUSIVE': tokens.Keyword,
'EXEC': tokens.Keyword,
'EXECUTE': tokens.Keyword,
'EXISTING': tokens.Keyword,
'EXISTS': tokens.Keyword,
'EXTERNAL': tokens.Keyword,
'EXTRACT': tokens.Keyword,
'FALSE': tokens.Keyword,
'FETCH': tokens.Keyword,
'FILE': tokens.Keyword,
'FINAL': tokens.Keyword,
'FIRST': tokens.Keyword,
'FORCE': tokens.Keyword,
'FOREACH': tokens.Keyword,
'FOREIGN': tokens.Keyword,
'FORTRAN': tokens.Keyword,
'FORWARD': tokens.Keyword,
'FOUND': tokens.Keyword,
'FREE': tokens.Keyword,
'FREEZE': tokens.Keyword,
'FULL': tokens.Keyword,
'FUNCTION': tokens.Keyword,
# 'G': tokens.Keyword,
'GENERAL': tokens.Keyword,
'GENERATED': tokens.Keyword,
'GET': tokens.Keyword,
'GLOBAL': tokens.Keyword,
'GO': tokens.Keyword,
'GOTO': tokens.Keyword,
'GRANT': tokens.Keyword,
'GRANTED': tokens.Keyword,
'GROUPING': tokens.Keyword,
'HANDLER': tokens.Keyword,
'HAVING': tokens.Keyword,
'HIERARCHY': tokens.Keyword,
'HOLD': tokens.Keyword,
'HOST': tokens.Keyword,
'IDENTIFIED': tokens.Keyword,
'IDENTITY': tokens.Keyword,
'IGNORE': tokens.Keyword,
'ILIKE': tokens.Keyword,
'IMMEDIATE': tokens.Keyword,
'IMMUTABLE': tokens.Keyword,
'IMPLEMENTATION': tokens.Keyword,
'IMPLICIT': tokens.Keyword,
'INCLUDING': tokens.Keyword,
'INCREMENT': tokens.Keyword,
'INDEX': tokens.Keyword,
    'INDICATOR': tokens.Keyword,
'INFIX': tokens.Keyword,
'INHERITS': tokens.Keyword,
'INITIAL': tokens.Keyword,
'INITIALIZE': tokens.Keyword,
'INITIALLY': tokens.Keyword,
'INOUT': tokens.Keyword,
'INPUT': tokens.Keyword,
'INSENSITIVE': tokens.Keyword,
'INSTANTIABLE': tokens.Keyword,
'INSTEAD': tokens.Keyword,
'INTERSECT': tokens.Keyword,
'INTO': tokens.Keyword,
'INVOKER': tokens.Keyword,
'IS': tokens.Keyword,
'ISNULL': tokens.Keyword,
'ISOLATION': tokens.Keyword,
'ITERATE': tokens.Keyword,
# 'K': tokens.Keyword,
'KEY': tokens.Keyword,
'KEY_MEMBER': tokens.Keyword,
'KEY_TYPE': tokens.Keyword,
'LANCOMPILER': tokens.Keyword,
'LANGUAGE': tokens.Keyword,
'LARGE': tokens.Keyword,
'LAST': tokens.Keyword,
'LATERAL': tokens.Keyword,
'LEADING': tokens.Keyword,
'LENGTH': tokens.Keyword,
'LESS': tokens.Keyword,
'LEVEL': tokens.Keyword,
'LIMIT': tokens.Keyword,
'LISTEN': tokens.Keyword,
'LOAD': tokens.Keyword,
'LOCAL': tokens.Keyword,
'LOCALTIME': tokens.Keyword,
'LOCALTIMESTAMP': tokens.Keyword,
'LOCATION': tokens.Keyword,
'LOCATOR': tokens.Keyword,
'LOCK': tokens.Keyword,
'LOWER': tokens.Keyword,
# 'M': tokens.Keyword,
'MAP': tokens.Keyword,
'MATCH': tokens.Keyword,
'MAXEXTENTS': tokens.Keyword,
'MAXVALUE': tokens.Keyword,
'MESSAGE_LENGTH': tokens.Keyword,
'MESSAGE_OCTET_LENGTH': tokens.Keyword,
'MESSAGE_TEXT': tokens.Keyword,
'METHOD': tokens.Keyword,
'MINUTE': tokens.Keyword,
'MINUS': tokens.Keyword,
'MINVALUE': tokens.Keyword,
'MOD': tokens.Keyword,
'MODE': tokens.Keyword,
'MODIFIES': tokens.Keyword,
'MODIFY': tokens.Keyword,
'MONTH': tokens.Keyword,
'MORE': tokens.Keyword,
'MOVE': tokens.Keyword,
'MUMPS': tokens.Keyword,
'NAMES': tokens.Keyword,
'NATIONAL': tokens.Keyword,
'NATURAL': tokens.Keyword,
'NCHAR': tokens.Keyword,
'NCLOB': tokens.Keyword,
'NEW': tokens.Keyword,
'NEXT': tokens.Keyword,
'NO': tokens.Keyword,
'NOAUDIT': tokens.Keyword,
'NOCOMPRESS': tokens.Keyword,
'NOCREATEDB': tokens.Keyword,
'NOCREATEUSER': tokens.Keyword,
'NONE': tokens.Keyword,
'NOT': tokens.Keyword,
'NOTFOUND': tokens.Keyword,
'NOTHING': tokens.Keyword,
'NOTIFY': tokens.Keyword,
'NOTNULL': tokens.Keyword,
'NOWAIT': tokens.Keyword,
'NULL': tokens.Keyword,
'NULLABLE': tokens.Keyword,
'NULLIF': tokens.Keyword,
'OBJECT': tokens.Keyword,
'OCTET_LENGTH': tokens.Keyword,
'OF': tokens.Keyword,
'OFF': tokens.Keyword,
'OFFLINE': tokens.Keyword,
'OFFSET': tokens.Keyword,
'OIDS': tokens.Keyword,
'OLD': tokens.Keyword,
'ONLINE': tokens.Keyword,
'ONLY': tokens.Keyword,
'OPEN': tokens.Keyword,
'OPERATION': tokens.Keyword,
'OPERATOR': tokens.Keyword,
'OPTION': tokens.Keyword,
'OPTIONS': tokens.Keyword,
'ORDINALITY': tokens.Keyword,
'OUT': tokens.Keyword,
'OUTPUT': tokens.Keyword,
'OVERLAPS': tokens.Keyword,
'OVERLAY': tokens.Keyword,
'OVERRIDING': tokens.Keyword,
'OWNER': tokens.Keyword,
'PAD': tokens.Keyword,
'PARAMETER': tokens.Keyword,
'PARAMETERS': tokens.Keyword,
'PARAMETER_MODE': tokens.Keyword,
    'PARAMETER_NAME': tokens.Keyword,
    'PARAMETER_ORDINAL_POSITION': tokens.Keyword,
    'PARAMETER_SPECIFIC_CATALOG': tokens.Keyword,
    'PARAMETER_SPECIFIC_NAME': tokens.Keyword,
    'PARAMETER_SPECIFIC_SCHEMA': tokens.Keyword,
'PARTIAL': tokens.Keyword,
'PASCAL': tokens.Keyword,
'PCTFREE': tokens.Keyword,
'PENDANT': tokens.Keyword,
'PLACING': tokens.Keyword,
'PLI': tokens.Keyword,
'POSITION': tokens.Keyword,
'POSTFIX': tokens.Keyword,
'PRECISION': tokens.Keyword,
'PREFIX': tokens.Keyword,
'PREORDER': tokens.Keyword,
'PREPARE': tokens.Keyword,
'PRESERVE': tokens.Keyword,
'PRIMARY': tokens.Keyword,
'PRIOR': tokens.Keyword,
'PRIVILEGES': tokens.Keyword,
'PROCEDURAL': tokens.Keyword,
'PROCEDURE': tokens.Keyword,
'PUBLIC': tokens.Keyword,
'RAISE': tokens.Keyword,
'RAW': tokens.Keyword,
'READ': tokens.Keyword,
'READS': tokens.Keyword,
'RECHECK': tokens.Keyword,
'RECURSIVE': tokens.Keyword,
'REF': tokens.Keyword,
'REFERENCES': tokens.Keyword,
'REFERENCING': tokens.Keyword,
'REINDEX': tokens.Keyword,
'RELATIVE': tokens.Keyword,
'RENAME': tokens.Keyword,
'REPEATABLE': tokens.Keyword,
'RESET': tokens.Keyword,
'RESOURCE': tokens.Keyword,
'RESTART': tokens.Keyword,
'RESTRICT': tokens.Keyword,
'RESULT': tokens.Keyword,
'RETURN': tokens.Keyword,
'RETURNED_LENGTH': tokens.Keyword,
'RETURNED_OCTET_LENGTH': tokens.Keyword,
'RETURNED_SQLSTATE': tokens.Keyword,
'RETURNING': tokens.Keyword,
'RETURNS': tokens.Keyword,
'REVOKE': tokens.Keyword,
'RIGHT': tokens.Keyword,
'ROLE': tokens.Keyword,
'ROLLBACK': tokens.Keyword.DML,
'ROLLUP': tokens.Keyword,
'ROUTINE': tokens.Keyword,
'ROUTINE_CATALOG': tokens.Keyword,
'ROUTINE_NAME': tokens.Keyword,
'ROUTINE_SCHEMA': tokens.Keyword,
'ROW': tokens.Keyword,
'ROWS': tokens.Keyword,
'ROW_COUNT': tokens.Keyword,
'RULE': tokens.Keyword,
'SAVE_POINT': tokens.Keyword,
'SCALE': tokens.Keyword,
'SCHEMA': tokens.Keyword,
'SCHEMA_NAME': tokens.Keyword,
'SCOPE': tokens.Keyword,
'SCROLL': tokens.Keyword,
'SEARCH': tokens.Keyword,
'SECOND': tokens.Keyword,
'SECURITY': tokens.Keyword,
'SELF': tokens.Keyword,
'SENSITIVE': tokens.Keyword,
'SEQUENCE': tokens.Keyword,
'SERIALIZABLE': tokens.Keyword,
'SERVER_NAME': tokens.Keyword,
'SESSION': tokens.Keyword,
'SESSION_USER': tokens.Keyword,
'SETOF': tokens.Keyword,
'SETS': tokens.Keyword,
'SHARE': tokens.Keyword,
'SHOW': tokens.Keyword,
'SIMILAR': tokens.Keyword,
'SIMPLE': tokens.Keyword,
'SIZE': tokens.Keyword,
'SOME': tokens.Keyword,
'SOURCE': tokens.Keyword,
'SPACE': tokens.Keyword,
'SPECIFIC': tokens.Keyword,
'SPECIFICTYPE': tokens.Keyword,
'SPECIFIC_NAME': tokens.Keyword,
'SQL': tokens.Keyword,
'SQLBUF': tokens.Keyword,
'SQLCODE': tokens.Keyword,
'SQLERROR': tokens.Keyword,
'SQLEXCEPTION': tokens.Keyword,
'SQLSTATE': tokens.Keyword,
'SQLWARNING': tokens.Keyword,
'STABLE': tokens.Keyword,
'START': tokens.Keyword.DML,
# 'STATE': tokens.Keyword,
'STATEMENT': tokens.Keyword,
'STATIC': tokens.Keyword,
'STATISTICS': tokens.Keyword,
'STDIN': tokens.Keyword,
'STDOUT': tokens.Keyword,
'STORAGE': tokens.Keyword,
'STRICT': tokens.Keyword,
'STRUCTURE': tokens.Keyword,
'STYPE': tokens.Keyword,
'SUBCLASS_ORIGIN': tokens.Keyword,
'SUBLIST': tokens.Keyword,
'SUBSTRING': tokens.Keyword,
'SUCCESSFUL': tokens.Keyword,
'SUM': tokens.Keyword,
'SYMMETRIC': tokens.Keyword,
'SYNONYM': tokens.Keyword,
'SYSID': tokens.Keyword,
'SYSTEM': tokens.Keyword,
'SYSTEM_USER': tokens.Keyword,
'TABLE': tokens.Keyword,
'TABLE_NAME': tokens.Keyword,
'TEMP': tokens.Keyword,
'TEMPLATE': tokens.Keyword,
'TEMPORARY': tokens.Keyword,
'TERMINATE': tokens.Keyword,
'THAN': tokens.Keyword,
'TIMESTAMP': tokens.Keyword,
'TIMEZONE_HOUR': tokens.Keyword,
'TIMEZONE_MINUTE': tokens.Keyword,
'TO': tokens.Keyword,
'TOAST': tokens.Keyword,
'TRAILING': tokens.Keyword,
    'TRANSACTION': tokens.Keyword,
    'TRANSACTIONS_COMMITTED': tokens.Keyword,
    'TRANSACTIONS_ROLLED_BACK': tokens.Keyword,
    'TRANSACTION_ACTIVE': tokens.Keyword,
'TRANSFORM': tokens.Keyword,
'TRANSFORMS': tokens.Keyword,
'TRANSLATE': tokens.Keyword,
'TRANSLATION': tokens.Keyword,
'TREAT': tokens.Keyword,
'TRIGGER': tokens.Keyword,
'TRIGGER_CATALOG': tokens.Keyword,
'TRIGGER_NAME': tokens.Keyword,
'TRIGGER_SCHEMA': tokens.Keyword,
'TRIM': tokens.Keyword,
'TRUE': tokens.Keyword,
'TRUNCATE': tokens.Keyword,
'TRUSTED': tokens.Keyword,
'TYPE': tokens.Keyword,
'UID': tokens.Keyword,
'UNCOMMITTED': tokens.Keyword,
'UNDER': tokens.Keyword,
'UNENCRYPTED': tokens.Keyword,
'UNION': tokens.Keyword,
'UNIQUE': tokens.Keyword,
'UNKNOWN': tokens.Keyword,
'UNLISTEN': tokens.Keyword,
'UNNAMED': tokens.Keyword,
'UNNEST': tokens.Keyword,
'UNTIL': tokens.Keyword,
'UPPER': tokens.Keyword,
'USAGE': tokens.Keyword,
'USE': tokens.Keyword,
'USER': tokens.Keyword,
'USER_DEFINED_TYPE_CATALOG': tokens.Keyword,
'USER_DEFINED_TYPE_NAME': tokens.Keyword,
'USER_DEFINED_TYPE_SCHEMA': tokens.Keyword,
'USING': tokens.Keyword,
'VACUUM': tokens.Keyword,
'VALID': tokens.Keyword,
'VALIDATE': tokens.Keyword,
'VALIDATOR': tokens.Keyword,
'VALUES': tokens.Keyword,
'VARIABLE': tokens.Keyword,
'VERBOSE': tokens.Keyword,
'VERSION': tokens.Keyword,
'VIEW': tokens.Keyword,
'VOLATILE': tokens.Keyword,
'WHENEVER': tokens.Keyword,
'WITH': tokens.Keyword.CTE,
'WITHOUT': tokens.Keyword,
'WORK': tokens.Keyword,
'WRITE': tokens.Keyword,
'YEAR': tokens.Keyword,
'ZONE': tokens.Keyword,
# Name.Builtin
'ARRAY': tokens.Name.Builtin,
'BIGINT': tokens.Name.Builtin,
'BINARY': tokens.Name.Builtin,
'BIT': tokens.Name.Builtin,
'BLOB': tokens.Name.Builtin,
'BOOLEAN': tokens.Name.Builtin,
'CHAR': tokens.Name.Builtin,
'CHARACTER': tokens.Name.Builtin,
'DATE': tokens.Name.Builtin,
'DEC': tokens.Name.Builtin,
'DECIMAL': tokens.Name.Builtin,
'FLOAT': tokens.Name.Builtin,
'INT': tokens.Name.Builtin,
'INT8': tokens.Name.Builtin,
'INTEGER': tokens.Name.Builtin,
'INTERVAL': tokens.Name.Builtin,
'LONG': tokens.Name.Builtin,
'NUMBER': tokens.Name.Builtin,
'NUMERIC': tokens.Name.Builtin,
'REAL': tokens.Name.Builtin,
'ROWID': tokens.Name.Builtin,
'ROWLABEL': tokens.Name.Builtin,
'ROWNUM': tokens.Name.Builtin,
'SERIAL': tokens.Name.Builtin,
'SERIAL8': tokens.Name.Builtin,
'SIGNED': tokens.Name.Builtin,
'SMALLINT': tokens.Name.Builtin,
'SYSDATE': tokens.Name.Builtin,
'TEXT': tokens.Name.Builtin,
'TINYINT': tokens.Name.Builtin,
'UNSIGNED': tokens.Name.Builtin,
'VARCHAR': tokens.Name.Builtin,
'VARCHAR2': tokens.Name.Builtin,
'VARYING': tokens.Name.Builtin,
}
KEYWORDS_COMMON = {
'SELECT': tokens.Keyword.DML,
'INSERT': tokens.Keyword.DML,
'DELETE': tokens.Keyword.DML,
'UPDATE': tokens.Keyword.DML,
'REPLACE': tokens.Keyword.DML,
'MERGE': tokens.Keyword.DML,
'DROP': tokens.Keyword.DDL,
'CREATE': tokens.Keyword.DDL,
'ALTER': tokens.Keyword.DDL,
'WHERE': tokens.Keyword,
'FROM': tokens.Keyword,
'INNER': tokens.Keyword,
'JOIN': tokens.Keyword,
'STRAIGHT_JOIN': tokens.Keyword,
'AND': tokens.Keyword,
'OR': tokens.Keyword,
'LIKE': tokens.Keyword,
'ON': tokens.Keyword,
'IN': tokens.Keyword,
'SET': tokens.Keyword,
'BY': tokens.Keyword,
'GROUP': tokens.Keyword,
'ORDER': tokens.Keyword,
'LEFT': tokens.Keyword,
'OUTER': tokens.Keyword,
'FULL': tokens.Keyword,
'IF': tokens.Keyword,
'END': tokens.Keyword,
'THEN': tokens.Keyword,
'LOOP': tokens.Keyword,
'AS': tokens.Keyword,
'ELSE': tokens.Keyword,
'FOR': tokens.Keyword,
'WHILE': tokens.Keyword,
'CASE': tokens.Keyword,
'WHEN': tokens.Keyword,
'MIN': tokens.Keyword,
'MAX': tokens.Keyword,
'DISTINCT': tokens.Keyword,
}
KEYWORDS_ORACLE = {
'ARCHIVE': tokens.Keyword,
'ARCHIVELOG': tokens.Keyword,
'BACKUP': tokens.Keyword,
'BECOME': tokens.Keyword,
'BLOCK': tokens.Keyword,
'BODY': tokens.Keyword,
'CANCEL': tokens.Keyword,
'CHANGE': tokens.Keyword,
'COMPILE': tokens.Keyword,
'CONTENTS': tokens.Keyword,
'CONTROLFILE': tokens.Keyword,
'DATAFILE': tokens.Keyword,
'DBA': tokens.Keyword,
'DISMOUNT': tokens.Keyword,
'DOUBLE': tokens.Keyword,
'DUMP': tokens.Keyword,
'EVENTS': tokens.Keyword,
'EXCEPTIONS': tokens.Keyword,
'EXPLAIN': tokens.Keyword,
'EXTENT': tokens.Keyword,
'EXTERNALLY': tokens.Keyword,
'FLUSH': tokens.Keyword,
'FREELIST': tokens.Keyword,
'FREELISTS': tokens.Keyword,
# groups seems too common as table name
# 'GROUPS': tokens.Keyword,
'INDICATOR': tokens.Keyword,
'INITRANS': tokens.Keyword,
'INSTANCE': tokens.Keyword,
'LAYER': tokens.Keyword,
'LINK': tokens.Keyword,
'LISTS': tokens.Keyword,
'LOGFILE': tokens.Keyword,
'MANAGE': tokens.Keyword,
'MANUAL': tokens.Keyword,
'MAXDATAFILES': tokens.Keyword,
'MAXINSTANCES': tokens.Keyword,
'MAXLOGFILES': tokens.Keyword,
'MAXLOGHISTORY': tokens.Keyword,
'MAXLOGMEMBERS': tokens.Keyword,
'MAXTRANS': tokens.Keyword,
'MINEXTENTS': tokens.Keyword,
'MODULE': tokens.Keyword,
'MOUNT': tokens.Keyword,
'NOARCHIVELOG': tokens.Keyword,
'NOCACHE': tokens.Keyword,
'NOCYCLE': tokens.Keyword,
'NOMAXVALUE': tokens.Keyword,
'NOMINVALUE': tokens.Keyword,
'NOORDER': tokens.Keyword,
'NORESETLOGS': tokens.Keyword,
'NORMAL': tokens.Keyword,
'NOSORT': tokens.Keyword,
'OPTIMAL': tokens.Keyword,
'OWN': tokens.Keyword,
'PACKAGE': tokens.Keyword,
'PARALLEL': tokens.Keyword,
'PCTINCREASE': tokens.Keyword,
'PCTUSED': tokens.Keyword,
'PLAN': tokens.Keyword,
'PRIVATE': tokens.Keyword,
'PROFILE': tokens.Keyword,
'QUOTA': tokens.Keyword,
'RECOVER': tokens.Keyword,
'RESETLOGS': tokens.Keyword,
'RESTRICTED': tokens.Keyword,
'REUSE': tokens.Keyword,
'ROLES': tokens.Keyword,
'SAVEPOINT': tokens.Keyword,
'SCN': tokens.Keyword,
'SECTION': tokens.Keyword,
'SEGMENT': tokens.Keyword,
'SHARED': tokens.Keyword,
'SNAPSHOT': tokens.Keyword,
'SORT': tokens.Keyword,
'STATEMENT_ID': tokens.Keyword,
'STOP': tokens.Keyword,
'SWITCH': tokens.Keyword,
'TABLES': tokens.Keyword,
'TABLESPACE': tokens.Keyword,
'THREAD': tokens.Keyword,
'TIME': tokens.Keyword,
'TRACING': tokens.Keyword,
'TRANSACTION': tokens.Keyword,
'TRIGGERS': tokens.Keyword,
'UNLIMITED': tokens.Keyword,
}
|
hbutau/zimlearner
|
refs/heads/master
|
.ropeproject/config.py
|
84
|
# The default ``config.py``
def set_prefs(prefs):
"""This function is called before opening the project"""
# Specify which files and folders to ignore in the project.
# Changes to ignored resources are not added to the history and
# VCSs. Also they are not returned in `Project.get_files()`.
# Note that ``?`` and ``*`` match all characters but slashes.
# '*.pyc': matches 'test.pyc' and 'pkg/test.pyc'
# 'mod*.pyc': matches 'test/mod1.pyc' but not 'mod/1.pyc'
# '.svn': matches 'pkg/.svn' and all of its children
# 'build/*.o': matches 'build/lib.o' but not 'build/sub/lib.o'
# 'build//*.o': matches 'build/lib.o' and 'build/sub/lib.o'
prefs['ignored_resources'] = [
'*.pyc', '*~', '.ropeproject', '.hg', '.svn', '_svn', '.git',
'.tox', '.env', 'node_modules', 'bower_components']
# Specifies which files should be considered python files. It is
# useful when you have scripts inside your project. Only files
# ending with ``.py`` are considered to be python files by
# default.
#prefs['python_files'] = ['*.py']
# Custom source folders: By default rope searches the project
# for finding source folders (folders that should be searched
# for finding modules). You can add paths to that list. Note
# that rope guesses project source folders correctly most of the
# time; use this if you have any problems.
# The folders should be relative to project root and use '/' for
# separating folders regardless of the platform rope is running on.
# 'src/my_source_folder' for instance.
#prefs.add('source_folders', 'src')
# You can extend python path for looking up modules
#prefs.add('python_path', '~/python/')
# Should rope save object information or not.
prefs['save_objectdb'] = True
prefs['compress_objectdb'] = False
# If `True`, rope analyzes each module when it is being saved.
prefs['automatic_soa'] = True
# The depth of calls to follow in static object analysis
prefs['soa_followed_calls'] = 0
# If `False` when running modules or unit tests "dynamic object
# analysis" is turned off. This makes them much faster.
prefs['perform_doa'] = True
# Rope can check the validity of its object DB when running.
prefs['validate_objectdb'] = True
    # The number of undo operations to keep in history.
    prefs['max_history_items'] = 32
    # Whether to save history across sessions.
prefs['save_history'] = True
prefs['compress_history'] = False
# Set the number spaces used for indenting. According to
# :PEP:`8`, it is best to use 4 spaces. Since most of rope's
# unit-tests use 4 spaces it is more reliable, too.
prefs['indent_size'] = 4
# Builtin and c-extension modules that are allowed to be imported
# and inspected by rope.
prefs['extension_modules'] = []
# Add all standard c-extensions to extension_modules list.
prefs['import_dynload_stdmods'] = True
# If `True` modules with syntax errors are considered to be empty.
# The default value is `False`; When `False` syntax errors raise
# `rope.base.exceptions.ModuleSyntaxError` exception.
prefs['ignore_syntax_errors'] = False
# If `True`, rope ignores unresolvable imports. Otherwise, they
# appear in the importing namespace.
prefs['ignore_bad_imports'] = False
# If `True`, rope will transform a comma list of imports into
# multiple separate import statements when organizing
# imports.
prefs['split_imports'] = False
# If `True`, rope will sort imports alphabetically by module name
# instead of alphabetically by import statement, with from imports
# after normal imports.
prefs['sort_imports_alphabetically'] = False
def project_opened(project):
"""This function is called after opening the project"""
# Do whatever you like here!
|
andreadelrio/bedrock
|
refs/heads/master
|
bedrock/grants/models.py
|
12133432
| |
mgit-at/ansible
|
refs/heads/devel
|
lib/ansible/plugins/action/group_by.py
|
122
|
# Copyright 2012, Jeroen Hoekx <jeroen@hoekx.be>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action import ActionBase
from ansible.module_utils.six import string_types
class ActionModule(ActionBase):
''' Create inventory groups based on variables '''
# We need to be able to modify the inventory
TRANSFERS_FILES = False
_VALID_ARGS = frozenset(('key', 'parents'))
def run(self, tmp=None, task_vars=None):
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
if 'key' not in self._task.args:
result['failed'] = True
result['msg'] = "the 'key' param is required when using group_by"
return result
group_name = self._task.args.get('key')
parent_groups = self._task.args.get('parents', ['all'])
if isinstance(parent_groups, string_types):
parent_groups = [parent_groups]
result['changed'] = False
result['add_group'] = group_name.replace(' ', '-')
result['parent_groups'] = [name.replace(' ', '-') for name in parent_groups]
return result
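
# Illustrative usage sketch (not part of the original plugin). A playbook
# task driving this action plugin might look like the following; the group
# name below is hypothetical:
#
#   - name: Group hosts by distribution
#     group_by:
#       key: "os_{{ ansible_facts['distribution'] }}"
#       parents:
#         - all
#
# run() above replaces spaces in 'key' and 'parents' with '-', so a key of
# "debian hosts" yields the inventory group 'debian-hosts'.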
|
mtiny/pyroute2
|
refs/heads/master
|
tests/general/test_netns.py
|
5
|
import os
import time
import fcntl
import subprocess
from pyroute2 import IPDB
from pyroute2 import IPRoute
from pyroute2 import NetNS
from pyroute2 import NSPopen
from pyroute2.common import uifname
from pyroute2.netns.process.proxy import NSPopen as NSPopenDirect
from pyroute2 import netns as netnsmod
from uuid import uuid4
from utils import require_user
class TestNSPopen(object):
def setup(self):
self.ip = IPRoute()
self.names = []
def teardown(self):
self.ip.close()
for ns in self.names:
netnsmod.remove(ns)
def alloc_nsname(self):
nsid = str(uuid4())
self.names.append(nsid)
return nsid
def test_stdio(self):
require_user('root')
nsid = self.alloc_nsname()
nsp = NSPopen(nsid, ['ip', 'ad'],
flags=os.O_CREAT,
stdout=subprocess.PIPE)
output = nsp.stdout.read()
nsp.release()
assert output is not None
def test_fcntl(self):
require_user('root')
nsid = self.alloc_nsname()
nsp = NSPopen(nsid, ['ip', 'ad'],
flags=os.O_CREAT,
stdout=subprocess.PIPE)
flags = nsp.stdout.fcntl(fcntl.F_GETFL)
nsp.release()
assert flags == 0
def test_api_class(self):
api_nspopen = set(dir(NSPopenDirect))
api_popen = set(dir(subprocess.Popen))
assert api_nspopen & api_popen == api_popen
def test_api_object(self):
require_user('root')
nsid = self.alloc_nsname()
nsp = NSPopen(nsid, ['true'], flags=os.O_CREAT, stdout=subprocess.PIPE)
smp = subprocess.Popen(['true'], stdout=subprocess.PIPE)
nsp.communicate()
smp.communicate()
api_nspopen = set(dir(nsp))
api_popen = set(dir(smp))
minimal = set(('communicate', 'kill', 'wait'))
assert minimal & (api_nspopen & api_popen) == minimal
smp.wait()
nsp.wait()
assert nsp.returncode == smp.returncode == 0
nsp.release()
def test_release(self):
require_user('root')
nsid = self.alloc_nsname()
nsp = NSPopen(nsid, ['true'], flags=os.O_CREAT, stdout=subprocess.PIPE)
nsp.communicate()
nsp.wait()
nsp.release()
try:
print(nsp.returncode)
except RuntimeError:
pass
def test_basic(self):
require_user('root')
nsid = self.alloc_nsname()
# create NS and run a child
nsp = NSPopen(nsid,
['ip', '-o', 'link'],
stdout=subprocess.PIPE,
flags=os.O_CREAT)
ret = nsp.communicate()[0].decode('utf-8')
host_links = [x.get_attr('IFLA_IFNAME') for x in self.ip.get_links()]
netns_links = [x.split(':')[1].split('@')[0].strip()
for x in ret.split('\n') if len(x)]
assert nsp.wait() == nsp.returncode == 0
assert set(host_links) & set(netns_links) == set(netns_links)
assert set(netns_links) < set(host_links)
assert not set(netns_links) > set(host_links)
nsp.release()
class TestNetNS(object):
def test_create(self):
require_user('root')
nsid = str(uuid4())
ipdb_main = IPDB()
ipdb_test = IPDB(nl=NetNS(nsid))
if1 = uifname()
if2 = uifname()
# create VETH pair
ipdb_main.create(ifname=if1, kind='veth', peer=if2).commit()
# move the peer to netns
with ipdb_main.interfaces[if2] as veth:
veth.net_ns_fd = nsid
# assign addresses
with ipdb_main.interfaces[if1] as veth:
veth.add_ip('172.16.200.1/24')
veth.up()
with ipdb_test.interfaces[if2] as veth:
veth.add_ip('172.16.200.2/24')
veth.up()
# ping peer
try:
with open('/dev/null', 'w') as fnull:
subprocess.check_call(['ping', '-c', '1', '172.16.200.2'],
stdout=fnull, stderr=fnull)
ret_ping = True
except Exception:
ret_ping = False
# check ARP
time.sleep(0.5)
ret_arp = '172.16.200.1' in list(ipdb_test.interfaces[if2].neighbours)
# ret_arp = list(ipdb_test.interfaces.v0p1.neighbours)
# cleanup
ipdb_main.interfaces[if1].remove().commit()
ipdb_main.release()
ipdb_test.release()
netnsmod.remove(nsid)
assert ret_ping
assert ret_arp
assert nsid not in netnsmod.listnetns()
def test_rename_plus_ipv6(self):
require_user('root')
mtu = 1280 # mtu must be >= 1280 if you plan to use IPv6
txqlen = 2000
nsid = str(uuid4())
ipdb_main = IPDB()
ipdb_test = IPDB(nl=NetNS(nsid))
if1 = uifname()
if2 = uifname()
if3 = uifname()
# create
ipdb_main.create(kind='veth',
ifname=if1,
peer=if2,
mtu=mtu,
txqlen=txqlen).commit()
# move
with ipdb_main.interfaces[if2] as veth:
veth.net_ns_fd = nsid
# set it up
with ipdb_test.interfaces[if2] as veth:
veth.add_ip('fdb3:84e5:4ff4:55e4::1/64')
veth.add_ip('fdff:ffff:ffff:ffc0::1/64')
veth.mtu = mtu
veth.txqlen = txqlen
veth.up()
veth.ifname = if3
veth = ipdb_test.interfaces.get(if3, None)
ipdb_main.release()
ipdb_test.release()
netnsmod.remove(nsid)
# check everything
assert ('fdb3:84e5:4ff4:55e4::1', 64) in veth.ipaddr
assert ('fdff:ffff:ffff:ffc0::1', 64) in veth.ipaddr
assert veth.flags & 1
assert veth.mtu == mtu
assert veth.txqlen == txqlen
|
anthgur/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/manifest/__init__.py
|
109
|
from . import item
from . import manifest
from . import sourcefile
from . import update
|
zdary/intellij-community
|
refs/heads/master
|
python/testData/wrap/WrapInArgumentList.py
|
83
|
def foo(abracadabra1, abracadabra2, abracadabra3, abracadabra4, abracadabra5<caret>
|
1nc1n3rat0r/scapy
|
refs/heads/master
|
scapy/layers/lltd.py
|
7
|
# This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Philippe Biondi <phil@secdev.org>
# This program is published under a GPLv2 license
"""LLTD Protocol
https://msdn.microsoft.com/en-us/library/cc233983.aspx
"""
import struct
from array import array
from scapy.fields import BitField, FlagsField, ByteField, ByteEnumField, \
ShortField, ShortEnumField, ThreeBytesField, IntField, IntEnumField, \
LongField, MultiEnumField, FieldLenField, FieldListField, \
PacketListField, StrLenField, StrLenFieldUtf16, ConditionalField, MACField
from scapy.packet import Packet, Padding, bind_layers
from scapy.plist import PacketList
from scapy.layers.l2 import Ether
from scapy.layers.inet import IPField
from scapy.layers.inet6 import IP6Field
from scapy.data import ETHER_ANY
# Protocol layers
##################
class LLTD(Packet):
name = "LLTD"
answer_hashret = {
# (tos, function) tuple mapping (answer -> query), used by
# .hashret()
(1, 1): (0, 0),
(0, 12): (0, 11),
}
fields_desc = [
ByteField("version", 1),
ByteEnumField("tos", 0, {
0: "Topology discovery",
1: "Quick discovery",
2: "QoS diagnostics",
}),
ByteField("reserved", 0),
MultiEnumField("function", 0, {
0: {
0: "Discover",
1: "Hello",
2: "Emit",
3: "Train",
4: "Probe",
5: "Ack",
6: "Query",
7: "QueryResp",
8: "Reset",
9: "Charge",
10: "Flat",
11: "QueryLargeTlv",
12: "QueryLargeTlvResp",
},
1: {
0: "Discover",
1: "Hello",
8: "Reset",
},
2: {
0: "QosInitializeSink",
1: "QosReady",
2: "QosProbe",
3: "QosQuery",
4: "QosQueryResp",
5: "QosReset",
6: "QosError",
7: "QosAck",
8: "QosCounterSnapshot",
9: "QosCounterResult",
10: "QosCounterLease",
},
}, depends_on=lambda pkt: pkt.tos, fmt="B"),
MACField("real_dst", None),
MACField("real_src", None),
ConditionalField(ShortField("xid", 0),
lambda pkt: pkt.function in [0, 8]),
ConditionalField(ShortField("seq", 0),
lambda pkt: pkt.function not in [0, 8]),
]
def post_build(self, pkt, pay):
if (self.real_dst is None or self.real_src is None) and \
isinstance(self.underlayer, Ether):
eth = self.underlayer
if self.real_dst is None:
pkt = (pkt[:4] + eth.fields_desc[0].i2m(eth, eth.dst) +
pkt[10:])
if self.real_src is None:
pkt = (pkt[:10] + eth.fields_desc[1].i2m(eth, eth.src) +
pkt[16:])
return pkt + pay
def mysummary(self):
if isinstance(self.underlayer, Ether):
return self.underlayer.sprintf(
'LLTD %src% > %dst% %LLTD.tos% - %LLTD.function%'
)
else:
return self.sprintf('LLTD %tos% - %function%')
def hashret(self):
tos, function = self.tos, self.function
return "%c%c" % self.answer_hashret.get((tos, function),
(tos, function))
def answers(self, other):
if not isinstance(other, LLTD):
return False
if self.tos == 0:
if self.function == 0 and isinstance(self.payload, LLTDDiscover) \
and len(self[LLTDDiscover].stations_list) == 1:
# "Topology discovery - Discover" with one MAC address
# discovered answers a "Quick discovery - Hello"
return other.tos == 1 and \
other.function == 1 and \
LLTDAttributeHostID in other and \
other[LLTDAttributeHostID].mac == \
self[LLTDDiscover].stations_list[0]
elif self.function == 12:
# "Topology discovery - QueryLargeTlvResp" answers
# "Topology discovery - QueryLargeTlv" with same .seq
# value
return other.tos == 0 and other.function == 11 \
and other.seq == self.seq
elif self.tos == 1:
if self.function == 1 and isinstance(self.payload, LLTDHello):
# "Quick discovery - Hello" answers a "Topology
# discovery - Discover"
return other.tos == 0 and other.function == 0 and \
other.real_src == self.current_mapper_address
return False
class LLTDHello(Packet):
name = "LLTD - Hello"
show_summary = False
fields_desc = [
ShortField("gen_number", 0),
MACField("current_mapper_address", ETHER_ANY),
MACField("apparent_mapper_address", ETHER_ANY),
]
class LLTDDiscover(Packet):
name = "LLTD - Discover"
fields_desc = [
ShortField("gen_number", 0),
FieldLenField("stations_count", None, count_of="stations_list",
fmt="H"),
FieldListField("stations_list", [], MACField("", ETHER_ANY),
count_from=lambda pkt: pkt.stations_count)
]
def mysummary(self):
return (self.sprintf("Stations: %stations_list%")
if self.stations_list else "No station", [LLTD])
class LLTDEmiteeDesc(Packet):
name = "LLTD - Emitee Desc"
fields_desc = [
ByteEnumField("type", 0, {0: "Train", 1: "Probe"}),
ByteField("pause", 0),
MACField("src", None),
MACField("dst", ETHER_ANY),
]
class LLTDEmit(Packet):
name = "LLTD - Emit"
fields_desc = [
FieldLenField("descs_count", None, count_of="descs_list",
fmt="H"),
PacketListField("descs_list", [], LLTDEmiteeDesc,
count_from=lambda pkt: pkt.descs_count),
]
def mysummary(self):
return ", ".join(desc.sprintf("%src% > %dst%")
for desc in self.descs_list), [LLTD]
class LLTDRecveeDesc(Packet):
name = "LLTD - Recvee Desc"
fields_desc = [
ShortEnumField("type", 0, {0: "Probe", 1: "ARP or ICMPv6"}),
MACField("real_src", ETHER_ANY),
MACField("ether_src", ETHER_ANY),
MACField("ether_dst", ETHER_ANY),
]
class LLTDQueryResp(Packet):
name = "LLTD - Query Response"
fields_desc = [
FlagsField("flags", 0, 2, "ME"),
BitField("descs_count", None, 14),
PacketListField("descs_list", [], LLTDRecveeDesc,
count_from=lambda pkt: pkt.descs_count),
]
def post_build(self, pkt, pay):
if self.descs_count is None:
# descs_count should be a FieldLenField but has an
# unsupported format (14 bits)
flags = ord(pkt[0]) & 0xc0
count = len(self.descs_list)
pkt = chr(flags + (count >> 8)) + chr(count % 256) + pkt[2:]
return pkt + pay
def mysummary(self):
return self.sprintf("%d response%s" % (
self.descs_count,
"s" if self.descs_count > 1 else "")), [LLTD]
class LLTDQueryLargeTlv(Packet):
name = "LLTD - Query Large Tlv"
fields_desc = [
ByteEnumField("type", 14, {
14: "Icon image",
17: "Friendly Name",
19: "Hardware ID",
22: "AP Association Table",
24: "Detailed Icon Image",
26: "Component Table",
28: "Repeater AP Table",
}),
ThreeBytesField("offset", 0),
]
def mysummary(self):
return self.sprintf("%type% (offset %offset%)"), [LLTD]
class LLTDQueryLargeTlvResp(Packet):
name = "LLTD - Query Large Tlv Response"
fields_desc = [
FlagsField("flags", 0, 2, "RM"),
BitField("len", None, 14),
StrLenField("value", "", length_from=lambda pkt: pkt.len)
]
def post_build(self, pkt, pay):
if self.len is None:
# len should be a FieldLenField but has an unsupported
# format (14 bits)
flags = ord(pkt[0]) & 0xc0
length = len(self.value)
pkt = chr(flags + (length >> 8)) + chr(length % 256) + pkt[2:]
return pkt + pay
def mysummary(self):
return self.sprintf("%%len%% bytes%s" % (
" (last)" if not self.flags & 2 else ""
)), [LLTD]
class LLTDAttribute(Packet):
name = "LLTD Attribute"
show_indent = False
show_summary = False
# section 2.2.1.1
fields_desc = [
ByteEnumField("type", 0, {
0: "End Of Property",
1: "Host ID",
2: "Characteristics",
3: "Physical Medium",
7: "IPv4 Address",
9: "802.11 Max Rate",
10: "Performance Counter Frequency",
12: "Link Speed",
14: "Icon Image",
15: "Machine Name",
18: "Device UUID",
20: "QoS Characteristics",
21: "802.11 Physical Medium",
24: "Detailed Icon Image",
}),
FieldLenField("len", None, length_of="value", fmt="B"),
StrLenField("value", "", length_from=lambda pkt: pkt.len),
]
@classmethod
def dispatch_hook(cls, _pkt=None, *_, **kargs):
if _pkt:
cmd = struct.unpack("B", _pkt[0])[0]
elif "type" in kargs:
cmd = kargs["type"]
if isinstance(cmd, basestring):
cmd = cls.fields_desc[0].s2i[cmd]
return SPECIFIC_CLASSES.get(cmd, cls)
SPECIFIC_CLASSES = {}
def _register_lltd_specific_class(*attr_types):
"""This can be used as a class decorator, but since we want to
support Python 2.5, we have to replace
@_register_lltd_specific_class(x[, y[, ...]])
class LLTDAttributeSpecific(LLTDAttribute):
[...]
by
class LLTDAttributeSpecific(LLTDAttribute):
[...]
LLTDAttributeSpecific = _register_lltd_specific_class(x[, y[, ...]])(
LLTDAttributeSpecific
)
"""
def _register(cls):
for attr_type in attr_types:
SPECIFIC_CLASSES[attr_type] = cls
type_fld = LLTDAttribute.fields_desc[0].copy()
type_fld.default = attr_types[0]
cls.fields_desc = [type_fld] + cls.fields_desc
return cls
return _register
class LLTDAttributeEOP(LLTDAttribute):
name = "LLTD Attribute - End Of Property"
fields_desc = []
LLTDAttributeEOP = _register_lltd_specific_class(0)(LLTDAttributeEOP)
class LLTDAttributeHostID(LLTDAttribute):
name = "LLTD Attribute - Host ID"
fields_desc = [
ByteField("len", 6),
MACField("mac", ETHER_ANY),
]
def mysummary(self):
return "ID: %s" % self.mac, [LLTD, LLTDAttributeMachineName]
LLTDAttributeHostID = _register_lltd_specific_class(1)(LLTDAttributeHostID)
class LLTDAttributeCharacteristics(LLTDAttribute):
name = "LLTD Attribute - Characteristics"
fields_desc = [
# According to MS doc, "this field MUST be set to 0x02". But
# according to MS implementation, that's wrong.
# ByteField("len", 2),
FieldLenField("len", None, length_of="reserved2", fmt="B",
adjust=lambda _, x: x + 2),
FlagsField("flags", 0, 5, "PXFML"),
BitField("reserved1", 0, 11),
StrLenField("reserved2", "", length_from=lambda x: x.len - 2)
]
LLTDAttributeCharacteristics = _register_lltd_specific_class(2)(
LLTDAttributeCharacteristics
)
class LLTDAttributePhysicalMedium(LLTDAttribute):
name = "LLTD Attribute - Physical Medium"
fields_desc = [
ByteField("len", 4),
IntEnumField("medium", 6, {
# https://www.iana.org/assignments/ianaiftype-mib/ianaiftype-mib
1: "other",
2: "regular1822",
3: "hdh1822",
4: "ddnX25",
5: "rfc877x25",
6: "ethernetCsmacd",
7: "iso88023Csmacd",
8: "iso88024TokenBus",
9: "iso88025TokenRing",
10: "iso88026Man",
11: "starLan",
12: "proteon10Mbit",
13: "proteon80Mbit",
14: "hyperchannel",
15: "fddi",
16: "lapb",
17: "sdlc",
18: "ds1",
19: "e1",
20: "basicISDN",
21: "primaryISDN",
22: "propPointToPointSerial",
23: "ppp",
24: "softwareLoopback",
25: "eon",
26: "ethernet3Mbit",
27: "nsip",
28: "slip",
29: "ultra",
30: "ds3",
31: "sip",
32: "frameRelay",
33: "rs232",
34: "para",
35: "arcnet",
36: "arcnetPlus",
37: "atm",
38: "miox25",
39: "sonet",
40: "x25ple",
41: "iso88022llc",
42: "localTalk",
43: "smdsDxi",
44: "frameRelayService",
45: "v35",
46: "hssi",
47: "hippi",
48: "modem",
49: "aal5",
50: "sonetPath",
51: "sonetVT",
52: "smdsIcip",
53: "propVirtual",
54: "propMultiplexor",
55: "ieee80212",
56: "fibreChannel",
57: "hippiInterface",
58: "frameRelayInterconnect",
59: "aflane8023",
60: "aflane8025",
61: "cctEmul",
62: "fastEther",
63: "isdn",
64: "v11",
65: "v36",
66: "g703at64k",
67: "g703at2mb",
68: "qllc",
69: "fastEtherFX",
70: "channel",
71: "ieee80211",
72: "ibm370parChan",
73: "escon",
74: "dlsw",
75: "isdns",
76: "isdnu",
77: "lapd",
78: "ipSwitch",
79: "rsrb",
80: "atmLogical",
81: "ds0",
82: "ds0Bundle",
83: "bsc",
84: "async",
85: "cnr",
86: "iso88025Dtr",
87: "eplrs",
88: "arap",
89: "propCnls",
90: "hostPad",
91: "termPad",
92: "frameRelayMPI",
93: "x213",
94: "adsl",
95: "radsl",
96: "sdsl",
97: "vdsl",
98: "iso88025CRFPInt",
99: "myrinet",
100: "voiceEM",
101: "voiceFXO",
102: "voiceFXS",
103: "voiceEncap",
104: "voiceOverIp",
105: "atmDxi",
106: "atmFuni",
107: "atmIma",
108: "pppMultilinkBundle",
109: "ipOverCdlc",
110: "ipOverClaw",
111: "stackToStack",
112: "virtualIpAddress",
113: "mpc",
114: "ipOverAtm",
115: "iso88025Fiber",
116: "tdlc",
117: "gigabitEthernet",
118: "hdlc",
119: "lapf",
120: "v37",
121: "x25mlp",
122: "x25huntGroup",
123: "transpHdlc",
124: "interleave",
125: "fast",
126: "ip",
127: "docsCableMaclayer",
128: "docsCableDownstream",
129: "docsCableUpstream",
130: "a12MppSwitch",
131: "tunnel",
132: "coffee",
133: "ces",
134: "atmSubInterface",
135: "l2vlan",
136: "l3ipvlan",
137: "l3ipxvlan",
138: "digitalPowerline",
139: "mediaMailOverIp",
140: "dtm",
141: "dcn",
142: "ipForward",
143: "msdsl",
144: "ieee1394",
145: "if-gsn",
146: "dvbRccMacLayer",
147: "dvbRccDownstream",
148: "dvbRccUpstream",
149: "atmVirtual",
150: "mplsTunnel",
151: "srp",
152: "voiceOverAtm",
153: "voiceOverFrameRelay",
154: "idsl",
155: "compositeLink",
156: "ss7SigLink",
157: "propWirelessP2P",
158: "frForward",
159: "rfc1483",
160: "usb",
161: "ieee8023adLag",
162: "bgppolicyaccounting",
163: "frf16MfrBundle",
164: "h323Gatekeeper",
165: "h323Proxy",
166: "mpls",
167: "mfSigLink",
168: "hdsl2",
169: "shdsl",
170: "ds1FDL",
171: "pos",
172: "dvbAsiIn",
173: "dvbAsiOut",
174: "plc",
175: "nfas",
176: "tr008",
177: "gr303RDT",
178: "gr303IDT",
179: "isup",
180: "propDocsWirelessMaclayer",
181: "propDocsWirelessDownstream",
182: "propDocsWirelessUpstream",
183: "hiperlan2",
184: "propBWAp2Mp",
185: "sonetOverheadChannel",
186: "digitalWrapperOverheadChannel",
187: "aal2",
188: "radioMAC",
189: "atmRadio",
190: "imt",
191: "mvl",
192: "reachDSL",
193: "frDlciEndPt",
194: "atmVciEndPt",
195: "opticalChannel",
196: "opticalTransport",
197: "propAtm",
198: "voiceOverCable",
199: "infiniband",
200: "teLink",
201: "q2931",
202: "virtualTg",
203: "sipTg",
204: "sipSig",
205: "docsCableUpstreamChannel",
206: "econet",
207: "pon155",
208: "pon622",
209: "bridge",
210: "linegroup",
211: "voiceEMFGD",
212: "voiceFGDEANA",
213: "voiceDID",
214: "mpegTransport",
215: "sixToFour",
216: "gtp",
217: "pdnEtherLoop1",
218: "pdnEtherLoop2",
219: "opticalChannelGroup",
220: "homepna",
221: "gfp",
222: "ciscoISLvlan",
223: "actelisMetaLOOP",
224: "fcipLink",
225: "rpr",
226: "qam",
227: "lmp",
228: "cblVectaStar",
229: "docsCableMCmtsDownstream",
230: "adsl2",
231: "macSecControlledIF",
232: "macSecUncontrolledIF",
233: "aviciOpticalEther",
234: "atmbond",
235: "voiceFGDOS",
236: "mocaVersion1",
237: "ieee80216WMAN",
238: "adsl2plus",
239: "dvbRcsMacLayer",
240: "dvbTdm",
241: "dvbRcsTdma",
242: "x86Laps",
243: "wwanPP",
244: "wwanPP2",
245: "voiceEBS",
246: "ifPwType",
247: "ilan",
248: "pip",
249: "aluELP",
250: "gpon",
251: "vdsl2",
252: "capwapDot11Profile",
253: "capwapDot11Bss",
254: "capwapWtpVirtualRadio",
255: "bits",
256: "docsCableUpstreamRfPort",
257: "cableDownstreamRfPort",
258: "vmwareVirtualNic",
259: "ieee802154",
260: "otnOdu",
261: "otnOtu",
262: "ifVfiType",
263: "g9981",
264: "g9982",
265: "g9983",
266: "aluEpon",
267: "aluEponOnu",
268: "aluEponPhysicalUni",
269: "aluEponLogicalLink",
271: "aluGponPhysicalUni",
272: "vmwareNicTeam",
277: "docsOfdmDownstream",
278: "docsOfdmaUpstream",
279: "gfast",
280: "sdci",
}),
]
LLTDAttributePhysicalMedium = _register_lltd_specific_class(3)(
LLTDAttributePhysicalMedium
)
class LLTDAttributeIPv4Address(LLTDAttribute):
name = "LLTD Attribute - IPv4 Address"
fields_desc = [
ByteField("len", 4),
IPField("ipv4", "0.0.0.0"),
]
LLTDAttributeIPv4Address = _register_lltd_specific_class(7)(
LLTDAttributeIPv4Address
)
class LLTDAttributeIPv6Address(LLTDAttribute):
name = "LLTD Attribute - IPv6 Address"
fields_desc = [
ByteField("len", 16),
IP6Field("ipv6", "::"),
]
LLTDAttributeIPv6Address = _register_lltd_specific_class(8)(
LLTDAttributeIPv6Address
)
class LLTDAttribute80211MaxRate(LLTDAttribute):
name = "LLTD Attribute - 802.11 Max Rate"
fields_desc = [
ByteField("len", 2),
ShortField("rate", 0),
]
LLTDAttribute80211MaxRate = _register_lltd_specific_class(9)(
LLTDAttribute80211MaxRate
)
class LLTDAttributePerformanceCounterFrequency(LLTDAttribute):
name = "LLTD Attribute - Performance Counter Frequency"
fields_desc = [
ByteField("len", 8),
LongField("freq", 0),
]
LLTDAttributePerformanceCounterFrequency = _register_lltd_specific_class(10)(
LLTDAttributePerformanceCounterFrequency
)
class LLTDAttributeLinkSpeed(LLTDAttribute):
name = "LLTD Attribute - Link Speed"
fields_desc = [
ByteField("len", 4),
IntField("speed", 0),
]
LLTDAttributeLinkSpeed = _register_lltd_specific_class(12)(
LLTDAttributeLinkSpeed
)
class LLTDAttributeLargeTLV(LLTDAttribute):
name = "LLTD Attribute - Large TLV"
fields_desc = [
ByteField("len", 0),
]
LLTDAttributeLargeTLV = _register_lltd_specific_class(14, 24, 26)(
LLTDAttributeLargeTLV
)
class LLTDAttributeMachineName(LLTDAttribute):
name = "LLTD Attribute - Machine Name"
fields_desc = [
FieldLenField("len", None, length_of="hostname", fmt="B"),
StrLenFieldUtf16("hostname", "", length_from=lambda pkt: pkt.len),
]
def mysummary(self):
return (self.sprintf("Hostname: %r" % self.hostname),
[LLTD, LLTDAttributeHostID])
LLTDAttributeMachineName = _register_lltd_specific_class(15)(
LLTDAttributeMachineName
)
class LLTDAttributeDeviceUUID(LLTDAttribute):
name = "LLTD Attribute - Device UUID"
fields_desc = [
FieldLenField("len", None, length_of="value", fmt="B"),
StrLenField("uuid", "\x00" * 16, length_from=lambda pkt: pkt.len),
]
LLTDAttributeDeviceUUID = _register_lltd_specific_class(18)(
LLTDAttributeDeviceUUID
)
class LLTDAttributeQOSCharacteristics(LLTDAttribute):
name = "LLTD Attribute - QoS Characteristics"
fields_desc = [
ByteField("len", 4),
FlagsField("flags", 0, 3, "EQP"),
BitField("reserved1", 0, 13),
ShortField("reserved2", 0),
]
LLTDAttributeQOSCharacteristics = _register_lltd_specific_class(20)(
LLTDAttributeQOSCharacteristics
)
class LLTDAttribute80211PhysicalMedium(LLTDAttribute):
name = "LLTD Attribute - 802.11 Physical Medium"
fields_desc = [
ByteField("len", 1),
ByteEnumField("medium", 0, {
0: "Unknown",
1: "FHSS 2.4 GHz",
2: "DSSS 2.4 GHz",
3: "IR Baseband",
4: "OFDM 5 GHz",
5: "HRDSSS",
6: "ERP",
}),
]
LLTDAttribute80211PhysicalMedium = _register_lltd_specific_class(21)(
LLTDAttribute80211PhysicalMedium
)
class LLTDAttributeSeesList(LLTDAttribute):
name = "LLTD Attribute - Sees List Working Set"
fields_desc = [
ByteField("len", 2),
ShortField("max_entries", 0),
]
LLTDAttributeSeesList = _register_lltd_specific_class(25)(
LLTDAttributeSeesList
)
bind_layers(Ether, LLTD, type=0x88d9)
bind_layers(LLTD, LLTDDiscover, tos=0, function=0)
bind_layers(LLTD, LLTDDiscover, tos=1, function=0)
bind_layers(LLTD, LLTDHello, tos=0, function=1)
bind_layers(LLTD, LLTDHello, tos=1, function=1)
bind_layers(LLTD, LLTDEmit, tos=0, function=2)
bind_layers(LLTD, LLTDQueryResp, tos=0, function=7)
bind_layers(LLTD, LLTDQueryLargeTlv, tos=0, function=11)
bind_layers(LLTD, LLTDQueryLargeTlvResp, tos=0, function=12)
bind_layers(LLTDHello, LLTDAttribute)
bind_layers(LLTDAttribute, LLTDAttribute)
bind_layers(LLTDAttribute, Padding, type=0)
bind_layers(LLTDEmiteeDesc, Padding)
bind_layers(LLTDRecveeDesc, Padding)
# Utils
########
class LargeTlvBuilder(object):
"""An object to build content fetched through LLTDQueryLargeTlv /
LLTDQueryLargeTlvResp packets.
Usable with a PacketList() object:
>>> p = LargeTlvBuilder()
>>> p.parse(rdpcap('capture_file.cap'))
Or during a network capture:
>>> p = LargeTlvBuilder()
>>> sniff(filter="ether proto 0x88d9", prn=p.parse)
To get the result, use .get_data()
"""
def __init__(self):
self.types_offsets = {}
self.data = {}
def parse(self, plist):
"""Update the builder using the provided `plist`. `plist` can
be either a Packet() or a PacketList().
"""
if not isinstance(plist, PacketList):
plist = PacketList(plist)
for pkt in plist[LLTD]:
if LLTDQueryLargeTlv in pkt:
key = "%s:%s:%d" % (pkt.real_dst, pkt.real_src, pkt.seq)
self.types_offsets[key] = (pkt[LLTDQueryLargeTlv].type,
pkt[LLTDQueryLargeTlv].offset)
elif LLTDQueryLargeTlvResp in pkt:
try:
key = "%s:%s:%d" % (pkt.real_src, pkt.real_dst, pkt.seq)
content, offset = self.types_offsets[key]
except KeyError:
continue
loc = slice(offset, offset + pkt[LLTDQueryLargeTlvResp].len)
key = "%s > %s [%s]" % (
pkt.real_src, pkt.real_dst,
LLTDQueryLargeTlv.fields_desc[0].i2s.get(content, content),
)
data = self.data.setdefault(key, array("B"))
datalen = len(data)
if datalen < loc.stop:
data.extend(array("B", "\x00" * (loc.stop - datalen)))
data[loc] = array("B", pkt[LLTDQueryLargeTlvResp].value)
def get_data(self):
"""Returns a dictionary object, keys are strings "source >
        destination [content type]", and values are the content
fetched, also as a string.
"""
return dict((key, "".join(chr(byte) for byte in data))
for key, data in self.data.iteritems())
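
# Illustrative example (a sketch, not part of the original module): after
# feeding LLTDQueryLargeTlv / LLTDQueryLargeTlvResp exchanges to parse(),
# get_data() returns entries keyed as "source > destination [content type]":
#
#     >>> builder = LargeTlvBuilder()
#     >>> builder.parse(rdpcap('capture_file.cap'))  # hypothetical capture
#     >>> builder.get_data()
#     {'00:11:22:33:44:55 > 66:77:88:99:aa:bb [Icon image]': '<icon bytes>'}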
|
rcarrillocruz/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/aos/aos_template.py
|
36
|
#!/usr/bin/python
#
# (c) 2017 Apstra Inc, <community@apstra.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aos_template
author: Damien Garros (@dgarros)
version_added: "2.3"
short_description: Manage AOS Template
description:
    - The Apstra AOS Template module lets you manage your Templates easily. You
      can create and delete Templates by name, by ID or by using a JSON file.
      This module is idempotent and supports the I(check) mode. It uses the AOS REST API.
requirements:
- "aos-pyez >= 0.6.0"
options:
session:
description:
- An existing AOS session as obtained by M(aos_login) module.
required: true
name:
description:
- Name of the Template to manage.
        Only one of I(name), I(id) or I(content) can be set.
id:
description:
- AOS Id of the Template to manage (can't be used to create a new Template),
        Only one of I(name), I(id) or I(content) can be set.
content:
description:
- Datastructure of the Template to create. The data can be in YAML / JSON or
directly a variable. It's the same datastructure that is returned
on success in I(value).
state:
description:
- Indicate what is the expected state of the Template (present or not).
default: present
choices: ['present', 'absent']
'''
EXAMPLES = '''
- name: "Check if an Template exist by name"
aos_template:
session: "{{ aos_session }}"
name: "my-template"
state: present
- name: "Check if an Template exist by ID"
aos_template:
session: "{{ aos_session }}"
id: "45ab26fc-c2ed-4307-b330-0870488fa13e"
state: present
- name: "Delete an Template by name"
aos_template:
session: "{{ aos_session }}"
name: "my-template"
state: absent
- name: "Delete an Template by id"
aos_template:
session: "{{ aos_session }}"
id: "45ab26fc-c2ed-4307-b330-0870488fa13e"
state: absent
- name: "Access Template 1/3"
aos_template:
session: "{{ aos_session }}"
name: "my-template"
state: present
register: template
- name: "Save Template into a JSON file 2/3"
copy:
content: "{{ template.value | to_nice_json }}"
dest: template_saved.json
- name: "Save Template into a YAML file 2/3"
copy:
content: "{{ template.value | to_nice_yaml }}"
dest: template_saved.yaml
- name: "Load Template from File (Json)"
aos_template:
session: "{{ aos_session }}"
content: "{{ lookup('file', 'resources/template_saved.json') }}"
state: present
- name: "Load Template from File (yaml)"
aos_template:
session: "{{ aos_session }}"
content: "{{ lookup('file', 'resources/template_saved.yaml') }}"
state: present
'''
RETURNS = '''
name:
description: Name of the Template
returned: always
type: str
sample: My-Template
id:
description: AOS unique ID assigned to the Template
returned: always
type: str
sample: fcc4ac1c-e249-4fe7-b458-2138bfb44c06
value:
description: Value of the object as returned by the AOS Server
returned: always
type: dict
sample: {'...'}
'''
import time
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.aos import get_aos_session, find_collection_item, do_load_resource, check_aos_version, content_to_dict
#########################################################
# State Processing
#########################################################
def template_absent(module, aos, my_template):
margs = module.params
    # If the template does not exist, return directly
if my_template.exists is False:
module.exit_json(changed=False,
name=margs['name'],
id=margs['id'],
value={})
# If not in check mode, delete Template
if not module.check_mode:
try:
            # need to wait 1 sec before delete to work around a current limitation in AOS
time.sleep(1)
my_template.delete()
except:
module.fail_json(msg="An error occurred, while trying to delete the Template")
module.exit_json( changed=True,
name=my_template.name,
id=my_template.id,
value={} )
def template_present(module, aos, my_template):
margs = module.params
    # if content is defined, create the object from 'content'
if margs['content'] is not None:
if 'display_name' in module.params['content'].keys():
do_load_resource(module, aos.DesignTemplates, module.params['content']['display_name'])
else:
module.fail_json(msg="Unable to find display_name in 'content', Mandatory")
    # if the template doesn't already exist, 'content' is required to create it
if my_template.exists is False and 'content' not in margs.keys():
module.fail_json(msg="'content' is mandatory for module that don't exist currently")
    # if the template already exists, just return it
module.exit_json( changed=False,
name=my_template.name,
id=my_template.id,
value=my_template.value )
#########################################################
# Main Function
#########################################################
def aos_template(module):
margs = module.params
try:
aos = get_aos_session(module, margs['session'])
except:
module.fail_json(msg="Unable to login to the AOS server")
item_name = False
item_id = False
if margs['content'] is not None:
content = content_to_dict(module, margs['content'] )
if 'display_name' in content.keys():
item_name = content['display_name']
else:
module.fail_json(msg="Unable to extract 'display_name' from 'content'")
elif margs['name'] is not None:
item_name = margs['name']
elif margs['id'] is not None:
item_id = margs['id']
#----------------------------------------------------
# Find Object if available based on ID or Name
#----------------------------------------------------
try:
my_template = find_collection_item(aos.DesignTemplates,
item_name=item_name,
item_id=item_id)
except:
module.fail_json(msg="Unable to find the IP Pool based on name or ID, something went wrong")
#----------------------------------------------------
# Proceed based on State value
#----------------------------------------------------
if margs['state'] == 'absent':
template_absent(module, aos, my_template)
elif margs['state'] == 'present':
template_present(module, aos, my_template)
def main():
module = AnsibleModule(
argument_spec=dict(
session=dict(required=True, type="dict"),
name=dict(required=False ),
id=dict(required=False ),
content=dict(required=False, type="json"),
state=dict( required=False,
choices=['present', 'absent'],
default="present")
),
mutually_exclusive = [('name', 'id', 'content')],
required_one_of=[('name', 'id', 'content')],
supports_check_mode=True
)
# Check if aos-pyez is present and match the minimum version
check_aos_version(module, '0.6.0')
aos_template(module)
if __name__ == "__main__":
main()
|
mongodb/mongo-python-driver
|
refs/heads/master
|
test/unified_format.py
|
2
|
# Copyright 2020-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unified test format runner.
https://github.com/mongodb/specifications/blob/master/source/unified-test-format/unified-test-format.rst
"""
import copy
import datetime
import functools
import os
import re
import sys
import types
from collections import abc
from bson import json_util, Code, Decimal128, DBRef, SON, Int64, MaxKey, MinKey
from bson.binary import Binary
from bson.objectid import ObjectId
from bson.regex import Regex, RE_TYPE
from gridfs import GridFSBucket
from pymongo import ASCENDING, MongoClient
from pymongo.client_session import ClientSession, TransactionOptions, _TxnState
from pymongo.change_stream import ChangeStream
from pymongo.collection import Collection
from pymongo.database import Database
from pymongo.errors import (
BulkWriteError, ConnectionFailure, ConfigurationError, InvalidOperation,
NotPrimaryError, PyMongoError)
from pymongo.monitoring import (
CommandFailedEvent, CommandListener, CommandStartedEvent,
CommandSucceededEvent, _SENSITIVE_COMMANDS, PoolCreatedEvent,
PoolReadyEvent, PoolClearedEvent, PoolClosedEvent, ConnectionCreatedEvent,
ConnectionReadyEvent, ConnectionClosedEvent,
ConnectionCheckOutStartedEvent, ConnectionCheckOutFailedEvent,
ConnectionCheckedOutEvent, ConnectionCheckedInEvent)
from pymongo.read_concern import ReadConcern
from pymongo.read_preferences import ReadPreference
from pymongo.results import BulkWriteResult
from pymongo.server_api import ServerApi
from pymongo.write_concern import WriteConcern
from test import client_context, unittest, IntegrationTest
from test.utils import (
camel_to_snake, get_pool, rs_or_single_client, single_client,
snake_to_camel, CMAPListener)
from test.version import Version
from test.utils import (
camel_to_snake_args, parse_collection_options, parse_spec_options,
prepare_spec_arguments)
JSON_OPTS = json_util.JSONOptions(tz_aware=False)
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass.
Vendored from six: https://github.com/benjaminp/six/blob/master/six.py
"""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(type):
def __new__(cls, name, this_bases, d):
if sys.version_info[:2] >= (3, 7):
# This version introduced PEP 560 that requires a bit
# of extra care (we mimic what is done by __build_class__).
resolved_bases = types.resolve_bases(bases)
if resolved_bases is not bases:
d['__orig_bases__'] = bases
else:
resolved_bases = bases
return meta(name, resolved_bases, d)
@classmethod
def __prepare__(cls, name, this_bases):
return meta.__prepare__(name, bases)
return type.__new__(metaclass, 'temporary_class', (), {})
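
# Illustrative sketch of with_metaclass() (the names below are hypothetical,
# not part of this test runner): it lets a class declare its metaclass in a
# way that works on both Python 2 and Python 3.
#
#     class _DemoMeta(type):
#         pass
#
#     class _Demo(with_metaclass(_DemoMeta, object)):
#         pass
#
#     assert type(_Demo) is _DemoMeta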
def is_run_on_requirement_satisfied(requirement):
topology_satisfied = True
req_topologies = requirement.get('topologies')
if req_topologies:
topology_satisfied = client_context.is_topology_type(
req_topologies)
server_version = Version(*client_context.version[:3])
min_version_satisfied = True
req_min_server_version = requirement.get('minServerVersion')
if req_min_server_version:
min_version_satisfied = Version.from_string(
req_min_server_version) <= server_version
max_version_satisfied = True
req_max_server_version = requirement.get('maxServerVersion')
if req_max_server_version:
max_version_satisfied = Version.from_string(
req_max_server_version) >= server_version
params_satisfied = True
params = requirement.get('serverParameters')
if params:
for param, val in params.items():
if param not in client_context.server_parameters:
params_satisfied = False
elif client_context.server_parameters[param] != val:
params_satisfied = False
auth_satisfied = True
req_auth = requirement.get('auth')
if req_auth is not None:
if req_auth:
auth_satisfied = client_context.auth_enabled
else:
auth_satisfied = not client_context.auth_enabled
return (topology_satisfied and min_version_satisfied and
max_version_satisfied and params_satisfied and auth_satisfied)
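
# Illustrative example (a sketch): a runOnRequirements entry such as
#
#     {'topologies': ['replicaset'], 'minServerVersion': '4.0', 'auth': True}
#
# is satisfied only when the connected deployment is a replica set running
# server version 4.0 or newer with authentication enabled.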
def parse_collection_or_database_options(options):
return parse_collection_options(options)
def parse_bulk_write_result(result):
upserted_ids = {str(int_idx): result.upserted_ids[int_idx]
for int_idx in result.upserted_ids}
return {
'deletedCount': result.deleted_count,
'insertedCount': result.inserted_count,
'matchedCount': result.matched_count,
'modifiedCount': result.modified_count,
'upsertedCount': result.upserted_count,
'upsertedIds': upserted_ids}
def parse_bulk_write_error_result(error):
write_result = BulkWriteResult(error.details, True)
return parse_bulk_write_result(write_result)
class NonLazyCursor(object):
"""A find cursor proxy that creates the remote cursor when initialized."""
def __init__(self, find_cursor):
self.find_cursor = find_cursor
# Create the server side cursor.
self.first_result = next(find_cursor, None)
def __next__(self):
if self.first_result is not None:
first = self.first_result
self.first_result = None
return first
return next(self.find_cursor)
def close(self):
self.find_cursor.close()
class EventListenerUtil(CMAPListener, CommandListener):
def __init__(self, observe_events, ignore_commands,
observe_sensitive_commands):
self._event_types = set(name.lower() for name in observe_events)
if observe_sensitive_commands:
self._ignore_commands = set(ignore_commands)
else:
self._ignore_commands = _SENSITIVE_COMMANDS | set(ignore_commands)
self._ignore_commands.add('configurefailpoint')
super(EventListenerUtil, self).__init__()
def get_events(self, event_type):
if event_type == 'command':
return [e for e in self.events if 'Command' in type(e).__name__]
return [e for e in self.events if 'Command' not in type(e).__name__]
def add_event(self, event):
if type(event).__name__.lower() in self._event_types:
super(EventListenerUtil, self).add_event(event)
def _command_event(self, event):
if event.command_name.lower() not in self._ignore_commands:
self.add_event(event)
def started(self, event):
self._command_event(event)
def succeeded(self, event):
self._command_event(event)
def failed(self, event):
self._command_event(event)
class EntityMapUtil(object):
"""Utility class that implements an entity map as per the unified
test format specification."""
def __init__(self, test_class):
self._entities = {}
self._listeners = {}
self._session_lsids = {}
self.test = test_class
def __getitem__(self, item):
try:
return self._entities[item]
except KeyError:
self.test.fail('Could not find entity named %s in map' % (
item,))
def __setitem__(self, key, value):
if not isinstance(key, str):
self.test.fail(
'Expected entity name of type str, got %s' % (type(key)))
if key in self._entities:
self.test.fail('Entity named %s already in map' % (key,))
self._entities[key] = value
def _create_entity(self, entity_spec):
if len(entity_spec) != 1:
self.test.fail(
"Entity spec %s did not contain exactly one top-level key" % (
entity_spec,))
entity_type, spec = next(iter(entity_spec.items()))
if entity_type == 'client':
kwargs = {}
observe_events = spec.get('observeEvents', [])
ignore_commands = spec.get('ignoreCommandMonitoringEvents', [])
observe_sensitive_commands = spec.get(
'observeSensitiveCommands', False)
# TODO: SUPPORT storeEventsAsEntities
if len(observe_events) or len(ignore_commands):
ignore_commands = [cmd.lower() for cmd in ignore_commands]
listener = EventListenerUtil(
observe_events, ignore_commands, observe_sensitive_commands)
self._listeners[spec['id']] = listener
kwargs['event_listeners'] = [listener]
if spec.get('useMultipleMongoses'):
if client_context.load_balancer:
kwargs['h'] = client_context.MULTI_MONGOS_LB_URI
elif client_context.is_mongos:
kwargs['h'] = client_context.mongos_seeds()
kwargs.update(spec.get('uriOptions', {}))
server_api = spec.get('serverApi')
if server_api:
kwargs['server_api'] = ServerApi(
server_api['version'], strict=server_api.get('strict'),
deprecation_errors=server_api.get('deprecationErrors'))
client = rs_or_single_client(**kwargs)
self[spec['id']] = client
self.test.addCleanup(client.close)
return
elif entity_type == 'database':
client = self[spec['client']]
if not isinstance(client, MongoClient):
self.test.fail(
'Expected entity %s to be of type MongoClient, got %s' % (
spec['client'], type(client)))
options = parse_collection_or_database_options(
spec.get('databaseOptions', {}))
self[spec['id']] = client.get_database(
spec['databaseName'], **options)
return
elif entity_type == 'collection':
database = self[spec['database']]
if not isinstance(database, Database):
self.test.fail(
'Expected entity %s to be of type Database, got %s' % (
spec['database'], type(database)))
options = parse_collection_or_database_options(
spec.get('collectionOptions', {}))
self[spec['id']] = database.get_collection(
spec['collectionName'], **options)
return
elif entity_type == 'session':
client = self[spec['client']]
if not isinstance(client, MongoClient):
self.test.fail(
'Expected entity %s to be of type MongoClient, got %s' % (
spec['client'], type(client)))
opts = camel_to_snake_args(spec.get('sessionOptions', {}))
if 'default_transaction_options' in opts:
txn_opts = parse_spec_options(
opts['default_transaction_options'])
txn_opts = TransactionOptions(**txn_opts)
opts = copy.deepcopy(opts)
opts['default_transaction_options'] = txn_opts
session = client.start_session(**dict(opts))
self[spec['id']] = session
self._session_lsids[spec['id']] = copy.deepcopy(session.session_id)
self.test.addCleanup(session.end_session)
return
elif entity_type == 'bucket':
# TODO: implement the 'bucket' entity type
self.test.skipTest(
'GridFS is not currently supported (PYTHON-2459)')
self.test.fail(
'Unable to create entity of unknown type %s' % (entity_type,))
def create_entities_from_spec(self, entity_spec):
for spec in entity_spec:
self._create_entity(spec)
def get_listener_for_client(self, client_name):
client = self[client_name]
if not isinstance(client, MongoClient):
self.test.fail(
'Expected entity %s to be of type MongoClient, got %s' % (
client_name, type(client)))
listener = self._listeners.get(client_name)
if not listener:
self.test.fail(
'No listeners configured for client %s' % (client_name,))
return listener
def get_lsid_for_session(self, session_name):
session = self[session_name]
if not isinstance(session, ClientSession):
self.test.fail(
'Expected entity %s to be of type ClientSession, got %s' % (
session_name, type(session)))
try:
return session.session_id
except InvalidOperation:
# session has been closed.
return self._session_lsids[session_name]
binary_types = (Binary, bytes)
long_types = (Int64,)
unicode_type = str
BSON_TYPE_ALIAS_MAP = {
# https://docs.mongodb.com/manual/reference/operator/query/type/
# https://pymongo.readthedocs.io/en/stable/api/bson/index.html
'double': (float,),
'string': (str,),
'object': (abc.Mapping,),
'array': (abc.MutableSequence,),
'binData': binary_types,
'undefined': (type(None),),
'objectId': (ObjectId,),
'bool': (bool,),
'date': (datetime.datetime,),
'null': (type(None),),
'regex': (Regex, RE_TYPE),
'dbPointer': (DBRef,),
'javascript': (unicode_type, Code),
'symbol': (unicode_type,),
'javascriptWithScope': (unicode_type, Code),
'int': (int,),
'long': (Int64,),
'decimal': (Decimal128,),
'maxKey': (MaxKey,),
'minKey': (MinKey,),
}
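
# Illustrative example (a sketch): with the alias map above, an expectation
# of {'$$type': ['int', 'long']} accepts both a Python int and a bson.Int64
# in the actual document, while {'$$type': 'objectId'} requires an ObjectId.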
class MatchEvaluatorUtil(object):
"""Utility class that implements methods for evaluating matches as per
the unified test format specification."""
def __init__(self, test_class):
self.test = test_class
def _operation_exists(self, spec, actual, key_to_compare):
if spec is True:
self.test.assertIn(key_to_compare, actual)
elif spec is False:
self.test.assertNotIn(key_to_compare, actual)
else:
self.test.fail(
'Expected boolean value for $$exists operator, got %s' % (
spec,))
def __type_alias_to_type(self, alias):
if alias not in BSON_TYPE_ALIAS_MAP:
self.test.fail('Unrecognized BSON type alias %s' % (alias,))
return BSON_TYPE_ALIAS_MAP[alias]
def _operation_type(self, spec, actual, key_to_compare):
if isinstance(spec, abc.MutableSequence):
permissible_types = tuple([
t for alias in spec for t in self.__type_alias_to_type(alias)])
else:
permissible_types = self.__type_alias_to_type(spec)
self.test.assertIsInstance(
actual[key_to_compare], permissible_types)
def _operation_matchesEntity(self, spec, actual, key_to_compare):
expected_entity = self.test.entity_map[spec]
self.test.assertIsInstance(expected_entity, abc.Mapping)
self.test.assertEqual(expected_entity, actual[key_to_compare])
def _operation_matchesHexBytes(self, spec, actual, key_to_compare):
raise NotImplementedError
def _operation_unsetOrMatches(self, spec, actual, key_to_compare):
if key_to_compare is None and not actual:
# top-level document can be None when unset
return
if key_to_compare not in actual:
# we add a dummy value for the compared key to pass map size check
actual[key_to_compare] = 'dummyValue'
return
self.match_result(spec, actual[key_to_compare], in_recursive_call=True)
def _operation_sessionLsid(self, spec, actual, key_to_compare):
expected_lsid = self.test.entity_map.get_lsid_for_session(spec)
self.test.assertEqual(expected_lsid, actual[key_to_compare])
def _evaluate_special_operation(self, opname, spec, actual,
key_to_compare):
method_name = '_operation_%s' % (opname.strip('$'),)
try:
method = getattr(self, method_name)
except AttributeError:
self.test.fail(
'Unsupported special matching operator %s' % (opname,))
else:
method(spec, actual, key_to_compare)
def _evaluate_if_special_operation(self, expectation, actual,
key_to_compare=None):
"""Returns True if a special operation is evaluated, False
otherwise. If the ``expectation`` map contains a single key,
value pair we check it for a special operation.
If given, ``key_to_compare`` is assumed to be the key in
``expectation`` whose corresponding value needs to be
evaluated for a possible special operation. ``key_to_compare``
is ignored when ``expectation`` has only one key."""
if not isinstance(expectation, abc.Mapping):
return False
is_special_op, opname, spec = False, False, False
if key_to_compare is not None:
if key_to_compare.startswith('$$'):
is_special_op = True
opname = key_to_compare
spec = expectation[key_to_compare]
key_to_compare = None
else:
nested = expectation[key_to_compare]
if isinstance(nested, abc.Mapping) and len(nested) == 1:
opname, spec = next(iter(nested.items()))
if opname.startswith('$$'):
is_special_op = True
elif len(expectation) == 1:
opname, spec = next(iter(expectation.items()))
if opname.startswith('$$'):
is_special_op = True
key_to_compare = None
if is_special_op:
self._evaluate_special_operation(
opname=opname,
spec=spec,
actual=actual,
key_to_compare=key_to_compare)
return True
return False
def _match_document(self, expectation, actual, is_root):
if self._evaluate_if_special_operation(expectation, actual):
return
self.test.assertIsInstance(actual, abc.Mapping)
for key, value in expectation.items():
if self._evaluate_if_special_operation(expectation, actual, key):
continue
self.test.assertIn(key, actual)
self.match_result(value, actual[key], in_recursive_call=True)
if not is_root:
expected_keys = set(expectation.keys())
for key, value in expectation.items():
if value == {'$$exists': False}:
expected_keys.remove(key)
self.test.assertEqual(expected_keys, set(actual.keys()))
def match_result(self, expectation, actual,
in_recursive_call=False):
if isinstance(expectation, abc.Mapping):
return self._match_document(
expectation, actual, is_root=not in_recursive_call)
if isinstance(expectation, abc.MutableSequence):
self.test.assertIsInstance(actual, abc.MutableSequence)
for e, a in zip(expectation, actual):
if isinstance(e, abc.Mapping):
self._match_document(
e, a, is_root=not in_recursive_call)
else:
self.match_result(e, a, in_recursive_call=True)
return
# account for flexible numerics in element-wise comparison
if (isinstance(expectation, int) or
isinstance(expectation, float)):
self.test.assertEqual(expectation, actual)
else:
self.test.assertIsInstance(actual, type(expectation))
self.test.assertEqual(expectation, actual)
def assertHasServiceId(self, spec, actual):
if 'hasServiceId' in spec:
if spec.get('hasServiceId'):
self.test.assertIsNotNone(actual.service_id)
self.test.assertIsInstance(actual.service_id, ObjectId)
else:
self.test.assertIsNone(actual.service_id)
def match_event(self, event_type, expectation, actual):
name, spec = next(iter(expectation.items()))
# every command event has the commandName field
if event_type == 'command':
command_name = spec.get('commandName')
if command_name:
self.test.assertEqual(command_name, actual.command_name)
if name == 'commandStartedEvent':
self.test.assertIsInstance(actual, CommandStartedEvent)
command = spec.get('command')
database_name = spec.get('databaseName')
if command:
if actual.command_name == 'update':
# TODO: remove this once PYTHON-1744 is done.
# Add upsert and multi fields back into expectations.
for update in command.get('updates', []):
update.setdefault('upsert', False)
update.setdefault('multi', False)
self.match_result(command, actual.command)
if database_name:
self.test.assertEqual(
database_name, actual.database_name)
self.assertHasServiceId(spec, actual)
elif name == 'commandSucceededEvent':
self.test.assertIsInstance(actual, CommandSucceededEvent)
reply = spec.get('reply')
if reply:
self.match_result(reply, actual.reply)
self.assertHasServiceId(spec, actual)
elif name == 'commandFailedEvent':
self.test.assertIsInstance(actual, CommandFailedEvent)
self.assertHasServiceId(spec, actual)
elif name == 'poolCreatedEvent':
self.test.assertIsInstance(actual, PoolCreatedEvent)
elif name == 'poolReadyEvent':
self.test.assertIsInstance(actual, PoolReadyEvent)
elif name == 'poolClearedEvent':
self.test.assertIsInstance(actual, PoolClearedEvent)
self.assertHasServiceId(spec, actual)
elif name == 'poolClosedEvent':
self.test.assertIsInstance(actual, PoolClosedEvent)
elif name == 'connectionCreatedEvent':
self.test.assertIsInstance(actual, ConnectionCreatedEvent)
elif name == 'connectionReadyEvent':
self.test.assertIsInstance(actual, ConnectionReadyEvent)
elif name == 'connectionClosedEvent':
self.test.assertIsInstance(actual, ConnectionClosedEvent)
if 'reason' in spec:
self.test.assertEqual(actual.reason, spec['reason'])
elif name == 'connectionCheckOutStartedEvent':
self.test.assertIsInstance(actual, ConnectionCheckOutStartedEvent)
elif name == 'connectionCheckOutFailedEvent':
self.test.assertIsInstance(actual, ConnectionCheckOutFailedEvent)
if 'reason' in spec:
self.test.assertEqual(actual.reason, spec['reason'])
elif name == 'connectionCheckedOutEvent':
self.test.assertIsInstance(actual, ConnectionCheckedOutEvent)
elif name == 'connectionCheckedInEvent':
self.test.assertIsInstance(actual, ConnectionCheckedInEvent)
else:
self.test.fail(
'Unsupported event type %s' % (name,))
def coerce_result(opname, result):
"""Convert a pymongo result into the spec's result format."""
if hasattr(result, 'acknowledged') and not result.acknowledged:
return {'acknowledged': False}
if opname == 'bulkWrite':
return parse_bulk_write_result(result)
if opname == 'insertOne':
return {'insertedId': result.inserted_id, 'insertedCount': 1}
if opname == 'insertMany':
res = {idx: _id for idx, _id in enumerate(result.inserted_ids)}
res['insertedCount'] = len(result.inserted_ids)
return res
if opname in ('deleteOne', 'deleteMany'):
return {'deletedCount': result.deleted_count}
if opname in ('updateOne', 'updateMany', 'replaceOne'):
return {
'matchedCount': result.matched_count,
'modifiedCount': result.modified_count,
'upsertedCount': 0 if result.upserted_id is None else 1,
}
return result
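
# Illustrative example (a sketch): for an acknowledged UpdateResult with
# matched_count=1, modified_count=1 and upserted_id=None,
# coerce_result('updateOne', result) yields
# {'matchedCount': 1, 'modifiedCount': 1, 'upsertedCount': 0}.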
class UnifiedSpecTestMixinV1(IntegrationTest):
"""Mixin class to run test cases from test specification files.
Assumes that tests conform to the `unified test format
<https://github.com/mongodb/specifications/blob/master/source/unified-test-format/unified-test-format.rst>`_.
Specification of the test suite being currently run is available as
a class attribute ``TEST_SPEC``.
"""
SCHEMA_VERSION = Version.from_string('1.5')
RUN_ON_LOAD_BALANCER = True
@staticmethod
def should_run_on(run_on_spec):
if not run_on_spec:
# Always run these tests.
return True
for req in run_on_spec:
if is_run_on_requirement_satisfied(req):
return True
return False
def insert_initial_data(self, initial_data):
for collection_data in initial_data:
coll_name = collection_data['collectionName']
db_name = collection_data['databaseName']
documents = collection_data['documents']
coll = self.client.get_database(db_name).get_collection(
coll_name, write_concern=WriteConcern(w="majority"))
coll.drop()
if len(documents) > 0:
coll.insert_many(documents)
else:
# ensure collection exists
result = coll.insert_one({})
coll.delete_one({'_id': result.inserted_id})
@classmethod
def setUpClass(cls):
# super call creates internal client cls.client
super(UnifiedSpecTestMixinV1, cls).setUpClass()
# process file-level runOnRequirements
run_on_spec = cls.TEST_SPEC.get('runOnRequirements', [])
if not cls.should_run_on(run_on_spec):
raise unittest.SkipTest(
'%s runOnRequirements not satisfied' % (cls.__name__,))
# add any special-casing for skipping tests here
if client_context.storage_engine == 'mmapv1':
if 'retryable-writes' in cls.TEST_SPEC['description']:
raise unittest.SkipTest(
"MMAPv1 does not support retryWrites=True")
def setUp(self):
super(UnifiedSpecTestMixinV1, self).setUp()
# process schemaVersion
# note: we check major schema version during class generation
# note: we do this here because we cannot run assertions in setUpClass
version = Version.from_string(self.TEST_SPEC['schemaVersion'])
self.assertLessEqual(
version, self.SCHEMA_VERSION,
'expected schema version %s or lower, got %s' % (
self.SCHEMA_VERSION, version))
# initialize internals
self.match_evaluator = MatchEvaluatorUtil(self)
def maybe_skip_test(self, spec):
# add any special-casing for skipping tests here
if client_context.storage_engine == 'mmapv1':
if 'Dirty explicit session is discarded' in spec['description']:
raise unittest.SkipTest(
"MMAPv1 does not support retryWrites=True")
elif 'Client side error in command starting transaction' in spec['description']:
raise unittest.SkipTest("Implement PYTHON-1894")
def process_error(self, exception, spec):
is_error = spec.get('isError')
is_client_error = spec.get('isClientError')
error_contains = spec.get('errorContains')
error_code = spec.get('errorCode')
error_code_name = spec.get('errorCodeName')
error_labels_contain = spec.get('errorLabelsContain')
error_labels_omit = spec.get('errorLabelsOmit')
expect_result = spec.get('expectResult')
if is_error:
# already satisfied because exception was raised
pass
if is_client_error:
# Connection errors are considered client errors.
if isinstance(exception, ConnectionFailure):
self.assertNotIsInstance(exception, NotPrimaryError)
elif isinstance(exception, (InvalidOperation, ConfigurationError)):
pass
else:
self.assertNotIsInstance(exception, PyMongoError)
if error_contains:
if isinstance(exception, BulkWriteError):
errmsg = str(exception.details).lower()
else:
errmsg = str(exception).lower()
self.assertIn(error_contains.lower(), errmsg)
if error_code:
self.assertEqual(
error_code, exception.details.get('code'))
if error_code_name:
self.assertEqual(
error_code_name, exception.details.get('codeName'))
if error_labels_contain:
labels = [err_label for err_label in error_labels_contain
if exception.has_error_label(err_label)]
self.assertEqual(labels, error_labels_contain)
if error_labels_omit:
for err_label in error_labels_omit:
if exception.has_error_label(err_label):
self.fail("Exception '%s' unexpectedly had label '%s'" % (
exception, err_label))
if expect_result:
if isinstance(exception, BulkWriteError):
result = parse_bulk_write_error_result(
exception)
self.match_evaluator.match_result(expect_result, result)
else:
self.fail("expectResult can only be specified with %s "
"exceptions" % (BulkWriteError,))
def __raise_if_unsupported(self, opname, target, *target_types):
if not isinstance(target, target_types):
self.fail('Operation %s not supported for entity '
'of type %s' % (opname, type(target)))
def __entityOperation_createChangeStream(self, target, *args, **kwargs):
if client_context.storage_engine == 'mmapv1':
self.skipTest("MMAPv1 does not support change streams")
self.__raise_if_unsupported(
'createChangeStream', target, MongoClient, Database, Collection)
stream = target.watch(*args, **kwargs)
self.addCleanup(stream.close)
return stream
def _clientOperation_createChangeStream(self, target, *args, **kwargs):
return self.__entityOperation_createChangeStream(
target, *args, **kwargs)
def _databaseOperation_createChangeStream(self, target, *args, **kwargs):
return self.__entityOperation_createChangeStream(
target, *args, **kwargs)
def _collectionOperation_createChangeStream(self, target, *args, **kwargs):
return self.__entityOperation_createChangeStream(
target, *args, **kwargs)
def _databaseOperation_runCommand(self, target, **kwargs):
self.__raise_if_unsupported('runCommand', target, Database)
# Ensure the first key is the command name.
ordered_command = SON([(kwargs.pop('command_name'), 1)])
ordered_command.update(kwargs['command'])
kwargs['command'] = ordered_command
return target.command(**kwargs)
def _databaseOperation_listCollections(self, target, *args, **kwargs):
if 'batch_size' in kwargs:
kwargs['cursor'] = {'batchSize': kwargs.pop('batch_size')}
cursor = target.list_collections(*args, **kwargs)
return list(cursor)
def __entityOperation_aggregate(self, target, *args, **kwargs):
self.__raise_if_unsupported('aggregate', target, Database, Collection)
return list(target.aggregate(*args, **kwargs))
def _databaseOperation_aggregate(self, target, *args, **kwargs):
return self.__entityOperation_aggregate(target, *args, **kwargs)
def _collectionOperation_aggregate(self, target, *args, **kwargs):
return self.__entityOperation_aggregate(target, *args, **kwargs)
def _collectionOperation_find(self, target, *args, **kwargs):
self.__raise_if_unsupported('find', target, Collection)
find_cursor = target.find(*args, **kwargs)
return list(find_cursor)
def _collectionOperation_createFindCursor(self, target, *args, **kwargs):
self.__raise_if_unsupported('find', target, Collection)
cursor = NonLazyCursor(target.find(*args, **kwargs))
self.addCleanup(cursor.close)
return cursor
def _collectionOperation_listIndexes(self, target, *args, **kwargs):
if 'batch_size' in kwargs:
self.skipTest('PyMongo does not support batch_size for '
'list_indexes')
return target.list_indexes(*args, **kwargs)
def _sessionOperation_withTransaction(self, target, *args, **kwargs):
if client_context.storage_engine == 'mmapv1':
self.skipTest('MMAPv1 does not support document-level locking')
self.__raise_if_unsupported('withTransaction', target, ClientSession)
return target.with_transaction(*args, **kwargs)
def _sessionOperation_startTransaction(self, target, *args, **kwargs):
if client_context.storage_engine == 'mmapv1':
self.skipTest('MMAPv1 does not support document-level locking')
self.__raise_if_unsupported('startTransaction', target, ClientSession)
return target.start_transaction(*args, **kwargs)
def _changeStreamOperation_iterateUntilDocumentOrError(self, target,
*args, **kwargs):
self.__raise_if_unsupported(
'iterateUntilDocumentOrError', target, ChangeStream)
return next(target)
def _cursor_iterateUntilDocumentOrError(self, target, *args, **kwargs):
self.__raise_if_unsupported(
'iterateUntilDocumentOrError', target, NonLazyCursor)
return next(target)
def _cursor_close(self, target, *args, **kwargs):
self.__raise_if_unsupported('close', target, NonLazyCursor)
return target.close()
def run_entity_operation(self, spec):
target = self.entity_map[spec['object']]
opname = spec['name']
opargs = spec.get('arguments')
expect_error = spec.get('expectError')
save_as_entity = spec.get('saveResultAsEntity')
expect_result = spec.get('expectResult')
ignore = spec.get('ignoreResultAndError')
if ignore and (expect_error or save_as_entity or expect_result):
raise ValueError(
'ignoreResultAndError is incompatible with saveResultAsEntity'
', expectError, and expectResult')
if opargs:
arguments = parse_spec_options(copy.deepcopy(opargs))
prepare_spec_arguments(spec, arguments, camel_to_snake(opname),
self.entity_map, self.run_operations)
else:
arguments = tuple()
if isinstance(target, MongoClient):
method_name = '_clientOperation_%s' % (opname,)
elif isinstance(target, Database):
method_name = '_databaseOperation_%s' % (opname,)
elif isinstance(target, Collection):
method_name = '_collectionOperation_%s' % (opname,)
elif isinstance(target, ChangeStream):
method_name = '_changeStreamOperation_%s' % (opname,)
elif isinstance(target, NonLazyCursor):
method_name = '_cursor_%s' % (opname,)
elif isinstance(target, ClientSession):
method_name = '_sessionOperation_%s' % (opname,)
elif isinstance(target, GridFSBucket):
raise NotImplementedError
else:
method_name = 'doesNotExist'
try:
method = getattr(self, method_name)
except AttributeError:
try:
cmd = getattr(target, camel_to_snake(opname))
except AttributeError:
self.fail('Unsupported operation %s on entity %s' % (
opname, target))
else:
cmd = functools.partial(method, target)
try:
result = cmd(**dict(arguments))
except Exception as exc:
if ignore:
return
if expect_error:
return self.process_error(exc, expect_error)
raise
else:
if expect_error:
self.fail('Expected error %s but "%s" succeeded: %s' % (
expect_error, opname, result))
if expect_result:
actual = coerce_result(opname, result)
self.match_evaluator.match_result(expect_result, actual)
if save_as_entity:
self.entity_map[save_as_entity] = result
def __set_fail_point(self, client, command_args):
if not client_context.test_commands_enabled:
self.skipTest('Test commands must be enabled')
cmd_on = SON([('configureFailPoint', 'failCommand')])
cmd_on.update(command_args)
client.admin.command(cmd_on)
self.addCleanup(
client.admin.command,
'configureFailPoint', cmd_on['configureFailPoint'], mode='off')
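# Illustrative command_args forwarded here (hypothetical values):
#   {'mode': {'times': 1},
#    'data': {'failCommands': ['insert'], 'errorCode': 91}}
# Building cmd_on as a SON keeps 'configureFailPoint' as the first key, so it
# is interpreted as the command name once the arguments are merged in.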
def _testOperation_failPoint(self, spec):
self.__set_fail_point(
client=self.entity_map[spec['client']],
command_args=spec['failPoint'])
def _testOperation_targetedFailPoint(self, spec):
session = self.entity_map[spec['session']]
if not session._pinned_address:
self.fail("Cannot use targetedFailPoint operation with unpinned "
"session %s" % (spec['session'],))
client = single_client('%s:%s' % session._pinned_address)
self.__set_fail_point(
client=client, command_args=spec['failPoint'])
self.addCleanup(client.close)
def _testOperation_assertSessionTransactionState(self, spec):
session = self.entity_map[spec['session']]
expected_state = getattr(_TxnState, spec['state'].upper())
self.assertEqual(expected_state, session._transaction.state)
def _testOperation_assertSessionPinned(self, spec):
session = self.entity_map[spec['session']]
self.assertIsNotNone(session._transaction.pinned_address)
def _testOperation_assertSessionUnpinned(self, spec):
session = self.entity_map[spec['session']]
self.assertIsNone(session._pinned_address)
self.assertIsNone(session._transaction.pinned_address)
def __get_last_two_command_lsids(self, listener):
cmd_started_events = []
for event in reversed(listener.events):
if isinstance(event, CommandStartedEvent):
cmd_started_events.append(event)
if len(cmd_started_events) < 2:
self.fail('Needed 2 CommandStartedEvents to compare lsids, '
'got %s' % (len(cmd_started_events)))
return tuple([e.command['lsid'] for e in cmd_started_events][:2])
def _testOperation_assertDifferentLsidOnLastTwoCommands(self, spec):
listener = self.entity_map.get_listener_for_client(spec['client'])
self.assertNotEqual(*self.__get_last_two_command_lsids(listener))
def _testOperation_assertSameLsidOnLastTwoCommands(self, spec):
listener = self.entity_map.get_listener_for_client(spec['client'])
self.assertEqual(*self.__get_last_two_command_lsids(listener))
def _testOperation_assertSessionDirty(self, spec):
session = self.entity_map[spec['session']]
self.assertTrue(session._server_session.dirty)
def _testOperation_assertSessionNotDirty(self, spec):
session = self.entity_map[spec['session']]
return self.assertFalse(session._server_session.dirty)
def _testOperation_assertCollectionExists(self, spec):
database_name = spec['databaseName']
collection_name = spec['collectionName']
collection_name_list = list(
self.client.get_database(database_name).list_collection_names())
self.assertIn(collection_name, collection_name_list)
def _testOperation_assertCollectionNotExists(self, spec):
database_name = spec['databaseName']
collection_name = spec['collectionName']
collection_name_list = list(
self.client.get_database(database_name).list_collection_names())
self.assertNotIn(collection_name, collection_name_list)
def _testOperation_assertIndexExists(self, spec):
collection = self.client[spec['databaseName']][spec['collectionName']]
index_names = [idx['name'] for idx in collection.list_indexes()]
self.assertIn(spec['indexName'], index_names)
def _testOperation_assertIndexNotExists(self, spec):
collection = self.client[spec['databaseName']][spec['collectionName']]
for index in collection.list_indexes():
self.assertNotEqual(spec['indexName'], index['name'])
def _testOperation_assertNumberConnectionsCheckedOut(self, spec):
client = self.entity_map[spec['client']]
pool = get_pool(client)
self.assertEqual(spec['connections'], pool.active_sockets)
def run_special_operation(self, spec):
opname = spec['name']
method_name = '_testOperation_%s' % (opname,)
try:
method = getattr(self, method_name)
except AttributeError:
self.fail('Unsupported special test operation %s' % (opname,))
else:
method(spec['arguments'])
def run_operations(self, spec):
for op in spec:
target = op['object']
if target != 'testRunner':
self.run_entity_operation(op)
else:
self.run_special_operation(op)
def check_events(self, spec):
for event_spec in spec:
client_name = event_spec['client']
events = event_spec['events']
# Valid types: 'command', 'cmap'
event_type = event_spec.get('eventType', 'command')
assert event_type in ('command', 'cmap')
listener = self.entity_map.get_listener_for_client(client_name)
actual_events = listener.get_events(event_type)
if len(events) == 0:
self.assertEqual(actual_events, [])
continue
if len(events) > len(actual_events):
self.fail('Expected to see %s events, got %s' % (
len(events), len(actual_events)))
for idx, expected_event in enumerate(events):
self.match_evaluator.match_event(
event_type, expected_event, actual_events[idx])
def verify_outcome(self, spec):
for collection_data in spec:
coll_name = collection_data['collectionName']
db_name = collection_data['databaseName']
expected_documents = collection_data['documents']
coll = self.client.get_database(db_name).get_collection(
coll_name,
read_preference=ReadPreference.PRIMARY,
read_concern=ReadConcern(level='local'))
if expected_documents:
sorted_expected_documents = sorted(
expected_documents, key=lambda doc: doc['_id'])
actual_documents = list(
coll.find({}, sort=[('_id', ASCENDING)]))
self.assertListEqual(sorted_expected_documents,
actual_documents)
def run_scenario(self, spec):
# maybe skip test manually
self.maybe_skip_test(spec)
# process test-level runOnRequirements
run_on_spec = spec.get('runOnRequirements', [])
if not self.should_run_on(run_on_spec):
raise unittest.SkipTest('runOnRequirements not satisfied')
# process skipReason
skip_reason = spec.get('skipReason', None)
if skip_reason is not None:
raise unittest.SkipTest('%s' % (skip_reason,))
# process createEntities
self.entity_map = EntityMapUtil(self)
self.entity_map.create_entities_from_spec(
self.TEST_SPEC.get('createEntities', []))
# process initialData
self.insert_initial_data(self.TEST_SPEC.get('initialData', []))
# process operations
self.run_operations(spec['operations'])
# process expectEvents
self.check_events(spec.get('expectEvents', []))
# process outcome
self.verify_outcome(spec.get('outcome', []))
class UnifiedSpecTestMeta(type):
"""Metaclass for generating test classes."""
def __init__(cls, *args, **kwargs):
super(UnifiedSpecTestMeta, cls).__init__(*args, **kwargs)
def create_test(spec):
def test_case(self):
self.run_scenario(spec)
return test_case
for test_spec in cls.TEST_SPEC['tests']:
description = test_spec['description']
test_name = 'test_%s' % (description.strip('. ').
replace(' ', '_').replace('.', '_'),)
test_method = create_test(copy.deepcopy(test_spec))
test_method.__name__ = str(test_name)
for fail_pattern in cls.EXPECTED_FAILURES:
if re.search(fail_pattern, description):
test_method = unittest.expectedFailure(test_method)
break
setattr(cls, test_name, test_method)
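# e.g. a spec description of 'poc example test.' yields a generated method
# named 'test_poc_example_test' on the class (illustrative description).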
_ALL_MIXIN_CLASSES = [
UnifiedSpecTestMixinV1,
# add mixin classes for new schema major versions here
]
_SCHEMA_VERSION_MAJOR_TO_MIXIN_CLASS = {
KLASS.SCHEMA_VERSION[0]: KLASS for KLASS in _ALL_MIXIN_CLASSES}
def generate_test_classes(test_path, module=__name__, class_name_prefix='',
expected_failures=[],
bypass_test_generation_errors=False):
"""Method for generating test classes. Returns a dictionary where keys are
the names of test classes and values are the test class objects."""
test_klasses = {}
def test_base_class_factory(test_spec):
"""Utility that creates the base class to use for test generation.
This is needed to ensure that cls.TEST_SPEC is appropriately set when
the metaclass __init__ is invoked."""
class SpecTestBase(with_metaclass(UnifiedSpecTestMeta)):
TEST_SPEC = test_spec
EXPECTED_FAILURES = expected_failures
return SpecTestBase
for dirpath, _, filenames in os.walk(test_path):
dirname = os.path.split(dirpath)[-1]
for filename in filenames:
fpath = os.path.join(dirpath, filename)
with open(fpath) as scenario_stream:
# Use tz_aware=False to match how CodecOptions decodes
# dates.
opts = json_util.JSONOptions(tz_aware=False)
scenario_def = json_util.loads(
scenario_stream.read(), json_options=opts)
test_type = os.path.splitext(filename)[0]
snake_class_name = 'Test%s_%s_%s' % (
class_name_prefix, dirname.replace('-', '_'),
test_type.replace('-', '_').replace('.', '_'))
class_name = snake_to_camel(snake_class_name)
try:
schema_version = Version.from_string(
scenario_def['schemaVersion'])
mixin_class = _SCHEMA_VERSION_MAJOR_TO_MIXIN_CLASS.get(
schema_version[0])
if mixin_class is None:
raise ValueError(
"test file '%s' has unsupported schemaVersion '%s'" % (
fpath, schema_version))
test_klasses[class_name] = type(
class_name,
(mixin_class, test_base_class_factory(scenario_def),),
{'__module__': module})
except Exception:
if bypass_test_generation_errors:
continue
raise
return test_klasses
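# Illustrative call (the test directory is hypothetical); callers typically
# inject the generated classes into their own namespace:
#   globals().update(generate_test_classes('/path/to/unified/tests',
#                                          module=__name__))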
|
Grirrane/odoo
|
refs/heads/master
|
addons/payment/tests/common.py
|
392
|
# -*- coding: utf-8 -*-
from openerp.tests import common
class PaymentAcquirerCommon(common.TransactionCase):
def setUp(self):
super(PaymentAcquirerCommon, self).setUp()
self.payment_acquirer = self.registry('payment.acquirer')
self.payment_transaction = self.registry('payment.transaction')
self.currency_euro_id = self.registry('res.currency').search(
self.cr, self.uid, [('name', '=', 'EUR')], limit=1)[0]
self.currency_euro = self.registry('res.currency').browse(
self.cr, self.uid, self.currency_euro_id)
self.country_belgium_id = self.registry('res.country').search(
self.cr, self.uid, [('code', 'like', 'BE')], limit=1)[0]
self.country_france_id = self.registry('res.country').search(
self.cr, self.uid, [('code', 'like', 'FR')], limit=1)[0]
# dict partner values
self.buyer_values = {
'name': 'Norbert Buyer',
'lang': 'en_US',
'email': 'norbert.buyer@example.com',
'street': 'Huge Street',
'street2': '2/543',
'phone': '0032 12 34 56 78',
'city': 'Sin City',
'zip': '1000',
'country_id': self.country_belgium_id,
'country_name': 'Belgium',
}
# test partner
self.buyer_id = self.registry('res.partner').create(
self.cr, self.uid, {
'name': 'Norbert Buyer',
'lang': 'en_US',
'email': 'norbert.buyer@example.com',
'street': 'Huge Street',
'street2': '2/543',
'phone': '0032 12 34 56 78',
'city': 'Sin City',
'zip': '1000',
'country_id': self.country_belgium_id,
}
)
|
davidyezsetz/kuma
|
refs/heads/master
|
vendor/packages/ipython/IPython/ConfigLoader.py
|
7
|
# -*- coding: utf-8 -*-
"""Configuration loader
"""
#*****************************************************************************
# Copyright (C) 2001-2006 Fernando Perez. <fperez@colorado.edu>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#*****************************************************************************
import exceptions
import os
from pprint import pprint
from IPython import ultraTB
from IPython.ipstruct import Struct
from IPython.genutils import *
class ConfigLoaderError(exceptions.Exception):
"""Exception for ConfigLoader class."""
def __init__(self,args=None):
self.args = args
class ConfigLoader:
"""Configuration file loader capable of handling recursive inclusions and
with parametrized conflict resolution for multiply found keys."""
def __init__(self,conflict=None,field_sep=None,reclimit=15):
"""The reclimit parameter controls the number of recursive
configuration file inclusions. This way we can stop early on (before
python's own recursion limit is hit) if there is a circular
inclusion.
- conflict: dictionary for conflict resolutions (see Struct.merge())
"""
self.conflict = conflict
self.field_sep = field_sep
self.reset(reclimit)
def reset(self,reclimit=15):
self.reclimit = reclimit
self.recdepth = 0
self.included = []
def load(self,fname,convert=None,recurse_key='',incpath = '.',**kw):
"""Load a configuration file, return the resulting Struct.
Call: load(fname,convert=None,recurse_key='',incpath='.',**kw)
- fname: file to load from.
- convert: dictionary of type conversions (see read_dict())
- recurse_key: keyword in dictionary to trigger recursive file
inclusions.
"""
if self.recdepth > self.reclimit:
raise ConfigLoaderError, 'maximum recursive inclusion of rcfiles '+\
'exceeded: ' + `self.recdepth` + \
'.\nMaybe you have a circular chain of inclusions?'
self.recdepth += 1
fname = filefind(fname,incpath)
data = Struct()
# avoid including the same file more than once
if fname in self.included:
return data
Xinfo = ultraTB.AutoFormattedTB(color_scheme='NoColor')
if convert==None and recurse_key : convert = {qwflat:recurse_key}
# for production, change warn to 0:
data.merge(read_dict(fname,convert,fs=self.field_sep,strip=1,
warn=0,no_empty=0,**kw))
# keep track of successfully loaded files
self.included.append(fname)
if recurse_key in data:
for incfilename in data[recurse_key]:
found=0
try:
incfile = filefind(incfilename,incpath)
except IOError:
if os.name in ['nt','dos']:
try:
# Try again with '.ini' extension
incfilename += '.ini'
incfile = filefind(incfilename,incpath)
except IOError:
found = 0
else:
found = 1
else:
found = 0
else:
found = 1
if found:
try:
data.merge(self.load(incfile,convert,recurse_key,
incpath,**kw),
self.conflict)
except:
Xinfo()
warn('Problem loading included file: '+
`incfilename` + '. Ignoring it...')
else:
warn('File `%s` not found. Included by %s' % (incfilename,fname))
return data
# end ConfigLoader
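# Illustrative use (the file name and the 'include' key are assumptions, not
# part of this module):
#   loader = ConfigLoader()
#   rc = loader.load('ipythonrc', recurse_key='include', incpath='.')
# Files listed under 'include' in ipythonrc are then loaded recursively and
# merged into the returned Struct, at most reclimit levels deep.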
|
ahmedshafeeq/otp
|
refs/heads/maint
|
lib/asn1/test/asn1_SUITE_data/XSetOf.py
|
97
|
XSetOf DEFINITIONS ::=
BEGIN
-- F.2.11.4
-- Use a set-of type to model a collection of variables whose types are
-- the same and whose order is insignificant.
-- EXAMPLE
Keywords ::= SET OF VisibleString -- in arbitrary order
someASN1Keywords Keywords ::= {"INTEGER", "BOOLEAN", "REAL"}
END
|
zhhf/charging
|
refs/heads/master
|
charging/plugins/vmware/plugins/__init__.py
|
12133432
| |
robintema/django-otp
|
refs/heads/master
|
django_otp/plugins/otp_email/__init__.py
|
12133432
| |
abendig/django-mailchimp
|
refs/heads/master
|
mailchimp/migrations/__init__.py
|
12133432
| |
kobejean/tensorflow
|
refs/heads/master
|
tensorflow/examples/tutorials/input_fn/__init__.py
|
12133432
| |
keedio/hue
|
refs/heads/master
|
desktop/core/ext-py/django-nose-1.3/django_nose/management/__init__.py
|
12133432
| |
romain-dartigues/ansible
|
refs/heads/devel
|
test/integration/targets/module_utils/module_utils/spam6/__init__.py
|
12133432
| |
nuagenetworks/nuage-openstack-neutron
|
refs/heads/master
|
nuage_neutron/db/migration/alembic_migrations/versions/liberty_release.py
|
2
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Liberty
Revision ID: liberty
Revises: None
Create Date: 2015-11-13 00:00:00.000000
"""
# revision identifiers, used by Alembic.
revision = 'liberty'
down_revision = None
def upgrade():
"""A no-op migration for marking the Liberty release."""
pass
|
Sendoushi/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/wptserve/wptserve/__init__.py
|
329
|
from server import WebTestHttpd, WebTestServer, Router
from request import Request
from response import Response
|
LearnEra/LearnEraPlaftform
|
refs/heads/master
|
cms/djangoapps/contentstore/management/commands/course_id_clash.py
|
18
|
"""
Script for finding all courses whose org/name pairs collide with other courses when case is ignored
"""
from django.core.management.base import BaseCommand
from xmodule.modulestore.django import modulestore
from xmodule.modulestore import ModuleStoreEnum
#
# To run from command line: ./manage.py cms --settings dev course_id_clash
#
class Command(BaseCommand):
"""
Script for finding all courses in the Mongo Modulestore whose org/name pairs collide with other courses when case is ignored
"""
help = 'List all course ids in the Mongo Modulestore which may collide when case is ignored'
def handle(self, *args, **options):
mstore = modulestore()._get_modulestore_by_type(ModuleStoreEnum.Type.mongo) # pylint: disable=protected-access
if hasattr(mstore, 'collection'):
map_fn = '''
function () {
emit(this._id.org.toLowerCase()+this._id.course.toLowerCase(), {target: this._id});
}
'''
reduce_fn = '''
function (idpair, matches) {
var result = {target: []};
matches.forEach(function (match) {
result.target.push(match.target);
});
return result;
}
'''
finalize = '''
function(key, reduced) {
if (Array.isArray(reduced.target)) {
return reduced;
}
else {return null;}
}
'''
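# In short: map keys each course by its lower-cased org+course pair, reduce
# gathers the ids sharing a key, and finalize keeps only keys whose reduced
# target is an array, i.e. keys under which at least two courses collided.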
results = mstore.collection.map_reduce(
map_fn, reduce_fn, {'inline': True}, query={'_id.category': 'course'}, finalize=finalize
)
results = results.get('results')
for entry in results:
if entry.get('value') is not None:
print '{:-^40}'.format(entry.get('_id'))
for course_id in entry.get('value').get('target'):
print ' {}/{}/{}'.format(course_id.get('org'), course_id.get('course'), course_id.get('name'))
|
svn2github/gyp
|
refs/heads/master
|
pylib/gyp/common.py
|
9
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import with_statement
import collections
import errno
import filecmp
import os.path
import re
import tempfile
import sys
# A minimal memoizing decorator. It'll blow up if the args aren't immutable,
# among other "problems".
class memoize(object):
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
try:
return self.cache[args]
except KeyError:
result = self.func(*args)
self.cache[args] = result
return result
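# Illustrative use (hypothetical function, not part of this file). The cache
# key is the positional-argument tuple, so every argument must be hashable:
#   @memoize
#   def Fib(n):
#     return n if n < 2 else Fib(n - 1) + Fib(n - 2)
#   Fib(30)    # computed once; repeated calls are cache hits
#   Fib([30])  # TypeError: unhashable type: 'list'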
class GypError(Exception):
"""Error class representing an error, which is to be presented
to the user. The main entry point will catch and display this.
"""
pass
def ExceptionAppend(e, msg):
"""Append a message to the given exception's message."""
if not e.args:
e.args = (msg,)
elif len(e.args) == 1:
e.args = (str(e.args[0]) + ' ' + msg,)
else:
e.args = (str(e.args[0]) + ' ' + msg,) + e.args[1:]
def FindQualifiedTargets(target, qualified_list):
"""
Given a list of qualified targets, return the qualified targets for the
specified |target|.
"""
return [t for t in qualified_list if ParseQualifiedTarget(t)[1] == target]
def ParseQualifiedTarget(target):
# Splits a qualified target into a build file, target name and toolset.
# NOTE: rsplit is used to disambiguate the Windows drive letter separator.
target_split = target.rsplit(':', 1)
if len(target_split) == 2:
[build_file, target] = target_split
else:
build_file = None
target_split = target.rsplit('#', 1)
if len(target_split) == 2:
[target, toolset] = target_split
else:
toolset = None
return [build_file, target, toolset]
def ResolveTarget(build_file, target, toolset):
# This function resolves a target into a canonical form:
# - a fully defined build file, either absolute or relative to the current
# directory
# - a target name
# - a toolset
#
# build_file is the file relative to which 'target' is defined.
# target is the qualified target.
# toolset is the default toolset for that target.
[parsed_build_file, target, parsed_toolset] = ParseQualifiedTarget(target)
if parsed_build_file:
if build_file:
# If a relative path, parsed_build_file is relative to the directory
# containing build_file. If build_file is not in the current directory,
# parsed_build_file is not a usable path as-is. Resolve it by
# interpreting it as relative to build_file. If parsed_build_file is
# absolute, it is usable as a path regardless of the current directory,
# and os.path.join will return it as-is.
build_file = os.path.normpath(os.path.join(os.path.dirname(build_file),
parsed_build_file))
# Further, to handle cases like ../cwd, make it relative to cwd.
if not os.path.isabs(build_file):
build_file = RelativePath(build_file, '.')
else:
build_file = parsed_build_file
if parsed_toolset:
toolset = parsed_toolset
return [build_file, target, toolset]
def BuildFile(fully_qualified_target):
# Extracts the build file from the fully qualified target.
return ParseQualifiedTarget(fully_qualified_target)[0]
def GetEnvironFallback(var_list, default):
"""Look up a key in the environment, with fallback to secondary keys
and finally falling back to a default value."""
for var in var_list:
if var in os.environ:
return os.environ[var]
return default
def QualifiedTarget(build_file, target, toolset):
# "Qualified" means the file that a target was defined in and the target
# name, separated by a colon, suffixed by a # and the toolset name:
# /path/to/file.gyp:target_name#toolset
fully_qualified = build_file + ':' + target
if toolset:
fully_qualified = fully_qualified + '#' + toolset
return fully_qualified
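# Round trip between the two helpers (illustrative):
#   QualifiedTarget('foo/bar.gyp', 'lib', 'host') -> 'foo/bar.gyp:lib#host'
#   ParseQualifiedTarget('foo/bar.gyp:lib#host')  -> ['foo/bar.gyp', 'lib', 'host']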
@memoize
def RelativePath(path, relative_to):
# Assuming both |path| and |relative_to| are relative to the current
# directory, returns a relative path that identifies path relative to
# relative_to.
# Convert to normalized (and therefore absolute) paths.
path = os.path.realpath(path)
relative_to = os.path.realpath(relative_to)
# On Windows, we can't create a relative path to a different drive, so just
# use the absolute path.
if sys.platform == 'win32':
if (os.path.splitdrive(path)[0].lower() !=
os.path.splitdrive(relative_to)[0].lower()):
return path
# Split the paths into components.
path_split = path.split(os.path.sep)
relative_to_split = relative_to.split(os.path.sep)
# Determine how much of the prefix the two paths share.
prefix_len = len(os.path.commonprefix([path_split, relative_to_split]))
# Put enough ".." components to back up out of relative_to to the common
# prefix, and then append the part of path_split after the common prefix.
relative_split = [os.path.pardir] * (len(relative_to_split) - prefix_len) + \
path_split[prefix_len:]
if len(relative_split) == 0:
# The paths were the same.
return ''
# Turn it back into a string and we're done.
return os.path.join(*relative_split)
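# Illustrative results on POSIX, assuming no symlinks (paths hypothetical):
#   RelativePath('/home/u/a/b', '/home/u/a')   -> 'b'
#   RelativePath('/home/u/a', '/home/u/a/b/c') -> '../..'
#   RelativePath('/home/u/a', '/home/u/a')     -> ''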
@memoize
def InvertRelativePath(path, toplevel_dir=None):
"""Given a path like foo/bar that is relative to toplevel_dir, return
the inverse relative path back to the toplevel_dir.
E.g. os.path.normpath(os.path.join(path, InvertRelativePath(path)))
should always produce the empty string, unless the path contains symlinks.
"""
if not path:
return path
toplevel_dir = '.' if toplevel_dir is None else toplevel_dir
return RelativePath(toplevel_dir, os.path.join(toplevel_dir, path))
def FixIfRelativePath(path, relative_to):
# Like RelativePath but returns |path| unchanged if it is absolute.
if os.path.isabs(path):
return path
return RelativePath(path, relative_to)
def UnrelativePath(path, relative_to):
# Assuming that |relative_to| is relative to the current directory, and |path|
# is a path relative to the dirname of |relative_to|, returns a path that
# identifies |path| relative to the current directory.
rel_dir = os.path.dirname(relative_to)
return os.path.normpath(os.path.join(rel_dir, path))
# re objects used by EncodePOSIXShellArgument. See IEEE 1003.1 XCU.2.2 at
# http://www.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_02
# and the documentation for various shells.
# _quote is a pattern that should match any argument that needs to be quoted
# with double-quotes by EncodePOSIXShellArgument. It matches the following
# characters appearing anywhere in an argument:
# \t, \n, space parameter separators
# # comments
# $ expansions (quoted to always expand within one argument)
# % called out by IEEE 1003.1 XCU.2.2
# & job control
# ' quoting
# (, ) subshell execution
# *, ?, [ pathname expansion
# ; command delimiter
# <, >, | redirection
# = assignment
# {, } brace expansion (bash)
# ~ tilde expansion
# It also matches the empty string, because "" (or '') is the only way to
# represent an empty string literal argument to a POSIX shell.
#
# This does not match the characters in _escape, because those need to be
# backslash-escaped regardless of whether they appear in a double-quoted
# string.
_quote = re.compile('[\t\n #$%&\'()*;<=>?[{|}~]|^$')
# _escape is a pattern that should match any character that needs to be
# escaped with a backslash, whether or not the argument matched the _quote
# pattern. _escape is used with re.sub to backslash anything in _escape's
# first match group, hence the (parentheses) in the regular expression.
#
# _escape matches the following characters appearing anywhere in an argument:
# " to prevent POSIX shells from interpreting this character for quoting
# \ to prevent POSIX shells from interpreting this character for escaping
# ` to prevent POSIX shells from interpreting this character for command
# substitution
# Missing from this list is $, because the desired behavior of
# EncodePOSIXShellArgument is to permit parameter (variable) expansion.
#
# Also missing from this list is !, which bash will interpret as the history
# expansion character when history is enabled. bash does not enable history
# by default in non-interactive shells, so this is not thought to be a problem.
# ! was omitted from this list because bash interprets "\!" as a literal string
# including the backslash character (avoiding history expansion but retaining
# the backslash), which would not be correct for argument encoding. Handling
# this case properly would also be problematic because bash allows the history
# character to be changed with the histchars shell variable. Fortunately,
# as history is not enabled in non-interactive shells and
# EncodePOSIXShellArgument is only expected to encode for non-interactive
# shells, there is no room for error here by ignoring !.
_escape = re.compile(r'(["\\`])')
def EncodePOSIXShellArgument(argument):
"""Encodes |argument| suitably for consumption by POSIX shells.
argument may be quoted and escaped as necessary to ensure that POSIX shells
treat the returned value as a literal representing the argument passed to
this function. Parameter (variable) expansions beginning with $ are allowed
to remain intact without escaping the $, to allow the argument to contain
references to variables to be expanded by the shell.
"""
if not isinstance(argument, str):
argument = str(argument)
if _quote.search(argument):
quote = '"'
else:
quote = ''
encoded = quote + re.sub(_escape, r'\\\1', argument) + quote
return encoded
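# A few encodings, for illustration:
#   EncodePOSIXShellArgument('two words') -> "two words"  (space is in _quote)
#   EncodePOSIXShellArgument('a"b')       -> a\"b         (" is escaped but does
#                                                          not trigger quoting)
#   EncodePOSIXShellArgument('$HOME')     -> "$HOME"      ($ is quoted yet left
#                                                          expandable by design)
#   EncodePOSIXShellArgument('')          -> ""           (_quote matches ^$)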
def EncodePOSIXShellList(list):
"""Encodes |list| suitably for consumption by POSIX shells.
Returns EncodePOSIXShellArgument for each item in list, and joins them
together using the space character as an argument separator.
"""
encoded_arguments = []
for argument in list:
encoded_arguments.append(EncodePOSIXShellArgument(argument))
return ' '.join(encoded_arguments)
def DeepDependencyTargets(target_dicts, roots):
"""Returns the recursive list of target dependencies."""
dependencies = set()
pending = set(roots)
while pending:
# Pluck out one.
r = pending.pop()
# Skip if visited already.
if r in dependencies:
continue
# Add it.
dependencies.add(r)
# Add its children.
spec = target_dicts[r]
pending.update(set(spec.get('dependencies', [])))
pending.update(set(spec.get('dependencies_original', [])))
return list(dependencies - set(roots))
def BuildFileTargets(target_list, build_file):
"""From a target_list, returns the subset from the specified build_file.
"""
return [p for p in target_list if BuildFile(p) == build_file]
def AllTargets(target_list, target_dicts, build_file):
"""Returns all targets (direct and dependencies) for the specified build_file.
"""
bftargets = BuildFileTargets(target_list, build_file)
deptargets = DeepDependencyTargets(target_dicts, bftargets)
return bftargets + deptargets
def WriteOnDiff(filename):
"""Write to a file only if the new contents differ.
Arguments:
filename: name of the file to potentially write to.
Returns:
A file like object which will write to temporary file and only overwrite
the target if it differs (on close).
"""
class Writer(object):
"""Wrapper around file which only covers the target if it differs."""
def __init__(self):
# Pick temporary file.
tmp_fd, self.tmp_path = tempfile.mkstemp(
suffix='.tmp',
prefix=os.path.split(filename)[1] + '.gyp.',
dir=os.path.split(filename)[0])
try:
self.tmp_file = os.fdopen(tmp_fd, 'wb')
except Exception:
# Don't leave turds behind.
os.unlink(self.tmp_path)
raise
def __getattr__(self, attrname):
# Delegate everything else to self.tmp_file
return getattr(self.tmp_file, attrname)
def close(self):
try:
# Close tmp file.
self.tmp_file.close()
# Determine if different.
same = False
try:
same = filecmp.cmp(self.tmp_path, filename, False)
except OSError, e:
if e.errno != errno.ENOENT:
raise
if same:
# The new file is identical to the old one, just get rid of the new
# one.
os.unlink(self.tmp_path)
else:
# The new file is different from the old one, or there is no old one.
# Rename the new file to the permanent name.
#
# tempfile.mkstemp uses an overly restrictive mode, resulting in a
# file that can only be read by the owner, regardless of the umask.
# There's no reason to not respect the umask here, which means that
# an extra hoop is required to fetch it and reset the new file's mode.
#
# No way to get the umask without setting a new one? Set a safe one
# and then set it back to the old value.
umask = os.umask(077)
os.umask(umask)
os.chmod(self.tmp_path, 0666 & ~umask)
if sys.platform == 'win32' and os.path.exists(filename):
# NOTE: on windows (but not cygwin) rename will not replace an
# existing file, so it must be preceded with a remove. Sadly there
# is no way to make the switch atomic.
os.remove(filename)
os.rename(self.tmp_path, filename)
except Exception:
# Don't leave turds behind.
os.unlink(self.tmp_path)
raise
return Writer()
def EnsureDirExists(path):
"""Make sure the directory for |path| exists."""
try:
os.makedirs(os.path.dirname(path))
except OSError:
pass
def GetFlavor(params):
"""Returns |params.flavor| if it's set, the system's default flavor else."""
flavors = {
'cygwin': 'win',
'win32': 'win',
'darwin': 'mac',
}
if 'flavor' in params:
return params['flavor']
if sys.platform in flavors:
return flavors[sys.platform]
if sys.platform.startswith('sunos'):
return 'solaris'
if sys.platform.startswith('freebsd'):
return 'freebsd'
if sys.platform.startswith('openbsd'):
return 'openbsd'
if sys.platform.startswith('aix'):
return 'aix'
return 'linux'
def CopyTool(flavor, out_path):
"""Finds (flock|mac|win)_tool.gyp in the gyp directory and copies it
to |out_path|."""
# aix and solaris just need flock emulation. mac and win use more complicated
# support scripts.
prefix = {
'aix': 'flock',
'solaris': 'flock',
'mac': 'mac',
'win': 'win'
}.get(flavor, None)
if not prefix:
return
# Slurp input file.
source_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '%s_tool.py' % prefix)
with open(source_path) as source_file:
source = source_file.readlines()
# Add header and write it out.
tool_path = os.path.join(out_path, 'gyp-%s-tool' % prefix)
with open(tool_path, 'w') as tool_file:
tool_file.write(
''.join([source[0], '# Generated by gyp. Do not edit.\n'] + source[1:]))
# Make file executable.
os.chmod(tool_path, 0755)
# From Alex Martelli,
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
# ASPN: Python Cookbook: Remove duplicates from a sequence
# First comment, dated 2001/10/13.
# (Also in the printed Python Cookbook.)
def uniquer(seq, idfun=None):
if idfun is None:
idfun = lambda x: x
seen = {}
result = []
for item in seq:
marker = idfun(item)
if marker in seen: continue
seen[marker] = 1
result.append(item)
return result
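# e.g. uniquer([1, 2, 1, 3, 2]) -> [1, 2, 3]; with idfun=str.lower,
# uniquer(['A', 'a', 'b']) -> ['A', 'b'] (the first spelling wins).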
# Based on http://code.activestate.com/recipes/576694/.
class OrderedSet(collections.MutableSet):
def __init__(self, iterable=None):
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.map)
def __contains__(self, key):
return key in self.map
def add(self, key):
if key not in self.map:
end = self.end
curr = end[1]
curr[2] = end[1] = self.map[key] = [key, curr, end]
def discard(self, key):
if key in self.map:
key, prev_item, next_item = self.map.pop(key)
prev_item[2] = next_item
next_item[1] = prev_item
def __iter__(self):
end = self.end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
# The second argument is an addition that causes a pylint warning.
def pop(self, last=True): # pylint: disable=W0221
if not self:
raise KeyError('set is empty')
key = self.end[1][0] if last else self.end[2][0]
self.discard(key)
return key
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other):
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
return set(self) == set(other)
# Extensions to the recipe.
def update(self, iterable):
for i in iterable:
if i not in self:
self.add(i)
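# Illustrative behaviour (insertion order is preserved):
#   s = OrderedSet('abracadabra')  # iterates as 'a', 'b', 'r', 'c', 'd'
#   s.discard('b'); list(s)        # -> ['a', 'r', 'c', 'd']
#   s.pop()                        # -> 'd' (most recently added, by default)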
class CycleError(Exception):
"""An exception raised when an unexpected cycle is detected."""
def __init__(self, nodes):
self.nodes = nodes
def __str__(self):
return 'CycleError: cycle involving: ' + str(self.nodes)
def TopologicallySorted(graph, get_edges):
r"""Topologically sort based on a user provided edge definition.
Args:
graph: A list of node names.
get_edges: A function mapping from node name to a hashable collection
of node names which this node has outgoing edges to.
Returns:
A list containing all of the node in graph in topological order.
It is assumed that calling get_edges once per node and caching the result
is cheaper than calling get_edges repeatedly.
Raises:
CycleError in the event of a cycle.
Example:
graph = {'a': '$(b) $(c)', 'b': 'hi', 'c': '$(b)'}
def GetEdges(node):
return re.findall(r'\$\(([^)]+)\)', graph[node])
print TopologicallySorted(graph.keys(), GetEdges)
==>
['a', 'c', 'b']
"""
get_edges = memoize(get_edges)
visited = set()
visiting = set()
ordered_nodes = []
def Visit(node):
if node in visiting:
raise CycleError(visiting)
if node in visited:
return
visited.add(node)
visiting.add(node)
for neighbor in get_edges(node):
Visit(neighbor)
visiting.remove(node)
ordered_nodes.insert(0, node)
for node in sorted(graph):
Visit(node)
return ordered_nodes
|
GuillaumeBadi/Python-App-Engine
|
refs/heads/master
|
lib/requests/auth.py
|
41
|
# -*- coding: utf-8 -*-
"""
requests.auth
~~~~~~~~~~~~~
This module contains the authentication handlers for Requests.
"""
import os
import re
import time
import hashlib
import threading
from base64 import b64encode
from .compat import urlparse, str
from .cookies import extract_cookies_to_jar
from .utils import parse_dict_header, to_native_string
from .status_codes import codes
CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded'
CONTENT_TYPE_MULTI_PART = 'multipart/form-data'
def _basic_auth_str(username, password):
"""Returns a Basic Auth string."""
authstr = 'Basic ' + to_native_string(
b64encode(('%s:%s' % (username, password)).encode('latin1')).strip()
)
return authstr
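# For example, _basic_auth_str('user', 'pass') returns
# 'Basic dXNlcjpwYXNz', i.e. the base64 encoding of 'user:pass'.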
class AuthBase(object):
"""Base class that all auth implementations derive from"""
def __call__(self, r):
raise NotImplementedError('Auth hooks must be callable.')
class HTTPBasicAuth(AuthBase):
"""Attaches HTTP Basic Authentication to the given Request object."""
def __init__(self, username, password):
self.username = username
self.password = password
def __call__(self, r):
r.headers['Authorization'] = _basic_auth_str(self.username, self.password)
return r
class HTTPProxyAuth(HTTPBasicAuth):
"""Attaches HTTP Proxy Authentication to a given Request object."""
def __call__(self, r):
r.headers['Proxy-Authorization'] = _basic_auth_str(self.username, self.password)
return r
class HTTPDigestAuth(AuthBase):
"""Attaches HTTP Digest Authentication to the given Request object."""
def __init__(self, username, password):
self.username = username
self.password = password
# Keep state in per-thread local storage
self._thread_local = threading.local()
def init_per_thread_state(self):
# Ensure state is initialized just once per-thread
if not hasattr(self._thread_local, 'init'):
self._thread_local.init = True
self._thread_local.last_nonce = ''
self._thread_local.nonce_count = 0
self._thread_local.chal = {}
self._thread_local.pos = None
self._thread_local.num_401_calls = None
def build_digest_header(self, method, url):
realm = self._thread_local.chal['realm']
nonce = self._thread_local.chal['nonce']
qop = self._thread_local.chal.get('qop')
algorithm = self._thread_local.chal.get('algorithm')
opaque = self._thread_local.chal.get('opaque')
if algorithm is None:
_algorithm = 'MD5'
else:
_algorithm = algorithm.upper()
# lambdas assume digest modules are imported at the top level
hash_utf8 = None  # remains None for unrecognized algorithms (checked below)
if _algorithm == 'MD5' or _algorithm == 'MD5-SESS':
def md5_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.md5(x).hexdigest()
hash_utf8 = md5_utf8
elif _algorithm == 'SHA':
def sha_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.sha1(x).hexdigest()
hash_utf8 = sha_utf8
KD = lambda s, d: hash_utf8("%s:%s" % (s, d))
if hash_utf8 is None:
return None
# XXX not implemented yet
entdig = None
p_parsed = urlparse(url)
#: path is request-uri defined in RFC 2616 which should not be empty
path = p_parsed.path or "/"
if p_parsed.query:
path += '?' + p_parsed.query
A1 = '%s:%s:%s' % (self.username, realm, self.password)
A2 = '%s:%s' % (method, path)
HA1 = hash_utf8(A1)
HA2 = hash_utf8(A2)
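# RFC 2617 recap (clarifying comment, not in the original source): with
# H = hash_utf8 and KD(s, d) = H(s + ':' + d), the qop='auth' response
# computed below is KD(HA1, nonce:nc:cnonce:auth:HA2).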
if nonce == self._thread_local.last_nonce:
self._thread_local.nonce_count += 1
else:
self._thread_local.nonce_count = 1
ncvalue = '%08x' % self._thread_local.nonce_count
s = str(self._thread_local.nonce_count).encode('utf-8')
s += nonce.encode('utf-8')
s += time.ctime().encode('utf-8')
s += os.urandom(8)
cnonce = (hashlib.sha1(s).hexdigest()[:16])
if _algorithm == 'MD5-SESS':
HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce))
if qop is None:
respdig = KD(HA1, "%s:%s" % (nonce, HA2))
elif qop == 'auth' or 'auth' in qop.split(','):
noncebit = "%s:%s:%s:%s:%s" % (
nonce, ncvalue, cnonce, 'auth', HA2
)
respdig = KD(HA1, noncebit)
else:
# XXX handle auth-int.
return None
self._thread_local.last_nonce = nonce
# XXX should the partial digests be encoded too?
base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
'response="%s"' % (self.username, realm, nonce, path, respdig)
if opaque:
base += ', opaque="%s"' % opaque
if algorithm:
base += ', algorithm="%s"' % algorithm
if entdig:
base += ', digest="%s"' % entdig
if qop:
base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce)
return 'Digest %s' % (base)
def handle_redirect(self, r, **kwargs):
"""Reset num_401_calls counter on redirects."""
if r.is_redirect:
self._thread_local.num_401_calls = 1
def handle_401(self, r, **kwargs):
"""Takes the given response and tries digest-auth, if needed."""
if self._thread_local.pos is not None:
# Rewind the file position indicator of the body to where
# it was to resend the request.
r.request.body.seek(self._thread_local.pos)
s_auth = r.headers.get('www-authenticate', '')
if 'digest' in s_auth.lower() and self._thread_local.num_401_calls < 2:
self._thread_local.num_401_calls += 1
pat = re.compile(r'digest ', flags=re.IGNORECASE)
self._thread_local.chal = parse_dict_header(pat.sub('', s_auth, count=1))
# Consume content and release the original connection
# to allow our new request to reuse the same one.
r.content
r.close()
prep = r.request.copy()
extract_cookies_to_jar(prep._cookies, r.request, r.raw)
prep.prepare_cookies(prep._cookies)
prep.headers['Authorization'] = self.build_digest_header(
prep.method, prep.url)
_r = r.connection.send(prep, **kwargs)
_r.history.append(r)
_r.request = prep
return _r
self._thread_local.num_401_calls = 1
return r
def __call__(self, r):
# Initialize per-thread state, if needed
self.init_per_thread_state()
# If we have a saved nonce, skip the 401
if self._thread_local.last_nonce:
r.headers['Authorization'] = self.build_digest_header(r.method, r.url)
try:
self._thread_local.pos = r.body.tell()
except AttributeError:
# In the case of HTTPDigestAuth being reused and the body of
# the previous request was a file-like object, pos has the
# file position of the previous body. Ensure it's set to
# None.
self._thread_local.pos = None
r.register_hook('response', self.handle_401)
r.register_hook('response', self.handle_redirect)
self._thread_local.num_401_calls = 1
return r
|
abhattad4/Digi-Menu
|
refs/heads/master
|
digimenu2/build/lib.linux-x86_64-2.7/django/contrib/gis/db/backends/postgis/models.py
|
396
|
"""
The GeometryColumns and SpatialRefSys models for the PostGIS backend.
"""
from django.contrib.gis.db.backends.base.models import SpatialRefSysMixin
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class PostGISGeometryColumns(models.Model):
"""
The 'geometry_columns' table from PostGIS. See the PostGIS
documentation at Ch. 4.3.2.
On PostGIS 2, this is a view.
"""
f_table_catalog = models.CharField(max_length=256)
f_table_schema = models.CharField(max_length=256)
f_table_name = models.CharField(max_length=256)
f_geometry_column = models.CharField(max_length=256)
coord_dimension = models.IntegerField()
srid = models.IntegerField(primary_key=True)
type = models.CharField(max_length=30)
class Meta:
app_label = 'gis'
db_table = 'geometry_columns'
managed = False
@classmethod
def table_name_col(cls):
"""
Returns the name of the metadata column used to store the feature table
name.
"""
return 'f_table_name'
@classmethod
def geom_col_name(cls):
"""
Returns the name of the metadata column used to store the feature
geometry column.
"""
return 'f_geometry_column'
def __str__(self):
return "%s.%s - %dD %s field (SRID: %d)" % \
(self.f_table_name, self.f_geometry_column,
self.coord_dimension, self.type, self.srid)
class PostGISSpatialRefSys(models.Model, SpatialRefSysMixin):
"""
The 'spatial_ref_sys' table from PostGIS. See the PostGIS
documentation at Ch. 4.2.1.
"""
srid = models.IntegerField(primary_key=True)
auth_name = models.CharField(max_length=256)
auth_srid = models.IntegerField()
srtext = models.CharField(max_length=2048)
proj4text = models.CharField(max_length=2048)
class Meta:
app_label = 'gis'
db_table = 'spatial_ref_sys'
managed = False
@property
def wkt(self):
return self.srtext
@classmethod
def wkt_col(cls):
return 'srtext'
|
Nick-OpusVL/odoo
|
refs/heads/8.0
|
addons/hr_timesheet/wizard/hr_timesheet_sign_in_out.py
|
340
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
class hr_so_project(osv.osv_memory):
_name = 'hr.sign.out.project'
_description = 'Sign Out By Project'
_columns = {
'account_id': fields.many2one('account.analytic.account', 'Project / Analytic Account', domain=[('type','=','normal')]),
'info': fields.char('Work Description', required=True),
'date_start': fields.datetime('Starting Date', readonly=True),
'date': fields.datetime('Closing Date'),
'analytic_amount': fields.float('Minimum Analytic Amount'),
'name': fields.char('Employee\'s Name', required=True, readonly=True),
'state': fields.related('emp_id', 'state', string='Current Status', type='selection', selection=[('present', 'Present'), ('absent', 'Absent')], required=True, readonly=True),
'server_date': fields.datetime('Current Date', required=True, readonly=True),
'emp_id': fields.many2one('hr.employee', 'Employee ID')
}
def _get_empid(self, cr, uid, context=None):
emp_obj = self.pool.get('hr.employee')
emp_ids = emp_obj.search(cr, uid, [('user_id', '=', uid)], context=context)
if emp_ids:
for employee in emp_obj.browse(cr, uid, emp_ids, context=context):
return {'name': employee.name, 'state': employee.state, 'emp_id': emp_ids[0], 'server_date':time.strftime('%Y-%m-%d %H:%M:%S')}
def _get_empid2(self, cr, uid, context=None):
res = self._get_empid(cr, uid, context=context)
cr.execute('select name,action from hr_attendance where employee_id=%s order by name desc limit 1', (res['emp_id'],))
res['server_date'] = time.strftime('%Y-%m-%d %H:%M:%S')
date_start = cr.fetchone()
if date_start:
res['date_start'] = date_start[0]
return res
def default_get(self, cr, uid, fields_list, context=None):
res = super(hr_so_project, self).default_get(cr, uid, fields_list, context=context)
res.update(self._get_empid2(cr, uid, context=context))
return res
def _write(self, cr, uid, data, emp_id, context=None):
timesheet_obj = self.pool.get('hr.analytic.timesheet')
emp_obj = self.pool.get('hr.employee')
if context is None:
context = {}
hour = (time.mktime(time.strptime(data['date'] or time.strftime('%Y-%m-%d %H:%M:%S'), '%Y-%m-%d %H:%M:%S')) -
time.mktime(time.strptime(data['date_start'], '%Y-%m-%d %H:%M:%S'))) / 3600.0
minimum = data['analytic_amount']
if minimum:
hour = round(round((hour + minimum / 2) / minimum) * minimum, 2)
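# e.g. with a minimum (analytic_amount) of 0.25 hours, a raw duration of
# 1.38 hours becomes round(round((1.38 + 0.125) / 0.25) * 0.25, 2) = 1.5.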
res = timesheet_obj.default_get(cr, uid, ['product_id','product_uom_id'], context=context)
if not res['product_uom_id']:
raise osv.except_osv(_('User Error!'), _('Please define cost unit for this employee.'))
up = timesheet_obj.on_change_unit_amount(cr, uid, False, res['product_id'], hour,False, res['product_uom_id'])['value']
res['name'] = data['info']
res['account_id'] = data['account_id'].id
res['unit_amount'] = hour
emp_journal = emp_obj.browse(cr, uid, emp_id, context=context).journal_id
res['journal_id'] = emp_journal and emp_journal.id or False
res.update(up)
up = timesheet_obj.on_change_account_id(cr, uid, [], res['account_id']).get('value', {})
res.update(up)
return timesheet_obj.create(cr, uid, res, context=context)
def sign_out_result_end(self, cr, uid, ids, context=None):
emp_obj = self.pool.get('hr.employee')
for data in self.browse(cr, uid, ids, context=context):
emp_id = data.emp_id.id
emp_obj.attendance_action_change(cr, uid, [emp_id], {'action':'sign_out', 'action_date':data.date})
self._write(cr, uid, data, emp_id, context=context)
return {'type': 'ir.actions.act_window_close'}
def sign_out_result(self, cr, uid, ids, context=None):
emp_obj = self.pool.get('hr.employee')
for data in self.browse(cr, uid, ids, context=context):
emp_id = data.emp_id.id
emp_obj.attendance_action_change(cr, uid, [emp_id], {'action':'action', 'action_date':data.date})
self._write(cr, uid, data, emp_id, context=context)
return {'type': 'ir.actions.act_window_close'}
class hr_si_project(osv.osv_memory):
_name = 'hr.sign.in.project'
_description = 'Sign In By Project'
_columns = {
'name': fields.char('Employee\'s Name', readonly=True),
'state': fields.related('emp_id', 'state', string='Current Status', type='selection', selection=[('present', 'Present'), ('absent', 'Absent')], required=True, readonly=True),
'date': fields.datetime('Starting Date'),
'server_date': fields.datetime('Current Date', readonly=True),
'emp_id': fields.many2one('hr.employee', 'Employee ID')
}
def view_init(self, cr, uid, fields, context=None):
"""
This function checks for precondition before wizard executes
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param fields: List of fields for default value
@param context: A standard dictionary for contextual values
"""
emp_obj = self.pool.get('hr.employee')
emp_id = emp_obj.search(cr, uid, [('user_id', '=', uid)], context=context)
if not emp_id:
raise osv.except_osv(_('User Error!'), _('Please define employee for your user.'))
return False
def check_state(self, cr, uid, ids, context=None):
obj_model = self.pool.get('ir.model.data')
emp_id = self.default_get(cr, uid, ['emp_id'], context)['emp_id']
# get the latest action (sign_in or out) for this employee
cr.execute('select action from hr_attendance where employee_id=%s and action in (\'sign_in\',\'sign_out\') order by name desc limit 1', (emp_id,))
res = (cr.fetchone() or ('sign_out',))[0]
in_out = (res == 'sign_out') and 'in' or 'out'
# TODO: invert sign_in and sign_out
model_data_ids = obj_model.search(cr,uid,[('model','=','ir.ui.view'),('name','=','view_hr_timesheet_sign_%s' % in_out)], context=context)
resource_id = obj_model.read(cr, uid, model_data_ids, fields=['res_id'], context=context)[0]['res_id']
return {
'name': _('Sign in / Sign out'),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'hr.sign.%s.project' % in_out,
'views': [(resource_id,'form')],
'type': 'ir.actions.act_window',
'target': 'new'
}
def sign_in_result(self, cr, uid, ids, context=None):
emp_obj = self.pool.get('hr.employee')
for data in self.browse(cr, uid, ids, context=context):
emp_id = data.emp_id.id
emp_obj.attendance_action_change(cr, uid, [emp_id], {'action':'sign_in', 'action_date':data.date})
return {'type': 'ir.actions.act_window_close'}
def default_get(self, cr, uid, fields_list, context=None):
res = super(hr_si_project, self).default_get(cr, uid, fields_list, context=context)
emp_obj = self.pool.get('hr.employee')
emp_id = emp_obj.search(cr, uid, [('user_id', '=', uid)], context=context)
if emp_id:
for employee in emp_obj.browse(cr, uid, emp_id, context=context):
res.update({'name': employee.name, 'state': employee.state, 'emp_id': emp_id[0], 'server_date':time.strftime('%Y-%m-%d %H:%M:%S')})
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
huanpc/IoT-1
|
refs/heads/master
|
gui/controller/.venv/lib/python3.5/site-packages/pip/_vendor/progress/counter.py
|
510
|
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Giorgos Verigakis <verigak@gmail.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from . import Infinite, Progress
from .helpers import WriteMixin
class Counter(WriteMixin, Infinite):
message = ''
hide_cursor = True
def update(self):
self.write(str(self.index))
class Countdown(WriteMixin, Progress):
hide_cursor = True
def update(self):
self.write(str(self.remaining))
class Stack(WriteMixin, Progress):
phases = (u' ', u'▁', u'▂', u'▃', u'▄', u'▅', u'▆', u'▇', u'█')
hide_cursor = True
def update(self):
nphases = len(self.phases)
i = min(nphases - 1, int(self.progress * nphases))
self.write(self.phases[i])
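# (Illustrative: with the 9 phases above, Stack.update at progress == 0.5
# computes i = min(8, int(0.5 * 9)) = 4 and writes u'▄'.)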
class Pie(Stack):
phases = (u'○', u'◔', u'◑', u'◕', u'●')
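# Usage sketch (illustrative; assumes the vendored import path below and a
# terminal stdout):
#
#     from pip._vendor.progress.counter import Counter
#     counter = Counter('Items processed: ')
#     for _ in range(100):
#         pass               # ... do some work ...
#         counter.next()     # Infinite.next() advances index and calls update()
#     counter.finish()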
|
marqh/iris
|
refs/heads/master
|
lib/iris/tests/unit/analysis/scipy_interpolate/test__RegularGridInterpolator.py
|
16
|
# (C) British Crown Copyright 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the
:func:`iris.analysis._scipy_interpolate._RegularGridInterpolator` class."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import numpy as np
import iris
from iris.analysis._scipy_interpolate import _RegularGridInterpolator
from scipy.sparse.csr import csr_matrix
from iris.tests import mock
import iris.tests.stock as stock
class Test(tests.IrisTest):
def setUp(self):
# Load a source cube, then generate an interpolator instance, calculate
# the interpolation weights and set up a target grid.
self.cube = stock.simple_2d()
x_points = self.cube.coord('bar').points
y_points = self.cube.coord('foo').points
self.interpolator = _RegularGridInterpolator([x_points, y_points],
self.cube.data,
method='linear',
bounds_error=False,
fill_value=None)
newx = x_points + 0.7
newy = y_points + 0.7
d_0 = self.cube.data[0, 0]
d_1 = self.cube.data[0, 1]
d_2 = self.cube.data[1, 0]
d_3 = self.cube.data[1, 1]
px_0, px_1 = x_points[0], x_points[1]
py_0, py_1 = y_points[0], y_points[1]
px_t = px_0 + 0.7
py_t = py_0 + 0.7
dyt_0 = self._interpolate_point(py_t, py_0, py_1, d_0, d_1)
dyt_1 = self._interpolate_point(py_t, py_0, py_1, d_2, d_3)
self.test_increment = self._interpolate_point(px_t, px_0, px_1,
dyt_0, dyt_1)
xv, yv = np.meshgrid(newy, newx)
self.tgrid = np.dstack((yv, xv))
self.weights = self.interpolator.compute_interp_weights(self.tgrid)
@staticmethod
def _interpolate_point(p_t, p_0, p_1, d_0, d_1):
return d_0 + (d_1 - d_0)*((p_t - p_0)/(p_1 - p_0))
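    # Worked example of the helper above (illustrative numbers only):
    # interpolating at p_t = 0.5 between p_0 = 0.0 (d_0 = 10.0) and
    # p_1 = 1.0 (d_1 = 20.0) gives 10.0 + (20.0 - 10.0) * 0.5 = 15.0.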
def test_compute_interp_weights(self):
weights = self.weights
self.assertIsInstance(weights, tuple)
self.assertEqual(len(weights), 5)
self.assertEqual(weights[0], self.tgrid.shape)
self.assertEqual(weights[1], 'linear')
self.assertIsInstance(weights[2], csr_matrix)
def test__evaluate_linear_sparse(self):
interpolator = self.interpolator
weights = self.weights
output_data = interpolator._evaluate_linear_sparse(weights[2])
test_data = self.cube.data.reshape(-1) + self.test_increment
self.assertArrayAlmostEqual(output_data, test_data)
def test_interp_using_pre_computed_weights(self):
interpolator = self.interpolator
weights = self.weights
output_data = interpolator.interp_using_pre_computed_weights(weights)
test_data = self.cube.data + self.test_increment
self.assertEqual(output_data.shape, self.cube.data.shape)
self.assertArrayAlmostEqual(output_data, test_data)
if __name__ == "__main__":
tests.main()
|
thingsboard/thingsboard-gateway
|
refs/heads/master
|
thingsboard_gateway/tb_utility/tb_gateway_remote_configurator.py
|
1
|
# Copyright 2021. ThingsBoard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os import remove, linesep
from os.path import exists, dirname
from re import findall
from time import time, sleep
from logging import getLogger
from logging.config import fileConfig
from base64 import b64encode, b64decode
from simplejson import dumps, loads, dump
from yaml import safe_dump
from configparser import ConfigParser
from thingsboard_gateway.gateway.tb_client import TBClient
from thingsboard_gateway.tb_utility.tb_loader import TBModuleLoader
from thingsboard_gateway.tb_utility.tb_logger import TBLoggerHandler
# pylint: disable=protected-access
LOG = getLogger("service")
class RemoteConfigurator:
def __init__(self, gateway, config):
self.__gateway = gateway
self.__new_configuration = None
self.__old_configuration = None
self.__apply_timeout = 10
self.__old_tb_client = None
self.__old_logs_configuration = self.__get_current_logs_configuration()
self.__new_logs_configuration = None
self.__old_connectors_configs = {}
self.__new_connectors_configs = {}
self.__old_general_configuration_file = config
self.__new_general_configuration_file = {}
self.__old_event_storage = None
self.__new_event_storage = None
self.in_process = False
def process_configuration(self, configuration):
try:
if not self.in_process:
self.in_process = True
# while not self.__gateway._published_events.empty():
# LOG.debug("Waiting for end of the data processing...")
# sleep(1)
decoded_configuration = b64decode(configuration)
self.__new_configuration = loads(decoded_configuration)
self.__old_connectors_configs = self.__gateway.connectors_configs
self.__new_general_configuration_file = self.__new_configuration.get("thingsboard")
self.__new_logs_configuration = b64decode(self.__new_general_configuration_file.pop("logs")).decode('UTF-8').replace('}}', '\n')
if self.__old_configuration != decoded_configuration:
LOG.info("Remote configuration received: \n %s", decoded_configuration)
result = self.__process_connectors_configuration()
self.in_process = False
if result:
self.__old_configuration = self.__new_configuration
return True
else:
return False
                else:
                    self.in_process = False
                    LOG.info("Remote configuration is the same.")
            else:
                LOG.error("Remote configuration is already being processed.")
                return False
except Exception as e:
self.in_process = False
LOG.exception(e)
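    # Illustrative shape of the ``configuration`` argument handled above (the
    # connector name, host and settings are hypothetical; the real payload is
    # produced by the ThingsBoard server, and "logs" itself carries a
    # base64-encoded logs.conf):
    #
    #     from base64 import b64encode
    #     from simplejson import dumps
    #     configuration = b64encode(dumps({
    #         "thingsboard": {
    #             "thingsboard": {"host": "demo.thingsboard.io", "port": 1883},
    #             "storage": {"type": "memory"},
    #             "connectors": [{"name": "MQTT Connector", "type": "mqtt",
    #                             "configuration": "mqtt.json"}],
    #             "logs": "<base64 of logs.conf>"
    #         },
    #         "mqtt": [{"name": "MQTT Connector",
    #                   "config": {"broker": {"host": "localhost"}}}]
    #     }).encode())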
def send_current_configuration(self):
try:
current_configuration = {}
for connector in self.__gateway.connectors_configs:
if current_configuration.get(connector) is None:
current_configuration[connector] = []
for config in self.__gateway.connectors_configs[connector]:
for config_file in config['config']:
current_configuration[connector].append({'name': config['name'], 'config': config['config'][config_file]})
current_configuration["thingsboard"] = self.__old_general_configuration_file
current_configuration["thingsboard"]["logs"] = b64encode(self.__old_logs_configuration.replace('\n', '}}').encode("UTF-8"))
json_current_configuration = dumps(current_configuration)
encoded_current_configuration = b64encode(json_current_configuration.encode())
self.__old_configuration = encoded_current_configuration
self.__gateway.tb_client.client.send_attributes(
{"current_configuration": encoded_current_configuration.decode("UTF-8")})
LOG.debug('Current configuration has been sent to ThingsBoard: %s', json_current_configuration)
except Exception as e:
LOG.exception(e)
def __process_connectors_configuration(self):
LOG.info("Processing remote connectors configuration...")
if self.__apply_new_connectors_configuration():
self.__write_new_configuration_files()
self.__apply_storage_configuration()
if self.__safe_apply_connection_configuration():
LOG.info("Remote configuration has been applied.")
with open(self.__gateway.get_config_path() + "tb_gateway.yaml", "w", encoding="UTF-8") as general_configuration_file:
safe_dump(self.__new_general_configuration_file, general_configuration_file)
self.__old_connectors_configs = {}
self.__new_connectors_configs = {}
self.__old_general_configuration_file = self.__new_general_configuration_file
self.__old_logs_configuration = self.__new_logs_configuration
self.__update_logs_configuration()
self.__new_logs_configuration = None
self.__new_general_configuration_file = {}
return True
else:
self.__update_logs_configuration()
self.__old_general_configuration_file.pop("logs")
with open(self.__gateway.get_config_path() + "tb_gateway.yaml", "w", encoding="UTF-8") as general_configuration_file:
safe_dump(self.__old_general_configuration_file, general_configuration_file)
LOG.error("A remote general configuration applying has been failed.")
self.__old_connectors_configs = {}
self.__new_connectors_configs = {}
self.__new_logs_configuration = None
self.__new_general_configuration_file = {}
return False
def __prepare_connectors_configuration(self, input_connector_config):
try:
self.__gateway.connectors_configs = {}
for connector in input_connector_config['thingsboard']['connectors']:
for input_connector in input_connector_config[connector['type']]:
if input_connector['name'] == connector['name']:
if not self.__gateway.connectors_configs.get(connector['type']):
self.__gateway.connectors_configs[connector['type']] = []
self.__gateway.connectors_configs[connector['type']].append(
{"name": connector["name"], "config": {connector['configuration']: input_connector["config"]}})
connector_class = TBModuleLoader.import_module(connector["type"], self.__gateway._default_connectors.get(connector["type"], connector.get("class")))
self.__gateway._implemented_connectors[connector["type"]] = connector_class
except Exception as e:
LOG.exception(e)
def __apply_new_connectors_configuration(self):
try:
self.__prepare_connectors_configuration(self.__new_configuration)
for connector_name in self.__gateway.available_connectors:
try:
self.__gateway.available_connectors[connector_name].close()
except Exception as e:
LOG.exception(e)
self.__gateway._connect_with_connectors()
LOG.debug("New connectors configuration has been applied")
self.__old_connectors_configs = {}
return True
except Exception as e:
self.__gateway.connectors_configs = self.__old_connectors_configs
for connector_name in self.__gateway.available_connectors:
self.__gateway.available_connectors[connector_name].close()
self.__gateway._load_connectors(self.__old_general_configuration_file)
self.__gateway._connect_with_connectors()
LOG.exception(e)
return False
def __write_new_configuration_files(self):
try:
self.__new_connectors_configs = self.__new_connectors_configs if self.__new_connectors_configs else self.__gateway.connectors_configs
new_connectors_files = []
for connector_type in self.__new_connectors_configs:
for connector_config_section in self.__new_connectors_configs[connector_type]:
for connector_file in connector_config_section["config"]:
connector_config = connector_config_section["config"][connector_file]
with open(self.__gateway.get_config_path() + connector_file, "w", encoding="UTF-8") as config_file:
dump(connector_config, config_file, sort_keys=True, indent=2)
new_connectors_files.append(connector_file)
LOG.debug("Saving new configuration for \"%s\" connector to file \"%s\"", connector_type,
connector_file)
break
self.__old_general_configuration_file["connectors"] = self.__new_general_configuration_file["connectors"]
for old_connector_type in self.__old_connectors_configs:
for old_connector_config_section in self.__old_connectors_configs[old_connector_type]:
for old_connector_file in old_connector_config_section["config"]:
if old_connector_file not in new_connectors_files:
remove(self.__gateway.get_config_path() + old_connector_file)
LOG.debug("Remove old configuration file \"%s\" for \"%s\" connector ", old_connector_file,
old_connector_type)
except Exception as e:
LOG.exception(e)
def __safe_apply_connection_configuration(self):
apply_start = time() * 1000
self.__old_tb_client = self.__gateway.tb_client
try:
self.__old_tb_client.unsubscribe('*')
self.__old_tb_client.stop()
self.__old_tb_client.disconnect()
self.__gateway.tb_client = TBClient(self.__new_general_configuration_file["thingsboard"])
self.__gateway.tb_client.connect()
connection_state = False
while time() * 1000 - apply_start < self.__apply_timeout * 1000 and not connection_state:
connection_state = self.__gateway.tb_client.is_connected()
sleep(.1)
if not connection_state:
self.__revert_configuration()
LOG.info("The gateway cannot connect to the ThingsBoard server with a new configuration.")
return False
else:
self.__old_tb_client.stop()
self.__gateway.subscribe_to_required_topics()
return True
except Exception as e:
LOG.exception(e)
self.__revert_configuration()
return False
def __apply_storage_configuration(self):
if self.__old_general_configuration_file["storage"] != self.__new_general_configuration_file["storage"]:
self.__old_event_storage = self.__gateway._event_storage
try:
storage_class = self.__gateway._event_storage_types[self.__new_general_configuration_file["storage"]["type"]]
self.__gateway._event_storage = storage_class(self.__new_general_configuration_file["storage"])
self.__old_event_storage = None
except Exception as e:
LOG.exception(e)
self.__gateway._event_storage = self.__old_event_storage
def __revert_configuration(self):
try:
LOG.info("Remote general configuration will be restored.")
self.__new_general_configuration_file = self.__old_general_configuration_file
self.__gateway.tb_client.disconnect()
self.__gateway.tb_client.stop()
self.__gateway.tb_client = TBClient(self.__old_general_configuration_file["thingsboard"])
self.__gateway.tb_client.connect()
self.__gateway.subscribe_to_required_topics()
LOG.debug("%s connection has been restored", str(self.__gateway.tb_client.client._client))
except Exception as e:
LOG.exception("Exception on reverting configuration occurred:")
LOG.exception(e)
def __get_current_logs_configuration(self):
try:
with open(self.__gateway.get_config_path() + 'logs.conf', 'r', encoding="UTF-8") as logs:
current_logs_configuration = logs.read()
return current_logs_configuration
except Exception as e:
LOG.exception(e)
def __update_logs_configuration(self):
global LOG
try:
LOG = getLogger('service')
logs_conf_file_path = self.__gateway.get_config_path() + 'logs.conf'
new_logging_level = findall(r'level=(.*)', self.__new_logs_configuration.replace("NONE", "NOTSET"))[-1]
new_logging_config = self.__new_logs_configuration.replace("NONE", "NOTSET").replace("\r\n", linesep)
logs_config = ConfigParser(allow_no_value=True)
logs_config.read_string(new_logging_config)
for section in logs_config:
if "handler_" in section and section != "handler_consoleHandler":
args = tuple(logs_config[section]["args"]
.replace('(', '')
.replace(')', '')
.split(', '))
path = args[0][1:-1]
LOG.debug("Checking %s...", path)
if not exists(dirname(path)):
raise FileNotFoundError
with open(logs_conf_file_path, 'w', encoding="UTF-8") as logs:
logs.write(self.__new_logs_configuration.replace("NONE", "NOTSET")+"\r\n")
fileConfig(logs_config)
LOG = getLogger('service')
# self.__gateway.remote_handler.deactivate()
self.__gateway.remote_handler = TBLoggerHandler(self.__gateway)
self.__gateway.main_handler.setLevel(new_logging_level)
self.__gateway.main_handler.setTarget(self.__gateway.remote_handler)
if new_logging_level == "NOTSET":
self.__gateway.remote_handler.deactivate()
else:
self.__gateway.remote_handler.activate(new_logging_level)
LOG.debug("Logs configuration has been updated.")
except Exception as e:
LOG.error("Remote logging configuration is wrong!")
LOG.exception(e)
|
coolhacks/docker-hacks
|
refs/heads/master
|
examples/traductor/traductor/translators/base.py
|
2
|
class BaseTranslator(object):
def translate(self, value):
"""
:param value:
:return:
"""
raise NotImplementedError("translate method must be implemented.")
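# Minimal subclass sketch (hypothetical; not part of this module):
#
#     class UpperCaseTranslator(BaseTranslator):
#         def translate(self, value):
#             return str(value).upper()
#
#     UpperCaseTranslator().translate('memcached')  # -> 'MEMCACHED'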
|
antb/TPT----My-old-mod
|
refs/heads/master
|
src/python/stdlib/test/test_dictcomps.py
|
39
|
doctests = """
>>> k = "old value"
>>> { k: None for k in range(10) }
{0: None, 1: None, 2: None, 3: None, 4: None, 5: None, 6: None, 7: None, 8: None, 9: None}
>>> k
'old value'
>>> { k: k+10 for k in range(10) }
{0: 10, 1: 11, 2: 12, 3: 13, 4: 14, 5: 15, 6: 16, 7: 17, 8: 18, 9: 19}
>>> g = "Global variable"
>>> { k: g for k in range(10) }
{0: 'Global variable', 1: 'Global variable', 2: 'Global variable', 3: 'Global variable', 4: 'Global variable', 5: 'Global variable', 6: 'Global variable', 7: 'Global variable', 8: 'Global variable', 9: 'Global variable'}
>>> { k: v for k in range(10) for v in range(10) if k == v }
{0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9}
>>> { k: v for v in range(10) for k in range(v*9, v*10) }
{9: 1, 18: 2, 19: 2, 27: 3, 28: 3, 29: 3, 36: 4, 37: 4, 38: 4, 39: 4, 45: 5, 46: 5, 47: 5, 48: 5, 49: 5, 54: 6, 55: 6, 56: 6, 57: 6, 58: 6, 59: 6, 63: 7, 64: 7, 65: 7, 66: 7, 67: 7, 68: 7, 69: 7, 72: 8, 73: 8, 74: 8, 75: 8, 76: 8, 77: 8, 78: 8, 79: 8, 81: 9, 82: 9, 83: 9, 84: 9, 85: 9, 86: 9, 87: 9, 88: 9, 89: 9}
>>> { x: y for y, x in ((1, 2), (3, 4)) } = 5 # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
SyntaxError: ...
>>> { x: y for y, x in ((1, 2), (3, 4)) } += 5 # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
SyntaxError: ...
"""
__test__ = {'doctests' : doctests}
def test_main(verbose=None):
import sys
from test import test_support
from test import test_dictcomps
test_support.run_doctest(test_dictcomps, verbose)
# verify reference counting
if verbose and hasattr(sys, "gettotalrefcount"):
import gc
counts = [None] * 5
for i in range(len(counts)):
test_support.run_doctest(test_dictcomps, verbose)
gc.collect()
counts[i] = sys.gettotalrefcount()
print(counts)
if __name__ == "__main__":
test_main(verbose=True)
|
yannickcr/Sick-Beard
|
refs/heads/development
|
bs4/tests/test_lxml.py
|
97
|
"""Tests to ensure that the lxml tree builder generates good trees."""
import re
import warnings
try:
from bs4.builder import LXMLTreeBuilder, LXMLTreeBuilderForXML
LXML_PRESENT = True
except ImportError, e:
LXML_PRESENT = False
from bs4 import (
BeautifulSoup,
BeautifulStoneSoup,
)
from bs4.element import Comment, Doctype, SoupStrainer
from bs4.testing import skipIf
from bs4.tests import test_htmlparser
from bs4.testing import (
HTMLTreeBuilderSmokeTest,
XMLTreeBuilderSmokeTest,
SoupTest,
skipIf,
)
@skipIf(
not LXML_PRESENT,
"lxml seems not to be present, not testing its tree builder.")
class LXMLTreeBuilderSmokeTest(SoupTest, HTMLTreeBuilderSmokeTest):
"""See ``HTMLTreeBuilderSmokeTest``."""
@property
def default_builder(self):
return LXMLTreeBuilder()
def test_out_of_range_entity(self):
self.assertSoupEquals(
"<p>foo�bar</p>", "<p>foobar</p>")
self.assertSoupEquals(
"<p>foo�bar</p>", "<p>foobar</p>")
self.assertSoupEquals(
"<p>foo�bar</p>", "<p>foobar</p>")
def test_beautifulstonesoup_is_xml_parser(self):
# Make sure that the deprecated BSS class uses an xml builder
# if one is installed.
with warnings.catch_warnings(record=False) as w:
soup = BeautifulStoneSoup("<b />")
self.assertEqual(u"<b/>", unicode(soup.b))
def test_real_xhtml_document(self):
"""lxml strips the XML definition from an XHTML doc, which is fine."""
markup = b"""<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN">
<html xmlns="http://www.w3.org/1999/xhtml">
<head><title>Hello.</title></head>
<body>Goodbye.</body>
</html>"""
soup = self.soup(markup)
self.assertEqual(
soup.encode("utf-8").replace(b"\n", b''),
markup.replace(b'\n', b'').replace(
b'<?xml version="1.0" encoding="utf-8"?>', b''))
@skipIf(
not LXML_PRESENT,
"lxml seems not to be present, not testing its XML tree builder.")
class LXMLXMLTreeBuilderSmokeTest(SoupTest, XMLTreeBuilderSmokeTest):
"""See ``HTMLTreeBuilderSmokeTest``."""
@property
def default_builder(self):
return LXMLTreeBuilderForXML()
|
adw0rd/lettuce-py3
|
refs/heads/master
|
tests/unit/test_language.py
|
7
|
# -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falcão <gabriel@nacaolivre.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from nose.tools import assert_equals
from lettuce.core import Language
def test_language_is_english_by_default():
"Language class is english by default"
lang = Language()
assert_equals(lang.code, 'en')
assert_equals(lang.name, 'English')
assert_equals(lang.native, 'English')
assert_equals(lang.feature, 'Feature')
assert_equals(lang.scenario, 'Scenario')
assert_equals(lang.examples, 'Examples|Scenarios')
assert_equals(lang.scenario_outline, 'Scenario Outline')
def test_language_has_first_of():
"Language() can pick up first occurrece of a string"
lang = Language()
assert_equals(lang.first_of_examples, 'Examples')
|
mancoast/mpir
|
refs/heads/master
|
build.vc15/mpir-tune/tune/tune_prebuild.py
|
37
|
from os.path import exists
print('Performing tune prebuild ...')
basic = [
'dc_bdiv_qr.c', 'dc_bdiv_qr_n.c', 'dc_bdiv_q.c', 'dc_divappr_q.c',
'dc_div_qr.c', 'dc_div_qr_n.c', 'divrem_2.c', 'divrem_euclidean_r_1.c',
'divrem_hensel_qr_1.c', 'gcd.c', 'gcdext.c', 'get_str.c', 'hgcd.c',
'hgcd_appr.c', 'hgcd_reduce.c', 'inv_divappr_q.c', 'inv_div_qr.c',
'matrix22_mul.c', 'mul.c', 'mulhigh_n.c', 'mullow_n.c', 'mulmod_2expm1.c',
'mulmod_2expp1_basecase.c', 'mul_n.c', 'rootrem.c', 'rsh_divrem_hensel_qr_1.c',
'sb_divappr_q.c', 'sb_div_qr.c', 'set_str.c', 'tdiv_q.c', 'tdiv_qr.c',
'toom3_mul.c', 'toom3_mul_n.c', 'toom4_mul.c', 'toom4_mul_n.c',
'toom8h_mul.c', 'toom8_sqr_n.c'
]
fft_basic = [
'adjust.c', 'adjust_sqrt2.c', 'butterfly_lshB.c', 'butterfly_rshB.c',
'combine_bits.c', 'div_2expmod_2expp1.c', 'fermat_to_mpz.c',
'fft_mfa_trunc_sqrt2.c', 'fft_mfa_trunc_sqrt2_inner.c', 'fft_negacyclic.c',
'fft_radix2.c', 'fft_trunc.c', 'ifft_negacyclic.c', 'ifft_radix2.c',
'ifft_trunc.c', 'ifft_trunc_sqrt2.c', 'mul_2expmod_2expp1.c',
'mul_fft_main.c', 'mul_mfa_trunc_sqrt2.c', 'mulmod_2expp1.c',
'mul_trunc_sqrt2.c', 'normmod_2expp1.c', 'revbin.c', 'split_bits.c'
]
for n in basic:
if not exists(n) :
with open(n, "wb") as f :
lines = [('#define TUNE_PROGRAM_BUILD 1\r\n').encode()]
lines += [('#include "..\\..\\..\\mpn\\generic\\' + n + '"\r\n').encode()]
f.writelines(lines)
for n in fft_basic:
if not exists(n) :
with open(n, "wb") as f :
lines = [('#define TUNE_PROGRAM_BUILD 1\r\n').encode()]
lines += [('#include "..\\..\\..\\fft\\' + n + '"\r\n').encode()]
f.writelines(lines)
n = 'divrem_1.c'
if not exists(n) :
with open(n, "wb") as f :
lines = [('#define TUNE_PROGRAM_BUILD 1\r\n').encode()]
lines += [('#define __gmpn_divrem_1 mpn_divrem_1_tune\r\n').encode()]
lines += [('#include "..\\..\\..\\mpn\\generic\\' + n + '"\r\n').encode()]
f.writelines(lines)
n = 'mod_1.c'
if not exists(n) :
with open(n, "wb") as f :
lines = [('#define TUNE_PROGRAM_BUILD 1\r\n').encode()]
lines += [('#define __gmpn_mod_1 mpn_mod_1_tune\r\n').encode()]
lines += [('#include "..\\..\\..\\mpn\\generic\\' + n + '"\r\n').encode()]
f.writelines(lines)
n = 'sqr_basecase.asm'
if not exists(n) :
with open(n, "wb") as f :
lines = [('%define SQR_KARATSUBA_THRESHOLD_OVERRIDE SQR_KARATSUBA_THRESHOLD_MAX\r\n').encode()]
lines += [('%include "..\\..\\..\\mpn\\x86_64w\\' + n + '"\r\n').encode()]
f.writelines(lines)
n = 'fac_ui.c'
if not exists(n) :
with open(n, "wb") as f :
lines = [('#define TUNE_PROGRAM_BUILD 1\r\n').encode()]
lines += [('#define __gmpz_fac_ui mpz_fac_ui_tune\r\n').encode()]
lines += [('#define __gmpz_oddfac_1 mpz_oddfac_1_tune\r\n').encode()]
lines += [('#include "..\\..\\..\\mpz\\' + 'oddfac_1.c' + '"\r\n').encode()]
lines += [('#include "..\\..\\..\\mpz\\' + 'fac_ui.c' + '"\r\n').encode()]
f.writelines(lines)
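# For reference, each basic wrapper generated above (e.g. dc_bdiv_qr.c) ends
# up containing exactly the two lines written by the loop:
#
#     #define TUNE_PROGRAM_BUILD 1
#     #include "..\..\..\mpn\generic\dc_bdiv_qr.c"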
|
tmr232/Gooey
|
refs/heads/master
|
gooey/_tmp/mockapp.py
|
9
|
'''
Created on Dec 21, 2013
@author: Chris
'''
import sys
import hashlib
from time import time as _time, time
from time import sleep as _sleep
# from argparse import ArgumentParser
# import argparse
import argparse as ap
from argparse import ArgumentParser as AP
from gooey import Gooey
from gooey import GooeyParser
def main():
print 'hello'
'''
does stuff with parser.parse_args()
'''
desc = "Mock application to test Gooey's functionality"
file_help_msg = "Name of the file you want to process"
my_cool_parser = GooeyParser(description=desc)
my_cool_parser.add_argument("filename", help=file_help_msg, widget="FileChooser") # positional
my_cool_parser.add_argument("outfile", help="Name of the file where you'll save the output") # positional
    my_cool_parser.add_argument('-c', '--countdown', default=2, type=int, help='sets the time to count down from; you see, it is quite simple!')
# my_cool_parser.add_argument('-c', '--cron-schedule', default=10, type=int, help='Set the datetime when the cron should begin', widget='DateChooser')
my_cool_parser.add_argument("-s", "--showtime", action="store_true", help="display the countdown timer")
my_cool_parser.add_argument("-d", "--delay", action="store_true", help="Delay execution for a bit")
my_cool_parser.add_argument('-v', '--verbose', action='count')
my_cool_parser.add_argument("-o", "--obfuscate", action="store_true", help="obfuscate the countdown timer!")
my_cool_parser.add_argument('-r', '--recursive', choices=['yes', 'no'], help='Recurse into subfolders')
my_cool_parser.add_argument("-w", "--writelog", default="No, NOT whatevs", help="write log to some file or something")
my_cool_parser.add_argument("-e", "--expandAll", action="store_true", help="expand all processes")
# verbosity = my_cool_parser.add_mutually_exclusive_group()
# verbosity.add_argument('-t', '--verbozze', dest='verbose', action="store_true", help="Show more details")
# verbosity.add_argument('-q', '--quiet', dest='quiet', action="store_true", help="Only output on error")
print my_cool_parser._actions
print 'inside of main(), my_cool_parser =', my_cool_parser
args = my_cool_parser.parse_args()
print 'EHOOOOOOOOOOOO'
print sys.argv
print args.countdown
print args.showtime
start_time = _time()
print 'Counting down from %s' % args.countdown
while _time() - start_time < args.countdown:
if args.showtime:
print 'printing message at: %s' % _time()
else:
print 'printing message at: %s' % hashlib.md5(str(_time())).hexdigest()
_sleep(.5)
print 'Finished running the program. Byeeeeesss!'
raise ValueError("Something has gone wrong! AHHHHHHHHHHH")
def here_is_smore():
pass
if __name__ == '__main__':
print sys.argv
main()
# import inspect
# import dis
# # print dir(main.__code__)
# # for i in dir(main.__code__):
# # print i, getattr(main.__code__, i)
# print dis.dis(main.__code__)
# # for i in inspect.getmembers(main):
# # print i
|
MakeHer/edx-platform
|
refs/heads/dashboard.2
|
openedx/core/djangoapps/content/course_overviews/migrations/0002_add_course_catalog_fields.py
|
81
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('course_overviews', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='courseoverview',
name='announcement',
field=models.DateTimeField(null=True),
),
migrations.AddField(
model_name='courseoverview',
name='catalog_visibility',
field=models.TextField(null=True),
),
migrations.AddField(
model_name='courseoverview',
name='course_video_url',
field=models.TextField(null=True),
),
migrations.AddField(
model_name='courseoverview',
name='effort',
field=models.TextField(null=True),
),
migrations.AddField(
model_name='courseoverview',
name='short_description',
field=models.TextField(null=True),
),
]
|
mikebsg01/Programming-Contests
|
refs/heads/master
|
CODEFORCES/Codeforces Round #552 (Div. 3)/Template.py
|
45
|
from sys import stdin
def readLine():
return stdin.readline().strip()
def readInt():
return int(readLine())
def readInts():
return list(map(int, readLine().split()))
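# e.g. for an input line "3 1 4", readInts() returns [3, 1, 4] (illustrative).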
T, N = 0, 0
def main():
N = readInt()
print(N)
if __name__ == '__main__':
main()
|
nkalodimas/invenio
|
refs/heads/master
|
modules/websubmit/lib/functions/Print_Success_Approval_Request.py
|
39
|
## This file is part of Invenio.
## Copyright (C) 2008, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""This is Print_Success_Approval_Request. It creates a "success
message" that is shown to the user to indicate that their approval
request has successfully been registered.
"""
__revision__ = "$Id$"
def Print_Success_Approval_Request(parameters, curdir, form, user_info=None):
"""
This function creates a "success message" that is to be shown to the
user to indicate that their approval request has successfully been
registered.
@parameters: None.
@return: (string) - the "success" message for the user.
"""
text = """<br />
<div>
The approval request for your document has successfully been
registered and the referee has been informed.<br />
You will be notified by email when a decision has been made.
</div>
<br />"""
return text
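# Usage sketch (hypothetical direct call; in practice the WebSubmit engine
# invokes this function element itself during a submission):
#
#     html = Print_Success_Approval_Request({}, '/tmp/curdir', {})
#     # `html` is the confirmation snippet displayed to the submitter.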
|
ncliam/serverpos
|
refs/heads/master
|
openerp/addons/website_event_track/__init__.py
|
1577
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import controllers
import models
|
ufukdogan92/is-teklif-sistemi
|
refs/heads/master
|
ilan/migrations/0006_auto_20170522_1704.py
|
1
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-22 17:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ilan', '0005_auto_20170522_1527'),
]
operations = [
migrations.AddField(
model_name='ilan',
name='sureUzunlugu',
field=models.CharField(choices=[('TL', 'Türk Lirası'), ('DLR', 'DOLAR'), ('EUR', 'EURO')], default='TL', max_length=3),
),
migrations.AlterField(
model_name='ilan',
name='butceTipi',
field=models.CharField(choices=[('S', 'Saat'), ('G', 'Gün'), ('A', 'Ay'), ('H', 'Hafta'), ('Y', 'Yıl')], default='G', max_length=3),
),
]
|
zhuwenping/python-for-android
|
refs/heads/master
|
python-modules/twisted/twisted/mail/test/test_imap.py
|
49
|
# -*- test-case-name: twisted.mail.test.test_imap -*-
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test case for twisted.mail.imap4
"""
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import os
import types
import codecs
from zope.interface import implements
from twisted.mail.imap4 import MessageSet
from twisted.mail import imap4
from twisted.protocols import loopback
from twisted.internet import defer
from twisted.internet import error
from twisted.internet import reactor
from twisted.internet import interfaces
from twisted.internet.task import Clock
from twisted.trial import unittest
from twisted.python import util
from twisted.python import failure
from twisted import cred
import twisted.cred.error
import twisted.cred.checkers
import twisted.cred.credentials
import twisted.cred.portal
from twisted.test.proto_helpers import StringTransport, StringTransportWithDisconnection
try:
from twisted.test.ssl_helpers import ClientTLSContext, ServerTLSContext
except ImportError:
ClientTLSContext = ServerTLSContext = None
def strip(f):
return lambda result, f=f: f()
def sortNest(l):
l = l[:]
l.sort()
for i in range(len(l)):
if isinstance(l[i], types.ListType):
l[i] = sortNest(l[i])
elif isinstance(l[i], types.TupleType):
l[i] = tuple(sortNest(list(l[i])))
return l
class IMAP4UTF7TestCase(unittest.TestCase):
tests = [
[u'Hello world', 'Hello world'],
[u'Hello & world', 'Hello &- world'],
[u'Hello\xffworld', 'Hello&AP8-world'],
[u'\xff\xfe\xfd\xfc', '&AP8A,gD9APw-'],
[u'~peter/mail/\u65e5\u672c\u8a9e/\u53f0\u5317',
'~peter/mail/&ZeVnLIqe-/&U,BTFw-'], # example from RFC 2060
]
def test_encodeWithErrors(self):
"""
Specifying an error policy to C{unicode.encode} with the
I{imap4-utf-7} codec should produce the same result as not
specifying the error policy.
"""
text = u'Hello world'
self.assertEqual(
text.encode('imap4-utf-7', 'strict'),
text.encode('imap4-utf-7'))
def test_decodeWithErrors(self):
"""
Similar to L{test_encodeWithErrors}, but for C{str.decode}.
"""
bytes = 'Hello world'
self.assertEqual(
bytes.decode('imap4-utf-7', 'strict'),
bytes.decode('imap4-utf-7'))
def test_getreader(self):
"""
C{codecs.getreader('imap4-utf-7')} returns the I{imap4-utf-7} stream
reader class.
"""
reader = codecs.getreader('imap4-utf-7')(StringIO('Hello&AP8-world'))
self.assertEquals(reader.read(), u'Hello\xffworld')
def test_getwriter(self):
"""
C{codecs.getwriter('imap4-utf-7')} returns the I{imap4-utf-7} stream
writer class.
"""
output = StringIO()
writer = codecs.getwriter('imap4-utf-7')(output)
writer.write(u'Hello\xffworld')
self.assertEquals(output.getvalue(), 'Hello&AP8-world')
def test_encode(self):
"""
The I{imap4-utf-7} can be used to encode a unicode string into a byte
string according to the IMAP4 modified UTF-7 encoding rules.
"""
for (input, output) in self.tests:
self.assertEquals(input.encode('imap4-utf-7'), output)
def test_decode(self):
"""
The I{imap4-utf-7} can be used to decode a byte string into a unicode
string according to the IMAP4 modified UTF-7 encoding rules.
"""
for (input, output) in self.tests:
self.assertEquals(input, output.decode('imap4-utf-7'))
def test_printableSingletons(self):
"""
The IMAP4 modified UTF-7 implementation encodes all printable
characters which are in ASCII using the corresponding ASCII byte.
"""
# All printables represent themselves
for o in range(0x20, 0x26) + range(0x27, 0x7f):
self.failUnlessEqual(chr(o), chr(o).encode('imap4-utf-7'))
self.failUnlessEqual(chr(o), chr(o).decode('imap4-utf-7'))
self.failUnlessEqual('&'.encode('imap4-utf-7'), '&-')
self.failUnlessEqual('&-'.decode('imap4-utf-7'), '&')
class BufferingConsumer:
def __init__(self):
self.buffer = []
def write(self, bytes):
self.buffer.append(bytes)
if self.consumer:
self.consumer.resumeProducing()
def registerProducer(self, consumer, streaming):
self.consumer = consumer
self.consumer.resumeProducing()
def unregisterProducer(self):
self.consumer = None
class MessageProducerTestCase(unittest.TestCase):
def testSinglePart(self):
body = 'This is body text. Rar.'
headers = util.OrderedDict()
headers['from'] = 'sender@host'
headers['to'] = 'recipient@domain'
headers['subject'] = 'booga booga boo'
headers['content-type'] = 'text/plain'
msg = FakeyMessage(headers, (), None, body, 123, None )
c = BufferingConsumer()
p = imap4.MessageProducer(msg)
d = p.beginProducing(c)
def cbProduced(result):
self.assertIdentical(result, p)
self.assertEquals(
''.join(c.buffer),
'{119}\r\n'
'From: sender@host\r\n'
'To: recipient@domain\r\n'
'Subject: booga booga boo\r\n'
'Content-Type: text/plain\r\n'
'\r\n'
+ body)
return d.addCallback(cbProduced)
def testSingleMultiPart(self):
outerBody = ''
innerBody = 'Contained body message text. Squarge.'
headers = util.OrderedDict()
headers['from'] = 'sender@host'
headers['to'] = 'recipient@domain'
headers['subject'] = 'booga booga boo'
headers['content-type'] = 'multipart/alternative; boundary="xyz"'
innerHeaders = util.OrderedDict()
innerHeaders['subject'] = 'this is subject text'
innerHeaders['content-type'] = 'text/plain'
msg = FakeyMessage(headers, (), None, outerBody, 123,
[FakeyMessage(innerHeaders, (), None, innerBody,
None, None)],
)
c = BufferingConsumer()
p = imap4.MessageProducer(msg)
d = p.beginProducing(c)
def cbProduced(result):
self.failUnlessIdentical(result, p)
self.assertEquals(
''.join(c.buffer),
'{239}\r\n'
'From: sender@host\r\n'
'To: recipient@domain\r\n'
'Subject: booga booga boo\r\n'
'Content-Type: multipart/alternative; boundary="xyz"\r\n'
'\r\n'
'\r\n'
'--xyz\r\n'
'Subject: this is subject text\r\n'
'Content-Type: text/plain\r\n'
'\r\n'
+ innerBody
+ '\r\n--xyz--\r\n')
return d.addCallback(cbProduced)
def testMultipleMultiPart(self):
outerBody = ''
innerBody1 = 'Contained body message text. Squarge.'
innerBody2 = 'Secondary <i>message</i> text of squarge body.'
headers = util.OrderedDict()
headers['from'] = 'sender@host'
headers['to'] = 'recipient@domain'
headers['subject'] = 'booga booga boo'
headers['content-type'] = 'multipart/alternative; boundary="xyz"'
innerHeaders = util.OrderedDict()
innerHeaders['subject'] = 'this is subject text'
innerHeaders['content-type'] = 'text/plain'
innerHeaders2 = util.OrderedDict()
innerHeaders2['subject'] = '<b>this is subject</b>'
innerHeaders2['content-type'] = 'text/html'
msg = FakeyMessage(headers, (), None, outerBody, 123, [
FakeyMessage(innerHeaders, (), None, innerBody1, None, None),
FakeyMessage(innerHeaders2, (), None, innerBody2, None, None)
],
)
c = BufferingConsumer()
p = imap4.MessageProducer(msg)
d = p.beginProducing(c)
def cbProduced(result):
self.failUnlessIdentical(result, p)
self.assertEquals(
''.join(c.buffer),
'{354}\r\n'
'From: sender@host\r\n'
'To: recipient@domain\r\n'
'Subject: booga booga boo\r\n'
'Content-Type: multipart/alternative; boundary="xyz"\r\n'
'\r\n'
'\r\n'
'--xyz\r\n'
'Subject: this is subject text\r\n'
'Content-Type: text/plain\r\n'
'\r\n'
+ innerBody1
+ '\r\n--xyz\r\n'
'Subject: <b>this is subject</b>\r\n'
'Content-Type: text/html\r\n'
'\r\n'
+ innerBody2
+ '\r\n--xyz--\r\n')
return d.addCallback(cbProduced)
class IMAP4HelperTestCase(unittest.TestCase):
"""
Tests for various helper utilities in the IMAP4 module.
"""
def test_fileProducer(self):
b = (('x' * 1) + ('y' * 1) + ('z' * 1)) * 10
c = BufferingConsumer()
f = StringIO(b)
p = imap4.FileProducer(f)
d = p.beginProducing(c)
def cbProduced(result):
self.failUnlessIdentical(result, p)
self.assertEquals(
('{%d}\r\n' % len(b))+ b,
''.join(c.buffer))
return d.addCallback(cbProduced)
def test_wildcard(self):
cases = [
['foo/%gum/bar',
['foo/bar', 'oo/lalagum/bar', 'foo/gumx/bar', 'foo/gum/baz'],
['foo/xgum/bar', 'foo/gum/bar'],
], ['foo/x%x/bar',
['foo', 'bar', 'fuz fuz fuz', 'foo/*/bar', 'foo/xyz/bar', 'foo/xx/baz'],
['foo/xyx/bar', 'foo/xx/bar', 'foo/xxxxxxxxxxxxxx/bar'],
], ['foo/xyz*abc/bar',
['foo/xyz/bar', 'foo/abc/bar', 'foo/xyzab/cbar', 'foo/xyza/bcbar'],
['foo/xyzabc/bar', 'foo/xyz/abc/bar', 'foo/xyz/123/abc/bar'],
]
]
for (wildcard, fail, succeed) in cases:
wildcard = imap4.wildcardToRegexp(wildcard, '/')
for x in fail:
self.failIf(wildcard.match(x))
for x in succeed:
self.failUnless(wildcard.match(x))
def test_wildcardNoDelim(self):
cases = [
['foo/%gum/bar',
['foo/bar', 'oo/lalagum/bar', 'foo/gumx/bar', 'foo/gum/baz'],
['foo/xgum/bar', 'foo/gum/bar', 'foo/x/gum/bar'],
], ['foo/x%x/bar',
['foo', 'bar', 'fuz fuz fuz', 'foo/*/bar', 'foo/xyz/bar', 'foo/xx/baz'],
['foo/xyx/bar', 'foo/xx/bar', 'foo/xxxxxxxxxxxxxx/bar', 'foo/x/x/bar'],
], ['foo/xyz*abc/bar',
['foo/xyz/bar', 'foo/abc/bar', 'foo/xyzab/cbar', 'foo/xyza/bcbar'],
['foo/xyzabc/bar', 'foo/xyz/abc/bar', 'foo/xyz/123/abc/bar'],
]
]
for (wildcard, fail, succeed) in cases:
wildcard = imap4.wildcardToRegexp(wildcard, None)
for x in fail:
self.failIf(wildcard.match(x), x)
for x in succeed:
self.failUnless(wildcard.match(x), x)
def test_headerFormatter(self):
cases = [
({'Header1': 'Value1', 'Header2': 'Value2'}, 'Header2: Value2\r\nHeader1: Value1\r\n'),
]
for (input, output) in cases:
self.assertEquals(imap4._formatHeaders(input), output)
def test_messageSet(self):
m1 = MessageSet()
m2 = MessageSet()
self.assertEquals(m1, m2)
m1 = m1 + (1, 3)
self.assertEquals(len(m1), 3)
self.assertEquals(list(m1), [1, 2, 3])
m2 = m2 + (1, 3)
self.assertEquals(m1, m2)
self.assertEquals(list(m1 + m2), [1, 2, 3])
def test_messageSetStringRepresentationWithWildcards(self):
"""
In a L{MessageSet}, in the presence of wildcards, if the highest message
id is known, the wildcard should get replaced by that high value.
"""
inputs = [
MessageSet(imap4.parseIdList('*')),
MessageSet(imap4.parseIdList('3:*', 6)),
MessageSet(imap4.parseIdList('*:2', 6)),
]
outputs = [
"*",
"3:6",
"2:6",
]
for i, o in zip(inputs, outputs):
self.assertEquals(str(i), o)
def test_messageSetStringRepresentationWithInversion(self):
"""
In a L{MessageSet}, inverting the high and low numbers in a range
doesn't affect the meaning of the range. For example, 3:2 displays just
like 2:3, because according to the RFC they have the same meaning.
"""
inputs = [
MessageSet(imap4.parseIdList('2:3')),
MessageSet(imap4.parseIdList('3:2')),
]
outputs = [
"2:3",
"2:3",
]
for i, o in zip(inputs, outputs):
self.assertEquals(str(i), o)
def test_quotedSplitter(self):
cases = [
'''Hello World''',
'''Hello "World!"''',
'''World "Hello" "How are you?"''',
'''"Hello world" How "are you?"''',
'''foo bar "baz buz" NIL''',
'''foo bar "baz buz" "NIL"''',
'''foo NIL "baz buz" bar''',
'''foo "NIL" "baz buz" bar''',
'''"NIL" bar "baz buz" foo''',
'oo \\"oo\\" oo',
'"oo \\"oo\\" oo"',
'oo \t oo',
'"oo \t oo"',
'oo \\t oo',
'"oo \\t oo"',
'oo \o oo',
'"oo \o oo"',
'oo \\o oo',
'"oo \\o oo"',
]
answers = [
['Hello', 'World'],
['Hello', 'World!'],
['World', 'Hello', 'How are you?'],
['Hello world', 'How', 'are you?'],
['foo', 'bar', 'baz buz', None],
['foo', 'bar', 'baz buz', 'NIL'],
['foo', None, 'baz buz', 'bar'],
['foo', 'NIL', 'baz buz', 'bar'],
['NIL', 'bar', 'baz buz', 'foo'],
['oo', '"oo"', 'oo'],
['oo "oo" oo'],
['oo', 'oo'],
['oo \t oo'],
['oo', '\\t', 'oo'],
['oo \\t oo'],
['oo', '\o', 'oo'],
['oo \o oo'],
['oo', '\\o', 'oo'],
['oo \\o oo'],
]
errors = [
'"mismatched quote',
'mismatched quote"',
'mismatched"quote',
'"oops here is" another"',
]
for s in errors:
self.assertRaises(imap4.MismatchedQuoting, imap4.splitQuoted, s)
for (case, expected) in zip(cases, answers):
self.assertEquals(imap4.splitQuoted(case), expected)
def test_stringCollapser(self):
cases = [
['a', 'b', 'c', 'd', 'e'],
['a', ' ', '"', 'b', 'c', ' ', '"', ' ', 'd', 'e'],
[['a', 'b', 'c'], 'd', 'e'],
['a', ['b', 'c', 'd'], 'e'],
['a', 'b', ['c', 'd', 'e']],
['"', 'a', ' ', '"', ['b', 'c', 'd'], '"', ' ', 'e', '"'],
['a', ['"', ' ', 'b', 'c', ' ', ' ', '"'], 'd', 'e'],
]
answers = [
['abcde'],
['a', 'bc ', 'de'],
[['abc'], 'de'],
['a', ['bcd'], 'e'],
['ab', ['cde']],
['a ', ['bcd'], ' e'],
['a', [' bc '], 'de'],
]
for (case, expected) in zip(cases, answers):
self.assertEquals(imap4.collapseStrings(case), expected)
def test_parenParser(self):
s = '\r\n'.join(['xx'] * 4)
cases = [
'(BODY.PEEK[HEADER.FIELDS.NOT (subject bcc cc)] {%d}\r\n%s)' % (len(s), s,),
# '(FLAGS (\Seen) INTERNALDATE "17-Jul-1996 02:44:25 -0700" '
# 'RFC822.SIZE 4286 ENVELOPE ("Wed, 17 Jul 1996 02:23:25 -0700 (PDT)" '
# '"IMAP4rev1 WG mtg summary and minutes" '
# '(("Terry Gray" NIL "gray" "cac.washington.edu")) '
# '(("Terry Gray" NIL "gray" "cac.washington.edu")) '
# '(("Terry Gray" NIL "gray" "cac.washington.edu")) '
# '((NIL NIL "imap" "cac.washington.edu")) '
# '((NIL NIL "minutes" "CNRI.Reston.VA.US") '
# '("John Klensin" NIL "KLENSIN" "INFOODS.MIT.EDU")) NIL NIL '
# '"<B27397-0100000@cac.washington.edu>") '
# 'BODY ("TEXT" "PLAIN" ("CHARSET" "US-ASCII") NIL NIL "7BIT" 3028 92))',
'(FLAGS (\Seen) INTERNALDATE "17-Jul-1996 02:44:25 -0700" '
'RFC822.SIZE 4286 ENVELOPE ("Wed, 17 Jul 1996 02:23:25 -0700 (PDT)" '
'"IMAP4rev1 WG mtg summary and minutes" '
'(("Terry Gray" NIL gray cac.washington.edu)) '
'(("Terry Gray" NIL gray cac.washington.edu)) '
'(("Terry Gray" NIL gray cac.washington.edu)) '
'((NIL NIL imap cac.washington.edu)) '
'((NIL NIL minutes CNRI.Reston.VA.US) '
'("John Klensin" NIL KLENSIN INFOODS.MIT.EDU)) NIL NIL '
'<B27397-0100000@cac.washington.edu>) '
'BODY (TEXT PLAIN (CHARSET US-ASCII) NIL NIL 7BIT 3028 92))',
'("oo \\"oo\\" oo")',
'("oo \\\\ oo")',
'("oo \\ oo")',
'("oo \\o")',
'("oo \o")',
'(oo \o)',
'(oo \\o)',
]
answers = [
['BODY.PEEK', ['HEADER.FIELDS.NOT', ['subject', 'bcc', 'cc']], s],
['FLAGS', [r'\Seen'], 'INTERNALDATE',
'17-Jul-1996 02:44:25 -0700', 'RFC822.SIZE', '4286', 'ENVELOPE',
['Wed, 17 Jul 1996 02:23:25 -0700 (PDT)',
'IMAP4rev1 WG mtg summary and minutes', [["Terry Gray", None,
"gray", "cac.washington.edu"]], [["Terry Gray", None,
"gray", "cac.washington.edu"]], [["Terry Gray", None,
"gray", "cac.washington.edu"]], [[None, None, "imap",
"cac.washington.edu"]], [[None, None, "minutes",
"CNRI.Reston.VA.US"], ["John Klensin", None, "KLENSIN",
"INFOODS.MIT.EDU"]], None, None,
"<B27397-0100000@cac.washington.edu>"], "BODY", ["TEXT", "PLAIN",
["CHARSET", "US-ASCII"], None, None, "7BIT", "3028", "92"]],
['oo "oo" oo'],
['oo \\\\ oo'],
['oo \\ oo'],
['oo \\o'],
['oo \o'],
['oo', '\o'],
['oo', '\\o'],
]
for (case, expected) in zip(cases, answers):
self.assertEquals(imap4.parseNestedParens(case), [expected])
# XXX This code used to work, but changes occurred within the
# imap4.py module which made it no longer necessary for *all* of it
# to work. In particular, only the part that makes
# 'BODY.PEEK[HEADER.FIELDS.NOT (Subject Bcc Cc)]' come out correctly
# no longer needs to work. So, I am loathe to delete the entire
# section of the test. --exarkun
#
# for (case, expected) in zip(answers, cases):
# self.assertEquals('(' + imap4.collapseNestedLists(case) + ')', expected)
def test_fetchParserSimple(self):
cases = [
['ENVELOPE', 'Envelope'],
['FLAGS', 'Flags'],
['INTERNALDATE', 'InternalDate'],
['RFC822.HEADER', 'RFC822Header'],
['RFC822.SIZE', 'RFC822Size'],
['RFC822.TEXT', 'RFC822Text'],
['RFC822', 'RFC822'],
['UID', 'UID'],
['BODYSTRUCTURE', 'BodyStructure'],
]
for (inp, outp) in cases:
p = imap4._FetchParser()
p.parseString(inp)
self.assertEquals(len(p.result), 1)
self.failUnless(isinstance(p.result[0], getattr(p, outp)))
def test_fetchParserMacros(self):
cases = [
['ALL', (4, ['flags', 'internaldate', 'rfc822.size', 'envelope'])],
['FULL', (5, ['flags', 'internaldate', 'rfc822.size', 'envelope', 'body'])],
['FAST', (3, ['flags', 'internaldate', 'rfc822.size'])],
]
for (inp, outp) in cases:
p = imap4._FetchParser()
p.parseString(inp)
self.assertEquals(len(p.result), outp[0])
p = [str(p).lower() for p in p.result]
p.sort()
outp[1].sort()
self.assertEquals(p, outp[1])
def test_fetchParserBody(self):
P = imap4._FetchParser
p = P()
p.parseString('BODY')
self.assertEquals(len(p.result), 1)
self.failUnless(isinstance(p.result[0], p.Body))
self.assertEquals(p.result[0].peek, False)
self.assertEquals(p.result[0].header, None)
self.assertEquals(str(p.result[0]), 'BODY')
p = P()
p.parseString('BODY.PEEK')
self.assertEquals(len(p.result), 1)
self.failUnless(isinstance(p.result[0], p.Body))
self.assertEquals(p.result[0].peek, True)
self.assertEquals(str(p.result[0]), 'BODY')
p = P()
p.parseString('BODY[]')
self.assertEquals(len(p.result), 1)
self.failUnless(isinstance(p.result[0], p.Body))
self.assertEquals(p.result[0].empty, True)
self.assertEquals(str(p.result[0]), 'BODY[]')
p = P()
p.parseString('BODY[HEADER]')
self.assertEquals(len(p.result), 1)
self.failUnless(isinstance(p.result[0], p.Body))
self.assertEquals(p.result[0].peek, False)
self.failUnless(isinstance(p.result[0].header, p.Header))
self.assertEquals(p.result[0].header.negate, True)
self.assertEquals(p.result[0].header.fields, ())
self.assertEquals(p.result[0].empty, False)
self.assertEquals(str(p.result[0]), 'BODY[HEADER]')
p = P()
p.parseString('BODY.PEEK[HEADER]')
self.assertEquals(len(p.result), 1)
self.failUnless(isinstance(p.result[0], p.Body))
self.assertEquals(p.result[0].peek, True)
self.failUnless(isinstance(p.result[0].header, p.Header))
self.assertEquals(p.result[0].header.negate, True)
self.assertEquals(p.result[0].header.fields, ())
self.assertEquals(p.result[0].empty, False)
self.assertEquals(str(p.result[0]), 'BODY[HEADER]')
p = P()
p.parseString('BODY[HEADER.FIELDS (Subject Cc Message-Id)]')
self.assertEquals(len(p.result), 1)
self.failUnless(isinstance(p.result[0], p.Body))
self.assertEquals(p.result[0].peek, False)
self.failUnless(isinstance(p.result[0].header, p.Header))
self.assertEquals(p.result[0].header.negate, False)
self.assertEquals(p.result[0].header.fields, ['SUBJECT', 'CC', 'MESSAGE-ID'])
self.assertEquals(p.result[0].empty, False)
self.assertEquals(str(p.result[0]), 'BODY[HEADER.FIELDS (Subject Cc Message-Id)]')
p = P()
p.parseString('BODY.PEEK[HEADER.FIELDS (Subject Cc Message-Id)]')
self.assertEquals(len(p.result), 1)
self.failUnless(isinstance(p.result[0], p.Body))
self.assertEquals(p.result[0].peek, True)
self.failUnless(isinstance(p.result[0].header, p.Header))
self.assertEquals(p.result[0].header.negate, False)
self.assertEquals(p.result[0].header.fields, ['SUBJECT', 'CC', 'MESSAGE-ID'])
self.assertEquals(p.result[0].empty, False)
self.assertEquals(str(p.result[0]), 'BODY[HEADER.FIELDS (Subject Cc Message-Id)]')
p = P()
p.parseString('BODY.PEEK[HEADER.FIELDS.NOT (Subject Cc Message-Id)]')
self.assertEquals(len(p.result), 1)
self.failUnless(isinstance(p.result[0], p.Body))
self.assertEquals(p.result[0].peek, True)
self.failUnless(isinstance(p.result[0].header, p.Header))
self.assertEquals(p.result[0].header.negate, True)
self.assertEquals(p.result[0].header.fields, ['SUBJECT', 'CC', 'MESSAGE-ID'])
self.assertEquals(p.result[0].empty, False)
self.assertEquals(str(p.result[0]), 'BODY[HEADER.FIELDS.NOT (Subject Cc Message-Id)]')
p = P()
p.parseString('BODY[1.MIME]<10.50>')
self.assertEquals(len(p.result), 1)
self.failUnless(isinstance(p.result[0], p.Body))
self.assertEquals(p.result[0].peek, False)
self.failUnless(isinstance(p.result[0].mime, p.MIME))
self.assertEquals(p.result[0].part, (0,))
self.assertEquals(p.result[0].partialBegin, 10)
self.assertEquals(p.result[0].partialLength, 50)
self.assertEquals(p.result[0].empty, False)
self.assertEquals(str(p.result[0]), 'BODY[1.MIME]<10.50>')
p = P()
p.parseString('BODY.PEEK[1.3.9.11.HEADER.FIELDS.NOT (Message-Id Date)]<103.69>')
self.assertEquals(len(p.result), 1)
self.failUnless(isinstance(p.result[0], p.Body))
self.assertEquals(p.result[0].peek, True)
self.failUnless(isinstance(p.result[0].header, p.Header))
self.assertEquals(p.result[0].part, (0, 2, 8, 10))
self.assertEquals(p.result[0].header.fields, ['MESSAGE-ID', 'DATE'])
self.assertEquals(p.result[0].partialBegin, 103)
self.assertEquals(p.result[0].partialLength, 69)
self.assertEquals(p.result[0].empty, False)
self.assertEquals(str(p.result[0]), 'BODY[1.3.9.11.HEADER.FIELDS.NOT (Message-Id Date)]<103.69>')
def test_files(self):
inputStructure = [
'foo', 'bar', 'baz', StringIO('this is a file\r\n'), 'buz'
]
output = '"foo" "bar" "baz" {16}\r\nthis is a file\r\n "buz"'
self.assertEquals(imap4.collapseNestedLists(inputStructure), output)
def test_quoteAvoider(self):
input = [
'foo', imap4.DontQuoteMe('bar'), "baz", StringIO('this is a file\r\n'),
imap4.DontQuoteMe('buz'), ""
]
output = '"foo" bar "baz" {16}\r\nthis is a file\r\n buz ""'
self.assertEquals(imap4.collapseNestedLists(input), output)
def test_literals(self):
cases = [
('({10}\r\n0123456789)', [['0123456789']]),
]
for (case, expected) in cases:
self.assertEquals(imap4.parseNestedParens(case), expected)
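    # (In "({10}\r\n0123456789)" the "{10}\r\n" prefix is an IMAP literal
    # marker: the following 10 octets are taken verbatim, so quoted-string
    # escaping rules do not apply to them.)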
def test_queryBuilder(self):
inputs = [
imap4.Query(flagged=1),
imap4.Query(sorted=1, unflagged=1, deleted=1),
imap4.Or(imap4.Query(flagged=1), imap4.Query(deleted=1)),
imap4.Query(before='today'),
imap4.Or(
imap4.Query(deleted=1),
imap4.Query(unseen=1),
imap4.Query(new=1)
),
imap4.Or(
imap4.Not(
imap4.Or(
imap4.Query(sorted=1, since='yesterday', smaller=1000),
imap4.Query(sorted=1, before='tuesday', larger=10000),
imap4.Query(sorted=1, unseen=1, deleted=1, before='today'),
imap4.Not(
imap4.Query(subject='spam')
),
),
),
imap4.Not(
imap4.Query(uid='1:5')
),
)
]
outputs = [
'FLAGGED',
'(DELETED UNFLAGGED)',
'(OR FLAGGED DELETED)',
'(BEFORE "today")',
'(OR DELETED (OR UNSEEN NEW))',
'(OR (NOT (OR (SINCE "yesterday" SMALLER 1000) ' # Continuing
'(OR (BEFORE "tuesday" LARGER 10000) (OR (BEFORE ' # Some more
'"today" DELETED UNSEEN) (NOT (SUBJECT "spam")))))) ' # And more
'(NOT (UID 1:5)))',
]
for (query, expected) in zip(inputs, outputs):
self.assertEquals(query, expected)
def test_invalidIdListParser(self):
"""
Trying to parse an invalid representation of a sequence range raises an
L{IllegalIdentifierError}.
"""
inputs = [
'*:*',
'foo',
'4:',
'bar:5'
]
for input in inputs:
self.assertRaises(imap4.IllegalIdentifierError,
imap4.parseIdList, input, 12345)
def test_invalidIdListParserNonPositive(self):
"""
Zeroes and negative values are not accepted in id range expressions. RFC
3501 states that sequence numbers and sequence ranges consist of
        positive numbers (RFC 3501 section 9, the seq-number grammar item).
"""
inputs = [
'0:5',
'0:0',
'*:0',
'0',
'-3:5',
'1:-2',
'-1'
]
for input in inputs:
self.assertRaises(imap4.IllegalIdentifierError,
imap4.parseIdList, input, 12345)
def test_parseIdList(self):
"""
The function to parse sequence ranges yields appropriate L{MessageSet}
objects.
"""
inputs = [
'1:*',
'5:*',
'1:2,5:*',
'*',
'1',
'1,2',
'1,3,5',
'1:10',
'1:10,11',
'1:5,10:20',
'1,5:10',
'1,5:10,15:20',
'1:10,15,20:25',
'4:2'
]
outputs = [
MessageSet(1, None),
MessageSet(5, None),
MessageSet(5, None) + MessageSet(1, 2),
MessageSet(None, None),
MessageSet(1),
MessageSet(1, 2),
MessageSet(1) + MessageSet(3) + MessageSet(5),
MessageSet(1, 10),
MessageSet(1, 11),
MessageSet(1, 5) + MessageSet(10, 20),
MessageSet(1) + MessageSet(5, 10),
MessageSet(1) + MessageSet(5, 10) + MessageSet(15, 20),
MessageSet(1, 10) + MessageSet(15) + MessageSet(20, 25),
MessageSet(2, 4),
]
lengths = [
None, None, None,
1, 1, 2, 3, 10, 11, 16, 7, 13, 17, 3
]
for (input, expected) in zip(inputs, outputs):
self.assertEquals(imap4.parseIdList(input), expected)
for (input, expected) in zip(inputs, lengths):
if expected is None:
self.assertRaises(TypeError, len, imap4.parseIdList(input))
else:
L = len(imap4.parseIdList(input))
self.assertEquals(L, expected,
"len(%r) = %r != %r" % (input, L, expected))
class SimpleMailbox:
implements(imap4.IMailboxInfo, imap4.IMailbox, imap4.ICloseableMailbox)
flags = ('\\Flag1', 'Flag2', '\\AnotherSysFlag', 'LastFlag')
messages = []
mUID = 0
rw = 1
closed = False
def __init__(self):
self.listeners = []
self.addListener = self.listeners.append
self.removeListener = self.listeners.remove
def getFlags(self):
return self.flags
def getUIDValidity(self):
return 42
def getUIDNext(self):
return len(self.messages) + 1
def getMessageCount(self):
return 9
def getRecentCount(self):
return 3
def getUnseenCount(self):
return 4
def isWriteable(self):
return self.rw
def destroy(self):
pass
def getHierarchicalDelimiter(self):
return '/'
def requestStatus(self, names):
r = {}
if 'MESSAGES' in names:
r['MESSAGES'] = self.getMessageCount()
if 'RECENT' in names:
r['RECENT'] = self.getRecentCount()
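        # UIDNEXT is derived from the fixed message count (9 + 1 = 10) rather
        # than from getUIDNext(), so testStatus sees a stable value even when
        # no messages have been appended.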
if 'UIDNEXT' in names:
r['UIDNEXT'] = self.getMessageCount() + 1
if 'UIDVALIDITY' in names:
            r['UIDVALIDITY'] = self.getUIDValidity()
if 'UNSEEN' in names:
r['UNSEEN'] = self.getUnseenCount()
return defer.succeed(r)
    def addMessage(self, message, flags, date=None):
self.messages.append((message, flags, date, self.mUID))
self.mUID += 1
return defer.succeed(None)
def expunge(self):
delete = []
for i in self.messages:
if '\\Deleted' in i[1]:
delete.append(i)
for i in delete:
self.messages.remove(i)
return [i[3] for i in delete]
def close(self):
self.closed = True
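# Account overrides mailbox creation so that every mailbox is a SimpleMailbox
# and records the read/write flag requested at SELECT time.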
class Account(imap4.MemoryAccount):
mailboxFactory = SimpleMailbox
def _emptyMailbox(self, name, id):
return self.mailboxFactory()
def select(self, name, rw=1):
mbox = imap4.MemoryAccount.select(self, name)
if mbox is not None:
mbox.rw = rw
return mbox
class SimpleServer(imap4.IMAP4Server):
def __init__(self, *args, **kw):
imap4.IMAP4Server.__init__(self, *args, **kw)
realm = TestRealm()
realm.theAccount = Account('testuser')
portal = cred.portal.Portal(realm)
c = cred.checkers.InMemoryUsernamePasswordDatabaseDontUse()
self.checker = c
self.portal = portal
portal.registerChecker(c)
self.timeoutTest = False
def lineReceived(self, line):
if self.timeoutTest:
            # Do not send a response
return
imap4.IMAP4Server.lineReceived(self, line)
_username = 'testuser'
_password = 'password-test'
def authenticateLogin(self, username, password):
if username == self._username and password == self._password:
return imap4.IAccount, self.theAccount, lambda: None
raise cred.error.UnauthorizedLogin()
class SimpleClient(imap4.IMAP4Client):
    def __init__(self, deferred, contextFactory=None):
imap4.IMAP4Client.__init__(self, contextFactory)
self.deferred = deferred
self.events = []
def serverGreeting(self, caps):
self.deferred.callback(None)
def modeChanged(self, writeable):
self.events.append(['modeChanged', writeable])
self.transport.loseConnection()
def flagsChanged(self, newFlags):
self.events.append(['flagsChanged', newFlags])
self.transport.loseConnection()
def newMessages(self, exists, recent):
self.events.append(['newMessages', exists, recent])
self.transport.loseConnection()
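# IMAP4HelperMixin wires a SimpleServer and a SimpleClient to each other via
# loopback.loopbackAsync(); each test drives the client side and then
# inspects the server-side state once both deferreds have fired.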
class IMAP4HelperMixin:
serverCTX = None
clientCTX = None
def setUp(self):
d = defer.Deferred()
self.server = SimpleServer(contextFactory=self.serverCTX)
self.client = SimpleClient(d, contextFactory=self.clientCTX)
self.connected = d
SimpleMailbox.messages = []
theAccount = Account('testuser')
theAccount.mboxType = SimpleMailbox
SimpleServer.theAccount = theAccount
def tearDown(self):
del self.server
del self.client
del self.connected
def _cbStopClient(self, ignore):
self.client.transport.loseConnection()
def _ebGeneral(self, failure):
self.client.transport.loseConnection()
self.server.transport.loseConnection()
failure.raiseException()
def loopback(self):
return loopback.loopbackAsync(self.server, self.client)
class IMAP4ServerTestCase(IMAP4HelperMixin, unittest.TestCase):
def testCapability(self):
caps = {}
def getCaps():
def gotCaps(c):
caps.update(c)
self.server.transport.loseConnection()
return self.client.getCapabilities().addCallback(gotCaps)
d1 = self.connected.addCallback(strip(getCaps)).addErrback(self._ebGeneral)
d = defer.gatherResults([self.loopback(), d1])
expected = {'IMAP4rev1': None, 'NAMESPACE': None, 'IDLE': None}
return d.addCallback(lambda _: self.assertEquals(expected, caps))
def testCapabilityWithAuth(self):
caps = {}
self.server.challengers['CRAM-MD5'] = cred.credentials.CramMD5Credentials
def getCaps():
def gotCaps(c):
caps.update(c)
self.server.transport.loseConnection()
return self.client.getCapabilities().addCallback(gotCaps)
d1 = self.connected.addCallback(strip(getCaps)).addErrback(self._ebGeneral)
d = defer.gatherResults([self.loopback(), d1])
expCap = {'IMAP4rev1': None, 'NAMESPACE': None,
'IDLE': None, 'AUTH': ['CRAM-MD5']}
return d.addCallback(lambda _: self.assertEquals(expCap, caps))
def testLogout(self):
self.loggedOut = 0
def logout():
def setLoggedOut():
self.loggedOut = 1
self.client.logout().addCallback(strip(setLoggedOut))
self.connected.addCallback(strip(logout)).addErrback(self._ebGeneral)
d = self.loopback()
return d.addCallback(lambda _: self.assertEquals(self.loggedOut, 1))
def testNoop(self):
self.responses = None
def noop():
def setResponses(responses):
self.responses = responses
self.server.transport.loseConnection()
self.client.noop().addCallback(setResponses)
self.connected.addCallback(strip(noop)).addErrback(self._ebGeneral)
d = self.loopback()
return d.addCallback(lambda _: self.assertEquals(self.responses, []))
def testLogin(self):
def login():
d = self.client.login('testuser', 'password-test')
d.addCallback(self._cbStopClient)
d1 = self.connected.addCallback(strip(login)).addErrback(self._ebGeneral)
d = defer.gatherResults([d1, self.loopback()])
return d.addCallback(self._cbTestLogin)
def _cbTestLogin(self, ignored):
self.assertEquals(self.server.account, SimpleServer.theAccount)
self.assertEquals(self.server.state, 'auth')
def testFailedLogin(self):
def login():
d = self.client.login('testuser', 'wrong-password')
d.addBoth(self._cbStopClient)
d1 = self.connected.addCallback(strip(login)).addErrback(self._ebGeneral)
d2 = self.loopback()
d = defer.gatherResults([d1, d2])
return d.addCallback(self._cbTestFailedLogin)
def _cbTestFailedLogin(self, ignored):
self.assertEquals(self.server.account, None)
self.assertEquals(self.server.state, 'unauth')
def testLoginRequiringQuoting(self):
self.server._username = '{test}user'
self.server._password = '{test}password'
def login():
d = self.client.login('{test}user', '{test}password')
d.addBoth(self._cbStopClient)
d1 = self.connected.addCallback(strip(login)).addErrback(self._ebGeneral)
d = defer.gatherResults([self.loopback(), d1])
return d.addCallback(self._cbTestLoginRequiringQuoting)
def _cbTestLoginRequiringQuoting(self, ignored):
self.assertEquals(self.server.account, SimpleServer.theAccount)
self.assertEquals(self.server.state, 'auth')
def testNamespace(self):
self.namespaceArgs = None
def login():
return self.client.login('testuser', 'password-test')
def namespace():
def gotNamespace(args):
self.namespaceArgs = args
self._cbStopClient(None)
return self.client.namespace().addCallback(gotNamespace)
d1 = self.connected.addCallback(strip(login))
d1.addCallback(strip(namespace))
d1.addErrback(self._ebGeneral)
d2 = self.loopback()
d = defer.gatherResults([d1, d2])
d.addCallback(lambda _: self.assertEquals(self.namespaceArgs,
[[['', '/']], [], []]))
return d
def testSelect(self):
SimpleServer.theAccount.addMailbox('test-mailbox')
self.selectedArgs = None
def login():
return self.client.login('testuser', 'password-test')
def select():
def selected(args):
self.selectedArgs = args
self._cbStopClient(None)
d = self.client.select('test-mailbox')
d.addCallback(selected)
return d
d1 = self.connected.addCallback(strip(login))
d1.addCallback(strip(select))
d1.addErrback(self._ebGeneral)
d2 = self.loopback()
return defer.gatherResults([d1, d2]).addCallback(self._cbTestSelect)
def _cbTestSelect(self, ignored):
mbox = SimpleServer.theAccount.mailboxes['TEST-MAILBOX']
self.assertEquals(self.server.mbox, mbox)
self.assertEquals(self.selectedArgs, {
'EXISTS': 9, 'RECENT': 3, 'UIDVALIDITY': 42,
'FLAGS': ('\\Flag1', 'Flag2', '\\AnotherSysFlag', 'LastFlag'),
'READ-WRITE': 1
})
def test_examine(self):
"""
L{IMAP4Client.examine} issues an I{EXAMINE} command to the server and
returns a L{Deferred} which fires with a C{dict} with as many of the
following keys as the server includes in its response: C{'FLAGS'},
C{'EXISTS'}, C{'RECENT'}, C{'UNSEEN'}, C{'READ-WRITE'}, C{'READ-ONLY'},
C{'UIDVALIDITY'}, and C{'PERMANENTFLAGS'}.
Unfortunately the server doesn't generate all of these so it's hard to
test the client's handling of them here. See
L{IMAP4ClientExamineTests} below.
See U{RFC 3501<http://www.faqs.org/rfcs/rfc3501.html>}, section 6.3.2,
for details.
"""
SimpleServer.theAccount.addMailbox('test-mailbox')
self.examinedArgs = None
def login():
return self.client.login('testuser', 'password-test')
def examine():
def examined(args):
self.examinedArgs = args
self._cbStopClient(None)
d = self.client.examine('test-mailbox')
d.addCallback(examined)
return d
d1 = self.connected.addCallback(strip(login))
d1.addCallback(strip(examine))
d1.addErrback(self._ebGeneral)
d2 = self.loopback()
d = defer.gatherResults([d1, d2])
return d.addCallback(self._cbTestExamine)
def _cbTestExamine(self, ignored):
mbox = SimpleServer.theAccount.mailboxes['TEST-MAILBOX']
self.assertEquals(self.server.mbox, mbox)
self.assertEquals(self.examinedArgs, {
'EXISTS': 9, 'RECENT': 3, 'UIDVALIDITY': 42,
'FLAGS': ('\\Flag1', 'Flag2', '\\AnotherSysFlag', 'LastFlag'),
'READ-WRITE': False})
def testCreate(self):
succeed = ('testbox', 'test/box', 'test/', 'test/box/box', 'INBOX')
fail = ('testbox', 'test/box')
def cb(): self.result.append(1)
def eb(failure): self.result.append(0)
def login():
return self.client.login('testuser', 'password-test')
def create():
for name in succeed + fail:
d = self.client.create(name)
d.addCallback(strip(cb)).addErrback(eb)
d.addCallbacks(self._cbStopClient, self._ebGeneral)
self.result = []
d1 = self.connected.addCallback(strip(login)).addCallback(strip(create))
d2 = self.loopback()
d = defer.gatherResults([d1, d2])
return d.addCallback(self._cbTestCreate, succeed, fail)
def _cbTestCreate(self, ignored, succeed, fail):
self.assertEquals(self.result, [1] * len(succeed) + [0] * len(fail))
mbox = SimpleServer.theAccount.mailboxes.keys()
answers = ['inbox', 'testbox', 'test/box', 'test', 'test/box/box']
mbox.sort()
answers.sort()
self.assertEquals(mbox, [a.upper() for a in answers])
def testDelete(self):
SimpleServer.theAccount.addMailbox('delete/me')
def login():
return self.client.login('testuser', 'password-test')
def delete():
return self.client.delete('delete/me')
d1 = self.connected.addCallback(strip(login))
d1.addCallbacks(strip(delete), self._ebGeneral)
d1.addCallbacks(self._cbStopClient, self._ebGeneral)
d2 = self.loopback()
d = defer.gatherResults([d1, d2])
d.addCallback(lambda _:
self.assertEquals(SimpleServer.theAccount.mailboxes.keys(), []))
return d
def testIllegalInboxDelete(self):
self.stashed = None
def login():
return self.client.login('testuser', 'password-test')
def delete():
return self.client.delete('inbox')
def stash(result):
self.stashed = result
d1 = self.connected.addCallback(strip(login))
d1.addCallbacks(strip(delete), self._ebGeneral)
d1.addBoth(stash)
d1.addCallbacks(self._cbStopClient, self._ebGeneral)
d2 = self.loopback()
d = defer.gatherResults([d1, d2])
d.addCallback(lambda _: self.failUnless(isinstance(self.stashed,
failure.Failure)))
return d
def testNonExistentDelete(self):
def login():
return self.client.login('testuser', 'password-test')
def delete():
return self.client.delete('delete/me')
def deleteFailed(failure):
self.failure = failure
self.failure = None
d1 = self.connected.addCallback(strip(login))
d1.addCallback(strip(delete)).addErrback(deleteFailed)
d1.addCallbacks(self._cbStopClient, self._ebGeneral)
d2 = self.loopback()
d = defer.gatherResults([d1, d2])
d.addCallback(lambda _: self.assertEquals(str(self.failure.value),
'No such mailbox'))
return d
def testIllegalDelete(self):
m = SimpleMailbox()
m.flags = (r'\Noselect',)
SimpleServer.theAccount.addMailbox('delete', m)
SimpleServer.theAccount.addMailbox('delete/me')
def login():
return self.client.login('testuser', 'password-test')
def delete():
return self.client.delete('delete')
def deleteFailed(failure):
self.failure = failure
self.failure = None
d1 = self.connected.addCallback(strip(login))
d1.addCallback(strip(delete)).addErrback(deleteFailed)
d1.addCallbacks(self._cbStopClient, self._ebGeneral)
d2 = self.loopback()
d = defer.gatherResults([d1, d2])
expected = "Hierarchically inferior mailboxes exist and \\Noselect is set"
d.addCallback(lambda _:
self.assertEquals(str(self.failure.value), expected))
return d
def testRename(self):
SimpleServer.theAccount.addMailbox('oldmbox')
def login():
return self.client.login('testuser', 'password-test')
def rename():
return self.client.rename('oldmbox', 'newname')
d1 = self.connected.addCallback(strip(login))
d1.addCallbacks(strip(rename), self._ebGeneral)
d1.addCallbacks(self._cbStopClient, self._ebGeneral)
d2 = self.loopback()
d = defer.gatherResults([d1, d2])
d.addCallback(lambda _:
self.assertEquals(SimpleServer.theAccount.mailboxes.keys(),
['NEWNAME']))
return d
def testIllegalInboxRename(self):
self.stashed = None
def login():
return self.client.login('testuser', 'password-test')
def rename():
return self.client.rename('inbox', 'frotz')
def stash(stuff):
self.stashed = stuff
d1 = self.connected.addCallback(strip(login))
d1.addCallbacks(strip(rename), self._ebGeneral)
d1.addBoth(stash)
d1.addCallbacks(self._cbStopClient, self._ebGeneral)
d2 = self.loopback()
d = defer.gatherResults([d1, d2])
d.addCallback(lambda _:
self.failUnless(isinstance(self.stashed, failure.Failure)))
return d
def testHierarchicalRename(self):
SimpleServer.theAccount.create('oldmbox/m1')
SimpleServer.theAccount.create('oldmbox/m2')
def login():
return self.client.login('testuser', 'password-test')
def rename():
return self.client.rename('oldmbox', 'newname')
d1 = self.connected.addCallback(strip(login))
d1.addCallbacks(strip(rename), self._ebGeneral)
d1.addCallbacks(self._cbStopClient, self._ebGeneral)
d2 = self.loopback()
d = defer.gatherResults([d1, d2])
return d.addCallback(self._cbTestHierarchicalRename)
def _cbTestHierarchicalRename(self, ignored):
mboxes = SimpleServer.theAccount.mailboxes.keys()
expected = ['newname', 'newname/m1', 'newname/m2']
mboxes.sort()
self.assertEquals(mboxes, [s.upper() for s in expected])
def testSubscribe(self):
def login():
return self.client.login('testuser', 'password-test')
def subscribe():
return self.client.subscribe('this/mbox')
d1 = self.connected.addCallback(strip(login))
d1.addCallbacks(strip(subscribe), self._ebGeneral)
d1.addCallbacks(self._cbStopClient, self._ebGeneral)
d2 = self.loopback()
d = defer.gatherResults([d1, d2])
d.addCallback(lambda _:
self.assertEquals(SimpleServer.theAccount.subscriptions,
['THIS/MBOX']))
return d
def testUnsubscribe(self):
SimpleServer.theAccount.subscriptions = ['THIS/MBOX', 'THAT/MBOX']
def login():
return self.client.login('testuser', 'password-test')
def unsubscribe():
return self.client.unsubscribe('this/mbox')
d1 = self.connected.addCallback(strip(login))
d1.addCallbacks(strip(unsubscribe), self._ebGeneral)
d1.addCallbacks(self._cbStopClient, self._ebGeneral)
d2 = self.loopback()
d = defer.gatherResults([d1, d2])
d.addCallback(lambda _:
self.assertEquals(SimpleServer.theAccount.subscriptions,
['THAT/MBOX']))
return d
def _listSetup(self, f):
SimpleServer.theAccount.addMailbox('root/subthing')
SimpleServer.theAccount.addMailbox('root/another-thing')
SimpleServer.theAccount.addMailbox('non-root/subthing')
def login():
return self.client.login('testuser', 'password-test')
def listed(answers):
self.listed = answers
self.listed = None
d1 = self.connected.addCallback(strip(login))
d1.addCallbacks(strip(f), self._ebGeneral)
d1.addCallbacks(listed, self._ebGeneral)
d1.addCallbacks(self._cbStopClient, self._ebGeneral)
d2 = self.loopback()
return defer.gatherResults([d1, d2]).addCallback(lambda _: self.listed)
def testList(self):
def list():
return self.client.list('root', '%')
d = self._listSetup(list)
d.addCallback(lambda listed: self.assertEquals(
sortNest(listed),
sortNest([
(SimpleMailbox.flags, "/", "ROOT/SUBTHING"),
(SimpleMailbox.flags, "/", "ROOT/ANOTHER-THING")
])
))
return d
def testLSub(self):
SimpleServer.theAccount.subscribe('ROOT/SUBTHING')
def lsub():
return self.client.lsub('root', '%')
d = self._listSetup(lsub)
d.addCallback(self.assertEquals,
[(SimpleMailbox.flags, "/", "ROOT/SUBTHING")])
return d
def testStatus(self):
SimpleServer.theAccount.addMailbox('root/subthing')
def login():
return self.client.login('testuser', 'password-test')
def status():
return self.client.status('root/subthing', 'MESSAGES', 'UIDNEXT', 'UNSEEN')
def statused(result):
self.statused = result
self.statused = None
d1 = self.connected.addCallback(strip(login))
d1.addCallbacks(strip(status), self._ebGeneral)
d1.addCallbacks(statused, self._ebGeneral)
d1.addCallbacks(self._cbStopClient, self._ebGeneral)
d2 = self.loopback()
d = defer.gatherResults([d1, d2])
d.addCallback(lambda _: self.assertEquals(
self.statused,
{'MESSAGES': 9, 'UIDNEXT': '10', 'UNSEEN': 4}
))
return d
def testFailedStatus(self):
def login():
return self.client.login('testuser', 'password-test')
def status():
return self.client.status('root/nonexistent', 'MESSAGES', 'UIDNEXT', 'UNSEEN')
def statused(result):
self.statused = result
def failed(failure):
self.failure = failure
self.statused = self.failure = None
d1 = self.connected.addCallback(strip(login))
d1.addCallbacks(strip(status), self._ebGeneral)
d1.addCallbacks(statused, failed)
d1.addCallbacks(self._cbStopClient, self._ebGeneral)
d2 = self.loopback()
return defer.gatherResults([d1, d2]).addCallback(self._cbTestFailedStatus)
def _cbTestFailedStatus(self, ignored):
self.assertEquals(
self.statused, None
)
self.assertEquals(
self.failure.value.args,
('Could not open mailbox',)
)
def testFullAppend(self):
infile = util.sibpath(__file__, 'rfc822.message')
message = open(infile)
SimpleServer.theAccount.addMailbox('root/subthing')
def login():
return self.client.login('testuser', 'password-test')
def append():
return self.client.append(
'root/subthing',
message,
('\\SEEN', '\\DELETED'),
'Tue, 17 Jun 2003 11:22:16 -0600 (MDT)',
)
d1 = self.connected.addCallback(strip(login))
d1.addCallbacks(strip(append), self._ebGeneral)
d1.addCallbacks(self._cbStopClient, self._ebGeneral)
d2 = self.loopback()
d = defer.gatherResults([d1, d2])
return d.addCallback(self._cbTestFullAppend, infile)
def _cbTestFullAppend(self, ignored, infile):
mb = SimpleServer.theAccount.mailboxes['ROOT/SUBTHING']
self.assertEquals(1, len(mb.messages))
self.assertEquals(
(['\\SEEN', '\\DELETED'], 'Tue, 17 Jun 2003 11:22:16 -0600 (MDT)', 0),
mb.messages[0][1:]
)
self.assertEquals(open(infile).read(), mb.messages[0][0].getvalue())
def testPartialAppend(self):
infile = util.sibpath(__file__, 'rfc822.message')
SimpleServer.theAccount.addMailbox('PARTIAL/SUBTHING')
def login():
return self.client.login('testuser', 'password-test')
def append():
            message = open(infile)
return self.client.sendCommand(
imap4.Command(
'APPEND',
'PARTIAL/SUBTHING (\\SEEN) "Right now" {%d}' % os.path.getsize(infile),
(), self.client._IMAP4Client__cbContinueAppend, message
)
)
d1 = self.connected.addCallback(strip(login))
d1.addCallbacks(strip(append), self._ebGeneral)
d1.addCallbacks(self._cbStopClient, self._ebGeneral)
d2 = self.loopback()
d = defer.gatherResults([d1, d2])
return d.addCallback(self._cbTestPartialAppend, infile)
def _cbTestPartialAppend(self, ignored, infile):
mb = SimpleServer.theAccount.mailboxes['PARTIAL/SUBTHING']
self.assertEquals(1, len(mb.messages))
self.assertEquals(
(['\\SEEN'], 'Right now', 0),
mb.messages[0][1:]
)
self.assertEquals(open(infile).read(), mb.messages[0][0].getvalue())
def testCheck(self):
SimpleServer.theAccount.addMailbox('root/subthing')
def login():
return self.client.login('testuser', 'password-test')
def select():
return self.client.select('root/subthing')
def check():
return self.client.check()
d = self.connected.addCallback(strip(login))
d.addCallbacks(strip(select), self._ebGeneral)
d.addCallbacks(strip(check), self._ebGeneral)
d.addCallbacks(self._cbStopClient, self._ebGeneral)
return self.loopback()
# Okay, that was fun
def testClose(self):
m = SimpleMailbox()
m.messages = [
('Message 1', ('\\Deleted', 'AnotherFlag'), None, 0),
('Message 2', ('AnotherFlag',), None, 1),
('Message 3', ('\\Deleted',), None, 2),
]
SimpleServer.theAccount.addMailbox('mailbox', m)
def login():
return self.client.login('testuser', 'password-test')
def select():
return self.client.select('mailbox')
def close():
return self.client.close()
d = self.connected.addCallback(strip(login))
d.addCallbacks(strip(select), self._ebGeneral)
d.addCallbacks(strip(close), self._ebGeneral)
d.addCallbacks(self._cbStopClient, self._ebGeneral)
d2 = self.loopback()
return defer.gatherResults([d, d2]).addCallback(self._cbTestClose, m)
def _cbTestClose(self, ignored, m):
self.assertEquals(len(m.messages), 1)
self.assertEquals(m.messages[0], ('Message 2', ('AnotherFlag',), None, 1))
self.failUnless(m.closed)
def testExpunge(self):
m = SimpleMailbox()
m.messages = [
('Message 1', ('\\Deleted', 'AnotherFlag'), None, 0),
('Message 2', ('AnotherFlag',), None, 1),
('Message 3', ('\\Deleted',), None, 2),
]
SimpleServer.theAccount.addMailbox('mailbox', m)
def login():
return self.client.login('testuser', 'password-test')
def select():
return self.client.select('mailbox')
def expunge():
return self.client.expunge()
def expunged(results):
self.failIf(self.server.mbox is None)
self.results = results
self.results = None
d1 = self.connected.addCallback(strip(login))
d1.addCallbacks(strip(select), self._ebGeneral)
d1.addCallbacks(strip(expunge), self._ebGeneral)
d1.addCallbacks(expunged, self._ebGeneral)
d1.addCallbacks(self._cbStopClient, self._ebGeneral)
d2 = self.loopback()
d = defer.gatherResults([d1, d2])
return d.addCallback(self._cbTestExpunge, m)
def _cbTestExpunge(self, ignored, m):
self.assertEquals(len(m.messages), 1)
self.assertEquals(m.messages[0], ('Message 2', ('AnotherFlag',), None, 1))
self.assertEquals(self.results, [0, 2])
class IMAP4ServerSearchTestCase(IMAP4HelperMixin, unittest.TestCase):
"""
Tests for the behavior of the search_* functions in L{imap4.IMAP4Server}.
"""
def setUp(self):
IMAP4HelperMixin.setUp(self)
self.earlierQuery = ["10-Dec-2009"]
self.sameDateQuery = ["13-Dec-2009"]
self.laterQuery = ["16-Dec-2009"]
self.seq = 0
self.msg = FakeyMessage({"date" : "Mon, 13 Dec 2009 21:25:10 GMT"}, [],
'', '', 1234, None)
def test_searchSentBefore(self):
"""
L{imap4.IMAP4Server.search_SENTBEFORE} returns True if the message date
is earlier than the query date.
"""
self.assertFalse(
self.server.search_SENTBEFORE(self.earlierQuery, self.seq, self.msg))
self.assertTrue(
self.server.search_SENTBEFORE(self.laterQuery, self.seq, self.msg))
def test_searchWildcard(self):
"""
L{imap4.IMAP4Server.search_UID} returns True if the message UID is in
the search range.
"""
self.assertFalse(
self.server.search_UID(['2:3'], self.seq, self.msg, (1, 1234)))
# 2:* should get translated to 2:<max UID> and then to 1:2
self.assertTrue(
self.server.search_UID(['2:*'], self.seq, self.msg, (1, 1234)))
self.assertTrue(
self.server.search_UID(['*'], self.seq, self.msg, (1, 1234)))
def test_searchWildcardHigh(self):
"""
L{imap4.IMAP4Server.search_UID} should return True if there is a
wildcard, because a wildcard means "highest UID in the mailbox".
"""
self.assertTrue(
self.server.search_UID(['1235:*'], self.seq, self.msg, (1234, 1)))
def test_reversedSearchTerms(self):
"""
        A sequence range given in reversed order, such as C{'4:2'}, is
        normalized by L{imap4.parseIdList} so that iteration yields the
        ascending sequence.
"""
msgset = imap4.parseIdList('4:2')
self.assertEquals(list(msgset), [2, 3, 4])
def test_searchSentOn(self):
"""
L{imap4.IMAP4Server.search_SENTON} returns True if the message date is
the same as the query date.
"""
self.assertFalse(
self.server.search_SENTON(self.earlierQuery, self.seq, self.msg))
self.assertTrue(
self.server.search_SENTON(self.sameDateQuery, self.seq, self.msg))
self.assertFalse(
self.server.search_SENTON(self.laterQuery, self.seq, self.msg))
def test_searchSentSince(self):
"""
L{imap4.IMAP4Server.search_SENTSINCE} returns True if the message date
is later than the query date.
"""
self.assertTrue(
self.server.search_SENTSINCE(self.earlierQuery, self.seq, self.msg))
self.assertFalse(
self.server.search_SENTSINCE(self.laterQuery, self.seq, self.msg))
def test_searchOr(self):
"""
L{imap4.IMAP4Server.search_OR} returns true if either of the two
expressions supplied to it returns true and returns false if neither
does.
"""
self.assertTrue(
self.server.search_OR(
["SENTSINCE"] + self.earlierQuery +
["SENTSINCE"] + self.laterQuery,
self.seq, self.msg, (None, None)))
self.assertTrue(
self.server.search_OR(
["SENTSINCE"] + self.laterQuery +
["SENTSINCE"] + self.earlierQuery,
self.seq, self.msg, (None, None)))
self.assertFalse(
self.server.search_OR(
["SENTON"] + self.laterQuery +
["SENTSINCE"] + self.laterQuery,
self.seq, self.msg, (None, None)))
def test_searchNot(self):
"""
L{imap4.IMAP4Server.search_NOT} returns the negation of the result
of the expression supplied to it.
"""
self.assertFalse(self.server.search_NOT(
["SENTSINCE"] + self.earlierQuery, self.seq, self.msg,
(None, None)))
self.assertTrue(self.server.search_NOT(
["SENTON"] + self.laterQuery, self.seq, self.msg,
(None, None)))
class TestRealm:
theAccount = None
def requestAvatar(self, avatarId, mind, *interfaces):
return imap4.IAccount, self.theAccount, lambda: None
class TestChecker:
credentialInterfaces = (cred.credentials.IUsernameHashedPassword, cred.credentials.IUsernamePassword)
users = {
'testuser': 'secret'
}
def requestAvatarId(self, credentials):
if credentials.username in self.users:
return defer.maybeDeferred(
credentials.checkPassword, self.users[credentials.username]
).addCallback(self._cbCheck, credentials.username)
def _cbCheck(self, result, username):
if result:
return username
raise cred.error.UnauthorizedLogin()
class AuthenticatorTestCase(IMAP4HelperMixin, unittest.TestCase):
def setUp(self):
IMAP4HelperMixin.setUp(self)
realm = TestRealm()
realm.theAccount = Account('testuser')
portal = cred.portal.Portal(realm)
portal.registerChecker(TestChecker())
self.server.portal = portal
self.authenticated = 0
self.account = realm.theAccount
def testCramMD5(self):
self.server.challengers['CRAM-MD5'] = cred.credentials.CramMD5Credentials
cAuth = imap4.CramMD5ClientAuthenticator('testuser')
self.client.registerAuthenticator(cAuth)
def auth():
return self.client.authenticate('secret')
def authed():
self.authenticated = 1
d1 = self.connected.addCallback(strip(auth))
d1.addCallbacks(strip(authed), self._ebGeneral)
d1.addCallbacks(self._cbStopClient, self._ebGeneral)
d2 = self.loopback()
d = defer.gatherResults([d1, d2])
return d.addCallback(self._cbTestCramMD5)
def _cbTestCramMD5(self, ignored):
self.assertEquals(self.authenticated, 1)
self.assertEquals(self.server.account, self.account)
def testFailedCramMD5(self):
self.server.challengers['CRAM-MD5'] = cred.credentials.CramMD5Credentials
cAuth = imap4.CramMD5ClientAuthenticator('testuser')
self.client.registerAuthenticator(cAuth)
def misauth():
return self.client.authenticate('not the secret')
def authed():
self.authenticated = 1
def misauthed():
self.authenticated = -1
d1 = self.connected.addCallback(strip(misauth))
d1.addCallbacks(strip(authed), strip(misauthed))
d1.addCallbacks(self._cbStopClient, self._ebGeneral)
d = defer.gatherResults([self.loopback(), d1])
return d.addCallback(self._cbTestFailedCramMD5)
def _cbTestFailedCramMD5(self, ignored):
self.assertEquals(self.authenticated, -1)
self.assertEquals(self.server.account, None)
def testLOGIN(self):
self.server.challengers['LOGIN'] = imap4.LOGINCredentials
cAuth = imap4.LOGINAuthenticator('testuser')
self.client.registerAuthenticator(cAuth)
def auth():
return self.client.authenticate('secret')
def authed():
self.authenticated = 1
d1 = self.connected.addCallback(strip(auth))
d1.addCallbacks(strip(authed), self._ebGeneral)
d1.addCallbacks(self._cbStopClient, self._ebGeneral)
d = defer.gatherResults([self.loopback(), d1])
return d.addCallback(self._cbTestLOGIN)
def _cbTestLOGIN(self, ignored):
self.assertEquals(self.authenticated, 1)
self.assertEquals(self.server.account, self.account)
def testFailedLOGIN(self):
self.server.challengers['LOGIN'] = imap4.LOGINCredentials
cAuth = imap4.LOGINAuthenticator('testuser')
self.client.registerAuthenticator(cAuth)
def misauth():
return self.client.authenticate('not the secret')
def authed():
self.authenticated = 1
def misauthed():
self.authenticated = -1
d1 = self.connected.addCallback(strip(misauth))
d1.addCallbacks(strip(authed), strip(misauthed))
d1.addCallbacks(self._cbStopClient, self._ebGeneral)
d = defer.gatherResults([self.loopback(), d1])
return d.addCallback(self._cbTestFailedLOGIN)
def _cbTestFailedLOGIN(self, ignored):
self.assertEquals(self.authenticated, -1)
self.assertEquals(self.server.account, None)
def testPLAIN(self):
self.server.challengers['PLAIN'] = imap4.PLAINCredentials
cAuth = imap4.PLAINAuthenticator('testuser')
self.client.registerAuthenticator(cAuth)
def auth():
return self.client.authenticate('secret')
def authed():
self.authenticated = 1
d1 = self.connected.addCallback(strip(auth))
d1.addCallbacks(strip(authed), self._ebGeneral)
d1.addCallbacks(self._cbStopClient, self._ebGeneral)
d = defer.gatherResults([self.loopback(), d1])
return d.addCallback(self._cbTestPLAIN)
def _cbTestPLAIN(self, ignored):
self.assertEquals(self.authenticated, 1)
self.assertEquals(self.server.account, self.account)
def testFailedPLAIN(self):
self.server.challengers['PLAIN'] = imap4.PLAINCredentials
cAuth = imap4.PLAINAuthenticator('testuser')
self.client.registerAuthenticator(cAuth)
def misauth():
return self.client.authenticate('not the secret')
def authed():
self.authenticated = 1
def misauthed():
self.authenticated = -1
d1 = self.connected.addCallback(strip(misauth))
d1.addCallbacks(strip(authed), strip(misauthed))
d1.addCallbacks(self._cbStopClient, self._ebGeneral)
d = defer.gatherResults([self.loopback(), d1])
return d.addCallback(self._cbTestFailedPLAIN)
def _cbTestFailedPLAIN(self, ignored):
self.assertEquals(self.authenticated, -1)
self.assertEquals(self.server.account, None)
class SASLPLAINTestCase(unittest.TestCase):
"""
Tests for I{SASL PLAIN} authentication, as implemented by
L{imap4.PLAINAuthenticator} and L{imap4.PLAINCredentials}.
@see: U{http://www.faqs.org/rfcs/rfc2595.html}
@see: U{http://www.faqs.org/rfcs/rfc4616.html}
"""
def test_authenticatorChallengeResponse(self):
"""
        L{PLAINAuthenticator.challengeResponse} returns response strings of
the form::
NUL<authn-id>NUL<secret>
"""
username = 'testuser'
secret = 'secret'
chal = 'challenge'
cAuth = imap4.PLAINAuthenticator(username)
response = cAuth.challengeResponse(secret, chal)
self.assertEquals(response, '\0%s\0%s' % (username, secret))
def test_credentialsSetResponse(self):
"""
        L{PLAINCredentials.setResponse} parses response strings of the
form::
NUL<authn-id>NUL<secret>
"""
cred = imap4.PLAINCredentials()
cred.setResponse('\0testuser\0secret')
self.assertEquals(cred.username, 'testuser')
self.assertEquals(cred.password, 'secret')
def test_credentialsInvalidResponse(self):
"""
L{PLAINCredentials.setResponse} raises L{imap4.IllegalClientResponse}
when passed a string not of the expected form.
"""
cred = imap4.PLAINCredentials()
self.assertRaises(
imap4.IllegalClientResponse, cred.setResponse, 'hello')
self.assertRaises(
imap4.IllegalClientResponse, cred.setResponse, 'hello\0world')
self.assertRaises(
imap4.IllegalClientResponse, cred.setResponse,
'hello\0world\0Zoom!\0')
class UnsolicitedResponseTestCase(IMAP4HelperMixin, unittest.TestCase):
def testReadWrite(self):
def login():
return self.client.login('testuser', 'password-test')
def loggedIn():
self.server.modeChanged(1)
d1 = self.connected.addCallback(strip(login))
d1.addCallback(strip(loggedIn)).addErrback(self._ebGeneral)
d = defer.gatherResults([self.loopback(), d1])
return d.addCallback(self._cbTestReadWrite)
def _cbTestReadWrite(self, ignored):
E = self.client.events
self.assertEquals(E, [['modeChanged', 1]])
def testReadOnly(self):
def login():
return self.client.login('testuser', 'password-test')
def loggedIn():
self.server.modeChanged(0)
d1 = self.connected.addCallback(strip(login))
d1.addCallback(strip(loggedIn)).addErrback(self._ebGeneral)
d = defer.gatherResults([self.loopback(), d1])
return d.addCallback(self._cbTestReadOnly)
def _cbTestReadOnly(self, ignored):
E = self.client.events
self.assertEquals(E, [['modeChanged', 0]])
def testFlagChange(self):
flags = {
1: ['\\Answered', '\\Deleted'],
5: [],
10: ['\\Recent']
}
def login():
return self.client.login('testuser', 'password-test')
def loggedIn():
self.server.flagsChanged(flags)
d1 = self.connected.addCallback(strip(login))
d1.addCallback(strip(loggedIn)).addErrback(self._ebGeneral)
d = defer.gatherResults([self.loopback(), d1])
return d.addCallback(self._cbTestFlagChange, flags)
def _cbTestFlagChange(self, ignored, flags):
E = self.client.events
expect = [['flagsChanged', {x[0]: x[1]}] for x in flags.items()]
E.sort()
expect.sort()
self.assertEquals(E, expect)
def testNewMessages(self):
def login():
return self.client.login('testuser', 'password-test')
def loggedIn():
self.server.newMessages(10, None)
d1 = self.connected.addCallback(strip(login))
d1.addCallback(strip(loggedIn)).addErrback(self._ebGeneral)
d = defer.gatherResults([self.loopback(), d1])
return d.addCallback(self._cbTestNewMessages)
def _cbTestNewMessages(self, ignored):
E = self.client.events
self.assertEquals(E, [['newMessages', 10, None]])
def testNewRecentMessages(self):
def login():
return self.client.login('testuser', 'password-test')
def loggedIn():
self.server.newMessages(None, 10)
d1 = self.connected.addCallback(strip(login))
d1.addCallback(strip(loggedIn)).addErrback(self._ebGeneral)
d = defer.gatherResults([self.loopback(), d1])
return d.addCallback(self._cbTestNewRecentMessages)
def _cbTestNewRecentMessages(self, ignored):
E = self.client.events
self.assertEquals(E, [['newMessages', None, 10]])
def testNewMessagesAndRecent(self):
def login():
return self.client.login('testuser', 'password-test')
def loggedIn():
self.server.newMessages(20, 10)
d1 = self.connected.addCallback(strip(login))
d1.addCallback(strip(loggedIn)).addErrback(self._ebGeneral)
d = defer.gatherResults([self.loopback(), d1])
return d.addCallback(self._cbTestNewMessagesAndRecent)
def _cbTestNewMessagesAndRecent(self, ignored):
E = self.client.events
self.assertEquals(E, [['newMessages', 20, None], ['newMessages', None, 10]])
class ClientCapabilityTests(unittest.TestCase):
"""
Tests for issuance of the CAPABILITY command and handling of its response.
"""
def setUp(self):
"""
Create an L{imap4.IMAP4Client} connected to a L{StringTransport}.
"""
self.transport = StringTransport()
self.protocol = imap4.IMAP4Client()
self.protocol.makeConnection(self.transport)
self.protocol.dataReceived('* OK [IMAP4rev1]\r\n')
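        # The untagged OK greeting above completes the connection handshake,
        # after which the client may issue CAPABILITY.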
def test_simpleAtoms(self):
"""
A capability response consisting only of atoms without C{'='} in them
should result in a dict mapping those atoms to C{None}.
"""
capabilitiesResult = self.protocol.getCapabilities(useCache=False)
self.protocol.dataReceived('* CAPABILITY IMAP4rev1 LOGINDISABLED\r\n')
self.protocol.dataReceived('0001 OK Capability completed.\r\n')
def gotCapabilities(capabilities):
self.assertEqual(
capabilities, {'IMAP4rev1': None, 'LOGINDISABLED': None})
capabilitiesResult.addCallback(gotCapabilities)
return capabilitiesResult
def test_categoryAtoms(self):
"""
A capability response consisting of atoms including C{'='} should have
those atoms split on that byte and have capabilities in the same
category aggregated into lists in the resulting dictionary.
(n.b. - I made up the word "category atom"; the protocol has no notion
of structure here, but rather allows each capability to define the
semantics of its entry in the capability response in a freeform manner.
If I had realized this earlier, the API for capabilities would look
different. As it is, we can hope that no one defines any crazy
semantics which are incompatible with this API, or try to figure out a
better API when someone does. -exarkun)
"""
capabilitiesResult = self.protocol.getCapabilities(useCache=False)
self.protocol.dataReceived('* CAPABILITY IMAP4rev1 AUTH=LOGIN AUTH=PLAIN\r\n')
self.protocol.dataReceived('0001 OK Capability completed.\r\n')
def gotCapabilities(capabilities):
self.assertEqual(
capabilities, {'IMAP4rev1': None, 'AUTH': ['LOGIN', 'PLAIN']})
capabilitiesResult.addCallback(gotCapabilities)
return capabilitiesResult
def test_mixedAtoms(self):
"""
A capability response consisting of both simple and category atoms of
the same type should result in a list containing C{None} as well as the
values for the category.
"""
capabilitiesResult = self.protocol.getCapabilities(useCache=False)
# Exercise codepath for both orderings of =-having and =-missing
# capabilities.
self.protocol.dataReceived(
'* CAPABILITY IMAP4rev1 FOO FOO=BAR BAR=FOO BAR\r\n')
self.protocol.dataReceived('0001 OK Capability completed.\r\n')
def gotCapabilities(capabilities):
self.assertEqual(capabilities, {'IMAP4rev1': None,
'FOO': [None, 'BAR'],
'BAR': ['FOO', None]})
capabilitiesResult.addCallback(gotCapabilities)
return capabilitiesResult
class StillSimplerClient(imap4.IMAP4Client):
"""
An IMAP4 client which keeps track of unsolicited flag changes.
"""
def __init__(self):
imap4.IMAP4Client.__init__(self)
self.flags = {}
def flagsChanged(self, newFlags):
self.flags.update(newFlags)
class HandCraftedTestCase(IMAP4HelperMixin, unittest.TestCase):
def testTrailingLiteral(self):
transport = StringTransport()
c = imap4.IMAP4Client()
c.makeConnection(transport)
c.lineReceived('* OK [IMAP4rev1]')
def cbSelect(ignored):
d = c.fetchMessage('1')
c.dataReceived('* 1 FETCH (RFC822 {10}\r\n0123456789\r\n RFC822.SIZE 10)\r\n')
c.dataReceived('0003 OK FETCH\r\n')
return d
def cbLogin(ignored):
d = c.select('inbox')
c.lineReceived('0002 OK SELECT')
d.addCallback(cbSelect)
return d
d = c.login('blah', 'blah')
c.dataReceived('0001 OK LOGIN\r\n')
d.addCallback(cbLogin)
return d
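    # The next test feeds the server its input in awkward chunks.  The wire
    # exchange it simulates looks like:
    #   C: 01 LOGIN {8}
    #   S: + Ready for 8 octets of text
    #   C: testuser {13}
    #   S: + Ready for 13 octets of text
    #   C: password-test
    #   S: 01 OK LOGIN succeeded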
def testPathelogicalScatteringOfLiterals(self):
self.server.checker.addUser('testuser', 'password-test')
transport = StringTransport()
self.server.makeConnection(transport)
transport.clear()
self.server.dataReceived("01 LOGIN {8}\r\n")
self.assertEquals(transport.value(), "+ Ready for 8 octets of text\r\n")
transport.clear()
self.server.dataReceived("testuser {13}\r\n")
self.assertEquals(transport.value(), "+ Ready for 13 octets of text\r\n")
transport.clear()
self.server.dataReceived("password-test\r\n")
self.assertEquals(transport.value(), "01 OK LOGIN succeeded\r\n")
self.assertEquals(self.server.state, 'auth')
self.server.connectionLost(error.ConnectionDone("Connection done."))
def test_unsolicitedResponseMixedWithSolicitedResponse(self):
"""
If unsolicited data is received along with solicited data in the
response to a I{FETCH} command issued by L{IMAP4Client.fetchSpecific},
the unsolicited data is passed to the appropriate callback and not
        included in the result with which the L{Deferred} returned by
L{IMAP4Client.fetchSpecific} fires.
"""
transport = StringTransport()
c = StillSimplerClient()
c.makeConnection(transport)
c.lineReceived('* OK [IMAP4rev1]')
def login():
d = c.login('blah', 'blah')
c.dataReceived('0001 OK LOGIN\r\n')
return d
def select():
d = c.select('inbox')
c.lineReceived('0002 OK SELECT')
return d
def fetch():
d = c.fetchSpecific('1:*',
headerType='HEADER.FIELDS',
headerArgs=['SUBJECT'])
c.dataReceived('* 1 FETCH (BODY[HEADER.FIELDS ("SUBJECT")] {38}\r\n')
c.dataReceived('Subject: Suprise for your woman...\r\n')
c.dataReceived('\r\n')
c.dataReceived(')\r\n')
            c.dataReceived('* 1 FETCH (FLAGS (\\Seen))\r\n')
c.dataReceived('* 2 FETCH (BODY[HEADER.FIELDS ("SUBJECT")] {75}\r\n')
c.dataReceived('Subject: What you been doing. Order your meds here . ,. handcuff madsen\r\n')
c.dataReceived('\r\n')
c.dataReceived(')\r\n')
c.dataReceived('0003 OK FETCH completed\r\n')
return d
def test(res):
self.assertEquals(res, {
1: [['BODY', ['HEADER.FIELDS', ['SUBJECT']],
'Subject: Suprise for your woman...\r\n\r\n']],
2: [['BODY', ['HEADER.FIELDS', ['SUBJECT']],
'Subject: What you been doing. Order your meds here . ,. handcuff madsen\r\n\r\n']]
})
self.assertEquals(c.flags, {1: ['\\Seen']})
return login(
).addCallback(strip(select)
).addCallback(strip(fetch)
).addCallback(test)
def test_literalWithoutPrecedingWhitespace(self):
"""
Literals should be recognized even when they are not preceded by
whitespace.
"""
transport = StringTransport()
protocol = imap4.IMAP4Client()
protocol.makeConnection(transport)
protocol.lineReceived('* OK [IMAP4rev1]')
def login():
d = protocol.login('blah', 'blah')
protocol.dataReceived('0001 OK LOGIN\r\n')
return d
def select():
d = protocol.select('inbox')
protocol.lineReceived('0002 OK SELECT')
return d
def fetch():
d = protocol.fetchSpecific('1:*',
headerType='HEADER.FIELDS',
headerArgs=['SUBJECT'])
protocol.dataReceived(
'* 1 FETCH (BODY[HEADER.FIELDS ({7}\r\nSUBJECT)] "Hello")\r\n')
protocol.dataReceived('0003 OK FETCH completed\r\n')
return d
def test(result):
self.assertEqual(
result, {1: [['BODY', ['HEADER.FIELDS', ['SUBJECT']], 'Hello']]})
d = login()
d.addCallback(strip(select))
d.addCallback(strip(fetch))
d.addCallback(test)
return d
def test_nonIntegerLiteralLength(self):
"""
If the server sends a literal length which cannot be parsed as an
integer, L{IMAP4Client.lineReceived} should cause the protocol to be
disconnected by raising L{imap4.IllegalServerResponse}.
"""
transport = StringTransport()
protocol = imap4.IMAP4Client()
protocol.makeConnection(transport)
protocol.lineReceived('* OK [IMAP4rev1]')
def login():
d = protocol.login('blah', 'blah')
protocol.dataReceived('0001 OK LOGIN\r\n')
return d
def select():
d = protocol.select('inbox')
protocol.lineReceived('0002 OK SELECT')
return d
def fetch():
d = protocol.fetchSpecific('1:*',
headerType='HEADER.FIELDS',
headerArgs=['SUBJECT'])
self.assertRaises(
imap4.IllegalServerResponse,
protocol.dataReceived,
'* 1 FETCH {xyz}\r\n...')
d = login()
d.addCallback(strip(select))
d.addCallback(strip(fetch))
return d
def test_flagsChangedInsideFetchSpecificResponse(self):
"""
Any unrequested flag information received along with other requested
information in an untagged I{FETCH} received in response to a request
issued with L{IMAP4Client.fetchSpecific} is passed to the
C{flagsChanged} callback.
"""
transport = StringTransport()
c = StillSimplerClient()
c.makeConnection(transport)
c.lineReceived('* OK [IMAP4rev1]')
def login():
d = c.login('blah', 'blah')
c.dataReceived('0001 OK LOGIN\r\n')
return d
def select():
d = c.select('inbox')
c.lineReceived('0002 OK SELECT')
return d
def fetch():
d = c.fetchSpecific('1:*',
headerType='HEADER.FIELDS',
headerArgs=['SUBJECT'])
# This response includes FLAGS after the requested data.
c.dataReceived('* 1 FETCH (BODY[HEADER.FIELDS ("SUBJECT")] {22}\r\n')
c.dataReceived('Subject: subject one\r\n')
c.dataReceived(' FLAGS (\\Recent))\r\n')
# And this one includes it before! Either is possible.
c.dataReceived('* 2 FETCH (FLAGS (\\Seen) BODY[HEADER.FIELDS ("SUBJECT")] {22}\r\n')
c.dataReceived('Subject: subject two\r\n')
c.dataReceived(')\r\n')
c.dataReceived('0003 OK FETCH completed\r\n')
return d
def test(res):
self.assertEquals(res, {
1: [['BODY', ['HEADER.FIELDS', ['SUBJECT']],
'Subject: subject one\r\n']],
2: [['BODY', ['HEADER.FIELDS', ['SUBJECT']],
'Subject: subject two\r\n']]
})
self.assertEquals(c.flags, {1: ['\\Recent'], 2: ['\\Seen']})
return login(
).addCallback(strip(select)
).addCallback(strip(fetch)
).addCallback(test)
def test_flagsChangedInsideFetchMessageResponse(self):
"""
Any unrequested flag information received along with other requested
information in an untagged I{FETCH} received in response to a request
issued with L{IMAP4Client.fetchMessage} is passed to the
C{flagsChanged} callback.
"""
transport = StringTransport()
c = StillSimplerClient()
c.makeConnection(transport)
c.lineReceived('* OK [IMAP4rev1]')
def login():
d = c.login('blah', 'blah')
c.dataReceived('0001 OK LOGIN\r\n')
return d
def select():
d = c.select('inbox')
c.lineReceived('0002 OK SELECT')
return d
def fetch():
d = c.fetchMessage('1:*')
c.dataReceived('* 1 FETCH (RFC822 {24}\r\n')
c.dataReceived('Subject: first subject\r\n')
            c.dataReceived(' FLAGS (\\Seen))\r\n')
            c.dataReceived('* 2 FETCH (FLAGS (\\Recent \\Seen) RFC822 {25}\r\n')
c.dataReceived('Subject: second subject\r\n')
c.dataReceived(')\r\n')
c.dataReceived('0003 OK FETCH completed\r\n')
return d
def test(res):
self.assertEquals(res, {
1: {'RFC822': 'Subject: first subject\r\n'},
2: {'RFC822': 'Subject: second subject\r\n'}})
self.assertEquals(
c.flags, {1: ['\\Seen'], 2: ['\\Recent', '\\Seen']})
return login(
).addCallback(strip(select)
).addCallback(strip(fetch)
).addCallback(test)
class PreauthIMAP4ClientMixin:
"""
Mixin for L{unittest.TestCase} subclasses which provides a C{setUp} method
which creates an L{IMAP4Client} connected to a L{StringTransport} and puts
it into the I{authenticated} state.
@ivar transport: A L{StringTransport} to which C{client} is connected.
@ivar client: An L{IMAP4Client} which is connected to C{transport}.
"""
clientProtocol = imap4.IMAP4Client
def setUp(self):
"""
Create an IMAP4Client connected to a fake transport and in the
authenticated state.
"""
self.transport = StringTransport()
self.client = self.clientProtocol()
self.client.makeConnection(self.transport)
self.client.dataReceived('* PREAUTH Hello unittest\r\n')
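        # A PREAUTH greeting tells the client it is already authenticated,
        # so mailbox commands such as SELECT and EXAMINE can be issued
        # without logging in first.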
def _extractDeferredResult(self, d):
"""
Synchronously extract the result of the given L{Deferred}. Fail the
test if that is not possible.
"""
result = []
error = []
d.addCallbacks(result.append, error.append)
if result:
return result[0]
elif error:
error[0].raiseException()
else:
self.fail("Expected result not available")
class SelectionTestsMixin(PreauthIMAP4ClientMixin):
"""
Mixin for test cases which defines tests which apply to both I{EXAMINE} and
I{SELECT} support.
"""
def _examineOrSelect(self):
"""
Issue either an I{EXAMINE} or I{SELECT} command (depending on
C{self.method}), assert that the correct bytes are written to the
transport, and return the L{Deferred} returned by whichever method was
called.
"""
d = getattr(self.client, self.method)('foobox')
self.assertEquals(
self.transport.value(), '0001 %s foobox\r\n' % (self.command,))
return d
def _response(self, *lines):
"""
Deliver the given (unterminated) response lines to C{self.client} and
then deliver a tagged SELECT or EXAMINE completion line to finish the
SELECT or EXAMINE response.
"""
for line in lines:
self.client.dataReceived(line + '\r\n')
self.client.dataReceived(
'0001 OK [READ-ONLY] %s completed\r\n' % (self.command,))
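    # Each test below delivers one untagged response line and asserts either
    # that the corresponding key appears in the result dict, or that a
    # malformed value fails the Deferred with IllegalServerResponse.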
def test_exists(self):
"""
If the server response to a I{SELECT} or I{EXAMINE} command includes an
        I{EXISTS} response, the L{Deferred} returned by L{IMAP4Client.select} or
L{IMAP4Client.examine} fires with a C{dict} including the value
associated with the C{'EXISTS'} key.
"""
d = self._examineOrSelect()
self._response('* 3 EXISTS')
self.assertEquals(
self._extractDeferredResult(d),
{'READ-WRITE': False, 'EXISTS': 3})
def test_nonIntegerExists(self):
"""
If the server returns a non-integer EXISTS value in its response to a
I{SELECT} or I{EXAMINE} command, the L{Deferred} returned by
L{IMAP4Client.select} or L{IMAP4Client.examine} fails with
L{IllegalServerResponse}.
"""
d = self._examineOrSelect()
self._response('* foo EXISTS')
self.assertRaises(
imap4.IllegalServerResponse, self._extractDeferredResult, d)
def test_recent(self):
"""
        If the server response to a I{SELECT} or I{EXAMINE} command includes a
        I{RECENT} response, the L{Deferred} returned by L{IMAP4Client.select} or
L{IMAP4Client.examine} fires with a C{dict} including the value
associated with the C{'RECENT'} key.
"""
d = self._examineOrSelect()
self._response('* 5 RECENT')
self.assertEquals(
self._extractDeferredResult(d),
{'READ-WRITE': False, 'RECENT': 5})
def test_nonIntegerRecent(self):
"""
If the server returns a non-integer RECENT value in its response to a
I{SELECT} or I{EXAMINE} command, the L{Deferred} returned by
L{IMAP4Client.select} or L{IMAP4Client.examine} fails with
L{IllegalServerResponse}.
"""
d = self._examineOrSelect()
self._response('* foo RECENT')
self.assertRaises(
imap4.IllegalServerResponse, self._extractDeferredResult, d)
def test_unseen(self):
"""
If the server response to a I{SELECT} or I{EXAMINE} command includes an
I{UNSEEN} response, the L{Deferred} returned by L{IMAP4Client.select} or
L{IMAP4Client.examine} fires with a C{dict} including the value
associated with the C{'UNSEEN'} key.
"""
d = self._examineOrSelect()
self._response('* OK [UNSEEN 8] Message 8 is first unseen')
self.assertEquals(
self._extractDeferredResult(d),
{'READ-WRITE': False, 'UNSEEN': 8})
def test_nonIntegerUnseen(self):
"""
If the server returns a non-integer UNSEEN value in its response to a
I{SELECT} or I{EXAMINE} command, the L{Deferred} returned by
L{IMAP4Client.select} or L{IMAP4Client.examine} fails with
L{IllegalServerResponse}.
"""
d = self._examineOrSelect()
self._response('* OK [UNSEEN foo] Message foo is first unseen')
self.assertRaises(
imap4.IllegalServerResponse, self._extractDeferredResult, d)
def test_uidvalidity(self):
"""
        If the server response to a I{SELECT} or I{EXAMINE} command includes a
I{UIDVALIDITY} response, the L{Deferred} returned by
L{IMAP4Client.select} or L{IMAP4Client.examine} fires with a C{dict}
including the value associated with the C{'UIDVALIDITY'} key.
"""
d = self._examineOrSelect()
self._response('* OK [UIDVALIDITY 12345] UIDs valid')
self.assertEquals(
self._extractDeferredResult(d),
{'READ-WRITE': False, 'UIDVALIDITY': 12345})
def test_nonIntegerUIDVALIDITY(self):
"""
If the server returns a non-integer UIDVALIDITY value in its response to
a I{SELECT} or I{EXAMINE} command, the L{Deferred} returned by
L{IMAP4Client.select} or L{IMAP4Client.examine} fails with
L{IllegalServerResponse}.
"""
d = self._examineOrSelect()
self._response('* OK [UIDVALIDITY foo] UIDs valid')
self.assertRaises(
imap4.IllegalServerResponse, self._extractDeferredResult, d)
def test_uidnext(self):
"""
        If the server response to a I{SELECT} or I{EXAMINE} command includes a
I{UIDNEXT} response, the L{Deferred} returned by L{IMAP4Client.select}
or L{IMAP4Client.examine} fires with a C{dict} including the value
associated with the C{'UIDNEXT'} key.
"""
d = self._examineOrSelect()
self._response('* OK [UIDNEXT 4392] Predicted next UID')
self.assertEquals(
self._extractDeferredResult(d),
{'READ-WRITE': False, 'UIDNEXT': 4392})
def test_nonIntegerUIDNEXT(self):
"""
If the server returns a non-integer UIDNEXT value in its response to a
I{SELECT} or I{EXAMINE} command, the L{Deferred} returned by
L{IMAP4Client.select} or L{IMAP4Client.examine} fails with
L{IllegalServerResponse}.
"""
d = self._examineOrSelect()
self._response('* OK [UIDNEXT foo] Predicted next UID')
self.assertRaises(
imap4.IllegalServerResponse, self._extractDeferredResult, d)
def test_flags(self):
"""
        If the server response to a I{SELECT} or I{EXAMINE} command includes a
I{FLAGS} response, the L{Deferred} returned by L{IMAP4Client.select} or
L{IMAP4Client.examine} fires with a C{dict} including the value
associated with the C{'FLAGS'} key.
"""
d = self._examineOrSelect()
self._response(
'* FLAGS (\\Answered \\Flagged \\Deleted \\Seen \\Draft)')
self.assertEquals(
self._extractDeferredResult(d), {
'READ-WRITE': False,
'FLAGS': ('\\Answered', '\\Flagged', '\\Deleted', '\\Seen',
'\\Draft')})
def test_permanentflags(self):
"""
        If the server response to a I{SELECT} or I{EXAMINE} command includes a
        I{PERMANENTFLAGS} response code, the L{Deferred} returned by
        L{IMAP4Client.select} or L{IMAP4Client.examine} fires with a C{dict}
        including the value associated with the C{'PERMANENTFLAGS'} key.
"""
d = self._examineOrSelect()
self._response(
'* OK [PERMANENTFLAGS (\\Starred)] Just one permanent flag in '
'that list up there')
self.assertEquals(
self._extractDeferredResult(d), {
'READ-WRITE': False,
'PERMANENTFLAGS': ('\\Starred',)})
def test_unrecognizedOk(self):
"""
If the server response to a I{SELECT} or I{EXAMINE} command includes an
I{OK} with unrecognized response code text, parsing does not fail.
"""
d = self._examineOrSelect()
self._response(
'* OK [X-MADE-UP] I just made this response text up.')
# The value won't show up in the result. It would be okay if it did
# someday, perhaps. This shouldn't ever happen, though.
self.assertEquals(
self._extractDeferredResult(d), {'READ-WRITE': False})
def test_bareOk(self):
"""
If the server response to a I{SELECT} or I{EXAMINE} command includes an
I{OK} with no response code text, parsing does not fail.
"""
d = self._examineOrSelect()
self._response('* OK')
self.assertEquals(
self._extractDeferredResult(d), {'READ-WRITE': False})
class IMAP4ClientExamineTests(SelectionTestsMixin, unittest.TestCase):
"""
Tests for the L{IMAP4Client.examine} method.
An example of usage of the EXAMINE command from RFC 3501, section 6.3.2::
S: * 17 EXISTS
S: * 2 RECENT
S: * OK [UNSEEN 8] Message 8 is first unseen
S: * OK [UIDVALIDITY 3857529045] UIDs valid
S: * OK [UIDNEXT 4392] Predicted next UID
S: * FLAGS (\\Answered \\Flagged \\Deleted \\Seen \\Draft)
S: * OK [PERMANENTFLAGS ()] No permanent flags permitted
S: A932 OK [READ-ONLY] EXAMINE completed
"""
method = 'examine'
command = 'EXAMINE'
class IMAP4ClientSelectTests(SelectionTestsMixin, unittest.TestCase):
"""
Tests for the L{IMAP4Client.select} method.
An example of usage of the SELECT command from RFC 3501, section 6.3.1::
C: A142 SELECT INBOX
S: * 172 EXISTS
S: * 1 RECENT
S: * OK [UNSEEN 12] Message 12 is first unseen
S: * OK [UIDVALIDITY 3857529045] UIDs valid
S: * OK [UIDNEXT 4392] Predicted next UID
S: * FLAGS (\Answered \Flagged \Deleted \Seen \Draft)
S: * OK [PERMANENTFLAGS (\Deleted \Seen \*)] Limited
S: A142 OK [READ-WRITE] SELECT completed
"""
method = 'select'
command = 'SELECT'
class IMAP4ClientExpungeTests(PreauthIMAP4ClientMixin, unittest.TestCase):
"""
Tests for the L{IMAP4Client.expunge} method.
An example of usage of the EXPUNGE command from RFC 3501, section 6.4.3::
C: A202 EXPUNGE
S: * 3 EXPUNGE
S: * 3 EXPUNGE
S: * 5 EXPUNGE
S: * 8 EXPUNGE
S: A202 OK EXPUNGE completed
"""
def _expunge(self):
d = self.client.expunge()
self.assertEquals(self.transport.value(), '0001 EXPUNGE\r\n')
self.transport.clear()
return d
def _response(self, sequenceNumbers):
for number in sequenceNumbers:
self.client.lineReceived('* %s EXPUNGE' % (number,))
self.client.lineReceived('0001 OK EXPUNGE COMPLETED')
def test_expunge(self):
"""
L{IMAP4Client.expunge} sends the I{EXPUNGE} command and returns a
L{Deferred} which fires with a C{list} of message sequence numbers
given by the server's response.
"""
d = self._expunge()
self._response([3, 3, 5, 8])
self.assertEquals(self._extractDeferredResult(d), [3, 3, 5, 8])
def test_nonIntegerExpunged(self):
"""
If the server responds with a non-integer where a message sequence
number is expected, the L{Deferred} returned by L{IMAP4Client.expunge}
fails with L{IllegalServerResponse}.
"""
d = self._expunge()
self._response([3, 3, 'foo', 8])
self.assertRaises(
imap4.IllegalServerResponse, self._extractDeferredResult, d)
class IMAP4ClientSearchTests(PreauthIMAP4ClientMixin, unittest.TestCase):
"""
Tests for the L{IMAP4Client.search} method.
An example of usage of the SEARCH command from RFC 3501, section 6.4.4::
C: A282 SEARCH FLAGGED SINCE 1-Feb-1994 NOT FROM "Smith"
S: * SEARCH 2 84 882
S: A282 OK SEARCH completed
C: A283 SEARCH TEXT "string not in mailbox"
S: * SEARCH
S: A283 OK SEARCH completed
C: A284 SEARCH CHARSET UTF-8 TEXT {6}
C: XXXXXX
S: * SEARCH 43
S: A284 OK SEARCH completed
"""
def _search(self):
d = self.client.search(imap4.Query(text="ABCDEF"))
self.assertEquals(
self.transport.value(), '0001 SEARCH (TEXT "ABCDEF")\r\n')
return d
def _response(self, messageNumbers):
self.client.lineReceived(
"* SEARCH " + " ".join(map(str, messageNumbers)))
self.client.lineReceived("0001 OK SEARCH completed")
def test_search(self):
"""
L{IMAP4Client.search} sends the I{SEARCH} command and returns a
L{Deferred} which fires with a C{list} of message sequence numbers
given by the server's response.
"""
d = self._search()
self._response([2, 5, 10])
self.assertEquals(self._extractDeferredResult(d), [2, 5, 10])
def test_nonIntegerFound(self):
"""
If the server responds with a non-integer where a message sequence
number is expected, the L{Deferred} returned by L{IMAP4Client.search}
fails with L{IllegalServerResponse}.
"""
d = self._search()
self._response([2, "foo", 10])
self.assertRaises(
imap4.IllegalServerResponse, self._extractDeferredResult, d)
class IMAP4ClientFetchTests(PreauthIMAP4ClientMixin, unittest.TestCase):
"""
Tests for the L{IMAP4Client.fetch} method.
See RFC 3501, section 6.4.5.
"""
def test_fetchUID(self):
"""
L{IMAP4Client.fetchUID} sends the I{FETCH UID} command and returns a
L{Deferred} which fires with a C{dict} mapping message sequence numbers
to C{dict}s mapping C{'UID'} to that message's I{UID} in the server's
response.
"""
d = self.client.fetchUID('1:7')
self.assertEquals(self.transport.value(), '0001 FETCH 1:7 (UID)\r\n')
self.client.lineReceived('* 2 FETCH (UID 22)')
self.client.lineReceived('* 3 FETCH (UID 23)')
self.client.lineReceived('* 4 FETCH (UID 24)')
self.client.lineReceived('* 5 FETCH (UID 25)')
self.client.lineReceived('0001 OK FETCH completed')
self.assertEquals(
self._extractDeferredResult(d), {
2: {'UID': '22'},
3: {'UID': '23'},
4: {'UID': '24'},
5: {'UID': '25'}})
def test_fetchUIDNonIntegerFound(self):
"""
If the server responds with a non-integer where a message sequence
number is expected, the L{Deferred} returned by L{IMAP4Client.fetchUID}
fails with L{IllegalServerResponse}.
"""
d = self.client.fetchUID('1')
self.assertEquals(self.transport.value(), '0001 FETCH 1 (UID)\r\n')
self.client.lineReceived('* foo FETCH (UID 22)')
self.client.lineReceived('0001 OK FETCH completed')
self.assertRaises(
imap4.IllegalServerResponse, self._extractDeferredResult, d)
def test_incompleteFetchUIDResponse(self):
"""
If the server responds with an incomplete I{FETCH} response line, the
L{Deferred} returned by L{IMAP4Client.fetchUID} fails with
L{IllegalServerResponse}.
"""
d = self.client.fetchUID('1:7')
self.assertEquals(self.transport.value(), '0001 FETCH 1:7 (UID)\r\n')
self.client.lineReceived('* 2 FETCH (UID 22)')
self.client.lineReceived('* 3 FETCH (UID)')
self.client.lineReceived('* 4 FETCH (UID 24)')
self.client.lineReceived('0001 OK FETCH completed')
self.assertRaises(
imap4.IllegalServerResponse, self._extractDeferredResult, d)
def test_fetchBody(self):
"""
L{IMAP4Client.fetchBody} sends the I{FETCH BODY} command and returns a
L{Deferred} which fires with a C{dict} mapping message sequence numbers
to C{dict}s mapping C{'RFC822.TEXT'} to that message's body as given in
the server's response.
"""
d = self.client.fetchBody('3')
self.assertEquals(
self.transport.value(), '0001 FETCH 3 (RFC822.TEXT)\r\n')
self.client.lineReceived('* 3 FETCH (RFC822.TEXT "Message text")')
self.client.lineReceived('0001 OK FETCH completed')
self.assertEquals(
self._extractDeferredResult(d),
{3: {'RFC822.TEXT': 'Message text'}})
def test_fetchSpecific(self):
"""
L{IMAP4Client.fetchSpecific} sends the I{BODY[]} command if no
parameters beyond the message set to retrieve are given. It returns a
L{Deferred} which fires with a C{dict} mapping message sequence numbers
to C{list}s of corresponding message data given by the server's
response.
"""
d = self.client.fetchSpecific('7')
self.assertEquals(
self.transport.value(), '0001 FETCH 7 BODY[]\r\n')
self.client.lineReceived('* 7 FETCH (BODY[] "Some body")')
self.client.lineReceived('0001 OK FETCH completed')
self.assertEquals(
self._extractDeferredResult(d), {7: [['BODY', [], "Some body"]]})
def test_fetchSpecificPeek(self):
"""
L{IMAP4Client.fetchSpecific} issues a I{BODY.PEEK[]} command if passed
C{True} for the C{peek} parameter.
"""
d = self.client.fetchSpecific('6', peek=True)
self.assertEquals(
self.transport.value(), '0001 FETCH 6 BODY.PEEK[]\r\n')
# BODY.PEEK responses are just BODY
self.client.lineReceived('* 6 FETCH (BODY[] "Some body")')
self.client.lineReceived('0001 OK FETCH completed')
self.assertEquals(
self._extractDeferredResult(d), {6: [['BODY', [], "Some body"]]})
def test_fetchSpecificNumbered(self):
"""
        L{IMAP4Client.fetchSpecific}, when passed a sequence for
C{headerNumber}, sends the I{BODY[N.M]} command. It returns a
L{Deferred} which fires with a C{dict} mapping message sequence numbers
to C{list}s of corresponding message data given by the server's
response.
"""
d = self.client.fetchSpecific('7', headerNumber=(1, 2, 3))
self.assertEquals(
self.transport.value(), '0001 FETCH 7 BODY[1.2.3]\r\n')
self.client.lineReceived('* 7 FETCH (BODY[1.2.3] "Some body")')
self.client.lineReceived('0001 OK FETCH completed')
self.assertEquals(
self._extractDeferredResult(d),
{7: [['BODY', ['1.2.3'], "Some body"]]})
def test_fetchSpecificText(self):
"""
L{IMAP4Client.fetchSpecific}, when passed C{'TEXT'} for C{headerType},
sends the I{BODY[TEXT]} command. It returns a L{Deferred} which fires
with a C{dict} mapping message sequence numbers to C{list}s of
corresponding message data given by the server's response.
"""
d = self.client.fetchSpecific('8', headerType='TEXT')
self.assertEquals(
self.transport.value(), '0001 FETCH 8 BODY[TEXT]\r\n')
self.client.lineReceived('* 8 FETCH (BODY[TEXT] "Some body")')
self.client.lineReceived('0001 OK FETCH completed')
self.assertEquals(
self._extractDeferredResult(d),
{8: [['BODY', ['TEXT'], "Some body"]]})
def test_fetchSpecificNumberedText(self):
"""
If passed a value for the C{headerNumber} parameter and C{'TEXT'} for
the C{headerType} parameter, L{IMAP4Client.fetchSpecific} sends a
I{BODY[number.TEXT]} request and returns a L{Deferred} which fires with
a C{dict} mapping message sequence numbers to C{list}s of message data
given by the server's response.
"""
d = self.client.fetchSpecific('4', headerType='TEXT', headerNumber=7)
self.assertEquals(
self.transport.value(), '0001 FETCH 4 BODY[7.TEXT]\r\n')
self.client.lineReceived('* 4 FETCH (BODY[7.TEXT] "Some body")')
self.client.lineReceived('0001 OK FETCH completed')
self.assertEquals(
self._extractDeferredResult(d),
{4: [['BODY', ['7.TEXT'], "Some body"]]})
def test_incompleteFetchSpecificTextResponse(self):
"""
If the server responds to a I{BODY[TEXT]} request with a I{FETCH} line
which is truncated after the I{BODY[TEXT]} tokens, the L{Deferred}
        returned by L{IMAP4Client.fetchSpecific} fails with
L{IllegalServerResponse}.
"""
d = self.client.fetchSpecific('8', headerType='TEXT')
self.assertEquals(
self.transport.value(), '0001 FETCH 8 BODY[TEXT]\r\n')
self.client.lineReceived('* 8 FETCH (BODY[TEXT])')
self.client.lineReceived('0001 OK FETCH completed')
self.assertRaises(
imap4.IllegalServerResponse, self._extractDeferredResult, d)
def test_fetchSpecificMIME(self):
"""
L{IMAP4Client.fetchSpecific}, when passed C{'MIME'} for C{headerType},
sends the I{BODY[MIME]} command. It returns a L{Deferred} which fires
with a C{dict} mapping message sequence numbers to C{list}s of
corresponding message data given by the server's response.
"""
d = self.client.fetchSpecific('8', headerType='MIME')
self.assertEquals(
self.transport.value(), '0001 FETCH 8 BODY[MIME]\r\n')
self.client.lineReceived('* 8 FETCH (BODY[MIME] "Some body")')
self.client.lineReceived('0001 OK FETCH completed')
self.assertEquals(
self._extractDeferredResult(d),
{8: [['BODY', ['MIME'], "Some body"]]})
def test_fetchSpecificPartial(self):
"""
L{IMAP4Client.fetchSpecific}, when passed C{offset} and C{length},
sends a partial content request (like I{BODY[TEXT]<offset.length>}).
It returns a L{Deferred} which fires with a C{dict} mapping message
sequence numbers to C{list}s of corresponding message data given by the
server's response.
"""
d = self.client.fetchSpecific(
'9', headerType='TEXT', offset=17, length=3)
self.assertEquals(
self.transport.value(), '0001 FETCH 9 BODY[TEXT]<17.3>\r\n')
self.client.lineReceived('* 9 FETCH (BODY[TEXT]<17> "foo")')
self.client.lineReceived('0001 OK FETCH completed')
self.assertEquals(
self._extractDeferredResult(d),
{9: [['BODY', ['TEXT'], '<17>', 'foo']]})
def test_incompleteFetchSpecificPartialResponse(self):
"""
If the server responds to a I{BODY[TEXT]} request with a I{FETCH} line
which is truncated after the I{BODY[TEXT]<offset>} tokens, the
        L{Deferred} returned by L{IMAP4Client.fetchSpecific} fails with
L{IllegalServerResponse}.
"""
d = self.client.fetchSpecific('8', headerType='TEXT')
self.assertEquals(
self.transport.value(), '0001 FETCH 8 BODY[TEXT]\r\n')
self.client.lineReceived('* 8 FETCH (BODY[TEXT]<17>)')
self.client.lineReceived('0001 OK FETCH completed')
self.assertRaises(
imap4.IllegalServerResponse, self._extractDeferredResult, d)
def test_fetchSpecificHTML(self):
"""
If the body of a message begins with I{<} and ends with I{>} (as,
for example, HTML bodies typically will), this is still interpreted
as the body by L{IMAP4Client.fetchSpecific} (and particularly, not
as a length indicator for a response to a request for a partial
body).
"""
d = self.client.fetchSpecific('7')
self.assertEquals(
self.transport.value(), '0001 FETCH 7 BODY[]\r\n')
self.client.lineReceived('* 7 FETCH (BODY[] "<html>test</html>")')
self.client.lineReceived('0001 OK FETCH completed')
self.assertEquals(
self._extractDeferredResult(d), {7: [['BODY', [], "<html>test</html>"]]})
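# A hedged sketch combining the fetchSpecific parameters tested above;
# headerNumber, headerType, and offset/length collapse into a single
# BODY[...] data item, here BODY[1.TEXT]<0.128> (again assuming a
# connected `client`):
#
#     d = client.fetchSpecific(
#         '3', headerType='TEXT', headerNumber=1, offset=0, length=128)
#     # on success, fires with {3: [['BODY', ['1.TEXT'], '<0>', '...']]}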
class IMAP4ClientStoreTests(PreauthIMAP4ClientMixin, unittest.TestCase):
"""
Tests for the L{IMAP4Client.setFlags}, L{IMAP4Client.addFlags}, and
L{IMAP4Client.removeFlags} methods.
An example of usage of the STORE command, in terms of which these three
methods are implemented, from RFC 3501, section 6.4.6::
C: A003 STORE 2:4 +FLAGS (\Deleted)
S: * 2 FETCH (FLAGS (\Deleted \Seen))
S: * 3 FETCH (FLAGS (\Deleted))
S: * 4 FETCH (FLAGS (\Deleted \Flagged \Seen))
S: A003 OK STORE completed
"""
clientProtocol = StillSimplerClient
def _flagsTest(self, method, item):
"""
Test a non-silent flag modifying method. Call the method, assert that
the correct bytes are sent, deliver a I{FETCH} response, and assert
that the result of the Deferred returned by the method is correct.
@param method: The name of the method to test.
@param item: The data item which is expected to be specified.
"""
d = getattr(self.client, method)('3', ('\\Read', '\\Seen'), False)
self.assertEquals(
self.transport.value(),
'0001 STORE 3 ' + item + ' (\\Read \\Seen)\r\n')
self.client.lineReceived('* 3 FETCH (FLAGS (\\Read \\Seen))')
self.client.lineReceived('0001 OK STORE completed')
self.assertEquals(
self._extractDeferredResult(d),
{3: {'FLAGS': ['\\Read', '\\Seen']}})
def _flagsSilentlyTest(self, method, item):
"""
Test a silent flag modifying method. Call the method, assert that the
correct bytes are sent, deliver an I{OK} response, and assert that the
result of the Deferred returned by the method is correct.
@param method: The name of the method to test.
@param item: The data item which is expected to be specified.
"""
d = getattr(self.client, method)('3', ('\\Read', '\\Seen'), True)
self.assertEquals(
self.transport.value(),
'0001 STORE 3 ' + item + ' (\\Read \\Seen)\r\n')
self.client.lineReceived('0001 OK STORE completed')
self.assertEquals(self._extractDeferredResult(d), {})
def _flagsSilentlyWithUnsolicitedDataTest(self, method, item):
"""
Test unsolicited data received in response to a silent flag modifying
method. Call the method, assert that the correct bytes are sent,
deliver the unsolicited I{FETCH} response, and assert that the result
of the Deferred returned by the method is correct.
@param method: The name of the method to test.
@param item: The data item which is expected to be specified.
"""
d = getattr(self.client, method)('3', ('\\Read', '\\Seen'), True)
self.assertEquals(
self.transport.value(),
'0001 STORE 3 ' + item + ' (\\Read \\Seen)\r\n')
self.client.lineReceived('* 2 FETCH (FLAGS (\\Read \\Seen))')
self.client.lineReceived('0001 OK STORE completed')
self.assertEquals(self._extractDeferredResult(d), {})
self.assertEquals(self.client.flags, {2: ['\\Read', '\\Seen']})
def test_setFlags(self):
"""
When passed a C{False} value for the C{silent} parameter,
L{IMAP4Client.setFlags} sends the I{STORE} command with a I{FLAGS} data
item and returns a L{Deferred} which fires with a C{dict} mapping
message sequence numbers to C{dict}s mapping C{'FLAGS'} to the new
flags of those messages.
"""
self._flagsTest('setFlags', 'FLAGS')
def test_setFlagsSilently(self):
"""
When passed a C{True} value for the C{silent} parameter,
L{IMAP4Client.setFlags} sends the I{STORE} command with a
I{FLAGS.SILENT} data item and returns a L{Deferred} which fires with an
empty dictionary.
"""
self._flagsSilentlyTest('setFlags', 'FLAGS.SILENT')
def test_setFlagsSilentlyWithUnsolicitedData(self):
"""
If unsolicited flag data is received in response to a I{STORE}
I{FLAGS.SILENT} request, that data is passed to the C{flagsChanged}
callback.
"""
self._flagsSilentlyWithUnsolicitedDataTest('setFlags', 'FLAGS.SILENT')
def test_addFlags(self):
"""
L{IMAP4Client.addFlags} is like L{IMAP4Client.setFlags}, but sends
I{+FLAGS} instead of I{FLAGS}.
"""
self._flagsTest('addFlags', '+FLAGS')
def test_addFlagsSilently(self):
"""
L{IMAP4Client.addFlags} with a C{True} value for C{silent} behaves like
L{IMAP4Client.setFlags} with a C{True} value for C{silent}, but it
sends I{+FLAGS.SILENT} instead of I{FLAGS.SILENT}.
"""
self._flagsSilentlyTest('addFlags', '+FLAGS.SILENT')
def test_addFlagsSilentlyWithUnsolicitedData(self):
"""
L{IMAP4Client.addFlags} behaves like L{IMAP4Client.setFlags} when used
in silent mode and unsolicited data is received.
"""
self._flagsSilentlyWithUnsolicitedDataTest('addFlags', '+FLAGS.SILENT')
def test_removeFlags(self):
"""
L{IMAP4Client.removeFlags} is like L{IMAP4Client.setFlags}, but sends
I{-FLAGS} instead of I{FLAGS}.
"""
self._flagsTest('removeFlags', '-FLAGS')
def test_removeFlagsSilently(self):
"""
L{IMAP4Client.removeFlags} with a C{True} value for C{silent} behaves
like L{IMAP4Client.setFlags} with a C{True} value for C{silent}, but it
sends I{-FLAGS.SILENT} instead of I{FLAGS.SILENT}.
"""
self._flagsSilentlyTest('removeFlags', '-FLAGS.SILENT')
def test_removeFlagsSilentlyWithUnsolicitedData(self):
"""
L{IMAP4Client.removeFlags} behaves like L{IMAP4Client.setFlags} when
used in silent mode and unsolicited data is received.
"""
self._flagsSilentlyWithUnsolicitedDataTest('removeFlags', '-FLAGS.SILENT')
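# A brief usage sketch for the flag-modification methods tested above.
# With silent=False the Deferred fires with the updated flags; with
# silent=True it fires with an empty dict, and any unsolicited FETCH
# responses are delivered to the flagsChanged callback instead:
#
#     d = client.addFlags('2:4', ('\\Seen',), silent=False)
#     # fires with e.g. {2: {'FLAGS': ['\\Seen']}, 3: {'FLAGS': [...]}}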
class FakeyServer(imap4.IMAP4Server):
state = 'select'
timeout = None
def sendServerGreeting(self):
pass
class FakeyMessage(util.FancyStrMixin):
implements(imap4.IMessage)
showAttributes = ('headers', 'flags', 'date', 'body', 'uid')
def __init__(self, headers, flags, date, body, uid, subpart):
self.headers = headers
self.flags = flags
self.body = StringIO(body)
self.size = len(body)
self.date = date
self.uid = uid
self.subpart = subpart
def getHeaders(self, negate, *names):
self.got_headers = negate, names
return self.headers
def getFlags(self):
return self.flags
def getInternalDate(self):
return self.date
def getBodyFile(self):
return self.body
def getSize(self):
return self.size
def getUID(self):
return self.uid
def isMultipart(self):
return self.subpart is not None
def getSubPart(self, part):
self.got_subpart = part
return self.subpart[part]
class NewStoreTestCase(unittest.TestCase, IMAP4HelperMixin):
result = None
storeArgs = None
def setUp(self):
self.received_messages = self.received_uid = None
self.server = imap4.IMAP4Server()
self.server.state = 'select'
self.server.mbox = self
self.connected = defer.Deferred()
self.client = SimpleClient(self.connected)
def addListener(self, x):
pass
def removeListener(self, x):
pass
def store(self, *args, **kw):
self.storeArgs = args, kw
return self.response
def _storeWork(self):
def connected():
return self.function(self.messages, self.flags, self.silent, self.uid)
def result(R):
self.result = R
self.connected.addCallback(strip(connected)
).addCallback(result
).addCallback(self._cbStopClient
).addErrback(self._ebGeneral)
def check(ignored):
self.assertEquals(self.result, self.expected)
self.assertEquals(self.storeArgs, self.expectedArgs)
d = loopback.loopbackTCP(self.server, self.client, noisy=False)
d.addCallback(check)
return d
def testSetFlags(self, uid=0):
self.function = self.client.setFlags
self.messages = '1,5,9'
self.flags = ['\\A', '\\B', 'C']
self.silent = False
self.uid = uid
self.response = {
1: ['\\A', '\\B', 'C'],
5: ['\\A', '\\B', 'C'],
9: ['\\A', '\\B', 'C'],
}
self.expected = {
1: {'FLAGS': ['\\A', '\\B', 'C']},
5: {'FLAGS': ['\\A', '\\B', 'C']},
9: {'FLAGS': ['\\A', '\\B', 'C']},
}
msg = imap4.MessageSet()
msg.add(1)
msg.add(5)
msg.add(9)
self.expectedArgs = ((msg, ['\\A', '\\B', 'C'], 0), {'uid': 0})
return self._storeWork()
class NewFetchTestCase(unittest.TestCase, IMAP4HelperMixin):
def setUp(self):
self.received_messages = self.received_uid = None
self.result = None
self.server = imap4.IMAP4Server()
self.server.state = 'select'
self.server.mbox = self
self.connected = defer.Deferred()
self.client = SimpleClient(self.connected)
def addListener(self, x):
pass
def removeListener(self, x):
pass
def fetch(self, messages, uid):
self.received_messages = messages
self.received_uid = uid
return iter(zip(range(len(self.msgObjs)), self.msgObjs))
def _fetchWork(self, uid):
if uid:
for (i, msg) in zip(range(len(self.msgObjs)), self.msgObjs):
self.expected[i]['UID'] = str(msg.getUID())
def result(R):
self.result = R
self.connected.addCallback(lambda _: self.function(self.messages, uid)
).addCallback(result
).addCallback(self._cbStopClient
).addErrback(self._ebGeneral)
d = loopback.loopbackTCP(self.server, self.client, noisy=False)
d.addCallback(lambda x : self.assertEquals(self.result, self.expected))
return d
def testFetchUID(self):
self.function = lambda m, u: self.client.fetchUID(m)
self.messages = '7'
self.msgObjs = [
FakeyMessage({}, (), '', '', 12345, None),
FakeyMessage({}, (), '', '', 999, None),
FakeyMessage({}, (), '', '', 10101, None),
]
self.expected = {
0: {'UID': '12345'},
1: {'UID': '999'},
2: {'UID': '10101'},
}
return self._fetchWork(0)
def testFetchFlags(self, uid=0):
self.function = self.client.fetchFlags
self.messages = '9'
self.msgObjs = [
FakeyMessage({}, ['FlagA', 'FlagB', '\\FlagC'], '', '', 54321, None),
FakeyMessage({}, ['\\FlagC', 'FlagA', 'FlagB'], '', '', 12345, None),
]
self.expected = {
0: {'FLAGS': ['FlagA', 'FlagB', '\\FlagC']},
1: {'FLAGS': ['\\FlagC', 'FlagA', 'FlagB']},
}
return self._fetchWork(uid)
def testFetchFlagsUID(self):
return self.testFetchFlags(1)
def testFetchInternalDate(self, uid=0):
self.function = self.client.fetchInternalDate
self.messages = '13'
self.msgObjs = [
FakeyMessage({}, (), 'Fri, 02 Nov 2003 21:25:10 GMT', '', 23232, None),
FakeyMessage({}, (), 'Thu, 29 Dec 2013 11:31:52 EST', '', 101, None),
FakeyMessage({}, (), 'Mon, 10 Mar 1992 02:44:30 CST', '', 202, None),
FakeyMessage({}, (), 'Sat, 11 Jan 2000 14:40:24 PST', '', 303, None),
]
self.expected = {
0: {'INTERNALDATE': '02-Nov-2003 21:25:10 +0000'},
1: {'INTERNALDATE': '29-Dec-2013 11:31:52 -0500'},
2: {'INTERNALDATE': '10-Mar-1992 02:44:30 -0600'},
3: {'INTERNALDATE': '11-Jan-2000 14:40:24 -0800'},
}
return self._fetchWork(uid)
def testFetchInternalDateUID(self):
return self.testFetchInternalDate(1)
def testFetchEnvelope(self, uid=0):
self.function = self.client.fetchEnvelope
self.messages = '15'
self.msgObjs = [
FakeyMessage({
'from': 'user@domain', 'to': 'resu@domain',
'date': 'thursday', 'subject': 'it is a message',
'message-id': 'id-id-id-yayaya'}, (), '', '', 65656,
None),
]
self.expected = {
0: {'ENVELOPE':
['thursday', 'it is a message',
[[None, None, 'user', 'domain']],
[[None, None, 'user', 'domain']],
[[None, None, 'user', 'domain']],
[[None, None, 'resu', 'domain']],
None, None, None, 'id-id-id-yayaya']
}
}
return self._fetchWork(uid)
def testFetchEnvelopeUID(self):
return self.testFetchEnvelope(1)
def testFetchBodyStructure(self, uid=0):
self.function = self.client.fetchBodyStructure
self.messages = '3:9,10:*'
self.msgObjs = [FakeyMessage({
'content-type': 'text/plain; name=thing; key="value"',
'content-id': 'this-is-the-content-id',
'content-description': 'describing-the-content-goes-here!',
'content-transfer-encoding': '8BIT',
}, (), '', 'Body\nText\nGoes\nHere\n', 919293, None)]
self.expected = {0: {'BODYSTRUCTURE': [
'text', 'plain', [['name', 'thing'], ['key', 'value']],
'this-is-the-content-id', 'describing-the-content-goes-here!',
'8BIT', '20', '4', None, None, None]}}
return self._fetchWork(uid)
def testFetchBodyStructureUID(self):
return self.testFetchBodyStructure(1)
def testFetchSimplifiedBody(self, uid=0):
self.function = self.client.fetchSimplifiedBody
self.messages = '21'
self.msgObjs = [FakeyMessage({}, (), '', 'Yea whatever', 91825,
[FakeyMessage({'content-type': 'image/jpg'}, (), '',
'Body Body Body', None, None
)]
)]
self.expected = {0:
{'BODY':
[None, None, [], None, None, None,
'12'
]
}
}
return self._fetchWork(uid)
def testFetchSimplifiedBodyUID(self):
return self.testFetchSimplifiedBody(1)
def testFetchSimplifiedBodyText(self, uid=0):
self.function = self.client.fetchSimplifiedBody
self.messages = '21'
self.msgObjs = [FakeyMessage({'content-type': 'text/plain'},
(), '', 'Yea whatever', 91825, None)]
self.expected = {0:
{'BODY':
['text', 'plain', [], None, None, None,
'12', '1'
]
}
}
return self._fetchWork(uid)
def testFetchSimplifiedBodyTextUID(self):
return self.testFetchSimplifiedBodyText(1)
def testFetchSimplifiedBodyRFC822(self, uid=0):
self.function = self.client.fetchSimplifiedBody
self.messages = '21'
self.msgObjs = [FakeyMessage({'content-type': 'message/rfc822'},
(), '', 'Yea whatever', 91825,
[FakeyMessage({'content-type': 'image/jpg'}, (), '',
'Body Body Body', None, None
)]
)]
self.expected = {0:
{'BODY':
['message', 'rfc822', [], None, None, None,
'12', [None, None, [[None, None, None]],
[[None, None, None]], None, None, None,
None, None, None], ['image', 'jpg', [],
None, None, None, '14'], '1'
]
}
}
return self._fetchWork(uid)
def testFetchSimplifiedBodyRFC822UID(self):
return self.testFetchSimplifiedBodyRFC822(1)
def testFetchMessage(self, uid=0):
self.function = self.client.fetchMessage
self.messages = '1,3,7,10101'
self.msgObjs = [
FakeyMessage({'Header': 'Value'}, (), '', 'BODY TEXT\r\n', 91, None),
]
self.expected = {
0: {'RFC822': 'Header: Value\r\n\r\nBODY TEXT\r\n'}
}
return self._fetchWork(uid)
def testFetchMessageUID(self):
return self.testFetchMessage(1)
def testFetchHeaders(self, uid=0):
self.function = self.client.fetchHeaders
self.messages = '9,6,2'
self.msgObjs = [
FakeyMessage({'H1': 'V1', 'H2': 'V2'}, (), '', '', 99, None),
]
self.expected = {
0: {'RFC822.HEADER': imap4._formatHeaders({'H1': 'V1', 'H2': 'V2'})},
}
return self._fetchWork(uid)
def testFetchHeadersUID(self):
return self.testFetchHeaders(1)
def testFetchBody(self, uid=0):
self.function = self.client.fetchBody
self.messages = '1,2,3,4,5,6,7'
self.msgObjs = [
FakeyMessage({'Header': 'Value'}, (), '', 'Body goes here\r\n', 171, None),
]
self.expected = {
0: {'RFC822.TEXT': 'Body goes here\r\n'},
}
return self._fetchWork(uid)
def testFetchBodyUID(self):
return self.testFetchBody(1)
def testFetchBodyParts(self):
"""
Test the server's handling of requests for specific body sections.
"""
self.function = self.client.fetchSpecific
self.messages = '1'
outerBody = ''
innerBody1 = 'Contained body message text. Squarge.'
innerBody2 = 'Secondary <i>message</i> text of squarge body.'
headers = util.OrderedDict()
headers['from'] = 'sender@host'
headers['to'] = 'recipient@domain'
headers['subject'] = 'booga booga boo'
headers['content-type'] = 'multipart/alternative; boundary="xyz"'
innerHeaders = util.OrderedDict()
innerHeaders['subject'] = 'this is subject text'
innerHeaders['content-type'] = 'text/plain'
innerHeaders2 = util.OrderedDict()
innerHeaders2['subject'] = '<b>this is subject</b>'
innerHeaders2['content-type'] = 'text/html'
self.msgObjs = [FakeyMessage(
headers, (), None, outerBody, 123,
[FakeyMessage(innerHeaders, (), None, innerBody1, None, None),
FakeyMessage(innerHeaders2, (), None, innerBody2, None, None)])]
self.expected = {
0: [['BODY', ['1'], 'Contained body message text. Squarge.']]}
def result(R):
self.result = R
self.connected.addCallback(
lambda _: self.function(self.messages, headerNumber=1))
self.connected.addCallback(result)
self.connected.addCallback(self._cbStopClient)
self.connected.addErrback(self._ebGeneral)
d = loopback.loopbackTCP(self.server, self.client, noisy=False)
d.addCallback(lambda ign: self.assertEquals(self.result, self.expected))
return d
def test_fetchBodyPartOfNonMultipart(self):
"""
Single-part messages have an implicit first part which clients
should be able to retrieve explicitly. Test that a client
requesting part 1 of a text/plain message receives the body of the
text/plain part.
"""
self.function = self.client.fetchSpecific
self.messages = '1'
parts = [1]
outerBody = 'DA body'
headers = util.OrderedDict()
headers['from'] = 'sender@host'
headers['to'] = 'recipient@domain'
headers['subject'] = 'booga booga boo'
headers['content-type'] = 'text/plain'
self.msgObjs = [FakeyMessage(
headers, (), None, outerBody, 123, None)]
self.expected = {0: [['BODY', ['1'], 'DA body']]}
def result(R):
self.result = R
self.connected.addCallback(
lambda _: self.function(self.messages, headerNumber=parts))
self.connected.addCallback(result)
self.connected.addCallback(self._cbStopClient)
self.connected.addErrback(self._ebGeneral)
d = loopback.loopbackTCP(self.server, self.client, noisy=False)
d.addCallback(lambda ign: self.assertEquals(self.result, self.expected))
return d
def testFetchSize(self, uid=0):
self.function = self.client.fetchSize
self.messages = '1:100,2:*'
self.msgObjs = [
FakeyMessage({}, (), '', 'x' * 20, 123, None),
]
self.expected = {
0: {'RFC822.SIZE': '20'},
}
return self._fetchWork(uid)
def testFetchSizeUID(self):
return self.testFetchSize(1)
def testFetchFull(self, uid=0):
self.function = self.client.fetchFull
self.messages = '1,3'
self.msgObjs = [
FakeyMessage({}, ('\\XYZ', '\\YZX', 'Abc'),
'Sun, 25 Jul 2010 06:20:30 -0400 (EDT)',
'xyz' * 2, 654, None),
FakeyMessage({}, ('\\One', '\\Two', 'Three'),
'Mon, 14 Apr 2003 19:43:44 -0400',
'abc' * 4, 555, None),
]
self.expected = {
0: {'FLAGS': ['\\XYZ', '\\YZX', 'Abc'],
'INTERNALDATE': '25-Jul-2010 06:20:30 -0400',
'RFC822.SIZE': '6',
'ENVELOPE': [None, None, [[None, None, None]], [[None, None, None]], None, None, None, None, None, None],
'BODY': [None, None, [], None, None, None, '6']},
1: {'FLAGS': ['\\One', '\\Two', 'Three'],
'INTERNALDATE': '14-Apr-2003 19:43:44 -0400',
'RFC822.SIZE': '12',
'ENVELOPE': [None, None, [[None, None, None]], [[None, None, None]], None, None, None, None, None, None],
'BODY': [None, None, [], None, None, None, '12']},
}
return self._fetchWork(uid)
def testFetchFullUID(self):
return self.testFetchFull(1)
def testFetchAll(self, uid=0):
self.function = self.client.fetchAll
self.messages = '1,2:3'
self.msgObjs = [
FakeyMessage({}, (), 'Mon, 14 Apr 2003 19:43:44 +0400',
'Lalala', 10101, None),
FakeyMessage({}, (), 'Tue, 15 Apr 2003 19:43:44 +0200',
'Alalal', 20202, None),
]
self.expected = {
0: {'ENVELOPE': [None, None, [[None, None, None]], [[None, None, None]], None, None, None, None, None, None],
'RFC822.SIZE': '6',
'INTERNALDATE': '14-Apr-2003 19:43:44 +0400',
'FLAGS': []},
1: {'ENVELOPE': [None, None, [[None, None, None]], [[None, None, None]], None, None, None, None, None, None],
'RFC822.SIZE': '6',
'INTERNALDATE': '15-Apr-2003 19:43:44 +0200',
'FLAGS': []},
}
return self._fetchWork(uid)
def testFetchAllUID(self):
return self.testFetchAll(1)
def testFetchFast(self, uid=0):
self.function = self.client.fetchFast
self.messages = '1'
self.msgObjs = [
FakeyMessage({}, ('\\X',), '19 Mar 2003 19:22:21 -0500', '', 9, None),
]
self.expected = {
0: {'FLAGS': ['\\X'],
'INTERNALDATE': '19-Mar-2003 19:22:21 -0500',
'RFC822.SIZE': '0'},
}
return self._fetchWork(uid)
def testFetchFastUID(self):
return self.testFetchFast(1)
class DefaultSearchTestCase(IMAP4HelperMixin, unittest.TestCase):
"""
Test the behavior of the server's SEARCH implementation, particularly in
the face of unhandled search terms.
"""
def setUp(self):
self.server = imap4.IMAP4Server()
self.server.state = 'select'
self.server.mbox = self
self.connected = defer.Deferred()
self.client = SimpleClient(self.connected)
self.msgObjs = [
FakeyMessage({}, (), '', '', 999, None),
FakeyMessage({}, (), '', '', 10101, None),
FakeyMessage({}, (), '', '', 12345, None),
FakeyMessage({}, (), '', '', 20001, None),
FakeyMessage({}, (), '', '', 20002, None),
]
def fetch(self, messages, uid):
"""
        Pretend to be a mailbox and let C{self.server} look up messages on me.
"""
return zip(range(1, len(self.msgObjs) + 1), self.msgObjs)
def _messageSetSearchTest(self, queryTerms, expectedMessages):
"""
Issue a search with given query and verify that the returned messages
match the given expected messages.
@param queryTerms: A string giving the search query.
@param expectedMessages: A list of the message sequence numbers
expected as the result of the search.
@return: A L{Deferred} which fires when the test is complete.
"""
def search():
return self.client.search(queryTerms)
d = self.connected.addCallback(strip(search))
def searched(results):
self.assertEquals(results, expectedMessages)
d.addCallback(searched)
d.addCallback(self._cbStopClient)
d.addErrback(self._ebGeneral)
self.loopback()
return d
def test_searchMessageSet(self):
"""
Test that a search which starts with a message set properly limits
the search results to messages in that set.
"""
return self._messageSetSearchTest('1', [1])
def test_searchMessageSetWithStar(self):
"""
        If the search filter ends with a star, all the messages from the
starting point are returned.
"""
return self._messageSetSearchTest('2:*', [2, 3, 4, 5])
def test_searchMessageSetWithStarFirst(self):
"""
        If the search filter starts with a star, the result is identical
        to that of the same filter ending with a star.
"""
return self._messageSetSearchTest('*:2', [2, 3, 4, 5])
def test_searchMessageSetUIDWithStar(self):
"""
        If the search filter ends with a star, all the messages from the
starting point are returned (also for the SEARCH UID case).
"""
return self._messageSetSearchTest('UID 10000:*', [2, 3, 4, 5])
def test_searchMessageSetUIDWithStarFirst(self):
"""
        If the search filter starts with a star, the result is identical
        to that of the same filter ending with a star (also for the
        SEARCH UID case).
"""
return self._messageSetSearchTest('UID *:10000', [2, 3, 4, 5])
def test_searchMessageSetUIDWithStarAndHighStart(self):
"""
A search filter of 1234:* should include the UID of the last message in
the mailbox, even if its UID is less than 1234.
"""
# in our fake mbox the highest message UID is 20002
return self._messageSetSearchTest('UID 30000:*', [5])
def test_searchMessageSetWithList(self):
"""
If the search filter contains nesting terms, one of which includes a
        message sequence set with a wildcard, the matching messages are returned.
"""
# 6 is bigger than the biggest message sequence number, but that's
# okay, because N:* includes the biggest message sequence number even
        # if N is bigger than that (see the formal syntax in RFC 3501, section 9).
return self._messageSetSearchTest('(6:*)', [5])
def test_searchOr(self):
"""
If the search filter contains an I{OR} term, all messages
which match either subexpression are returned.
"""
return self._messageSetSearchTest('OR 1 2', [1, 2])
def test_searchOrMessageSet(self):
"""
If the search filter contains an I{OR} term with a
subexpression which includes a message sequence set wildcard,
all messages in that set are considered for inclusion in the
results.
"""
return self._messageSetSearchTest('OR 2:* 2:*', [2, 3, 4, 5])
def test_searchNot(self):
"""
If the search filter contains a I{NOT} term, all messages
which do not match the subexpression are returned.
"""
return self._messageSetSearchTest('NOT 3', [1, 2, 4, 5])
def test_searchNotMessageSet(self):
"""
If the search filter contains a I{NOT} term with a
subexpression which includes a message sequence set wildcard,
no messages in that set are considered for inclusion in the
result.
"""
return self._messageSetSearchTest('NOT 2:*', [1])
def test_searchAndMessageSet(self):
"""
If the search filter contains multiple terms implicitly
conjoined with a message sequence set wildcard, only the
intersection of the results of each term are returned.
"""
return self._messageSetSearchTest('2:* 3', [3])
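# A worked example of the wildcard semantics covered above, assuming the
# five-message mailbox built in setUp (highest sequence number 5): the set
# '6:*' still matches message 5, because N:* always includes the last
# message even when N exceeds its sequence number:
#
#     d = client.search('(6:*)')
#     # fires with [5]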
class FetchSearchStoreTestCase(unittest.TestCase, IMAP4HelperMixin):
implements(imap4.ISearchableMailbox)
def setUp(self):
self.expected = self.result = None
self.server_received_query = None
self.server_received_uid = None
self.server_received_parts = None
self.server_received_messages = None
self.server = imap4.IMAP4Server()
self.server.state = 'select'
self.server.mbox = self
self.connected = defer.Deferred()
self.client = SimpleClient(self.connected)
def search(self, query, uid):
self.server_received_query = query
self.server_received_uid = uid
return self.expected
def addListener(self, *a, **kw):
pass
removeListener = addListener
def _searchWork(self, uid):
def search():
return self.client.search(self.query, uid=uid)
def result(R):
self.result = R
self.connected.addCallback(strip(search)
).addCallback(result
).addCallback(self._cbStopClient
).addErrback(self._ebGeneral)
def check(ignored):
            # Ensure no short-circuiting weirdness is going on
self.failIf(self.result is self.expected)
self.assertEquals(self.result, self.expected)
self.assertEquals(self.uid, self.server_received_uid)
self.assertEquals(
imap4.parseNestedParens(self.query),
self.server_received_query
)
d = loopback.loopbackTCP(self.server, self.client, noisy=False)
d.addCallback(check)
return d
def testSearch(self):
self.query = imap4.Or(
imap4.Query(header=('subject', 'substring')),
imap4.Query(larger=1024, smaller=4096),
)
self.expected = [1, 4, 5, 7]
self.uid = 0
return self._searchWork(0)
def testUIDSearch(self):
self.query = imap4.Or(
imap4.Query(header=('subject', 'substring')),
imap4.Query(larger=1024, smaller=4096),
)
self.uid = 1
self.expected = [1, 2, 3]
return self._searchWork(1)
def getUID(self, msg):
try:
return self.expected[msg]['UID']
except (TypeError, IndexError):
return self.expected[msg-1]
except KeyError:
return 42
def fetch(self, messages, uid):
self.server_received_uid = uid
self.server_received_messages = str(messages)
return self.expected
def _fetchWork(self, fetch):
def result(R):
self.result = R
self.connected.addCallback(strip(fetch)
).addCallback(result
).addCallback(self._cbStopClient
).addErrback(self._ebGeneral)
def check(ignored):
            # Ensure no short-circuiting weirdness is going on
self.failIf(self.result is self.expected)
self.parts and self.parts.sort()
self.server_received_parts and self.server_received_parts.sort()
if self.uid:
for (k, v) in self.expected.items():
v['UID'] = str(k)
self.assertEquals(self.result, self.expected)
self.assertEquals(self.uid, self.server_received_uid)
self.assertEquals(self.parts, self.server_received_parts)
self.assertEquals(imap4.parseIdList(self.messages),
imap4.parseIdList(self.server_received_messages))
d = loopback.loopbackTCP(self.server, self.client, noisy=False)
d.addCallback(check)
return d
class FakeMailbox:
def __init__(self):
self.args = []
def addMessage(self, body, flags, date):
self.args.append((body, flags, date))
return defer.succeed(None)
class FeaturefulMessage:
implements(imap4.IMessageFile)
def getFlags(self):
return 'flags'
def getInternalDate(self):
return 'internaldate'
def open(self):
return StringIO("open")
class MessageCopierMailbox:
implements(imap4.IMessageCopier)
def __init__(self):
self.msgs = []
def copy(self, msg):
self.msgs.append(msg)
return len(self.msgs)
class CopyWorkerTestCase(unittest.TestCase):
def testFeaturefulMessage(self):
s = imap4.IMAP4Server()
# Yes. I am grabbing this uber-non-public method to test it.
# It is complex. It needs to be tested directly!
# Perhaps it should be refactored, simplified, or split up into
# not-so-private components, but that is a task for another day.
# Ha ha! Addendum! Soon it will be split up, and this test will
# be re-written to just use the default adapter for IMailbox to
# IMessageCopier and call .copy on that adapter.
f = s._IMAP4Server__cbCopy
m = FakeMailbox()
d = f([(i, FeaturefulMessage()) for i in range(1, 11)], 'tag', m)
def cbCopy(results):
for a in m.args:
self.assertEquals(a[0].read(), "open")
self.assertEquals(a[1], "flags")
self.assertEquals(a[2], "internaldate")
for (status, result) in results:
self.failUnless(status)
self.assertEquals(result, None)
return d.addCallback(cbCopy)
def testUnfeaturefulMessage(self):
s = imap4.IMAP4Server()
# See above comment
f = s._IMAP4Server__cbCopy
m = FakeMailbox()
msgs = [FakeyMessage({'Header-Counter': str(i)}, (), 'Date', 'Body %d' % (i,), i + 10, None) for i in range(1, 11)]
d = f([im for im in zip(range(1, 11), msgs)], 'tag', m)
def cbCopy(results):
seen = []
for a in m.args:
seen.append(a[0].read())
self.assertEquals(a[1], ())
self.assertEquals(a[2], "Date")
seen.sort()
exp = ["Header-Counter: %d\r\n\r\nBody %d" % (i, i) for i in range(1, 11)]
exp.sort()
self.assertEquals(seen, exp)
for (status, result) in results:
self.failUnless(status)
self.assertEquals(result, None)
return d.addCallback(cbCopy)
def testMessageCopier(self):
s = imap4.IMAP4Server()
# See above comment
f = s._IMAP4Server__cbCopy
m = MessageCopierMailbox()
msgs = [object() for i in range(1, 11)]
d = f([im for im in zip(range(1, 11), msgs)], 'tag', m)
def cbCopy(results):
self.assertEquals(results, zip([1] * 10, range(1, 11)))
for (orig, new) in zip(msgs, m.msgs):
self.assertIdentical(orig, new)
return d.addCallback(cbCopy)
class TLSTestCase(IMAP4HelperMixin, unittest.TestCase):
serverCTX = ServerTLSContext and ServerTLSContext()
clientCTX = ClientTLSContext and ClientTLSContext()
def loopback(self):
return loopback.loopbackTCP(self.server, self.client, noisy=False)
def testAPileOfThings(self):
SimpleServer.theAccount.addMailbox('inbox')
called = []
def login():
called.append(None)
return self.client.login('testuser', 'password-test')
def list():
called.append(None)
return self.client.list('inbox', '%')
def status():
called.append(None)
return self.client.status('inbox', 'UIDNEXT')
def examine():
called.append(None)
return self.client.examine('inbox')
def logout():
called.append(None)
return self.client.logout()
self.client.requireTransportSecurity = True
methods = [login, list, status, examine, logout]
map(self.connected.addCallback, map(strip, methods))
self.connected.addCallbacks(self._cbStopClient, self._ebGeneral)
def check(ignored):
self.assertEquals(self.server.startedTLS, True)
self.assertEquals(self.client.startedTLS, True)
self.assertEquals(len(called), len(methods))
d = self.loopback()
d.addCallback(check)
return d
def testLoginLogin(self):
self.server.checker.addUser('testuser', 'password-test')
success = []
self.client.registerAuthenticator(imap4.LOGINAuthenticator('testuser'))
self.connected.addCallback(
lambda _: self.client.authenticate('password-test')
).addCallback(
lambda _: self.client.logout()
).addCallback(success.append
).addCallback(self._cbStopClient
).addErrback(self._ebGeneral)
d = self.loopback()
d.addCallback(lambda x : self.assertEquals(len(success), 1))
return d
def test_startTLS(self):
"""
L{IMAP4Client.startTLS} triggers TLS negotiation and returns a
L{Deferred} which fires after the client's transport is using
encryption.
"""
success = []
self.connected.addCallback(lambda _: self.client.startTLS())
def checkSecure(ignored):
self.assertTrue(
interfaces.ISSLTransport.providedBy(self.client.transport))
self.connected.addCallback(checkSecure)
self.connected.addCallback(self._cbStopClient)
self.connected.addCallback(success.append)
self.connected.addErrback(self._ebGeneral)
d = self.loopback()
d.addCallback(lambda x : self.failUnless(success))
return defer.gatherResults([d, self.connected])
def testFailedStartTLS(self):
failure = []
def breakServerTLS(ign):
self.server.canStartTLS = False
self.connected.addCallback(breakServerTLS)
self.connected.addCallback(lambda ign: self.client.startTLS())
self.connected.addErrback(lambda err: failure.append(err.trap(imap4.IMAP4Exception)))
self.connected.addCallback(self._cbStopClient)
self.connected.addErrback(self._ebGeneral)
def check(ignored):
self.failUnless(failure)
self.assertIdentical(failure[0], imap4.IMAP4Exception)
return self.loopback().addCallback(check)
class SlowMailbox(SimpleMailbox):
howSlow = 2
callLater = None
fetchDeferred = None
# Not a very nice implementation of fetch(), but it'll
# do for the purposes of testing.
def fetch(self, messages, uid):
d = defer.Deferred()
self.callLater(self.howSlow, d.callback, ())
self.fetchDeferred.callback(None)
return d
class Timeout(IMAP4HelperMixin, unittest.TestCase):
def test_serverTimeout(self):
"""
The *client* has a timeout mechanism which will close connections that
are inactive for a period.
"""
c = Clock()
self.server.timeoutTest = True
self.client.timeout = 5 #seconds
self.client.callLater = c.callLater
self.selectedArgs = None
def login():
d = self.client.login('testuser', 'password-test')
c.advance(5)
d.addErrback(timedOut)
return d
def timedOut(failure):
self._cbStopClient(None)
failure.trap(error.TimeoutError)
d = self.connected.addCallback(strip(login))
d.addErrback(self._ebGeneral)
return defer.gatherResults([d, self.loopback()])
def test_longFetchDoesntTimeout(self):
"""
The connection timeout does not take effect during fetches.
"""
c = Clock()
SlowMailbox.callLater = c.callLater
SlowMailbox.fetchDeferred = defer.Deferred()
self.server.callLater = c.callLater
SimpleServer.theAccount.mailboxFactory = SlowMailbox
SimpleServer.theAccount.addMailbox('mailbox-test')
self.server.setTimeout(1)
def login():
return self.client.login('testuser', 'password-test')
def select():
self.server.setTimeout(1)
return self.client.select('mailbox-test')
def fetch():
return self.client.fetchUID('1:*')
def stillConnected():
self.assertNotEquals(self.server.state, 'timeout')
def cbAdvance(ignored):
for i in xrange(4):
c.advance(.5)
SlowMailbox.fetchDeferred.addCallback(cbAdvance)
d1 = self.connected.addCallback(strip(login))
d1.addCallback(strip(select))
d1.addCallback(strip(fetch))
d1.addCallback(strip(stillConnected))
d1.addCallback(self._cbStopClient)
d1.addErrback(self._ebGeneral)
d = defer.gatherResults([d1, self.loopback()])
return d
def test_idleClientDoesDisconnect(self):
"""
The *server* has a timeout mechanism which will close connections that
are inactive for a period.
"""
c = Clock()
# Hook up our server protocol
transport = StringTransportWithDisconnection()
transport.protocol = self.server
self.server.callLater = c.callLater
self.server.makeConnection(transport)
# Make sure we can notice when the connection goes away
lost = []
connLost = self.server.connectionLost
self.server.connectionLost = lambda reason: (lost.append(None), connLost(reason))[1]
# 2/3rds of the idle timeout elapses...
c.pump([0.0] + [self.server.timeOut / 3.0] * 2)
self.failIf(lost, lost)
# Now some more
c.pump([0.0, self.server.timeOut / 2.0])
self.failUnless(lost)
class Disconnection(unittest.TestCase):
def testClientDisconnectFailsDeferreds(self):
c = imap4.IMAP4Client()
t = StringTransportWithDisconnection()
c.makeConnection(t)
d = self.assertFailure(c.login('testuser', 'example.com'), error.ConnectionDone)
c.connectionLost(error.ConnectionDone("Connection closed"))
return d
class SynchronousMailbox(object):
"""
Trivial, in-memory mailbox implementation which can produce a message
synchronously.
"""
def __init__(self, messages):
self.messages = messages
def fetch(self, msgset, uid):
assert not uid, "Cannot handle uid requests."
for msg in msgset:
yield msg, self.messages[msg - 1]
class StringTransportConsumer(StringTransport):
producer = None
streaming = None
def registerProducer(self, producer, streaming):
self.producer = producer
self.streaming = streaming
class Pipelining(unittest.TestCase):
"""
Tests for various aspects of the IMAP4 server's pipelining support.
"""
messages = [
FakeyMessage({}, [], '', '0', None, None),
FakeyMessage({}, [], '', '1', None, None),
FakeyMessage({}, [], '', '2', None, None),
]
def setUp(self):
self.iterators = []
self.transport = StringTransportConsumer()
self.server = imap4.IMAP4Server(None, None, self.iterateInReactor)
self.server.makeConnection(self.transport)
def iterateInReactor(self, iterator):
d = defer.Deferred()
self.iterators.append((iterator, d))
return d
def tearDown(self):
self.server.connectionLost(failure.Failure(error.ConnectionDone()))
def test_synchronousFetch(self):
"""
Test that pipelined FETCH commands which can be responded to
synchronously are responded to correctly.
"""
mailbox = SynchronousMailbox(self.messages)
# Skip over authentication and folder selection
self.server.state = 'select'
self.server.mbox = mailbox
# Get rid of any greeting junk
self.transport.clear()
# Here's some pipelined stuff
self.server.dataReceived(
'01 FETCH 1 BODY[]\r\n'
'02 FETCH 2 BODY[]\r\n'
'03 FETCH 3 BODY[]\r\n')
# Flush anything the server has scheduled to run
while self.iterators:
for e in self.iterators[0][0]:
break
else:
self.iterators.pop(0)[1].callback(None)
# The bodies are empty because we aren't simulating a transport
# exactly correctly (we have StringTransportConsumer but we never
# call resumeProducing on its producer). It doesn't matter: just
# make sure the surrounding structure is okay, and that no
# exceptions occurred.
self.assertEquals(
self.transport.value(),
'* 1 FETCH (BODY[] )\r\n'
'01 OK FETCH completed\r\n'
'* 2 FETCH (BODY[] )\r\n'
'02 OK FETCH completed\r\n'
'* 3 FETCH (BODY[] )\r\n'
'03 OK FETCH completed\r\n')
if ClientTLSContext is None:
for case in (TLSTestCase,):
case.skip = "OpenSSL not present"
elif interfaces.IReactorSSL(reactor, None) is None:
for case in (TLSTestCase,):
case.skip = "Reactor doesn't support SSL"
|
kevin-coder/tensorflow-fork
|
refs/heads/master
|
tensorflow/contrib/rate/rate_test.py
|
23
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Rate."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.rate import rate
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class RateTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def testBuildRate(self):
m = rate.Rate()
m.build(
constant_op.constant([1], dtype=dtypes.float32),
constant_op.constant([2], dtype=dtypes.float32))
old_numer = m.numer
m(
constant_op.constant([2], dtype=dtypes.float32),
constant_op.constant([2], dtype=dtypes.float32))
self.assertTrue(old_numer is m.numer)
@test_util.run_in_graph_and_eager_modes()
def testBasic(self):
with self.cached_session():
r_ = rate.Rate()
a = r_(array_ops.ones([1]), denominator=array_ops.ones([1]))
self.evaluate(variables.global_variables_initializer())
self.evaluate(variables.local_variables_initializer())
self.assertEqual([[1]], self.evaluate(a))
b = r_(constant_op.constant([2]), denominator=constant_op.constant([2]))
self.assertEqual([[1]], self.evaluate(b))
c = r_(constant_op.constant([4]), denominator=constant_op.constant([3]))
self.assertEqual([[2]], self.evaluate(c))
d = r_(constant_op.constant([16]), denominator=constant_op.constant([3]))
self.assertEqual([[0]], self.evaluate(d)) # divide by 0
def testNamesWithSpaces(self):
m1 = rate.Rate(name="has space")
m1(array_ops.ones([1]), array_ops.ones([1]))
self.assertEqual(m1.name, "has space")
self.assertEqual(m1.prev_values.name, "has_space_1/prev_values:0")
@test_util.run_in_graph_and_eager_modes()
def testWhileLoop(self):
with self.cached_session():
r_ = rate.Rate()
def body(value, denom, i, ret_rate):
i += 1
ret_rate = r_(value, denom)
with ops.control_dependencies([ret_rate]):
value = math_ops.add(value, 2)
denom = math_ops.add(denom, 1)
return [value, denom, i, ret_rate]
def condition(v, d, i, r):
        del v, d, r  # unused by the loop condition
return math_ops.less(i, 100)
i = constant_op.constant(0)
value = constant_op.constant([1], dtype=dtypes.float64)
denom = constant_op.constant([1], dtype=dtypes.float64)
ret_rate = r_(value, denom)
self.evaluate(variables.global_variables_initializer())
self.evaluate(variables.local_variables_initializer())
loop = control_flow_ops.while_loop(condition, body,
[value, denom, i, ret_rate])
self.assertEqual([[2]], self.evaluate(loop[3]))
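# A worked sketch (not part of the original tests) of the arithmetic Rate
# implements: each call returns the numerator delta divided by the
# denominator delta, with the state apparently seeded with zeros and a
# zero denominator delta mapped to 0. Tracing testBasic above:
# (1-0)/(1-0) = 1, (2-1)/(2-1) = 1, (4-2)/(3-2) = 2, (16-4)/(3-3) -> 0.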
if __name__ == "__main__":
test.main()
|
lemaiyan/yowsup
|
refs/heads/master
|
yowsup/layers/axolotl/protocolentities/test_iq_keys_set.py
|
68
|
from yowsup.layers.protocol_iq.protocolentities.test_iq import IqProtocolEntityTest
from yowsup.layers.axolotl.protocolentities import SetKeysIqProtocolEntity
from yowsup.structs import ProtocolTreeNode
class SetKeysIqProtocolEntityTest(IqProtocolEntityTest):
def setUp(self):
super(SetKeysIqProtocolEntityTest, self).setUp()
# self.ProtocolEntity = SetKeysIqProtocolEntity
#
# regNode = ProtocolTreeNode("registration", data = "abcd")
# idNode = ProtocolTreeNode("identity", data = "efgh")
# typeNode = ProtocolTreeNode("type", data = "ijkl")
# listNode = ProtocolTreeNode("list")
# for i in range(0, 2):
# keyNode = ProtocolTreeNode("key", children=[
# ProtocolTreeNode("id", data = "id_%s" % i),
# ProtocolTreeNode("value", data = "val_%s" % i)
# ])
# listNode.addChild(keyNode)
#
# self.node.addChildren([regNode, idNode, typeNode, listNode])
|
jordotech/sherri_satchmo
|
refs/heads/master
|
satchmo/apps/satchmo_store/accounts/views.py
|
6
|
from django.conf import settings
from django.contrib.auth import login, REDIRECT_FIELD_NAME
from django.contrib.sites.models import Site, RequestSite
from django.core import urlresolvers
from django.http import HttpResponseRedirect, QueryDict
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.cache import never_cache
from django.views.generic.base import TemplateView
from forms import RegistrationAddressForm, RegistrationForm, EmailAuthenticationForm
from l10n.models import Country
from livesettings import config_get_group, config_value
from satchmo_store.accounts.mail import send_welcome_email
from satchmo_store.accounts import signals
from satchmo_store.contact import CUSTOMER_ID
from satchmo_store.contact.models import Contact
from satchmo_store.shop.models import Config, Cart
import logging
log = logging.getLogger('satchmo_store.accounts.views')
YESNO = (
(1, _('Yes')),
(0, _('No'))
)
def emaillogin(request, template_name='registration/login.html',
auth_form=EmailAuthenticationForm, redirect_field_name=REDIRECT_FIELD_NAME):
"Displays the login form and handles the login action. Altered to use the EmailAuthenticationForm"
redirect_to = request.REQUEST.get(redirect_field_name, '')
# Avoid redirecting to logout if the user clicked on login after logout
if redirect_to == urlresolvers.reverse('auth_logout'):
redirect_to = None
success, todo = _login(request, redirect_to)
if success:
# return the response redirect
return todo
else:
# continue with the login form
form = todo
request.session.set_test_cookie()
if Site._meta.installed:
current_site = Site.objects.get_current()
else:
current_site = RequestSite(request)
return render_to_response(template_name, {
'form': form,
redirect_field_name: redirect_to,
'site_name': current_site.name,
}, context_instance=RequestContext(request))
emaillogin = never_cache(emaillogin)
def _login(request, redirect_to, auth_form=EmailAuthenticationForm):
""""Altered version of the default login, intended to be called by `combined_login`.
Returns tuple:
- success
- redirect (success) or form (on failure)
"""
if request.method == 'POST':
form = auth_form(data=request.POST)
if form.is_valid():
# Light security check -- make sure redirect_to isn't garbage.
if not redirect_to or '//' in redirect_to or ' ' in redirect_to:
redirect_to = settings.LOGIN_REDIRECT_URL
login(request, form.get_user())
# Now that we've logged in, assign this cart to our user
_assign_cart(request)
if config_value('SHOP','PERSISTENT_CART'):
_get_prev_cart(request)
return (True, HttpResponseRedirect(redirect_to))
else:
log.debug(form.errors)
else:
form = auth_form(request)
return (False, form)
def _assign_cart(request):
"""
If there is a current cart and it is unassigned, assign it to this user.
"""
try:
if 'cart' in request.session:
existing_cart = Cart.objects.from_request(request, create=False)
contact = Contact.objects.from_request(request)
if existing_cart.customer == None:
# No currently assigned contact: Up for grabs!
log.debug("Assigning Cart (id: %r) to %r (id: %r)" % (existing_cart.id, contact.full_name, contact.id))
existing_cart.customer = contact
existing_cart.save()
else:
log.debug("The user has no cart in the current session.")
    except Exception:
log.debug("Unable to assign cart user during login")
def _get_prev_cart(request):
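    """
    Return the contact's most recently created saved cart, merging it with
    the session cart when the session cart is empty or the
    PERSISTENT_CART_MERGE setting is enabled. Returns None when no contact
    or usable saved cart exists.
    """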
try:
contact = Contact.objects.from_request(request)
if not contact:
return None
saved_cart = contact.cart_set.latest('date_time_created')
# If the latest cart has len == 0, cart is unusable.
if saved_cart and len(saved_cart):
if 'cart' in request.session:
existing_cart = Cart.objects.from_request(request, create=False)
if existing_cart.pk != saved_cart.pk and ( (len(existing_cart) == 0) or config_value('SHOP','PERSISTENT_CART_MERGE') ):
# Merge the two carts together
saved_cart.merge_carts(existing_cart)
request.session['cart'] = saved_cart.id
log.debug('retrieved cart: %s', saved_cart)
return saved_cart
except Exception:
return None
def register_handle_address_form(request, redirect=None, action_required=''):
"""
Handle all registration logic. This is broken out from "register" to allow easy overriding/hooks
such as a combined login/register page.
This handler allows a login or a full registration including address.
Returns:
- Success flag
- HTTPResponseRedirect (success) or form (fail)
- A dictionary with extra context fields
"""
shop = Config.objects.get_current()
try:
contact = Contact.objects.from_request(request)
except Contact.DoesNotExist:
contact = None
if request.method == 'POST' and request.POST.get('action', '') == action_required:
form = RegistrationAddressForm(request.POST, shop=shop, contact=contact)
if form.is_valid():
contact = form.save(request, force_new=True)
if not redirect:
redirect = urlresolvers.reverse('registration_complete')
return (True, HttpResponseRedirect(redirect))
else:
log.debug("createform errors: %s", form.errors)
else:
initial_data = {}
if contact:
initial_data = {
'email': contact.email,
'first_name': contact.first_name,
'last_name': contact.last_name }
address = contact.billing_address
if address:
initial_data['street1'] = address.street1
initial_data['street2'] = address.street2
initial_data['state'] = address.state
initial_data['city'] = address.city
initial_data['postal_code'] = address.postal_code
try:
initial_data['country'] = address.country
except Country.DoesNotExist:
USA = Country.objects.get(iso2_code__exact="US")
initial_data['country'] = USA
form = RegistrationAddressForm(initial=initial_data, shop=shop, contact=contact)
return (False, form, {'country' : shop.in_country_only})
def register_handle_form(request, redirect=None):
"""
Handle all registration logic. This is broken out from "register" to allow easy overriding/hooks
such as a combined login/register page.
This method only presents a typical login or register form, not a full address form
(see register_handle_address_form for that one.)
Returns:
- Success flag
- HTTPResponseRedirect (success) or form (fail)
"""
if request.method == 'POST':
form = RegistrationForm(request.POST)
if form.is_valid():
contact = form.save(request, force_new=True)
# look for explicit "next"
next = request.POST.get('next', '')
if not next:
if redirect:
next = redirect
else:
next = urlresolvers.reverse('registration_complete')
return (True, HttpResponseRedirect(next))
else:
initial_data = {}
try:
contact = Contact.objects.from_request(request, create=False)
initial_data = {
'email': contact.email,
'first_name': contact.first_name,
'last_name': contact.last_name,
}
except Contact.DoesNotExist:
log.debug("No contact in request")
contact = None
initial_data['next'] = request.GET.get('next', '')
form = RegistrationForm(initial=initial_data)
return (False, form)
#---- Views
def activate(request, activation_key):
"""
Activates a user's account, if their key is valid and hasn't
expired.
"""
from registration.models import RegistrationProfile
activation_key = activation_key.lower()
account = RegistrationProfile.objects.activate_user(activation_key)
if account:
# ** hack for logging in the user **
# when the login form is posted, user = authenticate(username=data['username'], password=data['password'])
# ...but we cannot authenticate without password... so we work-around authentication
account.backend = settings.AUTHENTICATION_BACKENDS[0]
        login(request, account)
try:
contact = Contact.objects.get(user=account)
request.session[CUSTOMER_ID] = contact.id
send_welcome_email(contact.email, contact.first_name, contact.last_name)
signals.satchmo_registration_verified.send(contact, contact=contact)
except Contact.DoesNotExist:
            # Tolerated without error, for compatibility with registration tests that create no Contact
pass
context = RequestContext(request, {
'account': account,
'expiration_days': config_value('SHOP', 'ACCOUNT_ACTIVATION_DAYS'),
})
return render_to_response('registration/activate.html',
context_instance=context)
def login_signup(request,
                 template_name="contact/login_signup.html",
                 registration_handler=register_handle_form,
                 handler_kwargs=None):
    """Display/handle a combined login and create account form"""
    # Avoid a shared mutable default: this dict is mutated below.
    if handler_kwargs is None:
        handler_kwargs = {}
    redirect_to = request.REQUEST.get(REDIRECT_FIELD_NAME, '')
handler_kwargs['redirect'] = redirect_to
loginform = None
createform = None
extra_context = {}
if request.POST:
action = request.POST.get('action', 'login')
if action == 'create':
#log.debug('Signup form')
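            # registration handlers return (success, redirect_or_form[, extra_context])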
ret = registration_handler(request, **handler_kwargs)
success = ret[0]
todo = ret[1]
if len(ret) > 2:
extra_context = ret[2]
if success:
#log.debug('Successful %s form submit, sending to reg complete page')
if redirect_to:
return HttpResponseRedirect(redirect_to)
else:
ctx = RequestContext(request, {
REDIRECT_FIELD_NAME: redirect_to,
})
return render_to_response('registration/registration_complete.html',
context_instance=ctx)
else:
createform = todo
else:
#log.debug('Login form')
success, todo = _login(request, redirect_to)
if success:
return todo
else:
loginform = todo
request.POST = QueryDict("")
else:
request.session.set_test_cookie()
if not loginform:
success, loginform = _login(request, redirect_to)
if not createform:
ret = registration_handler(request, **handler_kwargs)
success = ret[0]
createform = ret[1]
if len(ret) > 2:
extra_context = ret[2]
site = Site.objects.get_current()
if config_get_group('NEWSLETTER'):
show_newsletter = True
else:
show_newsletter = False
ctx = {
'loginform': loginform,
'createform' : createform,
REDIRECT_FIELD_NAME: redirect_to,
'site_name': site.name,
'show_newsletter' : show_newsletter,
}
if extra_context:
ctx.update(extra_context)
context = RequestContext(request, ctx)
return render_to_response(template_name, context_instance=context)
def login_signup_address(request, template_name="contact/login_signup_address.html"):
"""
View which allows a user to login or else fill out a full address form.
"""
return login_signup(request,
template_name=template_name,
registration_handler=register_handle_address_form,
handler_kwargs={'action_required' : 'create'})
def register(request, redirect=None, template='registration/registration_form.html'):
"""
Allows a new user to register an account.
"""
ret = register_handle_form(request, redirect)
success = ret[0]
todo = ret[1]
if len(ret) > 2:
extra_context = ret[2]
else:
extra_context = {}
if success:
return todo
else:
if config_get_group('NEWSLETTER'):
show_newsletter = True
else:
show_newsletter = False
ctx = {
'form': todo,
'title' : _('Registration Form'),
'show_newsletter' : show_newsletter,
'allow_nickname' : config_value('SHOP', 'ALLOW_NICKNAME_USERNAME')
}
if extra_context:
ctx.update(extra_context)
context = RequestContext(request, ctx)
return render_to_response(template, context_instance=context)
class RegistrationComplete(TemplateView):
def get_context_data(self, **kwargs):
context = super(RegistrationComplete, self).get_context_data(**kwargs)
verify = (config_value('SHOP', 'ACCOUNT_VERIFICATION') == 'EMAIL')
context.update(verify=verify)
return context
|
jhseu/tensorflow
|
refs/heads/master
|
tensorflow/compiler/tests/lrn_ops_test.py
|
24
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Local Response Normalization ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import nn
from tensorflow.python.platform import googletest
CPU_DEVICE = "/job:localhost/replica:0/task:0/cpu:0"
# Local response normalization tests. The forward tests are copied from
# tensorflow/python/kernel_tests/lrn_op_test.py
class LRNTest(xla_test.XLATestCase):
def _LRN(self, input_image, lrn_depth_radius=5, bias=1.0, alpha=1.0,
beta=0.5):
"""Compute expected result."""
output = copy.deepcopy(input_image)
batch_size = input_image.shape[0]
rows = input_image.shape[1]
cols = input_image.shape[2]
depth = input_image.shape[3]
for b in range(batch_size):
for r in range(rows):
for c in range(cols):
for d in range(depth):
begin = max(0, d - lrn_depth_radius)
end = min(depth, d + lrn_depth_radius + 1)
patch = input_image[b, r, c, begin:end]
output[b, r, c, d] /= (
np.power(bias + alpha * np.sum(patch * patch), beta))
return output
def _RunAndVerify(self, dtype):
with self.session():
# random shape
shape = np.random.randint(1, 16, size=4)
# Make depth at least 2 to make it meaningful
shape[3] += 1
p = array_ops.placeholder(dtype, shape=shape)
# random depth_radius, bias, alpha, beta
lrn_depth_radius = np.random.randint(1, shape[3])
bias = 1.0 + np.random.rand()
alpha = 2.0 * np.random.rand()
beta = 2.0 * np.random.rand()
with self.test_scope():
lrn_t = nn.local_response_normalization(
p,
name="lrn",
depth_radius=lrn_depth_radius,
bias=bias,
alpha=alpha,
beta=beta)
params = {p: np.random.rand(*shape).astype("f")}
result = lrn_t.eval(feed_dict=params)
expected = self._LRN(
params[p],
lrn_depth_radius=lrn_depth_radius,
bias=bias,
alpha=alpha,
beta=beta)
err = np.amax(np.abs(result - expected))
print("LRN error for bias ", bias, "alpha ", alpha, " beta ", beta, " is ",
err)
if dtype == dtypes.float32:
self.assertTrue(err < 1e-4)
else:
self.assertTrue(err < 1e-2)
self.assertShapeEqual(expected, lrn_t)
def testCompute(self):
for _ in range(2):
self._RunAndVerify(dtypes.float32)
def testLrnGrad(self):
# Test for LRNGrad that compares against the CPU implementation.
shape = [1, 2, 3, 4]
total_size = np.prod(shape)
in_image_vals = np.arange(1, total_size + 1, dtype=np.float32)
out_image_vals = np.arange(1, total_size + 1, dtype=np.float32)
out_grads_vals = np.arange(1, total_size + 1, dtype=np.float32)
depth_radius = np.random.randint(1, shape[3])
bias = 1.0 + np.random.rand()
alpha = 1.0 * np.random.rand()
beta = 1.0 * np.random.rand()
with self.session():
in_image = constant_op.constant(in_image_vals, shape=shape)
out_image = constant_op.constant(out_image_vals, shape=shape)
out_grads = constant_op.constant(out_grads_vals, shape=shape)
with ops.device(CPU_DEVICE):
expected = gen_nn_ops.lrn_grad(out_grads, in_image, out_image,
depth_radius, bias, alpha, beta)
with self.test_scope():
actual = gen_nn_ops.lrn_grad(out_grads, in_image, out_image,
depth_radius, bias, alpha, beta)
expected_val = self.evaluate(expected)
actual_val = self.evaluate(actual)
self.assertAllClose(actual_val, expected_val, rtol=1e-3)
if __name__ == "__main__":
googletest.main()
|
norayr/unisubs
|
refs/heads/staging
|
apps/videos/migrations/0015_auto__add_field_action_comment.py
|
5
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
depends_on = (
("comments", "0001_initial"),
)
def forwards(self, orm):
# Adding field 'Action.comment'
db.add_column('videos_action', 'comment', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['comments.Comment'], null=True, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'Action.comment'
db.delete_column('videos_action', 'comment_id')
models = {
'auth.customuser': {
'Meta': {'object_name': 'CustomUser', '_ormbases': ['auth.User']},
'biography': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'changes_notification': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'homepage': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'picture': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'preferred_language': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
'valid_email': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'comments.comment': {
'Meta': {'object_name': 'Comment'},
'content': ('django.db.models.fields.TextField', [], {'max_length': '3000'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_comment'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_pk': ('django.db.models.fields.TextField', [], {}),
'reply_to': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['comments.Comment']", 'null': 'True', 'blank': 'True'}),
'submit_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'videos.action': {
'Meta': {'object_name': 'Action'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['comments.Comment']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"})
},
'videos.nulltranslations': {
'Meta': {'object_name': 'NullTranslations'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"})
},
'videos.nullvideocaptions': {
'Meta': {'object_name': 'NullVideoCaptions'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"})
},
'videos.proxyvideo': {
'Meta': {'object_name': 'ProxyVideo', '_ormbases': ['videos.Video']},
'video_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['videos.Video']", 'unique': 'True', 'primary_key': 'True'})
},
'videos.translation': {
'Meta': {'object_name': 'Translation'},
'caption_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'null_translations': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.NullTranslations']", 'null': 'True'}),
'translation_text': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'version': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.TranslationVersion']", 'null': 'True'})
},
'videos.translationlanguage': {
'Meta': {'object_name': 'TranslationLanguage'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"}),
'writelock_owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True'}),
'writelock_session_key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'writelock_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'videos.translationversion': {
'Meta': {'object_name': 'TranslationVersion'},
'datetime_started': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.TranslationLanguage']"}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'notification_sent': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'text_change': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'time_change': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"}),
'version_no': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'videos.usertestresult': {
'Meta': {'object_name': 'UserTestResult'},
'browser': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'get_updates': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'task1': ('django.db.models.fields.TextField', [], {}),
'task2': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'task3': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'videos.video': {
'Meta': {'object_name': 'Video'},
'allow_community_edits': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'bliptv_fileid': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'bliptv_flv_url': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True'}),
'subtitles_fetched_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'video_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'video_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'video_url': ('django.db.models.fields.URLField', [], {'max_length': '2048'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'widget_views_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'writelock_owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'writelock_owners'", 'null': 'True', 'to': "orm['auth.CustomUser']"}),
'writelock_session_key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'writelock_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'youtube_videoid': ('django.db.models.fields.CharField', [], {'max_length': '32'})
},
'videos.videocaption': {
'Meta': {'object_name': 'VideoCaption'},
'caption_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'caption_text': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'end_time': ('django.db.models.fields.FloatField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'null_captions': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.NullVideoCaptions']", 'null': 'True'}),
'start_time': ('django.db.models.fields.FloatField', [], {}),
'version': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.VideoCaptionVersion']", 'null': 'True'})
},
'videos.videocaptionversion': {
'Meta': {'object_name': 'VideoCaptionVersion'},
'datetime_started': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'notification_sent': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'text_change': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'time_change': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"}),
'version_no': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"})
}
}
complete_apps = ['videos']
|
robertkowalski/node-gyp
|
refs/heads/master
|
gyp/tools/graphviz.py
|
2679
|
#!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Using the JSON dumped by the dump-dependency-json generator,
generate input suitable for graphviz to render a dependency graph of
targets."""
import collections
import json
import sys
def ParseTarget(target):
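  # A target string looks like 'path/to/file.gyp:target_name#toolset'
  # (the '#toolset' suffix is optional); split it into its three parts.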
target, _, suffix = target.partition('#')
filename, _, target = target.partition(':')
return filename, target, suffix
def LoadEdges(filename, targets):
  """Load the edges map from the dump file, and filter it to only
  show targets in |targets| and their dependents."""
  file = open(filename)
  edges = json.load(file)
  file.close()
# Copy out only the edges we're interested in from the full edge list.
target_edges = {}
to_visit = targets[:]
while to_visit:
src = to_visit.pop()
if src in target_edges:
continue
target_edges[src] = edges[src]
to_visit.extend(edges[src])
return target_edges
def WriteGraph(edges):
"""Print a graphviz graph to stdout.
|edges| is a map of target to a list of other targets it depends on."""
# Bucket targets by file.
files = collections.defaultdict(list)
for src, dst in edges.items():
build_file, target_name, toolset = ParseTarget(src)
files[build_file].append(src)
print 'digraph D {'
print ' fontsize=8' # Used by subgraphs.
print ' node [fontsize=8]'
# Output nodes by file. We must first write out each node within
# its file grouping before writing out any edges that may refer
# to those nodes.
for filename, targets in files.items():
if len(targets) == 1:
# If there's only one node for this file, simplify
# the display by making it a box without an internal node.
target = targets[0]
build_file, target_name, toolset = ParseTarget(target)
print ' "%s" [shape=box, label="%s\\n%s"]' % (target, filename,
target_name)
else:
# Group multiple nodes together in a subgraph.
print ' subgraph "cluster_%s" {' % filename
print ' label = "%s"' % filename
for target in targets:
build_file, target_name, toolset = ParseTarget(target)
print ' "%s" [label="%s"]' % (target, target_name)
print ' }'
# Now that we've placed all the nodes within subgraphs, output all
# the edges between nodes.
for src, dsts in edges.items():
for dst in dsts:
print ' "%s" -> "%s"' % (src, dst)
print '}'
def main():
if len(sys.argv) < 2:
print >>sys.stderr, __doc__
print >>sys.stderr
print >>sys.stderr, 'usage: %s target1 target2...' % (sys.argv[0])
return 1
edges = LoadEdges('dump.json', sys.argv[1:])
WriteGraph(edges)
return 0
if __name__ == '__main__':
sys.exit(main())
|
allotria/intellij-community
|
refs/heads/master
|
python/testData/refactoring/makeFunctionTopLevel/methodInsertionPositionUsageInAnotherFile/after/other.py
|
31
|
def already_existing1():
pass
def already_existing2():
pass
from main import C
def method():
pass
method()
def already_existing3():
pass
|
suncycheng/intellij-community
|
refs/heads/master
|
python/testData/resolve/multiFile/relativeSimple/sys.py
|
83
|
token = "local"
|
mdakin/engine
|
refs/heads/master
|
build/android/pylib/local/local_test_server_spawner.py
|
58
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from pylib import chrome_test_server_spawner
from pylib import forwarder
from pylib.base import test_server
class LocalTestServerSpawner(test_server.TestServer):
def __init__(self, port, device, tool):
super(LocalTestServerSpawner, self).__init__()
self._device = device
self._spawning_server = chrome_test_server_spawner.SpawningServer(
port, device, tool)
self._tool = tool
@property
def server_address(self):
return self._spawning_server.server.server_address
@property
def port(self):
return self.server_address[1]
#override
def SetUp(self):
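    # Publish the spawner port in a well-known file on the device so code
    # running there can discover it, then forward the port and start the
    # spawning server.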
self._device.WriteFile(
'%s/net-test-server-ports' % self._device.GetExternalStoragePath(),
'%s:0' % str(self.port))
forwarder.Forwarder.Map(
[(self.port, self.port)], self._device, self._tool)
self._spawning_server.Start()
#override
def Reset(self):
self._spawning_server.CleanupState()
#override
def TearDown(self):
self.Reset()
self._spawning_server.Stop()
forwarder.Forwarder.UnmapDevicePort(self.port, self._device)
|
hpcloud-mon/tempest
|
refs/heads/master
|
tempest/api/identity/__init__.py
|
44
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
# All identity tests -- single setup function
def setup_package():
LOG.debug("Entering tempest.api.identity.setup_package")
|
thedrow/django
|
refs/heads/master
|
django/db/backends/postgresql_psycopg2/operations.py
|
207
|
from __future__ import unicode_literals
from psycopg2.extras import Inet
from django.conf import settings
from django.db.backends.base.operations import BaseDatabaseOperations
class DatabaseOperations(BaseDatabaseOperations):
def unification_cast_sql(self, output_field):
internal_type = output_field.get_internal_type()
if internal_type in ("GenericIPAddressField", "IPAddressField", "TimeField", "UUIDField"):
# PostgreSQL will resolve a union as type 'text' if input types are
# 'unknown'.
# http://www.postgresql.org/docs/9.4/static/typeconv-union-case.html
# These fields cannot be implicitly cast back in the default
# PostgreSQL configuration so we need to explicitly cast them.
# We must also remove components of the type within brackets:
# varchar(255) -> varchar.
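            # e.g. a TimeField column yields CAST(%s AS time).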
return 'CAST(%%s AS %s)' % output_field.db_type(self.connection).split('(')[0]
return '%s'
def date_extract_sql(self, lookup_type, field_name):
# http://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-EXTRACT
if lookup_type == 'week_day':
# For consistency across backends, we return Sunday=1, Saturday=7.
return "EXTRACT('dow' FROM %s) + 1" % field_name
else:
return "EXTRACT('%s' FROM %s)" % (lookup_type, field_name)
def date_trunc_sql(self, lookup_type, field_name):
# http://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC
return "DATE_TRUNC('%s', %s)" % (lookup_type, field_name)
def _convert_field_to_tz(self, field_name, tzname):
if settings.USE_TZ:
field_name = "%s AT TIME ZONE %%s" % field_name
params = [tzname]
else:
params = []
return field_name, params
def datetime_cast_date_sql(self, field_name, tzname):
field_name, params = self._convert_field_to_tz(field_name, tzname)
sql = '(%s)::date' % field_name
return sql, params
def datetime_extract_sql(self, lookup_type, field_name, tzname):
field_name, params = self._convert_field_to_tz(field_name, tzname)
sql = self.date_extract_sql(lookup_type, field_name)
return sql, params
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
field_name, params = self._convert_field_to_tz(field_name, tzname)
# http://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC
sql = "DATE_TRUNC('%s', %s)" % (lookup_type, field_name)
return sql, params
def deferrable_sql(self):
return " DEFERRABLE INITIALLY DEFERRED"
def lookup_cast(self, lookup_type, internal_type=None):
lookup = '%s'
# Cast text lookups to text to allow things like filter(x__contains=4)
if lookup_type in ('iexact', 'contains', 'icontains', 'startswith',
'istartswith', 'endswith', 'iendswith', 'regex', 'iregex'):
if internal_type in ('IPAddressField', 'GenericIPAddressField'):
lookup = "HOST(%s)"
else:
lookup = "%s::text"
# Use UPPER(x) for case-insensitive lookups; it's faster.
if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
lookup = 'UPPER(%s)' % lookup
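        # e.g. an icontains lookup on a text column renders roughly as
        # UPPER("column"::text) LIKE UPPER(%s) in the final SQL.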
return lookup
def last_insert_id(self, cursor, table_name, pk_name):
# Use pg_get_serial_sequence to get the underlying sequence name
# from the table name and column name (available since PostgreSQL 8)
cursor.execute("SELECT CURRVAL(pg_get_serial_sequence('%s','%s'))" % (
self.quote_name(table_name), pk_name))
return cursor.fetchone()[0]
def no_limit_value(self):
return None
def prepare_sql_script(self, sql):
return [sql]
def quote_name(self, name):
if name.startswith('"') and name.endswith('"'):
return name # Quoting once is enough.
return '"%s"' % name
def set_time_zone_sql(self):
return "SET TIME ZONE %s"
def sql_flush(self, style, tables, sequences, allow_cascade=False):
if tables:
# Perform a single SQL 'TRUNCATE x, y, z...;' statement. It allows
# us to truncate tables referenced by a foreign key in any other
# table.
tables_sql = ', '.join(
style.SQL_FIELD(self.quote_name(table)) for table in tables)
if allow_cascade:
sql = ['%s %s %s;' % (
style.SQL_KEYWORD('TRUNCATE'),
tables_sql,
style.SQL_KEYWORD('CASCADE'),
)]
else:
sql = ['%s %s;' % (
style.SQL_KEYWORD('TRUNCATE'),
tables_sql,
)]
sql.extend(self.sequence_reset_by_name_sql(style, sequences))
return sql
else:
return []
def sequence_reset_by_name_sql(self, style, sequences):
# 'ALTER SEQUENCE sequence_name RESTART WITH 1;'... style SQL statements
# to reset sequence indices
sql = []
for sequence_info in sequences:
table_name = sequence_info['table']
column_name = sequence_info['column']
if not (column_name and len(column_name) > 0):
# This will be the case if it's an m2m using an autogenerated
# intermediate table (see BaseDatabaseIntrospection.sequence_list)
column_name = 'id'
sql.append("%s setval(pg_get_serial_sequence('%s','%s'), 1, false);" %
(style.SQL_KEYWORD('SELECT'),
style.SQL_TABLE(self.quote_name(table_name)),
style.SQL_FIELD(column_name))
)
return sql
def tablespace_sql(self, tablespace, inline=False):
if inline:
return "USING INDEX TABLESPACE %s" % self.quote_name(tablespace)
else:
return "TABLESPACE %s" % self.quote_name(tablespace)
def sequence_reset_sql(self, style, model_list):
from django.db import models
output = []
qn = self.quote_name
for model in model_list:
# Use `coalesce` to set the sequence for each model to the max pk value if there are records,
# or 1 if there are none. Set the `is_called` property (the third argument to `setval`) to true
# if there are records (as the max pk value is already in use), otherwise set it to false.
# Use pg_get_serial_sequence to get the underlying sequence name from the table name
# and column name (available since PostgreSQL 8)
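        # The generated statement looks like:
        #   SELECT setval(pg_get_serial_sequence('"app_model"','id'),
        #          coalesce(max("id"), 1), max("id") IS NOT null) FROM "app_model";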
for f in model._meta.local_fields:
if isinstance(f, models.AutoField):
output.append(
"%s setval(pg_get_serial_sequence('%s','%s'), "
"coalesce(max(%s), 1), max(%s) %s null) %s %s;" % (
style.SQL_KEYWORD('SELECT'),
style.SQL_TABLE(qn(model._meta.db_table)),
style.SQL_FIELD(f.column),
style.SQL_FIELD(qn(f.column)),
style.SQL_FIELD(qn(f.column)),
style.SQL_KEYWORD('IS NOT'),
style.SQL_KEYWORD('FROM'),
style.SQL_TABLE(qn(model._meta.db_table)),
)
)
break # Only one AutoField is allowed per model, so don't bother continuing.
for f in model._meta.many_to_many:
if not f.remote_field.through:
output.append(
"%s setval(pg_get_serial_sequence('%s','%s'), "
"coalesce(max(%s), 1), max(%s) %s null) %s %s;" % (
style.SQL_KEYWORD('SELECT'),
style.SQL_TABLE(qn(f.m2m_db_table())),
style.SQL_FIELD('id'),
style.SQL_FIELD(qn('id')),
style.SQL_FIELD(qn('id')),
style.SQL_KEYWORD('IS NOT'),
style.SQL_KEYWORD('FROM'),
style.SQL_TABLE(qn(f.m2m_db_table()))
)
)
return output
def prep_for_iexact_query(self, x):
return x
def max_name_length(self):
"""
Returns the maximum length of an identifier.
Note that the maximum length of an identifier is 63 by default, but can
be changed by recompiling PostgreSQL after editing the NAMEDATALEN
macro in src/include/pg_config_manual.h .
This implementation simply returns 63, but can easily be overridden by a
custom database backend that inherits most of its behavior from this one.
"""
return 63
def distinct_sql(self, fields):
if fields:
return 'DISTINCT ON (%s)' % ', '.join(fields)
else:
return 'DISTINCT'
def last_executed_query(self, cursor, sql, params):
# http://initd.org/psycopg/docs/cursor.html#cursor.query
# The query attribute is a Psycopg extension to the DB API 2.0.
if cursor.query is not None:
return cursor.query.decode('utf-8')
return None
def return_insert_id(self):
return "RETURNING %s", ()
def bulk_insert_sql(self, fields, num_values):
items_sql = "(%s)" % ", ".join(["%s"] * len(fields))
return "VALUES " + ", ".join([items_sql] * num_values)
def adapt_datefield_value(self, value):
return value
def adapt_datetimefield_value(self, value):
return value
def adapt_timefield_value(self, value):
return value
def adapt_ipaddressfield_value(self, value):
if value:
return Inet(value)
return None
|
capveg/bigcode
|
refs/heads/master
|
tools/infra.py
|
7
|
#!/usr/bin/env python
################################################################
#
# Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Eclipse Public License, Version 1.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.eclipse.org/legal/epl-v10.html
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the
# License.
#
################################################################
#
# Resolve dependencies on the infra repository.
#
################################################################
import os
import sys
# The root of the repository
ROOT = os.path.realpath("%s/.." % (os.path.dirname(__file__)))
SUBMODULE_INFRA = os.getenv("SUBMODULE_INFRA")
if SUBMODULE_INFRA is None:
SUBMODULE_INFRA = "%s/submodules/infra" % ROOT
if not os.path.exists("%s/builder/unix/tools" % SUBMODULE_INFRA):
raise Exception("This script requires the infra repository.")
sys.path.append("%s/builder/unix/tools" % SUBMODULE_INFRA)
|
kenwang76/readthedocs.org
|
refs/heads/master
|
readthedocs/vcs_support/backends/hg.py
|
34
|
from readthedocs.projects.exceptions import ProjectImportError
from readthedocs.vcs_support.base import BaseVCS, VCSVersion
class Backend(BaseVCS):
supports_tags = True
supports_branches = True
fallback_branch = 'default'
def update(self):
super(Backend, self).update()
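        # 'hg status' only succeeds inside an existing working copy, so its
        # return code tells us whether to pull or start with a fresh clone.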
retcode = self.run('hg', 'status')[0]
if retcode == 0:
return self.pull()
else:
return self.clone()
def pull(self):
pull_output = self.run('hg', 'pull')
if pull_output[0] != 0:
raise ProjectImportError(
("Failed to get code from '%s' (hg pull): %s"
% (self.repo_url, pull_output[0]))
)
        update_output = self.run('hg', 'update', '-C')
        if update_output[0] != 0:
            raise ProjectImportError(
                ("Failed to get code from '%s' (hg update): %s"
                 % (self.repo_url, update_output[0]))
            )
return update_output
def clone(self):
self.make_clean_working_dir()
output = self.run('hg', 'clone', self.repo_url, '.')
if output[0] != 0:
raise ProjectImportError(
("Failed to get code from '%s' (hg clone): %s"
% (self.repo_url, output[0]))
)
return output
@property
def branches(self):
retcode, stdout = self.run('hg', 'branches', '-q')[:2]
        # error (or no branches found)
if retcode != 0:
return []
return self.parse_branches(stdout)
def parse_branches(self, data):
"""
stable
default
"""
names = [name.lstrip() for name in data.splitlines()]
return [VCSVersion(self, name, name) for name in names if name]
@property
def tags(self):
retcode, stdout = self.run('hg', 'tags')[:2]
# error (or no tags found)
if retcode != 0:
return []
return self.parse_tags(stdout)
def parse_tags(self, data):
"""
Parses output of `hg tags`, eg:
tip 278:c4b2d21db51a
0.2.2 152:6b0364d98837
0.2.1 117:a14b7b6ffa03
0.1 50:30c2c6b3a055
maintenance release 1 10:f83c32fe8126
Into VCSVersion objects with the tag name as verbose_name and the
commit hash as identifier.
"""
vcs_tags = []
tag_lines = [line.strip() for line in data.splitlines()]
# starting from the rhs of each line, split a single value (changeset)
# off at whitespace; the tag name is the string to the left of that
tag_pairs = [line.rsplit(None, 1) for line in tag_lines]
for row in tag_pairs:
if len(row) != 2:
continue
name, commit = row
if name == 'tip':
continue
revision, commit_hash = commit.split(':')
vcs_tags.append(VCSVersion(self, commit_hash, name))
return vcs_tags
@property
def commit(self):
retcode, stdout = self.run('hg', 'id', '-i')[:2]
return stdout.strip()
def checkout(self, identifier=None):
super(Backend, self).checkout()
if not identifier:
identifier = 'tip'
retcode = self.run('hg', 'status')[0]
if retcode == 0:
self.run('hg', 'pull')
return self.run('hg', 'update', '-C', identifier)
else:
self.clone()
return self.run('hg', 'update', '-C', identifier)
|
2014c2g3/0623exam
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/browser/websocket_1.py
|
618
|
from browser import window
import javascript
WebSocket = javascript.JSConstructor(window.WebSocket)
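# Usage sketch (hypothetical endpoint; the wrapped object behaves like a
# raw JS WebSocket):
#   ws = WebSocket("ws://example.com/echo")
#   ws.onmessage = lambda evt: print(evt.data)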
|
gangadharkadam/sher
|
refs/heads/develop
|
erpnext/accounts/doctype/journal_voucher/journal_voucher.py
|
5
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cint, cstr, flt, fmt_money, formatdate, getdate
from frappe import msgprint, _, scrub
from erpnext.setup.utils import get_company_currency
from erpnext.controllers.accounts_controller import AccountsController
class JournalVoucher(AccountsController):
def __init__(self, arg1, arg2=None):
super(JournalVoucher, self).__init__(arg1, arg2)
self.master_type = {}
self.credit_days_for = {}
self.credit_days_global = -1
self.is_approving_authority = -1
def validate(self):
if not self.is_opening:
self.is_opening='No'
self.clearance_date = None
super(JournalVoucher, self).validate_date_with_fiscal_year()
self.validate_cheque_info()
self.validate_entries_for_advance()
self.validate_debit_and_credit()
self.validate_against_jv()
self.validate_against_sales_invoice()
self.validate_against_purchase_invoice()
self.set_against_account()
self.create_remarks()
self.set_aging_date()
self.set_print_format_fields()
self.validate_against_sales_order()
self.validate_against_purchase_order()
def on_submit(self):
if self.voucher_type in ['Bank Voucher', 'Contra Voucher', 'Journal Entry']:
self.check_credit_days()
self.make_gl_entries()
self.check_credit_limit()
self.update_advance_paid()
def update_advance_paid(self):
advance_paid = frappe._dict()
for d in self.get("entries"):
if d.is_advance:
if d.against_sales_order:
advance_paid.setdefault("Sales Order", []).append(d.against_sales_order)
elif d.against_purchase_order:
advance_paid.setdefault("Purchase Order", []).append(d.against_purchase_order)
for voucher_type, order_list in advance_paid.items():
for voucher_no in list(set(order_list)):
frappe.get_doc(voucher_type, voucher_no).set_total_advance_paid()
def on_cancel(self):
from erpnext.accounts.utils import remove_against_link_from_jv
remove_against_link_from_jv(self.doctype, self.name, "against_jv")
self.make_gl_entries(1)
self.update_advance_paid()
def validate_cheque_info(self):
if self.voucher_type in ['Bank Voucher']:
if not self.cheque_no or not self.cheque_date:
msgprint(_("Reference No & Reference Date is required for {0}").format(self.voucher_type),
raise_exception=1)
if self.cheque_date and not self.cheque_no:
msgprint(_("Reference No is mandatory if you entered Reference Date"), raise_exception=1)
def validate_entries_for_advance(self):
for d in self.get('entries'):
if not d.is_advance and not d.against_voucher and \
not d.against_invoice and not d.against_jv:
master_type = frappe.db.get_value("Account", d.account, "master_type")
if (master_type == 'Customer' and flt(d.credit) > 0) or \
(master_type == 'Supplier' and flt(d.debit) > 0):
msgprint(_("Row {0}: Please check 'Is Advance' against Account {1} if this \
is an advance entry.").format(d.idx, d.account))
def validate_against_jv(self):
for d in self.get('entries'):
if d.against_jv:
if d.against_jv == self.name:
frappe.throw(_("You can not enter current voucher in 'Against Journal Voucher' column"))
against_entries = frappe.db.sql("""select * from `tabJournal Voucher Detail`
where account = %s and docstatus = 1 and parent = %s
and ifnull(against_jv, '') = ''""", (d.account, d.against_jv), as_dict=True)
if not against_entries:
frappe.throw(_("Journal Voucher {0} does not have account {1} or already matched")
.format(d.against_jv, d.account))
else:
dr_or_cr = "debit" if d.credit > 0 else "credit"
valid = False
for jvd in against_entries:
if flt(jvd[dr_or_cr]) > 0:
valid = True
if not valid:
frappe.throw(_("Against Journal Voucher {0} does not have any unmatched {1} entry")
.format(d.against_jv, dr_or_cr))
def validate_against_sales_invoice(self):
payment_against_voucher = self.validate_account_in_against_voucher("against_invoice", "Sales Invoice")
self.validate_against_invoice_fields("Sales Invoice", payment_against_voucher)
def validate_against_purchase_invoice(self):
payment_against_voucher = self.validate_account_in_against_voucher("against_voucher", "Purchase Invoice")
self.validate_against_invoice_fields("Purchase Invoice", payment_against_voucher)
def validate_against_sales_order(self):
payment_against_voucher = self.validate_account_in_against_voucher("against_sales_order", "Sales Order")
self.validate_against_order_fields("Sales Order", payment_against_voucher)
def validate_against_purchase_order(self):
payment_against_voucher = self.validate_account_in_against_voucher("against_purchase_order", "Purchase Order")
self.validate_against_order_fields("Purchase Order", payment_against_voucher)
def validate_account_in_against_voucher(self, against_field, doctype):
payment_against_voucher = frappe._dict()
field_dict = {'Sales Invoice': "Debit To",
'Purchase Invoice': "Credit To",
'Sales Order': "Customer",
'Purchase Order': "Supplier"
}
for d in self.get("entries"):
if d.get(against_field):
dr_or_cr = "credit" if against_field in ["against_invoice", "against_sales_order"] \
else "debit"
if against_field in ["against_invoice", "against_sales_order"] \
and flt(d.debit) > 0:
frappe.throw(_("Row {0}: Debit entry can not be linked with a {1}").format(d.idx, doctype))
if against_field in ["against_voucher", "against_purchase_order"] \
and flt(d.credit) > 0:
frappe.throw(_("Row {0}: Credit entry can not be linked with a {1}").format(d.idx, doctype))
voucher_account = frappe.db.get_value(doctype, d.get(against_field), \
scrub(field_dict.get(doctype)))
account_master_name = frappe.db.get_value("Account", d.account, "master_name")
if against_field in ["against_invoice", "against_voucher"] \
and voucher_account != d.account:
frappe.throw(_("Row {0}: Account {1} does not match with {2} {3} account") \
.format(d.idx, d.account, doctype, field_dict.get(doctype)))
if against_field in ["against_sales_order", "against_purchase_order"]:
if voucher_account != account_master_name:
frappe.throw(_("Row {0}: Account {1} does not match with {2} {3} Name") \
.format(d.idx, d.account, doctype, field_dict.get(doctype)))
elif d.is_advance == "Yes":
payment_against_voucher.setdefault(d.get(against_field), []).append(flt(d.get(dr_or_cr)))
return payment_against_voucher
def validate_against_invoice_fields(self, doctype, payment_against_voucher):
for voucher_no, payment_list in payment_against_voucher.items():
voucher_properties = frappe.db.get_value(doctype, voucher_no,
["docstatus", "outstanding_amount"])
if voucher_properties[0] != 1:
frappe.throw(_("{0} {1} is not submitted").format(doctype, voucher_no))
if flt(voucher_properties[1]) < flt(sum(payment_list)):
frappe.throw(_("Payment against {0} {1} cannot be greater \
than Outstanding Amount {2}").format(doctype, voucher_no, voucher_properties[1]))
def validate_against_order_fields(self, doctype, payment_against_voucher):
for voucher_no, payment_list in payment_against_voucher.items():
voucher_properties = frappe.db.get_value(doctype, voucher_no,
["docstatus", "per_billed", "advance_paid", "grand_total"])
if voucher_properties[0] != 1:
frappe.throw(_("{0} {1} is not submitted").format(doctype, voucher_no))
if flt(voucher_properties[1]) >= 100:
frappe.throw(_("{0} {1} is fully billed").format(doctype, voucher_no))
if flt(voucher_properties[3]) < flt(voucher_properties[2]) + flt(sum(payment_list)):
frappe.throw(_("Advance paid against {0} {1} cannot be greater \
than Grand Total {2}").format(doctype, voucher_no, voucher_properties[3]))
def set_against_account(self):
accounts_debited, accounts_credited = [], []
for d in self.get("entries"):
			if flt(d.debit) > 0: accounts_debited.append(d.account)
			if flt(d.credit) > 0: accounts_credited.append(d.account)
		for d in self.get("entries"):
			if flt(d.debit) > 0: d.against_account = ", ".join(list(set(accounts_credited)))
			if flt(d.credit) > 0: d.against_account = ", ".join(list(set(accounts_debited)))
def validate_debit_and_credit(self):
self.total_debit, self.total_credit, self.difference = 0, 0, 0
for d in self.get("entries"):
if d.debit and d.credit:
frappe.throw(_("You cannot credit and debit same account at the same time"))
self.total_debit = flt(self.total_debit) + flt(d.debit, self.precision("debit", "entries"))
self.total_credit = flt(self.total_credit) + flt(d.credit, self.precision("credit", "entries"))
self.difference = flt(self.total_debit, self.precision("total_debit")) - \
flt(self.total_credit, self.precision("total_credit"))
if self.difference:
frappe.throw(_("Total Debit must be equal to Total Credit. The difference is {0}")
.format(self.difference))
def create_remarks(self):
r = []
if self.cheque_no:
if self.cheque_date:
r.append(_('Reference #{0} dated {1}').format(self.cheque_no, formatdate(self.cheque_date)))
else:
msgprint(_("Please enter Reference date"), raise_exception=frappe.MandatoryError)
for d in self.get('entries'):
if d.against_invoice and d.credit:
currency = frappe.db.get_value("Sales Invoice", d.against_invoice, "currency")
r.append(_("{0} against Sales Invoice {1}").format(fmt_money(flt(d.credit), currency = currency), \
d.against_invoice))
if d.against_sales_order and d.credit:
currency = frappe.db.get_value("Sales Order", d.against_sales_order, "currency")
r.append(_("{0} against Sales Order {1}").format(fmt_money(flt(d.credit), currency = currency), \
d.against_sales_order))
if d.against_voucher and d.debit:
bill_no = frappe.db.sql("""select bill_no, bill_date, currency
from `tabPurchase Invoice` where name=%s""", d.against_voucher)
if bill_no and bill_no[0][0] and bill_no[0][0].lower().strip() \
not in ['na', 'not applicable', 'none']:
r.append(_('{0} {1} against Bill {2} dated {3}').format(bill_no[0][2],
fmt_money(flt(d.debit)), bill_no[0][0],
bill_no[0][1] and formatdate(bill_no[0][1].strftime('%Y-%m-%d'))))
if d.against_purchase_order and d.debit:
currency = frappe.db.get_value("Purchase Order", d.against_purchase_order, "currency")
r.append(_("{0} against Purchase Order {1}").format(fmt_money(flt(d.credit), currency = currency), \
d.against_purchase_order))
if self.user_remark:
r.append(_("Note: {0}").format(self.user_remark))
if r:
self.remark = ("\n").join(r) #User Remarks is not mandatory
def set_aging_date(self):
if self.is_opening != 'Yes':
self.aging_date = self.posting_date
else:
# check account type whether supplier or customer
exists = False
for d in self.get('entries'):
account_type = frappe.db.get_value("Account", d.account, "account_type")
if account_type in ["Supplier", "Customer"]:
exists = True
break
# If customer/supplier account, aging date is mandatory
if exists and not self.aging_date:
msgprint(_("Aging Date is mandatory for opening entry"), raise_exception=1)
else:
self.aging_date = self.posting_date
def set_print_format_fields(self):
for d in self.get('entries'):
result = frappe.db.get_value("Account", d.account,
["account_type", "master_type"])
if not result:
continue
account_type, master_type = result
if master_type in ['Supplier', 'Customer']:
if not self.pay_to_recd_from:
self.pay_to_recd_from = frappe.db.get_value(master_type,
' - '.join(d.account.split(' - ')[:-1]),
master_type == 'Customer' and 'customer_name' or 'supplier_name')
if account_type in ['Bank', 'Cash']:
company_currency = get_company_currency(self.company)
amt = flt(d.debit) and d.debit or d.credit
self.total_amount = fmt_money(amt, currency=company_currency)
from frappe.utils import money_in_words
self.total_amount_in_words = money_in_words(amt, company_currency)
def check_credit_days(self):
date_diff = 0
if self.cheque_date:
date_diff = (getdate(self.cheque_date)-getdate(self.posting_date)).days
if date_diff <= 0: return
# Get List of Customer Account
acc_list = filter(lambda d: frappe.db.get_value("Account", d.account,
"master_type")=='Customer', self.get('entries'))
for d in acc_list:
credit_days = self.get_credit_days_for(d.account)
# Check credit days
if credit_days > 0 and not self.get_authorized_user() and cint(date_diff) > credit_days:
msgprint(_("Maximum allowed credit is {0} days after posting date").format(credit_days),
raise_exception=1)
def get_credit_days_for(self, ac):
if not self.credit_days_for.has_key(ac):
self.credit_days_for[ac] = cint(frappe.db.get_value("Account", ac, "credit_days"))
if not self.credit_days_for[ac]:
if self.credit_days_global==-1:
self.credit_days_global = cint(frappe.db.get_value("Company",
self.company, "credit_days"))
return self.credit_days_global
else:
return self.credit_days_for[ac]
def get_authorized_user(self):
if self.is_approving_authority==-1:
self.is_approving_authority = 0
# Fetch credit controller role
approving_authority = frappe.db.get_value("Accounts Settings", None,
"credit_controller")
# Check logged-in user is authorized
if approving_authority in frappe.user.get_roles():
self.is_approving_authority = 1
return self.is_approving_authority
def make_gl_entries(self, cancel=0, adv_adj=0):
from erpnext.accounts.general_ledger import make_gl_entries
gl_map = []
for d in self.get("entries"):
if d.debit or d.credit:
gl_map.append(
self.get_gl_dict({
"account": d.account,
"against": d.against_account,
"debit": flt(d.debit, self.precision("debit", "entries")),
"credit": flt(d.credit, self.precision("credit", "entries")),
"against_voucher_type": (("Purchase Invoice" if d.against_voucher else None)
or ("Sales Invoice" if d.against_invoice else None)
or ("Journal Voucher" if d.against_jv else None)
or ("Sales Order" if d.against_sales_order else None)
or ("Purchase Order" if d.against_purchase_order else None)),
"against_voucher": d.against_voucher or d.against_invoice or d.against_jv
or d.against_sales_order or d.against_purchase_order,
"remarks": self.remark,
"cost_center": d.cost_center
})
)
if gl_map:
make_gl_entries(gl_map, cancel=cancel, adv_adj=adv_adj)
def check_credit_limit(self):
for d in self.get("entries"):
master_type, master_name = frappe.db.get_value("Account", d.account,
["master_type", "master_name"])
if master_type == "Customer" and master_name:
super(JournalVoucher, self).check_credit_limit(d.account)
def get_balance(self):
if not self.get('entries'):
msgprint(_("'Entries' cannot be empty"), raise_exception=True)
else:
flag, self.total_debit, self.total_credit = 0, 0, 0
diff = flt(self.difference, self.precision("difference"))
# If any row without amount, set the diff on that row
for d in self.get('entries'):
if not d.credit and not d.debit and diff != 0:
if diff>0:
d.credit = diff
elif diff<0:
d.debit = diff
flag = 1
# Set the diff in a new row
if flag == 0 and diff != 0:
jd = self.append('entries', {})
if diff>0:
jd.credit = abs(diff)
elif diff<0:
jd.debit = abs(diff)
self.validate_debit_and_credit()
def get_outstanding_invoices(self):
self.set('entries', [])
total = 0
for d in self.get_values():
total += flt(d.outstanding_amount, self.precision("credit", "entries"))
jd1 = self.append('entries', {})
jd1.account = d.account
if self.write_off_based_on == 'Accounts Receivable':
jd1.credit = flt(d.outstanding_amount, self.precision("credit", "entries"))
jd1.against_invoice = cstr(d.name)
elif self.write_off_based_on == 'Accounts Payable':
jd1.debit = flt(d.outstanding_amount, self.precision("debit", "entries"))
jd1.against_voucher = cstr(d.name)
jd2 = self.append('entries', {})
if self.write_off_based_on == 'Accounts Receivable':
jd2.debit = total
elif self.write_off_based_on == 'Accounts Payable':
jd2.credit = total
self.validate_debit_and_credit()
def get_values(self):
cond = " and outstanding_amount <= {0}".format(self.write_off_amount) \
if flt(self.write_off_amount) > 0 else ""
if self.write_off_based_on == 'Accounts Receivable':
return frappe.db.sql("""select name, debit_to as account, outstanding_amount
from `tabSales Invoice` where docstatus = 1 and company = %s
and outstanding_amount > 0 %s""" % ('%s', cond), self.company, as_dict=True)
elif self.write_off_based_on == 'Accounts Payable':
return frappe.db.sql("""select name, credit_to as account, outstanding_amount
from `tabPurchase Invoice` where docstatus = 1 and company = %s
and outstanding_amount > 0 %s""" % ('%s', cond), self.company, as_dict=True)
@frappe.whitelist()
def get_default_bank_cash_account(company, voucher_type):
from erpnext.accounts.utils import get_balance_on
account = frappe.db.get_value("Company", company,
voucher_type=="Bank Voucher" and "default_bank_account" or "default_cash_account")
if account:
return {
"account": account,
"balance": get_balance_on(account)
}
@frappe.whitelist()
def get_payment_entry_from_sales_invoice(sales_invoice):
from erpnext.accounts.utils import get_balance_on
si = frappe.get_doc("Sales Invoice", sales_invoice)
jv = get_payment_entry(si)
jv.remark = 'Payment received against Sales Invoice {0}. {1}'.format(si.name, si.remarks)
# credit customer
jv.get("entries")[0].account = si.debit_to
jv.get("entries")[0].balance = get_balance_on(si.debit_to)
jv.get("entries")[0].credit = si.outstanding_amount
jv.get("entries")[0].against_invoice = si.name
# debit bank
jv.get("entries")[1].debit = si.outstanding_amount
return jv.as_dict()
@frappe.whitelist()
def get_payment_entry_from_purchase_invoice(purchase_invoice):
from erpnext.accounts.utils import get_balance_on
pi = frappe.get_doc("Purchase Invoice", purchase_invoice)
jv = get_payment_entry(pi)
jv.remark = 'Payment against Purchase Invoice {0}. {1}'.format(pi.name, pi.remarks)
# credit supplier
jv.get("entries")[0].account = pi.credit_to
jv.get("entries")[0].balance = get_balance_on(pi.credit_to)
jv.get("entries")[0].debit = pi.outstanding_amount
jv.get("entries")[0].against_voucher = pi.name
# credit bank
jv.get("entries")[1].credit = pi.outstanding_amount
return jv.as_dict()
def get_payment_entry(doc):
bank_account = get_default_bank_cash_account(doc.company, "Bank Voucher")
jv = frappe.new_doc('Journal Voucher')
jv.voucher_type = 'Bank Voucher'
jv.company = doc.company
jv.fiscal_year = doc.fiscal_year
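	# first entries row is the party account (filled in by the caller),
	# the second row is the default bank/cash account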
jv.append("entries")
d2 = jv.append("entries")
if bank_account:
d2.account = bank_account["account"]
d2.balance = bank_account["balance"]
return jv
@frappe.whitelist()
def get_opening_accounts(company):
"""get all balance sheet accounts for opening entry"""
from erpnext.accounts.utils import get_balance_on
accounts = frappe.db.sql_list("""select name from tabAccount
where group_or_ledger='Ledger' and report_type='Balance Sheet' and company=%s""", company)
return [{"account": a, "balance": get_balance_on(a)} for a in accounts]
def get_against_purchase_invoice(doctype, txt, searchfield, start, page_len, filters):
return frappe.db.sql("""select name, credit_to, outstanding_amount, bill_no, bill_date
from `tabPurchase Invoice` where credit_to = %s and docstatus = 1
and outstanding_amount > 0 and %s like %s order by name desc limit %s, %s""" %
("%s", searchfield, "%s", "%s", "%s"),
(filters["account"], "%%%s%%" % txt, start, page_len))
def get_against_sales_invoice(doctype, txt, searchfield, start, page_len, filters):
return frappe.db.sql("""select name, debit_to, outstanding_amount
from `tabSales Invoice` where debit_to = %s and docstatus = 1
and outstanding_amount > 0 and `%s` like %s order by name desc limit %s, %s""" %
("%s", searchfield, "%s", "%s", "%s"),
(filters["account"], "%%%s%%" % txt, start, page_len))
def get_against_jv(doctype, txt, searchfield, start, page_len, filters):
return frappe.db.sql("""select jv.name, jv.posting_date, jv.user_remark
from `tabJournal Voucher` jv, `tabJournal Voucher Detail` jv_detail
where jv_detail.parent = jv.name and jv_detail.account = %s and jv.docstatus = 1
and jv.%s like %s order by jv.name desc limit %s, %s""" %
("%s", searchfield, "%s", "%s", "%s"),
(filters["account"], "%%%s%%" % txt, start, page_len))
@frappe.whitelist()
def get_outstanding(args):
	# parse the JSON-encoded dict from the client instead of eval()-ing
	# untrusted request input
	import json
	args = json.loads(args)
if args.get("doctype") == "Journal Voucher" and args.get("account"):
against_jv_amount = frappe.db.sql("""
select sum(ifnull(debit, 0)) - sum(ifnull(credit, 0))
from `tabJournal Voucher Detail` where parent=%s and account=%s
and ifnull(against_invoice, '')='' and ifnull(against_voucher, '')=''
and ifnull(against_jv, '')=''""", (args['docname'], args['account']))
against_jv_amount = flt(against_jv_amount[0][0]) if against_jv_amount else 0
if against_jv_amount > 0:
return {"credit": against_jv_amount}
else:
return {"debit": -1* against_jv_amount}
elif args.get("doctype") == "Sales Invoice":
return {
"credit": flt(frappe.db.get_value("Sales Invoice", args["docname"],
"outstanding_amount"))
}
elif args.get("doctype") == "Purchase Invoice":
return {
"debit": flt(frappe.db.get_value("Purchase Invoice", args["docname"],
"outstanding_amount"))
}
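# Example (illustrative): the client passes a JSON-encoded dict, e.g.
#   get_outstanding('{"doctype": "Sales Invoice", "docname": "SINV-00042"}')
#   -> {"credit": 2500.0}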
|
eBay/cronus-agent
|
refs/heads/master
|
agent/agent/lib/agent_thread/download_thread.py
|
1
|
#pylint: disable=W0703,W0511,W0402,R0911,R0915,R0912,W0331,W0612,R0904,W0105
'''
Copyright 2014 eBay Software Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
""" Thread to download a package """
from agent.lib import utils, contextutils
from agent.lib.agent_thread.agent_thread import AgentThread
from agent.lib.errors import Errors, FileNotFoundError, AgentException
from agent.lib.package import PackageUtil
from agent.lib.utils import loadPropFile
from random import randint
import json
import logging
import os
import pylons
import time
import traceback
import urlgrabber
LOG = logging.getLogger(__name__)
class DownloadThread(AgentThread):
""" Separate thread to start download """
def __init__(self, threadMgr, packageUri, packageloc, path = None, category = None, skipProp = True):
        # cat = 'DC_DownloadThread' + str(datetime.now())
AgentThread.__init__(self, threadMgr, cat = category, name = 'download_thread')
self.__path = pylons.config['repo_root']
if path is not None:
self.__path = path
# check to see if the package path exists
if (not os.path.isdir(self.__path)):
            msg = 'Package path (%s) does not exist' % self.__path
LOG.error(msg)
raise AgentException(Errors.PACKAGE_PATH_ERROR, msg)
self.__uriDict = PackageUtil.parseUri(packageUri, self.__path, packageloc)
self.__prop = {}
self.__error = None
self.__progress = 0.0
self.__timeouts = None
self.__skipProp = skipProp
def getUriDict(self):
''' Get the package info dictionary '''
return self.__uriDict
def beforeRun(self):
""" set external timeout values if any """
# set timeout
if contextutils.existcontext(self, 'thread_timeout'):
self._timeout = contextutils.getcontext(self, 'thread_timeout', self._timeout)
if contextutils.existcontext(self, 'thread_progress_timeout'):
self._progressTimeout = contextutils.getcontext(self, 'thread_progress_timeout', self._progressTimeout)
def doRun(self):
""" Progress Info:
0 : start
1 - 2 : prop file download
3 - 5 : checking existing data (hash verification)
        6 - 8 : allocating disk space (in case of pre_allocate)
9 - 99 : downloading data
100 : download complete.
"""
try:
self.__startDownload()
except AgentException as exc:
msg = 'Error for package (%s) - %s' % (self.__uriDict['package'], str(exc))
LOG.error(msg)
self._updateStatus(httpStatus = 500, error = exc.getCode(), errorMsg = msg)
except Exception as exc:
msg = 'Unknown error for package (%s) - %s' % (self.__uriDict['package'], str(exc))
LOG.error(msg)
self._updateStatus(httpStatus = 500, error = Errors.UNKNOWN_ERROR, errorMsg = msg)
def __startDownload(self):
""" actual download logic """
try:
LOG.info("Starting package download for package %s" % self.__uriDict['package'])
# check to see if there's an in progress file,
# since PackageMgr guarantees that duplicate threads will not be spawned
# for same pkg, assume an existing thread was killed.
            # attempt to clean up the package and move on
if (os.path.exists(self.__uriDict['inProgressPackagePath'])):
LOG.debug('In progress file (%s) already exists. Will validate and reattempt download if necessary' % self.__uriDict['inProgressPackagePath'])
if os.path.exists(self.__uriDict['packagePath']):
if (os.path.exists(self.__uriDict['propPath']) and
PackageUtil.validateProp(self.__uriDict['propPath']) and
PackageUtil.validatePackage(self.__uriDict['packagePath'], self.__uriDict['propPath'])):
                    msg = 'The package already exists. Will NOT download duplicate package ' + self.__uriDict['packagePath']
LOG.info(msg)
os.utime(self.__uriDict['packagePath'], None)
os.utime(self.__uriDict['propPath'], None)
self._updateStatus(progress = 100)
# NOTE: this is a normal exit not an error!
return
LOG.warning('The package already exists. However package prop (%s) failed validation. Downloading package.' % self.__uriDict['propPath'])
# Delete all traces of package before beginning download
LOG.info('Cleaning up all packages for %s ' % self.__uriDict['packagePath'])
PackageUtil.cleanUpPackage(self.__uriDict['inProgressPackagePath'],
self.__uriDict['packagePath'],
self.__uriDict['propPath'])
AgentThread._updateProgress(self, 0)
if not self.__skipProp:
# First, download .prop file
LOG.info('Starting download of prop file %s - %s' % (self.__uriDict['propUri'], self.__uriDict['propPath']))
self.__download_prop_file()
try:
self.__prop = loadPropFile(self.__uriDict['propPath'])
except FileNotFoundError:
raise AgentException(Errors.DC_MISSING_PROP_FILE,
'Prop file (%s) unable to read or did not parse' % (self.__uriDict['propPath']))
AgentThread._updateProgress(self, 2)
self.__setProgressTimeouts()
if self.__uriDict['scheme'] == 'http':
# try download 3 times, with random sleep
for _ in range(3):
try:
sotimeout = float(pylons.config['download_thread_sotimeout'])
proxies = json.loads(pylons.config['urlgrabber_proxies'])
urlgrabber.urlgrab(self.__uriDict['uri'],
self.__uriDict['inProgressPackagePath'],
checkfunc = None if self.__skipProp else (PackageUtil.validateDownload, (), {}),
progress_obj = DownloadProgress(self),
throttle = float(pylons.config['package_throttle']),
bandwidth = int(pylons.config['package_bandwidth']),
keepalive = 0,
timeout = sotimeout,
proxies = proxies)
break
except Exception as exc:
msg = 'Download error %s - %s' % (str(exc), traceback.format_exc(3))
LOG.warning(msg)
randsleep = randint(30, 60)
time.sleep(randsleep)
else:
                # only the http protocol is supported at the moment
                raise AgentException(Errors.DC_UNSUPPORTED_PROTOCOL, 'Only the http protocol is supported at the moment')
self._checkStop()
if not self.__skipProp:
if (not PackageUtil.validatePackage(self.__uriDict['inProgressPackagePath'], self.__uriDict['propPath'])):
raise AgentException(Errors.DC_FAILED_VALIDATE, 'Package ' + self.__uriDict['packagePath'] + ' failed validation')
os.utime(self.__uriDict['propPath'], None)
utils.rchmod(self.__uriDict['propPath'], "777", 'no')
            LOG.info('Download complete, renaming in-progress file to %s' % self.__uriDict['packagePath'])
os.rename(self.__uriDict['inProgressPackagePath'], self.__uriDict['packagePath'])
os.utime(self.__uriDict['packagePath'], None)
utils.rchmod(self.__uriDict['packagePath'], "777", 'no')
LOG.info("Download complete, Validation completed, updating progress to 100")
self._updateStatus(progress = 100)
        except AgentException as exc:
self._updateStatus(httpStatus = 500, progress = 0, error = exc.getCode(), errorMsg = exc.getMsg())
msg = 'Download error %s - %s' % (str(exc), traceback.format_exc(3))
LOG.error(msg)
raise exc
        except Exception as exc:
self._updateStatus(httpStatus = 500, progress = 0, error = Errors.UNKNOWN_ERROR, errorMsg = str(exc))
msg = 'Unknown download error %s - %s' % (str(exc), traceback.format_exc(3))
LOG.error(msg)
raise exc
finally:
LOG.info("Completed package download for package %s" % self.__uriDict['package'])
def __setProgressTimeouts(self):
""" Setting timeout for download thread. The timeouts uses the size of the package."""
if not self.__skipProp:
timeout = float(self.__prop['size']) / float(pylons.config['download_thread_rate_per_sec'])
timeout = max(timeout, float(pylons.config['download_thread_min_time']))
timeout = min(timeout, float(pylons.config['download_thread_max_time']))
progressTimeout = timeout * float(pylons.config['download_thread_progress_ratio'])
progressTimeout = max(progressTimeout, float(pylons.config['download_thread_min_progress_time']))
self.extendTimeout(timeout)
self.setProgressTimeout(progressTimeout)
self.__timeouts = (timeout, progressTimeout)
else:
self.__timeouts = (self._timeout, self._progressTimeout)
LOG.debug('Using timeout=%s and progress timeout=%s' % self.__timeouts)
def getProgressTimeouts(self):
"""
        Get the timeouts of the download thread. The result is either None or a
        tuple of (total timeout, progress timeout).
"""
return self.__timeouts
def stop(self):
""" stopping client before calling the super method """
        LOG.info('Stop called on download thread; stopping')
AgentThread.stop(self)
def _updateHttpProgress(self, amount_read):
""" custom progress computation """
if not self.__skipProp:
progress = 2 + ((float(amount_read) / float(self.__prop['size'])) * (97))
AgentThread._updateProgress(self, progress)
else:
progress = min(self.getProgress(), 97) + 1
AgentThread._updateProgress(self, progress)
self._checkStop()
def __download_prop_file(self):
""" download prop file and validate """
# retry 3 times download prop file
for _ in range(3):
try:
sotimeout = float(pylons.config['download_thread_sotimeout'])
proxies = json.loads(pylons.config['urlgrabber_proxies'])
urlgrabber.urlgrab(
self.__uriDict['propUri'],
self.__uriDict['propPath'],
keepalive = 0,
timeout = sotimeout,
proxies = proxies)
break
except Exception:
randsleep = randint(30, 60)
time.sleep(randsleep)
if (not os.path.exists(self.__uriDict['propPath'])):
raise AgentException(Errors.DC_MISSING_PROP_FILE,
'Prop file (%s) does not exist' % (self.__uriDict['propPath']))
if not PackageUtil.validateProp(self.__uriDict['propPath']):
raise AgentException(Errors.DC_MISSING_PROP_FILE,
'Prop file (%s) failed validation' % (self.__uriDict['propPath']))
#pylint: disable=W0212
class DownloadProgress(object):
""" object to track the progress of a package """
def __init__(self, thread):
""" constructor """
object.__init__(self)
self.__thread = thread
def start(self, filename = None, url = None, basename = None,
size = None, now = None, text = None):
""" called during the start of the progress """
pass
def update(self, amount_read, now = None):
""" update the progress """
self.__thread._updateHttpProgress(amount_read)
def end(self, amount_read, now = None):
""" end the progress """
pass
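# Usage sketch (simplified; threadMgr, packageUri and packageloc are assumed to
# come from the package manager): the thread downloads the package while
# DownloadProgress maps urlgrabber callbacks into the 9-99 band of doRun():
#   thread = DownloadThread(threadMgr, packageUri, packageloc)
#   thread.start()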
|
8l/beri
|
refs/heads/master
|
cheritest/trunk/tests/cp0/test_tltiu_eq.py
|
2
|
#-
# Copyright (c) 2011 Robert N. M. Watson
# All rights reserved.
#
# This software was developed by SRI International and the University of
# Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
# ("CTSRD"), as part of the DARPA CRASH research programme.
#
# @BERI_LICENSE_HEADER_START@
#
# Licensed to BERI Open Systems C.I.C. (BERI) under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. BERI licenses this
# file to you under the BERI Hardware-Software License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.beri-open-systems.org/legal/license-1-0.txt
#
# Unless required by applicable law or agreed to in writing, Work distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# @BERI_LICENSE_HEADER_END@
#
from beritest_tools import BaseBERITestCase
from nose.plugins.attrib import attr
class test_tltiu_eq(BaseBERITestCase):
@attr('trapi')
def test_handled(self):
self.assertRegisterEqual(self.MIPS.a2, 0, "tltiu trapped when equal to")
|
superbeckgit/dstauffman
|
refs/heads/master
|
latex.py
|
1
|
# -*- coding: utf-8 -*-
r"""
Support functions used to create LaTeX documentation.
Notes
-----
#. Written by David C. Stauffer in Jan 2015, moved to separate file in Jan 2017.
"""
#%% Imports
import doctest
import unittest
#%% Functions - make_preamble
def make_preamble(caption, label, cols, size=r'\small', *, use_mini=False, short_cap=None):
r"""
Writes the table header and preamble.
Parameters
----------
caption : str
Table caption
label : str
LaTeX reference label for table
cols : str
LaTeX string describing columns
size : str, optional, from {r'\tiny', r'\scriptsize', r'\footnotesize', r'\small',
r'\normalsize', r'\large', r'\Large', r'\LARGE', r'\huge', r'\Huge'}
Size of the text within the table, default is \small
use_mini : bool, optional, default is False
Whether to build the table as a minipage or not
short_cap : str, optional
If present, used as optional caption argument for List of Tables caption.
Returns
-------
out : list of str
LaTeX text to build the table header, where each entry in the list is a row of text in the
document
Examples
--------
>>> from dstauffman import make_preamble
>>> out = make_preamble('Table Caption', 'tab:this_label', 'lcc')
>>> print(out) # doctest: +ELLIPSIS
['\\begin{table}[H]', ' \\small', ' \\centering', ' \\caption{Table Caption}%', ...
"""
# check that size is valid
assert size in {r'\tiny', r'\scriptsize', r'\footnotesize', r'\small', r'\normalsize',
r'\large', r'\Large', r'\LARGE', r'\huge', r'\Huge'}
# create caption string
if short_cap is None:
cap_str = r' \caption{' + caption + r'}%'
else:
cap_str = r' \caption[' + short_cap + r']{' + caption + r'}%'
# build table based on minipage or not
if not use_mini:
out = [r'\begin{table}[H]', ' '+size, r' \centering', cap_str, \
r' \label{' + label + r'}', r' \begin{tabular}{' + cols + r'}', r' \toprule']
else:
out = [r'\begin{table}[H]', ' '+size, r' \centering', cap_str, \
r' \label{' + label + r'}', r' \begin{minipage}{\linewidth}', r' \centering', \
r' \begin{tabular}{' + cols + r'}', r' \toprule']
return out
#%% Functions - make_conclusion
def make_conclusion(*, use_mini=False):
r"""
Writes closing tags at the end of the table.
Parameters
----------
use_mini : bool, optional, default is False
Whether to conclude the table as part of a minipage
Returns
-------
out : list of str
LaTeX text to build the table footer, where each entry in the list is a row of text in the
document
Examples
--------
>>> from dstauffman import make_conclusion
>>> out = make_conclusion()
>>> print(out)
[' \\bottomrule', ' \\end{tabular}', '\\end{table}', '']
"""
if not use_mini:
out = [r' \bottomrule', r' \end{tabular}', r'\end{table}', '']
else:
out = [r' \bottomrule', r' \end{tabular}', r' \end{minipage}', r'\end{table}', '']
return out
#%% Functions - bins_to_str_ranges
def bins_to_str_ranges(bins, dt=1, cutoff=1000):
r"""
Takes a given bin vector, and returns a string representation with both boundaries.
Parameters
----------
bins : array_like
Boundaries for the bins
dt : numeric scalar
Amount to subtract from the right side boundary, default is 1
cutoff : numeric scalar
Value at which to consider everything above it as unbounded
Returns
-------
out : list of str
String representations of the bins
Notes
-----
#. This function works on ages, years, CD4 bins or other similar things.
Examples
--------
>>> from dstauffman import bins_to_str_ranges
>>> import numpy as np
>>> age_bins = np.array([0, 20, 40, 60, 100000], dtype=int)
>>> age_strs = bins_to_str_ranges(age_bins)
>>> print(age_strs)
['0-19', '20-39', '40-59', '60+']
"""
# preallocate output
out = []
# loop through ages
for r in range(len(bins)-1):
# alias the boundaries
value1 = bins[r]
value2 = bins[r+1]-dt
# check for large values, and replace appropriately
if value1 == value2:
this_str = '{:g}'.format(value1)
elif value2 < cutoff:
this_str = '{:g}-{:g}'.format(value1, value2)
else:
this_str = '{:g}+'.format(value1)
# save this result
out.append(this_str)
# return everything combined as a list
return out
#%% Unit test
if __name__ == '__main__':
unittest.main(module='tests.test_latex', exit=False)
doctest.testmod(verbose=False)
|
gVallverdu/pymatgen
|
refs/heads/master
|
pymatgen/electronic_structure/tests/test_boltztrap2.py
|
3
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import os
import warnings
from pymatgen.io.vasp import Vasprun
from pymatgen.electronic_structure.core import Spin, OrbitalType
import numpy as np
from monty.serialization import loadfn
try:
from pymatgen.electronic_structure.boltztrap2 import BandstructureLoader, \
VasprunLoader, BztInterpolator, BztTransportProperties, BztPlotter, \
merge_up_down_doses
BOLTZTRAP2_PRESENT = True
except Exception:
BOLTZTRAP2_PRESENT = False
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files/boltztrap2/')
vrunfile = os.path.join(test_dir, 'vasprun.xml')
vrun = Vasprun(vrunfile, parse_projected_eigen=True)
vrunfile_sp = os.path.join(test_dir, 'vasprun_spin.xml')
vrun_sp = Vasprun(vrunfile_sp, parse_projected_eigen=True)
@unittest.skipIf(not BOLTZTRAP2_PRESENT, "No boltztrap2, skipping tests...")
class BandstructureLoaderTest(unittest.TestCase):
def setUp(self):
bs = loadfn(os.path.join(test_dir, "PbTe_bandstructure.json"))
bs_sp = loadfn(os.path.join(test_dir, "N2_bandstructure.json"))
self.loader = BandstructureLoader(bs, vrun.structures[-1])
self.assertIsNotNone(self.loader)
self.loader_sp_up = BandstructureLoader(bs_sp, vrun_sp.structures[-1], spin=1)
self.loader_sp_dn = BandstructureLoader(bs_sp, vrun_sp.structures[-1], spin=-1)
self.assertTupleEqual(self.loader_sp_up.ebands.shape, (12, 198))
self.assertTupleEqual(self.loader_sp_dn.ebands.shape, (12, 198))
self.assertIsNotNone(self.loader_sp_dn)
self.assertIsNotNone(self.loader_sp_up)
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_properties(self):
self.assertTupleEqual(self.loader.ebands.shape, (20, 120))
self.assertAlmostEqual(self.loader.fermi, 0.185266535678, 5)
self.assertAlmostEqual(self.loader.structure.lattice.a, 4.64303565932548, 5)
def test_get_volume(self):
self.assertAlmostEqual(self.loader.get_volume(), 477.6256714925874, 5)
def test_set_upper_lower_bands(self):
min_bnd = min(self.loader_sp_up.ebands.min(), self.loader_sp_dn.ebands.min())
max_bnd = max(self.loader_sp_up.ebands.max(), self.loader_sp_dn.ebands.max())
self.loader_sp_up.set_upper_lower_bands(min_bnd, max_bnd)
self.loader_sp_dn.set_upper_lower_bands(min_bnd, max_bnd)
self.assertTupleEqual(self.loader_sp_up.ebands.shape, (14, 198))
self.assertTupleEqual(self.loader_sp_dn.ebands.shape, (14, 198))
@unittest.skipIf(not BOLTZTRAP2_PRESENT, "No boltztrap2, skipping tests...")
class VasprunLoaderTest(unittest.TestCase):
def setUp(self):
self.loader = VasprunLoader(vrun)
self.assertTupleEqual(self.loader.proj.shape, (120, 20, 2, 9))
self.assertIsNotNone(self.loader)
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_properties(self):
self.assertTupleEqual(self.loader.ebands.shape, (20, 120))
self.assertAlmostEqual(self.loader.fermi, 0.185266535678, 5)
self.assertAlmostEqual(self.loader.structure.lattice.a, 4.64303565932548, 5)
def test_get_volume(self):
self.assertAlmostEqual(self.loader.get_volume(), 477.6256714925874, 5)
def test_from_file(self):
self.loader = VasprunLoader().from_file(vrunfile)
self.assertIsNotNone(self.loader)
@unittest.skipIf(not BOLTZTRAP2_PRESENT, "No boltztrap2, skipping tests...")
class BztInterpolatorTest(unittest.TestCase):
def setUp(self):
self.loader = VasprunLoader(vrun)
self.assertTupleEqual(self.loader.proj.shape, (120, 20, 2, 9))
self.bztInterp = BztInterpolator(self.loader, lpfac=2)
self.assertIsNotNone(self.bztInterp)
warnings.simplefilter("ignore")
bs_sp = loadfn(os.path.join(test_dir, "N2_bandstructure.json"))
loader_sp_up = BandstructureLoader(bs_sp, vrun_sp.structures[-1], spin=1)
loader_sp_dn = BandstructureLoader(bs_sp, vrun_sp.structures[-1], spin=-1)
min_bnd = min(loader_sp_up.ebands.min(), loader_sp_dn.ebands.min())
max_bnd = max(loader_sp_up.ebands.max(), loader_sp_dn.ebands.max())
loader_sp_up.set_upper_lower_bands(min_bnd, max_bnd)
loader_sp_dn.set_upper_lower_bands(min_bnd, max_bnd)
self.bztI_up = BztInterpolator(loader_sp_up, lpfac=2, energy_range=np.inf, curvature=False)
self.bztI_dn = BztInterpolator(loader_sp_dn, lpfac=2, energy_range=np.inf, curvature=False)
def tearDown(self):
warnings.simplefilter("default")
def test_properties(self):
self.assertTupleEqual(self.bztInterp.cband.shape, (5, 3, 3, 3, 29791))
self.assertTupleEqual(self.bztInterp.eband.shape, (5, 29791))
self.assertTupleEqual(self.bztInterp.coeffs.shape, (5, 322))
self.assertEqual(self.bztInterp.nemax, 12)
def test_get_band_structure(self):
sbs = self.bztInterp.get_band_structure()
self.assertIsNotNone(sbs)
self.assertTupleEqual(sbs.bands[Spin.up].shape, (5, 137))
def test_tot_dos(self):
tot_dos = self.bztInterp.get_dos(T=200, npts_mu=100)
self.assertIsNotNone(tot_dos)
self.assertEqual(len(tot_dos.energies), 100)
self.assertAlmostEqual(tot_dos.densities[Spin.up][0], 1.42859939, 5)
dos_up = self.bztI_up.get_dos(partial_dos=False, npts_mu=100)
dos_dn = self.bztI_dn.get_dos(partial_dos=False, npts_mu=100)
cdos = merge_up_down_doses(dos_up, dos_dn)
self.assertAlmostEqual(cdos.densities[Spin.down][50], 92.87836778, 5)
self.assertAlmostEqual(cdos.densities[Spin.up][45], 9.564067, 5)
self.assertEqual(len(cdos.energies), 100)
def test_tot_proj_dos(self):
tot_proj_dos = self.bztInterp.get_dos(partial_dos=True, T=200, npts_mu=100)
self.assertIsNotNone(tot_proj_dos)
self.assertEqual(len(tot_proj_dos.get_spd_dos().values()), 3)
pdos = tot_proj_dos.get_spd_dos()[OrbitalType.s].densities[Spin.up][0]
self.assertAlmostEqual(pdos, 15.474392020, 5)
@unittest.skipIf(not BOLTZTRAP2_PRESENT, "No boltztrap2, skipping tests...")
class BztTransportPropertiesTest(unittest.TestCase):
def setUp(self):
loader = VasprunLoader(vrun)
bztInterp = BztInterpolator(loader, lpfac=2)
self.bztTransp = BztTransportProperties(bztInterp, temp_r=np.arange(300, 600, 100))
self.assertIsNotNone(self.bztTransp)
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_properties(self):
for p in [self.bztTransp.Conductivity_mu, self.bztTransp.Seebeck_mu,
self.bztTransp.Kappa_mu, self.bztTransp.Effective_mass_mu,
self.bztTransp.Power_Factor_mu]:
self.assertTupleEqual(p.shape, (3, 3670, 3, 3))
for p in [self.bztTransp.Carrier_conc_mu, self.bztTransp.Hall_carrier_conc_trace_mu]:
self.assertTupleEqual(p.shape, (3, 3670))
def test_compute_properties_doping(self):
self.bztTransp.compute_properties_doping(doping=10. ** np.arange(20, 22))
for p in [self.bztTransp.Conductivity_doping, self.bztTransp.Seebeck_doping,
self.bztTransp.Kappa_doping, self.bztTransp.Effective_mass_doping,
self.bztTransp.Power_Factor_doping]:
self.assertTupleEqual(p['n'].shape, (3, 2, 3, 3))
@unittest.skipIf(not BOLTZTRAP2_PRESENT, "No boltztrap2, skipping tests...")
class BztPlotterTest(unittest.TestCase):
def test_plot(self):
loader = VasprunLoader(vrun)
bztInterp = BztInterpolator(loader, lpfac=2)
bztTransp = BztTransportProperties(bztInterp, temp_r=np.arange(300, 600, 100))
self.bztPlotter = BztPlotter(bztTransp, bztInterp)
self.assertIsNotNone(self.bztPlotter)
fig = self.bztPlotter.plot_props('S', 'mu', 'temp', temps=[300, 500])
self.assertIsNotNone(fig)
fig = self.bztPlotter.plot_bands()
self.assertIsNotNone(fig)
fig = self.bztPlotter.plot_dos()
self.assertIsNotNone(fig)
if __name__ == '__main__':
unittest.main()
|
Spiderlover/Toontown
|
refs/heads/master
|
toontown/ai/DistributedPolarPlaceEffectMgrAI.py
|
3
|
from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DistributedObjectAI import DistributedObjectAI
from direct.distributed.ClockDelta import *
from direct.fsm.FSM import FSM
import time
class DistributedPolarPlaceEffectMgrAI(DistributedObjectAI, FSM):
notify = DirectNotifyGlobal.directNotify.newCategory("DistributedPolarPlaceEffectMgrAI")
def __init__(self, air):
DistributedObjectAI.__init__(self, air)
FSM.__init__(self, 'ResistanceFSM')
self.air = air
def enterOff(self):
self.requestDelete()
def addPolarPlaceEffect(self):
avId = self.air.getAvatarIdFromSender()
av = self.air.doId2do.get(avId)
if not av: return
expireTime = int((time.time()/60) + 0.5) + 60
av.b_setCheesyEffect(13, 3000, expireTime)
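        # Note: expireTime is expressed in minutes since the epoch (rounded to
        # the nearest minute) plus a 60-minute duration for cheesy effect 13.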
|
marma/rdflib
|
refs/heads/master
|
rdflib/plugins/parsers/pyRdfa/termorcurie.py
|
22
|
# -*- coding: utf-8 -*-
"""
Management of vocabularies, terms, and their mapping to URI-s. The main class of this module (L{TermOrCurie}) is,
conceptually, part of the overall state of processing at a node (L{state.ExecutionContext}) but putting it into a separate
module makes it easier to maintain.
@summary: Management of vocabularies, terms, and their mapping to URI-s.
@requires: U{RDFLib package<http://rdflib.net>}
@organization: U{World Wide Web Consortium<http://www.w3.org>}
@author: U{Ivan Herman<a href="http://www.w3.org/People/Ivan/">}
@license: This software is available for use under the
U{W3C® SOFTWARE NOTICE AND LICENSE<href="http://www.w3.org/Consortium/Legal/2002/copyright-software-20021231">}
@var XHTML_PREFIX: prefix for the XHTML vocabulary URI (set to 'xhv')
@var XHTML_URI: URI prefix of the XHTML vocabulary
@var ncname: Regular expression object for NCNAME
@var termname: Regular expression object for a term
@var xml_application_media_type: Regular expression object for a general XML application media type
"""
"""
$Id: termorcurie.py,v 1.12 2013-10-16 11:48:54 ivan Exp $
$Date: 2013-10-16 11:48:54 $
"""
import re, sys
import xml.dom.minidom
import random
if sys.version_info[0] >= 3 :
from urllib.parse import urlsplit
else :
from urlparse import urlsplit
import rdflib
from rdflib import URIRef
from rdflib import Literal
from rdflib import BNode
from rdflib import Namespace
if rdflib.__version__ >= "3.0.0" :
from rdflib import Graph
from rdflib import RDF as ns_rdf
from rdflib import RDFS as ns_rdfs
else :
from rdflib.Graph import Graph
from rdflib.RDFS import RDFSNS as ns_rdfs
from rdflib.RDF import RDFNS as ns_rdf
from .options import Options
from .utils import quote_URI, URIOpener
from .host import MediaTypes, HostLanguage, predefined_1_0_rel, warn_xmlns_usage
from . import IncorrectPrefixDefinition, RDFA_VOCAB, UnresolvableReference, PrefixRedefinitionWarning
from . import ns_rdfa
from . import err_redefining_URI_as_prefix
from . import err_xmlns_deprecated
from . import err_bnode_local_prefix
from . import err_col_local_prefix
from . import err_missing_URI_prefix
from . import err_invalid_prefix
from . import err_no_default_prefix
from . import err_prefix_and_xmlns
from . import err_non_ncname_prefix
from . import err_absolute_reference
from . import err_query_reference
from . import err_fragment_reference
from . import err_prefix_redefinition
# Regular expression object for NCNAME
ncname = re.compile("^[A-Za-z][A-Za-z0-9._-]*$")
# Regular expression object for term name
termname = re.compile("^[A-Za-z]([A-Za-z0-9._-]|/)*$")
# Regular expression object for a general XML application media type
xml_application_media_type = re.compile(r"application/[a-zA-Z0-9]+\+xml")
XHTML_PREFIX = "xhv"
XHTML_URI = "http://www.w3.org/1999/xhtml/vocab#"
#### Managing blank nodes for CURIE-s: mapping from local names to blank nodes.
_bnodes = {}
_empty_bnode = BNode()
####
class InitialContext :
"""
Get the initial context values. In most cases this class has an empty content, except for the
top level (in case of RDFa 1.1). Each L{TermOrCurie} class has one instance of this class. It provides initial
mappings for terms, namespace prefixes, etc, that the top level L{TermOrCurie} instance uses for its own initialization.
@ivar terms: collection of all term mappings
@type terms: dictionary
@ivar ns: namespace mapping
@type ns: dictionary
@ivar vocabulary: default vocabulary
@type vocabulary: string
"""
def __init__(self, state, top_level) :
"""
@param state: the state behind this term mapping
@type state: L{state.ExecutionContext}
@param top_level : whether this is the top node of the DOM tree (the only place where initial contexts are handled)
@type top_level : boolean
"""
self.state = state
# This is to store the local terms
self.terms = {}
# This is to store the local Namespaces (a.k.a. prefixes)
self.ns = {}
# Default vocabulary
self.vocabulary = None
if state.rdfa_version < "1.1" or top_level == False :
return
from .initialcontext import initial_context as context_data
from .host import initial_contexts as context_ids
from .host import default_vocabulary
for id in context_ids[state.options.host_language] :
# This gives the id of a initial context, valid for this media type:
data = context_data[id]
# Merge the context data with the overall definition
if state.options.host_language in default_vocabulary :
self.vocabulary = default_vocabulary[state.options.host_language]
elif data.vocabulary != "" :
self.vocabulary = data.vocabulary
for key in data.terms :
self.terms[key] = URIRef(data.terms[key])
for key in data.ns :
self.ns[key] = (Namespace(data.ns[key]),False)
##################################################################################################################
class TermOrCurie :
"""
Wrapper around vocabulary management, ie, mapping a term to a URI, as well as a CURIE to a URI. Each instance of this class belongs to a
"state", instance of L{state.ExecutionContext}. Context definitions are managed at initialization time.
(In fact, this class is, conceptually, part of the overall state at a node, and has been separated here for an
easier maintenance.)
The class takes care of the stack-like behavior of vocabulary items, ie, inheriting everything that is possible
from the "parent". At initialization time, this works through the prefix definitions (i.e., C{@prefix} or C{@xmln:} attributes)
and/or C{@vocab} attributes.
@ivar state: State to which this instance belongs
@type state: L{state.ExecutionContext}
@ivar graph: The RDF Graph under generation
@type graph: rdflib.Graph
@ivar terms: mapping from terms to URI-s
@type terms: dictionary
@ivar ns: namespace declarations, ie, mapping from prefixes to URIs
@type ns: dictionary
@ivar default_curie_uri: URI for a default CURIE
"""
def __init__(self, state, graph, inherited_state) :
"""Initialize the vocab bound to a specific state.
@param state: the state to which this vocab instance belongs to
@type state: L{state.ExecutionContext}
@param graph: the RDF graph being worked on
@type graph: rdflib.Graph
@param inherited_state: the state inherited by the current state. 'None' if this is the top level state.
@type inherited_state: L{state.ExecutionContext}
"""
def check_prefix(pr) :
from . import uri_schemes
if pr in uri_schemes :
# The prefix being defined is a registered URI scheme, better avoid it...
state.options.add_warning(err_redefining_URI_as_prefix % pr, node=state.node.nodeName)
self.state = state
self.graph = graph
# --------------------------------------------------------------------------------
# This is set to non-void only on the top level and in the case of 1.1
default_vocab = InitialContext(self.state, inherited_state == None)
# Set the default CURIE URI
if inherited_state == None :
# This is the top level...
self.default_curie_uri = Namespace(XHTML_URI)
# self.graph.bind(XHTML_PREFIX, self.default_curie_uri)
else :
self.default_curie_uri = inherited_state.term_or_curie.default_curie_uri
# --------------------------------------------------------------------------------
# Set the default term URI
		# This is a 1.1 feature, ie, should be ignored if the version is < 1.1
if state.rdfa_version >= "1.1" :
# that is the absolute default setup...
if inherited_state == None :
self.default_term_uri = None
else :
self.default_term_uri = inherited_state.term_or_curie.default_term_uri
# see if the initial context has defined a default vocabulary:
if default_vocab.vocabulary :
self.default_term_uri = default_vocab.vocabulary
# see if there is local vocab that would override previous settings
# However, care should be taken with the vocab="" value that should not become a URI...
			# Indeed, this value is used to 'wipe out', ie, get back to the default vocabulary...
if self.state.node.hasAttribute("vocab") and self.state.node.getAttribute("vocab") == "" :
self.default_term_uri = default_vocab.vocabulary
else :
def_term_uri = self.state.getURI("vocab")
if def_term_uri and def_term_uri != "" :
self.default_term_uri = def_term_uri
self.graph.add((URIRef(self.state.base),RDFA_VOCAB,URIRef(def_term_uri)))
else :
self.default_term_uri = None
# --------------------------------------------------------------------------------
# The simpler case: terms, adding those that have been defined by a possible initial context
if inherited_state is None :
# this is the vocabulary belonging to the top level of the tree!
self.terms = {}
if state.rdfa_version >= "1.1" :
# Simply get the terms defined by the default vocabularies. There is no need for merging
for key in default_vocab.terms :
self.terms[key] = default_vocab.terms[key]
else :
# The terms are hardwired...
for key in predefined_1_0_rel :
self.terms[key] = URIRef(XHTML_URI + key)
else :
# just refer to the inherited terms
self.terms = inherited_state.term_or_curie.terms
#-----------------------------------------------------------------
# the locally defined namespaces
dict = {}
# locally defined xmlns namespaces, necessary for correct XML Literal generation
xmlns_dict = {}
# Add the locally defined namespaces using the xmlns: syntax
for i in range(0, state.node.attributes.length) :
attr = state.node.attributes.item(i)
if attr.name.find('xmlns:') == 0 :
# yep, there is a namespace setting
prefix = attr.localName
if prefix != "" : # exclude the top level xmlns setting...
if state.rdfa_version >= "1.1" and state.options.host_language in warn_xmlns_usage :
state.options.add_warning(err_xmlns_deprecated % prefix, IncorrectPrefixDefinition, node=state.node.nodeName)
if prefix == "_" :
state.options.add_warning(err_bnode_local_prefix, IncorrectPrefixDefinition, node=state.node.nodeName)
elif prefix.find(':') != -1 :
state.options.add_warning(err_col_local_prefix % prefix, IncorrectPrefixDefinition, node=state.node.nodeName)
else :
# quote the URI, ie, convert special characters into %.. This is
# true, for example, for spaces
uri = quote_URI(attr.value, state.options)
# create a new RDFLib Namespace entry
ns = Namespace(uri)
# Add an entry to the dictionary if not already there (priority is left to right!)
if state.rdfa_version >= "1.1" :
pr = prefix.lower()
else :
pr = prefix
dict[pr] = ns
xmlns_dict[pr] = ns
self.graph.bind(pr,ns)
check_prefix(pr)
# Add the locally defined namespaces using the @prefix syntax
# this may override the definition @xmlns
if state.rdfa_version >= "1.1" and state.node.hasAttribute("prefix") :
pr = state.node.getAttribute("prefix")
if pr != None :
# separator character is whitespace
pr_list = pr.strip().split()
# range(0, len(pr_list), 2)
for i in range(len(pr_list) - 2, -1, -2) :
prefix = pr_list[i]
# see if there is a URI at all
if i == len(pr_list) - 1 :
state.options.add_warning(err_missing_URI_prefix % (prefix,pr), node=state.node.nodeName)
break
else :
value = pr_list[i+1]
# see if the value of prefix is o.k., ie, there is a ':' at the end
if prefix[-1] != ':' :
state.options.add_warning(err_invalid_prefix % (prefix,pr), IncorrectPrefixDefinition, node=state.node.nodeName)
continue
elif prefix == ":" :
state.options.add_warning(err_no_default_prefix % pr, IncorrectPrefixDefinition, node=state.node.nodeName)
continue
else :
prefix = prefix[:-1]
uri = Namespace(quote_URI(value, state.options))
if prefix == "" :
#something to be done here
self.default_curie_uri = uri
elif prefix == "_" :
state.options.add_warning(err_bnode_local_prefix, IncorrectPrefixDefinition, node=state.node.nodeName)
else :
# last check: is the prefix an NCNAME?
if ncname.match(prefix) :
real_prefix = prefix.lower()
dict[real_prefix] = uri
self.graph.bind(real_prefix,uri)
# Additional warning: is this prefix overriding an existing xmlns statement with a different URI? if
# so, that may lead to discrepancies between an RDFa 1.0 and RDFa 1.1 run...
if (prefix in xmlns_dict and xmlns_dict[prefix] != uri) or (real_prefix in xmlns_dict and xmlns_dict[real_prefix] != uri) :
state.options.add_warning(err_prefix_and_xmlns % (real_prefix,real_prefix), node=state.node.nodeName)
check_prefix(real_prefix)
else :
state.options.add_warning(err_non_ncname_prefix % (prefix,pr), IncorrectPrefixDefinition, node=state.node.nodeName)
# See if anything has been collected at all.
# If not, the namespaces of the incoming state is
# taken over by reference. Otherwise that is copied to the
# the local dictionary
if inherited_state == None :
self.default_prefixes = default_vocab.ns
inherited_prefixes = {}
else :
self.default_prefixes = inherited_state.term_or_curie.default_prefixes
inherited_prefixes = inherited_state.term_or_curie.ns
if len(dict) == 0 :
self.ns = inherited_prefixes
else :
self.ns = {}
for key in inherited_prefixes : self.ns[key] = inherited_prefixes[key]
for key in dict :
if (key in inherited_prefixes and dict[key] != inherited_prefixes[key]) or (key in self.default_prefixes and dict[key] != self.default_prefixes[key][0]) :
state.options.add_warning(err_prefix_redefinition % key, PrefixRedefinitionWarning, node=state.node.nodeName)
self.ns[key] = dict[key]
# the xmlns prefixes have to be stored separately, again for XML Literal generation
self.xmlns = {}
if len(xmlns_dict) == 0 and inherited_state :
self.xmlns = inherited_state.term_or_curie.xmlns
else :
if inherited_state :
for key in inherited_state.term_or_curie.xmlns : self.xmlns[key] = inherited_state.term_or_curie.xmlns[key]
for key in xmlns_dict : self.xmlns[key] = xmlns_dict[key]
else :
self.xmlns = xmlns_dict
# end __init__
def _check_reference(self, val) :
"""Checking the CURIE reference for correctness. It is probably not 100% foolproof, but may take care
of some of the possible errors. See the URI RFC for the details.
"""
def char_check(s, not_allowed = ['#','[',']']) :
for c in not_allowed :
if s.find(c) != -1 : return False
return True
# Creating an artificial http URI to fool the urlparse module...
scheme, netloc, url, query, fragment = urlsplit('http:' + val)
if netloc != "" and self.state.rdfa_version >= "1.1" :
self.state.options.add_warning(err_absolute_reference % (netloc, val), UnresolvableReference, node=self.state.node.nodeName)
return False
elif not char_check(query) :
self.state.options.add_warning(err_query_reference % (query, val), UnresolvableReference, node=self.state.node.nodeName)
return False
elif not char_check(fragment) :
self.state.options.add_warning(err_fragment_reference % (fragment, val), UnresolvableReference, node=self.state.node.nodeName)
return False
else :
return True
def CURIE_to_URI(self, val) :
"""CURIE to URI mapping.
This method does I{not} take care of the last step of CURIE processing, ie, the fact that if
it does not have a CURIE then the value is used a URI. This is done on the caller's side, because this has
to be combined with base, for example. The method I{does} take care of BNode processing, though, ie,
CURIE-s of the form "_:XXX".
@param val: the full CURIE
@type val: string
@return: URIRef of a URI or None.
"""
# Just to be on the safe side:
if val == "" :
return None
elif val == ":" :
if self.default_curie_uri :
return URIRef(self.default_curie_uri)
else :
return None
# See if this is indeed a valid CURIE, ie, it can be split by a colon
curie_split = val.split(':',1)
if len(curie_split) == 1 :
# there is no ':' character in the string, ie, it is not a valid CURIE
return None
else :
if self.state.rdfa_version >= "1.1" :
prefix = curie_split[0].lower()
else :
prefix = curie_split[0]
reference = curie_split[1]
#if len(reference) > 0 :
# if self.state.rdfa_version >= "1.1" and (len(prefix) == 0 or prefix in self.ns) and reference.startswith('//') :
# # This has been defined as illegal in RDFa 1.1
# self.state.options.add_warning(err_absolute_reference % (reference, val), UnresolvableReference, node=self.state.node.nodeName)
# return None
# if reference[0] == ":" :
# return None
# first possibility: empty prefix
if len(prefix) == 0 :
if self.default_curie_uri and self._check_reference(reference) :
return self.default_curie_uri[reference]
else :
return None
else :
# prefix is non-empty; can be a bnode
if prefix == "_" :
# yep, BNode processing. There is a difference whether the reference is empty or not...
if len(reference) == 0 :
return _empty_bnode
else :
# see if this variable has been used before for a BNode
if reference in _bnodes :
return _bnodes[reference]
else :
# a new bnode...
retval = BNode()
_bnodes[reference] = retval
return retval
# check if the prefix is a valid NCNAME
elif ncname.match(prefix) :
# see if there is a binding for this:
if prefix in self.ns and self._check_reference(reference) :
# yep, a binding has been defined!
if len(reference) == 0 :
return URIRef(str(self.ns[prefix]))
else :
return self.ns[prefix][reference]
elif prefix in self.default_prefixes and self._check_reference(reference) :
# this has been defined through the default context
if len(reference) == 0 :
return URIRef(str(self.default_prefixes[prefix][0]))
else :
(ns,used) = self.default_prefixes[prefix]
# lazy binding of prefixes (to avoid unnecessary prefix definitions in the serializations at the end...)
if not used :
self.graph.bind(prefix,ns)
self.default_prefixes[prefix] = (ns,True)
return ns[reference]
else :
# no definition for this thing...
return None
else :
return None
# end CURIE_to_URI
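	# Example (illustrative): with self.ns = {'foaf': Namespace('http://xmlns.com/foaf/0.1/')},
	# CURIE_to_URI('foaf:name') yields URIRef('http://xmlns.com/foaf/0.1/name'),
	# CURIE_to_URI('_:x') yields a cached BNode, and CURIE_to_URI('name')
	# yields None because a CURIE must contain a colon.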
def term_to_URI(self, term) :
"""A term to URI mapping, where term is a simple string and the corresponding
URI is defined via the @vocab (ie, default term uri) mechanism. Returns None if term is not defined
@param term: string
@return: an RDFLib URIRef instance (or None)
"""
if len(term) == 0 : return None
if termname.match(term) :
# It is a valid NCNAME
# First of all, a @vocab nukes everything. That has to be done first...
if self.default_term_uri != None :
return URIRef(self.default_term_uri + term)
# For default terms, the algorithm is (see 7.4.3 of the document): first make a case sensitive match;
# if that fails than make a case insensive one
# 1. simple, case sensitive test:
if term in self.terms :
# yep, term is a valid key as is
# lazy binding of the xhv prefix for terms...
self.graph.bind(XHTML_PREFIX, XHTML_URI)
return self.terms[term]
# 2. case insensitive test
for defined_term in self.terms :
if term.lower() == defined_term.lower() :
# lazy binding of the xhv prefix for terms...
self.graph.bind(XHTML_PREFIX, XHTML_URI)
return self.terms[defined_term]
# If it got here, it is all wrong...
return None
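	# Example (illustrative): with @vocab="http://schema.org/" in scope,
	# term_to_URI('name') yields URIRef('http://schema.org/name'); without a
	# @vocab, predefined terms (e.g. the XHTML vocabulary in RDFa 1.0) are
	# looked up in self.terms, case-sensitively first, then case-insensitively.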
|
solashirai/edx-platform
|
refs/heads/master
|
common/lib/xmodule/xmodule/library_root_xblock.py
|
42
|
"""
'library' XBlock (LibraryRoot)
"""
import logging
from xmodule.studio_editable import StudioEditableModule
from xblock.fields import Scope, String, List, Boolean
from xblock.fragment import Fragment
from xblock.core import XBlock
log = logging.getLogger(__name__)
# Make '_' a no-op so we can scrape strings. Using lambda instead of
# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file
_ = lambda text: text
class LibraryRoot(XBlock):
"""
The LibraryRoot is the root XBlock of a content library. All other blocks in
the library are its children. It contains metadata such as the library's
display_name.
"""
display_name = String(
help=_("Enter the name of the library as it should appear in Studio."),
default="Library",
display_name=_("Library Display Name"),
scope=Scope.settings
)
advanced_modules = List(
display_name=_("Advanced Module List"),
help=_("Enter the names of the advanced components to use in your library."),
scope=Scope.settings,
xml_node=True,
)
show_children_previews = Boolean(
display_name="Hide children preview",
help="Choose if preview of library contents is shown",
scope=Scope.user_state,
default=True
)
has_children = True
has_author_view = True
def __unicode__(self):
return u"Library: {}".format(self.display_name)
def __str__(self):
return unicode(self).encode('utf-8')
def author_view(self, context):
"""
Renders the Studio preview view.
"""
fragment = Fragment()
self.render_children(context, fragment, can_reorder=False, can_add=True)
return fragment
def render_children(self, context, fragment, can_reorder=False, can_add=False): # pylint: disable=unused-argument
"""
Renders the children of the module with HTML appropriate for Studio. Reordering is not supported.
"""
contents = []
paging = context.get('paging', None)
children_count = len(self.children) # pylint: disable=no-member
item_start, item_end = 0, children_count
# TODO sort children
if paging:
page_number = paging.get('page_number', 0)
raw_page_size = paging.get('page_size', None)
page_size = raw_page_size if raw_page_size is not None else children_count
item_start, item_end = page_size * page_number, page_size * (page_number + 1)
children_to_show = self.children[item_start:item_end] # pylint: disable=no-member
force_render = context.get('force_render', None)
for child_key in children_to_show:
# Children must have a separate context from the library itself. Make a copy.
child_context = context.copy()
child_context['show_preview'] = self.show_children_previews
child_context['can_edit_visibility'] = False
child = self.runtime.get_block(child_key)
child_view_name = StudioEditableModule.get_preview_view_name(child)
if unicode(child.location) == force_render:
child_context['show_preview'] = True
if child_context['show_preview']:
rendered_child = self.runtime.render_child(child, child_view_name, child_context)
else:
rendered_child = self.runtime.render_child_placeholder(child, child_view_name, child_context)
fragment.add_frag_resources(rendered_child)
contents.append({
'id': unicode(child.location),
'content': rendered_child.content,
})
fragment.add_content(
self.runtime.render_template("studio_render_paged_children_view.html", {
'items': contents,
'xblock_context': context,
'can_add': can_add,
'first_displayed': item_start,
'total_children': children_count,
'displayed_children': len(children_to_show),
'previews': self.show_children_previews
})
)
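    # Example context (illustrative): passing
    #   {'paging': {'page_number': 1, 'page_size': 10}}
    # renders children 10..19; 'force_render' may name a single child location
    # whose preview is rendered even when show_children_previews is False.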
@property
def display_org_with_default(self):
"""
Org display names are not implemented. This just provides API compatibility with CourseDescriptor.
Always returns the raw 'org' field from the key.
"""
return self.scope_ids.usage_id.course_key.org
@property
def display_number_with_default(self):
"""
Display numbers are not implemented. This just provides API compatibility with CourseDescriptor.
Always returns the raw 'library' field from the key.
"""
return self.scope_ids.usage_id.course_key.library
@XBlock.json_handler
def trigger_previews(self, request_body, suffix): # pylint: disable=unused-argument
""" Enable or disable previews in studio for library children. """
self.show_children_previews = request_body.get('showChildrenPreviews', self.show_children_previews)
return {'showChildrenPreviews': self.show_children_previews}
|
curbyourlitter/curbyourlitter-alley
|
refs/heads/master
|
curbyourlitter_alley/canrequests/management/commands/cleancanrequests.py
|
1
|
from datetime import timedelta
from django.core.management.base import BaseCommand
from django.utils.timezone import now
from ...models import CanRequest
class Command(BaseCommand):
    help = 'Clean can requests that are incomplete'
def handle(self, *args, **options):
can_requests = CanRequest.objects.filter(
geom=None,
added__lt=now() - timedelta(minutes=5)
)
for can_request in can_requests:
can_request.delete()
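# Usage (standard Django management command invocation):
#   python manage.py cleancanrequests
# Intended to be run periodically (e.g. from cron) to drop requests that never
# received a geometry within five minutes of being added.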
|
yland/mailman3
|
refs/heads/develop
|
src/mailman/handlers/subject_prefix.py
|
8
|
# Copyright (C) 2014-2015 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
"""Subject header prefix munging."""
__all__ = [
'SubjectPrefix',
]
import re
from email.header import Header, make_header, decode_header
from mailman.core.i18n import _
from mailman.interfaces.handler import IHandler
from zope.interface import implementer
RE_PATTERN = r'((RE|AW|SV|VS)(\[\d+\])?:\s*)+'
ASCII_CHARSETS = (None, 'ascii', 'us-ascii')
EMPTYSTRING = ''
def ascii_header(mlist, msgdata, subject, prefix, prefix_pattern, ws):
if mlist.preferred_language.charset not in ASCII_CHARSETS:
return None
for chunk, charset in decode_header(subject.encode()):
if charset not in ASCII_CHARSETS:
return None
subject_text = EMPTYSTRING.join(str(subject).splitlines())
rematch = re.match(RE_PATTERN, subject_text, re.I)
if rematch:
subject_text = subject_text[rematch.end():]
recolon = 'Re: '
else:
recolon = ''
# At this point, the subject may become null if someone posted mail
# with "Subject: [subject prefix]".
if subject_text.strip() == '':
with _.using(mlist.preferred_language.code):
subject_text = _('(no subject)')
else:
subject_text = re.sub(prefix_pattern, '', subject_text)
msgdata['stripped_subject'] = subject_text
lines = subject_text.splitlines()
first_line = [lines[0]]
if recolon:
first_line.insert(0, recolon)
if prefix:
first_line.insert(0, prefix)
subject_text = EMPTYSTRING.join(first_line)
return Header(subject_text, continuation_ws=ws)
def all_same_charset(mlist, msgdata, subject, prefix, prefix_pattern, ws):
list_charset = mlist.preferred_language.charset
chunks = []
for chunk, charset in decode_header(subject.encode()):
if charset is None:
charset = 'us-ascii'
chunks.append(chunk.decode(charset))
if charset != list_charset:
return None
subject_text = EMPTYSTRING.join(chunks)
rematch = re.match(RE_PATTERN, subject_text, re.I)
if rematch:
subject_text = subject_text[rematch.end():]
recolon = 'Re: '
else:
recolon = ''
# At this point, the subject may become null if someone posted mail
# with "Subject: [subject prefix]".
if subject_text.strip() == '':
with _.push(mlist.preferred_language.code):
subject_text = _('(no subject)')
else:
subject_text = re.sub(prefix_pattern, '', subject_text)
msgdata['stripped_subject'] = subject_text
lines = subject_text.splitlines()
first_line = [lines[0]]
if recolon:
first_line.insert(0, recolon)
if prefix:
first_line.insert(0, prefix)
subject_text = EMPTYSTRING.join(first_line)
return Header(subject_text, charset=list_charset, continuation_ws=ws)
def mixed_charsets(mlist, msgdata, subject, prefix, prefix_pattern, ws):
list_charset = mlist.preferred_language.charset
chunks = decode_header(subject.encode())
if len(chunks) == 0:
with _.push(mlist.preferred_language.code):
subject_text = _('(no subject)')
chunks = [(prefix, list_charset),
(subject_text, list_charset),
]
return make_header(chunks, continuation_ws=ws)
# Only search the first chunk for Re and existing prefix.
chunk_text, chunk_charset = chunks[0]
if chunk_charset is None:
chunk_charset = 'us-ascii'
first_text = chunk_text.decode(chunk_charset)
first_text = re.sub(prefix_pattern, '', first_text).lstrip()
rematch = re.match(RE_PATTERN, first_text, re.I)
if rematch:
first_text = 'Re: ' + first_text[rematch.end():]
chunks[0] = (first_text, chunk_charset)
# The subject text stripped of the prefix, for use in the NNTP gateway.
msgdata['stripped_subject'] = str(make_header(chunks, continuation_ws=ws))
chunks.insert(0, (prefix, list_charset))
return make_header(chunks, continuation_ws=ws)
@implementer(IHandler)
class SubjectPrefix:
"""Add a list-specific prefix to the Subject header value."""
name = 'subject-prefix'
description = _('Add a list-specific prefix to the Subject header value.')
def process(self, mlist, msg, msgdata):
"""See `IHandler`."""
if msgdata.get('isdigest') or msgdata.get('_fasttrack'):
return
prefix = mlist.subject_prefix
if not prefix.strip():
return
subject = msg.get('subject', '')
# Turn the value into a Header instance and try to figure out what
# continuation whitespace is being used.
# Save the original Subject.
msgdata['original_subject'] = subject
if isinstance(subject, Header):
subject_text = str(subject)
else:
subject = make_header(decode_header(subject))
subject_text = str(subject)
lines = subject_text.splitlines()
ws = '\t'
if len(lines) > 1 and lines[1] and lines[1][0] in ' \t':
ws = lines[1][0]
# If the subject_prefix contains '%d', it is replaced with the mailing
# list's sequence number. The sequential number format allows '%d' or
# '%05d' like pattern.
prefix_pattern = re.escape(prefix)
# Unescape '%'.
prefix_pattern = '%'.join(prefix_pattern.split(r'\%'))
        p = re.compile(r'%\d*d')
if p.search(prefix, 1):
# The prefix has number, so we should search prefix w/number in
# subject. Also, force new style.
prefix_pattern = p.sub(r'\s*\d+\s*', prefix_pattern)
# Substitute %d in prefix with post_id
try:
prefix = prefix % mlist.post_id
except TypeError:
pass
for handler in (ascii_header,
all_same_charset,
mixed_charsets,
):
new_subject = handler(
mlist, msgdata, subject, prefix, prefix_pattern, ws)
if new_subject is not None:
del msg['subject']
msg['Subject'] = new_subject
return
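    # Example (illustrative): with subject_prefix '[ant] ', an incoming
    # 'Subject: Re: [ant] stack overflow' becomes
    # 'Subject: [ant] Re: stack overflow', while msgdata['stripped_subject']
    # is set to 'stack overflow' for the NNTP gateway.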
|
YUNZHONGTAN/MY_Python_script
|
refs/heads/master
|
soft_ware/pymongo-3.2/pymongo/monitoring.py
|
18
|
# Copyright 2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Tools to monitor driver events.
Use :func:`register` to register global listeners for specific events.
Currently only command events are published. Listeners must be
a subclass of :class:`CommandListener` and implement
:meth:`~CommandListener.started`, :meth:`~CommandListener.succeeded`, and
:meth:`~CommandListener.failed`.
For example, a simple command logger might be implemented like this::
import logging
from pymongo import monitoring
class CommandLogger(monitoring.CommandListener):
def started(self, event):
logging.info("Command {0.command_name} with request id "
"{0.request_id} started on server "
"{0.connection_id}".format(event))
def succeeded(self, event):
logging.info("Command {0.command_name} with request id "
"{0.request_id} on server {0.connection_id} "
"succeeded in {0.duration_micros} "
"microseconds".format(event))
def failed(self, event):
logging.info("Command {0.command_name} with request id "
"{0.request_id} on server {0.connection_id} "
"failed in {0.duration_micros} "
"microseconds".format(event))
monitoring.register(CommandLogger())
Event listeners can also be registered per instance of
:class:`~pymongo.mongo_client.MongoClient`::
client = MongoClient(event_listeners=[CommandLogger()])
Note that previously registered global listeners are automatically included when
configuring per client event listeners. Registering a new global listener will
not add that listener to existing client instances.
.. note:: Events are delivered **synchronously**. Application threads block
waiting for event handlers (e.g. :meth:`~CommandListener.started`) to
return. Care must be taken to ensure that your event handlers are efficient
enough to not adversely affect overall application performance.
.. warning:: The command documents published through this API are *not* copies.
If you intend to modify them in any way you must copy them in your event
handler first.
"""
import sys
import traceback
from collections import namedtuple, Sequence
_Listeners = namedtuple('Listeners', ('command_listeners',))
_LISTENERS = _Listeners([])
class CommandListener(object):
"""Abstract base class for command listeners."""
def started(self, event):
"""Abstract method to handle CommandStartedEvent.
:Parameters:
- `event`: An instance of :class:`CommandStartedEvent`
"""
raise NotImplementedError
def succeeded(self, event):
"""Abstract method to handle CommandSucceededEvent.
:Parameters:
- `event`: An instance of :class:`CommandSucceededEvent`
"""
raise NotImplementedError
def failed(self, event):
"""Abstract method to handle CommandFailedEvent.
:Parameters:
- `event`: An instance of :class:`CommandFailedEvent`
"""
raise NotImplementedError
def _to_micros(dur):
"""Convert duration 'dur' to microseconds."""
if hasattr(dur, 'total_seconds'):
return int(dur.total_seconds() * 10e5)
# Python 2.6
return dur.microseconds + (dur.seconds + dur.days * 24 * 3600) * 1000000
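# A minimal illustrative sketch (not part of the original module): ``10e5``
# equals ``1e6``, so both branches map one second to one million microseconds.
#
#     from datetime import timedelta
#     assert _to_micros(timedelta(seconds=1)) == 1000000
#     assert _to_micros(timedelta(milliseconds=2)) == 2000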
def _validate_event_listeners(option, listeners):
"""Validate event listeners"""
if not isinstance(listeners, Sequence):
raise TypeError("%s must be a list or tuple" % (option,))
for listener in listeners:
if not isinstance(listener, CommandListener):
raise TypeError("Only subclasses of "
"pymongo.monitoring.CommandListener are supported")
return listeners
def register(listener):
"""Register a global event listener.
:Parameters:
- `listener`: A subclass of :class:`CommandListener`.
"""
_validate_event_listeners('listener', [listener])
_LISTENERS.command_listeners.append(listener)
def _handle_exception():
"""Print exceptions raised by subscribers to stderr."""
# Heavily influenced by logging.Handler.handleError.
# See note here:
# https://docs.python.org/3.4/library/sys.html#sys.__stderr__
if sys.stderr:
einfo = sys.exc_info()
try:
traceback.print_exception(einfo[0], einfo[1], einfo[2],
None, sys.stderr)
except IOError:
pass
finally:
del einfo
# Note - to avoid bugs from forgetting which of these is all lowercase and
# which are camelCase, and at the same time avoid having to add a test for
# every command, use all lowercase here and test against command_name.lower().
_SENSITIVE_COMMANDS = set(
["authenticate", "saslstart", "saslcontinue", "getnonce", "createuser",
"updateuser", "copydbgetnonce", "copydbsaslstart", "copydb"])
class _CommandEvent(object):
"""Base class for command events."""
__slots__ = ("__cmd_name", "__rqst_id", "__conn_id", "__op_id")
def __init__(self, command_name, request_id, connection_id, operation_id):
self.__cmd_name = command_name
self.__rqst_id = request_id
self.__conn_id = connection_id
self.__op_id = operation_id
@property
def command_name(self):
"""The command name."""
return self.__cmd_name
@property
def request_id(self):
"""The request id for this operation."""
return self.__rqst_id
@property
def connection_id(self):
"""The address (host, port) of the server this command was sent to."""
return self.__conn_id
@property
def operation_id(self):
"""An id for this series of events or None."""
return self.__op_id
class CommandStartedEvent(_CommandEvent):
"""Event published when a command starts.
:Parameters:
- `command`: The command document.
- `database_name`: The name of the database this command was run against.
- `request_id`: The request id for this operation.
- `connection_id`: The address (host, port) of the server this command
was sent to.
- `operation_id`: An optional identifier for a series of related events.
"""
__slots__ = ("__cmd", "__db")
def __init__(self, command, database_name, *args):
if not command:
raise ValueError("%r is not a valid command" % (command,))
# Command name must be first key.
command_name = next(iter(command))
super(CommandStartedEvent, self).__init__(command_name, *args)
if command_name.lower() in _SENSITIVE_COMMANDS:
self.__cmd = {}
else:
self.__cmd = command
self.__db = database_name
@property
def command(self):
"""The command document."""
return self.__cmd
@property
def database_name(self):
"""The name of the database this command was run against."""
return self.__db
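# A hedged usage sketch (the constructor arguments below are invented for
# illustration): commands whose names appear in _SENSITIVE_COMMANDS are
# redacted to an empty document, while ordinary commands are exposed unchanged.
#
#     event = CommandStartedEvent(
#         {'ping': 1}, 'admin', 42, ('localhost', 27017), 42)
#     assert event.command == {'ping': 1}
#     event = CommandStartedEvent(
#         {'authenticate': 1}, 'admin', 43, ('localhost', 27017), 43)
#     assert event.command == {}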
class CommandSucceededEvent(_CommandEvent):
"""Event published when a command succeeds.
:Parameters:
- `duration`: The command duration as a datetime.timedelta.
- `reply`: The server reply document.
- `command_name`: The command name.
- `request_id`: The request id for this operation.
- `connection_id`: The address (host, port) of the server this command
was sent to.
- `operation_id`: An optional identifier for a series of related events.
"""
__slots__ = ("__duration_micros", "__reply")
def __init__(self, duration, reply, command_name,
request_id, connection_id, operation_id):
super(CommandSucceededEvent, self).__init__(
command_name, request_id, connection_id, operation_id)
self.__duration_micros = _to_micros(duration)
if command_name.lower() in _SENSITIVE_COMMANDS:
self.__reply = {}
else:
self.__reply = reply
@property
def duration_micros(self):
"""The duration of this operation in microseconds."""
return self.__duration_micros
@property
def reply(self):
"""The server failure document for this operation."""
return self.__reply
class CommandFailedEvent(_CommandEvent):
"""Event published when a command fails.
:Parameters:
- `duration`: The command duration as a datetime.timedelta.
- `failure`: The server reply document.
- `command_name`: The command name.
- `request_id`: The request id for this operation.
- `connection_id`: The address (host, port) of the server this command
was sent to.
- `operation_id`: An optional identifier for a series of related events.
"""
__slots__ = ("__duration_micros", "__failure")
def __init__(self, duration, failure, *args):
super(CommandFailedEvent, self).__init__(*args)
self.__duration_micros = _to_micros(duration)
self.__failure = failure
@property
def duration_micros(self):
"""The duration of this operation in microseconds."""
return self.__duration_micros
@property
def failure(self):
"""The server failure document for this operation."""
return self.__failure
class _EventListeners(object):
"""Configure event listeners for a client instance.
Any event listeners registered globally are included by default.
:Parameters:
- `listeners`: A list of event listeners.
"""
def __init__(self, listeners):
self.__command_listeners = _LISTENERS.command_listeners[:]
if listeners is not None:
self.__command_listeners.extend(listeners)
self.__enabled_for_commands = bool(self.__command_listeners)
@property
def enabled_for_commands(self):
"""Are any CommandListener instances registered?"""
return self.__enabled_for_commands
@property
def event_listeners(self):
"""List of registered event listeners."""
return self.__command_listeners[:]
def publish_command_start(self, command, database_name,
request_id, connection_id, op_id=None):
"""Publish a CommandStartedEvent to all command listeners.
:Parameters:
- `command`: The command document.
- `database_name`: The name of the database this command was run
against.
- `request_id`: The request id for this operation.
- `connection_id`: The address (host, port) of the server this
command was sent to.
- `op_id`: The (optional) operation id for this operation.
"""
if op_id is None:
op_id = request_id
event = CommandStartedEvent(
command, database_name, request_id, connection_id, op_id)
for subscriber in self.__command_listeners:
try:
subscriber.started(event)
except Exception:
_handle_exception()
def publish_command_success(self, duration, reply, command_name,
request_id, connection_id, op_id=None):
"""Publish a CommandSucceededEvent to all command listeners.
:Parameters:
- `duration`: The command duration as a datetime.timedelta.
- `reply`: The server reply document.
- `command_name`: The command name.
- `request_id`: The request id for this operation.
- `connection_id`: The address (host, port) of the server this
command was sent to.
- `op_id`: The (optional) operation id for this operation.
"""
if op_id is None:
op_id = request_id
event = CommandSucceededEvent(
duration, reply, command_name, request_id, connection_id, op_id)
for subscriber in self.__command_listeners:
try:
subscriber.succeeded(event)
except Exception:
_handle_exception()
def publish_command_failure(self, duration, failure, command_name,
request_id, connection_id, op_id=None):
"""Publish a CommandFailedEvent to all command listeners.
:Parameters:
- `duration`: The command duration as a datetime.timedelta.
- `failure`: The server reply document or failure description
document.
- `command_name`: The command name.
- `request_id`: The request id for this operation.
- `connection_id`: The address (host, port) of the server this
command was sent to.
- `op_id`: The (optional) operation id for this operation.
"""
if op_id is None:
op_id = request_id
event = CommandFailedEvent(
duration, failure, command_name, request_id, connection_id, op_id)
for subscriber in self.__command_listeners:
try:
subscriber.failed(event)
except Exception:
_handle_exception()
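# A hedged end-to-end sketch (``CommandLogger`` refers to the example class in
# the module docstring; the wire values are invented): the driver holds an
# _EventListeners instance and calls the publish_* helpers around each
# operation.
#
#     listeners = _EventListeners([CommandLogger()])
#     if listeners.enabled_for_commands:
#         listeners.publish_command_start(
#             {'ping': 1}, 'admin', 1, ('localhost', 27017))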
|
ntoll/code-dojo
|
refs/heads/master
|
adventure/week3/team2/adventure.py
|
1
|
#!/usr/bin/python
import sys
from cmd import Cmd
DIRECTIONS = 'N', 'E', 'S', 'W'
NORTH, EAST, SOUTH, WEST = DIRECTIONS
class Player(object):
def __init__(self, location, name='Player'):
assert isinstance(location, Location)
self.location = location
self.name = name
self.inventory = {}
class GameObject(object):
def __init__(self, name):
self.name = name
class Location(object):
def __init__(self, name, description=""):
self.name = name
self.description = description
self.exits = dict()
self.objects = {}
def __str__(self):
return self.name
def add_direction(self, direction, other_location):
assert direction in DIRECTIONS
self.exits[direction] = other_location
def describe(self):
out = ''
out += "Current location: %s\n%s\n" % (self.name, self.description)
for direction, location in self.exits.items():
out += "\t%s (%s)\n" % (location, direction)
if self.objects:
out += "There are the following items here:\n"
for object in self.objects.keys():
out += "\t%s" % object
return out
sample_universe = """
:Garage
You are in the garage. There are no cars here currently.
E:Bedroom
W:Kitchen
:Kitchen
The kitchen is immaculate. You suspect that nobody has ever actually prepared any food here.
E:Garage
"""
def test_location():
startroot = Location('Start room')
kitchen = Location('Kitchen')
startroot.add_direction(NORTH, kitchen)
def test_player():
lobby = Location('Lobby')
john = Player(lobby, 'John')
def load_locations(content):
location = first_location = None
locations = {}
for line in content:
line = line.strip()
if not line or line.startswith('#'):
continue
if line.startswith("*"):
break
if line.startswith(':'):
location = Location(line[1:])
locations[line[1:]] = location
if not first_location:
first_location = location
elif location is not None and not location.description:
location.description = line
else:
direction, destination = line.split(':', 1)
location.add_direction(direction, destination)
for location in locations.values():
for direction, destination in location.exits.items():
try:
location.add_direction(direction, locations[destination])
except KeyError:
raise SystemError("Your universe file sucks! %s" % destination)
return locations, first_location
def load_gameobjects(content):
lines = list(content)
indexes = [i for i, j in enumerate(lines) if j.startswith("*")]
current_game_object = None
game_state = []
for index in indexes:
current_game_object = GameObject(lines[index][1:].strip())
current_game_object.location = lines[index+1].strip()
current_game_object.description = lines[index+2].strip()
if lines[index+3].strip() != "":
current_game_object.aliases = lines[index+3].replace(" ", "").split(",")
game_state.append(current_game_object)
return game_state
def load_universe(content):
locations, start_room = load_locations(content)
game_objects = load_gameobjects(content)
for object in game_objects:
locations[object.location].objects[object.name] = object
return locations, start_room, game_objects
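# A minimal sketch (the two-room universe below is invented for illustration)
# of how ``load_universe`` consumes the format shown in ``sample_universe``:
# ``:Name`` starts a room, the next line is its description, and ``DIR:Other``
# lines wire up exits.  Note that ``sample_universe`` itself points the Garage
# east to an undefined Bedroom, so loading it as-is raises SystemError.
#
#     demo = """
#     :Hall
#     A draughty hall.
#     E:Study
#     :Study
#     A cosy study.
#     W:Hall
#     """.splitlines()
#     locations, start, objects = load_universe(demo)
#     assert str(start) == 'Hall'
#     assert str(locations['Hall'].exits['E']) == 'Study'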
class Game(Cmd):
def __init__(self, gamefile, player_name):
Cmd.__init__(self)
self.locations, self.start_room, self.gameobjects = load_universe(file(gamefile))
self.player = Player(self.start_room, player_name)
print self.player.location.describe()
def do_move(self, direction):
direction = direction.upper()
        newroom = self.player.location.exits.get(direction)
        if newroom is None:
            print "No passage that way!"
            return
        self.player.location = newroom
do_go = do_move
def do_look(self, what):
location = self.player.location
if what == "":
location.describe()
elif what in location.exits:
# TODO validate where
newroom = location.exits[where]
print newroom.describe()
elif what in location.objects:
print location.objects[what].description
else:
print "What are you looking at punk!"
def postcmd(self, stop, x):
print self.player.location.describe()
def do_bye(self, foo):
sys.exit()
def do_pickup(self, object):
if object in self.player.location.objects:
self.player.inventory[object] = self.player.location.objects.pop(object)
print "Picked up %s" % object
else:
print "No %s here" % object
def do_drop(self, object):
if object in self.player.inventory:
self.player.location.objects[object] = self.player.inventory.pop(object)
print "Dropped %s" % object
else:
print "your not holding %s" % object
def play(gamefile):
#start_room = _create_universe()
player_name = raw_input('Player name?: ') or 'No name'
g = Game(gamefile, player_name)
g.cmdloop()
''' while True:
if not player.location.exits:
print "No more exits! GAME OVER!"
break
next_direction = raw_input('Where to next? ').upper()
while next_direction not in player.location.exits.keys():
next_direction = raw_input('Where to next? (%s) ' %\
', '.join(player.location.exits.keys())).upper()
player.location = player.location.exits[next_direction]
'''
if __name__ == '__main__':
import sys
if len(sys.argv) < 2:
print "Usage: %s DATAFILE" % sys.argv[0]
sys.exit(1)
if sys.argv[1] == 'test':
test_location()
test_player()
sys.exit(0)
try:
play(sys.argv[1])
except KeyboardInterrupt:
pass
|
havard024/prego
|
refs/heads/master
|
venv/lib/python2.7/site-packages/django/conf/locale/zh_CN/__init__.py
|
12133432
| |
fidodaj/project3
|
refs/heads/master
|
server/lib/flask/exthook.py
|
783
|
# -*- coding: utf-8 -*-
"""
flask.exthook
~~~~~~~~~~~~~
Redirect imports for extensions. This module basically makes it possible
for us to transition from flaskext.foo to flask_foo without having to
force all extensions to upgrade at the same time.
When a user does ``from flask.ext.foo import bar`` it will attempt to
import ``from flask_foo import bar`` first and when that fails it will
try to import ``from flaskext.foo import bar``.
We're switching from namespace packages because it was just too painful for
everybody involved.
This is used by `flask.ext`.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import sys
import os
from ._compat import reraise
class ExtensionImporter(object):
"""This importer redirects imports from this submodule to other locations.
This makes it possible to transition from the old flaskext.name to the
newer flask_name without people having a hard time.
"""
def __init__(self, module_choices, wrapper_module):
self.module_choices = module_choices
self.wrapper_module = wrapper_module
self.prefix = wrapper_module + '.'
self.prefix_cutoff = wrapper_module.count('.') + 1
def __eq__(self, other):
return self.__class__.__module__ == other.__class__.__module__ and \
self.__class__.__name__ == other.__class__.__name__ and \
self.wrapper_module == other.wrapper_module and \
self.module_choices == other.module_choices
def __ne__(self, other):
return not self.__eq__(other)
def install(self):
sys.meta_path[:] = [x for x in sys.meta_path if self != x] + [self]
def find_module(self, fullname, path=None):
if fullname.startswith(self.prefix):
return self
def load_module(self, fullname):
if fullname in sys.modules:
return sys.modules[fullname]
modname = fullname.split('.', self.prefix_cutoff)[self.prefix_cutoff]
for path in self.module_choices:
realname = path % modname
try:
__import__(realname)
except ImportError:
exc_type, exc_value, tb = sys.exc_info()
                # since we only establish the entry in sys.modules at the
                # very end this seems to be redundant, but if recursive
                # imports happen we will call into the module import a
                # second time.
# On the second invocation we still don't have an entry for
# fullname in sys.modules, but we will end up with the same
# fake module name and that import will succeed since this
# one already has a temporary entry in the modules dict.
# Since this one "succeeded" temporarily that second
# invocation now will have created a fullname entry in
# sys.modules which we have to kill.
sys.modules.pop(fullname, None)
                # If it's an important traceback we reraise it; otherwise
                # we swallow it and try the next choice.  The skipped frame
                # is the one from __import__ above, which we don't care
                # about.
if self.is_important_traceback(realname, tb):
reraise(exc_type, exc_value, tb.tb_next)
continue
module = sys.modules[fullname] = sys.modules[realname]
if '.' not in modname:
setattr(sys.modules[self.wrapper_module], modname, module)
return module
raise ImportError('No module named %s' % fullname)
def is_important_traceback(self, important_module, tb):
"""Walks a traceback's frames and checks if any of the frames
originated in the given important module. If that is the case then we
were able to import the module itself but apparently something went
wrong when the module was imported. (Eg: import of an import failed).
"""
while tb is not None:
if self.is_important_frame(important_module, tb):
return True
tb = tb.tb_next
return False
def is_important_frame(self, important_module, tb):
"""Checks a single frame if it's important."""
g = tb.tb_frame.f_globals
if '__name__' not in g:
return False
module_name = g['__name__']
# Python 2.7 Behavior. Modules are cleaned up late so the
# name shows up properly here. Success!
if module_name == important_module:
return True
        # Some Python versions will clean up modules so early that the
# module name at that point is no longer set. Try guessing from
# the filename then.
filename = os.path.abspath(tb.tb_frame.f_code.co_filename)
test_string = os.path.sep + important_module.replace('.', os.path.sep)
return test_string + '.py' in filename or \
test_string + os.path.sep + '__init__.py' in filename
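# A minimal wiring sketch (this mirrors how ``flask.ext`` is expected to use
# the importer; treat the exact call site as an assumption): ``flask.ext.foo``
# then resolves to ``flask_foo`` first, falling back to ``flaskext.foo``.
#
#     importer = ExtensionImporter(['flask_%s', 'flaskext.%s'], __name__)
#     importer.install()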
|
Eforcers/inbox-cleaner
|
refs/heads/master
|
src/lib/gdata/tlslite/FileObject.py
|
359
|
"""Class returned by TLSConnection.makefile()."""
class FileObject:
"""This class provides a file object interface to a
L{tlslite.TLSConnection.TLSConnection}.
Call makefile() on a TLSConnection to create a FileObject instance.
This class was copied, with minor modifications, from the
_fileobject class in socket.py. Note that fileno() is not
implemented."""
default_bufsize = 16384 #TREV: changed from 8192
def __init__(self, sock, mode='rb', bufsize=-1):
self._sock = sock
self.mode = mode # Not actually used in this version
if bufsize < 0:
bufsize = self.default_bufsize
self.bufsize = bufsize
self.softspace = False
if bufsize == 0:
self._rbufsize = 1
elif bufsize == 1:
self._rbufsize = self.default_bufsize
else:
self._rbufsize = bufsize
self._wbufsize = bufsize
self._rbuf = "" # A string
self._wbuf = [] # A list of strings
def _getclosed(self):
return self._sock is not None
closed = property(_getclosed, doc="True if the file is closed")
def close(self):
try:
if self._sock:
for result in self._sock._decrefAsync(): #TREV
pass
finally:
self._sock = None
def __del__(self):
try:
self.close()
except:
# close() may fail if __init__ didn't complete
pass
def flush(self):
if self._wbuf:
buffer = "".join(self._wbuf)
self._wbuf = []
self._sock.sendall(buffer)
#def fileno(self):
# raise NotImplementedError() #TREV
def write(self, data):
data = str(data) # XXX Should really reject non-string non-buffers
if not data:
return
self._wbuf.append(data)
if (self._wbufsize == 0 or
self._wbufsize == 1 and '\n' in data or
self._get_wbuf_len() >= self._wbufsize):
self.flush()
def writelines(self, list):
# XXX We could do better here for very long lists
# XXX Should really reject non-string non-buffers
self._wbuf.extend(filter(None, map(str, list)))
if (self._wbufsize <= 1 or
self._get_wbuf_len() >= self._wbufsize):
self.flush()
def _get_wbuf_len(self):
buf_len = 0
for x in self._wbuf:
buf_len += len(x)
return buf_len
def read(self, size=-1):
data = self._rbuf
if size < 0:
# Read until EOF
buffers = []
if data:
buffers.append(data)
self._rbuf = ""
if self._rbufsize <= 1:
recv_size = self.default_bufsize
else:
recv_size = self._rbufsize
while True:
data = self._sock.recv(recv_size)
if not data:
break
buffers.append(data)
return "".join(buffers)
else:
# Read until size bytes or EOF seen, whichever comes first
buf_len = len(data)
if buf_len >= size:
self._rbuf = data[size:]
return data[:size]
buffers = []
if data:
buffers.append(data)
self._rbuf = ""
while True:
left = size - buf_len
recv_size = max(self._rbufsize, left)
data = self._sock.recv(recv_size)
if not data:
break
buffers.append(data)
n = len(data)
if n >= left:
self._rbuf = data[left:]
buffers[-1] = data[:left]
break
buf_len += n
return "".join(buffers)
def readline(self, size=-1):
data = self._rbuf
if size < 0:
# Read until \n or EOF, whichever comes first
if self._rbufsize <= 1:
# Speed up unbuffered case
assert data == ""
buffers = []
recv = self._sock.recv
while data != "\n":
data = recv(1)
if not data:
break
buffers.append(data)
return "".join(buffers)
nl = data.find('\n')
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
return data[:nl]
buffers = []
if data:
buffers.append(data)
self._rbuf = ""
while True:
data = self._sock.recv(self._rbufsize)
if not data:
break
buffers.append(data)
nl = data.find('\n')
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
buffers[-1] = data[:nl]
break
return "".join(buffers)
else:
# Read until size bytes or \n or EOF seen, whichever comes first
nl = data.find('\n', 0, size)
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
return data[:nl]
buf_len = len(data)
if buf_len >= size:
self._rbuf = data[size:]
return data[:size]
buffers = []
if data:
buffers.append(data)
self._rbuf = ""
while True:
data = self._sock.recv(self._rbufsize)
if not data:
break
buffers.append(data)
left = size - buf_len
nl = data.find('\n', 0, left)
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
buffers[-1] = data[:nl]
break
n = len(data)
if n >= left:
self._rbuf = data[left:]
buffers[-1] = data[:left]
break
buf_len += n
return "".join(buffers)
def readlines(self, sizehint=0):
total = 0
list = []
while True:
line = self.readline()
if not line:
break
list.append(line)
total += len(line)
if sizehint and total >= sizehint:
break
return list
# Iterator protocols
def __iter__(self):
return self
def next(self):
line = self.readline()
if not line:
raise StopIteration
return line
|