repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
Vijaysai005/KProject | refs/heads/master | vijay/DBSCAN/generate_alert.py | 1 | # usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 20 13:15:05 2017
@author: Vijayasai S
"""
# Use python3
from pymongo import MongoClient
from datetime import datetime
from operator import itemgetter
def TimeInMinutes(hours, minutes, seconds):
    """Convert an hours/minutes/seconds triple into a (float) minute count.

    Arguments may be ints or numeric strings (they are coerced with int());
    seconds contribute fractionally to the result.
    """
    h = int(hours)
    m = int(minutes)
    s = int(seconds)
    return 60 * h + m + s / 60
def Generate_Alert(ride_id, cluster_instance, threshold_minute):
    """Scan recent cluster assignments and print alerts for bikers that
    have been outside their cluster for too long.

    Reads DBSCAN cluster assignments from the local MongoDB collection
    ``maximus_db.clus`` and, for each unit flagged as an "outlier",
    accumulates the minutes it has spent away from a cluster it previously
    belonged to. An alert line is printed once the accumulated time
    reaches ``threshold_minute``.

    Parameters
    ----------
    ride_id : int, required
        The id number given for particular ride.
        NOTE(review): only the ``ride_id is None`` branch builds ``items``;
        a non-None ride_id leaves ``items``/``data`` undefined — confirm
        whether a ride-filtered query was intended here.
    cluster_instance : int, required
        No. of latest cluster instances should be taken.
    threshold_minute : int, required
        The minute required to check whether the biker is outside the
        cluster or not
    """
    client = MongoClient("localhost", 27017)
    collection = client.maximus_db.clus
    # Hard-coded reference "now" — presumably matches the demo data set.
    current_time = datetime(2017,3,26,15,7)
    if ride_id is None:
        # Newest-first fetch of all assignments up to current_time.
        items = collection.find({"timestamp":{"$lte":current_time}},{"timestamp":1,"cluster_number":1,"unit_id":1,"latitude":1,"longitude":1,"_id":0}).sort([("timestamp", -1)])
        data = [item for item in items]
        # Keep only the newest cluster_instance records, re-sorted oldest-first.
        latest_data = sorted(data[:cluster_instance], key=itemgetter('timestamp'))
    print (latest_data)
    current_time_stamp = latest_data[0]["timestamp"]
    sum_min = 0 ; diff = 0
    cluster_number = latest_data[0]["cluster_number"]
    try:
        # Highest numeric cluster label among the records ("outlier" excluded).
        max_cluster = int(max([latest_data[i]["cluster_number"] for i in range(cluster_instance) if latest_data[i]["cluster_number"] != "outlier"]))
    except ValueError:
        # All records are outliers (or none numeric): assume a single cluster.
        max_cluster = 1
    # Per-cluster membership for the current instant / full history; outlier ids.
    curr_clus_id = [[] for i in range(max_cluster)] ; curr_out_id = [] ; fix_clus_id = [[] for i in range(max_cluster)]
    check_clus_list = [[] for i in range(max_cluster)]
    for i in range(cluster_instance):
        if latest_data[i]["timestamp"] != current_time_stamp:
            # Crossed into a new time instant: measure the gap in minutes.
            diff = abs(current_time_stamp - latest_data[i]["timestamp"])
            # NOTE(review): parses str(timedelta) "H:MM:SS" by fixed slicing;
            # breaks for gaps of 10+ hours or multi-day gaps — confirm inputs.
            diff = TimeInMinutes(str(diff)[0:1], str(diff)[2:4], str(diff)[5:7])
            current_time_stamp = latest_data[i]["timestamp"]
            curr_out_id = []
            try:
                # Reset the per-instant membership of the last-seen cluster.
                curr_clus_id[int(cluster_number)-1] = []
            except ValueError:
                # Last record was an "outlier" (non-numeric label): nothing to reset.
                pass
        #print (latest_data[i]["cluster_number"])
        if latest_data[i]["cluster_number"] != "outlier":
            cluster_number = latest_data[i]["cluster_number"]
            curr_clus_id[int(cluster_number)-1].append(latest_data[i]["unit_id"])
            fix_clus_id[int(cluster_number)-1].append(latest_data[i]["unit_id"])
            #print (curr_clus_id)
        elif latest_data[i]["cluster_number"] == "outlier":
            cluster_number = latest_data[i]["cluster_number"]
            curr_out_id.append(latest_data[i]["unit_id"])
        #print (current_time_stamp)
        print (latest_data[i]["timestamp"])
        print (curr_clus_id)
        for a in range(len(curr_clus_id)):
            check_clus_list[a].append(curr_clus_id[a])
        #print (check_clus_list)
        #print (curr_clus_id)
        # Accumulate outside-time for each current outlier that historically
        # belonged to some cluster; alert once the threshold is reached.
        for j in range(len(curr_out_id)):
            for k in range(len(fix_clus_id)):
                if curr_out_id[j] in list(fix_clus_id)[k]:
                    sum_min = sum_min + diff
                elif curr_out_id[j] not in list(fix_clus_id)[k]:
                    sum_min = 0
                if sum_min >= threshold_minute:
                    print ("unit_id: ",curr_out_id[j],"is outside of cluster number ",k+1)
        #print (check_clus_list)
    #print (check_clus_list)
if __name__ == "__main__":
    # Demo invocation: no ride filter, latest 100 cluster instances,
    # 1-minute outside-cluster threshold.
    Generate_Alert(None, 100, 1)
|
modulexcite/nbgrader | refs/heads/master | docs/source/user_guide/jupyterhub/nbgrader_config.py | 3 | c = get_config()
# nbgrader formgrader configuration for running behind JupyterHub.
c.NbGraderConfig.course_id = "example_course"
# Bind the formgrade app to localhost only; JupyterHub's proxy routes to it.
c.FormgradeApp.ip = "127.0.0.1"
c.FormgradeApp.port = 9000
# Authenticate graders against JupyterHub instead of standalone auth.
c.FormgradeApp.authenticator_class = "nbgrader.auth.hubauth.HubAuth"
# Change this to be the path to the user guide folder in your clone of
# nbgrader, or just wherever you have your class files. This is relative
# to the root of the notebook server launched by JupyterHub, which is
# probably your home directory.
c.HubAuth.notebook_url_prefix = "path/to/class_files"
# Change this to be the list of unix usernames that are allowed to access
# the formgrader.
c.HubAuth.graders = ["jhamrick"]
# This specifies that the formgrader should automatically generate an api
# token to authenticate itself with JupyterHub.
c.HubAuth.generate_hubapi_token = True
# Change this to be the jupyterhub.sqlite located in the directory where
# you actually run JupyterHub.
c.HubAuth.hub_db = "path/to/jupyterhub.sqlite"
|
Kloenk/GarrysModserver | refs/heads/master | main.py | 1 | #!/usr/bin/env python3
#TODO: add explanatory comments
if __name__ == '__main__':
    print("programm running")
    print("sending start email")
    # Import deferred to runtime so the mail/server module is only loaded
    # (and its side effects triggered) when run as a script.
    import GModServer.SendStartMail
    GModServer.SendStartMail.StartServer(debug=True) #Send Start Mail and start Server
|
duydb2/ZTC | refs/heads/master | atc/django-atc-profile-storage/atc_profile_storage/views.py | 1 | from atc_profile_storage.models import Profile
from atc_profile_storage.serializers import ProfileSerializer
from functools import wraps
from rest_framework.exceptions import APIException
from rest_framework.exceptions import ParseError
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import status
from rest_framework.parsers import JSONParser
class BadGateway(APIException):
    """API error raised when the ATC gateway cannot be reached (HTTP 502)."""
    status_code = 502
    default_detail = 'Could not connect to ATC gateway.'
def serviced(method):
    """Decorator that injects the ATC gateway client as a ``service`` argument.

    Intended to raise BadGateway when the gateway is unreachable; the actual
    client lookup is currently stubbed out, so ``service`` is always None.
    """
    @wraps(method)
    def wrapper(cls, request, *args, **kwargs):
        # service = atcdClient()
        # if service is None:
        #     raise BadGateway()
        service = None
        return method(cls, request, service, *args, **kwargs)
    return wrapper
class ProfilesApi(APIView):
    """REST endpoint operating on the whole collection of Profiles."""
    @serviced
    def get(self, request, service):
        """Return all profiles, serialized, with HTTP 200."""
        profiles = Profile.objects.all()
        serializer = ProfileSerializer(profiles, many=True)
        return Response(
            serializer.data,
            status=status.HTTP_200_OK
        )
    @serviced
    def post(self, request, service):
        """Replace the entire profile collection with the posted list.

        NOTE(review): all existing profiles are deleted *before* the new
        data is validated — invalid input loses the old data. Confirm this
        is intended.
        """
        # request.DATA is the pre-3.0 DRF accessor for parsed request data.
        data = request.DATA
        profiles = Profile.objects.all()
        profiles.delete()
        serializer = ProfileSerializer(data=data, many=True)
        if not serializer.is_valid():
            raise ParseError(detail=serializer.errors)
        serializer.save()
        return Response(
            serializer.data,
            status=status.HTTP_201_CREATED
        )
    @serviced
    def delete(self, request, service, pk=None):
        """Delete every profile and return HTTP 204 (pk is ignored here)."""
        profiles = Profile.objects.all()
        profiles.delete()
        return Response(
            status=status.HTTP_204_NO_CONTENT
        )
class ProfileApi(APIView):
    """REST endpoint for a single traffic-shaping Profile, addressed by pk."""
    def get_object(self, pk, create=None):
        """Return the Profile with primary key ``pk``.

        If it does not exist: create and return a default profile when
        ``create`` is truthy, otherwise return None.

        Fixes two defects in the original: it returned a ``Response`` built
        with ``status.HTTP_404_OK`` (a constant that does not exist in
        ``rest_framework.status``, raising AttributeError), and callers then
        treated that Response as a Profile object.
        """
        try:
            return Profile.objects.get(pk=pk)
        except Profile.DoesNotExist:
            if create:
                # Default profile: empty up/down shaping settings.
                return Profile.objects.create(id=pk, name='profile id=%s'%pk, content={u'up': [], u'down':[]})
            return None
    @serviced
    def get(self, request, service, pk=None, format=None):
        """Fetch a profile (creating a default one if missing); HTTP 200."""
        profile = self.get_object(pk, create=True)
        serializer = ProfileSerializer(profile)
        return Response(
            serializer.data,
            status=status.HTTP_200_OK
        )
    @serviced
    def post(self, request, service, pk=None, format=None):
        """Replace the profile's data; raises ParseError on invalid input."""
        profile = self.get_object(pk, create=True)
        # request.DATA is the pre-3.0 DRF accessor for parsed request data.
        data = request.DATA
        serializer = ProfileSerializer(profile, data=data)
        if not serializer.is_valid():
            raise ParseError(detail=serializer.errors)
        serializer.save()
        return Response(
            serializer.data,
            status=status.HTTP_201_CREATED
        )
    @serviced
    def delete(self, request, service, pk=None):
        """Delete the profile: HTTP 404 if it does not exist, 204 otherwise."""
        profile = self.get_object(pk)
        if profile is None:
            # Previously this path crashed: get_object handed back a Response
            # object and ``profile.delete()`` was called on it.
            return Response(status=status.HTTP_404_NOT_FOUND)
        profile.delete()
        return Response(
            status=status.HTTP_204_NO_CONTENT
        )
# class JSONResponse(HttpResponse):
# def __init__(self, data, **kwargs):
# content = JSONRenderer().render(data)
# kwargs['content_type'] = 'application/json'
# super(JSONResponse, self).__init__(content, **kwargs)
#
#
# @csrf_exempt
# def profile_list(request):
# if request.method == 'GET':
# return JSONResponse(serializer.data)
# elif request.method == 'POST':
# return HttpResponse(status=405)
# else:
# return HttpResponse(status=405)
#
#
# @csrf_exempt
# def profile_detail(request, pk):
# try:
# profile = Profile.objects.get(pk=pk)
# except Profile.DoesNotExist:
# return HttpResponse(status=404)
#
# if request.method == 'GET':
# serializer = ProfileSerializer(profile)
# return JSONResponse(serializer.data)
#
# elif request.method == 'POST':
# data = JSONParser().parse(request)
# serializer = ProfileSerializer(profile, data=data)
# if serializer.is_valid():
# serializer.save()
# return JSONResponse(serializer.data)
# return JSONResponse(serializer.errors, status=400)
#
# elif request.method == 'DELETE':
# profile.delete()
# return HttpResponse(status=204)
#
# else:
# return HttpResponse(status=405)
|
nanditav/15712-TensorFlow | refs/heads/master | tensorflow/python/summary/impl/reservoir_test.py | 12 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.python.summary.impl import reservoir
class ReservoirTest(tf.test.TestCase):
  """Tests for the public Reservoir container (keyed reservoir sampling)."""
  def testEmptyReservoir(self):
    """A fresh reservoir exposes no keys."""
    r = reservoir.Reservoir(1)
    self.assertFalse(r.Keys())
  def testRespectsSize(self):
    """The per-key bucket inherits the reservoir's max size."""
    r = reservoir.Reservoir(42)
    # Accessing a new key lazily creates its bucket.
    self.assertEqual(r._buckets['meaning of life']._max_size, 42)
  def testItemsAndKeys(self):
    """Items are grouped by key and kept in insertion order."""
    r = reservoir.Reservoir(42)
    r.AddItem('foo', 4)
    r.AddItem('bar', 9)
    r.AddItem('foo', 19)
    self.assertItemsEqual(r.Keys(), ['foo', 'bar'])
    self.assertEqual(r.Items('foo'), [4, 19])
    self.assertEqual(r.Items('bar'), [9])
  def testExceptions(self):
    """Invalid sizes raise ValueError; unknown keys raise KeyError."""
    with self.assertRaises(ValueError):
      reservoir.Reservoir(-1)
    with self.assertRaises(ValueError):
      reservoir.Reservoir(13.3)
    r = reservoir.Reservoir(12)
    with self.assertRaises(KeyError):
      r.Items('missing key')
  def testDeterminism(self):
    """Tests that the reservoir is deterministic."""
    key = 'key'
    r1 = reservoir.Reservoir(10)
    r2 = reservoir.Reservoir(10)
    for i in xrange(100):
      r1.AddItem('key', i)
      r2.AddItem('key', i)
    self.assertEqual(r1.Items(key), r2.Items(key))
  def testBucketDeterminism(self):
    """Tests that reservoirs are deterministic at a bucket level.

    This means that only the order elements are added within a bucket matters.
    """
    separate_reservoir = reservoir.Reservoir(10)
    interleaved_reservoir = reservoir.Reservoir(10)
    for i in xrange(100):
      separate_reservoir.AddItem('key1', i)
    for i in xrange(100):
      separate_reservoir.AddItem('key2', i)
    for i in xrange(100):
      interleaved_reservoir.AddItem('key1', i)
      interleaved_reservoir.AddItem('key2', i)
    for key in ['key1', 'key2']:
      self.assertEqual(separate_reservoir.Items(key),
                       interleaved_reservoir.Items(key))
  def testUsesSeed(self):
    """Tests that reservoirs with different seeds keep different samples."""
    key = 'key'
    r1 = reservoir.Reservoir(10, seed=0)
    r2 = reservoir.Reservoir(10, seed=1)
    for i in xrange(100):
      r1.AddItem('key', i)
      r2.AddItem('key', i)
    self.assertNotEqual(r1.Items(key), r2.Items(key))
  def testFilterItemsByKey(self):
    """FilterItems removes matching items from one key's bucket only."""
    r = reservoir.Reservoir(100, seed=0)
    for i in xrange(10):
      r.AddItem('key1', i)
      r.AddItem('key2', i)
    self.assertEqual(len(r.Items('key1')), 10)
    self.assertEqual(len(r.Items('key2')), 10)
    # Filter keeps items where the predicate holds; returns the removal count.
    self.assertEqual(r.FilterItems(lambda x: x <= 7, 'key2'), 2)
    self.assertEqual(len(r.Items('key2')), 8)
    self.assertEqual(len(r.Items('key1')), 10)
    self.assertEqual(r.FilterItems(lambda x: x <= 3, 'key1'), 6)
    self.assertEqual(len(r.Items('key1')), 4)
    self.assertEqual(len(r.Items('key2')), 8)
class ReservoirBucketTest(tf.test.TestCase):
  """Tests for the internal _ReservoirBucket (a single key's sample)."""
  def testEmptyBucket(self):
    """A fresh bucket holds no items."""
    b = reservoir._ReservoirBucket(1)
    self.assertFalse(b.Items())
  def testFillToSize(self):
    """Up to max_size, every added item is kept, in order."""
    b = reservoir._ReservoirBucket(100)
    for i in xrange(100):
      b.AddItem(i)
    self.assertEqual(b.Items(), list(xrange(100)))
    self.assertEqual(b._num_items_seen, 100)
  def testDoesntOverfill(self):
    """Beyond max_size, the bucket keeps a fixed-size sample."""
    b = reservoir._ReservoirBucket(10)
    for i in xrange(1000):
      b.AddItem(i)
    self.assertEqual(len(b.Items()), 10)
    self.assertEqual(b._num_items_seen, 1000)
  def testMaintainsOrder(self):
    """The sampled items preserve their original insertion order."""
    b = reservoir._ReservoirBucket(100)
    for i in xrange(10000):
      b.AddItem(i)
    items = b.Items()
    prev = -1
    for item in items:
      self.assertTrue(item > prev)
      prev = item
  def testKeepsLatestItem(self):
    """The most recently added item is always present."""
    b = reservoir._ReservoirBucket(5)
    for i in xrange(100):
      b.AddItem(i)
      last = b.Items()[-1]
      self.assertEqual(last, i)
  def testSizeOneBucket(self):
    """A size-1 bucket always holds exactly the latest item."""
    b = reservoir._ReservoirBucket(1)
    for i in xrange(20):
      b.AddItem(i)
      self.assertEqual(b.Items(), [i])
    self.assertEqual(b._num_items_seen, 20)
  def testSizeZeroBucket(self):
    """A size-0 bucket means unbounded: everything is kept."""
    b = reservoir._ReservoirBucket(0)
    for i in xrange(20):
      b.AddItem(i)
      self.assertEqual(b.Items(), list(range(i + 1)))
    self.assertEqual(b._num_items_seen, 20)
  def testSizeRequirement(self):
    """Negative or non-integer sizes are rejected."""
    with self.assertRaises(ValueError):
      reservoir._ReservoirBucket(-1)
    with self.assertRaises(ValueError):
      reservoir._ReservoirBucket(10.3)
  def testRemovesItems(self):
    """FilterItems drops non-matching items and adjusts _num_items_seen."""
    b = reservoir._ReservoirBucket(100)
    for i in xrange(10):
      b.AddItem(i)
    self.assertEqual(len(b.Items()), 10)
    self.assertEqual(b._num_items_seen, 10)
    self.assertEqual(b.FilterItems(lambda x: x <= 7), 2)
    self.assertEqual(len(b.Items()), 8)
    self.assertEqual(b._num_items_seen, 8)
  def testRemovesItemsWhenItemsAreReplaced(self):
    """Filtering a sampled bucket rescales _num_items_seen proportionally."""
    b = reservoir._ReservoirBucket(100)
    for i in xrange(10000):
      b.AddItem(i)
    self.assertEqual(b._num_items_seen, 10000)
    # Remove items
    num_removed = b.FilterItems(lambda x: x <= 7)
    self.assertGreater(num_removed, 92)
    self.assertEqual([], [item for item in b.Items() if item > 7])
    self.assertEqual(b._num_items_seen,
                     int(round(10000 * (1 - float(num_removed) / 100))))
  def testLazyFunctionEvaluationAndAlwaysKeepLast(self):
    """The item-transforming callable is only invoked for items actually kept."""
    class FakeRandom(object):
      # Always returns an out-of-range index, so once the bucket is full no
      # incoming item durably replaces an existing one.
      def randint(self, a, b):  # pylint:disable=unused-argument
        return 999
    class Incrementer(object):
      # Counts how many times the transform function is evaluated.
      def __init__(self):
        self.n = 0
      def increment_and_double(self, x):
        self.n += 1
        return x * 2
    # We've mocked the randomness generator, so that once it is full, the last
    # item will never get durable reservoir inclusion. Since always_keep_last is
    # false, the function should only get invoked 100 times while filling up
    # the reservoir. This laziness property is an essential performance
    # optimization.
    b = reservoir._ReservoirBucket(100, FakeRandom(), always_keep_last=False)
    incrementer = Incrementer()
    for i in xrange(1000):
      b.AddItem(i, incrementer.increment_and_double)
    self.assertEqual(incrementer.n, 100)
    self.assertEqual(b.Items(), [x * 2 for x in xrange(100)])
    # This time, we will always keep the last item, meaning that the function
    # should get invoked once for every item we add.
    b = reservoir._ReservoirBucket(100, FakeRandom(), always_keep_last=True)
    incrementer = Incrementer()
    for i in xrange(1000):
      b.AddItem(i, incrementer.increment_and_double)
    self.assertEqual(incrementer.n, 1000)
    self.assertEqual(b.Items(), [x * 2 for x in xrange(99)] + [999 * 2])
class ReservoirBucketStatisticalDistributionTest(tf.test.TestCase):
  """Checks that the kept sample is (approximately) uniformly distributed."""
  def setUp(self):
    self.total = 1000000
    self.samples = 10000
    self.n_buckets = 100
    self.total_per_bucket = self.total // self.n_buckets
    self.assertEqual(self.total % self.n_buckets, 0, 'total must be evenly '
                     'divisible by the number of buckets')
    self.assertTrue(self.total > self.samples, 'need to have more items '
                    'than samples')
  def AssertBinomialQuantity(self, measured):
    """Assert ``measured`` is within 6 sigma of the expected bucket count.

    NOTE(review): mean = p * samples with p = n_buckets / samples works out
    to n_buckets; this equals the expected per-bucket count
    (samples / n_buckets) only because samples == n_buckets**2 in setUp —
    confirm the formula if these constants ever change.
    """
    p = 1.0 * self.n_buckets / self.samples
    mean = p * self.samples
    variance = p * (1 - p) * self.samples
    error = measured - mean
    # Given that the buckets were actually binomially distributed, this
    # fails with probability ~2E-9
    passed = error * error <= 36.0 * variance
    self.assertTrue(passed, 'found a bucket with measured %d '
                    'too far from expected %d' % (measured, mean))
  def testBucketReservoirSamplingViaStatisticalProperties(self):
    # Not related to a 'ReservoirBucket', but instead number of buckets we put
    # samples into for testing the shape of the distribution
    b = reservoir._ReservoirBucket(_max_size=self.samples)
    # add one extra item because we always keep the most recent item, which
    # would skew the distribution; we can just slice it off the end instead.
    for i in xrange(self.total + 1):
      b.AddItem(i)
    # Histogram the kept items two ways: by contiguous range (div) and by
    # residue class (mod); both should look uniform.
    divbins = [0] * self.n_buckets
    modbins = [0] * self.n_buckets
    # Slice off the last item when we iterate.
    for item in b.Items()[0:-1]:
      divbins[item // self.total_per_bucket] += 1
      modbins[item % self.n_buckets] += 1
    for bucket_index in xrange(self.n_buckets):
      divbin = divbins[bucket_index]
      modbin = modbins[bucket_index]
      self.AssertBinomialQuantity(divbin)
      self.AssertBinomialQuantity(modbin)
if __name__ == '__main__':
  # Run all test cases in this module under the TensorFlow test runner.
  tf.test.main()
|
ftranschel/evoMPS | refs/heads/master | evoMPS/mps_gen.py | 1 | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 13 17:29:27 2011
@author: Ashley Milsted
"""
import scipy as sp
import scipy.linalg as la
import matmul as m
import tdvp_common as tm
import copy
import logging
log = logging.getLogger(__name__)
class EvoMPS_MPS_Generic(object):
    def __init__(self, N, D, q):
        """Creates a new EvoMPS_MPS_Generic object.

        This class implements basic operations on a generic MPS with
        open boundary conditions on a finite chain.

        Performs self.correct_bond_dimension().

        Sites are numbered 1 to N.
        self.A[n] is the parameter tensor for site n
        with shape == (q[n], D[n - 1], D[n]).

        Raises ValueError if D or q is not 1-dimensional of length N + 1.

        Parameters
        ----------
        N : int
            The number of lattice sites.
        D : ndarray
            A 1d array, length N + 1, of integers indicating the desired
            bond dimensions.
        q : ndarray
            A 1d array, length N + 1, of integers indicating the
            dimension of the hilbert space for each site.
            Entry 0 is ignored (there is no site 0).
        """
        # Memory order and scalar type used for all tensor allocations.
        self.odr = 'C'
        self.typ = sp.complex128
        self.sanity_checks = True
        """Whether to perform additional (potentially costly) sanity checks."""
        self.canonical_form = 'right'
        """Canonical form to use when performing restore_CF(). Possible
           settings are 'right' and 'left'."""
        self.eps = sp.finfo(self.typ).eps
        self.zero_tol = sp.finfo(self.typ).resolution
        """Tolerance for detecting zeros. This is used when (pseudo-) inverting
           l and r."""
        self.N = N
        """The number of sites. Do not change after initializing."""
        self.D = sp.array(D)
        """Vector containing the bond-dimensions. A[n] is a
           q[n] x D[n - 1] x D[n] tensor."""
        self.q = sp.array(q)
        """Vector containing the site Hilbert space dimensions. A[n] is a
           q[n] x D[n - 1] x D[n] tensor."""
        if (self.D.ndim != 1) or (self.q.ndim != 1):
            raise ValueError('D and q must be 1-dimensional!')
        if (self.D.shape[0] != N + 1) or (self.q.shape[0] != N + 1):
            raise ValueError('D and q must have length N + 1')
        # Clip D, allocate A/l/r, then fill A with the default product state.
        self.correct_bond_dimension()
        self._init_arrays()
        self.initialize_state()
def _init_arrays(self):
self.A = sp.empty((self.N + 1), dtype=sp.ndarray) #Elements 1..N
self.r = sp.empty((self.N + 1), dtype=sp.ndarray) #Elements 0..N
self.l = sp.empty((self.N + 1), dtype=sp.ndarray)
self.r[0] = sp.zeros((self.D[0], self.D[0]), dtype=self.typ, order=self.odr)
self.l[0] = sp.eye(self.D[0], self.D[0], dtype=self.typ).copy(order=self.odr) #Already set the 0th element (not a dummy)
for n in xrange(1, self.N + 1):
self.r[n] = sp.zeros((self.D[n], self.D[n]), dtype=self.typ, order=self.odr)
self.l[n] = sp.zeros((self.D[n], self.D[n]), dtype=self.typ, order=self.odr)
self.A[n] = sp.empty((self.q[n], self.D[n - 1], self.D[n]), dtype=self.typ, order=self.odr)
sp.fill_diagonal(self.r[self.N], 1.)
    def initialize_state(self):
        """Initializes the state to a hard-coded full rank state with norm 1.

        For sites where the bond dimension does not change, each physical
        slice A[n][s] is set to a diagonal matrix with entries 1/sqrt(q[n]).
        Where the bond dimension grows or shrinks, a staircase of entries is
        written across the physical slices to keep the tensor full rank.
        """
        for n in xrange(1, self.N + 1):
            self.A[n].fill(0)
            # Norm factor so that summing |f|^2 over the q[n] slices gives 1.
            f = sp.sqrt(1. / self.q[n])
            if self.D[n-1] == self.D[n]:
                # Square bond: identical scaled identities on every slice.
                for s in xrange(self.q[n]):
                    sp.fill_diagonal(self.A[n][s], f)
            else:
                x = 0
                y = 0
                s = 0
                if self.D[n] > self.D[n - 1]:
                    f = 1.
                # Walk a diagonal staircase through (slice, row, col),
                # wrapping into the next slice when a dimension is exhausted.
                # NOTE(review): the if/elif wrap order means only one index
                # resets per step — presumably relies on D, q being clipped
                # by correct_bond_dimension(); confirm for exotic D/q.
                for i in xrange(max((self.D[n], self.D[n - 1]))):
                    self.A[n][s, x, y] = f
                    x += 1
                    y += 1
                    if x >= self.A[n][s].shape[0]:
                        x = 0
                        s += 1
                    elif y >= self.A[n][s].shape[1]:
                        y = 0
                        s += 1
def randomize(self, do_update=True):
"""Randomizes the parameter tensors self.A.
Parameters
----------
do_update : bool (True)
Whether to perform self.update() after randomizing.
"""
for n in xrange(1, self.N + 1):
self.A[n] = ((sp.rand(*self.A[n].shape) - 0.5)
+ 1.j * (sp.rand(*self.A[n].shape) - 0.5))
self.A[n] /= la.norm(self.A[n])
if do_update:
self.update()
def add_noise(self, fac=1.0, do_update=True):
"""Adds some random (white) noise of a given magnitude to the parameter
tensors A.
Parameters
----------
fac : number
A factor determining the amplitude of the random noise.
do_update : bool (True)
Whether to perform self.update() after randomizing.
"""
for n in xrange(1, self.N + 1):
self.A[n].real += (sp.rand(*self.A[n].shape) - 0.5) * 2 * fac
self.A[n].imag += (sp.rand(*self.A[n].shape) - 0.5) * 2 * fac
if do_update:
self.update()
def correct_bond_dimension(self):
"""Reduces bond dimensions to the maximum physically useful values.
Bond dimensions will be adjusted where they are too high to be useful
(when they would be higher than the corresponding maximum
Schmidt ranks). The maximum value for D[n] is the minimum of the
dimensions of the two partial Hilbert spaces corresponding to a cut
between sites n and n + 1.
"""
self.D[0] = 1
self.D[self.N] = 1
qacc = 1
for n in xrange(self.N - 1, -1, -1):
if qacc < self.D.max(): #Avoid overflow!
qacc *= self.q[n + 1]
if self.D[n] > qacc:
self.D[n] = qacc
qacc = 1
for n in xrange(1, self.N + 1):
if qacc < self.D.max(): #Avoid overflow!
qacc *= self.q[n]
if self.D[n] > qacc:
self.D[n] = qacc
    def update(self, restore_CF=True, normalize=True, auto_truncate=False, restore_CF_after_trunc=True):
        """Updates secondary quantities to reflect the state parameters self.A.

        Must be used after changing the parameters self.A before calculating
        physical quantities, such as expectation values.

        Also (optionally) restores the canonical form.

        Parameters
        ----------
        restore_CF : bool (True)
            Whether to restore canonical form (right or left, per
            self.canonical_form).
        normalize : bool
            Whether to normalize the state in case restore_CF is False.
        auto_truncate : bool (False)
            Whether to automatically truncate the bond-dimension if
            rank-deficiency is detected. Requires restore_CF.
        restore_CF_after_trunc : bool (True)
            Whether to restore canonical form after truncation.
        """
        assert restore_CF or not auto_truncate, "auto_truncate requires restore_RCF"
        if restore_CF:
            self.restore_CF()
            if auto_truncate:
                # When we will restore CF afterwards anyway, skip collecting
                # the minimal-update data.
                data = self.auto_truncate(update=False,
                                          return_update_data=not restore_CF_after_trunc)
                if data:
                    log.warning("Auto-truncated! New D: %s", self.D)
                    if restore_CF_after_trunc:
                        self.restore_CF()
                    else:
                        # Cheap partial update using the pre-truncation l/r.
                        self._update_after_truncate(*data)
        else:
            # No gauge fixing: just recompute environments (and optionally
            # renormalize via A[N]).
            self.calc_l()
            if normalize:
                self.simple_renorm(update_r=False)
            self.calc_r()
def calc_l(self, n_low=-1, n_high=-1):
"""Updates the l matrices to reflect the current state parameters self.A.
Implements step 5 of the TDVP algorithm or, equivalently, eqn. (41).
(arXiv:1103.0936v2 [cond-mat.str-el])
"""
if n_low < 0:
n_low = 1
if n_high < 0:
n_high = self.N
for n in xrange(n_low, n_high + 1):
self.l[n] = tm.eps_l_noop(self.l[n - 1], self.A[n], self.A[n])
def calc_r(self, n_low=-1, n_high=-1):
"""Updates the r matrices using the current state parameters self.A.
Implements step 5 of the TDVP algorithm or, equivalently, eqn. (41).
(arXiv:1103.0936v2 [cond-mat.str-el])
"""
if n_low < 0:
n_low = 0
if n_high < 0:
n_high = self.N - 1
for n in xrange(n_high, n_low - 1, -1):
self.r[n] = tm.eps_r_noop(self.r[n + 1], self.A[n + 1], self.A[n + 1])
def simple_renorm(self, update_r=True):
"""Renormalize the state by altering A[N] by a factor.
We change A[N] only, which is a column vector because D[N] = 1, using a factor
equivalent to an almost-gauge transformation where all G's are the identity, except
G[N], which represents the factor. "Almost" means G[0] =/= G[N] (the norm is allowed to change).
Requires that l is up to date.
Note that this generally breaks canonical form
because we change r[N - 1] by the same factor.
By default, this also updates the r matrices to reflect the change in A[N].
Parameters
----------
update_r : bool (True)
Whether to call update all the r matrices to reflect the change.
"""
norm = self.l[self.N][0, 0].real
G_N = 1 / sp.sqrt(norm)
self.A[self.N] *= G_N
self.l[self.N][:] *= 1 / norm
if update_r:
for n in xrange(self.N):
self.r[n] *= 1 / norm
def restore_CF(self):
if self.canonical_form == 'right':
self.restore_RCF()
else:
self.restore_LCF()
    def restore_RCF(self, update_l=True, normalize=True, diag_l=True):
        """Use a gauge-transformation to restore right canonical form.

        Implements the conditions for right canonical form from sub-section
        3.1, theorem 1 of arXiv:quant-ph/0608197v2.

        This performs two 'almost' gauge transformations, where the 'almost'
        means we allow the norm to vary (if "normalize" = True).

        The last step (A[1]) is done diffently to the others since G[0],
        the gauge-transf. matrix, is just a number, which can be found more
        efficiently and accurately without using matrix methods.

        The last step (A[1]) is important because, if we have successfully made
        r[1] = 1 in the previous steps, it fully determines the normalization
        of the state via r[0] ( = l[N]).

        Optionally (normalize=False), the function will not attempt to make
        A[1] satisfy the orthonorm. condition, and will take G[0] = 1 = G[N],
        thus performing a pure gauge-transformation, but not ensuring complete
        canonical form.

        It is also possible to begin the process from a site n other than N,
        in case the sites > n are known to be in the desired form already.

        It is also possible to skip the diagonalization of the l's, such that
        only the right orthonormalization condition (r_n = eye) is met.

        By default, the l's are updated even if diag_l=False.

        Parameters
        ----------
        update_l : bool
            Whether to call calc_l() after completion (defaults to True)
        normalize : bool
            Whether to also attempt to normalize the state.
        diag_l : bool
            Whether to put l in diagonal form (defaults to True)
        """
        start = self.N
        G_n_i = sp.eye(self.D[start], dtype=self.typ) #This is actually just the number 1
        # Sweep right-to-left, making each r[n] the identity while passing
        # the inverse gauge matrix G_n_i on to the next site.
        for n in xrange(start, 1, -1):
            self.r[n - 1], G_n, G_n_i = tm.restore_RCF_r(self.A[n], self.r[n],
                                                         G_n_i, sc_data=('site', n),
                                                         zero_tol=self.zero_tol,
                                                         sanity_checks=self.sanity_checks)
        #Now do A[1]...
        #Apply the remaining G[1]^-1 from the previous step.
        for s in xrange(self.q[1]):
            self.A[1][s] = m.mmul(self.A[1][s], G_n_i)
        #Now finish off
        tm.eps_r_noop_inplace(self.r[1], self.A[1], self.A[1], out=self.r[0])
        if normalize:
            # G[0] is a scalar; fix the overall norm through A[1].
            G0 = 1. / sp.sqrt(self.r[0].squeeze().real)
            self.A[1] *= G0
            self.r[0][:] = 1
            if self.sanity_checks:
                r0 = tm.eps_r_noop(self.r[1], self.A[1], self.A[1])
                if not sp.allclose(r0, 1, atol=1E-12, rtol=1E-12):
                    log.warning("Sanity Fail in restore_RCF!: r_0 is bad / norm failure")
        if diag_l:
            # Second sweep, left-to-right, to bring the l's to diagonal form.
            G_nm1 = sp.eye(self.D[0], dtype=self.typ)
            for n in xrange(1, self.N):
                self.l[n], G_nm1, G_nm1_i = tm.restore_RCF_l(self.A[n],
                                                             self.l[n - 1],
                                                             G_nm1,
                                                             self.sanity_checks)
            #Apply remaining G_Nm1 to A[N]
            n = self.N
            for s in xrange(self.q[n]):
                self.A[n][s] = m.mmul(G_nm1, self.A[n][s])
            #Deal with final, scalar l[N]
            tm.eps_l_noop_inplace(self.l[n - 1], self.A[n], self.A[n], out=self.l[n])
            if self.sanity_checks:
                if not sp.allclose(self.l[self.N].real, 1, atol=1E-12, rtol=1E-12):
                    log.warning("Sanity Fail in restore_RCF!: l_N is bad / norm failure")
                    log.warning("l_N = %s", self.l[self.N].squeeze().real)
                for n in xrange(1, self.N + 1):
                    r_nm1 = tm.eps_r_noop(self.r[n], self.A[n], self.A[n])
                    #r_nm1 = tm.eps_r_noop(m.eyemat(self.D[n], self.typ), self.A[n], self.A[n])
                    if not sp.allclose(r_nm1, self.r[n - 1], atol=1E-11, rtol=1E-11):
                        log.warning("Sanity Fail in restore_RCF!: r_%u is bad (off by %g)", n, la.norm(r_nm1 - self.r[n - 1]))
        elif update_l:
            self.calc_l()
    def restore_LCF(self):
        """Use a gauge-transformation to restore left canonical form.

        Mirror image of restore_RCF(): a left-to-right sweep makes each l[n]
        the identity, the state is normalized via the final scalar l[N], and
        a second right-to-left sweep brings the r's to diagonal form.
        """
        Gm1 = sp.eye(self.D[0], dtype=self.typ) #This is actually just the number 1
        # First sweep: left-to-right, establishing l[n] = eye.
        for n in xrange(1, self.N):
            self.l[n], G, Gi = tm.restore_LCF_l(self.A[n], self.l[n - 1], Gm1,
                                                zero_tol=self.zero_tol,
                                                sanity_checks=self.sanity_checks)
            Gm1 = G
        #Now do A[N]...
        #Apply the remaining G[N - 1] from the previous step.
        for s in xrange(self.q[self.N]):
            self.A[self.N][s] = Gm1.dot(self.A[self.N][s])
        #Now finish off
        tm.eps_l_noop_inplace(self.l[self.N - 1], self.A[self.N], self.A[self.N], out=self.l[self.N])
        #normalize
        GNi = 1. / sp.sqrt(self.l[self.N].squeeze().real)
        self.A[self.N] *= GNi
        self.l[self.N][:] = 1
        if self.sanity_checks:
            lN = tm.eps_l_noop(self.l[self.N - 1], self.A[self.N], self.A[self.N])
            if not sp.allclose(lN, 1, atol=1E-12, rtol=1E-12):
                log.warning("Sanity Fail in restore_LCF!: l_N is bad / norm failure")
        #diag r
        # Second sweep: right-to-left, bringing the r's to diagonal form.
        Gi = sp.eye(self.D[self.N], dtype=self.typ)
        for n in xrange(self.N, 1, -1):
            self.r[n - 1], Gm1, Gm1_i = tm.restore_LCF_r(self.A[n], self.r[n],
                                                         Gi, self.sanity_checks)
            Gi = Gm1_i
        #Apply remaining G1i to A[1]
        for s in xrange(self.q[1]):
            self.A[1][s] = self.A[1][s].dot(Gi)
        #Deal with final, scalar r[0]
        tm.eps_r_noop_inplace(self.r[1], self.A[1], self.A[1], out=self.r[0])
        if self.sanity_checks:
            if not sp.allclose(self.r[0], 1, atol=1E-12, rtol=1E-12):
                log.warning("Sanity Fail in restore_LCF!: r_0 is bad / norm failure")
                log.warning("r_0 = %s", self.r[0].squeeze().real)
            for n in xrange(1, self.N + 1):
                l = tm.eps_l_noop(self.l[n - 1], self.A[n], self.A[n])
                if not sp.allclose(l, self.l[n], atol=1E-11, rtol=1E-11):
                    log.warning("Sanity Fail in restore_LCF!: l_%u is bad (off by %g)", n, la.norm(l - self.l[n]))
                    log.warning((l - self.l[n]).diagonal().real)
    def auto_truncate(self, update=True, zero_tol=None, return_update_data=False):
        """Automatically reduces the bond-dimension in case of rank-deficiency.

        Canonical form is required. Always perform self.restore_CF() first!

        The new bond dimension at each site is the number of (near-)nonzero
        diagonal entries of the diagonalized environment matrix (l for right
        canonical form, r for left canonical form).

        Parameters
        ----------
        update : bool (True)
            Whether to call self.update() after truncation.
        zero_tol : float
            Tolerance for interpretation of values as zero.
        return_update_data : bool
            Whether to return additional data needed to perform a minimal update.

        Returns
        -------
        truncated : bool
            Whether truncation was performed (if return_update_data == False).
        data : stuff
            Additional data needed by self._update_after_truncate() (if return_update_data == True).
        """
        if zero_tol is None:
            zero_tol = self.zero_tol
        new_D = self.D.copy()
        if self.canonical_form == 'right':
            for n in xrange(1, self.N + 1):
                # l[n] may be a special diagonal-matrix wrapper exposing
                # .diag, or a plain ndarray with .diagonal().
                try:
                    ldiag = self.l[n].diag
                except AttributeError:
                    ldiag = self.l[n].diagonal()
                new_D[n] = sp.count_nonzero(abs(ldiag) > zero_tol)
        else:
            for n in xrange(1, self.N + 1):
                try:
                    rdiag = self.r[n].diag
                except AttributeError:
                    rdiag = self.r[n].diagonal()
                new_D[n] = sp.count_nonzero(abs(rdiag) > zero_tol)
        if not sp.all(new_D == self.D):
            data = self.truncate(new_D, update=update, return_update_data=return_update_data)
            if update:
                self.update()
            if return_update_data:
                return data
            else:
                return True
        else:
            # No rank deficiency found; nothing truncated.
            return False
    def truncate(self, new_D, update=True, return_update_data=False):
        """Reduces the bond-dimensions by truncating the least-significant Schmidt vectors.

        The parameters must be in canonical form to ensure that
        the discarded parameters correspond to the smallest Schmidt
        coefficients. Always perform self.restore_RCF() first!

        Each bond-dimension can either be reduced or left unchanged.

        The resulting parameters self.A will not generally have canonical
        form after truncation.

        Parameters
        ----------
        new_D : list or ndarray of int
            The new bond-dimensions in a vector of length N + 1.
        update : bool (True)
            Whether to call self.update() after truncation (turn off if you plan to do it yourself).
        return_update_data : bool
            Whether to return additional data needed to perform a minimal update.

        Returns
        -------
        data : stuff
            Additional data needed by self._update_after_truncate() (if return_update_data == True).
        """
        new_D = sp.array(new_D)
        assert new_D.shape == self.D.shape, "new_D must have same shape as self.D"
        assert sp.all(new_D <= self.D), "new bond-dimensions must be less than or equal to current dimensions"
        # First and last site indices where any dimension actually shrinks.
        last_trunc = sp.argwhere(self.D - new_D).max()
        first_trunc = sp.argwhere(self.D - new_D).min()
        tmp_A = self.A
        old_l = self.l
        old_r = self.r
        self.D = new_D
        self._init_arrays()
        for n in xrange(1, self.N + 1):
            # Keep the trailing D[n-1] x D[n] sub-block of each slice —
            # assumes canonical form places the significant Schmidt vectors
            # in the trailing components (consistent with restore_RCF/LCF).
            self.A[n][:] = tmp_A[n][:, -self.D[n - 1]:, -self.D[n]:]
        if update:
            self.update()
        if return_update_data:
            return last_trunc, old_l, first_trunc, old_r
def _update_after_truncate(self, n_last_trunc, old_l, n_first_trunc, old_r):
    # Cheaply restore the l and r tensors after truncate(): away from the
    # truncated bonds they keep their canonical (diagonal / identity)
    # form, so only the span touching the truncation is recomputed.
    if self.canonical_form == 'right':
        self.r[0][0, 0] = 1
        # In RCF the l[n] are diagonal; keep the last D[n] entries of the
        # old Schmidt spectra, matching the slicing done in truncate().
        for n in xrange(1, self.N):
            self.l[n] = m.simple_diag_matrix(old_l[n].diag[-self.D[n]:], dtype=self.typ)
        self.l[self.N][0, 0] = 1
        # To the right of the last truncated bond, r[n] is still identity.
        for n in xrange(self.N - 1, n_last_trunc - 1, - 1):
            self.r[n] = m.eyemat(self.D[n], dtype=self.typ)
        # Recompute r only up to the truncated region.
        self.calc_r(n_high=n_last_trunc - 1)
    else:
        # Left-canonical case: mirror image of the above.
        self.l[0][0, 0] = 1
        for n in xrange(1, self.N):
            self.r[n] = m.simple_diag_matrix(old_r[n].diag[-self.D[n]:], dtype=self.typ)
        self.r[0][0, 0] = 1
        # To the left of the first truncated bond, l[n] is still identity.
        for n in xrange(1, n_first_trunc):
            self.l[n] = m.eyemat(self.D[n], dtype=self.typ)
        self.calc_l(n_low=n_first_trunc)
    # Truncation discards weight; renormalize the state.
    self.simple_renorm()
def check_RCF(self):
    """Tests for right canonical form.

    Uses the criteria listed in sub-section 3.1, theorem 1 of arXiv:quant-ph/0608197v2.

    This is a consistency check mainly intended for debugging purposes.

    FIXME: The tolerances appear to be too tight!

    Returns
    -------
    (rnsOK, ls_trOK, ls_pos, ls_diag, normOK) : tuple of bool
        rnsOK: Right orthonormalization is fulfilled (self.r[n] = eye)
        ls_trOK: all self.l[n] have trace 1
        ls_pos: all self.l[n] are positive-definite
        ls_diag: all self.l[n] are diagonal
        normOK: the state is normalized
    """
    rnsOK = True
    ls_trOK = True
    # NOTE(review): ls_herm is computed in the loop below but not part of
    # the return value -- presumably a leftover debug check; confirm
    # before removing.
    ls_herm = True
    ls_pos = True
    ls_diag = True
    for n in xrange(1, self.N + 1):
        rnsOK = rnsOK and sp.allclose(self.r[n], sp.eye(self.r[n].shape[0]), atol=self.eps*2, rtol=0)
        ls_herm = ls_herm and sp.allclose(self.l[n] - m.H(self.l[n]), 0, atol=self.eps*2)
        ls_trOK = ls_trOK and sp.allclose(sp.trace(self.l[n]), 1, atol=self.eps*1000, rtol=0)
        ls_pos = ls_pos and all(la.eigvalsh(self.l[n]) > 0)
        ls_diag = ls_diag and sp.allclose(self.l[n], sp.diag(self.l[n].diagonal()))
    # Overall norm: in RCF the final l should be the scalar 1.
    normOK = sp.allclose(self.l[self.N], 1., atol=self.eps*1000, rtol=0)
    return (rnsOK, ls_trOK, ls_pos, ls_diag, normOK)
def expect_1s(self, op, n):
    """Compute the expectation value of a single-site operator at site n.

    The operator is either a q[n] x q[n] matrix or a generating function
    such that op[s, t] (or op(s, t)) equals <s|op|t>.

    The state must be up-to-date -- see self.update()!

    Parameters
    ----------
    op : ndarray or callable
        The operator.
    n : int
        The site number (1 <= n <= N).

    Returns
    -------
    expval : floating point number
        The expectation value (data type may be complex)
    """
    if callable(op):
        # Materialize the generating function into a dense matrix.
        vec_op = sp.vectorize(op, otypes=[sp.complex128])
        op = sp.fromfunction(vec_op, (self.q[n], self.q[n]))
    An = self.A[n]
    contracted = tm.eps_r_op_1s(self.r[n], An, An, op)
    return m.adot(self.l[n - 1], contracted)
def expect_2s(self, op, n):
    """Compute the expectation value of a nearest-neighbour two-site operator.

    The operator is either a q[n] x q[n + 1] x q[n] x q[n + 1] array with
    op[s, t, u, v] = <st|op|uv>, or a callable op(s, t, u, v) returning
    the same matrix elements.

    The state must be up-to-date -- see self.update()!

    Parameters
    ----------
    op : ndarray or callable
        The operator array or function.
    n : int
        The leftmost site number (operator acts on n, n + 1).

    Returns
    -------
    expval : floating point number
        The expectation value (data type may be complex)
    """
    A1 = self.A[n]
    A2 = self.A[n + 1]
    AA = tm.calc_AA(A1, A2)
    if callable(op):
        # Materialize the generating function into a dense array.
        shape = (A1.shape[0], A2.shape[0], A1.shape[0], A2.shape[0])
        op = sp.fromfunction(sp.vectorize(op, otypes=[sp.complex128]), shape)
    C = tm.calc_C_mat_op_AA(op, AA)
    contracted = tm.eps_r_op_2s_C12_AA34(self.r[n + 1], C, AA)
    return m.adot(self.l[n - 1], contracted)
def expect_3s(self, op, n):
    """Compute the expectation value of a nearest-neighbour three-site operator.

    The operator is either a q[n] x q[n + 1] x q[n + 2] x q[n] x
    q[n + 1] x q[n + 2] array with op[s, t, u, v, w, x] = <stu|op|vwx>,
    or a callable op(s, t, u, v, w, x) returning the same matrix
    elements.

    The state must be up-to-date -- see self.update()!

    Parameters
    ----------
    op : ndarray or callable
        The operator array or function.
    n : int
        The leftmost site number (operator acts on n, n + 1, n + 2).

    Returns
    -------
    expval : floating point number
        The expectation value (data type may be complex)
    """
    A1 = self.A[n]
    A2 = self.A[n + 1]
    A3 = self.A[n + 2]
    AAA = tm.calc_AAA(A1, A2, A3)
    if callable(op):
        # Materialize the generating function into a dense array.
        dims = (A1.shape[0], A2.shape[0], A3.shape[0])
        op = sp.fromfunction(sp.vectorize(op, otypes=[sp.complex128]), dims + dims)
    C = tm.calc_C_3s_mat_op_AAA(op, AAA)
    contracted = tm.eps_r_op_3s_C123_AAA456(self.r[n + 2], C, AAA)
    return m.adot(self.l[n - 1], contracted)
def expect_1s_1s(self, op1, op2, n1, n2):
    """Compute the expectation value of two single-site operators on two sites.

    The result is < op1 op2 >. See expect_1s(). Requires n1 < n2.

    The state must be up-to-date -- see self.update()!

    Parameters
    ----------
    op1 : ndarray or callable
        The first operator, acting on the first site.
    op2 : ndarray or callable
        The second operator, acting on the second site.
    n1 : int
        The site number of the first site.
    n2 : int
        The site number of the second site (must be > n1).

    Returns
    -------
    expval : floating point number
        The expectation value (data type may be complex)
    """
    if callable(op1):
        op1 = sp.fromfunction(sp.vectorize(op1, otypes=[sp.complex128]),
                              (self.q[n1], self.q[n1]))
    if callable(op2):
        op2 = sp.fromfunction(sp.vectorize(op2, otypes=[sp.complex128]),
                              (self.q[n2], self.q[n2]))
    # Apply op2 at n2, then transfer the partial contraction leftwards
    # through the intervening sites, and finally apply op1 at n1.
    acc = tm.eps_r_op_1s(self.r[n2], self.A[n2], self.A[n2], op2)
    site = n2 - 1
    while site > n1:
        acc = tm.eps_r_noop(acc, self.A[site], self.A[site])
        site -= 1
    acc = tm.eps_r_op_1s(acc, self.A[n1], self.A[n1], op1)
    return m.adot(self.l[n1 - 1], acc)
def density_1s(self, n):
    """Returns a reduced density matrix for a single site.

    The site number basis is used: rho[s, t] with 0 <= s, t < q[n].

    The state must be up-to-date -- see self.update()!

    Parameters
    ----------
    n : int
        The site number.

    Returns
    -------
    rho : ndarray
        Reduced density matrix in the number basis.
    """
    rho = sp.empty((self.q[n], self.q[n]), dtype=sp.complex128)
    r_n = self.r[n]
    # NOTE: the original pre-allocated r_nm1 with sp.empty_like(); that
    # buffer was dead (rebound on the first loop pass) and is removed.
    for s in xrange(self.q[n]):
        for t in xrange(self.q[n]):
            # <s|rho|t> = l[n-1] . (A[n][t] r[n] A[n][s]^dagger)
            r_nm1 = m.mmul(self.A[n][t], r_n, m.H(self.A[n][s]))
            rho[s, t] = m.adot(self.l[n - 1], r_nm1)
    return rho
def density_2s(self, n1, n2):
    """Returns a reduced density matrix for a pair of (separated) sites.

    The site number basis is used: rho[s * q[n1] + u, t * q[n1] + v]
    with 0 <= s, t < q[n1] and 0 <= u, v < q[n2].

    NOTE(review): the flattened index uses q[n1] as the stride for the
    second-site index; this only tiles the q[n1] * q[n2] matrix
    consistently when q[n1] == q[n2] -- confirm for inhomogeneous chains.

    The state must be up-to-date -- see self.update()!

    Parameters
    ----------
    n1 : int
        The site number of the first site.
    n2 : int
        The site number of the second site (must be > n1).

    Returns
    -------
    rho : ndarray
        Reduced density matrix in the number basis.
    """
    rho = sp.empty((self.q[n1] * self.q[n2], self.q[n1] * self.q[n2]), dtype=sp.complex128)
    for s2 in xrange(self.q[n2]):
        for t2 in xrange(self.q[n2]):
            r_n2 = m.mmul(self.A[n2][t2], self.r[n2], m.H(self.A[n2][s2]))
            r_n = r_n2
            # Transfer the partial contraction left through the sites
            # strictly between n1 and n2.
            for n in reversed(xrange(n1 + 1, n2)):
                r_n = tm.eps_r_noop(r_n, self.A[n], self.A[n])
            for s1 in xrange(self.q[n1]):
                for t1 in xrange(self.q[n1]):
                    r_n1 = m.mmul(self.A[n1][t1], r_n, m.H(self.A[n1][s1]))
                    tmp = m.adot(self.l[n1 - 1], r_n1)
                    rho[s1 * self.q[n1] + s2, t1 * self.q[n1] + t2] = tmp
    return rho
def apply_op_1s(self, op, n, do_update=True):
    """Apply a single-site operator to site n, modifying self.A in place.

    By default, this performs self.update(), which also restores state
    normalization.

    Parameters
    ----------
    op : ndarray or callable
        The single-site operator. See self.expect_1s().
    n : int
        The site to apply the operator to.
    do_update : bool
        Whether to update after applying the operator.
    """
    if callable(op):
        # Materialize the generating function into a dense matrix.
        op = sp.fromfunction(sp.vectorize(op, otypes=[sp.complex128]),
                             (self.q[n], self.q[n]))
    old_An = self.A[n]
    new_An = sp.zeros_like(old_An)
    # new_A[s] = sum_t op[s, t] * A[t]
    for s in xrange(self.q[n]):
        for t in xrange(self.q[n]):
            new_An[s] += old_An[t] * op[s, t]
    self.A[n] = new_An
    if do_update:
        self.update()
def save_state(self, file):
    """Saves the parameter tensors self.A to a file.

    Uses numpy binary format.

    Parameters
    ----------
    file : path or file
        The file to save the state into.
    """
    # NOTE: the parameter name shadows the 'file' builtin, but renaming it
    # would break callers using keyword arguments.
    sp.save(file, self.A)
def load_state(self, file):
    """Loads the parameter tensors self.A from a file.

    The saved state must contain the right number of tensors with
    the correct shape corresponding to self.N and self.q.
    self.D will be recovered from the saved state.

    Parameters
    ----------
    file : path or file
        The file to load the state from.
    """
    tmp_A = sp.load(file)
    # Recover the bond dimensions from the loaded tensors: each A[n] has
    # shape (q[n], D[n - 1], D[n]), so D[n] is the third axis of A[n].
    self.D[0] = 1
    for n in xrange(self.N):
        self.D[n + 1] = tmp_A[n + 1].shape[2]
    # Re-size internal arrays for the recovered D, then adopt the loaded
    # tensors (the freshly initialized self.A is discarded).
    self._init_arrays()
    self.A = tmp_A
|
s-m-i-t-a/flask-musers | refs/heads/master | tests/test_user_model.py | 1 | import six
import pytest
from mock import patch, call, MagicMock, Mock
from blinker import signal
from bson.objectid import ObjectId
from mongoengine.errors import NotUniqueError
from flask_musers.models import (
User,
UserError,
is_allowed,
NotAllowedError,
EmailNotFound,
InvalidPassword,
validate_password
)
from tests import validator
class TestUserModel(object):
    """Unit tests for flask_musers.models.User.

    Most tests use the "db" fixture for a live (test) MongoDB connection;
    tests that never touch the database use the lighter "app" fixture.
    """
    @pytest.mark.usefixtures("db")
    def test_user_register(self):
        email = 'jozin@zbazin.cz'
        password = 'Nevimvim_)12123'
        User.register(email=email, password=password, activated=True)
        u = User.objects.first()
        assert u.email == email
        assert u.check_password(password)
        assert u.activated
    @pytest.mark.usefixtures("db")
    def test_user_register_return_new_user_object(self):
        email = 'jozin@zbazin.cz'
        password = 'Nevimvim_)12123'
        u = User.register(email=email, password=password, activated=True)
        assert u.email == email
        assert u.check_password(password)
        assert u.activated
    @pytest.mark.usefixtures("db")
    def test_raise_error_when_user_register_again(self):
        # email is a unique field, so a second registration must fail.
        email = 'jozin@zbazin.cz'
        password = 'Nevimvim_)12123'
        User.register(email=email, password=password, activated=True)
        with pytest.raises(NotUniqueError):
            User.register(email=email, password=password, activated=True)
    @pytest.mark.usefixtures("db")
    def test_get_id_return_string_when_object_is_saved(self):
        u = User.register(email='nevim@nevim.cz', password='Jedna_dva_3', activated=True)
        uid = str(u.pk)
        assert uid == u.get_id()
    @pytest.mark.usefixtures("app")
    def test_get_id_return_none_when_is_unsaved(self):
        # An unsaved document has no primary key yet.
        u = User(email='nevim@nevim.cz', activated=True)
        u.set_password('Jedna_dva_3')
        assert u.get_id() is None
    @pytest.mark.usefixtures("db")
    def test_active_queryset_returns_only_active_users(self):
        # Register 20 users, every other one activated.
        for i in range(20):
            User.register(email='user%d@foo.cz' % i, password='pass]W_%d' % i, activated=i % 2 == 0)
        active_users = User.active.all()
        assert len(active_users) == 10
        assert all([u.activated for u in active_users])
    @pytest.mark.usefixtures("app")
    @patch('flask_musers.models.pbkdf2_sha256.encrypt')
    def test_encrypt_user_password(self, mock_encrypt):
        password = 'Nevimvim_)12123'
        u = User()
        u.set_password(password)
        assert mock_encrypt.called
        kall = call(password)
        assert mock_encrypt.call_args == kall
        assert u._password == mock_encrypt.return_value
    @patch('flask_musers.models.pbkdf2_sha256.encrypt')
    @patch('flask_musers.models.validate_password')
    def test_validate_password(self, mock_validate, mock_encrypt):
        # set_password must validate before encrypting.
        password = 'Nevimvim_)12123'
        u = User()
        u.set_password(password)
        assert mock_validate.called
        assert mock_validate.call_args == call(password)
    def test_user_cant_be_anonymous(self):
        u = User()
        assert not u.is_anonymous()
    def test_user_is_always_authenticated(self):
        u = User()
        assert u.is_authenticated()
    def test_is_active_when_user_is_activated(self):
        u = User(activated=True)
        assert u.is_active()
    def test_object_text_representation(self):
        email = 'jozin@zbazin.cz'
        u = User(email=email)
        assert str(u) == email
        assert repr(u) == email
    @pytest.mark.usefixtures("db")
    def test_get_user_return_only_active_user(self):
        # Inactive users must not be retrievable via get_user().
        email = 'jozin@zbazin.cz'
        password = 'Nevimvim_)12123'
        User.register(email=email, password=password, activated=False)
        with pytest.raises(UserError):
            User.get_user(email=email, password=password)
    def test_get_user_raise_error_when_user_not_found(self):
        # raise UserError when user not found
        email = 'jozin@zbazin.cz'
        password = 'Nevimvim_)12123'
        with pytest.raises(UserError):
            User.get_user(email=email, password=password)
    @pytest.mark.usefixtures("db")
    def test_get_user_raise_error_when_password_is_wrong(self):
        # raise UserError when the password does not match
        email = 'jozin@zbazin.cz'
        password = 'Nevimvim_)12123'
        User.register(email=email, password=password, activated=True)
        with pytest.raises(UserError):
            User.get_user(email=email, password='asdasd')
    @pytest.mark.usefixtures("db")
    def test_get_user_return_user(self):
        email = 'jozin@zbazin.cz'
        password = 'Nevimvim_)12123'
        ur = User.register(email=email, password=password, activated=True)
        u = User.get_user(email=email, password=password)
        assert ur.pk == u.pk
    @pytest.mark.usefixtures("db")
    def test_get_active_user_by_pk_or_none_return_active_user(self):
        email = 'jozin@zbazin.cz'
        password = 'Nevimvim_)12123'
        ur = User.register(email=email, password=password, activated=True)
        u = User.get_active_user_by_pk_or_none(str(ur.pk))
        assert isinstance(u, User)
        assert u.pk == ur.pk
    @pytest.mark.usefixtures("db")
    def test_get_active_user_by_pk_or_none_return_none_when_user_is_inactive(self):
        email = 'jozin@zbazin.cz'
        password = 'Nevimvim_)12123'
        ur = User.register(email=email, password=password, activated=False)
        u = User.get_active_user_by_pk_or_none(str(ur.pk))
        assert u is None
    @pytest.mark.usefixtures("db")
    def test_get_active_user_by_pk_or_none_return_none_when_user_dont_exists(self):
        # A syntactically valid but unknown ObjectId yields None, not an error.
        oid = ObjectId()
        u = User.get_active_user_by_pk_or_none(str(oid))
        assert u is None
    @pytest.mark.usefixtures("db")
    def test_change_email(self):
        email = 'jozin@zbazin.cz'
        new_email = 'new@mail.com'
        password = 'Nevimvim_)12123'
        user = User.register(email=email, password=password, activated=True)
        assert user.email == email
        user.change_email(new_email, password=password)
        user = User.get_active_user_by_pk_or_none(str(user.pk))
        assert user.email == new_email
    @pytest.mark.usefixtures("db")
    def test_emit_signal_after_email_change(self):
        email = 'jozin@zbazin.cz'
        new_email = 'new@mail.com'
        password = 'Nevimvim_)12123'
        user = User.register(email=email, password=password, activated=True)
        changed = signal('musers-email-changed')
        @changed.connect
        def catch_signal(user, data):
            # Flag on the function object records that the signal fired.
            catch_signal._called = True
            assert user.email == new_email
        catch_signal._called = False
        user.change_email(new_email, password=password)
        assert catch_signal._called, "Signal has not been captured"
    @pytest.mark.usefixtures("db")
    def test_change_email_signal_contains_new_and_old_email(self):
        email = 'jozin@zbazin.cz'
        new_email = 'new@mail.com'
        password = 'Nevimvim_)12123'
        user = User.register(email=email, password=password, activated=True)
        changed = signal('musers-email-changed')
        @changed.connect
        def catch_signal(user, data):
            catch_signal._called = True
            assert user.email == new_email
            assert data['new'] == new_email
            assert data['old'] == email
        catch_signal._called = False
        user.change_email(new_email, password=password)
        assert catch_signal._called, "Signal has not been captured"
    @pytest.mark.usefixtures("db")
    def test_change_password(self):
        email = 'jozin@zbazin.cz'
        password = 'Nevimvim_)12123'
        new_password = 'new_passW0rD'
        user = User.register(email=email, password=password, activated=True)
        user.change_password(new_password, password=password)
        user = User.get_active_user_by_pk_or_none(str(user.pk))
        assert user.check_password(new_password)
    @pytest.mark.usefixtures("db")
    def test_emit_signal_after_password_change(self):
        email = 'jozin@zbazin.cz'
        password = 'Nevimvim_)12123'
        new_password = 'new_passW0rD'
        user = User.register(email=email, password=password, activated=True)
        changed = signal('musers-password-changed')
        catch = MagicMock('catch_signal')
        changed.connect(catch)
        user.change_password(new_password, password=password)
        assert catch.called, "Signal has not been captured"
        assert catch.call_args == call(user)
    @pytest.mark.usefixtures("db")
    def test_find_by_email_return_user_when_found(self):
        email = 'jozin@zbazin.cz'
        password = 'Nevimvim_)12123'
        user = User.register(email=email, password=password, activated=True)
        u = User.get_by_email(email)
        assert u.pk == user.pk
        assert u.email == user.email
        assert u._password == user._password
    @pytest.mark.usefixtures("db")
    def test_raise_email_not_found(self):
        with pytest.raises(EmailNotFound):
            User.get_by_email('bad@email.com')
class TestIsAllowedDecorator(object):
    """Tests for the is_allowed decorator, which gates method calls on a
    correct password keyword argument."""
    @pytest.mark.usefixtures("db")
    def test_allow_call_function(self):
        email = 'jozin@zbazin.cz'
        password = 'Nevimvim_)12123'
        @is_allowed
        def f(self):
            return self.get_id()
        # Attach the decorated function as a method on User for the test.
        User.f = f
        user = User.register(email=email, password=password, activated=True)
        assert user.f(password=password) == user.get_id()
    @pytest.mark.usefixtures("db")
    def test_call_not_allowed(self):
        email = 'jozin@zbazin.cz'
        password = 'Nevimvim_)12123'
        @is_allowed
        def f(self):
            return self.get_id()
        User.f = f
        user = User.register(email=email, password=password, activated=True)
        # A wrong password must raise rather than call through.
        with pytest.raises(NotAllowedError):
            user.f(password='bad password')
class TestValidatePassword(object):
    """Tests for validate_password's pluggable validator lookup via the
    MUSERS_PASSWORD_VALIDATOR app config key (dotted import path)."""
    def test_use_custom_validator_and_raise_error(self, app):
        app.config['MUSERS_PASSWORD_VALIDATOR'] = 'tests.bad_validator'
        with pytest.raises(InvalidPassword) as excinfo:
            validate_password('pass')
        assert excinfo.value.message == 'Error'
    def test_use_custom_validator(self, app):
        app.config['MUSERS_PASSWORD_VALIDATOR'] = 'tests.validator'
        validate_password('pass')
        # tests.validator is a mock; check it was invoked with the password.
        assert validator.called
        assert validator.call_args == call('pass')
|
nkgilley/home-assistant | refs/heads/dev | tests/components/tado/test_config_flow.py | 6 | """Test the Tado config flow."""
import requests
from homeassistant import config_entries, setup
from homeassistant.components.tado.const import DOMAIN
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from tests.async_mock import MagicMock, patch
from tests.common import MockConfigEntry
def _get_mock_tado_api(getMe=None):
mock_tado = MagicMock()
if isinstance(getMe, Exception):
type(mock_tado).getMe = MagicMock(side_effect=getMe)
else:
type(mock_tado).getMe = MagicMock(return_value=getMe)
return mock_tado
async def test_form(hass):
    """Test we can setup though the user path."""
    await setup.async_setup_component(hass, "persistent_notification", {})
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["errors"] == {}
    # Account with exactly one home: the flow uses its name as the entry title.
    mock_tado_api = _get_mock_tado_api(getMe={"homes": [{"id": 1, "name": "myhome"}]})
    with patch(
        "homeassistant.components.tado.config_flow.Tado", return_value=mock_tado_api,
    ), patch(
        "homeassistant.components.tado.async_setup", return_value=True
    ) as mock_setup, patch(
        "homeassistant.components.tado.async_setup_entry", return_value=True,
    ) as mock_setup_entry:
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {"username": "test-username", "password": "test-password"},
        )
    assert result2["type"] == "create_entry"
    assert result2["title"] == "myhome"
    assert result2["data"] == {
        "username": "test-username",
        "password": "test-password",
    }
    await hass.async_block_till_done()
    # The component setup hooks must each have been called exactly once.
    assert len(mock_setup.mock_calls) == 1
    assert len(mock_setup_entry.mock_calls) == 1
async def test_import(hass):
    """Test we can import."""
    await setup.async_setup_component(hass, "persistent_notification", {})
    mock_tado_api = _get_mock_tado_api(getMe={"homes": [{"id": 1, "name": "myhome"}]})
    with patch(
        "homeassistant.components.tado.config_flow.Tado", return_value=mock_tado_api,
    ), patch(
        "homeassistant.components.tado.async_setup", return_value=True
    ) as mock_setup, patch(
        "homeassistant.components.tado.async_setup_entry", return_value=True,
    ) as mock_setup_entry:
        # SOURCE_IMPORT goes straight to entry creation with YAML data.
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_IMPORT},
            data={"username": "test-username", "password": "test-password"},
        )
    assert result["type"] == "create_entry"
    assert result["title"] == "myhome"
    assert result["data"] == {
        "username": "test-username",
        "password": "test-password",
    }
    await hass.async_block_till_done()
    assert len(mock_setup.mock_calls) == 1
    assert len(mock_setup_entry.mock_calls) == 1
async def test_form_invalid_auth(hass):
    """Test we handle invalid auth."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    # HTTP 401 from getMe() maps to the "invalid_auth" form error.
    response_mock = MagicMock()
    type(response_mock).status_code = 401
    mock_tado_api = _get_mock_tado_api(getMe=requests.HTTPError(response=response_mock))
    with patch(
        "homeassistant.components.tado.config_flow.Tado", return_value=mock_tado_api,
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {"username": "test-username", "password": "test-password"},
        )
    assert result2["type"] == "form"
    assert result2["errors"] == {"base": "invalid_auth"}
async def test_form_cannot_connect(hass):
    """Test we handle cannot connect error."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    # Non-auth HTTP failure (500) maps to the "cannot_connect" form error.
    response_mock = MagicMock()
    type(response_mock).status_code = 500
    mock_tado_api = _get_mock_tado_api(getMe=requests.HTTPError(response=response_mock))
    with patch(
        "homeassistant.components.tado.config_flow.Tado", return_value=mock_tado_api,
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {"username": "test-username", "password": "test-password"},
        )
    assert result2["type"] == "form"
    assert result2["errors"] == {"base": "cannot_connect"}
async def test_no_homes(hass):
    """Test we handle no homes error."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    # A valid account with an empty homes list maps to the "no_homes" error.
    mock_tado_api = _get_mock_tado_api(getMe={"homes": []})
    with patch(
        "homeassistant.components.tado.config_flow.Tado", return_value=mock_tado_api,
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {"username": "test-username", "password": "test-password"},
        )
    assert result2["type"] == "form"
    assert result2["errors"] == {"base": "no_homes"}
async def test_form_homekit(hass):
    """Test that we abort from homekit if tado is already setup."""
    await setup.async_setup_component(hass, "persistent_notification", {})
    # First homekit discovery: no existing entry, so a form is shown and
    # the flow is keyed by the device MAC.
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": "homekit"},
        data={"properties": {"id": "AA:BB:CC:DD:EE:FF"}},
    )
    assert result["type"] == "form"
    assert result["errors"] == {}
    flow = next(
        flow
        for flow in hass.config_entries.flow.async_progress()
        if flow["flow_id"] == result["flow_id"]
    )
    assert flow["context"]["unique_id"] == "AA:BB:CC:DD:EE:FF"
    # With a config entry already present, the same discovery must abort.
    entry = MockConfigEntry(
        domain=DOMAIN, data={CONF_USERNAME: "mock", CONF_PASSWORD: "mock"}
    )
    entry.add_to_hass(hass)
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": "homekit"},
        data={"properties": {"id": "AA:BB:CC:DD:EE:FF"}},
    )
    assert result["type"] == "abort"
|
MediaKraken/MediaKraken_Deployment | refs/heads/master | source/castpy/cast.py | 1 | #!/usr/bin/python
# Generate a simple web slideshow
# for use with a Chromecast.
#
# Copyright (c) 2014 by Jim Lawless
# See MIT/X11 license at
# http://www.mailsend-online.com/license2014.php
#
import os
import string
import SimpleHTTPServer
import SocketServer
# Milliseconds each slide is displayed.
delay_millis = "10000"

# Build a JavaScript list of quoted image paths from the img/ directory.
# (','.join replaces the original manual trailing-comma bookkeeping.)
images = os.listdir('img')
html = ','.join('"img/%s"' % img for img in images)

with open('template.htm', "r") as tplfile:
    payload = tplfile.read()

# Replace $$1 and $$2 with the delay in milliseconds and the generated
# list of images. Write the output to index.html.
# (str.replace replaces the deprecated string.replace module function.)
payload = payload.replace("$$1", delay_millis)
payload = payload.replace("$$2", html)

with open("index.html", "w") as indexfile:
    indexfile.write(payload)

# Now, start serving up pages on port 80.
Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
httpd = SocketServer.TCPServer(("", 80), Handler)
# BUG FIX: the message was split across two statements ('print' on its own
# line followed by a bare string), which printed a blank line and discarded
# the message. Print it properly (works on both Python 2 and 3).
print("HTTP server running...")
httpd.serve_forever()
|
Distrotech/intellij-community | refs/heads/master | python/testData/refactoring/rename/renameGlobal_after.py | 83 | bar = []
def f1():
global bar
bar = [1, 2, 3]
def f2(x):
global bar
bar = bar + [x]
if 1 in bar:
return bar
def f3(x):
return bar + [x]
def f4(x):
global bar
return bar + [x]
def f5(foo):
return foo
|
acsone/server-tools | refs/heads/8.0 | profiler/controllers/__init__.py | 7 | # coding: utf-8
# License AGPL-3 or later (http://www.gnu.org/licenses/lgpl).
# Copyright 2014 Anybox <http://anybox.fr>
# Copyright 2016 Vauxoo (https://www.vauxoo.com) <info@vauxoo.com>
from . import main
|
PegasusWang/whirlwind | refs/heads/master | whirlwind/util/__init__.py | 2 | import re
class Util(object):
    """Miscellaneous static helpers."""
    @staticmethod
    def normalize(username):
        """Lower-case and sanitize a username/email; return None for empty input."""
        if not username:
            return None
        # allow legal email address characters
        cleaned = username.strip().lower()
        cleaned = re.sub(r'[^a-z0-9\\.\\@_\\-~#]+', '', cleaned)
        cleaned = re.sub('\\s+', '_', cleaned)
        # don't allow $ and . because they screw up the db.
        return cleaned.replace(".", "").replace("$", "")
Tastalian/pymanoid | refs/heads/master | doc/src/conf.py | 3 | # -*- coding: utf-8 -*-
#
# pymanoid documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 13 14:41:18 2017.
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
sys.path.insert(0, os.path.abspath('../../examples'))
sys.path.insert(0, os.path.abspath('../../examples/contact_stability'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
# 'numpydoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = []
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pymanoid'
copyright = u'2015–2019, Stéphane Caron'
author = u'Stéphane Caron'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.2'
# The full version, including alpha/beta/rc tags.
release = '1.2.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# html_theme = 'classic'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'pymanoid v0.6.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or
# 32x32 pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'pymanoiddoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
'papersize': 'a4paper',
# The font size ('10pt', '11pt' or '12pt').
#
'pointsize': '12pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, project + '.tex', project + u' Documentation', author,
'howto'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# It false, will not define \strong, \code, itleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
|
joodo/davanoffi | refs/heads/master | board/migrations/0001_initial.py | 2 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial schema for the board app: creates the Post model."""

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # Creation timestamp, set once on insert.
                ('datetime', models.DateTimeField(auto_now_add=True)),
                ('title', models.CharField(max_length=140, null=True)),
                ('content', models.TextField(null=True)),
                ('image', models.ImageField(null=True, upload_to=b'board/image')),
                # Self-referencing FK: a Post with a parent acts as a comment on it.
                ('parent', models.ForeignKey(related_name='comments', to='board.Post', null=True)),
            ],
        ),
    ]
|
damdam-s/stock-logistics-workflow | refs/heads/8.0 | stock_ownership_availability_rules/model/product.py | 19 | # -*- coding: utf-8 -*-
# Author: Leonardo Pistone
# Copyright 2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from openerp import models
class Product(models.Model):
    """Extend product.template so the quants action opens pre-filtered."""
    _inherit = 'product.template'

    def action_open_quants(self, cr, uid, ids, context=None):
        """Open the quants view with default grouping/filter context applied.

        Delegates to the parent action and then overrides its context so the
        quant list starts grouped by location and owner, restricted to
        internal locations and unreserved quants.
        """
        action = super(Product, self).action_open_quants(cr, uid, ids, context)
        default_filters = (
            "{'search_default_locationgroup': 1, "
            "'search_default_ownergroup': 1, "
            "'search_default_internal_loc': 1, "
            "'search_default_without_reservation': 1}"
        )
        action['context'] = default_filters
        return action
|
eLBati/odoo | refs/heads/master | addons/website_blog/models/website_blog.py | 4 | # -*- coding: utf-8 -*-
from datetime import datetime
import difflib
import lxml
import random
from openerp import tools
from openerp import SUPERUSER_ID
from openerp.osv import osv, fields
from openerp.tools.translate import _
class Blog(osv.Model):
    """A blog: a named, SEO-aware container for blog posts."""
    _name = 'blog.blog'
    _description = 'Blogs'
    _inherit = ['mail.thread', 'website.seo.metadata']
    _order = 'name'

    _columns = {
        'name': fields.char('Blog Name', required=True),
        'subtitle': fields.char('Blog Subtitle'),
        'description': fields.text('Description'),
    }
class BlogTag(osv.Model):
    """A tag that can be attached to blog posts for classification."""
    _name = 'blog.tag'
    _description = 'Blog Tag'
    _inherit = ['website.seo.metadata']
    _order = 'name'

    _columns = {
        'name': fields.char('Name', required=True),
    }
class BlogPost(osv.Model):
    """A blog post with versioned content and per-paragraph comment anchors."""
    _name = "blog.post"
    _description = "Blog Post"
    _inherit = ['mail.thread', 'website.seo.metadata']
    _order = 'id DESC'

    def _compute_ranking(self, cr, uid, ids, name, arg, context=None):
        """Functional field: popularity score.

        Score = visits, jittered by a random factor in [0.5, 1.5) and divided
        by the post's age in days (floored at 3 so very fresh posts do not
        dominate). Non-deterministic by design.
        """
        res = {}
        for blog_post in self.browse(cr, uid, ids, context=context):
            age = datetime.now() - datetime.strptime(blog_post.create_date, tools.DEFAULT_SERVER_DATETIME_FORMAT)
            res[blog_post.id] = blog_post.visits * (0.5+random.random()) / max(3, age.days)
        return res

    _columns = {
        'name': fields.char('Title', required=True, translate=True),
        'subtitle': fields.char('Sub Title', translate=True),
        'author_id': fields.many2one('res.partner', 'Author'),
        'background_image': fields.binary('Background Image', oldname='content_image'),
        'blog_id': fields.many2one(
            'blog.blog', 'Blog',
            required=True, ondelete='cascade',
        ),
        'tag_ids': fields.many2many(
            'blog.tag', string='Tags',
        ),
        'content': fields.html('Content', translate=True),
        # website control
        'website_published': fields.boolean(
            'Publish', help="Publish on the website"
        ),
        # Comments attached to this post; only top-level comments
        # (path = False), per-paragraph ones carry a 'path' value.
        'website_message_ids': fields.one2many(
            'mail.message', 'res_id',
            domain=lambda self: [
                '&', '&', ('model', '=', self._name), ('type', '=', 'comment'), ('path', '=', False)
            ],
            string='Website Messages',
            help="Website communication history",
        ),
        'history_ids': fields.one2many(
            'blog.post.history', 'post_id',
            'History', help='Last post modifications',
        ),
        # creation / update stuff
        'create_date': fields.datetime(
            'Created on',
            select=True, readonly=True,
        ),
        'create_uid': fields.many2one(
            'res.users', 'Author',
            select=True, readonly=True,
        ),
        'write_date': fields.datetime(
            'Last Modified on',
            select=True, readonly=True,
        ),
        'write_uid': fields.many2one(
            'res.users', 'Last Contributor',
            select=True, readonly=True,
        ),
        'visits': fields.integer('No of Views'),
        'ranking': fields.function(_compute_ranking, string='Ranking', type='float'),
    }

    _defaults = {
        'name': _('Blog Post Title'),
        'subtitle': _('Subtitle'),
        # Default author: the current user's related partner.
        'author_id': lambda self, cr, uid, ctx=None: self.pool['res.users'].browse(cr, uid, uid, context=ctx).partner_id.id,
    }

    def html_tag_nodes(self, html, attribute=None, tags=None, context=None):
        """ Processing of html content to tag paragraphs and set them an unique
        ID.

        :return result: (html, mappin), where html is the updated html with ID
                        and mapping is a list of (old_ID, new_ID), where old_ID
                        is None is the paragraph is a new one. """
        mapping = []
        if not html:
            return html, mapping
        if tags is None:
            tags = ['p']
        if attribute is None:
            attribute = 'data-unique-id'
        counter = 0

        # form a tree
        root = lxml.html.fragment_fromstring(html, create_parent='div')
        if not len(root) and root.text is None and root.tail is None:
            return html, mapping

        # check all nodes, replace :
        # - img src -> check URL
        # - a href -> check URL
        for node in root.iter():
            if not node.tag in tags:
                continue
            # Build a unique path-like id from the node's ancestor tags plus a
            # running counter, e.g. 'div/counter_0'.
            ancestor_tags = [parent.tag for parent in node.iterancestors()]
            if ancestor_tags:
                # Drop the synthetic 'div' wrapper added above.
                ancestor_tags.pop()
            ancestor_tags.append('counter_%s' % counter)
            new_attribute = '/'.join(reversed(ancestor_tags))
            old_attribute = node.get(attribute)
            node.set(attribute, new_attribute)
            mapping.append((old_attribute, counter))
            counter += 1

        html = lxml.html.tostring(root, pretty_print=False, method='html')
        # this is ugly, but lxml/etree tostring want to put everything in a 'div' that breaks the editor -> remove that
        if html.startswith('<div>') and html.endswith('</div>'):
            html = html[5:-6]
        return html, mapping

    def _postproces_content(self, cr, uid, id, content=None, context=None):
        # NOTE(review): name keeps the historical 'postproces' spelling;
        # renaming would break any subclass overriding it.
        # Re-tags paragraphs with 'data-chatter-id' anchors and re-points
        # existing per-paragraph mail.message records to the new anchors.
        if content is None:
            content = self.browse(cr, uid, id, context=context).content
        if content is False:
            return content
        content, mapping = self.html_tag_nodes(content, attribute='data-chatter-id', tags=['p'], context=context)
        for old_attribute, new_attribute in mapping:
            if not old_attribute:
                continue
            # SUPERUSER_ID: messages may belong to other users.
            msg_ids = self.pool['mail.message'].search(cr, SUPERUSER_ID, [('path', '=', old_attribute)], context=context)
            self.pool['mail.message'].write(cr, SUPERUSER_ID, msg_ids, {'path': new_attribute}, context=context)
        return content

    def create_history(self, cr, uid, ids, vals, context=None):
        """Snapshot the (new) content of each post into blog.post.history."""
        for i in ids:
            history = self.pool.get('blog.post.history')
            if vals.get('content'):
                res = {
                    'content': vals.get('content', ''),
                    'post_id': i,
                }
                history.create(cr, uid, res)

    def create(self, cr, uid, vals, context=None):
        """Create a post: anchor its paragraphs, skip the auto-log message,
        and record the first history entry."""
        if context is None:
            context = {}
        if 'content' in vals:
            vals['content'] = self._postproces_content(cr, uid, None, vals['content'], context=context)
        create_context = dict(context, mail_create_nolog=True)
        post_id = super(BlogPost, self).create(cr, uid, vals, context=create_context)
        self.create_history(cr, uid, [post_id], vals, context)
        return post_id

    def write(self, cr, uid, ids, vals, context=None):
        """Update a post: re-anchor paragraphs and append a history entry."""
        if 'content' in vals:
            vals['content'] = self._postproces_content(cr, uid, None, vals['content'], context=context)
        result = super(BlogPost, self).write(cr, uid, ids, vals, context)
        self.create_history(cr, uid, ids, vals, context)
        return result

    def copy(self, cr, uid, id, default=None, context=None):
        """Duplicate a post as an unpublished copy without its comments."""
        if default is None:
            default = {}
        default.update({
            'website_message_ids': [],
            'website_published': False,
            'website_published_datetime': False,
        })
        return super(BlogPost, self).copy(cr, uid, id, default=default, context=context)
class BlogPostHistory(osv.Model):
    """A saved revision of a blog post's content."""
    _name = "blog.post.history"
    _description = "Blog Post History"
    _order = 'id DESC'
    _rec_name = "create_date"

    _columns = {
        'post_id': fields.many2one('blog.post', 'Blog Post'),
        'summary': fields.char('Summary', select=True),
        'content': fields.text("Content"),
        'create_date': fields.datetime("Date"),
        'create_uid': fields.many2one('res.users', "Modified By"),
    }

    def getDiff(self, cr, uid, v1, v2, context=None):
        """Return an HTML table diffing the content of revisions v1 and v2.

        Raises a warning popup (except_osv) when both revisions are identical
        or both empty, since there is nothing to show.
        """
        history_pool = self.pool.get('blog.post.history')
        text1 = history_pool.read(cr, uid, [v1], ['content'])[0]['content']
        text2 = history_pool.read(cr, uid, [v2], ['content'])[0]['content']
        line1 = line2 = ''
        if text1:
            # splitlines(1) keeps line endings, as difflib expects.
            line1 = text1.splitlines(1)
        if text2:
            line2 = text2.splitlines(1)
        if (not line1 and not line2) or (line1 == line2):
            raise osv.except_osv(_('Warning!'), _('There are no changes in revisions.'))
        diff = difflib.HtmlDiff()
        return diff.make_table(line1, line2, "Revision-%s" % (v1), "Revision-%s" % (v2), context=True)
|
SmartInfrastructures/neutron | refs/heads/master | neutron/db/migration/alembic_migrations/versions/28c0ffb8ebbd_remove_mlnx_plugin.py | 47 | # Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""remove mlnx plugin
Revision ID: 28c0ffb8ebbd
Revises: 408cfbf6923c
Create Date: 2014-12-08 23:58:49.288830
"""
# revision identifiers, used by Alembic.
# 'revision' is this migration's id; 'down_revision' is the one it follows.
revision = '28c0ffb8ebbd'
down_revision = '408cfbf6923c'
from alembic import op
def upgrade():
    """Drop the tables that belonged to the removed Mellanox (mlnx) plugin."""
    for table_name in ('mlnx_network_bindings',
                       'segmentation_id_allocation',
                       'port_profile'):
        op.drop_table(table_name)
|
zaccoz/odoo | refs/heads/8.0 | addons/account_budget/account_budget.py | 194 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import date, datetime
from openerp.osv import fields, osv
from openerp.tools import ustr, DEFAULT_SERVER_DATE_FORMAT
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
# ---------------------------------------------------------
# Utils
# ---------------------------------------------------------
def strToDate(dt):
    """Parse the leading 'YYYY-MM-DD' of *dt* into a datetime.date."""
    year, month, day = dt[0:4], dt[5:7], dt[8:10]
    return date(int(year), int(month), int(day))
def strToDatetime(strdate):
    """Parse *strdate* with the server-wide date format into a datetime."""
    parsed = datetime.strptime(strdate, DEFAULT_SERVER_DATE_FORMAT)
    return parsed
# ---------------------------------------------------------
# Budgets
# ---------------------------------------------------------
class account_budget_post(osv.osv):
    """Budgetary position: a named group of general accounts that budget
    lines can be measured against."""
    _name = "account.budget.post"
    _description = "Budgetary Position"
    _columns = {
        'code': fields.char('Code', size=64, required=True),
        'name': fields.char('Name', required=True),
        # General accounts aggregated by this budgetary position.
        'account_ids': fields.many2many('account.account', 'account_budget_rel', 'budget_id', 'account_id', 'Accounts'),
        'crossovered_budget_line': fields.one2many('crossovered.budget.lines', 'general_budget_id', 'Budget Lines'),
        'company_id': fields.many2one('res.company', 'Company', required=True),
    }
    _defaults = {
        'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.budget.post', context=c)
    }
    _order = "name"
class crossovered_budget(osv.osv):
    """A budget: a dated collection of budget lines with a simple
    draft -> confirm -> validate -> done workflow (cancel at any point)."""
    _name = "crossovered.budget"
    _description = "Budget"

    _columns = {
        'name': fields.char('Name', required=True, states={'done':[('readonly',True)]}),
        'code': fields.char('Code', size=16, required=True, states={'done':[('readonly',True)]}),
        'creating_user_id': fields.many2one('res.users', 'Responsible User'),
        'validating_user_id': fields.many2one('res.users', 'Validate User', readonly=True),
        'date_from': fields.date('Start Date', required=True, states={'done':[('readonly',True)]}),
        'date_to': fields.date('End Date', required=True, states={'done':[('readonly',True)]}),
        'state' : fields.selection([('draft','Draft'),('cancel', 'Cancelled'),('confirm','Confirmed'),('validate','Validated'),('done','Done')], 'Status', select=True, required=True, readonly=True, copy=False),
        'crossovered_budget_line': fields.one2many('crossovered.budget.lines', 'crossovered_budget_id', 'Budget Lines', states={'done':[('readonly',True)]}, copy=True),
        'company_id': fields.many2one('res.company', 'Company', required=True),
    }

    _defaults = {
        'state': 'draft',
        'creating_user_id': lambda self, cr, uid, context: uid,
        'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.budget.post', context=c)
    }

    # Workflow transitions: each simply writes the target state.
    def budget_confirm(self, cr, uid, ids, *args):
        """Move the budget(s) to the 'confirm' state."""
        self.write(cr, uid, ids, {
            'state': 'confirm'
        })
        return True

    def budget_draft(self, cr, uid, ids, *args):
        """Reset the budget(s) to 'draft'."""
        self.write(cr, uid, ids, {
            'state': 'draft'
        })
        return True

    def budget_validate(self, cr, uid, ids, *args):
        """Validate the budget(s), recording the validating user."""
        self.write(cr, uid, ids, {
            'state': 'validate',
            'validating_user_id': uid,
        })
        return True

    def budget_cancel(self, cr, uid, ids, *args):
        """Cancel the budget(s)."""
        self.write(cr, uid, ids, {
            'state': 'cancel'
        })
        return True

    def budget_done(self, cr, uid, ids, *args):
        """Close the budget(s) ('done' makes most fields read-only)."""
        self.write(cr, uid, ids, {
            'state': 'done'
        })
        return True
class crossovered_budget_lines(osv.osv):
    """A budget line: planned amount for a budgetary position (and optionally
    an analytic account) over a date range, plus computed practical and
    theoretical amounts and their ratio."""

    def _prac_amt(self, cr, uid, ids, context=None):
        """Return {line_id: practical amount}: the sum of analytic ledger
        entries booked on the line's analytic account, restricted to the
        budgetary position's accounts and the line's date range.

        Lines without an analytic account yield 0.0.
        Raises except_osv when the budgetary position has no accounts.
        """
        res = {}
        if context is None:
            context = {}
        account_obj = self.pool.get('account.account')
        for line in self.browse(cr, uid, ids, context=context):
            # Bug fix: reset the accumulator for every line. It used to be
            # initialised once before the loop, so a line without an analytic
            # account silently inherited the previous line's amount.
            result = 0.0
            acc_ids = [x.id for x in line.general_budget_id.account_ids]
            if not acc_ids:
                raise osv.except_osv(_('Error!'),_("The Budget '%s' has no accounts!") % ustr(line.general_budget_id.name))
            # Expand to child and consolidated accounts.
            acc_ids = account_obj._get_children_and_consol(cr, uid, acc_ids, context=context)
            date_to = line.date_to
            date_from = line.date_from
            if line.analytic_account_id.id:
                # Parameterized query: no SQL built from user input.
                cr.execute("SELECT SUM(amount) FROM account_analytic_line WHERE account_id=%s AND (date "
                           "between to_date(%s,'yyyy-mm-dd') AND to_date(%s,'yyyy-mm-dd')) AND "
                           "general_account_id=ANY(%s)", (line.analytic_account_id.id, date_from, date_to, acc_ids,))
                result = cr.fetchone()[0]
                if result is None:
                    result = 0.00
            res[line.id] = result
        return res

    def _prac(self, cr, uid, ids, name, args, context=None):
        """Functional-field wrapper around _prac_amt."""
        res = {}
        for line in self.browse(cr, uid, ids, context=context):
            res[line.id] = self._prac_amt(cr, uid, [line.id], context=context)[line.id]
        return res

    def _theo_amt(self, cr, uid, ids, context=None):
        """Return {line_id: theoretical amount}: the planned amount prorated
        linearly over the elapsed portion of the line's date range."""
        if context is None:
            context = {}
        res = {}
        for line in self.browse(cr, uid, ids, context=context):
            today = datetime.now()
            if line.paid_date:
                # Once paid, the full plan counts only if paid before date_to.
                if strToDate(line.date_to) <= strToDate(line.paid_date):
                    theo_amt = 0.00
                else:
                    theo_amt = line.planned_amount
            else:
                line_timedelta = strToDatetime(line.date_to) - strToDatetime(line.date_from)
                elapsed_timedelta = today - (strToDatetime(line.date_from))
                if elapsed_timedelta.days < 0:
                    # If the budget line has not started yet, theoretical amount should be zero
                    theo_amt = 0.00
                elif line_timedelta.days > 0 and today < strToDatetime(line.date_to):
                    # If today is between the budget line date_from and date_to
                    theo_amt = (elapsed_timedelta.total_seconds() / line_timedelta.total_seconds()) * line.planned_amount
                else:
                    theo_amt = line.planned_amount
            res[line.id] = theo_amt
        return res

    def _theo(self, cr, uid, ids, name, args, context=None):
        """Functional-field wrapper around _theo_amt."""
        res = {}
        for line in self.browse(cr, uid, ids, context=context):
            res[line.id] = self._theo_amt(cr, uid, [line.id], context=context)[line.id]
        return res

    def _perc(self, cr, uid, ids, name, args, context=None):
        """Percentage of the theoretical amount actually spent; 0.0 when the
        theoretical amount is zero (avoids division by zero)."""
        res = {}
        for line in self.browse(cr, uid, ids, context=context):
            # '!=' replaces the obsolete '<>' operator (removed in Python 3).
            if line.theoritical_amount != 0.00:
                res[line.id] = float((line.practical_amount or 0.0) / line.theoritical_amount) * 100
            else:
                res[line.id] = 0.00
        return res

    _name = "crossovered.budget.lines"
    _description = "Budget Line"
    _columns = {
        'crossovered_budget_id': fields.many2one('crossovered.budget', 'Budget', ondelete='cascade', select=True, required=True),
        'analytic_account_id': fields.many2one('account.analytic.account', 'Analytic Account'),
        'general_budget_id': fields.many2one('account.budget.post', 'Budgetary Position',required=True),
        'date_from': fields.date('Start Date', required=True),
        'date_to': fields.date('End Date', required=True),
        'paid_date': fields.date('Paid Date'),
        'planned_amount':fields.float('Planned Amount', required=True, digits_compute=dp.get_precision('Account')),
        'practical_amount':fields.function(_prac, string='Practical Amount', type='float', digits_compute=dp.get_precision('Account')),
        # NOTE: 'theoritical_amount' keeps its historical misspelling; the
        # column name is part of the stored schema and external API.
        'theoritical_amount':fields.function(_theo, string='Theoretical Amount', type='float', digits_compute=dp.get_precision('Account')),
        'percentage':fields.function(_perc, string='Percentage', type='float'),
        'company_id': fields.related('crossovered_budget_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True)
    }
class account_analytic_account(osv.osv):
    """Extend analytic accounts with a link to their budget lines."""
    _inherit = "account.analytic.account"

    _columns = {
        'crossovered_budget_line': fields.one2many('crossovered.budget.lines', 'analytic_account_id', 'Budget Lines'),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Soya93/Extract-Refactoring | refs/heads/master | python/testData/intentions/splitIf_after.py | 83 | def foo():
if a + 2 > 3:
if b < 4:
#comment
a = a and b
b = 4
elif a > 20:
pass
elif b > 20:
b = a + 2
foo()
else:
b = a and b
a = 4
elif a > 20:
pass
elif b > 20:
b = a + 2
foo()
else:
b = a and b
a = 4 |
codasus/django-blogages | refs/heads/master | blogages/django/contrib/auth/forms.py | 96 | from django.contrib.auth.models import User
from django.contrib.auth import authenticate
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.models import get_current_site
from django.template import Context, loader
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.utils.http import urlsafe_base64_encode
class UserCreationForm(forms.ModelForm):
    """
    A form that creates a user, with no privileges, from the given username and password.
    """
    username = forms.RegexField(label=_("Username"), max_length=30, regex=r'^[\w.@+-]+$',
        help_text = _("Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only."),
        error_messages = {'invalid': _("This value may contain only letters, numbers and @/./+/-/_ characters.")})
    password1 = forms.CharField(label=_("Password"), widget=forms.PasswordInput)
    password2 = forms.CharField(label=_("Password confirmation"), widget=forms.PasswordInput,
        help_text = _("Enter the same password as above, for verification."))

    class Meta:
        model = User
        fields = ("username",)

    def clean_username(self):
        # Reject usernames that already exist: a DoesNotExist from the lookup
        # means the name is free.
        username = self.cleaned_data["username"]
        try:
            User.objects.get(username=username)
        except User.DoesNotExist:
            return username
        raise forms.ValidationError(_("A user with that username already exists."))

    def clean_password2(self):
        # Both password entries must match for the form to validate.
        password1 = self.cleaned_data.get("password1", "")
        password2 = self.cleaned_data["password2"]
        if password1 != password2:
            raise forms.ValidationError(_("The two password fields didn't match."))
        return password2

    def save(self, commit=True):
        # Store the hashed password, never the raw value.
        user = super(UserCreationForm, self).save(commit=False)
        user.set_password(self.cleaned_data["password1"])
        if commit:
            user.save()
        return user
class UserChangeForm(forms.ModelForm):
    """Admin form for editing an existing User."""
    username = forms.RegexField(label=_("Username"), max_length=30, regex=r'^[\w.@+-]+$',
        help_text = _("Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only."),
        error_messages = {'invalid': _("This value may contain only letters, numbers and @/./+/-/_ characters.")})

    class Meta:
        model = User

    def __init__(self, *args, **kwargs):
        super(UserChangeForm, self).__init__(*args, **kwargs)
        # Prefetch content types so rendering the permissions widget does not
        # issue one query per permission row.
        f = self.fields.get('user_permissions', None)
        if f is not None:
            f.queryset = f.queryset.select_related('content_type')
class AuthenticationForm(forms.Form):
    """
    Base class for authenticating users. Extend this to get a form that accepts
    username/password logins.
    """
    username = forms.CharField(label=_("Username"), max_length=30)
    password = forms.CharField(label=_("Password"), widget=forms.PasswordInput)

    def __init__(self, request=None, *args, **kwargs):
        """
        If request is passed in, the form will validate that cookies are
        enabled. Note that the request (a HttpRequest object) must have set a
        cookie with the key TEST_COOKIE_NAME and value TEST_COOKIE_VALUE before
        running this validation.
        """
        self.request = request
        self.user_cache = None  # set by clean() on successful authentication
        super(AuthenticationForm, self).__init__(*args, **kwargs)

    def clean(self):
        # Only attempt authentication when both fields were supplied;
        # field-level validation reports missing values separately.
        username = self.cleaned_data.get('username')
        password = self.cleaned_data.get('password')
        if username and password:
            self.user_cache = authenticate(username=username, password=password)
            if self.user_cache is None:
                raise forms.ValidationError(_("Please enter a correct username and password. Note that both fields are case-sensitive."))
            elif not self.user_cache.is_active:
                raise forms.ValidationError(_("This account is inactive."))
        self.check_for_test_cookie()
        return self.cleaned_data

    def check_for_test_cookie(self):
        # Only meaningful when a request was passed to __init__.
        if self.request and not self.request.session.test_cookie_worked():
            raise forms.ValidationError(
                _("Your Web browser doesn't appear to have cookies enabled. "
                  "Cookies are required for logging in."))

    def get_user_id(self):
        # Convenience accessor; None before a successful clean().
        if self.user_cache:
            return self.user_cache.id
        return None

    def get_user(self):
        return self.user_cache
class PasswordResetForm(forms.Form):
    """Form that emails a one-use password-reset link to matching users."""
    email = forms.EmailField(label=_("E-mail"), max_length=75)

    def clean_email(self):
        """
        Validates that an active user exists with the given e-mail address.
        """
        email = self.cleaned_data["email"]
        # Cache the queryset for save(); several users may share one address.
        self.users_cache = User.objects.filter(
            email__iexact=email,
            is_active=True
        )
        if len(self.users_cache) == 0:
            raise forms.ValidationError(_("That e-mail address doesn't have an associated user account. Are you sure you've registered?"))
        return email

    def save(self, domain_override=None, email_template_name='registration/password_reset_email.html',
             use_https=False, token_generator=default_token_generator, from_email=None, request=None):
        """
        Generates a one-use only link for resetting password and sends to the user
        """
        from django.core.mail import send_mail
        for user in self.users_cache:
            if not domain_override:
                current_site = get_current_site(request)
                site_name = current_site.name
                domain = current_site.domain
            else:
                site_name = domain = domain_override
            t = loader.get_template(email_template_name)
            # Template context: the uid/token pair forms the reset URL.
            c = {
                'email': user.email,
                'domain': domain,
                'site_name': site_name,
                'uid': urlsafe_base64_encode(str(user.id)),
                'user': user,
                'token': token_generator.make_token(user),
                'protocol': use_https and 'https' or 'http',
            }
            send_mail(_("Password reset on %s") % site_name,
                      t.render(Context(c)), from_email, [user.email])
class SetPasswordForm(forms.Form):
    """
    Form for setting a user's password without asking for the old one
    (used e.g. by the password-reset confirmation view).
    """
    new_password1 = forms.CharField(label=_("New password"), widget=forms.PasswordInput)
    new_password2 = forms.CharField(label=_("New password confirmation"), widget=forms.PasswordInput)

    def __init__(self, user, *args, **kwargs):
        """Remember the user whose password will be changed."""
        self.user = user
        super(SetPasswordForm, self).__init__(*args, **kwargs)

    def clean_new_password2(self):
        """Ensure both password entries agree before accepting them."""
        first = self.cleaned_data.get('new_password1')
        second = self.cleaned_data.get('new_password2')
        if first and second and first != second:
            raise forms.ValidationError(_("The two password fields didn't match."))
        return second

    def save(self, commit=True):
        """Hash and store the new password; persist unless commit=False."""
        new_password = self.cleaned_data['new_password1']
        self.user.set_password(new_password)
        if commit:
            self.user.save()
        return self.user
class PasswordChangeForm(SetPasswordForm):
    """
    A form that lets a user change his/her password by entering
    their old password.
    """
    old_password = forms.CharField(label=_("Old password"), widget=forms.PasswordInput)

    def clean_old_password(self):
        """
        Validates that the old_password field is correct.
        """
        old_password = self.cleaned_data["old_password"]
        if not self.user.check_password(old_password):
            raise forms.ValidationError(_("Your old password was entered incorrectly. Please enter it again."))
        return old_password

# Render 'old_password' first; base_fields inherited from SetPasswordForm
# would otherwise list the new-password fields before it.
PasswordChangeForm.base_fields.keyOrder = ['old_password', 'new_password1', 'new_password2']
class AdminPasswordChangeForm(forms.Form):
    """
    A form used to change the password of a user in the admin interface.
    """
    password1 = forms.CharField(label=_("Password"), widget=forms.PasswordInput)
    password2 = forms.CharField(label=_("Password (again)"), widget=forms.PasswordInput)

    def __init__(self, user, *args, **kwargs):
        # The admin picks the target user; no old password is required.
        self.user = user
        super(AdminPasswordChangeForm, self).__init__(*args, **kwargs)

    def clean_password2(self):
        # Both entries must match for the form to validate.
        password1 = self.cleaned_data.get('password1')
        password2 = self.cleaned_data.get('password2')
        if password1 and password2:
            if password1 != password2:
                raise forms.ValidationError(_("The two password fields didn't match."))
        return password2

    def save(self, commit=True):
        """
        Saves the new password.
        """
        self.user.set_password(self.cleaned_data["password1"])
        if commit:
            self.user.save()
        return self.user
|
CTSRD-SOAAP/chromium-42.0.2311.135 | refs/heads/master | net/tools/testserver/testserver.py | 1 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This is a simple HTTP/FTP/TCP/UDP/BASIC_AUTH_PROXY/WEBSOCKET server used for
testing Chrome.
It supports several test URLs, as specified by the handlers in TestPageHandler.
By default, it listens on an ephemeral port and sends the port number back to
the originating process over a pipe. The originating process can specify an
explicit port if necessary.
It can use https if you specify the flag --https=CERT where CERT is the path
to a pem file containing the certificate and private key that should be used.
"""
import base64
import BaseHTTPServer
import cgi
import hashlib
import logging
import minica
import os
import json
import random
import re
import select
import socket
import SocketServer
import ssl
import struct
import sys
import threading
import time
import urllib
import urlparse
import zlib
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.dirname(BASE_DIR)))

# Temporary hack to deal with tlslite 0.3.8 -> 0.4.6 upgrade.
#
# TODO(davidben): Remove this when it has cycled through all the bots and
# developer checkouts or when http://crbug.com/356276 is resolved.
try:
    os.remove(os.path.join(ROOT_DIR, 'third_party', 'tlslite',
                           'tlslite', 'utils', 'hmac.pyc'))
except Exception:
    pass

# Append at the end of sys.path, it's fine to use the system library.
sys.path.append(os.path.join(ROOT_DIR, 'third_party', 'pyftpdlib', 'src'))

# Insert at the beginning of the path, we want to use our copies of the library
# unconditionally.
sys.path.insert(0, os.path.join(ROOT_DIR, 'third_party', 'pywebsocket', 'src'))
sys.path.insert(0, os.path.join(ROOT_DIR, 'third_party', 'tlslite'))

import mod_pywebsocket.standalone
from mod_pywebsocket.standalone import WebSocketServer
# import manually
mod_pywebsocket.standalone.ssl = ssl

import pyftpdlib.ftpserver

import tlslite
import tlslite.api

import echo_message
import testserver_base

# Server type identifiers.
SERVER_HTTP = 0
SERVER_FTP = 1
SERVER_TCP_ECHO = 2
SERVER_UDP_ECHO = 3
SERVER_BASIC_AUTH_PROXY = 4
SERVER_WEBSOCKET = 5

# Default request queue size for WebSocketServer.
_DEFAULT_REQUEST_QUEUE_SIZE = 128
class WebSocketOptions:
    """Holds options for WebSocketServer."""

    def __init__(self, host, port, data_dir):
        self.request_queue_size = _DEFAULT_REQUEST_QUEUE_SIZE
        self.server_host = host
        self.port = port
        # Directory containing the websocket handler scripts.
        self.websock_handlers = data_dir
        self.scan_dir = None
        self.allow_handlers_outside_root_dir = False
        self.websock_handlers_map_file = None
        self.cgi_directories = []
        self.is_executable_method = None
        self.allow_draft75 = False
        self.strict = True

        # TLS is off by default; callers flip these fields to enable it.
        self.use_tls = False
        self.private_key = None
        self.certificate = None
        self.tls_client_auth = False
        self.tls_client_ca = None
        self.tls_module = 'ssl'
        # Fixed test credential (user 'test', password 'test').
        self.use_basic_auth = False
        self.basic_auth_credential = 'Basic ' + base64.b64encode('test:test')
class RecordingSSLSessionCache(object):
    """Stand-in TLS session cache: stores nothing, but keeps a chronological
    log of every lookup and insert so tests can assert on session-resumption
    behaviour."""

    def __init__(self):
        self.log = []  # entries are ('lookup'|'insert', session_id) tuples

    def __getitem__(self, session_id):
        # Note the lookup, then act like a permanently-empty cache.
        self.log.append(('lookup', session_id))
        raise KeyError()

    def __setitem__(self, session_id, session):
        # Note the insert; the session object itself is deliberately dropped.
        self.log.append(('insert', session_id))
class HTTPServer(testserver_base.ClientRestrictingServerMixIn,
                 testserver_base.BrokenPipeHandlerMixIn,
                 testserver_base.StoppableHTTPServer):
    """This is a specialization of StoppableHTTPServer that adds client
    verification."""
    # All behaviour comes from the mix-ins; no extra state is needed.

    pass
class OCSPServer(testserver_base.ClientRestrictingServerMixIn,
                 testserver_base.BrokenPipeHandlerMixIn,
                 BaseHTTPServer.HTTPServer):
    """HTTPServer specialization that serves an OCSP response."""

    def serve_forever_on_thread(self):
        """Run serve_forever() on a dedicated background thread."""
        worker = threading.Thread(target=self.serve_forever,
                                  name="OCSPServerThread")
        self.thread = worker
        worker.start()

    def stop_serving(self):
        """Shut the server down and wait for its worker thread to exit."""
        self.shutdown()
        self.thread.join()
class HTTPSServer(tlslite.api.TLSSocketServerMixIn,
testserver_base.ClientRestrictingServerMixIn,
testserver_base.BrokenPipeHandlerMixIn,
testserver_base.StoppableHTTPServer):
"""This is a specialization of StoppableHTTPServer that add https support and
client verification."""
def __init__(self, server_address, request_hander_class, pem_cert_and_key,
ssl_client_auth, ssl_client_cas, ssl_client_cert_types,
ssl_bulk_ciphers, ssl_key_exchanges, enable_npn,
record_resume_info, tls_intolerant,
tls_intolerance_type, signed_cert_timestamps,
fallback_scsv_enabled, ocsp_response, disable_session_cache):
self.cert_chain = tlslite.api.X509CertChain()
self.cert_chain.parsePemList(pem_cert_and_key)
# Force using only python implementation - otherwise behavior is different
# depending on whether m2crypto Python module is present (error is thrown
# when it is). m2crypto uses a C (based on OpenSSL) implementation under
# the hood.
self.private_key = tlslite.api.parsePEMKey(pem_cert_and_key,
private=True,
implementations=['python'])
self.ssl_client_auth = ssl_client_auth
self.ssl_client_cas = []
self.ssl_client_cert_types = []
if enable_npn:
self.next_protos = ['http/1.1']
else:
self.next_protos = None
self.signed_cert_timestamps = signed_cert_timestamps
self.fallback_scsv_enabled = fallback_scsv_enabled
self.ocsp_response = ocsp_response
if ssl_client_auth:
for ca_file in ssl_client_cas:
s = open(ca_file).read()
x509 = tlslite.api.X509()
x509.parse(s)
self.ssl_client_cas.append(x509.subject)
for cert_type in ssl_client_cert_types:
self.ssl_client_cert_types.append({
"rsa_sign": tlslite.api.ClientCertificateType.rsa_sign,
"dss_sign": tlslite.api.ClientCertificateType.dss_sign,
"ecdsa_sign": tlslite.api.ClientCertificateType.ecdsa_sign,
}[cert_type])
self.ssl_handshake_settings = tlslite.api.HandshakeSettings()
# Enable SSLv3 for testing purposes.
self.ssl_handshake_settings.minVersion = (3, 0)
if ssl_bulk_ciphers is not None:
self.ssl_handshake_settings.cipherNames = ssl_bulk_ciphers
if ssl_key_exchanges is not None:
self.ssl_handshake_settings.keyExchangeNames = ssl_key_exchanges
if tls_intolerant != 0:
self.ssl_handshake_settings.tlsIntolerant = (3, tls_intolerant)
self.ssl_handshake_settings.tlsIntoleranceType = tls_intolerance_type
if disable_session_cache:
self.session_cache = None
elif record_resume_info:
# If record_resume_info is true then we'll replace the session cache with
# an object that records the lookups and inserts that it sees.
self.session_cache = RecordingSSLSessionCache()
else:
self.session_cache = tlslite.api.SessionCache()
testserver_base.StoppableHTTPServer.__init__(self,
server_address,
request_hander_class)
  def handshake(self, tlsConnection):
    """Creates the SSL connection.

    Runs the server side of the TLS handshake on |tlsConnection| using the
    certificate chain, private key, client-auth settings, NPN protocols,
    SCT list, and OCSP response gathered by __init__.  Returns True when
    the connection is usable (including an abrupt client close), False on
    handshake failure.
    """
    try:
      # Remember the connection so other handlers (e.g. session-cache
      # queries) can reach it later.
      self.tlsConnection = tlsConnection
      tlsConnection.handshakeServer(certChain=self.cert_chain,
                                    privateKey=self.private_key,
                                    sessionCache=self.session_cache,
                                    reqCert=self.ssl_client_auth,
                                    settings=self.ssl_handshake_settings,
                                    reqCAs=self.ssl_client_cas,
                                    reqCertTypes=self.ssl_client_cert_types,
                                    nextProtos=self.next_protos,
                                    signedCertTimestamps=
                                    self.signed_cert_timestamps,
                                    fallbackSCSV=self.fallback_scsv_enabled,
                                    ocspResponse = self.ocsp_response)
      # Tests close connections without a proper TLS shutdown; don't treat
      # that as an error on this connection.
      tlsConnection.ignoreAbruptClose = True
      return True
    except tlslite.api.TLSAbruptCloseError:
      # Ignore abrupt close.
      return True
    except tlslite.api.TLSError, error:
      print "Handshake failure:", str(error)
      return False
class FTPServer(testserver_base.ClientRestrictingServerMixIn,
                pyftpdlib.ftpserver.FTPServer):
  """An FTP server restricted to a single verified client.

  Pure mix-in composition: pyftpdlib's FTPServer plus client address
  verification, with no behavior of its own.
  """
  pass
class TCPEchoServer(testserver_base.ClientRestrictingServerMixIn,
                    SocketServer.TCPServer):
  """A TCP echo server that echoes back what it has received."""

  def server_bind(self):
    """Binds the socket, then records the resolved server name and port."""
    SocketServer.TCPServer.server_bind(self)
    bound_host, bound_port = self.socket.getsockname()[:2]
    self.server_name = socket.getfqdn(bound_host)
    self.server_port = bound_port

  def serve_forever(self):
    """Handles one request at a time until a handler sets self.stop."""
    self.stop = False
    self.nonce_time = None
    while not self.stop:
      self.handle_request()
    self.socket.close()
class UDPEchoServer(testserver_base.ClientRestrictingServerMixIn,
                    SocketServer.UDPServer):
  """A UDP echo server that echoes back what it has received."""

  def server_bind(self):
    """Binds the socket, then records the resolved server name and port."""
    SocketServer.UDPServer.server_bind(self)
    bound_host, bound_port = self.socket.getsockname()[:2]
    self.server_name = socket.getfqdn(bound_host)
    self.server_port = bound_port

  def serve_forever(self):
    """Handles one request at a time until a handler sets self.stop."""
    self.stop = False
    self.nonce_time = None
    while not self.stop:
      self.handle_request()
    self.socket.close()
class TestPageHandler(testserver_base.BasePageHandler):
# Class variables to allow for persistence state between page handler
# invocations
rst_limits = {}
fail_precondition = {}
  def __init__(self, request, client_address, socket_server):
    """Registers every URL handler and MIME mapping, then delegates to
    BasePageHandler.

    Handlers in each list are consulted in order until one returns True,
    so the catch-all Default*Handler entries must remain last.
    """
    connect_handlers = [
      self.RedirectConnectHandler,
      self.ServerAuthConnectHandler,
      self.DefaultConnectResponseHandler]
    get_handlers = [
      self.NoCacheMaxAgeTimeHandler,
      self.NoCacheTimeHandler,
      self.CacheTimeHandler,
      self.CacheExpiresHandler,
      self.CacheProxyRevalidateHandler,
      self.CachePrivateHandler,
      self.CachePublicHandler,
      self.CacheSMaxAgeHandler,
      self.CacheMustRevalidateHandler,
      self.CacheMustRevalidateMaxAgeHandler,
      self.CacheNoStoreHandler,
      self.CacheNoStoreMaxAgeHandler,
      self.CacheNoTransformHandler,
      self.DownloadHandler,
      self.DownloadFinishHandler,
      self.EchoHeader,
      self.EchoHeaderCache,
      self.EchoAllHandler,
      self.ZipFileHandler,
      self.FileHandler,
      self.SetCookieHandler,
      self.SetManyCookiesHandler,
      self.ExpectAndSetCookieHandler,
      self.SetHeaderHandler,
      self.AuthBasicHandler,
      self.AuthDigestHandler,
      self.SlowServerHandler,
      self.ChunkedServerHandler,
      self.ContentTypeHandler,
      self.NoContentHandler,
      self.ServerRedirectHandler,
      self.CrossSiteRedirectHandler,
      self.ClientRedirectHandler,
      self.GetSSLSessionCacheHandler,
      self.SSLManySmallRecords,
      self.GetChannelID,
      self.ClientCipherListHandler,
      self.CloseSocketHandler,
      self.RangeResetHandler,
      self.DefaultResponseHandler]
    # POST and PUT also accept everything GET accepts (query-style tests).
    post_handlers = [
      self.EchoTitleHandler,
      self.EchoHandler,
      self.PostOnlyFileHandler,
      self.EchoMultipartPostHandler] + get_handlers
    put_handlers = [
      self.EchoTitleHandler,
      self.EchoHandler] + get_handlers
    head_handlers = [
      self.FileHandler,
      self.DefaultResponseHandler]
    # File-extension -> Content-Type table used by GetMIMETypeFromName.
    self._mime_types = {
      'crx' : 'application/x-chrome-extension',
      'exe' : 'application/octet-stream',
      'gif': 'image/gif',
      'jpeg' : 'image/jpeg',
      'jpg' : 'image/jpeg',
      'js' : 'application/javascript',
      'json': 'application/json',
      'pdf' : 'application/pdf',
      'txt' : 'text/plain',
      'wav' : 'audio/wav',
      'xml' : 'text/xml'
    }
    # Fallback when the extension is missing or unknown.
    self._default_mime_type = 'text/html'
    testserver_base.BasePageHandler.__init__(self, request, client_address,
                                             socket_server, connect_handlers,
                                             get_handlers, head_handlers,
                                             post_handlers, put_handlers)
def GetMIMETypeFromName(self, file_name):
"""Returns the mime type for the specified file_name. So far it only looks
at the file extension."""
(_shortname, extension) = os.path.splitext(file_name.split("?")[0])
if len(extension) == 0:
# no extension.
return self._default_mime_type
# extension starts with a dot, so we need to remove it
return self._mime_types.get(extension[1:], self._default_mime_type)
def NoCacheMaxAgeTimeHandler(self):
"""This request handler yields a page with the title set to the current
system time, and no caching requested."""
if not self._ShouldHandleRequest("/nocachetime/maxage"):
return False
self.send_response(200)
self.send_header('Cache-Control', 'max-age=0')
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write('<html><head><title>%s</title></head></html>' %
time.time())
return True
def NoCacheTimeHandler(self):
"""This request handler yields a page with the title set to the current
system time, and no caching requested."""
if not self._ShouldHandleRequest("/nocachetime"):
return False
self.send_response(200)
self.send_header('Cache-Control', 'no-cache')
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write('<html><head><title>%s</title></head></html>' %
time.time())
return True
def CacheTimeHandler(self):
"""This request handler yields a page with the title set to the current
system time, and allows caching for one minute."""
if not self._ShouldHandleRequest("/cachetime"):
return False
self.send_response(200)
self.send_header('Cache-Control', 'max-age=60')
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write('<html><head><title>%s</title></head></html>' %
time.time())
return True
def CacheExpiresHandler(self):
"""This request handler yields a page with the title set to the current
system time, and set the page to expire on 1 Jan 2099."""
if not self._ShouldHandleRequest("/cache/expires"):
return False
self.send_response(200)
self.send_header('Expires', 'Thu, 1 Jan 2099 00:00:00 GMT')
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write('<html><head><title>%s</title></head></html>' %
time.time())
return True
def CacheProxyRevalidateHandler(self):
"""This request handler yields a page with the title set to the current
system time, and allows caching for 60 seconds"""
if not self._ShouldHandleRequest("/cache/proxy-revalidate"):
return False
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.send_header('Cache-Control', 'max-age=60, proxy-revalidate')
self.end_headers()
self.wfile.write('<html><head><title>%s</title></head></html>' %
time.time())
return True
  def CachePrivateHandler(self):
    """This request handler yields a page with the title set to the current
    system time, and allows private caching for 3 seconds.

    NOTE(review): the docstring used to claim 5 seconds, but the header
    below sends max-age=3.
    """
    if not self._ShouldHandleRequest("/cache/private"):
      return False
    self.send_response(200)
    self.send_header('Content-Type', 'text/html')
    self.send_header('Cache-Control', 'max-age=3, private')
    self.end_headers()
    self.wfile.write('<html><head><title>%s</title></head></html>' %
                     time.time())
    return True
  def CachePublicHandler(self):
    """This request handler yields a page with the title set to the current
    system time, and allows public caching for 3 seconds.

    NOTE(review): the docstring used to claim 5 seconds, but the header
    below sends max-age=3.
    """
    if not self._ShouldHandleRequest("/cache/public"):
      return False
    self.send_response(200)
    self.send_header('Content-Type', 'text/html')
    self.send_header('Cache-Control', 'max-age=3, public')
    self.end_headers()
    self.wfile.write('<html><head><title>%s</title></head></html>' %
                     time.time())
    return True
def CacheSMaxAgeHandler(self):
"""This request handler yields a page with the title set to the current
system time, and does not allow for caching."""
if not self._ShouldHandleRequest("/cache/s-maxage"):
return False
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.send_header('Cache-Control', 'public, s-maxage = 60, max-age = 0')
self.end_headers()
self.wfile.write('<html><head><title>%s</title></head></html>' %
time.time())
return True
def CacheMustRevalidateHandler(self):
"""This request handler yields a page with the title set to the current
system time, and does not allow caching."""
if not self._ShouldHandleRequest("/cache/must-revalidate"):
return False
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.send_header('Cache-Control', 'must-revalidate')
self.end_headers()
self.wfile.write('<html><head><title>%s</title></head></html>' %
time.time())
return True
def CacheMustRevalidateMaxAgeHandler(self):
"""This request handler yields a page with the title set to the current
system time, and does not allow caching event though max-age of 60
seconds is specified."""
if not self._ShouldHandleRequest("/cache/must-revalidate/max-age"):
return False
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.send_header('Cache-Control', 'max-age=60, must-revalidate')
self.end_headers()
self.wfile.write('<html><head><title>%s</title></head></html>' %
time.time())
return True
def CacheNoStoreHandler(self):
"""This request handler yields a page with the title set to the current
system time, and does not allow the page to be stored."""
if not self._ShouldHandleRequest("/cache/no-store"):
return False
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.send_header('Cache-Control', 'no-store')
self.end_headers()
self.wfile.write('<html><head><title>%s</title></head></html>' %
time.time())
return True
def CacheNoStoreMaxAgeHandler(self):
"""This request handler yields a page with the title set to the current
system time, and does not allow the page to be stored even though max-age
of 60 seconds is specified."""
if not self._ShouldHandleRequest("/cache/no-store/max-age"):
return False
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.send_header('Cache-Control', 'max-age=60, no-store')
self.end_headers()
self.wfile.write('<html><head><title>%s</title></head></html>' %
time.time())
return True
def CacheNoTransformHandler(self):
"""This request handler yields a page with the title set to the current
system time, and does not allow the content to transformed during
user-agent caching"""
if not self._ShouldHandleRequest("/cache/no-transform"):
return False
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.send_header('Cache-Control', 'no-transform')
self.end_headers()
self.wfile.write('<html><head><title>%s</title></head></html>' %
time.time())
return True
def EchoHeader(self):
"""This handler echoes back the value of a specific request header."""
return self.EchoHeaderHelper("/echoheader")
def EchoHeaderCache(self):
"""This function echoes back the value of a specific request header while
allowing caching for 16 hours."""
return self.EchoHeaderHelper("/echoheadercache")
def EchoHeaderHelper(self, echo_header):
"""This function echoes back the value of the request header passed in."""
if not self._ShouldHandleRequest(echo_header):
return False
query_char = self.path.find('?')
if query_char != -1:
header_name = self.path[query_char+1:]
self.send_response(200)
self.send_header('Content-Type', 'text/plain')
if echo_header == '/echoheadercache':
self.send_header('Cache-control', 'max-age=60000')
else:
self.send_header('Cache-control', 'no-cache')
# insert a vary header to properly indicate that the cachability of this
# request is subject to value of the request header being echoed.
if len(header_name) > 0:
self.send_header('Vary', header_name)
self.end_headers()
if len(header_name) > 0:
self.wfile.write(self.headers.getheader(header_name))
return True
def ReadRequestBody(self):
"""This function reads the body of the current HTTP request, handling
both plain and chunked transfer encoded requests."""
if self.headers.getheader('transfer-encoding') != 'chunked':
length = int(self.headers.getheader('content-length'))
return self.rfile.read(length)
# Read the request body as chunks.
body = ""
while True:
line = self.rfile.readline()
length = int(line, 16)
if length == 0:
self.rfile.readline()
break
body += self.rfile.read(length)
self.rfile.read(2)
return body
def EchoHandler(self):
"""This handler just echoes back the payload of the request, for testing
form submission."""
if not self._ShouldHandleRequest("/echo"):
return False
_, _, _, _, query, _ = urlparse.urlparse(self.path)
query_params = cgi.parse_qs(query, True)
if 'status' in query_params:
self.send_response(int(query_params['status'][0]))
else:
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write(self.ReadRequestBody())
return True
def EchoTitleHandler(self):
"""This handler is like Echo, but sets the page title to the request."""
if not self._ShouldHandleRequest("/echotitle"):
return False
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
request = self.ReadRequestBody()
self.wfile.write('<html><head><title>')
self.wfile.write(request)
self.wfile.write('</title></head></html>')
return True
def EchoAllHandler(self):
"""This handler yields a (more) human-readable page listing information
about the request header & contents."""
if not self._ShouldHandleRequest("/echoall"):
return False
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write('<html><head><style>'
'pre { border: 1px solid black; margin: 5px; padding: 5px }'
'</style></head><body>'
'<div style="float: right">'
'<a href="/echo">back to referring page</a></div>'
'<h1>Request Body:</h1><pre>')
if self.command == 'POST' or self.command == 'PUT':
qs = self.ReadRequestBody()
params = cgi.parse_qs(qs, keep_blank_values=1)
for param in params:
self.wfile.write('%s=%s\n' % (param, params[param][0]))
self.wfile.write('</pre>')
self.wfile.write('<h1>Request Headers:</h1><pre>%s</pre>' % self.headers)
self.wfile.write('</body></html>')
return True
def EchoMultipartPostHandler(self):
"""This handler echoes received multipart post data as json format."""
if not (self._ShouldHandleRequest("/echomultipartpost") or
self._ShouldHandleRequest("/searchbyimage")):
return False
content_type, parameters = cgi.parse_header(
self.headers.getheader('content-type'))
if content_type == 'multipart/form-data':
post_multipart = cgi.parse_multipart(self.rfile, parameters)
elif content_type == 'application/x-www-form-urlencoded':
raise Exception('POST by application/x-www-form-urlencoded is '
'not implemented.')
else:
post_multipart = {}
# Since the data can be binary, we encode them by base64.
post_multipart_base64_encoded = {}
for field, values in post_multipart.items():
post_multipart_base64_encoded[field] = [base64.b64encode(value)
for value in values]
result = {'POST_multipart' : post_multipart_base64_encoded}
self.send_response(200)
self.send_header("Content-type", "text/plain")
self.end_headers()
self.wfile.write(json.dumps(result, indent=2, sort_keys=False))
return True
def DownloadHandler(self):
"""This handler sends a downloadable file with or without reporting
the size (6K)."""
if self.path.startswith("/download-unknown-size"):
send_length = False
elif self.path.startswith("/download-known-size"):
send_length = True
else:
return False
#
# The test which uses this functionality is attempting to send
# small chunks of data to the client. Use a fairly large buffer
# so that we'll fill chrome's IO buffer enough to force it to
# actually write the data.
# See also the comments in the client-side of this test in
# download_uitest.cc
#
size_chunk1 = 35*1024
size_chunk2 = 10*1024
self.send_response(200)
self.send_header('Content-Type', 'application/octet-stream')
self.send_header('Cache-Control', 'max-age=0')
if send_length:
self.send_header('Content-Length', size_chunk1 + size_chunk2)
self.end_headers()
# First chunk of data:
self.wfile.write("*" * size_chunk1)
self.wfile.flush()
# handle requests until one of them clears this flag.
self.server.wait_for_download = True
while self.server.wait_for_download:
self.server.handle_request()
# Second chunk of data:
self.wfile.write("*" * size_chunk2)
return True
def DownloadFinishHandler(self):
"""This handler just tells the server to finish the current download."""
if not self._ShouldHandleRequest("/download-finish"):
return False
self.server.wait_for_download = False
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.send_header('Cache-Control', 'max-age=0')
self.end_headers()
return True
def _ReplaceFileData(self, data, query_parameters):
"""Replaces matching substrings in a file.
If the 'replace_text' URL query parameter is present, it is expected to be
of the form old_text:new_text, which indicates that any old_text strings in
the file are replaced with new_text. Multiple 'replace_text' parameters may
be specified.
If the parameters are not present, |data| is returned.
"""
query_dict = cgi.parse_qs(query_parameters)
replace_text_values = query_dict.get('replace_text', [])
for replace_text_value in replace_text_values:
replace_text_args = replace_text_value.split(':')
if len(replace_text_args) != 2:
raise ValueError(
'replace_text must be of form old_text:new_text. Actual value: %s' %
replace_text_value)
old_text_b64, new_text_b64 = replace_text_args
old_text = base64.urlsafe_b64decode(old_text_b64)
new_text = base64.urlsafe_b64decode(new_text_b64)
data = data.replace(old_text, new_text)
return data
  def ZipFileHandler(self):
    """This handler sends the contents of the requested file in compressed form.

    A single-letter query skews the advertised Content-Length so clients'
    length-mismatch handling can be exercised:
      C - the compressed size (OK),
      U - the uncompressed size (Non-standard, but handled),
      S - less than compressed (OK because we keep going),
      M - larger than compressed but less than uncompressed (an error),
      L - larger than uncompressed (an error)
    Example: compressedfiles/Picture_1.doc?C
    """
    prefix = "/compressedfiles/"
    if not self.path.startswith(prefix):
      return False
    # Consume a request body if present.
    if self.command == 'POST' or self.command == 'PUT' :
      self.ReadRequestBody()
    _, _, url_path, _, query, _ = urlparse.urlparse(self.path)
    if not query in ('C', 'U', 'S', 'M', 'L'):
      return False
    # Map the URL path below the prefix onto the server's data directory.
    sub_path = url_path[len(prefix):]
    entries = sub_path.split('/')
    file_path = os.path.join(self.server.data_dir, *entries)
    if os.path.isdir(file_path):
      file_path = os.path.join(file_path, 'index.html')
    if not os.path.isfile(file_path):
      print "File not found " + sub_path + " full path:" + file_path
      self.send_error(404)
      return True
    f = open(file_path, "rb")
    data = f.read()
    uncompressed_len = len(data)
    f.close()
    # Compress the data and keep both sizes for the length games below.
    data = zlib.compress(data)
    compressed_len = len(data)
    content_length = compressed_len
    if query == 'U':
      content_length = uncompressed_len
    elif query == 'S':
      content_length = compressed_len / 2
    elif query == 'M':
      # Halfway between the two sizes: always wrong for the actual body.
      content_length = (compressed_len + uncompressed_len) / 2
    elif query == 'L':
      content_length = compressed_len + uncompressed_len
    self.send_response(200)
    self.send_header('Content-Type', 'application/msword')
    self.send_header('Content-encoding', 'deflate')
    self.send_header('Connection', 'close')
    self.send_header('Content-Length', content_length)
    self.send_header('ETag', '\'' + file_path + '\'')
    self.end_headers()
    self.wfile.write(data)
    return True
def FileHandler(self):
"""This handler sends the contents of the requested file. Wow, it's like
a real webserver!"""
prefix = self.server.file_root_url
if not self.path.startswith(prefix):
return False
return self._FileHandlerHelper(prefix)
def PostOnlyFileHandler(self):
"""This handler sends the contents of the requested file on a POST."""
prefix = urlparse.urljoin(self.server.file_root_url, 'post/')
if not self.path.startswith(prefix):
return False
return self._FileHandlerHelper(prefix)
  def _FileHandlerHelper(self, prefix):
    """Serves a file from the data dir for a path starting with |prefix|.

    Supports 'expected_body' and 'expected_headers' query checks (404 on
    mismatch), '<file>.mock-http-headers' companion files that override the
    response status line and headers, and simple 'bytes=' Range requests.
    """
    request_body = ''
    if self.command == 'POST' or self.command == 'PUT':
      # Consume a request body if present.
      request_body = self.ReadRequestBody()
    _, _, url_path, _, query, _ = urlparse.urlparse(self.path)
    query_dict = cgi.parse_qs(query)
    # Optional gate: only serve when the body matches one of the expected
    # values given in the query string.
    expected_body = query_dict.get('expected_body', [])
    if expected_body and request_body not in expected_body:
      self.send_response(404)
      self.end_headers()
      self.wfile.write('')
      return True
    # Optional gate: each expected_headers entry is "name:value" and must
    # match the corresponding request header exactly.
    expected_headers = query_dict.get('expected_headers', [])
    for expected_header in expected_headers:
      header_name, expected_value = expected_header.split(':')
      if self.headers.getheader(header_name) != expected_value:
        self.send_response(404)
        self.end_headers()
        self.wfile.write('')
        return True
    sub_path = url_path[len(prefix):]
    entries = sub_path.split('/')
    file_path = os.path.join(self.server.data_dir, *entries)
    if os.path.isdir(file_path):
      file_path = os.path.join(file_path, 'index.html')
    if not os.path.isfile(file_path):
      print "File not found " + sub_path + " full path:" + file_path
      self.send_error(404)
      return True
    f = open(file_path, "rb")
    data = f.read()
    f.close()
    data = self._ReplaceFileData(data, query)
    # The mock-headers branch below may switch HTTP versions; restore the
    # original before returning.
    old_protocol_version = self.protocol_version
    # If file.mock-http-headers exists, it contains the headers we
    # should send. Read them in and parse them.
    headers_path = file_path + '.mock-http-headers'
    if os.path.isfile(headers_path):
      f = open(headers_path, "r")
      # "HTTP/1.1 200 OK"
      response = f.readline()
      http_major, http_minor, status_code = re.findall(
          'HTTP/(\d+).(\d+) (\d+)', response)[0]
      self.protocol_version = "HTTP/%s.%s" % (http_major, http_minor)
      self.send_response(int(status_code))
      for line in f:
        header_values = re.findall('(\S+):\s*(.*)', line)
        if len(header_values) > 0:
          # "name: value"
          name, value = header_values[0]
          self.send_header(name, value)
      f.close()
    else:
      # Could be more generic once we support mime-type sniffing, but for
      # now we need to set it explicitly.
      range_header = self.headers.get('Range')
      if range_header and range_header.startswith('bytes='):
        # Note this doesn't handle all valid byte range_header values (i.e.
        # left open ended ones), just enough for what we needed so far.
        range_header = range_header[6:].split('-')
        start = int(range_header[0])
        if range_header[1]:
          end = int(range_header[1])
        else:
          end = len(data) - 1
        self.send_response(206)
        content_range = ('bytes ' + str(start) + '-' + str(end) + '/' +
                         str(len(data)))
        self.send_header('Content-Range', content_range)
        data = data[start: end + 1]
      else:
        self.send_response(200)
      self.send_header('Content-Type', self.GetMIMETypeFromName(file_path))
      self.send_header('Accept-Ranges', 'bytes')
      self.send_header('Content-Length', len(data))
      self.send_header('ETag', '\'' + file_path + '\'')
    self.end_headers()
    # HEAD requests get headers only, never a body.
    if (self.command != 'HEAD'):
      self.wfile.write(data)
    self.protocol_version = old_protocol_version
    return True
def SetCookieHandler(self):
"""This handler just sets a cookie, for testing cookie handling."""
if not self._ShouldHandleRequest("/set-cookie"):
return False
query_char = self.path.find('?')
if query_char != -1:
cookie_values = self.path[query_char + 1:].split('&')
else:
cookie_values = ("",)
self.send_response(200)
self.send_header('Content-Type', 'text/html')
for cookie_value in cookie_values:
self.send_header('Set-Cookie', '%s' % cookie_value)
self.end_headers()
for cookie_value in cookie_values:
self.wfile.write('%s' % cookie_value)
return True
def SetManyCookiesHandler(self):
"""This handler just sets a given number of cookies, for testing handling
of large numbers of cookies."""
if not self._ShouldHandleRequest("/set-many-cookies"):
return False
query_char = self.path.find('?')
if query_char != -1:
num_cookies = int(self.path[query_char + 1:])
else:
num_cookies = 0
self.send_response(200)
self.send_header('', 'text/html')
for _i in range(0, num_cookies):
self.send_header('Set-Cookie', 'a=')
self.end_headers()
self.wfile.write('%d cookies were sent' % num_cookies)
return True
def ExpectAndSetCookieHandler(self):
"""Expects some cookies to be sent, and if they are, sets more cookies.
The expect parameter specifies a required cookie. May be specified multiple
times.
The set parameter specifies a cookie to set if all required cookies are
preset. May be specified multiple times.
The data parameter specifies the response body data to be returned."""
if not self._ShouldHandleRequest("/expect-and-set-cookie"):
return False
_, _, _, _, query, _ = urlparse.urlparse(self.path)
query_dict = cgi.parse_qs(query)
cookies = set()
if 'Cookie' in self.headers:
cookie_header = self.headers.getheader('Cookie')
cookies.update([s.strip() for s in cookie_header.split(';')])
got_all_expected_cookies = True
for expected_cookie in query_dict.get('expect', []):
if expected_cookie not in cookies:
got_all_expected_cookies = False
self.send_response(200)
self.send_header('Content-Type', 'text/html')
if got_all_expected_cookies:
for cookie_value in query_dict.get('set', []):
self.send_header('Set-Cookie', '%s' % cookie_value)
self.end_headers()
for data_value in query_dict.get('data', []):
self.wfile.write(data_value)
return True
def SetHeaderHandler(self):
"""This handler sets a response header. Parameters are in the
key%3A%20value&key2%3A%20value2 format."""
if not self._ShouldHandleRequest("/set-header"):
return False
query_char = self.path.find('?')
if query_char != -1:
headers_values = self.path[query_char + 1:].split('&')
else:
headers_values = ("",)
self.send_response(200)
self.send_header('Content-Type', 'text/html')
for header_value in headers_values:
header_value = urllib.unquote(header_value)
(key, value) = header_value.split(': ', 1)
self.send_header(key, value)
self.end_headers()
for header_value in headers_values:
self.wfile.write('%s' % header_value)
return True
  def AuthBasicHandler(self):
    """This handler tests 'Basic' authentication. It just sends a page with
    title 'user/pass' if you succeed.

    Query parameters:
      password: overrides the expected password (default 'secret').
      realm: overrides the challenge realm (default 'testrealm').
      set-cookie-if-challenged: iff present, a 401 challenge also sets the
          cookie got_challenged=true.
    A request with If-None-Match: abc gets 304; a .gif URL is answered with
    a cacheable test image, anything else with a cacheable HTML page.
    """
    if not self._ShouldHandleRequest("/auth-basic"):
      return False
    username = userpass = password = b64str = ""
    expected_password = 'secret'
    realm = 'testrealm'
    set_cookie_if_challenged = False
    _, _, url_path, _, query, _ = urlparse.urlparse(self.path)
    query_params = cgi.parse_qs(query, True)
    if 'set-cookie-if-challenged' in query_params:
      set_cookie_if_challenged = True
    if 'password' in query_params:
      expected_password = query_params['password'][0]
    if 'realm' in query_params:
      realm = query_params['realm'][0]
    auth = self.headers.getheader('authorization')
    try:
      if not auth:
        raise Exception('no auth')
      # Decode the "Basic <base64>" credential into user:pass parts.
      b64str = re.findall(r'Basic (\S+)', auth)[0]
      userpass = base64.b64decode(b64str)
      username, password = re.findall(r'([^:]+):(\S+)', userpass)[0]
      if password != expected_password:
        raise Exception('wrong password')
    except Exception, e:
      # Authentication failed.
      self.send_response(401)
      self.send_header('WWW-Authenticate', 'Basic realm="%s"' % realm)
      self.send_header('Content-Type', 'text/html')
      if set_cookie_if_challenged:
        self.send_header('Set-Cookie', 'got_challenged=true')
      self.end_headers()
      # The debug dump below helps tests diagnose why auth was rejected.
      self.wfile.write('<html><head>')
      self.wfile.write('<title>Denied: %s</title>' % e)
      self.wfile.write('</head><body>')
      self.wfile.write('auth=%s<p>' % auth)
      self.wfile.write('b64str=%s<p>' % b64str)
      self.wfile.write('username: %s<p>' % username)
      self.wfile.write('userpass: %s<p>' % userpass)
      self.wfile.write('password: %s<p>' % password)
      self.wfile.write('You sent:<br>%s<p>' % self.headers)
      self.wfile.write('</body></html>')
      return True
    # Authentication successful. (Return a cachable response to allow for
    # testing cached pages that require authentication.)
    old_protocol_version = self.protocol_version
    self.protocol_version = "HTTP/1.1"
    if_none_match = self.headers.getheader('if-none-match')
    if if_none_match == "abc":
      # Matches the fixed ETag sent below, so the cached copy is current.
      self.send_response(304)
      self.end_headers()
    elif url_path.endswith(".gif"):
      # Using chrome/test/data/google/logo.gif as the test image
      test_image_path = ['google', 'logo.gif']
      gif_path = os.path.join(self.server.data_dir, *test_image_path)
      if not os.path.isfile(gif_path):
        self.send_error(404)
        self.protocol_version = old_protocol_version
        return True
      f = open(gif_path, "rb")
      data = f.read()
      f.close()
      self.send_response(200)
      self.send_header('Content-Type', 'image/gif')
      self.send_header('Cache-control', 'max-age=60000')
      self.send_header('Etag', 'abc')
      self.end_headers()
      self.wfile.write(data)
    else:
      self.send_response(200)
      self.send_header('Content-Type', 'text/html')
      self.send_header('Cache-control', 'max-age=60000')
      self.send_header('Etag', 'abc')
      self.end_headers()
      self.wfile.write('<html><head>')
      self.wfile.write('<title>%s/%s</title>' % (username, password))
      self.wfile.write('</head><body>')
      self.wfile.write('auth=%s<p>' % auth)
      self.wfile.write('You sent:<br>%s<p>' % self.headers)
      self.wfile.write('</body></html>')
    self.protocol_version = old_protocol_version
    return True
def GetNonce(self, force_reset=False):
"""Returns a nonce that's stable per request path for the server's lifetime.
This is a fake implementation. A real implementation would only use a given
nonce a single time (hence the name n-once). However, for the purposes of
unittesting, we don't care about the security of the nonce.
Args:
force_reset: Iff set, the nonce will be changed. Useful for testing the
"stale" response.
"""
if force_reset or not self.server.nonce_time:
self.server.nonce_time = time.time()
return hashlib.md5('privatekey%s%d' %
(self.path, self.server.nonce_time)).hexdigest()
  def AuthDigestHandler(self):
    """This handler tests 'Digest' authentication.

    It just sends a page with title 'user/pass' if you succeed.
    A stale response is sent iff "stale" is present in the request path.
    The expected password is 'secret' in realm 'testrealm'; the nonce and
    opaque values come from GetNonce() and a fixed md5 respectively.
    """
    if not self._ShouldHandleRequest("/auth-digest"):
      return False
    stale = 'stale' in self.path
    nonce = self.GetNonce(force_reset=stale)
    opaque = hashlib.md5('opaque').hexdigest()
    password = 'secret'
    realm = 'testrealm'
    auth = self.headers.getheader('authorization')
    pairs = {}
    try:
      if not auth:
        raise Exception('no auth')
      if not auth.startswith('Digest'):
        raise Exception('not digest')
      # Pull out all the name="value" pairs as a dictionary.
      pairs = dict(re.findall(r'(\b[^ ,=]+)="?([^",]+)"?', auth))
      # Make sure it's all valid.
      if pairs['nonce'] != nonce:
        raise Exception('wrong nonce')
      if pairs['opaque'] != opaque:
        raise Exception('wrong opaque')
      # Check the 'response' value and make sure it matches our magic hash.
      # See http://www.ietf.org/rfc/rfc2617.txt
      hash_a1 = hashlib.md5(
          ':'.join([pairs['username'], realm, password])).hexdigest()
      hash_a2 = hashlib.md5(':'.join([self.command, pairs['uri']])).hexdigest()
      if 'qop' in pairs and 'nc' in pairs and 'cnonce' in pairs:
        response = hashlib.md5(':'.join([hash_a1, nonce, pairs['nc'],
            pairs['cnonce'], pairs['qop'], hash_a2])).hexdigest()
      else:
        response = hashlib.md5(':'.join([hash_a1, nonce, hash_a2])).hexdigest()
      if pairs['response'] != response:
        raise Exception('wrong password')
    except Exception, e:
      # Authentication failed.
      self.send_response(401)
      hdr = ('Digest '
             'realm="%s", '
             'domain="/", '
             'qop="auth", '
             'algorithm=MD5, '
             'nonce="%s", '
             'opaque="%s"') % (realm, nonce, opaque)
      if stale:
        hdr += ', stale="TRUE"'
      self.send_header('WWW-Authenticate', hdr)
      self.send_header('Content-Type', 'text/html')
      self.end_headers()
      # The debug dump below helps tests diagnose why auth was rejected.
      self.wfile.write('<html><head>')
      self.wfile.write('<title>Denied: %s</title>' % e)
      self.wfile.write('</head><body>')
      self.wfile.write('auth=%s<p>' % auth)
      self.wfile.write('pairs=%s<p>' % pairs)
      self.wfile.write('You sent:<br>%s<p>' % self.headers)
      self.wfile.write('We are replying:<br>%s<p>' % hdr)
      self.wfile.write('</body></html>')
      return True
    # Authentication successful.
    self.send_response(200)
    self.send_header('Content-Type', 'text/html')
    self.end_headers()
    self.wfile.write('<html><head>')
    self.wfile.write('<title>%s/%s</title>' % (pairs['username'], password))
    self.wfile.write('</head><body>')
    self.wfile.write('auth=%s<p>' % auth)
    self.wfile.write('pairs=%s<p>' % pairs)
    self.wfile.write('</body></html>')
    return True
def SlowServerHandler(self):
    """Wait for a caller-specified time before responding.

    The syntax is /slow?0.5 to wait for half a second. Defaults to one
    second when no (or an unparseable) duration is given.
    """
    if not self._ShouldHandleRequest("/slow"):
        return False
    query_char = self.path.find('?')
    wait_sec = 1.0
    if query_char >= 0:
        try:
            # BUG FIX: parse as float so fractional waits like "/slow?0.5"
            # work as documented; int() rejected them, silently falling
            # back to the 1-second default.
            wait_sec = float(self.path[query_char + 1:])
        except ValueError:
            pass
    time.sleep(wait_sec)
    self.send_response(200)
    self.send_header('Content-Type', 'text/plain')
    self.end_headers()
    self.wfile.write("waited %d seconds" % wait_sec)
    return True
def ChunkedServerHandler(self):
    """Send a chunked (Transfer-Encoding: chunked) response.

    Query parameters (all integers):
      waitBeforeHeaders - ms to wait before sending headers
      waitBetweenChunks - ms to wait between chunks
      chunkSize         - size of each chunk in bytes
      chunksNumber      - number of chunks
    Example: /chunked?waitBeforeHeaders=1000&chunkSize=5&chunksNumber=5
    waits one second, then sends headers and five chunks five bytes each.
    """
    if not self._ShouldHandleRequest("/chunked"):
        return False
    settings = {'waitBeforeHeaders' : 0,
                'waitBetweenChunks' : 0,
                'chunkSize' : 5,
                'chunksNumber' : 5}
    sep = self.path.find('?')
    if sep >= 0:
        for pair in self.path[sep + 1:].split('&'):
            pieces = pair.split('=')
            if len(pieces) != 2:
                continue
            try:
                settings[pieces[0]] = int(pieces[1])
            except ValueError:
                pass
    time.sleep(0.001 * settings['waitBeforeHeaders'])
    self.protocol_version = 'HTTP/1.1'  # Needed for chunked encoding
    self.send_response(200)
    self.send_header('Content-Type', 'text/plain')
    self.send_header('Connection', 'close')
    self.send_header('Transfer-Encoding', 'chunked')
    self.end_headers()
    # Chunked encoding: all data chunks, then the final zero-length chunk
    # followed by the closing CRLF.
    for index in range(settings['chunksNumber']):
        if index > 0:
            time.sleep(0.001 * settings['waitBetweenChunks'])
        self.sendChunkHelp('*' * settings['chunkSize'])
        self.wfile.flush()  # Keep in mind that we start flushing only after 1kb.
    self.sendChunkHelp('')
    return True
def ContentTypeHandler(self):
    """Serve a small HTML body under a caller-chosen Content-Type.

    E.g. /contenttype?text/css responds with Content-Type: text/css.
    Falls back to text/html when no type is supplied.
    """
    if not self._ShouldHandleRequest("/contenttype"):
        return False
    sep = self.path.find('?')
    requested_type = self.path[sep + 1:].strip()
    self.send_response(200)
    self.send_header('Content-Type', requested_type or 'text/html')
    self.end_headers()
    self.wfile.write("<html>\n<body>\n<p>HTML text</p>\n</body>\n</html>\n")
    return True
def NoContentHandler(self):
    """Reply with 204 No Content (no body, no Content-Type header)."""
    if self._ShouldHandleRequest("/nocontent"):
        self.send_response(204)
        self.end_headers()
        return True
    return False
def ServerRedirectHandler(self):
    """Issue a 301 redirect to the URL given after '?'.

    '/server-redirect?http://foo.bar/asdf' redirects to
    'http://foo.bar/asdf'. Without a destination a help page is served.
    """
    test_name = "/server-redirect"
    if not self._ShouldHandleRequest(test_name):
        return False
    sep = self.path.find('?')
    if sep < 0 or sep + 1 >= len(self.path):
        self.sendRedirectHelp(test_name)
        return True
    target = urllib.unquote(self.path[sep + 1:])
    self.send_response(301)  # moved permanently
    self.send_header('Location', target)
    self.send_header('Content-Type', 'text/html')
    self.end_headers()
    self.wfile.write('<html><head>')
    self.wfile.write('</head><body>Redirecting to %s</body></html>' % target)
    return True
def CrossSiteRedirectHandler(self):
    """301-redirect '/cross-site/hostname/...' to //hostname:port/...

    Used to hop between different Sites, triggering cross-site /
    cross-process navigations in the browser.
    """
    test_name = "/cross-site"
    if not self._ShouldHandleRequest(test_name):
        return False
    remainder = urllib.unquote(self.path[(len(test_name) + 1):])
    cut = remainder.find('/')
    if cut < 0:
        self.sendRedirectHelp(test_name)
        return True
    target_host = remainder[:cut]
    target_path = remainder[(cut + 1):]
    target = "//%s:%s/%s" % (target_host, str(self.server.server_port),
                             target_path)
    self.send_response(301)  # moved permanently
    self.send_header('Location', target)
    self.send_header('Content-Type', 'text/html')
    self.end_headers()
    self.wfile.write('<html><head>')
    self.wfile.write('</head><body>Redirecting to %s</body></html>' % target)
    return True
def ClientRedirectHandler(self):
    """Serve a meta-refresh page that sends the client to the given URL.

    '/client-redirect?http://foo.bar/asdf' redirects to
    'http://foo.bar/asdf'. Without a destination a help page is served.
    """
    test_name = "/client-redirect"
    if not self._ShouldHandleRequest(test_name):
        return False
    sep = self.path.find('?')
    if sep < 0 or sep + 1 >= len(self.path):
        self.sendRedirectHelp(test_name)
        return True
    target = urllib.unquote(self.path[sep + 1:])
    self.send_response(200)
    self.send_header('Content-Type', 'text/html')
    self.end_headers()
    self.wfile.write('<html><head>')
    self.wfile.write('<meta http-equiv="refresh" content="0;url=%s">' % target)
    self.wfile.write('</head><body>Redirecting to %s</body></html>' % target)
    return True
def GetSSLSessionCacheHandler(self):
    """Dump the log of TLS session cache operations as text/plain."""
    if not self._ShouldHandleRequest('/ssl-session-cache'):
        return False
    self.send_response(200)
    self.send_header('Content-Type', 'text/plain')
    self.end_headers()
    try:
        entries = self.server.session_cache.log
    except AttributeError:
        # The cache only records events when --https-record-resume is on.
        self.wfile.write('Pass --https-record-resume in order to use'
                         ' this request')
        return True
    for (event, session_id) in entries:
        self.wfile.write('%s\t%s\n' % (event, bytes(session_id).encode('hex')))
    return True
def SSLManySmallRecords(self):
    """Reply with many small writes, which over HTTPS become a series of
    small SSL records."""
    if not self._ShouldHandleRequest('/ssl-many-small-records'):
        return False
    self.send_response(200)
    self.send_header('Content-Type', 'text/plain')
    self.end_headers()
    # ~26K of data, written as twenty 1350-byte flushes.
    payload = '*' * 1350
    for _ in xrange(20):
        self.wfile.write(payload)
        self.wfile.flush()
    return True
def GetChannelID(self):
    """Reply with the base64 SHA-256 hash of the client's ChannelID."""
    if not self._ShouldHandleRequest('/channel-id'):
        return False
    self.send_response(200)
    self.send_header('Content-Type', 'text/plain')
    self.end_headers()
    raw_id = bytes(self.server.tlsConnection.channel_id)
    self.wfile.write(hashlib.sha256(raw_id).digest().encode('base64'))
    return True
def ClientCipherListHandler(self):
    """Reply with the client's offered cipher suites, one decimal value
    per line."""
    if not self._ShouldHandleRequest('/client-cipher-list'):
        return False
    self.send_response(200)
    self.send_header('Content-Type', 'text/plain')
    self.end_headers()
    suites = self.server.tlsConnection.clientHello.cipher_suites
    self.wfile.write('\n'.join(str(suite) for suite in suites))
    return True
def CloseSocketHandler(self):
    """Close the socket without writing any response bytes."""
    if self._ShouldHandleRequest('/close-socket'):
        self.wfile.close()
        return True
    return False
def RangeResetHandler(self):
    """Send data broken up by connection resets every N (default 4K) bytes.
    Support range requests. If the data requested doesn't straddle a reset
    boundary, it will all be sent. Used for testing resuming downloads.

    Query parameters:
      size              - total length of the resource (default 8000)
      rst_boundary      - reset just before each multiple of this offset
      token             - identifier for stateful, cross-request tests
      rst_limit         - max number of RSTs issued for a given token
      bounce_range      - if present, ignore Range request headers
      hold              - if present, wait for a signal before sending data
      no_verifiers      - if present, omit ETag/Last-Modified headers
      fail_precondition - answer 412 to conditional requests this many times
    """
    def DataForRange(start, end):
        """Data to be provided for a particular range of bytes."""
        # Offset and scale to avoid too obvious (and hence potentially
        # collidable) data.
        return ''.join([chr(y % 256)
                        for y in range(start * 2 + 15, end * 2 + 15, 2)])

    if not self._ShouldHandleRequest('/rangereset'):
        return False

    # HTTP/1.1 is required for ETag and range support.
    self.protocol_version = 'HTTP/1.1'
    _, _, url_path, _, query, _ = urlparse.urlparse(self.path)

    # Defaults
    size = 8000
    # Note that the rst is sent just before sending the rst_boundary byte.
    rst_boundary = 4000
    respond_to_range = True
    hold_for_signal = False
    rst_limit = -1
    token = 'DEFAULT'
    fail_precondition = 0
    send_verifiers = True

    # Parse the query
    qdict = urlparse.parse_qs(query, True)
    if 'size' in qdict:
        size = int(qdict['size'][0])
    if 'rst_boundary' in qdict:
        rst_boundary = int(qdict['rst_boundary'][0])
    if 'token' in qdict:
        # Identifying token for stateful tests.
        token = qdict['token'][0]
    if 'rst_limit' in qdict:
        # Max number of rsts for a given token.
        rst_limit = int(qdict['rst_limit'][0])
    if 'bounce_range' in qdict:
        respond_to_range = False
    if 'hold' in qdict:
        # Note that hold_for_signal will not work with null range requests;
        # see TODO below.
        hold_for_signal = True
    if 'no_verifiers' in qdict:
        send_verifiers = False
    if 'fail_precondition' in qdict:
        fail_precondition = int(qdict['fail_precondition'][0])

    # Record already set information, or set it.
    # NOTE: these counters live on the class so they persist across
    # requests that share the same token.
    rst_limit = TestPageHandler.rst_limits.setdefault(token, rst_limit)
    if rst_limit != 0:
        TestPageHandler.rst_limits[token] -= 1
    fail_precondition = TestPageHandler.fail_precondition.setdefault(
        token, fail_precondition)
    if fail_precondition != 0:
        TestPageHandler.fail_precondition[token] -= 1

    first_byte = 0
    last_byte = size - 1

    # Does that define what we want to return, or do we need to apply
    # a range?
    range_response = False
    range_header = self.headers.getheader('range')
    if range_header and respond_to_range:
        mo = re.match("bytes=(\d*)-(\d*)", range_header)
        if mo.group(1):
            first_byte = int(mo.group(1))
        if mo.group(2):
            last_byte = int(mo.group(2))
        if last_byte > size - 1:
            last_byte = size - 1
        range_response = True

    # An inverted range is a client error; refuse to handle it.
    if last_byte < first_byte:
        return False

    if (fail_precondition and
        (self.headers.getheader('If-Modified-Since') or
         self.headers.getheader('If-Match'))):
        self.send_response(412)
        self.end_headers()
        return True

    if range_response:
        self.send_response(206)
        self.send_header('Content-Range',
                         'bytes %d-%d/%d' % (first_byte, last_byte, size))
    else:
        self.send_response(200)
    self.send_header('Content-Type', 'application/octet-stream')
    self.send_header('Content-Length', last_byte - first_byte + 1)
    if send_verifiers:
        # If fail_precondition is non-zero, then the ETag for each request will be
        # different.
        etag = "%s%d" % (token, fail_precondition)
        self.send_header('ETag', etag)
        self.send_header('Last-Modified', 'Tue, 19 Feb 2013 14:32 EST')
    self.end_headers()

    if hold_for_signal:
        # TODO(rdsmith/phajdan.jr): http://crbug.com/169519: Without writing
        # a single byte, the self.server.handle_request() below hangs
        # without processing new incoming requests.
        self.wfile.write(DataForRange(first_byte, first_byte + 1))
        first_byte = first_byte + 1
        # handle requests until one of them clears this flag.
        self.server.wait_for_download = True
        while self.server.wait_for_download:
            self.server.handle_request()

    possible_rst = ((first_byte / rst_boundary) + 1) * rst_boundary
    if possible_rst >= last_byte or rst_limit == 0:
        # No RST has been requested in this range, so we don't need to
        # do anything fancy; just write the data and let the python
        # infrastructure close the connection.
        self.wfile.write(DataForRange(first_byte, last_byte + 1))
        self.wfile.flush()
        return True

    # We're resetting the connection part way in; go to the RST
    # boundary and then send an RST.
    # Because socket semantics do not guarantee that all the data will be
    # sent when using the linger semantics to hard close a socket,
    # we send the data and then wait for our peer to release us
    # before sending the reset.
    data = DataForRange(first_byte, possible_rst)
    self.wfile.write(data)
    self.wfile.flush()
    self.server.wait_for_download = True
    while self.server.wait_for_download:
        self.server.handle_request()
    l_onoff = 1  # Linger is active.
    l_linger = 0  # Seconds to linger for.
    self.connection.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER,
                               struct.pack('ii', l_onoff, l_linger))

    # Close all duplicates of the underlying socket to force the RST.
    self.wfile.close()
    self.rfile.close()
    self.connection.close()

    return True
def DefaultResponseHandler(self):
    """Catch-all handler for requests no special handler above claimed.

    A Content-Length header is always sent: without it the https
    connection is not closed properly and the browser keeps expecting
    data.
    """
    body = "Default response given for path: " + self.path
    self.send_response(200)
    self.send_header('Content-Type', 'text/html')
    self.send_header('Content-Length', len(body))
    self.end_headers()
    if self.command != 'HEAD':
        self.wfile.write(body)
    return True
def RedirectConnectHandler(self):
    """Answer the CONNECT request for www.redirect.com with a 302.

    Such a response is not specified by the RFC, so the browser should
    refuse to follow the redirect.
    """
    if self.path.find("www.redirect.com") < 0:
        return False
    self.send_response(302)  # moved temporarily
    self.send_header('Location', "http://www.destination.com/foo.js")
    self.send_header('Connection', 'close')
    self.end_headers()
    return True
def ServerAuthConnectHandler(self):
    """Answer the CONNECT request for www.server-auth.com with a 401.

    The response is deliberately nonsensical: a proxy server cannot
    request server authentication.
    """
    if self.path.find("www.server-auth.com") < 0:
        return False
    self.send_response(401)  # unauthorized
    self.send_header('WWW-Authenticate', 'Basic realm="WallyWorld"')
    self.send_header('Connection', 'close')
    self.end_headers()
    return True
def DefaultConnectResponseHandler(self):
    """Catch-all for CONNECT requests no special handler above claimed.

    Mirrors real web servers, which respond to CONNECT with 400.
    """
    body = "Your client has issued a malformed or illegal request."
    self.send_response(400)  # bad request
    self.send_header('Content-Type', 'text/html')
    self.send_header('Content-Length', len(body))
    self.end_headers()
    self.wfile.write(body)
    return True
# Called by the redirect handlers when no destination parameter was given.
def sendRedirectHelp(self, redirect_name):
    """Serve a short help page explaining the redirect URL syntax."""
    self.send_response(200)
    self.send_header('Content-Type', 'text/html')
    self.end_headers()
    self.wfile.write('<html><body><h1>Error: no redirect destination</h1>')
    self.wfile.write('Use <pre>%s?http://dest...</pre>' % redirect_name)
    self.wfile.write('</body></html>')
# Called by the chunked-transfer handler for each chunk.
def sendChunkHelp(self, chunk):
    """Write one HTTP chunk: size in hex, CRLF, chunk body, CRLF."""
    for piece in ('%X\r\n' % len(chunk), chunk, '\r\n'):
        self.wfile.write(piece)
class OCSPHandler(testserver_base.BasePageHandler):
    """Page handler that serves a canned OCSP response for every request."""

    def __init__(self, request, client_address, socket_server):
        # The DER response bytes are pre-computed by the server owner.
        self.ocsp_response = socket_server.ocsp_response
        handlers = [self.OCSPResponse]
        testserver_base.BasePageHandler.__init__(self, request, client_address,
                                                 socket_server, [], handlers,
                                                 [], handlers, [])

    def OCSPResponse(self):
        """Send the stored DER-encoded OCSP response."""
        self.send_response(200)
        self.send_header('Content-Type', 'application/ocsp-response')
        self.send_header('Content-Length', str(len(self.ocsp_response)))
        self.end_headers()
        self.wfile.write(self.ocsp_response)
class TCPEchoHandler(SocketServer.BaseRequestHandler):
    """The RequestHandler class for the TCP echo server.

    One instance is created per connection; handle() implements the
    conversation with the client.
    """

    def handle(self):
        """Validates the client's "echo request" and sends the response."""
        payload = self.request.recv(65536).strip()
        # Only a well-formed "echo request" message earns an
        # "echo response" message back.
        try:
            reply = echo_message.GetEchoResponseData(payload)
        except ValueError:
            return
        if not reply:
            return
        self.request.send(reply)
class UDPEchoHandler(SocketServer.BaseRequestHandler):
    """The RequestHandler class for the UDP echo server.

    One instance is created per datagram; handle() implements the
    conversation with the client.
    """

    def handle(self):
        """Validates the client's "echo request" and sends the response."""
        payload = self.request[0].strip()
        reply_socket = self.request[1]
        # Only a well-formed "echo request" message earns an
        # "echo response" message back.
        try:
            reply = echo_message.GetEchoResponseData(payload)
        except ValueError:
            return
        if not reply:
            return
        reply_socket.sendto(reply, self.client_address)
class BasicAuthProxyRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """A request handler that behaves as a proxy server which requires
    basic authentication. Only CONNECT, GET and HEAD is supported for now.
    """

    _AUTH_CREDENTIAL = 'Basic Zm9vOmJhcg=='  # foo:bar

    def parse_request(self):
        """Overrides parse_request to check credential.

        Sends a 407 challenge and returns False when the
        Proxy-Authorization header is missing or wrong.
        """
        if not BaseHTTPServer.BaseHTTPRequestHandler.parse_request(self):
            return False
        auth = self.headers.getheader('Proxy-Authorization')
        if auth != self._AUTH_CREDENTIAL:
            self.send_response(407)
            self.send_header('Proxy-Authenticate', 'Basic realm="MyRealm1"')
            self.end_headers()
            return False
        return True

    def _start_read_write(self, sock):
        """Shuttles bytes between the client connection and |sock| until
        either side closes or select() reports an error."""
        sock.setblocking(0)
        self.request.setblocking(0)
        rlist = [self.request, sock]
        while True:
            ready_sockets, _unused, errors = select.select(rlist, [], [])
            if errors:
                self.send_response(500)
                self.end_headers()
                return
            for s in ready_sockets:
                received = s.recv(1024)
                if len(received) == 0:
                    # Peer closed the connection; stop forwarding.
                    return
                if s == self.request:
                    other = sock
                else:
                    other = self.request
                other.send(received)

    def _do_common_method(self):
        """Forwards a GET/HEAD request to the origin server and relays
        the response back to the client."""
        url = urlparse.urlparse(self.path)
        port = url.port
        if not port:
            if url.scheme == 'http':
                port = 80
            elif url.scheme == 'https':
                port = 443
        if not url.hostname or not port:
            self.send_response(400)
            self.end_headers()
            return

        if len(url.path) == 0:
            path = '/'
        else:
            path = url.path
        if len(url.query) > 0:
            path = '%s?%s' % (url.path, url.query)

        sock = None
        try:
            sock = socket.create_connection((url.hostname, port))
            sock.send('%s %s %s\r\n' % (
                self.command, path, self.protocol_version))
            for header in self.headers.headers:
                header = header.strip()
                # Hop-by-hop headers must not be forwarded upstream.
                if (header.lower().startswith('connection') or
                    header.lower().startswith('proxy')):
                    continue
                sock.send('%s\r\n' % header)
            sock.send('\r\n')
            self._start_read_write(sock)
        except Exception:
            self.send_response(500)
            self.end_headers()
        finally:
            if sock is not None:
                sock.close()

    def do_CONNECT(self):
        """Opens a tunnel to the host:port named in the request line."""
        try:
            pos = self.path.rfind(':')
            host = self.path[:pos]
            port = int(self.path[pos+1:])
        except Exception:
            self.send_response(400)
            self.end_headers()
            # BUG FIX: without this return the code fell through with
            # |host| and |port| undefined and tried to connect anyway.
            return

        sock = None
        try:
            sock = socket.create_connection((host, port))
            self.send_response(200, 'Connection established')
            self.end_headers()
            self._start_read_write(sock)
        except Exception:
            self.send_response(500)
            self.end_headers()
        finally:
            # BUG FIX: |sock| was closed unconditionally here, raising
            # UnboundLocalError whenever create_connection() itself failed.
            # Guard it the same way _do_common_method() does.
            if sock is not None:
                sock.close()

    def do_GET(self):
        self._do_common_method()

    def do_HEAD(self):
        self._do_common_method()
class ServerRunner(testserver_base.TestServerRunner):
"""TestServerRunner for the net test servers."""
def __init__(self):
super(ServerRunner, self).__init__()
self.__ocsp_server = None
def __make_data_dir(self):
if self.options.data_dir:
if not os.path.isdir(self.options.data_dir):
raise testserver_base.OptionError('specified data dir not found: ' +
self.options.data_dir + ' exiting...')
my_data_dir = self.options.data_dir
else:
# Create the default path to our data dir, relative to the exe dir.
my_data_dir = os.path.join(BASE_DIR, "..", "..", "..", "..",
"test", "data")
#TODO(ibrar): Must use Find* funtion defined in google\tools
#i.e my_data_dir = FindUpward(my_data_dir, "test", "data")
return my_data_dir
def create_server(self, server_data):
port = self.options.port
host = self.options.host
if self.options.server_type == SERVER_HTTP:
if self.options.https:
pem_cert_and_key = None
ocsp_der = None
if self.options.cert_and_key_file:
if not os.path.isfile(self.options.cert_and_key_file):
raise testserver_base.OptionError(
'specified server cert file not found: ' +
self.options.cert_and_key_file + ' exiting...')
pem_cert_and_key = file(self.options.cert_and_key_file, 'r').read()
else:
# generate a new certificate and run an OCSP server for it.
self.__ocsp_server = OCSPServer((host, 0), OCSPHandler)
print ('OCSP server started on %s:%d...' %
(host, self.__ocsp_server.server_port))
ocsp_state = None
if self.options.ocsp == 'ok':
ocsp_state = minica.OCSP_STATE_GOOD
elif self.options.ocsp == 'revoked':
ocsp_state = minica.OCSP_STATE_REVOKED
elif self.options.ocsp == 'invalid':
ocsp_state = minica.OCSP_STATE_INVALID
elif self.options.ocsp == 'unauthorized':
ocsp_state = minica.OCSP_STATE_UNAUTHORIZED
elif self.options.ocsp == 'unknown':
ocsp_state = minica.OCSP_STATE_UNKNOWN
else:
raise testserver_base.OptionError('unknown OCSP status: ' +
self.options.ocsp_status)
(pem_cert_and_key, ocsp_der) = minica.GenerateCertKeyAndOCSP(
subject = "127.0.0.1",
ocsp_url = ("http://%s:%d/ocsp" %
(host, self.__ocsp_server.server_port)),
ocsp_state = ocsp_state,
serial = self.options.cert_serial)
if self.options.ocsp_server_unavailable:
# SEQUENCE containing ENUMERATED with value 3 (tryLater).
self.__ocsp_server.ocsp_response = '30030a0103'.decode('hex')
else:
self.__ocsp_server.ocsp_response = ocsp_der
for ca_cert in self.options.ssl_client_ca:
if not os.path.isfile(ca_cert):
raise testserver_base.OptionError(
'specified trusted client CA file not found: ' + ca_cert +
' exiting...')
stapled_ocsp_response = None
if self.options.staple_ocsp_response:
stapled_ocsp_response = ocsp_der
server = HTTPSServer((host, port), TestPageHandler, pem_cert_and_key,
self.options.ssl_client_auth,
self.options.ssl_client_ca,
self.options.ssl_client_cert_type,
self.options.ssl_bulk_cipher,
self.options.ssl_key_exchange,
self.options.enable_npn,
self.options.record_resume,
self.options.tls_intolerant,
self.options.tls_intolerance_type,
self.options.signed_cert_timestamps_tls_ext.decode(
"base64"),
self.options.fallback_scsv,
stapled_ocsp_response,
self.options.disable_session_cache)
print 'HTTPS server started on https://%s:%d...' % \
(host, server.server_port)
else:
server = HTTPServer((host, port), TestPageHandler)
print 'HTTP server started on http://%s:%d...' % \
(host, server.server_port)
server.data_dir = self.__make_data_dir()
server.file_root_url = self.options.file_root_url
server_data['port'] = server.server_port
elif self.options.server_type == SERVER_WEBSOCKET:
# Launch pywebsocket via WebSocketServer.
logger = logging.getLogger()
logger.addHandler(logging.StreamHandler())
# TODO(toyoshim): Remove following os.chdir. Currently this operation
# is required to work correctly. It should be fixed from pywebsocket side.
os.chdir(self.__make_data_dir())
websocket_options = WebSocketOptions(host, port, '.')
scheme = "ws"
if self.options.cert_and_key_file:
scheme = "wss"
websocket_options.use_tls = True
websocket_options.private_key = self.options.cert_and_key_file
websocket_options.certificate = self.options.cert_and_key_file
if self.options.ssl_client_auth:
websocket_options.tls_client_cert_optional = False
websocket_options.tls_client_auth = True
if len(self.options.ssl_client_ca) != 1:
raise testserver_base.OptionError(
'one trusted client CA file should be specified')
if not os.path.isfile(self.options.ssl_client_ca[0]):
raise testserver_base.OptionError(
'specified trusted client CA file not found: ' +
self.options.ssl_client_ca[0] + ' exiting...')
websocket_options.tls_client_ca = self.options.ssl_client_ca[0]
server = WebSocketServer(websocket_options)
print 'WebSocket server started on %s://%s:%d...' % \
(scheme, host, server.server_port)
server_data['port'] = server.server_port
websocket_options.use_basic_auth = self.options.ws_basic_auth
elif self.options.server_type == SERVER_TCP_ECHO:
# Used for generating the key (randomly) that encodes the "echo request"
# message.
random.seed()
server = TCPEchoServer((host, port), TCPEchoHandler)
print 'Echo TCP server started on port %d...' % server.server_port
server_data['port'] = server.server_port
elif self.options.server_type == SERVER_UDP_ECHO:
# Used for generating the key (randomly) that encodes the "echo request"
# message.
random.seed()
server = UDPEchoServer((host, port), UDPEchoHandler)
print 'Echo UDP server started on port %d...' % server.server_port
server_data['port'] = server.server_port
elif self.options.server_type == SERVER_BASIC_AUTH_PROXY:
server = HTTPServer((host, port), BasicAuthProxyRequestHandler)
print 'BasicAuthProxy server started on port %d...' % server.server_port
server_data['port'] = server.server_port
elif self.options.server_type == SERVER_FTP:
my_data_dir = self.__make_data_dir()
# Instantiate a dummy authorizer for managing 'virtual' users
authorizer = pyftpdlib.ftpserver.DummyAuthorizer()
# Define a new user having full r/w permissions and a read-only
# anonymous user
authorizer.add_user('chrome', 'chrome', my_data_dir, perm='elradfmw')
authorizer.add_anonymous(my_data_dir)
# Instantiate FTP handler class
ftp_handler = pyftpdlib.ftpserver.FTPHandler
ftp_handler.authorizer = authorizer
# Define a customized banner (string returned when client connects)
ftp_handler.banner = ("pyftpdlib %s based ftpd ready." %
pyftpdlib.ftpserver.__ver__)
# Instantiate FTP server class and listen to address:port
server = pyftpdlib.ftpserver.FTPServer((host, port), ftp_handler)
server_data['port'] = server.socket.getsockname()[1]
print 'FTP server started on port %d...' % server_data['port']
else:
raise testserver_base.OptionError('unknown server type' +
self.options.server_type)
return server
def run_server(self):
if self.__ocsp_server:
self.__ocsp_server.serve_forever_on_thread()
testserver_base.TestServerRunner.run_server(self)
if self.__ocsp_server:
self.__ocsp_server.stop_serving()
def add_options(self):
testserver_base.TestServerRunner.add_options(self)
self.option_parser.add_option('--disable-session-cache',
action='store_true',
dest='disable_session_cache',
help='tells the server to disable the'
'TLS session cache.')
self.option_parser.add_option('-f', '--ftp', action='store_const',
const=SERVER_FTP, default=SERVER_HTTP,
dest='server_type',
help='start up an FTP server.')
self.option_parser.add_option('--tcp-echo', action='store_const',
const=SERVER_TCP_ECHO, default=SERVER_HTTP,
dest='server_type',
help='start up a tcp echo server.')
self.option_parser.add_option('--udp-echo', action='store_const',
const=SERVER_UDP_ECHO, default=SERVER_HTTP,
dest='server_type',
help='start up a udp echo server.')
self.option_parser.add_option('--basic-auth-proxy', action='store_const',
const=SERVER_BASIC_AUTH_PROXY,
default=SERVER_HTTP, dest='server_type',
help='start up a proxy server which requires '
'basic authentication.')
self.option_parser.add_option('--websocket', action='store_const',
const=SERVER_WEBSOCKET, default=SERVER_HTTP,
dest='server_type',
help='start up a WebSocket server.')
self.option_parser.add_option('--https', action='store_true',
dest='https', help='Specify that https '
'should be used.')
self.option_parser.add_option('--cert-and-key-file',
dest='cert_and_key_file', help='specify the '
'path to the file containing the certificate '
'and private key for the server in PEM '
'format')
self.option_parser.add_option('--ocsp', dest='ocsp', default='ok',
help='The type of OCSP response generated '
'for the automatically generated '
'certificate. One of [ok,revoked,invalid]')
self.option_parser.add_option('--cert-serial', dest='cert_serial',
default=0, type=int,
help='If non-zero then the generated '
'certificate will have this serial number')
self.option_parser.add_option('--tls-intolerant', dest='tls_intolerant',
default='0', type='int',
help='If nonzero, certain TLS connections '
'will be aborted in order to test version '
'fallback. 1 means all TLS versions will be '
'aborted. 2 means TLS 1.1 or higher will be '
'aborted. 3 means TLS 1.2 or higher will be '
'aborted.')
self.option_parser.add_option('--tls-intolerance-type',
dest='tls_intolerance_type',
default="alert",
help='Controls how the server reacts to a '
'TLS version it is intolerant to. Valid '
'values are "alert", "close", and "reset".')
self.option_parser.add_option('--signed-cert-timestamps-tls-ext',
dest='signed_cert_timestamps_tls_ext',
default='',
help='Base64 encoded SCT list. If set, '
'server will respond with a '
'signed_certificate_timestamp TLS extension '
'whenever the client supports it.')
self.option_parser.add_option('--fallback-scsv', dest='fallback_scsv',
default=False, const=True,
action='store_const',
help='If given, TLS_FALLBACK_SCSV support '
'will be enabled. This causes the server to '
'reject fallback connections from compatible '
'clients (e.g. Chrome).')
self.option_parser.add_option('--staple-ocsp-response',
dest='staple_ocsp_response',
default=False, action='store_true',
help='If set, server will staple the OCSP '
'response whenever OCSP is on and the client '
'supports OCSP stapling.')
self.option_parser.add_option('--https-record-resume',
dest='record_resume', const=True,
default=False, action='store_const',
help='Record resumption cache events rather '
'than resuming as normal. Allows the use of '
'the /ssl-session-cache request')
self.option_parser.add_option('--ssl-client-auth', action='store_true',
help='Require SSL client auth on every '
'connection.')
self.option_parser.add_option('--ssl-client-ca', action='append',
default=[], help='Specify that the client '
'certificate request should include the CA '
'named in the subject of the DER-encoded '
'certificate contained in the specified '
'file. This option may appear multiple '
'times, indicating multiple CA names should '
'be sent in the request.')
self.option_parser.add_option('--ssl-client-cert-type', action='append',
default=[], help='Specify that the client '
'certificate request should include the '
'specified certificate_type value. This '
'option may appear multiple times, '
'indicating multiple values should be send '
'in the request. Valid values are '
'"rsa_sign", "dss_sign", and "ecdsa_sign". '
'If omitted, "rsa_sign" will be used.')
self.option_parser.add_option('--ssl-bulk-cipher', action='append',
help='Specify the bulk encryption '
'algorithm(s) that will be accepted by the '
'SSL server. Valid values are "aes128gcm", '
'"aes256", "aes128", "3des", "rc4". If '
'omitted, all algorithms will be used. This '
'option may appear multiple times, '
'indicating multiple algorithms should be '
'enabled.');
self.option_parser.add_option('--ssl-key-exchange', action='append',
help='Specify the key exchange algorithm(s)'
'that will be accepted by the SSL server. '
'Valid values are "rsa", "dhe_rsa", '
'"ecdhe_rsa". If omitted, all algorithms '
'will be used. This option may appear '
'multiple times, indicating multiple '
'algorithms should be enabled.');
# TODO(davidben): Add ALPN support to tlslite.
self.option_parser.add_option('--enable-npn', dest='enable_npn',
default=False, const=True,
action='store_const',
help='Enable server support for the NPN '
'extension. The server will advertise '
'support for exactly one protocol, http/1.1')
self.option_parser.add_option('--file-root-url', default='/files/',
help='Specify a root URL for files served.')
# TODO(ricea): Generalize this to support basic auth for HTTP too.
self.option_parser.add_option('--ws-basic-auth', action='store_true',
dest='ws_basic_auth',
help='Enable basic-auth for WebSocket')
self.option_parser.add_option('--ocsp-server-unavailable',
dest='ocsp_server_unavailable',
default=False, action='store_true',
help='If set, the OCSP server will return '
'a tryLater status rather than the actual '
'OCSP response.')
# Script entry point: build and run the selected test server, exiting with
# the runner's status code.
if __name__ == '__main__':
    sys.exit(ServerRunner().main())
|
ppwwyyxx/tensorflow | refs/heads/master | tensorflow/compiler/tests/nullary_ops_test.py | 24 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test cases for operators with no arguments."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.platform import googletest
class NullaryOpsTest(xla_test.XLATestCase):
  """Tests XLA compilation of operators that take no inputs."""

  def _testNullary(self, op, expected):
    """Builds `op` under the test scope, runs it, and compares to `expected`."""
    with self.session() as sess:
      with self.test_scope():
        out = op()
      self.assertAllClose(sess.run(out), expected, rtol=1e-3)

  def testNoOp(self):
    with self.session():
      with self.test_scope():
        noop = control_flow_ops.no_op()
      # Executing a no-op has no value to check; it must simply not crash.
      noop.run()

  def testConstants(self):
    for dtype in self.numeric_types:
      for value in (
          dtype(42),
          np.array([], dtype=dtype),
          np.array([1, 2], dtype=dtype),
          np.array([7, 7, 7, 7, 7], dtype=dtype),
          np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype),
          np.array([[[1, 2], [3, 4], [5, 6]], [[10, 20], [30, 40], [50, 60]]],
                   dtype=dtype),
          np.array([[[]], [[]]], dtype=dtype),
          np.array([[[[1]]]], dtype=dtype),
      ):
        # Bind `value` as a default argument so each lambda captures its own
        # constant rather than the loop variable.
        self._testNullary(
            lambda c=value: constant_op.constant(c), expected=value)

  def testComplexConstants(self):
    for dtype in self.complex_types:
      for value in (
          dtype(42 + 3j),
          np.array([], dtype=dtype),
          np.ones([50], dtype=dtype) * (3 + 4j),
          np.array([1j, 2 + 1j], dtype=dtype),
          np.array([[1, 2j, 7j], [4, 5, 6]], dtype=dtype),
          np.array([[[1, 2], [3, 4 + 6j], [5, 6]],
                    [[10 + 7j, 20], [30, 40], [50, 60]]],
                   dtype=dtype),
          np.array([[[]], [[]]], dtype=dtype),
          np.array([[[[1 + 3j]]]], dtype=dtype),
      ):
        self._testNullary(
            lambda c=value: constant_op.constant(c), expected=value)
if __name__ == "__main__":
googletest.main()
|
Stargrazer82301/CAAPR | refs/heads/master | CAAPR/CAAPR_AstroMagic/PTS/pts/do/core/add_paper.py | 1 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.do.core.add_paper Add a paper to the local collection of papers.
#
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import standard modules
import argparse
# Import the relevant PTS classes and modules
from pts.core.tools.papers import Papers
# -----------------------------------------------------------------
# Build the command-line interface: a label and a URL for the new paper.
parser = argparse.ArgumentParser()
parser.add_argument("label", type=str, help="label")
parser.add_argument("url", type=str, help="url")
args = parser.parse_args()

# -----------------------------------------------------------------

# Register the paper in the local collection and persist it to disk.
papers = Papers()
papers.add_entry(args.label, args.url)
papers.save()

# -----------------------------------------------------------------
|
WillGuan105/django | refs/heads/master | django/contrib/auth/__init__.py | 69 | import inspect
import re
from django.apps import apps as django_apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
from django.middleware.csrf import rotate_token
from django.utils.crypto import constant_time_compare
from django.utils.module_loading import import_string
from django.utils.translation import LANGUAGE_SESSION_KEY
from .signals import user_logged_in, user_logged_out, user_login_failed
# Session keys under which the authenticated user's state is stored.
SESSION_KEY = '_auth_user_id'  # serialized primary key of the user (see login())
BACKEND_SESSION_KEY = '_auth_user_backend'  # dotted path of the backend that authenticated the user
HASH_SESSION_KEY = '_auth_user_hash'  # value of user.get_session_auth_hash() (see login())
REDIRECT_FIELD_NAME = 'next'  # conventional name of the post-login redirect query parameter
def load_backend(path):
    """Import the backend class at dotted ``path`` and return a new instance of it."""
    return import_string(path)()
def _get_backends(return_tuples=False):
    """Instantiate every backend listed in settings.AUTHENTICATION_BACKENDS.

    Returns a list of backend instances, or of ``(instance, dotted_path)``
    pairs when ``return_tuples`` is true. Raises ImproperlyConfigured when
    the setting is empty.
    """
    backends = []
    for path in settings.AUTHENTICATION_BACKENDS:
        instance = load_backend(path)
        backends.append((instance, path) if return_tuples else instance)
    if not backends:
        raise ImproperlyConfigured(
            'No authentication backends have been defined. Does '
            'AUTHENTICATION_BACKENDS contain anything?'
        )
    return backends
def get_backends():
    """Return the configured authentication backend instances (without paths)."""
    return _get_backends(return_tuples=False)
def _clean_credentials(credentials):
"""
Cleans a dictionary of credentials of potentially sensitive info before
sending to less secure functions.
Not comprehensive - intended for user_login_failed signal
"""
SENSITIVE_CREDENTIALS = re.compile('api|token|key|secret|password|signature', re.I)
CLEANSED_SUBSTITUTE = '********************'
for key in credentials:
if SENSITIVE_CREDENTIALS.search(key):
credentials[key] = CLEANSED_SUBSTITUTE
return credentials
def _get_user_session_key(request):
    """Return the session's stored user primary key, coerced to its Python type.

    Raises KeyError if SESSION_KEY is not present in the session.
    """
    # This value in the session is always serialized to a string, so we need
    # to convert it back to Python whenever we access it.
    return get_user_model()._meta.pk.to_python(request.session[SESSION_KEY])
def authenticate(**credentials):
    """
    If the given credentials are valid, return a User object.

    Each configured backend is tried in order. A backend whose
    ``authenticate`` signature does not accept these keyword arguments is
    skipped. Returns None immediately if a backend raises PermissionDenied,
    and returns None (after firing ``user_login_failed``) when no backend
    accepts the credentials.
    """
    for backend, backend_path in _get_backends(return_tuples=True):
        try:
            # Signature check only; does not call the backend.
            inspect.getcallargs(backend.authenticate, **credentials)
        except TypeError:
            # This backend doesn't accept these credentials as arguments. Try the next one.
            continue
        try:
            user = backend.authenticate(**credentials)
        except PermissionDenied:
            # This backend says to stop in our tracks - this user should not be allowed in at all.
            return None
        if user is None:
            continue
        # Annotate the user object with the path of the backend.
        user.backend = backend_path
        return user
    # The credentials supplied are invalid to all backends, fire signal
    user_login_failed.send(sender=__name__,
                           credentials=_clean_credentials(credentials))
def login(request, user):
    """
    Persist a user id and a backend in the request. This way a user doesn't
    have to reauthenticate on every request. Note that data set during
    the anonymous session is retained when the user logs in.
    """
    session_auth_hash = ''
    if user is None:
        # Allow login(request, None) to re-persist the user already attached
        # to the request.
        user = request.user
    if hasattr(user, 'get_session_auth_hash'):
        session_auth_hash = user.get_session_auth_hash()
    if SESSION_KEY in request.session:
        if _get_user_session_key(request) != user.pk or (
                session_auth_hash and
                request.session.get(HASH_SESSION_KEY) != session_auth_hash):
            # To avoid reusing another user's session, create a new, empty
            # session if the existing session corresponds to a different
            # authenticated user.
            request.session.flush()
    else:
        request.session.cycle_key()
    # Store the serialized pk, the backend path, and the auth hash so that
    # get_user() can restore and verify this user on later requests.
    request.session[SESSION_KEY] = user._meta.pk.value_to_string(user)
    request.session[BACKEND_SESSION_KEY] = user.backend
    request.session[HASH_SESSION_KEY] = session_auth_hash
    if hasattr(request, 'user'):
        request.user = user
    # Rotate the CSRF token now that the user is authenticated.
    rotate_token(request)
    user_logged_in.send(sender=user.__class__, request=request, user=user)
def logout(request):
    """
    Removes the authenticated user's ID from the request and flushes their
    session data.
    """
    # Dispatch the signal before the user is logged out so the receivers have a
    # chance to find out *who* logged out.
    user = getattr(request, 'user', None)
    if hasattr(user, 'is_authenticated') and not user.is_authenticated():
        user = None
    user_logged_out.send(sender=user.__class__, request=request, user=user)
    # remember language choice saved to session
    language = request.session.get(LANGUAGE_SESSION_KEY)
    request.session.flush()
    if language is not None:
        # Re-apply the language choice in the fresh, anonymous session.
        request.session[LANGUAGE_SESSION_KEY] = language
    if hasattr(request, 'user'):
        from django.contrib.auth.models import AnonymousUser
        request.user = AnonymousUser()
def get_user_model():
    """Return the User model class named by ``settings.AUTH_USER_MODEL``.

    Raises ImproperlyConfigured when the setting is malformed or refers to
    a model that is not installed.
    """
    try:
        return django_apps.get_model(settings.AUTH_USER_MODEL)
    except ValueError:
        raise ImproperlyConfigured(
            "AUTH_USER_MODEL must be of the form 'app_label.model_name'")
    except LookupError:
        raise ImproperlyConfigured(
            "AUTH_USER_MODEL refers to model '%s' that has not been installed"
            % settings.AUTH_USER_MODEL)
def get_user(request):
    """
    Returns the user model instance associated with the given request session.
    If no user is retrieved an instance of `AnonymousUser` is returned.
    """
    from .models import AnonymousUser
    user = None
    try:
        user_id = _get_user_session_key(request)
        backend_path = request.session[BACKEND_SESSION_KEY]
    except KeyError:
        # No authenticated user recorded in this session.
        pass
    else:
        # Only trust backends that are still configured.
        if backend_path in settings.AUTHENTICATION_BACKENDS:
            backend = load_backend(backend_path)
            user = backend.get_user(user_id)
            # Verify the session
            if hasattr(user, 'get_session_auth_hash'):
                session_hash = request.session.get(HASH_SESSION_KEY)
                session_hash_verified = session_hash and constant_time_compare(
                    session_hash,
                    user.get_session_auth_hash()
                )
                if not session_hash_verified:
                    # The stored hash is missing or no longer matches the
                    # user's current hash; evict the session instead of
                    # trusting it.
                    request.session.flush()
                    user = None
    return user or AnonymousUser()
def get_permission_codename(action, opts):
    """
    Returns the codename of the permission for the specified action,
    built as ``<action>_<model_name>`` from the model options.
    """
    return '{0}_{1}'.format(action, opts.model_name)
def update_session_auth_hash(request, user):
    """
    Updating a user's password logs out all sessions for the user.
    This function takes the current request and the updated user object from
    which the new session hash will be derived and updates the session hash
    appropriately to prevent a password change from logging out the session
    from which the password was changed.
    """
    # Only refresh the hash when the session belongs to this same user.
    if hasattr(user, 'get_session_auth_hash') and request.user == user:
        request.session[HASH_SESSION_KEY] = user.get_session_auth_hash()
default_app_config = 'django.contrib.auth.apps.AuthConfig'
|
freedomtan/tensorflow | refs/heads/master | tensorflow/lite/tools/pip_package/setup.py | 9 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow Lite is for mobile and embedded devices.
TensorFlow Lite is the official solution for running machine learning models on
mobile and embedded devices. It enables on-device machine learning inference
with low latency and a small binary size on Android, iOS, and other operating
systems.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import multiprocessing
import os
import subprocess
import sys
import sysconfig
from distutils.command.build_ext import build_ext
import numpy
import pybind11
from setuptools import Extension
from setuptools import find_packages
from setuptools import setup
from setuptools.command.build_py import build_py
PACKAGE_NAME = 'tflite_runtime'
PACKAGE_VERSION = os.environ['PACKAGE_VERSION']  # must be set by the build script
DOCLINES = __doc__.split('\n')
# Locations of the TensorFlow tree and the Makefile-based tflite build.
TENSORFLOW_DIR = os.environ['TENSORFLOW_DIR']  # must be set by the build script
RELATIVE_MAKE_DIR = os.path.join('tensorflow', 'lite', 'tools', 'make')
MAKE_DIR = os.path.join(TENSORFLOW_DIR, RELATIVE_MAKE_DIR)
DOWNLOADS_DIR = os.path.join(MAKE_DIR, 'downloads')
RELATIVE_MAKEFILE_PATH = os.path.join(RELATIVE_MAKE_DIR, 'Makefile')
DOWNLOAD_SCRIPT_PATH = os.path.join(MAKE_DIR, 'download_dependencies.sh')

# Setup cross compiling: select a cross toolchain based on TENSORFLOW_TARGET.
TARGET = os.environ.get('TENSORFLOW_TARGET')
if TARGET == 'rpi':
  os.environ['CXX'] = 'arm-linux-gnueabihf-g++'
  os.environ['CC'] = 'arm-linux-gnueabihf-gcc'
elif TARGET == 'aarch64':
  os.environ['CXX'] = 'aarch64-linux-gnu-g++'
  os.environ['CC'] = 'aarch64-linux-gnu-gcc'

# Forward any TENSORFLOW_<NAME> environment overrides to make as NAME=value
# command-line arguments.
MAKE_CROSS_OPTIONS = []
for name in [
    'TARGET', 'TARGET_ARCH', 'CC_PREFIX', 'EXTRA_CXXFLAGS', 'EXTRA_CFLAGS'
]:
  value = os.environ.get('TENSORFLOW_%s' % name)
  if value:
    MAKE_CROSS_OPTIONS.append('%s=%s' % (name, value))
# Check physical memory and if we are on a reasonable non small SOC machine
# with more than 4GB, use all the CPUs, otherwise only 1.
def get_build_cpus():
  """Return the make parallelism: all cores with >= 4GB RAM, else 1."""
  total_ram = os.sysconf('SC_PAGESIZE') * os.sysconf('SC_PHYS_PAGES')
  four_gib = (1 << 30) * 4
  return multiprocessing.cpu_count() if total_ram >= four_gib else 1
def make_args(target='', quiet=True):
  """Construct make command line.

  Args:
    target: optional make target name to append.
    quiet: if True, pass --quiet to make.

  Returns:
    List of argv tokens suitable for subprocess.
  """
  args = ([
      'make', 'SHELL=/bin/bash', 'BUILD_WITH_NNAPI=false', '-C', TENSORFLOW_DIR
  ] + MAKE_CROSS_OPTIONS +
          ['-f', RELATIVE_MAKEFILE_PATH, '-j',
           str(get_build_cpus())])
  if quiet:
    args.append('--quiet')
  if target:
    args.append(target)
  return args
def make_output(target):
  """Invoke make on the target and return output.

  Returns the target's stdout, decoded as UTF-8 and stripped of
  surrounding whitespace. Raises CalledProcessError if make fails.
  """
  return subprocess.check_output(make_args(target)).decode('utf-8').strip()
def make():
  """Invoke make to build tflite C++ sources.

  Raises CalledProcessError if the build fails.

  Build dependencies:
    apt-get install swig libjpeg-dev zlib1g-dev python3-dev python3-numpy
  """
  subprocess.check_call(make_args(quiet=False))
def download_dependencies():
  """Download build dependencies if haven't done yet."""
  # Skip the download when the downloads directory already exists non-empty.
  if not os.path.isdir(DOWNLOADS_DIR) or not os.listdir(DOWNLOADS_DIR):
    subprocess.check_call(DOWNLOAD_SCRIPT_PATH)
class CustomBuildExt(build_ext, object):
  """Customized build extension."""

  def get_ext_filename(self, ext_name):
    # When cross compiling (TARGET set), emit a plain '.so' filename with no
    # Python-version/ABI suffix — presumably so the target interpreter can
    # load it; TODO(review): confirm.
    if TARGET:
      ext_path = ext_name.split('.')
      return os.path.join(*ext_path) + '.so'
    return super(CustomBuildExt, self).get_ext_filename(ext_name)

  def run(self):
    # Fetch third-party sources and build the tflite static library before
    # compiling the Python wrapper extension that links against it.
    download_dependencies()
    make()
    return super(CustomBuildExt, self).run()
class CustomBuildPy(build_py, object):
  """build_py variant that forces the native extension to build first."""

  def run(self):
    self.run_command('build_ext')
    return super(CustomBuildPy, self).run()
def get_pybind_include():
  """pybind11 include directory is not correctly resolved.

  This fixes include directory to /usr/local/pythonX.X

  Mirrors each include directory under <pip_package>/gen and symlinks the
  real directory in as 'include'.

  Returns:
    include directories to find pybind11
  """
  if sys.version_info[0] == 3:
    include_dirs = glob.glob('/usr/local/include/python3*')
  else:
    include_dirs = glob.glob('/usr/local/include/python2*')
  include_dirs.append(sysconfig.get_path('include'))

  tmp_include_dirs = []
  pip_dir = os.path.join(TENSORFLOW_DIR, 'tensorflow', 'lite', 'tools',
                         'pip_package', 'gen')
  for include_dir in include_dirs:
    # Strip the leading '/' so the absolute path nests under pip_dir.
    tmp_include_dir = os.path.join(pip_dir, include_dir[1:])
    tmp_include_dirs.append(tmp_include_dir)
    try:
      os.makedirs(tmp_include_dir)
      os.symlink(include_dir, os.path.join(tmp_include_dir, 'include'))
    except OSError:  # file already exists.
      # os.makedirs/os.symlink raise OSError (FileExistsError on Python 3)
      # when the target exists. On Python 2 OSError is NOT an alias of
      # IOError, so the previous 'except IOError' crashed on re-runs; on
      # Python 3 IOError is OSError, so this change is behavior-preserving.
      pass
  return tmp_include_dirs
LIB_TFLITE = 'tensorflow-lite'
LIB_TFLITE_DIR = make_output('libdir')
ext = Extension(
name='%s._pywrap_tensorflow_interpreter_wrapper' % PACKAGE_NAME,
language='c++',
sources=[
'interpreter_wrapper/interpreter_wrapper.cc',
'interpreter_wrapper/interpreter_wrapper_pybind11.cc',
'interpreter_wrapper/numpy.cc',
'interpreter_wrapper/python_error_reporter.cc',
'interpreter_wrapper/python_utils.cc'
],
extra_compile_args=['--std=c++11'],
include_dirs=[
TENSORFLOW_DIR,
os.path.join(TENSORFLOW_DIR, 'tensorflow', 'lite', 'tools',
'pip_package'),
numpy.get_include(),
os.path.join(DOWNLOADS_DIR, 'flatbuffers', 'include'),
os.path.join(DOWNLOADS_DIR, 'absl'),
pybind11.get_include()
],
libraries=[LIB_TFLITE],
library_dirs=[LIB_TFLITE_DIR])
setup(
name=PACKAGE_NAME.replace('_', '-'),
version=PACKAGE_VERSION,
description=DOCLINES[0],
long_description='\n'.join(DOCLINES[2:]),
url='https://www.tensorflow.org/lite/',
author='Google, LLC',
author_email='packages@tensorflow.org',
license='Apache 2.0',
include_package_data=True,
keywords='tflite tensorflow tensor machine learning',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
packages=find_packages(exclude=[]),
ext_modules=[ext],
install_requires=[
'numpy >= 1.16.0',
],
cmdclass={
'build_ext': CustomBuildExt,
'build_py': CustomBuildPy,
})
|
glouppe/scikit-learn | refs/heads/master | examples/plot_multioutput_face_completion.py | 330 | """
==============================================
Face completion with a multi-output estimators
==============================================
This example shows the use of multi-output estimator to complete images.
The goal is to predict the lower half of a face given its upper half.
The first column of images shows true faces. The next columns illustrate
how extremely randomized trees, k nearest neighbors, linear
regression and ridge regression complete the lower half of those faces.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.utils.validation import check_random_state
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RidgeCV
# Load the faces datasets
data = fetch_olivetti_faces()
targets = data.target

# Flatten each image into a single row vector.
data = data.images.reshape((len(data.images), -1))
train = data[targets < 30]
test = data[targets >= 30]  # Test on independent people

# Test on a subset of people
n_faces = 5
rng = check_random_state(4)
face_ids = rng.randint(test.shape[0], size=(n_faces, ))
test = test[face_ids, :]
n_pixels = data.shape[1]
# np.ceil/np.floor return floats; NumPy requires integer indices (float
# indices are an error on modern NumPy), so cast explicitly.
X_train = train[:, :int(np.ceil(0.5 * n_pixels))]  # Upper half of the faces
y_train = train[:, int(np.floor(0.5 * n_pixels)):]  # Lower half of the faces
X_test = test[:, :int(np.ceil(0.5 * n_pixels))]
y_test = test[:, int(np.floor(0.5 * n_pixels)):]
# Fit estimators
ESTIMATORS = {
    "Extra trees": ExtraTreesRegressor(n_estimators=10, max_features=32,
                                       random_state=0),
    "K-nn": KNeighborsRegressor(),
    "Linear regression": LinearRegression(),
    "Ridge": RidgeCV(),
}

# Train each estimator to predict the lower half from the upper half, then
# collect its predictions for the test faces.
y_test_predict = dict()
for name, estimator in ESTIMATORS.items():
    estimator.fit(X_train, y_train)
    y_test_predict[name] = estimator.predict(X_test)
# Plot the completed faces
image_shape = (64, 64)  # Olivetti faces are 64x64 pixels

# One row per face; first column is the true face, then one column per
# estimator (sorted by name).
n_cols = 1 + len(ESTIMATORS)
plt.figure(figsize=(2. * n_cols, 2.26 * n_faces))
plt.suptitle("Face completion with multi-output estimators", size=16)

for i in range(n_faces):
    true_face = np.hstack((X_test[i], y_test[i]))

    # Only the first row gets column titles.
    if i:
        sub = plt.subplot(n_faces, n_cols, i * n_cols + 1)
    else:
        sub = plt.subplot(n_faces, n_cols, i * n_cols + 1,
                          title="true faces")

    sub.axis("off")
    sub.imshow(true_face.reshape(image_shape),
               cmap=plt.cm.gray,
               interpolation="nearest")

    for j, est in enumerate(sorted(ESTIMATORS)):
        # Stitch the known upper half to the predicted lower half.
        completed_face = np.hstack((X_test[i], y_test_predict[est][i]))

        if i:
            sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j)
        else:
            sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j,
                              title=est)

        sub.axis("off")
        sub.imshow(completed_face.reshape(image_shape),
                   cmap=plt.cm.gray,
                   interpolation="nearest")

plt.show()
|
mrquim/repository.mrquim | refs/heads/master | script.module.youtube.dl/lib/youtube_dl/extractor/amcnetworks.py | 24 | # coding: utf-8
from __future__ import unicode_literals
from .theplatform import ThePlatformIE
from ..utils import (
update_url_query,
parse_age_limit,
int_or_none,
)
class AMCNetworksIE(ThePlatformIE):
    """Extractor for AMC Networks sites (amc, bbcamerica, ifc, wetv)."""
    _VALID_URL = r'https?://(?:www\.)?(?:amc|bbcamerica|ifc|wetv)\.com/(?:movies|shows(?:/[^/]+)+)/(?P<id>[^/?#]+)'
    _TESTS = [{
        'url': 'http://www.ifc.com/shows/maron/season-04/episode-01/step-1',
        'md5': '',
        'info_dict': {
            'id': 's3MX01Nl4vPH',
            'ext': 'mp4',
            'title': 'Maron - Season 4 - Step 1',
            'description': 'In denial about his current situation, Marc is reluctantly convinced by his friends to enter rehab. Starring Marc Maron and Constance Zimmer.',
            'age_limit': 17,
            'upload_date': '20160505',
            'timestamp': 1462468831,
            'uploader': 'AMCN',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
        'skip': 'Requires TV provider accounts',
    }, {
        'url': 'http://www.bbcamerica.com/shows/the-hunt/full-episodes/season-1/episode-01-the-hardest-challenge',
        'only_matching': True,
    }, {
        'url': 'http://www.amc.com/shows/preacher/full-episodes/season-01/episode-00/pilot',
        'only_matching': True,
    }, {
        'url': 'http://www.wetv.com/shows/million-dollar-matchmaker/season-01/episode-06-the-dumped-dj-and-shallow-hal',
        'only_matching': True,
    }, {
        'url': 'http://www.ifc.com/movies/chaos',
        'only_matching': True,
    }, {
        'url': 'http://www.bbcamerica.com/shows/doctor-who/full-episodes/the-power-of-the-daleks/episode-01-episode-1-color-version',
        'only_matching': True,
    }, {
        'url': 'http://www.wetv.com/shows/mama-june-from-not-to-hot/full-episode/season-01/thin-tervention',
        'only_matching': True,
    }, {
        'url': 'http://www.wetv.com/shows/la-hair/videos/season-05/episode-09-episode-9-2/episode-9-sneak-peek-3',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        query = {
            'mbr': 'true',
            'manifest': 'm3u',
        }
        # The page embeds the thePlatform media URL in a JS variable.
        media_url = self._search_regex(
            r'window\.platformLinkURL\s*=\s*[\'"]([^\'"]+)',
            webpage, 'media url')
        theplatform_metadata = self._download_theplatform_metadata(self._search_regex(
            r'link\.theplatform\.com/s/([^?]+)',
            media_url, 'theplatform_path'), display_id)
        info = self._parse_theplatform_metadata(theplatform_metadata)
        video_id = theplatform_metadata['pid']
        title = theplatform_metadata['title']
        rating = theplatform_metadata['ratings'][0]['rating']
        auth_required = self._search_regex(
            r'window\.authRequired\s*=\s*(true|false);',
            webpage, 'auth required')
        if auth_required == 'true':
            # TV-provider protected content: obtain an auth token and pass it
            # along with the media URL query.
            requestor_id = self._search_regex(
                r'window\.requestor_id\s*=\s*[\'"]([^\'"]+)',
                webpage, 'requestor id')
            resource = self._get_mvpd_resource(
                requestor_id, title, video_id, rating)
            query['auth'] = self._extract_mvpd_auth(
                url, video_id, requestor_id, resource)
        media_url = update_url_query(media_url, query)
        formats, subtitles = self._extract_theplatform_smil(
            media_url, video_id)
        self._sort_formats(formats)
        info.update({
            'id': video_id,
            'subtitles': subtitles,
            'formats': formats,
            # NOTE(review): parse_age_limit is applied twice here; this looks
            # redundant -- confirm it is idempotent before simplifying.
            'age_limit': parse_age_limit(parse_age_limit(rating)),
        })
        # Series/season/episode metadata lives under a site-specific XML
        # namespace prefix; derive the prefix from the $xmlns declaration.
        ns_keys = theplatform_metadata.get('$xmlns', {}).keys()
        if ns_keys:
            ns = list(ns_keys)[0]
            series = theplatform_metadata.get(ns + '$show')
            season_number = int_or_none(
                theplatform_metadata.get(ns + '$season'))
            episode = theplatform_metadata.get(ns + '$episodeTitle')
            episode_number = int_or_none(
                theplatform_metadata.get(ns + '$episode'))
            if season_number:
                title = 'Season %d - %s' % (season_number, title)
            if series:
                title = '%s - %s' % (series, title)
            info.update({
                'title': title,
                'series': series,
                'season_number': season_number,
                'episode': episode,
                'episode_number': episode_number,
            })
        return info
|
strahlex/machinetalk-protobuf | refs/heads/master | python/machinetalk/protobuf/firmware_pb2.py | 9 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: machinetalk/protobuf/firmware.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from machinetalk.protobuf import nanopb_pb2 as machinetalk_dot_protobuf_dot_nanopb__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='machinetalk/protobuf/firmware.proto',
package='machinetalk',
syntax='proto2',
serialized_pb=_b('\n#machinetalk/protobuf/firmware.proto\x12\x0bmachinetalk\x1a!machinetalk/protobuf/nanopb.proto\"6\n\tConnector\x12\x13\n\x04name\x18\x01 \x01(\tB\x05\x92?\x02\x08\x14\x12\x0c\n\x04pins\x18\x02 \x01(\x0f:\x06\x92?\x03H\xfc\x02\"\xc4\x01\n\x08\x46irmware\x12\x18\n\tbuild_sha\x18\x01 \x01(\tB\x05\x92?\x02\x08(\x12\x1f\n\x10\x66pga_part_number\x18\x02 \x01(\tB\x05\x92?\x02\x08\x14\x12\x30\n\tconnector\x18\x03 \x03(\x0b\x32\x16.machinetalk.ConnectorB\x05\x92?\x02\x10\x10\x12\x10\n\x08num_leds\x18\x04 \x01(\x0f\x12\x19\n\nboard_name\x18\x05 \x01(\tB\x05\x92?\x02\x08\x1e\x12\x16\n\x07\x63omment\x18\x06 \x01(\tB\x05\x92?\x02\x08P:\x06\x92?\x03H\x81\x03')
,
dependencies=[machinetalk_dot_protobuf_dot_nanopb__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_CONNECTOR = _descriptor.Descriptor(
name='Connector',
full_name='machinetalk.Connector',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='machinetalk.Connector.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\222?\002\010\024'))),
_descriptor.FieldDescriptor(
name='pins', full_name='machinetalk.Connector.pins', index=1,
number=2, type=15, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\222?\003H\374\002')),
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=87,
serialized_end=141,
)
_FIRMWARE = _descriptor.Descriptor(
name='Firmware',
full_name='machinetalk.Firmware',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='build_sha', full_name='machinetalk.Firmware.build_sha', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\222?\002\010('))),
_descriptor.FieldDescriptor(
name='fpga_part_number', full_name='machinetalk.Firmware.fpga_part_number', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\222?\002\010\024'))),
_descriptor.FieldDescriptor(
name='connector', full_name='machinetalk.Firmware.connector', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\222?\002\020\020'))),
_descriptor.FieldDescriptor(
name='num_leds', full_name='machinetalk.Firmware.num_leds', index=3,
number=4, type=15, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='board_name', full_name='machinetalk.Firmware.board_name', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\222?\002\010\036'))),
_descriptor.FieldDescriptor(
name='comment', full_name='machinetalk.Firmware.comment', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\222?\002\010P'))),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\222?\003H\201\003')),
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=144,
serialized_end=340,
)
_FIRMWARE.fields_by_name['connector'].message_type = _CONNECTOR
DESCRIPTOR.message_types_by_name['Connector'] = _CONNECTOR
DESCRIPTOR.message_types_by_name['Firmware'] = _FIRMWARE
Connector = _reflection.GeneratedProtocolMessageType('Connector', (_message.Message,), dict(
DESCRIPTOR = _CONNECTOR,
__module__ = 'machinetalk.protobuf.firmware_pb2'
# @@protoc_insertion_point(class_scope:machinetalk.Connector)
))
_sym_db.RegisterMessage(Connector)
Firmware = _reflection.GeneratedProtocolMessageType('Firmware', (_message.Message,), dict(
DESCRIPTOR = _FIRMWARE,
__module__ = 'machinetalk.protobuf.firmware_pb2'
# @@protoc_insertion_point(class_scope:machinetalk.Firmware)
))
_sym_db.RegisterMessage(Firmware)
_CONNECTOR.fields_by_name['name'].has_options = True
_CONNECTOR.fields_by_name['name']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\222?\002\010\024'))
_CONNECTOR.has_options = True
_CONNECTOR._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\222?\003H\374\002'))
_FIRMWARE.fields_by_name['build_sha'].has_options = True
_FIRMWARE.fields_by_name['build_sha']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\222?\002\010('))
_FIRMWARE.fields_by_name['fpga_part_number'].has_options = True
_FIRMWARE.fields_by_name['fpga_part_number']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\222?\002\010\024'))
_FIRMWARE.fields_by_name['connector'].has_options = True
_FIRMWARE.fields_by_name['connector']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\222?\002\020\020'))
_FIRMWARE.fields_by_name['board_name'].has_options = True
_FIRMWARE.fields_by_name['board_name']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\222?\002\010\036'))
_FIRMWARE.fields_by_name['comment'].has_options = True
_FIRMWARE.fields_by_name['comment']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\222?\002\010P'))
_FIRMWARE.has_options = True
_FIRMWARE._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\222?\003H\201\003'))
# @@protoc_insertion_point(module_scope)
|
klebercode/rhape | refs/heads/master | rha/core/views.py | 1 | # coding: utf-8
from django.shortcuts import render, redirect
from forms import ContactForm, SubscribeForm
from models import (Enterprise, Contact, Team, Cost, Partner,
Gallery, Course, Objective, Public, Step)
def home(request):
    """Render the landing page and handle the contact / subscribe forms.

    Both forms post to this view; the posted ``action`` field identifies
    which form was submitted.
    """
    context = {}
    context['enterprises'] = Enterprise.objects.all()
    context['contacts'] = Contact.objects.all()
    # NOTE: the keys 'teans' and 'objectivies' are misspelled, but templates
    # may already depend on them, so they are kept for compatibility.
    context['teans'] = Team.objects.all()
    context['costs'] = Cost.objects.all()
    context['partners'] = Partner.objects.all()
    context['galleries'] = Gallery.objects.all()
    context['courses'] = Course.objects.all()
    context['objectivies'] = Objective.objects.all()
    context['publics'] = Public.objects.all()
    context['steps'] = Step.objects.all()

    # Default to unbound forms. This also covers GET requests and POSTs with
    # a missing/unknown 'action', which previously raised
    # MultiValueDictKeyError or NameError at render time.
    contact_form = ContactForm(prefix='Contact')
    subscribe_form = SubscribeForm(prefix='Subscribe')

    if request.method == 'POST':
        action = request.POST.get('action')
        if action == 'contact':
            contact_form = ContactForm(request.POST, prefix='Contact')
            if contact_form.is_valid():
                contact_form.send_mail()
                context['contact_success'] = True
        elif action == 'subscribe':
            subscribe_form = SubscribeForm(request.POST,
                                           prefix='Subscribe')
            if subscribe_form.is_valid():
                subscribe_form.save()
                subscribe_form.send_mail()
                context['subscribe_success'] = True

    context['contact_form'] = contact_form
    context['subscribe_form'] = subscribe_form
    return render(request, 'index.html', context)
def email(request):
    """Redirect to the external Zoho webmail login page."""
    return redirect('https://www.zoho.com/mail/login.html')
|
jesobreira/gwrite | refs/heads/master | gwrite/gtklatex.py | 4 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
'''Gtk LaTex
@author: U{Jiahua Huang <jhuangjiahua@gmail.com>}
@license: LGPLv3+
'''
from gi.repository import Gtk
from gi.repository import GLib
from gi.repository import GObject
from gi.repository import Gdk
from gi.repository import GdkPixbuf
import thread
import time
import subprocess
import os, sys
import base64
try: from gi.repository import GtkSource
except: GtkSource = None
try: import i18n
except: from gettext import gettext as _
# (button markup, LaTeX snippet) pairs used to populate the tool box.
# Snippets containing a single "%s" placeholder wrap the current selection
# (see LatexMathExpressionsEditor.insert_latex_mark); all others are appended.
latex_mark_list = [
    # ["+", r" + "],
    # ["<big>-</big>", r" - "],
    ["<b>⋅</b>", r" \cdot "],
    ["x", r" \times "],
    ["/", r" / "],
    ["<big><b>÷</b></big>", r" \frac { } { }"],
    ["a<sup>n</sup>", r"^{%s}"],
    ["a<sub>n</sub>", r"_{%s}"],
    [" ≠ ", r" \neq "],
    [" ≤ ", r" \le "],
    [" ≥ ", r" \ge "],
    [" ≡ ", r" \equiv "],
    [" ≪ ", r" \ll "],
    [" ≫ ", r" \gg "],
    [" ≃ ", r" \simeq "],
    [" ≈ ", r" \approx "],
    ["√¯", r" \sqrt[] {%s}"],
    ["∫", r" \int^{}_{} "],
    ["∬", r" \iint^{}_{} "],
    ["∮", r" \oint^{}_{} "],
    ["[ ]", r"\[ %s \]"],
    ["( )", r"\( %s \)"],
    ["{ }", r"\{ %s \}"],
    ["[≡]", r"""
\[
\begin{matrix}
a & b & c\\
c & e & f
\end{matrix}
\]
"""],
    ["(≡)", r"""
\begin{pmatrix}
a & b & c\\
c & e & f
\end{pmatrix}
"""],
    ["(<big> : </big>)", r"{ } \choose { } "],
    ["<big>(</big> x <big>)</big>", r"\left( { %s } \right)"],
    [" ± ", r" \pm "],
    [" ∓ ", r" \mp "],
    [" ∨ ", r" \lor" ],
    [" ∧ ", r" \land "],
    ["mod", r" \bmod "],
    [" ∼ ", r" \sim "],
    ["∥ ", r" \parallel "],
    [" ⊥ ", r" \perp "],
    ["<big><big>∞</big></big>", r" \infty "],
    ["∠", r" \angle "],
    ["<big><b>△</b></big>", r" \triangle "],
    ["∑", r" \sum_{ }^{ } "],
    ["lim", r"\lim_{ }"],
    ["⇒", r" \Rightarrow "],
    ["⇔", r" \Leftrightarrow "],
    ["∧", r" \wedge "],
    ["∨", r" \vee "],
    ["¬", r" \neg "],
    ["∀", r" \forall "],
    ["∃", r" \exists "],
    ["∅", r" \varnothing "],
    ["∈", r" \in "],
    ["∉", r" \notin "],
    ["⊆", r" \subseteq "],
    ["⊂", r" \subset "],
    ["∪", r" \cup "],
    ["⋂", r" \cap "],
    ["→", r" \to "],
    ["↦", r" \mapsto "],
    ["∏", r" \prod "],
    ["○", r" \circ "],
    ["sin", r" \sin "],
    ["cos", r" \cos "],
    ["tan", r" \tan "],
    # NOTE(review): \ctab / \asin / \acos / \atan / \actan are not standard
    # LaTeX commands — presumably typos for \cot, \arcsin, \arccos, \arctan;
    # kept byte-identical here.
    ["ctan", r" \ctab "],
    ["asin", r" \asin "],
    ["acos", r" \acos "],
    ["atan", r" \atan "],
    ["actan", r" \actan "],
    ["log", r" \log "],
    ["ln", r" \ln "],
    ["...", r" \cdots "],
    [" <sub>...</sub> ", r" \ldots "],
    ["<big>⁝</big>", r" \vdots "],
    ["<sup>.</sup>.<sub>.</sub>", r" \ddots "],
    ["α", r" \alpha "],
    ["β", r" \beta "],
    ["Γ", r" \Gamma "],
    ["γ", r" \gamma "],
    ["Δ", r" \Delta "],
    ["δ", r" \delta "],
    ["ϵ", r" \epsilon "],
    ["ε", r" \varepsilon "],
    ["ζ", r" \zeta "],
    ["η", r" \eta "],
    ["Θ", r" \Theta "],
    ["θ", r" \theta "],
    ["ϑ", r" \vartheta "],
    ["ι", r" \iota "],
    ["κ", r" \kappa "],
    ["Λ", r" \Lambda "],
    ["λ", r" \lambda "],
    ["μ", r" \mu "],
    ["ν", r" \nu "],
    ["Ξ", r" \Xi "],
    ["ξ", r" \xi "],
    ["Π", r" \Pi "],
    ["π", r" \pi "],
    ["ϖ", r" \varpi "],
    ["ρ", r" \rho "],
    ["ϱ", r" \varrho "],
    ["Σ", r" \Sigma "],
    ["σ", r" \sigma "],
    ["ς", r" \varsigma "],
    ["τ", r" \tau "],
    ["Υ", r" \Upsilon "],
    ["υ", r" \upsilon "],
    ["Φ", r" \Phi "],
    ["ϕ", r" \phi "],
    ["φ", r" \varphi "],
    ["χ", r" \chi "],
    ["Ψ", r" \Psi "],
    ["ψ", r" \psi "],
    ["Ω", r" \Omega "],
    ["ω", r" \omega "],
    ]
class GtkToolBoxView(Gtk.TextView):
    '''Flow-layout tool box.

    A read-only, word-wrapping TextView whose buffer holds child widgets at
    anchors, so the tool buttons wrap like words in a paragraph.
    '''
    def __init__(self, latex=""):
        '''Create the read-only, word-wrapping container.'''
        Gtk.TextView.__init__(self)
        self.props.can_focus = 0
        self.set_editable(0)
        self.set_wrap_mode(Gtk.WrapMode.WORD)
        self.connect('realize', self.on_realize)

    def on_realize(self, *args):
        '''Swap the default I-beam text cursor for a plain arrow.'''
        self.get_window(Gtk.TextWindowType.TEXT).set_cursor(Gdk.Cursor(Gdk.CursorType.ARROW))

    def add(self, widget):
        '''Append *widget* at the end of the buffer via a child anchor.'''
        buffer = self.get_buffer()
        iter = buffer.get_end_iter()
        anchor = buffer.create_child_anchor(iter)
        buffer.insert(iter, "")
        # Remember the anchor on the widget itself; Widget.set_data/get_data
        # do not exist in the GI (gi.repository) bindings, so a plain Python
        # attribute is used instead.
        widget.buffer_anchor = anchor
        self.add_child_at_anchor(widget, anchor)

    def remove(self, widget):
        '''Delete *widget* (and the anchor character holding it) from the buffer.'''
        # Bug fix: add() stores the anchor as the `buffer_anchor` attribute,
        # but remove() previously called widget.get_data('buffer_anchor'),
        # which never matched the stored attribute (and is unavailable on GI
        # widgets), so removal silently failed.
        anchor = getattr(widget, 'buffer_anchor', None)
        if anchor:
            buffer = self.get_buffer()
            start = buffer.get_iter_at_child_anchor(anchor)
            end = buffer.get_iter_at_offset(start.get_offset() + 1)
            buffer.delete(start, end)
class LatexMathExpressionsEditor(Gtk.Table):
    '''LaTeX math expression editor.

    A Gtk.Table with three panes: a (GtkSource) text view holding the LaTeX
    source, a live GIF preview rendered by the external `mimetex` tool, and
    a flow-layout tool box of clickable LaTeX snippets.
    '''
    def __init__(self, latex=""):
        '''Build the editor UI; *latex* is the initial expression text.'''
        Gtk.Table.__init__(self)
        self.set_row_spacings(10)
        self.set_col_spacings(10)
        ## latex edit
        scrolledwindow1 = Gtk.ScrolledWindow()
        scrolledwindow1.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
        scrolledwindow1.show()
        scrolledwindow1.set_shadow_type(Gtk.ShadowType.IN)
        # Prefer a GtkSource view with LaTeX syntax highlighting when the
        # GtkSource typelib could be imported (see module-level try/except).
        if GtkSource:
            self.latex_textview = GtkSource.View()
            lm = GtkSource.LanguageManager.get_default()
            language = lm.get_language('latex')
            buffer = GtkSource.Buffer()
            buffer.set_highlight_syntax(1)
            buffer.set_language(language)
            self.latex_textview.set_buffer(buffer)
            pass
        else:
            self.latex_textview = Gtk.TextView()
            pass
        self.latex_textview.set_wrap_mode(Gtk.WrapMode.WORD)
        self.latex_textview.set_cursor_visible(True)
        self.latex_textview.set_indent(5)
        self.latex_textview.set_editable(True)
        self.latex_textview.show()
        #self.latex_textview.set_size_request(302, 200)
        buffer = self.latex_textview.get_buffer()
        buffer.set_text(latex)
        scrolledwindow1.add(self.latex_textview)
        self.attach(scrolledwindow1, 0, 1, 0, 1)
        ## latex preview
        self.latex_image = Gtk.Image()
        #self.latex_image.set_size_request(200, 100)
        self.latex_image.set_padding(0, 0)
        self.latex_image.show()
        # EventBox gives the preview a white background behind the formula.
        box = Gtk.EventBox()
        box.show()
        box.modify_bg(Gtk.StateType.NORMAL, Gdk.Color.parse("#FFFFFF")[1])
        box.add(self.latex_image)
        self.attach(box, 0, 1, 1, 2)
        ## toolbox
        toolview = GtkToolBoxView()
        toolview.show()
        #toolview.set_size_request(302, 200)
        # One flat button per snippet in latex_mark_list; tooltip shows the
        # raw LaTeX that the button inserts.
        for text, mark in latex_mark_list:
            label = Gtk.Label()
            label.set_markup(text)
            label.set_size_request(30, 20)
            label.show()
            button = Gtk.Button()
            button.props.can_focus = 0
            button.add(label)
            button.set_relief(Gtk.ReliefStyle.NONE)
            button.connect("clicked", self.on_insert_tex_mark, text, mark)
            button.set_tooltip_text(mark)
            button.show()
            toolview.add(button)
            pass
        scrolledwindow2 = Gtk.ScrolledWindow()
        #scrolledwindow2.set_size_request(300, 400)
        scrolledwindow2.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
        scrolledwindow2.show()
        scrolledwindow2.set_shadow_type(Gtk.ShadowType.IN)
        scrolledwindow2.add(toolview)
        self.attach(scrolledwindow2, 1, 2, 0, 2)
        self.show_all()
        # Background poller (Python 2 `thread` module) that refreshes the
        # preview image whenever the source text changes.
        thread.start_new_thread(self._up_preview, ())
        pass
    def get_latex(self, *args):
        '''Return the current LaTeX source as a string.'''
        buffer = self.latex_textview.get_buffer()
        return buffer.get_text(buffer.get_start_iter(),buffer.get_end_iter(), 1)
    def set_pic(self, data):
        '''Show GIF *data* in the preview; show an error icon when empty.'''
        if not data:
            return self.latex_image.set_from_stock(Gtk.STOCK_DIALOG_ERROR, 2)
        pix = GdkPixbuf.PixbufLoader()
        pix.write(data)
        pix.close()
        self.latex_image.set_from_pixbuf(pix.get_pixbuf())
        return
    def _up_preview(self, *args):
        '''Worker loop: re-render the preview when the source text changes.

        Polls once per second; exits when the widget has no GdkWindow any
        more (i.e. the containing dialog was destroyed).
        '''
        old_latex = ""
        while True:
            time.sleep(1)
            if not self.get_window():
                break
            latex = self.get_latex()
            if latex == old_latex:
                continue
            pic = tex2gif(latex, 1)
            old_latex = self.get_latex()
            # Only push the image if the text did not change while rendering;
            # the UI update is marshalled back to the GTK main loop.
            if latex == self.get_latex():
                GObject.idle_add(self.set_pic, pic)
                pass
            pass
        #-print 'done'
        return
    def up_preview(self, pic):
        '''Update the preview (placeholder; real work happens in _up_preview).'''
        return
    def insert_latex_mark(self, view, mark, text=""):
        '''Insert LaTeX *mark* into *view*, wrapping any selected text.

        Marks containing exactly one "%" (i.e. a single "%s" placeholder)
        substitute the current selection (or *text*); all other marks have
        the selection appended after them.
        '''
        buffer = view.get_buffer()
        bounds = buffer.get_selection_bounds()
        select = bounds and buffer.get_slice(bounds[0], bounds[1]) or text
        if mark.count("%") == 1:
            mark = mark % select
            pass
        else:
            mark = mark + select
            pass
        buffer.delete_selection(1, 1)
        buffer.insert_at_cursor(mark)
        pass
    def on_insert_tex_mark(self, widget, text, mark):
        # Tool-box button callback (Python 2 print statement kept as-is).
        print 'on_insert_tex_mark:', text, mark
        self.insert_latex_mark(self.latex_textview, mark)
        pass
def latex_dlg(latex="", title=_("LaTeX math expressions"), parent=None):
    '''Open a modal LaTeX editor dialog.

    Returns the edited LaTeX string on OK, or None when cancelled.
    '''
    dialog = Gtk.Dialog(title, parent, Gtk.DialogFlags.DESTROY_WITH_PARENT,
                        (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
                         Gtk.STOCK_OK, Gtk.ResponseType.OK))
    dialog.set_default_size(680, 400)
    editor = LatexMathExpressionsEditor(latex)
    dialog.vbox.pack_start(editor, True, True, 5)
    dialog.show_all()
    response = dialog.run()
    # Read the text *before* destroying the dialog — the editor widget is
    # destroyed along with it.
    result = editor.get_latex()
    dialog.destroy()
    if response != Gtk.ResponseType.OK:
        return None
    return result
def stastr(stri):
    '''Escape backslashes, quotes and newlines so *stri* can be embedded
    inside a quoted (e.g. JavaScript) string literal.'''
    escaped = stri
    # Order matters: backslashes must be doubled before adding new ones.
    for old, new in (("\\", "\\\\"), ('"', '\\"'), ("'", "\\'"), ("\n", "\\n")):
        escaped = escaped.replace(old, new)
    return escaped
def tex2gif(tex, transparent=1):
    '''Render LaTeX math expression *tex* to GIF data via the external
    `mimetex` tool (`mimetex -d -s 4 'formula'`).

    With *transparent* falsy, `-o` is added for an opaque background.
    Returns the GIF data, or "" when rendering failed.
    '''
    cmd = ['mimetex', '-d']
    if not transparent:
        cmd.append('-o')
    cmd += ['-s', '4', tex]
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    output = proc.communicate()[0]
    # A valid render starts with the GIF magic bytes.
    if output.startswith('GIF'):
        return output
    return ""
def gif2base64(gif):
    '''Convert raw GIF image data to a base64 data-URI (inline image) string.'''
    # base64.b64encode replaces the deprecated/removed base64.encodestring;
    # it emits no newlines, so the old .replace('\n', '') is unnecessary and
    # the output is byte-for-byte identical.
    encoded = base64.b64encode(gif)
    if not isinstance(encoded, str):
        # Python 3: b64encode returns bytes; the URI must be text.
        encoded = encoded.decode('ascii')
    return 'data:image/gif;base64,%s' % encoded
def tex2base64(tex):
    '''Render LaTeX expression *tex* with mimetex and return it as a base64
    inline-image data URI.'''
    gif_data = tex2gif(tex)
    return gif2base64(gif_data)
def tex2html(tex):
    '''Render LaTeX expression *tex* to an <img> tag embedding the formula
    as a base64 inline GIF; double-clicking the image calls uptex(this).'''
    template = ('<img alt="mimetex:%s" onDblClick="if(uptex) uptex(this);" '
                'style="vertical-align: middle; position: relative; top: -5pt; border: 0;" '
                'src="%s" />')
    return template % (stastr(tex), gif2base64(tex2gif(tex)))
if __name__=="__main__":
    # Manual test entry point: open the editor dialog on the LaTeX given on
    # the command line (default: E=MC^2) and print the edited result.
    Gdk.threads_init()
    latex = ' '.join(sys.argv[1:]) or 'E=MC^2'
    latex = latex_dlg(latex)
    print latex
    #print tex2html(latex)
    pass
|
tafaRU/odoo | refs/heads/8.0 | addons/sale_journal/__openerp__.py | 52 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Invoicing Journals',
'version': '1.0',
'category': 'Sales Management',
'description': """
The sales journal modules allows you to categorise your sales and deliveries (picking lists) between different journals.
========================================================================================================================
This module is very helpful for bigger companies that works by departments.
You can use journal for different purposes, some examples:
----------------------------------------------------------
* isolate sales of different departments
* journals for deliveries by truck or by UPS
Journals have a responsible and evolves between different status:
-----------------------------------------------------------------
* draft, open, cancel, done.
Batch operations can be processed on the different journals to confirm all sales
at once, to validate or invoice packing.
It also supports batch invoicing methods that can be configured by partners and sales orders, examples:
-------------------------------------------------------------------------------------------------------
* daily invoicing
* monthly invoicing
Some statistics by journals are provided.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/billing',
'images': ['images/invoice_type.jpeg'],
'depends': ['sale_stock'],
'data': [
'security/ir.model.access.csv',
'sale_journal_view.xml',
'sale_journal_data.xml'
],
'demo': ['sale_journal_demo.xml'],
'test': [ ],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Matt-Stammers/Python-Foundations | refs/heads/master | Simple Functions/String_Replacement(Dots).py | 1 | # simply done by escaping the . in a regex
import re
def replace_dots(str):
    """Replace every '.' in *str* with '-' using a regular expression."""
    dot_pattern = re.compile(r"\.")
    return dot_pattern.sub("-", str)
# or you can use .replace()
def replace_dots(str_):
    """Replace dots with dashes.

    Quirky contract preserved from the original: returns "" for an empty
    input and the literal string "no dots" for a non-empty input without
    any dot.
    """
    if "." in str_:
        return str_.replace(".", "-")
    # Bug fix: `str_ is ""` compared identity against a literal, which is a
    # SyntaxWarning in modern Python and only works by interning accident;
    # use value equality instead.
    elif str_ == "":
        return ""
    else:
        return "no dots"
# or you can just do:
def replace_dots(string):
    """Swap every '.' in *string* for '-'."""
    return "-".join(string.split("."))
# or a crazy way to do it:
def replace_dots(str):
    """Replace dots with dashes by rebuilding the string character by
    character (deliberately roundabout, as in the original)."""
    return ''.join('-' if ch == '.' else ch for ch in str)
|
mcella/django | refs/heads/master | tests/admin_views/forms.py | 339 | from django import forms
from django.contrib.admin.forms import AdminAuthenticationForm
class CustomAdminAuthenticationForm(AdminAuthenticationForm):
    """Admin login form that attaches an extra stylesheet and rejects the
    reserved username 'customform' with a custom validation error."""

    class Media:
        css = {'all': ('path/to/media.css',)}

    def clean_username(self):
        """Field-level validation hook for the username."""
        username = self.cleaned_data.get('username')
        if username != 'customform':
            return username
        raise forms.ValidationError('custom form error')
|
Aptitudetech/ERPNext | refs/heads/master | erpnext/patches/v7_0/update_prevdoc_values_for_supplier_quotation_item.py | 44 | import frappe
def execute():
    """Migration patch: copy the legacy prevdoc_* reference columns on
    Supplier Quotation Items into the new material_request /
    material_request_item link fields."""
    # Reload the doctype so the new target fields exist before writing.
    frappe.reload_doctype('Supplier Quotation Item')
    # Only rows that actually reference a source document need migrating.
    for data in frappe.db.sql(""" select prevdoc_docname, prevdoc_detail_docname, name
        from `tabSupplier Quotation Item` where prevdoc_docname is not null""", as_dict=True):
        frappe.db.set_value("Supplier Quotation Item", data.name, "material_request", data.prevdoc_docname)
        frappe.db.set_value("Supplier Quotation Item", data.name, "material_request_item", data.prevdoc_detail_docname)
Sawed0ff/g3_kernel | refs/heads/lp5.1 | tools/perf/scripts/python/syscall-counts-by-pid.py | 11180 | # system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
# Command-line handling: at most one optional argument, interpreted as a
# pid when it parses as an integer, otherwise as a comm (process name).
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";

for_comm = None   # filter: only count events from this process name
for_pid = None    # filter: only count events from this pid

if len(sys.argv) > 2:
    sys.exit(usage)

if len(sys.argv) > 1:
    try:
        for_pid = int(sys.argv[1])
    except:
        for_comm = sys.argv[1]

# comm -> pid -> syscall id -> count (auto-vivifying nested dict from Core).
syscalls = autodict()
def trace_begin():
    # perf hook: called once when the script starts (Python 2 print kept).
    print "Press control+C to stop and show the summary"
def trace_end():
    # perf hook: called once at exit — dump the accumulated counts.
    print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, args):
    # perf per-event hook: count one syscall entry, honouring the optional
    # comm/pid filter. NOTE(review): a for_pid of 0 would disable the pid
    # filter here — assumed not to occur in practice.
    if (for_comm and common_comm != for_comm) or \
       (for_pid and common_pid != for_pid ):
        return
    try:
        syscalls[common_comm][common_pid][id] += 1
    except TypeError:
        # First hit for this (comm, pid, id): the autodict leaf is still an
        # empty dict, so += raises TypeError; initialise the counter.
        syscalls[common_comm][common_pid][id] = 1
def print_syscall_totals():
    # Print per-comm/pid syscall counts, most frequent syscall first.
    # Python 2 only: print statements and the tuple-unpacking lambda
    # parameter `lambda(k, v)` are not valid Python 3.
    if for_comm is not None:
        print "\nsyscall events for %s:\n\n" % (for_comm),
    else:
        print "\nsyscall events by comm/pid:\n\n",
    print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
        "----------"),
    comm_keys = syscalls.keys()
    for comm in comm_keys:
        pid_keys = syscalls[comm].keys()
        for pid in pid_keys:
            print "\n%s [%d]\n" % (comm, pid),
            id_keys = syscalls[comm][pid].keys()
            # Sort by (count, id) descending so the hottest syscalls lead.
            for id, val in sorted(syscalls[comm][pid].iteritems(), \
                key = lambda(k, v): (v, k), reverse = True):
                print " %-38s %10d\n" % (syscall_name(id), val),
|
simartin/servo | refs/heads/master | tests/wpt/webgl/tests/py/lint/lint.py | 47 | #! /usr/bin/env python
import os
import subprocess
import re
import sys
import fnmatch
import commands
from collections import defaultdict
from optparse import OptionParser
# Directory containing this script, and the repository root two levels up.
lint_root = os.path.dirname(os.path.abspath(__file__))
repo_root = os.path.dirname(os.path.dirname(lint_root))
def git(command, *args):
    """Run `git <command> <args...>` in repo_root and return its stdout.

    Propagates subprocess.CalledProcessError on a non-zero exit status.
    (The previous `except CalledProcessError: raise` was a no-op
    catch-and-reraise and has been removed.)
    """
    command_line = ["git", command] + list(args)
    return subprocess.check_output(command_line, cwd=repo_root)
def iter_files(flag=False, floder=""):
    """Yield repo-root-relative paths of files to lint.

    Modes (note: 'floder' is a typo for 'folder', kept for call
    compatibility):
      * floder given — walk only that directory;
      * flag falsy   — walk the entire repository;
      * flag truthy  — only files touched by the last commit
        (`git diff --name-status HEAD~1`), skipping deletions.
    The walk temporarily chdirs into repo_root so yielded paths are relative.
    """
    if floder != "" and floder != None:
        os.chdir(repo_root)
        for pardir, subdir, files in os.walk(floder):
            for item in subdir + files:
                if not os.path.isdir(os.path.join(pardir, item)):
                    yield os.path.join(pardir, item)
        os.chdir(lint_root)
    else:
        if not flag:
            os.chdir(repo_root)
            for pardir, subdir, files in os.walk(repo_root):
                for item in subdir + files:
                    if not os.path.isdir(os.path.join(pardir, item)):
                        # Strip the absolute prefix to get a relative path.
                        yield os.path.join(pardir, item).split(repo_root + "/")[1]
            os.chdir(lint_root)
        else:
            for item in git("diff", "--name-status", "HEAD~1").strip().split("\n"):
                status = item.split("\t")
                # Skip deleted files ("D" status) — nothing left to lint.
                if status[0].strip() != "D":
                    yield status[1]
def check_filename_space(path):
    """Lint check: flag file names containing a space.

    Returns a one-element list of (error_type, message, line_number) when
    the basename of *path* contains a space, otherwise an empty list.
    """
    bname = os.path.basename(path)
    # Idiom: a plain substring test replaces the original
    # re.compile(" ").search(bname) — same behavior, no regex machinery.
    if " " in bname:
        return [("FILENAME WHITESPACE", "Filename of %s contains white space" % path, None)]
    return []
def check_permission(path):
    """Lint check: flag executable permission on files that are not scripts
    (.py / .sh are allowed to be executable)."""
    bname = os.path.basename(path)
    if re.search('\.py$|\.sh$', bname):
        return []
    if not os.access(os.path.join(repo_root, path), os.X_OK):
        return []
    return [("UNNECESSARY EXECUTABLE PERMISSION", "%s contains unnecessary executable permission" % path, None)]
def parse_whitelist_file(filename):
    """Parse a lint whitelist file and return a filter function.

    Each non-comment line has the form "ERROR TYPE: file-glob[: line]".
    The returned inner(path, errors) drops every error whitelisted for
    *path* and returns the rest.  (Python 2 code: xrange / iteritems.)
    """
    # file-glob -> error-type -> set of allowed line numbers (None = any).
    data = defaultdict(lambda:defaultdict(set))
    with open(filename) as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith("#"):
                continue
            parts = [item.strip() for item in line.split(":")]
            if len(parts) == 2:
                # No line number given: whitelist the error on every line.
                parts.append(None)
            else:
                parts[-1] = int(parts[-1])
            error_type, file_match, line_number = parts
            data[file_match][error_type].add(line_number)
    def inner(path, errors):
        # Mark each error whitelisted when a matching glob allows its type
        # ("*" allows every type) on its line.
        whitelisted = [False for item in xrange(len(errors))]
        for file_match, whitelist_errors in data.iteritems():
            if fnmatch.fnmatch(path, file_match):
                for i, (error_type, msg, line) in enumerate(errors):
                    if "*" in whitelist_errors:
                        whitelisted[i] = True
                    elif error_type in whitelist_errors:
                        allowed_lines = whitelist_errors[error_type]
                        if None in allowed_lines or line in allowed_lines:
                            whitelisted[i] = True
        return [item for i, item in enumerate(errors) if not whitelisted[i]]
    return inner
# Lazily-built whitelist matcher, cached across calls.
_whitelist_fn = None
def whitelist_errors(path, errors):
    """Filter *errors* through the rules in lint.whitelist (parsed once)."""
    global _whitelist_fn
    if _whitelist_fn is None:
        _whitelist_fn = parse_whitelist_file(os.path.join(lint_root, "lint.whitelist"))
    return _whitelist_fn(path, errors)
class Regexp(object):
    """Base class for per-line lint regex checks.

    Subclasses set `pattern` (the regex source), `error` (the error label)
    and optionally `file_extensions` (extensions the check applies to; None
    means every file).
    """
    pattern = None
    file_extensions = None
    error = None
    _re = None

    def __init__(self):
        # Compile once per instance from the subclass-supplied pattern.
        self._re = re.compile(self.pattern)

    def applies(self, path):
        """Return True when this check should run on *path*."""
        if self.file_extensions is None:
            return True
        return os.path.splitext(path)[1] in self.file_extensions

    def search(self, line):
        """Return a match object when the pattern occurs in *line*, else None."""
        return self._re.search(line)
class TrailingWhitespaceRegexp(Regexp):
    # Flags a space at end of line (before the newline).
    pattern = " $"
    error = "TRAILING WHITESPACE"
class TabsRegexp(Regexp):
    # Flags a line starting with a tab ("\t" here is a literal tab char).
    pattern = "^\t"
    error = "INDENT TABS"
class CRRegexp(Regexp):
    # Flags a carriage return at end of line (CRLF line endings).
    pattern = "\r$"
    error = "CR AT EOL"
# One instance of every line-level regex check.
regexps = [item() for item in
           [TrailingWhitespaceRegexp,
            TabsRegexp,
            CRRegexp]]
def check_regexp_line(path, f):
    """Run every regex check that applies to *path* over each line of the
    open file object *f*; return a list of (error, message, lineno) tuples."""
    active_checks = [regexp for regexp in regexps if regexp.applies(path)]
    errors = []
    for lineno, line in enumerate(f, start=1):
        for regexp in active_checks:
            if regexp.search(line):
                errors.append((regexp.error, "%s line %i" % (path, lineno), lineno))
    return errors
def output_errors(errors):
    # Print one "TYPE: message" line per error (Python 2 print statement).
    for error_type, error, line_number in errors:
        print "%s: %s" % (error_type, error)
def output_error_count(error_count):
    # Print a summary like "There were N errors (TYPE: n ...)".
    # Silent when there were no errors. (Python 2: print, iteritems.)
    if not error_count:
        return
    by_type = " ".join("%s: %d" % item for item in error_count.iteritems())
    count = sum(error_count.values())
    if count == 1:
        print "There was 1 error (%s)" % (by_type,)
    else:
        print "There were %d errors (%s)" % (count, by_type)
def main():
    """Entry point: lint the selected files and return the total error count."""
    global repo_root
    error_count = defaultdict(int)
    parser = OptionParser()
    parser.add_option('-p', '--pull', dest="pull_request", action='store_true', default=False)
    parser.add_option("-d", '--dir', dest="dir", help="specify the checking dir, e.g. tools")
    parser.add_option("-r", '--repo', dest="repo", help="specify the repo, e.g. WebGL")
    options, args = parser.parse_args()
    # -p: lint only the last commit; the flag doubles as the repo name
    # (defaults to "WebGL") and retargets repo_root accordingly.
    if options.pull_request == True:
        options.pull_request = "WebGL"
        repo_root = repo_root.replace("WebGL/sdk/tests", options.pull_request)
    if options.repo == "" or options.repo == None:
        options.repo = "WebGL/sdk/tests"
    repo_root = repo_root.replace("WebGL/sdk/tests", options.repo)
    def run_lint(path, fn, *args):
        # Run one lint pass, drop whitelisted errors, print and tally the rest.
        errors = whitelist_errors(path, fn(path, *args))
        output_errors(errors)
        for error_type, error, line in errors:
            error_count[error_type] += 1
    for path in iter_files(options.pull_request, options.dir):
        abs_path = os.path.join(repo_root, path)
        if not os.path.exists(abs_path):
            continue
        for path_fn in file_path_lints:
            run_lint(path, path_fn)
        for state_fn in file_state_lints:
            run_lint(path, state_fn)
        # Content checks only run on web/shader/script file types.
        if not os.path.isdir(abs_path):
            if re.compile('\.html$|\.htm$|\.xhtml$|\.xhtm$|\.frag$|\.vert$|\.js$').search(abs_path):
                with open(abs_path) as f:
                    for file_fn in file_content_lints:
                        run_lint(path, file_fn, f)
                        # Rewind so the next content check re-reads the file.
                        f.seek(0)
    output_error_count(error_count)
    return sum(error_count.itervalues())
# Registered lint passes, grouped by the input each one needs:
file_path_lints = [check_filename_space]    # the path string only
file_content_lints = [check_regexp_line]    # an open file object
file_state_lints = [check_permission]       # on-disk state (permissions)

if __name__ == "__main__":
    error_count = main()
    # Non-zero exit status when any lint error survived the whitelist.
    if error_count > 0:
        sys.exit(1)
|
jhgoebbert/cvl-fabric-launcher | refs/heads/master | wsgidav/dav_provider.py | 4 | # (c) 2009-2011 Martin Wendt and contributors; see WsgiDAV http://wsgidav.googlecode.com/
# Original PyFileServer (c) 2005 Ho Chun Wei.
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
"""
Abstract base class for DAV resource providers.
This module serves these purposes:
1. Documentation of the DAVProvider interface
2. Common base class for all DAV providers
3. Default implementation for most functionality that a resource provider must
deliver.
If no default implementation can be provided, then all write actions generate
FORBIDDEN errors. Read requests generate NOT_IMPLEMENTED errors.
**_DAVResource, DAVCollection, DAVNonCollection**
Represents an existing (i.e. mapped) WebDAV resource or collection.
A _DAVResource object is created by a call to the DAVProvider.
The resource may then be used to query different attributes like ``res.name``,
``res.isCollection``, ``res.getContentLength()``, and ``res.supportEtag()``.
It also implements operations, that require an *existing* resource, like:
``getPreferredPath()``, ``createCollection()``, or ``getPropertyValue()``.
Usage::
res = provider.getResourceInst(path, environ)
if res is not None:
print res.getName()
**DAVProvider**
A DAV provider represents a shared WebDAV system.
There is only one provider instance per share, which is created during
server start-up. After that, the dispatcher (``request_resolver.RequestResolver``)
parses the request URL and adds it to the WSGI environment, so it
can be accessed like this::
provider = environ["wsgidav.provider"]
The main purpose of the provider is to create _DAVResource objects for URLs::
res = provider.getResourceInst(path, environ)
**Supporting Objects**
The DAVProvider takes two supporting objects:
propertyManager
An object that provides storage for dead properties assigned for webDAV resources.
PropertyManagers must provide the methods as described in
``wsgidav.interfaces.propertymanagerinterface``
See property_manager.PropertyManager for a sample implementation
using shelve.
lockmMnager
An object that provides storage for locks made on webDAV resources.
LockManagers must provide the methods as described in
``wsgidav.interfaces.lockmanagerinterface``
See lock_manager.LockManager for a sample implementation
using shelve.
See `Developers info`_ for more information about the WsgiDAV architecture.
.. _`Developers info`: http://docs.wsgidav.googlecode.com/hg/html/develop.html
"""
import sys
import time
import traceback
import urllib
from datetime import datetime
from wsgidav import util, xml_tools
# Trick PyDev to do intellisense and don't produce warnings:
from util import etree #@UnusedImport
import os
if False: from xml.etree import ElementTree as etree #@Reimport @UnresolvedImport
from dav_error import DAVError, \
HTTP_NOT_FOUND, HTTP_FORBIDDEN,\
PRECONDITION_CODE_ProtectedProperty, asDAVError
__docformat__ = "reStructuredText"

# Module-level logger, named after this module.
_logger = util.getModuleLogger(__name__)

# Standard live property names a DAV-compliant resource may expose.
_standardLivePropNames = ["{DAV:}creationdate",
                          "{DAV:}displayname",
                          "{DAV:}getcontenttype",
                          "{DAV:}resourcetype",
                          "{DAV:}getlastmodified",
                          "{DAV:}getcontentlength",
                          "{DAV:}getetag",
                          "{DAV:}getcontentlanguage",
                          # "{DAV:}source", # removed in rfc4918
                          ]
# Locking-related live property names.
_lockPropertyNames = ["{DAV:}lockdiscovery",
                      "{DAV:}supportedlock"]
#DAVHRES_Continue = "continue"
#DAVHRES_Done = "done"
#===============================================================================
# _DAVResource
#===============================================================================
class _DAVResource(object):
"""Represents a single existing DAV resource instance.
A resource may be a collection (aka 'folder') or a non-collection (aka
'file').
_DAVResource is the common base class for the specialized classes::
_DAVResource
+- DAVCollection
\- DAVNonCollection
Instances of this class are created through the DAVProvider::
res = provider.getResourceInst(path, environ)
if res and res.isCollection:
print res.getDisplayName()
In the example above, res will be ``None``, if the path cannot be mapped to
an existing resource.
The following attributes and methods are considered 'cheap'::
res.path
res.provider
res.name
res.isCollection
res.environ
Querying other attributes is considered 'expensive' and may be delayed until
the first access.
getContentLength()
getContentType()
getCreationDate()
getDisplayName()
getEtag()
getLastModified()
supportRanges()
supportEtag()
supportModified()
supportContentLength()
These functions return ``None``, if the property is not available, or
not supported.
See also DAVProvider.getResourceInst().
"""
def __init__(self, path, isCollection, environ):
assert path=="" or path.startswith("/")
self.provider = environ["wsgidav.provider"]
self.path = path
self.isCollection = isCollection
self.environ = environ
self.name = util.getUriName(self.path)
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self.path)
# def getContentLanguage(self):
# """Contains the Content-Language header returned by a GET without accept
# headers.
#
# The getcontentlanguage property MUST be defined on any DAV compliant
# resource that returns the Content-Language header on a GET.
# """
# raise NotImplementedError()
def getContentLength(self):
"""Contains the Content-Length header returned by a GET without accept
headers.
The getcontentlength property MUST be defined on any DAV compliant
resource that returns the Content-Length header in response to a GET.
This method MUST be implemented by non-collections only.
"""
if self.isCollection:
return None
raise NotImplementedError()
def getContentType(self):
"""Contains the Content-Type header returned by a GET without accept
headers.
This getcontenttype property MUST be defined on any DAV compliant
resource that returns the Content-Type header in response to a GET.
See http://www.webdav.org/specs/rfc4918.html#PROPERTY_getcontenttype
This method MUST be implemented by non-collections only.
"""
if self.isCollection:
return None
raise NotImplementedError()
def getCreationDate(self):
"""Records the time and date the resource was created.
The creationdate property should be defined on all DAV compliant
resources. If present, it contains a timestamp of the moment when the
resource was created (i.e., the moment it had non-null state).
This method SHOULD be implemented, especially by non-collections.
"""
return None
def getDirectoryInfo(self):
"""Return a list of dictionaries with information for directory
rendering.
This default implementation return None, so the dir browser will
traverse all members.
This method COULD be implemented for collection resources.
"""
assert self.isCollection
return None
def getDisplayName(self):
"""Provides a name for the resource that is suitable for presentation to
a user.
The displayname property should be defined on all DAV compliant
resources. If present, the property contains a description of the
resource that is suitable for presentation to a user.
This default implementation returns `name`, which is the last path
segment.
"""
return self.name
def getDisplayInfo(self):
"""Return additional info dictionary for displaying (optional).
This information is not part of the DAV specification, but meant for use
by the dir browser middleware.
This default implementation returns ``{'type': '...'}``
"""
if self.isCollection:
return { "type": "Directory" }
elif os.extsep in self.name:
ext = self.name.split(os.extsep)[-1].upper()
if len(ext) < 5:
return { "type": "%s-File" % ext }
return { "type": "File" }
def getEtag(self):
"""
See http://www.webdav.org/specs/rfc4918.html#PROPERTY_getetag
This method SHOULD be implemented, especially by non-collections.
"""
return None
def getLastModified(self):
"""Contains the Last-Modified header returned by a GET method without
accept headers.
Return None, if this live property is not supported.
Note that the last-modified date on a resource may reflect changes in
any part of the state of the resource, not necessarily just a change to
the response to the GET method. For example, a change in a property may
cause the last-modified date to change. The getlastmodified property
MUST be defined on any DAV compliant resource that returns the
Last-Modified header in response to a GET.
This method SHOULD be implemented, especially by non-collections.
"""
return None
def supportRanges(self):
"""Return True, if this non-resource supports Range on GET requests.
This method MUST be implemented by non-collections only.
"""
raise NotImplementedError()
def supportContentLength(self):
"""Return True, if this resource supports Content-Length.
This default implementation checks `self.getContentLength() is None`.
"""
return self.getContentLength() is not None
def supportEtag(self):
"""Return True, if this resource supports ETags.
This default implementation checks `self.getEtag() is None`.
"""
return self.getEtag() is not None
def supportModified(self):
"""Return True, if this resource supports last modified dates.
This default implementation checks `self.getLastModified() is None`.
"""
return self.getLastModified() is not None
def getPreferredPath(self):
"""Return preferred mapping for a resource mapping.
Different URLs may map to the same resource, e.g.:
'/a/b' == '/A/b' == '/a/b/'
getPreferredPath() returns the same value for all these variants, e.g.:
'/a/b/' (assuming resource names considered case insensitive)
@param path: a UTF-8 encoded, unquoted byte string.
@return: a UTF-8 encoded, unquoted byte string.
"""
if self.path in ("", "/"):
return "/"
# Append '/' for collections
if self.isCollection and not self.path.endswith("/"):
return self.path + "/"
# TODO: handle case-sensitivity, depending on OS
# (FileSystemProvider could do this with os.path:
# (?) on unix we can assume that the path already matches exactly the case of filepath
# on windows we could use path.lower() or get the real case from the file system
return self.path
def getRefUrl(self):
"""Return the quoted, absolute, unique URL of a resource, relative to appRoot.
Byte string, UTF-8 encoded, quoted.
Starts with a '/'. Collections also have a trailing '/'.
This is basically the same as getPreferredPath, but deals with
'virtual locations' as well.
e.g. '/a/b' == '/A/b' == '/bykey/123' == '/byguid/abc532'
getRefUrl() returns the same value for all these URLs, so it can be
used as a key for locking and persistence storage.
DAV providers that allow virtual-mappings must override this method.
See also comments in DEVELOPERS.txt glossary.
"""
return urllib.quote(self.provider.sharePath + self.getPreferredPath())
# def getRefKey(self):
# """Return an unambigous identifier string for a resource.
#
# Since it is always unique for one resource, <refKey> is used as key for
# the lock- and property storage dictionaries.
#
# This default implementation calls getRefUrl(), and strips a possible
# trailing '/'.
# """
# refKey = self.getRefUrl(path)
# if refKey == "/":
# return refKey
# return refKey.rstrip("/")
def getHref(self):
    """Convert path to a URL that can be passed to XML responses.

    Byte string, UTF-8 encoded, quoted.

    See http://www.webdav.org/specs/rfc4918.html#rfc.section.8.3
    We are using the path-absolute option. i.e. starting with '/'.
    URI ; See section 3.2.1 of [RFC2068]
    """
    # Nautilus chokes, if href encodes '(' as '%28'
    # So we don't encode 'extra' and 'safe' characters (see rfc2068 3.2.1)
    safe = "/" + "!*'()," + "$-_|."
    # NOTE: urllib.quote is the Python 2 API (urllib.parse.quote in Python 3).
    return urllib.quote(self.provider.mountPath + self.provider.sharePath
                        + self.getPreferredPath(), safe=safe)
# def getParent(self):
# """Return parent _DAVResource or None.
#
# There is NO checking, if the parent is really a mapped collection.
# """
# parentpath = util.getUriParent(self.path)
# if not parentpath:
# return None
# return self.provider.getResourceInst(parentpath)
def getMemberList(self):
    """Return direct members as a list of _DAVResource (or derived) objects.

    Default implementation: resolve every name from getMemberNames()
    through getMember(). Providers COULD override this for performance.
    """
    if not self.isCollection:
        raise NotImplementedError()
    result = []
    for childName in self.getMemberNames():
        child = self.getMember(childName)
        # Every advertised name must resolve to a resource.
        assert child is not None
        result.append(child)
    return result
def getMemberNames(self):
    """Return the names of all direct collection members (UTF-8 byte strings).

    Every provider MUST implement this for collection resources; there is
    no default.
    """
    raise NotImplementedError()
def getDescendants(self, collections=True, resources=True,
                   depthFirst=False, depth="infinity", addSelf=False):
    """Return a list _DAVResource objects of a collection (children,
    grand-children, ...).

    This default implementation calls self.getMemberList() recursively.

    This function may also be called for non-collections (with addSelf=True).

    :Parameters:
        depthFirst : bool
            use <False>, to list containers before content.
            (e.g. when moving / copying branches.)
            Use <True>, to list content before containers.
            (e.g. when deleting branches.)
        depth : string
            '0' | '1' | 'infinity'
    """
    assert depth in ("0", "1", "infinity")
    res = []
    if addSelf and not depthFirst:
        res.append(self)
    if depth != "0" and self.isCollection:
        for child in self.getMemberList():
            if not child:
                # NOTE(review): this re-call discards its result and has no
                # visible effect -- looks like leftover debugging code;
                # confirm before removing.
                _ = self.getMemberList()
            want = (collections and child.isCollection) or (resources and not child.isCollection)
            if want and not depthFirst:
                res.append(child)
            if child.isCollection and depth == "infinity":
                # Recurse without addSelf: the child itself is appended above/below.
                res.extend(child.getDescendants(collections, resources, depthFirst, depth, addSelf=False))
            if want and depthFirst:
                res.append(child)
    if addSelf and depthFirst:
        res.append(self)
    return res
# --- Properties -----------------------------------------------------------
def getPropertyNames(self, isAllProp):
    """Return list of supported property names in Clark Notation.

    Note that 'allprop', despite its name, which remains for
    backward-compatibility, does not return every property, but only dead
    properties and the live properties defined in RFC4918.

    This default implementation returns a combination of:

    - Supported standard live properties in the {DAV:} namespace, if the
      related getter method returns not None.
    - {DAV:}lockdiscovery and {DAV:}supportedlock, if a lock manager is
      present
    - If a property manager is present, then a list of dead properties is
      appended

    A resource provider may override this method, to add a list of
    supported custom live property names.
    """
    ## Live properties
    propNameList = []
    # resourcetype is advertised for every resource.
    propNameList.append("{DAV:}resourcetype")

    # Optional live properties: advertise only those whose getter yields a value.
    if self.getCreationDate() is not None:
        propNameList.append("{DAV:}creationdate")
    if self.getContentLength() is not None:
        # Only non-collections report a content length.
        assert not self.isCollection
        propNameList.append("{DAV:}getcontentlength")
    if self.getContentType() is not None:
        propNameList.append("{DAV:}getcontenttype")
    if self.getLastModified() is not None:
        propNameList.append("{DAV:}getlastmodified")
    if self.getDisplayName() is not None:
        propNameList.append("{DAV:}displayname")
    if self.getEtag() is not None:
        propNameList.append("{DAV:}getetag")

    ## Locking properties
    if self.provider.lockManager and not self.preventLocking():
        # _lockPropertyNames is a module-level list defined elsewhere in this file.
        propNameList.extend(_lockPropertyNames)

    ## Dead properties
    if self.provider.propManager:
        refUrl = self.getRefUrl()
        propNameList.extend(self.provider.propManager.getProperties(refUrl))

    return propNameList
def getProperties(self, mode, nameList=None):
    """Return properties as list of 2-tuples (name, value).

    If mode is 'propname', then None is returned for the value.

    name
        the property name in Clark notation.
    value
        may have different types, depending on the status:
        - string or unicode: for standard property values.
        - etree.Element: for complex values.
        - DAVError in case of errors.
        - None: if mode == 'propname'.

    @param mode: "allprop", "propname", or "named"
    @param nameList: list of property names in Clark Notation (required for
        mode 'named')

    This default implementation basically calls self.getPropertyNames() to
    get the list of names, then calls self.getPropertyValue() on each of them.
    """
    assert mode in ("allprop", "propname", "named")
    if mode in ("allprop", "propname"):
        # TODO: 'allprop' could have nameList, when <include> option is
        # implemented
        assert nameList is None
        nameList = self.getPropertyNames(mode == "allprop")
    else:
        assert nameList is not None

    propList = []
    namesOnly = (mode == "propname")
    for name in nameList:
        try:
            if namesOnly:
                propList.append( (name, None) )
            else:
                value = self.getPropertyValue(name)
                propList.append( (name, value) )
        # FIX: use 'except X as e' (valid on Python 2.6+ AND Python 3)
        # instead of the legacy 'except X, e' form, which is a syntax
        # error on Python 3.
        except DAVError as e:
            # Per-property errors are reported inline, not raised.
            propList.append( (name, e) )
        except Exception as e:
            propList.append( (name, asDAVError(e)) )
            if self.provider.verbose >= 2:
                traceback.print_exc(10, sys.stdout)
    return propList
def getPropertyValue(self, propname):
    """Return the value of a property.

    propname:
        the property name in Clark notation.
    return value:
        may have different types, depending on the status:
        - string or unicode: for standard property values.
        - lxml.etree.Element: for complex values.

    If the property is not available, a DAVError is raised.

    This default implementation handles ``{DAV:}lockdiscovery`` and
    ``{DAV:}supportedlock`` using the associated lock manager.

    All other *live* properties (i.e. propname starts with ``{DAV:}``) are
    delegated to the self.xxx() getters.

    Finally, other properties are considered *dead*, and are handled by
    the associated property manager.
    """
    refUrl = self.getRefUrl()

    # lock properties
    lm = self.provider.lockManager
    if lm and propname == "{DAV:}lockdiscovery":
        # TODO: we return HTTP_NOT_FOUND if no lockmanager is present. Correct?
        activelocklist = lm.getUrlLockList(refUrl)
        lockdiscoveryEL = etree.Element(propname)
        # Build one <activelock> element per active lock on this URL.
        for lock in activelocklist:
            activelockEL = etree.SubElement(lockdiscoveryEL, "{DAV:}activelock")

            locktypeEL = etree.SubElement(activelockEL, "{DAV:}locktype")
            etree.SubElement(locktypeEL, "{DAV:}%s" % lock["type"])

            lockscopeEL = etree.SubElement(activelockEL, "{DAV:}lockscope")
            etree.SubElement(lockscopeEL, "{DAV:}%s" % lock["scope"])

            etree.SubElement(activelockEL, "{DAV:}depth").text = lock["depth"]
            # lock["owner"] is an XML string
            ownerEL = xml_tools.stringToXML(lock["owner"])

            activelockEL.append(ownerEL)

            timeout = lock["timeout"]
            if timeout < 0:
                # Negative timeout encodes 'never expires'.
                timeout = "Infinite"
            else:
                # NOTE: long() is Python-2-only (int subsumes it in Python 3).
                timeout = "Second-" + str(long(timeout - time.time()))
            etree.SubElement(activelockEL, "{DAV:}timeout").text = timeout

            locktokenEL = etree.SubElement(activelockEL, "{DAV:}locktoken")
            etree.SubElement(locktokenEL, "{DAV:}href").text = lock["token"]

            # TODO: this is ugly:
            #      res.getPropertyValue("{DAV:}lockdiscovery")
            #
            # lockRoot = self.getHref(self.provider.refUrlToPath(lock["root"]))
            lockPath = self.provider.refUrlToPath(lock["root"])
            lockRes = self.provider.getResourceInst(lockPath, self.environ)
            # FIXME: test for None
            lockHref = lockRes.getHref()
            # print "lockedRoot: %s -> href=%s" % (lockPath, lockHref)

            lockrootEL = etree.SubElement(activelockEL, "{DAV:}lockroot")
            etree.SubElement(lockrootEL, "{DAV:}href").text = lockHref

        return lockdiscoveryEL

    elif lm and propname == "{DAV:}supportedlock":
        # TODO: we return HTTP_NOT_FOUND if no lockmanager is present. Correct?
        # TODO: the lockmanager should decide about it's features
        supportedlockEL = etree.Element(propname)

        # Advertise exclusive write locks ...
        lockentryEL = etree.SubElement(supportedlockEL, "{DAV:}lockentry")
        lockscopeEL = etree.SubElement(lockentryEL, "{DAV:}lockscope")
        etree.SubElement(lockscopeEL, "{DAV:}exclusive")
        locktypeEL = etree.SubElement(lockentryEL, "{DAV:}locktype")
        etree.SubElement(locktypeEL, "{DAV:}write")

        # ... and shared write locks.
        lockentryEL = etree.SubElement(supportedlockEL, "{DAV:}lockentry")
        lockscopeEL = etree.SubElement(lockentryEL, "{DAV:}lockscope")
        etree.SubElement(lockscopeEL, "{DAV:}shared")
        locktypeEL = etree.SubElement(lockentryEL, "{DAV:}locktype")
        etree.SubElement(locktypeEL, "{DAV:}write")

        return supportedlockEL

    elif propname.startswith("{DAV:}"):
        # Standard live property (raises HTTP_NOT_FOUND if not supported)
        if propname == "{DAV:}creationdate" and self.getCreationDate() is not None:
            # Note: uses RFC3339 format (ISO 8601)
            return util.getRfc3339Time(self.getCreationDate())
        elif propname == "{DAV:}getcontenttype" and self.getContentType() is not None:
            return self.getContentType()
        elif propname == "{DAV:}resourcetype":
            if self.isCollection:
                resourcetypeEL = etree.Element(propname)
                etree.SubElement(resourcetypeEL, "{DAV:}collection")
                return resourcetypeEL
            # Non-collections return an empty resourcetype value.
            return ""
        elif propname == "{DAV:}getlastmodified" and self.getLastModified() is not None:
            # Note: uses RFC1123 format
            return util.getRfc1123Time(self.getLastModified())
        elif propname == "{DAV:}getcontentlength" and self.getContentLength() is not None:
            # Note: must be a numeric string
            return str(self.getContentLength())
        elif propname == "{DAV:}getetag" and self.getEtag() is not None:
            return self.getEtag()
        elif propname == "{DAV:}displayname" and self.getDisplayName() is not None:
            return self.getDisplayName()

        # Unsupported, no persistence available, or property not found
        raise DAVError(HTTP_NOT_FOUND)

    # Dead property
    pm = self.provider.propManager
    if pm:
        value = pm.getProperty(refUrl, propname)
        if value is not None:
            return xml_tools.stringToXML(value)

    # No persistence available, or property not found
    raise DAVError(HTTP_NOT_FOUND)
def setPropertyValue(self, propname, value, dryRun=False):
    """Set a property value or remove a property.

    value == None means 'remove property'.
    Raise HTTP_FORBIDDEN if property is read-only, or not supported.

    When dryRun is True, this function should raise errors, as in a real
    run, but MUST NOT change any data.

    This default implementation

    - raises HTTP_FORBIDDEN, if trying to modify a locking property
    - raises HTTP_FORBIDDEN, if trying to modify a {DAV:} property
    - stores everything else as dead property, if a property manager is
      present.
    - raises HTTP_FORBIDDEN, else

    Removing a non-existing prop is NOT an error.

    Note: RFC 4918 states that {DAV:}displayname 'SHOULD NOT be protected'

    A resource provider may override this method, to update supported custom
    live properties.
    """
    # Values arrive as parsed XML elements (or None for removal).
    assert value is None or isinstance(value, (etree._Element))

    if propname in _lockPropertyNames:
        # Locking properties are always read-only
        raise DAVError(HTTP_FORBIDDEN,
                       errcondition=PRECONDITION_CODE_ProtectedProperty)

    # Dead property
    pm = self.provider.propManager
    if pm and not propname.startswith("{DAV:}"):
        refUrl = self.getRefUrl()
        if value is None:
            return pm.removeProperty(refUrl, propname)
        else:
            # Persist the element serialized back to an XML string.
            value = etree.tostring(value)
            return pm.writeProperty(refUrl, propname, value, dryRun)

    raise DAVError(HTTP_FORBIDDEN)
def removeAllProperties(self, recursive):
    """Drop every dead property attached to this resource (no-op when no
    property manager is configured)."""
    pm = self.provider.propManager
    if pm:
        pm.removeProperties(self.getRefUrl())
# --- Locking --------------------------------------------------------------
def preventLocking(self):
    """Return True, to prevent locking.

    The default (False) leaves standard processing in place: locking and
    lock refreshing are handled by the configured lock manager, if any.
    """
    return False
def isLocked(self):
    """Return True, if URI is locked (False when no lock manager is
    configured)."""
    lm = self.provider.lockManager
    if lm is None:
        return False
    return lm.isUrlLocked(self.getRefUrl())
def removeAllLocks(self, recursive):
    """Remove all direct locks on this resource's ref-URL (no-op when no
    lock manager is configured)."""
    lm = self.provider.lockManager
    if lm:
        lm.removeAllLocksFromUrl(self.getRefUrl())
# --- Read / write ---------------------------------------------------------
def createEmptyResource(self, name):
    """Create and return an empty (length-0) member resource.

    Called for LOCK requests on unmapped URLs. Caller guarantees: self is
    a collection, <self.path + name> does not exist, and there are no
    conflicting locks. Returns a DAVResource.

    Write-enabled providers MUST implement this; the default refuses with
    HTTP_FORBIDDEN.
    """
    assert self.isCollection
    raise DAVError(HTTP_FORBIDDEN)
def createCollection(self, name):
    """Create a new member collection named <name>.

    Caller guarantees: self is a collection, <self.path + name> does not
    exist, and there are no conflicting locks.

    Write-enabled providers MUST implement this; the default refuses with
    HTTP_FORBIDDEN.
    """
    assert self.isCollection
    raise DAVError(HTTP_FORBIDDEN)
def getContent(self):
    """Open the resource content and return a readable file-like stream.

    The calling application will close() the returned stream.
    Every provider MUST implement this for non-collections.
    """
    assert not self.isCollection
    raise NotImplementedError()
def beginWrite(self, contentType=None):
    """Open content as a writable stream.

    Write-enabled providers MUST implement this; the default refuses with
    HTTP_FORBIDDEN.
    """
    assert not self.isCollection
    raise DAVError(HTTP_FORBIDDEN)
def endWrite(self, withErrors):
    """Notification hook: PUT has finished writing.

    Purely informational; the default implementation ignores it.
    """
    pass
def handleDelete(self):
    """Handle a DELETE request natively; return False for default processing.

    Called by the DELETE handler after request syntax, locks and If-headers
    were validated. A provider may return:

    - False: nothing was done; WsgiDAV deletes every resource bottom-up
      via delete().
    - True: the DELETE was fully handled (HTTP_NO_CONTENT is reported).
    - a list of errors: the delete failed completely or partially;
      reported to the client as ``[ (<ref-url>, <DAVError>), ... ]``.
    - raise DAVError: refuse the request; the error is reported.

    Implementations may apply other semantics and return True. For example
    deleting '/by_tag/cool/myres' may simply remove the 'cool' tag from
    'my_res'; the resource may then still be reachable by other URLs, so
    locks and properties are not removed.

    Implementation of this method is OPTIONAL; this default enables
    standard processing.
    """
    return False
def supportRecursiveDelete(self):
    """Return True, if delete() may be called on non-empty collections
    (see comments there).

    MUST be implemented for collections (never called on non-collections).
    """
    assert self.isCollection
    raise NotImplementedError()
def delete(self):
    """Remove this resource (recursive); MUST be implemented for write access.

    Preconditions (ensured by the caller): no conflicting locks or
    If-headers; when supportRecursiveDelete() is False and this is a
    collection, all members have already been deleted.

    When supportRecursiveDelete() is True, this method must handle
    recursive deletes and report child failures as a list of
    ``(<ref-url>, <DAVError>)`` tuples
    (see http://www.webdav.org/specs/rfc4918.html#delete-collections).

    An implementation removes the resource (including members, for
    recursive deletes), its associated direct locks and dead properties;
    it raises HTTP_FORBIDDEN for read-only resources and
    HTTP_INTERNAL_ERROR on failure.
    """
    raise NotImplementedError()
def handleCopy(self, destPath, depthInfinity):
    """Handle a COPY request natively; return False for default processing.

    Called by the COPY handler after request syntax, locks and If-headers
    were validated. A provider may return:

    - False: nothing was done; WsgiDAV copies bottom-up via
      copyMoveSingle() for every resource.
    - True: the COPY was fully handled (HTTP_NO_CONTENT/HTTP_CREATED is
      reported).
    - a list of errors: the copy failed completely or partially; reported
      to the client as ``[ (<ref-url>, <DAVError>), ... ]``.
    - raise DAVError: refuse the request; the error is reported.

    Implementations may apply other semantics and return True. For example
    copying '/by_tag/cool/myres' to '/by_tag/hot/myres' may simply add a
    'hot' tag; the resource may then still be reachable by other URLs, so
    locks and properties are not removed.

    Implementation of this method is OPTIONAL; this default enables
    standard processing.
    """
    return False
def copyMoveSingle(self, destPath, isMove):
    """Copy or move this resource to destPath (non-recursive).

    Caller guarantees: no conflicting locks on the destination; an
    existing destPath is only overwritten when source and destination are
    of the same type ((non-)collections) and Overwrite='T' was passed;
    destPath is not a child path of this resource.

    The implementation overwrites existing non-collection content, but
    MUST NOT copy collection members or locks. Live properties SHOULD be
    copied where appropriate (e.g. displayname; creationdate is reset if
    the target did not exist before -- see
    http://www.webdav.org/specs/rfc4918.html#dav.properties), and dead
    properties SHOULD be copied.

    When isMove is True: live properties (e.g. creationdate) move too;
    non-collections are moved, not copied; for collections this behaves
    like copy-mode -- the destination collection is created and
    properties are copied, members are NOT created, and the source
    collection MUST NOT be removed.

    Raises HTTP_FORBIDDEN for read-only providers and HTTP_INTERNAL_ERROR
    on error. MUST be implemented by providers that support write access.
    """
    raise NotImplementedError()
def handleMove(self, destPath):
    """Handle a MOVE request natively; return False for default processing.

    Called by the MOVE handler after request syntax, locks and If-headers
    were validated. A provider may return:

    - False: nothing was done; WsgiDAV processes the request by calling
      delete() and copyMoveSingle() for every resource, bottom-up.
    - True: the MOVE was fully handled (HTTP_NO_CONTENT/HTTP_CREATED is
      reported).
    - a list of errors: the move failed completely or partially; reported
      to the client as ``[ (<ref-url>, <DAVError>), ... ]``.
    - raise DAVError: refuse the request; the error is reported.

    Implementations may apply other semantics and return True. For example
    moving '/by_tag/cool/myres' to '/by_tag/hot/myres' may simply swap the
    'cool' tag for 'hot'; the resource may then still be reachable by
    other URLs, so locks and properties are not removed.

    Implementation of this method is OPTIONAL; this default enables
    standard processing.
    """
    return False
def supportRecursiveMove(self, destPath):
    """Return True, if moveRecursive() is available (see comments there).

    Only called on collections.
    """
    assert self.isCollection
    raise NotImplementedError()
def moveRecursive(self, destPath):
    """Move this resource and all members to destPath.

    Only called when supportRecursiveMove() returns True; otherwise the
    request server implements MOVE via delete/copy. Since MOVE is
    frequently used to rename without changing the parent collection,
    live properties set at creation (e.g. DAV:creationdate) SHOULD be
    preserved.

    Caller guarantees: no conflicting locks or If-headers on source or
    destination; destPath does not exist and is not a member of this
    resource.

    Child failures must be reported as a tuple list
    ``[ (<ref-url>, <DAVError>), ... ]``
    (see http://www.webdav.org/specs/rfc4918.html#move-collections).

    The implementation MUST NOT move locks (remove source locks instead),
    SHOULD maintain applicable live properties and MUST maintain dead
    properties; it raises HTTP_FORBIDDEN for read-only resources and
    HTTP_INTERNAL_ERROR on error. An implementation may apply other
    semantics (e.g. moving '/by_tag/cool/myres' to '/by_tag/new/myres'
    may simply add a 'new' tag).

    This method MAY be implemented in order to improve performance.
    """
    raise DAVError(HTTP_FORBIDDEN)
def resolve(self, scriptName, pathInfo):
    """Return a _DAVResource object for the path (None, if not found).

    `pathInfo` is a URL relative to this object; DAVCollection.resolve()
    provides an implementation.
    """
    raise NotImplementedError()
#===============================================================================
# DAVCollection
#===============================================================================
class DAVNonCollection(_DAVResource):
    """A _DAVResource that carries content (like a 'file' on a filesystem)
    and has no members.

    A DAVNonCollection is able to read and write file content.

    See also _DAVResource.
    """
    def __init__(self, path, environ):
        _DAVResource.__init__(self, path, False, environ)

    def getContentLength(self):
        """Return the byte length of the content.

        MUST be implemented. See also _DAVResource.getContentLength().
        """
        raise NotImplementedError()

    def getContentType(self):
        """Return the Content-Type header a plain GET (no accept headers)
        would produce.

        MUST be defined on any DAV compliant resource that returns a
        Content-Type header in response to a GET; see
        http://www.webdav.org/specs/rfc4918.html#PROPERTY_getcontenttype
        """
        raise NotImplementedError()

    def getContent(self):
        """Return a readable file-like stream with the resource content.

        The application will close() the stream. MUST be implemented by
        all providers.
        """
        raise NotImplementedError()

    def supportRanges(self):
        """Whether GET supports Range requests on this resource; the
        default is no."""
        return False

    def beginWrite(self, contentType=None):
        """Open content as a writable stream; write-enabled providers MUST
        implement this."""
        raise DAVError(HTTP_FORBIDDEN)

    def endWrite(self, withErrors):
        """Notification hook: PUT finished writing. Default: ignore."""
        pass

    def resolve(self, scriptName, pathInfo):
        """Return self for an empty relative path, else None (files have
        no members)."""
        return self if pathInfo in ("", "/") else None
#===============================================================================
# DAVCollection
#===============================================================================
class DAVCollection(_DAVResource):
    """A _DAVResource that has members (like a 'folder' on a filesystem).

    A DAVCollection 'knows' its members, and how to obtain them from the
    backend storage.

    See also _DAVResource.
    """
    def __init__(self, path, environ):
        _DAVResource.__init__(self, path, True, environ)
        # NOTE: optional member caching (a dict with enabled/expire/maxAge/
        # created/lastUsed/members bookkeeping plus _cacheSet/_cacheGet/
        # _cachePurge helpers) was sketched here but is not implemented.

    def getContentLength(self):
        """Collections have no content length."""
        return None

    def getContentType(self):
        """Collections have no content type."""
        return None

    def createEmptyResource(self, name):
        """Create and return an empty (length-0) member resource.

        Called for LOCK requests on unmapped URLs. Caller guarantees:
        this is a collection, <self.path + name> does not exist, and
        there are no conflicting locks. Returns a DAVResource.

        Write-enabled providers MUST implement this; the default refuses
        with HTTP_FORBIDDEN.
        """
        raise DAVError(HTTP_FORBIDDEN)

    def createCollection(self, name):
        """Create a new member collection named <name>.

        Caller guarantees: this is a collection, <self.path + name> does
        not exist, and there are no conflicting locks.

        Write-enabled providers MUST implement this; the default refuses
        with HTTP_FORBIDDEN.
        """
        assert self.isCollection
        raise DAVError(HTTP_FORBIDDEN)

    def getMember(self, name):
        """Return the child resource <name> (None, if not found).

        Default: delegate to self.provider.getResourceInst(). Derived
        classes COULD override this for performance reasons.
        """
        assert self.isCollection
        return self.provider.getResourceInst(util.joinUri(self.path, name),
                                             self.environ)

    def getMemberNames(self):
        """Return list of (direct) collection member names (UTF-8 byte
        strings). MUST be implemented."""
        assert self.isCollection
        raise NotImplementedError()

    def supportRecursiveDelete(self):
        """Whether delete() accepts non-empty collections (see comments
        there); the default is False."""
        return False

    def delete(self):
        """Remove this resource (possibly recursive).

        MUST be implemented if the resource allows write access; see
        _DAVResource.delete().
        """
        raise DAVError(HTTP_FORBIDDEN)

    def copyMoveSingle(self, destPath, isMove):
        """Copy or move this resource to destPath (non-recursive).

        MUST be implemented if the resource allows write access; see
        _DAVResource.copyMoveSingle().
        """
        raise DAVError(HTTP_FORBIDDEN)

    def supportRecursiveMove(self, destPath):
        """Whether moveRecursive() is available (see comments there); the
        default is False."""
        return False

    def moveRecursive(self, destPath):
        """Move this resource and members to destPath.

        MAY be implemented in order to improve performance.
        """
        raise DAVError(HTTP_FORBIDDEN)

    def resolve(self, scriptName, pathInfo):
        """Return the _DAVResource for the relative URL `pathInfo` (None,
        if not found)."""
        if pathInfo in ("", "/"):
            return self
        assert pathInfo.startswith("/")
        childName, remainder = util.popPath(pathInfo)
        child = self.getMember(childName)
        if child is None or remainder in ("", "/"):
            return child
        # Descend into the child with the consumed segment appended.
        return child.resolve(util.joinUri(scriptName, childName), remainder)
#===============================================================================
# DAVProvider
#===============================================================================
class DAVProvider(object):
    """Abstract base class for DAV resource providers.

    There will be only one DAVProvider instance per share (not per request).
    """
    def __init__(self):
        self.mountPath = ""
        self.sharePath = None
        self.lockManager = None
        self.propManager = None
        self.verbose = 2
        # Request statistics counters.
        self._count_getResourceInst = 0
        self._count_getResourceInstInit = 0
        # self.caseSensitiveUrls = True

    def __repr__(self):
        return self.__class__.__name__

    def setMountPath(self, mountPath):
        """Set application root for this resource provider.

        This is the value of SCRIPT_NAME, when WsgiDAVApp is called.
        """
        assert mountPath in ("", "/") or not mountPath.endswith("/")
        self.mountPath = mountPath

    def setSharePath(self, sharePath):
        """Set application location for this resource provider.

        @param sharePath: a UTF-8 encoded, unquoted byte string.
        """
        # NOTE: 'unicode' is the Python 2 type (str in Python 3).
        if isinstance(sharePath, unicode):
            sharePath = sharePath.encode("utf8")
        assert sharePath == "" or sharePath.startswith("/")
        if sharePath == "/":
            # This allows to code 'absPath = sharePath + path'
            sharePath = ""
        assert sharePath in ("", "/") or not sharePath.endswith("/")
        self.sharePath = sharePath

    def setLockManager(self, lockManager):
        assert not lockManager or hasattr(lockManager, "checkWritePermission"), \
            "Must be compatible with wsgidav.lock_manager.LockManager"
        self.lockManager = lockManager

    def setPropManager(self, propManager):
        assert not propManager or hasattr(propManager, "copyProperties"), \
            "Must be compatible with wsgidav.property_manager.PropertyManager"
        self.propManager = propManager

    def refUrlToPath(self, refUrl):
        """Convert a refUrl to a path, by stripping the share prefix.

        Used to calculate the <path> from a storage key by inverting
        getRefUrl().
        """
        return "/" + urllib.unquote(util.lstripstr(refUrl, self.sharePath)).lstrip("/")

    def getResourceInst(self, path, environ):
        """Return a _DAVResource object for path (None, if it does not exist).

        Should be called only once per request and resource::

            res = provider.getResourceInst(path, environ)
            if res and not res.isCollection:
                print res.getContentType()

        <environ> may be used by the provider to implement per-request
        caching. See _DAVResource for details. MUST be implemented.
        """
        raise NotImplementedError()

    def exists(self, path, environ):
        """Return True, if path maps to an existing resource.

        Use only when no other information is queried for <path>;
        otherwise create a _DAVResource first. SHOULD be overridden by a
        more efficient implementation.
        """
        return self.getResourceInst(path, environ) is not None

    def isCollection(self, path, environ):
        """Return True, if path maps to an existing collection resource.

        Use only when no other information is queried for <path>;
        otherwise create a _DAVResource first.
        """
        res = self.getResourceInst(path, environ)
        return res and res.isCollection
|
hantek/fuel | refs/heads/master | tests/test_adult.py | 16 | import numpy
from numpy.testing import assert_raises, assert_equal, assert_allclose
from fuel.datasets import Adult
from tests import skip_if_not_available
def test_adult_test():
skip_if_not_available(datasets=['adult.hdf5'])
dataset = Adult(('test',), load_in_memory=False)
handle = dataset.open()
data, labels = dataset.get_data(handle, slice(0, 10))
assert data.shape == (10, 104)
assert labels.shape == (10, 1)
known = numpy.array(
[25., 38., 28., 44., 34., 63., 24., 55., 65., 36.])
assert_allclose(data[:, 0], known)
assert dataset.num_examples == 15060
dataset.close(handle)
dataset = Adult(('train',), load_in_memory=False)
handle = dataset.open()
data, labels = dataset.get_data(handle, slice(0, 10))
assert data.shape == (10, 104)
assert labels.shape == (10, 1)
known = numpy.array(
[39., 50., 38., 53., 28., 37., 49., 52., 31., 42.])
assert_allclose(data[:, 0], known)
assert dataset.num_examples == 30162
dataset.close(handle)
def test_adult_axes():
skip_if_not_available(datasets=['adult.hdf5'])
dataset = Adult(('test',), load_in_memory=False)
assert_equal(dataset.axis_labels['features'],
('batch', 'feature'))
dataset = Adult(('train',), load_in_memory=False)
assert_equal(dataset.axis_labels['features'],
('batch', 'feature'))
def test_adult_invalid_split():
skip_if_not_available(datasets=['adult.hdf5'])
assert_raises(ValueError, Adult, ('dummy',))
|
B-MOOC/edx-platform | refs/heads/master | common/lib/capa/capa/tests/test_answer_pool.py | 196 | """
Tests the logic of the "answer-pool" attribute, e.g.
<choicegroup answer-pool="4">
"""
import unittest
import textwrap
from . import test_capa_system, new_loncapa_problem
from capa.responsetypes import LoncapaProblemError
class CapaAnswerPoolTest(unittest.TestCase):
"""Capa Answer Pool Test"""
def setUp(self):
super(CapaAnswerPoolTest, self).setUp()
self.system = test_capa_system()
# XML problem setup used by a few tests.
common_question_xml = textwrap.dedent("""
<problem>
<p>What is the correct answer?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="4">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="true" explanation-id="solution1">correct-1</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
<choice correct="true" explanation-id="solution2">correct-2</choice>
</choicegroup>
</multiplechoiceresponse>
<solutionset>
<solution explanation-id="solution1">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 1st solution</p>
<p>Not much to explain here, sorry!</p>
</div>
</solution>
<solution explanation-id="solution2">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 2nd solution</p>
</div>
</solution>
</solutionset>
</problem>
""")
def test_answer_pool_4_choices_1_multiplechoiceresponse_seed1(self):
problem = new_loncapa_problem(self.common_question_xml, seed=723)
the_html = problem.get_html()
# [('choice_3', u'wrong-3'), ('choice_5', u'correct-2'), ('choice_1', u'wrong-2'), ('choice_4', u'wrong-4')]
self.assertRegexpMatches(the_html, r"<div>.*\[.*'wrong-3'.*'correct-2'.*'wrong-2'.*'wrong-4'.*\].*</div>")
self.assertRegexpMatches(the_html, r"<div>\{.*'1_solution_2'.*\}</div>")
self.assertEqual(the_html, problem.get_html(), 'should be able to call get_html() twice')
# Check about masking
response = problem.responders.values()[0]
self.assertFalse(response.has_mask())
self.assertTrue(response.has_answerpool())
self.assertEqual(response.unmask_order(), ['choice_3', 'choice_5', 'choice_1', 'choice_4'])
def test_answer_pool_4_choices_1_multiplechoiceresponse_seed2(self):
problem = new_loncapa_problem(self.common_question_xml, seed=9)
the_html = problem.get_html()
# [('choice_0', u'wrong-1'), ('choice_4', u'wrong-4'), ('choice_3', u'wrong-3'), ('choice_2', u'correct-1')]
self.assertRegexpMatches(the_html, r"<div>.*\[.*'wrong-1'.*'wrong-4'.*'wrong-3'.*'correct-1'.*\].*</div>")
self.assertRegexpMatches(the_html, r"<div>\{.*'1_solution_1'.*\}</div>")
# Check about masking
response = problem.responders.values()[0]
self.assertFalse(response.has_mask())
self.assertTrue(hasattr(response, 'has_answerpool'))
self.assertEqual(response.unmask_order(), ['choice_0', 'choice_4', 'choice_3', 'choice_2'])
def test_no_answer_pool_4_choices_1_multiplechoiceresponse(self):
xml_str = textwrap.dedent("""
<problem>
<p>What is the correct answer?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="true" explanation-id="solution1">correct-1</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
<choice correct="true" explanation-id="solution2">correct-2</choice>
</choicegroup>
</multiplechoiceresponse>
<solutionset>
<solution explanation-id="solution1">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 1st solution</p>
<p>Not much to explain here, sorry!</p>
</div>
</solution>
<solution explanation-id="solution2">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 2nd solution</p>
</div>
</solution>
</solutionset>
</problem>
""")
problem = new_loncapa_problem(xml_str)
the_html = problem.get_html()
self.assertRegexpMatches(the_html, r"<div>.*\[.*'wrong-1'.*'wrong-2'.*'correct-1'.*'wrong-3'.*'wrong-4'.*'correct-2'.*\].*</div>")
self.assertRegexpMatches(the_html, r"<div>\{.*'1_solution_1'.*'1_solution_2'.*\}</div>")
self.assertEqual(the_html, problem.get_html(), 'should be able to call get_html() twice')
# Check about masking
response = problem.responders.values()[0]
self.assertFalse(response.has_mask())
self.assertFalse(response.has_answerpool())
def test_0_answer_pool_4_choices_1_multiplechoiceresponse(self):
xml_str = textwrap.dedent("""
<problem>
<p>What is the correct answer?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="0">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="true" explanation-id="solution1">correct-1</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
<choice correct="true" explanation-id="solution2">correct-2</choice>
</choicegroup>
</multiplechoiceresponse>
<solutionset>
<solution explanation-id="solution1">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 1st solution</p>
<p>Not much to explain here, sorry!</p>
</div>
</solution>
<solution explanation-id="solution2">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 2nd solution</p>
</div>
</solution>
</solutionset>
</problem>
""")
problem = new_loncapa_problem(xml_str)
the_html = problem.get_html()
self.assertRegexpMatches(the_html, r"<div>.*\[.*'wrong-1'.*'wrong-2'.*'correct-1'.*'wrong-3'.*'wrong-4'.*'correct-2'.*\].*</div>")
self.assertRegexpMatches(the_html, r"<div>\{.*'1_solution_1'.*'1_solution_2'.*\}</div>")
response = problem.responders.values()[0]
self.assertFalse(response.has_mask())
self.assertFalse(response.has_answerpool())
def test_invalid_answer_pool_value(self):
xml_str = textwrap.dedent("""
<problem>
<p>What is the correct answer?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="2.3">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="true" explanation-id="solution1">correct-1</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
<choice correct="true" explanation-id="solution2">correct-2</choice>
</choicegroup>
</multiplechoiceresponse>
<solutionset>
<solution explanation-id="solution1">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 1st solution</p>
<p>Not much to explain here, sorry!</p>
</div>
</solution>
<solution explanation-id="solution2">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 2nd solution</p>
</div>
</solution>
</solutionset>
</problem>
""")
with self.assertRaisesRegexp(LoncapaProblemError, "answer-pool"):
new_loncapa_problem(xml_str)
def test_invalid_answer_pool_none_correct(self):
xml_str = textwrap.dedent("""
<problem>
<p>What is the correct answer?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="4">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="false">wrong!!</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""")
with self.assertRaisesRegexp(LoncapaProblemError, "1 correct.*1 incorrect"):
new_loncapa_problem(xml_str)
def test_invalid_answer_pool_all_correct(self):
xml_str = textwrap.dedent("""
<problem>
<p>What is the correct answer?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="4">
<choice correct="true">!wrong-1</choice>
<choice correct="true">!wrong-2</choice>
<choice correct="true">!wrong-3</choice>
<choice correct="true">!wrong-4</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""")
with self.assertRaisesRegexp(LoncapaProblemError, "1 correct.*1 incorrect"):
new_loncapa_problem(xml_str)
def test_answer_pool_5_choices_1_multiplechoiceresponse_seed1(self):
xml_str = textwrap.dedent("""
<problem>
<p>What is the correct answer?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="5">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="true" explanation-id="solution1">correct-1</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
<choice correct="true" explanation-id="solution2">correct-2</choice>
</choicegroup>
</multiplechoiceresponse>
<solutionset>
<solution explanation-id="solution1">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 1st solution</p>
<p>Not much to explain here, sorry!</p>
</div>
</solution>
<solution explanation-id="solution2">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 2nd solution</p>
</div>
</solution>
</solutionset>
</problem>
""")
problem = new_loncapa_problem(xml_str, seed=723)
the_html = problem.get_html()
self.assertRegexpMatches(the_html, r"<div>.*\[.*'correct-2'.*'wrong-1'.*'wrong-2'.*.*'wrong-3'.*'wrong-4'.*\].*</div>")
self.assertRegexpMatches(the_html, r"<div>\{.*'1_solution_2'.*\}</div>")
response = problem.responders.values()[0]
self.assertFalse(response.has_mask())
self.assertEqual(response.unmask_order(), ['choice_5', 'choice_0', 'choice_1', 'choice_3', 'choice_4'])
def test_answer_pool_2_multiplechoiceresponses_seed1(self):
xml_str = textwrap.dedent("""
<problem>
<p>What is the correct answer?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="4">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="true" explanation-id="solution1">correct-1</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
<choice correct="true" explanation-id="solution2">correct-2</choice>
</choicegroup>
</multiplechoiceresponse>
<solutionset>
<solution explanation-id="solution1">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 1st solution</p>
<p>Not much to explain here, sorry!</p>
</div>
</solution>
<solution explanation-id="solution2">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 2nd solution</p>
</div>
</solution>
</solutionset>
<p>What is the correct answer?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="3">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="true" explanation-id="solution1">correct-1</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
<choice correct="true" explanation-id="solution2">correct-2</choice>
</choicegroup>
</multiplechoiceresponse>
<solutionset>
<solution explanation-id="solution1">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 1st solution</p>
<p>Not much to explain here, sorry!</p>
</div>
</solution>
<solution explanation-id="solution2">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 2nd solution</p>
</div>
</solution>
</solutionset>
</problem>
""")
problem = new_loncapa_problem(xml_str)
the_html = problem.get_html()
str1 = r"<div>.*\[.*'wrong-3'.*'correct-2'.*'wrong-2'.*'wrong-4'.*\].*</div>"
str2 = r"<div>.*\[.*'wrong-2'.*'wrong-1'.*'correct-2'.*\].*</div>" # rng shared
# str2 = r"<div>.*\[.*'correct-2'.*'wrong-2'.*'wrong-3'.*\].*</div>" # rng independent
str3 = r"<div>\{.*'1_solution_2'.*\}</div>"
str4 = r"<div>\{.*'1_solution_4'.*\}</div>"
self.assertRegexpMatches(the_html, str1)
self.assertRegexpMatches(the_html, str2)
self.assertRegexpMatches(the_html, str3)
self.assertRegexpMatches(the_html, str4)
without_new_lines = the_html.replace("\n", "")
self.assertRegexpMatches(without_new_lines, str1 + r".*" + str2)
self.assertRegexpMatches(without_new_lines, str3 + r".*" + str4)
def test_answer_pool_2_multiplechoiceresponses_seed2(self):
xml_str = textwrap.dedent("""
<problem>
<p>What is the correct answer?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="3">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="true" explanation-id="solution1">correct-1</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
<choice correct="true" explanation-id="solution2">correct-2</choice>
</choicegroup>
</multiplechoiceresponse>
<solutionset>
<solution explanation-id="solution1">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 1st solution</p>
<p>Not much to explain here, sorry!</p>
</div>
</solution>
<solution explanation-id="solution2">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 2nd solution</p>
</div>
</solution>
</solutionset>
<p>What is the correct answer?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="4">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="true" explanation-id="solution1">correct-1</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
<choice correct="true" explanation-id="solution2">correct-2</choice>
</choicegroup>
</multiplechoiceresponse>
<solutionset>
<solution explanation-id="solution1">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 1st solution</p>
<p>Not much to explain here, sorry!</p>
</div>
</solution>
<solution explanation-id="solution2">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 2nd solution</p>
</div>
</solution>
</solutionset>
</problem>
""")
problem = new_loncapa_problem(xml_str, seed=9)
the_html = problem.get_html()
str1 = r"<div>.*\[.*'wrong-4'.*'wrong-3'.*'correct-1'.*\].*</div>"
str2 = r"<div>.*\[.*'wrong-2'.*'wrong-3'.*'wrong-4'.*'correct-2'.*\].*</div>"
str3 = r"<div>\{.*'1_solution_1'.*\}</div>"
str4 = r"<div>\{.*'1_solution_4'.*\}</div>"
self.assertRegexpMatches(the_html, str1)
self.assertRegexpMatches(the_html, str2)
self.assertRegexpMatches(the_html, str3)
self.assertRegexpMatches(the_html, str4)
without_new_lines = the_html.replace("\n", "")
self.assertRegexpMatches(without_new_lines, str1 + r".*" + str2)
self.assertRegexpMatches(without_new_lines, str3 + r".*" + str4)
def test_answer_pool_random_consistent(self):
"""
The point of this test is to make sure that the exact randomization
per seed does not change.
"""
xml_str = textwrap.dedent("""
<problem>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="2">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="true">correct-1</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
<choice correct="true">correct-2</choice>
<choice correct="true">correct-3</choice>
</choicegroup>
</multiplechoiceresponse>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="3">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="true">correct-1</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
<choice correct="true">correct-2</choice>
<choice correct="true">correct-3</choice>
</choicegroup>
</multiplechoiceresponse>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="2">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="true">correct-1</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
<choice correct="true">correct-2</choice>
<choice correct="true">correct-3</choice>
</choicegroup>
</multiplechoiceresponse>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="3">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="true">correct-1</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
<choice correct="true">correct-2</choice>
<choice correct="true">correct-3</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""")
problem = new_loncapa_problem(xml_str)
the_html = problem.get_html()
str1 = (r"<div>.*\[.*'correct-2'.*'wrong-2'.*\].*</div>.*" +
r"<div>.*\[.*'wrong-1'.*'correct-2'.*'wrong-4'.*\].*</div>.*" +
r"<div>.*\[.*'correct-1'.*'wrong-4'.*\].*</div>.*" +
r"<div>.*\[.*'wrong-1'.*'wrong-2'.*'correct-1'.*\].*</div>")
without_new_lines = the_html.replace("\n", "")
self.assertRegexpMatches(without_new_lines, str1)
def test_no_answer_pool(self):
xml_str = textwrap.dedent("""
<problem>
<p>What is the correct answer?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="true">correct-1</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""")
problem = new_loncapa_problem(xml_str, seed=723)
the_html = problem.get_html()
str1 = r"<div>.*\[.*'wrong-1'.*'wrong-2'.*'correct-1'.*'wrong-3'.*'wrong-4'.*\].*</div>"
self.assertRegexpMatches(the_html, str1)
# attributes *not* present
response = problem.responders.values()[0]
self.assertFalse(response.has_mask())
self.assertFalse(response.has_answerpool())
def test_answer_pool_and_no_answer_pool(self):
xml_str = textwrap.dedent("""
<problem>
<p>What is the correct answer?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="true">correct-1</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
</choicegroup>
</multiplechoiceresponse>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the solution</p>
<p>Not much to explain here, sorry!</p>
</div>
</solution>
<p>What is the correct answer?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="4">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="true" explanation-id="solution1">correct-1</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
<choice correct="true" explanation-id="solution2">correct-2</choice>
</choicegroup>
</multiplechoiceresponse>
<solutionset>
<solution explanation-id="solution1">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 1st solution</p>
<p>Not much to explain here, sorry!</p>
</div>
</solution>
<solution explanation-id="solution2">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 2nd solution</p>
</div>
</solution>
</solutionset>
</problem>
""")
problem = new_loncapa_problem(xml_str, seed=723)
the_html = problem.get_html()
str1 = r"<div>.*\[.*'wrong-1'.*'wrong-2'.*'correct-1'.*'wrong-3'.*'wrong-4'.*\].*</div>"
str2 = r"<div>.*\[.*'wrong-3'.*'correct-2'.*'wrong-2'.*'wrong-4'.*\].*</div>"
str3 = r"<div>\{.*'1_solution_1'.*\}</div>"
str4 = r"<div>\{.*'1_solution_3'.*\}</div>"
self.assertRegexpMatches(the_html, str1)
self.assertRegexpMatches(the_html, str2)
self.assertRegexpMatches(the_html, str3)
self.assertRegexpMatches(the_html, str4)
without_new_lines = the_html.replace("\n", "")
self.assertRegexpMatches(without_new_lines, str1 + r".*" + str2)
self.assertRegexpMatches(without_new_lines, str3 + r".*" + str4)
def test_answer_pool_without_solutionset(self):
xml_str = textwrap.dedent("""
<problem>
<p>What is the correct answer?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="4">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="true">correct-1</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
<choice correct="true">correct-2</choice>
</choicegroup>
</multiplechoiceresponse>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the solution</p>
<p>Not much to explain here, sorry!</p>
</div>
</solution>
</problem>
""")
problem = new_loncapa_problem(xml_str, seed=723)
the_html = problem.get_html()
self.assertRegexpMatches(the_html, r"<div>.*\[.*'wrong-3'.*'correct-2'.*'wrong-2'.*'wrong-4'.*\].*</div>")
self.assertRegexpMatches(the_html, r"<div>\{.*'1_solution_1'.*\}</div>")
|
kaushik94/boto | refs/heads/develop | boto/cloudfront/logging.py | 219 | # Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class LoggingInfo(object):
def __init__(self, bucket='', prefix=''):
self.bucket = bucket
self.prefix = prefix
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'Bucket':
self.bucket = value
elif name == 'Prefix':
self.prefix = value
else:
setattr(self, name, value)
|
home-assistant/home-assistant | refs/heads/dev | tests/components/ecobee/test_util.py | 9 | """Tests for the ecobee.util module."""
import pytest
import voluptuous as vol
from homeassistant.components.ecobee.util import ecobee_date, ecobee_time
async def test_ecobee_date_with_valid_input():
"""Test that the date function returns the expected result."""
test_input = "2019-09-27"
assert ecobee_date(test_input) == test_input
async def test_ecobee_date_with_invalid_input():
"""Test that the date function raises the expected exception."""
test_input = "20190927"
with pytest.raises(vol.Invalid):
ecobee_date(test_input)
async def test_ecobee_time_with_valid_input():
"""Test that the time function returns the expected result."""
test_input = "20:55:15"
assert ecobee_time(test_input) == test_input
async def test_ecobee_time_with_invalid_input():
"""Test that the time function raises the expected exception."""
test_input = "20:55"
with pytest.raises(vol.Invalid):
ecobee_time(test_input)
|
muntasirsyed/intellij-community | refs/heads/master | python/testData/highlighting/returnWithArgumentsInGenerator.py | 32 | def <info descr="null">f</info>():
yield 42
<error descr="Python versions < 3.3 do not allow 'return' with argument inside generator.">return 28</error>
|
juliarizza/script-amigo-secreto | refs/heads/master | amigo_secreto.py | 1 | # -*- coding: utf-8 -*-
import os
from sys import platform
from random import randint
## Inicializa a lista de amigos que vão participar do sorteio
amigos = []
## Preenche a lista de amigos que vão participar do sorteio
qtd_amigos = int(input("Quantos amigos seu grupo possui? "))
for i in range(qtd_amigos):
amigo = input("Qual o nome do {0}º amigo? ".format(i+1))
amigos.append({'id': i+1, 'nome': amigo})
## Inicializa a lista onde ficará o resultado do sorteio
sorteio = []
def sortear(sorteando, amigos, sorteados, sorteio, contador):
## Verifica se a quantidade de chamadas recursivas não está próxima
## de ultrapassar a quantidade máxima
## Se estiver, retornamos False para recomeçar o sorteio
contador += 1
if contador > 900:
return False
## Sorteia um amigo
sorteado = amigos[randint(0,qtd_amigos-1)]
## Verifica se o amigo sorteado já não foi sorteado por outro
requisito_1 = (sorteado['id'] in sorteados)
## Verifica se o amigo sorteado já não sorteou quem o está sorteando
## Só evita aquelas coisas chatas de um sair com o outro e o outro com o um
## É opcional, você pode remover :)
requisito_2 = ([x for x in sorteio if x['sorteante'] == sorteando['id'] and \
x['sorteado'] == sorteando['id']])
## Verifica se quem sorteia não sorteou ele mesmo
requisito_3 = (sorteado['id'] == sorteando['id'])
if (requisito_1 or requisito_2 or requisito_3):
## Se qualquer um dos requisitos acima for verdadeiro
## realiza-se o sorteio novamente até que encontre um resultado satisfatório
sortear(sorteando, amigos, sorteados, sorteio, contador)
else:
## Se não, adicionamos o resultado do sorteio na lista de resultados
sorteio.append({'sorteante': sorteando['id'], 'sorteado':sorteado['id']})
return True
## Enquanto a função sortear retornar False e não tiver um sorteio satisfatório
## o sorteio será realizado novamente
while len(sorteio) != qtd_amigos:
sorteio = []
for rodada in range(qtd_amigos):
## O sorteio é feito um por um e sempre conferido
sorteados = [x['sorteado'] for x in sorteio]
## Contador de chamadas recursivas
contador = 0
sortear(amigos[rodada], amigos, sorteados, sorteio, contador)
## Abre arquivo txt de resultado e escreve "Resultado do sorteio nele"
file = open("resultado.txt", "w")
file.write("Resultado do sorteio: \n")
## Divulga o resultado do sorteio
for rodada in sorteio:
for amigo in amigos:
if rodada['sorteante'] == amigo['id']:
sorteante = amigo['nome']
elif rodada['sorteado'] == amigo['id']:
sorteado = amigo['nome']
## Sempre que um novo resultado for exibido, a tela da linha de comando é
## limpa de forma que o próximo amigo não veja o sorteado pelo anterior
## Não queremos estragar a surpresa né ;)
if platform == 'linux2' or platform == 'darwin' or platform == 'linux':
os.system("clear")
elif platform == 'win32' or platform == 'cygwin':
os.system("cls")
input("Por favor, chame o amigo {0} e pressione ENTER para ver quem ele sorteou.".format(sorteante))
input("Você sorteou o amigo: {0}\n\nPressione ENTER para continuar.".format(sorteado))
## Escreve no arquivo resultado.txt "Fulando sorteou Ciclano"
file.write("{0} sorteou {1}\n" .format(sorteante, sorteado))
## Fecha o arquivo resultado.txt
file.close()
print("Sorteio encerrado. Divirta-se!")
|
bionoid/kivy | refs/heads/master | kivy/tests/test_window_info.py | 8 | from kivy.tests.common import GraphicUnitTest
from kivy import setupconfig
class WindowInfoTest(GraphicUnitTest):
def test_window_info_nonzero(self):
from kivy.core.window import Window
window_info = Window.get_window_info()
if window_info is None:
return
if setupconfig.USE_X11:
from kivy.core.window.window_info import WindowInfoX11
if isinstance(window_info, WindowInfoX11):
self.assertNotEqual(window_info.display, 0)
self.assertNotEqual(window_info.window, 0)
if setupconfig.USE_WAYLAND:
from kivy.core.window.window_info import WindowInfoWayland
if isinstance(window_info, WindowInfoWayland):
self.assertNotEqual(window_info.display, 0)
self.assertNotEqual(window_info.surface, 0)
self.assertNotEqual(window_info.shell_surface, 0)
if setupconfig.PLATFORM == 'win32':
from kivy.core.window.window_info import WindowInfoWindows
if isinstance(window_info, WindowInfoWindows):
self.assertNotEqual(window_info.HWND, 0)
self.assertNotEqual(window_info.HDC, 0)
|
badlogicmanpreet/nupic | refs/heads/master | examples/opf/clients/hotgym/anomaly/one_gym/remove_tuesdays.py | 15 | import csv
import shutil
import datetime
ORIGINAL = "rec-center-hourly.csv"
BACKUP = "rec-center-hourly-backup.csv"
DATE_FORMAT = "%m/%d/%y %H:%M"
def isTuesday(date):
return date.weekday() is 1
def withinOctober(date):
return datetime.datetime(2010, 10, 1) <= date < datetime.datetime(2010, 11, 1)
def run():
# Backup original
shutil.copyfile(ORIGINAL, BACKUP)
with open(ORIGINAL, 'rb') as inputFile:
reader = csv.reader(inputFile)
outputCache = ""
headers = reader.next()
types = reader.next()
flags = reader.next()
for row in [headers, types, flags]:
outputCache += ",".join(row) + "\n"
for row in reader:
dateString = row[0]
date = datetime.datetime.strptime(dateString, DATE_FORMAT)
consumption = float(row[1])
if isTuesday(date) and withinOctober(date):
consumption = 5.0
outputCache += "%s,%f\n" % (dateString, consumption)
with open(ORIGINAL, 'wb') as outputFile:
outputFile.write(outputCache)
if __name__ == "__main__":
run()
|
gskywolf/Firefly-RK3288-Kernel | refs/heads/master | tools/perf/scripts/python/net_dropmonitor.py | 2669 | # Monitor the system for dropped packets and proudce a report of drop locations and counts
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
drop_log = {}
kallsyms = []
def get_kallsyms_table():
global kallsyms
try:
f = open("/proc/kallsyms", "r")
except:
return
for line in f:
loc = int(line.split()[0], 16)
name = line.split()[2]
kallsyms.append((loc, name))
kallsyms.sort()
def get_sym(sloc):
loc = int(sloc)
# Invariant: kallsyms[i][0] <= loc for all 0 <= i <= start
# kallsyms[i][0] > loc for all end <= i < len(kallsyms)
start, end = -1, len(kallsyms)
while end != start + 1:
pivot = (start + end) // 2
if loc < kallsyms[pivot][0]:
end = pivot
else:
start = pivot
# Now (start == -1 or kallsyms[start][0] <= loc)
# and (start == len(kallsyms) - 1 or loc < kallsyms[start + 1][0])
if start >= 0:
symloc, name = kallsyms[start]
return (name, loc - symloc)
else:
return (None, 0)
def print_drop_table():
print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
for i in drop_log.keys():
(sym, off) = get_sym(i)
if sym == None:
sym = i
print "%25s %25s %25s" % (sym, off, drop_log[i])
def trace_begin():
print "Starting trace (Ctrl-C to dump results)"
def trace_end():
print "Gathering kallsyms data"
get_kallsyms_table()
print_drop_table()
# called from perf, when it finds a correspoinding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, location, protocol):
slocation = str(location)
try:
drop_log[slocation] = drop_log[slocation] + 1
except:
drop_log[slocation] = 1
|
martinling/imusim | refs/heads/master | imusim/utilities/trees.py | 3 | """
Support for basic tree structures.
"""
# Copyright (C) 2009-2011 University of Edinburgh
#
# This file is part of IMUSim.
#
# IMUSim is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# IMUSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with IMUSim. If not, see <http://www.gnu.org/licenses/>.
class TreeNode(object):
    """
    A node in a tree structure.

    @ivar parent: The parent node in the tree structure.
    @ivar children: List of child nodes.
    """

    def __init__(self, parent):
        """
        Construct a new L{TreeNode}.

        If parent is not `None`, the node is linked into the tree
        immediately: assigning L{parent} appends it to the parent's
        children.

        @param parent: The parent of this node, or `None` for the
            root of a tree.
        """
        self.parent = parent
        self.children = []

    @property
    def parent(self):
        """
        The parent node of this node.

        Assigning to this property re-links the node: it is removed
        from the children of any previous parent and appended to the
        children of the new one.
        """
        return self._parent

    @parent.setter
    def parent(self, newParent):
        oldParent = getattr(self, '_parent', None)
        if oldParent is not None:
            oldParent.children.remove(self)
        if newParent is not None:
            newParent.children.append(self)
        self._parent = newParent

    @property
    def hasParent(self):
        """
        Whether this node has a parent.
        """
        return self.parent is not None

    @property
    def hasChildren(self):
        """
        Whether this node has any children.
        """
        return len(self.children) > 0

    @property
    def root(self):
        """
        The root node of the tree structure this node is part of.
        """
        node = self
        while node.hasParent:
            node = node.parent
        return node

    @property
    def depth(self):
        """
        Number of edges between this node and the root (0 for a root).
        """
        if not self.hasParent:
            return 0
        return self.parent.depth + 1

    def ascendTree(self):
        """
        Generator returning the nodes of the tree encountered while
        ascending from the current node to the root node.

        E.g. for the tree::

                A
               / \\
              B   C
             / \\   \\
            D   E   F

        D.ascendTree() would return a generator yielding D, B, A.

        @return: A generator that returns the nodes of the tree
            encountered while ascending from the current node to the
            root.
        """
        node = self
        while node is not None:
            yield node
            node = node.parent

    def preorderTraversal(self, preFunc=None, postFunc=None,
                          condition=lambda p: True):
        """
        Perform a pre-order depth-first traversal of the tree structure.

        The traversal starts from this node.  A node failing
        *condition* is not yielded and gets no callbacks, but its
        children are still visited.

        @param preFunc: Function to call before traversing to the next node.
        @param postFunc: Function to call after traversing the child nodes.
        @param condition: Condition that nodes must meet to be included.

        @return: A generator that returns the nodes of the (sub-)tree
            rooted at this joint, that meet the given condition, in
            depth first pre-order.
        """
        included = condition(self)
        if included:
            if preFunc is not None:
                preFunc(self)
            yield self
        for child in self.children:
            for descendant in child.preorderTraversal(preFunc,
                    postFunc, condition):
                yield descendant
        if included and postFunc is not None:
            postFunc(self)

    def __iter__(self):
        return self.preorderTraversal()
|
wendellpbarreto/gastroprocto | refs/heads/master | gastroprocto/apps/accounts/models.py | 10644 | from django.db import models
# Create your models here.
|
RayMick/SFrame | refs/heads/master | cxxtest/admin/virtualenv_1.7.py | 77 | #!/usr/bin/env python
"""Create a "virtual" Python installation
"""
# If you change the version here, change it in setup.py
# and docs/conf.py as well.
virtualenv_version = "1.7"
import base64
import sys
import os
import optparse
import re
import shutil
import logging
import tempfile
import zlib
import errno
import distutils.sysconfig
from distutils.util import strtobool
try:
import subprocess
except ImportError:
if sys.version_info <= (2, 3):
print('ERROR: %s' % sys.exc_info()[1])
print('ERROR: this script requires Python 2.4 or greater; or at least the subprocess module.')
print('If you copy subprocess.py from a newer version of Python this script will probably work')
sys.exit(101)
else:
raise
try:
set
except NameError:
from sets import Set as set
try:
basestring
except NameError:
basestring = str
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
join = os.path.join
py_version = 'python%s.%s' % (sys.version_info[0], sys.version_info[1])
is_jython = sys.platform.startswith('java')
is_pypy = hasattr(sys, 'pypy_version_info')
is_win = (sys.platform == 'win32')
abiflags = getattr(sys, 'abiflags', '')
user_dir = os.path.expanduser('~')
if sys.platform == 'win32':
user_dir = os.environ.get('APPDATA', user_dir) # Use %APPDATA% for roaming
default_storage_dir = os.path.join(user_dir, 'virtualenv')
else:
default_storage_dir = os.path.join(user_dir, '.virtualenv')
default_config_file = os.path.join(default_storage_dir, 'virtualenv.ini')
if is_pypy:
expected_exe = 'pypy'
elif is_jython:
expected_exe = 'jython'
else:
expected_exe = 'python'
REQUIRED_MODULES = ['os', 'posix', 'posixpath', 'nt', 'ntpath', 'genericpath',
'fnmatch', 'locale', 'encodings', 'codecs',
'stat', 'UserDict', 'readline', 'copy_reg', 'types',
're', 'sre', 'sre_parse', 'sre_constants', 'sre_compile',
'zlib']
REQUIRED_FILES = ['lib-dynload', 'config']
majver, minver = sys.version_info[:2]
if majver == 2:
if minver >= 6:
REQUIRED_MODULES.extend(['warnings', 'linecache', '_abcoll', 'abc'])
if minver >= 7:
REQUIRED_MODULES.extend(['_weakrefset'])
if minver <= 3:
REQUIRED_MODULES.extend(['sets', '__future__'])
elif majver == 3:
# Some extra modules are needed for Python 3, but different ones
# for different versions.
REQUIRED_MODULES.extend(['_abcoll', 'warnings', 'linecache', 'abc', 'io',
'_weakrefset', 'copyreg', 'tempfile', 'random',
'__future__', 'collections', 'keyword', 'tarfile',
'shutil', 'struct', 'copy'])
if minver >= 2:
REQUIRED_FILES[-1] = 'config-%s' % majver
if minver == 3:
# The whole list of 3.3 modules is reproduced below - the current
# uncommented ones are required for 3.3 as of now, but more may be
# added as 3.3 development continues.
REQUIRED_MODULES.extend([
#"aifc",
#"antigravity",
#"argparse",
#"ast",
#"asynchat",
#"asyncore",
"base64",
#"bdb",
#"binhex",
"bisect",
#"calendar",
#"cgi",
#"cgitb",
#"chunk",
#"cmd",
#"codeop",
#"code",
#"colorsys",
#"_compat_pickle",
#"compileall",
#"concurrent",
#"configparser",
#"contextlib",
#"cProfile",
#"crypt",
#"csv",
#"ctypes",
#"curses",
#"datetime",
#"dbm",
#"decimal",
#"difflib",
#"dis",
#"doctest",
#"dummy_threading",
"_dummy_thread",
#"email",
#"filecmp",
#"fileinput",
#"formatter",
#"fractions",
#"ftplib",
#"functools",
#"getopt",
#"getpass",
#"gettext",
#"glob",
#"gzip",
"hashlib",
"heapq",
"hmac",
#"html",
#"http",
#"idlelib",
#"imaplib",
#"imghdr",
#"importlib",
#"inspect",
#"json",
#"lib2to3",
#"logging",
#"macpath",
#"macurl2path",
#"mailbox",
#"mailcap",
#"_markupbase",
#"mimetypes",
#"modulefinder",
#"multiprocessing",
#"netrc",
#"nntplib",
#"nturl2path",
#"numbers",
#"opcode",
#"optparse",
#"os2emxpath",
#"pdb",
#"pickle",
#"pickletools",
#"pipes",
#"pkgutil",
#"platform",
#"plat-linux2",
#"plistlib",
#"poplib",
#"pprint",
#"profile",
#"pstats",
#"pty",
#"pyclbr",
#"py_compile",
#"pydoc_data",
#"pydoc",
#"_pyio",
#"queue",
#"quopri",
"reprlib",
"rlcompleter",
#"runpy",
#"sched",
#"shelve",
#"shlex",
#"smtpd",
#"smtplib",
#"sndhdr",
#"socket",
#"socketserver",
#"sqlite3",
#"ssl",
#"stringprep",
#"string",
#"_strptime",
#"subprocess",
#"sunau",
#"symbol",
#"symtable",
#"sysconfig",
#"tabnanny",
#"telnetlib",
#"test",
#"textwrap",
#"this",
#"_threading_local",
#"threading",
#"timeit",
#"tkinter",
#"tokenize",
#"token",
#"traceback",
#"trace",
#"tty",
#"turtledemo",
#"turtle",
#"unittest",
#"urllib",
#"uuid",
#"uu",
#"wave",
"weakref",
#"webbrowser",
#"wsgiref",
#"xdrlib",
#"xml",
#"xmlrpc",
#"zipfile",
])
if is_pypy:
# these are needed to correctly display the exceptions that may happen
# during the bootstrap
REQUIRED_MODULES.extend(['traceback', 'linecache'])
class Logger(object):
    """
    Logging object for use in command-line script.  Allows ranges of
    levels, to avoid some redundancy of displayed information.

    Consumers are (level, consumer) pairs where the consumer is either
    a file-like object (anything with a ``write`` method) or a callable
    taking the rendered message.
    """

    DEBUG = logging.DEBUG
    INFO = logging.INFO
    NOTIFY = (logging.INFO+logging.WARN)/2
    WARN = WARNING = logging.WARN
    ERROR = logging.ERROR
    FATAL = logging.FATAL

    LEVELS = [DEBUG, INFO, NOTIFY, WARN, ERROR, FATAL]

    def __init__(self, consumers):
        self.consumers = consumers
        self.indent = 0
        self.in_progress = None
        self.in_progress_hanging = False

    def debug(self, msg, *args, **kw):
        self.log(self.DEBUG, msg, *args, **kw)
    def info(self, msg, *args, **kw):
        self.log(self.INFO, msg, *args, **kw)
    def notify(self, msg, *args, **kw):
        self.log(self.NOTIFY, msg, *args, **kw)
    def warn(self, msg, *args, **kw):
        self.log(self.WARN, msg, *args, **kw)
    def error(self, msg, *args, **kw):
        # BUG FIX: this previously logged at self.WARN, which silently
        # dropped error messages for consumers registered at ERROR level.
        self.log(self.ERROR, msg, *args, **kw)
    def fatal(self, msg, *args, **kw):
        self.log(self.FATAL, msg, *args, **kw)

    def log(self, level, msg, *args, **kw):
        """Render msg (with %-args or %-kwargs) and dispatch it to every
        consumer whose registered level/range matches *level*."""
        if args:
            if kw:
                raise TypeError(
                    "You may give positional or keyword arguments, not both")
        args = args or kw
        rendered = None
        for consumer_level, consumer in self.consumers:
            if self.level_matches(level, consumer_level):
                # If a progress line is pending on stdout, terminate it
                # before writing a regular message to the console.
                if (self.in_progress_hanging
                    and consumer in (sys.stdout, sys.stderr)):
                    self.in_progress_hanging = False
                    sys.stdout.write('\n')
                    sys.stdout.flush()
                if rendered is None:
                    if args:
                        rendered = msg % args
                    else:
                        rendered = msg
                    rendered = ' '*self.indent + rendered
                if hasattr(consumer, 'write'):
                    consumer.write(rendered+'\n')
                else:
                    consumer(rendered)

    def start_progress(self, msg):
        """Begin a dotted progress line on stdout (ended by end_progress)."""
        assert not self.in_progress, (
            "Tried to start_progress(%r) while in_progress %r"
            % (msg, self.in_progress))
        if self.level_matches(self.NOTIFY, self._stdout_level()):
            sys.stdout.write(msg)
            sys.stdout.flush()
            self.in_progress_hanging = True
        else:
            self.in_progress_hanging = False
        self.in_progress = msg

    def end_progress(self, msg='done.'):
        """Finish the current progress line started by start_progress."""
        assert self.in_progress, (
            "Tried to end_progress without start_progress")
        if self.stdout_level_matches(self.NOTIFY):
            if not self.in_progress_hanging:
                # Some message has been printed out since start_progress
                sys.stdout.write('...' + self.in_progress + msg + '\n')
                sys.stdout.flush()
            else:
                sys.stdout.write(msg + '\n')
                sys.stdout.flush()
        self.in_progress = None
        self.in_progress_hanging = False

    def show_progress(self):
        """If we are in a progress scope, and no log messages have been
        shown, write out another '.'"""
        if self.in_progress_hanging:
            sys.stdout.write('.')
            sys.stdout.flush()

    def stdout_level_matches(self, level):
        """Returns true if a message at this level will go to stdout"""
        return self.level_matches(level, self._stdout_level())

    def _stdout_level(self):
        """Returns the level that stdout runs at"""
        for level, consumer in self.consumers:
            if consumer is sys.stdout:
                return level
        return self.FATAL

    def level_matches(self, level, consumer_level):
        """
        Return whether *level* (an int, or a slice expressing a range)
        should be delivered to a consumer registered at *consumer_level*.

        >>> l = Logger([])
        >>> l.level_matches(3, 4)
        False
        >>> l.level_matches(3, 2)
        True
        >>> l.level_matches(slice(None, 3), 3)
        False
        >>> l.level_matches(slice(None, 3), 2)
        True
        >>> l.level_matches(slice(1, 3), 1)
        True
        >>> l.level_matches(slice(2, 3), 1)
        False
        """
        if isinstance(level, slice):
            start, stop = level.start, level.stop
            if start is not None and start > consumer_level:
                return False
            if stop is not None and stop <= consumer_level:
                return False
            return True
        else:
            return level >= consumer_level

    #@classmethod
    def level_for_integer(cls, level):
        """Clamp an integer verbosity index into the LEVELS table."""
        levels = cls.LEVELS
        if level < 0:
            return levels[0]
        if level >= len(levels):
            return levels[-1]
        return levels[level]

    level_for_integer = classmethod(level_for_integer)
# create a silent logger just to prevent this from being undefined
# will be overridden with requested verbosity main() is called.
logger = Logger([(Logger.LEVELS[-1], sys.stdout)])
def mkdir(path):
    """Create *path* (including parents) unless it already exists."""
    if os.path.exists(path):
        logger.info('Directory %s already exists', path)
    else:
        logger.info('Creating %s', path)
        os.makedirs(path)
def copyfileordir(src, dest):
    """Copy *src* to *dest*: directories recursively (keeping symlinks
    as symlinks, via copytree's third argument), files with metadata."""
    if not os.path.isdir(src):
        shutil.copy2(src, dest)
    else:
        shutil.copytree(src, dest, True)
def copyfile(src, dest, symlink=True):
    """Place *src* at *dest*, preferring a symlink when *symlink* is true
    and the platform supports it; falls back to a real copy.

    No-ops (with a log message) when *src* is missing or *dest* already
    exists.  Parent directories of *dest* are created as needed.
    """
    if not os.path.exists(src):
        # Some bad symlink in the src
        logger.warn('Cannot find file %s (bad symlink)', src)
        return
    if os.path.exists(dest):
        logger.debug('File %s already exists', dest)
        return
    if not os.path.exists(os.path.dirname(dest)):
        logger.info('Creating parent directories for %s' % os.path.dirname(dest))
        os.makedirs(os.path.dirname(dest))
    # For a symlinked source, link to its target rather than to the link,
    # so the new environment does not depend on the intermediate link.
    if not os.path.islink(src):
        srcpath = os.path.abspath(src)
    else:
        srcpath = os.readlink(src)
    if symlink and hasattr(os, 'symlink'):
        logger.info('Symlinking %s', dest)
        try:
            os.symlink(srcpath, dest)
        except (OSError, NotImplementedError):
            # e.g. filesystems/platforms without symlink support.
            logger.info('Symlinking failed, copying to %s', dest)
            copyfileordir(src, dest)
    else:
        logger.info('Copying to %s', dest)
        copyfileordir(src, dest)
def writefile(dest, content, overwrite=True):
    """Write text *content* (stored as UTF-8) to *dest*.

    If the file already exists with identical content nothing is
    written; if it differs, it is overwritten unless *overwrite* is
    false, in which case a notice is logged and the file is left alone.
    """
    # Encode once up front.  The existing file is read back as bytes,
    # so the comparison below must be bytes-vs-bytes: the original code
    # compared raw bytes against the text `content`, which is always
    # unequal on Python 3 and caused spurious "Overwriting" rewrites.
    encoded = content.encode('utf-8')
    if not os.path.exists(dest):
        logger.info('Writing %s', dest)
        f = open(dest, 'wb')
        f.write(encoded)
        f.close()
        return
    f = open(dest, 'rb')
    c = f.read()
    f.close()
    if c == encoded:
        logger.info('Content %s already in place', dest)
        return
    if not overwrite:
        logger.notify('File %s exists with different content; not overwriting', dest)
        return
    logger.notify('Overwriting %s with new content', dest)
    f = open(dest, 'wb')
    f.write(encoded)
    f.close()
def rmtree(dir):
    """Recursively delete *dir* if it exists; otherwise just log."""
    if not os.path.exists(dir):
        logger.info('Do not need to delete %s; already gone', dir)
        return
    logger.notify('Deleting tree %s', dir)
    shutil.rmtree(dir)
def make_exe(fn):
    """Add read/execute permission bits (0o555) for everyone on *fn*."""
    if not hasattr(os, 'chmod'):
        # Platform without chmod support: nothing to do.
        return
    oldmode = os.stat(fn).st_mode & 0xFFF   # 0xFFF == 0o7777: permission bits only
    newmode = (oldmode | 0x16D) & 0xFFF     # 0x16D == 0o555 (r-xr-xr-x)
    os.chmod(fn, newmode)
    logger.info('Changed mode of %s to %s', fn, oct(newmode))
def _find_file(filename, dirs):
    """Return the path of *filename* inside the first directory of *dirs*
    that contains it, or *filename* unchanged when none does."""
    for directory in dirs:
        candidate = join(directory, filename)
        if os.path.exists(candidate):
            return candidate
    return filename
def _install_req(py_executable, unzip=False, distribute=False,
                 search_dirs=None, never_download=False):
    """Install setuptools or distribute into the new environment.

    Runs the appropriate bootstrap script (EZ_SETUP_PY or
    DISTRIBUTE_SETUP_PY, embedded elsewhere in this file) under
    *py_executable*, preferring a locally bundled egg/tarball found via
    *search_dirs*; falls back to downloading unless *never_download* is
    set, in which case the process exits with status 1.
    """
    if search_dirs is None:
        search_dirs = file_search_dirs()
    if not distribute:
        setup_fn = 'setuptools-0.6c11-py%s.egg' % sys.version[:3]
        project_name = 'setuptools'
        bootstrap_script = EZ_SETUP_PY
        source = None
    else:
        setup_fn = None
        source = 'distribute-0.6.24.tar.gz'
        project_name = 'distribute'
        bootstrap_script = DISTRIBUTE_SETUP_PY
    if setup_fn is not None:
        setup_fn = _find_file(setup_fn, search_dirs)
    if source is not None:
        source = _find_file(source, search_dirs)
    if is_jython and os._name == 'nt':
        # Jython's .bat sys.executable can't handle a command line
        # argument with newlines
        # NOTE(review): os.write with a str bootstrap_script presumably
        # targets Python 2 here — confirm if Jython/py3 is a concern.
        fd, ez_setup = tempfile.mkstemp('.py')
        os.write(fd, bootstrap_script)
        os.close(fd)
        cmd = [py_executable, ez_setup]
    else:
        cmd = [py_executable, '-c', bootstrap_script]
    if unzip:
        cmd.append('--always-unzip')
    env = {}
    remove_from_env = []
    if logger.stdout_level_matches(logger.DEBUG):
        cmd.append('-v')
    old_chdir = os.getcwd()
    if setup_fn is not None and os.path.exists(setup_fn):
        # Local setuptools egg: put it on PYTHONPATH so the bootstrap
        # script can install from it directly.
        logger.info('Using existing %s egg: %s' % (project_name, setup_fn))
        cmd.append(setup_fn)
        if os.environ.get('PYTHONPATH'):
            env['PYTHONPATH'] = setup_fn + os.path.pathsep + os.environ['PYTHONPATH']
        else:
            env['PYTHONPATH'] = setup_fn
    else:
        # the source is found, let's chdir
        if source is not None and os.path.exists(source):
            logger.info('Using existing %s egg: %s' % (project_name, source))
            os.chdir(os.path.dirname(source))
            # in this case, we want to be sure that PYTHONPATH is unset (not
            # just empty, really unset), else CPython tries to import the
            # site.py that it's in virtualenv_support
            remove_from_env.append('PYTHONPATH')
        else:
            if never_download:
                logger.fatal("Can't find any local distributions of %s to install "
                             "and --never-download is set. Either re-run virtualenv "
                             "without the --never-download option, or place a %s "
                             "distribution (%s) in one of these "
                             "locations: %r" % (project_name, project_name,
                                                setup_fn or source,
                                                search_dirs))
                sys.exit(1)
            logger.info('No %s egg found; downloading' % project_name)
            cmd.extend(['--always-copy', '-U', project_name])
    logger.start_progress('Installing %s...' % project_name)
    logger.indent += 2
    cwd = None
    if project_name == 'distribute':
        env['DONT_PATCH_SETUPTOOLS'] = 'true'

    def _filter_ez_setup(line):
        # Demote bootstrap-script noise to the right log level.
        return filter_ez_setup(line, project_name)

    if not os.access(os.getcwd(), os.W_OK):
        cwd = tempfile.mkdtemp()
        if source is not None and os.path.exists(source):
            # the current working dir is hostile, let's copy the
            # tarball to a temp dir
            target = os.path.join(cwd, os.path.split(source)[-1])
            shutil.copy(source, target)
    try:
        call_subprocess(cmd, show_stdout=False,
                        filter_stdout=_filter_ez_setup,
                        extra_env=env,
                        remove_from_env=remove_from_env,
                        cwd=cwd)
    finally:
        # Always restore indentation/progress and the original cwd,
        # and clean up the Jython temp script.
        logger.indent -= 2
        logger.end_progress()
        if os.getcwd() != old_chdir:
            os.chdir(old_chdir)
        if is_jython and os._name == 'nt':
            os.remove(ez_setup)
def file_search_dirs():
    """Directories to search for bundled support distributions.

    Candidates are the current directory, this script's directory, a
    'virtualenv_support' directory next to it, and (when this file is
    not itself the virtualenv module) the support directory of an
    installed virtualenv package.  Only existing directories are
    returned.
    """
    here = os.path.dirname(os.path.abspath(__file__))
    candidates = ['.', here, join(here, 'virtualenv_support')]
    if os.path.splitext(os.path.dirname(__file__))[0] != 'virtualenv':
        # Probably some boot script; just in case virtualenv is installed...
        try:
            import virtualenv
        except ImportError:
            pass
        else:
            support = os.path.join(os.path.dirname(virtualenv.__file__),
                                   'virtualenv_support')
            candidates.append(support)
    existing = []
    for candidate in candidates:
        if os.path.isdir(candidate):
            existing.append(candidate)
    return existing
def install_setuptools(py_executable, unzip=False,
                       search_dirs=None, never_download=False):
    """Install setuptools into the environment at *py_executable*
    (thin wrapper around _install_req with distribute disabled)."""
    _install_req(py_executable, unzip,
                 search_dirs=search_dirs, never_download=never_download)
def install_distribute(py_executable, unzip=False,
                       search_dirs=None, never_download=False):
    """Install distribute into the environment at *py_executable*
    (thin wrapper around _install_req with distribute enabled)."""
    _install_req(py_executable, unzip, distribute=True,
                 search_dirs=search_dirs, never_download=never_download)
_pip_re = re.compile(r'^pip-.*(zip|tar.gz|tar.bz2|tgz|tbz)$', re.I)
def install_pip(py_executable, search_dirs=None, never_download=False):
    """Install pip into the new environment via its easy_install script.

    Prefers a local pip source archive found in *search_dirs* (matched
    by the module-level _pip_re pattern); otherwise installs 'pip' from
    the network unless *never_download* is set, in which case the
    process exits with status 1.
    """
    if search_dirs is None:
        search_dirs = file_search_dirs()
    filenames = []
    for dir in search_dirs:
        filenames.extend([join(dir, fn) for fn in os.listdir(dir)
                          if _pip_re.search(fn)])
    # Sort case-insensitively by basename (decorate-sort-undecorate) and
    # take the last entry, i.e. the highest version string.
    filenames = [(os.path.basename(filename).lower(), i, filename) for i, filename in enumerate(filenames)]
    filenames.sort()
    filenames = [filename for basename, i, filename in filenames]
    if not filenames:
        filename = 'pip'
    else:
        filename = filenames[-1]
    easy_install_script = 'easy_install'
    if sys.platform == 'win32':
        easy_install_script = 'easy_install-script.py'
    cmd = [join(os.path.dirname(py_executable), easy_install_script), filename]
    if sys.platform == 'win32':
        # On Windows the script stub must be run through the interpreter.
        cmd.insert(0, py_executable)
    if filename == 'pip':
        if never_download:
            logger.fatal("Can't find any local distributions of pip to install "
                         "and --never-download is set. Either re-run virtualenv "
                         "without the --never-download option, or place a pip "
                         "source distribution (zip/tar.gz/tar.bz2) in one of these "
                         "locations: %r" % search_dirs)
            sys.exit(1)
        logger.info('Installing pip from network...')
    else:
        logger.info('Installing existing %s distribution: %s' % (
                os.path.basename(filename), filename))
    logger.start_progress('Installing pip...')
    logger.indent += 2

    def _filter_setup(line):
        return filter_ez_setup(line, 'pip')

    try:
        call_subprocess(cmd, show_stdout=False,
                        filter_stdout=_filter_setup)
    finally:
        logger.indent -= 2
        logger.end_progress()
def filter_ez_setup(line, project_name='setuptools'):
    """Classify a line of bootstrap-script output for logging.

    Returns Logger.DEBUG for routine/noise lines and Logger.INFO for
    lines worth surfacing while setuptools/distribute/pip installs.
    """
    if not line.strip():
        return Logger.DEBUG
    if project_name == 'distribute':
        # The original scanned a prefix list here, but every code path
        # returned Logger.DEBUG regardless — all distribute output is
        # demoted to debug level, so the loop was dead code.
        return Logger.DEBUG
    for prefix in ['Reading ', 'Best match', 'Processing setuptools',
                   'Copying setuptools', 'Adding setuptools',
                   'Installing ', 'Installed ']:
        if line.startswith(prefix):
            return Logger.DEBUG
    return Logger.INFO
class UpdatingDefaultsHelpFormatter(optparse.IndentedHelpFormatter):
    """
    Custom help formatter for use in ConfigOptionParser that updates
    the defaults before expanding them, allowing them to show up correctly
    in the help listing
    """
    def expand_default(self, option):
        # Refresh defaults from config files / environment so %default
        # placeholders in help text reflect the effective values.
        if self.parser is not None:
            self.parser.update_defaults(self.parser.defaults)
        return optparse.IndentedHelpFormatter.expand_default(self, option)
class ConfigOptionParser(optparse.OptionParser):
    """
    Custom option parser which updates its defaults by checking the
    configuration files and environmental variables
    """
    def __init__(self, *args, **kwargs):
        self.config = ConfigParser.RawConfigParser()
        self.files = self.get_config_files()
        self.config.read(self.files)
        optparse.OptionParser.__init__(self, *args, **kwargs)

    def get_config_files(self):
        """Return the list of config files to read: $VIRTUALENV_CONFIG_FILE
        if set and existing, otherwise the module-level default path."""
        config_file = os.environ.get('VIRTUALENV_CONFIG_FILE', False)
        if config_file and os.path.exists(config_file):
            return [config_file]
        return [default_config_file]

    def update_defaults(self, defaults):
        """
        Updates the given defaults with values from the config files and
        the environ. Does a little special handling for certain types of
        options (lists).
        """
        # Then go and look for the other sources of configuration:
        config = {}
        # 1. config files
        config.update(dict(self.get_config_section('virtualenv')))
        # 2. environmental variables (these win, applied last)
        config.update(dict(self.get_environ_vars()))
        # Then set the options with those values
        for key, val in config.items():
            key = key.replace('_', '-')
            if not key.startswith('--'):
                key = '--%s' % key  # only prefer long opts
            option = self.get_option(key)
            if option is not None:
                # ignore empty values
                if not val:
                    continue
                # handle multiline configs
                if option.action == 'append':
                    val = val.split()
                else:
                    option.nargs = 1
                if option.action in ('store_true', 'store_false', 'count'):
                    val = strtobool(val)
                try:
                    val = option.convert_value(key, val)
                except optparse.OptionValueError:
                    # sys.exc_info()[1] keeps this compatible with both
                    # Python 2 and 3 exception syntax.
                    e = sys.exc_info()[1]
                    print("An error occured during configuration: %s" % e)
                    sys.exit(3)
                defaults[option.dest] = val
        return defaults

    def get_config_section(self, name):
        """
        Get a section of a configuration
        """
        if self.config.has_section(name):
            return self.config.items(name)
        return []

    def get_environ_vars(self, prefix='VIRTUALENV_'):
        """
        Returns a generator with all environmental vars with prefix VIRTUALENV
        """
        for key, val in os.environ.items():
            if key.startswith(prefix):
                yield (key.replace(prefix, '').lower(), val)

    def get_default_values(self):
        """
        Overriding to make updating the defaults after instantiation of
        the option parser possible, update_defaults() does the dirty work.
        """
        if not self.process_default_values:
            # Old, pre-Optik 1.5 behaviour.
            return optparse.Values(self.defaults)

        defaults = self.update_defaults(self.defaults.copy())  # ours
        for option in self._get_all_options():
            default = defaults.get(option.dest)
            if isinstance(default, basestring):
                opt_str = option.get_opt_string()
                defaults[option.dest] = option.check_value(opt_str, default)
        return optparse.Values(defaults)
def main():
    """Command-line entry point: parse options and create the virtualenv.

    Honors optional module-level hooks (extend_parser, adjust_options,
    after_install) when this file is used as a bootstrap-script
    template.  May re-exec itself under a different interpreter when
    --python is given.
    """
    parser = ConfigOptionParser(
        version=virtualenv_version,
        usage="%prog [OPTIONS] DEST_DIR",
        formatter=UpdatingDefaultsHelpFormatter())

    parser.add_option(
        '-v', '--verbose',
        action='count',
        dest='verbose',
        default=0,
        help="Increase verbosity")

    parser.add_option(
        '-q', '--quiet',
        action='count',
        dest='quiet',
        default=0,
        help='Decrease verbosity')

    parser.add_option(
        '-p', '--python',
        dest='python',
        metavar='PYTHON_EXE',
        help='The Python interpreter to use, e.g., --python=python2.5 will use the python2.5 '
        'interpreter to create the new environment. The default is the interpreter that '
        'virtualenv was installed with (%s)' % sys.executable)

    parser.add_option(
        '--clear',
        dest='clear',
        action='store_true',
        help="Clear out the non-root install and start from scratch")

    parser.add_option(
        '--no-site-packages',
        dest='no_site_packages',
        action='store_true',
        help="Don't give access to the global site-packages dir to the "
             "virtual environment")

    parser.add_option(
        '--system-site-packages',
        dest='system_site_packages',
        action='store_true',
        help="Give access to the global site-packages dir to the "
             "virtual environment")

    parser.add_option(
        '--unzip-setuptools',
        dest='unzip_setuptools',
        action='store_true',
        help="Unzip Setuptools or Distribute when installing it")

    parser.add_option(
        '--relocatable',
        dest='relocatable',
        action='store_true',
        help='Make an EXISTING virtualenv environment relocatable. '
        'This fixes up scripts and makes all .pth files relative')

    parser.add_option(
        '--distribute',
        dest='use_distribute',
        action='store_true',
        help='Use Distribute instead of Setuptools. Set environ variable '
        'VIRTUALENV_DISTRIBUTE to make it the default ')

    default_search_dirs = file_search_dirs()
    parser.add_option(
        '--extra-search-dir',
        dest="search_dirs",
        action="append",
        default=default_search_dirs,
        help="Directory to look for setuptools/distribute/pip distributions in. "
        "You can add any number of additional --extra-search-dir paths.")

    parser.add_option(
        '--never-download',
        dest="never_download",
        action="store_true",
        help="Never download anything from the network. Instead, virtualenv will fail "
        "if local distributions of setuptools/distribute/pip are not present.")

    parser.add_option(
        '--prompt=',
        dest='prompt',
        help='Provides an alternative prompt prefix for this environment')

    # Hook for bootstrap scripts built on top of this file.
    if 'extend_parser' in globals():
        extend_parser(parser)

    options, args = parser.parse_args()

    global logger

    if 'adjust_options' in globals():
        adjust_options(options, args)

    verbosity = options.verbose - options.quiet
    logger = Logger([(Logger.level_for_integer(2-verbosity), sys.stdout)])

    if options.python and not os.environ.get('VIRTUALENV_INTERPRETER_RUNNING'):
        # Re-exec this script under the requested interpreter; the env
        # var guards against infinite re-exec recursion.
        env = os.environ.copy()
        interpreter = resolve_interpreter(options.python)
        if interpreter == sys.executable:
            logger.warn('Already using interpreter %s' % interpreter)
        else:
            logger.notify('Running virtualenv with interpreter %s' % interpreter)
            env['VIRTUALENV_INTERPRETER_RUNNING'] = 'true'
            file = __file__
            if file.endswith('.pyc'):
                file = file[:-1]
            popen = subprocess.Popen([interpreter, file] + sys.argv[1:], env=env)
            raise SystemExit(popen.wait())

    # Force --use-distribute on Python 3, since setuptools is not available.
    if majver > 2:
        options.use_distribute = True

    if os.environ.get('PYTHONDONTWRITEBYTECODE') and not options.use_distribute:
        print(
            "The PYTHONDONTWRITEBYTECODE environment variable is "
            "not compatible with setuptools. Either use --distribute "
            "or unset PYTHONDONTWRITEBYTECODE.")
        sys.exit(2)

    if not args:
        print('You must provide a DEST_DIR')
        parser.print_help()
        sys.exit(2)
    if len(args) > 1:
        print('There must be only one argument: DEST_DIR (you gave %s)' % (
            ' '.join(args)))
        parser.print_help()
        sys.exit(2)

    home_dir = args[0]

    if os.environ.get('WORKING_ENV'):
        logger.fatal('ERROR: you cannot run virtualenv while in a workingenv')
        logger.fatal('Please deactivate your workingenv, then re-run this script')
        sys.exit(3)

    if 'PYTHONHOME' in os.environ:
        logger.warn('PYTHONHOME is set. You *must* activate the virtualenv before using it')
        del os.environ['PYTHONHOME']

    if options.relocatable:
        make_environment_relocatable(home_dir)
        return

    if options.no_site_packages:
        logger.warn('The --no-site-packages flag is deprecated; it is now '
                    'the default behavior.')

    create_environment(home_dir,
                       site_packages=options.system_site_packages,
                       clear=options.clear,
                       unzip_setuptools=options.unzip_setuptools,
                       use_distribute=options.use_distribute,
                       prompt=options.prompt,
                       search_dirs=options.search_dirs,
                       never_download=options.never_download)

    if 'after_install' in globals():
        after_install(options, home_dir)
def call_subprocess(cmd, show_stdout=True,
                    filter_stdout=None, cwd=None,
                    raise_on_returncode=True, extra_env=None,
                    remove_from_env=None):
    """Run *cmd*, streaming or filtering its output through the logger.

    When *show_stdout* is false the output is captured line by line and
    routed through *filter_stdout* (which returns a log level, or a
    (level, replacement_line) tuple).  *extra_env* entries are added to
    a copy of os.environ; *remove_from_env* names are removed from it.
    Raises OSError on a nonzero exit status unless *raise_on_returncode*
    is false (then only a warning is logged).
    """
    # Build a compact, shell-like description of the command for logs:
    # long parts are elided, parts with spaces/quotes are quoted.
    cmd_parts = []
    for part in cmd:
        if len(part) > 45:
            part = part[:20]+"..."+part[-20:]
        if ' ' in part or '\n' in part or '"' in part or "'" in part:
            part = '"%s"' % part.replace('"', '\\"')
        if hasattr(part, 'decode'):
            try:
                part = part.decode(sys.getdefaultencoding())
            except UnicodeDecodeError:
                part = part.decode(sys.getfilesystemencoding())
        cmd_parts.append(part)
    cmd_desc = ' '.join(cmd_parts)
    if show_stdout:
        stdout = None
    else:
        stdout = subprocess.PIPE
    logger.debug("Running command %s" % cmd_desc)
    if extra_env or remove_from_env:
        env = os.environ.copy()
        if extra_env:
            env.update(extra_env)
        if remove_from_env:
            for varname in remove_from_env:
                env.pop(varname, None)
    else:
        env = None
    try:
        proc = subprocess.Popen(
            cmd, stderr=subprocess.STDOUT, stdin=None, stdout=stdout,
            cwd=cwd, env=env)
    except Exception:
        e = sys.exc_info()[1]
        logger.fatal(
            "Error %s while executing command %s" % (e, cmd_desc))
        raise
    all_output = []
    if stdout is not None:
        # Captured mode: read line by line so filtering/progress dots
        # happen while the child runs.
        stdout = proc.stdout
        encoding = sys.getdefaultencoding()
        fs_encoding = sys.getfilesystemencoding()
        while 1:
            line = stdout.readline()
            try:
                line = line.decode(encoding)
            except UnicodeDecodeError:
                line = line.decode(fs_encoding)
            if not line:
                break
            line = line.rstrip()
            all_output.append(line)
            if filter_stdout:
                level = filter_stdout(line)
                if isinstance(level, tuple):
                    level, line = level
                logger.log(level, line)
                if not logger.stdout_level_matches(level):
                    logger.show_progress()
            else:
                logger.info(line)
    else:
        proc.communicate()
    proc.wait()
    if proc.returncode:
        if raise_on_returncode:
            if all_output:
                # Surface the captured output before failing so the
                # user can diagnose the error.
                logger.notify('Complete output from command %s:' % cmd_desc)
                logger.notify('\n'.join(all_output) + '\n----------------------------------------')
            raise OSError(
                "Command %s failed with error code %s"
                % (cmd_desc, proc.returncode))
        else:
            logger.warn(
                "Command %s had error code %s"
                % (cmd_desc, proc.returncode))
def create_environment(home_dir, site_packages=False, clear=False,
                       unzip_setuptools=False, use_distribute=False,
                       prompt=None, search_dirs=None, never_download=False):
    """
    Creates a new environment in ``home_dir``.

    If ``site_packages`` is true, then the global ``site-packages/``
    directory will be on the path.

    If ``clear`` is true (default False) then the environment will
    first be cleared.

    ``use_distribute`` selects distribute over setuptools;
    ``unzip_setuptools`` unzips the installed egg; ``prompt`` overrides
    the activate-script prompt prefix; ``search_dirs`` and
    ``never_download`` are forwarded to the installer helpers.
    """
    home_dir, lib_dir, inc_dir, bin_dir = path_locations(home_dir)

    py_executable = os.path.abspath(install_python(
        home_dir, lib_dir, inc_dir, bin_dir,
        site_packages=site_packages, clear=clear))

    install_distutils(home_dir)

    # use_distribute also is True if VIRTUALENV_DISTRIBUTE env var is set
    # we also check VIRTUALENV_USE_DISTRIBUTE for backwards compatibility
    if use_distribute or os.environ.get('VIRTUALENV_USE_DISTRIBUTE'):
        install_distribute(py_executable, unzip=unzip_setuptools,
                           search_dirs=search_dirs, never_download=never_download)
    else:
        install_setuptools(py_executable, unzip=unzip_setuptools,
                           search_dirs=search_dirs, never_download=never_download)

    install_pip(py_executable, search_dirs=search_dirs, never_download=never_download)

    install_activate(home_dir, bin_dir, prompt)
def path_locations(home_dir):
    """Return the path locations for the environment (where libraries are,
    where scripts go, etc).

    Returns a (home_dir, lib_dir, inc_dir, bin_dir) tuple; home_dir may
    be rewritten on Windows (short-path form when it contains spaces).
    """
    # XXX: We'd use distutils.sysconfig.get_python_inc/lib but its
    # prefix arg is broken: http://bugs.python.org/issue3386
    if sys.platform == 'win32':
        # Windows has lots of problems with executables with spaces in
        # the name; this function will remove them (using the ~1
        # format):
        mkdir(home_dir)
        if ' ' in home_dir:
            try:
                import win32api
            except ImportError:
                print('Error: the path "%s" has a space in it' % home_dir)
                print('To handle these kinds of paths, the win32api module must be installed:')
                print(' http://sourceforge.net/projects/pywin32/')
                sys.exit(3)
            home_dir = win32api.GetShortPathName(home_dir)
        lib_dir = join(home_dir, 'Lib')
        inc_dir = join(home_dir, 'Include')
        bin_dir = join(home_dir, 'Scripts')
    elif is_jython:
        lib_dir = join(home_dir, 'Lib')
        inc_dir = join(home_dir, 'Include')
        bin_dir = join(home_dir, 'bin')
    elif is_pypy:
        # PyPy keeps its library files directly in the environment root.
        lib_dir = home_dir
        inc_dir = join(home_dir, 'include')
        bin_dir = join(home_dir, 'bin')
    else:
        lib_dir = join(home_dir, 'lib', py_version)
        inc_dir = join(home_dir, 'include', py_version + abiflags)
        bin_dir = join(home_dir, 'bin')
    return home_dir, lib_dir, inc_dir, bin_dir
def change_prefix(filename, dst_prefix):
    """Rebase *filename* from one of the known interpreter prefixes onto
    *dst_prefix*, asserting when it matches none of them."""
    prefixes = [sys.prefix]

    if sys.platform == "darwin":
        # Extra locations the Apple framework builds install into.
        # NOTE(review): the "~" entry is never expanded before abspath,
        # so it resolves relative to the cwd rather than $HOME — confirm
        # whether os.path.expanduser was intended here.
        prefixes.extend((
            os.path.join("/Library/Python", sys.version[:3], "site-packages"),
            os.path.join(sys.prefix, "Extras", "lib", "python"),
            os.path.join("~", "Library", "Python", sys.version[:3], "site-packages")))

    if hasattr(sys, 'real_prefix'):
        # Already inside a virtualenv: also accept the real interpreter's prefix.
        prefixes.append(sys.real_prefix)
    prefixes = list(map(os.path.abspath, prefixes))
    filename = os.path.abspath(filename)
    for src_prefix in prefixes:
        if filename.startswith(src_prefix):
            _, relpath = filename.split(src_prefix, 1)
            assert relpath[0] == os.sep
            relpath = relpath[1:]
            return join(dst_prefix, relpath)
    assert False, "Filename %s does not start with any of these prefixes: %s" % \
        (filename, prefixes)
def copy_required_modules(dst_prefix):
    """Copy the modules listed in REQUIRED_MODULES into the environment
    rooted at *dst_prefix*; built-in and unimportable modules are skipped
    with an info log."""
    import imp
    for modname in REQUIRED_MODULES:
        if modname in sys.builtin_module_names:
            logger.info("Ignoring built-in bootstrap module: %s" % modname)
            continue
        try:
            f, filename, _ = imp.find_module(modname)
        except ImportError:
            logger.info("Cannot import bootstrap module: %s" % modname)
            continue
        # find_module opened the file for us; we only need its path.
        if f is not None:
            f.close()
        dst_filename = change_prefix(filename, dst_prefix)
        copyfile(filename, dst_filename)
        if filename.endswith('.pyc'):
            # Ship the matching .py source alongside the .pyc when present.
            pyfile = filename[:-1]
            if os.path.exists(pyfile):
                copyfile(pyfile, dst_filename[:-1])
def install_python(home_dir, lib_dir, inc_dir, bin_dir, site_packages, clear):
    """Install just the base environment, no distutils patches etc.

    Copies/symlinks the bootstrap stdlib files and modules, writes the
    patched site.py, copies headers, creates the python executable inside
    *bin_dir*, and finally runs the new interpreter to verify its
    ``sys.prefix`` matches *home_dir*.  Returns the path of the new
    python executable.  ``site_packages`` toggles global site-packages
    visibility; ``clear`` wipes *lib_dir* first.  Exits the process (code
    100) if the new executable does not function.
    """
    if sys.executable.startswith(bin_dir):
        print('Please use the *system* python to run this script')
        return
    if clear:
        rmtree(lib_dir)
        ## FIXME: why not delete it?
        ## Maybe it should delete everything with #!/path/to/venv/python in it
        logger.notify('Not deleting %s', bin_dir)
    # When running from inside another virtualenv, sys.real_prefix points
    # at the underlying system Python; use that as the copy source.
    if hasattr(sys, 'real_prefix'):
        logger.notify('Using real prefix %r' % sys.real_prefix)
        prefix = sys.real_prefix
    else:
        prefix = sys.prefix
    mkdir(lib_dir)
    fix_lib64(lib_dir)
    fix_local_scheme(home_dir)
    # Directories that may hold the required stdlib files to copy over.
    stdlib_dirs = [os.path.dirname(os.__file__)]
    if sys.platform == 'win32':
        stdlib_dirs.append(join(os.path.dirname(stdlib_dirs[0]), 'DLLs'))
    elif sys.platform == 'darwin':
        stdlib_dirs.append(join(stdlib_dirs[0], 'site-packages'))
    if hasattr(os, 'symlink'):
        logger.info('Symlinking Python bootstrap modules')
    else:
        logger.info('Copying Python bootstrap modules')
    logger.indent += 2
    try:
        # copy required files...
        for stdlib_dir in stdlib_dirs:
            if not os.path.isdir(stdlib_dir):
                continue
            for fn in os.listdir(stdlib_dir):
                bn = os.path.splitext(fn)[0]
                if fn != 'site-packages' and bn in REQUIRED_FILES:
                    copyfile(join(stdlib_dir, fn), join(lib_dir, fn))
        # ...and modules
        copy_required_modules(home_dir)
    finally:
        logger.indent -= 2
    mkdir(join(lib_dir, 'site-packages'))
    # Install the patched site.py next to where the real one lives in the
    # new environment, plus the marker files site.py reads at startup.
    import site
    site_filename = site.__file__
    if site_filename.endswith('.pyc'):
        site_filename = site_filename[:-1]
    elif site_filename.endswith('$py.class'):
        # Jython compiled-class naming.
        site_filename = site_filename.replace('$py.class', '.py')
    site_filename_dst = change_prefix(site_filename, home_dir)
    site_dir = os.path.dirname(site_filename_dst)
    writefile(site_filename_dst, SITE_PY)
    writefile(join(site_dir, 'orig-prefix.txt'), prefix)
    site_packages_filename = join(site_dir, 'no-global-site-packages.txt')
    # The marker file's *presence* disables global site-packages.
    if not site_packages:
        writefile(site_packages_filename, '')
    else:
        if os.path.exists(site_packages_filename):
            logger.info('Deleting %s' % site_packages_filename)
            os.unlink(site_packages_filename)
    # C headers, needed to build extensions inside the environment.
    if is_pypy or is_win:
        stdinc_dir = join(prefix, 'include')
    else:
        stdinc_dir = join(prefix, 'include', py_version + abiflags)
    if os.path.exists(stdinc_dir):
        copyfile(stdinc_dir, inc_dir)
    else:
        logger.debug('No include dir %s' % stdinc_dir)
    # pypy never uses exec_prefix, just ignore it
    if sys.exec_prefix != prefix and not is_pypy:
        if sys.platform == 'win32':
            exec_dir = join(sys.exec_prefix, 'lib')
        elif is_jython:
            exec_dir = join(sys.exec_prefix, 'Lib')
        else:
            exec_dir = join(sys.exec_prefix, 'lib', py_version)
        for fn in os.listdir(exec_dir):
            copyfile(join(exec_dir, fn), join(lib_dir, fn))
    if is_jython:
        # Jython has either jython-dev.jar and javalib/ dir, or just
        # jython.jar
        for name in 'jython-dev.jar', 'javalib', 'jython.jar':
            src = join(prefix, name)
            if os.path.exists(src):
                copyfile(src, join(home_dir, name))
        # XXX: registry should always exist after Jython 2.5rc1
        src = join(prefix, 'registry')
        if os.path.exists(src):
            copyfile(src, join(home_dir, 'registry'), symlink=False)
        copyfile(join(prefix, 'cachedir'), join(home_dir, 'cachedir'),
                 symlink=False)
    # Create the python executable inside the environment's bin dir.
    mkdir(bin_dir)
    py_executable = join(bin_dir, os.path.basename(sys.executable))
    if 'Python.framework' in prefix:
        if re.search(r'/Python(?:-32|-64)*$', py_executable):
            # The name of the python executable is not quite what
            # we want, rename it.
            py_executable = os.path.join(
                os.path.dirname(py_executable), 'python')
    logger.notify('New %s executable in %s', expected_exe, py_executable)
    if sys.executable != py_executable:
        ## FIXME: could I just hard link?
        executable = sys.executable
        if sys.platform == 'cygwin' and os.path.exists(executable + '.exe'):
            # Cygwin misreports sys.executable sometimes
            executable += '.exe'
            py_executable += '.exe'
            logger.info('Executable actually exists in %s' % executable)
        shutil.copyfile(executable, py_executable)
        make_exe(py_executable)
        if sys.platform == 'win32' or sys.platform == 'cygwin':
            pythonw = os.path.join(os.path.dirname(sys.executable), 'pythonw.exe')
            if os.path.exists(pythonw):
                logger.info('Also created pythonw.exe')
                shutil.copyfile(pythonw, os.path.join(os.path.dirname(py_executable), 'pythonw.exe'))
        if is_pypy:
            # make a symlink python --> pypy-c
            python_executable = os.path.join(os.path.dirname(py_executable), 'python')
            logger.info('Also created executable %s' % python_executable)
            copyfile(py_executable, python_executable)
    # When built with --python, also provide the conventionally-named
    # executable (e.g. "python2.7") unless something already claims it.
    if os.path.splitext(os.path.basename(py_executable))[0] != expected_exe:
        secondary_exe = os.path.join(os.path.dirname(py_executable),
                                     expected_exe)
        py_executable_ext = os.path.splitext(py_executable)[1]
        if py_executable_ext == '.exe':
            # python2.4 gives an extension of '.4' :P
            secondary_exe += py_executable_ext
        if os.path.exists(secondary_exe):
            logger.warn('Not overwriting existing %s script %s (you must use %s)'
                        % (expected_exe, secondary_exe, py_executable))
        else:
            logger.notify('Also creating executable in %s' % secondary_exe)
            shutil.copyfile(sys.executable, secondary_exe)
            make_exe(secondary_exe)
    if 'Python.framework' in prefix:
        logger.debug('MacOSX Python framework detected')
        # Make sure we use the the embedded interpreter inside
        # the framework, even if sys.executable points to
        # the stub executable in ${sys.prefix}/bin
        # See http://groups.google.com/group/python-virtualenv/
        # browse_thread/thread/17cab2f85da75951
        original_python = os.path.join(
            prefix, 'Resources/Python.app/Contents/MacOS/Python')
        shutil.copy(original_python, py_executable)
        # Copy the framework's dylib into the virtual
        # environment
        virtual_lib = os.path.join(home_dir, '.Python')
        if os.path.exists(virtual_lib):
            os.unlink(virtual_lib)
        copyfile(
            os.path.join(prefix, 'Python'),
            virtual_lib)
        # And then change the install_name of the copied python executable
        try:
            call_subprocess(
                ["install_name_tool", "-change",
                 os.path.join(prefix, 'Python'),
                 '@executable_path/../.Python',
                 py_executable])
        except:
            logger.fatal(
                "Could not call install_name_tool -- you must have Apple's development tools installed")
            raise
        # Some tools depend on pythonX.Y being present
        py_executable_version = '%s.%s' % (
            sys.version_info[0], sys.version_info[1])
        if not py_executable.endswith(py_executable_version):
            # symlinking pythonX.Y > python
            pth = py_executable + '%s.%s' % (
                sys.version_info[0], sys.version_info[1])
            if os.path.exists(pth):
                os.unlink(pth)
            os.symlink('python', pth)
        else:
            # reverse symlinking python -> pythonX.Y (with --python)
            pth = join(bin_dir, 'python')
            if os.path.exists(pth):
                os.unlink(pth)
            os.symlink(os.path.basename(py_executable), pth)
    if sys.platform == 'win32' and ' ' in py_executable:
        # There's a bug with subprocess on Windows when using a first
        # argument that has a space in it. Instead we have to quote
        # the value:
        py_executable = '"%s"' % py_executable
    # Smoke-test: run the new interpreter and make sure it reports the
    # environment's own directory as its sys.prefix.
    cmd = [py_executable, '-c', """
import sys
prefix = sys.prefix
if sys.version_info[0] == 3:
    prefix = prefix.encode('utf8')
if hasattr(sys.stdout, 'detach'):
    sys.stdout = sys.stdout.detach()
elif hasattr(sys.stdout, 'buffer'):
    sys.stdout = sys.stdout.buffer
sys.stdout.write(prefix)
"""]
    logger.info('Testing executable with %s %s "%s"' % tuple(cmd))
    try:
        proc = subprocess.Popen(cmd,
                                stdout=subprocess.PIPE)
        proc_stdout, proc_stderr = proc.communicate()
    except OSError:
        e = sys.exc_info()[1]
        if e.errno == errno.EACCES:
            logger.fatal('ERROR: The executable %s could not be run: %s' % (py_executable, e))
            sys.exit(100)
        else:
            raise e
    proc_stdout = proc_stdout.strip().decode("utf-8")
    # Normalize both sides before comparing (case on Windows, symlinks).
    proc_stdout = os.path.normcase(os.path.abspath(proc_stdout))
    norm_home_dir = os.path.normcase(os.path.abspath(home_dir))
    if hasattr(norm_home_dir, 'decode'):
        norm_home_dir = norm_home_dir.decode(sys.getfilesystemencoding())
    if proc_stdout != norm_home_dir:
        logger.fatal(
            'ERROR: The executable %s is not functioning' % py_executable)
        logger.fatal(
            'ERROR: It thinks sys.prefix is %r (should be %r)'
            % (proc_stdout, norm_home_dir))
        logger.fatal(
            'ERROR: virtualenv is not compatible with this system or executable')
        if sys.platform == 'win32':
            logger.fatal(
                'Note: some Windows users have reported this error when they installed Python for "Only this user". The problem may be resolvable if you install Python "For all users". (See https://bugs.launchpad.net/virtualenv/+bug/352844)')
        sys.exit(100)
    else:
        logger.info('Got sys.prefix result: %r' % proc_stdout)
    pydistutils = os.path.expanduser('~/.pydistutils.cfg')
    if os.path.exists(pydistutils):
        logger.notify('Please make sure you remove any previous custom paths from '
                      'your %s file.' % pydistutils)
    ## FIXME: really this should be calculated earlier
    return py_executable
def install_activate(home_dir, bin_dir, prompt=None):
    """Write the activate/deactivate scripts for the environment into
    *bin_dir*, substituting the environment path, name and optional custom
    *prompt* into each template."""
    on_windows = sys.platform == 'win32' or is_jython and os._name == 'nt'
    if on_windows:
        files = {'activate.bat': ACTIVATE_BAT,
                 'deactivate.bat': DEACTIVATE_BAT}
        # Cygwin shells on Windows still want the sh version too.
        if os.environ.get('OS') == 'Windows_NT' and os.environ.get('OSTYPE') == 'cygwin':
            files['activate'] = ACTIVATE_SH
    else:
        # The fish and csh scripts are supplied in addition to, not
        # instead of, the bash script.
        files = {'activate': ACTIVATE_SH,
                 'activate.fish': ACTIVATE_FISH,
                 'activate.csh': ACTIVATE_CSH}
    files['activate_this.py'] = ACTIVATE_THIS
    home_dir = os.path.abspath(home_dir)
    if hasattr(home_dir, 'decode'):
        home_dir = home_dir.decode(sys.getfilesystemencoding())
    vname = os.path.basename(home_dir)
    substitutions = [
        ('__VIRTUAL_PROMPT__', prompt or ''),
        ('__VIRTUAL_WINPROMPT__', prompt or '(%s)' % vname),
        ('__VIRTUAL_ENV__', home_dir),
        ('__VIRTUAL_NAME__', vname),
        ('__BIN_NAME__', os.path.basename(bin_dir)),
    ]
    for name, content in files.items():
        for marker, value in substitutions:
            content = content.replace(marker, value)
        writefile(os.path.join(bin_dir, name), content)
def install_distutils(home_dir):
    """Drop the patched distutils package (and its distutils.cfg) into the
    environment rooted at *home_dir*."""
    dest_path = change_prefix(distutils.__path__[0], home_dir)
    mkdir(dest_path)
    ## FIXME: maybe this prefix setting should only be put in place if
    ## there's a local distutils.cfg with a prefix setting?
    home_dir = os.path.abspath(home_dir)
    ## FIXME: this is breaking things, removing for now:
    #distutils_cfg = DISTUTILS_CFG + "\n[install]\nprefix=%s\n" % home_dir
    writefile(os.path.join(dest_path, '__init__.py'), DISTUTILS_INIT)
    # overwrite=False keeps any distutils.cfg the user already customized.
    writefile(os.path.join(dest_path, 'distutils.cfg'), DISTUTILS_CFG, overwrite=False)
def fix_local_scheme(home_dir):
    """
    Platforms that use the "posix_local" install scheme (like Ubuntu with
    Python 2.7) need to be given an additional "local" location, sigh.
    """
    try:
        import sysconfig
    except ImportError:
        # Old interpreters without sysconfig: nothing to fix.
        return
    # NOTE(review): relies on the private sysconfig._get_default_scheme;
    # confirm availability on the targeted interpreter versions.
    if sysconfig._get_default_scheme() == 'posix_local':
        local_path = os.path.join(home_dir, 'local')
        if not os.path.exists(local_path):
            os.symlink(os.path.abspath(home_dir), local_path)
def fix_lib64(lib_dir):
    """
    Some platforms (particularly Gentoo on x64) put things in lib64/pythonX.Y
    instead of lib/pythonX.Y. If this is such a platform we'll just create a
    symlink so lib64 points to lib
    """
    uses_lib64 = any(isinstance(p, basestring) and 'lib64' in p
                     for p in distutils.sysconfig.get_config_vars().values())
    if not uses_lib64:
        return
    logger.debug('This system uses lib64; symlinking lib64 to lib')
    # Sanity-check the layout before touching anything.
    assert os.path.basename(lib_dir) == 'python%s' % sys.version[:3], (
        "Unexpected python lib dir: %r" % lib_dir)
    lib_parent = os.path.dirname(lib_dir)
    assert os.path.basename(lib_parent) == 'lib', (
        "Unexpected parent dir: %r" % lib_parent)
    copyfile(lib_parent, os.path.join(os.path.dirname(lib_parent), 'lib64'))
def resolve_interpreter(exe):
    """
    If the executable given isn't an absolute path, search $PATH for the interpreter
    """
    if os.path.abspath(exe) != exe:
        # Relative name: take the first $PATH entry that contains it.
        for path in os.environ.get('PATH', '').split(os.pathsep):
            candidate = os.path.join(path, exe)
            if os.path.exists(candidate):
                exe = candidate
                break
    if not os.path.exists(exe):
        logger.fatal('The executable %s (from --python=%s) does not exist' % (exe, exe))
        raise SystemExit(3)
    if not is_executable(exe):
        logger.fatal('The executable %s (from --python=%s) is not executable' % (exe, exe))
        raise SystemExit(3)
    return exe
def is_executable(exe):
    """Return whether *exe* is a path the current user may execute."""
    return bool(os.access(exe, os.X_OK))
############################################################
## Relocating the environment:
def make_environment_relocatable(home_dir):
    """
    Makes the already-existing environment use relative paths, and takes out
    the #!-based environment selection in scripts.
    """
    home_dir, lib_dir, inc_dir, bin_dir = path_locations(home_dir)
    marker = os.path.join(bin_dir, 'activate_this.py')
    if not os.path.exists(marker):
        # NOTE(review): execution deliberately continues after this fatal
        # log, matching the original behavior.
        logger.fatal(
            'The environment doesn\'t have a file %s -- please re-run virtualenv '
            'on this environment to update it' % marker)
    fixup_scripts(home_dir)
    fixup_pth_and_egg_link(home_dir)
    ## FIXME: need to fix up distutils.cfg
# Script names that are allowed to keep an absolute interpreter path;
# fixup_scripts() will not rewrite these.
OK_ABS_SCRIPTS = ['python', 'python%s' % sys.version[:3],
                  'activate', 'activate.bat', 'activate_this.py']
def fixup_scripts(home_dir):
    """Rewrite scripts in the environment's bin dir so they no longer
    hard-code the environment's absolute path.

    A script starting with the environment's own ``#!<env>/bin/python``
    shebang gets a ``#!/usr/bin/env pythonX.Y`` shebang plus a line that
    activates the environment relative to the script's own location.
    Scripts in OK_ABS_SCRIPTS, already-relative scripts, and anything
    unrecognized are left alone (with a log message).
    """
    # This is what we expect at the top of scripts:
    shebang = '#!%s/bin/python' % os.path.normcase(os.path.abspath(home_dir))
    # This is what we'll put:
    new_shebang = '#!/usr/bin/env python%s' % sys.version[:3]
    activate = "import os; activate_this=os.path.join(os.path.dirname(__file__), 'activate_this.py'); execfile(activate_this, dict(__file__=activate_this)); del os, activate_this"
    # Fix: the previous version first computed bin_dir from a hand-rolled
    # 'Scripts'/'bin' platform check and then immediately overwrote it with
    # path_locations()' answer; the dead computation is removed.
    home_dir, lib_dir, inc_dir, bin_dir = path_locations(home_dir)
    for filename in os.listdir(bin_dir):
        filename = os.path.join(bin_dir, filename)
        if not os.path.isfile(filename):
            # ignore subdirs, e.g. .svn ones.
            continue
        f = open(filename, 'rb')
        lines = f.readlines()
        f.close()
        if not lines:
            logger.warn('Script %s is an empty file' % filename)
            continue
        # NOTE(review): lines are bytes under Python 3 while shebang is a
        # str; this comparison assumes Python 2 string semantics -- confirm
        # before running this path under Python 3.
        if not lines[0].strip().startswith(shebang):
            if os.path.basename(filename) in OK_ABS_SCRIPTS:
                logger.debug('Cannot make script %s relative' % filename)
            elif lines[0].strip() == new_shebang:
                logger.info('Script %s has already been made relative' % filename)
            else:
                logger.warn('Script %s cannot be made relative (it\'s not a normal script that starts with %s)'
                            % (filename, shebang))
            continue
        logger.notify('Making script %s relative' % filename)
        lines = [new_shebang+'\n', activate+'\n'] + lines[1:]
        f = open(filename, 'wb')
        f.writelines(lines)
        f.close()
def fixup_pth_and_egg_link(home_dir, sys_path=None):
    """Makes .pth and .egg-link files use relative paths"""
    home_dir = os.path.normcase(os.path.abspath(home_dir))
    if sys_path is None:
        sys_path = sys.path
    for path in sys_path:
        if not path:
            # Empty entry means the current directory.
            path = '.'
        if not os.path.isdir(path):
            continue
        path = os.path.normcase(os.path.abspath(path))
        if not path.startswith(home_dir):
            logger.debug('Skipping system (non-environment) directory %s' % path)
            continue
        for entry in os.listdir(path):
            full = os.path.join(path, entry)
            if full.endswith('.pth'):
                if os.access(full, os.W_OK):
                    fixup_pth_file(full)
                else:
                    logger.warn('Cannot write .pth file %s, skipping' % full)
            elif full.endswith('.egg-link'):
                if os.access(full, os.W_OK):
                    fixup_egg_link(full)
                else:
                    logger.warn('Cannot write .egg-link file %s, skipping' % full)
def fixup_pth_file(filename):
    """Rewrite absolute path entries in the .pth file *filename* as paths
    relative to the file's own directory; comments, import lines and
    already-relative entries pass through untouched.  The file is only
    rewritten when something actually changed."""
    f = open(filename)
    prev_lines = f.readlines()
    f.close()
    lines = []
    for line in prev_lines:
        line = line.strip()
        if (not line or line.startswith('#') or line.startswith('import ')
            or os.path.abspath(line) != line):
            lines.append(line)
        else:
            new_value = make_relative_path(filename, line)
            if line != new_value:
                logger.debug('Rewriting path %s as %s (in %s)' % (line, new_value, filename))
            lines.append(new_value)
    # Fix: the previous version compared the stripped lines against the raw
    # readlines() output (which keeps trailing newlines), so the comparison
    # never matched and the file was always rewritten.  Normalize both
    # sides so an unchanged file is genuinely left alone.
    if lines == [line.strip() for line in prev_lines]:
        logger.info('No changes to .pth file %s' % filename)
        return
    logger.notify('Making paths in .pth file %s relative' % filename)
    f = open(filename, 'w')
    f.write('\n'.join(lines) + '\n')
    f.close()
def fixup_egg_link(filename):
    """Rewrite the absolute target stored in the .egg-link file *filename*
    as a path relative to the file itself; already-relative targets are
    left alone."""
    fh = open(filename)
    target = fh.read().strip()
    fh.close()
    if os.path.abspath(target) != target:
        logger.debug('Link in %s already relative' % filename)
        return
    rel_target = make_relative_path(filename, target)
    logger.notify('Rewriting link %s in %s as %s' % (target, filename, rel_target))
    fh = open(filename, 'w')
    fh.write(rel_target)
    fh.close()
def make_relative_path(source, dest, dest_is_directory=True):
    """
    Make a filename relative, where the filename is dest, and it is
    being referred to from the filename source.

    >>> make_relative_path('/usr/share/something/a-file.pth',
    ...                    '/usr/share/another-place/src/Directory')
    '../another-place/src/Directory'
    >>> make_relative_path('/usr/share/something/a-file.pth',
    ...                    '/home/user/src/Directory')
    '../../../home/user/src/Directory'
    >>> make_relative_path('/usr/share/a-file.pth', '/usr/share/')
    './'
    """
    dest_filename = None
    if not dest_is_directory:
        # Peel the filename off; only directories take part in the walk.
        dest_filename = os.path.basename(dest)
        dest = os.path.dirname(dest)
    source = os.path.normpath(os.path.abspath(os.path.dirname(source)))
    dest = os.path.normpath(os.path.abspath(dest))
    dest_parts = dest.strip(os.path.sep).split(os.path.sep)
    source_parts = source.strip(os.path.sep).split(os.path.sep)
    # Count the shared leading components of both paths.
    common = 0
    for src_piece, dst_piece in zip(source_parts, dest_parts):
        if src_piece != dst_piece:
            break
        common += 1
    rel_parts = ['..'] * (len(source_parts) - common) + dest_parts[common:]
    if dest_filename is not None:
        rel_parts.append(dest_filename)
    if not rel_parts:
        # Special case for the current directory (otherwise it'd be '')
        return './'
    return os.path.sep.join(rel_parts)
############################################################
## Bootstrap script creation:
def create_bootstrap_script(extra_text, python_version=''):
    """
    Creates a bootstrap script, which is like this script but with
    extend_parser, adjust_options, and after_install hooks.

    This returns a string that (written to disk of course) can be used
    as a bootstrap script with your own customizations. The script
    will be the standard virtualenv.py script, with your extra text
    added (your extra text should be Python code).

    If you include these functions, they will be called:

    ``extend_parser(optparse_parser)``:
        You can add or remove options from the parser here.

    ``adjust_options(options, args)``:
        You can change options here, or change the args (if you accept
        different kinds of arguments, be sure you modify ``args`` so it is
        only ``[DEST_DIR]``).

    ``after_install(options, home_dir)``:
        After everything is installed, this function is called. This
        is probably the function you are most likely to use. An
        example would be::

            def after_install(options, home_dir):
                subprocess.call([join(home_dir, 'bin', 'easy_install'),
                                 'MyPackage'])
                subprocess.call([join(home_dir, 'bin', 'my-package-script'),
                                 'setup', home_dir])

        This example immediately installs a package, and runs a setup
        script from that package.

    If you provide something like ``python_version='2.4'`` then the
    script will start with ``#!/usr/bin/env python2.4`` instead of
    ``#!/usr/bin/env python``. You can use this when the script must
    be run with a particular Python version.
    """
    filename = __file__
    if filename.endswith('.pyc'):
        filename = filename[:-1]
    f = open(filename, 'rb')
    content = f.read()
    f.close()
    # Fix: the file is read in binary mode, so on Python 3 ``content`` is
    # bytes and concatenating it with the str header below raised
    # TypeError.  Decode first; on Python 2 content is already str and is
    # left untouched.
    if not isinstance(content, str):
        content = content.decode('utf-8')
    py_exe = 'python%s' % python_version
    content = (('#!/usr/bin/env %s\n' % py_exe)
               + '## WARNING: This file is generated\n'
               + content)
    # The marker is spelled as two adjacent literals so this function's own
    # source never matches it.
    return content.replace('##EXT' 'END##', extra_text)
##EXTEND##
def convert(s):
    """Unpack one of the embedded payloads below: *s* is ASCII base64 text
    wrapping zlib-compressed UTF-8 data; return the decoded text."""
    raw = s.encode('ascii')
    decompressed = zlib.decompress(base64.b64decode(raw))
    return decompressed.decode('utf-8')
##file site.py
SITE_PY = convert("""
eJzVPP1z2zaWv/OvwMqTIZXKdD66nR2n7o2TOK3v3MTbpLO5dT06SoIk1hTJEqQV7c3d337vAwAB
kvLHdvvDaTKxRAIPDw/vGw8YjUanZSnzhdgUiyaTQsmkmq9FmdRrJZZFJep1Wi0Oy6Sqd/B0fpOs
pBJ1IdROxdgqDoKnv/MTPBWf1qkyKMC3pKmLTVKn8yTLdiLdlEVVy4VYNFWar0Sap3WaZOk/oEWR
x+Lp78cgOM8FzDxLZSVuZaUArhLFUlzu6nWRi6gpcc7P4z8nL8cToeZVWtbQoNI4A0XWSR3kUi4A
TWjZKCBlWstDVcp5ukzntuG2aLKFKLNkLsV//RdPjZqGYaCKjdyuZSVFDsgATAmwSsQDvqaVmBcL
GQvxWs4THICft8QKGNoE10whGfNCZEW+gjnlci6VSqqdiGZNTYAIZbEoAKcUMKjTLAu2RXWjxrCk
tB5beCQSZg9/MsweME8cv885gOOHPPg5T79MGDZwD4Kr18w2lVymX0SCYOGn/CLnU/0sSpdikS6X
QIO8HmOTgBFQIktnRyUtx7d6hb47IqwsVyYwhkSUuTG/pB5xcF6LJFPAtk2JNFKE+Vs5S5McqJHf
wnAAEUgaDI2zSFVtx6HZiQIAVLiONUjJRolok6Q5MOuPyZzQ/luaL4qtGhMFYLWU+LVRtTv/aIAA
0NohwCTAxTKr2eRZeiOz3RgQ+ATYV1I1WY0CsUgrOa+LKpWKAABqOyG/ANITkVRSk5A508jthOhP
NElzXFgUMBR4fIkkWaarpiIJE8sUOBe44t2Hn8Tbs9fnp+81jxlgLLOrDeAMUGihHZxgAHHUqOoo
K0Cg4+AC/4hksUAhW+H4gFfb4OjelQ4imHsZd/s4Cw5k14urh4E51qBMaKyA+v03dJmoNdDnf+5Z
7yA43UcVmjh/264LkMk82UixTpi/kDOCbzWc7+KyXr8CblAIpwZSKVwcRDBFeEASl2ZRkUtRAotl
aS7HAVBoRm39VQRWeF/kh7TWHU4ACFWQw0vn2ZhGzCVMtA/rFeoL03hHM9NNArvOm6IixQH8n89J
F2VJfkM4KmIo/jaTqzTPESHkhSA8CGlgdZMCJy5icUGtSC+YRiJk7cUtUSQa4CVkOuBJ+SXZlJmc
sPiibr1bjdBgshZmrTPmOGhZk3qlVWunOsh7L+LPHa4jNOt1JQF4M/OEblkUEzEDnU3YlMmGxave
FsQ5wYA8USfkCWoJffE7UPRUqWYj7UvkFdAsxFDBssiyYgskOw4CIQ6wkTHKPnPCW3gH/wNc/D+T
9XwdBM5IFrAGhcjvA4VAwCTIXHO1RsLjNs3KXSWT5qwpimohKxrqYcQ+YsQf2BjnGrwvam3UeLq4
ysUmrVElzbTJTNni5WHN+vEVzxumAZZbEc1M05ZOG5xeVq6TmTQuyUwuURL0Ir2yyw5jBgNjki2u
xYatDLwDssiULciwYkGls6wlOQEAg4UvydOyyaiRQgYTCQy0KQn+JkGTXmhnCdibzXKAConN9xzs
D+D2DxCj7ToF+swBAmgY1FKwfLO0rtBBaPVR4Bt905/HB049X2rbxEMukzTTVj7Jg3N6eFZVJL5z
WWKviSaGghnmNbp2qxzoiGI+Go2CwLhDO2W+Fiqoq90xsIIw40ynsyZFwzedoqnXP1TAowhnYK+b
bWfhgYYwnd4DlZwuy6rY4Gs7t4+gTGAs7BEciEvSMpIdZI8TXyH5XJVemqZoux12FqiHgsufzt6d
fz77KE7EVavSJl19dg1jnuUJsDVZBGCqzrCtLoOWqPhS1H3iHZh3YgqwZ9SbxFcmdQO8C6h/qhp6
DdOYey+Ds/enry/Opj9/PPtp+vH80xkgCHZGBgc0ZTSPDTiMKgbhAK5cqFjb16DXgx68Pv1oHwTT
VE3LXbmDB2AogYWrCOY7ESE+nGobPE3zZRGOqfGv7ISfsFrRHtfV8dfX4uREhL8mt0kYgNfTNuVF
/JEE4NOulNC1hj9RocZBsJBLEJYbiSIVPSVPdswdgIjQstCW9dcizc175iN3CJL4iHoADtPpPEuU
wsbTaQikpQ4DH+gQszuMchJBx3Lndh1rVPBTSViKHLtM8L8BFJMZ9UM0GEW3i2kEAraZJ0pyK5o+
9JtOUctMp5EeEMSPeBxcJFYcoTBNUMtUKXiixCuodWaqyPAnwke5JZHBYAj1Gi6SDnbi2yRrpIqc
SQERo6hDRlSNqSIOAqciAtvZLt143KWm4RloBuTLCtB7VYdy+DkADwUUjAm7MDTjaIlphpj+O8cG
hAM4iSEqaKU6UFificuzS/Hy2YtDdEAgSlxY6njN0aameSPtwyWs1krWDsLcK5yQMIxduixRM+LT
47thbmK7Mn1WWOolruSmuJULwBYZ2Fll8RO9gVga5jFPYBVBE5MFZ6VnPL0EI0eePUgLWnug3oag
mPU3S3/A4bvMFagODoWJ1DpOZ+NVVsVtiu7BbKdfgnUD9YY2zrgigbNwHpOhEQMNAX5rjpTayhAU
WNWwi0l4I0jU8ItWFcYE7gJ16zV9vcmLbT7l2PUE1WQ0tqyLgqWZFxu0S3Ag3oHdACQLCMVaojEU
cNIFytYhIA/Th+kCZSkaAEBgmhUFWA4sE5zRFDnOw2ERxviVIOGtJFr4WzMEBUeGGA4kehvbB0ZL
ICSYnFVwVjVoJkNZM81gYIckPtddxBw0+gA6VIzB0EUaGjcy9Ls6BuUsLlyl5PRDG/r582dmG7Wm
jAgiNsNJo9FfknmLyx2YwhR0gvGhOL9CbLAFdxTANEqzpjj8KIqS/SdYz0st22C5IR6r6/L46Gi7
3cY6H1BUqyO1PPrzX7755i/PWCcuFsQ/MB1HWnRyLD6id+iDxt8aC/SdWbkOP6a5z40EK5LkR5Hz
iPh936SLQhwfjq3+RC5uDSv+b5wPUCBTMyhTGWg7ajF6og6fxC/VSDwRkds2GrMnoU2qtWK+1YUe
dQG2GzyNedHkdegoUiW+AusGMfVCzppVaAf3bKT5AVNFOY0sDxw+v0YMfM4wfGVM8RS1BLEFWnyH
9D8x2yTkz2gNgeRFE9WLd3fDWswQd/FwebfeoSM0ZoapQu5AifCbPFgAbeO+5OBHO6No9xxn1Hw8
Q2AsfWCYV7uCEQoO4YJrMXGlzuFq9FFBmrasmkHBuKoRFDS4dTOmtgZHNjJEkOjdmPCcF1a3ADp1
cn0mojerAC3ccXrWrssKjieEPHAintMTCU7tce/dM17aJssoBdPhUY8qDNhbaLTTBfBlZABMxKj6
ecQtTWDxobMovAYDwArO2iCDLXvMhG9cH3B0MBpgp57V39ebaTwEAhcp4uzRg6ATyic8QqVAmsrI
77mPxS1x+4PdaXGIqcwykUirPcLVVR6DQnWnYVqmOepeZ5HieVaAV2y1IjFS+953FihywcdDxkxL
oCZDSw6n0Ql5e54AhrodJrxWDaYG3MwJYrRJFVk3JNMa/gO3gjISlD4CWhI0C+ahUuZP7F8gc3a+
+sse9rCERoZwm+5zQ3oWQ8Mx7w8EklHnT0AKciBhXxjJdWR1kAGHOQvkCTe8lnulm2DECuTMsSCk
ZgB3eukFOPgkxj0LklCE/KVWshRfiREsX1dUH6a7/6VcatIGkdOAXAWdbzhxcxFOHuKkk5fwGdrP
SNDuRlkAB8/A5XFT8y6bG6a1aRJw1n3FbZECjUyZk9HYRfXaEMZN//7pxGnREssMYhjKG8jbhDEj
jQO73Bo0LLgB4615dyz92M1YYN8oLNQLufkC8V9YpWpeqBAD3F7uwv1orujTxmJ7kc5G8MdbgNH4
2oMkM52/wCzLPzFI6EEPh6B7k8W0yCKptmkekgLT9Dvxl6aHhyWlZ+SOPlI4dQQTxRzl0bsKBIQ2
K49AnFATQFQuQ6Xd/j7YO6c4snC5+8hzm6+OX173iTvZl+Gxn+GlOvtSV4nC1cp40VgocLX6BhyV
LkwuyXd6u1FvR2OYUBUKokjx4eNngYTgTOw22T1u6i3DIzb3zsn7GNRBr91Lrs7siF0AEdSKyChH
4eM58uHIPnZyd0zsEUAexTB3LIqBpPnkn4Fz10LBGIeLXY55tK7KwA+8/ubr6UBm1EXym69H94zS
IcaQ2EcdT9COTGUAYnDapkslk4x8DacTZRXzlndsm3LMCp3iP81k1wNOJ37Me2MyWvi95r3A0XwO
iB4QZhezXyFYVTq/dZukGSXlAY3DQ9RzJs7m1MEwPh6ku1HGnBR4LM8mg6GQunoGCxNyYD/uT0f7
Racm9zsQkJpPmag+Kgd6A77dP/I21d29w/2yP2ip/yCd9UhA3mxGAwR84BzM3ub//5mwsmJoWlmN
O1pfybv1vAH2AHW4x825ww3pD827WUvjTLDcKfEUBfSp2NKGNuXycGcCoCzYzxiAg8uot0XfNFXF
m5sk56WsDnHDbiKwlsd4GlQi1Adz9F7WiIltNqfcqFP5UQypzlBnO+1MwtZPHRbZdWFyJDK/TSvo
C1olCn/48ONZ2GcAPQx2GgbnrqPhkofbKYT7CKYNNXHCx/RhCj2myz8vVV1X2Seo2TM2GUhNtj5h
e4lHE7cOr8E9GQhvg5A3YjEinK/l/GYqaXMZ2RS7OknYN/gaMbF7zn6FkEqWVOYEM5lnDdKKHT2s
T1s2+Zzy8bUEe66LSbG4hLaMOd20zJKViKjzAlMdmhspG3KbVNrbKasCyxdFky6OVulCyN+aJMMw
Ui6XgAtuluhXMQ9PGQ/xlne9uaxNyXlTpfUOSJCoQu810Qa503C244lGHpK8rcAExC3zY/ERp43v
mXALQy4TjPoZdpwkxnnYwWwGInfRc3ifF1McdUpVoBNGqr8PTI+D7ggFABgBUJj/aKwzRf4bSa/c
DS1ac5eoqCU9UrqRbUEeB0KJxhhZ82/66TOiy1t7sFztx3J1N5arLparQSxXPparu7F0RQIX1iZJ
jCQMJUq6afTBigw3x8HDnCXzNbfD6kCsAgSIojQBnZEpLpL1Mim8n0RASG07G5z0sK2wSLnssCo4
5apBIvfjpokOHk15s9OZ6jV0Z56K8dn2VZn4fY/imIqJZtSd5W2R1EnsycUqK2YgthbdSQtgIroF
J5yby2+nM84mdizV6PI/P/3w4T02R1Ajs51O3XAR0bDgVKKnSbVSfWlqg40S2JFa+oUf1E0DPHhg
JodHOeD/3lJFATKO2NKOeCFK8ACo7sc2c6tjwrDzXJfR6OfM5Ly5cSJGeT1qJ7WHSKeXl29PP52O
KMU0+t+RKzCGtr50uPiYFrZB339zm1uKYx8Qap1LaY2fOyeP1i1H3G9jDdiO2/vsuvPgxUMM9mBY
6s/yD6UULAkQKtbJxscQ6sHBz+8KE3r0MYzYKw9zd3LYWbHvHNlzXBRH9IfS3N0B/M01jDGmQADt
QkUmMmiDqY7St+b1Doo6QB/o6/3uEKwbenUjGZ+idhIDDqBDWdtsv/vn7Quw0VOyfn32/fn7i/PX
l6effnBcQHTlPnw8eiHOfvwsqB4BDRj7RAluxddY+QKGxT0KIxYF/GswvbFoak5KQq+3Fxd6Z2CD
hyGwOhZtTgzPuWzGQuMcDWc97UNd74IYZTpAck6dUHkInUrBeGnDJx5UoSto6TDLDJ3VRode+jSR
OXVE+6gxSB80dknBILikCV5RnXNtosKKd5z0SZwBpLSNtoUIGeWgetvTzn6LyeZ7iTnqDE/azlrR
X4UuruF1rMoshUjuVWhlSXfDcoyWcfRDu6HKeA1pQKc7jKwb8qz3YoFW61XIc9P9xy2j/dYAhi2D
vYV555LKEahGF4upRIiNeOcglF/gq116vQYKFgw3lmpcRMN0Kcw+geBarFMIIIAn12B9MU4ACJ2V
8BPQx052QBZYDRC+2SwO/xpqgvitf/lloHldZYd/FyVEQYJLV8IBYrqN30LgE8tYnH14Nw4ZOSoF
FX9tsIAcHBLK8jnSTvUyvGM7jZTMlrqewdcH+EL7CfS6072SZaW7D7vGIUrAExWR1/BEGfqFWF5k
YU9wKuMOaKyNt5jhGTN329t8DsTHtcwyXRF9/vbiDHxHLNdHCeJ9njMYjvMluGWri734DFwHFG7o
wusK2bhCF5Y29Rex12wwM4siR729OgC7TpT97PfqpTqrJFUu2hFOm2GZgvMYWRnWwiwrs3anDVLY
bUMUR5lhlpheVlQw6fME8DI9TTgkglgJDwOYNDPvWqZ5bSrksnQOehRULijUCQgJEhdPvBHnFTkn
eotKmYMy8LDcVelqXWMyHTrHVKSPzX88/Xxx/p4K11+8bL3uAeacUCQw4aKFEyxJw2wHfHHLzJCr
ptMhntWvEAZqH/jTfcXVECc8QK8fJxbxT/cVn1Q6cSJBngEoqKbsigcGAE63IblpZYFxtXEwftyS
sxYzHwzlIvFghC4scOfX50TbsmNKKO9jXj5il2JZahpGprNbAtX96DkuS9xWWUTDjeDtkGyZzwy6
3vTe7Cu2cj89KcRDk4BRv7U/hqlG6jXV03GYbR+3UFirbewvuZMrddrNcxRlIGLkdh67TDashHVz
5kCvbLcHTHyr0TWSOKjKR7/kI+1heJhYYvfiFNORjk2QEcBMhtSnQxrwodAigAKhatPIkdzJ+OkL
b46ONbh/jlp3gW38ARShrv2kMwVFBZwIX35jx5FfEVqoR49F6HgqucwLW5eEn+0avcrn/hwHZYCS
mCh2VZKvZMSwJgbmVz6x96RgSdt6pL5Kr4cMizgH5/TLHg7vy8XwxolBrcMIvXY3ctdVRz55sMHg
0YM7CeaDr5It6P6yqSNeyWGRHz5ttR/q/RCx2g2a6s3eKMR0zG/hnvVpAQ9SQ8NCD++3gd0i/PDa
GEfW2sfOKZrQvtAe7LyC0KxWtC3jHF8zvqj1AlqDe9Ka/JF9qgtT7O+Bc0lOTsgC5cFdkN7cRrpB
J50w4uMxfLYwpfLr9vSGfreQtzIrwPWCqA6r63+11fXj2KZTBuuOfjd2l7vL3TBu9KbF7NiU/6Nn
pkpYvziX9RGiM5jxuQuzFhlc6l90SJLkN+Qlv/nb+US8ef8T/P9afoC4Co/HTcTfAQ3xpqggvuTz
nXTwHk8O1Bw4Fo3CM3QEjbYq+I4CdNsuPTrjtog+0uCfZbCaUmAVZ7XhizEARZ4gnXlu/QRTqA+/
zUmijjdqPMWhRRnpl0iD/Ycr8EDCkW4Zr+tNhvbCyZK0q3k1ujh/c/b+41lcf0EONz9HThbFLwDC
6eg94gr3wybCPpk3+OTacZx/kFk54DfroNMc1MCgU4QQl5Q20ORLFxIbXCQVZg5EuVsU8xhbAsvz
2bB6C4702Ikv7zX0npVFWNFY76K13jw+BmqIX7qKaAQNqY+eE/UkhJIZHlLix/Fo2BRPBKW24c/T
m+3CzYzr0yY0wS6m7awjv7vVhWums4ZnOYnwOrHLYA4gZmmiNrO5ezDtQy70nRmg5WifQy6TJquF
zEFyKcinywtA07tnyVhCmFXYnNEBK0rTZNtkp5xKm0SJEY46ovPXuCFDGUOIwX9Mbtge4CE30fBp
WYBOiFL8VDhdVTNfswRzSETUGyg82Kb5yxdhj8I8KEfI89aRhXmi28gYrWSt588PovHV87bSgbLS
c+8k6bwEq+eyyQGozvLp06cj8W/3ez+MSpwVxQ24ZQB70Gu5oNd7LLeenF2tvmdv3sTAj/O1vIIH
15Q9t8+bnFKTd3SlBZH2r4ER4tqElhlN+45d5qRdxRvN3II3rLTl+DlP6WYcTC1JVLb6giFMOxlp
IpYExRAmap6mIacpYD12RYOHwDDNqPlFfgGOTxHMBN/iDhmH2mv0MKlg03KPRedEjAjwiAqoeDQ6
RUvHoADP6eVOozk9z9O6Pb/wzN081afFa3vhjeYrkWxRMsw8OsRwzhN6rNp62MWdLOpFLMX8yk04
dmbJr+/DHVgbJK1YLg2m8NAs0ryQ1dyYU1yxdJ7WDhjTDuFwZ7rnh6xPHAygNAL1TlZhYSXavv2T
XRcX0w+0j3xoRtLlQ7W9O4mTQ0neqaKL43Z8SkNZQlq+NV/GMMp7SmtrT8AbS/xJJ1WxeN274sE9
R9fk+uoGrt9o73MAOHRdkFWQlh09HeHcUWXhM9PuuXABPxSiE263aVU3STbVNwRM0WGb2o11jac9
f3XnyULrrYCTX4AHfKhLxcFxMFU2SE+s9DRHAU7EUqcoYvdIk3/6pyzQy3vBvhL4FEiZxdQcxDVJ
pCvLrvaE4zO+gsBR8QjqK3Nq5iE2wZzd6B17cKcxoaKncNwt5ey1wg0WU5tvPe9uZPCoITuwfC/e
TLB7cYP47kREzyfiz51AbF7u8OohIMOTRfxkEfo+IXW9On7R2rl+4NuBsBfIy+tHTzdLZzS9cKjG
+v6+uugRA9ANyO4ylYvDJwqxY5x/L1QNpZ3Xfk6lGeMR7ANbdaVPH7dnMujo1Qyiim2r0BzVZvxf
O4g51qz1EJ8ARaXBFtCeWjeFL53iQ3uzGBYmavT8lUUpmQ5tjuE3vB0E3muCukK1d9NUl5FbsAM5
AX1WkLfA2oYDQeEjeCikm0xo0b7qbAv/kYvHlen7Nhd7WH7z9V14ugI+WJY/QFCPmE6rP5Cp9rLM
YxfmAfv19/Pfw3nvLr57NJV0r2FaYSiFhczrhN+gSWzKY5tqMCKJW0GRW96Gn/pm8OAHiyPqpvom
vGv63P+uuesWgZ252d3tzd0/4OXSQPfdzy9DNOAwTxPiQTXjrcAO6wJXjCe6qGA4Zak/SH63E850
j1a4D4wpYcAEKLGpxt5ozU0yd79jhcwh32Hqnucb1NWdafcOOHY5/iGKlqsB8Lk94kslHgvNgew3
0qVUUy4anMrVSk0TvBBtSsEGFbj0vEjjvr6j+6xkonbG68RbQwCE4SZdiuhWGwNjQEDDF7NyfYhz
PYSgoamK0inLVOmCM0jaxQVwMWeOqL/JTHJd5SiTmPBTTVVWEBWM9PWdXLgwVOvZAjWJjE2ibgzq
psdE3+aIQ3C1jDkDyPkqjjQ86gAh+GiQczcRFypPp/Yd8Muz9qxzOrEMIfNmI6ukbu/58LdJU/Gd
MwKd/MQFdlIVrWR2OMVFLLX84SCFyQL7/SvtZHtBxh0HnMdW6z2craiHToE95uy0Y3sMN6df7D1f
7v0yC7oV1jXytlnLffZuE1gKc2kV6UqdO+C3+iIdvp6RM5voJjh8BHLvnrvyy3OtWmMnxaLhPHMV
Q//mFDy6S7Z46EK0Hhf0rz7rOPp2fF9vWGbphQZ7GlsqatdqUPG0o43biBor6e6JqP1q6UdG1B78
B0bU+vo6MDgaH60PBuun7wm9WU24d8G1jAB9pkAk3Nnr3CRmTGbkViND2Jt+Gdm7WFlnOkecjJlA
juxfEkQg+M435ZZuencymXGHIlpfuujx9xcfXp9eEC2ml6dv/uP0e6pWwfRxx2Y9OOWQF4dM7UOv
LtZNP+gKg6HBW2wHLlfkwx0aQu99b3N2AMLwQZ6hBe0qMvf1vg69AxH9ToD43dPuQN2nsgch9/wz
XXzv1hV0ClgD/ZSrDc0vZ8vWPDI7FywO7c6Eed8mk7WM9nJt+xbOqfvrqxPtt+rr+PbkAce2+pRW
AHPIyF82hWyOEthEJTsq3RvyqWQWj2GZqyxACufSuVKNblNjULV/FX8Fyi7BfTB2GCf2Wltqx+ly
Ze9rxr2wuYwNQbxzUKP+/FxhX8hsDxWCgBWevjCMETH6T28w2e3YJ0pcHdKJy0NUNtf2F66ZdnL/
luKma20v3lFcucHbTtB42WTuRqrt0+tAzh9l54ulU+IPmu8I6NyKpwL2Rp+JFeJsJ0IIJPWGIVYN
Eh31rVkO8mg3HewNrZ6Jw33n8dzzaEI8399w0Tnypnu84B7qnh6qMaeeHAuM5Wv7DtqJ7wgyb+8I
umnHcz5wT1Ff8Apfb6+eH9tkK/I7vnYUCZXZjBzDfuWUqd15u5vTnZilmlAdE8ZszjFN3eLagco+
wb4Yp1ervycOMvu+DGnkvR8u8jE9vFurR11MLesdw5RE9ESNaVrO6QaNu30y7k+3VVt9IHxS4wFA
eioQYCGYnm50Kud2XP4aPdNR4ayhezHdjHvoSAVV0fgcwT2M79fi1+1OJywf1J1RNP25QZcD9ZKD
cLPvwK3GXkpkv0noTr3lgz0uAB9WHe7//AH9+/VdtvuLu/xq2+rl4AEp9mWxJBArJTokMo9jMDKg
NyPS1lhHbgQdL6Fo6egyVDs35At0/KjMEG+9pQCDnNmp9gCsUQj+D1/Qrqc=
""")
##file ez_setup.py
EZ_SETUP_PY = convert("""
eJzNWmtv49a1/a5fwSgwJGE0NN8PDzRFmkyBAYrcIo8CFx5XPk+LHYpUSWoctch/v+ucQ1KkZDrt
RT6UwcQ2ebjPfq6195G+/upwanZlMZvP538sy6ZuKnKwatEcD01Z5rWVFXVD8pw0GRbNPkrrVB6t
Z1I0VlNax1qM16qnlXUg7DN5EovaPLQPp7X192PdYAHLj1xYzS6rZzLLhXql2UEI2QuLZ5VgTVmd
rOes2VlZs7ZIwS3CuX5BbajWNuXBKqXZqZN/dzebWbhkVe4t8c+tvm9l+0NZNUrL7VlLvW58a7m6
sqwS/zhCHYtY9UGwTGbM+iKqGk5Qe59fXavfsYqXz0VeEj7bZ1VVVmurrLR3SGGRvBFVQRrRLzpb
utabMqzipVWXFj1Z9fFwyE9Z8TRTxpLDoSoPVaZeLw8qCNoPj4+XFjw+2rPZT8pN2q9Mb6wkCqs6
4vdamcKq7KDNa6OqtTw8VYQP42irZJi1zqtP9ey7D3/65uc//7T964cffvz4P99bG2vu2BFz3Xn/
6Ocf/qz8qh7tmuZwd3t7OB0y2ySXXVZPt21S1Lc39S3+63e7nVs3ahe79e/9nf8wm+15uOWkIRD4
Lx2xxfmNt9icum8PJ8/2bfH0tLizFknieYzI1HG90OFJkNA0jWgsvZBFImJksX5FStBJoXFKEhI4
vghCx5OUJqEQvnTTwI39kNEJKd5YlzAK4zhMeUIinkgWBE7skJQ7sRd7PE1fl9LrEsAAknA3SrlH
RRS5kvgeiUToiUAm3pRF/lgXSn2XOZLFfpqSyA/jNI1DRngqQ+JEbvKqlF4XPyEJw10eCcY9zwti
6capjDmJolQSNiElGOsSeU4QEi8QPBCuoCyOpXD8lJBARDIW4atSzn5h1CNuEkKPhBMmJfW4C30c
n/rUZcHLUthFvlBfejQM/ZRHiGss44DwOHU9CCKpk0xYxC7zBfZwweHJKOYe96QUbuA4qR8F0iPB
RKSZ64yVYXCHR2jIfeJ4YRSEEeLDXD9xHBI7qfO6mF6bMOZ4ETFKaeLEscfClIQ+SQLfJyHnk54x
YsJODBdBRFgCX6YxS9IwjD0RiiREOgqasPh1MVGvTSJQSURIJ4KDPCaiwA0gzYORcPhEtAEqY994
lAiCGnZ9jvdRRl4iYkpCGhJoxMXrYs6R4pGfypQ6EBawwAvS2PEDLpgnmMO8yUi5Y99EAUsD6VMZ
kxhZ6AuW+MKhHsIdByn1XhfT+4ZKknqu41COMHHUBCQJzn0EPgqcJJoQc4Ez0nGigMqIEI/G3IFa
8GyAxHYSN2beVKAucCZyIzf1hGB+KINYIGpuxHhEXA9SvXhKygXOSDcBQAF8uUSqEC9MWQop0uUx
jRM5gVbsAmeEI3gcRInH0jShksbwdOIgex3EPHangu2Pg0SokG4kOYdhYRi6QRK4LAZ+8TRJo3BK
ygVaUYemru8SRqjvOXAGcC6WQcBCAEXsylel9BYhSST2jHggqfRRUVSmQcQcuAqoJ6YSJhhblCi0
BvD7HuM0ZbFHmQwAX14kvYTIKbQKxxYJkUqeOFAHBYmMlb4ApocxAIMnbjQV6XBsEZHAKi7BKm7s
uELAuTHIKaQMhEeiKZQJL2KUcF9GAISAMUKS2A2QONyPKWPc5yGfkBKNLULBJGD5xHUjMFGSBLEH
EWDMMEhR2lPAGV2wGwsjIsOYwr/oHlANkQNDgsBHgYVkChuisUXUkwmJQw9kD9ilPkjaQai5CCVa
idCfkBJfwJ2DGMmUcOaTyA1F6LohyhAtRQIInMyX+IIJSCLTMAALcGC5I2kUM+lKD2HAI2+qAuKx
RQE4lgBvJVoGFGDgB67rSi4S38W/eEqX5KIbclQv5KXwSMrBHyoFAeCJ76jGynldSm8Ro8RPgA3o
OYLEZ47KWWQbnM3ALJM0kIwtcmPPjQFyCHTKmRs6YeqQMKG+QJ2n4VSk07FF0J0FDpoZV3mYBmkk
AiapcBLYypypSKcXyIAkQ2MHbvWThEdAJyKEEwG8WOQHU/1dK6W3SAqE1hchcWPqegxhYmHg0hjc
C+YXU0ySjvmIEZSNKxVqEk9wAJOb+mC2mIaphx4HUn6dDSYCjDf1rKlOd2bg2pF6l2e0m7fQu8/E
L0xg1Pio73xQI1G7Fg+H62ZcSGv7heQZun2xxa0ldNoWmAfXlhoAVnfagExa3X01M3bjgXmoLp5h
tmgwLigR+kV7J34xdzHfdcsgp1351aaXct+JfjjLUxfmLkyD79+r6aRuuKgw1y1HK9Q1Vya1FrTz
4Q2mMIIxjH9lWcu/lHWd0Xww/mGkw9/7P6zmV8JuejNHj1ajv5Q+4pesWXrmfoXgVoV2l3HoxXCo
F7Xj1eZimFv3am0pqcVmMNCtMSluMapuytpmxwq/mWTqX+AiJ6eNG87aIGFs/ObYlHv4gWG6PGEU
Lfhtb/bgpEDN9XvyGbHE8PwFriLKQXCeMu1Amp0Z5x9bpR+telcec66mWWJ8PZTWTebFcU9FZTU7
0lgYhHvBWpaagAvlXUti6u2VOhZcvyKsx5EjHi010i6fdxnbdbsLaK2OJow8a3G7WNlQ0njpUW2p
5AyOMXaiGh2QPGeYuek5EwRfIyNNgmuVixL+yCtB+OmsPvb4KAfqabfr7dqzCS2mabXU0qjQqrQO
0ScWrCx4bXzTqXEgSBTlVHhElVXWZAhd8TQ4zzARb+0vC6HPE8zZCDd6wallrnz44vmI0rI9bBCt
MH2WU5VH7CSMKqbOiLUXdU2ehDngOBfd46POl4pktbB+PNWN2H/4RfmrMIEoLNLgnjnZIFRBizJe
paAyxpx62F2G6p/PpN4aFIL9G2tx+Py0rURdHism6oVCGLX9vuTHXNTqlGQAoJePTU2g6jjyoHXb
cnVGEpVym3PRDOqy9dhFCXZlt74otDMGdEViw7OiapbOWm0yALkWqPud3g1Pd2h3zLdtA7PVwLxR
MkyAAOyXskYO0g9fQPj+pQ6Qhg5pH13vMBJtt8m1nJ81fr+Zv2ldtXrXyh6qMBbwV7Py27KQecaa
QRxgokFOBstluVzduw9DYhgmxX9KBPOfdufCmCiF5fvNTb3qy7wrb33K+akYc8GckWLRqGrrqwdw
ok72dPm0J3mqkI5FgSy3rb/kAsnTLb+Sp8pLVTmwScCWTkOZVXWzBmGoSllAwqnLCuvtzwPlF/aF
vE/Fp2L57bGqIA1IbwTcVBeUtgKhndNc2KR6qu+dh9fp7MWwfpchZzN6VBT7fdn8qQRwD3KI1PWs
LcR8/OZ6WKv3F5X+oF75Gk7RXFB+HtHpMHsNr75UxL83uapSR6aOWPW7FyhUFy05U4CVl8w0IBos
jQ1ZY86DdUPxX0qpBpDViX9Hqb/FqOqe2vWaTg3KP54ZcoIFS8N9HfUpCmHNkeRnI1pKGdNG94FC
BWahHjJrh3zMTdJ23enGGkDX25sanfZNrRrt+bAWLg68TeJD7pAplM+sN+OGsCZfBLTfoAE3FPD3
MiuWHWF0S424umJKnO6Kvwd3d420Qp/uddRd3dRLI3Z1p4rhmy9lphLoIIhix06dui+2EXqrS6ci
hyDljbrzUl4+jVap1lvFZfyuurDSfiZVsVR+fvv7XebzkBYrW3CuX8ryG50S6nOSpfgiCvUHzDlA
2dlO5AfV5X002TboNPpUQSui8l99krNUrpgB5dcWoGqmbu1RzoWAI/EK6lD1uQBd8awglmB4rWv9
9hDWNSjbs3ZLoHHb0Zx3hMq8y2Z7NlsCEcWd8rAWsydsp5orXgrDNTuEF0o0z2X1ud10bR0MYZS0
Ie2ncAopNErcAEwVisADTPfoegEknyuxrZxKtAQ0NMBe/Z5RRFKsr1JmALpX7ZPOsrWqpqvX0D/o
ZG0yNUe2bVIuxOGd+bG86LTG2dnBsKa6eq63uKAyXXItPtj4WR5Esbxa9rX1A1r82+cqawA+iDH8
q5trYPjntfog8FlFT3UArFJlCGhkZVUddXLk4kKYjvswPVTP3Qi9vsPE7mo/VJsauWGArcaP5Wqs
sUERbY3BivX8mc7hTjywtR1m6O5fwuinRsC7SwjABnd6F5aXtViuriCibu600OHzls060IKCufql
g63Zv3Mp/t4j05foQb6spxj7zLkfX/uIVHPsB3RL7aqOIF5qnS8+en6tbzajQo/VVxLPa14fJ/Rc
7lx3WeOhYTQz6Jip0hhMCqzc72GoPWoLu8Mb0o5f3dXGSLs4BxdoP6/eqLOVh5VO02exqHRaC0vR
+G+mirJU+fmCq5Ta1xyCRccC897nZW+WyGsxiMawF7e329Zb2621wQDo2I7tLv7jrv9/AfAaXNUU
TOsyF6jViUG46+NBJqZXv+rRK7Evv2i81ZEw33DQ8y6YowH05r+BuxfN92SX3RbVP8bNymDOGnY7
16PfvzG+4ecrzfzkjPZya/H/ScnXyqwX/JtSrrL5pbrryu1hPKFrZzsrJD6sUuyPwDGdKerJyxmq
dvmdHNCrrzU/+2W0pQ6gSvPl/Mertmi+7hBlDhB80kRUqcNeJCGapHNCz1cvCFwsf0A/Ne++jGMf
TuOJcm6+ZnP9TRR7tWjHreOhZ6huiKnPAP2zfmqpIqHHLG/emnNhyHxSs+JJYfIwj6t2AlLdVneO
3Is9u0R33ef+Wv2pVizPfbUW0rGhps1FRRfnZ/2xsnr3oT2Slh2tvngsLXu6M0OgIen7ufrjprrD
vzXQAgNE22ualqzbyAb97uvl6qF/2a5hcU+eBzVWzOdmVjA0PXQMQoAhsulmBv39oU13134SjSlb
dX85nKW3umfYbtu8713Sylhb2i3v2qaoc8C7S2P3pME8uIGedi1IxXbL+adi+P2fT8Xy/m+/PrxZ
/TrXDcpqOMjotwdo9AJmg8r1N7BySygc+Gp+XaYdJhpV8f/7Oy3Y1s330l09YBDTjnyjn5qHGF7x
6O7hZfMXz21OyLZB6lUfOGAGMzo/bjaL7VaV7Ha76D/1yJVEqKmr+L2nCbH7+959wDtv38JZplQG
BDaonX65d/fwEjNqlDjLVIvM9X+XVxF7
""")
##file distribute_setup.py
DISTRIBUTE_SETUP_PY = convert("""
eJztG2tz2zbyu34FTh4PqYSi7TT3GM+pM2nj9DzNJZnYaT8kHhoiIYk1X+XDsvrrb3cBkCAJyc61
dzM3c7qrIxGLxWLfuwCP/lTs6k2eTabT6Xd5Xld1yQsWxfBvvGxqweKsqnmS8DoGoMnliu3yhm15
VrM6Z00lWCXqpqjzPKkAFkdLVvDwjq+FU8lBv9h57JemqgEgTJpIsHoTV5NVnCB6+AFIeCpg1VKE
dV7u2DauNyyuPcaziPEoogm4IMLWecHylVxJ4z8/n0wYfFZlnhrUBzTO4rTIyxqpDTpqCb7/yJ2N
dliKXxsgi3FWFSKMV3HI7kVZATOQhm6qh98BKsq3WZLzaJLGZZmXHstL4hLPGE9qUWYceKqBuh17
tGgIUFHOqpwtd6xqiiLZxdl6gpvmRVHmRRnj9LxAYRA/bm+HO7i99SeTa2QX8TekhRGjYGUD3yvc
SljGBW1PSZeoLNYlj0x5+qgUE8W8vNLfql37tY5Tob+vspTX4aYdEmmBFLS/eUk/Wwk1dYwqI0eT
fD2Z1OXuvJNiFaP2yeFPVxcfg6vL64uJeAgFkH5Jzy+QxXJKC8EW7F2eCQObJrtZAgtDUVVSVSKx
YoFU/iBMI/cZL9fVTE7BD/4EZC5s1xcPImxqvkyEN2PPaaiFK4FfZWag90PgqEvY2GLBTid7iT4C
RQfmg2hAihFbgRQkQeyF/80fSuQR+7XJa1AmfNykIquB9StYPgNd7MDgEWIqwNyBmBTJdwDmmxdO
t6QmCxEK3OasP6bwOPA/MG4YHw8bbHOmx9XUYccIOIJTMMMhtenPHQXEOviiVqxuhtLJK78qOFid
C98+BD+/urz22IBp7Jkps9cXb159ensd/HTx8ery/TtYb3rq/8V/8XLaDn36+BYfb+q6OD85KXZF
7EtR+Xm5PlFOsDqpwFGF4iQ66fzSyXRydXH96cP1+/dvr4I3r368eD1YKDw7m05MoA8//hBcvnvz
Hsen0y+Tf4qaR7zm85+kOzpnZ/7p5B340XPDhCft6HE1uWrSlINVsAf4TP6Rp2JeAIX0e/KqAcpL
8/tcpDxO5JO3cSiySoG+FtKBEF58AASBBPftaDKZkBorX+OCJ1jCvzNtA+IBYk5IyknuXQ7TYJ0W
4CJhy9qb+OldhN/BU+M4uA1/y8vMdS46JKADx5XjqckSME+iYBsBIhD/WtThNlIYWi9BUGC7G5jj
mlMJihMR0oX5eSGydhctTKD2obbYm+yHSV4JDC+dQa5zRSxuug0ELQD4E7l1IKrg9cb/BeAVYR4+
TECbDFo/n97MxhuRWLqBjmHv8i3b5uWdyTENbVCphIZhaIzjsh1kr1vddmamO8nyuufAHB2xYTlH
IXcGHqRb4Ap0FEI/4N+Cy2LbMoevUVNqXTGTE99YeIBFCIIW6HlZCi4atJ7xZX4v9KRVnAEemypI
zZlpJV42MTwQ67UL/3laWeFLHiDr/q/T/wM6TTKkWJgxkKIF0XcthKHYCNsJQsq749Q+HZ//in+X
6PtRbejRHH/Bn9JA9EQ1lDuQUU1rVymqJqn7ygNLSWBlg5rj4gGWrmi4W6XkMaSol+8pNXGd7/Mm
iWgWcUraznqNtqKsIAKiVQ7rqnTYa7PaYMkroTdmPI5EwndqVWTlUA0UvNOFyflxNS92x5EP/0fe
WRMJ+ByzjgoM6uoHRJxVDjpkeXh2M3s6e5RZAMHtXoyMe8/+99E6+OzhUqdXjzgcAqScDckHfyjK
2j31WCd/lf326x4jyV/qqk8H6IDS7wWZhpT3oMZQO14MUqQBBxZGmmTlhtzBAlW8KS1MWJz92QPh
BCt+JxbXZSNa75pyMvGqgcJsS8kz6ShfVnmChoq8mHRLGJoGIPiva3Jvy6tAckmgN3WKu3UAJkVZ
W0VJLPI3zaMmERVWSl/a3TgdV4aAY0/c+2GIprdeH0Aq54ZXvK5LtwcIhhJERtC1JuE4W3HQnoXT
UL8CHoIo59DVLi3EvrKmnSlz79/jLfYzr8cMX5Xp7rRjybeL6XO12sxC1nAXfXwqbf4+z1ZJHNb9
pQVoiawdQvIm7gz8yVBwplaNeY/TIdRBRuJvSyh03RHE9Jo8O20rMnsORm/G/XZxDAUL1PooaH4P
6TpVMl+y6RgftlJCnjk11pvK1AHzdoNtAuqvqLYAfCubDKOLzz4kAsRjxadbB5yleYmkhpiiaUJX
cVnVHpgmoLFOdwDxTrscNv9k7MvxLfBfsi+Z+31TlrBKspOI2XE5A+Q9/y98rOIwcxirshRaXLsv
+mMiqSz2ARrIBiZn2PfngZ+4wSkYmamxk9/tK2a/xhqeFEP2WYxVr9tsBlZ9l9dv8iaLfrfRPkqm
jcRRqnPIXQVhKXgtht4qwM2RBbZZFIarA1H698Ys+lgCl4pXygtDPfy6a/G15kpxtW0kgu0leUil
C7U5FePjWnbuMqjkZVJ4q2i/ZdWGMrMltiPveRL3sGvLy5p0KUqwaE6m3HoFwoXtP0p6qWPS9iFB
C2iKYLc9ftwy7HG44CPCjV5dZJEMm9ij5cw5cWY+u5U8ucUVe7k/+BdRCp1Ctv0uvYqIfLlH4mA7
Xe2BOqxhnkXU6yw4BvqlWKG7wbZmWDc86TqutL8aK6na12L4jyQMvVhEQm1KqIKXFIUEtrlVv7lM
sKyaGNZojZUGihe2ufX6twDVAVs/veTYxzJs/Rs6QCV92dQue7kqCpI9b7HI/I/fC2DpnhRcg6rs
sgwRHexLtVYNax3kzRLt7Bx5/uo+j1GrC7TcqCWny3BGIb0tXlrrIR9fTT3cUt9lS6IUl9zR8BH7
KHh0QrGVYYCB5AxIZ0swuTsPO+xbVEKMhtK1gCaHeVmCuyDrGyCD3ZJWa3uJ8ayjFgSvVVh/sCmH
CUIZgj7waJBRSTYS0ZJZHptul9MRkEoLEFk3NvKZShKwliXFAAJ0iT6AB/yWcAeLmvBd55QkDHtJ
yBKUjFUlCO66Au+1zB/cVZOF6M2UE6Rhc5zaqx579uxuOzuQFcvmf1efqOnaMF5rz3Ilnx9KmIew
mDNDIW1LlpHa+ziXraRRm938FLyqRgPDlXxcBwQ9ft4u8gQcLSxg2j+vwGMXKl2wSHpCYtNNeMMB
4Mn5/HDefhkq3dEa0RP9o9qslhnTfZhBVhFYkzo7pKn0pt4qRSeqAvQNLpqBB+4CPEBWdyH/Z4pt
PLxrCvIWK5lYi0zuCCK7DkjkLcG3BQqH9giIeGZ6DeDGGHahl+44dAQ+DqftNPMsPa1XfQizXap2
3WlDN+sDQmMp4OsJkE1ibAjIGRDFMp8zNwGGtnVswVK5Nc07eya4svkh0u2JIQZYz/Quxoj2TXio
rNlmFZp2cUPeGzxWqEZ7lggysdWRGZ9ClHX8929f+8cVHmnh6aiPf0ad3Y+ITgY3DCS57ClKEjVO
1eTF2hZ/urZRtQH9sCU2ze8hWQbTCMwOuVskPBQbUHahO9WDMB5X2Gscg/Wp/5TdQSDsNd8h8VJ7
MObu168V1h09/4PpqL4QYDSC7aQA1eq02Vf/ujjXM/sxz7BjOMfiYOju9eIjb7kE6d+ZbFn1y6OO
A12HlFJ489DcXHfAgMlIC0BOqAUiEfJINm9qTHrRe2z5rrM5XecMEzaDPR6Tqq/IH0hUzTc40Tlz
ZTlAdtCDla6qF0FGk6Q/VDM8ZjmvVJ1txdGRb++4AabAhy7KY31qrMp0BJi3LBG1UzFU/Nb5DvnZ
KpriN+qaa7bwvEHzT7Xw8SYCfjW4pzEckoeC6R2HDfvMCmRQ7ZreZoRlHNNteglOVTbuga2aWMWJ
PW1056q7yBMZbQJnsJO+P97na4beeR+c9tV8Bel0e0SM6yumGAEMQdobK23burWRjvdYrgAGPBUD
/5+mQESQL39xuwNHX/e6CygJoe6Ske2xLkPPuUm6v2ZKz+Wa5IJKWoqpx9ywRdiaObqxMHZBxKnd
PfEITE5FKvfJpyayIuw2qiKxYUXq0Kbq/CAs8KWnc+6+qwKepO0rnN6AlJH/07wcO0Cr55HgB/zO
0Id/j/KXkXw0q0uJWgd5OC2yuk8C2J8iSVbVbU60n1WGjHyY4AyTksFW6o3B0W4r6vFjW+mRYXTK
hvJ6fH+PmdjQ0zwCPuvl823Q63K6IxVKIAKFd6hKMf6y5dd7FVRmwBc//DBHEWIIAXHK71+hoPEo
hT0YZ/fFhKfGVcO3d7F1T7IPxKd3Ld/6jw6yYvaIaT/Kuf+KTRms6JUdSlvslYca1Pol+5RtRBtF
s+9kH3NvOLOczCnM1KwNilKs4gdXe/ouuLRBjkKDOpSE+vveOO839oa/1YU6DfhZf4EoGYkHI2w+
Pzu/abMoGvT0tTuRNakoubyQZ/ZOEFTeWJX51nxewl7lPQi5iWGCDpsAHD6sWdYVtplRiRcYRiQe
S2OmzgslGZpZJHHtOrjOwpl9ng9O5wwWaPaZiylcwyMiSRWWhpIK64FrApopbxF+K/lj7yH1yK0+
E+RzC5VfS2lHIzC3qUTp0NFCdzlWHRViG9fasbGt0s62GIbUyJGqDpX9KuR0oGicO+rrkTbb3Xsw
fqhDdcS2wgGLCoEES5A3sltQSONWT5QLyZRKiBTPGczj0XGXhH5u0Vz6pYK6d4RsGG/IiEOYmMLk
beVj1tY/0/c/yvNeTLbBK5bgjHrliT1xH2gLxXzEsCA3rjyu4tz1rhAjvmGr0jhIevXh8g8mfNYV
gUOEoJB9ZTRvc5nvFpgliSzM7aI5YpGohbo1h8EbT+LbCIiaGg1z2PYYbjEkz9dDQ30233kwih65
NGi3bodYVlG8oEMF6QtRIckXxg9EbFHm93EkIvn6Q7xS8OaLFpXRfIjUhbvU6w41dMfRrDj6gcNG
mV0KChsw1BsSDIjkWYjtHuhYW+WNcKBlA/XH/hqll4aBVUo5VuZ1PbUlyyZ8kUUqaNCdsT2byuby
Nl8nvB4daN/7+2hWqerJijTAYfOwlqaKceFzP0n7MiYLKYcTKEWiuy//RJ3rdyO+Igfdm4QeaD4P
eNOfN24/m7rRHt2hWdP5snR/dNZr+PtMDEXbz/5/rzwH9NJpZyaMhnnCmyzcdClc92QYKT+qkd6e
MbSxDcfWFr6RJCGo4NdvtEioIi5Yyss7PMvPGacDWN5NWDat8bSp3vk3N5gufHbmoXkjm7IzvGKT
iLlqAczFA72/BDnzPOUZxO7IuTFCnMZ4etP2A7BpZiaYn/tvXNyw5+20icZB93OsL9O03DMuJVci
WcnG+WLqTz2WCrw4UC0wpnQnM+oiNR0EKwh5zEiXAErgtmQt/gzlFSN9j1jvr7vQgD4Z3/XKtxlW
1Wke4Vth0v9js58AClGmcVXRa1rdkZ1GEoMSUsMLZB5VPrvFDTjtxRB8RQuQrgQRMrpGDYQqDsBX
mKx25KAnlqkpT4iIFF+5o8siwE8imRqAGg/22JUWg8Yud2wtaoXLnfVvUKiELMyLnfkbCjHI+NWN
QMlQeZ1cAyjGd9cGTQ6APty0eYEWyygf0AMYm5PVpK0+YCXyhxBRFEivclbDqv898EtHmrAePepC
S8VXAqUqBsf6HaTPC6hAI1et0Xdlmq4FccvHPwcB8T4Z9m1evvwb5S5hnIL4qGgC+k7/enpqJGPJ
ylei1zil8rc5xUeB1ipYhdw3STYN3+zpsb8z94XHXhocQhvD+aJ0AcOZh3hezKzlQpgWBONjk0AC
+t3p1JBtiNSVmO0ApaTetR09jBDdid1CK6CPx/2gvkizgwQ4M48pbPLqsGYQZG500QNwtRbcWi2q
LokDU7kh8wZKZ4z3iKRzQGtbQwu8z6DR2TlJOdwAcZ2MFd7ZGLCh88UnAIYb2NkBQFUgmBb7b9x6
lSqKkxPgfgJV8Nm4AqYbxYPq2nZPgZAF0XLtghJOlWvBN9nwwpPQ4SDlMdXc9x7bc8mvCwSXh153
JRW44NVOQWnnd/j6v4rxw5fbgLiY7r9g8hRQRR4ESGoQqHcpie42ap6d38wm/wIwBuVg
""")
##file activate.sh
ACTIVATE_SH = convert("""
eJytVU1v4jAQPW9+xTT0ANVS1GsrDlRFAqmFqmG72m0rY5IJsRRslDiktNr/vuMQ8tFQpNU2B4I9
H36eeW/SglkgYvBFiLBKYg0LhCRGD1KhA7BjlUQuwkLIHne12HCNNpz5kVrBgsfBmdWCrUrA5VIq
DVEiQWjwRISuDreW5eE+CtodeLeAnhZEGKMGFXqAciMiJVcoNWx4JPgixDjzEj48QVeCfcqmtzfs
cfww+zG4ZfeD2ciGF7gCHaDMPM1jtvuHXAsPfF2rSGeOxV4iDY5GUGb3xVEYv2aj6WQ0vRseAlMY
G5DKsAawwnQUXt2LQOYlzZoYByqhonqoqfxZf4BLD97i4DukgXADCPgGgdOLTK5arYxZB1xnrc9T
EQFcHoZEAa1gSQioo/TPV5FZrDlxJA+NzwF+Ek1UonOzFnKZp6k5mgLBqSkuuAGXS4whJb5xz/xs
wXCHjiVerAk5eh9Kfz1wqOldtVv9dkbscfjgjKeTA8XPrtaNauX5rInOxaHuOReNtpFjo1/OxdFG
5eY9hJ3L3jqcPJbATggXAemDLZX0MNZRYjSDH7C1wMHQh73DyYfTu8a0F9v+6D8W6XNnF1GEIXW/
JrSKPOtnW1YFat9mrLJkzLbyIlTvYzV0RGXcaTBfVLx7jF2PJ2wyuBsydpm7VSVa4C4Zb6pFO2TR
huypCEPwuQjNftUrNl6GsYZzuFrrLdC9iJjQ3omAPBbcI2lsU77tUD43kw1NPZhTrnZWzuQKLomx
Rd4OXM1ByExVVkmoTwfBJ7Lt10Iq1Kgo23Bmd8Ib1KrGbsbO4Pp2yO4fpnf3s6MnZiwuiJuls1/L
Pu4yUCvhpA+vZaJvWWDTr0yFYYyVnHMqCEq+QniuYX225xmnzRENjbXACF3wkCYNVZ1mBwxoR9Iw
WAo3/36oSOTfgjwEEQKt15e9Xpqm52+oaXxszmnE9GLl65RH2OMmS6+u5acKxDmlPgj2eT5/gQOX
LLK0j1y0Uwbmn438VZkVpqlfNKa/YET/53j+99G8H8tUhr9ZSXs2
""")
##file activate.fish
ACTIVATE_FISH = convert("""
eJydVm1v4jgQ/s6vmA1wBxUE7X2stJVYlVWR2lK13d6d9laRk0yIr8HmbIe0++tvnIQQB9pbXT5A
Ys/LM55nZtyHx5RrSHiGsMm1gRAh1xhDwU0Kng8hFzMWGb5jBv2E69SDs0TJDdj3MxilxmzPZzP7
pVPMMl+q9bjXh1eZQ8SEkAZULoAbiLnCyGSvvV6SC7IoBcS4Nw0wjcFbvJDcjiuTswzFDpiIQaHJ
lQAjQUi1YRmUboC2uZJig8J4PaCnT5IaDcgsbm/CjinOwgx1KcUTMEhhTgV4g2B1fRk8Le8fv86v
g7v545UHpZB9rKnp+gXsMhxLunIIpwVQxP/l9c/Hq9Xt1epm4R27bva6AJqN92G4YhbMG2i+LB+u
grv71c3dY7B6WtzfLy9bePbp0taDTXSwJQJszUnnp0y57mvpPcrF7ZODyhswtd59+/jdgw+fwBNS
xLSscksUPIDqwwNmCez3PpxGeyBYg6HE0YdcWBxcKczYzuVJi5Wu915vn5oWePCCoPUZBN5B7IgV
MCi54ZDLG7TUZ0HweXkb3M5vFmSpFm/gthhBx0UrveoPpv9AJ9unIbQYdUoe21bKg2q48sPFGVwu
H+afrxd1qvclaNlRFyh1EQ2sSccEuNAGWQwysfVpz1tPajUqbqJUnEcIJkWo6OXDaodK8ZiLdbmM
L1wb+9H0D+pcyPSrX5u5kgWSygRYXCnJUi/KKcuU4cqsAyTKZBiissLc7NFwizvjxtieKBVCIdWz
fzilzPaYyljZN0cGN1v7NnaIPNCGmVy3GKuJaQ6iVjE1Qfm+36hglErwmnAD8hu0dDy4uICBA8ZV
pQr/q/+O0KFW2kjelu9Dgb9SDBsWV4F4x5CswgS0zBVlk5tDMP5bVtUGpslbm81Lu2sdKq7uNMGh
MVQ4fy9xhogC1lS5guhISa0DlBWv0O8odT6/LP+4WZzDV6FzIkEqC0uolGZSZoMnlpxplmD2euaT
O4hkTpPnbztDccey0bhjDaBIqaWQa0uwEtQEwtyU56i4fq54F9IE3ORR6mKriODM4XOYZwaVYLYz
7SPbKkz4i7VkB6/Ot1upDE3znNqYKpM8raa0Bx8vfvntJ32UENsM4aI6gJL+jJwhxhh3jVIDOcpi
m0r2hmEtS8XXXNBk71QCDXTBNhhPiHX2LtHkrVIlhoEshH/EZgdq53Eirqs5iFKMnkOmqZTtr3Xq
djvPTWZT4S3NT5aVLgurMPUWI07BRVYqkQrmtCKohNY8qu9EdACoT6ki0a66XxVF4f9AQ3W38yO5
mWmZmIIpnDFrbXakvKWeZhLwhvrbUH8fahhqD0YUcBDJjEBMQwiznE4y5QbHrbhHBOnUAYzb2tVN
jJa65e+eE2Ya30E2GurxUP8ssA6e/wOnvo3V78d3vTcvMB3n7l3iX1JXWqk=
""")
##file activate.csh
ACTIVATE_CSH = convert("""
eJx9U11vmzAUffevOCVRu+UB9pws29Kl0iq1aVWllaZlcgxciiViItsQdb9+xiQp+dh4QOB7Pu49
XHqY59IgkwVhVRmLmFAZSrGRNkdgykonhFiqSCRW1sJSmJg8wCDT5QrucRCyHn6WFRKhVGmhKwVp
kUpNiS3emup3TY6XIn7DVNQyJUwlrgthJD6n/iCNv72uhCzCpFx9CRkThRQGKe08cWXJ9db/yh/u
pvzl9mn+PLnjj5P5D1yM8QmXlzBkSdXwZ0H/BBc0mEo5FE5qI2jKhclHOOvy9HD/OO/6YO1mX9vx
sY0H/tPIV0dtqel0V7iZvWyNg8XFcBA0ToEqVeqOdNUEQFvN41SumAv32VtJrakQNSmLWmgp4oJM
yDoBHgoydtoEAs47r5wHHnUal5vbJ8oOI+9wI86vb2d8Nrm/4Xy4RZ8R85E4uTZPB5EZPnTaaAGu
E59J8BE2J8XgrkbLeXMlVoQxznEYFYY8uFFdxsKQRx90Giwx9vSueHP1YNaUSFG4vTaErNSYuBOF
lXiVyXa9Sy3JdClEyK1dD6Nos9mEf8iKlOpmqSNTZnYjNEWiUYn2pKNB3ttcLJ3HmYYXy6Un76f7
r8rRsC1TpTJj7f19m5sUf/V3Ir+x/yjtLu8KjLX/CmN/AcVGUUo=
""")
##file activate.bat
ACTIVATE_BAT = convert("""
eJyFUkEKgzAQvAfyhz0YaL9QEWpRqlSjWGspFPZQTevFHOr/adQaU1GaUzI7Mzu7ZF89XhKkEJS8
qxaKMMsvboQ+LxxE44VICSW1gEa2UFaibqoS0iyJ0xw2lIA6nX5AHCu1jpRsv5KRjknkac9VLVug
sX9mtzxIeJDE/mg4OGp47qoLo3NHX2jsMB3AiDht5hryAUOEifoTdCXbSh7V0My2NMq/Xbh5MEjU
ZT63gpgNT9lKOJ/CtHsvT99re3pX303kydn4HeyOeAg5cjf2EW1D6HOPkg9NGKhu
""")
##file deactivate.bat
DEACTIVATE_BAT = convert("""
eJxzSE3OyFfIT0vj4spMU0hJTcvMS01RiPf3cYkP8wwKCXX0iQ8I8vcNCFHQ4FIAguLUEgWIgK0q
FlWqXJpcICVYpGzx2BAZ4uHv5+Hv6wq1BWINXBTdKriEKkI1DhW2QAfhttcxxANiFZCBbglQSJUL
i2dASrm4rFz9XLgAwJNbyQ==
""")
##file distutils-init.py
DISTUTILS_INIT = convert("""
eJytV92L4zYQf/dfMU0ottuse7RvC6FQrg8Lxz2Ugz4si9HacqKuIxlJ2ST313dG8odkO9d7aGBB
luZLv/nNjFacOqUtKJMIvzK3cXlhWgp5MDBsqK5SNYftsBAGpLLA4F1oe2Ytl+9wUvW55TswCi4c
KibhbFDSglXQCFmDPXIwtm7FawLRbwtPzg2T9gf4gupKv4GS0N262w7V0NvpbCy8cvTo3eAus6C5
ETU3ICQZX1hFTw/dzR6V/AW1RCN4/XAtbsVXqIXmlVX6liS4lOzEYY9QFB2zx6LfoSNjz1a0pqT9
QOIfJWQ2E888NEVZNqLlZZnvIB0NpHkimlFdKn2iRRY7yGG/CCJb6Iz280d34SFXBS2yEYPNF0Q7
yM7oCjpWvbEDQmnhRwOs6zjThpKE8HogwRAgraqYFZgGZvzmzVh+mgz9vskT3hruwyjdFcqyENJw
bbMPO5jdzonxK68QKT7B57CMRRG5shRSWDTX3dI8LzRndZbnSWL1zfvriUmK4TcGWSnZiEPCrxXv
bM+sP7VW2is2WgWXCO3sAu3Rzysz3FiNCA8WPyM4gb1JAAmCiyTZbhFjWx3h9SzauuRXC9MFoVbc
yNTCm1QXOOIfIn/g1kGMhDUBN72hI5XCBQtIXQw8UEEdma6Jaz4vJIJ51Orc15hzzmu6TdFp3ogr
Aof0c98tsw1SiaiWotHffk3XYCkqdToxWRfTFXqgpg2khcLluOHMVC0zZhLKIomesfSreUNNgbXi
Ky9VRzwzkBneNoGQyyvGjbsFQqOZvpWIjqH281lJ/jireFgR3cPzSyTGWzQpDNIU+03Fs4XKLkhp
/n0uFnuF6VphB44b3uWRneSbBoMSioqE8oeF0JY+qTvYfEK+bPLYdoR4McfYQ7wMZj39q0kfP8q+
FfsymO0GzNlPh644Jje06ulqHpOEQqdJUfoidI2O4CWx4qOglLye6RrFQirpCRXvhoRqXH3sYdVJ
AItvc+VUsLO2v2hVAWrNIfVGtkG351cUMNncbh/WdowtSPtCdkzYFv6mwYc9o2Jt68ud6wectBr8
hYAulPSlgzH44YbV3ikjrulEaNJxt+/H3wZ7bXSXje/YY4tfVVrVmUstaDwwOBLMg6iduDB0lMVC
UyzYx7Ab4kjCqdViEJmDcdk/SKbgsjYXgfMznUWcrtS4z4fmJ/XOM1LPk/iIpqass5XwNbdnLb1Y
8h3ERXSWZI6rZJxKs1LBqVH65w0Oy4ra0CBYxEeuOMbDmV5GI6E0Ha/wgVTtkX0+OXvqsD02CKLf
XHbeft85D7tTCMYy2Njp4DJP7gWJr6paVWXZ1+/6YXLv/iE0M90FktiI7yFJD9e7SOLhEkkaMTUO
azq9i2woBNR0/0eoF1HFMf0H8ChxH/jgcB34GZIz3Qn4/vid+VEamQrOVqAPTrOfmD4MPdVh09tb
8dLLjvh/61lEP4yW5vJaH4vHcevG8agXvzPGoOhhXNncpTr99PTHx6e/UvffFLaxUSjuSeP286Dw
gtEMcW1xKr/he4/6IQ6FUXP+0gkioHY5iwC9Eyx3HKO7af0zPPe+XyLn7fAY78k4aiR387bCr5XT
5C4rFgwLGfMvJuAMew==
""")
##file distutils.cfg
DISTUTILS_CFG = convert("""
eJxNj00KwkAMhfc9xYNuxe4Ft57AjYiUtDO1wXSmNJnK3N5pdSEEAu8nH6lxHVlRhtDHMPATA4uH
xJ4EFmGbvfJiicSHFRzUSISMY6hq3GLCRLnIvSTnEefN0FIjw5tF0Hkk9Q5dRunBsVoyFi24aaLg
9FDOlL0FPGluf4QjcInLlxd6f6rqkgPu/5nHLg0cXCscXoozRrP51DRT3j9QNl99AP53T2Q=
""")
##file activate_this.py
ACTIVATE_THIS = convert("""
eJyNUlGL2zAMfvevEBlHEujSsXsL9GGDvW1jD3sZpQQ3Ua7aJXawnbT595Ocpe0dO5ghseVP+vRJ
VpIkn2cYPZknwAvWLXWYhRP5Sk4baKgOWRWNqtpdgTyH2Y5wpq5Tug406YAgKEzkwqg7NBPwR86a
Hk0olPopaK0NHJHzYQPnE5rI0o8+yBUwiBfyQcT8mMPJGiAT0A0O+b8BY4MKJ7zPcSSzHaKrSpJE
qeDmUgGvVbPCS41DgO+6xy/OWbfAThMn/OQ9ukDWRCSLiKzk1yrLjWapq6NnvHUoHXQ4bYPdrsVX
4lQMc/q6ZW975nmSK+oH6wL42a9H65U6aha342Mh0UVDzrD87C1bH73s16R5zsStkBZDp0NrXQ+7
HaRnMo8f06UBnljKoOtn/YT+LtdvSyaT/BtIv9KR60nF9f3qmuYKO4//T9ItJMsjPfgUHqKwCZ3n
xu/Lx8M/UvCLTxW7VULHxB1PRRbrYfvWNY5S8it008jOjcleaMqVBDnUXcWULV2YK9JEQ92OfC96
1Tv4ZicZZZ7GpuEpZbbeQ7DxquVx5hdqoyFSSmXwfC90f1Dc7hjFs/tK99I0fpkI8zSLy4tSy+sI
3vMWehjQNJmE5VePlZbL61nzX3S93ZcfDqznnkb9AZ3GWJU=
""")
# Script entry point: run virtualenv's command-line interface when this
# file is executed directly.  ``main`` is defined elsewhere in this file
# (not visible in this chunk).
if __name__ == '__main__':
    main()
## TODO:
## Copy python.exe.manifest
## Monkeypatch distutils.sysconfig
|
GoogleCloudPlatform/python-compat-runtime | refs/heads/master | appengine-compat/exported_appengine_sdk/google/appengine/_internal/__init__.py | 1333 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
|
PisiLinuxNew/pisido | refs/heads/master | resources/files/actions_template_perl.py | 1 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file https://www.gnu.org/licenses/gpl-3.0.txt
from pisi.actionsapi import perlmodules
from pisi.actionsapi import pisitools
def setup():
    # Configure step for a perl-module package, delegated to the pisi
    # actions API.
    perlmodules.configure()
def build():
    # Build step, delegated to the pisi perlmodules helper.
    perlmodules.make()
def check():
    # Run the "test" make target (the perl module's test suite).
    perlmodules.make("test")
def install():
    # Install step, delegated to the pisi perlmodules helper.
    perlmodules.install()
    # NOTE(review): ``dodoc("")`` is called with an empty path — this looks
    # like a template placeholder to be filled with the package's doc
    # files; confirm before using this template as-is.
    pisitools.dodoc("")
|
HesselTjeerdsma/Cyber-Physical-Pacman-Game | refs/heads/master | test programs/test/test/lib/python2.7/site-packages/wheel/pkginfo.py | 96 | """Tools for reading and writing PKG-INFO / METADATA without caring
about the encoding."""
from email.parser import Parser
# Python 2/3 detection: the ``unicode`` builtin exists only on Python 2,
# so merely referencing it raises NameError on Python 3.
try:
    unicode
    _PY3 = False
except NameError:
    _PY3 = True
if not _PY3:
    from email.generator import Generator
    def read_pkg_info_bytes(bytestr):
        # On Python 2 the "bytes" are a native str, which Parser accepts
        # directly.
        return Parser().parsestr(bytestr)
    def read_pkg_info(path):
        # Parse a PKG-INFO / METADATA file into an email Message object.
        with open(path, "r") as headers:
            message = Parser().parse(headers)
        return message
    def write_pkg_info(path, message):
        # mangle_from_=False leaves "From " lines untouched; maxheaderlen=0
        # disables header wrapping so long fields are written verbatim.
        with open(path, 'w') as metadata:
            Generator(metadata, mangle_from_=False, maxheaderlen=0).flatten(message)
else:
    from email.generator import BytesGenerator
    def read_pkg_info_bytes(bytestr):
        # Decode with surrogateescape so arbitrary non-ASCII bytes survive
        # the bytes -> str round trip without raising (PEP 383).
        headers = bytestr.decode(encoding="ascii", errors="surrogateescape")
        message = Parser().parsestr(headers)
        return message
    def read_pkg_info(path):
        # Parse a PKG-INFO / METADATA file into an email Message object,
        # again using surrogateescape to tolerate non-ASCII bytes.
        with open(path, "r",
                  encoding="ascii",
                  errors="surrogateescape") as headers:
            message = Parser().parse(headers)
        return message
    def write_pkg_info(path, message):
        # Write as bytes; BytesGenerator can turn any surrogate escapes
        # back into the bytes they came from.  Same mangle_from_ /
        # maxheaderlen settings as the Python 2 branch above.
        with open(path, "wb") as out:
            BytesGenerator(out, mangle_from_=False, maxheaderlen=0).flatten(message)
|
fossoult/odoo | refs/heads/8.0 | addons/l10n_bo/__init__.py | 2120 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2011 Cubic ERP - Teradata SAC. (http://cubicerp.com).
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
xingwu1/autorest | refs/heads/master | AutoRest/Generators/Python/Python.Tests/Expected/AcceptanceTests/ModelFlattening/autorestresourceflatteningtestservice/models/flatten_parameter_group.py | 8 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class FlattenParameterGroup(Model):
    """Parameter group for the putSimpleProductWithGrouping operation.

    Bundles that operation's additional parameters into a single object,
    as generated by AutoRest for parameter-grouped operations.

    :param name: Product name with value 'groupproduct'
    :type name: str
    :param product_id: Unique identifier representing a specific product for
     a given latitude & longitude. For example, uberX in San Francisco will
     have a different product_id than uberX in Los Angeles.
    :type product_id: str
    :param description: Description of product.
    :type description: str
    :param max_product_display_name: Display name of product.
    :type max_product_display_name: str
    :param generic_value: Generic URL value.
    :type generic_value: str
    :param odatavalue: URL value.
    :type odatavalue: str
    """
    # msrest validation map: these three members must not be None.
    _validation = {
        'name': {'required': True},
        'product_id': {'required': True},
        'max_product_display_name': {'required': True},
    }
    def __init__(self, name, product_id, max_product_display_name, description=None, generic_value=None, odatavalue=None):
        # Required members first, optional members after.
        self.name = name
        self.product_id = product_id
        self.max_product_display_name = max_product_display_name
        self.description = description
        self.generic_value = generic_value
        self.odatavalue = odatavalue
|
Dziolas/invenio | refs/heads/scoap3 | modules/bibformat/lib/elements/bfe_fulltext_mini.py | 10 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Prints a links to fulltext
"""
__revision__ = "$Id$"
from invenio.bibformat_elements.bfe_fulltext import get_files, sort_alphanumerically
from invenio.messages import gettext_set_language
from invenio.config import CFG_SITE_URL, CFG_BASE_URL, CFG_CERN_SITE, CFG_SITE_RECORD
from invenio.urlutils import get_relative_url
from cgi import escape
def format_element(bfo, style, separator='; ', show_icons='no', focus_on_main_file='yes', show_subformat_icons='no'):
    """
    This is the format for formatting fulltext links in the mini panel.
    @param bfo: BibFormatObject of the record being formatted
    @param style: CSS class of the link
    @param separator: the separator between urls.
    @param show_icons: if 'yes', print icons for fulltexts
    @param focus_on_main_file: if 'yes' and a doctype 'Main' is found,
    prominently display this doctype. In that case other doctypes are
    summarized with a link to the Files tab, named "Additional files".
    @param show_subformat_icons: shall we display subformats considered as icons?
    @return: HTML snippet with the fulltext links (may be the empty string)
    """
    _ = gettext_set_language(bfo.lang)
    out = ''
    # Retrieve files attached to the record, split into main/other/CERN urls.
    (parsed_urls, old_versions, additionals) = \
       get_files(bfo, distinguish_main_and_additional_files=focus_on_main_file.lower() == 'yes',
                 include_subformat_icons=show_subformat_icons == 'yes')
    main_urls = parsed_urls['main_urls']
    others_urls = parsed_urls['others_urls']
    # BUG FIX: always bind cern_urls (defaulting to an empty list).  The
    # original only assigned it when the key existed (via the deprecated
    # dict.has_key()), so on a CERN site a record without CERN urls raised
    # UnboundLocalError at the later len(cern_urls) checks below.
    cern_urls = parsed_urls.get('cern_urls', [])
    # Prepare style
    if style != "":
        style = 'class="'+style+'"'
    # Build urls list.
    # Escape special chars for <a> tag value.
    additional_str = ''
    if additionals:
        additional_str = separator + '<small>(<a '+style+' href="'+CFG_BASE_URL+'/'+ CFG_SITE_RECORD +'/'+str(bfo.recID)+'/files/">%s</a>)</small>' % _("additional files")
    versions_str = ''
    #if old_versions:
        #versions_str = separator + '<small>(<a '+style+' href="'+CFG_SITE_URL+'/CFG_SITE_RECORD/'+str(bfo.recID)+'/files/">%s</a>)</small>' % _("older versions")
    if main_urls:
        # Put a big file icon if only one file
        if len(main_urls.keys()) == 1 and len(main_urls.items()[0][1]) == 1 and \
               (not CFG_CERN_SITE or len(cern_urls) == 0) and len(others_urls) == 0 and \
               show_icons.lower() == 'yes':
            file_icon = '<img style="border:none" src="%s/img/file-icon-text-34x48.gif" alt="%s" /><br />' % (CFG_BASE_URL, _("Download fulltext"))
        elif show_icons.lower() == 'yes':
            file_icon = '<img style="border:none" src="%s/img/file-icon-text-12x16.gif" alt="%s"/>' % (CFG_BASE_URL, _("Download fulltext"))
        else:
            file_icon = ''
        main_urls_keys = sort_alphanumerically(main_urls.keys())
        for descr in main_urls_keys:
            urls = main_urls[descr]
            out += '<div><small class="detailedRecordActions">%s:</small> ' % descr
            # Group urls by their link caption so several formats of the
            # same file are rendered together on one line.
            urls_dict = {}
            for url, name, url_format in urls:
                if name not in urls_dict:
                    urls_dict[name] = [(url, url_format)]
                else:
                    urls_dict[name].append((url, url_format))
            for name, urls_and_format in urls_dict.items():
                if len(urls_dict) > 1:
                    print_name = "<em>%s</em> - " % name
                    url_list = [print_name]
                else:
                    url_list = []
                for url, url_format in urls_and_format:
                    if CFG_CERN_SITE and url_format == 'ps.gz' and len(urls_and_format) > 1:
                        ## We skip old PS.GZ files
                        continue
                    url_list.append('<a %(style)s href="%(url)s">%(file_icon)s%(url_format)s</a>' % {
                        'style': style,
                        'url': get_relative_url(escape(url, True)),
                        'file_icon': file_icon,
                        'url_format': escape(url_format.upper())
                    })
                out += separator + " ".join(url_list)
            out += additional_str + versions_str + separator + "</div>"
    if CFG_CERN_SITE and cern_urls:
        # Put a big file icon if only one file
        if len(main_urls.keys()) == 0 and \
           len(cern_urls) == 1 and len(others_urls) == 0 and \
           show_icons.lower() == 'yes':
            file_icon = '<img style="border:none" src="%s/img/file-icon-text-34x48.gif" alt="%s" /><br />' % (CFG_BASE_URL, _("Download fulltext"))
        elif show_icons.lower() == 'yes':
            file_icon = '<img style="border:none" src="%s/img/file-icon-text-12x16.gif" alt="%s"/>' % (CFG_BASE_URL, _("Download fulltext"))
        else:
            file_icon = ''
        link_word = len(cern_urls) == 1 and _('%(x_sitename)s link') or _('%(x_sitename)s links')
        out += '<small class="detailedRecordActions">%s:</small><br />' % (link_word % {'x_sitename': 'CERN'})
        url_list = []
        for url, descr in cern_urls:
            url_list.append('<a '+style+' href="'+escape(url)+'">'+file_icon+escape(str(descr))+'</a>')
        out += '<small>' + separator.join(url_list) + '</small>'
        out += "<br/>"
    if others_urls:
        # Put a big file icon if only one file
        if len(main_urls.keys()) == 0 and \
           (not CFG_CERN_SITE or len(cern_urls) == 0) and len(others_urls) == 1 and \
           show_icons.lower() == 'yes':
            file_icon = '<img style="border:none" src="%s/img/file-icon-text-34x48.gif" alt="%s" /><br />' % (CFG_BASE_URL, _("Download fulltext"))
        elif show_icons.lower() == 'yes':
            file_icon = '<img style="border:none" src="%s/img/file-icon-text-12x16.gif" alt="%s"/>' % (CFG_BASE_URL, _("Download fulltext"))
        else:
            file_icon = ''
        external_link = len(others_urls) == 1 and _('external link') or _('external links')
        out += '<small class="detailedRecordActions">%s:</small>%s' % (external_link.capitalize(), separator)
        url_list = []
        for url, descr in others_urls:
            # we don't need to show the plot links here, and all are pngs.
            if url.find('.png') > -1:
                continue
            url_list.append('<a '+style+' href="'+escape(url)+'">'+file_icon+escape(str(descr))+'</a>')
        out += '<small>' + separator.join(url_list) + '</small>'
    # Trim a single trailing line break left by the loops above.
    if out.endswith('<br />'):
        out = out[:-len('<br />')]
    return out
def escape_values(bfo):
    """
    Tell BibFormat whether this element's output needs HTML-escaping.
    Always 0: the element deliberately emits raw HTML markup.
    """
    return 0
|
stuti-rastogi/leetcodesolutions | refs/heads/master | 001_twoSum.py | 1 | class Solution:
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
# Brute-force solution, accepted
# n = len(nums)
# for i in range(n):
# for j in range(i+1, n):
# if nums[i] + nums[j] == target:
# return [i, j]
d = {}
for i, num in enumerate(nums):
if (target - num) in d:
return [i, d[target-num]]
d[num] = i |
nagyistoce/odoo-dev-odoo | refs/heads/8.0 | addons/email_template/__openerp__.py | 260 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009 Sharoon Thomas
# Copyright (C) 2010-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
# Odoo/OpenERP addon manifest: declares module metadata, dependencies and
# the data files loaded at installation time.
{
    'name' : 'Email Templates',
    'version' : '1.1',
    'author' : 'OpenERP SA',
    'website' : 'https://www.odoo.com/page/mailing',
    'category' : 'Marketing',
    # Builds on the core messaging module.
    'depends' : ['mail'],
    'description': """
Email Templating (simplified version of the original Power Email by Openlabs).
==============================================================================
Lets you design complete email templates related to any OpenERP document (Sale
Orders, Invoices and so on), including sender, recipient, subject, body (HTML and
Text). You may also automatically attach files to your templates, or print and
attach a report.
For advanced use, the templates may include dynamic attributes of the document
they are related to. For example, you may use the name of a Partner's country
when writing to them, also providing a safe default in case the attribute is
not defined. Each template contains a built-in assistant to help with the
inclusion of these dynamic values.
If you enable the option, a composition assistant will also appear in the sidebar
of the OpenERP documents to which the template applies (e.g. Invoices).
This serves as a quick way to send a new email based on the template, after
reviewing and adapting the contents, if needed.
This composition assistant will also turn into a mass mailing system when called
for multiple documents at once.
These email templates are also at the heart of the marketing campaign system
(see the ``marketing_campaign`` application), if you need to automate larger
campaigns on any OpenERP document.
**Technical note:** only the templating system of the original Power Email by Openlabs was kept.
""",
    # View/security files loaded at install, in this order.
    'data': [
        'wizard/email_template_preview_view.xml',
        'email_template_view.xml',
        'res_partner_view.xml',
        'ir_actions_view.xml',
        'wizard/mail_compose_message_view.xml',
        'security/ir.model.access.csv'
    ],
    'demo': [],
    'installable': True,
    # Installed automatically as soon as all dependencies are present.
    'auto_install': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
mbayon/TFG-MachineLearning | refs/heads/master | vbig/lib/python2.7/site-packages/django/contrib/gis/db/backends/spatialite/operations.py | 28 | """
SQL functions reference lists:
https://web.archive.org/web/20130407175746/https://www.gaia-gis.it/gaia-sins/spatialite-sql-4.0.0.html
https://www.gaia-gis.it/gaia-sins/spatialite-sql-4.2.1.html
"""
import re
import sys
from django.contrib.gis.db.backends.base.operations import (
BaseSpatialOperations,
)
from django.contrib.gis.db.backends.spatialite.adapter import SpatiaLiteAdapter
from django.contrib.gis.db.backends.utils import SpatialOperator
from django.contrib.gis.db.models import aggregates
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Distance
from django.core.exceptions import ImproperlyConfigured
from django.db.backends.sqlite3.operations import DatabaseOperations
from django.utils import six
from django.utils.functional import cached_property
class SpatiaLiteDistanceOperator(SpatialOperator):
    """Distance comparison operator with a geodetic-coordinate workaround."""
    def as_sql(self, connection, lookup, template_params, sql_params):
        # Non-geodetic fields need no special handling.
        if not lookup.lhs.output_field.geodetic(connection):
            return super(SpatiaLiteDistanceOperator, self).as_sql(
                connection, lookup, template_params, sql_params)
        # SpatiaLite returns NULL instead of zero on geodetic coordinates,
        # so wrap the Distance() call in COALESCE(..., 0).
        template_params.update({
            'op': self.op,
            'func': connection.ops.spatial_function_name('Distance'),
        })
        # Extra boolean parameter: use the spheroid when the lookup asked for it.
        sql_params.insert(1, len(lookup.rhs) == 3 and lookup.rhs[-1] == 'spheroid')
        sql_template = 'COALESCE(%(func)s(%(lhs)s, %(rhs)s, %%s), 0) %(op)s %(value)s'
        return sql_template % template_params, sql_params
class SpatiaLiteOperations(BaseSpatialOperations, DatabaseOperations):
    """GeoDjango database operations for the SpatiaLite (SQLite) backend.

    Maps GeoDjango lookups and aggregates onto SpatiaLite SQL function names
    and provides helpers to query the SpatiaLite/GEOS/PROJ.4 library versions
    of the connected database.
    """
    name = 'spatialite'
    spatialite = True
    # Parses version strings such as "4.3.0a" into major/minor components.
    version_regex = re.compile(r'^(?P<major>\d)\.(?P<minor1>\d)\.(?P<minor2>\d+)')
    Adapter = SpatiaLiteAdapter
    # SpatiaLite names for the OGC spatial functions used by GeoDjango.
    area = 'Area'
    centroid = 'Centroid'
    collect = 'Collect'
    contained = 'MbrWithin'
    difference = 'Difference'
    distance = 'Distance'
    envelope = 'Envelope'
    extent = 'Extent'
    geojson = 'AsGeoJSON'
    gml = 'AsGML'
    intersection = 'Intersection'
    kml = 'AsKML'
    length = 'GLength' # OpenGis defines Length, but this conflicts with an SQLite reserved keyword
    makeline = 'MakeLine'
    num_geom = 'NumGeometries'
    num_points = 'NumPoints'
    point_on_surface = 'PointOnSurface'
    scale = 'ScaleCoords'
    svg = 'AsSVG'
    sym_difference = 'SymDifference'
    transform = 'Transform'
    translate = 'ShiftCoords'
    union = 'GUnion' # OpenGis defines Union, but this conflicts with an SQLite reserved keyword
    unionagg = 'GUnion'
    from_text = 'GeomFromText'
    from_wkb = 'GeomFromWKB'
    select = 'AsText(%s)'
    # Lookup-name -> SQL operator/function mapping for spatial lookups.
    gis_operators = {
        # Unary predicates
        'isvalid': SpatialOperator(func='IsValid'),
        # Binary predicates
        'equals': SpatialOperator(func='Equals'),
        'disjoint': SpatialOperator(func='Disjoint'),
        'touches': SpatialOperator(func='Touches'),
        'crosses': SpatialOperator(func='Crosses'),
        'within': SpatialOperator(func='Within'),
        'overlaps': SpatialOperator(func='Overlaps'),
        'contains': SpatialOperator(func='Contains'),
        'intersects': SpatialOperator(func='Intersects'),
        'relate': SpatialOperator(func='Relate'),
        # Returns true if B's bounding box completely contains A's bounding box.
        'contained': SpatialOperator(func='MbrWithin'),
        # Returns true if A's bounding box completely contains B's bounding box.
        'bbcontains': SpatialOperator(func='MbrContains'),
        # Returns true if A's bounding box overlaps B's bounding box.
        'bboverlaps': SpatialOperator(func='MbrOverlaps'),
        # These are implemented here as synonyms for Equals
        'same_as': SpatialOperator(func='Equals'),
        'exact': SpatialOperator(func='Equals'),
        # Distance predicates
        'dwithin': SpatialOperator(func='PtDistWithin'),
        'distance_gt': SpatiaLiteDistanceOperator(func='Distance', op='>'),
        'distance_gte': SpatiaLiteDistanceOperator(func='Distance', op='>='),
        'distance_lt': SpatiaLiteDistanceOperator(func='Distance', op='<'),
        'distance_lte': SpatiaLiteDistanceOperator(func='Distance', op='<='),
    }
    # 3D extent aggregation is not available on SpatiaLite.
    disallowed_aggregates = (aggregates.Extent3D,)
    @cached_property
    def function_names(self):
        """Map GeoDjango function names to their SpatiaLite equivalents."""
        return {
            'Length': 'ST_Length',
            'Reverse': 'ST_Reverse',
            'Scale': 'ScaleCoords',
            'Translate': 'ST_Translate',
            'Union': 'ST_Union',
        }
    @cached_property
    def unsupported_functions(self):
        """Return the set of GeoDjango functions this backend cannot run."""
        unsupported = {'BoundingCircle', 'ForceRHR', 'MemSize'}
        # Some functions are only available when SpatiaLite was built
        # against the LWGEOM library.
        if not self.lwgeom_version():
            unsupported |= {'GeoHash', 'IsValid', 'MakeValid'}
        return unsupported
    @cached_property
    def spatial_version(self):
        """Determine the version of the SpatiaLite library."""
        try:
            version = self.spatialite_version_tuple()[1:]
        except Exception as msg:
            new_msg = (
                'Cannot determine the SpatiaLite version for the "%s" '
                'database (error was "%s"). Was the SpatiaLite initialization '
                'SQL loaded on this database?') % (self.connection.settings_dict['NAME'], msg)
            # Re-raise as ImproperlyConfigured while keeping the traceback.
            six.reraise(ImproperlyConfigured, ImproperlyConfigured(new_msg), sys.exc_info()[2])
        if version < (4, 0, 0):
            raise ImproperlyConfigured('GeoDjango only supports SpatiaLite versions 4.0.0 and above.')
        return version
    def convert_extent(self, box, srid):
        """
        Convert the polygon data received from SpatiaLite to min/max values.
        """
        if box is None:
            return None
        # The extent comes back as a polygon; corners 0 and 2 of its outer
        # ring are the (xmin, ymin) and (xmax, ymax) corners.
        shell = Geometry(box, srid).shell
        xmin, ymin = shell[0][:2]
        xmax, ymax = shell[2][:2]
        return (xmin, ymin, xmax, ymax)
    def geo_db_type(self, f):
        """
        Returns None because geometry columns are added via the
        `AddGeometryColumn` stored procedure on SpatiaLite.
        """
        return None
    def get_distance(self, f, value, lookup_type, **kwargs):
        """
        Returns the distance parameters for the given geometry field,
        lookup value, and lookup type.
        """
        if not value:
            return []
        value = value[0]
        if isinstance(value, Distance):
            if f.geodetic(self.connection):
                if lookup_type == 'dwithin':
                    raise ValueError(
                        'Only numeric values of degree units are allowed on '
                        'geographic DWithin queries.'
                    )
                # Geodetic distances are expressed in meters.
                dist_param = value.m
            else:
                # Convert the Distance into the units of the field.
                dist_param = getattr(value, Distance.unit_attname(f.units_name(self.connection)))
        else:
            dist_param = value
        return [dist_param]
    def get_geom_placeholder(self, f, value, compiler):
        """
        Provides a proper substitution value for Geometries that are not in the
        SRID of the field. Specifically, this routine will substitute in the
        Transform() and GeomFromText() function call(s).
        """
        def transform_value(value, srid):
            # True when the value exists and its SRID differs from the field's.
            return not (value is None or value.srid == srid)
        if hasattr(value, 'as_sql'):
            if transform_value(value, f.srid):
                placeholder = '%s(%%s, %s)' % (self.transform, f.srid)
            else:
                placeholder = '%s'
            # No geometry value used for F expression, substitute in
            # the column name instead.
            sql, _ = compiler.compile(value)
            return placeholder % sql
        else:
            if transform_value(value, f.srid):
                # Adding Transform() to the SQL placeholder.
                return '%s(%s(%%s,%s), %s)' % (self.transform, self.from_text, value.srid, f.srid)
            else:
                return '%s(%%s,%s)' % (self.from_text, f.srid)
    def _get_spatialite_func(self, func):
        """
        Helper routine for calling SpatiaLite functions and returning
        their result.
        Any error occurring in this method should be handled by the caller.
        """
        cursor = self.connection._cursor()
        try:
            cursor.execute('SELECT %s' % func)
            row = cursor.fetchone()
        finally:
            cursor.close()
        return row[0]
    def geos_version(self):
        "Returns the version of GEOS used by SpatiaLite as a string."
        return self._get_spatialite_func('geos_version()')
    def proj4_version(self):
        "Returns the version of the PROJ.4 library used by SpatiaLite."
        return self._get_spatialite_func('proj4_version()')
    def lwgeom_version(self):
        """Return the version of LWGEOM library used by SpatiaLite."""
        return self._get_spatialite_func('lwgeom_version()')
    def spatialite_version(self):
        "Returns the SpatiaLite library version as a string."
        return self._get_spatialite_func('spatialite_version()')
    def spatialite_version_tuple(self):
        """
        Returns the SpatiaLite version as a tuple (version string, major,
        minor, subminor).
        """
        version = self.spatialite_version()
        m = self.version_regex.match(version)
        if m:
            major = int(m.group('major'))
            minor1 = int(m.group('minor1'))
            minor2 = int(m.group('minor2'))
        else:
            raise Exception('Could not parse SpatiaLite version string: %s' % version)
        return (version, major, minor1, minor2)
    def spatial_aggregate_name(self, agg_name):
        """
        Returns the spatial aggregate SQL template and function for the
        given Aggregate instance.
        """
        # 'Union' is exposed internally as 'unionagg' (see class attributes).
        agg_name = 'unionagg' if agg_name.lower() == 'union' else agg_name.lower()
        return getattr(self, agg_name)
    # Routines for getting the OGC-compliant models.
    def geometry_columns(self):
        """Return the model for SpatiaLite's geometry_columns table."""
        from django.contrib.gis.db.backends.spatialite.models import SpatialiteGeometryColumns
        return SpatialiteGeometryColumns
    def spatial_ref_sys(self):
        """Return the model for SpatiaLite's spatial_ref_sys table."""
        from django.contrib.gis.db.backends.spatialite.models import SpatialiteSpatialRefSys
        return SpatialiteSpatialRefSys
    def get_db_converters(self, expression):
        """Add geometry conversion for expressions with a geometry output."""
        converters = super(SpatiaLiteOperations, self).get_db_converters(expression)
        if hasattr(expression.output_field, 'geom_type'):
            converters.append(self.convert_geometry)
        return converters
    def convert_geometry(self, value, expression, connection, context):
        """Convert a raw database value into a Geometry, applying any
        transformed SRID recorded in the query context."""
        if value:
            value = Geometry(value)
            if 'transformed_srid' in context:
                value.srid = context['transformed_srid']
        return value
|
ronaldahmed/SLAM-for-ugv | refs/heads/master | neural-navigation-with-lstm/MARCO/Robot/Plastk.py | 2 | """
A simple gridworld demo.
This demo instantiates a gridworld and an agent that will learn to get
to the goal in the world using reinforcement learning.
$Id;$
"""
import pdb # We may need the debugger
########################################################################
# Import what we need from PLASTK: the top-level modules,
# and stuff for gridworlds, temporal difference (td) agents, and
# an interface that supports GUIs and logging.
from plastk import *
from plastk.rl.facegridworld import *
from plastk.rl.td import *
from plastk.rl.loggingrli import *
base.min_print_level = base.DEBUG
########################################################################
# Set the TD agent parameters
#
TDAgent.alpha = 0.2
TDAgent.lambda_ = 0.9
TDAgent.gamma = 1.0
TDAgent.action_selection = 'epsilon_greedy'
TDAgent.update_method = 'sarsa'
TDAgent.initial_epsilon = 0.0
TDAgent.min_epsilon = 0.0
LinearTDAgent.initial_w = 0
#FaceGridWorld.correct_action_probability = 0.9
from POMDP.MarkovLoc_L import pomdp
pomdp.map_dir = '../Maps'
# Make a grid environment with the given grid.
env = MarkovLocPOMDPWorld(pomdp=pomdp)
env.setRoute(5,7)
# Make a linear-tabular agent, i.e. an agent that takes a single
# integer as the state and does linear updating
agent = TabularTDAgent(actions = env.actions, num_features = env.num_states)
##agent = MarcoAgent(actions = env.actions, num_features = env.num_states)
# set up the reinforcement-learning interface with agent and env
rli = LoggingRLI(name = "FaceGridworld Demo", rename_old_data = False)
rli.init(agent,env)
# Run the rli GUI with a FaceGridWorldDisplay widget and a widget that
# plots the length of each episode.
rli.gui(FaceGridWorldDisplay, EpisodeVarPlotter('length'))
|
kenglishhi/gae-django-sandbox | refs/heads/master | django/contrib/auth/tests/__init__.py | 12 | from django.contrib.auth.tests.auth_backends import BackendTest, RowlevelBackendTest, AnonymousUserBackendTest, NoAnonymousUserBackendTest
from django.contrib.auth.tests.basic import BASIC_TESTS
from django.contrib.auth.tests.decorators import LoginRequiredTestCase
from django.contrib.auth.tests.forms import FORM_TESTS
from django.contrib.auth.tests.remote_user \
import RemoteUserTest, RemoteUserNoCreateTest, RemoteUserCustomTest
from django.contrib.auth.tests.models import ProfileTestCase
from django.contrib.auth.tests.tokens import TOKEN_GENERATOR_TESTS
from django.contrib.auth.tests.views \
import PasswordResetTest, ChangePasswordTest, LoginTest, LogoutTest
# The password for the fixture data users is 'password'
# Aggregate the doctest suites so Django's test runner discovers them
# through the module-level __test__ protocol.
__test__ = {
    'BASIC_TESTS': BASIC_TESTS,
    'FORM_TESTS': FORM_TESTS,
    'TOKEN_GENERATOR_TESTS': TOKEN_GENERATOR_TESTS,
}
|
stueken/flasky | refs/heads/master | app/auth/views.py | 48 | from flask import render_template, redirect, request, url_for, flash
from flask.ext.login import login_user, logout_user, login_required, \
current_user
from . import auth
from .. import db
from ..models import User
from ..email import send_email
from .forms import LoginForm, RegistrationForm, ChangePasswordForm,\
PasswordResetRequestForm, PasswordResetForm, ChangeEmailForm
@auth.before_app_request
def before_request():
    """Ping authenticated users and force unconfirmed ones to the
    'unconfirmed' page, except for auth endpoints and static files."""
    if not current_user.is_authenticated():
        return
    current_user.ping()
    if current_user.confirmed:
        return
    if request.endpoint[:5] == 'auth.' or request.endpoint == 'static':
        return
    return redirect(url_for('auth.unconfirmed'))
@auth.route('/unconfirmed')
def unconfirmed():
    """Show the 'please confirm your account' page to pending users."""
    needs_page = not (current_user.is_anonymous() or current_user.confirmed)
    if needs_page:
        return render_template('auth/unconfirmed.html')
    return redirect(url_for('main.index'))
@auth.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate a user and redirect to the requested page."""
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        credentials_ok = user is not None and user.verify_password(form.password.data)
        if credentials_ok:
            login_user(user, form.remember_me.data)
            target = request.args.get('next') or url_for('main.index')
            return redirect(target)
        flash('Invalid username or password.')
    return render_template('auth/login.html', form=form)
@auth.route('/logout')
@login_required
def logout():
    """End the current session and return to the home page."""
    logout_user()
    flash('You have been logged out.')
    return redirect(url_for('main.index'))
@auth.route('/register', methods=['GET', 'POST'])
def register():
    """Create a new account and email a confirmation token."""
    form = RegistrationForm()
    if not form.validate_on_submit():
        return render_template('auth/register.html', form=form)
    new_user = User(email=form.email.data,
                    username=form.username.data,
                    password=form.password.data)
    db.session.add(new_user)
    # Commit immediately so the new user has an id for the token.
    db.session.commit()
    confirmation_token = new_user.generate_confirmation_token()
    send_email(new_user.email, 'Confirm Your Account',
               'auth/email/confirm', user=new_user, token=confirmation_token)
    flash('A confirmation email has been sent to you by email.')
    return redirect(url_for('auth.login'))
@auth.route('/confirm/<token>')
@login_required
def confirm(token):
    """Validate an account-confirmation token for the logged-in user."""
    if not current_user.confirmed:
        if current_user.confirm(token):
            flash('You have confirmed your account. Thanks!')
        else:
            flash('The confirmation link is invalid or has expired.')
    return redirect(url_for('main.index'))
@auth.route('/confirm')
@login_required
def resend_confirmation():
    """Email a fresh confirmation token to the current user."""
    fresh_token = current_user.generate_confirmation_token()
    send_email(current_user.email, 'Confirm Your Account',
               'auth/email/confirm', user=current_user, token=fresh_token)
    flash('A new confirmation email has been sent to you by email.')
    return redirect(url_for('main.index'))
@auth.route('/change-password', methods=['GET', 'POST'])
@login_required
def change_password():
    """Let a logged-in user replace their password."""
    form = ChangePasswordForm()
    if form.validate_on_submit():
        if not current_user.verify_password(form.old_password.data):
            flash('Invalid password.')
        else:
            current_user.password = form.password.data
            db.session.add(current_user)
            flash('Your password has been updated.')
            return redirect(url_for('main.index'))
    return render_template("auth/change_password.html", form=form)
@auth.route('/reset', methods=['GET', 'POST'])
def password_reset_request():
    """Start the forgotten-password flow by emailing a reset token."""
    if not current_user.is_anonymous():
        return redirect(url_for('main.index'))
    form = PasswordResetRequestForm()
    if form.validate_on_submit():
        account = User.query.filter_by(email=form.email.data).first()
        if account:
            reset_token = account.generate_reset_token()
            send_email(account.email, 'Reset Your Password',
                       'auth/email/reset_password',
                       user=account, token=reset_token,
                       next=request.args.get('next'))
        # Same message whether or not the address exists.
        flash('An email with instructions to reset your password has been '
              'sent to you.')
        return redirect(url_for('auth.login'))
    return render_template('auth/reset_password.html', form=form)
@auth.route('/reset/<token>', methods=['GET', 'POST'])
def password_reset(token):
    """Complete the forgotten-password flow with the emailed token."""
    if not current_user.is_anonymous():
        return redirect(url_for('main.index'))
    form = PasswordResetForm()
    if form.validate_on_submit():
        account = User.query.filter_by(email=form.email.data).first()
        if account is None or not account.reset_password(token, form.password.data):
            return redirect(url_for('main.index'))
        flash('Your password has been updated.')
        return redirect(url_for('auth.login'))
    return render_template('auth/reset_password.html', form=form)
@auth.route('/change-email', methods=['GET', 'POST'])
@login_required
def change_email_request():
    """Request an email change; confirmation goes to the new address."""
    form = ChangeEmailForm()
    if form.validate_on_submit():
        if not current_user.verify_password(form.password.data):
            flash('Invalid email or password.')
        else:
            pending_email = form.email.data
            change_token = current_user.generate_email_change_token(pending_email)
            send_email(pending_email, 'Confirm your email address',
                       'auth/email/change_email',
                       user=current_user, token=change_token)
            flash('An email with instructions to confirm your new email '
                  'address has been sent to you.')
            return redirect(url_for('main.index'))
    return render_template("auth/change_email.html", form=form)
@auth.route('/change-email/<token>')
@login_required
def change_email(token):
    """Apply a pending email change if the token checks out."""
    message = ('Your email address has been updated.'
               if current_user.change_email(token)
               else 'Invalid request.')
    flash(message)
    return redirect(url_for('main.index'))
|
onitake/ansible | refs/heads/devel | lib/ansible/modules/storage/netapp/netapp_e_storage_system.py | 21 | #!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: netapp_e_storage_system
version_added: "2.2"
short_description: NetApp E-Series Web Services Proxy manage storage arrays
description:
- Manage the arrays accessible via a NetApp Web Services Proxy for NetApp E-series storage arrays.
options:
api_username:
description:
- The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
required: true
api_password:
description:
- The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
required: true
api_url:
description:
- The url to the SANtricity WebServices Proxy or embedded REST API.
required: true
validate_certs:
description:
- Should https certificates be validated?
type: bool
default: 'yes'
ssid:
description:
- The ID of the array to manage. This value must be unique for each array.
required: true
state:
description:
- Whether the specified array should be configured on the Web Services Proxy or not.
required: true
choices: ['present', 'absent']
controller_addresses:
description:
- The list addresses for the out-of-band management adapter or the agent host. Mutually exclusive of array_wwn parameter.
required: true
array_wwn:
description:
- The WWN of the array to manage. Only necessary if in-band managing multiple arrays on the same agent host. Mutually exclusive of
controller_addresses parameter.
array_password:
description:
- The management password of the array to manage, if set.
enable_trace:
description:
- Enable trace logging for SYMbol calls to the storage system.
type: bool
default: 'no'
meta_tags:
description:
- Optional meta tags to associate to this storage system
author: Kevin Hulquest (@hulquest)
'''
EXAMPLES = '''
---
- name: Presence of storage system
netapp_e_storage_system:
ssid: "{{ item.key }}"
state: present
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ netapp_api_validate_certs }}"
controller_addresses:
- "{{ item.value.address1 }}"
- "{{ item.value.address2 }}"
with_dict: "{{ storage_systems }}"
when: check_storage_system
'''
RETURN = '''
msg:
description: State of request
type: string
returned: always
sample: 'Storage system removed.'
'''
import json
from datetime import datetime as dt, timedelta
from time import sleep
from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.error import HTTPError
def request(url, data=None, headers=None, method='GET', use_proxy=True,
            force=False, last_mod_time=None, timeout=10, validate_certs=True,
            url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
    """Issue an HTTP(S) request via Ansible's open_url and decode the JSON body.

    Returns a (status_code, parsed_body) tuple; parsed_body is None when the
    response carried no payload.  Unless ignore_errors is set, raises on an
    undecodable payload and on HTTP status codes >= 400.
    """
    try:
        r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
                     force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
                     url_username=url_username, url_password=url_password, http_agent=http_agent,
                     force_basic_auth=force_basic_auth)
    except HTTPError as err:
        # Error responses still carry a JSON body worth decoding below.
        r = err.fp
    raw_data = None
    try:
        raw_data = r.read()
        # BUG FIX: previously an empty response body left `data` bound to the
        # *request* payload, which was then returned as the response body.
        data = json.loads(raw_data) if raw_data else None
    except Exception:
        # Was a bare `except:`; keep the best-effort path for ignore_errors.
        if not ignore_errors:
            raise Exception(raw_data)
    resp_code = r.getcode()
    if resp_code >= 400 and not ignore_errors:
        raise Exception(resp_code, data)
    return resp_code, data
def do_post(ssid, api_url, post_headers, api_usr, api_pwd, validate_certs, request_body, timeout):
    """POST a new storage system registration to the web services proxy.

    After a 201 (created), poll once a second until the proxy reports a real
    status for the array or ``timeout`` seconds elapse.  Returns a tuple of
    ``(status, last_response)``.
    """
    (rc, resp) = request(api_url + "/storage-systems", data=request_body, headers=post_headers,
                         method='POST', url_username=api_usr, url_password=api_pwd,
                         validate_certs=validate_certs)

    return_resp = resp
    status = resp['status'] if 'status' in resp else None

    if rc == 201:
        # Newly registered systems start out uncontacted; wait for the proxy
        # to establish contact (or give up at the deadline).
        status = 'neverContacted'
        deadline = dt.utcnow() + timedelta(seconds=timeout)
        while status == 'neverContacted':
            if dt.utcnow() > deadline:
                raise Exception("web proxy timed out waiting for array status")
            sleep(1)
            (rc, system_resp) = request(api_url + "/storage-systems/%s" % ssid,
                                        headers=dict(Accept="application/json"), url_username=api_usr,
                                        url_password=api_pwd, validate_certs=validate_certs,
                                        ignore_errors=True)
            status = system_resp['status']
            return_resp = system_resp

    return status, return_resp
def main():
    """Ansible entry point: ensure the storage system is registered (present)
    or unregistered (absent) on the web services proxy."""
    argument_spec = basic_auth_argument_spec()
    argument_spec.update(dict(
        state=dict(required=True, choices=['present', 'absent']),
        ssid=dict(required=True, type='str'),
        controller_addresses=dict(type='list'),
        array_wwn=dict(required=False, type='str'),
        array_password=dict(required=False, type='str', no_log=True),
        array_status_timeout_sec=dict(default=60, type='int'),
        enable_trace=dict(default=False, type='bool'),
        meta_tags=dict(type='list')
    ))
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[['controller_addresses', 'array_wwn']],
        required_if=[('state', 'present', ['controller_addresses'])]
    )

    p = module.params

    state = p['state']
    ssid = p['ssid']
    controller_addresses = p['controller_addresses']
    array_wwn = p['array_wwn']
    array_password = p['array_password']
    array_status_timeout_sec = p['array_status_timeout_sec']
    validate_certs = p['validate_certs']
    meta_tags = p['meta_tags']
    enable_trace = p['enable_trace']

    api_usr = p['api_username']
    api_pwd = p['api_password']
    api_url = p['api_url']

    changed = False
    array_exists = False

    # Probe the proxy for the current registration; a 404 means the ssid is
    # unknown, so errors are ignored here and handled via the status code.
    try:
        (rc, resp) = request(api_url + "/storage-systems/%s" % ssid, headers=dict(Accept="application/json"),
                             url_username=api_usr, url_password=api_pwd, validate_certs=validate_certs,
                             ignore_errors=True)
    except Exception as err:
        module.fail_json(msg="Error accessing storage-system with id [%s]. Error [%s]" % (ssid, to_native(err)))

    array_exists = True
    array_detail = resp

    if rc == 200:
        if state == 'absent':
            changed = True
            array_exists = False
        elif state == 'present':
            # Compare the desired controller addresses against what the proxy
            # currently knows (ignoring empty slots).
            current_addresses = frozenset(i for i in (array_detail['ip1'], array_detail['ip2']) if i)
            if set(controller_addresses) != current_addresses:
                changed = True
            # FIX: test ``array_wwn is not None`` first so the comparison is
            # only meaningful when a WWN was actually supplied.
            if array_wwn is not None and array_detail['wwn'] != array_wwn:
                module.fail_json(
                    msg='It seems you may have specified a bad WWN. The storage system ID you specified, %s, currently has the WWN of %s' %
                        (ssid, array_detail['wwn'])
                )
    elif rc == 404:
        if state == 'present':
            changed = True
            array_exists = False
        else:
            changed = False
            module.exit_json(changed=changed, msg="Storage system was not present.")

    if changed and not module.check_mode:
        if state == 'present':
            if not array_exists:
                # add the array
                array_add_req = dict(
                    id=ssid,
                    controllerAddresses=controller_addresses,
                    metaTags=meta_tags,
                    enableTrace=enable_trace
                )

                if array_wwn:
                    array_add_req['wwn'] = array_wwn

                if array_password:
                    array_add_req['password'] = array_password

                post_headers = dict(Accept="application/json")
                post_headers['Content-Type'] = 'application/json'
                request_data = json.dumps(array_add_req)

                try:
                    (rc, resp) = do_post(ssid, api_url, post_headers, api_usr, api_pwd, validate_certs, request_data,
                                         array_status_timeout_sec)
                except Exception as err:
                    module.fail_json(msg="Failed to add storage system. Id[%s]. Request body [%s]. Error[%s]." %
                                         (ssid, request_data, to_native(err)))

            else:  # array exists, modify...
                post_headers = dict(Accept="application/json")
                post_headers['Content-Type'] = 'application/json'
                post_body = dict(
                    controllerAddresses=controller_addresses,
                    removeAllTags=True,
                    enableTrace=enable_trace,
                    metaTags=meta_tags
                )
                # FIX: the body must be serialized to JSON before being handed
                # to do_post/open_url; the raw dict used to be passed through,
                # which is not a valid request payload (the add path above
                # already serialized correctly).
                request_data = json.dumps(post_body)

                try:
                    (rc, resp) = do_post(ssid, api_url, post_headers, api_usr, api_pwd, validate_certs, request_data,
                                         array_status_timeout_sec)
                except Exception as err:
                    module.fail_json(msg="Failed to update storage system. Id[%s]. Request body [%s]. Error[%s]." %
                                         (ssid, request_data, to_native(err)))

        elif state == 'absent':
            # delete the array
            try:
                (rc, resp) = request(api_url + "/storage-systems/%s" % ssid, method='DELETE',
                                     url_username=api_usr,
                                     url_password=api_pwd, validate_certs=validate_certs)
            except Exception as err:
                module.fail_json(msg="Failed to remove storage array. Id[%s]. Error[%s]." % (ssid, to_native(err)))

            if rc == 422:
                # FIX: typo "presnt" -> "present" in the user-facing message.
                module.exit_json(changed=changed, msg="Storage system was not present.")

            if rc == 204:
                module.exit_json(changed=changed, msg="Storage system removed.")

    module.exit_json(changed=changed, **resp)
if __name__ == '__main__':
    # Standard Ansible module entry point.
    main()
|
newerthcom/savagerebirth | refs/heads/master | libs/python-2.72/Stackless/unittests/test_pickle.py | 1 | import sys
import types
import unittest
import cPickle as pickle
import gc
from stackless import schedule, tasklet, stackless
from support import StacklessTestCase
#because test runner instances in the testsuite contain copies of the old stdin/stdout thingies,
#we need to make it appear that pickling them is ok, otherwise we will fail when pickling
#closures that refer to test runner instances
import copy_reg
import sys
def reduce(obj):
    """Pickle-reducer that replaces any instance with a fresh, empty object.

    NOTE: deliberately shadows the builtin ``reduce``; it is referenced only
    by the ``copy_reg.pickle`` registration immediately below.
    """
    return object, ()  # just create an empty object instance

# Register the reducer for the concrete type of sys.stdout so that pickling
# closures which capture test-runner stream objects does not fail.
copy_reg.pickle(type(sys.stdout), reduce, object)
# Chatty test output toggle (see TestPickledTasklets.run_pickled).
VERBOSE = False

# Shared accumulator of (ident, result) pairs appended by ``accumulate``.
glist = []
def nothing():
    """Do nothing; a minimal callable used to construct trivial tasklets."""
    return None
def accumulate(ident, func, *args):
    """Run ``func(*args)`` and record ``(ident, result)`` on the shared list."""
    glist.append((ident, func(*args)))
def get_result():
    """Pop and return the most recently recorded (ident, result) pair."""
    last = glist.pop()
    return last
def is_empty():
    """Return True when no results have been recorded."""
    return not glist
def reset():
    """Discard all recorded results in place (the list object is shared)."""
    del glist[:]
def rectest(nrec, lev=0, lst=None):
    """Recurse ``nrec`` levels deep, recording each level in ``lst``, and
    call schedule() once at the deepest level.  Returns the level list."""
    lst = [] if lst is None else lst
    lst.append(lev)
    if lev >= nrec:
        schedule()
    else:
        rectest(nrec, lev + 1, lst)
    return lst
class TaskletChannel:
    """Helper that owns a channel and blocks on it when run.

    Used by testGarbageCollection to produce a tasklet blocked on a channel
    that the tasklet itself (indirectly) references.
    """
    def __init__(self):
        self.channel = stackless.channel()
    def run(self):
        # Blocks indefinitely: nothing ever sends on this channel.
        self.channel.receive()
class CustomTasklet(tasklet):
    # Tasklet subclass with __slots__; used to verify that the subclass
    # identity survives a pickle round-trip (testClassPersistence).
    __slots__ = [ "name" ]
def listtest(n, when):
    """Iterate n indices (a list on Python 2), calling schedule() once at
    index ``when``.  Returns the final index (n - 1)."""
    for idx in range(n):
        if idx == when:
            schedule()
    return idx
def xrangetest(n, when):
    """Like listtest, but iterates a lazy xrange object (Python 2)."""
    for idx in xrange(n):
        if idx == when:
            schedule()
    return idx
def enumeratetest(n, when):
    """Iterate via enumerate(), calling schedule() once at index ``when``."""
    for idx, _ignored in enumerate([None] * n):
        if idx == when:
            schedule()
    return idx
def dicttestiterkeys(n, when):
    """Iterate a dict key-iterator, calling schedule() once at key ``when``."""
    mapping = dict((k, k) for k in range(n))
    for key in mapping.iterkeys():
        if key == when:
            schedule()
    return n
def dicttestitervalues(n, when):
    """Iterate a dict value-iterator, calling schedule() once at ``when``."""
    mapping = dict((k, k) for k in range(n))
    for value in mapping.itervalues():
        if value == when:
            schedule()
    return n
def dicttestiteritems(n, when):
    """Iterate a dict item-iterator, calling schedule() once at key ``when``."""
    mapping = dict((k, k) for k in range(n))
    for key, value in mapping.iteritems():
        if key == when:
            schedule()
    return n
def settest(n, when):
    """Iterate a set, calling schedule() once when ``when`` comes up.
    Returns the last member iterated (set order)."""
    for member in set(range(n)):
        if member == when:
            schedule()
    return member
def tupletest(n, when):
    """Iterate a tuple, calling schedule() once at element ``when``."""
    for element in tuple(range(n)):
        if element == when:
            schedule()
    return element
def genschedinnertest(n, when):
    """Generator yielding 0..n-1; calls schedule() just before yielding
    the value ``when``."""
    for value in range(n):
        if value == when:
            schedule()
        yield value
def genschedoutertest(n, when):
    """Drain genschedinnertest and return the last value it produced."""
    for item in genschedinnertest(n, when):
        pass
    return item
def geninnertest(n):
    """Plain generator yielding 0..n-1 (no scheduling involved)."""
    value = 0
    while value < n:
        yield value
        value += 1
def genoutertest(n, when):
    """Consume geninnertest, calling schedule() once at value ``when``.
    Returns the last value produced (n - 1)."""
    for produced in geninnertest(n):
        if produced == when:
            schedule()
    return produced
def cellpickling():
    """defect:  Initializing a function object with a partially constructed
    cell object
    """
    # The inner function refers to ``the_closure`` *before* that name is
    # bound, producing a function whose closure cell is filled in only
    # afterwards -- exactly the partially-constructed-cell case the defect
    # report describes.  Do not "simplify" this.
    def closure():
        localvar = the_closure
        return
    the_closure = closure
    del closure
    schedule()
    return the_closure()
def in_psyco():
    """Return the ``__in_psyco__`` marker psyco injects, or False when it
    is not defined (i.e. we are not running under psyco)."""
    try:
        flag = __in_psyco__
    except NameError:
        flag = False
    return flag
def is_soft():
    """True when soft-switching is enabled and we are not inside psyco."""
    # enable_softswitch returns the previous setting; restore it immediately.
    previous = stackless.enable_softswitch(0)
    stackless.enable_softswitch(previous)
    return previous and not in_psyco()
class TestPickledTasklets(StacklessTestCase):
    """Base harness: run a function in a tasklet until it blocks, pickle the
    suspended tasklet, resume the unpickled clone, and check that clone and
    original produce the same result."""

    def setUp(self):
        super(TestPickledTasklets, self).setUp()
        self.verbose = VERBOSE

    def tearDown(self):
        # Tasklets created in pickling tests can be left in the scheduler when they finish. We can feel free to
        # clean them up for the tests.
        mainTasklet = stackless.getmain()
        current = mainTasklet.next
        while current is not mainTasklet:
            next = current.next
            current.kill()
            current = next
        super(TestPickledTasklets, self).tearDown()
        del self.verbose

    def run_pickled(self, func, *args):
        # ``ident`` is a unique sentinel object so the original tasklet's
        # result can be told apart from the unpickled clone's.
        ident = object()
        t = tasklet(accumulate)(ident, func, *args)

        # clear out old errors
        reset()

        if self.verbose: print "starting tasklet"
        t.run()
        # The tasklet is now suspended at its schedule() call, so no result
        # has been recorded yet.
        self.assertEqual(is_empty(), True)

        # do we want to do this??
        #t.tempval = None

        if self.verbose: print "pickling"
        pi = pickle.dumps(t)

        # if self.verbose: print repr(pi)
        # why do we want to remove it?
        # t.remove()

        if self.verbose: print "unpickling"
        ip = pickle.loads(pi)

        if self.verbose: print "starting unpickled tasklet"
        if is_soft():
            ip.run()
        else:
            # Hard-switched tasklets cannot be resumed after unpickling.
            self.assertRaises(RuntimeError, ip.run)
            return
        # The clone finished first; then finish the original and compare.
        new_ident, new_rval = get_result()
        t.run()
        old_ident, old_rval = get_result()
        self.assertEqual(old_ident, ident)
        self.assertEqual(new_rval, old_rval)
        self.assertNotEqual(new_ident, old_ident)
        self.assertEqual(is_empty(), True)
# compatibility to 2.2.3
# NOTE: ``global`` at module level is a no-op; kept as in the original.
global have_enumerate
try:
    enumerate  # builtin only exists on Python >= 2.3
    have_enumerate = True
except NameError:
    have_enumerate = False

global have_fromkeys
try:
    {}.fromkeys  # dict.fromkeys only exists on Python >= 2.3
    have_fromkeys = True
except AttributeError:
    have_fromkeys = False
class TestConcretePickledTasklets(TestPickledTasklets):
    """Concrete pickling scenarios: one test per iterable/construct that the
    stackless pickling machinery must round-trip mid-iteration."""

    def testClassPersistence(self):
        # A tasklet subclass must come back from a pickle round-trip as
        # the same subclass, not plain ``tasklet``.
        t1 = CustomTasklet(nothing)()
        s = pickle.dumps(t1)
        t2 = pickle.loads(s)
        self.assertEqual(t1.__class__, t2.__class__)

    def testGenerator(self):
        self.run_pickled(genoutertest, 20, 13)

    def testList(self):
        self.run_pickled(listtest, 20, 13)

    def testXrange(self):
        self.run_pickled(xrangetest, 20, 13)

    def testRecursive(self):
        self.run_pickled(rectest, 13)

    ## Pickling of all three dictionary iterator types.

    # Test picking of the dictionary keys iterator.
    def testDictIterkeys(self):
        self.run_pickled(dicttestiterkeys, 20, 13)

    # Test picking of the dictionary values iterator.
    def testDictItervalues(self):
        self.run_pickled(dicttestitervalues, 20, 13)

    # Test picking of the dictionary items iterator.
    def testDictIteritems(self):
        self.run_pickled(dicttestiteritems, 20, 13)

    # Test pickling of iteration over the set type.
    def testSet(self):
        self.run_pickled(settest, 20, 13)

    # Only defined when the ``enumerate`` builtin exists (Python >= 2.3).
    if have_enumerate:
        def testEnumerate(self):
            self.run_pickled(enumeratetest, 20, 13)

    def testTuple(self):
        self.run_pickled(tupletest, 20, 13)

    def testGeneratorScheduling(self):
        self.run_pickled(genschedoutertest, 20, 13)

    def testRecursiveLambda(self):
        recurse = lambda self, next: \
            next-1 and self(self, next-1) or (schedule(), 42)[1]
        pickle.loads(pickle.dumps(recurse))
        self.run_pickled(recurse, recurse, 13)

    def testRecursiveEmbedded(self):
        # Avoid self references in this function, to prevent crappy unit testing framework
        # magic from getting pickled and refusing to unpickle.
        def rectest(verbose, nrec, lev=0):
            if verbose: print str(nrec), lev
            if lev < nrec:
                rectest(verbose, nrec, lev+1)
            else:
                schedule()
        self.run_pickled(rectest, self.verbose, 13)

    def testCell(self):
        self.run_pickled(cellpickling)

    def testFakeModules(self):
        types.ModuleType('fakemodule!')

    # Crash bug.  See SVN revision 45902.
    # Unpickling race condition where a tasklet unexpectedly had setstate
    # called on it before the channel it was blocked on.  This left it
    # linked to the channel but not blocked, which meant that when it was
    # scheduled it was not linked to the scheduler, and this caused a
    # crash when the scheduler tried to remove it when it exited.
    def testGarbageCollection(self):
        # Make a class that holds onto a reference to a channel
        # and then block it on that channel.
        tc = TaskletChannel()
        t = stackless.tasklet(tc.run)()
        t.run()
        # Pickle the tasklet that is blocking.
        s = pickle.dumps(t)
        # Unpickle it again, but don't hold a reference.
        pickle.loads(s)
        # Force the collection of the unpickled tasklet.
        gc.collect()

    def testSendSequence(self):
        # Send sequence when pickled was not handled.  It uses
        # a custom cframe execute function which was not recognised
        # by the pickling.
        #
        # Traceback (most recent call last):
        #   File ".\test_pickle.py", line 283, in testSendSequence
        #     pickle.dumps(t1)
        # ValueError: frame exec function at 1e00bf40 is not registered!
        def sender(chan):
            l = [ 1, 2, 3, 4 ]
            chan.send_sequence(l)
        def receiver(chan):
            length = 4
            while length:
                v = chan.receive()
                length -= 1
        c = stackless.channel()
        t1 = stackless.tasklet(sender)(c)
        t2 = stackless.tasklet(receiver)(c)
        t1.run()
        pickle.dumps(t1)

    def testSubmoduleImporting(self):
        # When a submodule was pickled, it would unpickle as the
        # module instead.
        import xml.sax
        m1 = xml.sax
        m2 = pickle.loads(pickle.dumps(m1))
        self.assertEqual(m1, m2)

    def testFunctionModulePreservation(self):
        # The 'module' name on the function was not being preserved.
        f1 = lambda: None
        f2 = pickle.loads(pickle.dumps(f1))
        self.assertEqual(f1.__module__, f2.__module__)
if __name__ == '__main__':
    # Default to verbose unittest output when run directly with no arguments.
    if not sys.argv[1:]:
        sys.argv.append('-v')
    unittest.main()
|
leoliujie/odoo | refs/heads/8.0 | openerp/addons/base/res/res_config.py | 243 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from operator import attrgetter
import re
import openerp
from openerp import SUPERUSER_ID
from openerp.osv import osv, fields
from openerp.tools import ustr
from openerp.tools.translate import _
from openerp import exceptions
from lxml import etree
_logger = logging.getLogger(__name__)
class res_config_module_installation_mixin(object):
    # Shared by the configuration wizards below: provides the module
    # installation step they all need.
    def _install_modules(self, cr, uid, modules, context):
        """Install the requested modules.

        return the next action to execute

        modules is a list of tuples
            (mod_name, browse_record | None)
        """
        ir_module = self.pool.get('ir.module.module')
        to_install_ids = []
        to_install_missing_names = []

        for name, module in modules:
            if not module:
                # Unknown to this database: can only be fetched from the
                # apps store, handled below.
                to_install_missing_names.append(name)
            elif module.state == 'uninstalled':
                to_install_ids.append(module.id)

        result = None
        if to_install_ids:
            result = ir_module.button_immediate_install(cr, uid, to_install_ids, context=context)
        #FIXME: if result is not none, the corresponding todo will be skipped because it was just marked done
        if to_install_missing_names:
            # Redirect the user to the Apps client action to fetch the
            # modules that are not present locally.
            return {
                'type': 'ir.actions.client',
                'tag': 'apps',
                'params': {'modules': to_install_missing_names},
            }

        return result
class res_config_configurable(osv.osv_memory):
    ''' Base classes for new-style configuration items

    Configuration items should inherit from this class, implement
    the execute method (and optionally the cancel one) and have
    their view inherit from the related res_config_view_base view.
    '''
    _name = 'res.config'

    def _next_action(self, cr, uid, context=None):
        """Return the next open automatic todo visible to this user,
        or None when there is nothing left to configure."""
        Todos = self.pool['ir.actions.todo']
        _logger.info('getting next %s', Todos)

        active_todos = Todos.browse(cr, uid,
                                    Todos.search(cr, uid, ['&', ('type', '=', 'automatic'), ('state','=','open')]),
                                    context=context)

        user_groups = set(map(
            lambda g: g.id,
            self.pool['res.users'].browse(cr, uid, [uid], context=context)[0].groups_id))

        # A todo with no groups is visible to everyone; otherwise the user
        # must share at least one group with it.
        valid_todos_for_user = [
            todo for todo in active_todos
            if not todo.groups_id or bool(user_groups.intersection((
                group.id for group in todo.groups_id)))
        ]

        if valid_todos_for_user:
            return valid_todos_for_user[0]

        return None

    def _next(self, cr, uid, context=None):
        """Launch the next todo's action, or reload the client when the
        configuration queue is exhausted."""
        _logger.info('getting next operation')
        next = self._next_action(cr, uid, context=context)
        _logger.info('next action is %s', next)
        if next:
            res = next.action_launch(context=context)
            res['nodestroy'] = False
            return res
        return {
            'type': 'ir.actions.client',
            'tag': 'reload',
        }

    def start(self, cr, uid, ids, context=None):
        return self.next(cr, uid, ids, context)

    def next(self, cr, uid, ids, context=None):
        """ Returns the next todo action to execute (using the default
        sort order)
        """
        return self._next(cr, uid, context=context)

    def execute(self, cr, uid, ids, context=None):
        """ Method called when the user clicks on the ``Next`` button.

        Execute *must* be overloaded unless ``action_next`` is overloaded
        (which is something you generally don't need to do).

        If ``execute`` returns an action dictionary, that action is executed
        rather than just going to the next configuration item.
        """
        raise NotImplementedError(
            'Configuration items need to implement execute')

    def cancel(self, cr, uid, ids, context=None):
        """ Method called when the user click on the ``Skip`` button.

        ``cancel`` should be overloaded instead of ``action_skip``. As with
        ``execute``, if it returns an action dictionary that action is
        executed in stead of the default (going to the next configuration item)

        The default implementation is a NOOP.

        ``cancel`` is also called by the default implementation of
        ``action_cancel``.
        """
        pass

    def action_next(self, cr, uid, ids, context=None):
        """ Action handler for the ``next`` event.

        Sets the status of the todo the event was sent from to
        ``done``, calls ``execute`` and -- unless ``execute`` returned
        an action dictionary -- executes the action provided by calling
        ``next``.
        """
        next = self.execute(cr, uid, ids, context=context)
        if next: return next
        return self.next(cr, uid, ids, context=context)

    def action_skip(self, cr, uid, ids, context=None):
        """ Action handler for the ``skip`` event.

        Sets the status of the todo the event was sent from to
        ``skip``, calls ``cancel`` and -- unless ``cancel`` returned
        an action dictionary -- executes the action provided by calling
        ``next``.
        """
        next = self.cancel(cr, uid, ids, context=context)
        if next: return next
        return self.next(cr, uid, ids, context=context)

    def action_cancel(self, cr, uid, ids, context=None):
        """ Action handler for the ``cancel`` event. That event isn't
        generated by the res.config.view.base inheritable view, the
        inherited view has to overload one of the buttons (or add one
        more).

        Sets the status of the todo the event was sent from to
        ``cancel``, calls ``cancel`` and -- unless ``cancel`` returned
        an action dictionary -- executes the action provided by calling
        ``next``.
        """
        next = self.cancel(cr, uid, ids, context=context)
        if next: return next
        return self.next(cr, uid, ids, context=context)
class res_config_installer(osv.osv_memory, res_config_module_installation_mixin):
    """ New-style configuration base specialized for addons selection
    and installation.

    Basic usage
    -----------

    Subclasses can simply define a number of _columns as
    fields.boolean objects. The keys (column names) should be the
    names of the addons to install (when selected). Upon action
    execution, selected boolean fields (and those only) will be
    interpreted as addons to install, and batch-installed.

    Additional addons
    -----------------

    It is also possible to require the installation of an additional
    addon set when a specific preset of addons has been marked for
    installation (in the basic usage only, additionals can't depend on
    one another).

    These additionals are defined through the ``_install_if``
    property. This property is a mapping of a collection of addons (by
    name) to a collection of addons (by name) [#]_, and if all the *key*
    addons are selected for installation, then the *value* ones will
    be selected as well. For example::

        _install_if = {
            ('sale','crm'): ['sale_crm'],
        }

    This will install the ``sale_crm`` addon if and only if both the
    ``sale`` and ``crm`` addons are selected for installation.

    You can define as many additionals as you wish, and additionals
    can overlap in key and value. For instance::

        _install_if = {
            ('sale','crm'): ['sale_crm'],
            ('sale','project'): ['sale_service'],
        }

    will install both ``sale_crm`` and ``sale_service`` if all of
    ``sale``, ``crm`` and ``project`` are selected for installation.

    Hook methods
    ------------

    Subclasses might also need to express dependencies more complex
    than that provided by additionals. In this case, it's possible to
    define methods of the form ``_if_%(name)s`` where ``name`` is the
    name of a boolean field. If the field is selected, then the
    corresponding module will be marked for installation *and* the
    hook method will be executed.

    Hook methods take the usual set of parameters (cr, uid, ids,
    context) and can return a collection of additional addons to
    install (if they return anything, otherwise they should not return
    anything, though returning any "falsy" value such as None or an
    empty collection will have the same effect).

    Complete control
    ----------------

    The last hook is to simply overload the ``modules_to_install``
    method, which implements all the mechanisms above. This method
    takes the usual set of parameters (cr, uid, ids, context) and
    returns a ``set`` of addons to install (addons selected by the
    above methods minus addons from the *basic* set which are already
    installed) [#]_ so an overloader can simply manipulate the ``set``
    returned by ``res_config_installer.modules_to_install`` to add or
    remove addons.

    Skipping the installer
    ----------------------

    Unless it is removed from the view, installers have a *skip*
    button which invokes ``action_skip`` (and the ``cancel`` hook from
    ``res.config``). Hooks and additionals *are not run* when skipping
    installation, even for already installed addons.

    Again, setup your hooks accordingly.

    .. [#] note that since a mapping key needs to be hashable, it's
           possible to use a tuple or a frozenset, but not a list or a
           regular set

    .. [#] because the already-installed modules are only pruned at
           the very end of ``modules_to_install``, additionals and
           hooks depending on them *are guaranteed to execute*. Setup
           your hooks accordingly.
    """
    _name = 'res.config.installer'
    _inherit = 'res.config'

    _install_if = {}

    def already_installed(self, cr, uid, context=None):
        """ For each module, check if it's already installed and if it
        is return its name

        :returns: a list of the already installed modules in this
                  installer
        :rtype: [str]
        """
        return map(attrgetter('name'),
                   self._already_installed(cr, uid, context=context))

    def _already_installed(self, cr, uid, context=None):
        """ For each module (boolean fields in a res.config.installer),
        check if it's already installed (either 'to install', 'to upgrade'
        or 'installed') and if it is return the module's record

        :returns: a list of all installed modules in this installer
        :rtype: recordset (collection of Record)
        """
        modules = self.pool['ir.module.module']

        # Only boolean columns on this wizard are module selectors.
        selectable = [field for field in self._columns
                      if type(self._columns[field]) is fields.boolean]
        return modules.browse(
            cr, uid,
            modules.search(cr, uid,
                           [('name','in',selectable),
                            ('state','in',['to install', 'installed', 'to upgrade'])],
                           context=context),
            context=context)

    def modules_to_install(self, cr, uid, ids, context=None):
        """ selects all modules to install:

        * checked boolean fields
        * return values of hook methods. Hook methods are of the form
          ``_if_%(addon_name)s``, and are called if the corresponding
          addon is marked for installation. They take the arguments
          cr, uid, ids and context, and return an iterable of addon
          names
        * additionals, additionals are setup through the ``_install_if``
          class variable. ``_install_if`` is a dict of {iterable:iterable}
          where key and value are iterables of addon names.

          If all the addons in the key are selected for installation
          (warning: addons added through hooks don't count), then the
          addons in the value are added to the set of modules to install
        * not already installed
        """
        base = set(module_name
                   for installer in self.read(cr, uid, ids, context=context)
                   for module_name, to_install in installer.iteritems()
                   if module_name != 'id'
                   if type(self._columns.get(module_name)) is fields.boolean
                   if to_install)

        hooks_results = set()
        for module in base:
            hook = getattr(self, '_if_%s'% module, None)
            if hook:
                # FIX: forward the caller's context instead of the
                # hard-coded ``context=None`` the hooks used to receive
                # (every other call in this method forwards ``context``).
                hooks_results.update(hook(cr, uid, ids, context=context) or set())

        additionals = set(
            module for requirements, consequences \
                       in self._install_if.iteritems()
            if base.issuperset(requirements)
            for module in consequences)

        return (base | hooks_results | additionals).difference(
            self.already_installed(cr, uid, context))

    def default_get(self, cr, uid, fields_list, context=None):
        ''' If an addon is already installed, check it by default
        '''
        defaults = super(res_config_installer, self).default_get(
            cr, uid, fields_list, context=context)

        return dict(defaults,
                    **dict.fromkeys(
                        self.already_installed(cr, uid, context=context),
                        True))

    def fields_get(self, cr, uid, fields=None, context=None, write_access=True, attributes=None):
        """ If an addon is already installed, set it to readonly as
        res.config.installer doesn't handle uninstallations of already
        installed addons
        """
        fields = super(res_config_installer, self).fields_get(
            cr, uid, fields, context, write_access, attributes)

        for name in self.already_installed(cr, uid, context=context):
            if name not in fields:
                continue
            fields[name].update(
                readonly=True,
                help= ustr(fields[name].get('help', '')) +
                     _('\n\nThis addon is already installed on your system'))

        return fields

    def execute(self, cr, uid, ids, context=None):
        """Resolve the full module selection and hand it to the shared
        installation mixin."""
        to_install = list(self.modules_to_install(
            cr, uid, ids, context=context))
        _logger.info('Selecting addons %s to install', to_install)

        ir_module = self.pool.get('ir.module.module')
        modules = []
        for name in to_install:
            mod_ids = ir_module.search(cr, uid, [('name', '=', name)])
            record = ir_module.browse(cr, uid, mod_ids[0], context) if mod_ids else None
            modules.append((name, record))

        return self._install_modules(cr, uid, modules, context=context)
class res_config_settings(osv.osv_memory, res_config_module_installation_mixin):
""" Base configuration wizard for application settings. It provides support for setting
default values, assigning groups to employee users, and installing modules.
To make such a 'settings' wizard, define a model like::
class my_config_wizard(osv.osv_memory):
_name = 'my.settings'
_inherit = 'res.config.settings'
_columns = {
'default_foo': fields.type(..., default_model='my.model'),
'group_bar': fields.boolean(..., group='base.group_user', implied_group='my.group'),
'module_baz': fields.boolean(...),
'other_field': fields.type(...),
}
The method ``execute`` provides some support based on a naming convention:
* For a field like 'default_XXX', ``execute`` sets the (global) default value of
the field 'XXX' in the model named by ``default_model`` to the field's value.
* For a boolean field like 'group_XXX', ``execute`` adds/removes 'implied_group'
to/from the implied groups of 'group', depending on the field's value.
By default 'group' is the group Employee. Groups are given by their xml id.
The attribute 'group' may contain several xml ids, separated by commas.
* For a boolean field like 'module_XXX', ``execute`` triggers the immediate
installation of the module named 'XXX' if the field has value ``True``.
* For the other fields, the method ``execute`` invokes all methods with a name
that starts with 'set_'; such methods can be defined to implement the effect
of those fields.
The method ``default_get`` retrieves values that reflect the current status of the
fields like 'default_XXX', 'group_XXX' and 'module_XXX'. It also invokes all methods
with a name that starts with 'get_default_'; such methods can be defined to provide
current values for other fields.
"""
_name = 'res.config.settings'
    def copy(self, cr, uid, id, values, context=None):
        """Settings wizards are transient and must never be duplicated."""
        raise osv.except_osv(_("Cannot duplicate configuration!"), "")
    def fields_view_get(self, cr, user, view_id=None, view_type='form',
                        context=None, toolbar=False, submenu=False):
        """Inject an ``on_change`` handler on every ``module_XXX`` field in
        the form view, so unchecking a module triggers the uninstall warning
        (see ``onchange_module``), unless the view already defines one."""
        ret_val = super(res_config_settings, self).fields_view_get(
            cr, user, view_id=view_id, view_type=view_type, context=context,
            toolbar=toolbar, submenu=submenu)

        doc = etree.XML(ret_val['arch'])

        for field in ret_val['fields']:
            if not field.startswith("module_"):
                continue
            for node in doc.xpath("//field[@name='%s']" % field):
                if 'on_change' not in node.attrib:
                    node.set("on_change",
                             "onchange_module(%s, '%s')" % (field, field))

        ret_val['arch'] = etree.tostring(doc)
        return ret_val
def onchange_module(self, cr, uid, ids, field_value, module_name, context={}):
module_pool = self.pool.get('ir.module.module')
module_ids = module_pool.search(
cr, uid, [('name', '=', module_name.replace("module_", '')),
('state','in', ['to install', 'installed', 'to upgrade'])],
context=context)
if module_ids and not field_value:
dep_ids = module_pool.downstream_dependencies(cr, uid, module_ids, context=context)
dep_name = [x.shortdesc for x in module_pool.browse(
cr, uid, dep_ids + module_ids, context=context)]
message = '\n'.join(dep_name)
return {
'warning': {
'title': _('Warning!'),
'message': _('Disabling this option will also uninstall the following modules \n%s') % message,
}
}
return {}
    def _get_classified_fields(self, cr, uid, context=None):
        """ return a dictionary with the fields classified by category::

                { 'default': [('default_foo', 'model', 'foo'), ...],
                  'group':   [('group_bar', [browse_group], browse_implied_group), ...],
                  'module':  [('module_baz', browse_module), ...],
                  'other':   ['other_field', ...],
                }
        """
        ir_model_data = self.pool['ir.model.data']
        ir_module = self.pool['ir.module.module']

        def ref(xml_id):
            # Resolve an XML id such as 'base.group_user' to its record.
            mod, xml = xml_id.split('.', 1)
            return ir_model_data.get_object(cr, uid, mod, xml, context=context)

        defaults, groups, modules, others = [], [], [], []
        for name, field in self._columns.items():
            if name.startswith('default_') and hasattr(field, 'default_model'):
                # name[8:] strips the 'default_' prefix -> target field name.
                defaults.append((name, field.default_model, name[8:]))
            elif name.startswith('group_') and isinstance(field, fields.boolean) and hasattr(field, 'implied_group'):
                # 'group' may hold several comma-separated xml ids.
                field_groups = getattr(field, 'group', 'base.group_user').split(',')
                groups.append((name, map(ref, field_groups), ref(field.implied_group)))
            elif name.startswith('module_') and isinstance(field, fields.boolean):
                # name[7:] strips the 'module_' prefix -> module name.
                mod_ids = ir_module.search(cr, uid, [('name', '=', name[7:])])
                record = ir_module.browse(cr, uid, mod_ids[0], context) if mod_ids else None
                modules.append((name, record))
            else:
                others.append(name)

        return {'default': defaults, 'group': groups, 'module': modules, 'other': others}
    def default_get(self, cr, uid, fields, context=None):
        """Populate the wizard with the *current* state of every setting:
        stored defaults, group implications, module installation state, and
        the result of any ``get_default_*`` hook."""
        ir_values = self.pool['ir.values']
        classified = self._get_classified_fields(cr, uid, context)

        res = super(res_config_settings, self).default_get(cr, uid, fields, context)

        # defaults: take the corresponding default value they set
        for name, model, field in classified['default']:
            value = ir_values.get_default(cr, uid, model, field)
            if value is not None:
                res[name] = value

        # groups: which groups are implied by the group Employee
        for name, groups, implied_group in classified['group']:
            res[name] = all(implied_group in group.implied_ids for group in groups)

        # modules: which modules are installed/to install
        for name, module in classified['module']:
            res[name] = module and module.state in ('installed', 'to install', 'to upgrade')

        # other fields: call all methods that start with 'get_default_'
        for method in dir(self):
            if method.startswith('get_default_'):
                res.update(getattr(self, method)(cr, uid, fields, context))

        return res
    def execute(self, cr, uid, ids, context=None):
        """Apply the settings wizard.

        In order: persist ir.values defaults, rewire implied groups, run every
        ``set_*`` hook, uninstall then install the selected modules, and
        finally either chain to the next configuration action or ask the web
        client to reload (menus/views may have changed).
        """
        if context is None:
            context = {}
        context = dict(context, active_test=False)
        # Only administrators (or the superuser) may apply settings.
        if uid != SUPERUSER_ID and not self.pool['res.users'].has_group(cr, uid, 'base.group_erp_manager'):
            raise openerp.exceptions.AccessError(_("Only administrators can change the settings"))
        ir_values = self.pool['ir.values']
        ir_module = self.pool['ir.module.module']
        res_groups = self.pool['res.groups']
        classified = self._get_classified_fields(cr, uid, context=context)
        config = self.browse(cr, uid, ids[0], context)
        # default values fields
        for name, model, field in classified['default']:
            ir_values.set_default(cr, SUPERUSER_ID, model, field, config[name])
        # group fields: modify group / implied groups
        # NOTE: relies on Python 2 map() returning a list (this is py2 code).
        for name, groups, implied_group in classified['group']:
            gids = map(int, groups)
            if config[name]:
                # (4, id): link the implied group to every source group
                res_groups.write(cr, uid, gids, {'implied_ids': [(4, implied_group.id)]}, context=context)
            else:
                # (3, id): unlink it, and also remove the source groups' users
                # from the implied group itself.
                res_groups.write(cr, uid, gids, {'implied_ids': [(3, implied_group.id)]}, context=context)
                uids = set()
                for group in groups:
                    uids.update(map(int, group.users))
                implied_group.write({'users': [(3, u) for u in uids]})
        # other fields: execute all methods that start with 'set_'
        for method in dir(self):
            if method.startswith('set_'):
                getattr(self, method)(cr, uid, ids, context)
        # module fields: install/uninstall the selected modules
        to_install = []
        to_uninstall_ids = []
        lm = len('module_')
        for name, module in classified['module']:
            if config[name]:
                to_install.append((name[lm:], module))
            else:
                if module and module.state in ('installed', 'to upgrade'):
                    to_uninstall_ids.append(module.id)
        if to_uninstall_ids:
            ir_module.button_immediate_uninstall(cr, uid, to_uninstall_ids, context=context)
        action = self._install_modules(cr, uid, to_install, context=context)
        if action:
            return action
        # After the uninstall/install calls, the self.pool is no longer valid.
        # So we reach into the RegistryManager directly.
        res_config = openerp.modules.registry.RegistryManager.get(cr.dbname)['res.config']
        config = res_config.next(cr, uid, [], context=context) or {}
        if config.get('type') not in ('ir.actions.act_window_close',):
            return config
        # force client-side reload (update user menu and current view)
        return {
            'type': 'ir.actions.client',
            'tag': 'reload',
        }
def cancel(self, cr, uid, ids, context=None):
# ignore the current record, and send the action to reopen the view
act_window = self.pool['ir.actions.act_window']
action_ids = act_window.search(cr, uid, [('res_model', '=', self._name)])
if action_ids:
return act_window.read(cr, uid, action_ids[0], [], context=context)
return {}
def name_get(self, cr, uid, ids, context=None):
""" Override name_get method to return an appropriate configuration wizard
name, and not the generated name."""
if not ids:
return []
# name_get may receive int id instead of an id list
if isinstance(ids, (int, long)):
ids = [ids]
act_window = self.pool['ir.actions.act_window']
action_ids = act_window.search(cr, uid, [('res_model', '=', self._name)], context=context)
name = self._name
if action_ids:
name = act_window.read(cr, uid, action_ids[0], ['name'], context=context)['name']
return [(record.id, name) for record in self.browse(cr, uid , ids, context=context)]
def get_option_path(self, cr, uid, menu_xml_id, context=None):
"""
Fetch the path to a specified configuration view and the action id to access it.
:param string menu_xml_id: the xml id of the menuitem where the view is located,
structured as follows: module_name.menuitem_xml_id (e.g.: "base.menu_sale_config")
:return tuple:
- t[0]: string: full path to the menuitem (e.g.: "Settings/Configuration/Sales")
- t[1]: int or long: id of the menuitem's action
"""
module_name, menu_xml_id = menu_xml_id.split('.')
dummy, menu_id = self.pool['ir.model.data'].get_object_reference(cr, uid, module_name, menu_xml_id)
ir_ui_menu = self.pool['ir.ui.menu'].browse(cr, uid, menu_id, context=context)
return (ir_ui_menu.complete_name, ir_ui_menu.action.id)
def get_option_name(self, cr, uid, full_field_name, context=None):
"""
Fetch the human readable name of a specified configuration option.
:param string full_field_name: the full name of the field, structured as follows:
model_name.field_name (e.g.: "sale.config.settings.fetchmail_lead")
:return string: human readable name of the field (e.g.: "Create leads from incoming mails")
"""
model_name, field_name = full_field_name.rsplit('.', 1)
return self.pool[model_name].fields_get(cr, uid, allfields=[field_name], context=context)[field_name]['string']
    def get_config_warning(self, cr, msg, context=None):
        """
        Helper: return a Warning exception with the given message where the %(field:xxx)s
        and/or %(menu:yyy)s are replaced by the human readable field's name and/or menuitem's
        full path.
        Usage:
        ------
        Just include in your error message %(field:model_name.field_name)s to obtain the human
        readable field's name, and/or %(menu:module_name.menuitem_xml_id)s to obtain the menuitem's
        full path.
        Example of use:
        ---------------
        from openerp.addons.base.res.res_config import res_config_settings
        raise res_config_settings.get_config_warning(cr, _("Error: this action is prohibited. You should check the field %(field:sale.config.settings.fetchmail_lead)s in %(menu:base.menu_sale_config)s."), context=context)
        This will return an exception containing the following message:
            Error: this action is prohibited. You should check the field Create leads from incoming mails in Settings/Configuration/Sales.
        What if there is another substitution in the message already?
        -------------------------------------------------------------
        You could have a situation where the error message you want to upgrade already contains a substitution. Example:
            Cannot find any account journal of %s type for this company.\n\nYou can create one in the menu: \nConfiguration\Journals\Journals.
        What you want to do here is simply to replace the path by %(menu:account.menu_account_config)s, and leave the rest alone.
        In order to do that, you can use the double percent (%%) to escape your new substitution, like so:
            Cannot find any account journal of %s type for this company.\n\nYou can create one in the %%(menu:account.menu_account_config)s.
        """
        res_config_obj = openerp.registry(cr.dbname)['res.config.settings']
        # Matches %(menu:xxx)s and %(field:xxx)s placeholders in the message.
        regex_path = r'%\(((?:menu|field):[a-z_\.]*)\)s'
        # Process the message
        # 1/ find the menu and/or field references, put them in a list
        references = re.findall(regex_path, msg, flags=re.I)
        # 2/ fetch the menu and/or field replacement values (full path and
        #    human readable field's name) and the action_id if any
        values = {}
        action_id = None
        for item in references:
            ref_type, ref = item.split(':')
            if ref_type == 'menu':
                values[item], action_id = res_config_obj.get_option_path(cr, SUPERUSER_ID, ref, context=context)
            elif ref_type == 'field':
                values[item] = res_config_obj.get_option_name(cr, SUPERUSER_ID, ref, context=context)
        # 3/ substitute and return the result
        # If a menu was referenced, a RedirectWarning lets the client offer a
        # direct jump to the relevant configuration panel.
        if (action_id):
            return exceptions.RedirectWarning(msg % values, action_id, _('Go to the configuration panel'))
        return exceptions.Warning(msg % values)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
alexsanjoseph/duolingo-save-streak | refs/heads/master | requests/exceptions.py | 93 | # -*- coding: utf-8 -*-
"""
requests.exceptions
~~~~~~~~~~~~~~~~~~~
This module contains the set of Requests' exceptions.
"""
from urllib3.exceptions import HTTPError as BaseHTTPError
class RequestException(IOError):
    """There was an ambiguous exception that occurred while handling your
    request.
    """
    def __init__(self, *args, **kwargs):
        """Initialize RequestException with `request` and `response` objects."""
        self.response = kwargs.pop('response', None)
        self.request = kwargs.pop('request', None)
        # Borrow the request from the response when none was given explicitly.
        borrow = (self.response is not None
                  and not self.request
                  and hasattr(self.response, 'request'))
        if borrow:
            self.request = self.response.request
        super(RequestException, self).__init__(*args, **kwargs)
# --- Protocol / connection level errors --------------------------------------
class HTTPError(RequestException):
    """An HTTP error occurred."""
class ConnectionError(RequestException):
    """A Connection error occurred."""
class ProxyError(ConnectionError):
    """A proxy error occurred."""
class SSLError(ConnectionError):
    """An SSL error occurred."""
# --- Timeouts ----------------------------------------------------------------
class Timeout(RequestException):
    """The request timed out.
    Catching this error will catch both
    :exc:`~requests.exceptions.ConnectTimeout` and
    :exc:`~requests.exceptions.ReadTimeout` errors.
    """
# Inherits ConnectionError too, so code catching ConnectionError still sees it.
class ConnectTimeout(ConnectionError, Timeout):
    """The request timed out while trying to connect to the remote server.
    Requests that produced this error are safe to retry.
    """
class ReadTimeout(Timeout):
    """The server did not send any data in the allotted amount of time."""
# --- URL / input validation (also ValueError for easy catching) --------------
class URLRequired(RequestException):
    """A valid URL is required to make a request."""
class TooManyRedirects(RequestException):
    """Too many redirects."""
class MissingSchema(RequestException, ValueError):
    """The URL schema (e.g. http or https) is missing."""
class InvalidSchema(RequestException, ValueError):
    """See defaults.py for valid schemas."""
class InvalidURL(RequestException, ValueError):
    """The URL provided was somehow invalid."""
class InvalidHeader(RequestException, ValueError):
    """The header value provided was somehow invalid."""
# --- Response body handling --------------------------------------------------
class ChunkedEncodingError(RequestException):
    """The server declared chunked encoding but sent an invalid chunk."""
class ContentDecodingError(RequestException, BaseHTTPError):
    """Failed to decode response content"""
class StreamConsumedError(RequestException, TypeError):
    """The content for this response was already consumed"""
class RetryError(RequestException):
    """Custom retries logic failed"""
class UnrewindableBodyError(RequestException):
    """Requests encountered an error when trying to rewind a body"""
# Warnings
class RequestsWarning(Warning):
    """Base warning for Requests."""
    pass
class FileModeWarning(RequestsWarning, DeprecationWarning):
    """A file was opened in text mode, but Requests determined its binary length."""
    pass
class RequestsDependencyWarning(RequestsWarning):
    """An imported dependency doesn't match the expected version range."""
    pass
|
cwmat/flasky | refs/heads/master | config.py | 79 | import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
    """Base configuration shared by every environment.
    Secrets and account names are read from the environment; subclasses
    select the database and add environment-specific initialization.
    """
    # Falls back to a placeholder when SECRET_KEY is unset (dev convenience).
    SECRET_KEY = os.environ.get('SECRET_KEY') or 'hard to guess string'
    SSL_DISABLE = False
    SQLALCHEMY_COMMIT_ON_TEARDOWN = True
    # Needed for the slow-query reporting (see FLASKY_SLOW_DB_QUERY_TIME).
    SQLALCHEMY_RECORD_QUERIES = True
    MAIL_SERVER = 'smtp.googlemail.com'
    MAIL_PORT = 587
    MAIL_USE_TLS = True
    MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
    MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
    FLASKY_MAIL_SUBJECT_PREFIX = '[Flasky]'
    FLASKY_MAIL_SENDER = 'Flasky Admin <flasky@example.com>'
    FLASKY_ADMIN = os.environ.get('FLASKY_ADMIN')
    # Pagination sizes for posts / followers / comments.
    FLASKY_POSTS_PER_PAGE = 20
    FLASKY_FOLLOWERS_PER_PAGE = 50
    FLASKY_COMMENTS_PER_PAGE = 30
    # Queries slower than this (seconds) are flagged as slow.
    FLASKY_SLOW_DB_QUERY_TIME=0.5
    @staticmethod
    def init_app(app):
        # Hook for environment-specific app initialization; the base config
        # has nothing to do.
        pass
class DevelopmentConfig(Config):
    """Development: debug on, local SQLite database unless DEV_DATABASE_URL is set."""
    DEBUG = True
    SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or \
        'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')
class TestingConfig(Config):
    """Testing: separate SQLite database, CSRF disabled so tests can POST forms."""
    TESTING = True
    SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL') or \
        'sqlite:///' + os.path.join(basedir, 'data-test.sqlite')
    WTF_CSRF_ENABLED = False
class ProductionConfig(Config):
    """Production: real database URL and error reports e-mailed to the admin."""
    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
        'sqlite:///' + os.path.join(basedir, 'data.sqlite')
    @classmethod
    def init_app(cls, app):
        # Attach an SMTP logging handler so ERROR-level records are mailed
        # to FLASKY_ADMIN.
        Config.init_app(app)
        # email errors to the administrators
        import logging
        from logging.handlers import SMTPHandler
        credentials = None
        secure = None
        if getattr(cls, 'MAIL_USERNAME', None) is not None:
            credentials = (cls.MAIL_USERNAME, cls.MAIL_PASSWORD)
            if getattr(cls, 'MAIL_USE_TLS', None):
                # Empty tuple means "use STARTTLS with no keyfile/certfile".
                secure = ()
        mail_handler = SMTPHandler(
            mailhost=(cls.MAIL_SERVER, cls.MAIL_PORT),
            fromaddr=cls.FLASKY_MAIL_SENDER,
            toaddrs=[cls.FLASKY_ADMIN],
            subject=cls.FLASKY_MAIL_SUBJECT_PREFIX + ' Application Error',
            credentials=credentials,
            secure=secure)
        mail_handler.setLevel(logging.ERROR)
        app.logger.addHandler(mail_handler)
class HerokuConfig(ProductionConfig):
    """Heroku deployment: honor proxy headers and log to stderr (captured by Heroku)."""
    SSL_DISABLE = bool(os.environ.get('SSL_DISABLE'))
    @classmethod
    def init_app(cls, app):
        ProductionConfig.init_app(app)
        # handle proxy server headers
        # NOTE(review): werkzeug.contrib was removed in Werkzeug 1.0 —
        # confirm the pinned Werkzeug version still ships this module.
        from werkzeug.contrib.fixers import ProxyFix
        app.wsgi_app = ProxyFix(app.wsgi_app)
        # log to stderr
        import logging
        from logging import StreamHandler
        file_handler = StreamHandler()
        file_handler.setLevel(logging.WARNING)
        app.logger.addHandler(file_handler)
class UnixConfig(ProductionConfig):
    """Unix deployment: send warnings and above to the local syslog daemon."""
    @classmethod
    def init_app(cls, app):
        ProductionConfig.init_app(app)
        # log to syslog
        import logging
        from logging.handlers import SysLogHandler
        syslog_handler = SysLogHandler()
        syslog_handler.setLevel(logging.WARNING)
        app.logger.addHandler(syslog_handler)
# Registry mapping configuration names to classes; 'default' is the fallback
# when no explicit configuration name is provided.
config = {
    'development': DevelopmentConfig,
    'testing': TestingConfig,
    'production': ProductionConfig,
    'heroku': HerokuConfig,
    'unix': UnixConfig,
    'default': DevelopmentConfig
}
|
jhona22baz/blog-flask | refs/heads/master | python2.7/lib/python2.7/site-packages/sqlalchemy/dialects/mysql/zxjdbc.py | 34 | # mysql/zxjdbc.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Support for the MySQL database via Jython's zxjdbc JDBC connector.
JDBC Driver
-----------
The official MySQL JDBC driver is at
http://dev.mysql.com/downloads/connector/j/.
Connecting
----------
Connect string format:
mysql+zxjdbc://<user>:<password>@<hostname>[:<port>]/<database>
Character Sets
--------------
SQLAlchemy zxjdbc dialects pass unicode straight through to the
zxjdbc/JDBC layer. To allow multiple character sets to be sent from the
MySQL Connector/J JDBC driver, by default SQLAlchemy sets its
``characterEncoding`` connection property to ``UTF-8``. It may be
overridden via a ``create_engine`` URL parameter.
"""
import re
from sqlalchemy import types as sqltypes, util
from sqlalchemy.connectors.zxJDBC import ZxJDBCConnector
from sqlalchemy.dialects.mysql.base import BIT, MySQLDialect, MySQLExecutionContext
class _ZxJDBCBit(BIT):
    def result_processor(self, dialect, coltype):
        """Converts boolean or byte arrays from MySQL Connector/J to longs."""
        def process(value):
            if value is None:
                return value
            if isinstance(value, bool):
                return int(value)
            # Fold the byte array MSB-first into a single (Python 2) long.
            v = 0L
            for i in value:
                v = v << 8 | (i & 0xff)
            value = v
            return value
        return process
class MySQLExecutionContext_zxjdbc(MySQLExecutionContext):
    def get_lastrowid(self):
        """Return the id generated by the last INSERT on this connection.

        Uses a throwaway cursor to run ``SELECT LAST_INSERT_ID()``.  The
        cursor is now closed in a ``finally`` block so it is not leaked when
        execute()/fetchone() raises (the previous code only closed it on the
        success path).
        """
        cursor = self.create_cursor()
        try:
            cursor.execute("SELECT LAST_INSERT_ID()")
            return cursor.fetchone()[0]
        finally:
            cursor.close()
class MySQLDialect_zxjdbc(ZxJDBCConnector, MySQLDialect):
    jdbc_db_name = 'mysql'
    jdbc_driver_name = 'com.mysql.jdbc.Driver'
    execution_ctx_cls = MySQLExecutionContext_zxjdbc
    colspecs = util.update_copy(
        MySQLDialect.colspecs,
        {
            sqltypes.Time: sqltypes.Time,
            BIT: _ZxJDBCBit
        }
    )
    def _detect_charset(self, connection):
        """Sniff out the character set in use for connection results."""
        # Prefer 'character_set_results' for the current connection over the
        # value in the driver. SET NAMES or individual variable SETs will
        # change the charset without updating the driver's view of the world.
        #
        # If it's decided that issuing that sort of SQL leaves you SOL, then
        # this can prefer the driver value.
        rs = connection.execute("SHOW VARIABLES LIKE 'character_set%%'")
        opts = dict((row[0], row[1]) for row in self._compat_fetchall(rs))
        for key in ('character_set_connection', 'character_set'):
            if opts.get(key, None):
                return opts[key]
        util.warn("Could not detect the connection character set. Assuming latin1.")
        return 'latin1'
    def _driver_kwargs(self):
        """return kw arg dict to be sent to connect()."""
        return dict(characterEncoding='UTF-8', yearIsDateType='false')
    def _extract_error_code(self, exception):
        """Return the numeric MySQL error code embedded in a zxjdbc message,
        or None when the message carries no "[SQLCode: NNN]" marker.
        """
        # e.g.: DBAPIError: (Error) Table 'test.u2' doesn't exist
        # [SQLCode: 1146], [SQLState: 42S02] 'DESCRIBE `u2`' ()
        # BUGFIX: the previous code called m.group(1) unconditionally and
        # raised AttributeError whenever the pattern did not match.
        m = re.compile(r"\[SQLCode\: (\d+)\]").search(str(exception.args))
        if m is not None:
            return int(m.group(1))
        return None
    def _get_server_version_info(self, connection):
        """Split dbversion (e.g. '5.5.8-log') on '.' and '-' into a tuple of
        ints where possible, leaving non-numeric parts as strings.
        """
        dbapi_con = connection.connection
        version = []
        r = re.compile(r'[.\-]')  # raw string: avoid invalid-escape ambiguity
        for n in r.split(dbapi_con.dbversion):
            try:
                version.append(int(n))
            except ValueError:
                version.append(n)
        return tuple(version)
dialect = MySQLDialect_zxjdbc
|
eerwitt/tensorflow | refs/heads/master | tensorflow/python/layers/base.py | 5 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# pylint: disable=unused-import,g-bad-import-order
"""Contains the base Layer class, from which all layers inherit.
This is a private class and its internal implementation is subject to changes
in the future.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import inspect
import re
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
import six
from tensorflow.python.framework import ops
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.ops import variable_scope as vs
class _Layer(object):
  """Base layer class.
  WARNING: Do not subclass this layer unless you know what you are doing:
  the API is subject to future changes.
  This is the class from which all layers inherit, implementing common
  infrastructure functionality.
  A layer is a class implementing common neural networks operations, such
  as convolution, batch norm, etc. These operations require managing variables,
  losses, and updates, as well as applying TensorFlow ops to input tensors.
  Properties:
    trainable: Whether the layer should be trained (boolean).
    name: The name of the layer (string).
    dtype: Default dtype of the layer (dtypes.float32).
    trainable_variables: List of trainable variables.
    non_trainable_variables: List of non-trainable variables.
    variables: List of all variables of this layer, trainable and non-trainable.
    updates: List of update ops of this layer.
    losses: List of losses added by this layer.
  """
  def __init__(self, trainable=True, name=None,
               dtype=dtypes.float32, **kwargs):
    # We use a kwargs dict here because these kwargs only exist
    # for compatibility reasons.
    # The list of kwargs is subject to changes in the future.
    # We do not want to commit to it or to expose the list to users at all.
    # Note this is exactly as safe as defining kwargs in the function signature,
    # the only difference being that the list of valid kwargs is defined
    # below rather rather in the signature, and default values are defined
    # in calls to kwargs.get().
    allowed_kwargs = {
        '_scope',
        '_reuse',
    }
    for kwarg in kwargs:
      if kwarg not in allowed_kwargs:
        raise TypeError('Keyword argument not understood:', kwarg)
    self._trainable = trainable
    self._built = False
    self._trainable_variables = []
    self._non_trainable_variables = []
    self._updates = []
    self._losses = []
    self._reuse = kwargs.get('_reuse')
    self.dtype = dtype
    # Determine base name (non-unique).
    base_name = name
    if not name:
      base_name = _to_snake_case(self.__class__.__name__)
    # Determine variable scope.
    scope = kwargs.get('_scope')
    # next(...gen) enters the scope's generator just far enough to obtain the
    # VariableScope object without keeping the context manager open.
    if scope:
      self._scope = next(vs.variable_scope(scope).gen)
    else:
      self._scope = next(vs.variable_scope(None, default_name=base_name).gen)
    # Unique name is borrowed from scope to match variable names.
    self.name = self._scope.name
  def __setattr__(self, name, value):
    # Enforce write-once semantics for public attributes set in __init__.
    if hasattr(self, name):
      # Only allow private attributes to be set more than once, under the
      # convention that private attributes should only be set from inside
      # the class.
      # All attributes meant to be set several times should be set to private.
      if name[0] != '_':
        raise AttributeError('Read-only property cannot be set: %s' % name)
    super(_Layer, self).__setattr__(name, value)
  @property
  def trainable_variables(self):
    return self._trainable_variables if self.trainable else []
  @property
  def non_trainable_variables(self):
    # When the layer is frozen, *all* its variables count as non-trainable.
    return self._non_trainable_variables if self.trainable else self.variables
  @property
  def trainable_weights(self):
    # Keras-style alias for trainable_variables.
    return self.trainable_variables
  @property
  def non_trainable_weights(self):
    # Keras-style alias for non_trainable_variables.
    return self.non_trainable_variables
  @property
  def variables(self):
    """Returns the list of all layer variables/weights.
    Returns:
      A list of variables.
    """
    return self._trainable_variables + self._non_trainable_variables
  @property
  def updates(self):
    return self._updates
  @property
  def losses(self):
    return self._losses
  @property
  def built(self):
    return self._built
  @property
  def trainable(self):
    return self._trainable
  @property
  def weights(self):
    """Returns the list of all layer variables/weights.
    Returns:
      A list of variables.
    """
    return self.variables
  def build(self, _):
    """Creates the variables of the layer.
    """
    self._built = True
  def call(self, inputs, **kwargs):
    """The logic of the layer lives here.
    Arguments:
      inputs: input tensor(s).
      **kwargs: additional keyword arguments.
    Returns:
      Output tensor(s).
    """
    raise NotImplementedError
  def _compute_output_shape(self, input_shape):
    """Computes the output shape of the layer given the input shape.
    Assumes that the layer will be built to match that input shape.
    Args:
      input_shape: A (possibly nested tuple of) `TensorShape`. It need not
        be fully defined (e.g. the batch size may be unknown).
    Returns:
      A (possibly nested tuple of) `TensorShape`.
    Raises:
      TypeError: if `input_shape` is not a (possibly nested tuple of)
        `TensorShape`.
      ValueError: if `input_shape` is incomplete or is incompatible with the
        the layer.
    """
    raise NotImplementedError
  def _add_variable(self, name, shape, dtype=None,
                    initializer=None, regularizer=None, trainable=True,
                    variable_getter=vs.get_variable):
    """Adds a new variable to the layer.
    Arguments:
      name: variable name.
      shape: variable shape.
      dtype: The type of the variable. Defaults to `self.dtype`.
      initializer: initializer instance (callable).
      regularizer: regularizer instance (callable).
      trainable: whether the variable should be part of the layer's
        "trainable_variables" (e.g. variables, biases)
        or "non_trainable_variables" (e.g. BatchNorm mean, stddev).
      variable_getter: The getter to use for TensorFlow variables.
    Returns:
      The created variable.
    """
    if dtype is None:
      dtype = self.dtype
    # Snapshot of globals taken *before* the getter runs: used below to tell
    # whether the getter created the variable or merely reused an existing one.
    existing_variables = set(tf_variables.global_variables())
    variable = variable_getter(name,
                               shape=shape,
                               initializer=initializer,
                               dtype=dtype,
                               trainable=trainable and self.trainable)
    # TODO(sguada) fix name = variable.op.name
    if variable in existing_variables:
      # Reused variable: do not re-register it or re-apply regularization.
      return variable
    if regularizer:
      # To match the behavior of tf.get_variable(), we only
      # apply regularization if the variable is newly created.
      if isinstance(variable, tf_variables.PartitionedVariable):
        for v in variable:
          with ops.colocate_with(v.op):
            with ops.name_scope(name + '/Regularizer'):
              regularization = regularizer(v)
          if regularization is not None:
            self._losses.append(regularization)
            _add_elements_to_collection(
                regularization, ops.GraphKeys.REGULARIZATION_LOSSES)
      else:
        with ops.colocate_with(variable.op):
          with ops.name_scope(name + '/Regularizer'):
            regularization = regularizer(variable)
        if regularization is not None:
          self._losses.append(regularization)
          _add_elements_to_collection(
              regularization, ops.GraphKeys.REGULARIZATION_LOSSES)
    if trainable:
      self._trainable_variables.append(variable)
    else:
      self._non_trainable_variables.append(variable)
    return variable
  def __call__(self, inputs, **kwargs):
    """Wraps `call`, applying pre- and post-processing steps.
    Arguments:
      inputs: input tensor(s).
      **kwargs: additional keyword arguments to be passed to `self.call`.
    Returns:
      Output tensor(s).
    """
    # Define a custom getter to override tf.get_variable when creating layer
    # variables. We respect current custom getter, if one is set.
    current_custom_getter = vs.get_variable_scope().custom_getter
    def variable_getter(getter, name, shape, dtype=None, initializer=None,
                        regularizer=None, trainable=True, **kwargs):
      if current_custom_getter is not None:
        getter = functools.partial(current_custom_getter, getter)
      return self._add_variable(
          name, shape, initializer=initializer, regularizer=regularizer,
          dtype=dtype, trainable=trainable,
          variable_getter=functools.partial(getter, **kwargs))
    # Build (if necessary) and call the layer, inside a variable scope.
    # Once built, the scope is re-entered with reuse=True so repeated calls
    # share the same variables.
    with vs.variable_scope(self._scope,
                           reuse=True if self._built else self._reuse,
                           custom_getter=variable_getter) as scope:
      with ops.name_scope(scope.original_name_scope):
        if not self.built:
          input_list = _to_list(inputs)
          input_shapes = [x.get_shape() for x in input_list]
          if len(input_shapes) == 1:
            self.build(input_shapes[0])
          else:
            self.build(input_shapes)
          self._built = True
        outputs = self.call(inputs, **kwargs)
        # Apply activity regularization.
        # Note that it should be applied every time the layer creates a new
        # output, since it is output-specific.
        if hasattr(self, 'activity_regularizer') and self.activity_regularizer:
          output_list = _to_list(outputs)
          for output in output_list:
            with ops.name_scope('ActivityRegularizer'):
              activity_regularization = self.activity_regularizer(output)
            self._losses.append(activity_regularization)
            _add_elements_to_collection(
                activity_regularization, ops.GraphKeys.REGULARIZATION_LOSSES)
    # Update global default collections.
    _add_elements_to_collection(self.updates, ops.GraphKeys.UPDATE_OPS)
    return outputs
  def apply(self, inputs, **kwargs):
    """Apply the layer on a input.
    This simply wraps `self.__call__`.
    Arguments:
      inputs: Input tensor(s).
      **kwargs: additional keyword arguments to be passed to `self.call`.
    Returns:
      Output tensor(s).
    """
    return self.__call__(inputs, **kwargs)
def _to_snake_case(name):
intermediate = re.sub('(.)([A-Z][a-z0-9]+)', r'\1_\2', name)
insecure = re.sub('([a-z])([A-Z])', r'\1_\2', intermediate).lower()
# If the class is private the name starts with "_" which is not secure
# for creating scopes. We prefix the name with "private" in this case.
if insecure[0] != '_':
return insecure
return 'private' + insecure
def _to_list(x):
"""This normalizes a list/tuple or single element into a list.
If a single element is passed, we return
a list of size 1 containing the element.
Arguments:
x: list or tuple or single element.
Returns:
A list.
"""
if isinstance(x, (list, tuple)):
return list(x)
return [x]
def _add_elements_to_collection(elements, collections):
  """Append each element to every named graph collection, skipping elements
  that were already present when the collection was inspected."""
  for collection_name in _to_list(collections):
    collection = ops.get_collection_ref(collection_name)
    already_present = set(collection)
    for element in _to_list(elements):
      if element not in already_present:
        collection.append(element)
|
Azure/azure-sdk-for-python | refs/heads/sync-eng/common-js-nightly-docs-2-1768-ForTestPipeline | sdk/cognitiveservices/azure-cognitiveservices-search-visualsearch/azure/cognitiveservices/search/visualsearch/models/image_entity_action_py3.py | 1 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .image_action_py3 import ImageAction
class ImageEntityAction(ImageAction):
    """Defines an entity action.
    Variables are only populated by the server, and will be ignored when
    sending a request.
    All required parameters must be populated in order to send to Azure.
    :param _type: Required. Constant filled by server.
    :type _type: str
    :ivar id: A String identifier.
    :vartype id: str
    :ivar read_link: The URL that returns this resource. To use the URL,
     append query parameters as appropriate and include the
     Ocp-Apim-Subscription-Key header.
    :vartype read_link: str
    :ivar web_search_url: The URL to Bing's search result for this item.
    :vartype web_search_url: str
    :ivar name: The name of the thing represented by this object.
    :vartype name: str
    :ivar url: The URL to get more information about the thing represented by
     this object.
    :vartype url: str
    :ivar image: An image of the item.
    :vartype image:
     ~azure.cognitiveservices.search.visualsearch.models.ImageObject
    :ivar description: A short description of the item.
    :vartype description: str
    :ivar alternate_name: An alias for the item.
    :vartype alternate_name: str
    :ivar bing_id: An ID that uniquely identifies this item.
    :vartype bing_id: str
    :ivar thumbnail_url: The URL to a thumbnail of the item.
    :vartype thumbnail_url: str
    :ivar provider: The source of the creative work.
    :vartype provider:
     list[~azure.cognitiveservices.search.visualsearch.models.Thing]
    :ivar date_published: The date on which the CreativeWork was published.
    :vartype date_published: str
    :ivar text: Text content of this creative work.
    :vartype text: str
    :ivar result: The result produced in the action.
    :vartype result:
     list[~azure.cognitiveservices.search.visualsearch.models.Thing]
    :ivar display_name: A display name for the action.
    :vartype display_name: str
    :ivar is_top_action: A Boolean representing whether this result is the top
     action.
    :vartype is_top_action: bool
    :ivar service_url: Use this URL to get additional data to determine how to
     take the appropriate action. For example, the serviceUrl might return JSON
     along with an image URL.
    :vartype service_url: str
    :ivar action_type: A string representing the type of action.
    :vartype action_type: str
    """
    # Validation constraints: everything except the discriminator is
    # server-populated (read-only).
    _validation = {
        '_type': {'required': True},
        'id': {'readonly': True},
        'read_link': {'readonly': True},
        'web_search_url': {'readonly': True},
        'name': {'readonly': True},
        'url': {'readonly': True},
        'image': {'readonly': True},
        'description': {'readonly': True},
        'alternate_name': {'readonly': True},
        'bing_id': {'readonly': True},
        'thumbnail_url': {'readonly': True},
        'provider': {'readonly': True},
        'date_published': {'readonly': True},
        'text': {'readonly': True},
        'result': {'readonly': True},
        'display_name': {'readonly': True},
        'is_top_action': {'readonly': True},
        'service_url': {'readonly': True},
        'action_type': {'readonly': True},
    }
    # Maps Python attribute names to wire (JSON) keys and msrest type strings.
    _attribute_map = {
        '_type': {'key': '_type', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'read_link': {'key': 'readLink', 'type': 'str'},
        'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'url': {'key': 'url', 'type': 'str'},
        'image': {'key': 'image', 'type': 'ImageObject'},
        'description': {'key': 'description', 'type': 'str'},
        'alternate_name': {'key': 'alternateName', 'type': 'str'},
        'bing_id': {'key': 'bingId', 'type': 'str'},
        'thumbnail_url': {'key': 'thumbnailUrl', 'type': 'str'},
        'provider': {'key': 'provider', 'type': '[Thing]'},
        'date_published': {'key': 'datePublished', 'type': 'str'},
        'text': {'key': 'text', 'type': 'str'},
        'result': {'key': 'result', 'type': '[Thing]'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'is_top_action': {'key': 'isTopAction', 'type': 'bool'},
        'service_url': {'key': 'serviceUrl', 'type': 'str'},
        'action_type': {'key': 'actionType', 'type': 'str'},
    }
    def __init__(self, **kwargs) -> None:
        super(ImageEntityAction, self).__init__(**kwargs)
        # Discriminator used by the service for polymorphic deserialization.
        self._type = 'ImageEntityAction'
|
centricular/gstreamer | refs/heads/meson-1.8 | scripts/gst-plot-timeline.py | 24 | #!/usr/bin/env python
#
# based on plot-timeline.py by Federico Mena-Quintero <federico at ximian dotcom>
# example:
# GST_DEBUG_COLOR_MODE=off GST_DEBUG="*:3" gst-launch-1.0 2>debug.log audiotestsrc num-buffers=10 ! audioconvert ! alsasink
# gst-plot-timeline.py debug.log --output=debug.png
import math
import optparse
import os
import re
import sys
import cairo
FONT_NAME = "Bitstream Vera Sans"
FONT_SIZE = 8
# how many pixels for a second on the timeline
PIXELS_PER_SECOND = 300
# how many pixels for one line of log
PIXELS_PER_LINE = 10
# geometry of the rendered plot, in pixels
PLOT_WIDTH = 1400
TIME_SCALE_WIDTH = 20
SYSCALL_MARKER_WIDTH = 20
LOG_TEXT_XPOS = 400
LOG_MARKER_WIDTH = 20
BACKGROUND_COLOR = (0, 0, 0)
# assumes GST_DEBUG_LOG_COLOR=1
# timestamp pid thread level category,file,line,msg
mark_regex = re.compile (r'^(\d+:\d+:\d+\.\d+) +\d+ +0?x?[0-9a-f]+ [A-Z]+ +([-a-zA-Z0-9_]+ )(.*)')
# capture-group indices into mark_regex
mark_timestamp_group = 1
mark_program_group = 2
mark_log_group = 3
success_result = "0"
# defaults; overridden from the command line in main()
skip_lines = 0
max_lines = 500
filter_regex = re.compile ('')
skip_regex = re.compile('')
class BaseMark:
    """A single parsed log entry to be drawn on the timeline."""

    # default drawing colour (black); subclasses override
    colors = (0, 0, 0)

    def __init__(self, timestamp, log):
        """Remember the entry's time (seconds) and text.

        The y pixel positions start at 0 and are laid out later by
        compute_syscall_metrics().
        """
        self.timestamp, self.log = timestamp, log
        self.timestamp_ypos = self.log_ypos = 0
class AccessMark(BaseMark):
    """An ordinary log line; coloured per log category by SyscallParser."""
    pass
class LastMark(BaseMark):
    """Marker for a log line reading 'last': collection stops here (red)."""
    colors = 1.0, 0, 0
class FirstMark(BaseMark):
    """Marker for a log line reading 'first': collection restarts here (red)."""
    colors = 1.0, 0, 0
class ExecMark(BaseMark):
    """Marks an execve() event; log text is reduced to the program basename."""
    # colors = 0.75, 0.33, 0.33
    colors = (1.0, 0.0, 0.0)

    def __init__(self, timestamp, log):
        BaseMark.__init__(self, timestamp,
                          'execve: ' + os.path.basename(log))
class Metrics:
    """Width/height (pixels) of the plot, filled by compute_syscall_metrics()."""
    def __init__(self):
        self.width = 0
        self.height = 0
# don't use black or red
# (black is the plot background; red is reserved for First/Last marks)
palette = [
    (0.12, 0.29, 0.49),
    (0.36, 0.51, 0.71),
    (0.75, 0.31, 0.30),
    (0.62, 0.73, 0.38),
    (0.50, 0.40, 0.63),
    (0.29, 0.67, 0.78),
    (0.96, 0.62, 0.34)
]
class SyscallParser:
    """Accumulates parsed log lines, as *Mark objects, in self.syscalls."""

    def __init__ (self):
        self.syscalls = []

    def add_line (self, str):
        # NOTE(review): parameter 'str' shadows the builtin; kept as-is.
        m = mark_regex.search (str)
        if m:
            # timestamp is H:M:S.frac -> convert to seconds
            timestr = m.group (mark_timestamp_group).split(':')
            timestamp = float (timestr[2]) + (float (timestr[1]) * 60.0) + (float (timestr[0]) * 3600.0)
            program = m.group (mark_program_group)
            text = program + m.group (mark_log_group)
            # NOTE(review): text always begins with the category name, so the
            # bare 'last'/'first' comparisons below look unreachable -- confirm.
            if text == 'last':
                self.syscalls.append (LastMark (timestamp, text))
            elif text == 'first':
                self.syscalls.append (FirstMark (timestamp, text))
            else:
                s = AccessMark (timestamp, text)
                # colour is stable per category name
                program_hash = program.__hash__ ()
                s.colors = palette[program_hash % len (palette)]
                self.syscalls.append (s)
        else:
            print 'No log in %s' % str
            return
def parse_strace(filename):
    """Parse the debug log into a list of *Mark objects.

    Honours the module-level skip_lines / skip_regex / max_lines /
    filter_regex options (set from the command line in main()).
    """
    parser = SyscallParser ()
    global skip_lines
    global max_lines
    global skip_regex
    skip_found = False
    # NOTE(review): file() is Python 2 only.
    for line in file(filename, "r").readlines():
        if line == "":
            break
        # discard everything until skip_regex matches once
        if not skip_found:
            if skip_regex.search(line):
                skip_found = True
            else:
                continue
        if skip_lines > 0:
            skip_lines -= 1
            continue
        if len(parser.syscalls) >= max_lines:
            break
        if filter_regex.search(line):
            parser.add_line (line)
    return parser.syscalls
def normalize_timestamps(syscalls):
    """Shift all timestamps in place so the first entry starts at time 0."""
    origin = syscalls[0].timestamp
    for entry in syscalls:
        entry.timestamp = entry.timestamp - origin
def compute_syscall_metrics(syscalls):
    """Size the plot and assign a y position to every entry.

    Whichever is taller -- the time axis or the stack of text lines --
    sets the height; the other global scale factor is stretched to match.
    """
    global PIXELS_PER_SECOND
    global PIXELS_PER_LINE
    num_syscalls = len(syscalls)
    metrics = Metrics()
    metrics.width = PLOT_WIDTH
    last_timestamp = syscalls[num_syscalls - 1].timestamp
    time_height = int(math.ceil(last_timestamp * PIXELS_PER_SECOND))
    line_height = num_syscalls * PIXELS_PER_LINE
    if time_height > line_height:
        metrics.height = time_height
        print "Adjusting PIXELS_PER_LINE = %d" % PIXELS_PER_LINE
        # NOTE(review): relies on Python 2 integer division.
        PIXELS_PER_LINE = metrics.height / num_syscalls
        print "  PIXELS_PER_LINE = %d" % PIXELS_PER_LINE
    else:
        metrics.height = line_height
        print "Adjusting PIXELS_PER_SECOND %d" % PIXELS_PER_SECOND
        PIXELS_PER_SECOND = int(math.ceil(metrics.height / last_timestamp))
        print "  PIXELS_PER_SECOND %d" % PIXELS_PER_SECOND
    # lay the log text out top-to-bottom, one line per entry
    text_ypos = 0
    for syscall in syscalls:
        syscall.timestamp_ypos = syscall.timestamp * PIXELS_PER_SECOND
        syscall.log_ypos = text_ypos + FONT_SIZE
        text_ypos += PIXELS_PER_LINE
    return metrics
def plot_time_scale(surface, ctx, metrics):
    """Draw one tick line and '<n> s' label per second down the left edge."""
    # NOTE(review): integer result assumed here (Python 2 division).
    num_seconds = (metrics.height + PIXELS_PER_SECOND - 1) / PIXELS_PER_SECOND
    ctx.set_source_rgb(0.5, 0.5, 0.5)
    ctx.set_line_width(1.0)
    for i in range(num_seconds):
        ypos = i * PIXELS_PER_SECOND
        # +0.5 centres the 1px line on the pixel grid
        ctx.move_to(0, ypos + 0.5)
        ctx.line_to(TIME_SCALE_WIDTH, ypos + 0.5)
        ctx.stroke()
        ctx.move_to(0, ypos + 2 + FONT_SIZE)
        ctx.show_text("%d s" % i)
def plot_syscall(surface, ctx, syscall):
    """Draw one entry: a connector from its time position to its text row."""
    ctx.set_source_rgb(*syscall.colors)
    # Line
    ctx.move_to(TIME_SCALE_WIDTH, syscall.timestamp_ypos)
    ctx.line_to(TIME_SCALE_WIDTH + SYSCALL_MARKER_WIDTH, syscall.timestamp_ypos)
    ctx.line_to(LOG_TEXT_XPOS - LOG_MARKER_WIDTH, syscall.log_ypos - FONT_SIZE / 2 + 0.5)
    ctx.line_to(LOG_TEXT_XPOS, syscall.log_ypos - FONT_SIZE / 2 + 0.5)
    ctx.stroke()
    # Log text
    ctx.move_to(LOG_TEXT_XPOS, syscall.log_ypos)
    ctx.show_text("%8.5f: %s" % (syscall.timestamp, syscall.log))
def plot_syscalls_to_surface(syscalls, metrics):
    """Render all entries onto a new cairo image surface and return it."""
    num_syscalls = len(syscalls)  # NOTE(review): unused
    print 'picture size: %d x %d' % (metrics.width, metrics.height);
    surface = cairo.ImageSurface(cairo.FORMAT_RGB24,
                                 metrics.width, metrics.height)
    ctx = cairo.Context(surface)
    ctx.select_font_face(FONT_NAME)
    ctx.set_font_size(FONT_SIZE)
    # Background
    ctx.set_source_rgb (*BACKGROUND_COLOR)
    ctx.rectangle(0, 0, metrics.width, metrics.height)
    ctx.fill()
    # Time scale
    plot_time_scale(surface, ctx, metrics)
    # Contents
    ctx.set_line_width(1.0)
    for syscall in syscalls:
        plot_syscall(surface, ctx, syscall)
    return surface
def main(args):
    """Command-line entry point: parse options, read the log, write the PNG.

    Returns a process exit status (0 on success, 1 on usage errors).
    """
    global skip_lines
    global max_lines
    global filter_regex
    global skip_regex
    option_parser = optparse.OptionParser(
        usage="usage: %prog -o output.png <debug.log>")
    option_parser.add_option("-o",
                             "--output", dest="output",
                             metavar="FILE",
                             help="Name of output file (output is a PNG file)")
    option_parser.add_option("-s",
                             "--skip", dest="skip",
                             metavar="LINES",
                             help="Skip a number of loglines at the beginning of the file or wait till a regular expression happens")
    option_parser.add_option("-m",
                             "--max-lines", dest="max",
                             help="max lines that need to be plotted")
    option_parser.add_option("-f",
                             "--filter", dest="filter",
                             help="filter the log lines on a regular expression")
    options, args = option_parser.parse_args()
    if not options.output:
        print 'Please specify an output filename with "-o file.png" or "--output=file.png".'
        return 1
    if len(args) != 1:
        print 'Please specify only one input filename, which is an debug log taken with "GST_DEBUG_COLOR_MODE=off GST_DEBUG=XXX <application>"'
        return 1
    in_filename = args[0]
    out_filename = options.output
    if options.skip:
        try:
            skip_lines = int(options.skip)
        # NOTE(review): deliberate fallback -- a non-numeric --skip value is
        # treated as a regular expression (bare except kept as-is).
        except:
            skip_regex = re.compile(options.skip)
            skip_lines = 0
    if options.max:
        max_lines = int(options.max)
    if options.filter:
        filter_regex = re.compile(options.filter)
    syscalls = []
    for syscall in parse_strace(in_filename):
        syscalls.append(syscall)
        # a 'first' mark resets the collection; a 'last' mark ends it
        if isinstance(syscall, FirstMark):
            syscalls = []
        elif isinstance(syscall, LastMark):
            break
    if not syscalls:
        print 'No logs in %s' % in_filename
        return 1
    normalize_timestamps(syscalls)
    metrics = compute_syscall_metrics(syscalls)
    surface = plot_syscalls_to_surface(syscalls, metrics)
    surface.write_to_png(out_filename)
    return 0
if __name__ == "__main__":
    # main() re-parses sys.argv itself via optparse; the argument is unused.
    sys.exit(main(sys.argv))
|
rdhyee/osf.io | refs/heads/develop | website/addons/wiki/views.py | 1 | # -*- coding: utf-8 -*-
import httplib as http
import logging
from bs4 import BeautifulSoup
from flask import request
from framework.mongo.utils import to_mongo_key
from framework.exceptions import HTTPError
from framework.auth.utils import privacy_info_handle
from framework.auth.decorators import must_be_logged_in
from framework.flask import redirect
from website.addons.wiki import settings
from website.addons.wiki import utils as wiki_utils
from website.profile.utils import get_gravatar
from website.project.views.node import _view_project
from website.project.model import has_anonymous_link
from website.project.decorators import (
must_be_contributor_or_public,
must_have_addon, must_not_be_registration,
must_be_valid_project,
must_have_permission,
must_have_write_permission_or_public_wiki,
)
from website.exceptions import NodeStateError
from .exceptions import (
NameEmptyError,
NameInvalidError,
NameMaximumLengthError,
PageCannotRenameError,
PageConflictError,
PageNotFoundError,
InvalidVersionError,
)
from .model import NodeWikiPage
logger = logging.getLogger(__name__)
# Canned HTTPError instances shared by the wiki view functions below.
WIKI_NAME_EMPTY_ERROR = HTTPError(http.BAD_REQUEST, data=dict(
    message_short='Invalid request',
    message_long='The wiki page name cannot be empty.'
))
WIKI_NAME_MAXIMUM_LENGTH_ERROR = HTTPError(http.BAD_REQUEST, data=dict(
    message_short='Invalid request',
    message_long='The wiki page name cannot be more than 100 characters.'
))
WIKI_PAGE_CANNOT_RENAME_ERROR = HTTPError(http.BAD_REQUEST, data=dict(
    message_short='Invalid request',
    message_long='The wiki page cannot be renamed.'
))
WIKI_PAGE_CONFLICT_ERROR = HTTPError(http.CONFLICT, data=dict(
    message_short='Page conflict',
    message_long='A wiki page with that name already exists.'
))
WIKI_PAGE_NOT_FOUND_ERROR = HTTPError(http.NOT_FOUND, data=dict(
    message_short='Not found',
    message_long='A wiki page could not be found.'
))
WIKI_INVALID_VERSION_ERROR = HTTPError(http.BAD_REQUEST, data=dict(
    message_short='Invalid request',
    message_long='The requested version of this wiki page does not exist.'
))
def _get_wiki_versions(node, name, anonymous=False):
    """Return metadata dicts for every version of the named page, newest first.

    User full names are scrubbed when *anonymous* is set.
    """
    key = to_mongo_key(name)
    # Skip if wiki_page doesn't exist; happens on new projects before
    # default "home" page is created
    if key not in node.wiki_pages_versions:
        return []
    versions = [
        NodeWikiPage.load(version_wiki_id)
        for version_wiki_id in node.wiki_pages_versions[key]
    ]
    return [
        {
            'version': version.version,
            'user_fullname': privacy_info_handle(version.user.fullname, anonymous, name=True),
            'date': '{} UTC'.format(version.date.replace(microsecond=0).isoformat().replace('T', ' ')),
        }
        for version in reversed(versions)
    ]
def _get_wiki_pages_current(node):
    """Return a dict per current wiki page (name/url/id/content), sorted by key."""
    return [
        {
            'name': sorted_page.page_name,
            'url': node.web_url_for('project_wiki_view', wname=sorted_page.page_name, _guid=True),
            'wiki_id': sorted_page._primary_key,
            'wiki_content': wiki_page_content(sorted_page.page_name, node=node)
        }
        for sorted_page in [
            node.get_wiki_page(sorted_key)
            for sorted_key in sorted(node.wiki_pages_current)
        ]
        # TODO: remove after forward slash migration
        if sorted_page is not None
    ]
def _get_wiki_api_urls(node, name, additional_urls=None):
urls = {
'base': node.api_url_for('project_wiki_home'),
'delete': node.api_url_for('project_wiki_delete', wname=name),
'rename': node.api_url_for('project_wiki_rename', wname=name),
'content': node.api_url_for('wiki_page_content', wname=name),
'settings': node.api_url_for('edit_wiki_settings'),
'grid': node.api_url_for('project_wiki_grid_data', wname=name)
}
if additional_urls:
urls.update(additional_urls)
return urls
def _get_wiki_web_urls(node, key, version=1, additional_urls=None):
urls = {
'base': node.web_url_for('project_wiki_home', _guid=True),
'edit': node.web_url_for('project_wiki_view', wname=key, _guid=True),
'home': node.web_url_for('project_wiki_home', _guid=True),
'page': node.web_url_for('project_wiki_view', wname=key, _guid=True),
}
if additional_urls:
urls.update(additional_urls)
return urls
@must_be_contributor_or_public
@must_have_addon('wiki', 'node')
def wiki_widget(**kwargs):
    """Serialize the 'home' wiki for the project dashboard widget.

    Rendered HTML is truncated to 400 characters; 'more' flags that a
    "Read more" link should be shown.
    """
    node = kwargs['node'] or kwargs['project']
    wiki = node.get_addon('wiki')
    wiki_page = node.get_wiki_page('home')
    # Show "Read more" link if there are multiple pages or has > 400 characters
    more = len(node.wiki_pages_current.keys()) >= 2
    MAX_DISPLAY_LENGTH = 400
    rendered_before_update = False
    if wiki_page and wiki_page.html(node):
        wiki_html = wiki_page.html(node)
        if len(wiki_html) > MAX_DISPLAY_LENGTH:
            # BeautifulSoup re-closes tags cut open by the truncation
            wiki_html = BeautifulSoup(wiki_html[:MAX_DISPLAY_LENGTH] + '...', 'html.parser')
            more = True
        else:
            wiki_html = BeautifulSoup(wiki_html)
        rendered_before_update = wiki_page.rendered_before_update
    else:
        wiki_html = None
    ret = {
        'complete': True,
        'wiki_content': unicode(wiki_html) if wiki_html else None,
        'wiki_content_url': node.api_url_for('wiki_page_content', wname='home'),
        'rendered_before_update': rendered_before_update,
        'more': more,
        'include': False,
    }
    ret.update(wiki.config.to_json())
    return ret
@must_be_valid_project
@must_have_write_permission_or_public_wiki
@must_have_addon('wiki', 'node')
def wiki_page_draft(wname, **kwargs):
    """Return the saved content and current sharejs draft of a wiki page."""
    node = kwargs['node'] or kwargs['project']
    page = node.get_wiki_page(wname)
    if page:
        return {
            'wiki_content': page.content,
            'wiki_draft': page.get_draft(node),
        }
    # no page yet: fall back to whatever is live in the sharejs document
    return {
        'wiki_content': None,
        'wiki_draft': wiki_utils.get_sharejs_content(node, wname),
    }
@must_be_valid_project
@must_be_contributor_or_public
@must_have_addon('wiki', 'node')
def wiki_page_content(wname, wver=None, **kwargs):
    """Return the raw content (and render-status flag) of a wiki page version."""
    node = kwargs['node'] or kwargs['project']
    page = node.get_wiki_page(wname, version=wver)
    if page:
        return {
            'wiki_content': page.content,
            'rendered_before_update': page.rendered_before_update,
        }
    return {'wiki_content': '', 'rendered_before_update': False}
@must_be_valid_project  # injects project
@must_have_permission('write')  # injects user, project
@must_not_be_registration
@must_have_addon('wiki', 'node')
def project_wiki_delete(auth, wname, **kwargs):
    """Delete the named wiki page and tell sharejs clients it is gone."""
    node = kwargs['node'] or kwargs['project']
    wiki_name = wname.strip()
    wiki_page = node.get_wiki_page(wiki_name)
    sharejs_uuid = wiki_utils.get_sharejs_uuid(node, wiki_name)
    if not wiki_page:
        raise HTTPError(http.NOT_FOUND)
    node.delete_node_wiki(wiki_name, auth)
    wiki_utils.broadcast_to_sharejs('delete', sharejs_uuid, node)
    return {}
@must_be_valid_project  # returns project
@must_be_contributor_or_public
@must_have_addon('wiki', 'node')
def project_wiki_view(auth, wname, path=None, **kwargs):
    """Render the main wiki page (view/edit/compare/menu panels).

    :param wname: wiki page name from the URL (may be empty for home)
    :param path: unused here; kept for URL-rule compatibility
    :raises: WIKI_PAGE_NOT_FOUND_ERROR, WIKI_INVALID_VERSION_ERROR, HTTPError
    """
    node = kwargs['node'] or kwargs['project']
    anonymous = has_anonymous_link(node, auth)
    wiki_name = (wname or '').strip()
    wiki_key = to_mongo_key(wiki_name)
    wiki_page = node.get_wiki_page(wiki_name)
    wiki_settings = node.get_addon('wiki')
    # Write contributors may always edit; others only when the wiki is
    # publicly editable.  Registrations are never editable.
    can_edit = (
        auth.logged_in
        and not node.is_registration
        and (
            node.has_permission(auth.user, 'write')
            or wiki_settings.is_publicly_editable
        )
    )
    versions = _get_wiki_versions(node, wiki_name, anonymous=anonymous)
    # Determine panels used in view
    panels = {'view', 'edit', 'compare', 'menu'}
    if request.args and set(request.args).intersection(panels):
        panels_used = [panel for panel in request.args if panel in panels]
        num_columns = len(set(panels_used).intersection({'view', 'edit', 'compare'}))
        if num_columns == 0:
            panels_used.append('view')
            num_columns = 1
    else:
        panels_used = ['view', 'menu']
        num_columns = 1
    try:
        view = wiki_utils.format_wiki_version(
            version=request.args.get('view'),
            num_versions=len(versions),
            allow_preview=True,
        )
        compare = wiki_utils.format_wiki_version(
            version=request.args.get('compare'),
            num_versions=len(versions),
            allow_preview=False,
        )
    except InvalidVersionError:
        raise WIKI_INVALID_VERSION_ERROR
    # ensure home is always lower case since it cannot be renamed
    if wiki_name.lower() == 'home':
        wiki_name = 'home'
    if wiki_page:
        version = wiki_page.version
        is_current = wiki_page.is_current
        content = wiki_page.html(node)
        rendered_before_update = wiki_page.rendered_before_update
    else:
        version = 'NA'
        is_current = False
        content = ''
        rendered_before_update = False
    if can_edit:
        # editing goes through sharejs; make sure a private doc uuid exists
        if wiki_key not in node.wiki_private_uuids:
            wiki_utils.generate_private_uuid(node, wiki_name)
        sharejs_uuid = wiki_utils.get_sharejs_uuid(node, wiki_name)
    else:
        if wiki_key not in node.wiki_pages_current and wiki_key != 'home':
            raise WIKI_PAGE_NOT_FOUND_ERROR
        if 'edit' in request.args:
            # 401 invites login when the wiki is publicly editable; 403 otherwise
            if wiki_settings.is_publicly_editable:
                raise HTTPError(http.UNAUTHORIZED)
            raise HTTPError(http.FORBIDDEN)
        sharejs_uuid = None
    # Opens 'edit' panel when home wiki is empty
    if not content and can_edit and wiki_name == 'home':
        panels_used.append('edit')
    # Default versions for view and compare
    version_settings = {
        'view': view or ('preview' if 'edit' in panels_used else 'current'),
        'compare': compare or 'previous',
    }
    ret = {
        'wiki_id': wiki_page._primary_key if wiki_page else None,
        'wiki_name': wiki_page.page_name if wiki_page else wiki_name,
        'wiki_content': content,
        'rendered_before_update': rendered_before_update,
        'page': wiki_page,
        'version': version,
        'versions': versions,
        'sharejs_uuid': sharejs_uuid or '',
        'sharejs_url': settings.SHAREJS_URL,
        'is_current': is_current,
        'version_settings': version_settings,
        'pages_current': _get_wiki_pages_current(node),
        'category': node.category,
        'panels_used': panels_used,
        'num_columns': num_columns,
        'urls': {
            'api': _get_wiki_api_urls(node, wiki_name, {
                'content': node.api_url_for('wiki_page_content', wname=wiki_name),
                'draft': node.api_url_for('wiki_page_draft', wname=wiki_name),
            }),
            'web': _get_wiki_web_urls(node, wiki_name),
            'gravatar': get_gravatar(auth.user, 25),
        },
    }
    ret.update(_view_project(node, auth, primary=True))
    ret['user']['can_edit_wiki_body'] = can_edit
    return ret
@must_be_valid_project  # injects node or project
@must_have_write_permission_or_public_wiki  # injects user
@must_not_be_registration
@must_have_addon('wiki', 'node')
def project_wiki_edit_post(auth, wname, **kwargs):
    """Handle a form POST of wiki content: create or update the page.

    Redirects back to the page view; 'status' reports whether anything changed.
    """
    node = kwargs['node'] or kwargs['project']
    wiki_name = wname.strip()
    wiki_page = node.get_wiki_page(wiki_name)
    redirect_url = node.web_url_for('project_wiki_view', wname=wiki_name, _guid=True)
    form_wiki_content = request.form['content']
    # ensure home is always lower case since it cannot be renamed
    if wiki_name.lower() == 'home':
        wiki_name = 'home'
    if wiki_page:
        # Only update node wiki if content has changed
        if form_wiki_content != wiki_page.content:
            node.update_node_wiki(wiki_page.page_name, form_wiki_content, auth)
            ret = {'status': 'success'}
        else:
            ret = {'status': 'unmodified'}
    else:
        # update_node_wiki will create a new wiki page because a page
        # with this name does not exist yet
        node.update_node_wiki(wiki_name, form_wiki_content, auth)
        ret = {'status': 'success'}
    return ret, http.FOUND, None, redirect_url
@must_be_valid_project  # injects node or project
@must_have_permission('admin')
@must_not_be_registration
@must_have_addon('wiki', 'node')
def edit_wiki_settings(node, auth, **kwargs):
    """Toggle whether the node's wiki is publicly editable.

    Expects JSON ``{'permission': 'public'|'private'}``; any other value
    (or a missing wiki addon) yields 400.
    """
    wiki_settings = node.get_addon('wiki')
    permissions = request.get_json().get('permission', None)
    if not wiki_settings:
        raise HTTPError(http.BAD_REQUEST, data=dict(
            message_short='Invalid request',
            message_long='Cannot change wiki settings without a wiki'
        ))
    if permissions == 'public':
        permissions = True
    elif permissions == 'private':
        permissions = False
    else:
        raise HTTPError(http.BAD_REQUEST, data=dict(
            message_short='Invalid request',
            message_long='Permissions flag used is incorrect.'
        ))
    try:
        wiki_settings.set_editing(permissions, auth, log=True)
    except NodeStateError as e:
        raise HTTPError(http.BAD_REQUEST, data=dict(
            message_short="Can't change privacy",
            message_long=e.message
        ))
    return {
        'status': 'success',
        'permissions': permissions,
    }
@must_be_logged_in
@must_be_valid_project
def get_node_wiki_permissions(node, auth, **kwargs):
    """Return the serialized wiki settings of this node for the current user."""
    return wiki_utils.serialize_wiki_settings(auth.user, [node._id])
@must_be_valid_project
@must_have_addon('wiki', 'node')
def project_wiki_home(**kwargs):
    """Redirect to the view page of the node's 'home' wiki."""
    node = kwargs['node'] or kwargs['project']
    home_url = node.web_url_for('project_wiki_view', wname='home', _guid=True)
    return redirect(home_url)
@must_be_valid_project  # injects project
@must_be_contributor_or_public
@must_have_addon('wiki', 'node')
def project_wiki_id_page(auth, wid, **kwargs):
    """Resolve a wiki page by id and redirect to its named view URL."""
    node = kwargs['node'] or kwargs['project']
    wiki_page = node.get_wiki_page(id=wid)
    if not wiki_page:
        raise WIKI_PAGE_NOT_FOUND_ERROR
    return redirect(node.web_url_for('project_wiki_view', wname=wiki_page.page_name, _guid=True))
@must_be_valid_project
@must_have_write_permission_or_public_wiki
@must_not_be_registration
@must_have_addon('wiki', 'node')
def project_wiki_edit(wname, **kwargs):
    """Redirect to the wiki view page with the edit/view/menu panels open."""
    node = kwargs['node'] or kwargs['project']
    base_url = node.web_url_for('project_wiki_view', wname=wname, _guid=True)
    return redirect(base_url + '?edit&view&menu')
@must_be_valid_project
@must_be_contributor_or_public
@must_have_addon('wiki', 'node')
def project_wiki_compare(wname, wver, **kwargs):
    """Redirect to the wiki view page comparing against version *wver*."""
    node = kwargs['node'] or kwargs['project']
    base_url = node.web_url_for('project_wiki_view', wname=wname, _guid=True)
    return redirect(base_url + '?view&compare={0}&menu'.format(wver))
@must_not_be_registration
@must_have_permission('write')
@must_have_addon('wiki', 'node')
def project_wiki_rename(auth, wname, **kwargs):
    """View that handles the X-editable input for wiki page renaming.

    :param wname: The target wiki page name.
    :param-json value: The new wiki page name.
    :raises: HTTPError (400/404/409) translated from model-level rename errors.
    """
    node = kwargs['node'] or kwargs['project']
    wiki_name = wname.strip()
    new_wiki_name = request.get_json().get('value', None)
    try:
        node.rename_node_wiki(wiki_name, new_wiki_name, auth)
    except NameEmptyError:
        raise WIKI_NAME_EMPTY_ERROR
    except NameInvalidError as error:
        raise HTTPError(http.BAD_REQUEST, data=dict(
            message_short='Invalid name',
            message_long=error.args[0]
        ))
    except NameMaximumLengthError:
        raise WIKI_NAME_MAXIMUM_LENGTH_ERROR
    except PageCannotRenameError:
        raise WIKI_PAGE_CANNOT_RENAME_ERROR
    except PageConflictError:
        raise WIKI_PAGE_CONFLICT_ERROR
    except PageNotFoundError:
        raise WIKI_PAGE_NOT_FOUND_ERROR
    else:
        # point live sharejs sessions at the renamed document
        sharejs_uuid = wiki_utils.get_sharejs_uuid(node, new_wiki_name)
        wiki_utils.broadcast_to_sharejs('redirect', sharejs_uuid, node, new_wiki_name)
@must_be_valid_project  # returns project
@must_have_permission('write')  # returns user, project
@must_not_be_registration
@must_have_addon('wiki', 'node')
def project_wiki_validate_name(wname, auth, node, **kwargs):
    """Create an empty wiki page with the given name, or 409 if it exists."""
    wiki_name = wname.strip()
    wiki_key = to_mongo_key(wiki_name)
    if wiki_key in node.wiki_pages_current or wiki_key == 'home':
        raise HTTPError(http.CONFLICT, data=dict(
            message_short='Wiki page name conflict.',
            message_long='A wiki page with that name already exists.'
        ))
    else:
        # reserve the name by creating an empty page
        node.update_node_wiki(wiki_name, '', auth)
    return {'message': wiki_name}
@must_be_valid_project
@must_be_contributor_or_public
def project_wiki_grid_data(auth, node, **kwargs):
    """Return the wiki page tree for the grid widget.

    Project pages come first; a 'Component Wiki Pages' folder is appended
    only when at least one component has pages to show.
    """
    def _heading(title, children):
        # one collapsible folder row in the grid
        return {
            'title': title,
            'kind': 'folder',
            'type': 'heading',
            'children': children,
        }

    grid = [_heading('Project Wiki Pages', format_project_wiki_pages(node, auth))]
    component_children = format_component_wiki_pages(node, auth)
    if len(component_children) > 0:
        grid.append(_heading('Component Wiki Pages', component_children))
    return grid
def format_home_wiki_page(node):
    """Serialize the node's 'home' wiki page for the grid.

    A placeholder entry (id 'None') is returned when no home page exists yet.
    """
    home_wiki = node.get_wiki_page('home')
    if home_wiki:
        return {
            'page': {
                'url': node.web_url_for('project_wiki_view', wname='home', _guid=True),
                'name': 'Home',
                'id': home_wiki._primary_key,
            }
        }
    return {
        'page': {
            'url': node.web_url_for('project_wiki_home'),
            'name': 'Home',
            'id': 'None',
        }
    }
def format_project_wiki_pages(node, auth):
    """Serialize the project's own wiki pages for the grid.

    'home' always appears first; other pages are listed only when the user
    can edit the wiki or the page has content.
    """
    can_edit = node.has_permission(auth.user, 'write') and not node.is_registration
    pages = [format_home_wiki_page(node)]
    for entry in _get_wiki_pages_current(node):
        if entry['name'] == 'home':
            continue
        has_content = bool(entry['wiki_content'].get('wiki_content'))
        if not (can_edit or has_content):
            continue
        pages.append({
            'page': {
                'url': entry['url'],
                'name': entry['name'],
                'id': entry['wiki_id'],
            }
        })
    return pages
def format_component_wiki_pages(node, auth):
    """Serialize wiki pages of every visible, wiki-enabled child component."""
    pages = []
    for child in node.nodes:
        # skip deleted, unviewable, or wiki-less components
        if child.is_deleted or not child.can_view(auth) or not child.has_addon('wiki'):
            continue
        serialized = serialize_component_wiki(child, auth)
        if serialized:
            pages.append(serialized)
    return pages
def serialize_component_wiki(node, auth):
    """Serialize a single component's wiki pages for the grid.

    Returns None when there is nothing to show (no editable access and no
    page with content).
    """
    children = []
    url = node.web_url_for('project_wiki_view', wname='home', _guid=True)
    home_has_content = bool(wiki_page_content('home', node=node).get('wiki_content'))
    component_home_wiki = {
        'page': {
            'url': url,
            'name': 'Home',
            # Handle pointers
            'id': node._primary_key if node.primary else node.node._primary_key,
        }
    }
    can_edit = node.has_permission(auth.user, 'write') and not node.is_registration
    if can_edit or home_has_content:
        children.append(component_home_wiki)
    for page in _get_wiki_pages_current(node):
        if page['name'] != 'home':
            has_content = bool(page['wiki_content'].get('wiki_content'))
            component_page = {
                'page': {
                    'url': page['url'],
                    'name': page['name'],
                    'id': page['wiki_id'],
                }
            }
            if can_edit or has_content:
                children.append(component_page)
    if len(children) > 0:
        component = {
            'page': {
                'name': node.title,
                'url': url,
            },
            'kind': 'component',
            'category': node.category,
            'pointer': not node.primary,
            'children': children,
        }
        return component
    return None
|
jmighion/ansible | refs/heads/devel | lib/ansible/plugins/lookup/cartesian.py | 141 | # (c) 2013, Bradley Young <young.bradley@gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: cartesian
version_added: "2.1"
short_description: returns the cartesian product of lists
description:
- Takes the input lists and returns a list that represents the product of the input lists.
- It is clearer with an example, it turns [1, 2, 3], [a, b] into [1, a], [1, b], [2, a], [2, b], [3, a], [3, b].
You can see the exact syntax in the examples section.
options:
_raw:
description:
- a set of lists
required: True
"""
EXAMPLES = """
- name: Example of the change in the description
debug: msg="{{ [1,2,3]|lookup('cartesian', [a, b])}}"
- name: loops over the cartesian product of the supplied lists
debug: msg="{{item}}"
with_cartesian:
- "{{list1}}"
- "{{list2}}"
- [1,2,3,4,5,6]
"""
RETURN = """
_list:
description:
- list of lists composed of elements of the input lists
type: lists
"""
from itertools import product
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.utils.listify import listify_lookup_plugin_terms
class LookupModule(LookupBase):
    """
    Create the cartesian product of lists
    """

    def _lookup_variables(self, terms):
        """
        Template each term into a list, e.g. turn
        terms == ["1,2,3", "a,b"] into terms == [[1,2,3], [a, b]].
        """
        return [
            listify_lookup_plugin_terms(term, templar=self._templar, loader=self._loader)
            for term in terms
        ]

    def run(self, terms, variables=None, **kwargs):
        """Return the flattened cartesian product of the looked-up term lists."""
        lists = self._lookup_variables(terms)[:]
        if not lists:
            raise AnsibleError("with_cartesian requires at least one element in each list")
        return [self._flatten(combo) for combo in product(*lists)]
|
invisiblek/python-for-android | refs/heads/master | python-modules/zope/zope/interface/declarations.py | 50 | ##############################################################################
# Copyright (c) 2003 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
##############################################################################
"""Implementation of interface declarations
There are three flavors of declarations:
- Declarations are used to simply name declared interfaces.
- ImplementsDeclarations are used to express the interfaces that a
class implements (that instances of the class provides).
Implements specifications support inheriting interfaces.
- ProvidesDeclarations are used to express interfaces directly
provided by objects.
$Id: declarations.py 110736 2010-04-11 10:59:30Z regebro $
"""
__docformat__ = 'restructuredtext'
import sys
import weakref
from zope.interface.interface import InterfaceClass, Specification
from zope.interface.interface import SpecificationBase
from types import ModuleType, MethodType, FunctionType
from zope.interface.advice import addClassAdvisor
# Registry of class-implementation specifications
BuiltinImplementationSpecifications = {}
class Declaration(Specification):
    """Interface declarations

    A Declaration names a set of declared interfaces; it is the base for
    the Implements and Provides declaration flavors.
    """

    def __init__(self, *interfaces):
        # interfaces may be given singly or nested in tuples/specifications;
        # _normalizeargs flattens them into a plain sequence
        Specification.__init__(self, _normalizeargs(interfaces))

    def changed(self, originally_changed):
        """Propagate a change and drop the cached attribute data, if any."""
        Specification.changed(self, originally_changed)
        try:
            del self._v_attrs
        except AttributeError:
            # no cache built yet -- nothing to invalidate
            pass

    def __contains__(self, interface):
        """Test whether an interface is in the specification

        for example:

        >>> from zope.interface import Interface
        >>> class I1(Interface): pass
        ...
        >>> class I2(I1): pass
        ...
        >>> class I3(Interface): pass
        ...
        >>> class I4(I3): pass
        ...
        >>> spec = Declaration(I2, I3)
        >>> spec = Declaration(I4, spec)
        >>> int(I1 in spec)
        0
        >>> int(I2 in spec)
        1
        >>> int(I3 in spec)
        1
        >>> int(I4 in spec)
        1
        """
        return self.extends(interface) and interface in self.interfaces()

    def __iter__(self):
        """Return an iterator for the interfaces in the specification

        for example:

        >>> from zope.interface import Interface
        >>> class I1(Interface): pass
        ...
        >>> class I2(I1): pass
        ...
        >>> class I3(Interface): pass
        ...
        >>> class I4(I3): pass
        ...
        >>> spec = Declaration(I2, I3)
        >>> spec = Declaration(I4, spec)
        >>> i = iter(spec)
        >>> [x.getName() for x in i]
        ['I4', 'I2', 'I3']
        >>> list(i)
        []
        """
        return self.interfaces()

    def flattened(self):
        """Return an iterator of all included and extended interfaces

        for example:

        >>> from zope.interface import Interface
        >>> class I1(Interface): pass
        ...
        >>> class I2(I1): pass
        ...
        >>> class I3(Interface): pass
        ...
        >>> class I4(I3): pass
        ...
        >>> spec = Declaration(I2, I3)
        >>> spec = Declaration(I4, spec)
        >>> i = spec.flattened()
        >>> [x.getName() for x in i]
        ['I4', 'I2', 'I1', 'I3', 'Interface']
        >>> list(i)
        []
        """
        return iter(self.__iro__)

    def __sub__(self, other):
        """Remove interfaces from a specification

        Examples:

        >>> from zope.interface import Interface
        >>> class I1(Interface): pass
        ...
        >>> class I2(I1): pass
        ...
        >>> class I3(Interface): pass
        ...
        >>> class I4(I3): pass
        ...
        >>> spec = Declaration()
        >>> [iface.getName() for iface in spec]
        []
        >>> spec -= I1
        >>> [iface.getName() for iface in spec]
        []
        >>> spec -= Declaration(I1, I2)
        >>> [iface.getName() for iface in spec]
        []
        >>> spec = Declaration(I2, I4)
        >>> [iface.getName() for iface in spec]
        ['I2', 'I4']
        >>> [iface.getName() for iface in spec - I4]
        ['I2']
        >>> [iface.getName() for iface in spec - I1]
        ['I4']
        >>> [iface.getName() for iface
        ...  in spec - Declaration(I3, I4)]
        ['I2']
        """
        # keep only interfaces that do not extend anything being removed
        return Declaration(
            *[i for i in self.interfaces()
              if not [j for j in other.interfaces()
                      if i.extends(j, 0)]
              ]
            )

    def __add__(self, other):
        """Add two specifications or a specification and an interface

        Examples:

        >>> from zope.interface import Interface
        >>> class I1(Interface): pass
        ...
        >>> class I2(I1): pass
        ...
        >>> class I3(Interface): pass
        ...
        >>> class I4(I3): pass
        ...
        >>> spec = Declaration()
        >>> [iface.getName() for iface in spec]
        []
        >>> [iface.getName() for iface in spec+I1]
        ['I1']
        >>> [iface.getName() for iface in I1+spec]
        ['I1']
        >>> spec2 = spec
        >>> spec += I1
        >>> [iface.getName() for iface in spec]
        ['I1']
        >>> [iface.getName() for iface in spec2]
        []
        >>> spec2 += Declaration(I3, I4)
        >>> [iface.getName() for iface in spec2]
        ['I3', 'I4']
        >>> [iface.getName() for iface in spec+spec2]
        ['I1', 'I3', 'I4']
        >>> [iface.getName() for iface in spec2+spec]
        ['I3', 'I4', 'I1']
        """
        # de-duplicate while preserving first-seen order, self's
        # interfaces before other's
        seen = {}
        result = []
        for i in self.interfaces():
            if i not in seen:
                seen[i] = 1
                result.append(i)
        for i in other.interfaces():
            if i not in seen:
                seen[i] = 1
                result.append(i)
        return Declaration(*result)

    __radd__ = __add__
##############################################################################
#
# Implementation specifications
#
# These specify interfaces implemented by instances of classes
class Implements(Declaration):
    """Declaration of the interfaces implemented by instances of a class."""

    # class whose specification should be used as additional base
    inherit = None

    # interfaces actually declared for a class
    declared = ()

    # display name used by __repr__; replaced when the spec is built
    __name__ = '?'

    def __repr__(self):
        return '<implementedBy %s>' % (self.__name__)

    def __reduce__(self):
        # pickle via implementedBy so unpickling re-resolves the live spec
        return implementedBy, (self.inherit, )
def implementedByFallback(cls):
    """Return the interfaces implemented for a class' instances

    The value returned is an IDeclaration.

    for example:
    >>> from zope.interface import Interface
    >>> class I1(Interface): pass
    ...
    >>> class I2(I1): pass
    ...
    >>> class I3(Interface): pass
    ...
    >>> class I4(I3): pass
    ...
    >>> class C1(object):
    ...   implements(I2)
    >>> class C2(C1):
    ...   implements(I3)
    >>> [i.getName() for i in implementedBy(C2)]
    ['I3', 'I2']

    Really, any object should be able to receive a successful answer, even
    an instance:
    >>> class Callable(object):
    ...    def __call__(self):
    ...        return self
    >>> implementedBy(Callable())
    <implementedBy zope.interface.declarations.?>

    Note that the name of the spec ends with a '?', because the `Callable`
    instance does not have a `__name__` attribute.
    """
    # This also manages storage of implementation specifications
    try:
        spec = cls.__dict__.get('__implemented__')
    except AttributeError:
        # we can't get the class dict. This is probably due to a
        # security proxy.  If this is the case, then probably no
        # descriptor was installed for the class.
        # We don't want to depend directly on zope.security in
        # zope.interface, but we'll try to make reasonable
        # accommodations in an indirect way.
        # We'll check to see if there's an implements:
        spec = getattr(cls, '__implemented__', None)
        if spec is None:
            # There's no spec stored in the class. Maybe it's a builtin:
            spec = BuiltinImplementationSpecifications.get(cls)
            if spec is not None:
                return spec
            return _empty

        if spec.__class__ == Implements:
            # we defaulted to _empty or there was a spec. Good enough.
            # Return it.
            return spec

        # TODO: need old style __implements__ compatibility?
        # Hm, there's an __implemented__, but it's not a spec. Must be
        # an old-style declaration. Just compute a spec for it
        return Declaration(*_normalizeargs((spec, )))

    if isinstance(spec, Implements):
        return spec

    if spec is None:
        spec = BuiltinImplementationSpecifications.get(cls)
        if spec is not None:
            return spec

    # TODO: need old style __implements__ compatibility?
    if spec is not None:
        # old-style __implemented__ = foo declaration
        spec = (spec, ) # tuplefy, as it might be just an int
        spec = Implements(*_normalizeargs(spec))
        spec.inherit = None    # old-style implies no inherit
        del cls.__implemented__ # get rid of the old-style declaration
    else:
        try:
            bases = cls.__bases__
        except AttributeError:
            if not callable(cls):
                raise TypeError("ImplementedBy called for non-factory", cls)
            bases = ()

        # Build the spec from what the base classes' instances implement.
        spec = Implements(*[implementedBy(c) for c in bases])
        spec.inherit = cls

    spec.__name__ = (getattr(cls, '__module__', '?') or '?') + \
                    '.' + (getattr(cls, '__name__', '?') or '?')

    try:
        # Cache the computed spec on the class itself, and install the
        # descriptors used for attribute-based lookup.
        cls.__implemented__ = spec
        if not hasattr(cls, '__providedBy__'):
            cls.__providedBy__ = objectSpecificationDescriptor
        if (isinstance(cls, DescriptorAwareMetaClasses)
            and
            '__provides__' not in cls.__dict__):
            # Make sure we get a __provides__ descriptor
            cls.__provides__ = ClassProvides(
                cls,
                getattr(cls, '__class__', type(cls)),
                )
    except TypeError:
        # Builtin types reject attribute assignment; cache the spec in a
        # side table instead.
        if not isinstance(cls, type):
            raise TypeError("ImplementedBy called for non-type", cls)
        BuiltinImplementationSpecifications[cls] = spec

    return spec

implementedBy = implementedByFallback
def classImplementsOnly(cls, *interfaces):
    """Declare the only interfaces implemented by instances of a class

    The arguments after the class are one or more interfaces or interface
    specifications (``IDeclaration`` objects).  Together they *replace*
    every previous declaration, including anything inherited from base
    classes.

    Consider the following example:
    >>> from zope.interface import Interface
    >>> class I1(Interface): pass
    ...
    >>> class I2(Interface): pass
    ...
    >>> class I3(Interface): pass
    ...
    >>> class I4(Interface): pass
    ...
    >>> class A(object):
    ...   implements(I3)
    >>> class B(object):
    ...   implements(I4)
    >>> class C(A, B):
    ...   pass
    >>> classImplementsOnly(C, I1, I2)
    >>> [i.getName() for i in implementedBy(C)]
    ['I1', 'I2']

    Instances of ``C`` provide only ``I1``, ``I2``, and regardless of
    whatever interfaces instances of ``A`` and ``B`` implement.
    """
    spec = implementedBy(cls)
    # Wipe both the explicit declarations and the inherited bases, then
    # redeclare the requested interfaces from a clean slate.
    spec.declared, spec.inherit = (), None
    classImplements(cls, *interfaces)
def classImplements(cls, *interfaces):
    """Declare additional interfaces implemented for instances of a class

    The arguments after the class are one or more interfaces or
    interface specifications (``IDeclaration`` objects).  They are added
    to any interfaces previously declared.

    Consider the following example:
    >>> from zope.interface import Interface
    >>> class I1(Interface): pass
    ...
    >>> class I2(Interface): pass
    ...
    >>> class I3(Interface): pass
    ...
    >>> class I4(Interface): pass
    ...
    >>> class I5(Interface): pass
    ...
    >>> class A(object):
    ...   implements(I3)
    >>> class B(object):
    ...   implements(I4)
    >>> class C(A, B):
    ...   pass
    >>> classImplements(C, I1, I2)
    >>> [i.getName() for i in implementedBy(C)]
    ['I1', 'I2', 'I3', 'I4']
    >>> classImplements(C, I5)
    >>> [i.getName() for i in implementedBy(C)]
    ['I1', 'I2', 'I5', 'I3', 'I4']

    Instances of ``C`` provide ``I1``, ``I2``, ``I5``, and whatever
    interfaces instances of ``A`` and ``B`` provide.
    """
    spec = implementedBy(cls)
    spec.declared += tuple(_normalizeargs(interfaces))

    # Recompute __bases__: explicitly declared interfaces first (order
    # preserved, duplicates dropped), then the specs implemented by the
    # inherited base classes.
    candidates = list(spec.declared)
    if spec.inherit is not None:
        candidates.extend(implementedBy(c) for c in spec.inherit.__bases__)

    bases = []
    seen = set()
    for candidate in candidates:
        if candidate not in seen:
            seen.add(candidate)
            bases.append(candidate)

    spec.__bases__ = tuple(bases)
def _implements_advice(cls):
interfaces, classImplements = cls.__dict__['__implements_advice_data__']
del cls.__implements_advice_data__
classImplements(cls, *interfaces)
return cls
class implementer:
    """Decorator: declare interfaces implemented by the decorated object."""

    def __init__(self, *interfaces):
        self.interfaces = interfaces

    def __call__(self, ob):
        # Classes get a normal class declaration; any other object gets an
        # Implements spec stored directly on it.
        if isinstance(ob, DescriptorAwareMetaClasses):
            classImplements(ob, *self.interfaces)
        else:
            spec = Implements(*self.interfaces)
            try:
                ob.__implemented__ = spec
            except AttributeError:
                raise TypeError("Can't declare implements", ob)
        return ob
class implementer_only:
    """Decorator: declare the ONLY interfaces implemented by the decorated
    class, replacing inherited declarations (see classImplementsOnly)."""

    def __init__(self, *interfaces):
        self.interfaces = interfaces

    def __call__(self, ob):
        if isinstance(ob, (FunctionType, MethodType)):
            # XXX Does this decorator make sense for anything but classes?
            # I don't think so. There can be no inheritance of interfaces
            # on a method or function....
            # BUG FIX: the message previously misspelled the decorator name
            # as "implementor_only".
            raise ValueError('The implementer_only decorator is not '
                             'supported for methods or functions.')
        else:
            # Assume it's a class:
            classImplementsOnly(ob, *self.interfaces)
            return ob
def _implements(name, interfaces, classImplements):
    """Shared helper for implements()/implementsOnly().

    *name* is used only in error messages; *classImplements* is the
    declaration function to apply later (classImplements or
    classImplementsOnly).  The data is stashed in the class body's locals
    and applied by _implements_advice once the class object exists.
    """
    # Depth 2: skip this frame and the implements()/implementsOnly() frame,
    # landing in the class body that called them.
    frame = sys._getframe(2)
    locals = frame.f_locals

    # Try to make sure we were called from a class def. In 2.2.0 we can't
    # check for __module__ since it doesn't seem to be added to the locals
    # until later on.
    if (locals is frame.f_globals) or (
        ('__module__' not in locals) and sys.version_info[:3] > (2, 2, 0)):
        raise TypeError(name+" can be used only from a class definition.")

    if '__implements_advice_data__' in locals:
        raise TypeError(name+" can be used only once in a class definition.")

    locals['__implements_advice_data__'] = interfaces, classImplements
    # Depth 3 accounts for the extra frame introduced by the advisor
    # machinery itself.
    addClassAdvisor(_implements_advice, depth=3)
def implements(*interfaces):
    """Declare interfaces implemented by instances of a class

    This function is called in a class definition.

    The arguments are one or more interfaces or interface
    specifications (IDeclaration objects).

    The interfaces given (including the interfaces in the
    specifications) are added to any interfaces previously
    declared.

    Previous declarations include declarations for base classes
    unless implementsOnly was used.

    This function is provided for convenience. It provides a more
    convenient way to call classImplements. For example::

      implements(I1)

    is equivalent to calling::

      classImplements(C, I1)

    after the class has been created.

    Consider the following example::

    >>> from zope.interface import Interface
    >>> class IA1(Interface): pass
    ...
    >>> class IA2(Interface): pass
    ...
    >>> class IB(Interface): pass
    ...
    >>> class IC(Interface): pass
    ...
    >>> class A(object):
    ...   implements(IA1, IA2)
    >>> class B(object):
    ...   implements(IB)
    >>> class C(A, B):
    ...   implements(IC)

    >>> ob = C()
    >>> int(IA1 in providedBy(ob))
    1
    >>> int(IA2 in providedBy(ob))
    1
    >>> int(IB in providedBy(ob))
    1
    >>> int(IC in providedBy(ob))
    1

    Instances of ``C`` implement ``IC``, and whatever interfaces
    instances of ``A`` and ``B`` implement.
    """
    # Frame-depth sensitive: _implements peeks two frames up to find the
    # class body, so this must remain a direct call.
    _implements("implements", interfaces, classImplements)
def implementsOnly(*interfaces):
    """Declare the only interfaces implemented by instances of a class

    This function is called in a class definition.

    The arguments are one or more interfaces or interface
    specifications (IDeclaration objects).

    Previous declarations including declarations for base classes
    are overridden.

    This function is provided for convenience. It provides a more
    convenient way to call classImplementsOnly. For example::

      implementsOnly(I1)

    is equivalent to calling::

      classImplementsOnly(C, I1)

    after the class has been created.

    Consider the following example::

    >>> from zope.interface import Interface
    >>> class IA1(Interface): pass
    ...
    >>> class IA2(Interface): pass
    ...
    >>> class IB(Interface): pass
    ...
    >>> class IC(Interface): pass
    ...
    >>> class A(object):
    ...   implements(IA1, IA2)
    >>> class B(object):
    ...   implements(IB)
    >>> class C(A, B):
    ...   implementsOnly(IC)

    >>> ob = C()
    >>> int(IA1 in providedBy(ob))
    0
    >>> int(IA2 in providedBy(ob))
    0
    >>> int(IB in providedBy(ob))
    0
    >>> int(IC in providedBy(ob))
    1

    Instances of ``C`` implement ``IC``, regardless of what
    instances of ``A`` and ``B`` implement.
    """
    # Frame-depth sensitive: _implements peeks two frames up to find the
    # class body, so this must remain a direct call.
    _implements("implementsOnly", interfaces, classImplementsOnly)
##############################################################################
#
# Instance declarations
class Provides(Declaration):  # Really named ProvidesClass
    """Implement __provides__, the instance-specific specification

    When an object is pickled, we pickle the interfaces that it implements.
    """

    def __init__(self, cls, *interfaces):
        self._cls = cls
        # Saved constructor arguments; replayed by __reduce__ on unpickling.
        self.__args = (cls, ) + interfaces
        Declaration.__init__(self, *(interfaces + (implementedBy(cls), )))

    def __reduce__(self):
        return Provides, self.__args

    __module__ = 'zope.interface'

    def __get__(self, inst, cls):
        """Make sure that a class __provides__ doesn't leak to an instance

        For example:
        >>> from zope.interface import Interface
        >>> class IFooFactory(Interface): pass
        ...
        >>> class C(object):
        ...   pass
        >>> C.__provides__ = ProvidesClass(C, IFooFactory)
        >>> [i.getName() for i in C.__provides__]
        ['IFooFactory']
        >>> getattr(C(), '__provides__', 0)
        0
        """
        # Only class-level access on the exact class we were built for may
        # see this spec; instances (and other classes) get AttributeError.
        if inst is not None or cls is not self._cls:
            raise AttributeError('__provides__')
        return self
# Keep a reference to the class under a second name: the name ``Provides``
# is rebound just below to a caching factory function.
ProvidesClass = Provides

# Registry of instance declarations
# This is a memory optimization to allow objects to share specifications.
InstanceDeclarations = weakref.WeakValueDictionary()
def Provides(*interfaces):
    """Cache instance declarations

    Instance declarations are shared among instances that have the same
    declaration.  The declarations are cached in a weak value dictionary.

    (Note that, in the examples below, we are going to make assertions about
    the size of the weakvalue dictionary.  For the assertions to be
    meaningful, we need to force garbage collection to make sure garbage
    objects are, indeed, removed from the system.  Depending on how Python
    is run, we may need to make multiple calls to be sure.  We provide a
    collect function to help with this:

    >>> import gc
    >>> def collect():
    ...     for i in range(4):
    ...         gc.collect()

    )

    >>> collect()
    >>> before = len(InstanceDeclarations)

    >>> class C(object):
    ...    pass

    >>> from zope.interface import Interface
    >>> class I(Interface):
    ...    pass

    >>> c1 = C()
    >>> c2 = C()

    >>> len(InstanceDeclarations) == before
    True

    >>> directlyProvides(c1, I)
    >>> len(InstanceDeclarations) == before + 1
    True

    >>> directlyProvides(c2, I)
    >>> len(InstanceDeclarations) == before + 1
    True

    >>> del c1
    >>> collect()
    >>> len(InstanceDeclarations) == before + 1
    True

    >>> del c2
    >>> collect()
    >>> len(InstanceDeclarations) == before
    True
    """
    try:
        # Reuse the shared spec for this exact interface tuple, if any.
        return InstanceDeclarations[interfaces]
    except KeyError:
        spec = ProvidesClass(*interfaces)
        InstanceDeclarations[interfaces] = spec
        return spec

Provides.__safe_for_unpickling__ = True
# Python 2 has both old-style (ClassType) and new-style (type) classes that
# can carry descriptors; on Python 3 only ``type`` remains.
try:
    from types import ClassType
    DescriptorAwareMetaClasses = ClassType, type
except ImportError: # Python 3
    DescriptorAwareMetaClasses = (type,)
def directlyProvides(object, *interfaces):
    """Declare interfaces declared directly for an object

    The arguments after the object are one or more interfaces or interface
    specifications (``IDeclaration`` objects).

    The interfaces given (including the interfaces in the specifications)
    replace interfaces previously declared for the object.

    Consider the following example:
    >>> from zope.interface import Interface
    >>> class I1(Interface): pass
    ...
    >>> class I2(Interface): pass
    ...
    >>> class IA1(Interface): pass
    ...
    >>> class IA2(Interface): pass
    ...
    >>> class IB(Interface): pass
    ...
    >>> class IC(Interface): pass
    ...
    >>> class A(object):
    ...   implements(IA1, IA2)
    >>> class B(object):
    ...   implements(IB)
    >>> class C(A, B):
    ...   implements(IC)

    >>> ob = C()
    >>> directlyProvides(ob, I1, I2)
    >>> int(I1 in providedBy(ob))
    1
    >>> int(I2 in providedBy(ob))
    1
    >>> int(IA1 in providedBy(ob))
    1
    >>> int(IA2 in providedBy(ob))
    1
    >>> int(IB in providedBy(ob))
    1
    >>> int(IC in providedBy(ob))
    1

    The object, ``ob`` provides ``I1``, ``I2``, and whatever interfaces
    instances have been declared for instances of ``C``.

    To remove directly provided interfaces, use ``directlyProvidedBy`` and
    subtract the unwanted interfaces. For example:

    >>> directlyProvides(ob, directlyProvidedBy(ob)-I2)
    >>> int(I1 in providedBy(ob))
    1
    >>> int(I2 in providedBy(ob))
    0

    removes I2 from the interfaces directly provided by ``ob``. The object,
    ``ob`` no longer directly provides ``I2``, although it might still
    provide ``I2`` if its class implements ``I2``.

    To add directly provided interfaces, use ``directlyProvidedBy`` and
    include additional interfaces.  For example:

    >>> int(I2 in providedBy(ob))
    0
    >>> directlyProvides(ob, directlyProvidedBy(ob), I2)

    adds ``I2`` to the interfaces directly provided by ob::

    >>> int(I2 in providedBy(ob))
    1
    """

    # We need to avoid setting this attribute on meta classes that
    # don't support descriptors.
    # We can do away with this check when we get rid of the old EC
    cls = getattr(object, '__class__', None)
    if cls is not None and getattr(cls, '__class__', None) is cls:
        # It's a meta class (well, at least it could be an extension class)
        if not isinstance(object, DescriptorAwareMetaClasses):
            raise TypeError("Attempt to make an interface declaration on a "
                            "non-descriptor-aware class")

    interfaces = _normalizeargs(interfaces)
    if cls is None:
        cls = type(object)

    # issubclass accepts a tuple of classes, so the previous manual loop
    # over DescriptorAwareMetaClasses is unnecessary.
    if issubclass(cls, DescriptorAwareMetaClasses):
        # we have a class or type.  We'll use a special descriptor
        # that provides some extra caching
        object.__provides__ = ClassProvides(object, cls, *interfaces)
    else:
        object.__provides__ = Provides(cls, *interfaces)
def alsoProvides(object, *interfaces):
    """Declare interfaces declared directly for an object

    The arguments after the object are one or more interfaces or interface
    specifications (``IDeclaration`` objects).

    The interfaces given (including the interfaces in the specifications) are
    added to the interfaces previously declared for the object.

    Consider the following example:
    >>> from zope.interface import Interface
    >>> class I1(Interface): pass
    ...
    >>> class I2(Interface): pass
    ...
    >>> class IA1(Interface): pass
    ...
    >>> class IA2(Interface): pass
    ...
    >>> class IB(Interface): pass
    ...
    >>> class IC(Interface): pass
    ...
    >>> class A(object):
    ...   implements(IA1, IA2)
    >>> class B(object):
    ...   implements(IB)
    >>> class C(A, B):
    ...   implements(IC)

    >>> ob = C()
    >>> directlyProvides(ob, I1)
    >>> int(I1 in providedBy(ob))
    1
    >>> int(I2 in providedBy(ob))
    0
    >>> int(IA1 in providedBy(ob))
    1
    >>> int(IA2 in providedBy(ob))
    1
    >>> int(IB in providedBy(ob))
    1
    >>> int(IC in providedBy(ob))
    1

    >>> alsoProvides(ob, I2)
    >>> int(I1 in providedBy(ob))
    1
    >>> int(I2 in providedBy(ob))
    1
    >>> int(IA1 in providedBy(ob))
    1
    >>> int(IA2 in providedBy(ob))
    1
    >>> int(IB in providedBy(ob))
    1
    >>> int(IC in providedBy(ob))
    1

    The object, ``ob`` provides ``I1``, ``I2``, and whatever interfaces
    instances have been declared for instances of ``C``. Notice that the
    alsoProvides just extends the provided interfaces.
    """
    # Keep the existing direct declarations and append the new interfaces.
    directlyProvides(object, directlyProvidedBy(object), *interfaces)
def noLongerProvides(object, interface):
    """
    This removes a directly provided interface from an object.

    Consider the following two interfaces:
    >>> from zope.interface import Interface
    >>> class I1(Interface): pass
    ...
    >>> class I2(Interface): pass
    ...

    ``I1`` is provided through the class, ``I2`` is directly provided
    by the object:
    >>> class C(object):
    ...    implements(I1)
    >>> c = C()
    >>> alsoProvides(c, I2)
    >>> I2.providedBy(c)
    True

    Remove I2 from c again:
    >>> noLongerProvides(c, I2)
    >>> I2.providedBy(c)
    False

    Removing an interface that is provided through the class is not possible:
    >>> noLongerProvides(c, I1)
    Traceback (most recent call last):
    ...
    ValueError: Can only remove directly provided interfaces.
    """
    # Rebuild the direct declaration without the unwanted interface ...
    remaining = directlyProvidedBy(object) - interface
    directlyProvides(object, remaining)
    # ... and if the object still provides it, it must come from the class,
    # which this function cannot (and must not) modify.
    if interface.providedBy(object):
        raise ValueError("Can only remove directly provided interfaces.")
class ClassProvidesBasePy(object):
    """Descriptor base for class-level ``__provides__`` attributes."""

    def __get__(self, inst, cls):
        # We only work if called on the class we were defined for.
        if cls is not self._cls:
            raise AttributeError('__provides__')
        if inst is None:
            # Accessed through the class itself: we *are* its provides spec.
            return self
        # Accessed through an instance: expose what instances implement.
        return self._implements
# Default to the pure-Python descriptor base ...
ClassProvidesBase = ClassProvidesBasePy

# Try to get C base:
try:
    import _zope_interface_coptimizations
except ImportError:
    pass
else:
    # ... but prefer the optimized C implementation when available.
    from _zope_interface_coptimizations import ClassProvidesBase
class ClassProvides(Declaration, ClassProvidesBase):
    """Special descriptor for class __provides__

    The descriptor caches the implementedBy info, so that
    we can get declarations for objects without instance-specific
    interfaces a bit quicker.

    For example:
    >>> from zope.interface import Interface
    >>> class IFooFactory(Interface):
    ...     pass
    >>> class IFoo(Interface):
    ...     pass
    >>> class C(object):
    ...     implements(IFoo)
    ...     classProvides(IFooFactory)
    >>> [i.getName() for i in C.__provides__]
    ['IFooFactory']
    >>> [i.getName() for i in C().__provides__]
    ['IFoo']
    """
    def __init__(self, cls, metacls, *interfaces):
        self._cls = cls
        # Cached so instance access through the descriptor is cheap.
        self._implements = implementedBy(cls)
        # Saved constructor args (name-mangled to _ClassProvides__args);
        # replayed by __reduce__ on pickling.
        self.__args = (cls, metacls, ) + interfaces
        Declaration.__init__(self, *(interfaces + (implementedBy(metacls), )))

    def __reduce__(self):
        return self.__class__, self.__args

    # Copy base-class method for speed
    __get__ = ClassProvidesBase.__get__
def directlyProvidedBy(object):
    """Return the interfaces directly provided by the given object

    The value returned is an ``IDeclaration``.
    """
    provides = getattr(object, "__provides__", None)
    # Either there is no instance spec at all, or we picked up the
    # class-level Implements spec (an optimization) -- in both cases
    # nothing is *directly* provided by the object itself.
    if provides is None or isinstance(provides, Implements):
        return _empty

    # Strip off the class part of the spec (its last base):
    return Declaration(provides.__bases__[:-1])
def classProvides(*interfaces):
    """Declare interfaces provided directly by a class

    This function is called in a class definition.

    The arguments are one or more interfaces or interface specifications
    (``IDeclaration`` objects).

    The given interfaces (including the interfaces in the specifications)
    are used to create the class's direct-object interface specification.
    An error will be raised if the class already has a direct interface
    specification. In other words, it is an error to call this function more
    than once in a class definition.

    Note that the given interfaces have nothing to do with the interfaces
    implemented by instances of the class.

    This function is provided for convenience. It provides a more convenient
    way to call directlyProvides for a class. For example::

      classProvides(I1)

    is equivalent to calling::

      directlyProvides(theclass, I1)

    after the class has been created.

    For example:

    >>> from zope.interface import Interface
    >>> class IFoo(Interface): pass
    ...
    >>> class IFooFactory(Interface): pass
    ...
    >>> class C(object):
    ...   implements(IFoo)
    ...   classProvides(IFooFactory)
    >>> [i.getName() for i in C.__providedBy__]
    ['IFooFactory']
    >>> [i.getName() for i in C().__providedBy__]
    ['IFoo']

    is equivalent to:

    >>> from zope.interface import Interface
    >>> class IFoo(Interface): pass
    ...
    >>> class IFooFactory(Interface): pass
    ...
    >>> class C(object):
    ...   implements(IFoo)
    >>> directlyProvides(C, IFooFactory)
    >>> [i.getName() for i in C.__providedBy__]
    ['IFooFactory']
    >>> [i.getName() for i in C().__providedBy__]
    ['IFoo']
    """
    # Depth 1: our caller's frame, i.e. the class body being defined.
    frame = sys._getframe(1)
    locals = frame.f_locals

    # Try to make sure we were called from a class def
    if (locals is frame.f_globals) or ('__module__' not in locals):
        raise TypeError("classProvides can be used only from a class definition.")

    if '__provides__' in locals:
        raise TypeError(
            "classProvides can only be used once in a class definition.")

    # Stash the normalized interfaces; _classProvides_advice converts them
    # into a real declaration once the class object exists.
    locals["__provides__"] = _normalizeargs(interfaces)

    addClassAdvisor(_classProvides_advice, depth=2)
def _classProvides_advice(cls):
    """Class advisor: apply the classProvides data stashed on *cls*.

    Pops the interface tuple left in the class body by classProvides() and
    turns it into a real direct-provides declaration on the class.
    """
    ifaces = cls.__dict__['__provides__']
    del cls.__provides__
    directlyProvides(cls, *ifaces)
    return cls
class provider:
    """Class decorator version of classProvides"""
    def __init__(self, *interfaces):
        self.interfaces = interfaces
    def __call__(self, ob):
        # Unlike classProvides(), no frame magic is needed: the class
        # object already exists when a decorator runs.
        directlyProvides(ob, *self.interfaces)
        return ob
def moduleProvides(*interfaces):
    """Declare interfaces provided by a module

    This function is used in a module definition.

    The arguments are one or more interfaces or interface specifications
    (``IDeclaration`` objects).

    The given interfaces (including the interfaces in the specifications) are
    used to create the module's direct-object interface specification.  An
    error will be raised if the module already has an interface specification.
    In other words, it is an error to call this function more than once in a
    module definition.

    This function is provided for convenience. It provides a more convenient
    way to call directlyProvides. For example::

      moduleProvides(I1)

    is equivalent to::

      directlyProvides(sys.modules[__name__], I1)
    """
    # Depth 1: our caller's frame, i.e. the module body being executed.
    frame = sys._getframe(1)
    locals = frame.f_locals

    # Try to make sure we were called from a module body (module-level
    # locals ARE the globals, unlike in a class or function body).
    if (locals is not frame.f_globals) or ('__name__' not in locals):
        raise TypeError(
            "moduleProvides can only be used from a module definition.")

    if '__provides__' in locals:
        raise TypeError(
            "moduleProvides can only be used once in a module definition.")

    locals["__provides__"] = Provides(ModuleType,
                                      *_normalizeargs(interfaces))
##############################################################################
#
# Declaration querying support
def ObjectSpecification(direct, cls):
    """Provide object specifications

    These combine information for the object and for its classes.

    For example:
    >>> from zope.interface import Interface
    >>> class I1(Interface): pass
    ...
    >>> class I2(Interface): pass
    ...
    >>> class I3(Interface): pass
    ...
    >>> class I31(I3): pass
    ...
    >>> class I4(Interface): pass
    ...
    >>> class I5(Interface): pass
    ...
    >>> class A(object):
    ...   implements(I1)
    >>> class B(object): __implemented__ = I2
    ...
    >>> class C(A, B):
    ...   implements(I31)
    >>> c = C()
    >>> directlyProvides(c, I4)
    >>> [i.getName() for i in providedBy(c)]
    ['I4', 'I31', 'I1', 'I2']
    >>> [i.getName() for i in providedBy(c).flattened()]
    ['I4', 'I31', 'I3', 'I1', 'I2', 'Interface']
    >>> int(I1 in providedBy(c))
    1
    >>> int(I3 in providedBy(c))
    0
    >>> int(providedBy(c).extends(I3))
    1
    >>> int(providedBy(c).extends(I31))
    1
    >>> int(providedBy(c).extends(I5))
    0
    >>> class COnly(A, B):
    ...   implementsOnly(I31)
    >>> class D(COnly):
    ...   implements(I5)
    >>> c = D()
    >>> directlyProvides(c, I4)
    >>> [i.getName() for i in providedBy(c)]
    ['I4', 'I5', 'I31']
    >>> [i.getName() for i in providedBy(c).flattened()]
    ['I4', 'I5', 'I31', 'I3', 'Interface']
    >>> int(I1 in providedBy(c))
    0
    >>> int(I3 in providedBy(c))
    0
    >>> int(providedBy(c).extends(I3))
    1
    >>> int(providedBy(c).extends(I1))
    0
    >>> int(providedBy(c).extends(I31))
    1
    >>> int(providedBy(c).extends(I5))
    1
    """
    # The (cached) Provides factory combines the direct declaration with
    # what instances of cls implement.
    return Provides(cls, direct)
def getObjectSpecification(ob):
    """Return the specification for *ob*.

    Prefer an instance ``__provides__`` that is a real specification;
    otherwise fall back to what the object's class implements.
    """
    provides = getattr(ob, '__provides__', None)
    # isinstance(None, ...) is simply False, so a missing __provides__
    # falls through to the class-based lookup below.
    if isinstance(provides, SpecificationBase):
        return provides

    try:
        cls = ob.__class__
    except AttributeError:
        # We can't get the class, so just consider provides
        return _empty

    return implementedBy(cls)
def providedBy(ob):
    """Return the interfaces provided by *ob*, via a chain of fallbacks."""
    # Here we have either a special object, an old-style declaration
    # or a descriptor

    # Try to get __providedBy__
    try:
        r = ob.__providedBy__
    except AttributeError:
        # Not set yet. Fall back to lower-level thing that computes it
        return getObjectSpecification(ob)

    try:
        # We might have gotten a descriptor from an instance of a
        # class (like an ExtensionClass) that doesn't support
        # descriptors.  We'll make sure we got one by trying to get
        # the only attribute, which all specs have.
        r.extends
    except AttributeError:

        # The object's class doesn't understand descriptors.
        # Sigh. We need to get an object descriptor, but we have to be
        # careful.  We want to use the instance's __provides__, if
        # there is one, but only if it didn't come from the class.

        try:
            r = ob.__provides__
        except AttributeError:
            # No __provides__, so just fall back to implementedBy
            return implementedBy(ob.__class__)

        # We need to make sure we got the __provides__ from the
        # instance. We'll do this by making sure we don't get the same
        # thing from the class:

        try:
            cp = ob.__class__.__provides__
        except AttributeError:
            # The ob doesn't have a class or the class has no
            # provides, assume we're done:
            return r

        if r is cp:
            # Oops, we got the provides from the class. This means
            # the object doesn't have its own. We should use implementedBy
            return implementedBy(ob.__class__)

    return r
class ObjectSpecificationDescriptorPy(object):
    """Implement the `__providedBy__` attribute

    The `__providedBy__` attribute computes the interfaces provided by
    an object.
    """

    def __get__(self, inst, cls):
        """Get an object specification for an object

        For example:
        >>> from zope.interface import Interface
        >>> class IFoo(Interface): pass
        ...
        >>> class IFooFactory(Interface): pass
        ...
        >>> class C(object):
        ...   implements(IFoo)
        ...   classProvides(IFooFactory)
        >>> [i.getName() for i in C.__providedBy__]
        ['IFooFactory']
        >>> [i.getName() for i in C().__providedBy__]
        ['IFoo']
        """
        # Class access: delegate to the class-level specification.
        if inst is None:
            return getObjectSpecification(cls)
        # Instance access: an instance-specific __provides__ wins ...
        provides = getattr(inst, '__provides__', None)
        if provides is None:
            # ... otherwise fall back to the class declaration.
            return implementedBy(cls)
        return provides
# Replaced by the C implementation at the bottom of this module when
# _zope_interface_coptimizations is importable.
ObjectSpecificationDescriptor = ObjectSpecificationDescriptorPy

##############################################################################
def _normalizeargs(sequence, output = None):
    """Normalize declaration arguments

    Normalization arguments might contain Declarations, tuples, or single
    interfaces.

    Anything but individual interfaces or implements specs will be expanded.
    """
    if output is None:
        output = []

    cls = sequence.__class__
    # An interface or an Implements spec is taken as-is; anything else is
    # assumed to be iterable and is flattened recursively into output.
    if InterfaceClass in cls.__mro__ or Implements in cls.__mro__:
        output.append(sequence)
    else:
        for v in sequence:
            _normalizeargs(v, output)

    return output
# Shared "declares nothing" singleton used throughout this module.
_empty = Declaration()

# If the C optimizations are available, they replace the pure-Python
# fallbacks defined above.
try:
    import _zope_interface_coptimizations
except ImportError:
    pass
else:
    from _zope_interface_coptimizations import implementedBy, providedBy
    from _zope_interface_coptimizations import getObjectSpecification
    from _zope_interface_coptimizations import ObjectSpecificationDescriptor

objectSpecificationDescriptor = ObjectSpecificationDescriptor()
|
rchillyard/INFO6205 | refs/heads/master | Python/test/test_bqs/test_linkedlist.py | 1 | import unittest
from bqs.linkedlist_elements import LinkedListElements
class TestBagArray(unittest.TestCase):
    # NOTE(review): the class name says "BagArray" but both tests exercise
    # LinkedListElements -- presumably copied from a sibling test module.
    # Kept as-is so test discovery/import paths are unchanged; consider
    # renaming.

    def test_linkedlist1(self):
        """Single element: emptiness bookkeeping and add/remove round-trip."""
        _list = LinkedListElements()
        self.assertTrue(_list.is_empty())
        _list.add(1)
        self.assertEqual(1, _list.get_head())
        self.assertFalse(_list.is_empty())
        self.assertEqual(1, _list.remove())

    def test_linkedlist2(self):
        """Three elements: the most recently added value is at the head."""
        _list = LinkedListElements()
        self.assertTrue(_list.is_empty())
        _list.add(1)
        _list.add(2)
        _list.add(3)
        self.assertEqual(3, _list.get_head())
        self.assertEqual(3, _list.remove())
        self.assertEqual(2, _list.get_head())
# Allow running this test module directly, in addition to via a test runner.
if __name__ == "__main__":
    unittest.main()
|
detrout/telepathy-python | refs/heads/master | src/interfaces.py | 9 | # telepathy-python - Base classes defining the interfaces of the Telepathy framework
#
# Copyright (C) 2005, 2006 Collabora Limited
# Copyright (C) 2005, 2006 Nokia Corporation
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from telepathy._generated.interfaces import *

# Backwards compatibility: short aliases for the generated interface names.
CONN_MGR_INTERFACE = CONNECTION_MANAGER
CONN_INTERFACE = CONNECTION
CHANNEL_INTERFACE = CHANNEL
CHANNEL_HANDLER_INTERFACE = CHANNEL_HANDLER

# More backwards compatibility
CONN_INTERFACE_ALIASING = CONNECTION_INTERFACE_ALIASING
CONN_INTERFACE_AVATARS = CONNECTION_INTERFACE_AVATARS
CONN_INTERFACE_CAPABILITIES = CONNECTION_INTERFACE_CAPABILITIES
CONN_INTERFACE_PRESENCE = CONNECTION_INTERFACE_PRESENCE
CONN_INTERFACE_RENAMING = CONNECTION_INTERFACE_RENAMING
|
apophys/freeipa | refs/heads/master | ipatests/pytest_plugins/integration/env_config.py | 1 | # Authors:
# Petr Viktorin <pviktori@redhat.com>
# Tomas Babej <tbabej@redhat.com>
#
# Copyright (C) 2013 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Support for configuring multihost testing via environment variables
This is here to support tests configured for Beaker,
such as the ones at https://github.com/freeipa/tests/
"""
import os
import json
import collections
import six
from ipapython import ipautil
from ipatests.pytest_plugins.integration.config import Config, Domain
from ipalib.constants import MAX_DOMAIN_LEVEL
TESTHOST_PREFIX = 'TESTHOST_'
_SettingInfo = collections.namedtuple('Setting', 'name var_name default')
_setting_infos = (
# Directory on which test-specific files will be stored,
_SettingInfo('test_dir', 'IPATEST_DIR', '/root/ipatests'),
# File with root's private RSA key for SSH (default: ~/.ssh/id_rsa)
_SettingInfo('ssh_key_filename', 'IPA_ROOT_SSH_KEY', None),
# SSH password for root (used if root_ssh_key_filename is not set)
_SettingInfo('ssh_password', 'IPA_ROOT_SSH_PASSWORD', None),
_SettingInfo('admin_name', 'ADMINID', 'admin'),
_SettingInfo('admin_password', 'ADMINPW', 'Secret123'),
_SettingInfo('dirman_dn', 'ROOTDN', 'cn=Directory Manager'),
_SettingInfo('dirman_password', 'ROOTDNPWD', None),
# 8.8.8.8 is probably the best-known public DNS
_SettingInfo('dns_forwarder', 'DNSFORWARD', '8.8.8.8'),
_SettingInfo('nis_domain', 'NISDOMAIN', 'ipatest'),
_SettingInfo('ntp_server', 'NTPSERVER', None),
_SettingInfo('ad_admin_name', 'ADADMINID', 'Administrator'),
_SettingInfo('ad_admin_password', 'ADADMINPW', 'Secret123'),
_SettingInfo('ipv6', 'IPv6SETUP', False),
_SettingInfo('debug', 'IPADEBUG', False),
_SettingInfo('domain_level', 'DOMAINLVL', MAX_DOMAIN_LEVEL),
_SettingInfo('log_journal_since', 'LOG_JOURNAL_SINCE', '-1h'),
)
def get_global_config(env=None):
    """Build a test Config from environment variables.

    Uses os.environ when *env* is None; otherwise *env* is a mapping of
    environment variables (it is copied, never mutated).

    If IPATEST_YAML_CONFIG or IPATEST_JSON_CONFIG is set, the configuration
    is instead read from the named file (PyYAML is required for YAML).
    Otherwise the configuration is assembled from individual variables:

    - the test-wide settings listed in _setting_infos
    - MASTER_envN / REPLICA_envN / CLIENT_envN / AD_envN / OTHER_envN:
      space-separated FQDNs of that role's machines in domain N
    - TESTHOST_<role>_envN: FQDN of a machine with the extra role <role>
    - BEAKER<role><i>_IP_envN: optional IP address of a host (the framework
      resolves the hostname when the variable is absent)

    See env_normalize() for the alternate, suffix-less variable names.
    """
    source = os.environ if env is None else env
    return config_from_env(dict(source))
def config_from_env(env):
    """Build a Config from the environment mapping *env*.

    A YAML/JSON file named by IPATEST_YAML_CONFIG / IPATEST_JSON_CONFIG
    takes precedence over individual variables.  Note: *env* is mutated
    in place by env_normalize().
    """
    if 'IPATEST_YAML_CONFIG' in env:
        try:
            import yaml
        except ImportError as e:
            raise ImportError(
                "%s, please install PyYAML package to fix it" % e)
        with open(env['IPATEST_YAML_CONFIG']) as file:
            confdict = yaml.safe_load(file)
        return Config.from_dict(confdict)
    if 'IPATEST_JSON_CONFIG' in env:
        with open(env['IPATEST_JSON_CONFIG']) as file:
            confdict = json.load(file)
        return Config.from_dict(confdict)
    # Fill canonical *_env1 names from their legacy aliases first.
    env_normalize(env)
    kwargs = {s.name: env.get(s.var_name, s.default)
              for s in _setting_infos}
    kwargs['domains'] = []
    # $IPv6SETUP needs to be 'TRUE' to enable ipv6
    if isinstance(kwargs['ipv6'], six.string_types):
        kwargs['ipv6'] = (kwargs['ipv6'].upper() == 'TRUE')
    config = Config(**kwargs)
    # Either IPA master or AD can define a domain
    domain_index = 1
    while (env.get('MASTER_env%s' % domain_index) or
           env.get('AD_env%s' % domain_index)):
        if env.get('MASTER_env%s' % domain_index):
            # IPA domain takes precedence to AD domain in case of conflict
            config.domains.append(domain_from_env(env, config, domain_index,
                                                  domain_type='IPA'))
        else:
            config.domains.append(domain_from_env(env, config, domain_index,
                                                  domain_type='AD'))
        # Domain numbering is contiguous: the loop stops at the first index
        # with neither a MASTER_envN nor an AD_envN variable.
        domain_index += 1
    return config
def config_to_env(config, simple=True):
    """Convert this test config into environment variables.

    Emits, per setting, its var_name ('' for None/False, 'TRUE' for True),
    and per domain N: DOMAIN_envN / RELM_envN / BASEDN_envN plus, per role,
    both the aggregate space-separated lists and per-host numbered
    variables (BEAKER<role><i>_envN, BEAKER<role><i>_IP_envN).

    When *simple* is true, also emits the legacy suffix-less variables
    (MASTER, SLAVE, CLIENT, ...) for the first domain.
    """
    try:
        env = collections.OrderedDict()
    except AttributeError:
        # Older Python versions
        env = {}
    for setting in _setting_infos:
        value = getattr(config, setting.name)
        if value in (None, False):
            env[setting.var_name] = ''
        elif value is True:
            env[setting.var_name] = 'TRUE'
        else:
            env[setting.var_name] = str(value)
    for domain in config.domains:
        # Domains are numbered 1-based by their position in config.domains.
        env_suffix = '_env%s' % (config.domains.index(domain) + 1)
        env['DOMAIN%s' % env_suffix] = domain.name
        env['RELM%s' % env_suffix] = domain.realm
        env['BASEDN%s' % env_suffix] = str(domain.basedn)
        for role in domain.roles:
            hosts = domain.hosts_by_role(role)
            # Extra (non-static) roles carry the TESTHOST_ prefix.
            prefix = ('' if role in domain.static_roles
                      else TESTHOST_PREFIX)
            hostnames = ' '.join(h.hostname for h in hosts)
            env['%s%s%s' % (prefix, role.upper(), env_suffix)] = hostnames
            ext_hostnames = ' '.join(h.external_hostname for h in hosts)
            env['BEAKER%s%s' % (role.upper(), env_suffix)] = ext_hostnames
            ips = ' '.join(h.ip for h in hosts)
            env['BEAKER%s_IP%s' % (role.upper(), env_suffix)] = ips
            # Per-host numbered variables; i counts within this role.
            for i, host in enumerate(hosts, start=1):
                suffix = '%s%s' % (role.upper(), i)
                prefix = ('' if role in domain.static_roles
                          else TESTHOST_PREFIX)
                ext_hostname = host.external_hostname
                env['%s%s%s' % (prefix, suffix,
                                env_suffix)] = host.hostname
                env['BEAKER%s%s' % (suffix, env_suffix)] = ext_hostname
                env['BEAKER%s_IP%s' % (suffix, env_suffix)] = host.ip
    if simple:
        # Simple Vars for simplicity and backwards compatibility with older
        # tests. This means no _env<NUM> suffix.
        if config.domains:
            default_domain = config.domains[0]
            if default_domain.master:
                env['MASTER'] = default_domain.master.hostname
                env['BEAKERMASTER'] = default_domain.master.external_hostname
                env['MASTERIP'] = default_domain.master.ip
            if default_domain.replicas:
                env['SLAVE'] = env['REPLICA'] = env['REPLICA_env1']
                env['BEAKERSLAVE'] = env['BEAKERREPLICA_env1']
                env['SLAVEIP'] = env['BEAKERREPLICA_IP_env1']
            if default_domain.clients:
                client = default_domain.clients[0]
                env['CLIENT'] = client.hostname
                env['BEAKERCLIENT'] = client.external_hostname
                if len(default_domain.clients) >= 2:
                    client = default_domain.clients[1]
                    env['CLIENT2'] = client.hostname
                    env['BEAKERCLIENT2'] = client.external_hostname
    return env
def env_normalize(env):
    """Fill canonical *_env1 variables from their legacy aliases, in place.

    MASTER_env1 <- MASTER
    REPLICA_env1 <- REPLICA, SLAVE
    CLIENT_env1 <- CLIENT
    BEAKER* variants likewise: BEAKERMASTER1_env1 <- BEAKERMASTER, etc.
    CLIENT_env1 is additionally extended with CLIENT2 or CLIENT2_env1.
    Canonical names with no alias set are defaulted to ''.
    """
    def coalesce(name, *fallbacks):
        # First existing fallback wins; an already-set name is left alone.
        if name in env:
            return
        for fallback in fallbacks:
            if fallback in env:
                env[name] = env[fallback]
                return
        env[name] = ''

    def extend(name, source):
        # Append env[source] to the space-separated env[name], unless it is
        # empty/missing or already listed there.
        addition = env.get(source)
        if not addition:
            return
        if addition in env[name].split(' '):
            return
        env[name] = env[name] + ' ' + addition

    coalesce('MASTER_env1', 'MASTER')
    coalesce('REPLICA_env1', 'REPLICA', 'SLAVE')
    coalesce('CLIENT_env1', 'CLIENT')
    coalesce('BEAKERMASTER1_env1', 'BEAKERMASTER')
    coalesce('BEAKERREPLICA1_env1', 'BEAKERREPLICA', 'BEAKERSLAVE')
    coalesce('BEAKERCLIENT1_env1', 'BEAKERCLIENT')
    extend('CLIENT_env1', 'CLIENT2')
    extend('CLIENT_env1', 'CLIENT2_env1')
def domain_from_env(env, config, index, domain_type):
    """Build a Domain (with all its hosts) from the *_env<index> variables.

    The domain name is everything after the first '.' of the value of
    MASTER_env<index> (or AD_env<index> for AD domains); *domain_type*
    is 'IPA' or 'AD'.  Raises ValueError if no hosts are defined.
    """
    # Roles available in the domain depend on the type of the domain
    # Unix machines are added only to the IPA domains, Windows machines
    # only to the AD domains
    if domain_type == 'IPA':
        master_role = 'MASTER'
    else:
        master_role = 'AD'
    env_suffix = '_env%s' % index
    master_env = '%s%s' % (master_role, env_suffix)
    # NOTE(review): partition() is applied to the whole variable value; if it
    # lists several masters, the domain name spans past the first FQDN —
    # presumably a single master is expected here, confirm with callers.
    hostname, _dot, domain_name = env[master_env].partition('.')
    domain = Domain(config, domain_name, domain_type)
    for role in _roles_from_env(domain, env, env_suffix):
        # Extra (non-static) roles live under the TESTHOST_ prefix.
        prefix = '' if role in domain.static_roles else TESTHOST_PREFIX
        value = env.get('%s%s%s' % (prefix, role.upper(), env_suffix), '')
        for host_index, hostname in enumerate(value.split(), start=1):
            host = host_from_env(env, domain, hostname, role,
                                 host_index, index)
            domain.hosts.append(host)
    if not domain.hosts:
        raise ValueError('No hosts defined for %s' % env_suffix)
    return domain
def _roles_from_env(domain, env, env_suffix):
    """Yield the domain's static roles, then its extra roles, sorted.

    Extra roles are discovered from variables of the form
    TESTHOST_<role>_envX; numbered per-host variables (role names ending
    in a digit) are ignored.
    """
    for role in domain.static_roles:
        yield role
    # Extra roles are defined via env variables of form TESTHOST_key_envX
    extra_roles = set()
    for var_name in sorted(env):
        if not (var_name.startswith(TESTHOST_PREFIX)
                and var_name.endswith(env_suffix)):
            continue
        # Strip the TESTHOST_ prefix and the _envX suffix.
        middle = var_name.split('_')[1:-1]
        role_name = '_'.join(middle)
        if role_name and not role_name[-1].isdigit():
            extra_roles.add(role_name.lower())
    for role in sorted(extra_roles):
        yield role
def domain_to_env(domain, **kwargs):
    """Return environment variables specific to this domain.

    Starts from the config-wide variables and overlays the domain's
    identity (DOMAIN, RELM, BASEDN).
    """
    env = domain.config.to_env(**kwargs)
    env.update({
        'DOMAIN': domain.name,
        'RELM': domain.realm,
        'BASEDN': str(domain.basedn),
    })
    return env
def host_from_env(env, domain, hostname, role, index, domain_index):
    """Build a host object for *hostname* from the BEAKER* variables.

    Both the IP and the external hostname are optional (None when the
    corresponding variable is unset); the framework resolves them later.
    """
    beaker_key = 'BEAKER%s%s' % (role.upper(), index)
    env_suffix = '_env%s' % domain_index
    ip = env.get('%s_IP%s' % (beaker_key, env_suffix), None)
    external_hostname = env.get('%s%s' % (beaker_key, env_suffix), None)
    cls = domain.get_host_class({})
    return cls(domain, hostname, role, ip, external_hostname)
def host_to_env(host, **kwargs):
    """Return environment variables specific to this host.

    Overlays MYHOSTNAME/MYBEAKERHOSTNAME/MYIP/MYROLE/MYENV on top of the
    domain-level variables.
    """
    env = host.domain.to_env(**kwargs)
    # NOTE(review): this index counts the host's position among ALL hosts of
    # the domain, while config_to_env() numbers hosts within their role
    # (BEAKER<role><i>) — confirm the two numbering schemes are meant to
    # agree for multi-role domains.
    index = host.domain.hosts.index(host) + 1
    domain_index = host.config.domains.index(host.domain) + 1
    role = host.role.upper()
    # The master is unique per domain, so it carries no number.
    if host.role != 'master':
        role += str(index)
    env['MYHOSTNAME'] = host.hostname
    env['MYBEAKERHOSTNAME'] = host.external_hostname
    env['MYIP'] = host.ip
    # Extra (non-static) roles live under the TESTHOST_ prefix.
    prefix = ('' if host.role in host.domain.static_roles
              else TESTHOST_PREFIX)
    env_suffix = '_env%s' % domain_index
    env['MYROLE'] = '%s%s%s' % (prefix, role, env_suffix)
    env['MYENV'] = str(domain_index)
    return env
def env_to_script(env):
    """Render *env* as a shell script of `export` statements."""
    lines = []
    for key, value in env.items():
        lines.append('export %s=%s\n' % (key, ipautil.shell_quote(value)))
    return ''.join(lines)
|
tbinjiayou/Odoo | refs/heads/master | addons/project_issue/__openerp__.py | 52 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo addon manifest: this dict is read as data by the module loader,
# not imported as a module.
{
    'name': 'Issue Tracking',
    'version': '1.0',
    'category': 'Project Management',
    # Position of the app in the Apps list (lower sorts first).
    'sequence': 9,
    'summary': 'Support, Bug Tracker, Helpdesk',
    'description': """
Track Issues/Bugs Management for Projects
=========================================
This application allows you to manage the issues you might face in a project like bugs in a system, client complaints or material breakdowns.
It allows the manager to quickly check the issues, assign them and decide on their status quickly as they evolve.
""",
    'author': 'OpenERP SA',
    'website': 'https://www.odoo.com/page/project-management',
    'images': ['images/issue_analysis.jpeg','images/project_issue.jpeg'],
    'depends': [
        'sales_team',
        'project',
    ],
    # Data files are loaded in this order on install/upgrade.
    'data': [
        'project_issue_view.xml',
        'project_issue_menu.xml',
        'report/project_issue_report_view.xml',
        'security/project_issue_security.xml',
        'security/ir.model.access.csv',
        'res_config_view.xml',
        'project_issue_data.xml'
    ],
    'demo': ['project_issue_demo.xml'],
    'test': [
        'test/issue_users.yml',
        'test/subscribe_issue.yml',
        'test/issue_process.yml',
        'test/issue_demo.yml'
    ],
    'installable': True,
    'auto_install': False,
    # Shown as a standalone application in the apps menu.
    'application': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
jarvys/django-1.7-jdb | refs/heads/master | django/contrib/gis/geometry/backend/geos.py | 114 | from django.contrib.gis.geos import (
GEOSGeometry as Geometry, GEOSException as GeometryException)
__all__ = ['Geometry', 'GeometryException']
|
jbedorf/tensorflow | refs/heads/master | tensorflow/python/framework/errors_impl.py | 5 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Exception types for TensorFlow errors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import traceback
import warnings
from tensorflow.core.lib.core import error_codes_pb2
from tensorflow.python import pywrap_tensorflow as c_api
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import error_interpolation
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import tf_inspect
from tensorflow.python.util import tf_stack
from tensorflow.python.util.tf_export import tf_export
def _compact_stack_trace(op):
  """Returns a traceback for `op` with common file prefixes stripped."""
  common_prefix = error_interpolation.traceback_files_common_prefix([[op]])
  prefix_len = len(common_prefix)
  compact_traces = []
  for frame in op.traceback:
    frame = list(frame)
    filename = frame[tf_stack.TB_FILENAME]
    if filename.startswith(common_prefix):
      # Drop the shared prefix so the printed trace stays readable.
      frame[tf_stack.TB_FILENAME] = filename[prefix_len:]
    compact_traces.append(tuple(frame))
  return compact_traces
@tf_export("errors.OpError", v1=["errors.OpError", "OpError"])
@deprecation.deprecated_endpoints("OpError")
class OpError(Exception):
"""A generic error that is raised when TensorFlow execution fails.
Whenever possible, the session will raise a more specific subclass
of `OpError` from the `tf.errors` module.
"""
def __init__(self, node_def, op, message, error_code):
"""Creates a new `OpError` indicating that a particular op failed.
Args:
node_def: The `node_def_pb2.NodeDef` proto representing the op that
failed, if known; otherwise None.
op: The `ops.Operation` that failed, if known; otherwise None.
message: The message string describing the failure.
error_code: The `error_codes_pb2.Code` describing the error.
"""
super(OpError, self).__init__()
self._node_def = node_def
self._op = op
self._message = message
self._error_code = error_code
def __reduce__(self):
# Allow the subclasses to accept less arguments in their __init__.
init_argspec = tf_inspect.getargspec(self.__class__.__init__)
args = tuple(getattr(self, arg) for arg in init_argspec.args[1:])
return self.__class__, args
@property
def message(self):
"""The error message that describes the error."""
return self._message
@property
def op(self):
"""The operation that failed, if known.
*N.B.* If the failed op was synthesized at runtime, e.g. a `Send`
or `Recv` op, there will be no corresponding
`tf.Operation`
object. In that case, this will return `None`, and you should
instead use the `tf.errors.OpError.node_def` to
discover information about the op.
Returns:
The `Operation` that failed, or None.
"""
return self._op
@property
def error_code(self):
"""The integer error code that describes the error."""
return self._error_code
@property
def node_def(self):
"""The `NodeDef` proto representing the op that failed."""
return self._node_def
def __str__(self):
if self._op is not None:
output = ["%s\n\nOriginal stack trace for %r:\n" % (self.message,
self._op.name,)]
curr_traceback_list = traceback.format_list(
_compact_stack_trace(self._op))
output.extend(curr_traceback_list)
# pylint: disable=protected-access
original_op = self._op._original_op
# pylint: enable=protected-access
while original_op is not None:
output.append(
"\n...which was originally created as op %r, defined at:\n"
% (original_op.name,))
prev_traceback_list = curr_traceback_list
curr_traceback_list = traceback.format_list(
_compact_stack_trace(original_op))
# Attempt to elide large common subsequences of the subsequent
# stack traces.
#
# TODO(mrry): Consider computing the actual longest common subsequence.
is_eliding = False
elide_count = 0
last_elided_line = None
for line, line_in_prev in zip(curr_traceback_list, prev_traceback_list):
if line == line_in_prev:
if is_eliding:
elide_count += 1
last_elided_line = line
else:
output.append(line)
is_eliding = True
elide_count = 0
else:
if is_eliding:
if elide_count > 0:
output.extend(
["[elided %d identical lines from previous traceback]\n"
% (elide_count - 1,), last_elided_line])
is_eliding = False
output.extend(line)
# pylint: disable=protected-access
original_op = original_op._original_op
# pylint: enable=protected-access
return "".join(output)
else:
return self.message
OK = error_codes_pb2.OK
tf_export("errors.OK").export_constant(__name__, "OK")
CANCELLED = error_codes_pb2.CANCELLED
tf_export("errors.CANCELLED").export_constant(__name__, "CANCELLED")
UNKNOWN = error_codes_pb2.UNKNOWN
tf_export("errors.UNKNOWN").export_constant(__name__, "UNKNOWN")
INVALID_ARGUMENT = error_codes_pb2.INVALID_ARGUMENT
tf_export("errors.INVALID_ARGUMENT").export_constant(__name__,
"INVALID_ARGUMENT")
DEADLINE_EXCEEDED = error_codes_pb2.DEADLINE_EXCEEDED
tf_export("errors.DEADLINE_EXCEEDED").export_constant(__name__,
"DEADLINE_EXCEEDED")
NOT_FOUND = error_codes_pb2.NOT_FOUND
tf_export("errors.NOT_FOUND").export_constant(__name__, "NOT_FOUND")
ALREADY_EXISTS = error_codes_pb2.ALREADY_EXISTS
tf_export("errors.ALREADY_EXISTS").export_constant(__name__, "ALREADY_EXISTS")
PERMISSION_DENIED = error_codes_pb2.PERMISSION_DENIED
tf_export("errors.PERMISSION_DENIED").export_constant(__name__,
"PERMISSION_DENIED")
UNAUTHENTICATED = error_codes_pb2.UNAUTHENTICATED
tf_export("errors.UNAUTHENTICATED").export_constant(__name__, "UNAUTHENTICATED")
RESOURCE_EXHAUSTED = error_codes_pb2.RESOURCE_EXHAUSTED
tf_export("errors.RESOURCE_EXHAUSTED").export_constant(__name__,
"RESOURCE_EXHAUSTED")
FAILED_PRECONDITION = error_codes_pb2.FAILED_PRECONDITION
tf_export("errors.FAILED_PRECONDITION").export_constant(__name__,
"FAILED_PRECONDITION")
ABORTED = error_codes_pb2.ABORTED
tf_export("errors.ABORTED").export_constant(__name__, "ABORTED")
OUT_OF_RANGE = error_codes_pb2.OUT_OF_RANGE
tf_export("errors.OUT_OF_RANGE").export_constant(__name__, "OUT_OF_RANGE")
UNIMPLEMENTED = error_codes_pb2.UNIMPLEMENTED
tf_export("errors.UNIMPLEMENTED").export_constant(__name__, "UNIMPLEMENTED")
INTERNAL = error_codes_pb2.INTERNAL
tf_export("errors.INTERNAL").export_constant(__name__, "INTERNAL")
UNAVAILABLE = error_codes_pb2.UNAVAILABLE
tf_export("errors.UNAVAILABLE").export_constant(__name__, "UNAVAILABLE")
DATA_LOSS = error_codes_pb2.DATA_LOSS
tf_export("errors.DATA_LOSS").export_constant(__name__, "DATA_LOSS")
# pylint: disable=line-too-long
@tf_export("errors.CancelledError")
class CancelledError(OpError):
"""Raised when an operation or step is cancelled.
For example, a long-running operation (e.g.
`tf.QueueBase.enqueue` may be
cancelled by running another operation (e.g.
`tf.QueueBase.close`,
or by `tf.Session.close`.
A step that is running such a long-running operation will fail by raising
`CancelledError`.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates a `CancelledError`."""
super(CancelledError, self).__init__(node_def, op, message, CANCELLED)
# pylint: enable=line-too-long
@tf_export("errors.UnknownError")
class UnknownError(OpError):
"""Unknown error.
An example of where this error may be returned is if a Status value
received from another address space belongs to an error-space that
is not known to this address space. Also errors raised by APIs that
do not return enough error information may be converted to this
error.
@@__init__
"""
def __init__(self, node_def, op, message, error_code=UNKNOWN):
"""Creates an `UnknownError`."""
super(UnknownError, self).__init__(node_def, op, message, error_code)
@tf_export("errors.InvalidArgumentError")
class InvalidArgumentError(OpError):
"""Raised when an operation receives an invalid argument.
This may occur, for example, if an operation is receives an input
tensor that has an invalid value or shape. For example, the
`tf.matmul` op will raise this
error if it receives an input that is not a matrix, and the
`tf.reshape` op will raise
this error if the new shape does not match the number of elements in the input
tensor.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `InvalidArgumentError`."""
super(InvalidArgumentError, self).__init__(node_def, op, message,
INVALID_ARGUMENT)
@tf_export("errors.DeadlineExceededError")
class DeadlineExceededError(OpError):
"""Raised when a deadline expires before an operation could complete.
This exception is not currently used.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates a `DeadlineExceededError`."""
super(DeadlineExceededError, self).__init__(node_def, op, message,
DEADLINE_EXCEEDED)
@tf_export("errors.NotFoundError")
class NotFoundError(OpError):
"""Raised when a requested entity (e.g., a file or directory) was not found.
For example, running the
`tf.WholeFileReader.read`
operation could raise `NotFoundError` if it receives the name of a file that
does not exist.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates a `NotFoundError`."""
super(NotFoundError, self).__init__(node_def, op, message, NOT_FOUND)
@tf_export("errors.AlreadyExistsError")
class AlreadyExistsError(OpError):
"""Raised when an entity that we attempted to create already exists.
For example, running an operation that saves a file
(e.g. `tf.train.Saver.save`)
could potentially raise this exception if an explicit filename for an
existing file was passed.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `AlreadyExistsError`."""
super(AlreadyExistsError, self).__init__(node_def, op, message,
ALREADY_EXISTS)
@tf_export("errors.PermissionDeniedError")
class PermissionDeniedError(OpError):
"""Raised when the caller does not have permission to run an operation.
For example, running the
`tf.WholeFileReader.read`
operation could raise `PermissionDeniedError` if it receives the name of a
file for which the user does not have the read file permission.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates a `PermissionDeniedError`."""
super(PermissionDeniedError, self).__init__(node_def, op, message,
PERMISSION_DENIED)
@tf_export("errors.UnauthenticatedError")
class UnauthenticatedError(OpError):
"""The request does not have valid authentication credentials.
This exception is not currently used.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `UnauthenticatedError`."""
super(UnauthenticatedError, self).__init__(node_def, op, message,
UNAUTHENTICATED)
@tf_export("errors.ResourceExhaustedError")
class ResourceExhaustedError(OpError):
"""Some resource has been exhausted.
For example, this error might be raised if a per-user quota is
exhausted, or perhaps the entire file system is out of space.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates a `ResourceExhaustedError`."""
super(ResourceExhaustedError, self).__init__(node_def, op, message,
RESOURCE_EXHAUSTED)
@tf_export("errors.FailedPreconditionError")
class FailedPreconditionError(OpError):
"""Operation was rejected because the system is not in a state to execute it.
This exception is most commonly raised when running an operation
that reads a `tf.Variable`
before it has been initialized.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates a `FailedPreconditionError`."""
super(FailedPreconditionError, self).__init__(node_def, op, message,
FAILED_PRECONDITION)
@tf_export("errors.AbortedError")
class AbortedError(OpError):
"""The operation was aborted, typically due to a concurrent action.
For example, running a
`tf.QueueBase.enqueue`
operation may raise `AbortedError` if a
`tf.QueueBase.close` operation
previously ran.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `AbortedError`."""
super(AbortedError, self).__init__(node_def, op, message, ABORTED)
@tf_export("errors.OutOfRangeError")
class OutOfRangeError(OpError):
"""Raised when an operation iterates past the valid input range.
This exception is raised in "end-of-file" conditions, such as when a
`tf.QueueBase.dequeue`
operation is blocked on an empty queue, and a
`tf.QueueBase.close`
operation executes.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `OutOfRangeError`."""
super(OutOfRangeError, self).__init__(node_def, op, message,
OUT_OF_RANGE)
@tf_export("errors.UnimplementedError")
class UnimplementedError(OpError):
"""Raised when an operation has not been implemented.
Some operations may raise this error when passed otherwise-valid
arguments that it does not currently support. For example, running
the `tf.nn.max_pool` operation
would raise this error if pooling was requested on the batch dimension,
because this is not yet supported.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `UnimplementedError`."""
super(UnimplementedError, self).__init__(node_def, op, message,
UNIMPLEMENTED)
@tf_export("errors.InternalError")
class InternalError(OpError):
"""Raised when the system experiences an internal error.
This exception is raised when some invariant expected by the runtime
has been broken. Catching this exception is not recommended.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `InternalError`."""
super(InternalError, self).__init__(node_def, op, message, INTERNAL)
@tf_export("errors.UnavailableError")
class UnavailableError(OpError):
"""Raised when the runtime is currently unavailable.
This exception is not currently used.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `UnavailableError`."""
super(UnavailableError, self).__init__(node_def, op, message,
UNAVAILABLE)
@tf_export("errors.DataLossError")
class DataLossError(OpError):
"""Raised when unrecoverable data loss or corruption is encountered.
For example, this may be raised by running a
`tf.WholeFileReader.read`
operation, if the file is truncated while it is being read.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates a `DataLossError`."""
super(DataLossError, self).__init__(node_def, op, message, DATA_LOSS)
_CODE_TO_EXCEPTION_CLASS = {
CANCELLED: CancelledError,
UNKNOWN: UnknownError,
INVALID_ARGUMENT: InvalidArgumentError,
DEADLINE_EXCEEDED: DeadlineExceededError,
NOT_FOUND: NotFoundError,
ALREADY_EXISTS: AlreadyExistsError,
PERMISSION_DENIED: PermissionDeniedError,
UNAUTHENTICATED: UnauthenticatedError,
RESOURCE_EXHAUSTED: ResourceExhaustedError,
FAILED_PRECONDITION: FailedPreconditionError,
ABORTED: AbortedError,
OUT_OF_RANGE: OutOfRangeError,
UNIMPLEMENTED: UnimplementedError,
INTERNAL: InternalError,
UNAVAILABLE: UnavailableError,
DATA_LOSS: DataLossError,
}
c_api.PyExceptionRegistry_Init(_CODE_TO_EXCEPTION_CLASS)
_EXCEPTION_CLASS_TO_CODE = {
class_: code for code, class_ in _CODE_TO_EXCEPTION_CLASS.items()}
@tf_export("errors.exception_type_from_error_code")
def exception_type_from_error_code(error_code):
return _CODE_TO_EXCEPTION_CLASS[error_code]
@tf_export("errors.error_code_from_exception_type")
def error_code_from_exception_type(cls):
try:
return _EXCEPTION_CLASS_TO_CODE[cls]
except KeyError:
warnings.warn("Unknown class exception")
return UnknownError(None, None, "Unknown class exception", None)
def _make_specific_exception(node_def, op, message, error_code):
  """Instantiates the `OpError` subclass matching `error_code`.

  Falls back to `UnknownError` (after warning) when `error_code` is not a
  recognized TF error code.
  """
  try:
    return exception_type_from_error_code(error_code)(node_def, op, message)
  except KeyError:
    warnings.warn("Unknown error code: %d" % error_code)
    return UnknownError(node_def, op, message, error_code)
# Named like a function for backwards compatibility with the
# @tf_contextlib.contextmanager version, which was switched to a class to avoid
# some object creation overhead.
# TODO(b/77295559): expand use of TF_Status* SWIG typemap and deprecate this.
@tf_export("errors.raise_exception_on_not_ok_status") # pylint: disable=invalid-name
class raise_exception_on_not_ok_status(object):
"""Context manager to check for C API status."""
def __enter__(self):
self.status = c_api_util.ScopedTFStatus()
return self.status.status
def __exit__(self, type_arg, value_arg, traceback_arg):
try:
if c_api.TF_GetCode(self.status.status) != 0:
raise _make_specific_exception(
None, None,
compat.as_text(c_api.TF_Message(self.status.status)),
c_api.TF_GetCode(self.status.status))
# Delete the underlying status object from memory otherwise it stays alive
# as there is a reference to status from this from the traceback due to
# raise.
finally:
del self.status
return False # False values do not suppress exceptions
|
tejasnikumbh/AllSAT | refs/heads/master | lib/python2.7/site-packages/numpy/core/tests/test_getlimits.py | 70 | """ Test functions for limits module.
"""
from __future__ import division, absolute_import, print_function
from numpy.testing import *
from numpy.core import finfo, iinfo
from numpy import half, single, double, longdouble
import numpy as np
##################################################
class TestPythonFloat(TestCase):
    def test_singleton(self):
        """finfo(float) is cached: repeated calls return the same object."""
        first = finfo(float)
        second = finfo(float)
        assert_equal(id(first), id(second))
class TestHalf(TestCase):
    def test_singleton(self):
        """finfo(half) is cached: repeated calls return the same object."""
        first = finfo(half)
        second = finfo(half)
        assert_equal(id(first), id(second))
class TestSingle(TestCase):
    def test_singleton(self):
        """finfo(single) is cached: repeated calls return the same object."""
        first = finfo(single)
        second = finfo(single)
        assert_equal(id(first), id(second))
class TestDouble(TestCase):
    def test_singleton(self):
        """finfo(double) is cached: repeated calls return the same object."""
        first = finfo(double)
        second = finfo(double)
        assert_equal(id(first), id(second))
class TestLongdouble(TestCase):
    def test_singleton(self, level=2):
        """finfo(longdouble) is cached: repeated calls return the same object."""
        first = finfo(longdouble)
        second = finfo(longdouble)
        assert_equal(id(first), id(second))
class TestIinfo(TestCase):
    def test_basic(self):
        """iinfo accepts both typecode strings and scalar types equally."""
        codes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']
        scalar_types = [np.int8, np.int16, np.int32, np.int64,
                        np.uint8, np.uint16, np.uint32, np.uint64]
        for code, scalar_type in zip(codes, scalar_types):
            assert_equal(iinfo(code).min, iinfo(scalar_type).min)
            assert_equal(iinfo(code).max, iinfo(scalar_type).max)
        # Float typecodes are rejected.
        self.assertRaises(ValueError, iinfo, 'f4')

    def test_unsigned_max(self):
        """The max of an unsigned type equals its all-ones bit pattern."""
        for T in np.sctypes['uint']:
            assert_equal(iinfo(T).max, T(-1))
class TestRepr(TestCase):
    def test_iinfo_repr(self):
        """repr(iinfo) shows min/max/dtype in a fixed format."""
        expected = "iinfo(min=-32768, max=32767, dtype=int16)"
        assert_equal(repr(np.iinfo(np.int16)), expected)

    def test_finfo_repr(self):
        """repr(finfo) shows resolution/min/max/dtype in a fixed format."""
        expected = "finfo(resolution=1e-06, min=-3.4028235e+38," + \
                   " max=3.4028235e+38, dtype=float32)"
        # Python 2.5 float formatting on Windows adds an extra 0 to the
        # exponent. So test for both. Once 2.5 compatibility is dropped, this
        # can simply use `assert_equal(repr(np.finfo(np.float32)), expected)`.
        expected_win25 = "finfo(resolution=1e-006, min=-3.4028235e+038," + \
                         " max=3.4028235e+038, dtype=float32)"
        actual = repr(np.finfo(np.float32))
        if not actual == expected:
            if not actual == expected_win25:
                # BUG FIX: the original referenced an undefined name `desired`
                # here, so the failure path itself raised NameError instead of
                # a useful AssertionError.
                msg = build_err_msg([actual, expected], verbose=True)
                raise AssertionError(msg)
def test_instances():
    """Smoke test: iinfo/finfo accept instances, not just types."""
    for factory, sample in ((iinfo, 10), (finfo, 3.0)):
        factory(sample)
if __name__ == "__main__":
run_module_suite()
|
abhimm/Face-attractiveness-classification | refs/heads/master | knn_classifier_with_cv.py | 1 | __author__ = 'root'
import scipy.io
from k_0_100_nn_classifier import classify_knn
import numpy
# K = no of nearest neighbors, n = n-fold cross validation
# K = no of nearest neighbors, n = n-fold cross validation
def classify_knn_with_cv(train_data, train_labels, K, n):
    """Run n-fold cross validation of k-NN for every k in 1..K.

    Returns a list of K averaged error rates, one per neighbor count.
    Assumes classify_knn returns {instance_index: [label for k=1..K]}.
    """
    # Size of each validation fold (integer division; the final fold absorbs
    # any remainder, see the `end` adjustment below).
    no_instance_in_fold = int(len(train_labels)/n)
    i = 0
    cv_result = list()
    # One error list per fold; each will hold K per-k error rates.
    for j in range(n):
        cv_result.append([])
    no_of_fold = 0
    while no_of_fold < n:
        start = 0
        end = 0
        # prepare validation fold
        start = i
        if i + no_instance_in_fold < len(train_labels):
            end = i+no_instance_in_fold
        else:
            end = len(train_data)
        # The last fold always extends to the end of the data set so that
        # leftover instances (when len is not divisible by n) are covered.
        if no_of_fold == n-1:
            end = len(train_data)
        test_data = train_data[start:end]
        test_labels = train_labels[start:end]
        # prepare training folds: everything outside [start:end)
        train_data_cv = numpy.array([])
        train_labels_cv = numpy.array([])
        if start == 0:
            train_data_cv = train_data[end:]
            train_labels_cv = train_labels[end:]
        else:
            if end == len(train_labels):
                train_data_cv = train_data[:start]
                train_labels_cv = train_labels[:start]
            else:
                train_data_cv = numpy.concatenate((train_data[:start], train_data[end:]))
                train_labels_cv = numpy.concatenate((train_labels[:start], train_labels[end:]))
        test_result = classify_knn(train_data_cv, train_labels_cv, test_data)
        # Accumulate the misclassification rate for each neighbor count k.
        for j in range(K):
            error = 0.0
            for instance, result in test_result.items():
                if not result[j] == test_labels[instance]:
                    error += 1
            cv_result[no_of_fold].append(error/len(test_result))
        no_of_fold += 1
        i += no_instance_in_fold
    # Average each k's error rate across all folds.
    final_result = list()
    for j in range(K):
        error = 0.0
        for result in cv_result:
            error += result[j]
        final_result.append(error/len(cv_result))
    return final_result
# NOTE: this module is Python 2 code (print statements below).
def main():
    """
    load dataset
    """
    # faces.mat is a MATLAB file with 'traindata' / 'trainlabels' arrays.
    data_file = scipy.io.loadmat('faces.mat')
    train_data = data_file['traindata']
    train_label = data_file['trainlabels']
    no_of_folds = 10
    """
    Excute cross validation
    """
    output = open('cross_validation_error.dat', 'wb')
    k_error = list()
    # Evaluate k = 1..100 in a single cross-validation pass.
    k_error = classify_knn_with_cv(train_data, train_label, 100, no_of_folds)
    for no_of_nearest_neighbor in range(100):
        print "k-NN for no of nearest neighbor:%d" % (no_of_nearest_neighbor + 1)
        print "Cross Validation Error with %d folds: %.03f" % (no_of_folds, k_error[no_of_nearest_neighbor])
        output.write("%d %.03f\n" % (no_of_nearest_neighbor+1, k_error[no_of_nearest_neighbor]))
    output.close()
    print "Check cross_validation_error.dat for result!"

if __name__ == '__main__':
    main()
egoid/baytree | refs/heads/master | lib/python2.7/site-packages/django/utils/encoding.py | 64 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import codecs
import datetime
import locale
from decimal import Decimal
from django.utils import six
from django.utils.functional import Promise
from django.utils.six.moves.urllib.parse import quote, unquote
if six.PY3:
from urllib.parse import unquote_to_bytes
class DjangoUnicodeDecodeError(UnicodeDecodeError):
    """UnicodeDecodeError that also remembers the object that failed to decode."""

    def __init__(self, obj, *args):
        self.obj = obj
        UnicodeDecodeError.__init__(self, *args)

    def __str__(self):
        base_message = UnicodeDecodeError.__str__(self)
        return '%s. You passed in %r (%s)' % (base_message, self.obj, type(self.obj))
# For backwards compatibility. (originally in Django, then added to six 1.9)
python_2_unicode_compatible = six.python_2_unicode_compatible
def smart_text(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Returns a text object representing 's' -- unicode on Python 2 and str on
    Python 3. Treats bytestrings using the 'encoding' codec.

    If strings_only is True, don't convert (some) non-string-like objects.
    """
    if not isinstance(s, Promise):
        return force_text(s, encoding, strings_only, errors)
    # gettext_lazy() results are left untouched so evaluation stays deferred.
    return s
# Types whose instances are returned unchanged by force_text/force_bytes
# when strings_only=True.
_PROTECTED_TYPES = six.integer_types + (
    type(None), float, Decimal, datetime.datetime, datetime.date, datetime.time
)

def is_protected_type(obj):
    """Determine if the object instance is of a protected type.

    Objects of protected types are preserved as-is when passed to
    force_text(strings_only=True).
    """
    return isinstance(obj, _PROTECTED_TYPES)
def force_text(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Similar to smart_text, except that lazy instances are resolved to
    strings, rather than kept as lazy objects.

    If strings_only is True, don't convert (some) non-string-like objects.
    """
    # Handle the common case first for performance reasons.
    if issubclass(type(s), six.text_type):
        return s
    if strings_only and is_protected_type(s):
        return s
    try:
        if not issubclass(type(s), six.string_types):
            # Non-string input: convert via the text constructor, decoding
            # raw bytes with the requested codec where applicable.
            if six.PY3:
                if isinstance(s, bytes):
                    s = six.text_type(s, encoding, errors)
                else:
                    s = six.text_type(s)
            elif hasattr(s, '__unicode__'):
                s = six.text_type(s)
            else:
                s = six.text_type(bytes(s), encoding, errors)
        else:
            # Note: We use .decode() here, instead of six.text_type(s, encoding,
            # errors), so that if s is a SafeBytes, it ends up being a
            # SafeText at the end.
            s = s.decode(encoding, errors)
    except UnicodeDecodeError as e:
        if not isinstance(s, Exception):
            # Re-raise with the offending object attached for better messages.
            raise DjangoUnicodeDecodeError(s, *e.args)
        else:
            # If we get to here, the caller has passed in an Exception
            # subclass populated with non-ASCII bytestring data without a
            # working unicode method. Try to handle this without raising a
            # further exception by individually forcing the exception args
            # to unicode.
            s = ' '.join(force_text(arg, encoding, strings_only, errors)
                         for arg in s)
    return s
def smart_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Returns a bytestring version of 's', encoded as specified in 'encoding'.

    If strings_only is True, don't convert (some) non-string-like objects.
    """
    if not isinstance(s, Promise):
        return force_bytes(s, encoding, strings_only, errors)
    # gettext_lazy() results are left untouched so evaluation stays deferred.
    return s
def force_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Similar to smart_bytes, except that lazy instances are resolved to
    strings, rather than kept as lazy objects.

    If strings_only is True, don't convert (some) non-string-like objects.
    """
    # Handle the common case first for performance reasons.
    if isinstance(s, bytes):
        if encoding == 'utf-8':
            return s
        else:
            # Transcode: bytes are assumed to currently be UTF-8 encoded.
            return s.decode('utf-8', errors).encode(encoding, errors)
    if strings_only and is_protected_type(s):
        return s
    if isinstance(s, six.memoryview):
        return bytes(s)
    if isinstance(s, Promise):
        # Force lazy translation objects to text, then encode.
        return six.text_type(s).encode(encoding, errors)
    if not isinstance(s, six.string_types):
        try:
            if six.PY3:
                return six.text_type(s).encode(encoding)
            else:
                return bytes(s)
        except UnicodeEncodeError:
            if isinstance(s, Exception):
                # An Exception subclass containing non-ASCII data that doesn't
                # know how to print itself properly. We shouldn't raise a
                # further exception.
                return b' '.join(force_bytes(arg, encoding, strings_only, errors)
                                 for arg in s)
            return six.text_type(s).encode(encoding, errors)
    else:
        return s.encode(encoding, errors)
# Bind the "native string" helpers for the running interpreter: the native
# str type is text on Python 3 and bytes on Python 2.
if six.PY3:
    smart_str = smart_text
    force_str = force_text
else:
    smart_str = smart_bytes
    force_str = force_bytes
    # backwards compatibility for Python 2
    smart_unicode = smart_text
    force_unicode = force_text

smart_str.__doc__ = """
Apply smart_text in Python 3 and smart_bytes in Python 2.

This is suitable for writing to sys.stdout (for instance).
"""

force_str.__doc__ = """
Apply force_text in Python 3 and force_bytes in Python 2.
"""
def iri_to_uri(iri):
    """
    Convert an Internationalized Resource Identifier (IRI) portion to a URI
    portion that is suitable for inclusion in a URL.

    This is the algorithm from section 3.1 of RFC 3987. However, since we are
    assuming input is either UTF-8 or unicode already, we can simplify things a
    little from the full method.

    Takes an IRI in UTF-8 bytes (e.g. '/I \xe2\x99\xa5 Django/') or unicode
    (e.g. '/I ♥ Django/') and returns ASCII bytes containing the encoded result
    (e.g. '/I%20%E2%99%A5%20Django/').
    """
    # The safe list below is the RFC 3986 "reserved" set (gen-delims plus
    # sub-delims, sections 2.2/2.3) together with '~' (the only unreserved
    # character urllib.quote does not already treat as safe) and '%', which
    # section 3.1 of RFC 3987 says must not be re-encoded.
    if iri is not None:
        iri = quote(force_bytes(iri), safe=b"/#%[]=:;$&()+,!?*@'~")
    return iri
def uri_to_iri(uri):
    """
    Converts a Uniform Resource Identifier(URI) into an Internationalized
    Resource Identifier(IRI).

    This is the algorithm from section 3.2 of RFC 3987.

    Takes an URI in ASCII bytes (e.g. '/I%20%E2%99%A5%20Django/') and returns
    unicode containing the encoded result (e.g. '/I \xe2\x99\xa5 Django/').
    """
    if uri is None:
        return uri
    raw = force_bytes(uri)
    # Unquote to bytes first, then re-encode any octet that is not legal UTF-8.
    unquoted = unquote_to_bytes(raw) if six.PY3 else unquote(raw)
    return repercent_broken_unicode(unquoted).decode('utf-8')
def escape_uri_path(path):
    """
    Escape the unsafe characters from the path portion of a Uniform Resource
    Identifier (URI).
    """
    # Safe set: the RFC 2396 "reserved" and "unreserved" characters
    # (sections 2.2/2.3), minus ";", "=" and "?" per section 3.3. "/" stays
    # unescaped because the whole path, not a single segment, is escaped.
    encoded_path = force_bytes(path)
    return quote(encoded_path, safe=b"/:@&+$,-_.!~*'()")
def repercent_broken_unicode(path):
    """
    As per section 3.2 of RFC 3987, step three of converting a URI into an IRI,
    we need to re-percent-encode any octet produced that is not part of a
    strictly legal UTF-8 octet sequence.
    """
    # Iterative form of the original recursion: each pass percent-encodes the
    # first illegal octet span and retries the decode from the start.
    while True:
        try:
            path.decode('utf-8')
        except UnicodeDecodeError as exc:
            fixed = quote(path[exc.start:exc.end], safe=b"/#%[]=:;$&()+,!?*@'~")
            path = path[:exc.start] + force_bytes(fixed) + path[exc.end:]
        else:
            return path
def filepath_to_uri(path):
    """Convert a file system path to a URI portion that is suitable for
    inclusion in a URL.

    We are assuming input is either UTF-8 or unicode already.

    This method will encode certain chars that would normally be recognized as
    special chars for URIs. Note that this method does not encode the '
    character, as it is a valid character within URIs. See
    encodeURIComponent() JavaScript function for more details.

    Returns an ASCII string containing the encoded result.
    """
    if path is None:
        return path
    # Hardcoded separator replacement (instead of os.sep/os.altsep) keeps the
    # behaviour identical across platforms.
    normalized = force_bytes(path).replace(b"\\", b"/")
    return quote(normalized, safe=b"/~!*()'")
def get_system_encoding():
    """
    The encoding of the default system locale, falling back to 'ascii' when
    the locale's encoding is unsupported by Python or could not be
    determined. See tickets #10335 and #5846.
    """
    encoding = 'ascii'
    try:
        candidate = locale.getdefaultlocale()[1] or 'ascii'
        # codecs.lookup raises LookupError for codecs Python does not know.
        codecs.lookup(candidate)
    except Exception:
        pass
    else:
        encoding = candidate
    return encoding

DEFAULT_LOCALE_ENCODING = get_system_encoding()
|
grembo/buildbot | refs/heads/master | master/buildbot/test/unit/test_fake_httpclientservice.py | 10 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
from twisted.internet import defer
from twisted.trial import unittest
from buildbot.test.fake import httpclientservice as fakehttpclientservice
from buildbot.util import httpclientservice
from buildbot.util import service
class myTestedService(service.BuildbotService):
    # Minimal service under test: owns an HTTPClientService and exposes one
    # HTTP call so the fake client service can be exercised.
    name = 'myTestedService'

    @defer.inlineCallbacks
    def reconfigService(self, baseurl):
        # Acquire (or reuse) the shared HTTP client bound to `baseurl`.
        self._http = yield httpclientservice.HTTPClientService.getService(self.master, baseurl)

    @defer.inlineCallbacks
    def doGetRoot(self):
        """Fetch '/' and return the decoded JSON body; raise on non-200."""
        res = yield self._http.get("/")
        # note that at this point, only the http response headers are received
        if res.code != 200:
            raise Exception("%d: server did not succeed" % (res.code))
        res_json = yield res.json()
        # res.json() returns a deferred to represent the time needed to fetch the entire body
        defer.returnValue(res_json)
class Test(unittest.SynchronousTestCase):
    def setUp(self):
        # Install a fake HTTPClientService so the tested service performs no
        # real network I/O; each test registers its expected requests below.
        baseurl = 'http://127.0.0.1:8080'
        self.parent = service.MasterService()
        self._http = self.successResultOf(fakehttpclientservice.HTTPClientService.getFakeService(
            self.parent, self, baseurl))

        self.tested = myTestedService(baseurl)

        self.successResultOf(self.tested.setServiceParent(self.parent))
        self.successResultOf(self.parent.startService())

    def test_root(self):
        # The expectation must match the request doGetRoot() issues.
        self._http.expect("get", "/", content_json={'foo': 'bar'})

        response = self.successResultOf(self.tested.doGetRoot())
        self.assertEqual(response, {'foo': 'bar'})

    def test_root_error(self):
        # A non-200 response must surface as a failure from doGetRoot().
        self._http.expect("get", "/", content_json={'foo': 'bar'}, code=404)

        response = self.failureResultOf(self.tested.doGetRoot())
        self.assertEqual(response.getErrorMessage(), '404: server did not succeed')
|
slisson/intellij-community | refs/heads/master | python/testData/completion/classPrivateNotPublic.py | 53 | class Foo:
__BOO = 1
z = Foo.__B<caret>
|
Onager/plaso | refs/heads/master | plaso/parsers/plist_plugins/airport.py | 2 | # -*- coding: utf-8 -*-
"""Plist parser plugin for Airport plist files."""
from dfdatetime import semantic_time as dfdatetime_semantic_time
from dfdatetime import time_elements as dfdatetime_time_elements
from plaso.containers import plist_event
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.parsers import plist
from plaso.parsers.plist_plugins import interface
class AirportPlugin(interface.PlistPlugin):
  """Plist parser plugin for Airport plist files."""

  NAME = 'airport'
  DATA_FORMAT = 'Airport plist file'

  PLIST_PATH_FILTERS = frozenset([
      interface.PlistPathFilter('com.apple.airport.preferences.plist')])

  PLIST_KEYS = frozenset(['RememberedNetworks'])

  # pylint: disable=arguments-differ
  def GetEntries(self, parser_mediator, match=None, **unused_kwargs):
    """Extracts relevant Airport entries.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
    """
    if 'RememberedNetworks' not in match:
      return

    for wifi in match['RememberedNetworks']:
      # Missing keys fall back to explicit UNKNOWN_* markers instead of None.
      ssid = wifi.get('SSIDString', 'UNKNOWN_SSID')
      security_type = wifi.get('SecurityType', 'UNKNOWN_SECURITY_TYPE')

      event_data = plist_event.PlistTimeEventData()
      event_data.desc = (
          '[WiFi] Connected to network: <{0:s}> using security {1:s}').format(
              ssid, security_type)
      event_data.key = 'item'
      event_data.root = '/RememberedNetworks'

      datetime_value = wifi.get('LastConnected', None)
      if datetime_value:
        date_time = dfdatetime_time_elements.TimeElementsInMicroseconds()
        date_time.CopyFromDatetime(datetime_value)
      else:
        # No LastConnected timestamp recorded; emit a "not set" semantic time.
        date_time = dfdatetime_semantic_time.NotSet()

      event = time_events.DateTimeValuesEvent(
          date_time, definitions.TIME_DESCRIPTION_WRITTEN)
      parser_mediator.ProduceEventWithEventData(event, event_data)


plist.PlistParser.RegisterPlugin(AirportPlugin)
|
bitcf/bitcf | refs/heads/master | share/qt/extract_strings_qt.py | 1294 | #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
# Destination file for the generated Qt string table.
OUT_CPP="src/qt/bitcoinstrings.cpp"
# An empty msgid as emitted by xgettext (the PO header entry); skipped below.
EMPTY=['""']
def parse_po(text):
    """
    Parse 'po' format produced by xgettext.
    Return a list of (msgid,msgstr) tuples.

    Each tuple element is the list of quoted source lines making up that
    message part (continuation lines included).
    """
    messages = []
    current_id = []
    current_str = []
    reading_id = False
    reading_str = False
    for raw_line in text.split('\n'):
        line = raw_line.rstrip('\r')
        if line.startswith('msgid '):
            # A new entry begins; flush the previous one if it was complete.
            if reading_str:
                messages.append((current_id, current_str))
                reading_str = False
            reading_id = True
            current_id = [line[6:]]
        elif line.startswith('msgstr '):
            reading_id = False
            reading_str = True
            current_str = [line[7:]]
        elif line.startswith('"'):
            # Continuation line; belongs to whichever part is being read.
            if reading_id:
                current_id.append(line)
            if reading_str:
                current_str.append(line)
    if reading_str:
        messages.append((current_id, current_str))
    return messages
# Collect all C++ sources and headers, then run xgettext over them to
# extract translatable _("...") strings (Python 2 script).
files = glob.glob('src/*.cpp') + glob.glob('src/*.h')

# xgettext -n --keyword=_ $FILES
child = Popen(['xgettext','--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()

messages = parse_po(out)

# Emit the extracted strings as a C array of QT_TRANSLATE_NOOP entries so Qt
# Linguist can pick them up.
f = open(OUT_CPP, 'w')
f.write("""#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *bitcoin_strings[] = {')
for (msgid, msgstr) in messages:
    # Skip the PO header entry (empty msgid).
    if msgid != EMPTY:
        f.write('QT_TRANSLATE_NOOP("bitcoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};')
f.close()
|
jonalter/WhereTo | refs/heads/master | plugins/ti.alloy/plugin.py | 16 |
import os, sys, subprocess, hashlib
import subprocess
def check_output(*popenargs, **kwargs):
    r"""Run command with arguments and return its output as a byte string.

    Backported from Python 2.7 as it's implemented as pure python on stdlib.

    Raises CalledProcessError (with the captured output attached as
    ``.output``) when the command exits with a non-zero status.

    >>> check_output(['/usr/bin/python', '--version'])
    Python 2.6.2
    """
    process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
    captured, _unused = process.communicate()
    exit_code = process.poll()
    if exit_code:
        command = kwargs.get("args")
        if command is None:
            command = popenargs[0]
        failure = subprocess.CalledProcessError(exit_code, command)
        failure.output = captured
        raise failure
    return captured
# NOTE: this plugin is Python 2 code (print statements below).
def compile(config):
    """Locate the alloy/node binaries and run `alloy compile` for the project.

    `config` is the Titanium builder configuration dict; exits the process on
    any failure.
    """
    # Resolve binary paths: ALLOY_PATH / ALLOY_NODE_PATH env vars first,
    # then `which`, then a list of conventional install locations.
    paths = {}
    binaries = ["alloy","node"]
    for binary in binaries:
        try:
            # see if the environment variable is defined
            paths[binary] = os.environ["ALLOY_" + ("NODE_" if binary == "node" else "") + "PATH"]
        except KeyError as ex:
            # next try PATH, and then our guess paths
            if sys.platform == "darwin" or sys.platform.startswith('linux'):
                userPath = os.environ["HOME"]
                guessPaths = [
                    "/usr/local/bin/"+binary,
                    "/opt/local/bin/"+binary,
                    userPath+"/local/bin/"+binary,
                    "/opt/bin/"+binary,
                    "/usr/bin/"+binary
                ]
                try:
                    binaryPath = check_output(["which",binary], stderr=subprocess.STDOUT).strip()
                    print "[DEBUG] %s installed at '%s'" % (binary,binaryPath)
                except:
                    print "[WARN] Couldn't find %s on your PATH:" % binary
                    print "[WARN] %s" % os.environ["PATH"]
                    print "[WARN]"
                    print "[WARN] Checking for %s in a few default locations:" % binary
                    for p in guessPaths:
                        sys.stdout.write("[WARN] %s -> " % p)
                        if os.path.exists(p):
                            binaryPath = p
                            print "FOUND"
                            break
                        else:
                            print "not found"
                    # NOTE(review): if `which` raised before binaryPath was
                    # assigned and none of the guess paths exist, binaryPath
                    # is unbound here and this check raises NameError instead
                    # of exiting cleanly -- TODO confirm/fix.
                    if binaryPath == None:
                        print "[ERROR] Couldn't find %s" % binary
                        sys.exit(1)
                    else:
                        paths[binary] = binaryPath
            # no guesses on windows, just use the PATH
            elif sys.platform == "win32":
                paths["alloy"] = "alloy.cmd"
    f = os.path.abspath(os.path.join(config['project_dir'], 'app'))
    if os.path.exists(f):
        print "[INFO] alloy app found at %s" % f
        rd = os.path.abspath(os.path.join(config['project_dir'], 'Resources'))
        # FIXME - need to support all platforms - https://jira.appcelerator.org/browse/ALOY-85
        # Build the per-platform config string passed to `alloy compile`.
        devicefamily = 'none'
        simtype = 'none'
        version = '0'
        deploytype = 'development'
        if config['platform']==u'ios':
            version = config['iphone_version']
            devicefamily = config['devicefamily']
            deploytype = config['deploytype']
        if config['platform']==u'android':
            builder = config['android_builder']
            version = builder.tool_api_level
            deploytype = config['deploy_type']
        if config['platform']==u'mobileweb':
            builder = config['mobileweb_builder']
            deploytype = config['deploytype']
        cfg = "platform=%s,version=%s,simtype=%s,devicefamily=%s,deploytype=%s," % (config['platform'],version,simtype,devicefamily,deploytype)
        # On Windows alloy.cmd is directly executable; elsewhere run it via node.
        if sys.platform == "win32":
            cmd = [paths["alloy"], "compile", f, "--no-colors", "--config", cfg]
        else:
            cmd = [paths["node"], paths["alloy"], "compile", f, "--no-colors", "--config", cfg]
        print "[INFO] Executing Alloy compile:"
        print "[INFO] %s" % " ".join(cmd)
        try:
            print check_output(cmd, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as ex:
            if hasattr(ex, 'output'):
                print ex.output
            print "[ERROR] Alloy compile failed"
            retcode = 1
            if hasattr(ex, 'returncode'):
                retcode = ex.returncode
            sys.exit(retcode)
        except EnvironmentError as ex:
            print "[ERROR] Unexpected error with Alloy compiler plugin: %s" % ex.strerror
            sys.exit(2)
|
kushal124/gensim | refs/heads/develop | gensim/test/test_phrases.py | 52 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking transformation algorithms (the models package).
"""
import logging
import unittest
import os
import sys
from gensim import utils
from gensim.models.phrases import Phrases
if sys.version_info[0] >= 3:
unicode = str
module_path = os.path.dirname(__file__) # needed because sample data files are located in the same folder
datapath = lambda fname: os.path.join(module_path, 'test_data', fname)
sentences = [
['human', 'interface', 'computer'],
['survey', 'user', 'computer', 'system', 'response', 'time'],
['eps', 'user', 'interface', 'system'],
['system', 'human', 'system', 'eps'],
['user', 'response', 'time'],
['trees'],
['graph', 'trees'],
['graph', 'minors', 'trees'],
['graph', 'minors', 'survey']
]
class TestPhrasesModel(unittest.TestCase):
    """Unit tests for the gensim Phrases (collocation detection) model."""

    def testSentenceGeneration(self):
        """Test basic bigram using a dummy corpus."""
        bigram = Phrases(sentences)
        # test that we generate the same amount of sentences as the input
        self.assertEqual(len(sentences), len(list(bigram[sentences])))

    def testBigramConstruction(self):
        """Test Phrases bigram construction building."""
        bigram = Phrases(sentences, min_count=1, threshold=1)
        # with this setting we should get response_time and graph_minors
        bigram1_seen = False
        bigram2_seen = False
        for s in bigram[sentences]:
            if u'response_time' in s:
                bigram1_seen = True
            if u'graph_minors' in s:
                bigram2_seen = True
        self.assertTrue(bigram1_seen and bigram2_seen)
        # check the same thing, this time using single doc transformation
        self.assertTrue(u'response_time' in bigram[sentences[1]])
        self.assertTrue(u'response_time' in bigram[sentences[4]])
        self.assertTrue(u'graph_minors' in bigram[sentences[-2]])
        self.assertTrue(u'graph_minors' in bigram[sentences[-1]])

    def testBadParameters(self):
        """Test the phrases module with bad parameters."""
        # should fail with something less or equal than 0
        self.assertRaises(ValueError, Phrases, sentences, min_count=0)
        # threshold should be positive
        self.assertRaises(ValueError, Phrases, sentences, threshold=-1)

    def testEncoding(self):
        """Test that both utf8 and unicode input work; output must be unicode."""
        expected = [u'survey', u'user', u'computer', u'system', u'response_time']
        bigram_utf8 = Phrases(sentences, min_count=1, threshold=1)
        # FIX: assertEquals is a deprecated alias of assertEqual; use the
        # canonical name (same assertion semantics, no DeprecationWarning).
        self.assertEqual(bigram_utf8[sentences[1]], expected)

        unicode_sentences = [[utils.to_unicode(w) for w in sentence] for sentence in sentences]
        bigram_unicode = Phrases(unicode_sentences, min_count=1, threshold=1)
        self.assertEqual(bigram_unicode[sentences[1]], expected)

        transformed = ' '.join(bigram_utf8[sentences[1]])
        self.assertTrue(isinstance(transformed, unicode))

    def testPruning(self):
        """Test that max_vocab_size parameter is respected."""
        bigram = Phrases(sentences, max_vocab_size=5)
        self.assertTrue(len(bigram.vocab) <= 5)
#endclass TestPhrasesModel
# Allow running this test module directly with verbose logging enabled.
if __name__ == '__main__':
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
    unittest.main()
|
robovm/robovm-studio | refs/heads/master | python/testData/refactoring/pushdown/simple.before.py | 166 | class Foo:
def foo(self):
print("a")
class Boo(Foo):
def boo(self):
print "rrrrr" |
gnuhub/intellij-community | refs/heads/master | python/testData/inspections/PyMissingConstructorInspection/deepInheritance.py | 83 | class A(object):
def __init__(self):
print ("Constructor A was called")
class B(A):
pass
class C(B):
def <warning descr="Call to __init__ of super class is missed">__init__</warning>(self):
print ("Constructor C was called")
|
carsongee/edx-platform | refs/heads/master | lms/djangoapps/instructor/tests/test_api.py | 8 | # -*- coding: utf-8 -*-
"""
Unit tests for instructor.api methods.
"""
# pylint: disable=E1111
import unittest
import json
import requests
import datetime
import ddt
import random
from urllib import quote
from django.test import TestCase
from nose.tools import raises
from mock import Mock, patch
from django.conf import settings
from django.test.utils import override_settings
from django.core.urlresolvers import reverse
from django.http import HttpRequest, HttpResponse
from django_comment_common.models import FORUM_ROLE_COMMUNITY_TA, Role
from django_comment_common.utils import seed_permissions_roles
from django.core import mail
from django.utils.timezone import utc
from django.test import RequestFactory
from django.contrib.auth.models import User
from courseware.tests.modulestore_config import TEST_DATA_MIXED_MODULESTORE
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from courseware.tests.helpers import LoginEnrollmentTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from student.tests.factories import UserFactory
from courseware.tests.factories import StaffFactory, InstructorFactory, BetaTesterFactory
from student.roles import CourseBetaTesterRole
from microsite_configuration import microsite
from instructor.tests.utils import FakeContentTask, FakeEmail, FakeEmailInfo
from student.models import CourseEnrollment, CourseEnrollmentAllowed
from courseware.models import StudentModule
# modules which are mocked in test cases.
import instructor_task.api
from instructor.access import allow_access
import instructor.views.api
from instructor.views.api import _split_input_list, common_exceptions_400
from instructor_task.api_helper import AlreadyRunningError
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from shoppingcart.models import CourseRegistrationCode, RegistrationCodeRedemption, Order, PaidCourseRegistration, Coupon
from course_modes.models import CourseMode
from .test_tools import msk_from_problem_urlname, get_extended_due
# Fixtures: minimal views used below to exercise the common_exceptions_400
# decorator's handling of each outcome.
@common_exceptions_400
def view_success(request):  # pylint: disable=W0613
    "A dummy view for testing that returns a simple HTTP response"
    return HttpResponse('success')


@common_exceptions_400
def view_user_doesnotexist(request):  # pylint: disable=W0613
    "A dummy view that raises a User.DoesNotExist exception"
    raise User.DoesNotExist()


@common_exceptions_400
def view_alreadyrunningerror(request):  # pylint: disable=W0613
    "A dummy view that raises an AlreadyRunningError exception"
    raise AlreadyRunningError()
class TestCommonExceptions400(unittest.TestCase):
    """
    Testing the common_exceptions_400 decorator.
    """

    def setUp(self):
        # A bare mocked request is enough; the decorator only inspects
        # is_ajax() and produces the response itself.
        self.request = Mock(spec=HttpRequest)
        self.request.META = {}

    def test_happy_path(self):
        response = view_success(self.request)
        self.assertEqual(response.status_code, 200)

    def test_user_doesnotexist(self):
        self.request.is_ajax.return_value = False
        response = view_user_doesnotexist(self.request)
        self.assertEqual(response.status_code, 400)
        self.assertIn("User does not exist", response.content)

    def test_user_doesnotexist_ajax(self):
        self.request.is_ajax.return_value = True
        response = view_user_doesnotexist(self.request)
        self.assertEqual(response.status_code, 400)
        payload = json.loads(response.content)
        self.assertIn("User does not exist", payload["error"])

    def test_alreadyrunningerror(self):
        self.request.is_ajax.return_value = False
        response = view_alreadyrunningerror(self.request)
        self.assertEqual(response.status_code, 400)
        self.assertIn("Task is already running", response.content)

    def test_alreadyrunningerror_ajax(self):
        self.request.is_ajax.return_value = True
        response = view_alreadyrunningerror(self.request)
        self.assertEqual(response.status_code, 400)
        payload = json.loads(response.content)
        self.assertIn("Task is already running", payload["error"])
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': True, 'REQUIRE_COURSE_EMAIL_AUTH': False})
class TestInstructorAPIDenyLevels(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Ensure that users cannot access endpoints they shouldn't be able to.
"""
def setUp(self):
self.course = CourseFactory.create()
self.user = UserFactory.create()
CourseEnrollment.enroll(self.user, self.course.id)
self.problem_location = msk_from_problem_urlname(
self.course.id,
'robot-some-problem-urlname'
)
self.problem_urlname = self.problem_location.to_deprecated_string()
_module = StudentModule.objects.create(
student=self.user,
course_id=self.course.id,
module_state_key=self.problem_location,
state=json.dumps({'attempts': 10}),
)
# Endpoints that only Staff or Instructors can access
self.staff_level_endpoints = [
('students_update_enrollment', {'identifiers': 'foo@example.org', 'action': 'enroll'}),
('get_grading_config', {}),
('get_students_features', {}),
('get_distribution', {}),
('get_student_progress_url', {'unique_student_identifier': self.user.username}),
('reset_student_attempts', {'problem_to_reset': self.problem_urlname, 'unique_student_identifier': self.user.email}),
('update_forum_role_membership', {'unique_student_identifier': self.user.email, 'rolename': 'Moderator', 'action': 'allow'}),
('list_forum_members', {'rolename': FORUM_ROLE_COMMUNITY_TA}),
('proxy_legacy_analytics', {'aname': 'ProblemGradeDistribution'}),
('send_email', {'send_to': 'staff', 'subject': 'test', 'message': 'asdf'}),
('list_instructor_tasks', {}),
('list_background_email_tasks', {}),
('list_report_downloads', {}),
('calculate_grades_csv', {}),
]
# Endpoints that only Instructors can access
self.instructor_level_endpoints = [
('bulk_beta_modify_access', {'identifiers': 'foo@example.org', 'action': 'add'}),
('modify_access', {'unique_student_identifier': self.user.email, 'rolename': 'beta', 'action': 'allow'}),
('list_course_role_members', {'rolename': 'beta'}),
('rescore_problem', {'problem_to_reset': self.problem_urlname, 'unique_student_identifier': self.user.email}),
]
def _access_endpoint(self, endpoint, args, status_code, msg):
"""
Asserts that accessing the given `endpoint` gets a response of `status_code`.
endpoint: string, endpoint for instructor dash API
args: dict, kwargs for `reverse` call
status_code: expected HTTP status code response
msg: message to display if assertion fails.
"""
url = reverse(endpoint, kwargs={'course_id': self.course.id.to_deprecated_string()})
if endpoint in ['send_email']:
response = self.client.post(url, args)
else:
response = self.client.get(url, args)
self.assertEqual(
response.status_code,
status_code,
msg=msg
)
def test_student_level(self):
"""
Ensure that an enrolled student can't access staff or instructor endpoints.
"""
self.client.login(username=self.user.username, password='test')
for endpoint, args in self.staff_level_endpoints:
self._access_endpoint(
endpoint,
args,
403,
"Student should not be allowed to access endpoint " + endpoint
)
for endpoint, args in self.instructor_level_endpoints:
self._access_endpoint(
endpoint,
args,
403,
"Student should not be allowed to access endpoint " + endpoint
)
def test_staff_level(self):
"""
Ensure that a staff member can't access instructor endpoints.
"""
staff_member = StaffFactory(course_key=self.course.id)
CourseEnrollment.enroll(staff_member, self.course.id)
self.client.login(username=staff_member.username, password='test')
# Try to promote to forums admin - not working
# update_forum_role(self.course.id, staff_member, FORUM_ROLE_ADMINISTRATOR, 'allow')
for endpoint, args in self.staff_level_endpoints:
# TODO: make these work
if endpoint in ['update_forum_role_membership', 'proxy_legacy_analytics', 'list_forum_members']:
continue
self._access_endpoint(
endpoint,
args,
200,
"Staff member should be allowed to access endpoint " + endpoint
)
for endpoint, args in self.instructor_level_endpoints:
self._access_endpoint(
endpoint,
args,
403,
"Staff member should not be allowed to access endpoint " + endpoint
)
    def test_instructor_level(self):
        """
        Ensure that an instructor member can access all endpoints.
        """
        inst = InstructorFactory(course_key=self.course.id)
        CourseEnrollment.enroll(inst, self.course.id)
        self.client.login(username=inst.username, password='test')
        # Instructors should be able to hit every staff-level endpoint...
        for endpoint, args in self.staff_level_endpoints:
            # TODO: make these work
            if endpoint in ['update_forum_role_membership', 'proxy_legacy_analytics']:
                continue
            self._access_endpoint(
                endpoint,
                args,
                200,
                "Instructor should be allowed to access endpoint " + endpoint
            )
        # ...and every instructor-level endpoint as well.
        for endpoint, args in self.instructor_level_endpoints:
            # TODO: make this work
            if endpoint in ['rescore_problem']:
                continue
            self._access_endpoint(
                endpoint,
                args,
                200,
                "Instructor should be allowed to access endpoint " + endpoint
            )
@ddt.ddt
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorAPIEnrollment(ModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Test enrollment modification endpoint.
    This test does NOT exhaustively test state changes, that is the
    job of test_enrollment. This tests the response and action switch.
    """
    def setUp(self):
        # Fixture: a course with a logged-in instructor, one enrolled student,
        # one registered-but-unenrolled student, one invited-but-unregistered
        # email, and one completely unknown email address.
        self.request = RequestFactory().request()
        self.course = CourseFactory.create()
        self.instructor = InstructorFactory(course_key=self.course.id)
        self.client.login(username=self.instructor.username, password='test')
        self.enrolled_student = UserFactory(username='EnrolledStudent', first_name='Enrolled', last_name='Student')
        CourseEnrollment.enroll(
            self.enrolled_student,
            self.course.id
        )
        self.notenrolled_student = UserFactory(username='NotEnrolledStudent', first_name='NotEnrolled', last_name='Student')
        # Create invited, but not registered, user
        cea = CourseEnrollmentAllowed(email='robot-allowed@robot.org', course_id=self.course.id)
        cea.save()
        self.allowed_email = 'robot-allowed@robot.org'
        self.notregistered_email = 'robot-not-an-email-yet@robot.org'
        self.assertEqual(User.objects.filter(email=self.notregistered_email).count(), 0)
        # Email URL values
        self.site_name = microsite.get_value(
            'SITE_NAME',
            settings.SITE_NAME
        )
        self.about_path = '/courses/MITx/999/Robot_Super_Course/about'
        self.course_path = '/courses/MITx/999/Robot_Super_Course/'
        # uncomment to enable printing of large diffs
        # from failed assertions in the event of a test failure.
        # (comment because pylint C0103)
        # self.maxDiff = None
    def tearDown(self):
        """
        Undo all patches.
        """
        patch.stopall()
    def test_missing_params(self):
        """ Test missing all query parameters. """
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 400)
    def test_bad_action(self):
        """ Test with an invalid action. """
        action = 'robot-not-an-action'
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {'identifiers': self.enrolled_student.email, 'action': action})
        self.assertEqual(response.status_code, 400)
    def test_invalid_email(self):
        """ A malformed email is reported per-result as an invalid identifier, not an error status. """
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {'identifiers': 'percivaloctavius@', 'action': 'enroll', 'email_students': False})
        self.assertEqual(response.status_code, 200)
        # test the response data
        expected = {
            "action": "enroll",
            'auto_enroll': False,
            "results": [
                {
                    "identifier": 'percivaloctavius@',
                    "invalidIdentifier": True,
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)
    def test_invalid_username(self):
        """ An unknown username is reported per-result as an invalid identifier. """
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {'identifiers': 'percivaloctavius', 'action': 'enroll', 'email_students': False})
        self.assertEqual(response.status_code, 200)
        # test the response data
        expected = {
            "action": "enroll",
            'auto_enroll': False,
            "results": [
                {
                    "identifier": 'percivaloctavius',
                    "invalidIdentifier": True,
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)
    def test_enroll_with_username(self):
        """ Enroll an existing user identified by username rather than email. """
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {'identifiers': self.notenrolled_student.username, 'action': 'enroll', 'email_students': False})
        self.assertEqual(response.status_code, 200)
        # test the response data
        expected = {
            "action": "enroll",
            'auto_enroll': False,
            "results": [
                {
                    "identifier": self.notenrolled_student.username,
                    "before": {
                        "enrollment": False,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": False,
                    },
                    "after": {
                        "enrollment": True,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": False,
                    }
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)
    def test_enroll_without_email(self):
        """ Enroll an existing user by email with notification emails disabled: no mail is sent. """
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {'identifiers': self.notenrolled_student.email, 'action': 'enroll', 'email_students': False})
        print "type(self.notenrolled_student.email): {}".format(type(self.notenrolled_student.email))
        self.assertEqual(response.status_code, 200)
        # test that the user is now enrolled
        user = User.objects.get(email=self.notenrolled_student.email)
        self.assertTrue(CourseEnrollment.is_enrolled(user, self.course.id))
        # test the response data
        expected = {
            "action": "enroll",
            "auto_enroll": False,
            "results": [
                {
                    "identifier": self.notenrolled_student.email,
                    "before": {
                        "enrollment": False,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": False,
                    },
                    "after": {
                        "enrollment": True,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": False,
                    }
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)
        # Check the outbox
        self.assertEqual(len(mail.outbox), 0)
    @ddt.data('http', 'https')
    def test_enroll_with_email(self, protocol):
        """ Enroll an existing user with email notification and check the notification body. """
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        params = {'identifiers': self.notenrolled_student.email, 'action': 'enroll', 'email_students': True}
        environ = {'wsgi.url_scheme': protocol}
        response = self.client.get(url, params, **environ)
        print "type(self.notenrolled_student.email): {}".format(type(self.notenrolled_student.email))
        self.assertEqual(response.status_code, 200)
        # test that the user is now enrolled
        user = User.objects.get(email=self.notenrolled_student.email)
        self.assertTrue(CourseEnrollment.is_enrolled(user, self.course.id))
        # test the response data
        expected = {
            "action": "enroll",
            "auto_enroll": False,
            "results": [
                {
                    "identifier": self.notenrolled_student.email,
                    "before": {
                        "enrollment": False,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": False,
                    },
                    "after": {
                        "enrollment": True,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": False,
                    }
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)
        # Check the outbox
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(
            mail.outbox[0].subject,
            'You have been enrolled in Robot Super Course'
        )
        # The email body must link with the same scheme the request used.
        self.assertEqual(
            mail.outbox[0].body,
            "Dear NotEnrolled Student\n\nYou have been enrolled in Robot Super Course "
            "at edx.org by a member of the course staff. "
            "The course should now appear on your edx.org dashboard.\n\n"
            "To start accessing course materials, please visit "
            "{proto}://{site}{course_path}\n\n----\n"
            "This email was automatically sent from edx.org to NotEnrolled Student".format(
                proto=protocol, site=self.site_name, course_path=self.course_path
            )
        )
    @ddt.data('http', 'https')
    def test_enroll_with_email_not_registered(self, protocol):
        """ Enrolling an unregistered email sends a registration invitation instead. """
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True}
        environ = {'wsgi.url_scheme': protocol}
        response = self.client.get(url, params, **environ)
        self.assertEqual(response.status_code, 200)
        # Check the outbox
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(
            mail.outbox[0].subject,
            'You have been invited to register for Robot Super Course'
        )
        self.assertEqual(
            mail.outbox[0].body,
            "Dear student,\n\nYou have been invited to join Robot Super Course at edx.org by a member of the course staff.\n\n"
            "To finish your registration, please visit {proto}://{site}/register and fill out the "
            "registration form making sure to use robot-not-an-email-yet@robot.org in the E-mail field.\n"
            "Once you have registered and activated your account, "
            "visit {proto}://{site}{about_path} to join the course.\n\n----\n"
            "This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
                proto=protocol, site=self.site_name, about_path=self.about_path
            )
        )
    @ddt.data('http', 'https')
    @patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
    def test_enroll_email_not_registered_mktgsite(self, protocol):
        """ Invitation wording changes (no about-page link) when the marketing site is enabled. """
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True}
        environ = {'wsgi.url_scheme': protocol}
        response = self.client.get(url, params, **environ)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            mail.outbox[0].body,
            "Dear student,\n\nYou have been invited to join Robot Super Course at edx.org by a member of the course staff.\n\n"
            "To finish your registration, please visit {proto}://{site}/register and fill out the registration form "
            "making sure to use robot-not-an-email-yet@robot.org in the E-mail field.\n"
            "You can then enroll in Robot Super Course.\n\n----\n"
            "This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
                proto=protocol, site=self.site_name
            )
        )
    @ddt.data('http', 'https')
    def test_enroll_with_email_not_registered_autoenroll(self, protocol):
        """ With auto_enroll, the invitation promises the course will appear on the dashboard. """
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True, 'auto_enroll': True}
        environ = {'wsgi.url_scheme': protocol}
        response = self.client.get(url, params, **environ)
        print "type(self.notregistered_email): {}".format(type(self.notregistered_email))
        self.assertEqual(response.status_code, 200)
        # Check the outbox
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(
            mail.outbox[0].subject,
            'You have been invited to register for Robot Super Course'
        )
        self.assertEqual(
            mail.outbox[0].body,
            "Dear student,\n\nYou have been invited to join Robot Super Course at edx.org by a member of the course staff.\n\n"
            "To finish your registration, please visit {proto}://{site}/register and fill out the registration form "
            "making sure to use robot-not-an-email-yet@robot.org in the E-mail field.\n"
            "Once you have registered and activated your account, you will see Robot Super Course listed on your dashboard.\n\n----\n"
            "This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
                proto=protocol, site=self.site_name
            )
        )
    def test_unenroll_without_email(self):
        """ Unenroll an enrolled student with notifications disabled: no mail is sent. """
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {'identifiers': self.enrolled_student.email, 'action': 'unenroll', 'email_students': False})
        print "type(self.enrolled_student.email): {}".format(type(self.enrolled_student.email))
        self.assertEqual(response.status_code, 200)
        # test that the user is now unenrolled
        user = User.objects.get(email=self.enrolled_student.email)
        self.assertFalse(CourseEnrollment.is_enrolled(user, self.course.id))
        # test the response data
        expected = {
            "action": "unenroll",
            "auto_enroll": False,
            "results": [
                {
                    "identifier": self.enrolled_student.email,
                    "before": {
                        "enrollment": True,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": False,
                    },
                    "after": {
                        "enrollment": False,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": False,
                    }
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)
        # Check the outbox
        self.assertEqual(len(mail.outbox), 0)
    def test_unenroll_with_email(self):
        """ Unenroll an enrolled student and check the un-enrollment notification email. """
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {'identifiers': self.enrolled_student.email, 'action': 'unenroll', 'email_students': True})
        print "type(self.enrolled_student.email): {}".format(type(self.enrolled_student.email))
        self.assertEqual(response.status_code, 200)
        # test that the user is now unenrolled
        user = User.objects.get(email=self.enrolled_student.email)
        self.assertFalse(CourseEnrollment.is_enrolled(user, self.course.id))
        # test the response data
        expected = {
            "action": "unenroll",
            "auto_enroll": False,
            "results": [
                {
                    "identifier": self.enrolled_student.email,
                    "before": {
                        "enrollment": True,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": False,
                    },
                    "after": {
                        "enrollment": False,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": False,
                    }
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)
        # Check the outbox
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(
            mail.outbox[0].subject,
            'You have been un-enrolled from Robot Super Course'
        )
        self.assertEqual(
            mail.outbox[0].body,
            "Dear Enrolled Student\n\nYou have been un-enrolled in Robot Super Course "
            "at edx.org by a member of the course staff. "
            "The course will no longer appear on your edx.org dashboard.\n\n"
            "Your other courses have not been affected.\n\n----\n"
            "This email was automatically sent from edx.org to Enrolled Student"
        )
    def test_unenroll_with_email_allowed_student(self):
        """ Unenrolling an invited-but-unregistered email clears the allowance and notifies them. """
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {'identifiers': self.allowed_email, 'action': 'unenroll', 'email_students': True})
        print "type(self.allowed_email): {}".format(type(self.allowed_email))
        self.assertEqual(response.status_code, 200)
        # test the response data
        expected = {
            "action": "unenroll",
            "auto_enroll": False,
            "results": [
                {
                    "identifier": self.allowed_email,
                    "before": {
                        "enrollment": False,
                        "auto_enroll": False,
                        "user": False,
                        "allowed": True,
                    },
                    "after": {
                        "enrollment": False,
                        "auto_enroll": False,
                        "user": False,
                        "allowed": False,
                    }
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)
        # Check the outbox
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(
            mail.outbox[0].subject,
            'You have been un-enrolled from Robot Super Course'
        )
        self.assertEqual(
            mail.outbox[0].body,
            "Dear Student,\n\nYou have been un-enrolled from course Robot Super Course by a member of the course staff. "
            "Please disregard the invitation previously sent.\n\n----\n"
            "This email was automatically sent from edx.org to robot-allowed@robot.org"
        )
    @ddt.data('http', 'https')
    @patch('instructor.enrollment.uses_shib')
    def test_enroll_with_email_not_registered_with_shib(self, protocol, mock_uses_shib):
        """ With a Shibboleth course, the invitation skips the /register flow and links the about page. """
        mock_uses_shib.return_value = True
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True}
        environ = {'wsgi.url_scheme': protocol}
        response = self.client.get(url, params, **environ)
        self.assertEqual(response.status_code, 200)
        # Check the outbox
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(
            mail.outbox[0].subject,
            'You have been invited to register for Robot Super Course'
        )
        self.assertEqual(
            mail.outbox[0].body,
            "Dear student,\n\nYou have been invited to join Robot Super Course at edx.org by a member of the course staff.\n\n"
            "To access the course visit {proto}://{site}{about_path} and register for the course.\n\n----\n"
            "This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
                proto=protocol, site=self.site_name, about_path=self.about_path
            )
        )
    @patch('instructor.enrollment.uses_shib')
    @patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
    def test_enroll_email_not_registered_shib_mktgsite(self, mock_uses_shib):
        """ Shibboleth + marketing site: the invitation carries no registration link at all. """
        # Try with marketing site enabled and shib on
        mock_uses_shib.return_value = True
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        # Try with marketing site enabled
        with patch.dict('django.conf.settings.FEATURES', {'ENABLE_MKTG_SITE': True}):
            response = self.client.get(url, {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            mail.outbox[0].body,
            "Dear student,\n\nYou have been invited to join Robot Super Course at edx.org by a member of the course staff.\n\n----\n"
            "This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org"
        )
    @ddt.data('http', 'https')
    @patch('instructor.enrollment.uses_shib')
    def test_enroll_with_email_not_registered_with_shib_autoenroll(self, protocol, mock_uses_shib):
        """ Shibboleth + auto_enroll: the invitation links directly to the course and asks to login. """
        mock_uses_shib.return_value = True
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True, 'auto_enroll': True}
        environ = {'wsgi.url_scheme': protocol}
        response = self.client.get(url, params, **environ)
        print "type(self.notregistered_email): {}".format(type(self.notregistered_email))
        self.assertEqual(response.status_code, 200)
        # Check the outbox
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(
            mail.outbox[0].subject,
            'You have been invited to register for Robot Super Course'
        )
        self.assertEqual(
            mail.outbox[0].body,
            "Dear student,\n\nYou have been invited to join Robot Super Course at edx.org by a member of the course staff.\n\n"
            "To access the course visit {proto}://{site}{course_path} and login.\n\n----\n"
            "This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
                proto=protocol, site=self.site_name, course_path=self.course_path
            )
        )
@ddt.ddt
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorAPIBulkBetaEnrollment(ModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Test bulk beta modify access endpoint.
    """
    def setUp(self):
        # Fixture: a course with a logged-in instructor, an existing beta
        # tester, a registered-but-unenrolled student, and an unknown email.
        self.course = CourseFactory.create()
        self.instructor = InstructorFactory(course_key=self.course.id)
        self.client.login(username=self.instructor.username, password='test')
        self.beta_tester = BetaTesterFactory(course_key=self.course.id)
        CourseEnrollment.enroll(
            self.beta_tester,
            self.course.id
        )
        self.assertTrue(CourseBetaTesterRole(self.course.id).has_user(self.beta_tester))
        self.notenrolled_student = UserFactory(username='NotEnrolledStudent')
        self.notregistered_email = 'robot-not-an-email-yet@robot.org'
        self.assertEqual(User.objects.filter(email=self.notregistered_email).count(), 0)
        self.request = RequestFactory().request()
        # Email URL values
        self.site_name = microsite.get_value(
            'SITE_NAME',
            settings.SITE_NAME
        )
        self.about_path = '/courses/MITx/999/Robot_Super_Course/about'
        self.course_path = '/courses/MITx/999/Robot_Super_Course/'
        # uncomment to enable printing of large diffs
        # from failed assertions in the event of a test failure.
        # (comment because pylint C0103)
        # self.maxDiff = None
    def test_missing_params(self):
        """ Test missing all query parameters. """
        url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 400)
    def test_bad_action(self):
        """ Test with an invalid action. """
        action = 'robot-not-an-action'
        url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {'identifiers': self.beta_tester.email, 'action': action})
        self.assertEqual(response.status_code, 400)
    def add_notenrolled(self, response, identifier):
        """
        Test Helper Method (not a test, called by other tests)
        Takes a client response from a call to bulk_beta_modify_access with 'email_students': False,
        and the student identifier (email or username) given as 'identifiers' in the request.
        Asserts the response returns cleanly, that the student was added as a beta tester, and the
        response properly contains their identifier, 'error': False, and 'userDoesNotExist': False.
        Additionally asserts no email was sent.
        """
        self.assertEqual(response.status_code, 200)
        self.assertTrue(CourseBetaTesterRole(self.course.id).has_user(self.notenrolled_student))
        # test the response data
        expected = {
            "action": "add",
            "results": [
                {
                    "identifier": identifier,
                    "error": False,
                    "userDoesNotExist": False
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)
        # Check the outbox
        self.assertEqual(len(mail.outbox), 0)
    def test_add_notenrolled_email(self):
        """ Add a beta tester by email; without auto_enroll they stay unenrolled. """
        url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': False})
        self.add_notenrolled(response, self.notenrolled_student.email)
        self.assertFalse(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))
    def test_add_notenrolled_email_autoenroll(self):
        """ Add a beta tester by email with auto_enroll: they are also enrolled. """
        url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': False, 'auto_enroll': True})
        self.add_notenrolled(response, self.notenrolled_student.email)
        self.assertTrue(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))
    def test_add_notenrolled_username(self):
        """ Add a beta tester by username; without auto_enroll they stay unenrolled. """
        url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {'identifiers': self.notenrolled_student.username, 'action': 'add', 'email_students': False})
        self.add_notenrolled(response, self.notenrolled_student.username)
        self.assertFalse(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))
    def test_add_notenrolled_username_autoenroll(self):
        """ Add a beta tester by username with auto_enroll: they are also enrolled. """
        url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {'identifiers': self.notenrolled_student.username, 'action': 'add', 'email_students': False, 'auto_enroll': True})
        self.add_notenrolled(response, self.notenrolled_student.username)
        self.assertTrue(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))
    @ddt.data('http', 'https')
    def test_add_notenrolled_with_email(self, protocol):
        """ Add a beta tester with notification: the invite links the about page. """
        url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        params = {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': True}
        environ = {'wsgi.url_scheme': protocol}
        response = self.client.get(url, params, **environ)
        self.assertEqual(response.status_code, 200)
        self.assertTrue(CourseBetaTesterRole(self.course.id).has_user(self.notenrolled_student))
        # test the response data
        expected = {
            "action": "add",
            "results": [
                {
                    "identifier": self.notenrolled_student.email,
                    "error": False,
                    "userDoesNotExist": False
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)
        # Check the outbox
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(
            mail.outbox[0].subject,
            'You have been invited to a beta test for Robot Super Course'
        )
        self.assertEqual(
            mail.outbox[0].body,
            u"Dear {student_name}\n\nYou have been invited to be a beta tester "
            "for Robot Super Course at edx.org by a member of the course staff.\n\n"
            "Visit {proto}://{site}{about_path} to join "
            "the course and begin the beta test.\n\n----\n"
            "This email was automatically sent from edx.org to {student_email}".format(
                student_name=self.notenrolled_student.profile.name,
                student_email=self.notenrolled_student.email,
                proto=protocol,
                site=self.site_name,
                about_path=self.about_path
            )
        )
    @ddt.data('http', 'https')
    def test_add_notenrolled_with_email_autoenroll(self, protocol):
        """ Add with notification and auto_enroll: the invite links the courseware directly. """
        url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        params = {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': True, 'auto_enroll': True}
        environ = {'wsgi.url_scheme': protocol}
        response = self.client.get(url, params, **environ)
        self.assertEqual(response.status_code, 200)
        self.assertTrue(CourseBetaTesterRole(self.course.id).has_user(self.notenrolled_student))
        # test the response data
        expected = {
            "action": "add",
            "results": [
                {
                    "identifier": self.notenrolled_student.email,
                    "error": False,
                    "userDoesNotExist": False
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)
        # Check the outbox
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(
            mail.outbox[0].subject,
            'You have been invited to a beta test for Robot Super Course'
        )
        self.assertEqual(
            mail.outbox[0].body,
            u"Dear {student_name}\n\nYou have been invited to be a beta tester "
            "for Robot Super Course at edx.org by a member of the course staff.\n\n"
            "To start accessing course materials, please visit "
            "{proto}://{site}{course_path}\n\n----\n"
            "This email was automatically sent from edx.org to {student_email}".format(
                student_name=self.notenrolled_student.profile.name,
                student_email=self.notenrolled_student.email,
                proto=protocol,
                site=self.site_name,
                course_path=self.course_path
            )
        )
    @patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
    def test_add_notenrolled_email_mktgsite(self):
        """ With the marketing site enabled, the invite points at edx.org instead of an about page. """
        # Try with marketing site enabled
        url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': True})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            mail.outbox[0].body,
            u"Dear {0}\n\nYou have been invited to be a beta tester "
            "for Robot Super Course at edx.org by a member of the course staff.\n\n"
            "Visit edx.org to enroll in the course and begin the beta test.\n\n----\n"
            "This email was automatically sent from edx.org to {1}".format(
                self.notenrolled_student.profile.name,
                self.notenrolled_student.email,
            )
        )
    def test_enroll_with_email_not_registered(self):
        """ Adding an unknown email reports error/userDoesNotExist and sends no mail. """
        # User doesn't exist
        url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {'identifiers': self.notregistered_email, 'action': 'add', 'email_students': True})
        self.assertEqual(response.status_code, 200)
        # test the response data
        expected = {
            "action": "add",
            "results": [
                {
                    "identifier": self.notregistered_email,
                    "error": True,
                    "userDoesNotExist": True
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)
        # Check the outbox
        self.assertEqual(len(mail.outbox), 0)
    def test_remove_without_email(self):
        """ Remove a beta tester with notifications disabled: no mail is sent. """
        url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {'identifiers': self.beta_tester.email, 'action': 'remove', 'email_students': False})
        self.assertEqual(response.status_code, 200)
        # Works around a caching bug which supposedly can't happen in prod. The instance here is not ==
        # the instance fetched from the email above which had its cache cleared
        if hasattr(self.beta_tester, '_roles'):
            del self.beta_tester._roles
        self.assertFalse(CourseBetaTesterRole(self.course.id).has_user(self.beta_tester))
        # test the response data
        expected = {
            "action": "remove",
            "results": [
                {
                    "identifier": self.beta_tester.email,
                    "error": False,
                    "userDoesNotExist": False
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)
        # Check the outbox
        self.assertEqual(len(mail.outbox), 0)
    def test_remove_with_email(self):
        """ Remove a beta tester with notification and check the removal email body. """
        url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {'identifiers': self.beta_tester.email, 'action': 'remove', 'email_students': True})
        self.assertEqual(response.status_code, 200)
        # Works around a caching bug which supposedly can't happen in prod. The instance here is not ==
        # the instance fetched from the email above which had its cache cleared
        if hasattr(self.beta_tester, '_roles'):
            del self.beta_tester._roles
        self.assertFalse(CourseBetaTesterRole(self.course.id).has_user(self.beta_tester))
        # test the response data
        expected = {
            "action": "remove",
            "results": [
                {
                    "identifier": self.beta_tester.email,
                    "error": False,
                    "userDoesNotExist": False
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)
        # Check the outbox
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(
            mail.outbox[0].subject,
            'You have been removed from a beta test for Robot Super Course'
        )
        self.assertEqual(
            mail.outbox[0].body,
            "Dear {full_name}\n\nYou have been removed as a beta tester for "
            "Robot Super Course at edx.org by a member of the course staff. "
            "The course will remain on your dashboard, but you will no longer "
            "be part of the beta testing group.\n\n"
            "Your other courses have not been affected.\n\n----\n"
            "This email was automatically sent from edx.org to {email_address}".format(
                full_name=self.beta_tester.profile.name,
                email_address=self.beta_tester.email
            )
        )
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorAPILevelsAccess(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test endpoints whereby instructors can change permissions
of other users.
This test does NOT test whether the actions had an effect on the
database, that is the job of test_access.
This tests the response and action switch.
Actually, modify_access does not have a very meaningful
response yet, so only the status code is tested.
"""
    def setUp(self):
        # Fixture: a logged-in acting instructor plus three targets whose
        # access will be granted/revoked: another instructor, a staff
        # member, and an unprivileged user.
        self.course = CourseFactory.create()
        self.instructor = InstructorFactory(course_key=self.course.id)
        self.client.login(username=self.instructor.username, password='test')
        self.other_instructor = InstructorFactory(course_key=self.course.id)
        self.other_staff = StaffFactory(course_key=self.course.id)
        self.other_user = UserFactory()
def test_modify_access_noparams(self):
""" Test missing all query parameters. """
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url)
self.assertEqual(response.status_code, 400)
def test_modify_access_bad_action(self):
""" Test with an invalid action parameter. """
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': self.other_staff.email,
'rolename': 'staff',
'action': 'robot-not-an-action',
})
self.assertEqual(response.status_code, 400)
    def test_modify_access_bad_role(self):
        """ Test with an invalid rolename parameter. """
        url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        # An unrecognized rolename should be rejected with a 400.
        response = self.client.get(url, {
            'unique_student_identifier': self.other_staff.email,
            'rolename': 'robot-not-a-roll',
            'action': 'revoke',
        })
        self.assertEqual(response.status_code, 400)
def test_modify_access_allow(self):
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': self.other_user.email,
'rolename': 'staff',
'action': 'allow',
})
self.assertEqual(response.status_code, 200)
    def test_modify_access_allow_with_uname(self):
        """ Grant the 'staff' role, identifying the target by username instead of email. """
        url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'unique_student_identifier': self.other_instructor.username,
            'rolename': 'staff',
            'action': 'allow',
        })
        self.assertEqual(response.status_code, 200)
def test_modify_access_revoke(self):
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': self.other_staff.email,
'rolename': 'staff',
'action': 'revoke',
})
self.assertEqual(response.status_code, 200)
def test_modify_access_revoke_with_username(self):
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': self.other_staff.username,
'rolename': 'staff',
'action': 'revoke',
})
self.assertEqual(response.status_code, 200)
def test_modify_access_with_fake_user(self):
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': 'GandalfTheGrey',
'rolename': 'staff',
'action': 'revoke',
})
self.assertEqual(response.status_code, 200)
expected = {
'unique_student_identifier': 'GandalfTheGrey',
'userDoesNotExist': True,
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_modify_access_with_inactive_user(self):
self.other_user.is_active = False
self.other_user.save() # pylint: disable=no-member
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': self.other_user.username,
'rolename': 'beta',
'action': 'allow',
})
self.assertEqual(response.status_code, 200)
expected = {
'unique_student_identifier': self.other_user.username,
'inactiveUser': True,
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_modify_access_revoke_not_allowed(self):
""" Test revoking access that a user does not have. """
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': self.other_staff.email,
'rolename': 'instructor',
'action': 'revoke',
})
self.assertEqual(response.status_code, 200)
    def test_modify_access_revoke_self(self):
        """
        Test that an instructor cannot remove instructor privileges from themselves.
        """
        url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'unique_student_identifier': self.instructor.email,
            'rolename': 'instructor',
            'action': 'revoke',
        })
        # The request itself succeeds; the refusal is signalled in the body.
        self.assertEqual(response.status_code, 200)
        # check response content
        expected = {
            'unique_student_identifier': self.instructor.username,
            'rolename': 'instructor',
            'action': 'revoke',
            'removingSelfAsInstructor': True,
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)
def test_list_course_role_members_noparams(self):
""" Test missing all query parameters. """
url = reverse('list_course_role_members', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url)
self.assertEqual(response.status_code, 400)
def test_list_course_role_members_bad_rolename(self):
""" Test with an invalid rolename parameter. """
url = reverse('list_course_role_members', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'rolename': 'robot-not-a-rolename',
})
self.assertEqual(response.status_code, 400)
def test_list_course_role_members_staff(self):
url = reverse('list_course_role_members', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'rolename': 'staff',
})
self.assertEqual(response.status_code, 200)
# check response content
expected = {
'course_id': self.course.id.to_deprecated_string(),
'staff': [
{
'username': self.other_staff.username,
'email': self.other_staff.email,
'first_name': self.other_staff.first_name,
'last_name': self.other_staff.last_name,
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_list_course_role_members_beta(self):
url = reverse('list_course_role_members', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'rolename': 'beta',
})
self.assertEqual(response.status_code, 200)
# check response content
expected = {
'course_id': self.course.id.to_deprecated_string(),
'beta': []
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_update_forum_role_membership(self):
"""
Test update forum role membership with user's email and username.
"""
# Seed forum roles for course.
seed_permissions_roles(self.course.id)
# Test add discussion admin with email.
self.assert_update_forum_role_membership(self.other_user.email, "Administrator", "allow")
# Test revoke discussion admin with email.
self.assert_update_forum_role_membership(self.other_user.email, "Administrator", "revoke")
# Test add discussion moderator with username.
self.assert_update_forum_role_membership(self.other_user.username, "Moderator", "allow")
# Test revoke discussion moderator with username.
self.assert_update_forum_role_membership(self.other_user.username, "Moderator", "revoke")
# Test add discussion community TA with email.
self.assert_update_forum_role_membership(self.other_user.email, "Community TA", "allow")
# Test revoke discussion community TA with username.
self.assert_update_forum_role_membership(self.other_user.username, "Community TA", "revoke")
def assert_update_forum_role_membership(self, unique_student_identifier, rolename, action):
"""
Test update forum role membership.
Get unique_student_identifier, rolename and action and update forum role.
"""
url = reverse('update_forum_role_membership', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(
url,
{
'unique_student_identifier': unique_student_identifier,
'rolename': rolename,
'action': action,
}
)
# Status code should be 200.
self.assertEqual(response.status_code, 200)
user_roles = self.other_user.roles.filter(course_id=self.course.id).values_list("name", flat=True)
if action == 'allow':
self.assertIn(rolename, user_roles)
elif action == 'revoke':
self.assertNotIn(rolename, user_roles)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorAPILevelsDataDump(ModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Test endpoints that show data without side effects.
    """
    def setUp(self):
        # A paid "honor" course mode, a logged-in instructor with a shopping
        # cart, an active 10%-off coupon, and six enrolled students.
        self.course = CourseFactory.create()
        self.course_mode = CourseMode(course_id=self.course.id,
                                      mode_slug="honor",
                                      mode_display_name="honor cert",
                                      min_price=40)
        self.course_mode.save()
        self.instructor = InstructorFactory(course_key=self.course.id)
        self.client.login(username=self.instructor.username, password='test')
        self.cart = Order.get_cart_for_user(self.instructor)
        self.coupon_code = 'abcde'
        self.coupon = Coupon(code=self.coupon_code, description='testing code', course_id=self.course.id,
                             percentage_discount=10, created_by=self.instructor, is_active=True)
        self.coupon.save()
        self.students = [UserFactory() for _ in xrange(6)]
        for student in self.students:
            CourseEnrollment.enroll(student, self.course.id)

    def test_get_ecommerce_purchase_features_csv(self):
        """
        Test that the response from get_purchase_transaction is in csv format.
        """
        PaidCourseRegistration.add_to_order(self.cart, self.course.id)
        self.cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
        url = reverse('get_purchase_transaction', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url + '/csv', {})
        self.assertEqual(response['Content-Type'], 'text/csv')

    def test_get_ecommerce_purchase_features_with_coupon_info(self):
        """
        Test that some minimum of information is formatted
        correctly in the response to get_purchase_transaction
        when a coupon was applied to the purchase.
        """
        PaidCourseRegistration.add_to_order(self.cart, self.course.id)
        url = reverse('get_purchase_transaction', kwargs={'course_id': self.course.id.to_deprecated_string()})
        # using coupon code
        resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code})
        self.assertEqual(resp.status_code, 200)
        self.cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
        response = self.client.get(url, {})
        res_json = json.loads(response.content)
        self.assertIn('students', res_json)
        for res in res_json['students']:
            self.validate_purchased_transaction_response(res, self.cart, self.instructor, self.coupon_code)

    def test_get_ecommerce_purchases_features_without_coupon_info(self):
        """
        Test that some minimum of information is formatted
        correctly in the response to get_purchase_transaction
        when no coupon was used.
        """
        url = reverse('get_purchase_transaction', kwargs={'course_id': self.course.id.to_deprecated_string()})
        carts, instructors = ([] for i in range(2))
        # purchasing the course by different users
        for _ in xrange(3):
            test_instructor = InstructorFactory(course_key=self.course.id)
            self.client.login(username=test_instructor.username, password='test')
            cart = Order.get_cart_for_user(test_instructor)
            carts.append(cart)
            instructors.append(test_instructor)
            PaidCourseRegistration.add_to_order(cart, self.course.id)
            cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
        response = self.client.get(url, {})
        res_json = json.loads(response.content)
        self.assertIn('students', res_json)
        # Rows are expected in the same order the purchases were made.
        for res, i in zip(res_json['students'], xrange(3)):
            self.validate_purchased_transaction_response(res, carts[i], instructors[i], 'None')

    def validate_purchased_transaction_response(self, res, cart, user, code):
        """
        validate purchased transactions attribute values with the response object
        """
        item = cart.orderitem_set.all().select_subclasses()[0]
        self.assertEqual(res['coupon_code'], code)
        self.assertEqual(res['username'], user.username)
        self.assertEqual(res['email'], user.email)
        self.assertEqual(res['list_price'], item.list_price)
        self.assertEqual(res['unit_cost'], item.unit_cost)
        self.assertEqual(res['order_id'], cart.id)
        self.assertEqual(res['orderitem_id'], item.id)

    def test_get_students_features(self):
        """
        Test that some minimum of information is formatted
        correctly in the response to get_students_features.
        """
        url = reverse('get_students_features', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {})
        res_json = json.loads(response.content)
        self.assertIn('students', res_json)
        for student in self.students:
            # Each enrolled student appears exactly once, keyed by username.
            student_json = [
                x for x in res_json['students']
                if x['username'] == student.username
            ][0]
            self.assertEqual(student_json['username'], student.username)
            self.assertEqual(student_json['email'], student.email)

    @patch.object(instructor.views.api, 'anonymous_id_for_user', Mock(return_value='42'))
    @patch.object(instructor.views.api, 'unique_id_for_user', Mock(return_value='41'))
    def test_get_anon_ids(self):
        """
        Test the CSV output for the anonymized user ids.
        """
        url = reverse('get_anon_ids', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {})
        self.assertEqual(response['Content-Type'], 'text/csv')
        body = response.content.replace('\r', '')
        # NOTE(review): assumes deterministic auto-increment user ids (first
        # data row "2", last "7") -- fragile if fixture creation order changes.
        self.assertTrue(body.startswith(
            '"User ID","Anonymized User ID","Course Specific Anonymized User ID"'
            '\n"2","41","42"\n'
        ))
        self.assertTrue(body.endswith('"7","41","42"\n'))

    def test_list_report_downloads(self):
        """ Test that generated report files exposed by the store are listed. """
        url = reverse('list_report_downloads', kwargs={'course_id': self.course.id.to_deprecated_string()})
        with patch('instructor_task.models.LocalFSReportStore.links_for') as mock_links_for:
            mock_links_for.return_value = [
                ('mock_file_name_1', 'https://1.mock.url'),
                ('mock_file_name_2', 'https://2.mock.url'),
            ]
            response = self.client.get(url, {})
        expected_response = {
            "downloads": [
                {
                    "url": "https://1.mock.url",
                    "link": "<a href=\"https://1.mock.url\">mock_file_name_1</a>",
                    "name": "mock_file_name_1"
                },
                {
                    "url": "https://2.mock.url",
                    "link": "<a href=\"https://2.mock.url\">mock_file_name_2</a>",
                    "name": "mock_file_name_2"
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected_response)

    def test_calculate_grades_csv_success(self):
        """ Test that a fresh grade-report request reports generation started. """
        url = reverse('calculate_grades_csv', kwargs={'course_id': self.course.id.to_deprecated_string()})
        with patch('instructor_task.api.submit_calculate_grades_csv') as mock_cal_grades:
            mock_cal_grades.return_value = True
            response = self.client.get(url, {})
        success_status = "Your grade report is being generated! You can view the status of the generation task in the 'Pending Instructor Tasks' section."
        self.assertIn(success_status, response.content)

    def test_calculate_grades_csv_already_running(self):
        """ Test that a duplicate grade-report request reports the running task. """
        url = reverse('calculate_grades_csv', kwargs={'course_id': self.course.id.to_deprecated_string()})
        with patch('instructor_task.api.submit_calculate_grades_csv') as mock_cal_grades:
            mock_cal_grades.side_effect = AlreadyRunningError()
            response = self.client.get(url, {})
        already_running_status = "A grade report generation task is already in progress. Check the 'Pending Instructor Tasks' table for the status of the task. When completed, the report will be available for download in the table below."
        self.assertIn(already_running_status, response.content)

    def test_get_students_features_csv(self):
        """
        Test that the response to get_students_features/csv
        is served as CSV.
        """
        url = reverse('get_students_features', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url + '/csv', {})
        self.assertEqual(response['Content-Type'], 'text/csv')

    def test_get_distribution_no_feature(self):
        """
        Test that get_distribution lists available features
        when supplied no feature parameter.
        """
        url = reverse('get_distribution', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        res_json = json.loads(response.content)
        self.assertEqual(type(res_json['available_features']), list)
        # An empty feature value behaves the same as no feature at all.
        url = reverse('get_distribution', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url + u'?feature=')
        self.assertEqual(response.status_code, 200)
        res_json = json.loads(response.content)
        self.assertEqual(type(res_json['available_features']), list)

    def test_get_distribution_unavailable_feature(self):
        """
        Test that get_distribution fails gracefully with
        an unavailable feature.
        """
        url = reverse('get_distribution', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {'feature': 'robot-not-a-real-feature'})
        self.assertEqual(response.status_code, 400)

    def test_get_distribution_gender(self):
        """
        Test that get_distribution returns the gender distribution
        of the enrolled students.
        """
        url = reverse('get_distribution', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {'feature': 'gender'})
        self.assertEqual(response.status_code, 200)
        res_json = json.loads(response.content)
        # All six factory-created students count as male -- presumably the
        # UserFactory profile default; TODO confirm against the factory.
        self.assertEqual(res_json['feature_results']['data']['m'], 6)
        self.assertEqual(res_json['feature_results']['choices_display_names']['m'], 'Male')
        self.assertEqual(res_json['feature_results']['data']['no_data'], 0)
        self.assertEqual(res_json['feature_results']['choices_display_names']['no_data'], 'No Data')

    def test_get_student_progress_url(self):
        """ Test that progress_url is in the successful response. """
        url = reverse('get_student_progress_url', kwargs={'course_id': self.course.id.to_deprecated_string()})
        url += "?unique_student_identifier={}".format(
            quote(self.students[0].email.encode("utf-8"))
        )
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        res_json = json.loads(response.content)
        self.assertIn('progress_url', res_json)

    def test_get_student_progress_url_from_uname(self):
        """ Test that progress_url is in the successful response. """
        url = reverse('get_student_progress_url', kwargs={'course_id': self.course.id.to_deprecated_string()})
        url += "?unique_student_identifier={}".format(
            quote(self.students[0].username.encode("utf-8"))
        )
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        res_json = json.loads(response.content)
        self.assertIn('progress_url', res_json)

    def test_get_student_progress_url_noparams(self):
        """ Test that the endpoint 404's without the required query params. """
        url = reverse('get_student_progress_url', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 400)

    def test_get_student_progress_url_nostudent(self):
        """ Test that the endpoint 400's when requesting an unknown email. """
        url = reverse('get_student_progress_url', kwargs={'course_id': self.course.id.to_deprecated_string()})
        # NOTE(review): no unique_student_identifier is actually sent, so this
        # duplicates the noparams case instead of exercising an unknown email.
        response = self.client.get(url)
        self.assertEqual(response.status_code, 400)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorAPIRegradeTask(ModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Test endpoints whereby instructors can change student grades.
    This includes resetting attempts and starting rescore tasks.

    This test does NOT test whether the actions had an effect on the
    database, that is the job of task tests and test_enrollment.
    """
    def setUp(self):
        # One enrolled student with a StudentModule carrying 10 attempts,
        # so reset/delete actions have state to act on.
        self.course = CourseFactory.create()
        self.instructor = InstructorFactory(course_key=self.course.id)
        self.client.login(username=self.instructor.username, password='test')
        self.student = UserFactory()
        CourseEnrollment.enroll(self.student, self.course.id)
        self.problem_location = msk_from_problem_urlname(
            self.course.id,
            'robot-some-problem-urlname'
        )
        self.problem_urlname = self.problem_location.to_deprecated_string()
        self.module_to_reset = StudentModule.objects.create(
            student=self.student,
            course_id=self.course.id,
            module_state_key=self.problem_location,
            state=json.dumps({'attempts': 10}),
        )

    def test_reset_student_attempts_deletall(self):
        """ Make sure no one can delete all students' state on a problem. """
        url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'problem_to_reset': self.problem_urlname,
            'all_students': True,
            'delete_module': True,
        })
        self.assertEqual(response.status_code, 400)

    def test_reset_student_attempts_single(self):
        """ Test reset single student attempts. """
        url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'problem_to_reset': self.problem_urlname,
            'unique_student_identifier': self.student.email,
        })
        self.assertEqual(response.status_code, 200)
        # make sure problem attempts have been reset.
        changed_module = StudentModule.objects.get(pk=self.module_to_reset.pk)
        self.assertEqual(
            json.loads(changed_module.state)['attempts'],
            0
        )

    # mock out the function which should be called to execute the action.
    @patch.object(instructor_task.api, 'submit_reset_problem_attempts_for_all_students')
    def test_reset_student_attempts_all(self, act):
        """ Test reset all student attempts. """
        url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'problem_to_reset': self.problem_urlname,
            'all_students': True,
        })
        self.assertEqual(response.status_code, 200)
        self.assertTrue(act.called)

    def test_reset_student_attempts_missingmodule(self):
        """ Test reset for non-existent problem. """
        url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'problem_to_reset': 'robot-not-a-real-module',
            'unique_student_identifier': self.student.email,
        })
        self.assertEqual(response.status_code, 400)

    def test_reset_student_attempts_delete(self):
        """ Test delete single student state. """
        url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'problem_to_reset': self.problem_urlname,
            'unique_student_identifier': self.student.email,
            'delete_module': True,
        })
        self.assertEqual(response.status_code, 200)
        # make sure the module has been deleted
        self.assertEqual(
            StudentModule.objects.filter(
                student=self.module_to_reset.student,
                course_id=self.module_to_reset.course_id,
                # module_id=self.module_to_reset.module_id,
            ).count(),
            0
        )

    def test_reset_student_attempts_nonsense(self):
        """ Test failure with both unique_student_identifier and all_students. """
        url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'problem_to_reset': self.problem_urlname,
            'unique_student_identifier': self.student.email,
            'all_students': True,
        })
        self.assertEqual(response.status_code, 400)

    @patch.object(instructor_task.api, 'submit_rescore_problem_for_student')
    def test_rescore_problem_single(self, act):
        """ Test rescoring of a single student. """
        url = reverse('rescore_problem', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'problem_to_reset': self.problem_urlname,
            'unique_student_identifier': self.student.email,
        })
        self.assertEqual(response.status_code, 200)
        self.assertTrue(act.called)

    @patch.object(instructor_task.api, 'submit_rescore_problem_for_student')
    def test_rescore_problem_single_from_uname(self, act):
        """ Test rescoring of a single student identified by username. """
        url = reverse('rescore_problem', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'problem_to_reset': self.problem_urlname,
            'unique_student_identifier': self.student.username,
        })
        self.assertEqual(response.status_code, 200)
        self.assertTrue(act.called)

    @patch.object(instructor_task.api, 'submit_rescore_problem_for_all_students')
    def test_rescore_problem_all(self, act):
        """ Test rescoring for all students. """
        url = reverse('rescore_problem', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'problem_to_reset': self.problem_urlname,
            'all_students': True,
        })
        self.assertEqual(response.status_code, 200)
        self.assertTrue(act.called)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': True, 'REQUIRE_COURSE_EMAIL_AUTH': False})
class TestInstructorSendEmail(ModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Checks that only instructors have access to email endpoints, and that
    these endpoints are only accessible with courses that actually exist,
    only with valid email messages.
    """
    def setUp(self):
        self.course = CourseFactory.create()
        self.instructor = InstructorFactory(course_key=self.course.id)
        self.client.login(username=self.instructor.username, password='test')
        # Non-ASCII subject/message exercise unicode handling end to end.
        test_subject = u'\u1234 test subject'
        test_message = u'\u6824 test message'
        self.full_test_message = {
            'send_to': 'staff',
            'subject': test_subject,
            'message': test_message,
        }

    def test_send_email_as_logged_in_instructor(self):
        """ A logged-in instructor can send a complete email. """
        url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, self.full_test_message)
        self.assertEqual(response.status_code, 200)

    def test_send_email_but_not_logged_in(self):
        """ Anonymous requests are forbidden. """
        self.client.logout()
        url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, self.full_test_message)
        self.assertEqual(response.status_code, 403)

    def test_send_email_but_not_staff(self):
        """ A plain student is forbidden from sending email. """
        self.client.logout()
        student = UserFactory()
        self.client.login(username=student.username, password='test')
        url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, self.full_test_message)
        self.assertEqual(response.status_code, 403)

    def test_send_email_but_course_not_exist(self):
        """ Sending to a nonexistent course must not succeed. """
        url = reverse('send_email', kwargs={'course_id': 'GarbageCourse/DNE/NoTerm'})
        response = self.client.post(url, self.full_test_message)
        self.assertNotEqual(response.status_code, 200)

    def test_send_email_no_sendto(self):
        """ Omitting send_to is a bad request. """
        url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {
            'subject': 'test subject',
            'message': 'test message',
        })
        self.assertEqual(response.status_code, 400)

    def test_send_email_no_subject(self):
        """ Omitting subject is a bad request. """
        url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {
            'send_to': 'staff',
            'message': 'test message',
        })
        self.assertEqual(response.status_code, 400)

    def test_send_email_no_message(self):
        """ Omitting message is a bad request. """
        url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {
            'send_to': 'staff',
            'subject': 'test subject',
        })
        self.assertEqual(response.status_code, 400)
class MockCompletionInfo(object):
    """Stand-in for get_task_completion_info that alternates outcomes."""
    # Class-level default; each instance shadows it on first increment.
    times_called = 0

    def mock_get_task_completion_info(self, *args):  # pylint: disable=unused-argument
        """Return (success, message), alternating failure then success."""
        self.times_called += 1
        succeeded = self.times_called % 2 == 0
        message = 'Task Completed' if succeeded else 'Task Errored In Some Way'
        return succeeded, message
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorAPITaskLists(ModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Test instructor task list endpoint.
    """
    class FakeTask(object):
        """ Fake task object """
        # Attributes the endpoint serializes for each task.
        FEATURES = [
            'task_type',
            'task_input',
            'task_id',
            'requester',
            'task_state',
            'created',
            'status',
            'task_message',
            'duration_sec'
        ]

        def __init__(self, completion):
            # ``completion`` is a callable returning (success, message),
            # e.g. MockCompletionInfo.mock_get_task_completion_info.
            for feature in self.FEATURES:
                setattr(self, feature, 'expected')
            # created needs to be a datetime
            self.created = datetime.datetime(2013, 10, 25, 11, 42, 35)
            # set 'status' and 'task_message' attrs
            success, task_message = completion()
            if success:
                self.status = "Complete"
            else:
                self.status = "Incomplete"
            self.task_message = task_message
            # Set 'task_output' attr, which will be parsed to the 'duration_sec' attr.
            self.task_output = '{"duration_ms": 1035000}'
            self.duration_sec = 1035000 / 1000.0

        def make_invalid_output(self):
            """Munge task_output to be invalid json"""
            self.task_output = 'HI MY NAME IS INVALID JSON'
            # This should be given the value of 'unknown' if the task output
            # can't be properly parsed
            self.duration_sec = 'unknown'

        def to_dict(self):
            """ Convert fake task to dictionary representation. """
            attr_dict = {key: getattr(self, key) for key in self.FEATURES}
            attr_dict['created'] = attr_dict['created'].isoformat()
            return attr_dict

    def setUp(self):
        # Seven fake tasks with alternating success/failure; the last one has
        # deliberately unparseable task_output.
        self.course = CourseFactory.create()
        self.instructor = InstructorFactory(course_key=self.course.id)
        self.client.login(username=self.instructor.username, password='test')
        self.student = UserFactory()
        CourseEnrollment.enroll(self.student, self.course.id)
        self.problem_location = msk_from_problem_urlname(
            self.course.id,
            'robot-some-problem-urlname'
        )
        self.problem_urlname = self.problem_location.to_deprecated_string()
        self.module = StudentModule.objects.create(
            student=self.student,
            course_id=self.course.id,
            module_state_key=self.problem_location,
            state=json.dumps({'attempts': 10}),
        )
        mock_factory = MockCompletionInfo()
        self.tasks = [self.FakeTask(mock_factory.mock_get_task_completion_info) for _ in xrange(7)]
        self.tasks[-1].make_invalid_output()

    def tearDown(self):
        """
        Undo all patches.
        """
        patch.stopall()

    @patch.object(instructor_task.api, 'get_running_instructor_tasks')
    def test_list_instructor_tasks_running(self, act):
        """ Test list of all running tasks. """
        act.return_value = self.tasks
        url = reverse('list_instructor_tasks', kwargs={'course_id': self.course.id.to_deprecated_string()})
        # A fresh factory so completion alternation matches setUp's sequence.
        mock_factory = MockCompletionInfo()
        with patch('instructor.views.instructor_task_helpers.get_task_completion_info') as mock_completion_info:
            mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
            response = self.client.get(url, {})
        self.assertEqual(response.status_code, 200)
        # check response
        self.assertTrue(act.called)
        expected_tasks = [ftask.to_dict() for ftask in self.tasks]
        actual_tasks = json.loads(response.content)['tasks']
        for exp_task, act_task in zip(expected_tasks, actual_tasks):
            self.assertDictEqual(exp_task, act_task)
        self.assertEqual(actual_tasks, expected_tasks)

    @patch.object(instructor_task.api, 'get_instructor_task_history')
    def test_list_background_email_tasks(self, act):
        """Test list of background email tasks."""
        act.return_value = self.tasks
        url = reverse('list_background_email_tasks', kwargs={'course_id': self.course.id.to_deprecated_string()})
        mock_factory = MockCompletionInfo()
        with patch('instructor.views.instructor_task_helpers.get_task_completion_info') as mock_completion_info:
            mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
            response = self.client.get(url, {})
        self.assertEqual(response.status_code, 200)
        # check response
        self.assertTrue(act.called)
        expected_tasks = [ftask.to_dict() for ftask in self.tasks]
        actual_tasks = json.loads(response.content)['tasks']
        for exp_task, act_task in zip(expected_tasks, actual_tasks):
            self.assertDictEqual(exp_task, act_task)
        self.assertEqual(actual_tasks, expected_tasks)

    @patch.object(instructor_task.api, 'get_instructor_task_history')
    def test_list_instructor_tasks_problem(self, act):
        """ Test list task history for problem. """
        act.return_value = self.tasks
        url = reverse('list_instructor_tasks', kwargs={'course_id': self.course.id.to_deprecated_string()})
        mock_factory = MockCompletionInfo()
        with patch('instructor.views.instructor_task_helpers.get_task_completion_info') as mock_completion_info:
            mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
            response = self.client.get(url, {
                'problem_location_str': self.problem_urlname,
            })
        self.assertEqual(response.status_code, 200)
        # check response
        self.assertTrue(act.called)
        expected_tasks = [ftask.to_dict() for ftask in self.tasks]
        actual_tasks = json.loads(response.content)['tasks']
        for exp_task, act_task in zip(expected_tasks, actual_tasks):
            self.assertDictEqual(exp_task, act_task)
        self.assertEqual(actual_tasks, expected_tasks)

    @patch.object(instructor_task.api, 'get_instructor_task_history')
    def test_list_instructor_tasks_problem_student(self, act):
        """ Test list task history for problem AND student. """
        act.return_value = self.tasks
        url = reverse('list_instructor_tasks', kwargs={'course_id': self.course.id.to_deprecated_string()})
        mock_factory = MockCompletionInfo()
        with patch('instructor.views.instructor_task_helpers.get_task_completion_info') as mock_completion_info:
            mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
            response = self.client.get(url, {
                'problem_location_str': self.problem_urlname,
                'unique_student_identifier': self.student.email,
            })
        self.assertEqual(response.status_code, 200)
        # check response
        self.assertTrue(act.called)
        expected_tasks = [ftask.to_dict() for ftask in self.tasks]
        actual_tasks = json.loads(response.content)['tasks']
        for exp_task, act_task in zip(expected_tasks, actual_tasks):
            self.assertDictEqual(exp_task, act_task)
        self.assertEqual(actual_tasks, expected_tasks)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
@patch.object(instructor_task.api, 'get_instructor_task_history')
class TestInstructorEmailContentList(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test the instructor email content history endpoint.
"""
    def setUp(self):
        # Course plus a logged-in instructor; the fake task/email fixtures
        # are populated lazily by setup_fake_email_info().
        self.course = CourseFactory.create()
        self.instructor = InstructorFactory(course_key=self.course.id)
        self.client.login(username=self.instructor.username, password='test')
        self.tasks = {}
        self.emails = {}
        self.emails_info = {}
    def tearDown(self):
        """
        Undo all patches.
        """
        # Stops every patcher that was started, including the class decorator.
        patch.stopall()
    def setup_fake_email_info(self, num_emails):
        """ Initialize the specified number of fake emails """
        for email_id in range(num_emails):
            # Arbitrary positive send count; it ties each task to its
            # email-info record so the two can be cross-checked later.
            num_sent = random.randint(1, 15401)
            self.tasks[email_id] = FakeContentTask(email_id, num_sent, 'expected')
            self.emails[email_id] = FakeEmail(email_id)
            self.emails_info[email_id] = FakeEmailInfo(self.emails[email_id], num_sent)
def get_matching_mock_email(self, **kwargs):
""" Returns the matching mock emails for the given id """
email_id = kwargs.get('id', 0)
return self.emails[email_id]
    def get_email_content_response(self, num_emails, task_history_request):
        """ Calls the list_email_content endpoint and returns the response """
        self.setup_fake_email_info(num_emails)
        task_history_request.return_value = self.tasks.values()
        url = reverse('list_email_content', kwargs={'course_id': self.course.id.to_deprecated_string()})
        with patch('instructor.views.api.CourseEmail.objects.get') as mock_email_info:
            # Serve the matching fake email instead of hitting the database.
            mock_email_info.side_effect = self.get_matching_mock_email
            response = self.client.get(url, {})
        self.assertEqual(response.status_code, 200)
        return response
def test_content_list_one_email(self, task_history_request):
""" Test listing of bulk emails when email list has one email """
response = self.get_email_content_response(1, task_history_request)
self.assertTrue(task_history_request.called)
email_info = json.loads(response.content)['emails']
# Emails list should have one email
self.assertEqual(len(email_info), 1)
# Email content should be what's expected
expected_message = self.emails[0].html_message
returned_email_info = email_info[0]
received_message = returned_email_info[u'email'][u'html_message']
self.assertEqual(expected_message, received_message)
def test_content_list_no_emails(self, task_history_request):
""" Test listing of bulk emails when email list empty """
response = self.get_email_content_response(0, task_history_request)
self.assertTrue(task_history_request.called)
email_info = json.loads(response.content)['emails']
# Emails list should be empty
self.assertEqual(len(email_info), 0)
def test_content_list_email_content_many(self, task_history_request):
""" Test listing of bulk emails sent large amount of emails """
response = self.get_email_content_response(50, task_history_request)
self.assertTrue(task_history_request.called)
expected_email_info = [email_info.to_dict() for email_info in self.emails_info.values()]
actual_email_info = json.loads(response.content)['emails']
self.assertEqual(len(actual_email_info), 50)
for exp_email, act_email in zip(expected_email_info, actual_email_info):
self.assertDictEqual(exp_email, act_email)
self.assertEqual(actual_email_info, expected_email_info)
def test_list_email_content_error(self, task_history_request):
""" Test handling of error retrieving email """
invalid_task = FakeContentTask(0, 0, 'test')
invalid_task.make_invalid_input()
task_history_request.return_value = [invalid_task]
url = reverse('list_email_content', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {})
self.assertEqual(response.status_code, 200)
self.assertTrue(task_history_request.called)
returned_email_info = json.loads(response.content)['emails']
self.assertEqual(len(returned_email_info), 1)
returned_info = returned_email_info[0]
for info in ['created', 'sent_to', 'email', 'number_sent']:
self.assertEqual(returned_info[info], None)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
@override_settings(ANALYTICS_SERVER_URL="http://robotanalyticsserver.netbot:900/")
@override_settings(ANALYTICS_API_KEY="robot_api_key")
class TestInstructorAPIAnalyticsProxy(ModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Test instructor analytics proxy endpoint.
    """
    class FakeProxyResponse(object):
        """ Fake successful requests response object. """
        def __init__(self):
            self.status_code = requests.status_codes.codes.OK
            self.content = '{"test_content": "robot test content"}'
    class FakeBadProxyResponse(object):
        """ Fake strange-failed requests response object. """
        def __init__(self):
            # Deliberately not an int: exercises the "anything but OK" path.
            self.status_code = 'notok.'
            self.content = '{"test_content": "robot test content"}'
    def setUp(self):
        # Course plus a logged-in instructor; requests.get is patched per-test.
        self.course = CourseFactory.create()
        self.instructor = InstructorFactory(course_key=self.course.id)
        self.client.login(username=self.instructor.username, password='test')
    @patch.object(instructor.views.api.requests, 'get')
    def test_analytics_proxy_url(self, act):
        """ Test legacy analytics proxy url generation. """
        act.return_value = self.FakeProxyResponse()
        url = reverse('proxy_legacy_analytics', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'aname': 'ProblemGradeDistribution'
        })
        self.assertEqual(response.status_code, 200)
        # check request url
        expected_url = "{url}get?aname={aname}&course_id={course_id!s}&apikey={api_key}".format(
            url="http://robotanalyticsserver.netbot:900/",
            aname="ProblemGradeDistribution",
            course_id=self.course.id.to_deprecated_string(),
            api_key="robot_api_key",
        )
        act.assert_called_once_with(expected_url)
    @patch.object(instructor.views.api.requests, 'get')
    def test_analytics_proxy(self, act):
        """
        Test legacy analytics content proxying.
        """
        act.return_value = self.FakeProxyResponse()
        url = reverse('proxy_legacy_analytics', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'aname': 'ProblemGradeDistribution'
        })
        self.assertEqual(response.status_code, 200)
        # check response
        self.assertTrue(act.called)
        expected_res = {'test_content': "robot test content"}
        self.assertEqual(json.loads(response.content), expected_res)
    @patch.object(instructor.views.api.requests, 'get')
    def test_analytics_proxy_reqfailed(self, act):
        """ Test proxy when server reponds with failure. """
        act.return_value = self.FakeBadProxyResponse()
        url = reverse('proxy_legacy_analytics', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'aname': 'ProblemGradeDistribution'
        })
        self.assertEqual(response.status_code, 500)
    @patch.object(instructor.views.api.requests, 'get')
    def test_analytics_proxy_missing_param(self, act):
        """ Test proxy when missing the aname query parameter. """
        act.return_value = self.FakeProxyResponse()
        url = reverse('proxy_legacy_analytics', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {})
        self.assertEqual(response.status_code, 400)
        # The upstream analytics server must not be contacted at all.
        self.assertFalse(act.called)
class TestInstructorAPIHelpers(TestCase):
    """ Test helpers for instructor.api """
    def test_split_input_list(self):
        # Mixed separators: commas, newlines, carriage returns, and spaces.
        strings = []
        lists = []
        strings.append("Lorem@ipsum.dolor, sit@amet.consectetur\nadipiscing@elit.Aenean\r convallis@at.lacus\r, ut@lacinia.Sed")
        lists.append(['Lorem@ipsum.dolor', 'sit@amet.consectetur', 'adipiscing@elit.Aenean', 'convallis@at.lacus', 'ut@lacinia.Sed'])
        for (stng, lst) in zip(strings, lists):
            self.assertEqual(_split_input_list(stng), lst)
    def test_split_input_list_unicode(self):
        self.assertEqual(_split_input_list('robot@robot.edu, robot2@robot.edu'), ['robot@robot.edu', 'robot2@robot.edu'])
        self.assertEqual(_split_input_list(u'robot@robot.edu, robot2@robot.edu'), ['robot@robot.edu', 'robot2@robot.edu'])
        self.assertEqual(_split_input_list(u'robot@robot.edu, robot2@robot.edu'), [u'robot@robot.edu', 'robot2@robot.edu'])
        # Unusual code points must survive splitting untouched (Python 2
        # unichr builds a unicode string).
        scary_unistuff = unichr(40960) + u'abcd' + unichr(1972)
        self.assertEqual(_split_input_list(scary_unistuff), [scary_unistuff])
    def test_msk_from_problem_urlname(self):
        course_id = SlashSeparatedCourseKey('MITx', '6.002x', '2013_Spring')
        name = 'L2Node1'
        output = 'i4x://MITx/6.002x/problem/L2Node1'
        self.assertEqual(msk_from_problem_urlname(course_id, name).to_deprecated_string(), output)
    @raises(ValueError)
    def test_msk_from_problem_urlname_error(self):
        # A plain string is not a course key; helper must raise ValueError.
        args = ('notagoodcourse', 'L2Node1')
        msk_from_problem_urlname(*args)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestDueDateExtensions(ModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Test the due-date extension endpoints (change, reset, and the two
    "show extensions" report views).
    """
    def setUp(self):
        """
        Fixtures: a course with three week units (one containing a homework
        unit), three students with StudentModule state, and an instructor.
        """
        due = datetime.datetime(2010, 5, 12, 2, 42, tzinfo=utc)
        course = CourseFactory.create()
        week1 = ItemFactory.create(due=due)
        week2 = ItemFactory.create(due=due)
        week3 = ItemFactory.create(due=due)
        course.children = [week1.location.to_deprecated_string(), week2.location.to_deprecated_string(),
                           week3.location.to_deprecated_string()]
        homework = ItemFactory.create(
            parent_location=week1.location,
            due=due
        )
        week1.children = [homework.location.to_deprecated_string()]
        # user1 has state in every unit; user2/user3 only in week1 + homework.
        user1 = UserFactory.create()
        StudentModule(
            state='{}',
            student_id=user1.id,
            course_id=course.id,
            module_state_key=week1.location).save()
        StudentModule(
            state='{}',
            student_id=user1.id,
            course_id=course.id,
            module_state_key=week2.location).save()
        StudentModule(
            state='{}',
            student_id=user1.id,
            course_id=course.id,
            module_state_key=week3.location).save()
        StudentModule(
            state='{}',
            student_id=user1.id,
            course_id=course.id,
            module_state_key=homework.location).save()
        user2 = UserFactory.create()
        StudentModule(
            state='{}',
            student_id=user2.id,
            course_id=course.id,
            module_state_key=week1.location).save()
        StudentModule(
            state='{}',
            student_id=user2.id,
            course_id=course.id,
            module_state_key=homework.location).save()
        user3 = UserFactory.create()
        StudentModule(
            state='{}',
            student_id=user3.id,
            course_id=course.id,
            module_state_key=week1.location).save()
        StudentModule(
            state='{}',
            student_id=user3.id,
            course_id=course.id,
            module_state_key=homework.location).save()
        self.course = course
        self.week1 = week1
        self.homework = homework
        self.week2 = week2
        self.user1 = user1
        self.user2 = user2
        self.instructor = InstructorFactory(course_key=course.id)
        self.client.login(username=self.instructor.username, password='test')
    def test_change_due_date(self):
        url = reverse('change_due_date', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'student': self.user1.username,
            'url': self.week1.location.to_deprecated_string(),
            'due_datetime': '12/30/2013 00:00'
        })
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(datetime.datetime(2013, 12, 30, 0, 0, tzinfo=utc),
                         get_extended_due(self.course, self.week1, self.user1))
    def test_reset_date(self):
        # Reuse test_change_due_date to set an extension, then clear it.
        self.test_change_due_date()
        url = reverse('reset_due_date', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {
            'student': self.user1.username,
            'url': self.week1.location.to_deprecated_string(),
        })
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(None,
                         get_extended_due(self.course, self.week1, self.user1))
    def test_show_unit_extensions(self):
        self.test_change_due_date()
        url = reverse('show_unit_extensions',
                      kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {'url': self.week1.location.to_deprecated_string()})
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(json.loads(response.content), {
            u'data': [{u'Extended Due Date': u'2013-12-30 00:00',
                       u'Full Name': self.user1.profile.name,
                       u'Username': self.user1.username}],
            u'header': [u'Username', u'Full Name', u'Extended Due Date'],
            u'title': u'Users with due date extensions for %s' %
            self.week1.display_name})
    def test_show_student_extensions(self):
        self.test_change_due_date()
        url = reverse('show_student_extensions',
                      kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.get(url, {'student': self.user1.username})
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(json.loads(response.content), {
            u'data': [{u'Extended Due Date': u'2013-12-30 00:00',
                       u'Unit': self.week1.display_name}],
            u'header': [u'Unit', u'Extended Due Date'],
            u'title': u'Due date extensions for %s (%s)' % (
                self.user1.profile.name, self.user1.username)})
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
@override_settings(REGISTRATION_CODE_LENGTH=8)
class TestCourseRegistrationCodes(ModuleStoreTestCase):
    """
    Test data dumps for E-commerce Course Registration Codes.
    """
    def setUp(self):
        """
        Fixtures: 12 registration codes, 5 purchased orders, and 5 code
        redemptions (so codes 1-5 count as "spent").
        """
        self.course = CourseFactory.create()
        self.instructor = InstructorFactory(course_key=self.course.id)
        self.client.login(username=self.instructor.username, password='test')
        # Active Registration Codes
        for i in range(12):
            course_registration_code = CourseRegistrationCode(
                code='MyCode0{}'.format(i), course_id=self.course.id.to_deprecated_string(),
                transaction_group_name='Test Group', created_by=self.instructor
            )
            course_registration_code.save()
        for i in range(5):
            order = Order(user=self.instructor, status='purchased')
            order.save()
        # Spent(used) Registration Codes
        for i in range(5):
            # Database ids are 1-based, hence the shift.
            i += 1
            registration_code_redemption = RegistrationCodeRedemption(
                order_id=i, registration_code_id=i, redeemed_by=self.instructor
            )
            registration_code_redemption.save()
    def test_generate_course_registration_codes_csv(self):
        """
        Test to generate a response of all the generated course registration codes
        """
        url = reverse('generate_registration_codes',
                      kwargs={'course_id': self.course.id.to_deprecated_string()})
        data = {'course_registration_code_number': 15.0, 'transaction_group_name': 'Test Group'}
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(response['Content-Type'], 'text/csv')
        body = response.content.replace('\r', '')
        self.assertTrue(body.startswith('"code","course_id","transaction_group_name","created_by","redeemed_by"'))
        # 15 rows + header + trailing newline = 17 lines.
        self.assertEqual(len(body.split('\n')), 17)
    @patch.object(instructor.views.api, 'random_code_generator', Mock(side_effect=['first', 'second', 'third', 'fourth']))
    def test_generate_course_registration_codes_matching_existing_coupon_code(self):
        """
        Test the generated course registration code is already in the Coupon Table
        """
        url = reverse('generate_registration_codes',
                      kwargs={'course_id': self.course.id.to_deprecated_string()})
        coupon = Coupon(code='first', course_id=self.course.id.to_deprecated_string(), created_by=self.instructor)
        coupon.save()
        data = {'course_registration_code_number': 3, 'transaction_group_name': 'Test Group'}
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(response['Content-Type'], 'text/csv')
        body = response.content.replace('\r', '')
        self.assertTrue(body.startswith('"code","course_id","transaction_group_name","created_by","redeemed_by"'))
        self.assertEqual(len(body.split('\n')), 5)  # 1 for headers, 1 for new line at the end and 3 for the actual data
    @patch.object(instructor.views.api, 'random_code_generator', Mock(side_effect=['first', 'first', 'second', 'third']))
    def test_generate_course_registration_codes_integrity_error(self):
        """
        Test for the Integrity error against the generated code
        """
        # 'first' is generated twice; the duplicate must be retried, not saved.
        url = reverse('generate_registration_codes',
                      kwargs={'course_id': self.course.id.to_deprecated_string()})
        data = {'course_registration_code_number': 2, 'transaction_group_name': 'Test Group'}
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(response['Content-Type'], 'text/csv')
        body = response.content.replace('\r', '')
        self.assertTrue(body.startswith('"code","course_id","transaction_group_name","created_by","redeemed_by"'))
        self.assertEqual(len(body.split('\n')), 4)
    def test_spent_course_registration_codes_csv(self):
        """
        Test to generate a response of all the spent course registration codes
        """
        url = reverse('spent_registration_codes',
                      kwargs={'course_id': self.course.id.to_deprecated_string()})
        data = {'spent_transaction_group_name': ''}
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(response['Content-Type'], 'text/csv')
        body = response.content.replace('\r', '')
        self.assertTrue(body.startswith('"code","course_id","transaction_group_name","created_by","redeemed_by"'))
        self.assertEqual(len(body.split('\n')), 7)
        for i in range(9):
            course_registration_code = CourseRegistrationCode(
                code='TestCode{}'.format(i), course_id=self.course.id.to_deprecated_string(),
                transaction_group_name='Group Alpha', created_by=self.instructor
            )
            course_registration_code.save()
        for i in range(9):
            order = Order(user=self.instructor, status='purchased')
            order.save()
        # Spent(used) Registration Codes
        for i in range(9):
            # Offset past the 12 codes created in setUp so these redemptions
            # hit the freshly created 'Group Alpha' codes.
            i += 13
            registration_code_redemption = RegistrationCodeRedemption(
                order_id=i, registration_code_id=i, redeemed_by=self.instructor
            )
            registration_code_redemption.save()
        data = {'spent_transaction_group_name': 'Group Alpha'}
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(response['Content-Type'], 'text/csv')
        body = response.content.replace('\r', '')
        self.assertTrue(body.startswith('"code","course_id","transaction_group_name","created_by","redeemed_by"'))
        self.assertEqual(len(body.split('\n')), 11)
    def test_active_course_registration_codes_csv(self):
        """
        Test to generate a response of all the active course registration codes
        """
        url = reverse('active_registration_codes',
                      kwargs={'course_id': self.course.id.to_deprecated_string()})
        data = {'active_transaction_group_name': ''}
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(response['Content-Type'], 'text/csv')
        body = response.content.replace('\r', '')
        self.assertTrue(body.startswith('"code","course_id","transaction_group_name","created_by","redeemed_by"'))
        self.assertEqual(len(body.split('\n')), 9)
        for i in range(9):
            course_registration_code = CourseRegistrationCode(
                code='TestCode{}'.format(i), course_id=self.course.id.to_deprecated_string(),
                transaction_group_name='Group Alpha', created_by=self.instructor
            )
            course_registration_code.save()
        data = {'active_transaction_group_name': 'Group Alpha'}
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(response['Content-Type'], 'text/csv')
        body = response.content.replace('\r', '')
        self.assertTrue(body.startswith('"code","course_id","transaction_group_name","created_by","redeemed_by"'))
        self.assertEqual(len(body.split('\n')), 11)
    def test_get_all_course_registration_codes_csv(self):
        """
        Test to generate a response of all the course registration codes
        """
        url = reverse('get_registration_codes',
                      kwargs={'course_id': self.course.id.to_deprecated_string()})
        data = {'download_transaction_group_name': ''}
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(response['Content-Type'], 'text/csv')
        body = response.content.replace('\r', '')
        self.assertTrue(body.startswith('"code","course_id","transaction_group_name","created_by","redeemed_by"'))
        self.assertEqual(len(body.split('\n')), 14)
        for i in range(9):
            course_registration_code = CourseRegistrationCode(
                code='TestCode{}'.format(i), course_id=self.course.id.to_deprecated_string(),
                transaction_group_name='Group Alpha', created_by=self.instructor
            )
            course_registration_code.save()
        data = {'download_transaction_group_name': 'Group Alpha'}
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(response['Content-Type'], 'text/csv')
        body = response.content.replace('\r', '')
        self.assertTrue(body.startswith('"code","course_id","transaction_group_name","created_by","redeemed_by"'))
        self.assertEqual(len(body.split('\n')), 11)
|
Didacti/elixir | refs/heads/master | tests/test_autoload.py | 1 | """
test autoloaded entities
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import six
from sqlalchemy import Table, Column, ForeignKey
from elixir import *
import elixir
def setup_entity_raise(cls):
    """Assert that setting up *cls* raises during entity setup.

    Used by the override tests below: declaring a field that clashes with an
    autoloaded column must fail, so any exception from setup_entities() is
    the expected outcome and is swallowed.
    """
    try:
        setup_entities([cls])
    except Exception:
        # Expected: the clashing definition is rejected.
        pass
    else:
        assert False, "Exception did not occur setting up %s" % cls.__name__
# ------
def setup():
    """Module-level setup: make entities autoload with short table names."""
    elixir.options_defaults.update({'autoload': True, 'shortnames': True})
def teardown():
    """Module-level teardown: restore the non-autoload defaults."""
    elixir.options_defaults.update({'autoload': False, 'shortnames': False})
# -----------
class TestAutoload(object):
    # Each test creates real tables first, then defines Entities that must
    # autoload (reflect) their columns from those tables.
    def setup(self):
        # Fresh in-memory SQLite database per test.
        metadata.bind = 'sqlite://'
    def teardown(self):
        cleanup_all(True)
    def test_simple(self):
        person_table = Table('person', metadata,
            Column('id', Integer, primary_key=True),
            Column('name', String(32)))
        animal_table = Table('animal', metadata,
            Column('id', Integer, primary_key=True),
            Column('name', String(30)),
            Column('owner_id', Integer, ForeignKey('person.id')),
            Column('feeder_id', Integer, ForeignKey('person.id')))
        metadata.create_all()
        # clear() drops the Table objects so the entities must reflect them.
        metadata.clear()
        class Person(Entity):
            pets = OneToMany('Animal', inverse='owner')
            animals = OneToMany('Animal', inverse='feeder')
        class Animal(Entity):
            owner = ManyToOne('Person', colname='owner_id')
            feeder = ManyToOne('Person', colname='feeder_id')
        setup_all()
        snowball = Animal(name="Snowball II")
        slh = Animal(name="Santa's Little Helper")
        homer = Person(name="Homer", animals=[snowball, slh], pets=[slh])
        lisa = Person(name="Lisa", pets=[snowball])
        session.commit()
        session.close()
        homer = Person.get_by(name="Homer")
        lisa = Person.get_by(name="Lisa")
        slh = Animal.get_by(name="Santa's Little Helper")
        assert len(homer.animals) == 2
        assert homer == lisa.pets[0].feeder
        assert homer == slh.owner
    def test_selfref(self):
        # Self-referential FK: person.father_id -> person.id.
        person_table = Table('person', metadata,
            Column('id', Integer, primary_key=True),
            Column('father_id', Integer, ForeignKey('person.id')),
            Column('name', String(32)))
        metadata.create_all()
        metadata.clear()
        class Person(Entity):
            father = ManyToOne('Person')
            children = OneToMany('Person')
        setup_all()
        grampa = Person(name="Abe")
        homer = Person(name="Homer")
        bart = Person(name="Bart")
        lisa = Person(name="Lisa")
        grampa.children.append(homer)
        homer.children.append(bart)
        lisa.father = homer
        session.commit()
        session.close()
        p = Person.get_by(name="Homer")
        assert p in p.father.children
        assert p.father.name == "Abe"
        assert p.father is Person.get_by(name="Abe")
        assert p is Person.get_by(name="Lisa").father
    def test_m2m(self):
        # Many-to-many via an explicit association table.
        person_table = Table('person', metadata,
            Column('id', Integer, primary_key=True),
            Column('name', String(32)))
        category_table = Table('category', metadata,
            Column('name', String(30), primary_key=True))
        person_category_table = Table('person_category', metadata,
            Column('person_id', Integer, ForeignKey('person.id')),
            Column('category_name', String(30), ForeignKey('category.name')))
        metadata.create_all()
        metadata.clear()
        class Person(Entity):
            categories = ManyToMany('Category',
                                    tablename='person_category')
        class Category(Entity):
            persons = ManyToMany('Person',
                                 tablename='person_category')
        setup_all()
        stupid = Category(name="Stupid")
        simpson = Category(name="Simpson")
        old = Category(name="Old")
        grampa = Person(name="Abe", categories=[simpson, old])
        homer = Person(name="Homer", categories=[simpson, stupid])
        bart = Person(name="Bart")
        lisa = Person(name="Lisa")
        simpson.persons.extend([bart, lisa])
        session.commit()
        session.close()
        c = Category.get_by(name="Simpson")
        grampa = Person.get_by(name="Abe")
        assert len(c.persons) == 4
        assert c in grampa.categories
    def test_m2m_selfref(self):
        # Self-referential many-to-many; local_colname disambiguates which
        # of the two FK columns is "this side" of the relationship.
        person_table = Table('person', metadata,
            Column('id', Integer, primary_key=True),
            Column('name', String(32)))
        person_person_table = Table('person_person', metadata,
            Column('person_id1', Integer, ForeignKey('person.id')),
            Column('person_id2', Integer, ForeignKey('person.id')))
        metadata.create_all()
        metadata.clear()
        class Person(Entity):
            appreciate = ManyToMany('Person',
                                    tablename='person_person',
                                    local_colname='person_id1')
            isappreciatedby = ManyToMany('Person',
                                         tablename='person_person',
                                         local_colname='person_id2')
        setup_all()
        barney = Person(name="Barney")
        homer = Person(name="Homer", appreciate=[barney])
        session.commit()
        session.close()
        homer = Person.get_by(name="Homer")
        barney = Person.get_by(name="Barney")
        assert barney in homer.appreciate
        assert homer in barney.isappreciatedby
    # ----------------
    # overrides tests
    # ----------------
    def _create_table_a(self):
        # Shared fixture for the override tests below.
        a_table = Table('a', metadata,
            Column('id', Integer, primary_key=True),
            Column('name', String(32)))
        metadata.create_all()
        metadata.clear()
    def test_override_pk_fails(self):
        self._create_table_a()
        class A(Entity):
            id = Field(Integer, primary_key=True)
        setup_entity_raise(A)
    def test_override_non_pk_fails(self):
        self._create_table_a()
        class A(Entity):
            name = Field(String(30))
        setup_entity_raise(A)
    # ---------------
    def test_nopk(self):
        # Table has no PK; the mapper is told to treat 'id' as one.
        table = Table('a', metadata,
            Column('id', Integer),
            Column('name', String(32)))
        metadata.create_all()
        metadata.clear()
        class A(Entity):
            using_mapper_options(primary_key=['id'])
        setup_all()
        a1 = A(id=1, name="a1")
        session.commit()
        session.close()
        res = A.query.all()
        assert len(res) == 1
        assert res[0].name == "a1"
    def test_inheritance(self):
        table = Table('father', metadata,
            Column('id', Integer, primary_key=True),
            Column('row_type', elixir.options.POLYMORPHIC_COL_TYPE))
        metadata.create_all()
        metadata.clear()
        class Father(Entity):
            pass
        class Son(Father):
            pass
        setup_all()
    def test_autoload_mixed(self):
        # mixed autoloaded entity with a non autoloaded one
        conn = metadata.bind.connect()
        conn.execute("CREATE TABLE user ("
                     "user_id INTEGER PRIMARY KEY AUTOINCREMENT)")
        conn.close()
        class User(Entity):
            using_options(tablename='user', autoload=True)
        class Item(Entity):
            using_options(autoload=False)
            owner = ManyToOne('User')
        setup_all(True)
        # The generated FK column must target the reflected 'user_id' PK.
        colname = list(Item.table.c['owner_user_id'].foreign_keys)[0].column.name
        assert colname == 'user_id'
|
mantidproject/mantid | refs/heads/master | Testing/PerformanceTests/reporters.py | 6 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import os
import sys
#########################################################################
# A base class to support report results in an appropriate manner
#########################################################################
class ResultReporter(object):
    '''
    Abstract base for reporting test results.

    Concrete reporters subclass this and override dispatchResults() to
    deliver a TestResult in whatever form they need (stdout, files, XML...).
    '''
    def __init__(self):
        '''Set up the reporter instance, e.g. connect to a database.'''
        pass
    def dispatchResults(self, result):
        '''
        Deliver *result* (a TestResult object). Must be overridden;
        this base implementation always raises NotImplementedError.
        '''
        raise NotImplementedError('"dispatchResults(self, result)" should be '
                                  'overridden in a derived class')
#########################################################################
# A class to report results as formatted text output
#########################################################################
class TextResultReporter(ResultReporter):
    '''
    Report the results of a test using standard out
    '''
    def dispatchResults(self, result):
        '''
        Print the results to standard out
        '''
        # NOTE: Python 2 print statements; this module predates py3 support.
        nstars = 30
        print '*' * nstars
        for (name, val) in result.data.items():
            str_val = str(val)
            str_val = str_val.replace("\n", " ")
            # Truncate long values so the table stays readable.
            if len(str_val) > 50:
                str_val = str_val[:50] + " . . . "
            print ' ' + name.ljust(15) + '-> ', str_val
        print '*' * nstars
#########################################################################
# A class to report results as formatted text output
#########################################################################
class LogArchivingReporter(ResultReporter):
    '''
    Archive the captured log output of each test into a file inside a
    dedicated folder on disk.
    '''
    def __init__(self, logarchive):
        # Path to a log archiving folder; created if it does not exist yet.
        self.logarchive = os.path.abspath(logarchive)
        if not os.path.exists(self.logarchive):
            os.mkdir(self.logarchive)
    def dispatchResults(self, result):
        '''
        Write the result's log contents to a file named after the test.
        '''
        fullpath = os.path.join(self.logarchive, result.get_logarchive_filename())
        # Context manager guarantees the handle is closed even if write fails.
        with open(fullpath, "w") as f:
            f.write(result["log_contents"])
#########################################################################
# A class to report results as XML that Hudson can interpret
#########################################################################
class JUnitXMLReporter(ResultReporter):
    '''
    Report the results of a test to a JUnit style XML format
    that can be read by Hudson/Jenkins
    '''
    def __init__(self, path):
        # Path to the directory that will receive the .xml files.
        self._path = path
    def dispatchResults(self, result):
        '''
        Make a junit .xml file
        '''
        fullpath = os.path.join(self._path, "%s.xml" % result["name"])
        # "Suite.test.name" -> suite = first dotted part, test = the rest.
        names = result["name"].split(".")
        suitename = names[0]
        testname = ".".join(names[1:])
        failure = ""
        num_failures = 0
        if not result["success"]:
            failure = """\n <failure type="failedAssert">%s</failure>
            <system-out ><![CDATA[%s]]></system-out>""" % (result["status"], result["log_contents"])
            num_failures = 1
        # Context manager guarantees the handle is closed even if write fails.
        with open(fullpath, 'w') as f:
            f.write("""<?xml version="1.0" encoding="UTF-8"?>
<testsuite name="%s" tests="1" failures="%d" disabled="0" errors="0" time="0.0">
<testcase name="%s" time="%f" classname="%s">%s
</testcase>
</testsuite>
""" % (suitename, num_failures, testname, result["runtime"], suitename, failure) )
if __name__=="__main__":
    # Manual smoke test: emit one passing and one failing JUnit XML file
    # into the current directory.
    import testresult
    rep = JUnitXMLReporter(".")
    res = testresult.TestResult()
    res["name"] = "MyTestTest.Test"
    res["status"] = "success maybe?"
    res["success"] = True
    res["runtime"] = 1.234
    rep.dispatchResults(res)
    res = testresult.TestResult()
    res["name"] = "MyTestTest.OtherTest"
    res["status"] = "failure"
    res["success"] = False
    res["runtime"] = 3.456
    rep.dispatchResults(res)
|
hittu123/ruhive | refs/heads/master | src/profiles/admin.py | 75 | from __future__ import unicode_literals
from django.contrib import admin
from authtools.admin import NamedUserAdmin
from .models import Profile
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
User = get_user_model()
class UserProfileInline(admin.StackedInline):
    # Edit the related Profile inline on the user admin change page.
    model = Profile
class NewUserAdmin(NamedUserAdmin):
    """User admin with the Profile inline and a permalink column."""
    inlines = [UserProfileInline]
    list_display = ('is_active', 'email', 'name', 'permalink',
                    'is_superuser', 'is_staff',)
    # 'View on site' didn't work since the original User model needs to
    # have get_absolute_url defined. So showing on the list display
    # was a workaround.
    def permalink(self, obj):
        # Link to the public profile page, keyed on the profile's slug.
        url = reverse("profiles:show",
                      kwargs={"slug": obj.profile.slug})
        # Unicode hex b6 is the Pilcrow sign
        return '<a href="{}">{}</a>'.format(url, '\xb6')
    permalink.allow_tags = True
# Replace the stock User admin with the version that embeds the Profile.
admin.site.unregister(User)
admin.site.register(User, NewUserAdmin)
|
Tejal011089/medsyn2_lib | refs/heads/master | webnotes/app.py | 30 | import sys, os
import json
sys.path.insert(0, '.')
sys.path.insert(0, 'app')
sys.path.insert(0, 'lib')
from werkzeug.wrappers import Request, Response
from werkzeug.local import LocalManager
from webnotes.middlewares import StaticDataMiddleware
from werkzeug.exceptions import HTTPException, NotFound
from werkzeug.contrib.profiler import ProfilerMiddleware
from webnotes import get_config
import mimetypes
import webnotes
import webnotes.handler
import webnotes.auth
import webnotes.webutils
local_manager = LocalManager([webnotes.local])
def handle_session_stopped():
	# Build the maintenance page returned while the site is being updated.
	res = Response("""<html>
			<body style="background-color: #EEE;">
				<h3 style="width: 900px; background-color: #FFF; border: 2px solid #AAA; padding: 20px; font-family: Arial; margin: 20px auto">
					Updating.
					We will be back in a few moments...
				</h3>
			</body>
		</html>""")
	# 503 Service Unavailable tells clients/proxies this is temporary.
	res.status_code = 503
	res.content_type = 'text/html'
	return res
@Request.application
def application(request):
	# WSGI entry point (Python 2 codebase: note the `except X, e` syntax).
	webnotes.local.request = request
	try:
		# Resolve the site from the Host header (multi-tenant setup).
		site = webnotes.utils.get_site_name(request.host)
		webnotes.init(site=site)
		# Flatten single-item lists so form values behave like scalars.
		webnotes.local.form_dict = webnotes._dict({ k:v[0] if isinstance(v, (list, tuple)) else v \
			for k, v in (request.form or request.args).iteritems() })
		webnotes.local._response = Response()
		try:
			webnotes.http_request = webnotes.auth.HTTPRequest()
		except webnotes.AuthenticationError, e:
			# Guest access is allowed to fall through to the page renderer.
			pass
		if webnotes.form_dict.cmd:
			# RPC-style call: ?cmd=<dotted.method>
			webnotes.handler.handle()
		elif webnotes.local.request.method in ('GET', 'HEAD'):
			# Otherwise serve a website page by path.
			webnotes.webutils.render(webnotes.request.path[1:])
		else:
			raise NotFound
	except HTTPException, e:
		# HTTPExceptions are valid WSGI responses themselves.
		return e
	except webnotes.SessionStopped, e:
		webnotes.local._response = handle_session_stopped()
	finally:
		# Always release the database connection for this request.
		if webnotes.conn:
			webnotes.conn.close()
	return webnotes.local._response
# Clean up werkzeug locals after each request.
application = local_manager.make_middleware(application)
# Serve files from ./public unless statics are handled by the web server.
if not os.environ.get('NO_STATICS'):
	application = StaticDataMiddleware(application, {
		'/': 'public',
	})
def serve(port=8000, profile=False):
	# Development server entry point; wraps the module-level `application`
	# in a profiler when profile=True.
	webnotes.validate_versions()
	global application
	from werkzeug.serving import run_simple
	if profile:
		application = ProfilerMiddleware(application)
	run_simple('0.0.0.0', int(port), application, use_reloader=True,
		use_debugger=True, use_evalex=True)
|
pigeonflight/strider-plone | refs/heads/master | docker/appengine/google/appengine/tools/devappserver2/python/pdb_sandbox.py | 14 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Modify pdb to work with the devappserver2 sandbox."""
import sys
import threading
def install(config):
    """Install the necessary changes to pdb.

    Monkeypatch pdb so that it can be used in the devappserver sandbox. Must
    be called after the sandbox has been installed but before stdin/stdout
    objects have been reassigned.

    Args:
        config: The runtime_config_pb2.Config to use to configure the sandbox.
    """
    # Import here (i.e. after sandbox installed) to get the post sandbox pdb.
    # Extremely important so that we monkeypatch the same pdb the apps can
    # import.
    import pdb as pdb_postsandbox

    # Save stdin/stdout as the references will not be available when user
    # code runs.
    real_stdin = sys.stdin
    real_stdout = sys.stdout

    # Capture the original Pdb so we can forward the __init__ call after
    # monkeypatching (if not captured, forwarding the call results in infinite
    # recursion).
    pdb_premonkeypatch = pdb_postsandbox.Pdb

    # Emit a warning when debugging is unreliable (multiple threads or
    # instances may interleave on the same stdin/stdout).
    if config.threadsafe or config.max_instances != 1:
        warning = """
********************************************************************************
* WARNING: please read before using PDB:
* https://developers.google.com/appengine/docs/python/tools/devserver#Python_Debugging_with_PDB
********************************************************************************
"""
        lock = threading.Lock()
    else:
        warning = ''

    class _Pdb(pdb_postsandbox.Pdb):
        # class-level flag: has the warning banner been emitted yet?
        _warning_written = False

        # TODO: improve argument handling so if new arguments are added
        # in the future or the defaults change, this does not need to be updated.
        def __init__(self, completekey='tab', stdin=None, stdout=None, skip=None):
            # Fall back to the real (pre-sandbox) streams captured above.
            if stdin is None:
                stdin = real_stdin
            if stdout is None:
                stdout = real_stdout
            # Pdb is old style class so no super().
            pdb_premonkeypatch.__init__(self, completekey, stdin, stdout, skip)
            if warning:
                with lock:
                    # Note: while the goal is to write the warning only one time, it
                    # may be written multiple times (once each per instance).
                    if not _Pdb._warning_written:
                        stdout.write(warning)
                        _Pdb._warning_written = True

    # Replace pdb's Pdb class so user code picks up the patched version.
    pdb_postsandbox.Pdb = _Pdb
|
yencarnacion/jaikuengine | refs/heads/master | .google_appengine/lib/django-1.4/tests/regressiontests/fixtures_regress/tests.py | 29 | # -*- coding: utf-8 -*-
# Unittests for fixtures.
from __future__ import absolute_import
import os
import re
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django.core import management
from django.core.management.base import CommandError
from django.core.management.commands.dumpdata import sort_dependencies
from django.db import transaction
from django.db.models import signals
from django.test import (TestCase, TransactionTestCase, skipIfDBFeature,
skipUnlessDBFeature)
from django.test.utils import override_settings
from .models import (Animal, Stuff, Absolute, Parent, Child, Article, Widget,
Store, Person, Book, NKChild, RefToNKChild, Circle1, Circle2, Circle3,
ExternalDependency, Thingy)
# Accumulates (count-description, weight-description) tuples recorded by the
# signal handler below; tests inspect it after calling loaddata.
pre_save_checks = []


def animal_pre_save_check(signal, sender, instance, **kwargs):
    "A signal that is used to check the type of data loaded from fixtures"
    count_desc = 'Count = %s (%s)' % (instance.count, type(instance.count))
    weight_desc = 'Weight = %s (%s)' % (instance.weight, type(instance.weight))
    pre_save_checks.append((count_desc, weight_desc))
class TestFixtures(TestCase):
    """Regression tests for loaddata/dumpdata fixture handling.

    Each test method loads one of the fixture files shipped alongside this
    test app and asserts either the resulting database state or the error
    text written to the command's stderr.
    """

    def test_duplicate_pk(self):
        """
        This is a regression test for ticket #3790.
        """
        # Load a fixture that uses PK=1
        management.call_command(
            'loaddata',
            'sequence',
            verbosity=0,
            commit=False
        )

        # Create a new animal. Without a sequence reset, this new object
        # will take a PK of 1 (on Postgres), and the save will fail.
        animal = Animal(
            name='Platypus',
            latin_name='Ornithorhynchus anatinus',
            count=2,
            weight=2.2
        )
        animal.save()
        self.assertGreater(animal.id, 1)

    @skipIfDBFeature('interprets_empty_strings_as_nulls')
    def test_pretty_print_xml(self):
        """
        Regression test for ticket #4558 -- pretty printing of XML fixtures
        doesn't affect parsing of None values.
        """
        # Load a pretty-printed XML fixture with Nulls.
        management.call_command(
            'loaddata',
            'pretty.xml',
            verbosity=0,
            commit=False
        )
        self.assertEqual(Stuff.objects.all()[0].name, None)
        self.assertEqual(Stuff.objects.all()[0].owner, None)

    @skipUnlessDBFeature('interprets_empty_strings_as_nulls')
    def test_pretty_print_xml_empty_strings(self):
        """
        Regression test for ticket #4558 -- pretty printing of XML fixtures
        doesn't affect parsing of None values.
        """
        # Load a pretty-printed XML fixture with Nulls.
        management.call_command(
            'loaddata',
            'pretty.xml',
            verbosity=0,
            commit=False
        )
        self.assertEqual(Stuff.objects.all()[0].name, u'')
        self.assertEqual(Stuff.objects.all()[0].owner, None)

    def test_absolute_path(self):
        """
        Regression test for ticket #6436 --
        os.path.join will throw away the initial parts of a path if it
        encounters an absolute path.
        This means that if a fixture is specified as an absolute path,
        we need to make sure we don't discover the absolute path in every
        fixture directory.
        """
        load_absolute_path = os.path.join(
            os.path.dirname(__file__),
            'fixtures',
            'absolute.json'
        )
        management.call_command(
            'loaddata',
            load_absolute_path,
            verbosity=0,
            commit=False
        )
        self.assertEqual(Absolute.load_count, 1)

    def test_unknown_format(self):
        """
        Test for ticket #4371 -- Loading data of an unknown format should fail
        Validate that error conditions are caught correctly
        """
        stderr = StringIO()
        management.call_command(
            'loaddata',
            'bad_fixture1.unkn',
            verbosity=0,
            commit=False,
            stderr=stderr,
        )
        self.assertEqual(
            stderr.getvalue(),
            "Problem installing fixture 'bad_fixture1': unkn is not a known serialization format.\n"
        )

    def test_invalid_data(self):
        """
        Test for ticket #4371 -- Loading a fixture file with invalid data
        using explicit filename.
        Validate that error conditions are caught correctly
        """
        stderr = StringIO()
        management.call_command(
            'loaddata',
            'bad_fixture2.xml',
            verbosity=0,
            commit=False,
            stderr=stderr,
        )
        self.assertEqual(
            stderr.getvalue(),
            "No fixture data found for 'bad_fixture2'. (File format may be invalid.)\n"
        )

    def test_invalid_data_no_ext(self):
        """
        Test for ticket #4371 -- Loading a fixture file with invalid data
        without file extension.
        Validate that error conditions are caught correctly
        """
        stderr = StringIO()
        management.call_command(
            'loaddata',
            'bad_fixture2',
            verbosity=0,
            commit=False,
            stderr=stderr,
        )
        self.assertEqual(
            stderr.getvalue(),
            "No fixture data found for 'bad_fixture2'. (File format may be invalid.)\n"
        )

    def test_empty(self):
        """
        Test for ticket #4371 -- Loading a fixture file with no data returns an error.
        Validate that error conditions are caught correctly
        """
        stderr = StringIO()
        management.call_command(
            'loaddata',
            'empty',
            verbosity=0,
            commit=False,
            stderr=stderr,
        )
        self.assertEqual(
            stderr.getvalue(),
            "No fixture data found for 'empty'. (File format may be invalid.)\n"
        )

    def test_abort_loaddata_on_error(self):
        """
        Test for ticket #4371 -- If any of the fixtures contain an error,
        loading is aborted.
        Validate that error conditions are caught correctly
        """
        stderr = StringIO()
        management.call_command(
            'loaddata',
            'empty',
            verbosity=0,
            commit=False,
            stderr=stderr,
        )
        self.assertEqual(
            stderr.getvalue(),
            "No fixture data found for 'empty'. (File format may be invalid.)\n"
        )

    def test_error_message(self):
        """
        (Regression for #9011 - error message is correct)
        """
        stderr = StringIO()
        management.call_command(
            'loaddata',
            'bad_fixture2',
            'animal',
            verbosity=0,
            commit=False,
            stderr=stderr,
        )
        self.assertEqual(
            stderr.getvalue(),
            "No fixture data found for 'bad_fixture2'. (File format may be invalid.)\n"
        )

    def test_pg_sequence_resetting_checks(self):
        """
        Test for ticket #7565 -- PostgreSQL sequence resetting checks shouldn't
        ascend to parent models when inheritance is used
        (since they are treated individually).
        """
        management.call_command(
            'loaddata',
            'model-inheritance.json',
            verbosity=0,
            commit=False
        )
        self.assertEqual(Parent.objects.all()[0].id, 1)
        self.assertEqual(Child.objects.all()[0].id, 1)

    def test_close_connection_after_loaddata(self):
        """
        Test for ticket #7572 -- MySQL has a problem if the same connection is
        used to create tables, load data, and then query over that data.
        To compensate, we close the connection after running loaddata.
        This ensures that a new connection is opened when test queries are
        issued.
        """
        management.call_command(
            'loaddata',
            'big-fixture.json',
            verbosity=0,
            commit=False
        )
        articles = Article.objects.exclude(id=9)
        self.assertEqual(
            list(articles.values_list('id', flat=True)),
            [1, 2, 3, 4, 5, 6, 7, 8]
        )
        # Just for good measure, run the same query again.
        # Under the influence of ticket #7572, this will
        # give a different result to the previous call.
        self.assertEqual(
            list(articles.values_list('id', flat=True)),
            [1, 2, 3, 4, 5, 6, 7, 8]
        )

    def test_field_value_coerce(self):
        """
        Test for tickets #8298, #9942 - Field values should be coerced into the
        correct type by the deserializer, not as part of the database write.
        """
        global pre_save_checks
        pre_save_checks = []
        signals.pre_save.connect(animal_pre_save_check)
        try:
            management.call_command(
                'loaddata',
                'animal.xml',
                verbosity=0,
                commit=False,
            )
            self.assertEqual(
                pre_save_checks,
                [
                    ("Count = 42 (<type 'int'>)", "Weight = 1.2 (<type 'float'>)")
                ]
            )
        finally:
            # always disconnect so other tests are not affected
            signals.pre_save.disconnect(animal_pre_save_check)

    def test_dumpdata_uses_default_manager(self):
        """
        Regression for #11286
        Ensure that dumpdata honors the default manager
        Dump the current contents of the database as a JSON fixture
        """
        management.call_command(
            'loaddata',
            'animal.xml',
            verbosity=0,
            commit=False,
        )
        management.call_command(
            'loaddata',
            'sequence.json',
            verbosity=0,
            commit=False,
        )
        animal = Animal(
            name='Platypus',
            latin_name='Ornithorhynchus anatinus',
            count=2,
            weight=2.2
        )
        animal.save()

        stdout = StringIO()
        management.call_command(
            'dumpdata',
            'fixtures_regress.animal',
            format='json',
            stdout=stdout
        )

        # Output order isn't guaranteed, so check for parts
        data = stdout.getvalue()

        # Get rid of artifacts like '000000002' to eliminate the differences
        # between different Python versions.
        data = re.sub('0{6,}\d', '', data)

        lion_json = '{"pk": 1, "model": "fixtures_regress.animal", "fields": {"count": 3, "weight": 1.2, "name": "Lion", "latin_name": "Panthera leo"}}'
        emu_json = '{"pk": 10, "model": "fixtures_regress.animal", "fields": {"count": 42, "weight": 1.2, "name": "Emu", "latin_name": "Dromaius novaehollandiae"}}'
        platypus_json = '{"pk": %d, "model": "fixtures_regress.animal", "fields": {"count": 2, "weight": 2.2, "name": "Platypus", "latin_name": "Ornithorhynchus anatinus"}}'
        platypus_json = platypus_json % animal.pk

        self.assertEqual(len(data), len('[%s]' % ', '.join([lion_json, emu_json, platypus_json])))
        self.assertTrue(lion_json in data)
        self.assertTrue(emu_json in data)
        self.assertTrue(platypus_json in data)

    def test_proxy_model_included(self):
        """
        Regression for #11428 - Proxy models aren't included when you dumpdata
        """
        stdout = StringIO()
        # Create an instance of the concrete class
        widget = Widget.objects.create(name='grommet')
        management.call_command(
            'dumpdata',
            'fixtures_regress.widget',
            'fixtures_regress.widgetproxy',
            format='json',
            stdout=stdout
        )
        self.assertEqual(
            stdout.getvalue(),
            """[{"pk": %d, "model": "fixtures_regress.widget", "fields": {"name": "grommet"}}]"""
            % widget.pk
        )

    def test_loaddata_works_when_fixture_has_forward_refs(self):
        """
        Regression for #3615 - Forward references cause fixtures not to load in MySQL (InnoDB)
        """
        management.call_command(
            'loaddata',
            'forward_ref.json',
            verbosity=0,
            commit=False
        )
        self.assertEqual(Book.objects.all()[0].id, 1)
        self.assertEqual(Person.objects.all()[0].id, 4)

    def test_loaddata_raises_error_when_fixture_has_invalid_foreign_key(self):
        """
        Regression for #3615 - Ensure data with nonexistent child key references raises error
        """
        stderr = StringIO()
        management.call_command(
            'loaddata',
            'forward_ref_bad_data.json',
            verbosity=0,
            commit=False,
            stderr=stderr,
        )
        self.assertTrue(
            stderr.getvalue().startswith('Problem installing fixture')
        )

    # directory of this test module, used to build FIXTURE_DIRS below
    _cur_dir = os.path.dirname(os.path.abspath(__file__))

    @override_settings(FIXTURE_DIRS=[os.path.join(_cur_dir, 'fixtures_1'),
                                     os.path.join(_cur_dir, 'fixtures_2')])
    def test_loaddata_forward_refs_split_fixtures(self):
        """
        Regression for #17530 - should be able to cope with forward references
        when the fixtures are not in the same files or directories.
        """
        management.call_command(
            'loaddata',
            'forward_ref_1.json',
            'forward_ref_2.json',
            verbosity=0,
            commit=False
        )
        self.assertEqual(Book.objects.all()[0].id, 1)
        self.assertEqual(Person.objects.all()[0].id, 4)

    def test_loaddata_no_fixture_specified(self):
        """
        Regression for #7043 - Error is quickly reported when no fixtures is provided in the command line.
        """
        stderr = StringIO()
        management.call_command(
            'loaddata',
            verbosity=0,
            commit=False,
            stderr=stderr,
        )
        self.assertEqual(
            stderr.getvalue(), 'No database fixture specified. Please provide the path of at least one fixture in the command line.\n'
        )

    def test_loaddata_not_existant_fixture_file(self):
        # Missing fixtures are reported on stdout at verbosity 2, not stderr.
        stdout_output = StringIO()
        management.call_command(
            'loaddata',
            'this_fixture_doesnt_exist',
            verbosity=2,
            commit=False,
            stdout=stdout_output,
        )
        self.assertTrue("No xml fixture 'this_fixture_doesnt_exist' in" in
                        stdout_output.getvalue())
class NaturalKeyFixtureTests(TestCase):
    """Tests for natural-key (de)serialization and dependency sorting.

    Covers deserializing fixtures that reference objects by natural key and
    the model-ordering logic used by dumpdata (sort_dependencies).
    """

    def test_nk_deserialize(self):
        """
        Test for ticket #13030 - Python based parser version
        natural keys deserialize with fk to inheriting model
        """
        management.call_command(
            'loaddata',
            'model-inheritance.json',
            verbosity=0,
            commit=False
        )
        management.call_command(
            'loaddata',
            'nk-inheritance.json',
            verbosity=0,
            commit=False
        )
        self.assertEqual(
            NKChild.objects.get(pk=1).data,
            'apple'
        )
        self.assertEqual(
            RefToNKChild.objects.get(pk=1).nk_fk.data,
            'apple'
        )

    def test_nk_deserialize_xml(self):
        """
        Test for ticket #13030 - XML version
        natural keys deserialize with fk to inheriting model
        """
        management.call_command(
            'loaddata',
            'model-inheritance.json',
            verbosity=0,
            commit=False
        )
        management.call_command(
            'loaddata',
            'nk-inheritance.json',
            verbosity=0,
            commit=False
        )
        management.call_command(
            'loaddata',
            'nk-inheritance2.xml',
            verbosity=0,
            commit=False
        )
        self.assertEqual(
            NKChild.objects.get(pk=2).data,
            'banana'
        )
        self.assertEqual(
            RefToNKChild.objects.get(pk=2).nk_fk.data,
            'apple'
        )

    def test_nk_on_serialize(self):
        """
        Check that natural key requirements are taken into account
        when serializing models
        """
        management.call_command(
            'loaddata',
            'forward_ref_lookup.json',
            verbosity=0,
            commit=False
        )

        stdout = StringIO()
        management.call_command(
            'dumpdata',
            'fixtures_regress.book',
            'fixtures_regress.person',
            'fixtures_regress.store',
            verbosity=0,
            format='json',
            use_natural_keys=True,
            stdout=stdout,
        )
        self.assertEqual(
            stdout.getvalue(),
            """[{"pk": 2, "model": "fixtures_regress.store", "fields": {"name": "Amazon"}}, {"pk": 3, "model": "fixtures_regress.store", "fields": {"name": "Borders"}}, {"pk": 4, "model": "fixtures_regress.person", "fields": {"name": "Neal Stephenson"}}, {"pk": 1, "model": "fixtures_regress.book", "fields": {"stores": [["Amazon"], ["Borders"]], "name": "Cryptonomicon", "author": ["Neal Stephenson"]}}]"""
        )

    def test_dependency_sorting(self):
        """
        Now lets check the dependency sorting explicitly
        It doesn't matter what order you mention the models
        Store *must* be serialized before then Person, and both
        must be serialized before Book.
        """
        sorted_deps = sort_dependencies(
            [('fixtures_regress', [Book, Person, Store])]
        )
        self.assertEqual(
            sorted_deps,
            [Store, Person, Book]
        )

    def test_dependency_sorting_2(self):
        sorted_deps = sort_dependencies(
            [('fixtures_regress', [Book, Store, Person])]
        )
        self.assertEqual(
            sorted_deps,
            [Store, Person, Book]
        )

    def test_dependency_sorting_3(self):
        sorted_deps = sort_dependencies(
            [('fixtures_regress', [Store, Book, Person])]
        )
        self.assertEqual(
            sorted_deps,
            [Store, Person, Book]
        )

    def test_dependency_sorting_4(self):
        sorted_deps = sort_dependencies(
            [('fixtures_regress', [Store, Person, Book])]
        )
        self.assertEqual(
            sorted_deps,
            [Store, Person, Book]
        )

    def test_dependency_sorting_5(self):
        sorted_deps = sort_dependencies(
            [('fixtures_regress', [Person, Book, Store])]
        )
        self.assertEqual(
            sorted_deps,
            [Store, Person, Book]
        )

    def test_dependency_sorting_6(self):
        sorted_deps = sort_dependencies(
            [('fixtures_regress', [Person, Store, Book])]
        )
        self.assertEqual(
            sorted_deps,
            [Store, Person, Book]
        )

    def test_dependency_sorting_dangling(self):
        # Circle1 has no resolvable dependency here, so it sorts first.
        sorted_deps = sort_dependencies(
            [('fixtures_regress', [Person, Circle1, Store, Book])]
        )
        self.assertEqual(
            sorted_deps,
            [Circle1, Store, Person, Book]
        )

    def test_dependency_sorting_tight_circular(self):
        self.assertRaisesMessage(
            CommandError,
            """Can't resolve dependencies for fixtures_regress.Circle1, fixtures_regress.Circle2 in serialized app list.""",
            sort_dependencies,
            [('fixtures_regress', [Person, Circle2, Circle1, Store, Book])],
        )

    def test_dependency_sorting_tight_circular_2(self):
        self.assertRaisesMessage(
            CommandError,
            """Can't resolve dependencies for fixtures_regress.Circle1, fixtures_regress.Circle2 in serialized app list.""",
            sort_dependencies,
            [('fixtures_regress', [Circle1, Book, Circle2])],
        )

    def test_dependency_self_referential(self):
        self.assertRaisesMessage(
            CommandError,
            """Can't resolve dependencies for fixtures_regress.Circle3 in serialized app list.""",
            sort_dependencies,
            [('fixtures_regress', [Book, Circle3])],
        )

    def test_dependency_sorting_long(self):
        self.assertRaisesMessage(
            CommandError,
            """Can't resolve dependencies for fixtures_regress.Circle1, fixtures_regress.Circle2, fixtures_regress.Circle3 in serialized app list.""",
            sort_dependencies,
            [('fixtures_regress', [Person, Circle2, Circle1, Circle3, Store, Book])],
        )

    def test_dependency_sorting_normal(self):
        sorted_deps = sort_dependencies(
            [('fixtures_regress', [Person, ExternalDependency, Book])]
        )
        self.assertEqual(
            sorted_deps,
            [Person, Book, ExternalDependency]
        )

    def test_normal_pk(self):
        """
        Check that normal primary keys still work
        on a model with natural key capabilities
        """
        management.call_command(
            'loaddata',
            'non_natural_1.json',
            verbosity=0,
            commit=False
        )
        management.call_command(
            'loaddata',
            'forward_ref_lookup.json',
            verbosity=0,
            commit=False
        )
        management.call_command(
            'loaddata',
            'non_natural_2.xml',
            verbosity=0,
            commit=False
        )
        books = Book.objects.all()
        self.assertEqual(
            books.__repr__(),
            """[<Book: Cryptonomicon by Neal Stephenson (available at Amazon, Borders)>, <Book: Ender's Game by Orson Scott Card (available at Collins Bookstore)>, <Book: Permutation City by Greg Egan (available at Angus and Robertson)>]"""
        )
class TestTicket11101(TransactionTestCase):
    """Fixtures loaded inside a transaction can be rolled back (#11101)."""

    def ticket_11101(self):
        # Body executed under manual transaction management (wrapped by
        # transaction.commit_manually in the test below).
        management.call_command(
            'loaddata',
            'thingy.json',
            verbosity=0,
            commit=False
        )
        self.assertEqual(Thingy.objects.count(), 1)
        transaction.rollback()
        self.assertEqual(Thingy.objects.count(), 0)
        transaction.commit()

    @skipUnlessDBFeature('supports_transactions')
    def test_ticket_11101(self):
        """Test that fixtures can be rolled back (ticket #11101)."""
        ticket_11101 = transaction.commit_manually(self.ticket_11101)
        ticket_11101()
|
ktnyt/chainer | refs/heads/master | chainer/functions/math/hyperbolic.py | 2 | from chainer import backend
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
class Cosh(function_node.FunctionNode):
    """Elementwise hyperbolic cosine function node."""

    @property
    def label(self):
        # short name shown in computational-graph dumps
        return 'cosh'

    def check_type_forward(self, in_types):
        type_check._argname(in_types, ('x',))
        # only floating-point inputs are accepted
        type_check.expect(in_types[0].dtype.kind == 'f')

    def forward(self, x):
        # retain the input: backward() needs x to compute sinh(x)
        self.retain_inputs((0,))
        xp = backend.get_array_module(*x)
        return utils.force_array(xp.cosh(x[0])),

    def backward(self, indexes, gy):
        x = self.get_retained_inputs()
        # d/dx cosh(x) = sinh(x)
        gx = sinh(x[0])
        gx *= gy[0]
        return gx,
def cosh(x):
    """Elementwise hyperbolic cosine function.

    .. math::
        y_i = \\cosh x_i.

    Args:
        x (~chainer.Variable): Input variable.

    Returns:
        ~chainer.Variable: Output variable.
    """
    y, = Cosh().apply((x,))
    return y
class Sinh(function_node.FunctionNode):
    """Elementwise hyperbolic sine function node."""

    @property
    def label(self):
        # short name shown in computational-graph dumps
        return 'sinh'

    def check_type_forward(self, in_types):
        type_check._argname(in_types, ('x',))
        # only floating-point inputs are accepted
        type_check.expect(in_types[0].dtype.kind == 'f')

    def forward(self, x):
        # retain the input: backward() needs x to compute cosh(x)
        self.retain_inputs((0,))
        xp = backend.get_array_module(*x)
        return utils.force_array(xp.sinh(x[0])),

    def backward(self, indexes, gy):
        # Fixed: the first parameter is the tuple of input indexes, per the
        # FunctionNode.backward(indexes, grad_outputs) contract. It was
        # previously named ``x`` and immediately shadowed by the line below,
        # inconsistent with the sibling Cosh.backward.
        x = self.get_retained_inputs()
        # d/dx sinh(x) = cosh(x)
        gx = cosh(x[0])
        gx *= gy[0]
        return gx,
def sinh(x):
    """Elementwise hyperbolic sine function.

    .. math::
        y_i = \\sinh x_i.

    Args:
        x (~chainer.Variable): Input variable.

    Returns:
        ~chainer.Variable: Output variable.
    """
    y, = Sinh().apply((x,))
    return y
|
Jc2k/libcloud | refs/heads/trunk | libcloud/test/dns/test_linode.py | 3 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from libcloud.utils.py3 import httplib
from libcloud.common.linode import LinodeException
from libcloud.dns.types import RecordType, ZoneDoesNotExistError
from libcloud.dns.types import RecordDoesNotExistError
from libcloud.dns.drivers.linode import LinodeDNSDriver
from libcloud.test import MockHttp
from libcloud.test.file_fixtures import DNSFileFixtures
from libcloud.test.secrets import DNS_PARAMS_LINODE
class LinodeTests(unittest.TestCase):
    """Tests for the Linode DNS driver, backed by LinodeMockHttp fixtures."""

    def setUp(self):
        # Route all driver HTTP traffic through the mock HTTP class; the
        # mock's ``type`` attribute selects which canned fixture is served.
        LinodeDNSDriver.connectionCls.conn_classes = (
            None, LinodeMockHttp)
        LinodeMockHttp.use_param = 'api_action'
        LinodeMockHttp.type = None
        self.driver = LinodeDNSDriver(*DNS_PARAMS_LINODE)

    def assertHasKeys(self, dictionary, keys):
        # Convenience assertion: every key in *keys* is present in *dictionary*.
        for key in keys:
            self.assertTrue(key in dictionary, 'key "%s" not in dictionary' %
                            (key))

    def test_list_record_types(self):
        record_types = self.driver.list_record_types()
        self.assertEqual(len(record_types), 7)
        self.assertTrue(RecordType.A in record_types)

    def test_list_zones_success(self):
        zones = self.driver.list_zones()
        self.assertEqual(len(zones), 2)

        zone = zones[0]
        self.assertEqual(zone.id, '5093')
        self.assertEqual(zone.type, 'master')
        self.assertEqual(zone.domain, 'linode.com')
        self.assertEqual(zone.ttl, None)
        self.assertHasKeys(zone.extra, ['description', 'SOA_Email', 'status'])

    def test_list_records_success(self):
        zone = self.driver.list_zones()[0]
        records = self.driver.list_records(zone=zone)
        self.assertEqual(len(records), 2)

        record = records[0]
        self.assertEqual(record.id, '28536')
        self.assertEqual(record.name, 'www')
        self.assertEqual(record.type, RecordType.A)
        self.assertEqual(record.data, '75.127.96.245')
        self.assertHasKeys(record.extra, ['protocol', 'ttl_sec', 'port',
                                          'weight'])

    def test_list_records_zone_does_not_exist(self):
        zone = self.driver.list_zones()[0]

        LinodeMockHttp.type = 'ZONE_DOES_NOT_EXIST'

        try:
            self.driver.list_records(zone=zone)
        except ZoneDoesNotExistError:
            e = sys.exc_info()[1]
            self.assertEqual(e.zone_id, zone.id)
        else:
            self.fail('Exception was not thrown')

    def test_get_zone_success(self):
        LinodeMockHttp.type = 'GET_ZONE'
        zone = self.driver.get_zone(zone_id='5093')
        self.assertEqual(zone.id, '5093')
        self.assertEqual(zone.type, 'master')
        self.assertEqual(zone.domain, 'linode.com')
        self.assertEqual(zone.ttl, None)
        self.assertHasKeys(zone.extra, ['description', 'SOA_Email', 'status'])

    def test_get_zone_does_not_exist(self):
        LinodeMockHttp.type = 'GET_ZONE_DOES_NOT_EXIST'

        try:
            self.driver.get_zone(zone_id='4444')
        except ZoneDoesNotExistError:
            e = sys.exc_info()[1]
            self.assertEqual(e.zone_id, '4444')
        else:
            self.fail('Exception was not thrown')

    def test_get_record_success(self):
        LinodeMockHttp.type = 'GET_RECORD'
        record = self.driver.get_record(zone_id='1234', record_id='28536')
        self.assertEqual(record.id, '28536')
        self.assertEqual(record.name, 'www')
        self.assertEqual(record.type, RecordType.A)
        self.assertEqual(record.data, '75.127.96.245')
        self.assertHasKeys(record.extra, ['protocol', 'ttl_sec', 'port',
                                          'weight'])

    def test_get_record_zone_does_not_exist(self):
        LinodeMockHttp.type = 'GET_RECORD_ZONE_DOES_NOT_EXIST'

        try:
            self.driver.get_record(zone_id='444', record_id='28536')
        except ZoneDoesNotExistError:
            pass
        else:
            self.fail('Exception was not thrown')

    def test_get_record_record_does_not_exist(self):
        LinodeMockHttp.type = 'GET_RECORD_RECORD_DOES_NOT_EXIST'

        try:
            self.driver.get_record(zone_id='4441', record_id='28536')
        except RecordDoesNotExistError:
            pass
        else:
            self.fail('Exception was not thrown')

    def test_create_zone_success(self):
        zone = self.driver.create_zone(domain='foo.bar.com', type='master',
                                       ttl=None, extra=None)
        self.assertEqual(zone.id, '5123')
        self.assertEqual(zone.domain, 'foo.bar.com')

    def test_create_zone_validaton_error(self):
        # NOTE(review): "validaton" typo is in the original method name;
        # kept unchanged so test selection by name keeps working.
        LinodeMockHttp.type = 'VALIDATION_ERROR'

        try:
            self.driver.create_zone(domain='foo.bar.com', type='master',
                                    ttl=None, extra=None)
        except LinodeException:
            pass
        else:
            self.fail('Exception was not thrown')

    def test_update_zone_success(self):
        zone = self.driver.list_zones()[0]
        updated_zone = self.driver.update_zone(zone=zone,
                                               domain='libcloud.org',
                                               ttl=10,
                                               extra={'SOA_Email':
                                                      'bar@libcloud.org'})

        # original zone object is untouched
        self.assertEqual(zone.extra['SOA_Email'], 'dns@example.com')

        self.assertEqual(updated_zone.id, zone.id)
        self.assertEqual(updated_zone.domain, 'libcloud.org')
        self.assertEqual(updated_zone.type, zone.type)
        self.assertEqual(updated_zone.ttl, 10)
        self.assertEqual(updated_zone.extra['SOA_Email'], 'bar@libcloud.org')
        self.assertEqual(updated_zone.extra['status'], zone.extra['status'])
        self.assertEqual(updated_zone.extra['description'],
                         zone.extra['description'])

    def test_create_record_success(self):
        zone = self.driver.list_zones()[0]
        record = self.driver.create_record(name='www', zone=zone,
                                           type=RecordType.A, data='127.0.0.1')

        self.assertEqual(record.id, '28537')
        self.assertEqual(record.name, 'www')
        self.assertEqual(record.zone, zone)
        self.assertEqual(record.type, RecordType.A)
        self.assertEqual(record.data, '127.0.0.1')

    def test_update_record_success(self):
        zone = self.driver.list_zones()[0]
        record = self.driver.list_records(zone=zone)[0]
        updated_record = self.driver.update_record(record=record, name='www',
                                                   type=RecordType.AAAA,
                                                   data='::1')

        # original record object is untouched
        self.assertEqual(record.data, '75.127.96.245')

        self.assertEqual(updated_record.id, record.id)
        self.assertEqual(updated_record.name, 'www')
        self.assertEqual(updated_record.zone, record.zone)
        self.assertEqual(updated_record.type, RecordType.AAAA)
        self.assertEqual(updated_record.data, '::1')

    def test_delete_zone_success(self):
        zone = self.driver.list_zones()[0]
        status = self.driver.delete_zone(zone=zone)
        self.assertTrue(status)

    def test_delete_zone_does_not_exist(self):
        zone = self.driver.list_zones()[0]

        LinodeMockHttp.type = 'ZONE_DOES_NOT_EXIST'

        try:
            self.driver.delete_zone(zone=zone)
        except ZoneDoesNotExistError:
            e = sys.exc_info()[1]
            self.assertEqual(e.zone_id, zone.id)
        else:
            self.fail('Exception was not thrown')

    def test_delete_record_success(self):
        zone = self.driver.list_zones()[0]
        record = self.driver.list_records(zone=zone)[0]
        status = self.driver.delete_record(record=record)
        self.assertTrue(status)

    def test_delete_record_does_not_exist(self):
        zone = self.driver.list_zones()[0]
        record = self.driver.list_records(zone=zone)[0]

        LinodeMockHttp.type = 'RECORD_DOES_NOT_EXIST'

        try:
            self.driver.delete_record(record=record)
        except RecordDoesNotExistError:
            e = sys.exc_info()[1]
            self.assertEqual(e.record_id, record.id)
        else:
            self.fail('Exception was not thrown')
class LinodeMockHttp(MockHttp):
    """Mock HTTP transport that serves canned Linode API JSON fixtures.

    Method names follow the MockHttp dispatch convention: an optional
    ``<TYPE>_`` prefix (from LinodeMockHttp.type) plus the api_action name
    with dots replaced; each returns (status, body, headers, reason).
    """

    fixtures = DNSFileFixtures('linode')

    def _domain_list(self, method, url, body, headers):
        body = self.fixtures.load('domain_list.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _domain_resource_list(self, method, url, body, headers):
        body = self.fixtures.load('resource_list.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _ZONE_DOES_NOT_EXIST_domain_resource_list(self, method, url, body,
                                                  headers):
        body = self.fixtures.load('resource_list_does_not_exist.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _GET_ZONE_domain_list(self, method, url, body, headers):
        body = self.fixtures.load('get_zone.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _GET_ZONE_DOES_NOT_EXIST_domain_list(self, method, url, body,
                                             headers):
        body = self.fixtures.load('get_zone_does_not_exist.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _GET_RECORD_domain_list(self, method, url, body, headers):
        body = self.fixtures.load('get_zone.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _GET_RECORD_domain_resource_list(self, method, url, body, headers):
        body = self.fixtures.load('get_record.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _GET_RECORD_ZONE_DOES_NOT_EXIST_domain_list(self, method, url, body,
                                                    headers):
        body = self.fixtures.load('get_zone_does_not_exist.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _GET_RECORD_ZONE_DOES_NOT_EXIST_domain_resource_list(self, method, url,
                                                             body, headers):
        body = self.fixtures.load('get_record_does_not_exist.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _GET_RECORD_RECORD_DOES_NOT_EXIST_domain_list(self, method, url, body,
                                                      headers):
        body = self.fixtures.load('get_zone.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _GET_RECORD_RECORD_DOES_NOT_EXIST_domain_resource_list(self, method,
                                                               url, body,
                                                               headers):
        body = self.fixtures.load('get_record_does_not_exist.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _domain_create(self, method, url, body, headers):
        body = self.fixtures.load('create_domain.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _VALIDATION_ERROR_domain_create(self, method, url, body, headers):
        body = self.fixtures.load('create_domain_validation_error.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _domain_update(self, method, url, body, headers):
        body = self.fixtures.load('update_domain.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _domain_resource_create(self, method, url, body, headers):
        body = self.fixtures.load('create_resource.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _domain_resource_update(self, method, url, body, headers):
        body = self.fixtures.load('update_resource.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _domain_delete(self, method, url, body, headers):
        body = self.fixtures.load('delete_domain.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _ZONE_DOES_NOT_EXIST_domain_delete(self, method, url, body, headers):
        body = self.fixtures.load('delete_domain_does_not_exist.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _domain_resource_delete(self, method, url, body, headers):
        body = self.fixtures.load('delete_resource.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _RECORD_DOES_NOT_EXIST_domain_resource_delete(self, method, url, body,
                                                      headers):
        body = self.fixtures.load('delete_resource_does_not_exist.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    sys.exit(unittest.main())
|
brunetton/dia | refs/heads/master | samples/gobj-parse.py | 7 | '''
Parses class definitions out of GObject based headers
ToDo:
- better parser approach ;)
- respect /*< public,protected,private >*/
- parse *.c
'''
# Copyright (c) 2006, Hans Breuer <hans@breuer.org>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
import glob, os, string, re
# Debug switch: set non-zero to trace parsed classes and members on stdout.
verbose = 0
def strip_whitespace (s) :
    """Return *s* with every space and tab character removed."""
    # str methods behave identically on Python 2 and 3; the string-module
    # function forms (string.replace) were removed in Python 3.
    return s.replace(" ", "").replace("\t", "")
class CMethod :
    """One parsed C 'method': a function pointer member of a class struct."""
    def __init__ (self, name, type) :
        # name: identifier of the function pointer; type: its return type.
        self.name = name.strip()
        self.pars = []  # (name, type) tuples in declaration order
        # squeeze spaces out of the return type, e.g. 'char *' -> 'char*'
        self.retval = type.replace(' ', '')
    def AddPar (self, name, type) :
        """Append one parameter as a (name, squeezed-type) tuple."""
        self.pars.append ((name.strip(),
                           strip_whitespace(type)))
class CClass :
    """Collects everything the parser finds for one GObject class."""
    def __init__ (self, name) :
        self.name = name.strip()
        # cast-macro variants; filled by the parser, unused here
        self.defs = {'TYPE' : None, 'OBJECT' : None, 'CLASS' : None}
        self.parent = None   # (member name, type) of the base-class member
        self.attrs = []      # instance members as (name, type)
        self.methods = {}    # method name -> CMethod
        self.signals = []    # signals as (name, type)
    def AddAttr (self, name, type) :
        """Record one instance member."""
        # print() with a single pre-formatted string works identically on
        # Python 2 and 3 (the old 'print a, b' statement is Py2-only)
        if verbose : print("\t %s %s" % (type, name))
        self.attrs.append ((name.strip(),
                            strip_whitespace(type)))
    def AddMethod (self, name, method) :
        """Record one CMethod under its stripped name."""
        if verbose : print("\t %s ()" % (name,))
        self.methods[name.strip()] = method
    def AddSignal (self, name, type) :
        """Record one signal entry."""
        self.signals.append ((name.strip(),
                              strip_whitespace(type)))
class CStripCommentsAndBlankLines :
    """File-like reader that strips C /* ... */ comments and blank lines.

    Offers readline()/seek(0) so the parser can re-scan the same header;
    readline() returns "" once the filtered lines are exhausted.
    """
    def __init__ (self, name) :
        f = open (name)
        try :
            lines = f.readlines ()
        finally :
            f.close ()   # bug fix: handle leaked if readlines() raised
        self.lines = []
        self.cur = -1
        incom = 0   # currently inside a multi-line /* ... */ comment?
        r = re.compile(r"^\s*$") # to remove empty lines
        for s in lines :
            x1 = s.find ('/*')
            x2 = s.find ('*/')
            # cut out every complete /* ... */ pair inside this line
            while x2 > x1 and incom != 1 :
                s = s[:x1] + s[x2+2:]
                x1 = s.find ('/*')
                x2 = s.find ('*/')
            # (the original used while/else here; with no break the else
            # clause always ran, so plain sequential code is equivalent)
            if x1 > -1 :
                # a comment opens here and continues on following lines
                incom = 1
                s = s[:x1]
            elif x2 > -1 and incom :
                # the pending multi-line comment ends on this line
                s = s[x2+2:]
                incom = 0
            if r.match (s) or incom :
                continue
            self.lines.append (s)
        self.lines.append ("")   # sentinel so readline() keeps returning ""
        # (close moved into the try/finally above)
    def readline (self) :
        """Return the next filtered line, or "" at end of input."""
        self.cur = self.cur + 1
        try :
            return self.lines[self.cur]
        except IndexError :   # bug fix: was a bare except hiding everything
            return ""
    def seek (self, toPos) :
        """Rewind; only seek(0) (back to the start) is supported."""
        if toPos == 0 :
            self.cur = -1
def TestStripFile (name) :
    """Debug helper: dump *name* through the comment/blank-line filter."""
    f = CStripCommentsAndBlankLines(name)
    s = f.readline()
    while s :
        # strip the trailing newline; print() adds one back
        # (single-argument print() emits the same output on Python 2 and 3;
        # the old 'print s[:-1]' statement was Python-2-only)
        print(s[:-1])
        s = f.readline()
import sys
# Command line: gobj-parse.py <package> [directory]
sPkg = ''
sDir = '.'
if len (sys.argv) < 2 :
    print sys.argv[0], '<package>', '[directory]'
    sys.exit(0)
else :
    sPkg = sys.argv[1]
    if len (sys.argv) > 2 :
        sDir = sys.argv[2]
os.chdir (sDir)
# collect headers up to two directory levels deep
lst = glob.glob ('*.h')
lst.extend (glob.glob ("*/*.h"))
lst.extend (glob.glob ("*/*/*.h"))
klasses = []            # CClass objects in the order their structs were found
no_klass_headers = []   # headers where no class struct was matched
#
# #define GDK_DRAWABLE(object) (G_TYPE_CHECK_INSTANCE_CAST ((object), GDK_TYPE_DRAWABLE, GdkDrawable))
#
# Matches the instance-cast macro shown above; group 'name' is the class name.
rClassCast = re.compile ("^#define " + string.upper (sPkg) + "_(?P<n1>\w+)" \
                         "\((?P<par>\w+)\)\s+\(" \
                         "((G_TYPE_CHECK_INSTANCE_CAST)|(GTK_CHECK_CAST))\s+" \
                         "\(\((?P=par)\),\s*(?P<type>\w+),\s+" \
                         "(?P<name>\w+)\)\).*")
#
# m.group('type') == 'GDK_TYPE_DRAWABLE'
# m.group('name') == 'GdkTypeDrawable'
#
# A plain struct member declaration, e.g. "unsigned int flags : 1;"
rVarDecl = re.compile ("\s*(?P<type>(unsigned)*\s*\w+\s*\**)\s*(?P<name>\w+)\s*:*\s*\d*\s*;\.*")
rSignal = rVarDecl #FIXME: signals are 'methods' to be registered
# First line of a function-pointer member: return type, name, first parameter;
# 'done' captures ');' when the parameter list closes on the same line.
rMethod = re.compile ("\s*(?P<type>\w+\s+\**)\s*" \
                      "\(\*\s*(?P<name>\w+)\)\s*\(" \
                      "(?P<ptype>\w+\s*\**)\s*(?P<pname>\w+),*" \
                      "(?P<done>(\);)*)")
# m.group('type') == retval
# m.group('name')
# m.group('ptype'), m.group('pname')
# m.group('done')
# A continuation parameter on a following line; 'done' is ');' on the last one.
rPar = re.compile ("\s*(const\s*)*(?P<ptype>\w+\s*\**)\s*(?P<pname>\w+),*" \
                   "(?P<done>(\);)*)")
fout = open (sPkg + '-generated.log', 'w')
# the type cast is not necessarily in the same header ;(
unresolved = {}   # class name -> CClass whose struct has not been found yet
# Main loop: for every header, pass 1 finds the cast macros (naming the
# classes), pass 2 re-scans the same header for the matching struct bodies.
for sF in lst :
    sName = sF[len(sPkg):-2] # without case (approximation)
    # fin = open (sF)
    fin = CStripCommentsAndBlankLines(sF)
    s = fin.readline ()
    # pass 1: collect class names from the instance-cast macros
    while s :
        m = rClassCast.match (s)
        if m :
            fout.write (m.group('type') + ' ' + m.group('name') + '\n')
            sName = m.group('name')
            unresolved[sName] = CClass (sName)
        s = fin.readline ()
    if len(unresolved.keys()) == 0 :
        no_klass_headers.append (sF)
    else :
        iFound = 0
        # 'unresolved' deliberately carries over between headers: the cast
        # macro and the struct are not always in the same file.  (Python 2:
        # keys() returns a list copy, so deleting entries below is safe.)
        unresolved_keys = unresolved.keys()
        for sK in unresolved_keys :
            klass = unresolved[sK]
            sK = klass.name
            # start from the beginning
            fin.seek (0)
            rObject = re.compile ("struct\s+_" + sK + "\s*(?P<p>\{*)\s*$")
            rKlass = re.compile ("struct\s+_" + sK + "Class\s*(?P<p>\{*)\s*$")
            s = fin.readline ()
            meth = None
            # pass 2: look for the object struct (instance members) and the
            # class struct (signals / virtual methods)
            while s :
                mO = rObject.match (s)
                if mO :
                    if mO.group('p') != '{' :
                        s = fin.readline() # skip line with {
                    s = fin.readline() # read parent line
                    mV = rVarDecl.match(s)
                    try :
                        # first member of the object struct is the base class
                        klass.parent = (mV.group('name'),
                                        string.strip(mV.group('type')))
                        if verbose : print "class", sK, ":", mV.group('type')
                    except :
                        print 'klass.parent error (', sF, ') :', s
                    s = fin.readline ()
                    mV = rVarDecl.match(s)
                    if not mV and verbose : print s
                    # every following plain declaration becomes an attribute
                    while mV :
                        klass.AddAttr (mV.group('name'), mV.group('type'))
                        s = fin.readline ()
                        mV = rVarDecl.match(s)
                else :
                    mK = rKlass.match (s)
                    if mK :
                        iFound = iFound + 1
                        klasses.append (unresolved[sK])
                        del unresolved[sK]
                        if mK.group('p') != '{' :
                            s = fin.readline() # skip line with {
                        s = fin.readline() # read parent line, validate it?
                        s = fin.readline()
                        mS = rSignal.match(s)
                        mM = rMethod.match(s)
                        # consume signal/method members; 'meth' stays set
                        # while a multi-line parameter list is still open
                        while (mS or mM or meth) and s :
                            if mS :
                                klass.AddSignal (mS.group('name'), mS.group('type'))
                            elif mM :
                                meth = CMethod (mM.group('name'), mM.group('type'))
                                klass.AddMethod (mM.group('name'), meth)
                                meth.AddPar (mM.group('pname'), mM.group('ptype'))
                                if mM.group('done') == ');' :
                                    meth = None # reset
                            elif meth :
                                mP = rPar.match (s)
                                if mP :
                                    meth.AddPar (mP.group('pname'), mP.group('ptype'))
                                    if mP.group('done') == ');' :
                                        meth = None # reset
                                else :
                                    # fixme: too drastic?
                                    #meth = None # reset
                                    pass
                            else :
                                break
                            s = fin.readline ()
                            mS = rSignal.match(s)
                            mM = rMethod.match(s)
                s = fin.readline ()
        if iFound == 0 :
            no_klass_headers.append (sF)
        else :
            print sF + " (" + str(iFound) + ")"
for sF in no_klass_headers :
    fout.write ("<No Klass>: " + sF + "\n")
print 'Klasses found:', len(klasses), '\nHeaders w/o classes:', len(no_klass_headers)
def CmpParent (a,b) :
    '''gets CClass, sort by parent type

    cmp-style comparator: parentless classes sort last; otherwise order by
    parent type name, then by own name.  A class that *is* the other's
    parent type sorts after it.  Returns -1/0/1.
    '''
    def _cmp (x, y) :
        # Python 2 cmp() equivalent - cmp() no longer exists in Python 3
        return (x > y) - (x < y)
    if a.parent == None and b.parent == None :
        return 0
    elif a.parent == None :
        return 1
    elif b.parent == None :
        return -1
    try :
        i = _cmp(a.parent[1],b.parent[1])
        if i == 0 :
            i = _cmp(a.name, b.name)
        elif _cmp(a.name, b.parent[1]) == 0 :
            #print a.name, ">", b.name
            return 1
        elif _cmp(b.name, a.parent[1]) == 0 :
            #print b.name, ">", a.name
            return -1
        return i
    except Exception :   # narrowed from a bare except; still best-effort
        print ("Sort Error: %s %s" % (str(a.parent), str(b.parent)))
        return 0
# Pre-sort with the cmp-style comparator (groups children near parents,
# but is not a full topological sort - the fixed-point loop below is).
klasses.sort(CmpParent)
# the sorting above does not sort everything, ensure parents are before childs
sorted = []          # class *names* already placed (NB: shadows builtin sorted())
sorted_klasses = []  # CClass objects in final parent-before-child order
# first put in externals
parents = {}
for k in klasses :
    if k.parent :
        parents[k.parent[1]] = 1
for k in klasses :
    if parents.has_key(k.name) :
        del parents[k.name]
# whatever remains is a parent defined outside this package
sorted.extend(parents.keys())
# sort the rest
while len(sorted_klasses) < len(klasses) :
    before = len(sorted_klasses)
    for k in klasses :
        sK = k.name
        sP = k.parent
        if sK in sorted :
            continue # don't add them twice
        elif k.parent is None :
            sorted.append(sK)
        elif k.parent[1] in sorted :
            # parent already placed, so this class may follow
            sorted.append(sK)
        else :
            continue
        #print sK
        sorted_klasses.append(k)
    if len(sorted_klasses) == before :
        # a whole pass made no progress: report leftovers and stop
        if len(sorted_klasses) < len(klasses) :
            unsorted = []
            for k in klasses :
                if not k.name in sorted :
                    unsorted.append(k.name)
            print string.join(unsorted, ", "), "not sorted?"
        break # avoid endless loop
klasses = sorted_klasses
def WritePython (fname) :
    """Write a Python sketch of every parsed class to *fname*.

    Meant to validate the parse result (the generated file should at least
    be syntactically valid Python).  Reads the module-global 'klasses'.
    """
    fpy = open (fname, "w")
    for klass in klasses :
        sParent = ""
        if klass.parent :
            sParent = " (" + klass.parent[1] + ")"
        fpy.write ('class ' + klass.name + sParent + " :\n")
        if len (klass.attrs) > 0 :
            fpy.write ('\tdef __init__ (self) :\n')
            for attr in klass.attrs :
                fpy.write ('\t\tself.' + attr[0] + " = None # " + attr[1] + "\n")
            # NOTE(review): signals are emitted inside __init__, so a class
            # with signals but no attrs writes none of them - verify intent
            if len (klass.signals) > 0 :
                fpy.write ('\t\t # Signals\n')
                for attr in klass.signals :
                    fpy.write ('\t\tself.' + attr[0] + " = None # " + attr[1] + "\n")
        for s in klass.methods.keys() :
            meth = klass.methods[s]
            fpy.write ('\t#returns: ' + meth.retval + '\n\tdef ' + meth.name + ' (')
            s1 = ''
            s2 = ''
            for par in meth.pars :
                s1 = s1 + par[0] + ', '
                s2 = s2 + par[1] + ', '
            # drop the trailing ", " from both lists
            if len(s1) > 0 :
                s1 = s1[:-2]
            if len(s2) > 0 :
                s2 = s2[:-2]
            fpy.write (s1 + ') :\n')
            fpy.write ('\t\t# ' + s2 + '\n')
            fpy.write ('\t\tpass\n')
        if len (klass.attrs) < 1 and len(klass.signals) < 1 and len(klass.methods) < 1 :
            fpy.write ('\tpass\n') # make it valid Python
    fpy.close ()   # bug fix: the handle was never closed / flushed
def WriteDia (fname) :
    """Write the parsed class hierarchy as a Dia UML diagram to *fname*.

    Reads the module-global 'klasses'.  Every class becomes a 'UML - Class'
    object; inheritance becomes 'UML - Generalization' connections.  Parents
    defined outside this package are drawn as extra yellow boxes.
    """
    # Raw Dia XML snippets, %-formatted below.
    sStartDiagram = '''<?xml version="1.0" encoding="UTF-8"?>
<dia:diagram xmlns:dia="http://www.lysator.liu.se/~alla/dia/">
<dia:layer name="Background" visible="true">'''
    sStartClass = '''
<dia:object type="UML - Class" version="0" id="O%d">
<dia:attribute name="elem_corner">
<dia:point val="%d,%d"/>
</dia:attribute>
<dia:attribute name="name">
<dia:string>#%s#</dia:string>
</dia:attribute>
<dia:attribute name="visible_attributes">
<dia:boolean val="true"/>
</dia:attribute>
<dia:attribute name="visible_operations">
<dia:boolean val="true"/>
</dia:attribute>'''
    sFillColorAttribute = '''
<dia:attribute name="fill_color">
<dia:color val="%s"/>
</dia:attribute>'''
    sStartAttributes = '''
<dia:attribute name="attributes">
<dia:composite type="umlattribute">'''
    sDataAttribute = '''
<dia:attribute name="name">
<dia:string>#%s#</dia:string>
</dia:attribute>
<dia:attribute name="type">
<dia:string>#%s#</dia:string>
</dia:attribute>'''
    sEndAttributes = '''
</dia:composite>
</dia:attribute>'''
    sStartOperations = '''
<dia:attribute name="operations">'''
    sStartOperation = '''
<dia:composite type="umloperation">
<dia:attribute name="name">
<dia:string>#%s#</dia:string>
</dia:attribute>
<dia:attribute name="type">
<dia:string>#%s#</dia:string>
</dia:attribute>
<dia:attribute name="parameters">
<dia:composite type="umlparameter">'''
    sDataParameter = '''
<dia:attribute name="name">
<dia:string>#%s#</dia:string>
</dia:attribute>
<dia:attribute name="type">
<dia:string>#%s#</dia:string>
</dia:attribute>'''
    sEndOperation = '''
</dia:composite>
</dia:attribute>
</dia:composite>'''
    sEndOperations = '''
</dia:attribute>'''
    sStartConnection = '''
<dia:object type="UML - Generalization" version="1" id="O%d">
<dia:attribute name="obj_pos">
<dia:point val="%d,%d"/>
</dia:attribute>
<dia:attribute name="orth_autoroute">
<dia:boolean val="true"/>
</dia:attribute>'''
    sOrthPoints = '''
<dia:attribute name="orth_points">
<dia:point val="%d,%d"/>
<dia:point val="%d,%d"/>
<dia:point val="%d,%d"/>
<dia:point val="%d,%d"/>
</dia:attribute>'''
    sEndObject = '''
</dia:object>'''
    sEndDiagram = '''
</dia:layer>
</dia:diagram>'''
    # NOTE(review): fdia is never closed; CPython flushes it at interpreter
    # exit, but an explicit close would be safer - confirm before relying on it.
    fdia = open (fname, "w")
    nObject = 0   # running Dia object id, shared by classes and connections
    x = 0
    y = 0
    dx = 10   # horizontal step between sibling boxes
    dy = 15   # vertical step between parent and child
    # maintain a dictionary of parents positions to at least place not everything above each other
    positions = {}     # class name -> (x, y) of its box
    connectFrom = {}   # class name -> (object id, parent name)
    externals = {}     # parent names not defined in this package
    fdia.write (sStartDiagram)
    for klass in klasses :
        if klass.parent : # add every parent ...
            parentName = klass.parent[1]
            if externals.has_key (parentName) :
                externals[parentName] += 1
            else :
                externals[parentName] = 1
    for klass in klasses :
        if externals.has_key (klass.name) : # ... but remove the internals
            del externals[klass.name]
    # write all 'external' parents
    for s in externals.keys() :
        externals[s] = (nObject, -1)
        fdia.write(sStartClass % (nObject, x, y, s))
        positions[s] = (x,y)
        x += dx
        # yellow marks a class from outside the package
        fdia.write(sFillColorAttribute % ("#ffff00",))
        # fixme: any more attributes?
        fdia.write (sEndObject)
        nObject += 1
    # one UML class box per parsed class
    for klass in klasses :
        parentName = ""
        if klass.parent :
            parentName = klass.parent[1]
        connectFrom[klass.name] = (nObject, parentName)
        if positions.has_key (parentName) :
            x = positions[parentName][0] # same x
            y = positions[parentName][1] + dy # y below
        else :
            x += dx
            y = dy
        #fpy.write ('class ' + klass.name + sParent + " :\n")
        fdia.write(sStartClass % (nObject, x, y, klass.name))
        positions[klass.name] = (x, y)
        if len (klass.attrs) > 0 :
            fdia.write (sStartAttributes)
            for attr in klass.attrs :
                fdia.write (sDataAttribute % (attr[0], attr[1]))
            fdia.write (sEndAttributes)
        # if len (klass.signals) > 0 :
        #     fpy.write ('\t\t # Signals\n')
        #     for attr in klass.signals :
        #         fpy.write ('\t\tself.' + attr[0] + " = None # " + attr[1] + "\n")
        # the difference between signals and methods is in the attributes
        if len (klass.signals) > 0 or len(klass.methods.keys()) > 0 :
            fdia.write (sStartOperations)
        for s in klass.methods.keys() :
            meth = klass.methods[s]
            fdia.write(sStartOperation % (meth.name, meth.retval))
            # first parameter is supposed to be 'this' pointer: leave out
            for par in meth.pars[1:] :
                fdia.write (sDataParameter % (par[0], par[1]))
            fdia.write (sEndOperation)
        if len (klass.signals) > 0 or len(klass.methods.keys()) > 0 :
            fdia.write (sEndOperations)
        fdia.write (sEndObject)
        nObject += 1
    # write all connections
    for sFrom in connectFrom.keys() :
        iFrom = connectFrom[sFrom][0]
        sTo = connectFrom[sFrom][1]
        if connectFrom.has_key (sTo) :
            iTo = connectFrom[sTo][0]
        elif externals.has_key(sTo) :
            iTo = externals[sTo][0]
        else :
            print "sFrom -> sTo?", sFrom, sTo
            continue # something wrong?
        nObject += 1
        #fdia.write ('\n\t<!-- %s : %s -->' % (sFrom, sTo))
        # just to give it some position (and stop Dia complaining)
        if positions.has_key (sTo) :
            x1, y1 = positions[sTo]
        else :
            x1, y1 = (dx, dy)
        if positions.has_key (sFrom) :
            x2, y2 = positions[sFrom]
        else :
            x2, y2 = (dx, dy)
        fdia.write (sStartConnection % (nObject, x1+dx/2, y1+dy/2,))
        fdia.write (sOrthPoints % (x1,y1, x1,(y1+y2)/2, x2,(y1+y2)/2, x2,y2 ))
        # and connect
        fdia.write ('''
<dia:connections>
<dia:connection handle="0" to="O%d" connection="6"/>
<dia:connection handle="1" to="O%d" connection="1"/>
</dia:connections>''' % (iTo, iFrom) )
        fdia.write (sEndObject)
    fdia.write (sEndDiagram)
    print len(connectFrom.keys()), " connections"
# Emit both outputs next to the scanned sources.
WritePython (sPkg + "-generated.py")
WriteDia (sPkg + "-generated.dia")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.